Diffstat (limited to 'arch')
313 files changed, 9551 insertions, 12507 deletions
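Note: this diff drops every per-architecture semaphore implementation under arch/: each Makefile and export list loses its semaphore entries, and each arch's semaphore.c is deleted outright in favour of a single shared counting semaphore. Assuming the replacement follows the classic pattern - a count guarded by a lock, with blocked tasks parked on a wait queue - its down()/up() semantics can be sketched in runnable userspace C (pthread names here stand in for the kernel's spinlock and wait-list machinery; this is not the kernel API):

    #include <pthread.h>

    struct sem {
        pthread_mutex_t lock;   /* stands in for the semaphore spinlock */
        pthread_cond_t  wait;   /* stands in for the kernel wait list */
        unsigned int    count;
    };

    static void sem_down(struct sem *s)      /* like down() */
    {
        pthread_mutex_lock(&s->lock);
        while (s->count == 0)                /* contended: sleep until up() */
            pthread_cond_wait(&s->wait, &s->lock);
        s->count--;
        pthread_mutex_unlock(&s->lock);
    }

    static void sem_up(struct sem *s)        /* like up() */
    {
        pthread_mutex_lock(&s->lock);
        s->count++;
        pthread_cond_signal(&s->wait);       /* wake one sleeper */
        pthread_mutex_unlock(&s->lock);
    }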
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index dccf05245d4d..ac706c1d7ada 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -7,7 +7,7 @@ EXTRA_AFLAGS := $(KBUILD_CFLAGS)
 EXTRA_CFLAGS := -Werror -Wno-sign-compare
 
 obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
-    irq_alpha.o signal.o setup.o ptrace.o time.o semaphore.o \
+    irq_alpha.o signal.o setup.o ptrace.o time.o \
     alpha_ksyms.o systbls.o err_common.o io.o
 
 obj-$(CONFIG_VGA_HOSE) += console.o
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index e9762a33b043..d96e742d4dc2 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -77,15 +77,6 @@ EXPORT_SYMBOL(__do_clear_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(__strnlen_user);
 
-/* Semaphore helper functions. */
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__up_wakeup);
-EXPORT_SYMBOL(down);
-EXPORT_SYMBOL(down_interruptible);
-EXPORT_SYMBOL(down_trylock);
-EXPORT_SYMBOL(up);
-
 /*
  * SMP-specific symbols.
  */
diff --git a/arch/alpha/kernel/semaphore.c b/arch/alpha/kernel/semaphore.c
deleted file mode 100644
index 8d2982aa1b8d..000000000000
--- a/arch/alpha/kernel/semaphore.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Alpha semaphore implementation.
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999, 2000 Richard Henderson
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-
-/*
- * This is basically the PPC semaphore scheme ported to use
- * the Alpha ll/sc sequences, so see the PPC code for
- * credits.
- */
-
-/*
- * Atomically update sem->count.
- * This does the equivalent of the following:
- *
- *    old_count = sem->count;
- *    tmp = MAX(old_count, 0) + incr;
- *    sem->count = tmp;
- *    return old_count;
- */
-static inline int __sem_update_count(struct semaphore *sem, int incr)
-{
-    long old_count, tmp = 0;
-
-    __asm__ __volatile__(
-    "1: ldl_l   %0,%2\n"
-    "   cmovgt  %0,%0,%1\n"
-    "   addl    %1,%3,%1\n"
-    "   stl_c   %1,%2\n"
-    "   beq     %1,2f\n"
-    "   mb\n"
-    ".subsection 2\n"
-    "2: br 1b\n"
-    ".previous"
-    : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
-    : "Ir" (incr), "1" (tmp), "m" (sem->count));
-
-    return old_count;
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- */
-
-void __sched
-__down_failed(struct semaphore *sem)
-{
-    struct task_struct *tsk = current;
-    DECLARE_WAITQUEUE(wait, tsk);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-    printk("%s(%d): down failed(%p)\n",
-           tsk->comm, task_pid_nr(tsk), sem);
-#endif
-
-    tsk->state = TASK_UNINTERRUPTIBLE;
-    wmb();
-    add_wait_queue_exclusive(&sem->wait, &wait);
-
-    /*
-     * Try to get the semaphore.  If the count is > 0, then we've
-     * got the semaphore; we decrement count and exit the loop.
-     * If the count is 0 or negative, we set it to -1, indicating
-     * that we are asleep, and then sleep.
-     */
-    while (__sem_update_count(sem, -1) <= 0) {
-        schedule();
-        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-    }
-    remove_wait_queue(&sem->wait, &wait);
-    tsk->state = TASK_RUNNING;
-
-    /*
-     * If there are any more sleepers, wake one of them up so
-     * that it can either get the semaphore, or set count to -1
-     * indicating that there are still processes sleeping.
-     */
-    wake_up(&sem->wait);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-    printk("%s(%d): down acquired(%p)\n",
-           tsk->comm, task_pid_nr(tsk), sem);
-#endif
-}
-
-int __sched
-__down_failed_interruptible(struct semaphore *sem)
-{
-    struct task_struct *tsk = current;
-    DECLARE_WAITQUEUE(wait, tsk);
-    long ret = 0;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-    printk("%s(%d): down failed(%p)\n",
-           tsk->comm, task_pid_nr(tsk), sem);
-#endif
-
-    tsk->state = TASK_INTERRUPTIBLE;
-    wmb();
-    add_wait_queue_exclusive(&sem->wait, &wait);
-
-    while (__sem_update_count(sem, -1) <= 0) {
-        if (signal_pending(current)) {
-            /*
-             * A signal is pending - give up trying.
-             * Set sem->count to 0 if it is negative,
-             * since we are no longer sleeping.
-             */
-            __sem_update_count(sem, 0);
-            ret = -EINTR;
-            break;
-        }
-        schedule();
-        set_task_state(tsk, TASK_INTERRUPTIBLE);
-    }
-
-    remove_wait_queue(&sem->wait, &wait);
-    tsk->state = TASK_RUNNING;
-    wake_up(&sem->wait);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-    printk("%s(%d): down %s(%p)\n",
-           current->comm, task_pid_nr(current),
-           (ret < 0 ? "interrupted" : "acquired"), sem);
-#endif
-    return ret;
-}
-
-void
-__up_wakeup(struct semaphore *sem)
-{
-    /*
-     * Note that we incremented count in up() before we came here,
-     * but that was ineffective since the result was <= 0, and
-     * any negative value of count is equivalent to 0.
-     * This ends up setting count to 1, unless count is now > 0
-     * (i.e. because some other cpu has called up() in the meantime),
-     * in which case we just increment count.
-     */
-    __sem_update_count(sem, 1);
-    wake_up(&sem->wait);
-}
-
-void __sched
-down(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-    CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-    printk("%s(%d): down(%p) <count=%d> from %p\n",
-           current->comm, task_pid_nr(current), sem,
-           atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-    __down(sem);
-}
-
-int __sched
-down_interruptible(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-    CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-    printk("%s(%d): down(%p) <count=%d> from %p\n",
-           current->comm, task_pid_nr(current), sem,
-           atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-    return __down_interruptible(sem);
-}
-
-int
-down_trylock(struct semaphore *sem)
-{
-    int ret;
-
-#ifdef WAITQUEUE_DEBUG
-    CHECK_MAGIC(sem->__magic);
-#endif
-
-    ret = __down_trylock(sem);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-    printk("%s(%d): down_trylock %s from %p\n",
-           current->comm, task_pid_nr(current),
-           ret ? "failed" : "acquired",
-           __builtin_return_address(0));
-#endif
-
-    return ret;
-}
-
-void
-up(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-    CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-    printk("%s(%d): up(%p) <count=%d> from %p\n",
-           current->comm, task_pid_nr(current), sem,
-           atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-    __up(sem);
-}
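Note: the ldl_l/stl_c loop in the deleted Alpha __sem_update_count() implements "old = count; count = max(count, 0) + incr; return old" atomically. The same semantics in portable C11, with a compare-exchange loop standing in for Alpha's load-locked/store-conditional retry (an illustration, not the kernel code):

    #include <stdatomic.h>

    static int sem_update_count(atomic_int *count, int incr)
    {
        int old = atomic_load(count);
        int new;

        do {
            /* max(old, 0) + incr, as the assembly's cmovgt/addl pair */
            new = (old > 0 ? old : 0) + incr;
            /* on failure, 'old' is reloaded with the current value */
        } while (!atomic_compare_exchange_weak(count, &old, new));

        return old;
    }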
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 00d44c6fbfe9..6235f72a14f0 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -7,7 +7,7 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
 # Object file lists.
 
 obj-y := compat.o entry-armv.o entry-common.o irq.o \
-    process.o ptrace.o semaphore.o setup.o signal.o \
+    process.o ptrace.o setup.o signal.o \
     sys_arm.o stacktrace.o time.o traps.o
 
 obj-$(CONFIG_ISA_DMA_API) += dma.o
diff --git a/arch/arm/kernel/semaphore.c b/arch/arm/kernel/semaphore.c
deleted file mode 100644
index 981fe5c6ccbe..000000000000
--- a/arch/arm/kernel/semaphore.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * ARM semaphore implementation, taken from
- *
- * i386 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Modified for ARM by Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation).
- *
- * "sleeping" and the contention routine ordering is
- * protected by the semaphore spinlock.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-    wake_up(&sem->wait);
-}
-
-static DEFINE_SPINLOCK(semaphore_lock);
-
-void __sched __down(struct semaphore * sem)
-{
-    struct task_struct *tsk = current;
-    DECLARE_WAITQUEUE(wait, tsk);
-    tsk->state = TASK_UNINTERRUPTIBLE;
-    add_wait_queue_exclusive(&sem->wait, &wait);
-
-    spin_lock_irq(&semaphore_lock);
-    sem->sleepers++;
-    for (;;) {
-        int sleepers = sem->sleepers;
-
-        /*
-         * Add "everybody else" into it. They aren't
-         * playing, because we own the spinlock.
-         */
-        if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-            sem->sleepers = 0;
-            break;
-        }
-        sem->sleepers = 1;    /* us - see -1 above */
-        spin_unlock_irq(&semaphore_lock);
-
-        schedule();
-        tsk->state = TASK_UNINTERRUPTIBLE;
-        spin_lock_irq(&semaphore_lock);
-    }
-    spin_unlock_irq(&semaphore_lock);
-    remove_wait_queue(&sem->wait, &wait);
-    tsk->state = TASK_RUNNING;
-    wake_up(&sem->wait);
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-    int retval = 0;
-    struct task_struct *tsk = current;
-    DECLARE_WAITQUEUE(wait, tsk);
-    tsk->state = TASK_INTERRUPTIBLE;
-    add_wait_queue_exclusive(&sem->wait, &wait);
-
-    spin_lock_irq(&semaphore_lock);
-    sem->sleepers++;
-    for (;;) {
-        int sleepers = sem->sleepers;
-
-        /*
-         * With signals pending, this turns into
-         * the trylock failure case - we won't be
-         * sleeping, and we can't get the lock as
-         * it has contention. Just correct the count
-         * and exit.
-         */
-        if (signal_pending(current)) {
-            retval = -EINTR;
-            sem->sleepers = 0;
-            atomic_add(sleepers, &sem->count);
-            break;
-        }
-
-        /*
-         * Add "everybody else" into it. They aren't
-         * playing, because we own the spinlock. The
-         * "-1" is because we're still hoping to get
-         * the lock.
-         */
-        if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-            sem->sleepers = 0;
-            break;
-        }
-        sem->sleepers = 1;    /* us - see -1 above */
-        spin_unlock_irq(&semaphore_lock);
-
-        schedule();
-        tsk->state = TASK_INTERRUPTIBLE;
-        spin_lock_irq(&semaphore_lock);
-    }
-    spin_unlock_irq(&semaphore_lock);
-    tsk->state = TASK_RUNNING;
-    remove_wait_queue(&sem->wait, &wait);
-    wake_up(&sem->wait);
-    return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore * sem)
-{
-    int sleepers;
-    unsigned long flags;
-
-    spin_lock_irqsave(&semaphore_lock, flags);
-    sleepers = sem->sleepers + 1;
-    sem->sleepers = 0;
-
-    /*
-     * Add "everybody else" and us into it. They aren't
-     * playing, because we own the spinlock.
-     */
-    if (!atomic_add_negative(sleepers, &sem->count))
-        wake_up(&sem->wait);
-
-    spin_unlock_irqrestore(&semaphore_lock, flags);
-    return 1;
-}
-
-/*
- * The semaphore operations have a special calling sequence that
- * allow us to do a simpler in-line version of them. These routines
- * need to convert that sequence back into the C sequence when
- * there is contention on the semaphore.
- *
- * ip contains the semaphore pointer on entry. Save the C-clobbered
- * registers (r0 to r3 and lr), but not ip, as we use it as a return
- * value in some cases..
- * To remain AAPCS compliant (64-bit stack align) we save r4 as well.
- */
-asm("  .section .sched.text,\"ax\",%progbits   \n\
-   .align  5                                   \n\
-   .globl  __down_failed                       \n\
-__down_failed:                                 \n\
-   stmfd   sp!, {r0 - r4, lr}                  \n\
-   mov     r0, ip                              \n\
-   bl      __down                              \n\
-   ldmfd   sp!, {r0 - r4, pc}                  \n\
-                                               \n\
-   .align  5                                   \n\
-   .globl  __down_interruptible_failed         \n\
-__down_interruptible_failed:                   \n\
-   stmfd   sp!, {r0 - r4, lr}                  \n\
-   mov     r0, ip                              \n\
-   bl      __down_interruptible                \n\
-   mov     ip, r0                              \n\
-   ldmfd   sp!, {r0 - r4, pc}                  \n\
-                                               \n\
-   .align  5                                   \n\
-   .globl  __down_trylock_failed               \n\
-__down_trylock_failed:                         \n\
-   stmfd   sp!, {r0 - r4, lr}                  \n\
-   mov     r0, ip                              \n\
-   bl      __down_trylock                      \n\
-   mov     ip, r0                              \n\
-   ldmfd   sp!, {r0 - r4, pc}                  \n\
-                                               \n\
-   .align  5                                   \n\
-   .globl  __up_wakeup                         \n\
-__up_wakeup:                                   \n\
-   stmfd   sp!, {r0 - r4, lr}                  \n\
-   mov     r0, ip                              \n\
-   bl      __up                                \n\
-   ldmfd   sp!, {r0 - r4, pc}                  \n\
-");
-
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_interruptible_failed);
-EXPORT_SYMBOL(__down_trylock_failed);
-EXPORT_SYMBOL(__up_wakeup);
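Note on the "sleepers" bookkeeping that the ARM file above (and the AVR32, IA-64, and m32r files below) inherits from the old i386 implementation: the inline fast path atomically decrements count, and each blocked task folds every other waiter's decrement back into count, so at most one decrement is outstanding while tasks sleep. A compilable userspace sketch of one pass of that loop, with the spinlock and schedule() elided and C11 atomics standing in for the kernel's atomic_add_negative() (names here are illustrative, not the kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct sleepers_sem {
        atomic_int count;   /* down() already subtracted 1 atomically */
        int sleepers;       /* guarded by the semaphore spinlock in real code */
    };

    /* The caller did sem->sleepers++ once on entry and retries this
     * pass, sleeping between attempts, until it returns true. */
    static bool down_contended_pass(struct sleepers_sem *sem)
    {
        int sleepers = sem->sleepers;

        /* Fold "everybody else" back into count; only our own -1
         * stays pending.  A non-negative result means we won. */
        if (atomic_fetch_add(&sem->count, sleepers - 1) + sleepers - 1 >= 0) {
            sem->sleepers = 0;
            return true;
        }
        sem->sleepers = 1;  /* just us - see the -1 above */
        return false;       /* caller schedules and tries again */
    }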
diff --git a/arch/avr32/kernel/Makefile b/arch/avr32/kernel/Makefile
index e4b6d122b033..18229d0d1861 100644
--- a/arch/avr32/kernel/Makefile
+++ b/arch/avr32/kernel/Makefile
@@ -6,7 +6,7 @@ extra-y := head.o vmlinux.lds
 
 obj-$(CONFIG_SUBARCH_AVR32B) += entry-avr32b.o
 obj-y += syscall_table.o syscall-stubs.o irq.o
-obj-y += setup.o traps.o semaphore.o ocd.o ptrace.o
+obj-y += setup.o traps.o ocd.o ptrace.o
 obj-y += signal.o sys_avr32.o process.o time.o
 obj-y += init_task.o switch_to.o cpu.o
 obj-$(CONFIG_MODULES) += module.o avr32_ksyms.o
diff --git a/arch/avr32/kernel/semaphore.c b/arch/avr32/kernel/semaphore.c
deleted file mode 100644
index 1e2705a05016..000000000000
--- a/arch/avr32/kernel/semaphore.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * AVR32 semaphore implementation.
- *
- * Copyright (C) 2004-2006 Atmel Corporation
- *
- * Based on linux/arch/i386/kernel/semaphore.c
- * Copyright (C) 1999 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-
-#include <asm/semaphore.h>
-#include <asm/atomic.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation).
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-    wake_up(&sem->wait);
-}
-EXPORT_SYMBOL(__up);
-
-void __sched __down(struct semaphore *sem)
-{
-    struct task_struct *tsk = current;
-    DECLARE_WAITQUEUE(wait, tsk);
-    unsigned long flags;
-
-    tsk->state = TASK_UNINTERRUPTIBLE;
-    spin_lock_irqsave(&sem->wait.lock, flags);
-    add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-    sem->sleepers++;
-    for (;;) {
-        int sleepers = sem->sleepers;
-
-        /*
-         * Add "everybody else" into it. They aren't
-         * playing, because we own the spinlock in
-         * the wait_queue_head.
-         */
-        if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
-            sem->sleepers = 0;
-            break;
-        }
-        sem->sleepers = 1;    /* us - see -1 above */
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-        schedule();
-
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        tsk->state = TASK_UNINTERRUPTIBLE;
-    }
-    remove_wait_queue_locked(&sem->wait, &wait);
-    wake_up_locked(&sem->wait);
-    spin_unlock_irqrestore(&sem->wait.lock, flags);
-    tsk->state = TASK_RUNNING;
-}
-EXPORT_SYMBOL(__down);
-
-int __sched __down_interruptible(struct semaphore *sem)
-{
-    int retval = 0;
-    struct task_struct *tsk = current;
-    DECLARE_WAITQUEUE(wait, tsk);
-    unsigned long flags;
-
-    tsk->state = TASK_INTERRUPTIBLE;
-    spin_lock_irqsave(&sem->wait.lock, flags);
-    add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-    sem->sleepers++;
-    for (;;) {
-        int sleepers = sem->sleepers;
-
-        /*
-         * With signals pending, this turns into the trylock
-         * failure case - we won't be sleeping, and we can't
-         * get the lock as it has contention. Just correct the
-         * count and exit.
-         */
-        if (signal_pending(current)) {
-            retval = -EINTR;
-            sem->sleepers = 0;
-            atomic_add(sleepers, &sem->count);
-            break;
-        }
-
-        /*
-         * Add "everybody else" into it. They aren't
-         * playing, because we own the spinlock in
-         * the wait_queue_head.
-         */
-        if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
-            sem->sleepers = 0;
-            break;
-        }
-        sem->sleepers = 1;    /* us - see -1 above */
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-        schedule();
-
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        tsk->state = TASK_INTERRUPTIBLE;
-    }
-    remove_wait_queue_locked(&sem->wait, &wait);
-    wake_up_locked(&sem->wait);
-    spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-    tsk->state = TASK_RUNNING;
-    return retval;
-}
-EXPORT_SYMBOL(__down_interruptible);
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 589c6aca4803..2dd1f300a5cf 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -31,10 +31,6 @@ config ZONE_DMA
 	bool
 	default y
 
-config SEMAPHORE_SLEEPERS
-	bool
-	default y
-
 config GENERIC_FIND_NEXT_BIT
 	bool
 	default y
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index 0bfbb269e350..053edff6c0d8 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -42,11 +42,6 @@ EXPORT_SYMBOL(ip_fast_csum);
 
 EXPORT_SYMBOL(kernel_thread);
 
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__down_interruptible);
-
 EXPORT_SYMBOL(is_in_rom);
 EXPORT_SYMBOL(bfin_return_from_exception);
 
diff --git a/arch/cris/kernel/Makefile b/arch/cris/kernel/Makefile
index c8e8ea570989..ee7bcd4d20b2 100644
--- a/arch/cris/kernel/Makefile
+++ b/arch/cris/kernel/Makefile
@@ -5,8 +5,7 @@
 
 extra-y := vmlinux.lds
 
-obj-y := process.o traps.o irq.o ptrace.o setup.o \
-    time.o sys_cris.o semaphore.o
+obj-y := process.o traps.o irq.o ptrace.o setup.o time.o sys_cris.o
 
 obj-$(CONFIG_MODULES) += crisksyms.o
 obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/cris/kernel/crisksyms.c b/arch/cris/kernel/crisksyms.c
index 62f0e752915a..7ac000f6a888 100644
--- a/arch/cris/kernel/crisksyms.c
+++ b/arch/cris/kernel/crisksyms.c
@@ -9,7 +9,6 @@
 #include <linux/string.h>
 #include <linux/tty.h>
 
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -49,12 +48,6 @@ EXPORT_SYMBOL(__negdi2);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 
-/* Semaphore functions */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-
 /* Userspace access functions */
 EXPORT_SYMBOL(__copy_user_zeroing);
 EXPORT_SYMBOL(__copy_user);
diff --git a/arch/cris/kernel/semaphore.c b/arch/cris/kernel/semaphore.c
deleted file mode 100644
index f137a439041f..000000000000
--- a/arch/cris/kernel/semaphore.c
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/sched.h>
-#include <asm/semaphore-helper.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation).
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
-    wake_one_more(sem);
-    wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-#define DOWN_VAR \
-    struct task_struct *tsk = current; \
-    wait_queue_t wait; \
-    init_waitqueue_entry(&wait, tsk);
-
-#define DOWN_HEAD(task_state) \
- \
- \
-    tsk->state = (task_state); \
-    add_wait_queue(&sem->wait, &wait); \
- \
-    /* \
-     * Ok, we're set up.  sem->count is known to be less than zero \
-     * so we must wait. \
-     * \
-     * We can let go the lock for purposes of waiting. \
-     * We re-acquire it after awaking so as to protect \
-     * all semaphore operations. \
-     * \
-     * If "up()" is called before we call waking_non_zero() then \
-     * we will catch it right away.  If it is called later then \
-     * we will have to go through a wakeup cycle to catch it. \
-     * \
-     * Multiple waiters contend for the semaphore lock to see \
-     * who gets to gate through and who has to wait some more. \
-     */ \
-    for (;;) {
-
-#define DOWN_TAIL(task_state) \
-        tsk->state = (task_state); \
-    } \
-    tsk->state = TASK_RUNNING; \
-    remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-    DOWN_VAR
-    DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-    if (waking_non_zero(sem))
-        break;
-    schedule();
-    DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-    int ret = 0;
-    DOWN_VAR
-    DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-    ret = waking_non_zero_interruptible(sem, tsk);
-    if (ret)
-    {
-        if (ret == 1)
-            /* ret != 0 only if we get interrupted -arca */
-            ret = 0;
-        break;
-    }
-    schedule();
-    DOWN_TAIL(TASK_INTERRUPTIBLE)
-    return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-    return waking_non_zero_trylock(sem);
-}
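Note: the CRIS file above (and the h8300 copy further down) uses the older "waking"-token scheme from <asm/semaphore-helper.h> rather than the sleepers trick: __up() banks a wakeup token, all waiters wake, and only the one that claims the token gates through. A compilable sketch of that gate, under the assumption that waking_non_zero() is essentially an atomic take-if-positive (the real helpers live in <asm/semaphore-helper.h>, which is outside this arch/-limited view):

    #include <stdatomic.h>

    /* wake_one_more() in __up() amounts to atomic_fetch_add(waking, 1). */
    static int waking_non_zero_sketch(atomic_int *waking)
    {
        int w = atomic_load(waking);

        while (w > 0) {
            /* claim one token; on failure 'w' is reloaded */
            if (atomic_compare_exchange_weak(waking, &w, w - 1))
                return 1;   /* we gate through: semaphore acquired */
        }
        return 0;           /* no token yet: go back to sleep */
    }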
diff --git a/arch/frv/kernel/Makefile b/arch/frv/kernel/Makefile
index e8f73ed28b52..c36f70b6699a 100644
--- a/arch/frv/kernel/Makefile
+++ b/arch/frv/kernel/Makefile
@@ -9,7 +9,7 @@ extra-y:= head.o init_task.o vmlinux.lds
 
 obj-y := $(heads-y) entry.o entry-table.o break.o switch_to.o kernel_thread.o \
     kernel_execve.o process.o traps.o ptrace.o signal.o dma.o \
-    sys_frv.o time.o semaphore.o setup.o frv_ksyms.o \
+    sys_frv.o time.o setup.o frv_ksyms.o \
     debug-stub.o irq.o sleep.o uaccess.o
 
 obj-$(CONFIG_GDBSTUB) += gdb-stub.o gdb-io.o
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
index f772704b3d28..0316b3c50eff 100644
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/hardirq.h>
 #include <asm/cacheflush.h>
diff --git a/arch/frv/kernel/semaphore.c b/arch/frv/kernel/semaphore.c
deleted file mode 100644
index 7ee3a147b471..000000000000
--- a/arch/frv/kernel/semaphore.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/* semaphore.c: FR-V semaphores
- *
- * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- * - Derived from lib/rwsem-spinlock.c
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/module.h>
-#include <asm/semaphore.h>
-
-struct sem_waiter {
-    struct list_head list;
-    struct task_struct *task;
-};
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-void semtrace(struct semaphore *sem, const char *str)
-{
-    if (sem->debug)
-        printk("[%d] %s({%d,%d})\n",
-               current->pid,
-               str,
-               sem->counter,
-               list_empty(&sem->wait_list) ? 0 : 1);
-}
-#else
-#define semtrace(SEM,STR) do { } while(0)
-#endif
-
-/*
- * wait for a token to be granted from a semaphore
- * - entered with lock held and interrupts disabled
- */
-void __down(struct semaphore *sem, unsigned long flags)
-{
-    struct task_struct *tsk = current;
-    struct sem_waiter waiter;
-
-    semtrace(sem, "Entering __down");
-
-    /* set up my own style of waitqueue */
-    waiter.task = tsk;
-    get_task_struct(tsk);
-
-    list_add_tail(&waiter.list, &sem->wait_list);
-
-    /* we don't need to touch the semaphore struct anymore */
-    spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-    /* wait to be given the semaphore */
-    set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-
-    for (;;) {
-        if (list_empty(&waiter.list))
-            break;
-        schedule();
-        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-    }
-
-    tsk->state = TASK_RUNNING;
-    semtrace(sem, "Leaving __down");
-}
-
-EXPORT_SYMBOL(__down);
-
-/*
- * interruptibly wait for a token to be granted from a semaphore
- * - entered with lock held and interrupts disabled
- */
-int __down_interruptible(struct semaphore *sem, unsigned long flags)
-{
-    struct task_struct *tsk = current;
-    struct sem_waiter waiter;
-    int ret;
-
-    semtrace(sem, "Entering __down_interruptible");
-
-    /* set up my own style of waitqueue */
-    waiter.task = tsk;
-    get_task_struct(tsk);
-
-    list_add_tail(&waiter.list, &sem->wait_list);
-
-    /* we don't need to touch the semaphore struct anymore */
-    set_task_state(tsk, TASK_INTERRUPTIBLE);
-
-    spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-    /* wait to be given the semaphore */
-    ret = 0;
-    for (;;) {
-        if (list_empty(&waiter.list))
-            break;
-        if (unlikely(signal_pending(current)))
-            goto interrupted;
-        schedule();
-        set_task_state(tsk, TASK_INTERRUPTIBLE);
-    }
-
- out:
-    tsk->state = TASK_RUNNING;
-    semtrace(sem, "Leaving __down_interruptible");
-    return ret;
-
- interrupted:
-    spin_lock_irqsave(&sem->wait_lock, flags);
-
-    if (!list_empty(&waiter.list)) {
-        list_del(&waiter.list);
-        ret = -EINTR;
-    }
-
-    spin_unlock_irqrestore(&sem->wait_lock, flags);
-    if (ret == -EINTR)
-        put_task_struct(current);
-    goto out;
-}
-
-EXPORT_SYMBOL(__down_interruptible);
-
-/*
- * release a single token back to a semaphore
- * - entered with lock held and interrupts disabled
- */
-void __up(struct semaphore *sem)
-{
-    struct task_struct *tsk;
-    struct sem_waiter *waiter;
-
-    semtrace(sem, "Entering __up");
-
-    /* grant the token to the process at the front of the queue */
-    waiter = list_entry(sem->wait_list.next, struct sem_waiter, list);
-
-    /* We must be careful not to touch 'waiter' after we set ->task = NULL.
-     * It is allocated on the waiter's stack and may become invalid at
-     * any time after that point (due to a wakeup from another source).
-     */
-    list_del_init(&waiter->list);
-    tsk = waiter->task;
-    mb();
-    waiter->task = NULL;
-    wake_up_process(tsk);
-    put_task_struct(tsk);
-
-    semtrace(sem, "Leaving __up");
-}
-
-EXPORT_SYMBOL(__up);
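Note: unlike the counter-based schemes, the FRV version above (derived from lib/rwsem-spinlock.c) hands the token directly to the head of a FIFO list, so a woken task never re-contends. A minimal sketch of that hand-off idea, with plain pointers and a flag standing in for the kernel's list_head and wake_up_process() machinery (illustrative only):

    struct waiter {
        struct waiter *next;
        int granted;            /* set by up(), observed by the sleeper */
    };

    struct handoff_sem {
        int count;              /* adjusted under wait_lock in the real code */
        struct waiter *head;    /* FIFO of blocked tasks */
    };

    static void handoff_up(struct handoff_sem *sem)
    {
        struct waiter *w = sem->head;

        if (!w) {
            sem->count++;       /* nobody waiting: bank the token */
            return;
        }
        sem->head = w->next;    /* dequeue the head waiter... */
        w->granted = 1;         /* ...and grant it the token directly */
    }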
diff --git a/arch/h8300/kernel/Makefile b/arch/h8300/kernel/Makefile
index 874f6aefee65..6c248c3c5c3b 100644
--- a/arch/h8300/kernel/Makefile
+++ b/arch/h8300/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y := vmlinux.lds
 
 obj-y := process.o traps.o ptrace.o irq.o \
-    sys_h8300.o time.o semaphore.o signal.o \
+    sys_h8300.o time.o signal.o \
     setup.o gpio.o init_task.o syscalls.o \
     entry.o
 
diff --git a/arch/h8300/kernel/h8300_ksyms.c b/arch/h8300/kernel/h8300_ksyms.c
index d1b15267ac81..6866bd9c7fb4 100644
--- a/arch/h8300/kernel/h8300_ksyms.c
+++ b/arch/h8300/kernel/h8300_ksyms.c
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/current.h>
 #include <asm/gpio.h>
diff --git a/arch/h8300/kernel/semaphore.c b/arch/h8300/kernel/semaphore.c
deleted file mode 100644
index d12cbbfe6ebd..000000000000
--- a/arch/h8300/kernel/semaphore.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <asm/semaphore-helper.h>
-
-#ifndef CONFIG_RMW_INSNS
-spinlock_t semaphore_wake_lock;
-#endif
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation).
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
-    wake_one_more(sem);
-    wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-
-#define DOWN_HEAD(task_state) \
- \
- \
-    current->state = (task_state); \
-    add_wait_queue(&sem->wait, &wait); \
- \
-    /* \
-     * Ok, we're set up.  sem->count is known to be less than zero \
-     * so we must wait. \
-     * \
-     * We can let go the lock for purposes of waiting. \
-     * We re-acquire it after awaking so as to protect \
-     * all semaphore operations. \
-     * \
-     * If "up()" is called before we call waking_non_zero() then \
-     * we will catch it right away.  If it is called later then \
-     * we will have to go through a wakeup cycle to catch it. \
-     * \
-     * Multiple waiters contend for the semaphore lock to see \
-     * who gets to gate through and who has to wait some more. \
-     */ \
-    for (;;) {
-
-#define DOWN_TAIL(task_state) \
-        current->state = (task_state); \
-    } \
-    current->state = TASK_RUNNING; \
-    remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-    DECLARE_WAITQUEUE(wait, current);
-
-    DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-    if (waking_non_zero(sem))
-        break;
-    schedule();
-    DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-    DECLARE_WAITQUEUE(wait, current);
-    int ret = 0;
-
-    DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-    ret = waking_non_zero_interruptible(sem, current);
-    if (ret)
-    {
-        if (ret == 1)
-            /* ret != 0 only if we get interrupted -arca */
-            ret = 0;
-        break;
-    }
-    schedule();
-    DOWN_TAIL(TASK_INTERRUPTIBLE)
-    return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-    return waking_non_zero_trylock(sem);
-}
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 33e5a598672d..13fd10e8699e 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -6,7 +6,7 @@ extra-y := head.o init_task.o vmlinux.lds
 
 obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
     irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
-    salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
+    salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
     unwind.o mca.o mca_asm.o topology.o
 
 obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 8e7193d55528..6da1f20d7372 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -19,12 +19,6 @@ EXPORT_SYMBOL_GPL(empty_zero_page);
 EXPORT_SYMBOL(ip_fast_csum);		/* hand-coded assembly */
 EXPORT_SYMBOL(csum_ipv6_magic);
 
-#include <asm/semaphore.h>
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
-
 #include <asm/page.h>
 EXPORT_SYMBOL(clear_page);
 
diff --git a/arch/ia64/kernel/semaphore.c b/arch/ia64/kernel/semaphore.c
deleted file mode 100644
index 2724ef3fbae2..000000000000
--- a/arch/ia64/kernel/semaphore.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * IA-64 semaphore implementation (derived from x86 version).
- *
- * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
- *	David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-/*
- * Semaphores are implemented using a two-way counter: The "count"
- * variable is decremented for each process that tries to acquire the
- * semaphore, while the "sleepers" variable is a count of such
- * acquires.
- *
- * Notably, the inline "up()" and "down()" functions can efficiently
- * test if they need to do any extra work (up needs to do something
- * only if count was negative before the increment operation).
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is contention
- * on the lock, and as such all this is the "non-critical" part of the
- * whole semaphore business. The critical part is the inline stuff in
- * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
- */
-#include <linux/sched.h>
-#include <linux/init.h>
-
-#include <asm/errno.h>
-#include <asm/semaphore.h>
-
-/*
- * Logic:
- *  - Only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - When we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleepers" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void
-__up (struct semaphore *sem)
-{
-    wake_up(&sem->wait);
-}
-
-void __sched __down (struct semaphore *sem)
-{
-    struct task_struct *tsk = current;
-    DECLARE_WAITQUEUE(wait, tsk);
-    unsigned long flags;
-
-    tsk->state = TASK_UNINTERRUPTIBLE;
-    spin_lock_irqsave(&sem->wait.lock, flags);
-    add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-    sem->sleepers++;
-    for (;;) {
-        int sleepers = sem->sleepers;
-
-        /*
-         * Add "everybody else" into it. They aren't
-         * playing, because we own the spinlock in
-         * the wait_queue_head.
-         */
-        if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-            sem->sleepers = 0;
-            break;
-        }
-        sem->sleepers = 1;    /* us - see -1 above */
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-        schedule();
-
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        tsk->state = TASK_UNINTERRUPTIBLE;
-    }
-    remove_wait_queue_locked(&sem->wait, &wait);
-    wake_up_locked(&sem->wait);
-    spin_unlock_irqrestore(&sem->wait.lock, flags);
-    tsk->state = TASK_RUNNING;
-}
-
-int __sched __down_interruptible (struct semaphore * sem)
-{
-    int retval = 0;
-    struct task_struct *tsk = current;
-    DECLARE_WAITQUEUE(wait, tsk);
-    unsigned long flags;
-
-    tsk->state = TASK_INTERRUPTIBLE;
-    spin_lock_irqsave(&sem->wait.lock, flags);
-    add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-    sem->sleepers++;
-    for (;;) {
-        int sleepers = sem->sleepers;
-
-        /*
-         * With signals pending, this turns into
-         * the trylock failure case - we won't be
-         * sleeping, and we can't get the lock as
-         * it has contention. Just correct the count
-         * and exit.
-         */
-        if (signal_pending(current)) {
-            retval = -EINTR;
-            sem->sleepers = 0;
-            atomic_add(sleepers, &sem->count);
-            break;
-        }
-
-        /*
-         * Add "everybody else" into it. They aren't
-         * playing, because we own the spinlock in
-         * wait_queue_head. The "-1" is because we're
-         * still hoping to get the semaphore.
-         */
-        if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-            sem->sleepers = 0;
-            break;
-        }
-        sem->sleepers = 1;    /* us - see -1 above */
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-        schedule();
-
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        tsk->state = TASK_INTERRUPTIBLE;
-    }
-    remove_wait_queue_locked(&sem->wait, &wait);
-    wake_up_locked(&sem->wait);
-    spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-    tsk->state = TASK_RUNNING;
-    return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for having decremented the
- * count.
- */
-int
-__down_trylock (struct semaphore *sem)
-{
-    unsigned long flags;
-    int sleepers;
-
-    spin_lock_irqsave(&sem->wait.lock, flags);
-    sleepers = sem->sleepers + 1;
-    sem->sleepers = 0;
-
-    /*
-     * Add "everybody else" and us into it. They aren't
-     * playing, because we own the spinlock in the
-     * wait_queue_head.
-     */
-    if (!atomic_add_negative(sleepers, &sem->count)) {
-        wake_up_locked(&sem->wait);
-    }
-
-    spin_unlock_irqrestore(&sem->wait.lock, flags);
-    return 1;
-}
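One subtlety in the IA-64 __down_trylock() above (the ARM version earlier has the same shape): the inline fast path already decremented count before the slow path runs, so on failure the function must add back its own -1 plus any recorded sleepers, waking a sleeper if that pushes count non-negative again. A sketch of just that fix-up, locking elided and C11 atomics standing in for atomic_add_negative() (illustrative, not the kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct sleepers_sem2 {
        atomic_int count;
        int sleepers;
    };

    /* Returns nonzero, matching the kernel's "trylock failed" convention. */
    static int down_trylock_slow(struct sleepers_sem2 *sem, bool *wake)
    {
        int sleepers = sem->sleepers + 1;   /* everybody else, plus our -1 */

        sem->sleepers = 0;
        /* Fold all pending decrements back in; if count recovers to
         * >= 0, a blocked task can now win, so the caller wakes one. */
        *wake = atomic_fetch_add(&sem->count, sleepers) + sleepers >= 0;
        return 1;
    }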
diff --git a/arch/m32r/kernel/Makefile b/arch/m32r/kernel/Makefile
index e97e26e87c9e..09200d4886e3 100644
--- a/arch/m32r/kernel/Makefile
+++ b/arch/m32r/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y := head.o init_task.o vmlinux.lds
 
 obj-y := process.o entry.o traps.o align.o irq.o setup.o time.o \
-    m32r_ksyms.o sys_m32r.o semaphore.o signal.o ptrace.o
+    m32r_ksyms.o sys_m32r.o signal.o ptrace.o
 
 obj-$(CONFIG_SMP) += smp.o smpboot.o
 obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index 41a4c95e06d6..e6709fe950ba 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -7,7 +7,6 @@
 #include <linux/interrupt.h>
 #include <linux/string.h>
 
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -22,10 +21,6 @@ EXPORT_SYMBOL(dump_fpu);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down_trylock);
 
 /* Networking helper routines. */
 /* Delay loops */
diff --git a/arch/m32r/kernel/semaphore.c b/arch/m32r/kernel/semaphore.c deleted file mode 100644 index 940c2d37cfd1..000000000000 --- a/arch/m32r/kernel/semaphore.c +++ /dev/null | |||
@@ -1,185 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/m32r/semaphore.c | ||
3 | * orig : i386 2.6.4 | ||
4 | * | ||
5 | * M32R semaphore implementation. | ||
6 | * | ||
7 | * Copyright (c) 2002 - 2004 Hitoshi Yamamoto | ||
8 | */ | ||
9 | |||
10 | /* | ||
11 | * i386 semaphore implementation. | ||
12 | * | ||
13 | * (C) Copyright 1999 Linus Torvalds | ||
14 | * | ||
15 | * Portions Copyright 1999 Red Hat, Inc. | ||
16 | * | ||
17 | * This program is free software; you can redistribute it and/or | ||
18 | * modify it under the terms of the GNU General Public License | ||
19 | * as published by the Free Software Foundation; either version | ||
20 | * 2 of the License, or (at your option) any later version. | ||
21 | * | ||
22 | * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org> | ||
23 | */ | ||
24 | #include <linux/sched.h> | ||
25 | #include <linux/err.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <asm/semaphore.h> | ||
28 | |||
29 | /* | ||
30 | * Semaphores are implemented using a two-way counter: | ||
31 | * The "count" variable is decremented for each process | ||
32 | * that tries to acquire the semaphore, while the "sleeping" | ||
33 | * variable is a count of such acquires. | ||
34 | * | ||
35 | * Notably, the inline "up()" and "down()" functions can | ||
36 | * efficiently test if they need to do any extra work (up | ||
37 | * needs to do something only if count was negative before | ||
38 | * the increment operation). | ||
39 | * | ||
40 | * "sleeping" and the contention routine ordering is protected | ||
41 | * by the spinlock in the semaphore's waitqueue head. | ||
42 | * | ||
43 | * Note that these functions are only called when there is | ||
44 | * contention on the lock, and as such all this is the | ||
45 | * "non-critical" part of the whole semaphore business. The | ||
46 | * critical part is the inline stuff in <asm/semaphore.h> | ||
47 | * where we want to avoid any extra jumps and calls. | ||
48 | */ | ||
49 | |||
50 | /* | ||
51 | * Logic: | ||
52 | * - only on a boundary condition do we need to care. When we go | ||
53 | * from a negative count to a non-negative, we wake people up. | ||
54 | * - when we go from a non-negative count to a negative, we must | ||
55 | * (a) synchronize with the "sleepers" count and (b) make sure | ||
56 | * that we're on the wakeup list before we synchronize so that | ||
57 | * we cannot lose wakeup events. | ||
58 | */ | ||
59 | |||
60 | asmlinkage void __up(struct semaphore *sem) | ||
61 | { | ||
62 | wake_up(&sem->wait); | ||
63 | } | ||
64 | |||
65 | asmlinkage void __sched __down(struct semaphore * sem) | ||
66 | { | ||
67 | struct task_struct *tsk = current; | ||
68 | DECLARE_WAITQUEUE(wait, tsk); | ||
69 | unsigned long flags; | ||
70 | |||
71 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
72 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
73 | add_wait_queue_exclusive_locked(&sem->wait, &wait); | ||
74 | |||
75 | sem->sleepers++; | ||
76 | for (;;) { | ||
77 | int sleepers = sem->sleepers; | ||
78 | |||
79 | /* | ||
80 | * Add "everybody else" into it. They aren't | ||
81 | * playing, because we own the spinlock in | ||
82 | * the wait_queue_head. | ||
83 | */ | ||
84 | if (!atomic_add_negative(sleepers - 1, &sem->count)) { | ||
85 | sem->sleepers = 0; | ||
86 | break; | ||
87 | } | ||
88 | sem->sleepers = 1; /* us - see -1 above */ | ||
89 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
90 | |||
91 | schedule(); | ||
92 | |||
93 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
94 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
95 | } | ||
96 | remove_wait_queue_locked(&sem->wait, &wait); | ||
97 | wake_up_locked(&sem->wait); | ||
98 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
99 | tsk->state = TASK_RUNNING; | ||
100 | } | ||
101 | |||
102 | asmlinkage int __sched __down_interruptible(struct semaphore * sem) | ||
103 | { | ||
104 | int retval = 0; | ||
105 | struct task_struct *tsk = current; | ||
106 | DECLARE_WAITQUEUE(wait, tsk); | ||
107 | unsigned long flags; | ||
108 | |||
109 | tsk->state = TASK_INTERRUPTIBLE; | ||
110 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
111 | add_wait_queue_exclusive_locked(&sem->wait, &wait); | ||
112 | |||
113 | sem->sleepers++; | ||
114 | for (;;) { | ||
115 | int sleepers = sem->sleepers; | ||
116 | |||
117 | /* | ||
118 | * With signals pending, this turns into | ||
119 | * the trylock failure case - we won't be | ||
120 | * sleeping, and we can't get the lock as | ||
121 | * it has contention. Just correct the count | ||
122 | * and exit. | ||
123 | */ | ||
124 | if (signal_pending(current)) { | ||
125 | retval = -EINTR; | ||
126 | sem->sleepers = 0; | ||
127 | atomic_add(sleepers, &sem->count); | ||
128 | break; | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * Add "everybody else" into it. They aren't | ||
133 | * playing, because we own the spinlock in | ||
134 | * wait_queue_head. The "-1" is because we're | ||
135 | * still hoping to get the semaphore. | ||
136 | */ | ||
137 | if (!atomic_add_negative(sleepers - 1, &sem->count)) { | ||
138 | sem->sleepers = 0; | ||
139 | break; | ||
140 | } | ||
141 | sem->sleepers = 1; /* us - see -1 above */ | ||
142 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
143 | |||
144 | schedule(); | ||
145 | |||
146 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
147 | tsk->state = TASK_INTERRUPTIBLE; | ||
148 | } | ||
149 | remove_wait_queue_locked(&sem->wait, &wait); | ||
150 | wake_up_locked(&sem->wait); | ||
151 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
152 | |||
153 | tsk->state = TASK_RUNNING; | ||
154 | return retval; | ||
155 | } | ||
156 | |||
157 | /* | ||
158 | * Trylock failed - make sure we correct for | ||
159 | * having decremented the count. | ||
160 | * | ||
161 | * We could have done the trylock with a | ||
162 | * single "cmpxchg" without failure cases, | ||
163 | * but then it wouldn't work on a 386. | ||
164 | */ | ||
165 | asmlinkage int __down_trylock(struct semaphore * sem) | ||
166 | { | ||
167 | int sleepers; | ||
168 | unsigned long flags; | ||
169 | |||
170 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
171 | sleepers = sem->sleepers + 1; | ||
172 | sem->sleepers = 0; | ||
173 | |||
174 | /* | ||
175 | * Add "everybody else" and us into it. They aren't | ||
176 | * playing, because we own the spinlock in the | ||
177 | * wait_queue_head. | ||
178 | */ | ||
179 | if (!atomic_add_negative(sleepers, &sem->count)) { | ||
180 | wake_up_locked(&sem->wait); | ||
181 | } | ||
182 | |||
183 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
184 | return 1; | ||
185 | } | ||
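To make the count/sleepers bookkeeping in the file above concrete, here is a user-space model of one pass through the __down() loop. This is a sketch only: sem_model, add_negative() and down_attempt() are invented stand-ins, and the real atomic_add_negative() is a single atomic operation rather than two plain statements.

#include <stdio.h>

/* Toy model of the i386-style scheme the m32r code copied: "count"
 * goes negative under contention, "sleepers" counts queued tasks. */
struct sem_model {
	int count;		/* free tokens when >= 0 */
	int sleepers;
};

/* stand-in for atomic_add_negative(): add v, report result < 0 */
static int add_negative(int v, int *count)
{
	*count += v;
	return *count < 0;
}

/* one pass of the __down() loop: fold "everybody else"
 * (sleepers - 1) into count; non-negative result means we own it */
static int down_attempt(struct sem_model *sem)
{
	int sleepers = sem->sleepers;

	if (!add_negative(sleepers - 1, &sem->count)) {
		sem->sleepers = 0;	/* acquired */
		return 1;
	}
	sem->sleepers = 1;		/* us - see the -1 above */
	return 0;			/* would schedule() and retry */
}

int main(void)
{
	/* count started at 1; two callers each decremented it on the
	 * fast path (1 -> 0 -> -1), the second one entered __down()
	 * and bumped sleepers */
	struct sem_model sem = { .count = -1, .sleepers = 1 };

	printf("attempt:  %s (count=%d sleepers=%d)\n",
	       down_attempt(&sem) ? "acquired" : "sleep",
	       sem.count, sem.sleepers);

	sem.count++;			/* up(): fast-path increment */
	printf("after up: %s (count=%d sleepers=%d)\n",
	       down_attempt(&sem) ? "acquired" : "sleep",
	       sem.count, sem.sleepers);
	return 0;
}

The point of the "sleepers - 1" fold is that every other waiter's fast-path decrement is cancelled back out of count, so exactly one waiter sees a non-negative result per up().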
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile index a806208c7fb5..7a62a718143b 100644 --- a/arch/m68k/kernel/Makefile +++ b/arch/m68k/kernel/Makefile | |||
@@ -10,7 +10,7 @@ endif | |||
10 | extra-y += vmlinux.lds | 10 | extra-y += vmlinux.lds |
11 | 11 | ||
12 | obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \ | 12 | obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \ |
13 | sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o devres.o | 13 | sys_m68k.o time.o setup.o m68k_ksyms.o devres.o |
14 | 14 | ||
15 | devres-y = ../../../kernel/irq/devres.o | 15 | devres-y = ../../../kernel/irq/devres.o |
16 | 16 | ||
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c index 6fc69c74fe2e..d900e77e5363 100644 --- a/arch/m68k/kernel/m68k_ksyms.c +++ b/arch/m68k/kernel/m68k_ksyms.c | |||
@@ -1,5 +1,4 @@ | |||
1 | #include <linux/module.h> | 1 | #include <linux/module.h> |
2 | #include <asm/semaphore.h> | ||
3 | 2 | ||
4 | asmlinkage long long __ashldi3 (long long, int); | 3 | asmlinkage long long __ashldi3 (long long, int); |
5 | asmlinkage long long __ashrdi3 (long long, int); | 4 | asmlinkage long long __ashrdi3 (long long, int); |
@@ -15,8 +14,3 @@ EXPORT_SYMBOL(__ashrdi3); | |||
15 | EXPORT_SYMBOL(__lshrdi3); | 14 | EXPORT_SYMBOL(__lshrdi3); |
16 | EXPORT_SYMBOL(__muldi3); | 15 | EXPORT_SYMBOL(__muldi3); |
17 | 16 | ||
18 | EXPORT_SYMBOL(__down_failed); | ||
19 | EXPORT_SYMBOL(__down_failed_interruptible); | ||
20 | EXPORT_SYMBOL(__down_failed_trylock); | ||
21 | EXPORT_SYMBOL(__up_wakeup); | ||
22 | |||
diff --git a/arch/m68k/kernel/semaphore.c b/arch/m68k/kernel/semaphore.c deleted file mode 100644 index d12cbbfe6ebd..000000000000 --- a/arch/m68k/kernel/semaphore.c +++ /dev/null | |||
@@ -1,132 +0,0 @@ | |||
1 | /* | ||
2 | * Generic semaphore code. Buyer beware. Do your own | ||
3 | * specific changes in <asm/semaphore-helper.h> | ||
4 | */ | ||
5 | |||
6 | #include <linux/sched.h> | ||
7 | #include <linux/init.h> | ||
8 | #include <asm/semaphore-helper.h> | ||
9 | |||
10 | #ifndef CONFIG_RMW_INSNS | ||
11 | spinlock_t semaphore_wake_lock; | ||
12 | #endif | ||
13 | |||
14 | /* | ||
15 | * Semaphores are implemented using a two-way counter: | ||
16 | * The "count" variable is decremented for each process | ||
17 | * that tries to sleep, while the "waking" variable is | ||
18 | * incremented when the "up()" code goes to wake up waiting | ||
19 | * processes. | ||
20 | * | ||
21 | * Notably, the inline "up()" and "down()" functions can | ||
22 | * efficiently test if they need to do any extra work (up | ||
23 | * needs to do something only if count was negative before | ||
24 | * the increment operation). | ||
25 | * | ||
26 | * waking_non_zero() (from asm/semaphore.h) must execute | ||
27 | * atomically. | ||
28 | * | ||
29 | * When __up() is called, the count was negative before | ||
30 | * incrementing it, and we need to wake up somebody. | ||
31 | * | ||
32 | * This routine adds one to the count of processes that need to | ||
33 | * wake up and exit. ALL waiting processes actually wake up but | ||
34 | * only the one that gets to the "waking" field first will gate | ||
35 | * through and acquire the semaphore. The others will go back | ||
36 | * to sleep. | ||
37 | * | ||
38 | * Note that these functions are only called when there is | ||
39 | * contention on the lock, and as such all this is the | ||
40 | * "non-critical" part of the whole semaphore business. The | ||
41 | * critical part is the inline stuff in <asm/semaphore.h> | ||
42 | * where we want to avoid any extra jumps and calls. | ||
43 | */ | ||
44 | void __up(struct semaphore *sem) | ||
45 | { | ||
46 | wake_one_more(sem); | ||
47 | wake_up(&sem->wait); | ||
48 | } | ||
49 | |||
50 | /* | ||
51 | * Perform the "down" function. Return zero for semaphore acquired, | ||
52 | * return negative for signalled out of the function. | ||
53 | * | ||
54 | * If called from __down, the return is ignored and the wait loop is | ||
55 | * not interruptible. This means that a task waiting on a semaphore | ||
56 | * using "down()" cannot be killed until someone does an "up()" on | ||
57 | * the semaphore. | ||
58 | * | ||
59 | * If called from __down_interruptible, the return value gets checked | ||
60 | * upon return. If the return value is negative then the task continues | ||
61 | * with the negative value in the return register (it can be tested by | ||
62 | * the caller). | ||
63 | * | ||
64 | * Either form may be used in conjunction with "up()". | ||
65 | * | ||
66 | */ | ||
67 | |||
68 | |||
69 | #define DOWN_HEAD(task_state) \ | ||
70 | \ | ||
71 | \ | ||
72 | current->state = (task_state); \ | ||
73 | add_wait_queue(&sem->wait, &wait); \ | ||
74 | \ | ||
75 | /* \ | ||
76 | * Ok, we're set up. sem->count is known to be less than zero \ | ||
77 | * so we must wait. \ | ||
78 | * \ | ||
79 | * We can let go the lock for purposes of waiting. \ | ||
80 | * We re-acquire it after awaking so as to protect \ | ||
81 | * all semaphore operations. \ | ||
82 | * \ | ||
83 | * If "up()" is called before we call waking_non_zero() then \ | ||
84 | * we will catch it right away. If it is called later then \ | ||
85 | * we will have to go through a wakeup cycle to catch it. \ | ||
86 | * \ | ||
87 | * Multiple waiters contend for the semaphore lock to see \ | ||
88 | * who gets to gate through and who has to wait some more. \ | ||
89 | */ \ | ||
90 | for (;;) { | ||
91 | |||
92 | #define DOWN_TAIL(task_state) \ | ||
93 | current->state = (task_state); \ | ||
94 | } \ | ||
95 | current->state = TASK_RUNNING; \ | ||
96 | remove_wait_queue(&sem->wait, &wait); | ||
97 | |||
98 | void __sched __down(struct semaphore * sem) | ||
99 | { | ||
100 | DECLARE_WAITQUEUE(wait, current); | ||
101 | |||
102 | DOWN_HEAD(TASK_UNINTERRUPTIBLE) | ||
103 | if (waking_non_zero(sem)) | ||
104 | break; | ||
105 | schedule(); | ||
106 | DOWN_TAIL(TASK_UNINTERRUPTIBLE) | ||
107 | } | ||
108 | |||
109 | int __sched __down_interruptible(struct semaphore * sem) | ||
110 | { | ||
111 | DECLARE_WAITQUEUE(wait, current); | ||
112 | int ret = 0; | ||
113 | |||
114 | DOWN_HEAD(TASK_INTERRUPTIBLE) | ||
115 | |||
116 | ret = waking_non_zero_interruptible(sem, current); | ||
117 | if (ret) | ||
118 | { | ||
119 | if (ret == 1) | ||
120 | /* ret != 0 only if we get interrupted -arca */ | ||
121 | ret = 0; | ||
122 | break; | ||
123 | } | ||
124 | schedule(); | ||
125 | DOWN_TAIL(TASK_INTERRUPTIBLE) | ||
126 | return ret; | ||
127 | } | ||
128 | |||
129 | int __down_trylock(struct semaphore * sem) | ||
130 | { | ||
131 | return waking_non_zero_trylock(sem); | ||
132 | } | ||
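The m68k scheme above differs from the i386-style one: up() banks a wakeup in "waking", all sleepers are woken, and waking_non_zero() is the atomic gate deciding who actually proceeds. A single-threaded sketch of that gate (sem_model and the helpers are invented names; the real gate lives in <asm/semaphore-helper.h> and must execute atomically):

#include <stdio.h>

struct sem_model {
	int count;	/* decremented by every down() */
	int waking;	/* pending wakeups banked by up() */
};

/* consume one pending wakeup if there is one */
static int waking_non_zero(struct sem_model *sem)
{
	if (sem->waking > 0) {
		sem->waking--;
		return 1;	/* gate through, semaphore is ours */
	}
	return 0;		/* lost the race, back to sleep */
}

/* contended up(): wake_one_more() then wake_up() everyone */
static void up_slow(struct sem_model *sem)
{
	sem->count++;
	sem->waking++;
}

int main(void)
{
	/* count 1 minus three callers: one holder, two sleepers */
	struct sem_model sem = { .count = -2, .waking = 0 };

	printf("waiter A gates: %d\n", waking_non_zero(&sem));	/* 0 */
	up_slow(&sem);
	printf("waiter A gates: %d\n", waking_non_zero(&sem));	/* 1 */
	printf("waiter B gates: %d\n", waking_non_zero(&sem));	/* 0 */
	return 0;
}

Waking every sleeper and letting them race on one gate is simple but wasteful; the schemes that fold waiters into count avoid the extra wakeups.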
diff --git a/arch/m68k/lib/Makefile b/arch/m68k/lib/Makefile index 6bbf19f96007..a18af095cd7c 100644 --- a/arch/m68k/lib/Makefile +++ b/arch/m68k/lib/Makefile | |||
@@ -5,4 +5,4 @@ | |||
5 | EXTRA_AFLAGS := -traditional | 5 | EXTRA_AFLAGS := -traditional |
6 | 6 | ||
7 | lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \ | 7 | lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \ |
8 | checksum.o string.o semaphore.o uaccess.o | 8 | checksum.o string.o uaccess.o |
diff --git a/arch/m68k/lib/semaphore.S b/arch/m68k/lib/semaphore.S deleted file mode 100644 index 0215624c1602..000000000000 --- a/arch/m68k/lib/semaphore.S +++ /dev/null | |||
@@ -1,53 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/m68k/lib/semaphore.S | ||
3 | * | ||
4 | * Copyright (C) 1996 Linus Torvalds | ||
5 | * | ||
6 | * m68k version by Andreas Schwab | ||
7 | */ | ||
8 | |||
9 | #include <linux/linkage.h> | ||
10 | #include <asm/semaphore.h> | ||
11 | |||
12 | /* | ||
13 | * The semaphore operations have a special calling sequence that | ||
14 | * allow us to do a simpler in-line version of them. These routines | ||
15 | * need to convert that sequence back into the C sequence when | ||
16 | * there is contention on the semaphore. | ||
17 | */ | ||
18 | ENTRY(__down_failed) | ||
19 | moveml %a0/%d0/%d1,-(%sp) | ||
20 | movel %a1,-(%sp) | ||
21 | jbsr __down | ||
22 | movel (%sp)+,%a1 | ||
23 | moveml (%sp)+,%a0/%d0/%d1 | ||
24 | rts | ||
25 | |||
26 | ENTRY(__down_failed_interruptible) | ||
27 | movel %a0,-(%sp) | ||
28 | movel %d1,-(%sp) | ||
29 | movel %a1,-(%sp) | ||
30 | jbsr __down_interruptible | ||
31 | movel (%sp)+,%a1 | ||
32 | movel (%sp)+,%d1 | ||
33 | movel (%sp)+,%a0 | ||
34 | rts | ||
35 | |||
36 | ENTRY(__down_failed_trylock) | ||
37 | movel %a0,-(%sp) | ||
38 | movel %d1,-(%sp) | ||
39 | movel %a1,-(%sp) | ||
40 | jbsr __down_trylock | ||
41 | movel (%sp)+,%a1 | ||
42 | movel (%sp)+,%d1 | ||
43 | movel (%sp)+,%a0 | ||
44 | rts | ||
45 | |||
46 | ENTRY(__up_wakeup) | ||
47 | moveml %a0/%d0/%d1,-(%sp) | ||
48 | movel %a1,-(%sp) | ||
49 | jbsr __up | ||
50 | movel (%sp)+,%a1 | ||
51 | moveml (%sp)+,%a0/%d0/%d1 | ||
52 | rts | ||
53 | |||
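These stubs exist purely for calling-convention reasons: the inline fast path is hand-written asm that clobbers almost nothing, so on contention it jumps to __down_failed, which saves the scratch registers and only then makes a normal C call. The split itself is the portable idea; a plain-C sketch of it (C11, invented names, no register tricks, sem here is a toy):

#include <stdatomic.h>
#include <stdio.h>

struct sem {
	atomic_int count;
};

/* out-of-line contention path; in the kernel this is __down(), and
 * the asm stub only exists so the inline fast path stays
 * register-clean around this call */
static void down_slow(struct sem *sem)
{
	printf("contended (count=%d), would sleep\n",
	       atomic_load(&sem->count));
}

static inline void down_fast(struct sem *sem)
{
	/* uncontended case: one atomic decrement, no call out */
	if (atomic_fetch_sub(&sem->count, 1) <= 0)
		down_slow(sem);
}

int main(void)
{
	struct sem s;

	atomic_init(&s.count, 1);
	down_fast(&s);		/* 1 -> 0: silent fast path */
	down_fast(&s);		/* 0 -> -1: slow path runs */
	return 0;
}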
diff --git a/arch/m68knommu/kernel/Makefile b/arch/m68knommu/kernel/Makefile index 1524b39ad63f..f0eab3dedb5a 100644 --- a/arch/m68knommu/kernel/Makefile +++ b/arch/m68knommu/kernel/Makefile | |||
@@ -5,7 +5,7 @@ | |||
5 | extra-y := vmlinux.lds | 5 | extra-y := vmlinux.lds |
6 | 6 | ||
7 | obj-y += dma.o entry.o init_task.o irq.o m68k_ksyms.o process.o ptrace.o \ | 7 | obj-y += dma.o entry.o init_task.o irq.o m68k_ksyms.o process.o ptrace.o \ |
8 | semaphore.o setup.o signal.o syscalltable.o sys_m68k.o time.o traps.o | 8 | setup.o signal.o syscalltable.o sys_m68k.o time.o traps.o |
9 | 9 | ||
10 | obj-$(CONFIG_MODULES) += module.o | 10 | obj-$(CONFIG_MODULES) += module.o |
11 | obj-$(CONFIG_COMEMPCI) += comempci.o | 11 | obj-$(CONFIG_COMEMPCI) += comempci.o |
diff --git a/arch/m68knommu/kernel/m68k_ksyms.c b/arch/m68knommu/kernel/m68k_ksyms.c index 53fad1490282..39fe0a7aec32 100644 --- a/arch/m68knommu/kernel/m68k_ksyms.c +++ b/arch/m68knommu/kernel/m68k_ksyms.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <asm/pgalloc.h> | 13 | #include <asm/pgalloc.h> |
14 | #include <asm/irq.h> | 14 | #include <asm/irq.h> |
15 | #include <asm/io.h> | 15 | #include <asm/io.h> |
16 | #include <asm/semaphore.h> | ||
17 | #include <asm/checksum.h> | 16 | #include <asm/checksum.h> |
18 | #include <asm/current.h> | 17 | #include <asm/current.h> |
19 | 18 | ||
@@ -39,11 +38,6 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck); | |||
39 | EXPORT_SYMBOL(memcpy); | 38 | EXPORT_SYMBOL(memcpy); |
40 | EXPORT_SYMBOL(memset); | 39 | EXPORT_SYMBOL(memset); |
41 | 40 | ||
42 | EXPORT_SYMBOL(__down_failed); | ||
43 | EXPORT_SYMBOL(__down_failed_interruptible); | ||
44 | EXPORT_SYMBOL(__down_failed_trylock); | ||
45 | EXPORT_SYMBOL(__up_wakeup); | ||
46 | |||
47 | /* | 41 | /* |
48 | * libgcc functions - functions that are used internally by the | 42 | * libgcc functions - functions that are used internally by the |
49 | * compiler... (prototypes are not correct though, but that | 43 | * compiler... (prototypes are not correct though, but that |
diff --git a/arch/m68knommu/kernel/semaphore.c b/arch/m68knommu/kernel/semaphore.c deleted file mode 100644 index bce2bc7d87c6..000000000000 --- a/arch/m68knommu/kernel/semaphore.c +++ /dev/null | |||
@@ -1,133 +0,0 @@ | |||
1 | /* | ||
2 | * Generic semaphore code. Buyer beware. Do your own | ||
3 | * specific changes in <asm/semaphore-helper.h> | ||
4 | */ | ||
5 | |||
6 | #include <linux/sched.h> | ||
7 | #include <linux/err.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <asm/semaphore-helper.h> | ||
10 | |||
11 | #ifndef CONFIG_RMW_INSNS | ||
12 | spinlock_t semaphore_wake_lock; | ||
13 | #endif | ||
14 | |||
15 | /* | ||
16 | * Semaphores are implemented using a two-way counter: | ||
17 | * The "count" variable is decremented for each process | ||
18 | * that tries to sleep, while the "waking" variable is | ||
19 | * incremented when the "up()" code goes to wake up waiting | ||
20 | * processes. | ||
21 | * | ||
22 | * Notably, the inline "up()" and "down()" functions can | ||
23 | * efficiently test if they need to do any extra work (up | ||
24 | * needs to do something only if count was negative before | ||
25 | * the increment operation). | ||
26 | * | ||
27 | * waking_non_zero() (from asm/semaphore.h) must execute | ||
28 | * atomically. | ||
29 | * | ||
30 | * When __up() is called, the count was negative before | ||
31 | * incrementing it, and we need to wake up somebody. | ||
32 | * | ||
33 | * This routine adds one to the count of processes that need to | ||
34 | * wake up and exit. ALL waiting processes actually wake up but | ||
35 | * only the one that gets to the "waking" field first will gate | ||
36 | * through and acquire the semaphore. The others will go back | ||
37 | * to sleep. | ||
38 | * | ||
39 | * Note that these functions are only called when there is | ||
40 | * contention on the lock, and as such all this is the | ||
41 | * "non-critical" part of the whole semaphore business. The | ||
42 | * critical part is the inline stuff in <asm/semaphore.h> | ||
43 | * where we want to avoid any extra jumps and calls. | ||
44 | */ | ||
45 | void __up(struct semaphore *sem) | ||
46 | { | ||
47 | wake_one_more(sem); | ||
48 | wake_up(&sem->wait); | ||
49 | } | ||
50 | |||
51 | /* | ||
52 | * Perform the "down" function. Return zero for semaphore acquired, | ||
53 | * return negative for signalled out of the function. | ||
54 | * | ||
55 | * If called from __down, the return is ignored and the wait loop is | ||
56 | * not interruptible. This means that a task waiting on a semaphore | ||
57 | * using "down()" cannot be killed until someone does an "up()" on | ||
58 | * the semaphore. | ||
59 | * | ||
60 | * If called from __down_interruptible, the return value gets checked | ||
61 | * upon return. If the return value is negative then the task continues | ||
62 | * with the negative value in the return register (it can be tested by | ||
63 | * the caller). | ||
64 | * | ||
65 | * Either form may be used in conjunction with "up()". | ||
66 | * | ||
67 | */ | ||
68 | |||
69 | |||
70 | #define DOWN_HEAD(task_state) \ | ||
71 | \ | ||
72 | \ | ||
73 | current->state = (task_state); \ | ||
74 | add_wait_queue(&sem->wait, &wait); \ | ||
75 | \ | ||
76 | /* \ | ||
77 | * Ok, we're set up. sem->count is known to be less than zero \ | ||
78 | * so we must wait. \ | ||
79 | * \ | ||
80 | * We can let go the lock for purposes of waiting. \ | ||
81 | * We re-acquire it after awaking so as to protect \ | ||
82 | * all semaphore operations. \ | ||
83 | * \ | ||
84 | * If "up()" is called before we call waking_non_zero() then \ | ||
85 | * we will catch it right away. If it is called later then \ | ||
86 | * we will have to go through a wakeup cycle to catch it. \ | ||
87 | * \ | ||
88 | * Multiple waiters contend for the semaphore lock to see \ | ||
89 | * who gets to gate through and who has to wait some more. \ | ||
90 | */ \ | ||
91 | for (;;) { | ||
92 | |||
93 | #define DOWN_TAIL(task_state) \ | ||
94 | current->state = (task_state); \ | ||
95 | } \ | ||
96 | current->state = TASK_RUNNING; \ | ||
97 | remove_wait_queue(&sem->wait, &wait); | ||
98 | |||
99 | void __sched __down(struct semaphore * sem) | ||
100 | { | ||
101 | DECLARE_WAITQUEUE(wait, current); | ||
102 | |||
103 | DOWN_HEAD(TASK_UNINTERRUPTIBLE) | ||
104 | if (waking_non_zero(sem)) | ||
105 | break; | ||
106 | schedule(); | ||
107 | DOWN_TAIL(TASK_UNINTERRUPTIBLE) | ||
108 | } | ||
109 | |||
110 | int __sched __down_interruptible(struct semaphore * sem) | ||
111 | { | ||
112 | DECLARE_WAITQUEUE(wait, current); | ||
113 | int ret = 0; | ||
114 | |||
115 | DOWN_HEAD(TASK_INTERRUPTIBLE) | ||
116 | |||
117 | ret = waking_non_zero_interruptible(sem, current); | ||
118 | if (ret) | ||
119 | { | ||
120 | if (ret == 1) | ||
121 | /* ret != 0 only if we get interrupted -arca */ | ||
122 | ret = 0; | ||
123 | break; | ||
124 | } | ||
125 | schedule(); | ||
126 | DOWN_TAIL(TASK_INTERRUPTIBLE) | ||
127 | return ret; | ||
128 | } | ||
129 | |||
130 | int __down_trylock(struct semaphore * sem) | ||
131 | { | ||
132 | return waking_non_zero_trylock(sem); | ||
133 | } | ||
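This file is essentially the m68k copy, and its DOWN_HEAD/DOWN_TAIL pair is the hardest part to read because the for (;;) opens in one macro and closes in the other. Expanded by hand, __down() is an ordinary retry loop. The sketch below shows that shape with the kernel primitives replaced by stubs (invented stand-ins; this illustrates control flow only, not working synchronization):

#include <stdio.h>

static int waking;	/* pending wakeups, normally banked by up() */

static int waking_non_zero(void)
{
	if (waking > 0) {
		waking--;
		return 1;
	}
	return 0;
}

static void schedule(void)
{
	puts("schedule()");
	waking = 1;	/* pretend up() ran while we slept */
}

static void set_state(const char *s)
{
	printf("state = %s\n", s);
}

static void down_expanded(void)
{
	set_state("TASK_UNINTERRUPTIBLE");	/* DOWN_HEAD */
	/* add_wait_queue(&sem->wait, &wait); */
	for (;;) {
		if (waking_non_zero())		/* loop body */
			break;
		schedule();
		set_state("TASK_UNINTERRUPTIBLE");	/* DOWN_TAIL */
	}
	set_state("TASK_RUNNING");
	/* remove_wait_queue(&sem->wait, &wait); */
}

int main(void)
{
	down_expanded();
	return 0;
}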
diff --git a/arch/m68knommu/lib/Makefile b/arch/m68knommu/lib/Makefile index e051a7913987..d94d709665aa 100644 --- a/arch/m68knommu/lib/Makefile +++ b/arch/m68knommu/lib/Makefile | |||
@@ -4,4 +4,4 @@ | |||
4 | 4 | ||
5 | lib-y := ashldi3.o ashrdi3.o lshrdi3.o \ | 5 | lib-y := ashldi3.o ashrdi3.o lshrdi3.o \ |
6 | muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \ | 6 | muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \ |
7 | checksum.o semaphore.o memcpy.o memset.o delay.o | 7 | checksum.o memcpy.o memset.o delay.o |
diff --git a/arch/m68knommu/lib/semaphore.S b/arch/m68knommu/lib/semaphore.S deleted file mode 100644 index 87c746034376..000000000000 --- a/arch/m68knommu/lib/semaphore.S +++ /dev/null | |||
@@ -1,66 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/m68k/lib/semaphore.S | ||
3 | * | ||
4 | * Copyright (C) 1996 Linus Torvalds | ||
5 | * | ||
6 | * m68k version by Andreas Schwab | ||
7 | * | ||
8 | * MAR/1999 -- modified to support ColdFire (gerg@snapgear.com) | ||
9 | */ | ||
10 | |||
11 | #include <linux/linkage.h> | ||
12 | #include <asm/semaphore.h> | ||
13 | |||
14 | /* | ||
15 | * "down_failed" is called with the eventual return address | ||
16 | * in %a0, and the address of the semaphore in %a1. We need | ||
17 | * to increment the number of waiters on the semaphore, | ||
18 | * call "__down()", and then eventually return to try again. | ||
19 | */ | ||
20 | ENTRY(__down_failed) | ||
21 | #ifdef CONFIG_COLDFIRE | ||
22 | subl #12,%sp | ||
23 | moveml %a0/%d0/%d1,(%sp) | ||
24 | #else | ||
25 | moveml %a0/%d0/%d1,-(%sp) | ||
26 | #endif | ||
27 | movel %a1,-(%sp) | ||
28 | jbsr __down | ||
29 | movel (%sp)+,%a1 | ||
30 | movel (%sp)+,%d0 | ||
31 | movel (%sp)+,%d1 | ||
32 | rts | ||
33 | |||
34 | ENTRY(__down_failed_interruptible) | ||
35 | movel %a0,-(%sp) | ||
36 | movel %d1,-(%sp) | ||
37 | movel %a1,-(%sp) | ||
38 | jbsr __down_interruptible | ||
39 | movel (%sp)+,%a1 | ||
40 | movel (%sp)+,%d1 | ||
41 | rts | ||
42 | |||
43 | ENTRY(__up_wakeup) | ||
44 | #ifdef CONFIG_COLDFIRE | ||
45 | subl #12,%sp | ||
46 | moveml %a0/%d0/%d1,(%sp) | ||
47 | #else | ||
48 | moveml %a0/%d0/%d1,-(%sp) | ||
49 | #endif | ||
50 | movel %a1,-(%sp) | ||
51 | jbsr __up | ||
52 | movel (%sp)+,%a1 | ||
53 | movel (%sp)+,%d0 | ||
54 | movel (%sp)+,%d1 | ||
55 | rts | ||
56 | |||
57 | ENTRY(__down_failed_trylock) | ||
58 | movel %a0,-(%sp) | ||
59 | movel %d1,-(%sp) | ||
60 | movel %a1,-(%sp) | ||
61 | jbsr __down_trylock | ||
62 | movel (%sp)+,%a1 | ||
63 | movel (%sp)+,%d1 | ||
64 | movel (%sp)+,%a0 | ||
65 | rts | ||
66 | |||
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 9e78e1a4ca17..6fcdb6fda2e2 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile | |||
@@ -5,7 +5,7 @@ | |||
5 | extra-y := head.o init_task.o vmlinux.lds | 5 | extra-y := head.o init_task.o vmlinux.lds |
6 | 6 | ||
7 | obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ | 7 | obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ |
8 | ptrace.o reset.o semaphore.o setup.o signal.o syscall.o \ | 8 | ptrace.o reset.o setup.o signal.o syscall.o \ |
9 | time.o topology.o traps.o unaligned.o | 9 | time.o topology.o traps.o unaligned.o |
10 | 10 | ||
11 | obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o | 11 | obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o |
diff --git a/arch/mips/kernel/semaphore.c b/arch/mips/kernel/semaphore.c deleted file mode 100644 index 1265358cdca1..000000000000 --- a/arch/mips/kernel/semaphore.c +++ /dev/null | |||
@@ -1,168 +0,0 @@ | |||
1 | /* | ||
2 | * MIPS-specific semaphore code. | ||
3 | * | ||
4 | * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> | ||
5 | * Copyright (C) 2004 Ralf Baechle <ralf@linux-mips.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | * | ||
12 | * April 2001 - Reworked by Paul Mackerras <paulus@samba.org> | ||
13 | * to eliminate the SMP races in the old version between the updates | ||
14 | * of `count' and `waking'. Now we use negative `count' values to | ||
15 | * indicate that some process(es) are waiting for the semaphore. | ||
16 | */ | ||
17 | |||
18 | #include <linux/module.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <asm/atomic.h> | ||
22 | #include <asm/cpu-features.h> | ||
23 | #include <asm/errno.h> | ||
24 | #include <asm/semaphore.h> | ||
25 | #include <asm/war.h> | ||
26 | /* | ||
27 | * Atomically update sem->count. | ||
28 | * This does the equivalent of the following: | ||
29 | * | ||
30 | * old_count = sem->count; | ||
31 | * tmp = MAX(old_count, 0) + incr; | ||
32 | * sem->count = tmp; | ||
33 | * return old_count; | ||
34 | * | ||
35 | * On machines without lld/scd we need a spinlock to make the manipulation of | ||
36 | * sem->count and sem->waking atomic. Scalability isn't an issue because | ||
37 | * this lock is used on UP only so it's just an empty variable. | ||
38 | */ | ||
39 | static inline int __sem_update_count(struct semaphore *sem, int incr) | ||
40 | { | ||
41 | int old_count, tmp; | ||
42 | |||
43 | if (cpu_has_llsc && R10000_LLSC_WAR) { | ||
44 | __asm__ __volatile__( | ||
45 | " .set mips3 \n" | ||
46 | "1: ll %0, %2 # __sem_update_count \n" | ||
47 | " sra %1, %0, 31 \n" | ||
48 | " not %1 \n" | ||
49 | " and %1, %0, %1 \n" | ||
50 | " addu %1, %1, %3 \n" | ||
51 | " sc %1, %2 \n" | ||
52 | " beqzl %1, 1b \n" | ||
53 | " .set mips0 \n" | ||
54 | : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) | ||
55 | : "r" (incr), "m" (sem->count)); | ||
56 | } else if (cpu_has_llsc) { | ||
57 | __asm__ __volatile__( | ||
58 | " .set mips3 \n" | ||
59 | "1: ll %0, %2 # __sem_update_count \n" | ||
60 | " sra %1, %0, 31 \n" | ||
61 | " not %1 \n" | ||
62 | " and %1, %0, %1 \n" | ||
63 | " addu %1, %1, %3 \n" | ||
64 | " sc %1, %2 \n" | ||
65 | " beqz %1, 1b \n" | ||
66 | " .set mips0 \n" | ||
67 | : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) | ||
68 | : "r" (incr), "m" (sem->count)); | ||
69 | } else { | ||
70 | static DEFINE_SPINLOCK(semaphore_lock); | ||
71 | unsigned long flags; | ||
72 | |||
73 | spin_lock_irqsave(&semaphore_lock, flags); | ||
74 | old_count = atomic_read(&sem->count); | ||
75 | tmp = max_t(int, old_count, 0) + incr; | ||
76 | atomic_set(&sem->count, tmp); | ||
77 | spin_unlock_irqrestore(&semaphore_lock, flags); | ||
78 | } | ||
79 | |||
80 | return old_count; | ||
81 | } | ||
82 | |||
83 | void __up(struct semaphore *sem) | ||
84 | { | ||
85 | /* | ||
86 | * Note that we incremented count in up() before we came here, | ||
87 | * but that was ineffective since the result was <= 0, and | ||
88 | * any negative value of count is equivalent to 0. | ||
89 | * This ends up setting count to 1, unless count is now > 0 | ||
90 | * (i.e. because some other cpu has called up() in the meantime), | ||
91 | * in which case we just increment count. | ||
92 | */ | ||
93 | __sem_update_count(sem, 1); | ||
94 | wake_up(&sem->wait); | ||
95 | } | ||
96 | |||
97 | EXPORT_SYMBOL(__up); | ||
98 | |||
99 | /* | ||
100 | * Note that when we come in to __down or __down_interruptible, | ||
101 | * we have already decremented count, but that decrement was | ||
102 | * ineffective since the result was < 0, and any negative value | ||
103 | * of count is equivalent to 0. | ||
104 | * Thus it is only when we decrement count from some value > 0 | ||
105 | * that we have actually got the semaphore. | ||
106 | */ | ||
107 | void __sched __down(struct semaphore *sem) | ||
108 | { | ||
109 | struct task_struct *tsk = current; | ||
110 | DECLARE_WAITQUEUE(wait, tsk); | ||
111 | |||
112 | __set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
113 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
114 | |||
115 | /* | ||
116 | * Try to get the semaphore. If the count is > 0, then we've | ||
117 | * got the semaphore; we decrement count and exit the loop. | ||
118 | * If the count is 0 or negative, we set it to -1, indicating | ||
119 | * that we are asleep, and then sleep. | ||
120 | */ | ||
121 | while (__sem_update_count(sem, -1) <= 0) { | ||
122 | schedule(); | ||
123 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
124 | } | ||
125 | remove_wait_queue(&sem->wait, &wait); | ||
126 | __set_task_state(tsk, TASK_RUNNING); | ||
127 | |||
128 | /* | ||
129 | * If there are any more sleepers, wake one of them up so | ||
130 | * that it can either get the semaphore, or set count to -1 | ||
131 | * indicating that there are still processes sleeping. | ||
132 | */ | ||
133 | wake_up(&sem->wait); | ||
134 | } | ||
135 | |||
136 | EXPORT_SYMBOL(__down); | ||
137 | |||
138 | int __sched __down_interruptible(struct semaphore * sem) | ||
139 | { | ||
140 | int retval = 0; | ||
141 | struct task_struct *tsk = current; | ||
142 | DECLARE_WAITQUEUE(wait, tsk); | ||
143 | |||
144 | __set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
145 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
146 | |||
147 | while (__sem_update_count(sem, -1) <= 0) { | ||
148 | if (signal_pending(current)) { | ||
149 | /* | ||
150 | * A signal is pending - give up trying. | ||
151 | * Set sem->count to 0 if it is negative, | ||
152 | * since we are no longer sleeping. | ||
153 | */ | ||
154 | __sem_update_count(sem, 0); | ||
155 | retval = -EINTR; | ||
156 | break; | ||
157 | } | ||
158 | schedule(); | ||
159 | set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
160 | } | ||
161 | remove_wait_queue(&sem->wait, &wait); | ||
162 | __set_task_state(tsk, TASK_RUNNING); | ||
163 | |||
164 | wake_up(&sem->wait); | ||
165 | return retval; | ||
166 | } | ||
167 | |||
168 | EXPORT_SYMBOL(__down_interruptible); | ||
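All three branches of __sem_update_count() implement the same clamped update the header comment spells out: old = count; count = max(count, 0) + incr; return old. A portable C11 model of those semantics using a compare-exchange loop (a sketch; the kernel relies on the ll/sc sequences or the spinlock fallback shown above, not on this):

#include <stdatomic.h>
#include <stdio.h>

static int sem_update_count(atomic_int *count, int incr)
{
	int old = atomic_load(count);
	int new;

	/* retry until the clamped update lands atomically; on failure
	 * the compare-exchange refreshes 'old' for us */
	do {
		new = (old > 0 ? old : 0) + incr;
	} while (!atomic_compare_exchange_weak(count, &old, new));

	return old;
}

int main(void)
{
	atomic_int count;
	int old;

	atomic_init(&count, -3);	/* three tasks queued */

	old = sem_update_count(&count, 1);	/* up() */
	printf("up:   old=%d new=%d\n", old, atomic_load(&count));

	old = sem_update_count(&count, -1);	/* a waiter retries */
	printf("down: old=%d new=%d\n", old, atomic_load(&count));
	return 0;
}

The clamp is what makes any negative count equivalent to 0, which is why the callers only trust a strictly positive old value as proof of acquisition.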
diff --git a/arch/mn10300/kernel/Makefile b/arch/mn10300/kernel/Makefile index ef07c956170a..23f2ab67574c 100644 --- a/arch/mn10300/kernel/Makefile +++ b/arch/mn10300/kernel/Makefile | |||
@@ -3,7 +3,7 @@ | |||
3 | # | 3 | # |
4 | extra-y := head.o init_task.o vmlinux.lds | 4 | extra-y := head.o init_task.o vmlinux.lds |
5 | 5 | ||
6 | obj-y := process.o semaphore.o signal.o entry.o fpu.o traps.o irq.o \ | 6 | obj-y := process.o signal.o entry.o fpu.o traps.o irq.o \ |
7 | ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \ | 7 | ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \ |
8 | switch_to.o mn10300_ksyms.o kernel_execve.o | 8 | switch_to.o mn10300_ksyms.o kernel_execve.o |
9 | 9 | ||
diff --git a/arch/mn10300/kernel/semaphore.c b/arch/mn10300/kernel/semaphore.c deleted file mode 100644 index 9153c4039fd2..000000000000 --- a/arch/mn10300/kernel/semaphore.c +++ /dev/null | |||
@@ -1,149 +0,0 @@ | |||
1 | /* MN10300 Semaphore implementation | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <asm/semaphore.h> | ||
14 | |||
15 | struct sem_waiter { | ||
16 | struct list_head list; | ||
17 | struct task_struct *task; | ||
18 | }; | ||
19 | |||
20 | #if SEMAPHORE_DEBUG | ||
21 | void semtrace(struct semaphore *sem, const char *str) | ||
22 | { | ||
23 | if (sem->debug) | ||
24 | printk(KERN_DEBUG "[%d] %s({%d,%d})\n", | ||
25 | current->pid, | ||
26 | str, | ||
27 | atomic_read(&sem->count), | ||
28 | list_empty(&sem->wait_list) ? 0 : 1); | ||
29 | } | ||
30 | #else | ||
31 | #define semtrace(SEM, STR) do { } while (0) | ||
32 | #endif | ||
33 | |||
34 | /* | ||
35 | * wait for a token to be granted from a semaphore | ||
36 | * - entered with lock held and interrupts disabled | ||
37 | */ | ||
38 | void __down(struct semaphore *sem, unsigned long flags) | ||
39 | { | ||
40 | struct task_struct *tsk = current; | ||
41 | struct sem_waiter waiter; | ||
42 | |||
43 | semtrace(sem, "Entering __down"); | ||
44 | |||
45 | /* set up my own style of waitqueue */ | ||
46 | waiter.task = tsk; | ||
47 | get_task_struct(tsk); | ||
48 | |||
49 | list_add_tail(&waiter.list, &sem->wait_list); | ||
50 | |||
51 | /* we don't need to touch the semaphore struct anymore */ | ||
52 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
53 | |||
54 | /* wait to be given the semaphore */ | ||
55 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
56 | |||
57 | for (;;) { | ||
58 | if (!waiter.task) | ||
59 | break; | ||
60 | schedule(); | ||
61 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
62 | } | ||
63 | |||
64 | tsk->state = TASK_RUNNING; | ||
65 | semtrace(sem, "Leaving __down"); | ||
66 | } | ||
67 | EXPORT_SYMBOL(__down); | ||
68 | |||
69 | /* | ||
70 | * interruptibly wait for a token to be granted from a semaphore | ||
71 | * - entered with lock held and interrupts disabled | ||
72 | */ | ||
73 | int __down_interruptible(struct semaphore *sem, unsigned long flags) | ||
74 | { | ||
75 | struct task_struct *tsk = current; | ||
76 | struct sem_waiter waiter; | ||
77 | int ret; | ||
78 | |||
79 | semtrace(sem, "Entering __down_interruptible"); | ||
80 | |||
81 | /* set up my own style of waitqueue */ | ||
82 | waiter.task = tsk; | ||
83 | get_task_struct(tsk); | ||
84 | |||
85 | list_add_tail(&waiter.list, &sem->wait_list); | ||
86 | |||
87 | /* we don't need to touch the semaphore struct anymore */ | ||
88 | set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
89 | |||
90 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
91 | |||
92 | /* wait to be given the semaphore */ | ||
93 | ret = 0; | ||
94 | for (;;) { | ||
95 | if (!waiter.task) | ||
96 | break; | ||
97 | if (unlikely(signal_pending(current))) | ||
98 | goto interrupted; | ||
99 | schedule(); | ||
100 | set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
101 | } | ||
102 | |||
103 | out: | ||
104 | tsk->state = TASK_RUNNING; | ||
105 | semtrace(sem, "Leaving __down_interruptible"); | ||
106 | return ret; | ||
107 | |||
108 | interrupted: | ||
109 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
110 | list_del(&waiter.list); | ||
111 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
112 | |||
113 | ret = 0; | ||
114 | if (!waiter.task) { | ||
115 | put_task_struct(current); | ||
116 | ret = -EINTR; | ||
117 | } | ||
118 | goto out; | ||
119 | } | ||
120 | EXPORT_SYMBOL(__down_interruptible); | ||
121 | |||
122 | /* | ||
123 | * release a single token back to a semaphore | ||
124 | * - entered with lock held and interrupts disabled | ||
125 | */ | ||
126 | void __up(struct semaphore *sem) | ||
127 | { | ||
128 | struct task_struct *tsk; | ||
129 | struct sem_waiter *waiter; | ||
130 | |||
131 | semtrace(sem, "Entering __up"); | ||
132 | |||
133 | /* grant the token to the process at the front of the queue */ | ||
134 | waiter = list_entry(sem->wait_list.next, struct sem_waiter, list); | ||
135 | |||
136 | /* We must be careful not to touch 'waiter' after we set ->task = NULL. | ||
137 | * It is allocated on the waiter's stack and may become invalid at | ||
138 | * any time after that point (due to a wakeup from another source). | ||
139 | */ | ||
140 | list_del_init(&waiter->list); | ||
141 | tsk = waiter->task; | ||
142 | smp_mb(); | ||
143 | waiter->task = NULL; | ||
144 | wake_up_process(tsk); | ||
145 | put_task_struct(tsk); | ||
146 | |||
147 | semtrace(sem, "Leaving __up"); | ||
148 | } | ||
149 | EXPORT_SYMBOL(__up); | ||
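Unlike the counter-folding schemes earlier in this series, the MN10300 code hands the token straight to the head of a FIFO: __up() clears waiter->task and wakes exactly that task, so there is no thundering herd and no retry race. A single-threaded model of the queue bookkeeping (toy types and names; the real code keeps a task refcount and issues smp_mb() before the ->task = NULL store, since the waiter struct lives on the sleeper's stack):

#include <stddef.h>
#include <stdio.h>

struct waiter {
	const char *task;	/* NULL once the token is granted */
	struct waiter *next;
};

static struct waiter *queue_head;
static struct waiter **queue_tail = &queue_head;

static void enqueue(struct waiter *w)
{
	w->next = NULL;
	*queue_tail = w;
	queue_tail = &w->next;
}

/* the heart of __up(): grant to the oldest waiter, strictly FIFO */
static void up_handover(void)
{
	struct waiter *w = queue_head;

	queue_head = w->next;
	if (!queue_head)
		queue_tail = &queue_head;
	printf("granting token to %s\n", w->task);
	w->task = NULL;		/* the waiter's wait loop watches this */
}

int main(void)
{
	struct waiter a = { "A", NULL }, b = { "B", NULL };

	enqueue(&a);
	enqueue(&b);
	up_handover();		/* A first */
	up_handover();		/* then B */
	return 0;
}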
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile index 27827bc3717e..1f6585a56f97 100644 --- a/arch/parisc/kernel/Makefile +++ b/arch/parisc/kernel/Makefile | |||
@@ -9,7 +9,7 @@ AFLAGS_pacache.o := -traditional | |||
9 | 9 | ||
10 | obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \ | 10 | obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \ |
11 | pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \ | 11 | pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \ |
12 | ptrace.o hardware.o inventory.o drivers.o semaphore.o \ | 12 | ptrace.o hardware.o inventory.o drivers.o \ |
13 | signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \ | 13 | signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \ |
14 | process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \ | 14 | process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \ |
15 | topology.o | 15 | topology.o |
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c index 7aca704e96f0..5b7fc4aa044d 100644 --- a/arch/parisc/kernel/parisc_ksyms.c +++ b/arch/parisc/kernel/parisc_ksyms.c | |||
@@ -69,11 +69,6 @@ EXPORT_SYMBOL(memcpy_toio); | |||
69 | EXPORT_SYMBOL(memcpy_fromio); | 69 | EXPORT_SYMBOL(memcpy_fromio); |
70 | EXPORT_SYMBOL(memset_io); | 70 | EXPORT_SYMBOL(memset_io); |
71 | 71 | ||
72 | #include <asm/semaphore.h> | ||
73 | EXPORT_SYMBOL(__up); | ||
74 | EXPORT_SYMBOL(__down_interruptible); | ||
75 | EXPORT_SYMBOL(__down); | ||
76 | |||
77 | extern void $$divI(void); | 72 | extern void $$divI(void); |
78 | extern void $$divU(void); | 73 | extern void $$divU(void); |
79 | extern void $$remI(void); | 74 | extern void $$remI(void); |
diff --git a/arch/parisc/kernel/semaphore.c b/arch/parisc/kernel/semaphore.c deleted file mode 100644 index ee806bcc3726..000000000000 --- a/arch/parisc/kernel/semaphore.c +++ /dev/null | |||
@@ -1,102 +0,0 @@ | |||
1 | /* | ||
2 | * Semaphore implementation Copyright (c) 2001 Matthew Wilcox, Hewlett-Packard | ||
3 | */ | ||
4 | |||
5 | #include <linux/sched.h> | ||
6 | #include <linux/spinlock.h> | ||
7 | #include <linux/errno.h> | ||
8 | #include <linux/init.h> | ||
9 | |||
10 | /* | ||
11 | * Semaphores are complex as we wish to avoid using two variables. | ||
12 | * `count' has multiple roles, depending on its value. If it is positive | ||
13 | * or zero, there are no waiters. The functions here will never be | ||
14 | * called; see <asm/semaphore.h> | ||
15 | * | ||
16 | * When count is -1 it indicates there is at least one task waiting | ||
17 | * for the semaphore. | ||
18 | * | ||
19 | * When count is less than that, there are '- count - 1' wakeups | ||
20 | * pending, i.e. if it has value -3, there are 2 wakeups pending. | ||
21 | * | ||
22 | * Note that these functions are only called when there is contention | ||
23 | * on the lock, and as such all this is the "non-critical" part of the | ||
24 | * whole semaphore business. The critical part is the inline stuff in | ||
25 | * <asm/semaphore.h> where we want to avoid any extra jumps and calls. | ||
26 | */ | ||
27 | void __up(struct semaphore *sem) | ||
28 | { | ||
29 | sem->count--; | ||
30 | wake_up(&sem->wait); | ||
31 | } | ||
32 | |||
33 | #define wakers(count) (-1 - (count)) | ||
34 | |||
35 | #define DOWN_HEAD \ | ||
36 | int ret = 0; \ | ||
37 | DECLARE_WAITQUEUE(wait, current); \ | ||
38 | \ | ||
39 | /* Note that someone is waiting */ \ | ||
40 | if (sem->count == 0) \ | ||
41 | sem->count = -1; \ | ||
42 | \ | ||
43 | /* protected by the sentry still -- use unlocked version */ \ | ||
44 | wait.flags = WQ_FLAG_EXCLUSIVE; \ | ||
45 | __add_wait_queue_tail(&sem->wait, &wait); \ | ||
46 | lost_race: \ | ||
47 | spin_unlock_irq(&sem->sentry); \ | ||
48 | |||
49 | #define DOWN_TAIL \ | ||
50 | spin_lock_irq(&sem->sentry); \ | ||
51 | if (wakers(sem->count) == 0 && ret == 0) \ | ||
52 | goto lost_race; /* Someone stole our wakeup */ \ | ||
53 | __remove_wait_queue(&sem->wait, &wait); \ | ||
54 | current->state = TASK_RUNNING; \ | ||
55 | if (!waitqueue_active(&sem->wait) && (sem->count < 0)) \ | ||
56 | sem->count = wakers(sem->count); | ||
57 | |||
58 | #define UPDATE_COUNT \ | ||
59 | sem->count += (sem->count < 0) ? 1 : - 1; | ||
60 | |||
61 | |||
62 | void __sched __down(struct semaphore * sem) | ||
63 | { | ||
64 | DOWN_HEAD | ||
65 | |||
66 | for(;;) { | ||
67 | set_task_state(current, TASK_UNINTERRUPTIBLE); | ||
68 | /* we can _read_ this without the sentry */ | ||
69 | if (sem->count != -1) | ||
70 | break; | ||
71 | schedule(); | ||
72 | } | ||
73 | |||
74 | DOWN_TAIL | ||
75 | UPDATE_COUNT | ||
76 | } | ||
77 | |||
78 | int __sched __down_interruptible(struct semaphore * sem) | ||
79 | { | ||
80 | DOWN_HEAD | ||
81 | |||
82 | for(;;) { | ||
83 | set_task_state(current, TASK_INTERRUPTIBLE); | ||
84 | /* we can _read_ this without the sentry */ | ||
85 | if (sem->count != -1) | ||
86 | break; | ||
87 | |||
88 | if (signal_pending(current)) { | ||
89 | ret = -EINTR; | ||
90 | break; | ||
91 | } | ||
92 | schedule(); | ||
93 | } | ||
94 | |||
95 | DOWN_TAIL | ||
96 | |||
97 | if (!ret) { | ||
98 | UPDATE_COUNT | ||
99 | } | ||
100 | |||
101 | return ret; | ||
102 | } | ||
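The parisc variant packs all waiter state into count itself, exactly as the header comment describes: non-negative means free tokens, -1 means sleepers but no pending wakeup, and anything lower encodes pending wakeups. A quick table makes the encoding obvious (wakers() is the macro from the file above):

#include <stdio.h>

#define wakers(count) (-1 - (count))

int main(void)
{
	int c;

	for (c = 1; c >= -3; c--)
		printf("count=%2d  wakers=%d  %s\n", c,
		       c < 0 ? wakers(c) : 0,
		       c >= 0 ? "uncontended" :
		       c == -1 ? "waiters, no wakeup yet" :
				 "wakeups pending");
	return 0;
}

One field instead of two means there is no count/waking race to close, at the price of the lost_race retry in DOWN_TAIL when another sleeper steals the wakeup.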
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index c1baf9d5903f..b9dbfff9afe9 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile | |||
@@ -12,7 +12,7 @@ CFLAGS_prom_init.o += -fPIC | |||
12 | CFLAGS_btext.o += -fPIC | 12 | CFLAGS_btext.o += -fPIC |
13 | endif | 13 | endif |
14 | 14 | ||
15 | obj-y := semaphore.o cputable.o ptrace.o syscalls.o \ | 15 | obj-y := cputable.o ptrace.o syscalls.o \ |
16 | irq.o align.o signal_32.o pmc.o vdso.o \ | 16 | irq.o align.o signal_32.o pmc.o vdso.o \ |
17 | init_task.o process.o systbl.o idle.o \ | 17 | init_task.o process.o systbl.o idle.o \ |
18 | signal.o | 18 | signal.o |
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 9c98424277a8..65d14e6ddc3c 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/bitops.h> | 15 | #include <linux/bitops.h> |
16 | 16 | ||
17 | #include <asm/page.h> | 17 | #include <asm/page.h> |
18 | #include <asm/semaphore.h> | ||
19 | #include <asm/processor.h> | 18 | #include <asm/processor.h> |
20 | #include <asm/cacheflush.h> | 19 | #include <asm/cacheflush.h> |
21 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
diff --git a/arch/powerpc/kernel/semaphore.c b/arch/powerpc/kernel/semaphore.c deleted file mode 100644 index 2f8c3c951394..000000000000 --- a/arch/powerpc/kernel/semaphore.c +++ /dev/null | |||
@@ -1,135 +0,0 @@ | |||
1 | /* | ||
2 | * PowerPC-specific semaphore code. | ||
3 | * | ||
4 | * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * April 2001 - Reworked by Paul Mackerras <paulus@samba.org> | ||
12 | * to eliminate the SMP races in the old version between the updates | ||
13 | * of `count' and `waking'. Now we use negative `count' values to | ||
14 | * indicate that some process(es) are waiting for the semaphore. | ||
15 | */ | ||
16 | |||
17 | #include <linux/sched.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/module.h> | ||
20 | |||
21 | #include <asm/atomic.h> | ||
22 | #include <asm/semaphore.h> | ||
23 | #include <asm/errno.h> | ||
24 | |||
25 | /* | ||
26 | * Atomically update sem->count. | ||
27 | * This does the equivalent of the following: | ||
28 | * | ||
29 | * old_count = sem->count; | ||
30 | * tmp = MAX(old_count, 0) + incr; | ||
31 | * sem->count = tmp; | ||
32 | * return old_count; | ||
33 | */ | ||
34 | static inline int __sem_update_count(struct semaphore *sem, int incr) | ||
35 | { | ||
36 | int old_count, tmp; | ||
37 | |||
38 | __asm__ __volatile__("\n" | ||
39 | "1: lwarx %0,0,%3\n" | ||
40 | " srawi %1,%0,31\n" | ||
41 | " andc %1,%0,%1\n" | ||
42 | " add %1,%1,%4\n" | ||
43 | PPC405_ERR77(0,%3) | ||
44 | " stwcx. %1,0,%3\n" | ||
45 | " bne 1b" | ||
46 | : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) | ||
47 | : "r" (&sem->count), "r" (incr), "m" (sem->count) | ||
48 | : "cc"); | ||
49 | |||
50 | return old_count; | ||
51 | } | ||
52 | |||
53 | void __up(struct semaphore *sem) | ||
54 | { | ||
55 | /* | ||
56 | * Note that we incremented count in up() before we came here, | ||
57 | * but that was ineffective since the result was <= 0, and | ||
58 | * any negative value of count is equivalent to 0. | ||
59 | * This ends up setting count to 1, unless count is now > 0 | ||
60 | * (i.e. because some other cpu has called up() in the meantime), | ||
61 | * in which case we just increment count. | ||
62 | */ | ||
63 | __sem_update_count(sem, 1); | ||
64 | wake_up(&sem->wait); | ||
65 | } | ||
66 | EXPORT_SYMBOL(__up); | ||
67 | |||
68 | /* | ||
69 | * Note that when we come in to __down or __down_interruptible, | ||
70 | * we have already decremented count, but that decrement was | ||
71 | * ineffective since the result was < 0, and any negative value | ||
72 | * of count is equivalent to 0. | ||
73 | * Thus it is only when we decrement count from some value > 0 | ||
74 | * that we have actually got the semaphore. | ||
75 | */ | ||
76 | void __sched __down(struct semaphore *sem) | ||
77 | { | ||
78 | struct task_struct *tsk = current; | ||
79 | DECLARE_WAITQUEUE(wait, tsk); | ||
80 | |||
81 | __set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
82 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
83 | |||
84 | /* | ||
85 | * Try to get the semaphore. If the count is > 0, then we've | ||
86 | * got the semaphore; we decrement count and exit the loop. | ||
87 | * If the count is 0 or negative, we set it to -1, indicating | ||
88 | * that we are asleep, and then sleep. | ||
89 | */ | ||
90 | while (__sem_update_count(sem, -1) <= 0) { | ||
91 | schedule(); | ||
92 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
93 | } | ||
94 | remove_wait_queue(&sem->wait, &wait); | ||
95 | __set_task_state(tsk, TASK_RUNNING); | ||
96 | |||
97 | /* | ||
98 | * If there are any more sleepers, wake one of them up so | ||
99 | * that it can either get the semaphore, or set count to -1 | ||
100 | * indicating that there are still processes sleeping. | ||
101 | */ | ||
102 | wake_up(&sem->wait); | ||
103 | } | ||
104 | EXPORT_SYMBOL(__down); | ||
105 | |||
106 | int __sched __down_interruptible(struct semaphore * sem) | ||
107 | { | ||
108 | int retval = 0; | ||
109 | struct task_struct *tsk = current; | ||
110 | DECLARE_WAITQUEUE(wait, tsk); | ||
111 | |||
112 | __set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
113 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
114 | |||
115 | while (__sem_update_count(sem, -1) <= 0) { | ||
116 | if (signal_pending(current)) { | ||
117 | /* | ||
118 | * A signal is pending - give up trying. | ||
119 | * Set sem->count to 0 if it is negative, | ||
120 | * since we are no longer sleeping. | ||
121 | */ | ||
122 | __sem_update_count(sem, 0); | ||
123 | retval = -EINTR; | ||
124 | break; | ||
125 | } | ||
126 | schedule(); | ||
127 | set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
128 | } | ||
129 | remove_wait_queue(&sem->wait, &wait); | ||
130 | __set_task_state(tsk, TASK_RUNNING); | ||
131 | |||
132 | wake_up(&sem->wait); | ||
133 | return retval; | ||
134 | } | ||
135 | EXPORT_SYMBOL(__down_interruptible); | ||
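Every semaphore.c deleted in this series is replaced by a single generic implementation, kernel/semaphore.c (not part of this hunk): one spinlock protecting a plain count and a FIFO list of waiters. A rough user-space sketch of that shape, with pthreads standing in for the spinlock and scheduler (a condition variable loses the kernel's strict FIFO handoff, but the structure is the same):

#include <pthread.h>
#include <stdio.h>

struct semaphore {
	pthread_mutex_t lock;	/* stands in for the spinlock */
	pthread_cond_t wait;	/* stands in for the wait list */
	unsigned int count;
};

static void down(struct semaphore *sem)
{
	pthread_mutex_lock(&sem->lock);
	while (sem->count == 0)			/* contended: sleep */
		pthread_cond_wait(&sem->wait, &sem->lock);
	sem->count--;
	pthread_mutex_unlock(&sem->lock);
}

static void up(struct semaphore *sem)
{
	pthread_mutex_lock(&sem->lock);
	sem->count++;
	pthread_cond_signal(&sem->wait);	/* wake one waiter */
	pthread_mutex_unlock(&sem->lock);
}

int main(void)
{
	struct semaphore sem = {
		PTHREAD_MUTEX_INITIALIZER,
		PTHREAD_COND_INITIALIZER,
		1
	};

	down(&sem);
	printf("acquired, count=%u\n", sem.count);
	up(&sem);
	printf("released, count=%u\n", sem.count);
	return 0;
}

Trading the per-arch atomic cleverness for a lock around a plain integer costs a little in theory, but by this point mutexes had taken over the kernel's fast paths, so one obviously-correct copy wins.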
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index cd870a823d18..06d918d94dd1 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
@@ -10,9 +10,6 @@ | |||
10 | #include <linux/reboot.h> | 10 | #include <linux/reboot.h> |
11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
12 | #include <linux/initrd.h> | 12 | #include <linux/initrd.h> |
13 | #if defined(CONFIG_IDE) || defined(CONFIG_IDE_MODULE) | ||
14 | #include <linux/ide.h> | ||
15 | #endif | ||
16 | #include <linux/tty.h> | 13 | #include <linux/tty.h> |
17 | #include <linux/bootmem.h> | 14 | #include <linux/bootmem.h> |
18 | #include <linux/seq_file.h> | 15 | #include <linux/seq_file.h> |
@@ -51,11 +48,6 @@ | |||
51 | 48 | ||
52 | extern void bootx_init(unsigned long r4, unsigned long phys); | 49 | extern void bootx_init(unsigned long r4, unsigned long phys); |
53 | 50 | ||
54 | #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) | ||
55 | struct ide_machdep_calls ppc_ide_md; | ||
56 | EXPORT_SYMBOL(ppc_ide_md); | ||
57 | #endif | ||
58 | |||
59 | int boot_cpuid; | 51 | int boot_cpuid; |
60 | EXPORT_SYMBOL_GPL(boot_cpuid); | 52 | EXPORT_SYMBOL_GPL(boot_cpuid); |
61 | int boot_cpuid_phys; | 53 | int boot_cpuid_phys; |
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index 1c58db9d42cb..bcf50d7056e9 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c | |||
@@ -1144,28 +1144,6 @@ void __init pmac_pcibios_after_init(void) | |||
1144 | { | 1144 | { |
1145 | struct device_node* nd; | 1145 | struct device_node* nd; |
1146 | 1146 | ||
1147 | #ifdef CONFIG_BLK_DEV_IDE | ||
1148 | struct pci_dev *dev = NULL; | ||
1149 | |||
1150 | /* OF fails to initialize IDE controllers on macs | ||
1151 | * (and maybe other machines) | ||
1152 | * | ||
1153 | * Ideally, this should be moved to the IDE layer, but we need | ||
1154 | * to check specifically with Andre Hedrick how to do it cleanly | ||
1155 | * since the common IDE code seem to care about the fact that the | ||
1156 | * BIOS may have disabled a controller. | ||
1157 | * | ||
1158 | * -- BenH | ||
1159 | */ | ||
1160 | for_each_pci_dev(dev) { | ||
1161 | if ((dev->class >> 16) != PCI_BASE_CLASS_STORAGE) | ||
1162 | continue; | ||
1163 | if (pci_enable_device(dev)) | ||
1164 | printk(KERN_WARNING | ||
1165 | "pci: Failed to enable %s\n", pci_name(dev)); | ||
1166 | } | ||
1167 | #endif /* CONFIG_BLK_DEV_IDE */ | ||
1168 | |||
1169 | for_each_node_by_name(nd, "firewire") { | 1147 | for_each_node_by_name(nd, "firewire") { |
1170 | if (nd->parent && (of_device_is_compatible(nd, "pci106b,18") || | 1148 | if (nd->parent && (of_device_is_compatible(nd, "pci106b,18") || |
1171 | of_device_is_compatible(nd, "pci106b,30") || | 1149 | of_device_is_compatible(nd, "pci106b,30") || |
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h index b3abaaf61eb4..3362e781b6a7 100644 --- a/arch/powerpc/platforms/powermac/pmac.h +++ b/arch/powerpc/platforms/powermac/pmac.h | |||
@@ -2,7 +2,6 @@ | |||
2 | #define __PMAC_H__ | 2 | #define __PMAC_H__ |
3 | 3 | ||
4 | #include <linux/pci.h> | 4 | #include <linux/pci.h> |
5 | #include <linux/ide.h> | ||
6 | #include <linux/irq.h> | 5 | #include <linux/irq.h> |
7 | 6 | ||
8 | /* | 7 | /* |
@@ -35,10 +34,6 @@ extern void pmac_check_ht_link(void); | |||
35 | 34 | ||
36 | extern void pmac_setup_smp(void); | 35 | extern void pmac_setup_smp(void); |
37 | 36 | ||
38 | extern unsigned long pmac_ide_get_base(int index); | ||
39 | extern void pmac_ide_init_hwif_ports(hw_regs_t *hw, | ||
40 | unsigned long data_port, unsigned long ctrl_port, int *irq); | ||
41 | |||
42 | extern int pmac_nvram_init(void); | 37 | extern int pmac_nvram_init(void); |
43 | extern void pmac_pic_init(void); | 38 | extern void pmac_pic_init(void); |
44 | 39 | ||
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 36ff1b6b7fac..2693fc371eab 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c | |||
@@ -574,14 +574,6 @@ static int __init pmac_probe(void) | |||
574 | ISA_DMA_THRESHOLD = ~0L; | 574 | ISA_DMA_THRESHOLD = ~0L; |
575 | DMA_MODE_READ = 1; | 575 | DMA_MODE_READ = 1; |
576 | DMA_MODE_WRITE = 2; | 576 | DMA_MODE_WRITE = 2; |
577 | |||
578 | #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) | ||
579 | #ifdef CONFIG_BLK_DEV_IDE_PMAC | ||
580 | ppc_ide_md.ide_init_hwif = pmac_ide_init_hwif_ports; | ||
581 | ppc_ide_md.default_io_base = pmac_ide_get_base; | ||
582 | #endif /* CONFIG_BLK_DEV_IDE_PMAC */ | ||
583 | #endif /* defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) */ | ||
584 | |||
585 | #endif /* CONFIG_PPC32 */ | 577 | #endif /* CONFIG_PPC32 */ |
586 | 578 | ||
587 | #ifdef CONFIG_PMAC_SMU | 579 | #ifdef CONFIG_PMAC_SMU |
diff --git a/arch/ppc/configs/sandpoint_defconfig b/arch/ppc/configs/sandpoint_defconfig index fb493a67c60d..9525e34138fc 100644 --- a/arch/ppc/configs/sandpoint_defconfig +++ b/arch/ppc/configs/sandpoint_defconfig | |||
@@ -189,7 +189,7 @@ CONFIG_IDE_TASKFILE_IO=y | |||
189 | # | 189 | # |
190 | # IDE chipset support/bugfixes | 190 | # IDE chipset support/bugfixes |
191 | # | 191 | # |
192 | CONFIG_IDE_GENERIC=y | 192 | CONFIG_BLK_DEV_SL82C105=y |
193 | # CONFIG_BLK_DEV_IDEPCI is not set | 193 | # CONFIG_BLK_DEV_IDEPCI is not set |
194 | # CONFIG_BLK_DEV_IDEDMA is not set | 194 | # CONFIG_BLK_DEV_IDEDMA is not set |
195 | # CONFIG_IDEDMA_AUTO is not set | 195 | # CONFIG_IDEDMA_AUTO is not set |
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c index c35350250cfc..2ba659f401be 100644 --- a/arch/ppc/kernel/ppc_ksyms.c +++ b/arch/ppc/kernel/ppc_ksyms.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/irq.h> | 12 | #include <linux/irq.h> |
13 | #include <linux/pci.h> | 13 | #include <linux/pci.h> |
14 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
15 | #include <linux/ide.h> | ||
16 | #include <linux/pm.h> | 15 | #include <linux/pm.h> |
17 | #include <linux/bitops.h> | 16 | #include <linux/bitops.h> |
18 | 17 | ||
@@ -124,10 +123,6 @@ EXPORT_SYMBOL(__ioremap); | |||
124 | EXPORT_SYMBOL(iounmap); | 123 | EXPORT_SYMBOL(iounmap); |
125 | EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */ | 124 | EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */ |
126 | 125 | ||
127 | #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) | ||
128 | EXPORT_SYMBOL(ppc_ide_md); | ||
129 | #endif | ||
130 | |||
131 | #ifdef CONFIG_PCI | 126 | #ifdef CONFIG_PCI |
132 | EXPORT_SYMBOL(isa_io_base); | 127 | EXPORT_SYMBOL(isa_io_base); |
133 | EXPORT_SYMBOL(isa_mem_base); | 128 | EXPORT_SYMBOL(isa_mem_base); |
diff --git a/arch/ppc/kernel/semaphore.c b/arch/ppc/kernel/semaphore.c deleted file mode 100644 index 2fe429b27c14..000000000000 --- a/arch/ppc/kernel/semaphore.c +++ /dev/null | |||
@@ -1,131 +0,0 @@ | |||
1 | /* | ||
2 | * PowerPC-specific semaphore code. | ||
3 | * | ||
4 | * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * April 2001 - Reworked by Paul Mackerras <paulus@samba.org> | ||
12 | * to eliminate the SMP races in the old version between the updates | ||
13 | * of `count' and `waking'. Now we use negative `count' values to | ||
14 | * indicate that some process(es) are waiting for the semaphore. | ||
15 | */ | ||
16 | |||
17 | #include <linux/sched.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <asm/atomic.h> | ||
20 | #include <asm/semaphore.h> | ||
21 | #include <asm/errno.h> | ||
22 | |||
23 | /* | ||
24 | * Atomically update sem->count. | ||
25 | * This does the equivalent of the following: | ||
26 | * | ||
27 | * old_count = sem->count; | ||
28 | * tmp = MAX(old_count, 0) + incr; | ||
29 | * sem->count = tmp; | ||
30 | * return old_count; | ||
31 | */ | ||
32 | static inline int __sem_update_count(struct semaphore *sem, int incr) | ||
33 | { | ||
34 | int old_count, tmp; | ||
35 | |||
36 | __asm__ __volatile__("\n" | ||
37 | "1: lwarx %0,0,%3\n" | ||
38 | " srawi %1,%0,31\n" | ||
39 | " andc %1,%0,%1\n" | ||
40 | " add %1,%1,%4\n" | ||
41 | PPC405_ERR77(0,%3) | ||
42 | " stwcx. %1,0,%3\n" | ||
43 | " bne 1b" | ||
44 | : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) | ||
45 | : "r" (&sem->count), "r" (incr), "m" (sem->count) | ||
46 | : "cc"); | ||
47 | |||
48 | return old_count; | ||
49 | } | ||
50 | |||
51 | void __up(struct semaphore *sem) | ||
52 | { | ||
53 | /* | ||
54 | * Note that we incremented count in up() before we came here, | ||
55 | * but that was ineffective since the result was <= 0, and | ||
56 | * any negative value of count is equivalent to 0. | ||
57 | * This ends up setting count to 1, unless count is now > 0 | ||
58 | * (i.e. because some other cpu has called up() in the meantime), | ||
59 | * in which case we just increment count. | ||
60 | */ | ||
61 | __sem_update_count(sem, 1); | ||
62 | wake_up(&sem->wait); | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * Note that when we come in to __down or __down_interruptible, | ||
67 | * we have already decremented count, but that decrement was | ||
68 | * ineffective since the result was < 0, and any negative value | ||
69 | * of count is equivalent to 0. | ||
70 | * Thus it is only when we decrement count from some value > 0 | ||
71 | * that we have actually got the semaphore. | ||
72 | */ | ||
73 | void __sched __down(struct semaphore *sem) | ||
74 | { | ||
75 | struct task_struct *tsk = current; | ||
76 | DECLARE_WAITQUEUE(wait, tsk); | ||
77 | |||
78 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
79 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
80 | smp_wmb(); | ||
81 | |||
82 | /* | ||
83 | * Try to get the semaphore. If the count is > 0, then we've | ||
84 | * got the semaphore; we decrement count and exit the loop. | ||
85 | * If the count is 0 or negative, we set it to -1, indicating | ||
86 | * that we are asleep, and then sleep. | ||
87 | */ | ||
88 | while (__sem_update_count(sem, -1) <= 0) { | ||
89 | schedule(); | ||
90 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
91 | } | ||
92 | remove_wait_queue(&sem->wait, &wait); | ||
93 | tsk->state = TASK_RUNNING; | ||
94 | |||
95 | /* | ||
96 | * If there are any more sleepers, wake one of them up so | ||
97 | * that it can either get the semaphore, or set count to -1 | ||
98 | * indicating that there are still processes sleeping. | ||
99 | */ | ||
100 | wake_up(&sem->wait); | ||
101 | } | ||
102 | |||
103 | int __sched __down_interruptible(struct semaphore * sem) | ||
104 | { | ||
105 | int retval = 0; | ||
106 | struct task_struct *tsk = current; | ||
107 | DECLARE_WAITQUEUE(wait, tsk); | ||
108 | |||
109 | tsk->state = TASK_INTERRUPTIBLE; | ||
110 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
111 | smp_wmb(); | ||
112 | |||
113 | while (__sem_update_count(sem, -1) <= 0) { | ||
114 | if (signal_pending(current)) { | ||
115 | /* | ||
116 | * A signal is pending - give up trying. | ||
117 | * Set sem->count to 0 if it is negative, | ||
118 | * since we are no longer sleeping. | ||
119 | */ | ||
120 | __sem_update_count(sem, 0); | ||
121 | retval = -EINTR; | ||
122 | break; | ||
123 | } | ||
124 | schedule(); | ||
125 | tsk->state = TASK_INTERRUPTIBLE; | ||
126 | } | ||
127 | tsk->state = TASK_RUNNING; | ||
128 | remove_wait_queue(&sem->wait, &wait); | ||
129 | wake_up(&sem->wait); | ||
130 | return retval; | ||
131 | } | ||
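For reference, the lwarx/stwcx. loop in the deleted __sem_update_count() clamps a negative count to zero before adding the increment (the srawi/andc pair computes the clamp) and retries if the reservation is lost. A minimal portable sketch of the same update using GCC atomic builtins, for illustration only; the in-kernel replacement is the generic spinlock-based semaphore, not this:

    /* Portable restatement of __sem_update_count(): store
     * max(old, 0) + incr and return the old count, retrying when the
     * update races with another CPU. */
    static int sem_update_count(int *count, int incr)
    {
            int old, new;

            do {
                    old = __atomic_load_n(count, __ATOMIC_RELAXED);
                    new = (old > 0 ? old : 0) + incr;
            } while (!__atomic_compare_exchange_n(count, &old, new,
                                                  0 /* strong CAS */,
                                                  __ATOMIC_ACQUIRE,
                                                  __ATOMIC_RELAXED));
            return old;
    }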
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c index 294055902f0c..bfddfdee0b65 100644 --- a/arch/ppc/kernel/setup.c +++ b/arch/ppc/kernel/setup.c | |||
@@ -10,7 +10,6 @@ | |||
10 | #include <linux/reboot.h> | 10 | #include <linux/reboot.h> |
11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
12 | #include <linux/initrd.h> | 12 | #include <linux/initrd.h> |
13 | #include <linux/ide.h> | ||
14 | #include <linux/screen_info.h> | 13 | #include <linux/screen_info.h> |
15 | #include <linux/bootmem.h> | 14 | #include <linux/bootmem.h> |
16 | #include <linux/seq_file.h> | 15 | #include <linux/seq_file.h> |
@@ -57,7 +56,6 @@ extern void ppc6xx_idle(void); | |||
57 | extern void power4_idle(void); | 56 | extern void power4_idle(void); |
58 | 57 | ||
59 | extern boot_infos_t *boot_infos; | 58 | extern boot_infos_t *boot_infos; |
60 | struct ide_machdep_calls ppc_ide_md; | ||
61 | 59 | ||
62 | /* Used with the BI_MEMSIZE bootinfo parameter to store the memory | 60 | /* Used with the BI_MEMSIZE bootinfo parameter to store the memory |
63 | size value reported by the boot loader. */ | 61 | size value reported by the boot loader. */ |
diff --git a/arch/ppc/platforms/4xx/bamboo.c b/arch/ppc/platforms/4xx/bamboo.c index 017623c9bc4b..01f20f4c14fe 100644 --- a/arch/ppc/platforms/4xx/bamboo.c +++ b/arch/ppc/platforms/4xx/bamboo.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/blkdev.h> | 22 | #include <linux/blkdev.h> |
23 | #include <linux/console.h> | 23 | #include <linux/console.h> |
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/ide.h> | ||
26 | #include <linux/initrd.h> | 25 | #include <linux/initrd.h> |
27 | #include <linux/seq_file.h> | 26 | #include <linux/seq_file.h> |
28 | #include <linux/root_dev.h> | 27 | #include <linux/root_dev.h> |
diff --git a/arch/ppc/platforms/4xx/ebony.c b/arch/ppc/platforms/4xx/ebony.c index 453643a0eeea..8027a36fc5bb 100644 --- a/arch/ppc/platforms/4xx/ebony.c +++ b/arch/ppc/platforms/4xx/ebony.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/blkdev.h> | 25 | #include <linux/blkdev.h> |
26 | #include <linux/console.h> | 26 | #include <linux/console.h> |
27 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
28 | #include <linux/ide.h> | ||
29 | #include <linux/initrd.h> | 28 | #include <linux/initrd.h> |
30 | #include <linux/seq_file.h> | 29 | #include <linux/seq_file.h> |
31 | #include <linux/root_dev.h> | 30 | #include <linux/root_dev.h> |
diff --git a/arch/ppc/platforms/4xx/luan.c b/arch/ppc/platforms/4xx/luan.c index b79ebb8a3e6c..f6d8c2e8b6b7 100644 --- a/arch/ppc/platforms/4xx/luan.c +++ b/arch/ppc/platforms/4xx/luan.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/blkdev.h> | 23 | #include <linux/blkdev.h> |
24 | #include <linux/console.h> | 24 | #include <linux/console.h> |
25 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <linux/ide.h> | ||
27 | #include <linux/initrd.h> | 26 | #include <linux/initrd.h> |
28 | #include <linux/seq_file.h> | 27 | #include <linux/seq_file.h> |
29 | #include <linux/root_dev.h> | 28 | #include <linux/root_dev.h> |
diff --git a/arch/ppc/platforms/4xx/ocotea.c b/arch/ppc/platforms/4xx/ocotea.c index 28a712cd4800..308386ef6f77 100644 --- a/arch/ppc/platforms/4xx/ocotea.c +++ b/arch/ppc/platforms/4xx/ocotea.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/blkdev.h> | 23 | #include <linux/blkdev.h> |
24 | #include <linux/console.h> | 24 | #include <linux/console.h> |
25 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <linux/ide.h> | ||
27 | #include <linux/initrd.h> | 26 | #include <linux/initrd.h> |
28 | #include <linux/seq_file.h> | 27 | #include <linux/seq_file.h> |
29 | #include <linux/root_dev.h> | 28 | #include <linux/root_dev.h> |
diff --git a/arch/ppc/platforms/4xx/taishan.c b/arch/ppc/platforms/4xx/taishan.c index f6a0c6650f33..115694275083 100644 --- a/arch/ppc/platforms/4xx/taishan.c +++ b/arch/ppc/platforms/4xx/taishan.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/blkdev.h> | 23 | #include <linux/blkdev.h> |
24 | #include <linux/console.h> | 24 | #include <linux/console.h> |
25 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <linux/ide.h> | ||
27 | #include <linux/initrd.h> | 26 | #include <linux/initrd.h> |
28 | #include <linux/seq_file.h> | 27 | #include <linux/seq_file.h> |
29 | #include <linux/root_dev.h> | 28 | #include <linux/root_dev.h> |
diff --git a/arch/ppc/platforms/4xx/yucca.c b/arch/ppc/platforms/4xx/yucca.c index 66a44ff0d926..f6cfd44281fc 100644 --- a/arch/ppc/platforms/4xx/yucca.c +++ b/arch/ppc/platforms/4xx/yucca.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/blkdev.h> | 24 | #include <linux/blkdev.h> |
25 | #include <linux/console.h> | 25 | #include <linux/console.h> |
26 | #include <linux/delay.h> | 26 | #include <linux/delay.h> |
27 | #include <linux/ide.h> | ||
28 | #include <linux/initrd.h> | 27 | #include <linux/initrd.h> |
29 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
30 | #include <linux/root_dev.h> | 29 | #include <linux/root_dev.h> |
diff --git a/arch/ppc/platforms/chestnut.c b/arch/ppc/platforms/chestnut.c index dcd6070b85eb..27c140f218ed 100644 --- a/arch/ppc/platforms/chestnut.c +++ b/arch/ppc/platforms/chestnut.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/initrd.h> | 22 | #include <linux/initrd.h> |
23 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
24 | #include <linux/seq_file.h> | 24 | #include <linux/seq_file.h> |
25 | #include <linux/ide.h> | ||
26 | #include <linux/serial.h> | 25 | #include <linux/serial.h> |
27 | #include <linux/serial_core.h> | 26 | #include <linux/serial_core.h> |
28 | #include <linux/serial_8250.h> | 27 | #include <linux/serial_8250.h> |
diff --git a/arch/ppc/platforms/cpci690.c b/arch/ppc/platforms/cpci690.c index e78bccf96c9d..07f672d58767 100644 --- a/arch/ppc/platforms/cpci690.c +++ b/arch/ppc/platforms/cpci690.c | |||
@@ -10,7 +10,6 @@ | |||
10 | */ | 10 | */ |
11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
12 | #include <linux/pci.h> | 12 | #include <linux/pci.h> |
13 | #include <linux/ide.h> | ||
14 | #include <linux/irq.h> | 13 | #include <linux/irq.h> |
15 | #include <linux/fs.h> | 14 | #include <linux/fs.h> |
16 | #include <linux/seq_file.h> | 15 | #include <linux/seq_file.h> |
diff --git a/arch/ppc/platforms/ev64260.c b/arch/ppc/platforms/ev64260.c index c1f77e1d368e..f522b31c46d7 100644 --- a/arch/ppc/platforms/ev64260.c +++ b/arch/ppc/platforms/ev64260.c | |||
@@ -23,7 +23,6 @@ | |||
23 | 23 | ||
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/pci.h> | 25 | #include <linux/pci.h> |
26 | #include <linux/ide.h> | ||
27 | #include <linux/irq.h> | 26 | #include <linux/irq.h> |
28 | #include <linux/fs.h> | 27 | #include <linux/fs.h> |
29 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
diff --git a/arch/ppc/platforms/hdpu.c b/arch/ppc/platforms/hdpu.c index ca5de13712fd..904b518c152e 100644 --- a/arch/ppc/platforms/hdpu.c +++ b/arch/ppc/platforms/hdpu.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/pci.h> | 16 | #include <linux/pci.h> |
17 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
18 | #include <linux/irq.h> | 18 | #include <linux/irq.h> |
19 | #include <linux/ide.h> | ||
20 | #include <linux/seq_file.h> | 19 | #include <linux/seq_file.h> |
21 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
22 | 21 | ||
@@ -604,41 +603,6 @@ static void parse_bootinfo(unsigned long r3, | |||
604 | } | 603 | } |
605 | } | 604 | } |
606 | 605 | ||
607 | #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) | ||
608 | static void | ||
609 | hdpu_ide_request_region(ide_ioreg_t from, unsigned int extent, const char *name) | ||
610 | { | ||
611 | request_region(from, extent, name); | ||
612 | return; | ||
613 | } | ||
614 | |||
615 | static void hdpu_ide_release_region(ide_ioreg_t from, unsigned int extent) | ||
616 | { | ||
617 | release_region(from, extent); | ||
618 | return; | ||
619 | } | ||
620 | |||
621 | static void __init | ||
622 | hdpu_ide_pci_init_hwif_ports(hw_regs_t * hw, ide_ioreg_t data_port, | ||
623 | ide_ioreg_t ctrl_port, int *irq) | ||
624 | { | ||
625 | struct pci_dev *dev; | ||
626 | |||
627 | pci_for_each_dev(dev) { | ||
628 | if (((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) || | ||
629 | ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)) { | ||
630 | hw->irq = dev->irq; | ||
631 | |||
632 | if (irq != NULL) { | ||
633 | *irq = dev->irq; | ||
634 | } | ||
635 | } | ||
636 | } | ||
637 | |||
638 | return; | ||
639 | } | ||
640 | #endif | ||
641 | |||
642 | void hdpu_heartbeat(void) | 606 | void hdpu_heartbeat(void) |
643 | { | 607 | { |
644 | if (mv64x60_read(&bh, MV64x60_GPP_VALUE) & (1 << 5)) | 608 | if (mv64x60_read(&bh, MV64x60_GPP_VALUE) & (1 << 5)) |
diff --git a/arch/ppc/platforms/lopec.c b/arch/ppc/platforms/lopec.c index b947c774f524..1e3aa6e9b6c7 100644 --- a/arch/ppc/platforms/lopec.c +++ b/arch/ppc/platforms/lopec.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/pci_ids.h> | 15 | #include <linux/pci_ids.h> |
16 | #include <linux/ioport.h> | 16 | #include <linux/ioport.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/ide.h> | ||
19 | #include <linux/seq_file.h> | 18 | #include <linux/seq_file.h> |
20 | #include <linux/initrd.h> | 19 | #include <linux/initrd.h> |
21 | #include <linux/console.h> | 20 | #include <linux/console.h> |
@@ -168,85 +167,6 @@ lopec_power_off(void) | |||
168 | lopec_halt(); | 167 | lopec_halt(); |
169 | } | 168 | } |
170 | 169 | ||
171 | #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) | ||
172 | int lopec_ide_ports_known = 0; | ||
173 | static unsigned long lopec_ide_regbase[MAX_HWIFS]; | ||
174 | static unsigned long lopec_ide_ctl_regbase[MAX_HWIFS]; | ||
175 | static unsigned long lopec_idedma_regbase; | ||
176 | |||
177 | static void | ||
178 | lopec_ide_probe(void) | ||
179 | { | ||
180 | struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_WINBOND, | ||
181 | PCI_DEVICE_ID_WINBOND_82C105, | ||
182 | NULL); | ||
183 | lopec_ide_ports_known = 1; | ||
184 | |||
185 | if (dev) { | ||
186 | lopec_ide_regbase[0] = dev->resource[0].start; | ||
187 | lopec_ide_regbase[1] = dev->resource[2].start; | ||
188 | lopec_ide_ctl_regbase[0] = dev->resource[1].start; | ||
189 | lopec_ide_ctl_regbase[1] = dev->resource[3].start; | ||
190 | lopec_idedma_regbase = dev->resource[4].start; | ||
191 | pci_dev_put(dev); | ||
192 | } | ||
193 | } | ||
194 | |||
195 | static int | ||
196 | lopec_ide_default_irq(unsigned long base) | ||
197 | { | ||
198 | if (lopec_ide_ports_known == 0) | ||
199 | lopec_ide_probe(); | ||
200 | |||
201 | if (base == lopec_ide_regbase[0]) | ||
202 | return 14; | ||
203 | else if (base == lopec_ide_regbase[1]) | ||
204 | return 15; | ||
205 | else | ||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | static unsigned long | ||
210 | lopec_ide_default_io_base(int index) | ||
211 | { | ||
212 | if (lopec_ide_ports_known == 0) | ||
213 | lopec_ide_probe(); | ||
214 | return lopec_ide_regbase[index]; | ||
215 | } | ||
216 | |||
217 | static void __init | ||
218 | lopec_ide_init_hwif_ports(hw_regs_t *hw, unsigned long data, | ||
219 | unsigned long ctl, int *irq) | ||
220 | { | ||
221 | unsigned long reg = data; | ||
222 | uint alt_status_base; | ||
223 | int i; | ||
224 | |||
225 | for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) | ||
226 | hw->io_ports[i] = reg++; | ||
227 | |||
228 | if (data == lopec_ide_regbase[0]) { | ||
229 | alt_status_base = lopec_ide_ctl_regbase[0] + 2; | ||
230 | hw->irq = 14; | ||
231 | } else if (data == lopec_ide_regbase[1]) { | ||
232 | alt_status_base = lopec_ide_ctl_regbase[1] + 2; | ||
233 | hw->irq = 15; | ||
234 | } else { | ||
235 | alt_status_base = 0; | ||
236 | hw->irq = 0; | ||
237 | } | ||
238 | |||
239 | if (ctl) | ||
240 | hw->io_ports[IDE_CONTROL_OFFSET] = ctl; | ||
241 | else | ||
242 | hw->io_ports[IDE_CONTROL_OFFSET] = alt_status_base; | ||
243 | |||
244 | if (irq != NULL) | ||
245 | *irq = hw->irq; | ||
246 | |||
247 | } | ||
248 | #endif /* BLK_DEV_IDE */ | ||
249 | |||
250 | static void __init | 170 | static void __init |
251 | lopec_init_IRQ(void) | 171 | lopec_init_IRQ(void) |
252 | { | 172 | { |
@@ -384,11 +304,6 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5, | |||
384 | ppc_md.nvram_read_val = todc_direct_read_val; | 304 | ppc_md.nvram_read_val = todc_direct_read_val; |
385 | ppc_md.nvram_write_val = todc_direct_write_val; | 305 | ppc_md.nvram_write_val = todc_direct_write_val; |
386 | 306 | ||
387 | #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) | ||
388 | ppc_ide_md.default_irq = lopec_ide_default_irq; | ||
389 | ppc_ide_md.default_io_base = lopec_ide_default_io_base; | ||
390 | ppc_ide_md.ide_init_hwif = lopec_ide_init_hwif_ports; | ||
391 | #endif | ||
392 | #ifdef CONFIG_SERIAL_TEXT_DEBUG | 307 | #ifdef CONFIG_SERIAL_TEXT_DEBUG |
393 | ppc_md.progress = gen550_progress; | 308 | ppc_md.progress = gen550_progress; |
394 | #endif | 309 | #endif |
diff --git a/arch/ppc/platforms/mvme5100.c b/arch/ppc/platforms/mvme5100.c index bb8d4a45437a..053b54ac88f2 100644 --- a/arch/ppc/platforms/mvme5100.c +++ b/arch/ppc/platforms/mvme5100.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/initrd.h> | 17 | #include <linux/initrd.h> |
18 | #include <linux/console.h> | 18 | #include <linux/console.h> |
19 | #include <linux/delay.h> | 19 | #include <linux/delay.h> |
20 | #include <linux/ide.h> | ||
21 | #include <linux/seq_file.h> | 20 | #include <linux/seq_file.h> |
22 | #include <linux/kdev_t.h> | 21 | #include <linux/kdev_t.h> |
23 | #include <linux/root_dev.h> | 22 | #include <linux/root_dev.h> |
diff --git a/arch/ppc/platforms/powerpmc250.c b/arch/ppc/platforms/powerpmc250.c index 4d46650e07fd..162dc85ff7be 100644 --- a/arch/ppc/platforms/powerpmc250.c +++ b/arch/ppc/platforms/powerpmc250.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/seq_file.h> | 27 | #include <linux/seq_file.h> |
28 | #include <linux/ide.h> | ||
29 | #include <linux/root_dev.h> | 28 | #include <linux/root_dev.h> |
30 | 29 | ||
31 | #include <asm/byteorder.h> | 30 | #include <asm/byteorder.h> |
diff --git a/arch/ppc/platforms/pplus.c b/arch/ppc/platforms/pplus.c index 8a1788c48155..cbcac85c7a78 100644 --- a/arch/ppc/platforms/pplus.c +++ b/arch/ppc/platforms/pplus.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/ioport.h> | 19 | #include <linux/ioport.h> |
20 | #include <linux/console.h> | 20 | #include <linux/console.h> |
21 | #include <linux/pci.h> | 21 | #include <linux/pci.h> |
22 | #include <linux/ide.h> | ||
23 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
24 | #include <linux/root_dev.h> | 23 | #include <linux/root_dev.h> |
25 | 24 | ||
@@ -668,57 +667,6 @@ static void __init pplus_init_IRQ(void) | |||
668 | ppc_md.progress("init_irq: exit", 0); | 667 | ppc_md.progress("init_irq: exit", 0); |
669 | } | 668 | } |
670 | 669 | ||
671 | #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) | ||
672 | /* | ||
673 | * IDE stuff. | ||
674 | */ | ||
675 | static int pplus_ide_default_irq(unsigned long base) | ||
676 | { | ||
677 | switch (base) { | ||
678 | case 0x1f0: | ||
679 | return 14; | ||
680 | case 0x170: | ||
681 | return 15; | ||
682 | default: | ||
683 | return 0; | ||
684 | } | ||
685 | } | ||
686 | |||
687 | static unsigned long pplus_ide_default_io_base(int index) | ||
688 | { | ||
689 | switch (index) { | ||
690 | case 0: | ||
691 | return 0x1f0; | ||
692 | case 1: | ||
693 | return 0x170; | ||
694 | default: | ||
695 | return 0; | ||
696 | } | ||
697 | } | ||
698 | |||
699 | static void __init | ||
700 | pplus_ide_init_hwif_ports(hw_regs_t * hw, unsigned long data_port, | ||
701 | unsigned long ctrl_port, int *irq) | ||
702 | { | ||
703 | unsigned long reg = data_port; | ||
704 | int i; | ||
705 | |||
706 | for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) { | ||
707 | hw->io_ports[i] = reg; | ||
708 | reg += 1; | ||
709 | } | ||
710 | |||
711 | if (ctrl_port) | ||
712 | hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port; | ||
713 | else | ||
714 | hw->io_ports[IDE_CONTROL_OFFSET] = | ||
715 | hw->io_ports[IDE_DATA_OFFSET] + 0x206; | ||
716 | |||
717 | if (irq != NULL) | ||
718 | *irq = pplus_ide_default_irq(data_port); | ||
719 | } | ||
720 | #endif | ||
721 | |||
722 | #ifdef CONFIG_SMP | 670 | #ifdef CONFIG_SMP |
723 | /* PowerPlus (MTX) support */ | 671 | /* PowerPlus (MTX) support */ |
724 | static int __init smp_pplus_probe(void) | 672 | static int __init smp_pplus_probe(void) |
@@ -884,12 +832,6 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5, | |||
884 | ppc_md.find_end_of_memory = pplus_find_end_of_memory; | 832 | ppc_md.find_end_of_memory = pplus_find_end_of_memory; |
885 | ppc_md.setup_io_mappings = pplus_map_io; | 833 | ppc_md.setup_io_mappings = pplus_map_io; |
886 | 834 | ||
887 | #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) | ||
888 | ppc_ide_md.default_irq = pplus_ide_default_irq; | ||
889 | ppc_ide_md.default_io_base = pplus_ide_default_io_base; | ||
890 | ppc_ide_md.ide_init_hwif = pplus_ide_init_hwif_ports; | ||
891 | #endif | ||
892 | |||
893 | #ifdef CONFIG_SERIAL_TEXT_DEBUG | 835 | #ifdef CONFIG_SERIAL_TEXT_DEBUG |
894 | ppc_md.progress = gen550_progress; | 836 | ppc_md.progress = gen550_progress; |
895 | #endif /* CONFIG_SERIAL_TEXT_DEBUG */ | 837 | #endif /* CONFIG_SERIAL_TEXT_DEBUG */ |
diff --git a/arch/ppc/platforms/prep_setup.c b/arch/ppc/platforms/prep_setup.c index 38449855d5ff..465b658c927d 100644 --- a/arch/ppc/platforms/prep_setup.c +++ b/arch/ppc/platforms/prep_setup.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <linux/console.h> | 33 | #include <linux/console.h> |
34 | #include <linux/timex.h> | 34 | #include <linux/timex.h> |
35 | #include <linux/pci.h> | 35 | #include <linux/pci.h> |
36 | #include <linux/ide.h> | ||
37 | #include <linux/seq_file.h> | 36 | #include <linux/seq_file.h> |
38 | #include <linux/root_dev.h> | 37 | #include <linux/root_dev.h> |
39 | 38 | ||
@@ -894,38 +893,6 @@ prep_init_IRQ(void) | |||
894 | i8259_init(MPC10X_MAPA_PCI_INTACK_ADDR, 0); | 893 | i8259_init(MPC10X_MAPA_PCI_INTACK_ADDR, 0); |
895 | } | 894 | } |
896 | 895 | ||
897 | #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) | ||
898 | /* | ||
899 | * IDE stuff. | ||
900 | */ | ||
901 | static int | ||
902 | prep_ide_default_irq(unsigned long base) | ||
903 | { | ||
904 | switch (base) { | ||
905 | case 0x1f0: return 13; | ||
906 | case 0x170: return 13; | ||
907 | case 0x1e8: return 11; | ||
908 | case 0x168: return 10; | ||
909 | case 0xfff0: return 14; /* MCP(N)750 ide0 */ | ||
910 | case 0xffe0: return 15; /* MCP(N)750 ide1 */ | ||
911 | default: return 0; | ||
912 | } | ||
913 | } | ||
914 | |||
915 | static unsigned long | ||
916 | prep_ide_default_io_base(int index) | ||
917 | { | ||
918 | switch (index) { | ||
919 | case 0: return 0x1f0; | ||
920 | case 1: return 0x170; | ||
921 | case 2: return 0x1e8; | ||
922 | case 3: return 0x168; | ||
923 | default: | ||
924 | return 0; | ||
925 | } | ||
926 | } | ||
927 | #endif | ||
928 | |||
929 | #ifdef CONFIG_SMP | 896 | #ifdef CONFIG_SMP |
930 | /* PReP (MTX) support */ | 897 | /* PReP (MTX) support */ |
931 | static int __init | 898 | static int __init |
@@ -1070,11 +1037,6 @@ prep_init(unsigned long r3, unsigned long r4, unsigned long r5, | |||
1070 | 1037 | ||
1071 | ppc_md.setup_io_mappings = prep_map_io; | 1038 | ppc_md.setup_io_mappings = prep_map_io; |
1072 | 1039 | ||
1073 | #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) | ||
1074 | ppc_ide_md.default_irq = prep_ide_default_irq; | ||
1075 | ppc_ide_md.default_io_base = prep_ide_default_io_base; | ||
1076 | #endif | ||
1077 | |||
1078 | #ifdef CONFIG_SMP | 1040 | #ifdef CONFIG_SMP |
1079 | smp_ops = &prep_smp_ops; | 1041 | smp_ops = &prep_smp_ops; |
1080 | #endif /* CONFIG_SMP */ | 1042 | #endif /* CONFIG_SMP */ |
diff --git a/arch/ppc/platforms/prpmc750.c b/arch/ppc/platforms/prpmc750.c index fcab513e206d..93bd593cf957 100644 --- a/arch/ppc/platforms/prpmc750.c +++ b/arch/ppc/platforms/prpmc750.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/console.h> | 22 | #include <linux/console.h> |
23 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
24 | #include <linux/seq_file.h> | 24 | #include <linux/seq_file.h> |
25 | #include <linux/ide.h> | ||
26 | #include <linux/root_dev.h> | 25 | #include <linux/root_dev.h> |
27 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
28 | #include <linux/serial_reg.h> | 27 | #include <linux/serial_reg.h> |
diff --git a/arch/ppc/platforms/prpmc800.c b/arch/ppc/platforms/prpmc800.c index f4ade5cd7a88..5bcda7f92cd0 100644 --- a/arch/ppc/platforms/prpmc800.c +++ b/arch/ppc/platforms/prpmc800.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/console.h> | 20 | #include <linux/console.h> |
21 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
22 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
23 | #include <linux/ide.h> | ||
24 | #include <linux/root_dev.h> | 23 | #include <linux/root_dev.h> |
25 | #include <linux/harrier_defs.h> | 24 | #include <linux/harrier_defs.h> |
26 | 25 | ||
diff --git a/arch/ppc/platforms/radstone_ppc7d.c b/arch/ppc/platforms/radstone_ppc7d.c index 44d4398a36ff..179b4a99b5b5 100644 --- a/arch/ppc/platforms/radstone_ppc7d.c +++ b/arch/ppc/platforms/radstone_ppc7d.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <linux/initrd.h> | 29 | #include <linux/initrd.h> |
30 | #include <linux/console.h> | 30 | #include <linux/console.h> |
31 | #include <linux/delay.h> | 31 | #include <linux/delay.h> |
32 | #include <linux/ide.h> | ||
33 | #include <linux/seq_file.h> | 32 | #include <linux/seq_file.h> |
34 | #include <linux/root_dev.h> | 33 | #include <linux/root_dev.h> |
35 | #include <linux/serial.h> | 34 | #include <linux/serial.h> |
diff --git a/arch/ppc/platforms/residual.c b/arch/ppc/platforms/residual.c index c9911601cfdf..18495e754e30 100644 --- a/arch/ppc/platforms/residual.c +++ b/arch/ppc/platforms/residual.c | |||
@@ -38,7 +38,6 @@ | |||
38 | #include <linux/init.h> | 38 | #include <linux/init.h> |
39 | #include <linux/ioport.h> | 39 | #include <linux/ioport.h> |
40 | #include <linux/pci.h> | 40 | #include <linux/pci.h> |
41 | #include <linux/ide.h> | ||
42 | 41 | ||
43 | #include <asm/sections.h> | 42 | #include <asm/sections.h> |
44 | #include <asm/mmu.h> | 43 | #include <asm/mmu.h> |
diff --git a/arch/ppc/platforms/sandpoint.c b/arch/ppc/platforms/sandpoint.c index 3352fae1c722..b4897bdb742a 100644 --- a/arch/ppc/platforms/sandpoint.c +++ b/arch/ppc/platforms/sandpoint.c | |||
@@ -71,7 +71,6 @@ | |||
71 | #include <linux/initrd.h> | 71 | #include <linux/initrd.h> |
72 | #include <linux/console.h> | 72 | #include <linux/console.h> |
73 | #include <linux/delay.h> | 73 | #include <linux/delay.h> |
74 | #include <linux/ide.h> | ||
75 | #include <linux/seq_file.h> | 74 | #include <linux/seq_file.h> |
76 | #include <linux/root_dev.h> | 75 | #include <linux/root_dev.h> |
77 | #include <linux/serial.h> | 76 | #include <linux/serial.h> |
@@ -559,93 +558,6 @@ sandpoint_show_cpuinfo(struct seq_file *m) | |||
559 | return 0; | 558 | return 0; |
560 | } | 559 | } |
561 | 560 | ||
562 | #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) | ||
563 | /* | ||
564 | * IDE support. | ||
565 | */ | ||
566 | static int sandpoint_ide_ports_known = 0; | ||
567 | static unsigned long sandpoint_ide_regbase[MAX_HWIFS]; | ||
568 | static unsigned long sandpoint_ide_ctl_regbase[MAX_HWIFS]; | ||
569 | static unsigned long sandpoint_idedma_regbase; | ||
570 | |||
571 | static void | ||
572 | sandpoint_ide_probe(void) | ||
573 | { | ||
574 | struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_WINBOND, | ||
575 | PCI_DEVICE_ID_WINBOND_82C105, NULL); | ||
576 | |||
577 | if (pdev) { | ||
578 | sandpoint_ide_regbase[0]=pdev->resource[0].start; | ||
579 | sandpoint_ide_regbase[1]=pdev->resource[2].start; | ||
580 | sandpoint_ide_ctl_regbase[0]=pdev->resource[1].start; | ||
581 | sandpoint_ide_ctl_regbase[1]=pdev->resource[3].start; | ||
582 | sandpoint_idedma_regbase=pdev->resource[4].start; | ||
583 | pci_dev_put(pdev); | ||
584 | } | ||
585 | |||
586 | sandpoint_ide_ports_known = 1; | ||
587 | } | ||
588 | |||
589 | static int | ||
590 | sandpoint_ide_default_irq(unsigned long base) | ||
591 | { | ||
592 | if (sandpoint_ide_ports_known == 0) | ||
593 | sandpoint_ide_probe(); | ||
594 | |||
595 | if (base == sandpoint_ide_regbase[0]) | ||
596 | return SANDPOINT_IDE_INT0; | ||
597 | else if (base == sandpoint_ide_regbase[1]) | ||
598 | return SANDPOINT_IDE_INT1; | ||
599 | else | ||
600 | return 0; | ||
601 | } | ||
602 | |||
603 | static unsigned long | ||
604 | sandpoint_ide_default_io_base(int index) | ||
605 | { | ||
606 | if (sandpoint_ide_ports_known == 0) | ||
607 | sandpoint_ide_probe(); | ||
608 | |||
609 | return sandpoint_ide_regbase[index]; | ||
610 | } | ||
611 | |||
612 | static void __init | ||
613 | sandpoint_ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port, | ||
614 | unsigned long ctrl_port, int *irq) | ||
615 | { | ||
616 | unsigned long reg = data_port; | ||
617 | uint alt_status_base; | ||
618 | int i; | ||
619 | |||
620 | for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) { | ||
621 | hw->io_ports[i] = reg++; | ||
622 | } | ||
623 | |||
624 | if (data_port == sandpoint_ide_regbase[0]) { | ||
625 | alt_status_base = sandpoint_ide_ctl_regbase[0] + 2; | ||
626 | hw->irq = 14; | ||
627 | } | ||
628 | else if (data_port == sandpoint_ide_regbase[1]) { | ||
629 | alt_status_base = sandpoint_ide_ctl_regbase[1] + 2; | ||
630 | hw->irq = 15; | ||
631 | } | ||
632 | else { | ||
633 | alt_status_base = 0; | ||
634 | hw->irq = 0; | ||
635 | } | ||
636 | |||
637 | if (ctrl_port) { | ||
638 | hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port; | ||
639 | } else { | ||
640 | hw->io_ports[IDE_CONTROL_OFFSET] = alt_status_base; | ||
641 | } | ||
642 | |||
643 | if (irq != NULL) { | ||
644 | *irq = hw->irq; | ||
645 | } | ||
646 | } | ||
647 | #endif | ||
648 | |||
649 | /* | 561 | /* |
650 | * Set BAT 3 to map 0xf8000000 to end of physical memory space 1-to-1. | 562 | * Set BAT 3 to map 0xf8000000 to end of physical memory space 1-to-1. |
651 | */ | 563 | */ |
@@ -736,10 +648,4 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5, | |||
736 | #ifdef CONFIG_SERIAL_TEXT_DEBUG | 648 | #ifdef CONFIG_SERIAL_TEXT_DEBUG |
737 | ppc_md.progress = gen550_progress; | 649 | ppc_md.progress = gen550_progress; |
738 | #endif | 650 | #endif |
739 | |||
740 | #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) | ||
741 | ppc_ide_md.default_irq = sandpoint_ide_default_irq; | ||
742 | ppc_ide_md.default_io_base = sandpoint_ide_default_io_base; | ||
743 | ppc_ide_md.ide_init_hwif = sandpoint_ide_init_hwif_ports; | ||
744 | #endif | ||
745 | } | 651 | } |
diff --git a/arch/ppc/platforms/sandpoint.h b/arch/ppc/platforms/sandpoint.h index 3b64e6418489..ed83759e4044 100644 --- a/arch/ppc/platforms/sandpoint.h +++ b/arch/ppc/platforms/sandpoint.h | |||
@@ -28,9 +28,6 @@ | |||
28 | */ | 28 | */ |
29 | #define SANDPOINT_IDE_INT0 23 /* EPIC 7 */ | 29 | #define SANDPOINT_IDE_INT0 23 /* EPIC 7 */ |
30 | #define SANDPOINT_IDE_INT1 24 /* EPIC 8 */ | 30 | #define SANDPOINT_IDE_INT1 24 /* EPIC 8 */ |
31 | #else | ||
32 | #define SANDPOINT_IDE_INT0 14 /* 8259 Test */ | ||
33 | #define SANDPOINT_IDE_INT1 15 /* 8259 Test */ | ||
34 | #endif | 31 | #endif |
35 | 32 | ||
36 | /* | 33 | /* |
diff --git a/arch/ppc/platforms/spruce.c b/arch/ppc/platforms/spruce.c index f4de50ba292e..a344134f14b8 100644 --- a/arch/ppc/platforms/spruce.c +++ b/arch/ppc/platforms/spruce.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/console.h> | 22 | #include <linux/console.h> |
23 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
24 | #include <linux/seq_file.h> | 24 | #include <linux/seq_file.h> |
25 | #include <linux/ide.h> | ||
26 | #include <linux/root_dev.h> | 25 | #include <linux/root_dev.h> |
27 | #include <linux/serial.h> | 26 | #include <linux/serial.h> |
28 | #include <linux/tty.h> | 27 | #include <linux/tty.h> |
diff --git a/arch/ppc/syslib/m8xx_setup.c b/arch/ppc/syslib/m8xx_setup.c index 9caf850c9b38..19749e9bcf91 100644 --- a/arch/ppc/syslib/m8xx_setup.c +++ b/arch/ppc/syslib/m8xx_setup.c | |||
@@ -87,8 +87,6 @@ void m8xx_calibrate_decr(void); | |||
87 | 87 | ||
88 | unsigned char __res[sizeof(bd_t)]; | 88 | unsigned char __res[sizeof(bd_t)]; |
89 | 89 | ||
90 | extern void m8xx_ide_init(void); | ||
91 | |||
92 | extern unsigned long find_available_memory(void); | 90 | extern unsigned long find_available_memory(void); |
93 | extern void m8xx_cpm_reset(void); | 91 | extern void m8xx_cpm_reset(void); |
94 | extern void m8xx_wdt_handler_install(bd_t *bp); | 92 | extern void m8xx_wdt_handler_install(bd_t *bp); |
@@ -474,8 +472,4 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5, | |||
474 | 472 | ||
475 | ppc_md.find_end_of_memory = m8xx_find_end_of_memory; | 473 | ppc_md.find_end_of_memory = m8xx_find_end_of_memory; |
476 | ppc_md.setup_io_mappings = m8xx_map_io; | 474 | ppc_md.setup_io_mappings = m8xx_map_io; |
477 | |||
478 | #if defined(CONFIG_BLK_DEV_MPC8xx_IDE) | ||
479 | m8xx_ide_init(); | ||
480 | #endif | ||
481 | } | 475 | } |
diff --git a/arch/ppc/syslib/ppc4xx_setup.c b/arch/ppc/syslib/ppc4xx_setup.c index debe14c083a1..353d746b47e1 100644 --- a/arch/ppc/syslib/ppc4xx_setup.c +++ b/arch/ppc/syslib/ppc4xx_setup.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/pci.h> | 24 | #include <linux/pci.h> |
25 | #include <linux/rtc.h> | 25 | #include <linux/rtc.h> |
26 | #include <linux/console.h> | 26 | #include <linux/console.h> |
27 | #include <linux/ide.h> | ||
28 | #include <linux/serial_reg.h> | 27 | #include <linux/serial_reg.h> |
29 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
30 | 29 | ||
@@ -189,24 +188,6 @@ ppc4xx_calibrate_decr(void) | |||
189 | mtspr(SPRN_PIT, tb_ticks_per_jiffy); | 188 | mtspr(SPRN_PIT, tb_ticks_per_jiffy); |
190 | } | 189 | } |
191 | 190 | ||
192 | /* | ||
193 | * IDE stuff. | ||
194 | * should be generic for every IDE PCI chipset | ||
195 | */ | ||
196 | #if defined(CONFIG_PCI) && defined(CONFIG_IDE) | ||
197 | static void | ||
198 | ppc4xx_ide_init_hwif_ports(hw_regs_t * hw, unsigned long data_port, | ||
199 | unsigned long ctrl_port, int *irq) | ||
200 | { | ||
201 | int i; | ||
202 | |||
203 | for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; ++i) | ||
204 | hw->io_ports[i] = data_port + i - IDE_DATA_OFFSET; | ||
205 | |||
206 | hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port; | ||
207 | } | ||
208 | #endif /* defined(CONFIG_PCI) && defined(CONFIG_IDE) */ | ||
209 | |||
210 | TODC_ALLOC(); | 191 | TODC_ALLOC(); |
211 | 192 | ||
212 | /* | 193 | /* |
@@ -271,10 +252,6 @@ ppc4xx_init(unsigned long r3, unsigned long r4, unsigned long r5, | |||
271 | #ifdef CONFIG_SERIAL_TEXT_DEBUG | 252 | #ifdef CONFIG_SERIAL_TEXT_DEBUG |
272 | ppc_md.progress = gen550_progress; | 253 | ppc_md.progress = gen550_progress; |
273 | #endif | 254 | #endif |
274 | |||
275 | #if defined(CONFIG_PCI) && defined(CONFIG_IDE) | ||
276 | ppc_ide_md.ide_init_hwif = ppc4xx_ide_init_hwif_ports; | ||
277 | #endif /* defined(CONFIG_PCI) && defined(CONFIG_IDE) */ | ||
278 | } | 255 | } |
279 | 256 | ||
280 | /* Called from machine_check_exception */ | 257 | /* Called from machine_check_exception */ |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 1831833c430e..f6a68e178fc5 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -3,6 +3,10 @@ | |||
3 | # see Documentation/kbuild/kconfig-language.txt. | 3 | # see Documentation/kbuild/kconfig-language.txt. |
4 | # | 4 | # |
5 | 5 | ||
6 | config SCHED_MC | ||
7 | def_bool y | ||
8 | depends on SMP | ||
9 | |||
6 | config MMU | 10 | config MMU |
7 | def_bool y | 11 | def_bool y |
8 | 12 | ||
@@ -39,6 +43,9 @@ config GENERIC_HWEIGHT | |||
39 | config GENERIC_TIME | 43 | config GENERIC_TIME |
40 | def_bool y | 44 | def_bool y |
41 | 45 | ||
46 | config GENERIC_CLOCKEVENTS | ||
47 | def_bool y | ||
48 | |||
42 | config GENERIC_BUG | 49 | config GENERIC_BUG |
43 | bool | 50 | bool |
44 | depends on BUG | 51 | depends on BUG |
@@ -69,6 +76,8 @@ menu "Base setup" | |||
69 | 76 | ||
70 | comment "Processor type and features" | 77 | comment "Processor type and features" |
71 | 78 | ||
79 | source "kernel/time/Kconfig" | ||
80 | |||
72 | config 64BIT | 81 | config 64BIT |
73 | bool "64 bit kernel" | 82 | bool "64 bit kernel" |
74 | help | 83 | help |
@@ -301,10 +310,7 @@ config QDIO | |||
301 | tristate "QDIO support" | 310 | tristate "QDIO support" |
302 | ---help--- | 311 | ---help--- |
303 | This driver provides the Queued Direct I/O base support for | 312 | This driver provides the Queued Direct I/O base support for |
304 | IBM mainframes. | 313 | IBM System z. |
305 | |||
306 | For details please refer to the documentation provided by IBM at | ||
307 | <http://www10.software.ibm.com/developerworks/opensource/linux390> | ||
308 | 314 | ||
309 | To compile this driver as a module, choose M here: the | 315 | To compile this driver as a module, choose M here: the |
310 | module will be called qdio. | 316 | module will be called qdio. |
@@ -486,25 +492,6 @@ config APPLDATA_NET_SUM | |||
486 | 492 | ||
487 | source kernel/Kconfig.hz | 493 | source kernel/Kconfig.hz |
488 | 494 | ||
489 | config NO_IDLE_HZ | ||
490 | bool "No HZ timer ticks in idle" | ||
491 | help | ||
492 | Switches the regular HZ timer off when the system is going idle. | ||
493 | This helps z/VM to detect that the Linux system is idle. VM can | ||
494 | then "swap-out" this guest which reduces memory usage. It also | ||
495 | reduces the overhead of idle systems. | ||
496 | |||
497 | The HZ timer can be switched on/off via /proc/sys/kernel/hz_timer. | ||
498 | hz_timer=0 means HZ timer is disabled. hz_timer=1 means HZ | ||
499 | timer is active. | ||
500 | |||
501 | config NO_IDLE_HZ_INIT | ||
502 | bool "HZ timer in idle off by default" | ||
503 | depends on NO_IDLE_HZ | ||
504 | help | ||
505 | The HZ timer is switched off in idle by default. That means the | ||
506 | HZ timer is already disabled at boot time. | ||
507 | |||
508 | config S390_HYPFS_FS | 495 | config S390_HYPFS_FS |
509 | bool "s390 hypervisor file system support" | 496 | bool "s390 hypervisor file system support" |
510 | select SYS_HYPERVISOR | 497 | select SYS_HYPERVISOR |
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index a3f67f8b5427..e33f32b54c08 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c | |||
@@ -499,7 +499,7 @@ static struct crypto_alg cbc_aes_alg = { | |||
499 | } | 499 | } |
500 | }; | 500 | }; |
501 | 501 | ||
502 | static int __init aes_init(void) | 502 | static int __init aes_s390_init(void) |
503 | { | 503 | { |
504 | int ret; | 504 | int ret; |
505 | 505 | ||
@@ -542,15 +542,15 @@ aes_err: | |||
542 | goto out; | 542 | goto out; |
543 | } | 543 | } |
544 | 544 | ||
545 | static void __exit aes_fini(void) | 545 | static void __exit aes_s390_fini(void) |
546 | { | 546 | { |
547 | crypto_unregister_alg(&cbc_aes_alg); | 547 | crypto_unregister_alg(&cbc_aes_alg); |
548 | crypto_unregister_alg(&ecb_aes_alg); | 548 | crypto_unregister_alg(&ecb_aes_alg); |
549 | crypto_unregister_alg(&aes_alg); | 549 | crypto_unregister_alg(&aes_alg); |
550 | } | 550 | } |
551 | 551 | ||
552 | module_init(aes_init); | 552 | module_init(aes_s390_init); |
553 | module_exit(aes_fini); | 553 | module_exit(aes_s390_fini); |
554 | 554 | ||
555 | MODULE_ALIAS("aes"); | 555 | MODULE_ALIAS("aes"); |
556 | 556 | ||
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index ea22707f435f..4aba83b31596 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c | |||
@@ -550,7 +550,7 @@ static struct crypto_alg cbc_des3_192_alg = { | |||
550 | } | 550 | } |
551 | }; | 551 | }; |
552 | 552 | ||
553 | static int init(void) | 553 | static int des_s390_init(void) |
554 | { | 554 | { |
555 | int ret = 0; | 555 | int ret = 0; |
556 | 556 | ||
@@ -612,7 +612,7 @@ des_err: | |||
612 | goto out; | 612 | goto out; |
613 | } | 613 | } |
614 | 614 | ||
615 | static void __exit fini(void) | 615 | static void __exit des_s390_fini(void) |
616 | { | 616 | { |
617 | crypto_unregister_alg(&cbc_des3_192_alg); | 617 | crypto_unregister_alg(&cbc_des3_192_alg); |
618 | crypto_unregister_alg(&ecb_des3_192_alg); | 618 | crypto_unregister_alg(&ecb_des3_192_alg); |
@@ -625,8 +625,8 @@ static void __exit fini(void) | |||
625 | crypto_unregister_alg(&des_alg); | 625 | crypto_unregister_alg(&des_alg); |
626 | } | 626 | } |
627 | 627 | ||
628 | module_init(init); | 628 | module_init(des_s390_init); |
629 | module_exit(fini); | 629 | module_exit(des_s390_fini); |
630 | 630 | ||
631 | MODULE_ALIAS("des"); | 631 | MODULE_ALIAS("des"); |
632 | MODULE_ALIAS("des3_ede"); | 632 | MODULE_ALIAS("des3_ede"); |
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index 5a834f6578ab..9cf9eca22747 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c | |||
@@ -137,7 +137,7 @@ static struct crypto_alg alg = { | |||
137 | .dia_final = sha1_final } } | 137 | .dia_final = sha1_final } } |
138 | }; | 138 | }; |
139 | 139 | ||
140 | static int __init init(void) | 140 | static int __init sha1_s390_init(void) |
141 | { | 141 | { |
142 | if (!crypt_s390_func_available(KIMD_SHA_1)) | 142 | if (!crypt_s390_func_available(KIMD_SHA_1)) |
143 | return -EOPNOTSUPP; | 143 | return -EOPNOTSUPP; |
@@ -145,13 +145,13 @@ static int __init init(void) | |||
145 | return crypto_register_alg(&alg); | 145 | return crypto_register_alg(&alg); |
146 | } | 146 | } |
147 | 147 | ||
148 | static void __exit fini(void) | 148 | static void __exit sha1_s390_fini(void) |
149 | { | 149 | { |
150 | crypto_unregister_alg(&alg); | 150 | crypto_unregister_alg(&alg); |
151 | } | 151 | } |
152 | 152 | ||
153 | module_init(init); | 153 | module_init(sha1_s390_init); |
154 | module_exit(fini); | 154 | module_exit(sha1_s390_fini); |
155 | 155 | ||
156 | MODULE_ALIAS("sha1"); | 156 | MODULE_ALIAS("sha1"); |
157 | 157 | ||
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c index ccf8633c4f65..2a3d756b35d4 100644 --- a/arch/s390/crypto/sha256_s390.c +++ b/arch/s390/crypto/sha256_s390.c | |||
@@ -133,7 +133,7 @@ static struct crypto_alg alg = { | |||
133 | .dia_final = sha256_final } } | 133 | .dia_final = sha256_final } } |
134 | }; | 134 | }; |
135 | 135 | ||
136 | static int init(void) | 136 | static int sha256_s390_init(void) |
137 | { | 137 | { |
138 | if (!crypt_s390_func_available(KIMD_SHA_256)) | 138 | if (!crypt_s390_func_available(KIMD_SHA_256)) |
139 | return -EOPNOTSUPP; | 139 | return -EOPNOTSUPP; |
@@ -141,13 +141,13 @@ static int init(void) | |||
141 | return crypto_register_alg(&alg); | 141 | return crypto_register_alg(&alg); |
142 | } | 142 | } |
143 | 143 | ||
144 | static void __exit fini(void) | 144 | static void __exit sha256_s390_fini(void) |
145 | { | 145 | { |
146 | crypto_unregister_alg(&alg); | 146 | crypto_unregister_alg(&alg); |
147 | } | 147 | } |
148 | 148 | ||
149 | module_init(init); | 149 | module_init(sha256_s390_init); |
150 | module_exit(fini); | 150 | module_exit(sha256_s390_fini); |
151 | 151 | ||
152 | MODULE_ALIAS("sha256"); | 152 | MODULE_ALIAS("sha256"); |
153 | 153 | ||
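The crypto renames above (init/fini becoming aes_s390_init/aes_s390_fini, sha1_s390_init/sha1_s390_fini, and so on) apply the usual convention that module entry points carry a driver-specific prefix: generic names are useless in backtraces and prone to clashes when the code is built into the kernel. A hedged sketch of the pattern, with a hypothetical driver name:

    #include <linux/init.h>
    #include <linux/module.h>

    /* The pattern applied above: give module entry points a
     * driver-specific prefix instead of the generic init()/fini(). */
    static int __init mydrv_init(void)
    {
            return 0;               /* register algorithms here */
    }

    static void __exit mydrv_exit(void)
    {
            /* unregister in reverse order */
    }

    module_init(mydrv_init);
    module_exit(mydrv_exit);
    MODULE_LICENSE("GPL");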
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 62f6b5a606dd..dcc3ec2ef643 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
@@ -3,6 +3,7 @@ | |||
3 | # Linux kernel version: 2.6.25-rc4 | 3 | # Linux kernel version: 2.6.25-rc4 |
4 | # Wed Mar 5 11:22:59 2008 | 4 | # Wed Mar 5 11:22:59 2008 |
5 | # | 5 | # |
6 | CONFIG_SCHED_MC=y | ||
6 | CONFIG_MMU=y | 7 | CONFIG_MMU=y |
7 | CONFIG_ZONE_DMA=y | 8 | CONFIG_ZONE_DMA=y |
8 | CONFIG_LOCKDEP_SUPPORT=y | 9 | CONFIG_LOCKDEP_SUPPORT=y |
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 4d3e38392cb1..77051cd27925 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
@@ -11,7 +11,7 @@ CFLAGS_smp.o := -Wno-nonnull | |||
11 | 11 | ||
12 | obj-y := bitmap.o traps.o time.o process.o base.o early.o \ | 12 | obj-y := bitmap.o traps.o time.o process.o base.o early.o \ |
13 | setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ | 13 | setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ |
14 | semaphore.o s390_ext.o debug.o irq.o ipl.o dis.o diag.o | 14 | s390_ext.o debug.o irq.o ipl.o dis.o diag.o |
15 | 15 | ||
16 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) | 16 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) |
17 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) | 17 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) |
@@ -19,7 +19,7 @@ obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) | |||
19 | extra-y += head.o init_task.o vmlinux.lds | 19 | extra-y += head.o init_task.o vmlinux.lds |
20 | 20 | ||
21 | obj-$(CONFIG_MODULES) += s390_ksyms.o module.o | 21 | obj-$(CONFIG_MODULES) += s390_ksyms.o module.o |
22 | obj-$(CONFIG_SMP) += smp.o | 22 | obj-$(CONFIG_SMP) += smp.o topology.o |
23 | 23 | ||
24 | obj-$(CONFIG_AUDIT) += audit.o | 24 | obj-$(CONFIG_AUDIT) += audit.o |
25 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o | 25 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o |
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h index e89f8c0c42a0..20723a062017 100644 --- a/arch/s390/kernel/compat_linux.h +++ b/arch/s390/kernel/compat_linux.h | |||
@@ -162,4 +162,77 @@ struct ucontext32 { | |||
162 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ | 162 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ |
163 | }; | 163 | }; |
164 | 164 | ||
165 | struct __sysctl_args32; | ||
166 | struct stat64_emu31; | ||
167 | struct mmap_arg_struct_emu31; | ||
168 | struct fadvise64_64_args; | ||
169 | struct old_sigaction32; | ||
171 | |||
172 | long sys32_chown16(const char __user * filename, u16 user, u16 group); | ||
173 | long sys32_lchown16(const char __user * filename, u16 user, u16 group); | ||
174 | long sys32_fchown16(unsigned int fd, u16 user, u16 group); | ||
175 | long sys32_setregid16(u16 rgid, u16 egid); | ||
176 | long sys32_setgid16(u16 gid); | ||
177 | long sys32_setreuid16(u16 ruid, u16 euid); | ||
178 | long sys32_setuid16(u16 uid); | ||
179 | long sys32_setresuid16(u16 ruid, u16 euid, u16 suid); | ||
180 | long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid); | ||
181 | long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid); | ||
182 | long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid); | ||
183 | long sys32_setfsuid16(u16 uid); | ||
184 | long sys32_setfsgid16(u16 gid); | ||
185 | long sys32_getgroups16(int gidsetsize, u16 __user *grouplist); | ||
186 | long sys32_setgroups16(int gidsetsize, u16 __user *grouplist); | ||
187 | long sys32_getuid16(void); | ||
188 | long sys32_geteuid16(void); | ||
189 | long sys32_getgid16(void); | ||
190 | long sys32_getegid16(void); | ||
191 | long sys32_ipc(u32 call, int first, int second, int third, u32 ptr); | ||
192 | long sys32_truncate64(const char __user * path, unsigned long high, | ||
193 | unsigned long low); | ||
194 | long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low); | ||
195 | long sys32_sched_rr_get_interval(compat_pid_t pid, | ||
196 | struct compat_timespec __user *interval); | ||
197 | long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, | ||
198 | compat_sigset_t __user *oset, size_t sigsetsize); | ||
199 | long sys32_rt_sigpending(compat_sigset_t __user *set, size_t sigsetsize); | ||
200 | long sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo); | ||
201 | long sys32_execve(void); | ||
202 | long sys32_init_module(void __user *umod, unsigned long len, | ||
203 | const char __user *uargs); | ||
204 | long sys32_delete_module(const char __user *name_user, unsigned int flags); | ||
205 | long sys32_gettimeofday(struct compat_timeval __user *tv, | ||
206 | struct timezone __user *tz); | ||
207 | long sys32_settimeofday(struct compat_timeval __user *tv, | ||
208 | struct timezone __user *tz); | ||
209 | long sys32_pause(void); | ||
210 | long sys32_pread64(unsigned int fd, char __user *ubuf, size_t count, | ||
211 | u32 poshi, u32 poslo); | ||
212 | long sys32_pwrite64(unsigned int fd, const char __user *ubuf, | ||
213 | size_t count, u32 poshi, u32 poslo); | ||
214 | compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count); | ||
215 | long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, | ||
216 | size_t count); | ||
217 | long sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, | ||
218 | s32 count); | ||
219 | long sys32_sysctl(struct __sysctl_args32 __user *args); | ||
220 | long sys32_stat64(char __user * filename, struct stat64_emu31 __user * statbuf); | ||
221 | long sys32_lstat64(char __user * filename, | ||
222 | struct stat64_emu31 __user * statbuf); | ||
223 | long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * statbuf); | ||
224 | long sys32_fstatat64(unsigned int dfd, char __user *filename, | ||
225 | struct stat64_emu31 __user* statbuf, int flag); | ||
226 | unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg); | ||
227 | long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg); | ||
228 | long sys32_read(unsigned int fd, char __user * buf, size_t count); | ||
229 | long sys32_write(unsigned int fd, char __user * buf, size_t count); | ||
230 | long sys32_clone(void); | ||
231 | long sys32_fadvise64(int fd, loff_t offset, size_t len, int advise); | ||
232 | long sys32_fadvise64_64(struct fadvise64_64_args __user *args); | ||
233 | long sys32_sigaction(int sig, const struct old_sigaction32 __user *act, | ||
234 | struct old_sigaction32 __user *oact); | ||
235 | long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, | ||
236 | struct sigaction32 __user *oact, size_t sigsetsize); | ||
237 | long sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss); | ||
165 | #endif /* _ASM_S390X_S390_H */ | 238 | #endif /* _ASM_S390X_S390_H */ |
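Several prototypes above pass a 64-bit value as two u32 halves (poshi/poslo, offhi/offlo, high/low) because the 31-bit compat ABI hands over syscall arguments in 32-bit registers; each wrapper presumably reassembles the halves before calling the native handler. A sketch of that reassembly, with a hypothetical helper name:

    #include <linux/types.h>

    /* Assumed shape of the reassembly done by the compat wrappers:
     * 31-bit user space passes the high and low 32 bits separately. */
    static inline loff_t compat_join64(u32 hi, u32 lo)
    {
            return ((loff_t)hi << 32) | lo;
    }

    /* e.g. sys32_pread64(fd, buf, count, poshi, poslo) would end up in
     * sys_pread64(fd, buf, count, compat_join64(poshi, poslo)). */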
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index a5692c460bad..c7f02e777af2 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <asm/lowcore.h> | 29 | #include <asm/lowcore.h> |
30 | #include "compat_linux.h" | 30 | #include "compat_linux.h" |
31 | #include "compat_ptrace.h" | 31 | #include "compat_ptrace.h" |
32 | #include "entry.h" | ||
32 | 33 | ||
33 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 34 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
34 | 35 | ||
@@ -428,6 +429,10 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) | |||
428 | /* Default to using normal stack */ | 429 | /* Default to using normal stack */ |
429 | sp = (unsigned long) A(regs->gprs[15]); | 430 | sp = (unsigned long) A(regs->gprs[15]); |
430 | 431 | ||
432 | /* Overflow on alternate signal stack gives SIGSEGV. */ | ||
433 | if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL)) | ||
434 | return (void __user *) -1UL; | ||
435 | |||
431 | /* This is the X/Open sanctioned signal stack switching. */ | 436 | /* This is the X/Open sanctioned signal stack switching. */ |
432 | if (ka->sa.sa_flags & SA_ONSTACK) { | 437 | if (ka->sa.sa_flags & SA_ONSTACK) { |
433 | if (! sas_ss_flags(sp)) | 438 | if (! sas_ss_flags(sp)) |
@@ -461,6 +466,9 @@ static int setup_frame32(int sig, struct k_sigaction *ka, | |||
461 | if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe32))) | 466 | if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe32))) |
462 | goto give_sigsegv; | 467 | goto give_sigsegv; |
463 | 468 | ||
469 | if (frame == (void __user *) -1UL) | ||
470 | goto give_sigsegv; | ||
471 | |||
464 | if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32)) | 472 | if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32)) |
465 | goto give_sigsegv; | 473 | goto give_sigsegv; |
466 | 474 | ||
@@ -514,6 +522,9 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
514 | if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe32))) | 522 | if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe32))) |
515 | goto give_sigsegv; | 523 | goto give_sigsegv; |
516 | 524 | ||
525 | if (frame == (void __user *) -1UL) | ||
526 | goto give_sigsegv; | ||
527 | |||
517 | if (copy_siginfo_to_user32(&frame->info, info)) | 528 | if (copy_siginfo_to_user32(&frame->info, info)) |
518 | goto give_sigsegv; | 529 | goto give_sigsegv; |
519 | 530 | ||
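The get_sigframe() hunk above rejects frames that would overflow the alternate signal stack, and the two setup_*frame32() hunks turn that -1UL sentinel into SIGSEGV. Spelled out as a standalone predicate (a sketch; on_sig_stack() is the existing helper from <linux/sched.h>):

    #include <linux/sched.h>        /* on_sig_stack() */

    /* The overflow test added above: sp sits on the alternate signal
     * stack, but the 8-byte-aligned start of the new frame would fall
     * off it ("& ~7UL" is the same mask as "& -8UL"). */
    static int sigframe_overflows_altstack(unsigned long sp,
                                           unsigned long frame_size)
    {
            unsigned long start = (sp - frame_size) & ~7UL;

            return on_sig_stack(sp) && !on_sig_stack(start);
    }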
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 1b2f5ce45320..1e7d4ac7068b 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c | |||
@@ -73,7 +73,7 @@ static ssize_t debug_input(struct file *file, const char __user *user_buf, | |||
73 | static int debug_open(struct inode *inode, struct file *file); | 73 | static int debug_open(struct inode *inode, struct file *file); |
74 | static int debug_close(struct inode *inode, struct file *file); | 74 | static int debug_close(struct inode *inode, struct file *file); |
75 | static debug_info_t* debug_info_create(char *name, int pages_per_area, | 75 | static debug_info_t* debug_info_create(char *name, int pages_per_area, |
76 | int nr_areas, int buf_size); | 76 | int nr_areas, int buf_size, mode_t mode); |
77 | static void debug_info_get(debug_info_t *); | 77 | static void debug_info_get(debug_info_t *); |
78 | static void debug_info_put(debug_info_t *); | 78 | static void debug_info_put(debug_info_t *); |
79 | static int debug_prolog_level_fn(debug_info_t * id, | 79 | static int debug_prolog_level_fn(debug_info_t * id, |
@@ -157,7 +157,7 @@ struct debug_view debug_sprintf_view = { | |||
157 | }; | 157 | }; |
158 | 158 | ||
159 | /* used by dump analysis tools to determine version of debug feature */ | 159 | /* used by dump analysis tools to determine version of debug feature */ |
160 | unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION; | 160 | static unsigned int __used debug_feature_version = __DEBUG_FEATURE_VERSION; |
161 | 161 | ||
162 | /* static globals */ | 162 | /* static globals */ |
163 | 163 | ||
@@ -327,7 +327,8 @@ debug_info_free(debug_info_t* db_info){ | |||
327 | */ | 327 | */ |
328 | 328 | ||
329 | static debug_info_t* | 329 | static debug_info_t* |
330 | debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size) | 330 | debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size, |
331 | mode_t mode) | ||
331 | { | 332 | { |
332 | debug_info_t* rc; | 333 | debug_info_t* rc; |
333 | 334 | ||
@@ -336,6 +337,8 @@ debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size) | |||
336 | if(!rc) | 337 | if(!rc) |
337 | goto out; | 338 | goto out; |
338 | 339 | ||
340 | rc->mode = mode & ~S_IFMT; | ||
341 | |||
339 | /* create root directory */ | 342 | /* create root directory */ |
340 | rc->debugfs_root_entry = debugfs_create_dir(rc->name, | 343 | rc->debugfs_root_entry = debugfs_create_dir(rc->name, |
341 | debug_debugfs_root_entry); | 344 | debug_debugfs_root_entry); |
@@ -676,23 +679,30 @@ debug_close(struct inode *inode, struct file *file) | |||
676 | } | 679 | } |
677 | 680 | ||
678 | /* | 681 | /* |
679 | * debug_register: | 682 | * debug_register_mode: |
680 | * - creates and initializes debug area for the caller | 683 | * - Creates and initializes debug area for the caller |
681 | * - returns handle for debug area | 684 | * The mode parameter allows one to specify access rights for the s390dbf files
685 | * - Returns handle for debug area | ||
682 | */ | 686 | */ |
683 | 687 | ||
684 | debug_info_t* | 688 | debug_info_t *debug_register_mode(char *name, int pages_per_area, int nr_areas, |
685 | debug_register (char *name, int pages_per_area, int nr_areas, int buf_size) | 689 | int buf_size, mode_t mode, uid_t uid, |
690 | gid_t gid) | ||
686 | { | 691 | { |
687 | debug_info_t *rc = NULL; | 692 | debug_info_t *rc = NULL; |
688 | 693 | ||
694 | /* Since debugfs currently does not support uid/gid other than root, */ | ||
695 | /* we do not allow gid/uid != 0 until we get support for that. */ | ||
696 | if ((uid != 0) || (gid != 0)) | ||
697 | printk(KERN_WARNING "debug: Warning - Currently only uid/gid " | ||
698 | "= 0 are supported. Using root as owner now!"); | ||
689 | if (!initialized) | 699 | if (!initialized) |
690 | BUG(); | 700 | BUG(); |
691 | mutex_lock(&debug_mutex); | 701 | mutex_lock(&debug_mutex); |
692 | 702 | ||
693 | /* create new debug_info */ | 703 | /* create new debug_info */ |
694 | 704 | ||
695 | rc = debug_info_create(name, pages_per_area, nr_areas, buf_size); | 705 | rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode); |
696 | if(!rc) | 706 | if(!rc) |
697 | goto out; | 707 | goto out; |
698 | debug_register_view(rc, &debug_level_view); | 708 | debug_register_view(rc, &debug_level_view); |
@@ -705,6 +715,20 @@ out: | |||
705 | mutex_unlock(&debug_mutex); | 715 | mutex_unlock(&debug_mutex); |
706 | return rc; | 716 | return rc; |
707 | } | 717 | } |
718 | EXPORT_SYMBOL(debug_register_mode); | ||
719 | |||
720 | /* | ||
721 | * debug_register: | ||
722 | * - creates and initializes debug area for the caller | ||
723 | * - returns handle for debug area | ||
724 | */ | ||
725 | |||
726 | debug_info_t *debug_register(char *name, int pages_per_area, int nr_areas, | ||
727 | int buf_size) | ||
728 | { | ||
729 | return debug_register_mode(name, pages_per_area, nr_areas, buf_size, | ||
730 | S_IRUSR | S_IWUSR, 0, 0); | ||
731 | } | ||
708 | 732 | ||
709 | /* | 733 | /* |
710 | * debug_unregister: | 734 | * debug_unregister: |
@@ -1073,15 +1097,16 @@ debug_register_view(debug_info_t * id, struct debug_view *view) | |||
1073 | int rc = 0; | 1097 | int rc = 0; |
1074 | int i; | 1098 | int i; |
1075 | unsigned long flags; | 1099 | unsigned long flags; |
1076 | mode_t mode = S_IFREG; | 1100 | mode_t mode; |
1077 | struct dentry *pde; | 1101 | struct dentry *pde; |
1078 | 1102 | ||
1079 | if (!id) | 1103 | if (!id) |
1080 | goto out; | 1104 | goto out; |
1081 | if (view->prolog_proc || view->format_proc || view->header_proc) | 1105 | mode = (id->mode | S_IFREG) & ~S_IXUGO; |
1082 | mode |= S_IRUSR; | 1106 | if (!(view->prolog_proc || view->format_proc || view->header_proc)) |
1083 | if (view->input_proc) | 1107 | mode &= ~(S_IRUSR | S_IRGRP | S_IROTH); |
1084 | mode |= S_IWUSR; | 1108 | if (!view->input_proc) |
1109 | mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH); | ||
1085 | pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry, | 1110 | pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry, |
1086 | id , &debug_file_ops); | 1111 | id , &debug_file_ops); |
1087 | if (!pde){ | 1112 | if (!pde){ |
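The new debug_register_mode() threads a file mode through to debug_info_create(), which stores it (minus the file-type bits) for debug_register_view() to apply; views then strip execute bits and any read/write bits they cannot serve. A minimal caller sketch, assuming the s390dbf helpers debug_register_view(), debug_set_level() and the exported debug_sprintf_view; the device name, sizes and level are illustrative:

static debug_info_t *my_dbf;	/* hypothetical driver log */

static int __init mydrv_debug_init(void)
{
	/* 4 pages per area, 1 area, 200-byte entries, files mode 0644;
	 * uid/gid must be 0 for now, as the warning above says */
	my_dbf = debug_register_mode("mydrv", 4, 1, 200,
				     S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH,
				     0, 0);
	if (!my_dbf)
		return -ENOMEM;
	debug_register_view(my_dbf, &debug_sprintf_view);
	debug_set_level(my_dbf, 3);
	return 0;
}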
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 01832c440636..540a67f979b6 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <asm/setup.h> | 21 | #include <asm/setup.h> |
22 | #include <asm/cpcmd.h> | 22 | #include <asm/cpcmd.h> |
23 | #include <asm/sclp.h> | 23 | #include <asm/sclp.h> |
24 | #include "entry.h" | ||
24 | 25 | ||
25 | /* | 26 | /* |
26 | * Create a Kernel NSS if the SAVESYS= parameter is defined | 27 | * Create a Kernel NSS if the SAVESYS= parameter is defined |
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h new file mode 100644 index 000000000000..6b1896345eda --- /dev/null +++ b/arch/s390/kernel/entry.h | |||
@@ -0,0 +1,60 @@ | |||
1 | #ifndef _ENTRY_H | ||
2 | #define _ENTRY_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <linux/signal.h> | ||
6 | #include <asm/ptrace.h> | ||
7 | |||
8 | typedef void pgm_check_handler_t(struct pt_regs *, long); | ||
9 | extern pgm_check_handler_t *pgm_check_table[128]; | ||
10 | pgm_check_handler_t do_protection_exception; | ||
11 | pgm_check_handler_t do_dat_exception; | ||
12 | |||
13 | extern int sysctl_userprocess_debug; | ||
14 | |||
15 | void do_single_step(struct pt_regs *regs); | ||
16 | void syscall_trace(struct pt_regs *regs, int entryexit); | ||
17 | void kernel_stack_overflow(struct pt_regs * regs); | ||
18 | void do_signal(struct pt_regs *regs); | ||
19 | int handle_signal32(unsigned long sig, struct k_sigaction *ka, | ||
20 | siginfo_t *info, sigset_t *oldset, struct pt_regs *regs); | ||
21 | |||
22 | void do_extint(struct pt_regs *regs, unsigned short code); | ||
23 | int __cpuinit start_secondary(void *cpuvoid); | ||
24 | void __init startup_init(void); | ||
25 | void die(const char * str, struct pt_regs * regs, long err); | ||
26 | |||
27 | struct new_utsname; | ||
28 | struct mmap_arg_struct; | ||
29 | struct fadvise64_64_args; | ||
30 | struct old_sigaction; | ||
31 | struct sel_arg_struct; | ||
32 | |||
33 | long sys_pipe(unsigned long __user *fildes); | ||
34 | long sys_mmap2(struct mmap_arg_struct __user *arg); | ||
35 | long old_mmap(struct mmap_arg_struct __user *arg); | ||
36 | long sys_ipc(uint call, int first, unsigned long second, | ||
37 | unsigned long third, void __user *ptr); | ||
38 | long s390x_newuname(struct new_utsname __user *name); | ||
39 | long s390x_personality(unsigned long personality); | ||
40 | long s390_fadvise64(int fd, u32 offset_high, u32 offset_low, | ||
41 | size_t len, int advice); | ||
42 | long s390_fadvise64_64(struct fadvise64_64_args __user *args); | ||
43 | long s390_fallocate(int fd, int mode, loff_t offset, u32 len_high, u32 len_low); | ||
44 | long sys_fork(void); | ||
45 | long sys_clone(void); | ||
46 | long sys_vfork(void); | ||
47 | void execve_tail(void); | ||
48 | long sys_execve(void); | ||
49 | int sys_sigsuspend(int history0, int history1, old_sigset_t mask); | ||
50 | long sys_sigaction(int sig, const struct old_sigaction __user *act, | ||
51 | struct old_sigaction __user *oact); | ||
52 | long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss); | ||
53 | long sys_sigreturn(void); | ||
54 | long sys_rt_sigreturn(void); | ||
55 | long sys32_sigreturn(void); | ||
56 | long sys32_rt_sigreturn(void); | ||
57 | long old_select(struct sel_arg_struct __user *arg); | ||
58 | long sys_ptrace(long request, long pid, long addr, long data); | ||
59 | |||
60 | #endif /* _ENTRY_H */ | ||
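The new entry.h gathers prototypes for s390 entry points that were previously redeclared ad hoc at each call site (see the handle_signal32 extern dropped from signal.c further down); including one shared header lets the compiler check every declaration against its definition. An illustrative before/after:

/* Before: callers repeated prototypes locally, unchecked against
 * the actual definition (illustrative): */
extern void do_extint(struct pt_regs *regs, unsigned short code);

/* After: one shared declaration; any mismatch is a compile error. */
#include "entry.h"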
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index efde6e178f6c..cd959c0b2e16 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -475,6 +475,7 @@ pgm_check_handler: | |||
475 | pgm_no_vtime: | 475 | pgm_no_vtime: |
476 | #endif | 476 | #endif |
477 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 477 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
478 | mvc SP_ARGS(8,%r15),__LC_LAST_BREAK | ||
478 | TRACE_IRQS_OFF | 479 | TRACE_IRQS_OFF |
479 | lgf %r3,__LC_PGM_ILC # load program interruption code | 480 | lgf %r3,__LC_PGM_ILC # load program interruption code |
480 | lghi %r8,0x7f | 481 | lghi %r8,0x7f |
@@ -847,6 +848,7 @@ stack_overflow: | |||
847 | je 0f | 848 | je 0f |
848 | la %r1,__LC_SAVE_AREA+32 | 849 | la %r1,__LC_SAVE_AREA+32 |
849 | 0: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack | 850 | 0: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack |
851 | mvc SP_ARGS(8,%r15),__LC_LAST_BREAK | ||
850 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain | 852 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain |
851 | la %r2,SP_PTREGS(%r15) # load pt_regs | 853 | la %r2,SP_PTREGS(%r15) # load pt_regs |
852 | jg kernel_stack_overflow | 854 | jg kernel_stack_overflow |
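Both hunks copy the breaking-event address that the machine records in the lowcore (__LC_LAST_BREAK) into the pt_regs slot SP_ARGS, so it survives into C code for show_last_breaking_event(), which show_regs() now calls in process.c below. A hedged sketch of that consumer, assuming SP_ARGS maps to regs->args[0]; the real helper lives elsewhere in the kernel and may differ in detail:

#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <asm/ptrace.h>

void show_last_breaking_event(struct pt_regs *regs)
{
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] ", regs->args[0]);
	print_symbol("%s\n", regs->args[0]);
}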
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 375232c46c7a..532542447d66 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
@@ -655,7 +655,7 @@ static struct kobj_attribute reipl_type_attr = | |||
655 | 655 | ||
656 | static struct kset *reipl_kset; | 656 | static struct kset *reipl_kset; |
657 | 657 | ||
658 | void reipl_run(struct shutdown_trigger *trigger) | 658 | static void reipl_run(struct shutdown_trigger *trigger) |
659 | { | 659 | { |
660 | struct ccw_dev_id devid; | 660 | struct ccw_dev_id devid; |
661 | static char buf[100]; | 661 | static char buf[100]; |
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index c5549a206284..ed04d1372d5d 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c | |||
@@ -360,7 +360,7 @@ no_kprobe: | |||
360 | * - When the probed function returns, this probe | 360 | * - When the probed function returns, this probe |
361 | * causes the handlers to fire | 361 | * causes the handlers to fire |
362 | */ | 362 | */ |
363 | void kretprobe_trampoline_holder(void) | 363 | static void __used kretprobe_trampoline_holder(void) |
364 | { | 364 | { |
365 | asm volatile(".global kretprobe_trampoline\n" | 365 | asm volatile(".global kretprobe_trampoline\n" |
366 | "kretprobe_trampoline: bcr 0,0\n"); | 366 | "kretprobe_trampoline: bcr 0,0\n"); |
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index ce203154d8ce..c1aff194141d 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -36,6 +36,8 @@ | |||
36 | #include <linux/module.h> | 36 | #include <linux/module.h> |
37 | #include <linux/notifier.h> | 37 | #include <linux/notifier.h> |
38 | #include <linux/utsname.h> | 38 | #include <linux/utsname.h> |
39 | #include <linux/tick.h> | ||
40 | #include <linux/elfcore.h> | ||
39 | #include <asm/uaccess.h> | 41 | #include <asm/uaccess.h> |
40 | #include <asm/pgtable.h> | 42 | #include <asm/pgtable.h> |
41 | #include <asm/system.h> | 43 | #include <asm/system.h> |
@@ -44,6 +46,7 @@ | |||
44 | #include <asm/irq.h> | 46 | #include <asm/irq.h> |
45 | #include <asm/timer.h> | 47 | #include <asm/timer.h> |
46 | #include <asm/cpu.h> | 48 | #include <asm/cpu.h> |
49 | #include "entry.h" | ||
47 | 50 | ||
48 | asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); | 51 | asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); |
49 | 52 | ||
@@ -76,6 +79,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk) | |||
76 | * Need to know about CPUs going idle? | 79 | * Need to know about CPUs going idle? |
77 | */ | 80 | */ |
78 | static ATOMIC_NOTIFIER_HEAD(idle_chain); | 81 | static ATOMIC_NOTIFIER_HEAD(idle_chain); |
82 | DEFINE_PER_CPU(struct s390_idle_data, s390_idle); | ||
79 | 83 | ||
80 | int register_idle_notifier(struct notifier_block *nb) | 84 | int register_idle_notifier(struct notifier_block *nb) |
81 | { | 85 | { |
@@ -89,9 +93,33 @@ int unregister_idle_notifier(struct notifier_block *nb) | |||
89 | } | 93 | } |
90 | EXPORT_SYMBOL(unregister_idle_notifier); | 94 | EXPORT_SYMBOL(unregister_idle_notifier); |
91 | 95 | ||
92 | void do_monitor_call(struct pt_regs *regs, long interruption_code) | 96 | static int s390_idle_enter(void) |
97 | { | ||
98 | struct s390_idle_data *idle; | ||
99 | int nr_calls = 0; | ||
100 | void *hcpu; | ||
101 | int rc; | ||
102 | |||
103 | hcpu = (void *)(long)smp_processor_id(); | ||
104 | rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1, | ||
105 | &nr_calls); | ||
106 | if (rc == NOTIFY_BAD) { | ||
107 | nr_calls--; | ||
108 | __atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE, | ||
109 | hcpu, nr_calls, NULL); | ||
110 | return rc; | ||
111 | } | ||
112 | idle = &__get_cpu_var(s390_idle); | ||
113 | spin_lock(&idle->lock); | ||
114 | idle->idle_count++; | ||
115 | idle->in_idle = 1; | ||
116 | idle->idle_enter = get_clock(); | ||
117 | spin_unlock(&idle->lock); | ||
118 | return NOTIFY_OK; | ||
119 | } | ||
120 | |||
121 | void s390_idle_leave(void) | ||
93 | { | 122 | { |
94 | #ifdef CONFIG_SMP | ||
95 | struct s390_idle_data *idle; | 123 | struct s390_idle_data *idle; |
96 | 124 | ||
97 | idle = &__get_cpu_var(s390_idle); | 125 | idle = &__get_cpu_var(s390_idle); |
@@ -99,10 +127,6 @@ void do_monitor_call(struct pt_regs *regs, long interruption_code) | |||
99 | idle->idle_time += get_clock() - idle->idle_enter; | 127 | idle->idle_time += get_clock() - idle->idle_enter; |
100 | idle->in_idle = 0; | 128 | idle->in_idle = 0; |
101 | spin_unlock(&idle->lock); | 129 | spin_unlock(&idle->lock); |
102 | #endif | ||
103 | /* disable monitor call class 0 */ | ||
104 | __ctl_clear_bit(8, 15); | ||
105 | |||
106 | atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE, | 130 | atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE, |
107 | (void *)(long) smp_processor_id()); | 131 | (void *)(long) smp_processor_id()); |
108 | } | 132 | } |
@@ -113,61 +137,30 @@ extern void s390_handle_mcck(void); | |||
113 | */ | 137 | */ |
114 | static void default_idle(void) | 138 | static void default_idle(void) |
115 | { | 139 | { |
116 | int cpu, rc; | ||
117 | int nr_calls = 0; | ||
118 | void *hcpu; | ||
119 | #ifdef CONFIG_SMP | ||
120 | struct s390_idle_data *idle; | ||
121 | #endif | ||
122 | |||
123 | /* CPU is going idle. */ | 140 | /* CPU is going idle. */ |
124 | cpu = smp_processor_id(); | ||
125 | hcpu = (void *)(long)cpu; | ||
126 | local_irq_disable(); | 141 | local_irq_disable(); |
127 | if (need_resched()) { | 142 | if (need_resched()) { |
128 | local_irq_enable(); | 143 | local_irq_enable(); |
129 | return; | 144 | return; |
130 | } | 145 | } |
131 | 146 | if (s390_idle_enter() == NOTIFY_BAD) { | |
132 | rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1, | ||
133 | &nr_calls); | ||
134 | if (rc == NOTIFY_BAD) { | ||
135 | nr_calls--; | ||
136 | __atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE, | ||
137 | hcpu, nr_calls, NULL); | ||
138 | local_irq_enable(); | 147 | local_irq_enable(); |
139 | return; | 148 | return; |
140 | } | 149 | } |
141 | |||
142 | /* enable monitor call class 0 */ | ||
143 | __ctl_set_bit(8, 15); | ||
144 | |||
145 | #ifdef CONFIG_HOTPLUG_CPU | 150 | #ifdef CONFIG_HOTPLUG_CPU |
146 | if (cpu_is_offline(cpu)) { | 151 | if (cpu_is_offline(smp_processor_id())) { |
147 | preempt_enable_no_resched(); | 152 | preempt_enable_no_resched(); |
148 | cpu_die(); | 153 | cpu_die(); |
149 | } | 154 | } |
150 | #endif | 155 | #endif |
151 | |||
152 | local_mcck_disable(); | 156 | local_mcck_disable(); |
153 | if (test_thread_flag(TIF_MCCK_PENDING)) { | 157 | if (test_thread_flag(TIF_MCCK_PENDING)) { |
154 | local_mcck_enable(); | 158 | local_mcck_enable(); |
155 | /* disable monitor call class 0 */ | 159 | s390_idle_leave(); |
156 | __ctl_clear_bit(8, 15); | ||
157 | atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE, | ||
158 | hcpu); | ||
159 | local_irq_enable(); | 160 | local_irq_enable(); |
160 | s390_handle_mcck(); | 161 | s390_handle_mcck(); |
161 | return; | 162 | return; |
162 | } | 163 | } |
163 | #ifdef CONFIG_SMP | ||
164 | idle = &__get_cpu_var(s390_idle); | ||
165 | spin_lock(&idle->lock); | ||
166 | idle->idle_count++; | ||
167 | idle->in_idle = 1; | ||
168 | idle->idle_enter = get_clock(); | ||
169 | spin_unlock(&idle->lock); | ||
170 | #endif | ||
171 | trace_hardirqs_on(); | 164 | trace_hardirqs_on(); |
172 | /* Wait for external, I/O or machine check interrupt. */ | 165 | /* Wait for external, I/O or machine check interrupt. */ |
173 | __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | | 166 | __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | |
@@ -177,9 +170,10 @@ static void default_idle(void) | |||
177 | void cpu_idle(void) | 170 | void cpu_idle(void) |
178 | { | 171 | { |
179 | for (;;) { | 172 | for (;;) { |
173 | tick_nohz_stop_sched_tick(); | ||
180 | while (!need_resched()) | 174 | while (!need_resched()) |
181 | default_idle(); | 175 | default_idle(); |
182 | 176 | tick_nohz_restart_sched_tick(); | |
183 | preempt_enable_no_resched(); | 177 | preempt_enable_no_resched(); |
184 | schedule(); | 178 | schedule(); |
185 | preempt_disable(); | 179 | preempt_disable(); |
@@ -201,6 +195,7 @@ void show_regs(struct pt_regs *regs) | |||
201 | /* Show stack backtrace if pt_regs is from kernel mode */ | 195 | /* Show stack backtrace if pt_regs is from kernel mode */ |
202 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) | 196 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) |
203 | show_trace(NULL, (unsigned long *) regs->gprs[15]); | 197 | show_trace(NULL, (unsigned long *) regs->gprs[15]); |
198 | show_last_breaking_event(regs); | ||
204 | } | 199 | } |
205 | 200 | ||
206 | extern void kernel_thread_starter(void); | 201 | extern void kernel_thread_starter(void); |
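Idle bookkeeping is now unconditionally per cpu: s390_idle_enter() and s390_idle_leave() update the spinlock-protected s390_idle_data, and the monitor-call machinery is gone. A hedged reader sketch showing how a consumer would total idle time consistently with those updates (the helper name is hypothetical):

#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/cpu.h>
#include <asm/timex.h>

static unsigned long long idle_time_of(int cpu)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
	unsigned long long time;

	spin_lock(&idle->lock);
	time = idle->idle_time;
	if (idle->in_idle)	/* the cpu is idle right now */
		time += get_clock() - idle->idle_enter;
	spin_unlock(&idle->lock);
	return time;		/* TOD units; (t * 125) >> 9 gives ns */
}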
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 6e036bae9875..58a064296987 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <asm/system.h> | 41 | #include <asm/system.h> |
42 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
43 | #include <asm/unistd.h> | 43 | #include <asm/unistd.h> |
44 | #include "entry.h" | ||
44 | 45 | ||
45 | #ifdef CONFIG_COMPAT | 46 | #ifdef CONFIG_COMPAT |
46 | #include "compat_ptrace.h" | 47 | #include "compat_ptrace.h" |
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c index acf93dba7727..e019b419efc6 100644 --- a/arch/s390/kernel/s390_ext.c +++ b/arch/s390/kernel/s390_ext.c | |||
@@ -13,11 +13,12 @@ | |||
13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | #include <linux/kernel_stat.h> | 14 | #include <linux/kernel_stat.h> |
15 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
16 | 16 | #include <asm/cpu.h> | |
17 | #include <asm/lowcore.h> | 17 | #include <asm/lowcore.h> |
18 | #include <asm/s390_ext.h> | 18 | #include <asm/s390_ext.h> |
19 | #include <asm/irq_regs.h> | 19 | #include <asm/irq_regs.h> |
20 | #include <asm/irq.h> | 20 | #include <asm/irq.h> |
21 | #include "entry.h" | ||
21 | 22 | ||
22 | /* | 23 | /* |
23 | * ext_int_hash[index] is the start of the list for all external interrupts | 24 | * ext_int_hash[index] is the start of the list for all external interrupts |
@@ -119,13 +120,10 @@ void do_extint(struct pt_regs *regs, unsigned short code) | |||
119 | 120 | ||
120 | old_regs = set_irq_regs(regs); | 121 | old_regs = set_irq_regs(regs); |
121 | irq_enter(); | 122 | irq_enter(); |
122 | asm volatile ("mc 0,0"); | 123 | s390_idle_check(); |
123 | if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer) | 124 | if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) |
124 | /** | 125 | /* Serve timer interrupts first. */ |
125 | * Make sure that the i/o interrupt did not "overtake" | 126 | clock_comparator_work(); |
126 | * the last HZ timer interrupt. | ||
127 | */ | ||
128 | account_ticks(S390_lowcore.int_clock); | ||
129 | kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; | 127 | kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; |
130 | index = ext_hash(code); | 128 | index = ext_hash(code); |
131 | for (p = ext_int_hash[index]; p; p = p->next) { | 129 | for (p = ext_int_hash[index]; p; p = p->next) { |
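do_extint() now leaves idle accounting via s390_idle_check() and serves a due clock-comparator event (int_clock past the programmed comparator) before dispatching. For reference, a hedged sketch of how a handler reaches the ext_int_hash[] walked by the loop above, assuming the register_external_interrupt() helper earlier in this file; 0x1234 is an illustrative interruption code:

static void my_ext_handler(__u16 code)
{
	/* called from the do_extint() loop with interrupts disabled */
}

static int __init my_ext_init(void)
{
	return register_external_interrupt(0x1234, my_ext_handler);
}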
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 7234c737f825..48238a114ce9 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c | |||
@@ -27,13 +27,6 @@ EXPORT_SYMBOL(_zb_findmap); | |||
27 | EXPORT_SYMBOL(_sb_findmap); | 27 | EXPORT_SYMBOL(_sb_findmap); |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * semaphore ops | ||
31 | */ | ||
32 | EXPORT_SYMBOL(__up); | ||
33 | EXPORT_SYMBOL(__down); | ||
34 | EXPORT_SYMBOL(__down_interruptible); | ||
35 | |||
36 | /* | ||
37 | * binfmt_elf loader | 30 | * binfmt_elf loader |
38 | */ | 31 | */ |
39 | extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs); | 32 | extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs); |
diff --git a/arch/s390/kernel/semaphore.c b/arch/s390/kernel/semaphore.c deleted file mode 100644 index 191303f6c1d8..000000000000 --- a/arch/s390/kernel/semaphore.c +++ /dev/null | |||
@@ -1,108 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/s390/kernel/semaphore.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1998-2000 IBM Corporation | ||
6 | * Author(s): Martin Schwidefsky | ||
7 | * | ||
8 | * Derived from "linux/arch/i386/kernel/semaphore.c | ||
9 | * Copyright (C) 1999, Linus Torvalds | ||
10 | * | ||
11 | */ | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/init.h> | ||
15 | |||
16 | #include <asm/semaphore.h> | ||
17 | |||
18 | /* | ||
19 | * Atomically update sem->count. Equivalent to: | ||
20 | * old_val = sem->count.counter; | ||
21 | * new_val = ((old_val >= 0) ? old_val : 0) + incr; | ||
22 | * sem->count.counter = new_val; | ||
23 | * return old_val; | ||
24 | */ | ||
25 | static inline int __sem_update_count(struct semaphore *sem, int incr) | ||
26 | { | ||
27 | int old_val, new_val; | ||
28 | |||
29 | asm volatile( | ||
30 | " l %0,0(%3)\n" | ||
31 | "0: ltr %1,%0\n" | ||
32 | " jhe 1f\n" | ||
33 | " lhi %1,0\n" | ||
34 | "1: ar %1,%4\n" | ||
35 | " cs %0,%1,0(%3)\n" | ||
36 | " jl 0b\n" | ||
37 | : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count) | ||
38 | : "a" (&sem->count), "d" (incr), "m" (sem->count) | ||
39 | : "cc"); | ||
40 | return old_val; | ||
41 | } | ||
42 | |||
43 | /* | ||
44 | * The inline function up() incremented count but the result | ||
45 | * was <= 0. This indicates that some process is waiting on | ||
46 | * the semaphore. The semaphore is free and we'll wake the | ||
47 | * first sleeping process, so we set count to 1 unless some | ||
48 | * other cpu has called up in the meantime in which case | ||
49 | * we just increment count by 1. | ||
50 | */ | ||
51 | void __up(struct semaphore *sem) | ||
52 | { | ||
53 | __sem_update_count(sem, 1); | ||
54 | wake_up(&sem->wait); | ||
55 | } | ||
56 | |||
57 | /* | ||
58 | * The inline function down() decremented count and the result | ||
59 | * was < 0. The wait loop will atomically test and update the | ||
60 | * semaphore counter following the rules: | ||
61 | * count > 0: decrement count, wake up queue and exit. | ||
62 | * count <= 0: set count to -1, go to sleep. | ||
63 | */ | ||
64 | void __sched __down(struct semaphore * sem) | ||
65 | { | ||
66 | struct task_struct *tsk = current; | ||
67 | DECLARE_WAITQUEUE(wait, tsk); | ||
68 | |||
69 | __set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
70 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
71 | while (__sem_update_count(sem, -1) <= 0) { | ||
72 | schedule(); | ||
73 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
74 | } | ||
75 | remove_wait_queue(&sem->wait, &wait); | ||
76 | __set_task_state(tsk, TASK_RUNNING); | ||
77 | wake_up(&sem->wait); | ||
78 | } | ||
79 | |||
80 | /* | ||
81 | * Same as __down() with an additional test for signals. | ||
82 | * If a signal is pending the count is updated as follows: | ||
83 | * count > 0: wake up queue and exit. | ||
84 | * count <= 0: set count to 0, wake up queue and exit. | ||
85 | */ | ||
86 | int __sched __down_interruptible(struct semaphore * sem) | ||
87 | { | ||
88 | int retval = 0; | ||
89 | struct task_struct *tsk = current; | ||
90 | DECLARE_WAITQUEUE(wait, tsk); | ||
91 | |||
92 | __set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
93 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
94 | while (__sem_update_count(sem, -1) <= 0) { | ||
95 | if (signal_pending(current)) { | ||
96 | __sem_update_count(sem, 0); | ||
97 | retval = -EINTR; | ||
98 | break; | ||
99 | } | ||
100 | schedule(); | ||
101 | set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
102 | } | ||
103 | remove_wait_queue(&sem->wait, &wait); | ||
104 | __set_task_state(tsk, TASK_RUNNING); | ||
105 | wake_up(&sem->wait); | ||
106 | return retval; | ||
107 | } | ||
108 | |||
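With the architecture-specific implementation deleted (and its exports removed from s390_ksyms.c above), s390 falls back to the generic semaphore code; callers are unaffected. A minimal usage sketch under that assumption:

#include <linux/semaphore.h>

static struct semaphore my_sem;	/* hypothetical; init once with sema_init(&my_sem, 1) */

static void example(void)
{
	if (down_interruptible(&my_sem))
		return;		/* interrupted by a signal */
	/* ... critical section ... */
	up(&my_sem);
}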
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 290e504061a3..7141147e6b63 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/pfn.h> | 39 | #include <linux/pfn.h> |
40 | #include <linux/ctype.h> | 40 | #include <linux/ctype.h> |
41 | #include <linux/reboot.h> | 41 | #include <linux/reboot.h> |
42 | #include <linux/topology.h> | ||
42 | 43 | ||
43 | #include <asm/ipl.h> | 44 | #include <asm/ipl.h> |
44 | #include <asm/uaccess.h> | 45 | #include <asm/uaccess.h> |
@@ -427,7 +428,7 @@ setup_lowcore(void) | |||
427 | lc->io_new_psw.mask = psw_kernel_bits; | 428 | lc->io_new_psw.mask = psw_kernel_bits; |
428 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; | 429 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; |
429 | lc->ipl_device = S390_lowcore.ipl_device; | 430 | lc->ipl_device = S390_lowcore.ipl_device; |
430 | lc->jiffy_timer = -1LL; | 431 | lc->clock_comparator = -1ULL; |
431 | lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; | 432 | lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; |
432 | lc->async_stack = (unsigned long) | 433 | lc->async_stack = (unsigned long) |
433 | __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; | 434 | __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; |
@@ -687,7 +688,7 @@ static __init unsigned int stfl(void) | |||
687 | return S390_lowcore.stfl_fac_list; | 688 | return S390_lowcore.stfl_fac_list; |
688 | } | 689 | } |
689 | 690 | ||
690 | static __init int stfle(unsigned long long *list, int doublewords) | 691 | static int __init __stfle(unsigned long long *list, int doublewords) |
691 | { | 692 | { |
692 | typedef struct { unsigned long long _[doublewords]; } addrtype; | 693 | typedef struct { unsigned long long _[doublewords]; } addrtype; |
693 | register unsigned long __nr asm("0") = doublewords - 1; | 694 | register unsigned long __nr asm("0") = doublewords - 1; |
@@ -697,6 +698,13 @@ static __init int stfle(unsigned long long *list, int doublewords) | |||
697 | return __nr + 1; | 698 | return __nr + 1; |
698 | } | 699 | } |
699 | 700 | ||
701 | int __init stfle(unsigned long long *list, int doublewords) | ||
702 | { | ||
703 | if (!(stfl() & (1UL << 24))) | ||
704 | return -EOPNOTSUPP; | ||
705 | return __stfle(list, doublewords); | ||
706 | } | ||
707 | |||
700 | /* | 708 | /* |
701 | * Setup hardware capabilities. | 709 | * Setup hardware capabilities. |
702 | */ | 710 | */ |
@@ -741,7 +749,7 @@ static void __init setup_hwcaps(void) | |||
741 | * HWCAP_S390_DFP bit 6. | 749 | * HWCAP_S390_DFP bit 6. |
742 | */ | 750 | */ |
743 | if ((elf_hwcap & (1UL << 2)) && | 751 | if ((elf_hwcap & (1UL << 2)) && |
744 | stfle(&facility_list_extended, 1) > 0) { | 752 | __stfle(&facility_list_extended, 1) > 0) { |
745 | if (facility_list_extended & (1ULL << (64 - 43))) | 753 | if (facility_list_extended & (1ULL << (64 - 43))) |
746 | elf_hwcap |= 1UL << 6; | 754 | elf_hwcap |= 1UL << 6; |
747 | } | 755 | } |
@@ -823,6 +831,7 @@ setup_arch(char **cmdline_p) | |||
823 | 831 | ||
824 | cpu_init(); | 832 | cpu_init(); |
825 | __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; | 833 | __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; |
834 | s390_init_cpu_topology(); | ||
826 | 835 | ||
827 | /* | 836 | /* |
828 | * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). | 837 | * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). |
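stfle() becomes a checked, exported entry point: it first tests the stfl() bit indicating the store-facility-list-extended facility and returns -EOPNOTSUPP when it is absent, while setup_hwcaps() keeps calling the raw __stfle() because it has already tested that bit. A hedged caller sketch:

static int has_dfp(void)
{
	unsigned long long facility_list;
	int nr;

	nr = stfle(&facility_list, 1);
	if (nr < 1)
		return 0;	/* STFLE unavailable or empty list */
	/* bit 43: decimal-floating-point facility, as in the hunk above */
	return (facility_list & (1ULL << (64 - 43))) != 0;
}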
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 4449bf32cbf1..b97682040215 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <asm/ucontext.h> | 27 | #include <asm/ucontext.h> |
28 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
29 | #include <asm/lowcore.h> | 29 | #include <asm/lowcore.h> |
30 | #include "entry.h" | ||
30 | 31 | ||
31 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 32 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
32 | 33 | ||
@@ -235,6 +236,10 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) | |||
235 | /* Default to using normal stack */ | 236 | /* Default to using normal stack */ |
236 | sp = regs->gprs[15]; | 237 | sp = regs->gprs[15]; |
237 | 238 | ||
239 | /* Overflow on alternate signal stack gives SIGSEGV. */ | ||
240 | if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL)) | ||
241 | return (void __user *) -1UL; | ||
242 | |||
238 | /* This is the X/Open sanctioned signal stack switching. */ | 243 | /* This is the X/Open sanctioned signal stack switching. */ |
239 | if (ka->sa.sa_flags & SA_ONSTACK) { | 244 | if (ka->sa.sa_flags & SA_ONSTACK) { |
240 | if (! sas_ss_flags(sp)) | 245 | if (! sas_ss_flags(sp)) |
@@ -270,6 +275,9 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
270 | if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe))) | 275 | if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe))) |
271 | goto give_sigsegv; | 276 | goto give_sigsegv; |
272 | 277 | ||
278 | if (frame == (void __user *) -1UL) | ||
279 | goto give_sigsegv; | ||
280 | |||
273 | if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE)) | 281 | if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE)) |
274 | goto give_sigsegv; | 282 | goto give_sigsegv; |
275 | 283 | ||
@@ -327,6 +335,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
327 | if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe))) | 335 | if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe))) |
328 | goto give_sigsegv; | 336 | goto give_sigsegv; |
329 | 337 | ||
338 | if (frame == (void __user *) -1UL) | ||
339 | goto give_sigsegv; | ||
340 | |||
330 | if (copy_siginfo_to_user(&frame->info, info)) | 341 | if (copy_siginfo_to_user(&frame->info, info)) |
331 | goto give_sigsegv; | 342 | goto give_sigsegv; |
332 | 343 | ||
@@ -474,11 +485,6 @@ void do_signal(struct pt_regs *regs) | |||
474 | int ret; | 485 | int ret; |
475 | #ifdef CONFIG_COMPAT | 486 | #ifdef CONFIG_COMPAT |
476 | if (test_thread_flag(TIF_31BIT)) { | 487 | if (test_thread_flag(TIF_31BIT)) { |
477 | extern int handle_signal32(unsigned long sig, | ||
478 | struct k_sigaction *ka, | ||
479 | siginfo_t *info, | ||
480 | sigset_t *oldset, | ||
481 | struct pt_regs *regs); | ||
482 | ret = handle_signal32(signr, &ka, &info, oldset, regs); | 488 | ret = handle_signal32(signr, &ka, &info, oldset, regs); |
483 | } | 489 | } |
484 | else | 490 | else |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 8f894d380a62..0dfa988c1b26 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <asm/lowcore.h> | 44 | #include <asm/lowcore.h> |
45 | #include <asm/sclp.h> | 45 | #include <asm/sclp.h> |
46 | #include <asm/cpu.h> | 46 | #include <asm/cpu.h> |
47 | #include "entry.h" | ||
47 | 48 | ||
48 | /* | 49 | /* |
49 | * An array with a pointer to the lowcore of every CPU. | 50 | * An array with a pointer to the lowcore of every CPU. |
@@ -67,13 +68,12 @@ enum s390_cpu_state { | |||
67 | CPU_STATE_CONFIGURED, | 68 | CPU_STATE_CONFIGURED, |
68 | }; | 69 | }; |
69 | 70 | ||
70 | #ifdef CONFIG_HOTPLUG_CPU | 71 | DEFINE_MUTEX(smp_cpu_state_mutex); |
71 | static DEFINE_MUTEX(smp_cpu_state_mutex); | 72 | int smp_cpu_polarization[NR_CPUS]; |
72 | #endif | ||
73 | static int smp_cpu_state[NR_CPUS]; | 73 | static int smp_cpu_state[NR_CPUS]; |
74 | static int cpu_management; | ||
74 | 75 | ||
75 | static DEFINE_PER_CPU(struct cpu, cpu_devices); | 76 | static DEFINE_PER_CPU(struct cpu, cpu_devices); |
76 | DEFINE_PER_CPU(struct s390_idle_data, s390_idle); | ||
77 | 77 | ||
78 | static void smp_ext_bitcall(int, ec_bit_sig); | 78 | static void smp_ext_bitcall(int, ec_bit_sig); |
79 | 79 | ||
@@ -298,7 +298,7 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig) | |||
298 | /* | 298 | /* |
299 | * this function sends a 'purge tlb' signal to another CPU. | 299 | * this function sends a 'purge tlb' signal to another CPU. |
300 | */ | 300 | */ |
301 | void smp_ptlb_callback(void *info) | 301 | static void smp_ptlb_callback(void *info) |
302 | { | 302 | { |
303 | __tlb_flush_local(); | 303 | __tlb_flush_local(); |
304 | } | 304 | } |
@@ -456,6 +456,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail) | |||
456 | if (cpu_known(cpu_id)) | 456 | if (cpu_known(cpu_id)) |
457 | continue; | 457 | continue; |
458 | __cpu_logical_map[logical_cpu] = cpu_id; | 458 | __cpu_logical_map[logical_cpu] = cpu_id; |
459 | smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; | ||
459 | if (!cpu_stopped(logical_cpu)) | 460 | if (!cpu_stopped(logical_cpu)) |
460 | continue; | 461 | continue; |
461 | cpu_set(logical_cpu, cpu_present_map); | 462 | cpu_set(logical_cpu, cpu_present_map); |
@@ -489,6 +490,7 @@ static int smp_rescan_cpus_sclp(cpumask_t avail) | |||
489 | if (cpu_known(cpu_id)) | 490 | if (cpu_known(cpu_id)) |
490 | continue; | 491 | continue; |
491 | __cpu_logical_map[logical_cpu] = cpu_id; | 492 | __cpu_logical_map[logical_cpu] = cpu_id; |
493 | smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; | ||
492 | cpu_set(logical_cpu, cpu_present_map); | 494 | cpu_set(logical_cpu, cpu_present_map); |
493 | if (cpu >= info->configured) | 495 | if (cpu >= info->configured) |
494 | smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; | 496 | smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; |
@@ -846,6 +848,7 @@ void __init smp_prepare_boot_cpu(void) | |||
846 | S390_lowcore.percpu_offset = __per_cpu_offset[0]; | 848 | S390_lowcore.percpu_offset = __per_cpu_offset[0]; |
847 | current_set[0] = current; | 849 | current_set[0] = current; |
848 | smp_cpu_state[0] = CPU_STATE_CONFIGURED; | 850 | smp_cpu_state[0] = CPU_STATE_CONFIGURED; |
851 | smp_cpu_polarization[0] = POLARIZATION_UNKNWN; | ||
849 | spin_lock_init(&(&__get_cpu_var(s390_idle))->lock); | 852 | spin_lock_init(&(&__get_cpu_var(s390_idle))->lock); |
850 | } | 853 | } |
851 | 854 | ||
@@ -897,15 +900,19 @@ static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf, | |||
897 | case 0: | 900 | case 0: |
898 | if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) { | 901 | if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) { |
899 | rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]); | 902 | rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]); |
900 | if (!rc) | 903 | if (!rc) { |
901 | smp_cpu_state[cpu] = CPU_STATE_STANDBY; | 904 | smp_cpu_state[cpu] = CPU_STATE_STANDBY; |
905 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; | ||
906 | } | ||
902 | } | 907 | } |
903 | break; | 908 | break; |
904 | case 1: | 909 | case 1: |
905 | if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) { | 910 | if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) { |
906 | rc = sclp_cpu_configure(__cpu_logical_map[cpu]); | 911 | rc = sclp_cpu_configure(__cpu_logical_map[cpu]); |
907 | if (!rc) | 912 | if (!rc) { |
908 | smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; | 913 | smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; |
914 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; | ||
915 | } | ||
909 | } | 916 | } |
910 | break; | 917 | break; |
911 | default: | 918 | default: |
@@ -919,6 +926,34 @@ out: | |||
919 | static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); | 926 | static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); |
920 | #endif /* CONFIG_HOTPLUG_CPU */ | 927 | #endif /* CONFIG_HOTPLUG_CPU */ |
921 | 928 | ||
929 | static ssize_t cpu_polarization_show(struct sys_device *dev, char *buf) | ||
930 | { | ||
931 | int cpu = dev->id; | ||
932 | ssize_t count; | ||
933 | |||
934 | mutex_lock(&smp_cpu_state_mutex); | ||
935 | switch (smp_cpu_polarization[cpu]) { | ||
936 | case POLARIZATION_HRZ: | ||
937 | count = sprintf(buf, "horizontal\n"); | ||
938 | break; | ||
939 | case POLARIZATION_VL: | ||
940 | count = sprintf(buf, "vertical:low\n"); | ||
941 | break; | ||
942 | case POLARIZATION_VM: | ||
943 | count = sprintf(buf, "vertical:medium\n"); | ||
944 | break; | ||
945 | case POLARIZATION_VH: | ||
946 | count = sprintf(buf, "vertical:high\n"); | ||
947 | break; | ||
948 | default: | ||
949 | count = sprintf(buf, "unknown\n"); | ||
950 | break; | ||
951 | } | ||
952 | mutex_unlock(&smp_cpu_state_mutex); | ||
953 | return count; | ||
954 | } | ||
955 | static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL); | ||
956 | |||
922 | static ssize_t show_cpu_address(struct sys_device *dev, char *buf) | 957 | static ssize_t show_cpu_address(struct sys_device *dev, char *buf) |
923 | { | 958 | { |
924 | return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]); | 959 | return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]); |
@@ -931,6 +966,7 @@ static struct attribute *cpu_common_attrs[] = { | |||
931 | &attr_configure.attr, | 966 | &attr_configure.attr, |
932 | #endif | 967 | #endif |
933 | &attr_address.attr, | 968 | &attr_address.attr, |
969 | &attr_polarization.attr, | ||
934 | NULL, | 970 | NULL, |
935 | }; | 971 | }; |
936 | 972 | ||
@@ -1075,11 +1111,48 @@ static ssize_t __ref rescan_store(struct sys_device *dev, | |||
1075 | out: | 1111 | out: |
1076 | put_online_cpus(); | 1112 | put_online_cpus(); |
1077 | mutex_unlock(&smp_cpu_state_mutex); | 1113 | mutex_unlock(&smp_cpu_state_mutex); |
1114 | if (!cpus_empty(newcpus)) | ||
1115 | topology_schedule_update(); | ||
1078 | return rc ? rc : count; | 1116 | return rc ? rc : count; |
1079 | } | 1117 | } |
1080 | static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store); | 1118 | static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store); |
1081 | #endif /* CONFIG_HOTPLUG_CPU */ | 1119 | #endif /* CONFIG_HOTPLUG_CPU */ |
1082 | 1120 | ||
1121 | static ssize_t dispatching_show(struct sys_device *dev, char *buf) | ||
1122 | { | ||
1123 | ssize_t count; | ||
1124 | |||
1125 | mutex_lock(&smp_cpu_state_mutex); | ||
1126 | count = sprintf(buf, "%d\n", cpu_management); | ||
1127 | mutex_unlock(&smp_cpu_state_mutex); | ||
1128 | return count; | ||
1129 | } | ||
1130 | |||
1131 | static ssize_t dispatching_store(struct sys_device *dev, const char *buf, | ||
1132 | size_t count) | ||
1133 | { | ||
1134 | int val, rc; | ||
1135 | char delim; | ||
1136 | |||
1137 | if (sscanf(buf, "%d %c", &val, &delim) != 1) | ||
1138 | return -EINVAL; | ||
1139 | if (val != 0 && val != 1) | ||
1140 | return -EINVAL; | ||
1141 | rc = 0; | ||
1142 | mutex_lock(&smp_cpu_state_mutex); | ||
1143 | get_online_cpus(); | ||
1144 | if (cpu_management == val) | ||
1145 | goto out; | ||
1146 | rc = topology_set_cpu_management(val); | ||
1147 | if (!rc) | ||
1148 | cpu_management = val; | ||
1149 | out: | ||
1150 | put_online_cpus(); | ||
1151 | mutex_unlock(&smp_cpu_state_mutex); | ||
1152 | return rc ? rc : count; | ||
1153 | } | ||
1154 | static SYSDEV_ATTR(dispatching, 0644, dispatching_show, dispatching_store); | ||
1155 | |||
1083 | static int __init topology_init(void) | 1156 | static int __init topology_init(void) |
1084 | { | 1157 | { |
1085 | int cpu; | 1158 | int cpu; |
@@ -1093,6 +1166,10 @@ static int __init topology_init(void) | |||
1093 | if (rc) | 1166 | if (rc) |
1094 | return rc; | 1167 | return rc; |
1095 | #endif | 1168 | #endif |
1169 | rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj, | ||
1170 | &attr_dispatching.attr); | ||
1171 | if (rc) | ||
1172 | return rc; | ||
1096 | for_each_present_cpu(cpu) { | 1173 | for_each_present_cpu(cpu) { |
1097 | rc = smp_add_present_cpu(cpu); | 1174 | rc = smp_add_present_cpu(cpu); |
1098 | if (rc) | 1175 | if (rc) |
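dispatching_store() accepts exactly "0" or "1": the "%d %c" format makes sscanf() return 2 whenever anything but whitespace follows the integer, so trailing junk is rejected. A userspace illustration of the idiom:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int val;
	char delim;

	assert(sscanf("1\n", "%d %c", &val, &delim) == 1);	/* accepted */
	assert(sscanf("1 x", "%d %c", &val, &delim) == 2);	/* rejected */
	return 0;
}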
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c index fefee99f28aa..988d0d64c2c8 100644 --- a/arch/s390/kernel/sys_s390.c +++ b/arch/s390/kernel/sys_s390.c | |||
@@ -29,8 +29,8 @@ | |||
29 | #include <linux/personality.h> | 29 | #include <linux/personality.h> |
30 | #include <linux/unistd.h> | 30 | #include <linux/unistd.h> |
31 | #include <linux/ipc.h> | 31 | #include <linux/ipc.h> |
32 | |||
33 | #include <asm/uaccess.h> | 32 | #include <asm/uaccess.h> |
33 | #include "entry.h" | ||
34 | 34 | ||
35 | /* | 35 | /* |
36 | * sys_pipe() is the normal C calling standard for creating | 36 | * sys_pipe() is the normal C calling standard for creating |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index cb232c155360..7aec676fefd5 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #include <linux/timex.h> | 30 | #include <linux/timex.h> |
31 | #include <linux/notifier.h> | 31 | #include <linux/notifier.h> |
32 | #include <linux/clocksource.h> | 32 | #include <linux/clocksource.h> |
33 | 33 | #include <linux/clockchips.h> | |
34 | #include <asm/uaccess.h> | 34 | #include <asm/uaccess.h> |
35 | #include <asm/delay.h> | 35 | #include <asm/delay.h> |
36 | #include <asm/s390_ext.h> | 36 | #include <asm/s390_ext.h> |
@@ -39,6 +39,7 @@ | |||
39 | #include <asm/irq_regs.h> | 39 | #include <asm/irq_regs.h> |
40 | #include <asm/timer.h> | 40 | #include <asm/timer.h> |
41 | #include <asm/etr.h> | 41 | #include <asm/etr.h> |
42 | #include <asm/cio.h> | ||
42 | 43 | ||
43 | /* change this if you have some constant time drift */ | 44 | /* change this if you have some constant time drift */ |
44 | #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) | 45 | #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) |
@@ -57,16 +58,16 @@ | |||
57 | 58 | ||
58 | static ext_int_info_t ext_int_info_cc; | 59 | static ext_int_info_t ext_int_info_cc; |
59 | static ext_int_info_t ext_int_etr_cc; | 60 | static ext_int_info_t ext_int_etr_cc; |
60 | static u64 init_timer_cc; | ||
61 | static u64 jiffies_timer_cc; | 61 | static u64 jiffies_timer_cc; |
62 | static u64 xtime_cc; | 62 | |
63 | static DEFINE_PER_CPU(struct clock_event_device, comparators); | ||
63 | 64 | ||
64 | /* | 65 | /* |
65 | * Scheduler clock - returns current time in nanosec units. | 66 | * Scheduler clock - returns current time in nanosec units. |
66 | */ | 67 | */ |
67 | unsigned long long sched_clock(void) | 68 | unsigned long long sched_clock(void) |
68 | { | 69 | { |
69 | return ((get_clock() - jiffies_timer_cc) * 125) >> 9; | 70 | return ((get_clock_xt() - jiffies_timer_cc) * 125) >> 9; |
70 | } | 71 | } |
71 | 72 | ||
72 | /* | 73 | /* |
@@ -95,162 +96,40 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime) | |||
95 | #define s390_do_profile() do { ; } while(0) | 96 | #define s390_do_profile() do { ; } while(0) |
96 | #endif /* CONFIG_PROFILING */ | 97 | #endif /* CONFIG_PROFILING */ |
97 | 98 | ||
98 | /* | 99 | void clock_comparator_work(void) |
99 | * Advance the per cpu tick counter up to the time given with the | ||
100 | * "time" argument. The per cpu update consists of accounting | ||
101 | * the virtual cpu time, calling update_process_times and calling | ||
102 | * the profiling hook. If xtime is before time it is advanced as well. | ||
103 | */ | ||
104 | void account_ticks(u64 time) | ||
105 | { | 100 | { |
106 | __u32 ticks; | 101 | struct clock_event_device *cd; |
107 | __u64 tmp; | ||
108 | |||
109 | /* Calculate how many ticks have passed. */ | ||
110 | if (time < S390_lowcore.jiffy_timer) | ||
111 | return; | ||
112 | tmp = time - S390_lowcore.jiffy_timer; | ||
113 | if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */ | ||
114 | ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1; | ||
115 | S390_lowcore.jiffy_timer += | ||
116 | CLK_TICKS_PER_JIFFY * (__u64) ticks; | ||
117 | } else if (tmp >= CLK_TICKS_PER_JIFFY) { | ||
118 | ticks = 2; | ||
119 | S390_lowcore.jiffy_timer += 2*CLK_TICKS_PER_JIFFY; | ||
120 | } else { | ||
121 | ticks = 1; | ||
122 | S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY; | ||
123 | } | ||
124 | |||
125 | #ifdef CONFIG_SMP | ||
126 | /* | ||
127 | * Do not rely on the boot cpu to do the calls to do_timer. | ||
128 | * Spread it over all cpus instead. | ||
129 | */ | ||
130 | write_seqlock(&xtime_lock); | ||
131 | if (S390_lowcore.jiffy_timer > xtime_cc) { | ||
132 | __u32 xticks; | ||
133 | tmp = S390_lowcore.jiffy_timer - xtime_cc; | ||
134 | if (tmp >= 2*CLK_TICKS_PER_JIFFY) { | ||
135 | xticks = __div(tmp, CLK_TICKS_PER_JIFFY); | ||
136 | xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY; | ||
137 | } else { | ||
138 | xticks = 1; | ||
139 | xtime_cc += CLK_TICKS_PER_JIFFY; | ||
140 | } | ||
141 | do_timer(xticks); | ||
142 | } | ||
143 | write_sequnlock(&xtime_lock); | ||
144 | #else | ||
145 | do_timer(ticks); | ||
146 | #endif | ||
147 | |||
148 | while (ticks--) | ||
149 | update_process_times(user_mode(get_irq_regs())); | ||
150 | 102 | ||
103 | S390_lowcore.clock_comparator = -1ULL; | ||
104 | set_clock_comparator(S390_lowcore.clock_comparator); | ||
105 | cd = &__get_cpu_var(comparators); | ||
106 | cd->event_handler(cd); | ||
151 | s390_do_profile(); | 107 | s390_do_profile(); |
152 | } | 108 | } |
153 | 109 | ||
154 | #ifdef CONFIG_NO_IDLE_HZ | ||
155 | |||
156 | #ifdef CONFIG_NO_IDLE_HZ_INIT | ||
157 | int sysctl_hz_timer = 0; | ||
158 | #else | ||
159 | int sysctl_hz_timer = 1; | ||
160 | #endif | ||
161 | |||
162 | /* | ||
163 | * Stop the HZ tick on the current CPU. | ||
164 | * Only cpu_idle may call this function. | ||
165 | */ | ||
166 | static void stop_hz_timer(void) | ||
167 | { | ||
168 | unsigned long flags; | ||
169 | unsigned long seq, next; | ||
170 | __u64 timer, todval; | ||
171 | int cpu = smp_processor_id(); | ||
172 | |||
173 | if (sysctl_hz_timer != 0) | ||
174 | return; | ||
175 | |||
176 | cpu_set(cpu, nohz_cpu_mask); | ||
177 | |||
178 | /* | ||
179 | * Leave the clock comparator set up for the next timer | ||
180 | * tick if either rcu or a softirq is pending. | ||
181 | */ | ||
182 | if (rcu_needs_cpu(cpu) || local_softirq_pending()) { | ||
183 | cpu_clear(cpu, nohz_cpu_mask); | ||
184 | return; | ||
185 | } | ||
186 | |||
187 | /* | ||
188 | * This cpu is going really idle. Set up the clock comparator | ||
189 | * for the next event. | ||
190 | */ | ||
191 | next = next_timer_interrupt(); | ||
192 | do { | ||
193 | seq = read_seqbegin_irqsave(&xtime_lock, flags); | ||
194 | timer = ((__u64) next) - ((__u64) jiffies) + jiffies_64; | ||
195 | } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); | ||
196 | todval = -1ULL; | ||
197 | /* Be careful about overflows. */ | ||
198 | if (timer < (-1ULL / CLK_TICKS_PER_JIFFY)) { | ||
199 | timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY; | ||
200 | if (timer >= jiffies_timer_cc) | ||
201 | todval = timer; | ||
202 | } | ||
203 | set_clock_comparator(todval); | ||
204 | } | ||
205 | |||
206 | /* | 110 | /* |
207 | * Start the HZ tick on the current CPU. | 111 | * Fixup the clock comparator. |
208 | * Only cpu_idle may call this function. | ||
209 | */ | 112 | */ |
210 | static void start_hz_timer(void) | 113 | static void fixup_clock_comparator(unsigned long long delta) |
211 | { | 114 | { |
212 | if (!cpu_isset(smp_processor_id(), nohz_cpu_mask)) | 115 | /* If nobody is waiting there's nothing to fix. */ |
116 | if (S390_lowcore.clock_comparator == -1ULL) | ||
213 | return; | 117 | return; |
214 | account_ticks(get_clock()); | 118 | S390_lowcore.clock_comparator += delta; |
215 | set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION); | 119 | set_clock_comparator(S390_lowcore.clock_comparator); |
216 | cpu_clear(smp_processor_id(), nohz_cpu_mask); | ||
217 | } | ||
218 | |||
219 | static int nohz_idle_notify(struct notifier_block *self, | ||
220 | unsigned long action, void *hcpu) | ||
221 | { | ||
222 | switch (action) { | ||
223 | case S390_CPU_IDLE: | ||
224 | stop_hz_timer(); | ||
225 | break; | ||
226 | case S390_CPU_NOT_IDLE: | ||
227 | start_hz_timer(); | ||
228 | break; | ||
229 | } | ||
230 | return NOTIFY_OK; | ||
231 | } | 120 | } |
232 | 121 | ||
233 | static struct notifier_block nohz_idle_nb = { | 122 | static int s390_next_event(unsigned long delta, |
234 | .notifier_call = nohz_idle_notify, | 123 | struct clock_event_device *evt) |
235 | }; | ||
236 | |||
237 | static void __init nohz_init(void) | ||
238 | { | 124 | { |
239 | if (register_idle_notifier(&nohz_idle_nb)) | 125 | S390_lowcore.clock_comparator = get_clock() + delta; |
240 | panic("Couldn't register idle notifier"); | 126 | set_clock_comparator(S390_lowcore.clock_comparator); |
127 | return 0; | ||
241 | } | 128 | } |
242 | 129 | ||
243 | #endif | 130 | static void s390_set_mode(enum clock_event_mode mode, |
244 | 131 | struct clock_event_device *evt) | |
245 | /* | ||
246 | * Set up per cpu jiffy timer and set the clock comparator. | ||
247 | */ | ||
248 | static void setup_jiffy_timer(void) | ||
249 | { | 132 | { |
250 | /* Set up clock comparator to next jiffy. */ | ||
251 | S390_lowcore.jiffy_timer = | ||
252 | jiffies_timer_cc + (jiffies_64 + 1) * CLK_TICKS_PER_JIFFY; | ||
253 | set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION); | ||
254 | } | 133 | } |
255 | 134 | ||
256 | /* | 135 | /* |
@@ -259,7 +138,26 @@ static void setup_jiffy_timer(void) | |||
259 | */ | 138 | */ |
260 | void init_cpu_timer(void) | 139 | void init_cpu_timer(void) |
261 | { | 140 | { |
262 | setup_jiffy_timer(); | 141 | struct clock_event_device *cd; |
142 | int cpu; | ||
143 | |||
144 | S390_lowcore.clock_comparator = -1ULL; | ||
145 | set_clock_comparator(S390_lowcore.clock_comparator); | ||
146 | |||
147 | cpu = smp_processor_id(); | ||
148 | cd = &per_cpu(comparators, cpu); | ||
149 | cd->name = "comparator"; | ||
150 | cd->features = CLOCK_EVT_FEAT_ONESHOT; | ||
151 | cd->mult = 16777; | ||
152 | cd->shift = 12; | ||
153 | cd->min_delta_ns = 1; | ||
154 | cd->max_delta_ns = LONG_MAX; | ||
155 | cd->rating = 400; | ||
156 | cd->cpumask = cpumask_of_cpu(cpu); | ||
157 | cd->set_next_event = s390_next_event; | ||
158 | cd->set_mode = s390_set_mode; | ||
159 | |||
160 | clockevents_register_device(cd); | ||
263 | 161 | ||
264 | /* Enable clock comparator timer interrupt. */ | 162 | /* Enable clock comparator timer interrupt. */ |
265 | __ctl_set_bit(0,11); | 163 | __ctl_set_bit(0,11); |
@@ -270,8 +168,6 @@ void init_cpu_timer(void) | |||
270 | 168 | ||
271 | static void clock_comparator_interrupt(__u16 code) | 169 | static void clock_comparator_interrupt(__u16 code) |
272 | { | 170 | { |
273 | /* set clock comparator for next tick */ | ||
274 | set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION); | ||
275 | } | 171 | } |
276 | 172 | ||
277 | static void etr_reset(void); | 173 | static void etr_reset(void); |
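The per-cpu comparator clockevent advertises mult = 16777, shift = 12, so the clockevents core converts nanoseconds to TOD-clock units as (ns * mult) >> shift; one TOD unit is 2^-12 microseconds, i.e. 4.096 units per nanosecond, and 16777 / 4096 ≈ 4.0959 matches that to roughly 13 ppm. A userspace check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long long ns = 1000000;		/* 1 ms */
	unsigned long long tod = (ns * 16777) >> 12;	/* core's formula */

	printf("%llu\n", tod);	/* prints 4095947; exact would be 4096000 */
	return 0;
}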
@@ -316,8 +212,9 @@ static struct clocksource clocksource_tod = { | |||
316 | */ | 212 | */ |
317 | void __init time_init(void) | 213 | void __init time_init(void) |
318 | { | 214 | { |
215 | u64 init_timer_cc; | ||
216 | |||
319 | init_timer_cc = reset_tod_clock(); | 217 | init_timer_cc = reset_tod_clock(); |
320 | xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY; | ||
321 | jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY; | 218 | jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY; |
322 | 219 | ||
323 | /* set xtime */ | 220 | /* set xtime */ |
@@ -342,10 +239,6 @@ void __init time_init(void) | |||
342 | /* Enable TOD clock interrupts on the boot cpu. */ | 239 | /* Enable TOD clock interrupts on the boot cpu. */ |
343 | init_cpu_timer(); | 240 | init_cpu_timer(); |
344 | 241 | ||
345 | #ifdef CONFIG_NO_IDLE_HZ | ||
346 | nohz_init(); | ||
347 | #endif | ||
348 | |||
349 | #ifdef CONFIG_VIRT_TIMER | 242 | #ifdef CONFIG_VIRT_TIMER |
350 | vtime_init(); | 243 | vtime_init(); |
351 | #endif | 244 | #endif |
@@ -699,53 +592,49 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p) | |||
699 | } | 592 | } |
700 | 593 | ||
701 | /* | 594 | /* |
702 | * The time is "clock". xtime is what we think the time is. | 595 | * The time is "clock". old is what we think the time is. |
703 | * Adjust the value by a multiple of jiffies and add the delta to ntp. | 596 | * Adjust the value by a multiple of jiffies and add the delta to ntp. |
704 | * "delay" is an approximation of how long the synchronization took. If | 597 | * "delay" is an approximation of how long the synchronization took. If |
705 | * the time correction is positive, then "delay" is subtracted from | 598 | * the time correction is positive, then "delay" is subtracted from |
706 | * the time difference and only the remaining part is passed to ntp. | 599 | * the time difference and only the remaining part is passed to ntp. |
707 | */ | 600 | */ |
708 | static void etr_adjust_time(unsigned long long clock, unsigned long long delay) | 601 | static unsigned long long etr_adjust_time(unsigned long long old, |
602 | unsigned long long clock, | ||
603 | unsigned long long delay) | ||
709 | { | 604 | { |
710 | unsigned long long delta, ticks; | 605 | unsigned long long delta, ticks; |
711 | struct timex adjust; | 606 | struct timex adjust; |
712 | 607 | ||
713 | /* | 608 | if (clock > old) { |
714 | * We don't have to take the xtime lock because the cpu | ||
715 | * executing etr_adjust_time is running disabled in | ||
716 | * tasklet context and all other cpus are looping in | ||
717 | * etr_sync_cpu_start. | ||
718 | */ | ||
719 | if (clock > xtime_cc) { | ||
720 | /* It is later than we thought. */ | 609 | /* It is later than we thought. */ |
721 | delta = ticks = clock - xtime_cc; | 610 | delta = ticks = clock - old; |
722 | delta = ticks = (delta < delay) ? 0 : delta - delay; | 611 | delta = ticks = (delta < delay) ? 0 : delta - delay; |
723 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); | 612 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); |
724 | init_timer_cc = init_timer_cc + delta; | ||
725 | jiffies_timer_cc = jiffies_timer_cc + delta; | ||
726 | xtime_cc = xtime_cc + delta; | ||
727 | adjust.offset = ticks * (1000000 / HZ); | 613 | adjust.offset = ticks * (1000000 / HZ); |
728 | } else { | 614 | } else { |
729 | /* It is earlier than we thought. */ | 615 | /* It is earlier than we thought. */ |
730 | delta = ticks = xtime_cc - clock; | 616 | delta = ticks = old - clock; |
731 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); | 617 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); |
732 | init_timer_cc = init_timer_cc - delta; | 618 | delta = -delta; |
733 | jiffies_timer_cc = jiffies_timer_cc - delta; | ||
734 | xtime_cc = xtime_cc - delta; | ||
735 | adjust.offset = -ticks * (1000000 / HZ); | 619 | adjust.offset = -ticks * (1000000 / HZ); |
736 | } | 620 | } |
621 | jiffies_timer_cc += delta; | ||
737 | if (adjust.offset != 0) { | 622 | if (adjust.offset != 0) { |
738 | printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n", | 623 | printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n", |
739 | adjust.offset); | 624 | adjust.offset); |
740 | adjust.modes = ADJ_OFFSET_SINGLESHOT; | 625 | adjust.modes = ADJ_OFFSET_SINGLESHOT; |
741 | do_adjtimex(&adjust); | 626 | do_adjtimex(&adjust); |
742 | } | 627 | } |
628 | return delta; | ||
743 | } | 629 | } |
744 | 630 | ||
631 | static struct { | ||
632 | int in_sync; | ||
633 | unsigned long long fixup_cc; | ||
634 | } etr_sync; | ||
635 | |||
745 | static void etr_sync_cpu_start(void *dummy) | 636 | static void etr_sync_cpu_start(void *dummy) |
746 | { | 637 | { |
747 | int *in_sync = dummy; | ||
748 | |||
749 | etr_enable_sync_clock(); | 638 | etr_enable_sync_clock(); |
750 | /* | 639 | /* |
751 | * This looks like a busy wait loop but it isn't. etr_sync_cpus | 640 | * This looks like a busy wait loop but it isn't. etr_sync_cpus |
@@ -753,7 +642,7 @@ static void etr_sync_cpu_start(void *dummy) | |||
753 | * __udelay will stop the cpu on an enabled wait psw until the | 642 | * __udelay will stop the cpu on an enabled wait psw until the |
754 | * TOD is running again. | 643 | * TOD is running again. |
755 | */ | 644 | */ |
756 | while (*in_sync == 0) { | 645 | while (etr_sync.in_sync == 0) { |
757 | __udelay(1); | 646 | __udelay(1); |
758 | /* | 647 | /* |
759 | * A different cpu changes *in_sync. Therefore use | 648 | * A different cpu changes *in_sync. Therefore use |
@@ -761,14 +650,14 @@ static void etr_sync_cpu_start(void *dummy) | |||
761 | */ | 650 | */ |
762 | barrier(); | 651 | barrier(); |
763 | } | 652 | } |
764 | if (*in_sync != 1) | 653 | if (etr_sync.in_sync != 1) |
765 | /* Didn't work. Clear per-cpu in sync bit again. */ | 654 | /* Didn't work. Clear per-cpu in sync bit again. */ |
766 | etr_disable_sync_clock(NULL); | 655 | etr_disable_sync_clock(NULL); |
767 | /* | 656 | /* |
768 | * This round of TOD syncing is done. Set the clock comparator | 657 | * This round of TOD syncing is done. Set the clock comparator |
769 | * to the next tick and let the processor continue. | 658 | * to the next tick and let the processor continue. |
770 | */ | 659 | */ |
771 | setup_jiffy_timer(); | 660 | fixup_clock_comparator(etr_sync.fixup_cc); |
772 | } | 661 | } |
773 | 662 | ||
774 | static void etr_sync_cpu_end(void *dummy) | 663 | static void etr_sync_cpu_end(void *dummy) |
@@ -783,8 +672,8 @@ static void etr_sync_cpu_end(void *dummy) | |||
783 | static int etr_sync_clock(struct etr_aib *aib, int port) | 672 | static int etr_sync_clock(struct etr_aib *aib, int port) |
784 | { | 673 | { |
785 | struct etr_aib *sync_port; | 674 | struct etr_aib *sync_port; |
786 | unsigned long long clock, delay; | 675 | unsigned long long clock, old_clock, delay, delta; |
787 | int in_sync, follows; | 676 | int follows; |
788 | int rc; | 677 | int rc; |
789 | 678 | ||
790 | /* Check if the current aib is adjacent to the sync port aib. */ | 679 | /* Check if the current aib is adjacent to the sync port aib. */ |
@@ -799,9 +688,9 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
799 | * successfully synced the clock. smp_call_function will | 688 | * successfully synced the clock. smp_call_function will |
800 | * return after all other cpus are in etr_sync_cpu_start. | 689 | * return after all other cpus are in etr_sync_cpu_start. |
801 | */ | 690 | */ |
802 | in_sync = 0; | 691 | memset(&etr_sync, 0, sizeof(etr_sync)); |
803 | preempt_disable(); | 692 | preempt_disable(); |
804 | smp_call_function(etr_sync_cpu_start,&in_sync,0,0); | 693 | smp_call_function(etr_sync_cpu_start, NULL, 0, 0); |
805 | local_irq_disable(); | 694 | local_irq_disable(); |
806 | etr_enable_sync_clock(); | 695 | etr_enable_sync_clock(); |
807 | 696 | ||
@@ -809,6 +698,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
809 | __ctl_set_bit(14, 21); | 698 | __ctl_set_bit(14, 21); |
810 | __ctl_set_bit(0, 29); | 699 | __ctl_set_bit(0, 29); |
811 | clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32; | 700 | clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32; |
701 | old_clock = get_clock(); | ||
812 | if (set_clock(clock) == 0) { | 702 | if (set_clock(clock) == 0) { |
813 | __udelay(1); /* Wait for the clock to start. */ | 703 | __udelay(1); /* Wait for the clock to start. */ |
814 | __ctl_clear_bit(0, 29); | 704 | __ctl_clear_bit(0, 29); |
@@ -817,16 +707,17 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
817 | /* Adjust Linux timing variables. */ | 707 | /* Adjust Linux timing variables. */ |
818 | delay = (unsigned long long) | 708 | delay = (unsigned long long) |
819 | (aib->edf2.etv - sync_port->edf2.etv) << 32; | 709 | (aib->edf2.etv - sync_port->edf2.etv) << 32; |
820 | etr_adjust_time(clock, delay); | 710 | delta = etr_adjust_time(old_clock, clock, delay); |
821 | setup_jiffy_timer(); | 711 | etr_sync.fixup_cc = delta; |
712 | fixup_clock_comparator(delta); | ||
822 | /* Verify that the clock is properly set. */ | 713 | /* Verify that the clock is properly set. */ |
823 | if (!etr_aib_follows(sync_port, aib, port)) { | 714 | if (!etr_aib_follows(sync_port, aib, port)) { |
824 | /* Didn't work. */ | 715 | /* Didn't work. */ |
825 | etr_disable_sync_clock(NULL); | 716 | etr_disable_sync_clock(NULL); |
826 | in_sync = -EAGAIN; | 717 | etr_sync.in_sync = -EAGAIN; |
827 | rc = -EAGAIN; | 718 | rc = -EAGAIN; |
828 | } else { | 719 | } else { |
829 | in_sync = 1; | 720 | etr_sync.in_sync = 1; |
830 | rc = 0; | 721 | rc = 0; |
831 | } | 722 | } |
832 | } else { | 723 | } else { |
@@ -834,7 +725,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
834 | __ctl_clear_bit(0, 29); | 725 | __ctl_clear_bit(0, 29); |
835 | __ctl_clear_bit(14, 21); | 726 | __ctl_clear_bit(14, 21); |
836 | etr_disable_sync_clock(NULL); | 727 | etr_disable_sync_clock(NULL); |
837 | in_sync = -EAGAIN; | 728 | etr_sync.in_sync = -EAGAIN; |
838 | rc = -EAGAIN; | 729 | rc = -EAGAIN; |
839 | } | 730 | } |
840 | local_irq_enable(); | 731 | local_irq_enable(); |
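
The hunk above replaces the on-stack in_sync flag (previously handed to the other CPUs through smp_call_function) with a static etr_sync struct, so the syncing CPU can also publish the clock-comparator correction it computed. A minimal sketch of the rendezvous pattern, with hypothetical names (sync_state and apply_fixup stand in for etr_sync and fixup_clock_comparator):

    /* Shared state: written by the master CPU, polled by the others.
     * The master memsets it to zero before starting, so fixup_cc is 0
     * unless a successful sync filled it in. */
    static struct {
            int in_sync;                 /* 0: wait, 1: ok, <0: failed */
            unsigned long long fixup_cc; /* clock-comparator correction */
    } sync_state;

    static void slave_rendezvous(void)
    {
            while (sync_state.in_sync == 0)
                    barrier();  /* force a re-read; another CPU writes it */
            /* On failure fixup_cc is still 0 from the memset, so the
             * correction below is a no-op. */
            apply_fixup(sync_state.fixup_cc);   /* hypothetical helper */
    }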
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c new file mode 100644 index 000000000000..12b39b3d9c38 --- /dev/null +++ b/arch/s390/kernel/topology.c | |||
@@ -0,0 +1,314 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2007 | ||
3 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/mm.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/device.h> | ||
10 | #include <linux/bootmem.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/workqueue.h> | ||
13 | #include <linux/cpu.h> | ||
14 | #include <linux/smp.h> | ||
15 | #include <asm/delay.h> | ||
16 | #include <asm/s390_ext.h> | ||
17 | #include <asm/sysinfo.h> | ||
18 | |||
19 | #define CPU_BITS 64 | ||
20 | #define NR_MAG 6 | ||
21 | |||
22 | #define PTF_HORIZONTAL (0UL) | ||
23 | #define PTF_VERTICAL (1UL) | ||
24 | #define PTF_CHECK (2UL) | ||
25 | |||
26 | struct tl_cpu { | ||
27 | unsigned char reserved0[4]; | ||
28 | unsigned char :6; | ||
29 | unsigned char pp:2; | ||
30 | unsigned char reserved1; | ||
31 | unsigned short origin; | ||
32 | unsigned long mask[CPU_BITS / BITS_PER_LONG]; | ||
33 | }; | ||
34 | |||
35 | struct tl_container { | ||
36 | unsigned char reserved[8]; | ||
37 | }; | ||
38 | |||
39 | union tl_entry { | ||
40 | unsigned char nl; | ||
41 | struct tl_cpu cpu; | ||
42 | struct tl_container container; | ||
43 | }; | ||
44 | |||
45 | struct tl_info { | ||
46 | unsigned char reserved0[2]; | ||
47 | unsigned short length; | ||
48 | unsigned char mag[NR_MAG]; | ||
49 | unsigned char reserved1; | ||
50 | unsigned char mnest; | ||
51 | unsigned char reserved2[4]; | ||
52 | union tl_entry tle[0]; | ||
53 | }; | ||
54 | |||
55 | struct core_info { | ||
56 | struct core_info *next; | ||
57 | cpumask_t mask; | ||
58 | }; | ||
59 | |||
60 | static void topology_work_fn(struct work_struct *work); | ||
61 | static struct tl_info *tl_info; | ||
62 | static struct core_info core_info; | ||
63 | static int machine_has_topology; | ||
64 | static int machine_has_topology_irq; | ||
65 | static struct timer_list topology_timer; | ||
66 | static void set_topology_timer(void); | ||
67 | static DECLARE_WORK(topology_work, topology_work_fn); | ||
68 | |||
69 | cpumask_t cpu_coregroup_map(unsigned int cpu) | ||
70 | { | ||
71 | struct core_info *core = &core_info; | ||
72 | cpumask_t mask; | ||
73 | |||
74 | cpus_clear(mask); | ||
75 | if (!machine_has_topology) | ||
76 | return cpu_present_map; | ||
77 | mutex_lock(&smp_cpu_state_mutex); | ||
78 | while (core) { | ||
79 | if (cpu_isset(cpu, core->mask)) { | ||
80 | mask = core->mask; | ||
81 | break; | ||
82 | } | ||
83 | core = core->next; | ||
84 | } | ||
85 | mutex_unlock(&smp_cpu_state_mutex); | ||
86 | if (cpus_empty(mask)) | ||
87 | mask = cpumask_of_cpu(cpu); | ||
88 | return mask; | ||
89 | } | ||
90 | |||
91 | static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core) | ||
92 | { | ||
93 | unsigned int cpu; | ||
94 | |||
95 | for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS); | ||
96 | cpu < CPU_BITS; | ||
97 | cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1)) | ||
98 | { | ||
99 | unsigned int rcpu, lcpu; | ||
100 | |||
101 | rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin; | ||
102 | for_each_present_cpu(lcpu) { | ||
103 | if (__cpu_logical_map[lcpu] == rcpu) { | ||
104 | cpu_set(lcpu, core->mask); | ||
105 | smp_cpu_polarization[lcpu] = tl_cpu->pp; | ||
106 | } | ||
107 | } | ||
108 | } | ||
109 | } | ||
110 | |||
111 | static void clear_cores(void) | ||
112 | { | ||
113 | struct core_info *core = &core_info; | ||
114 | |||
115 | while (core) { | ||
116 | cpus_clear(core->mask); | ||
117 | core = core->next; | ||
118 | } | ||
119 | } | ||
120 | |||
121 | static union tl_entry *next_tle(union tl_entry *tle) | ||
122 | { | ||
123 | if (tle->nl) | ||
124 | return (union tl_entry *)((struct tl_container *)tle + 1); | ||
125 | else | ||
126 | return (union tl_entry *)((struct tl_cpu *)tle + 1); | ||
127 | } | ||
128 | |||
129 | static void tl_to_cores(struct tl_info *info) | ||
130 | { | ||
131 | union tl_entry *tle, *end; | ||
132 | struct core_info *core = &core_info; | ||
133 | |||
134 | mutex_lock(&smp_cpu_state_mutex); | ||
135 | clear_cores(); | ||
136 | tle = info->tle; | ||
137 | end = (union tl_entry *)((unsigned long)info + info->length); | ||
138 | while (tle < end) { | ||
139 | switch (tle->nl) { | ||
140 | case 5: | ||
141 | case 4: | ||
142 | case 3: | ||
143 | case 2: | ||
144 | break; | ||
145 | case 1: | ||
146 | core = core->next; | ||
147 | break; | ||
148 | case 0: | ||
149 | add_cpus_to_core(&tle->cpu, core); | ||
150 | break; | ||
151 | default: | ||
152 | clear_cores(); | ||
153 | machine_has_topology = 0; | ||
154 | return; | ||
155 | } | ||
156 | tle = next_tle(tle); | ||
157 | } | ||
158 | mutex_unlock(&smp_cpu_state_mutex); | ||
159 | } | ||
160 | |||
161 | static void topology_update_polarization_simple(void) | ||
162 | { | ||
163 | int cpu; | ||
164 | |||
165 | mutex_lock(&smp_cpu_state_mutex); | ||
166 | for_each_present_cpu(cpu) | ||
167 | smp_cpu_polarization[cpu] = POLARIZATION_HRZ; | ||
168 | mutex_unlock(&smp_cpu_state_mutex); | ||
169 | } | ||
170 | |||
171 | static int ptf(unsigned long fc) | ||
172 | { | ||
173 | int rc; | ||
174 | |||
175 | asm volatile( | ||
176 | " .insn rre,0xb9a20000,%1,%1\n" | ||
177 | " ipm %0\n" | ||
178 | " srl %0,28\n" | ||
179 | : "=d" (rc) | ||
180 | : "d" (fc) : "cc"); | ||
181 | return rc; | ||
182 | } | ||
183 | |||
184 | int topology_set_cpu_management(int fc) | ||
185 | { | ||
186 | int cpu; | ||
187 | int rc; | ||
188 | |||
189 | if (!machine_has_topology) | ||
190 | return -EOPNOTSUPP; | ||
191 | if (fc) | ||
192 | rc = ptf(PTF_VERTICAL); | ||
193 | else | ||
194 | rc = ptf(PTF_HORIZONTAL); | ||
195 | if (rc) | ||
196 | return -EBUSY; | ||
197 | for_each_present_cpu(cpu) | ||
198 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; | ||
199 | return rc; | ||
200 | } | ||
201 | |||
202 | void arch_update_cpu_topology(void) | ||
203 | { | ||
204 | struct tl_info *info = tl_info; | ||
205 | struct sys_device *sysdev; | ||
206 | int cpu; | ||
207 | |||
208 | if (!machine_has_topology) { | ||
209 | topology_update_polarization_simple(); | ||
210 | return; | ||
211 | } | ||
212 | stsi(info, 15, 1, 2); | ||
213 | tl_to_cores(info); | ||
214 | for_each_online_cpu(cpu) { | ||
215 | sysdev = get_cpu_sysdev(cpu); | ||
216 | kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); | ||
217 | } | ||
218 | } | ||
219 | |||
220 | static void topology_work_fn(struct work_struct *work) | ||
221 | { | ||
222 | arch_reinit_sched_domains(); | ||
223 | } | ||
224 | |||
225 | void topology_schedule_update(void) | ||
226 | { | ||
227 | schedule_work(&topology_work); | ||
228 | } | ||
229 | |||
230 | static void topology_timer_fn(unsigned long ignored) | ||
231 | { | ||
232 | if (ptf(PTF_CHECK)) | ||
233 | topology_schedule_update(); | ||
234 | set_topology_timer(); | ||
235 | } | ||
236 | |||
237 | static void set_topology_timer(void) | ||
238 | { | ||
239 | topology_timer.function = topology_timer_fn; | ||
240 | topology_timer.data = 0; | ||
241 | topology_timer.expires = jiffies + 60 * HZ; | ||
242 | add_timer(&topology_timer); | ||
243 | } | ||
244 | |||
245 | static void topology_interrupt(__u16 code) | ||
246 | { | ||
247 | schedule_work(&topology_work); | ||
248 | } | ||
249 | |||
250 | static int __init init_topology_update(void) | ||
251 | { | ||
252 | int rc; | ||
253 | |||
254 | if (!machine_has_topology) { | ||
255 | topology_update_polarization_simple(); | ||
256 | return 0; | ||
257 | } | ||
258 | init_timer_deferrable(&topology_timer); | ||
259 | if (machine_has_topology_irq) { | ||
260 | rc = register_external_interrupt(0x2005, topology_interrupt); | ||
261 | if (rc) | ||
262 | return rc; | ||
263 | ctl_set_bit(0, 8); | ||
264 | } | ||
265 | else | ||
266 | set_topology_timer(); | ||
267 | return 0; | ||
268 | } | ||
269 | __initcall(init_topology_update); | ||
270 | |||
271 | void __init s390_init_cpu_topology(void) | ||
272 | { | ||
273 | unsigned long long facility_bits; | ||
274 | struct tl_info *info; | ||
275 | struct core_info *core; | ||
276 | int nr_cores; | ||
277 | int i; | ||
278 | |||
279 | if (stfle(&facility_bits, 1) <= 0) | ||
280 | return; | ||
281 | if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61))) | ||
282 | return; | ||
283 | machine_has_topology = 1; | ||
284 | |||
285 | if (facility_bits & (1ULL << 51)) | ||
286 | machine_has_topology_irq = 1; | ||
287 | |||
288 | tl_info = alloc_bootmem_pages(PAGE_SIZE); | ||
289 | if (!tl_info) | ||
290 | goto error; | ||
291 | info = tl_info; | ||
292 | stsi(info, 15, 1, 2); | ||
293 | |||
294 | nr_cores = info->mag[NR_MAG - 2]; | ||
295 | for (i = 0; i < info->mnest - 2; i++) | ||
296 | nr_cores *= info->mag[NR_MAG - 3 - i]; | ||
297 | |||
298 | printk(KERN_INFO "CPU topology:"); | ||
299 | for (i = 0; i < NR_MAG; i++) | ||
300 | printk(" %d", info->mag[i]); | ||
301 | printk(" / %d\n", info->mnest); | ||
302 | |||
303 | core = &core_info; | ||
304 | for (i = 0; i < nr_cores; i++) { | ||
305 | core->next = alloc_bootmem(sizeof(struct core_info)); | ||
306 | core = core->next; | ||
307 | if (!core) | ||
308 | goto error; | ||
309 | } | ||
310 | return; | ||
311 | error: | ||
312 | machine_has_topology = 0; | ||
313 | machine_has_topology_irq = 0; | ||
314 | } | ||
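
In s390_init_cpu_topology() above, the number of core containers to allocate is the product of the STSI magnitude values above the CPU level. A worked example with hypothetical STSI 15.1.2 results, reading the magnitudes from the innermost level outward:

    /* Suppose stsi(info, 15, 1, 2) reports mnest = 3 and
     * mag = { 0, 0, 0, 4, 2, 8 }: 8 CPUs per core, 2 cores per book,
     * 4 books. Then:
     *   nr_cores  = mag[NR_MAG - 2]              = 2
     *   i = 0:      nr_cores *= mag[NR_MAG - 3]  = 2 * 4 = 8
     * so eight core_info nodes are chained onto the list, one per
     * core the machine can report.
     */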
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 60f728aeaf12..57b607b61100 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c | |||
@@ -42,11 +42,8 @@ | |||
42 | #include <asm/s390_ext.h> | 42 | #include <asm/s390_ext.h> |
43 | #include <asm/lowcore.h> | 43 | #include <asm/lowcore.h> |
44 | #include <asm/debug.h> | 44 | #include <asm/debug.h> |
45 | #include "entry.h" | ||
45 | 46 | ||
46 | /* Called from entry.S only */ | ||
47 | extern void handle_per_exception(struct pt_regs *regs); | ||
48 | |||
49 | typedef void pgm_check_handler_t(struct pt_regs *, long); | ||
50 | pgm_check_handler_t *pgm_check_table[128]; | 47 | pgm_check_handler_t *pgm_check_table[128]; |
51 | 48 | ||
52 | #ifdef CONFIG_SYSCTL | 49 | #ifdef CONFIG_SYSCTL |
@@ -59,7 +56,6 @@ int sysctl_userprocess_debug = 0; | |||
59 | 56 | ||
60 | extern pgm_check_handler_t do_protection_exception; | 57 | extern pgm_check_handler_t do_protection_exception; |
61 | extern pgm_check_handler_t do_dat_exception; | 58 | extern pgm_check_handler_t do_dat_exception; |
62 | extern pgm_check_handler_t do_monitor_call; | ||
63 | extern pgm_check_handler_t do_asce_exception; | 59 | extern pgm_check_handler_t do_asce_exception; |
64 | 60 | ||
65 | #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) | 61 | #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) |
@@ -138,7 +134,6 @@ void show_trace(struct task_struct *task, unsigned long *stack) | |||
138 | else | 134 | else |
139 | __show_trace(sp, S390_lowcore.thread_info, | 135 | __show_trace(sp, S390_lowcore.thread_info, |
140 | S390_lowcore.thread_info + THREAD_SIZE); | 136 | S390_lowcore.thread_info + THREAD_SIZE); |
141 | printk("\n"); | ||
142 | if (!task) | 137 | if (!task) |
143 | task = current; | 138 | task = current; |
144 | debug_show_held_locks(task); | 139 | debug_show_held_locks(task); |
@@ -166,6 +161,15 @@ void show_stack(struct task_struct *task, unsigned long *sp) | |||
166 | show_trace(task, sp); | 161 | show_trace(task, sp); |
167 | } | 162 | } |
168 | 163 | ||
164 | #ifdef CONFIG_64BIT | ||
165 | void show_last_breaking_event(struct pt_regs *regs) | ||
166 | { | ||
167 | printk("Last Breaking-Event-Address:\n"); | ||
168 | printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN); | ||
169 | print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN); | ||
170 | } | ||
171 | #endif | ||
172 | |||
169 | /* | 173 | /* |
170 | * The architecture-independent dump_stack generator | 174 | * The architecture-independent dump_stack generator |
171 | */ | 175 | */ |
@@ -739,6 +743,5 @@ void __init trap_init(void) | |||
739 | pgm_check_table[0x15] = &operand_exception; | 743 | pgm_check_table[0x15] = &operand_exception; |
740 | pgm_check_table[0x1C] = &space_switch_exception; | 744 | pgm_check_table[0x1C] = &space_switch_exception; |
741 | pgm_check_table[0x1D] = &hfp_sqrt_exception; | 745 | pgm_check_table[0x1D] = &hfp_sqrt_exception; |
742 | pgm_check_table[0x40] = &do_monitor_call; | ||
743 | pfault_irq_init(); | 746 | pfault_irq_init(); |
744 | } | 747 | } |
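
traps.c now takes its prototypes from the new entry.h instead of ad-hoc extern declarations, and monitor-call handling (program-interruption code 0x40) leaves the table. For context, a hedged sketch of how a pgm_check_table entry is consumed (a simplified, hypothetical caller; the real dispatch lives in entry.S):

    void do_program_check(struct pt_regs *regs, long int_code)
    {
            pgm_check_handler_t *handler;

            /* the low 7 bits of the interruption code index the
             * 128-entry handler table filled in by trap_init() */
            handler = pgm_check_table[int_code & 0x7f];
            handler(regs, int_code);
    }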
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index 70f2a862b670..eae21a8ac72d 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c | |||
@@ -34,7 +34,7 @@ void __delay(unsigned long loops) | |||
34 | */ | 34 | */ |
35 | void __udelay(unsigned long usecs) | 35 | void __udelay(unsigned long usecs) |
36 | { | 36 | { |
37 | u64 end, time, jiffy_timer = 0; | 37 | u64 end, time, old_cc = 0; |
38 | unsigned long flags, cr0, mask, dummy; | 38 | unsigned long flags, cr0, mask, dummy; |
39 | int irq_context; | 39 | int irq_context; |
40 | 40 | ||
@@ -43,8 +43,8 @@ void __udelay(unsigned long usecs) | |||
43 | local_bh_disable(); | 43 | local_bh_disable(); |
44 | local_irq_save(flags); | 44 | local_irq_save(flags); |
45 | if (raw_irqs_disabled_flags(flags)) { | 45 | if (raw_irqs_disabled_flags(flags)) { |
46 | jiffy_timer = S390_lowcore.jiffy_timer; | 46 | old_cc = S390_lowcore.clock_comparator; |
47 | S390_lowcore.jiffy_timer = -1ULL - (4096 << 12); | 47 | S390_lowcore.clock_comparator = -1ULL; |
48 | __ctl_store(cr0, 0, 0); | 48 | __ctl_store(cr0, 0, 0); |
49 | dummy = (cr0 & 0xffff00e0) | 0x00000800; | 49 | dummy = (cr0 & 0xffff00e0) | 0x00000800; |
50 | __ctl_load(dummy , 0, 0); | 50 | __ctl_load(dummy , 0, 0); |
@@ -55,8 +55,8 @@ void __udelay(unsigned long usecs) | |||
55 | 55 | ||
56 | end = get_clock() + ((u64) usecs << 12); | 56 | end = get_clock() + ((u64) usecs << 12); |
57 | do { | 57 | do { |
58 | time = end < S390_lowcore.jiffy_timer ? | 58 | time = end < S390_lowcore.clock_comparator ? |
59 | end : S390_lowcore.jiffy_timer; | 59 | end : S390_lowcore.clock_comparator; |
60 | set_clock_comparator(time); | 60 | set_clock_comparator(time); |
61 | trace_hardirqs_on(); | 61 | trace_hardirqs_on(); |
62 | __load_psw_mask(mask); | 62 | __load_psw_mask(mask); |
@@ -65,10 +65,10 @@ void __udelay(unsigned long usecs) | |||
65 | 65 | ||
66 | if (raw_irqs_disabled_flags(flags)) { | 66 | if (raw_irqs_disabled_flags(flags)) { |
67 | __ctl_load(cr0, 0, 0); | 67 | __ctl_load(cr0, 0, 0); |
68 | S390_lowcore.jiffy_timer = jiffy_timer; | 68 | S390_lowcore.clock_comparator = old_cc; |
69 | } | 69 | } |
70 | if (!irq_context) | 70 | if (!irq_context) |
71 | _local_bh_enable(); | 71 | _local_bh_enable(); |
72 | set_clock_comparator(S390_lowcore.jiffy_timer); | 72 | set_clock_comparator(S390_lowcore.clock_comparator); |
73 | local_irq_restore(flags); | 73 | local_irq_restore(flags); |
74 | } | 74 | } |
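
The delay code switches from the old jiffy_timer lowcore field to the renamed clock_comparator, keeping the same park/wait/restore shape. A condensed sketch of that shape for the interrupts-disabled case (assumes the s390 TOD helpers used in the hunk; one microsecond is 4096 TOD-clock units, hence the << 12):

    u64 end, saved_cc;

    saved_cc = S390_lowcore.clock_comparator;   /* park pending timer */
    S390_lowcore.clock_comparator = -1ULL;      /* "nothing pending" */

    end = get_clock() + ((u64) usecs << 12);    /* usecs -> TOD units */
    do {
            set_clock_comparator(end);
            /* enabled-wait PSW loaded here; the CPU sleeps until the
             * comparator fires or another interrupt arrives */
    } while (get_clock() < end);

    S390_lowcore.clock_comparator = saved_cc;   /* restore */
    set_clock_comparator(saved_cc);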
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 5efdfe9f5e76..d66215b0fde9 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c | |||
@@ -302,6 +302,10 @@ static size_t copy_in_user_pt(size_t n, void __user *to, | |||
302 | pte_t *pte_from, *pte_to; | 302 | pte_t *pte_from, *pte_to; |
303 | int write_user; | 303 | int write_user; |
304 | 304 | ||
305 | if (segment_eq(get_fs(), KERNEL_DS)) { | ||
306 | memcpy((void __force *) to, (void __force *) from, n); | ||
307 | return 0; | ||
308 | } | ||
305 | done = 0; | 309 | done = 0; |
306 | retry: | 310 | retry: |
307 | spin_lock(&mm->page_table_lock); | 311 | spin_lock(&mm->page_table_lock); |
@@ -361,18 +365,10 @@ fault: | |||
361 | : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ | 365 | : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ |
362 | "m" (*uaddr) : "cc" ); | 366 | "m" (*uaddr) : "cc" ); |
363 | 367 | ||
364 | int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) | 368 | static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) |
365 | { | 369 | { |
366 | int oldval = 0, newval, ret; | 370 | int oldval = 0, newval, ret; |
367 | 371 | ||
368 | spin_lock(¤t->mm->page_table_lock); | ||
369 | uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); | ||
370 | if (!uaddr) { | ||
371 | spin_unlock(¤t->mm->page_table_lock); | ||
372 | return -EFAULT; | ||
373 | } | ||
374 | get_page(virt_to_page(uaddr)); | ||
375 | spin_unlock(¤t->mm->page_table_lock); | ||
376 | switch (op) { | 372 | switch (op) { |
377 | case FUTEX_OP_SET: | 373 | case FUTEX_OP_SET: |
378 | __futex_atomic_op("lr %2,%5\n", | 374 | __futex_atomic_op("lr %2,%5\n", |
@@ -397,17 +393,17 @@ int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) | |||
397 | default: | 393 | default: |
398 | ret = -ENOSYS; | 394 | ret = -ENOSYS; |
399 | } | 395 | } |
400 | put_page(virt_to_page(uaddr)); | 396 | if (ret == 0) |
401 | *old = oldval; | 397 | *old = oldval; |
402 | return ret; | 398 | return ret; |
403 | } | 399 | } |
404 | 400 | ||
405 | int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) | 401 | int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) |
406 | { | 402 | { |
407 | int ret; | 403 | int ret; |
408 | 404 | ||
409 | if (!current->mm) | 405 | if (segment_eq(get_fs(), KERNEL_DS)) |
410 | return -EFAULT; | 406 | return __futex_atomic_op_pt(op, uaddr, oparg, old); |
411 | spin_lock(¤t->mm->page_table_lock); | 407 | spin_lock(¤t->mm->page_table_lock); |
412 | uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); | 408 | uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); |
413 | if (!uaddr) { | 409 | if (!uaddr) { |
@@ -416,13 +412,40 @@ int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) | |||
416 | } | 412 | } |
417 | get_page(virt_to_page(uaddr)); | 413 | get_page(virt_to_page(uaddr)); |
418 | spin_unlock(¤t->mm->page_table_lock); | 414 | spin_unlock(¤t->mm->page_table_lock); |
419 | asm volatile(" cs %1,%4,0(%5)\n" | 415 | ret = __futex_atomic_op_pt(op, uaddr, oparg, old); |
420 | "0: lr %0,%1\n" | 416 | put_page(virt_to_page(uaddr)); |
421 | "1:\n" | 417 | return ret; |
422 | EX_TABLE(0b,1b) | 418 | } |
419 | |||
420 | static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) | ||
421 | { | ||
422 | int ret; | ||
423 | |||
424 | asm volatile("0: cs %1,%4,0(%5)\n" | ||
425 | "1: lr %0,%1\n" | ||
426 | "2:\n" | ||
427 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) | ||
423 | : "=d" (ret), "+d" (oldval), "=m" (*uaddr) | 428 | : "=d" (ret), "+d" (oldval), "=m" (*uaddr) |
424 | : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) | 429 | : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) |
425 | : "cc", "memory" ); | 430 | : "cc", "memory" ); |
431 | return ret; | ||
432 | } | ||
433 | |||
434 | int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) | ||
435 | { | ||
436 | int ret; | ||
437 | |||
438 | if (segment_eq(get_fs(), KERNEL_DS)) | ||
439 | return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval); | ||
440 | spin_lock(¤t->mm->page_table_lock); | ||
441 | uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); | ||
442 | if (!uaddr) { | ||
443 | spin_unlock(¤t->mm->page_table_lock); | ||
444 | return -EFAULT; | ||
445 | } | ||
446 | get_page(virt_to_page(uaddr)); | ||
447 | spin_unlock(¤t->mm->page_table_lock); | ||
448 | ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval); | ||
426 | put_page(virt_to_page(uaddr)); | 449 | put_page(virt_to_page(uaddr)); |
427 | return ret; | 450 | return ret; |
428 | } | 451 | } |
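
Both futex helpers in the hunk above end up with the same shape: a KERNEL_DS fast path plus a shared inner routine, so the page-table translation and page pinning happen in exactly one place. A hedged sketch of that shape (translate_user_ptr() and atomic_op_on() are hypothetical stand-ins for __dat_user_addr() and the cs-loop bodies):

    static int do_op(int __user *uaddr, int oparg, int *old)
    {
            int *kaddr;
            int ret;

            if (segment_eq(get_fs(), KERNEL_DS))
                    /* kernel address space: use the pointer directly */
                    return atomic_op_on((int __force *) uaddr, oparg, old);

            kaddr = translate_user_ptr(uaddr);  /* walk the page tables */
            if (!kaddr)
                    return -EFAULT;
            get_page(virt_to_page(kaddr));      /* pin while operating */
            ret = atomic_op_on(kaddr, oparg, old);
            put_page(virt_to_page(kaddr));
            return ret;
    }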
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 880b0ebf894b..ed2af0a3303b 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c | |||
@@ -289,22 +289,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long | |||
289 | 289 | ||
290 | rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); | 290 | rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); |
291 | 291 | ||
292 | switch (rc) { | 292 | if (rc) |
293 | case 0: | ||
294 | break; | ||
295 | case -ENOSPC: | ||
296 | PRINT_WARN("segment_load: not loading segment %s - overlaps " | ||
297 | "storage/segment\n", name); | ||
298 | goto out_free; | ||
299 | case -ERANGE: | ||
300 | PRINT_WARN("segment_load: not loading segment %s - exceeds " | ||
301 | "kernel mapping range\n", name); | ||
302 | goto out_free; | ||
303 | default: | ||
304 | PRINT_WARN("segment_load: not loading segment %s (rc: %d)\n", | ||
305 | name, rc); | ||
306 | goto out_free; | 293 | goto out_free; |
307 | } | ||
308 | 294 | ||
309 | seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL); | 295 | seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL); |
310 | if (seg->res == NULL) { | 296 | if (seg->res == NULL) { |
@@ -582,8 +568,59 @@ out: | |||
582 | mutex_unlock(&dcss_lock); | 568 | mutex_unlock(&dcss_lock); |
583 | } | 569 | } |
584 | 570 | ||
571 | /* | ||
572 | * print appropriate error message for segment_load()/segment_type() | ||
573 | * return code | ||
574 | */ | ||
575 | void segment_warning(int rc, char *seg_name) | ||
576 | { | ||
577 | switch (rc) { | ||
578 | case -ENOENT: | ||
579 | PRINT_WARN("cannot load/query segment %s, " | ||
580 | "does not exist\n", seg_name); | ||
581 | break; | ||
582 | case -ENOSYS: | ||
583 | PRINT_WARN("cannot load/query segment %s, " | ||
584 | "not running on VM\n", seg_name); | ||
585 | break; | ||
586 | case -EIO: | ||
587 | PRINT_WARN("cannot load/query segment %s, " | ||
588 | "hardware error\n", seg_name); | ||
589 | break; | ||
590 | case -ENOTSUPP: | ||
591 | PRINT_WARN("cannot load/query segment %s, " | ||
592 | "is a multi-part segment\n", seg_name); | ||
593 | break; | ||
594 | case -ENOSPC: | ||
595 | PRINT_WARN("cannot load/query segment %s, " | ||
596 | "overlaps with storage\n", seg_name); | ||
597 | break; | ||
598 | case -EBUSY: | ||
599 | PRINT_WARN("cannot load/query segment %s, " | ||
600 | "overlaps with already loaded dcss\n", seg_name); | ||
601 | break; | ||
602 | case -EPERM: | ||
603 | PRINT_WARN("cannot load/query segment %s, " | ||
604 | "already loaded in incompatible mode\n", seg_name); | ||
605 | break; | ||
606 | case -ENOMEM: | ||
607 | PRINT_WARN("cannot load/query segment %s, " | ||
608 | "out of memory\n", seg_name); | ||
609 | break; | ||
610 | case -ERANGE: | ||
611 | PRINT_WARN("cannot load/query segment %s, " | ||
612 | "exceeds kernel mapping range\n", seg_name); | ||
613 | break; | ||
614 | default: | ||
615 | PRINT_WARN("cannot load/query segment %s, " | ||
616 | "return value %i\n", seg_name, rc); | ||
617 | break; | ||
618 | } | ||
619 | } | ||
620 | |||
585 | EXPORT_SYMBOL(segment_load); | 621 | EXPORT_SYMBOL(segment_load); |
586 | EXPORT_SYMBOL(segment_unload); | 622 | EXPORT_SYMBOL(segment_unload); |
587 | EXPORT_SYMBOL(segment_save); | 623 | EXPORT_SYMBOL(segment_save); |
588 | EXPORT_SYMBOL(segment_type); | 624 | EXPORT_SYMBOL(segment_type); |
589 | EXPORT_SYMBOL(segment_modify_shared); | 625 | EXPORT_SYMBOL(segment_modify_shared); |
626 | EXPORT_SYMBOL(segment_warning); | ||
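
segment_warning() folds the per-caller switch statements (such as the one deleted from __segment_load() above) into a single shared message table, now exported for the DCSS users. A hypothetical call site:

    rc = segment_load(name, SEGMENT_SHARED, &start, &end);
    if (rc < 0) {
            segment_warning(rc, name);   /* one message per errno */
            return rc;
    }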
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index ed13d429a487..2650f46001d0 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -28,11 +28,11 @@ | |||
28 | #include <linux/hardirq.h> | 28 | #include <linux/hardirq.h> |
29 | #include <linux/kprobes.h> | 29 | #include <linux/kprobes.h> |
30 | #include <linux/uaccess.h> | 30 | #include <linux/uaccess.h> |
31 | |||
32 | #include <asm/system.h> | 31 | #include <asm/system.h> |
33 | #include <asm/pgtable.h> | 32 | #include <asm/pgtable.h> |
34 | #include <asm/s390_ext.h> | 33 | #include <asm/s390_ext.h> |
35 | #include <asm/mmu_context.h> | 34 | #include <asm/mmu_context.h> |
35 | #include "../kernel/entry.h" | ||
36 | 36 | ||
37 | #ifndef CONFIG_64BIT | 37 | #ifndef CONFIG_64BIT |
38 | #define __FAIL_ADDR_MASK 0x7ffff000 | 38 | #define __FAIL_ADDR_MASK 0x7ffff000 |
@@ -50,8 +50,6 @@ | |||
50 | extern int sysctl_userprocess_debug; | 50 | extern int sysctl_userprocess_debug; |
51 | #endif | 51 | #endif |
52 | 52 | ||
53 | extern void die(const char *,struct pt_regs *,long); | ||
54 | |||
55 | #ifdef CONFIG_KPROBES | 53 | #ifdef CONFIG_KPROBES |
56 | static inline int notify_page_fault(struct pt_regs *regs, long err) | 54 | static inline int notify_page_fault(struct pt_regs *regs, long err) |
57 | { | 55 | { |
@@ -245,11 +243,6 @@ static void do_sigbus(struct pt_regs *regs, unsigned long error_code, | |||
245 | } | 243 | } |
246 | 244 | ||
247 | #ifdef CONFIG_S390_EXEC_PROTECT | 245 | #ifdef CONFIG_S390_EXEC_PROTECT |
248 | extern long sys_sigreturn(struct pt_regs *regs); | ||
249 | extern long sys_rt_sigreturn(struct pt_regs *regs); | ||
250 | extern long sys32_sigreturn(struct pt_regs *regs); | ||
251 | extern long sys32_rt_sigreturn(struct pt_regs *regs); | ||
252 | |||
253 | static int signal_return(struct mm_struct *mm, struct pt_regs *regs, | 246 | static int signal_return(struct mm_struct *mm, struct pt_regs *regs, |
254 | unsigned long address, unsigned long error_code) | 247 | unsigned long address, unsigned long error_code) |
255 | { | 248 | { |
@@ -270,15 +263,15 @@ static int signal_return(struct mm_struct *mm, struct pt_regs *regs, | |||
270 | #ifdef CONFIG_COMPAT | 263 | #ifdef CONFIG_COMPAT |
271 | compat = test_tsk_thread_flag(current, TIF_31BIT); | 264 | compat = test_tsk_thread_flag(current, TIF_31BIT); |
272 | if (compat && instruction == 0x0a77) | 265 | if (compat && instruction == 0x0a77) |
273 | sys32_sigreturn(regs); | 266 | sys32_sigreturn(); |
274 | else if (compat && instruction == 0x0aad) | 267 | else if (compat && instruction == 0x0aad) |
275 | sys32_rt_sigreturn(regs); | 268 | sys32_rt_sigreturn(); |
276 | else | 269 | else |
277 | #endif | 270 | #endif |
278 | if (instruction == 0x0a77) | 271 | if (instruction == 0x0a77) |
279 | sys_sigreturn(regs); | 272 | sys_sigreturn(); |
280 | else if (instruction == 0x0aad) | 273 | else if (instruction == 0x0aad) |
281 | sys_rt_sigreturn(regs); | 274 | sys_rt_sigreturn(); |
282 | else { | 275 | else { |
283 | current->thread.prot_addr = address; | 276 | current->thread.prot_addr = address; |
284 | current->thread.trap_no = error_code; | 277 | current->thread.trap_no = error_code; |
@@ -424,7 +417,7 @@ no_context: | |||
424 | } | 417 | } |
425 | 418 | ||
426 | void __kprobes do_protection_exception(struct pt_regs *regs, | 419 | void __kprobes do_protection_exception(struct pt_regs *regs, |
427 | unsigned long error_code) | 420 | long error_code) |
428 | { | 421 | { |
429 | /* Protection exception is supressing, decrement psw address. */ | 422 | /* Protection exception is supressing, decrement psw address. */ |
430 | regs->psw.addr -= (error_code >> 16); | 423 | regs->psw.addr -= (error_code >> 16); |
@@ -440,7 +433,7 @@ void __kprobes do_protection_exception(struct pt_regs *regs, | |||
440 | do_exception(regs, 4, 1); | 433 | do_exception(regs, 4, 1); |
441 | } | 434 | } |
442 | 435 | ||
443 | void __kprobes do_dat_exception(struct pt_regs *regs, unsigned long error_code) | 436 | void __kprobes do_dat_exception(struct pt_regs *regs, long error_code) |
444 | { | 437 | { |
445 | do_exception(regs, error_code & 0xff, 0); | 438 | do_exception(regs, error_code & 0xff, 0); |
446 | } | 439 | } |
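
The sigreturn fixups above also drop the pt_regs argument from the syscall wrappers. As for the magic numbers: 0x0a is the s390 "svc" opcode byte and the following byte is the system-call number, so the two instructions recognized as a legitimate signal return are, annotated:

    if (instruction == 0x0a77)        /* svc 119 == __NR_sigreturn */
            sys_sigreturn();
    else if (instruction == 0x0aad)   /* svc 173 == __NR_rt_sigreturn */
            sys_rt_sigreturn();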
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 8053245fe259..202c952a29b4 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -50,7 +50,6 @@ void show_mem(void) | |||
50 | 50 | ||
51 | printk("Mem-info:\n"); | 51 | printk("Mem-info:\n"); |
52 | show_free_areas(); | 52 | show_free_areas(); |
53 | printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10)); | ||
54 | i = max_mapnr; | 53 | i = max_mapnr; |
55 | while (i-- > 0) { | 54 | while (i-- > 0) { |
56 | if (!pfn_valid(i)) | 55 | if (!pfn_valid(i)) |
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 index 62bf373266f7..4bbdce36b92b 100644 --- a/arch/sh/kernel/Makefile_32 +++ b/arch/sh/kernel/Makefile_32 | |||
@@ -5,7 +5,7 @@ | |||
5 | extra-y := head_32.o init_task.o vmlinux.lds | 5 | extra-y := head_32.o init_task.o vmlinux.lds |
6 | 6 | ||
7 | obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \ | 7 | obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \ |
8 | ptrace_32.o semaphore.o setup.o signal_32.o sys_sh.o sys_sh32.o \ | 8 | ptrace_32.o setup.o signal_32.o sys_sh.o sys_sh32.o \ |
9 | syscalls_32.o time_32.o topology.o traps.o traps_32.o | 9 | syscalls_32.o time_32.o topology.o traps.o traps_32.o |
10 | 10 | ||
11 | obj-y += cpu/ timers/ | 11 | obj-y += cpu/ timers/ |
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64 index e01283d49cbf..6edf53b93d94 100644 --- a/arch/sh/kernel/Makefile_64 +++ b/arch/sh/kernel/Makefile_64 | |||
@@ -1,7 +1,7 @@ | |||
1 | extra-y := head_64.o init_task.o vmlinux.lds | 1 | extra-y := head_64.o init_task.o vmlinux.lds |
2 | 2 | ||
3 | obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_64.o \ | 3 | obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_64.o \ |
4 | ptrace_64.o semaphore.o setup.o signal_64.o sys_sh.o sys_sh64.o \ | 4 | ptrace_64.o setup.o signal_64.o sys_sh.o sys_sh64.o \ |
5 | syscalls_64.o time_64.o topology.o traps.o traps_64.o | 5 | syscalls_64.o time_64.o topology.o traps.o traps_64.o |
6 | 6 | ||
7 | obj-y += cpu/ timers/ | 7 | obj-y += cpu/ timers/ |
diff --git a/arch/sh/kernel/semaphore.c b/arch/sh/kernel/semaphore.c deleted file mode 100644 index 184119eeae56..000000000000 --- a/arch/sh/kernel/semaphore.c +++ /dev/null | |||
@@ -1,139 +0,0 @@ | |||
1 | /* | ||
2 | * Just taken from alpha implementation. | ||
3 | * This can't work well, perhaps. | ||
4 | */ | ||
5 | /* | ||
6 | * Generic semaphore code. Buyer beware. Do your own | ||
7 | * specific changes in <asm/semaphore-helper.h> | ||
8 | */ | ||
9 | |||
10 | #include <linux/errno.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/wait.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <asm/semaphore.h> | ||
15 | #include <asm/semaphore-helper.h> | ||
16 | |||
17 | DEFINE_SPINLOCK(semaphore_wake_lock); | ||
18 | |||
19 | /* | ||
20 | * Semaphores are implemented using a two-way counter: | ||
21 | * The "count" variable is decremented for each process | ||
22 | * that tries to sleep, while the "waking" variable is | ||
23 | * incremented when the "up()" code goes to wake up waiting | ||
24 | * processes. | ||
25 | * | ||
26 | * Notably, the inline "up()" and "down()" functions can | ||
27 | * efficiently test if they need to do any extra work (up | ||
28 | * needs to do something only if count was negative before | ||
29 | * the increment operation). | ||
30 | * | ||
31 | * waking_non_zero() (from asm/semaphore.h) must execute | ||
32 | * atomically. | ||
33 | * | ||
34 | * When __up() is called, the count was negative before | ||
35 | * incrementing it, and we need to wake up somebody. | ||
36 | * | ||
37 | * This routine adds one to the count of processes that need to | ||
38 | * wake up and exit. ALL waiting processes actually wake up but | ||
39 | * only the one that gets to the "waking" field first will gate | ||
40 | * through and acquire the semaphore. The others will go back | ||
41 | * to sleep. | ||
42 | * | ||
43 | * Note that these functions are only called when there is | ||
44 | * contention on the lock, and as such all this is the | ||
45 | * "non-critical" part of the whole semaphore business. The | ||
46 | * critical part is the inline stuff in <asm/semaphore.h> | ||
47 | * where we want to avoid any extra jumps and calls. | ||
48 | */ | ||
49 | void __up(struct semaphore *sem) | ||
50 | { | ||
51 | wake_one_more(sem); | ||
52 | wake_up(&sem->wait); | ||
53 | } | ||
54 | |||
55 | /* | ||
56 | * Perform the "down" function. Return zero for semaphore acquired, | ||
57 | * return negative for signalled out of the function. | ||
58 | * | ||
59 | * If called from __down, the return is ignored and the wait loop is | ||
60 | * not interruptible. This means that a task waiting on a semaphore | ||
61 | * using "down()" cannot be killed until someone does an "up()" on | ||
62 | * the semaphore. | ||
63 | * | ||
64 | * If called from __down_interruptible, the return value gets checked | ||
65 | * upon return. If the return value is negative then the task continues | ||
66 | * with the negative value in the return register (it can be tested by | ||
67 | * the caller). | ||
68 | * | ||
69 | * Either form may be used in conjunction with "up()". | ||
70 | * | ||
71 | */ | ||
72 | |||
73 | #define DOWN_VAR \ | ||
74 | struct task_struct *tsk = current; \ | ||
75 | wait_queue_t wait; \ | ||
76 | init_waitqueue_entry(&wait, tsk); | ||
77 | |||
78 | #define DOWN_HEAD(task_state) \ | ||
79 | \ | ||
80 | \ | ||
81 | tsk->state = (task_state); \ | ||
82 | add_wait_queue(&sem->wait, &wait); \ | ||
83 | \ | ||
84 | /* \ | ||
85 | * Ok, we're set up. sem->count is known to be less than zero \ | ||
86 | * so we must wait. \ | ||
87 | * \ | ||
88 | * We can let go the lock for purposes of waiting. \ | ||
89 | * We re-acquire it after awaking so as to protect \ | ||
90 | * all semaphore operations. \ | ||
91 | * \ | ||
92 | * If "up()" is called before we call waking_non_zero() then \ | ||
93 | * we will catch it right away. If it is called later then \ | ||
94 | * we will have to go through a wakeup cycle to catch it. \ | ||
95 | * \ | ||
96 | * Multiple waiters contend for the semaphore lock to see \ | ||
97 | * who gets to gate through and who has to wait some more. \ | ||
98 | */ \ | ||
99 | for (;;) { | ||
100 | |||
101 | #define DOWN_TAIL(task_state) \ | ||
102 | tsk->state = (task_state); \ | ||
103 | } \ | ||
104 | tsk->state = TASK_RUNNING; \ | ||
105 | remove_wait_queue(&sem->wait, &wait); | ||
106 | |||
107 | void __sched __down(struct semaphore * sem) | ||
108 | { | ||
109 | DOWN_VAR | ||
110 | DOWN_HEAD(TASK_UNINTERRUPTIBLE) | ||
111 | if (waking_non_zero(sem)) | ||
112 | break; | ||
113 | schedule(); | ||
114 | DOWN_TAIL(TASK_UNINTERRUPTIBLE) | ||
115 | } | ||
116 | |||
117 | int __sched __down_interruptible(struct semaphore * sem) | ||
118 | { | ||
119 | int ret = 0; | ||
120 | DOWN_VAR | ||
121 | DOWN_HEAD(TASK_INTERRUPTIBLE) | ||
122 | |||
123 | ret = waking_non_zero_interruptible(sem, tsk); | ||
124 | if (ret) | ||
125 | { | ||
126 | if (ret == 1) | ||
127 | /* ret != 0 only if we get interrupted -arca */ | ||
128 | ret = 0; | ||
129 | break; | ||
130 | } | ||
131 | schedule(); | ||
132 | DOWN_TAIL(TASK_INTERRUPTIBLE) | ||
133 | return ret; | ||
134 | } | ||
135 | |||
136 | int __down_trylock(struct semaphore * sem) | ||
137 | { | ||
138 | return waking_non_zero_trylock(sem); | ||
139 | } | ||
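
This deletion (and the matching ones for sparc, sparc64, and the export lists below) is the point of the series: every architecture's hand-rolled semaphore gives way to one generic, spinlock-based implementation. A minimal sketch of that scheme, not the actual generic source:

    struct semaphore {
            spinlock_t       lock;
            unsigned int     count;
            struct list_head wait_list;   /* sleepers, FIFO order */
    };

    void down(struct semaphore *sem)
    {
            unsigned long flags;

            spin_lock_irqsave(&sem->lock, flags);
            if (likely(sem->count > 0))
                    sem->count--;     /* uncontended: take it and go */
            else
                    __down(sem);      /* enqueue on wait_list and sleep */
            spin_unlock_irqrestore(&sem->lock, flags);
    }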
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c index 45bb333fd9ec..6d405462cee8 100644 --- a/arch/sh/kernel/sh_ksyms_32.c +++ b/arch/sh/kernel/sh_ksyms_32.c | |||
@@ -9,7 +9,6 @@ | |||
9 | #include <linux/pci.h> | 9 | #include <linux/pci.h> |
10 | #include <linux/irq.h> | 10 | #include <linux/irq.h> |
11 | #include <asm/sections.h> | 11 | #include <asm/sections.h> |
12 | #include <asm/semaphore.h> | ||
13 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
14 | #include <asm/uaccess.h> | 13 | #include <asm/uaccess.h> |
15 | #include <asm/checksum.h> | 14 | #include <asm/checksum.h> |
@@ -48,12 +47,6 @@ EXPORT_SYMBOL(__copy_user); | |||
48 | EXPORT_SYMBOL(get_vm_area); | 47 | EXPORT_SYMBOL(get_vm_area); |
49 | #endif | 48 | #endif |
50 | 49 | ||
51 | /* semaphore exports */ | ||
52 | EXPORT_SYMBOL(__up); | ||
53 | EXPORT_SYMBOL(__down); | ||
54 | EXPORT_SYMBOL(__down_interruptible); | ||
55 | EXPORT_SYMBOL(__down_trylock); | ||
56 | |||
57 | EXPORT_SYMBOL(__udelay); | 50 | EXPORT_SYMBOL(__udelay); |
58 | EXPORT_SYMBOL(__ndelay); | 51 | EXPORT_SYMBOL(__ndelay); |
59 | EXPORT_SYMBOL(__const_udelay); | 52 | EXPORT_SYMBOL(__const_udelay); |
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c index b6410ce4bd1d..a310c9707f03 100644 --- a/arch/sh/kernel/sh_ksyms_64.c +++ b/arch/sh/kernel/sh_ksyms_64.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/in6.h> | 16 | #include <linux/in6.h> |
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/screen_info.h> | 18 | #include <linux/screen_info.h> |
19 | #include <asm/semaphore.h> | ||
20 | #include <asm/processor.h> | 19 | #include <asm/processor.h> |
21 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
22 | #include <asm/checksum.h> | 21 | #include <asm/checksum.h> |
@@ -37,9 +36,6 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck); | |||
37 | EXPORT_SYMBOL(screen_info); | 36 | EXPORT_SYMBOL(screen_info); |
38 | #endif | 37 | #endif |
39 | 38 | ||
40 | EXPORT_SYMBOL(__down); | ||
41 | EXPORT_SYMBOL(__down_trylock); | ||
42 | EXPORT_SYMBOL(__up); | ||
43 | EXPORT_SYMBOL(__put_user_asm_l); | 39 | EXPORT_SYMBOL(__put_user_asm_l); |
44 | EXPORT_SYMBOL(__get_user_asm_l); | 40 | EXPORT_SYMBOL(__get_user_asm_l); |
45 | EXPORT_SYMBOL(copy_page); | 41 | EXPORT_SYMBOL(copy_page); |
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index bf1b15d3f6f5..2712bb166f6f 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile | |||
@@ -12,7 +12,7 @@ obj-y := entry.o wof.o wuf.o etrap.o rtrap.o traps.o $(IRQ_OBJS) \ | |||
12 | sys_sparc.o sunos_asm.o systbls.o \ | 12 | sys_sparc.o sunos_asm.o systbls.o \ |
13 | time.o windows.o cpu.o devices.o sclow.o \ | 13 | time.o windows.o cpu.o devices.o sclow.o \ |
14 | tadpole.o tick14.o ptrace.o sys_solaris.o \ | 14 | tadpole.o tick14.o ptrace.o sys_solaris.o \ |
15 | unaligned.o una_asm.o muldiv.o semaphore.o \ | 15 | unaligned.o una_asm.o muldiv.o \ |
16 | prom.o of_device.o devres.o | 16 | prom.o of_device.o devres.o |
17 | 17 | ||
18 | devres-y = ../../../kernel/irq/devres.o | 18 | devres-y = ../../../kernel/irq/devres.o |
diff --git a/arch/sparc/kernel/semaphore.c b/arch/sparc/kernel/semaphore.c deleted file mode 100644 index 0c37c1a7cd7e..000000000000 --- a/arch/sparc/kernel/semaphore.c +++ /dev/null | |||
@@ -1,155 +0,0 @@ | |||
1 | /* $Id: semaphore.c,v 1.7 2001/04/18 21:06:05 davem Exp $ */ | ||
2 | |||
3 | /* sparc32 semaphore implementation, based on i386 version */ | ||
4 | |||
5 | #include <linux/sched.h> | ||
6 | #include <linux/errno.h> | ||
7 | #include <linux/init.h> | ||
8 | |||
9 | #include <asm/semaphore.h> | ||
10 | |||
11 | /* | ||
12 | * Semaphores are implemented using a two-way counter: | ||
13 | * The "count" variable is decremented for each process | ||
14 | * that tries to acquire the semaphore, while the "sleeping" | ||
15 | * variable is a count of such acquires. | ||
16 | * | ||
17 | * Notably, the inline "up()" and "down()" functions can | ||
18 | * efficiently test if they need to do any extra work (up | ||
19 | * needs to do something only if count was negative before | ||
20 | * the increment operation). | ||
21 | * | ||
22 | * "sleeping" and the contention routine ordering is | ||
23 | * protected by the semaphore spinlock. | ||
24 | * | ||
25 | * Note that these functions are only called when there is | ||
26 | * contention on the lock, and as such all this is the | ||
27 | * "non-critical" part of the whole semaphore business. The | ||
28 | * critical part is the inline stuff in <asm/semaphore.h> | ||
29 | * where we want to avoid any extra jumps and calls. | ||
30 | */ | ||
31 | |||
32 | /* | ||
33 | * Logic: | ||
34 | * - only on a boundary condition do we need to care. When we go | ||
35 | * from a negative count to a non-negative, we wake people up. | ||
36 | * - when we go from a non-negative count to a negative do we | ||
37 | * (a) synchronize with the "sleeper" count and (b) make sure | ||
38 | * that we're on the wakeup list before we synchronize so that | ||
39 | * we cannot lose wakeup events. | ||
40 | */ | ||
41 | |||
42 | void __up(struct semaphore *sem) | ||
43 | { | ||
44 | wake_up(&sem->wait); | ||
45 | } | ||
46 | |||
47 | static DEFINE_SPINLOCK(semaphore_lock); | ||
48 | |||
49 | void __sched __down(struct semaphore * sem) | ||
50 | { | ||
51 | struct task_struct *tsk = current; | ||
52 | DECLARE_WAITQUEUE(wait, tsk); | ||
53 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
54 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
55 | |||
56 | spin_lock_irq(&semaphore_lock); | ||
57 | sem->sleepers++; | ||
58 | for (;;) { | ||
59 | int sleepers = sem->sleepers; | ||
60 | |||
61 | /* | ||
62 | * Add "everybody else" into it. They aren't | ||
63 | * playing, because we own the spinlock. | ||
64 | */ | ||
65 | if (!atomic24_add_negative(sleepers - 1, &sem->count)) { | ||
66 | sem->sleepers = 0; | ||
67 | break; | ||
68 | } | ||
69 | sem->sleepers = 1; /* us - see -1 above */ | ||
70 | spin_unlock_irq(&semaphore_lock); | ||
71 | |||
72 | schedule(); | ||
73 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
74 | spin_lock_irq(&semaphore_lock); | ||
75 | } | ||
76 | spin_unlock_irq(&semaphore_lock); | ||
77 | remove_wait_queue(&sem->wait, &wait); | ||
78 | tsk->state = TASK_RUNNING; | ||
79 | wake_up(&sem->wait); | ||
80 | } | ||
81 | |||
82 | int __sched __down_interruptible(struct semaphore * sem) | ||
83 | { | ||
84 | int retval = 0; | ||
85 | struct task_struct *tsk = current; | ||
86 | DECLARE_WAITQUEUE(wait, tsk); | ||
87 | tsk->state = TASK_INTERRUPTIBLE; | ||
88 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
89 | |||
90 | spin_lock_irq(&semaphore_lock); | ||
91 | sem->sleepers ++; | ||
92 | for (;;) { | ||
93 | int sleepers = sem->sleepers; | ||
94 | |||
95 | /* | ||
96 | * With signals pending, this turns into | ||
97 | * the trylock failure case - we won't be | ||
98 | * sleeping, and we can't get the lock as | ||
99 | * it has contention. Just correct the count | ||
100 | * and exit. | ||
101 | */ | ||
102 | if (signal_pending(current)) { | ||
103 | retval = -EINTR; | ||
104 | sem->sleepers = 0; | ||
105 | atomic24_add(sleepers, &sem->count); | ||
106 | break; | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * Add "everybody else" into it. They aren't | ||
111 | * playing, because we own the spinlock. The | ||
112 | * "-1" is because we're still hoping to get | ||
113 | * the lock. | ||
114 | */ | ||
115 | if (!atomic24_add_negative(sleepers - 1, &sem->count)) { | ||
116 | sem->sleepers = 0; | ||
117 | break; | ||
118 | } | ||
119 | sem->sleepers = 1; /* us - see -1 above */ | ||
120 | spin_unlock_irq(&semaphore_lock); | ||
121 | |||
122 | schedule(); | ||
123 | tsk->state = TASK_INTERRUPTIBLE; | ||
124 | spin_lock_irq(&semaphore_lock); | ||
125 | } | ||
126 | spin_unlock_irq(&semaphore_lock); | ||
127 | tsk->state = TASK_RUNNING; | ||
128 | remove_wait_queue(&sem->wait, &wait); | ||
129 | wake_up(&sem->wait); | ||
130 | return retval; | ||
131 | } | ||
132 | |||
133 | /* | ||
134 | * Trylock failed - make sure we correct for | ||
135 | * having decremented the count. | ||
136 | */ | ||
137 | int __down_trylock(struct semaphore * sem) | ||
138 | { | ||
139 | int sleepers; | ||
140 | unsigned long flags; | ||
141 | |||
142 | spin_lock_irqsave(&semaphore_lock, flags); | ||
143 | sleepers = sem->sleepers + 1; | ||
144 | sem->sleepers = 0; | ||
145 | |||
146 | /* | ||
147 | * Add "everybody else" and us into it. They aren't | ||
148 | * playing, because we own the spinlock. | ||
149 | */ | ||
150 | if (!atomic24_add_negative(sleepers, &sem->count)) | ||
151 | wake_up(&sem->wait); | ||
152 | |||
153 | spin_unlock_irqrestore(&semaphore_lock, flags); | ||
154 | return 1; | ||
155 | } | ||
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c index c1025e551650..97b1de0e9094 100644 --- a/arch/sparc/kernel/sparc_ksyms.c +++ b/arch/sparc/kernel/sparc_ksyms.c | |||
@@ -107,11 +107,6 @@ EXPORT_SYMBOL(___rw_read_try); | |||
107 | EXPORT_SYMBOL(___rw_read_exit); | 107 | EXPORT_SYMBOL(___rw_read_exit); |
108 | EXPORT_SYMBOL(___rw_write_enter); | 108 | EXPORT_SYMBOL(___rw_write_enter); |
109 | #endif | 109 | #endif |
110 | /* semaphores */ | ||
111 | EXPORT_SYMBOL(__up); | ||
112 | EXPORT_SYMBOL(__down); | ||
113 | EXPORT_SYMBOL(__down_trylock); | ||
114 | EXPORT_SYMBOL(__down_interruptible); | ||
115 | 110 | ||
116 | EXPORT_SYMBOL(sparc_valid_addr_bitmap); | 111 | EXPORT_SYMBOL(sparc_valid_addr_bitmap); |
117 | EXPORT_SYMBOL(phys_base); | 112 | EXPORT_SYMBOL(phys_base); |
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile index 1bf5b187de49..459462e80a12 100644 --- a/arch/sparc64/kernel/Makefile +++ b/arch/sparc64/kernel/Makefile | |||
@@ -10,7 +10,7 @@ extra-y := head.o init_task.o vmlinux.lds | |||
10 | obj-y := process.o setup.o cpu.o idprom.o \ | 10 | obj-y := process.o setup.o cpu.o idprom.o \ |
11 | traps.o auxio.o una_asm.o sysfs.o iommu.o \ | 11 | traps.o auxio.o una_asm.o sysfs.o iommu.o \ |
12 | irq.o ptrace.o time.o sys_sparc.o signal.o \ | 12 | irq.o ptrace.o time.o sys_sparc.o signal.o \ |
13 | unaligned.o central.o pci.o starfire.o semaphore.o \ | 13 | unaligned.o central.o pci.o starfire.o \ |
14 | power.o sbus.o sparc64_ksyms.o chmc.o \ | 14 | power.o sbus.o sparc64_ksyms.o chmc.o \ |
15 | visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o | 15 | visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o |
16 | 16 | ||
diff --git a/arch/sparc64/kernel/semaphore.c b/arch/sparc64/kernel/semaphore.c deleted file mode 100644 index 9974a6899551..000000000000 --- a/arch/sparc64/kernel/semaphore.c +++ /dev/null | |||
@@ -1,254 +0,0 @@ | |||
1 | /* semaphore.c: Sparc64 semaphore implementation. | ||
2 | * | ||
3 | * This is basically the PPC semaphore scheme ported to use | ||
4 | * the sparc64 atomic instructions, so see the PPC code for | ||
5 | * credits. | ||
6 | */ | ||
7 | |||
8 | #include <linux/sched.h> | ||
9 | #include <linux/errno.h> | ||
10 | #include <linux/init.h> | ||
11 | |||
12 | /* | ||
13 | * Atomically update sem->count. | ||
14 | * This does the equivalent of the following: | ||
15 | * | ||
16 | * old_count = sem->count; | ||
17 | * tmp = MAX(old_count, 0) + incr; | ||
18 | * sem->count = tmp; | ||
19 | * return old_count; | ||
20 | */ | ||
21 | static inline int __sem_update_count(struct semaphore *sem, int incr) | ||
22 | { | ||
23 | int old_count, tmp; | ||
24 | |||
25 | __asm__ __volatile__("\n" | ||
26 | " ! __sem_update_count old_count(%0) tmp(%1) incr(%4) &sem->count(%3)\n" | ||
27 | "1: ldsw [%3], %0\n" | ||
28 | " mov %0, %1\n" | ||
29 | " cmp %0, 0\n" | ||
30 | " movl %%icc, 0, %1\n" | ||
31 | " add %1, %4, %1\n" | ||
32 | " cas [%3], %0, %1\n" | ||
33 | " cmp %0, %1\n" | ||
34 | " membar #StoreLoad | #StoreStore\n" | ||
35 | " bne,pn %%icc, 1b\n" | ||
36 | " nop\n" | ||
37 | : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) | ||
38 | : "r" (&sem->count), "r" (incr), "m" (sem->count) | ||
39 | : "cc"); | ||
40 | |||
41 | return old_count; | ||
42 | } | ||
43 | |||
44 | static void __up(struct semaphore *sem) | ||
45 | { | ||
46 | __sem_update_count(sem, 1); | ||
47 | wake_up(&sem->wait); | ||
48 | } | ||
49 | |||
50 | void up(struct semaphore *sem) | ||
51 | { | ||
52 | /* This atomically does: | ||
53 | * old_val = sem->count; | ||
54 | * new_val = sem->count + 1; | ||
55 | * sem->count = new_val; | ||
56 | * if (old_val < 0) | ||
57 | * __up(sem); | ||
58 | * | ||
59 | * The (old_val < 0) test is equivalent to | ||
60 | * the more straightforward (new_val <= 0), | ||
61 | * but it is easier to test the former because | ||
62 | * of how the CAS instruction works. | ||
63 | */ | ||
64 | |||
65 | __asm__ __volatile__("\n" | ||
66 | " ! up sem(%0)\n" | ||
67 | " membar #StoreLoad | #LoadLoad\n" | ||
68 | "1: lduw [%0], %%g1\n" | ||
69 | " add %%g1, 1, %%g7\n" | ||
70 | " cas [%0], %%g1, %%g7\n" | ||
71 | " cmp %%g1, %%g7\n" | ||
72 | " bne,pn %%icc, 1b\n" | ||
73 | " addcc %%g7, 1, %%g0\n" | ||
74 | " membar #StoreLoad | #StoreStore\n" | ||
75 | " ble,pn %%icc, 3f\n" | ||
76 | " nop\n" | ||
77 | "2:\n" | ||
78 | " .subsection 2\n" | ||
79 | "3: mov %0, %%g1\n" | ||
80 | " save %%sp, -160, %%sp\n" | ||
81 | " call %1\n" | ||
82 | " mov %%g1, %%o0\n" | ||
83 | " ba,pt %%xcc, 2b\n" | ||
84 | " restore\n" | ||
85 | " .previous\n" | ||
86 | : : "r" (sem), "i" (__up) | ||
87 | : "g1", "g2", "g3", "g7", "memory", "cc"); | ||
88 | } | ||
89 | |||
90 | static void __sched __down(struct semaphore * sem) | ||
91 | { | ||
92 | struct task_struct *tsk = current; | ||
93 | DECLARE_WAITQUEUE(wait, tsk); | ||
94 | |||
95 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
96 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
97 | |||
98 | while (__sem_update_count(sem, -1) <= 0) { | ||
99 | schedule(); | ||
100 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
101 | } | ||
102 | remove_wait_queue(&sem->wait, &wait); | ||
103 | tsk->state = TASK_RUNNING; | ||
104 | |||
105 | wake_up(&sem->wait); | ||
106 | } | ||
107 | |||
108 | void __sched down(struct semaphore *sem) | ||
109 | { | ||
110 | might_sleep(); | ||
111 | /* This atomically does: | ||
112 | * old_val = sem->count; | ||
113 | * new_val = sem->count - 1; | ||
114 | * sem->count = new_val; | ||
115 | * if (old_val < 1) | ||
116 | * __down(sem); | ||
117 | * | ||
118 | * The (old_val < 1) test is equivalent to | ||
119 | * the more straightforward (new_val < 0), | ||
120 | * but it is easier to test the former because | ||
121 | * of how the CAS instruction works. | ||
122 | */ | ||
123 | |||
124 | __asm__ __volatile__("\n" | ||
125 | " ! down sem(%0)\n" | ||
126 | "1: lduw [%0], %%g1\n" | ||
127 | " sub %%g1, 1, %%g7\n" | ||
128 | " cas [%0], %%g1, %%g7\n" | ||
129 | " cmp %%g1, %%g7\n" | ||
130 | " bne,pn %%icc, 1b\n" | ||
131 | " cmp %%g7, 1\n" | ||
132 | " membar #StoreLoad | #StoreStore\n" | ||
133 | " bl,pn %%icc, 3f\n" | ||
134 | " nop\n" | ||
135 | "2:\n" | ||
136 | " .subsection 2\n" | ||
137 | "3: mov %0, %%g1\n" | ||
138 | " save %%sp, -160, %%sp\n" | ||
139 | " call %1\n" | ||
140 | " mov %%g1, %%o0\n" | ||
141 | " ba,pt %%xcc, 2b\n" | ||
142 | " restore\n" | ||
143 | " .previous\n" | ||
144 | : : "r" (sem), "i" (__down) | ||
145 | : "g1", "g2", "g3", "g7", "memory", "cc"); | ||
146 | } | ||
147 | |||
148 | int down_trylock(struct semaphore *sem) | ||
149 | { | ||
150 | int ret; | ||
151 | |||
152 | /* This atomically does: | ||
153 | * old_val = sem->count; | ||
154 | * new_val = sem->count - 1; | ||
155 | * if (old_val < 1) { | ||
156 | * ret = 1; | ||
157 | * } else { | ||
158 | * sem->count = new_val; | ||
159 | * ret = 0; | ||
160 | * } | ||
161 | * | ||
162 | * The (old_val < 1) test is equivalent to | ||
163 | * the more straightforward (new_val < 0), | ||
164 | * but it is easier to test the former because | ||
165 | * of how the CAS instruction works. | ||
166 | */ | ||
167 | |||
168 | __asm__ __volatile__("\n" | ||
169 | " ! down_trylock sem(%1) ret(%0)\n" | ||
170 | "1: lduw [%1], %%g1\n" | ||
171 | " sub %%g1, 1, %%g7\n" | ||
172 | " cmp %%g1, 1\n" | ||
173 | " bl,pn %%icc, 2f\n" | ||
174 | " mov 1, %0\n" | ||
175 | " cas [%1], %%g1, %%g7\n" | ||
176 | " cmp %%g1, %%g7\n" | ||
177 | " bne,pn %%icc, 1b\n" | ||
178 | " mov 0, %0\n" | ||
179 | " membar #StoreLoad | #StoreStore\n" | ||
180 | "2:\n" | ||
181 | : "=&r" (ret) | ||
182 | : "r" (sem) | ||
183 | : "g1", "g7", "memory", "cc"); | ||
184 | |||
185 | return ret; | ||
186 | } | ||
187 | |||
188 | static int __sched __down_interruptible(struct semaphore * sem) | ||
189 | { | ||
190 | int retval = 0; | ||
191 | struct task_struct *tsk = current; | ||
192 | DECLARE_WAITQUEUE(wait, tsk); | ||
193 | |||
194 | tsk->state = TASK_INTERRUPTIBLE; | ||
195 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
196 | |||
197 | while (__sem_update_count(sem, -1) <= 0) { | ||
198 | if (signal_pending(current)) { | ||
199 | __sem_update_count(sem, 0); | ||
200 | retval = -EINTR; | ||
201 | break; | ||
202 | } | ||
203 | schedule(); | ||
204 | tsk->state = TASK_INTERRUPTIBLE; | ||
205 | } | ||
206 | tsk->state = TASK_RUNNING; | ||
207 | remove_wait_queue(&sem->wait, &wait); | ||
208 | wake_up(&sem->wait); | ||
209 | return retval; | ||
210 | } | ||
211 | |||
212 | int __sched down_interruptible(struct semaphore *sem) | ||
213 | { | ||
214 | int ret = 0; | ||
215 | |||
216 | might_sleep(); | ||
217 | /* This atomically does: | ||
218 | * old_val = sem->count; | ||
219 | * new_val = sem->count - 1; | ||
220 | * sem->count = new_val; | ||
221 | * if (old_val < 1) | ||
222 | * ret = __down_interruptible(sem); | ||
223 | * | ||
224 | * The (old_val < 1) test is equivalent to | ||
225 | * the more straightforward (new_val < 0), | ||
226 | * but it is easier to test the former because | ||
227 | * of how the CAS instruction works. | ||
228 | */ | ||
229 | |||
230 | __asm__ __volatile__("\n" | ||
231 | " ! down_interruptible sem(%2) ret(%0)\n" | ||
232 | "1: lduw [%2], %%g1\n" | ||
233 | " sub %%g1, 1, %%g7\n" | ||
234 | " cas [%2], %%g1, %%g7\n" | ||
235 | " cmp %%g1, %%g7\n" | ||
236 | " bne,pn %%icc, 1b\n" | ||
237 | " cmp %%g7, 1\n" | ||
238 | " membar #StoreLoad | #StoreStore\n" | ||
239 | " bl,pn %%icc, 3f\n" | ||
240 | " nop\n" | ||
241 | "2:\n" | ||
242 | " .subsection 2\n" | ||
243 | "3: mov %2, %%g1\n" | ||
244 | " save %%sp, -160, %%sp\n" | ||
245 | " call %3\n" | ||
246 | " mov %%g1, %%o0\n" | ||
247 | " ba,pt %%xcc, 2b\n" | ||
248 | " restore\n" | ||
249 | " .previous\n" | ||
250 | : "=r" (ret) | ||
251 | : "0" (ret), "r" (sem), "i" (__down_interruptible) | ||
252 | : "g1", "g2", "g3", "g7", "memory", "cc"); | ||
253 | return ret; | ||
254 | } | ||
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index 51fa773f38c9..051b8d9cb989 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c | |||
@@ -130,12 +130,6 @@ EXPORT_SYMBOL(_mcount); | |||
130 | 130 | ||
131 | EXPORT_SYMBOL(sparc64_get_clock_tick); | 131 | EXPORT_SYMBOL(sparc64_get_clock_tick); |
132 | 132 | ||
133 | /* semaphores */ | ||
134 | EXPORT_SYMBOL(down); | ||
135 | EXPORT_SYMBOL(down_trylock); | ||
136 | EXPORT_SYMBOL(down_interruptible); | ||
137 | EXPORT_SYMBOL(up); | ||
138 | |||
139 | /* RW semaphores */ | 133 | /* RW semaphores */ |
140 | EXPORT_SYMBOL(__down_read); | 134 | EXPORT_SYMBOL(__down_read); |
141 | EXPORT_SYMBOL(__down_read_trylock); | 135 | EXPORT_SYMBOL(__down_read_trylock); |
diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386 index 3cd8a04d66d8..e09edfa560da 100644 --- a/arch/um/Kconfig.i386 +++ b/arch/um/Kconfig.i386 | |||
@@ -19,10 +19,6 @@ config 64BIT | |||
19 | bool | 19 | bool |
20 | default n | 20 | default n |
21 | 21 | ||
22 | config SEMAPHORE_SLEEPERS | ||
23 | bool | ||
24 | default y | ||
25 | |||
26 | config 3_LEVEL_PGTABLES | 22 | config 3_LEVEL_PGTABLES |
27 | bool "Three-level pagetables (EXPERIMENTAL)" | 23 | bool "Three-level pagetables (EXPERIMENTAL)" |
28 | default n | 24 | default n |
diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64 index 6533b349f061..3fbe69e359ed 100644 --- a/arch/um/Kconfig.x86_64 +++ b/arch/um/Kconfig.x86_64 | |||
@@ -11,10 +11,6 @@ config RWSEM_GENERIC_SPINLOCK | |||
11 | bool | 11 | bool |
12 | default y | 12 | default y |
13 | 13 | ||
14 | config SEMAPHORE_SLEEPERS | ||
15 | bool | ||
16 | default y | ||
17 | |||
18 | config 3_LEVEL_PGTABLES | 14 | config 3_LEVEL_PGTABLES |
19 | bool | 15 | bool |
20 | default y | 16 | default y |
diff --git a/arch/um/sys-i386/ksyms.c b/arch/um/sys-i386/ksyms.c index 2a1eac1859ce..bfbefd30db8f 100644 --- a/arch/um/sys-i386/ksyms.c +++ b/arch/um/sys-i386/ksyms.c | |||
@@ -1,17 +1,5 @@ | |||
1 | #include "linux/module.h" | 1 | #include "linux/module.h" |
2 | #include "linux/in6.h" | ||
3 | #include "linux/rwsem.h" | ||
4 | #include "asm/byteorder.h" | ||
5 | #include "asm/delay.h" | ||
6 | #include "asm/semaphore.h" | ||
7 | #include "asm/uaccess.h" | ||
8 | #include "asm/checksum.h" | 2 | #include "asm/checksum.h" |
9 | #include "asm/errno.h" | ||
10 | |||
11 | EXPORT_SYMBOL(__down_failed); | ||
12 | EXPORT_SYMBOL(__down_failed_interruptible); | ||
13 | EXPORT_SYMBOL(__down_failed_trylock); | ||
14 | EXPORT_SYMBOL(__up_wakeup); | ||
15 | 3 | ||
16 | /* Networking helper routines. */ | 4 | /* Networking helper routines. */ |
17 | EXPORT_SYMBOL(csum_partial); | 5 | EXPORT_SYMBOL(csum_partial); |
diff --git a/arch/um/sys-ppc/Makefile b/arch/um/sys-ppc/Makefile index 08901526e893..b8bc844fd2c4 100644 --- a/arch/um/sys-ppc/Makefile +++ b/arch/um/sys-ppc/Makefile | |||
@@ -3,7 +3,7 @@ OBJ = built-in.o | |||
3 | .S.o: | 3 | .S.o: |
4 | $(CC) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o | 4 | $(CC) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o |
5 | 5 | ||
6 | OBJS = ptrace.o sigcontext.o semaphore.o checksum.o miscthings.o misc.o \ | 6 | OBJS = ptrace.o sigcontext.o checksum.o miscthings.o misc.o \ |
7 | ptrace_user.o sysrq.o | 7 | ptrace_user.o sysrq.o |
8 | 8 | ||
9 | EXTRA_AFLAGS := -DCONFIG_PPC32 -I. -I$(srctree)/arch/ppc/kernel | 9 | EXTRA_AFLAGS := -DCONFIG_PPC32 -I. -I$(srctree)/arch/ppc/kernel |
@@ -20,10 +20,6 @@ ptrace_user.o: ptrace_user.c | |||
20 | sigcontext.o: sigcontext.c | 20 | sigcontext.o: sigcontext.c |
21 | $(CC) $(USER_CFLAGS) $(EXTRA_CFLAGS) -c -o $@ $< | 21 | $(CC) $(USER_CFLAGS) $(EXTRA_CFLAGS) -c -o $@ $< |
22 | 22 | ||
23 | semaphore.c: | ||
24 | rm -f $@ | ||
25 | ln -s $(srctree)/arch/ppc/kernel/$@ $@ | ||
26 | |||
27 | checksum.S: | 23 | checksum.S: |
28 | rm -f $@ | 24 | rm -f $@ |
29 | ln -s $(srctree)/arch/ppc/lib/$@ $@ | 25 | ln -s $(srctree)/arch/ppc/lib/$@ $@ |
@@ -66,4 +62,4 @@ misc.o: misc.S ppc_defs.h | |||
66 | $(CC) $(EXTRA_AFLAGS) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o | 62 | $(CC) $(EXTRA_AFLAGS) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o |
67 | rm -f asm | 63 | rm -f asm |
68 | 64 | ||
69 | clean-files := $(OBJS) ppc_defs.h checksum.S semaphore.c mk_defs.c | 65 | clean-files := $(OBJS) ppc_defs.h checksum.S mk_defs.c |
diff --git a/arch/um/sys-x86_64/ksyms.c b/arch/um/sys-x86_64/ksyms.c index 12c593607c59..4d7d1a812d8f 100644 --- a/arch/um/sys-x86_64/ksyms.c +++ b/arch/um/sys-x86_64/ksyms.c | |||
@@ -1,16 +1,5 @@ | |||
1 | #include "linux/module.h" | 1 | #include "linux/module.h" |
2 | #include "linux/in6.h" | 2 | #include "asm/string.h" |
3 | #include "linux/rwsem.h" | ||
4 | #include "asm/byteorder.h" | ||
5 | #include "asm/semaphore.h" | ||
6 | #include "asm/uaccess.h" | ||
7 | #include "asm/checksum.h" | ||
8 | #include "asm/errno.h" | ||
9 | |||
10 | EXPORT_SYMBOL(__down_failed); | ||
11 | EXPORT_SYMBOL(__down_failed_interruptible); | ||
12 | EXPORT_SYMBOL(__down_failed_trylock); | ||
13 | EXPORT_SYMBOL(__up_wakeup); | ||
14 | 3 | ||
15 | /*XXX: we need them because they would be exported by x86_64 */ | 4 | /*XXX: we need them because they would be exported by x86_64 */ |
16 | EXPORT_SYMBOL(__memcpy); | 5 | EXPORT_SYMBOL(__memcpy); |
diff --git a/arch/v850/kernel/Makefile b/arch/v850/kernel/Makefile index 3930482bddc4..da5889c53576 100644 --- a/arch/v850/kernel/Makefile +++ b/arch/v850/kernel/Makefile | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | extra-y := head.o init_task.o vmlinux.lds | 12 | extra-y := head.o init_task.o vmlinux.lds |
13 | 13 | ||
14 | obj-y += intv.o entry.o process.o syscalls.o time.o semaphore.o setup.o \ | 14 | obj-y += intv.o entry.o process.o syscalls.o time.o setup.o \ |
15 | signal.o irq.o mach.o ptrace.o bug.o | 15 | signal.o irq.o mach.o ptrace.o bug.o |
16 | obj-$(CONFIG_MODULES) += module.o v850_ksyms.o | 16 | obj-$(CONFIG_MODULES) += module.o v850_ksyms.o |
17 | # chip-specific code | 17 | # chip-specific code |
diff --git a/arch/v850/kernel/semaphore.c b/arch/v850/kernel/semaphore.c deleted file mode 100644 index fc89fd661c99..000000000000 --- a/arch/v850/kernel/semaphore.c +++ /dev/null | |||
@@ -1,166 +0,0 @@ | |||
1 | /* | ||
2 | * arch/v850/kernel/semaphore.c -- Semaphore support | ||
3 | * | ||
4 | * Copyright (C) 1998-2000 IBM Corporation | ||
5 | * Copyright (C) 1999 Linus Torvalds | ||
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General | ||
8 | * Public License. See the file COPYING in the main directory of this | ||
9 | * archive for more details. | ||
10 | * | ||
11 | * This file is a copy of the s390 version, arch/s390/kernel/semaphore.c | ||
12 | * Author(s): Martin Schwidefsky | ||
13 | * which was derived from the i386 version, linux/arch/i386/kernel/semaphore.c | ||
14 | */ | ||
15 | |||
16 | #include <linux/errno.h> | ||
17 | #include <linux/sched.h> | ||
18 | #include <linux/init.h> | ||
19 | |||
20 | #include <asm/semaphore.h> | ||
21 | |||
22 | /* | ||
23 | * Semaphores are implemented using a two-way counter: | ||
24 | * The "count" variable is decremented for each process | ||
25 | * that tries to acquire the semaphore, while the "sleeping" | ||
26 | * variable is a count of such acquires. | ||
27 | * | ||
28 | * Notably, the inline "up()" and "down()" functions can | ||
29 | * efficiently test if they need to do any extra work (up | ||
30 | * needs to do something only if count was negative before | ||
31 | * the increment operation). | ||
32 | * | ||
33 | * "sleeping" and the contention routine ordering is | ||
34 | * protected by the semaphore spinlock. | ||
35 | * | ||
36 | * Note that these functions are only called when there is | ||
37 | * contention on the lock, and as such all this is the | ||
38 | * "non-critical" part of the whole semaphore business. The | ||
39 | * critical part is the inline stuff in <asm/semaphore.h> | ||
40 | * where we want to avoid any extra jumps and calls. | ||
41 | */ | ||
42 | |||
43 | /* | ||
44 | * Logic: | ||
45 | * - only on a boundary condition do we need to care. When we go | ||
46 | * from a negative count to a non-negative, we wake people up. | ||
47 | * - when we go from a non-negative count to a negative do we | ||
48 | * (a) synchronize with the "sleeper" count and (b) make sure | ||
49 | * that we're on the wakeup list before we synchronize so that | ||
50 | * we cannot lose wakeup events. | ||
51 | */ | ||
52 | |||
53 | void __up(struct semaphore *sem) | ||
54 | { | ||
55 | wake_up(&sem->wait); | ||
56 | } | ||
57 | |||
58 | static DEFINE_SPINLOCK(semaphore_lock); | ||
59 | |||
60 | void __sched __down(struct semaphore * sem) | ||
61 | { | ||
62 | struct task_struct *tsk = current; | ||
63 | DECLARE_WAITQUEUE(wait, tsk); | ||
64 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
65 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
66 | |||
67 | spin_lock_irq(&semaphore_lock); | ||
68 | sem->sleepers++; | ||
69 | for (;;) { | ||
70 | int sleepers = sem->sleepers; | ||
71 | |||
72 | /* | ||
73 | * Add "everybody else" into it. They aren't | ||
74 | * playing, because we own the spinlock. | ||
75 | */ | ||
76 | if (!atomic_add_negative(sleepers - 1, &sem->count)) { | ||
77 | sem->sleepers = 0; | ||
78 | break; | ||
79 | } | ||
80 | sem->sleepers = 1; /* us - see -1 above */ | ||
81 | spin_unlock_irq(&semaphore_lock); | ||
82 | |||
83 | schedule(); | ||
84 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
85 | spin_lock_irq(&semaphore_lock); | ||
86 | } | ||
87 | spin_unlock_irq(&semaphore_lock); | ||
88 | remove_wait_queue(&sem->wait, &wait); | ||
89 | tsk->state = TASK_RUNNING; | ||
90 | wake_up(&sem->wait); | ||
91 | } | ||
92 | |||
93 | int __sched __down_interruptible(struct semaphore * sem) | ||
94 | { | ||
95 | int retval = 0; | ||
96 | struct task_struct *tsk = current; | ||
97 | DECLARE_WAITQUEUE(wait, tsk); | ||
98 | tsk->state = TASK_INTERRUPTIBLE; | ||
99 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
100 | |||
101 | spin_lock_irq(&semaphore_lock); | ||
102 | sem->sleepers ++; | ||
103 | for (;;) { | ||
104 | int sleepers = sem->sleepers; | ||
105 | |||
106 | /* | ||
107 | * With signals pending, this turns into | ||
108 | * the trylock failure case - we won't be | ||
109 | sleeping, and we can't get the lock as | ||
110 | * it has contention. Just correct the count | ||
111 | * and exit. | ||
112 | */ | ||
113 | if (signal_pending(current)) { | ||
114 | retval = -EINTR; | ||
115 | sem->sleepers = 0; | ||
116 | atomic_add(sleepers, &sem->count); | ||
117 | break; | ||
118 | } | ||
119 | |||
120 | /* | ||
121 | * Add "everybody else" into it. They aren't | ||
122 | * playing, because we own the spinlock. The | ||
123 | * "-1" is because we're still hoping to get | ||
124 | * the lock. | ||
125 | */ | ||
126 | if (!atomic_add_negative(sleepers - 1, &sem->count)) { | ||
127 | sem->sleepers = 0; | ||
128 | break; | ||
129 | } | ||
130 | sem->sleepers = 1; /* us - see -1 above */ | ||
131 | spin_unlock_irq(&semaphore_lock); | ||
132 | |||
133 | schedule(); | ||
134 | tsk->state = TASK_INTERRUPTIBLE; | ||
135 | spin_lock_irq(&semaphore_lock); | ||
136 | } | ||
137 | spin_unlock_irq(&semaphore_lock); | ||
138 | tsk->state = TASK_RUNNING; | ||
139 | remove_wait_queue(&sem->wait, &wait); | ||
140 | wake_up(&sem->wait); | ||
141 | return retval; | ||
142 | } | ||
143 | |||
144 | /* | ||
145 | * Trylock failed - make sure we correct for | ||
146 | * having decremented the count. | ||
147 | */ | ||
148 | int __down_trylock(struct semaphore * sem) | ||
149 | { | ||
150 | unsigned long flags; | ||
151 | int sleepers; | ||
152 | |||
153 | spin_lock_irqsave(&semaphore_lock, flags); | ||
154 | sleepers = sem->sleepers + 1; | ||
155 | sem->sleepers = 0; | ||
156 | |||
157 | /* | ||
158 | * Add "everybody else" and us into it. They aren't | ||
159 | * playing, because we own the spinlock. | ||
160 | */ | ||
161 | if (!atomic_add_negative(sleepers, &sem->count)) | ||
162 | wake_up(&sem->wait); | ||
163 | |||
164 | spin_unlock_irqrestore(&semaphore_lock, flags); | ||
165 | return 1; | ||
166 | } | ||
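To make the sleeper accounting in the file removed above concrete, a short walk-through (values illustrative): with the semaphore free, count is 1. Task A calls down() and the inline fast path drops count to 0. Task B then downs, count goes to -1, and B enters __down() with sleepers = 1; atomic_add_negative(sleepers - 1 = 0, &count) leaves count at -1, so B sets sleepers back to 1 and sleeps. A third waiter C drops count to -2 and runs the same loop with sleepers = 2; adding sleepers - 1 = 1 brings count back to -1 before C sleeps. That is the invariant the scheme maintains: however many tasks wait, count stays at -1 and sleepers at 1. When A calls up(), count rises to 0 and __up() wakes one waiter, whose next loop iteration sees atomic_add_negative(0, &count) leave count non-negative, so it takes the semaphore.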
diff --git a/arch/v850/kernel/v850_ksyms.c b/arch/v850/kernel/v850_ksyms.c index 93575fdc874d..8d386a5dbc4a 100644 --- a/arch/v850/kernel/v850_ksyms.c +++ b/arch/v850/kernel/v850_ksyms.c | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <asm/pgalloc.h> | 11 | #include <asm/pgalloc.h> |
12 | #include <asm/irq.h> | 12 | #include <asm/irq.h> |
13 | #include <asm/io.h> | 13 | #include <asm/io.h> |
14 | #include <asm/semaphore.h> | ||
15 | #include <asm/checksum.h> | 14 | #include <asm/checksum.h> |
16 | #include <asm/current.h> | 15 | #include <asm/current.h> |
17 | 16 | ||
@@ -34,12 +33,6 @@ EXPORT_SYMBOL (memset); | |||
34 | EXPORT_SYMBOL (memcpy); | 33 | EXPORT_SYMBOL (memcpy); |
35 | EXPORT_SYMBOL (memmove); | 34 | EXPORT_SYMBOL (memmove); |
36 | 35 | ||
37 | /* semaphores */ | ||
38 | EXPORT_SYMBOL (__down); | ||
39 | EXPORT_SYMBOL (__down_interruptible); | ||
40 | EXPORT_SYMBOL (__down_trylock); | ||
41 | EXPORT_SYMBOL (__up); | ||
42 | |||
43 | /* | 36 | /* |
44 | * libgcc functions - functions that are used internally by the | 37 | * libgcc functions - functions that are used internally by the |
45 | * compiler... (prototypes are not correct though, but that | 38 | * compiler... (prototypes are not correct though, but that |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 6c70fed0f9a0..2a59dbb28248 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -23,6 +23,7 @@ config X86 | |||
23 | select HAVE_KPROBES | 23 | select HAVE_KPROBES |
24 | select HAVE_KRETPROBES | 24 | select HAVE_KRETPROBES |
25 | select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) | 25 | select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) |
26 | select HAVE_ARCH_KGDB | ||
26 | 27 | ||
27 | 28 | ||
28 | config GENERIC_LOCKBREAK | 29 | config GENERIC_LOCKBREAK |
@@ -53,9 +54,6 @@ config STACKTRACE_SUPPORT | |||
53 | config HAVE_LATENCYTOP_SUPPORT | 54 | config HAVE_LATENCYTOP_SUPPORT |
54 | def_bool y | 55 | def_bool y |
55 | 56 | ||
56 | config SEMAPHORE_SLEEPERS | ||
57 | def_bool y | ||
58 | |||
59 | config FAST_CMPXCHG_LOCAL | 57 | config FAST_CMPXCHG_LOCAL |
60 | bool | 58 | bool |
61 | default y | 59 | default y |
@@ -117,7 +115,7 @@ config ARCH_HAS_CPU_RELAX | |||
117 | def_bool y | 115 | def_bool y |
118 | 116 | ||
119 | config HAVE_SETUP_PER_CPU_AREA | 117 | config HAVE_SETUP_PER_CPU_AREA |
120 | def_bool X86_64 | 118 | def_bool X86_64 || (X86_SMP && !X86_VOYAGER) |
121 | 119 | ||
122 | config ARCH_HIBERNATION_POSSIBLE | 120 | config ARCH_HIBERNATION_POSSIBLE |
123 | def_bool y | 121 | def_bool y |
@@ -171,7 +169,7 @@ config X86_64_SMP | |||
171 | config X86_HT | 169 | config X86_HT |
172 | bool | 170 | bool |
173 | depends on SMP | 171 | depends on SMP |
174 | depends on (X86_32 && !(X86_VISWS || X86_VOYAGER)) || (X86_64 && !MK8) | 172 | depends on (X86_32 && !(X86_VISWS || X86_VOYAGER)) || X86_64 |
175 | default y | 173 | default y |
176 | 174 | ||
177 | config X86_BIOS_REBOOT | 175 | config X86_BIOS_REBOOT |
@@ -181,7 +179,7 @@ config X86_BIOS_REBOOT | |||
181 | 179 | ||
182 | config X86_TRAMPOLINE | 180 | config X86_TRAMPOLINE |
183 | bool | 181 | bool |
184 | depends on X86_SMP || (X86_VOYAGER && SMP) | 182 | depends on X86_SMP || (X86_VOYAGER && SMP) || (64BIT && ACPI_SLEEP) |
185 | default y | 183 | default y |
186 | 184 | ||
187 | config KTIME_SCALAR | 185 | config KTIME_SCALAR |
@@ -241,8 +239,7 @@ config X86_ELAN | |||
241 | 239 | ||
242 | config X86_VOYAGER | 240 | config X86_VOYAGER |
243 | bool "Voyager (NCR)" | 241 | bool "Voyager (NCR)" |
244 | depends on X86_32 | 242 | depends on X86_32 && (SMP || BROKEN) |
245 | select SMP if !BROKEN | ||
246 | help | 243 | help |
247 | Voyager is an MCA-based 32-way capable SMP architecture proprietary | 244 | Voyager is an MCA-based 32-way capable SMP architecture proprietary |
248 | to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based. | 245 | to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based. |
@@ -254,9 +251,8 @@ config X86_VOYAGER | |||
254 | 251 | ||
255 | config X86_NUMAQ | 252 | config X86_NUMAQ |
256 | bool "NUMAQ (IBM/Sequent)" | 253 | bool "NUMAQ (IBM/Sequent)" |
257 | select SMP | 254 | depends on SMP && X86_32 |
258 | select NUMA | 255 | select NUMA |
259 | depends on X86_32 | ||
260 | help | 256 | help |
261 | This option is used for getting Linux to run on a (IBM/Sequent) NUMA | 257 | This option is used for getting Linux to run on a (IBM/Sequent) NUMA |
262 | multiquad box. This changes the way that processors are bootstrapped, | 258 | multiquad box. This changes the way that processors are bootstrapped, |
@@ -327,8 +323,9 @@ config X86_RDC321X | |||
327 | 323 | ||
328 | config X86_VSMP | 324 | config X86_VSMP |
329 | bool "Support for ScaleMP vSMP" | 325 | bool "Support for ScaleMP vSMP" |
330 | depends on X86_64 && PCI | 326 | select PARAVIRT |
331 | help | 327 | depends on X86_64 |
328 | help | ||
332 | Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is | 329 | Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is |
333 | supposed to run on these EM64T-based machines. Only choose this option | 330 | supposed to run on these EM64T-based machines. Only choose this option |
334 | if you have one of these machines. | 331 | if you have one of these machines. |
@@ -383,6 +380,35 @@ config PARAVIRT | |||
383 | 380 | ||
384 | endif | 381 | endif |
385 | 382 | ||
383 | config MEMTEST_BOOTPARAM | ||
384 | bool "Memtest boot parameter" | ||
385 | depends on X86_64 | ||
386 | default y | ||
387 | help | ||
388 | This option adds a kernel parameter 'memtest', which allows memtest | ||
389 | to be disabled at boot. If this option is selected, memtest | ||
390 | functionality can be disabled with memtest=0 on the kernel | ||
391 | command line. The purpose of this option is to allow a single | ||
392 | kernel image to be distributed with memtest built in, but not | ||
393 | necessarily enabled. | ||
394 | |||
395 | If you are unsure how to answer this question, answer Y. | ||
396 | |||
397 | config MEMTEST_BOOTPARAM_VALUE | ||
398 | int "Memtest boot parameter default value (0-4)" | ||
399 | depends on MEMTEST_BOOTPARAM | ||
400 | range 0 4 | ||
401 | default 0 | ||
402 | help | ||
403 | This option sets the default value for the kernel parameter | ||
404 | 'memtest', which allows memtest to be disabled at boot. If this | ||
405 | option is set to 0 (zero), the memtest kernel parameter will | ||
406 | default to 0, disabling memtest at bootup. If this option is | ||
407 | set to 4, the memtest kernel parameter will default to 4, | ||
408 | enabling memtest at bootup, and using that as the pattern number. | ||
409 | |||
410 | If you are unsure how to answer this question, answer 0. | ||
411 | |||
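In practice (an illustrative example, not from the patch): with MEMTEST_BOOTPARAM=y and MEMTEST_BOOTPARAM_VALUE=0, a distribution kernel ships with the scan disabled, and a user can opt in by booting with memtest=4 on the kernel command line; conversely, memtest=0 turns the scan off when the built-in default value is nonzero.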
386 | config ACPI_SRAT | 412 | config ACPI_SRAT |
387 | def_bool y | 413 | def_bool y |
388 | depends on X86_32 && ACPI && NUMA && (X86_SUMMIT || X86_GENERICARCH) | 414 | depends on X86_32 && ACPI && NUMA && (X86_SUMMIT || X86_GENERICARCH) |
@@ -507,7 +533,7 @@ config NR_CPUS | |||
507 | 533 | ||
508 | config SCHED_SMT | 534 | config SCHED_SMT |
509 | bool "SMT (Hyperthreading) scheduler support" | 535 | bool "SMT (Hyperthreading) scheduler support" |
510 | depends on (X86_64 && SMP) || (X86_32 && X86_HT) | 536 | depends on X86_HT |
511 | help | 537 | help |
512 | SMT scheduler support improves the CPU scheduler's decision making | 538 | SMT scheduler support improves the CPU scheduler's decision making |
513 | when dealing with Intel Pentium 4 chips with HyperThreading at a | 539 | when dealing with Intel Pentium 4 chips with HyperThreading at a |
@@ -517,7 +543,7 @@ config SCHED_SMT | |||
517 | config SCHED_MC | 543 | config SCHED_MC |
518 | def_bool y | 544 | def_bool y |
519 | prompt "Multi-core scheduler support" | 545 | prompt "Multi-core scheduler support" |
520 | depends on (X86_64 && SMP) || (X86_32 && X86_HT) | 546 | depends on X86_HT |
521 | help | 547 | help |
522 | Multi-core scheduler support improves the CPU scheduler's decision | 548 | Multi-core scheduler support improves the CPU scheduler's decision |
523 | making when dealing with multi-core CPU chips at a cost of slightly | 549 | making when dealing with multi-core CPU chips at a cost of slightly |
@@ -886,7 +912,7 @@ config NUMA_EMU | |||
886 | number of nodes. This is only useful for debugging. | 912 | number of nodes. This is only useful for debugging. |
887 | 913 | ||
888 | config NODES_SHIFT | 914 | config NODES_SHIFT |
889 | int | 915 | int "Max num nodes shift(1-15)" |
890 | range 1 15 if X86_64 | 916 | range 1 15 if X86_64 |
891 | default "6" if X86_64 | 917 | default "6" if X86_64 |
892 | default "4" if X86_NUMAQ | 918 | default "4" if X86_NUMAQ |
@@ -1010,6 +1036,21 @@ config MTRR | |||
1010 | 1036 | ||
1011 | See <file:Documentation/mtrr.txt> for more information. | 1037 | See <file:Documentation/mtrr.txt> for more information. |
1012 | 1038 | ||
1039 | config X86_PAT | ||
1040 | def_bool y | ||
1041 | prompt "x86 PAT support" | ||
1042 | depends on MTRR && NONPROMISC_DEVMEM | ||
1043 | help | ||
1044 | Use PAT attributes to setup page level cache control. | ||
1045 | |||
1046 | PATs are the modern equivalents of MTRRs and are much more | ||
1047 | flexible than MTRRs. | ||
1048 | |||
1049 | Say N here if you see bootup problems (boot crash, boot hang, | ||
1050 | spontaneous reboots) or a non-working video driver. | ||
1051 | |||
1052 | If unsure, say Y. | ||
1053 | |||
1013 | config EFI | 1054 | config EFI |
1014 | def_bool n | 1055 | def_bool n |
1015 | prompt "EFI runtime service support" | 1056 | prompt "EFI runtime service support" |
@@ -1078,6 +1119,7 @@ source kernel/Kconfig.hz | |||
1078 | 1119 | ||
1079 | config KEXEC | 1120 | config KEXEC |
1080 | bool "kexec system call" | 1121 | bool "kexec system call" |
1122 | depends on X86_64 || X86_BIOS_REBOOT | ||
1081 | help | 1123 | help |
1082 | kexec is a system call that implements the ability to shutdown your | 1124 | kexec is a system call that implements the ability to shutdown your |
1083 | current kernel, and to start another kernel. It is like a reboot | 1125 | current kernel, and to start another kernel. It is like a reboot |
@@ -1379,7 +1421,7 @@ endmenu | |||
1379 | menu "Bus options (PCI etc.)" | 1421 | menu "Bus options (PCI etc.)" |
1380 | 1422 | ||
1381 | config PCI | 1423 | config PCI |
1382 | bool "PCI support" if !X86_VISWS | 1424 | bool "PCI support" if !X86_VISWS && !X86_VSMP |
1383 | depends on !X86_VOYAGER | 1425 | depends on !X86_VOYAGER |
1384 | default y | 1426 | default y |
1385 | select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC) | 1427 | select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC) |
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 9304bfba7d45..57072f2716f9 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu | |||
@@ -388,7 +388,7 @@ config X86_OOSTORE | |||
388 | # | 388 | # |
389 | config X86_P6_NOP | 389 | config X86_P6_NOP |
390 | def_bool y | 390 | def_bool y |
391 | depends on (X86_64 || !X86_GENERIC) && (M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4) | 391 | depends on (X86_64 || !X86_GENERIC) && (M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4 || MPSC) |
392 | 392 | ||
393 | config X86_TSC | 393 | config X86_TSC |
394 | def_bool y | 394 | def_bool y |
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 702eb39901ca..610aaecc19f8 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug | |||
@@ -54,6 +54,18 @@ config DEBUG_PER_CPU_MAPS | |||
54 | 54 | ||
55 | Say N if unsure. | 55 | Say N if unsure. |
56 | 56 | ||
57 | config X86_PTDUMP | ||
58 | bool "Export kernel pagetable layout to userspace via debugfs" | ||
59 | depends on DEBUG_KERNEL | ||
60 | select DEBUG_FS | ||
61 | help | ||
62 | Say Y here if you want to show the kernel pagetable layout in a | ||
63 | debugfs file. This information is only useful for kernel developers | ||
64 | who are working in architecture specific areas of the kernel. | ||
65 | It is probably not a good idea to enable this feature in a production | ||
66 | kernel. | ||
67 | If in doubt, say "N". | ||
68 | |||
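Once booted with this option enabled, the dump is read back through debugfs; the file is typically /sys/kernel/debug/kernel_page_tables (path recalled from the feature's usual layout, not stated in this patch).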
57 | config DEBUG_RODATA | 69 | config DEBUG_RODATA |
58 | bool "Write protect kernel read-only data structures" | 70 | bool "Write protect kernel read-only data structures" |
59 | default y | 71 | default y |
@@ -64,6 +76,18 @@ config DEBUG_RODATA | |||
64 | data. This is recommended so that we can catch kernel bugs sooner. | 76 | data. This is recommended so that we can catch kernel bugs sooner. |
65 | If in doubt, say "Y". | 77 | If in doubt, say "Y". |
66 | 78 | ||
79 | config DIRECT_GBPAGES | ||
80 | bool "Enable gbpages-mapped kernel pagetables" | ||
81 | depends on DEBUG_KERNEL && EXPERIMENTAL && X86_64 | ||
82 | help | ||
83 | Enable gigabyte pages support (if the CPU supports it). This can | ||
84 | improve the kernel's performance a tiny bit by reducing TLB | ||
85 | pressure. | ||
86 | |||
87 | This is experimental code. | ||
88 | |||
89 | If in doubt, say "N". | ||
90 | |||
67 | config DEBUG_RODATA_TEST | 91 | config DEBUG_RODATA_TEST |
68 | bool "Testcase for the DEBUG_RODATA feature" | 92 | bool "Testcase for the DEBUG_RODATA feature" |
69 | depends on DEBUG_RODATA | 93 | depends on DEBUG_RODATA |
@@ -82,8 +106,8 @@ config DEBUG_NX_TEST | |||
82 | 106 | ||
83 | config 4KSTACKS | 107 | config 4KSTACKS |
84 | bool "Use 4Kb for kernel stacks instead of 8Kb" | 108 | bool "Use 4Kb for kernel stacks instead of 8Kb" |
85 | depends on DEBUG_KERNEL | ||
86 | depends on X86_32 | 109 | depends on X86_32 |
110 | default y | ||
87 | help | 111 | help |
88 | If you say Y here the kernel will use a 4Kb stacksize for the | 112 | If you say Y here the kernel will use a 4Kb stacksize for the |
89 | kernel stack attached to each process/thread. This facilitates | 113 | kernel stack attached to each process/thread. This facilitates |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index f1e739a43d41..3cff3c894cf3 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
@@ -151,7 +151,6 @@ mflags-y += -Iinclude/asm-x86/mach-default | |||
151 | # 64 bit does not support subarch support - clear sub arch variables | 151 | # 64 bit does not support subarch support - clear sub arch variables |
152 | fcore-$(CONFIG_X86_64) := | 152 | fcore-$(CONFIG_X86_64) := |
153 | mcore-$(CONFIG_X86_64) := | 153 | mcore-$(CONFIG_X86_64) := |
154 | mflags-$(CONFIG_X86_64) := | ||
155 | 154 | ||
156 | KBUILD_CFLAGS += $(mflags-y) | 155 | KBUILD_CFLAGS += $(mflags-y) |
157 | KBUILD_AFLAGS += $(mflags-y) | 156 | KBUILD_AFLAGS += $(mflags-y) |
@@ -159,9 +158,9 @@ KBUILD_AFLAGS += $(mflags-y) | |||
159 | ### | 158 | ### |
160 | # Kernel objects | 159 | # Kernel objects |
161 | 160 | ||
162 | head-y := arch/x86/kernel/head_$(BITS).o | 161 | head-y := arch/x86/kernel/head_$(BITS).o |
163 | head-$(CONFIG_X86_64) += arch/x86/kernel/head64.o | 162 | head-y += arch/x86/kernel/head$(BITS).o |
164 | head-y += arch/x86/kernel/init_task.o | 163 | head-y += arch/x86/kernel/init_task.o |
165 | 164 | ||
166 | libs-y += arch/x86/lib/ | 165 | libs-y += arch/x86/lib/ |
167 | 166 | ||
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index f88458e83ef0..7ee102f9c4f8 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile | |||
@@ -30,7 +30,7 @@ subdir- := compressed | |||
30 | 30 | ||
31 | setup-y += a20.o cmdline.o copy.o cpu.o cpucheck.o edd.o | 31 | setup-y += a20.o cmdline.o copy.o cpu.o cpucheck.o edd.o |
32 | setup-y += header.o main.o mca.o memory.o pm.o pmjump.o | 32 | setup-y += header.o main.o mca.o memory.o pm.o pmjump.o |
33 | setup-y += printf.o string.o tty.o video.o version.o | 33 | setup-y += printf.o string.o tty.o video.o video-mode.o version.o |
34 | setup-$(CONFIG_X86_APM_BOOT) += apm.o | 34 | setup-$(CONFIG_X86_APM_BOOT) += apm.o |
35 | setup-$(CONFIG_X86_VOYAGER) += voyager.o | 35 | setup-$(CONFIG_X86_VOYAGER) += voyager.o |
36 | 36 | ||
@@ -94,6 +94,20 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE | |||
94 | 94 | ||
95 | SETUP_OBJS = $(addprefix $(obj)/,$(setup-y)) | 95 | SETUP_OBJS = $(addprefix $(obj)/,$(setup-y)) |
96 | 96 | ||
97 | sed-offsets := -e 's/^00*/0/' \ | ||
98 | -e 's/^\([0-9a-fA-F]*\) . \(input_data\|input_data_end\)$$/\#define \2 0x\1/p' | ||
99 | |||
100 | quiet_cmd_offsets = OFFSETS $@ | ||
101 | cmd_offsets = $(NM) $< | sed -n $(sed-offsets) > $@ | ||
102 | |||
103 | $(obj)/offsets.h: $(obj)/compressed/vmlinux FORCE | ||
104 | $(call if_changed,offsets) | ||
105 | |||
106 | targets += offsets.h | ||
107 | |||
108 | AFLAGS_header.o += -I$(obj) | ||
109 | $(obj)/header.o: $(obj)/offsets.h | ||
110 | |||
97 | LDFLAGS_setup.elf := -T | 111 | LDFLAGS_setup.elf := -T |
98 | $(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE | 112 | $(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE |
99 | $(call if_changed,ld) | 113 | $(call if_changed,ld) |
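The sed script above reduces nm output on the compressed vmlinux to just the two symbols header.S needs. Assuming (for illustration) that input_data resolves to 0x1000 and input_data_end to 0x24f81c, the generated offsets.h would contain:

/* offsets.h -- generated by the rule above; addresses illustrative */
#define input_data 0x1000
#define input_data_end 0x24f81c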
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h index 7822a4983da2..09578070bfba 100644 --- a/arch/x86/boot/boot.h +++ b/arch/x86/boot/boot.h | |||
@@ -286,6 +286,11 @@ int getchar_timeout(void); | |||
286 | /* video.c */ | 286 | /* video.c */ |
287 | void set_video(void); | 287 | void set_video(void); |
288 | 288 | ||
289 | /* video-mode.c */ | ||
290 | int set_mode(u16 mode); | ||
291 | int mode_defined(u16 mode); | ||
292 | void probe_cards(int unsafe); | ||
293 | |||
289 | /* video-vesa.c */ | 294 | /* video-vesa.c */ |
290 | void vesa_store_edid(void); | 295 | void vesa_store_edid(void); |
291 | 296 | ||
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index d2b9f3bb87c0..92fdd35bd93e 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile | |||
@@ -22,7 +22,7 @@ $(obj)/vmlinux: $(src)/vmlinux_$(BITS).lds $(obj)/head_$(BITS).o $(obj)/misc.o $ | |||
22 | $(call if_changed,ld) | 22 | $(call if_changed,ld) |
23 | @: | 23 | @: |
24 | 24 | ||
25 | OBJCOPYFLAGS_vmlinux.bin := -O binary -R .note -R .comment -S | 25 | OBJCOPYFLAGS_vmlinux.bin := -R .comment -S |
26 | $(obj)/vmlinux.bin: vmlinux FORCE | 26 | $(obj)/vmlinux.bin: vmlinux FORCE |
27 | $(call if_changed,objcopy) | 27 | $(call if_changed,objcopy) |
28 | 28 | ||
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 8182e32c1b42..dad4e699f5a3 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c | |||
@@ -15,6 +15,10 @@ | |||
15 | * we just keep it from happening | 15 | * we just keep it from happening |
16 | */ | 16 | */ |
17 | #undef CONFIG_PARAVIRT | 17 | #undef CONFIG_PARAVIRT |
18 | #ifdef CONFIG_X86_32 | ||
19 | #define _ASM_DESC_H_ 1 | ||
20 | #endif | ||
21 | |||
18 | #ifdef CONFIG_X86_64 | 22 | #ifdef CONFIG_X86_64 |
19 | #define _LINUX_STRING_H_ 1 | 23 | #define _LINUX_STRING_H_ 1 |
20 | #define __LINUX_BITMAP_H 1 | 24 | #define __LINUX_BITMAP_H 1 |
@@ -22,6 +26,7 @@ | |||
22 | 26 | ||
23 | #include <linux/linkage.h> | 27 | #include <linux/linkage.h> |
24 | #include <linux/screen_info.h> | 28 | #include <linux/screen_info.h> |
29 | #include <linux/elf.h> | ||
25 | #include <asm/io.h> | 30 | #include <asm/io.h> |
26 | #include <asm/page.h> | 31 | #include <asm/page.h> |
27 | #include <asm/boot.h> | 32 | #include <asm/boot.h> |
@@ -53,8 +58,8 @@ | |||
53 | * 1 bit (last block flag) | 58 | * 1 bit (last block flag) |
54 | * 2 bits (block type) | 59 | * 2 bits (block type) |
55 | * | 60 | * |
56 | * 1 block occurs every 32K -1 bytes or when 50% compression has been achieved. | 61 | * 1 block occurs every 32K -1 bytes or when 50% compression |
57 | * The smallest block type encoding is always used. | 62 | * has been achieved. The smallest block type encoding is always used. |
58 | * | 63 | * |
59 | * stored: | 64 | * stored: |
60 | * 32 bits length in bytes. | 65 | * 32 bits length in bytes. |
@@ -90,9 +95,9 @@ | |||
90 | * | 95 | * |
91 | * All of which is enough to compute an amount of extra data that is required | 96 | * All of which is enough to compute an amount of extra data that is required |
92 | * to be safe. To avoid problems at the block level allocating 5 extra bytes | 97 | * to be safe. To avoid problems at the block level allocating 5 extra bytes |
93 | * per 32767 bytes of data is sufficient. To avoid problems internal to a block | 98 | * per 32767 bytes of data is sufficient. To avoid problems internal to a |
94 | * adding an extra 32767 bytes (the worst case uncompressed block size) is | 99 | * block adding an extra 32767 bytes (the worst case uncompressed block size) |
95 | * sufficient, to ensure that in the worst case the decompressed data for | 100 | * is sufficient, to ensure that in the worst case the decompressed data for |
96 | * block will stop the byte before the compressed data for a block begins. | 101 | * block will stop the byte before the compressed data for a block begins. |
97 | * To avoid problems with the compressed data's meta information an extra 18 | 102 | * To avoid problems with the compressed data's meta information an extra 18 |
98 | * bytes are needed. Leading to the formula: | 103 | * bytes are needed. Leading to the formula: |
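A worked example of that accounting (numbers illustrative, not from the patch): a 4 MiB compressed stream spans at most ceil(4194304 / 32767) = 129 blocks, so the block-level slack is 5 * 129 = 645 bytes; adding the 32767-byte worst-case uncompressed block and the 18 bytes of metadata gives about 33430 bytes of headroom required beyond the decompressed size.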
@@ -111,58 +116,66 @@ | |||
111 | * gzip declarations | 116 | * gzip declarations |
112 | */ | 117 | */ |
113 | 118 | ||
114 | #define OF(args) args | 119 | #define OF(args) args |
115 | #define STATIC static | 120 | #define STATIC static |
116 | 121 | ||
117 | #undef memset | 122 | #undef memset |
118 | #undef memcpy | 123 | #undef memcpy |
119 | #define memzero(s, n) memset ((s), 0, (n)) | 124 | #define memzero(s, n) memset((s), 0, (n)) |
125 | |||
126 | typedef unsigned char uch; | ||
127 | typedef unsigned short ush; | ||
128 | typedef unsigned long ulg; | ||
129 | |||
130 | /* | ||
131 | * Window size must be at least 32k, and a power of two. | ||
132 | * We don't actually have a window just a huge output buffer, | ||
133 | * so we report a 2G window size, as that should always be | ||
134 | * larger than our output buffer: | ||
135 | */ | ||
136 | #define WSIZE 0x80000000 | ||
137 | |||
138 | /* Input buffer: */ | ||
139 | static unsigned char *inbuf; | ||
120 | 140 | ||
121 | typedef unsigned char uch; | 141 | /* Sliding window buffer (and final output buffer): */ |
122 | typedef unsigned short ush; | 142 | static unsigned char *window; |
123 | typedef unsigned long ulg; | ||
124 | 143 | ||
125 | #define WSIZE 0x80000000 /* Window size must be at least 32k, | 144 | /* Valid bytes in inbuf: */ |
126 | * and a power of two | 145 | static unsigned insize; |
127 | * We don't actually have a window just | ||
128 | * a huge output buffer so I report | ||
129 | * a 2G windows size, as that should | ||
130 | * always be larger than our output buffer. | ||
131 | */ | ||
132 | 146 | ||
133 | static uch *inbuf; /* input buffer */ | 147 | /* Index of next byte to be processed in inbuf: */ |
134 | static uch *window; /* Sliding window buffer, (and final output buffer) */ | 148 | static unsigned inptr; |
135 | 149 | ||
136 | static unsigned insize; /* valid bytes in inbuf */ | 150 | /* Bytes in output buffer: */ |
137 | static unsigned inptr; /* index of next byte to be processed in inbuf */ | 151 | static unsigned outcnt; |
138 | static unsigned outcnt; /* bytes in output buffer */ | ||
139 | 152 | ||
140 | /* gzip flag byte */ | 153 | /* gzip flag byte */ |
141 | #define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ | 154 | #define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ |
142 | #define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ | 155 | #define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gz file */ |
143 | #define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ | 156 | #define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ |
144 | #define ORIG_NAME 0x08 /* bit 3 set: original file name present */ | 157 | #define ORIG_NAME 0x08 /* bit 3 set: original file name present */ |
145 | #define COMMENT 0x10 /* bit 4 set: file comment present */ | 158 | #define COMMENT 0x10 /* bit 4 set: file comment present */ |
146 | #define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ | 159 | #define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ |
147 | #define RESERVED 0xC0 /* bit 6,7: reserved */ | 160 | #define RESERVED 0xC0 /* bit 6, 7: reserved */ |
148 | 161 | ||
149 | #define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf()) | 162 | #define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf()) |
150 | 163 | ||
151 | /* Diagnostic functions */ | 164 | /* Diagnostic functions */ |
152 | #ifdef DEBUG | 165 | #ifdef DEBUG |
153 | # define Assert(cond,msg) {if(!(cond)) error(msg);} | 166 | # define Assert(cond, msg) do { if (!(cond)) error(msg); } while (0) |
154 | # define Trace(x) fprintf x | 167 | # define Trace(x) do { fprintf x; } while (0) |
155 | # define Tracev(x) {if (verbose) fprintf x ;} | 168 | # define Tracev(x) do { if (verbose) fprintf x ; } while (0) |
156 | # define Tracevv(x) {if (verbose>1) fprintf x ;} | 169 | # define Tracevv(x) do { if (verbose > 1) fprintf x ; } while (0) |
157 | # define Tracec(c,x) {if (verbose && (c)) fprintf x ;} | 170 | # define Tracec(c, x) do { if (verbose && (c)) fprintf x ; } while (0) |
158 | # define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;} | 171 | # define Tracecv(c, x) do { if (verbose > 1 && (c)) fprintf x ; } while (0) |
159 | #else | 172 | #else |
160 | # define Assert(cond,msg) | 173 | # define Assert(cond, msg) |
161 | # define Trace(x) | 174 | # define Trace(x) |
162 | # define Tracev(x) | 175 | # define Tracev(x) |
163 | # define Tracevv(x) | 176 | # define Tracevv(x) |
164 | # define Tracec(c,x) | 177 | # define Tracec(c, x) |
165 | # define Tracecv(c,x) | 178 | # define Tracecv(c, x) |
166 | #endif | 179 | #endif |
167 | 180 | ||
168 | static int fill_inbuf(void); | 181 | static int fill_inbuf(void); |
@@ -170,7 +183,7 @@ static void flush_window(void); | |||
170 | static void error(char *m); | 183 | static void error(char *m); |
171 | static void gzip_mark(void **); | 184 | static void gzip_mark(void **); |
172 | static void gzip_release(void **); | 185 | static void gzip_release(void **); |
173 | 186 | ||
174 | /* | 187 | /* |
175 | * This is set up by the setup-routine at boot-time | 188 | * This is set up by the setup-routine at boot-time |
176 | */ | 189 | */ |
@@ -185,7 +198,7 @@ static unsigned char *real_mode; /* Pointer to real-mode data */ | |||
185 | extern unsigned char input_data[]; | 198 | extern unsigned char input_data[]; |
186 | extern int input_len; | 199 | extern int input_len; |
187 | 200 | ||
188 | static long bytes_out = 0; | 201 | static long bytes_out; |
189 | 202 | ||
190 | static void *malloc(int size); | 203 | static void *malloc(int size); |
191 | static void free(void *where); | 204 | static void free(void *where); |
@@ -210,7 +223,7 @@ static memptr free_mem_end_ptr; | |||
210 | #define HEAP_SIZE 0x4000 | 223 | #define HEAP_SIZE 0x4000 |
211 | #endif | 224 | #endif |
212 | 225 | ||
213 | static char *vidmem = (char *)0xb8000; | 226 | static char *vidmem; |
214 | static int vidport; | 227 | static int vidport; |
215 | static int lines, cols; | 228 | static int lines, cols; |
216 | 229 | ||
@@ -224,8 +237,10 @@ static void *malloc(int size) | |||
224 | { | 237 | { |
225 | void *p; | 238 | void *p; |
226 | 239 | ||
227 | if (size <0) error("Malloc error"); | 240 | if (size < 0) |
228 | if (free_mem_ptr <= 0) error("Memory error"); | 241 | error("Malloc error"); |
242 | if (free_mem_ptr <= 0) | ||
243 | error("Memory error"); | ||
229 | 244 | ||
230 | free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ | 245 | free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ |
231 | 246 | ||
@@ -251,19 +266,19 @@ static void gzip_release(void **ptr) | |||
251 | { | 266 | { |
252 | free_mem_ptr = (memptr) *ptr; | 267 | free_mem_ptr = (memptr) *ptr; |
253 | } | 268 | } |
254 | 269 | ||
255 | static void scroll(void) | 270 | static void scroll(void) |
256 | { | 271 | { |
257 | int i; | 272 | int i; |
258 | 273 | ||
259 | memcpy ( vidmem, vidmem + cols * 2, ( lines - 1 ) * cols * 2 ); | 274 | memcpy(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2); |
260 | for ( i = ( lines - 1 ) * cols * 2; i < lines * cols * 2; i += 2 ) | 275 | for (i = (lines - 1) * cols * 2; i < lines * cols * 2; i += 2) |
261 | vidmem[i] = ' '; | 276 | vidmem[i] = ' '; |
262 | } | 277 | } |
263 | 278 | ||
264 | static void putstr(const char *s) | 279 | static void putstr(const char *s) |
265 | { | 280 | { |
266 | int x,y,pos; | 281 | int x, y, pos; |
267 | char c; | 282 | char c; |
268 | 283 | ||
269 | #ifdef CONFIG_X86_32 | 284 | #ifdef CONFIG_X86_32 |
@@ -274,18 +289,18 @@ static void putstr(const char *s) | |||
274 | x = RM_SCREEN_INFO.orig_x; | 289 | x = RM_SCREEN_INFO.orig_x; |
275 | y = RM_SCREEN_INFO.orig_y; | 290 | y = RM_SCREEN_INFO.orig_y; |
276 | 291 | ||
277 | while ( ( c = *s++ ) != '\0' ) { | 292 | while ((c = *s++) != '\0') { |
278 | if ( c == '\n' ) { | 293 | if (c == '\n') { |
279 | x = 0; | 294 | x = 0; |
280 | if ( ++y >= lines ) { | 295 | if (++y >= lines) { |
281 | scroll(); | 296 | scroll(); |
282 | y--; | 297 | y--; |
283 | } | 298 | } |
284 | } else { | 299 | } else { |
285 | vidmem [(x + cols * y) * 2] = c; | 300 | vidmem [(x + cols * y) * 2] = c; |
286 | if ( ++x >= cols ) { | 301 | if (++x >= cols) { |
287 | x = 0; | 302 | x = 0; |
288 | if ( ++y >= lines ) { | 303 | if (++y >= lines) { |
289 | scroll(); | 304 | scroll(); |
290 | y--; | 305 | y--; |
291 | } | 306 | } |
@@ -303,22 +318,22 @@ static void putstr(const char *s) | |||
303 | outb(0xff & (pos >> 1), vidport+1); | 318 | outb(0xff & (pos >> 1), vidport+1); |
304 | } | 319 | } |
305 | 320 | ||
306 | static void* memset(void* s, int c, unsigned n) | 321 | static void *memset(void *s, int c, unsigned n) |
307 | { | 322 | { |
308 | int i; | 323 | int i; |
309 | char *ss = s; | 324 | char *ss = s; |
310 | 325 | ||
311 | for (i=0;i<n;i++) ss[i] = c; | 326 | for (i = 0; i < n; i++) ss[i] = c; |
312 | return s; | 327 | return s; |
313 | } | 328 | } |
314 | 329 | ||
315 | static void* memcpy(void* dest, const void* src, unsigned n) | 330 | static void *memcpy(void *dest, const void *src, unsigned n) |
316 | { | 331 | { |
317 | int i; | 332 | int i; |
318 | const char *s = src; | 333 | const char *s = src; |
319 | char *d = dest; | 334 | char *d = dest; |
320 | 335 | ||
321 | for (i=0;i<n;i++) d[i] = s[i]; | 336 | for (i = 0; i < n; i++) d[i] = s[i]; |
322 | return dest; | 337 | return dest; |
323 | } | 338 | } |
324 | 339 | ||
@@ -341,9 +356,9 @@ static void flush_window(void) | |||
341 | /* With my window equal to my output buffer | 356 | /* With my window equal to my output buffer |
342 | * I only need to compute the crc here. | 357 | * I only need to compute the crc here. |
343 | */ | 358 | */ |
344 | ulg c = crc; /* temporary variable */ | 359 | unsigned long c = crc; /* temporary variable */ |
345 | unsigned n; | 360 | unsigned n; |
346 | uch *in, ch; | 361 | unsigned char *in, ch; |
347 | 362 | ||
348 | in = window; | 363 | in = window; |
349 | for (n = 0; n < outcnt; n++) { | 364 | for (n = 0; n < outcnt; n++) { |
@@ -351,7 +366,7 @@ static void flush_window(void) | |||
351 | c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8); | 366 | c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8); |
352 | } | 367 | } |
353 | crc = c; | 368 | crc = c; |
354 | bytes_out += (ulg)outcnt; | 369 | bytes_out += (unsigned long)outcnt; |
355 | outcnt = 0; | 370 | outcnt = 0; |
356 | } | 371 | } |
357 | 372 | ||
@@ -365,9 +380,59 @@ static void error(char *x) | |||
365 | asm("hlt"); | 380 | asm("hlt"); |
366 | } | 381 | } |
367 | 382 | ||
383 | static void parse_elf(void *output) | ||
384 | { | ||
385 | #ifdef CONFIG_X86_64 | ||
386 | Elf64_Ehdr ehdr; | ||
387 | Elf64_Phdr *phdrs, *phdr; | ||
388 | #else | ||
389 | Elf32_Ehdr ehdr; | ||
390 | Elf32_Phdr *phdrs, *phdr; | ||
391 | #endif | ||
392 | void *dest; | ||
393 | int i; | ||
394 | |||
395 | memcpy(&ehdr, output, sizeof(ehdr)); | ||
396 | if (ehdr.e_ident[EI_MAG0] != ELFMAG0 || | ||
397 | ehdr.e_ident[EI_MAG1] != ELFMAG1 || | ||
398 | ehdr.e_ident[EI_MAG2] != ELFMAG2 || | ||
399 | ehdr.e_ident[EI_MAG3] != ELFMAG3) { | ||
400 | error("Kernel is not a valid ELF file"); | ||
401 | return; | ||
402 | } | ||
403 | |||
404 | putstr("Parsing ELF... "); | ||
405 | |||
406 | phdrs = malloc(sizeof(*phdrs) * ehdr.e_phnum); | ||
407 | if (!phdrs) | ||
408 | error("Failed to allocate space for phdrs"); | ||
409 | |||
410 | memcpy(phdrs, output + ehdr.e_phoff, sizeof(*phdrs) * ehdr.e_phnum); | ||
411 | |||
412 | for (i = 0; i < ehdr.e_phnum; i++) { | ||
413 | phdr = &phdrs[i]; | ||
414 | |||
415 | switch (phdr->p_type) { | ||
416 | case PT_LOAD: | ||
417 | #ifdef CONFIG_RELOCATABLE | ||
418 | dest = output; | ||
419 | dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); | ||
420 | #else | ||
421 | dest = (void *)(phdr->p_paddr); | ||
422 | #endif | ||
423 | memcpy(dest, | ||
424 | output + phdr->p_offset, | ||
425 | phdr->p_filesz); | ||
426 | break; | ||
427 | default: /* Ignore other PT_* */ break; | ||
428 | } | ||
429 | } | ||
430 | } | ||
431 | |||
368 | asmlinkage void decompress_kernel(void *rmode, memptr heap, | 432 | asmlinkage void decompress_kernel(void *rmode, memptr heap, |
369 | uch *input_data, unsigned long input_len, | 433 | unsigned char *input_data, |
370 | uch *output) | 434 | unsigned long input_len, |
435 | unsigned char *output) | ||
371 | { | 436 | { |
372 | real_mode = rmode; | 437 | real_mode = rmode; |
373 | 438 | ||
@@ -390,12 +455,12 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, | |||
390 | inptr = 0; | 455 | inptr = 0; |
391 | 456 | ||
392 | #ifdef CONFIG_X86_64 | 457 | #ifdef CONFIG_X86_64 |
393 | if ((ulg)output & (__KERNEL_ALIGN - 1)) | 458 | if ((unsigned long)output & (__KERNEL_ALIGN - 1)) |
394 | error("Destination address not 2M aligned"); | 459 | error("Destination address not 2M aligned"); |
395 | if ((ulg)output >= 0xffffffffffUL) | 460 | if ((unsigned long)output >= 0xffffffffffUL) |
396 | error("Destination address too large"); | 461 | error("Destination address too large"); |
397 | #else | 462 | #else |
398 | if ((u32)output & (CONFIG_PHYSICAL_ALIGN -1)) | 463 | if ((u32)output & (CONFIG_PHYSICAL_ALIGN - 1)) |
399 | error("Destination address not CONFIG_PHYSICAL_ALIGN aligned"); | 464 | error("Destination address not CONFIG_PHYSICAL_ALIGN aligned"); |
400 | if (heap > ((-__PAGE_OFFSET-(512<<20)-1) & 0x7fffffff)) | 465 | if (heap > ((-__PAGE_OFFSET-(512<<20)-1) & 0x7fffffff)) |
401 | error("Destination address too large"); | 466 | error("Destination address too large"); |
@@ -408,6 +473,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, | |||
408 | makecrc(); | 473 | makecrc(); |
409 | putstr("\nDecompressing Linux... "); | 474 | putstr("\nDecompressing Linux... "); |
410 | gunzip(); | 475 | gunzip(); |
476 | parse_elf(output); | ||
411 | putstr("done.\nBooting the kernel.\n"); | 477 | putstr("done.\nBooting the kernel.\n"); |
412 | return; | 478 | return; |
413 | } | 479 | } |
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c index 769065bd23d7..2462c88689ed 100644 --- a/arch/x86/boot/cpucheck.c +++ b/arch/x86/boot/cpucheck.c | |||
@@ -56,27 +56,27 @@ static const u32 req_flags[NCAPINTS] = | |||
56 | REQUIRED_MASK7, | 56 | REQUIRED_MASK7, |
57 | }; | 57 | }; |
58 | 58 | ||
59 | #define A32(a,b,c,d) (((d) << 24)+((c) << 16)+((b) << 8)+(a)) | 59 | #define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a)) |
60 | 60 | ||
61 | static int is_amd(void) | 61 | static int is_amd(void) |
62 | { | 62 | { |
63 | return cpu_vendor[0] == A32('A','u','t','h') && | 63 | return cpu_vendor[0] == A32('A', 'u', 't', 'h') && |
64 | cpu_vendor[1] == A32('e','n','t','i') && | 64 | cpu_vendor[1] == A32('e', 'n', 't', 'i') && |
65 | cpu_vendor[2] == A32('c','A','M','D'); | 65 | cpu_vendor[2] == A32('c', 'A', 'M', 'D'); |
66 | } | 66 | } |
67 | 67 | ||
68 | static int is_centaur(void) | 68 | static int is_centaur(void) |
69 | { | 69 | { |
70 | return cpu_vendor[0] == A32('C','e','n','t') && | 70 | return cpu_vendor[0] == A32('C', 'e', 'n', 't') && |
71 | cpu_vendor[1] == A32('a','u','r','H') && | 71 | cpu_vendor[1] == A32('a', 'u', 'r', 'H') && |
72 | cpu_vendor[2] == A32('a','u','l','s'); | 72 | cpu_vendor[2] == A32('a', 'u', 'l', 's'); |
73 | } | 73 | } |
74 | 74 | ||
75 | static int is_transmeta(void) | 75 | static int is_transmeta(void) |
76 | { | 76 | { |
77 | return cpu_vendor[0] == A32('G','e','n','u') && | 77 | return cpu_vendor[0] == A32('G', 'e', 'n', 'u') && |
78 | cpu_vendor[1] == A32('i','n','e','T') && | 78 | cpu_vendor[1] == A32('i', 'n', 'e', 'T') && |
79 | cpu_vendor[2] == A32('M','x','8','6'); | 79 | cpu_vendor[2] == A32('M', 'x', '8', '6'); |
80 | } | 80 | } |
81 | 81 | ||
82 | static int has_fpu(void) | 82 | static int has_fpu(void) |
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 64ad9016585a..6d2df8d61c54 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <asm/page.h> | 22 | #include <asm/page.h> |
23 | #include <asm/setup.h> | 23 | #include <asm/setup.h> |
24 | #include "boot.h" | 24 | #include "boot.h" |
25 | #include "offsets.h" | ||
25 | 26 | ||
26 | SETUPSECTS = 4 /* default nr of setup-sectors */ | 27 | SETUPSECTS = 4 /* default nr of setup-sectors */ |
27 | BOOTSEG = 0x07C0 /* original address of boot-sector */ | 28 | BOOTSEG = 0x07C0 /* original address of boot-sector */ |
@@ -119,7 +120,7 @@ _start: | |||
119 | # Part 2 of the header, from the old setup.S | 120 | # Part 2 of the header, from the old setup.S |
120 | 121 | ||
121 | .ascii "HdrS" # header signature | 122 | .ascii "HdrS" # header signature |
122 | .word 0x0207 # header version number (>= 0x0105) | 123 | .word 0x0208 # header version number (>= 0x0105) |
123 | # or else old loadlin-1.5 will fail) | 124 | # or else old loadlin-1.5 will fail) |
124 | .globl realmode_swtch | 125 | .globl realmode_swtch |
125 | realmode_swtch: .word 0, 0 # default_switch, SETUPSEG | 126 | realmode_swtch: .word 0, 0 # default_switch, SETUPSEG |
@@ -223,6 +224,9 @@ hardware_subarch: .long 0 # subarchitecture, added with 2.07 | |||
223 | 224 | ||
224 | hardware_subarch_data: .quad 0 | 225 | hardware_subarch_data: .quad 0 |
225 | 226 | ||
227 | payload_offset: .long input_data | ||
228 | payload_length: .long input_data_end-input_data | ||
229 | |||
226 | # End of setup header ##################################################### | 230 | # End of setup header ##################################################### |
227 | 231 | ||
228 | .section ".inittext", "ax" | 232 | .section ".inittext", "ax" |
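Together with the generated offsets.h (see the boot Makefile change above), these two new fields let the setup header report where the compressed payload sits inside the image (input_data) and how long it is (input_data_end - input_data), so external tools can locate it; the header version is bumped to 0x0208 above to advertise the new fields.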
diff --git a/arch/x86/boot/pm.c b/arch/x86/boot/pm.c index 1a0f936c160b..a93cb8bded4d 100644 --- a/arch/x86/boot/pm.c +++ b/arch/x86/boot/pm.c | |||
@@ -100,7 +100,7 @@ static void reset_coprocessor(void) | |||
100 | /* | 100 | /* |
101 | * Set up the GDT | 101 | * Set up the GDT |
102 | */ | 102 | */ |
103 | #define GDT_ENTRY(flags,base,limit) \ | 103 | #define GDT_ENTRY(flags, base, limit) \ |
104 | (((u64)(base & 0xff000000) << 32) | \ | 104 | (((u64)(base & 0xff000000) << 32) | \ |
105 | ((u64)flags << 40) | \ | 105 | ((u64)flags << 40) | \ |
106 | ((u64)(limit & 0x00ff0000) << 32) | \ | 106 | ((u64)(limit & 0x00ff0000) << 32) | \ |
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c index b4248740ff0d..44dc1923c0e3 100644 --- a/arch/x86/boot/tools/build.c +++ b/arch/x86/boot/tools/build.c | |||
@@ -50,6 +50,75 @@ typedef unsigned long u32; | |||
50 | u8 buf[SETUP_SECT_MAX*512]; | 50 | u8 buf[SETUP_SECT_MAX*512]; |
51 | int is_big_kernel; | 51 | int is_big_kernel; |
52 | 52 | ||
53 | /*----------------------------------------------------------------------*/ | ||
54 | |||
55 | static const u32 crctab32[] = { | ||
56 | 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, | ||
57 | 0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, | ||
58 | 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, | ||
59 | 0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, | ||
60 | 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856, | ||
61 | 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, | ||
62 | 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, | ||
63 | 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, | ||
64 | 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, | ||
65 | 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a, | ||
66 | 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, | ||
67 | 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, | ||
68 | 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, | ||
69 | 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, | ||
70 | 0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e, | ||
71 | 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, | ||
72 | 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, | ||
73 | 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950, | ||
74 | 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, | ||
75 | 0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, | ||
76 | 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, | ||
77 | 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, | ||
78 | 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010, | ||
79 | 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, | ||
80 | 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, | ||
81 | 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, | ||
82 | 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, | ||
83 | 0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, | ||
84 | 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344, | ||
85 | 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, | ||
86 | 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, | ||
87 | 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, | ||
88 | 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, | ||
89 | 0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c, | ||
90 | 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, | ||
91 | 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, | ||
92 | 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, | ||
93 | 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, | ||
94 | 0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c, | ||
95 | 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, | ||
96 | 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, | ||
97 | 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242, | ||
98 | 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, | ||
99 | 0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, | ||
100 | 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278, | ||
101 | 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, | ||
102 | 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66, | ||
103 | 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, | ||
104 | 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, | ||
105 | 0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, | ||
106 | 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, | ||
107 | 0x2d02ef8d | ||
108 | }; | ||
109 | |||
110 | static u32 partial_crc32_one(u8 c, u32 crc) | ||
111 | { | ||
112 | return crctab32[(crc ^ c) & 0xff] ^ (crc >> 8); | ||
113 | } | ||
114 | |||
115 | static u32 partial_crc32(const u8 *s, int len, u32 crc) | ||
116 | { | ||
117 | while (len--) | ||
118 | crc = partial_crc32_one(*s++, crc); | ||
119 | return crc; | ||
120 | } | ||
121 | |||
53 | static void die(const char * str, ...) | 122 | static void die(const char * str, ...) |
54 | { | 123 | { |
55 | va_list args; | 124 | va_list args; |
@@ -74,6 +143,7 @@ int main(int argc, char ** argv) | |||
74 | FILE *file; | 143 | FILE *file; |
75 | int fd; | 144 | int fd; |
76 | void *kernel; | 145 | void *kernel; |
146 | u32 crc = 0xffffffffUL; | ||
77 | 147 | ||
78 | if (argc > 2 && !strcmp(argv[1], "-b")) | 148 | if (argc > 2 && !strcmp(argv[1], "-b")) |
79 | { | 149 | { |
@@ -144,7 +214,8 @@ int main(int argc, char ** argv) | |||
144 | kernel = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0); | 214 | kernel = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0); |
145 | if (kernel == MAP_FAILED) | 215 | if (kernel == MAP_FAILED) |
146 | die("Unable to mmap '%s': %m", argv[2]); | 216 | die("Unable to mmap '%s': %m", argv[2]); |
147 | sys_size = (sz + 15) / 16; | 217 | /* Number of 16-byte paragraphs, including space for a 4-byte CRC */ |
218 | sys_size = (sz + 15 + 4) / 16; | ||
148 | if (!is_big_kernel && sys_size > DEF_SYSSIZE) | 219 | if (!is_big_kernel && sys_size > DEF_SYSSIZE) |
149 | die("System is too big. Try using bzImage or modules."); | 220 | die("System is too big. Try using bzImage or modules."); |
150 | 221 | ||
@@ -155,12 +226,27 @@ int main(int argc, char ** argv) | |||
155 | buf[0x1f6] = sys_size >> 16; | 226 | buf[0x1f6] = sys_size >> 16; |
156 | buf[0x1f7] = sys_size >> 24; | 227 | buf[0x1f7] = sys_size >> 24; |
157 | 228 | ||
229 | crc = partial_crc32(buf, i, crc); | ||
158 | if (fwrite(buf, 1, i, stdout) != i) | 230 | if (fwrite(buf, 1, i, stdout) != i) |
159 | die("Writing setup failed"); | 231 | die("Writing setup failed"); |
160 | 232 | ||
161 | /* Copy the kernel code */ | 233 | /* Copy the kernel code */ |
234 | crc = partial_crc32(kernel, sz, crc); | ||
162 | if (fwrite(kernel, 1, sz, stdout) != sz) | 235 | if (fwrite(kernel, 1, sz, stdout) != sz) |
163 | die("Writing kernel failed"); | 236 | die("Writing kernel failed"); |
237 | |||
238 | /* Add padding leaving 4 bytes for the checksum */ | ||
239 | while (sz++ < (sys_size*16) - 4) { | ||
240 | crc = partial_crc32_one('\0', crc); | ||
241 | if (fwrite("\0", 1, 1, stdout) != 1) | ||
242 | die("Writing padding failed"); | ||
243 | } | ||
244 | |||
245 | /* Write the CRC */ | ||
246 | fprintf(stderr, "CRC %lx\n", crc); | ||
247 | if (fwrite(&crc, 1, 4, stdout) != 4) | ||
248 | die("Writing CRC failed"); | ||
249 | |||
164 | close(fd); | 250 | close(fd); |
165 | 251 | ||
166 | /* Everything is OK */ | 252 | /* Everything is OK */ |
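
The hunk above has build.c checksum everything it emits: the register is seeded with 0xffffffff, folded over the setup sectors, the kernel image and the zero padding, and the raw register value (no final XOR) becomes the image's last four bytes. The table excerpt matches the standard reflected CRC-32 polynomial 0xedb88320. Two caveats: crc is a u32, so the "%lx" in the debug fprintf is a mismatched specifier on 64-bit build hosts, and fwrite(&crc, 1, 4, ...) stores host byte order. A minimal standalone sketch (not the kernel tool) reproducing the same table and folding:

#include <stdint.h>
#include <stdio.h>

static uint32_t crctab32[256];

/* Build the standard reflected CRC-32 table (polynomial 0xedb88320). */
static void init_crctab32(void)
{
	uint32_t n, c;
	int k;

	for (n = 0; n < 256; n++) {
		c = n;
		for (k = 0; k < 8; k++)
			c = (c & 1) ? 0xedb88320 ^ (c >> 1) : c >> 1;
		crctab32[n] = c;
	}
}

/* Same folding as partial_crc32()/partial_crc32_one() above. */
static uint32_t partial_crc32(const uint8_t *s, int len, uint32_t crc)
{
	while (len--)
		crc = crctab32[(crc ^ *s++) & 0xff] ^ (crc >> 8);
	return crc;
}

int main(void)
{
	uint32_t crc = 0xffffffff;	/* same seed as the tool */

	init_crctab32();
	crc = partial_crc32((const uint8_t *)"123456789", 9, crc);
	/* The tool emits this raw register value; a conventional
	 * CRC-32 would XOR with 0xffffffff first (giving the
	 * well-known check value 0xcbf43926 for "123456789"). */
	printf("raw register: %08x, standard crc32: %08x\n",
	       (unsigned)crc, (unsigned)(crc ^ 0xffffffff));
	return 0;
}

Because the raw, uninverted register is what gets appended, a verifier that folds the whole image, trailing CRC included, should end with a zero register, which is what makes the trailing placement convenient for a loader-side check.
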
diff --git a/arch/x86/boot/video-bios.c b/arch/x86/boot/video-bios.c index ff664a117096..39e247e96172 100644 --- a/arch/x86/boot/video-bios.c +++ b/arch/x86/boot/video-bios.c | |||
@@ -50,6 +50,7 @@ static int set_bios_mode(u8 mode) | |||
50 | if (new_mode == mode) | 50 | if (new_mode == mode) |
51 | return 0; /* Mode change OK */ | 51 | return 0; /* Mode change OK */ |
52 | 52 | ||
53 | #ifndef _WAKEUP | ||
53 | if (new_mode != boot_params.screen_info.orig_video_mode) { | 54 | if (new_mode != boot_params.screen_info.orig_video_mode) { |
54 | /* Mode setting failed, but we didn't end up where we | 55 | /* Mode setting failed, but we didn't end up where we |
55 | started. That's bad. Try to revert to the original | 56 | started. That's bad. Try to revert to the original |
@@ -59,13 +60,18 @@ static int set_bios_mode(u8 mode) | |||
59 | : "+a" (ax) | 60 | : "+a" (ax) |
60 | : : "ebx", "ecx", "edx", "esi", "edi"); | 61 | : : "ebx", "ecx", "edx", "esi", "edi"); |
61 | } | 62 | } |
63 | #endif | ||
62 | return -1; | 64 | return -1; |
63 | } | 65 | } |
64 | 66 | ||
65 | static int bios_probe(void) | 67 | static int bios_probe(void) |
66 | { | 68 | { |
67 | u8 mode; | 69 | u8 mode; |
70 | #ifdef _WAKEUP | ||
71 | u8 saved_mode = 0x03; | ||
72 | #else | ||
68 | u8 saved_mode = boot_params.screen_info.orig_video_mode; | 73 | u8 saved_mode = boot_params.screen_info.orig_video_mode; |
74 | #endif | ||
69 | u16 crtc; | 75 | u16 crtc; |
70 | struct mode_info *mi; | 76 | struct mode_info *mi; |
71 | int nmodes = 0; | 77 | int nmodes = 0; |
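
The #ifndef _WAKEUP guards are the mechanism that lets this file be compiled a second time for the ACPI wakeup path (see the realmode/ Makefile later in this series): in the wakeup build there is no boot_params, so the saved mode falls back to 0x03 (VGA text) and the revert-on-failure path is compiled out. A toy illustration of the pattern (not kernel code; the names below are made up):

#include <stdio.h>

#ifndef _WAKEUP
/* Stand-in for boot_params.screen_info.orig_video_mode, which only
 * exists in the boot-loader build. */
static unsigned char orig_video_mode = 0x07;
#endif

static unsigned char saved_mode(void)
{
#ifdef _WAKEUP
	return 0x03;	/* no boot_params at resume; assume VGA text */
#else
	return orig_video_mode;
#endif
}

int main(void)
{
	printf("saved mode: 0x%02x\n", saved_mode());
	return 0;
}

Building it once plain and once with -D_WAKEUP yields the two behaviors from a single source file, which is exactly how the realmode/ wrappers reuse the boot video code.
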
diff --git a/arch/x86/boot/video-mode.c b/arch/x86/boot/video-mode.c new file mode 100644 index 000000000000..748e8d06290a --- /dev/null +++ b/arch/x86/boot/video-mode.c | |||
@@ -0,0 +1,173 @@ | |||
1 | /* -*- linux-c -*- ------------------------------------------------------- * | ||
2 | * | ||
3 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
4 | * Copyright 2007-2008 rPath, Inc. - All Rights Reserved | ||
5 | * | ||
6 | * This file is part of the Linux kernel, and is made available under | ||
7 | * the terms of the GNU General Public License version 2. | ||
8 | * | ||
9 | * ----------------------------------------------------------------------- */ | ||
10 | |||
11 | /* | ||
12 | * arch/i386/boot/video-mode.c | ||
13 | * | ||
14 | * Set the video mode. This is separated out into a different | ||
15 | * file in order to be shared with the ACPI wakeup code. | ||
16 | */ | ||
17 | |||
18 | #include "boot.h" | ||
19 | #include "video.h" | ||
20 | #include "vesa.h" | ||
21 | |||
22 | /* | ||
23 | * Common variables | ||
24 | */ | ||
25 | int adapter; /* 0=CGA/MDA/HGC, 1=EGA, 2=VGA+ */ | ||
26 | u16 video_segment; | ||
27 | int force_x, force_y; /* Don't query the BIOS for cols/rows */ | ||
28 | |||
29 | int do_restore; /* Screen contents changed during mode flip */ | ||
30 | int graphic_mode; /* Graphic mode with linear frame buffer */ | ||
31 | |||
32 | /* Probe the video drivers and have them generate their mode lists. */ | ||
33 | void probe_cards(int unsafe) | ||
34 | { | ||
35 | struct card_info *card; | ||
36 | static u8 probed[2]; | ||
37 | |||
38 | if (probed[unsafe]) | ||
39 | return; | ||
40 | |||
41 | probed[unsafe] = 1; | ||
42 | |||
43 | for (card = video_cards; card < video_cards_end; card++) { | ||
44 | if (card->unsafe == unsafe) { | ||
45 | if (card->probe) | ||
46 | card->nmodes = card->probe(); | ||
47 | else | ||
48 | card->nmodes = 0; | ||
49 | } | ||
50 | } | ||
51 | } | ||
52 | |||
53 | /* Test if a mode is defined */ | ||
54 | int mode_defined(u16 mode) | ||
55 | { | ||
56 | struct card_info *card; | ||
57 | struct mode_info *mi; | ||
58 | int i; | ||
59 | |||
60 | for (card = video_cards; card < video_cards_end; card++) { | ||
61 | mi = card->modes; | ||
62 | for (i = 0; i < card->nmodes; i++, mi++) { | ||
63 | if (mi->mode == mode) | ||
64 | return 1; | ||
65 | } | ||
66 | } | ||
67 | |||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | /* Set mode (without recalc) */ | ||
72 | static int raw_set_mode(u16 mode, u16 *real_mode) | ||
73 | { | ||
74 | int nmode, i; | ||
75 | struct card_info *card; | ||
76 | struct mode_info *mi; | ||
77 | |||
78 | /* Drop the recalc bit if set */ | ||
79 | mode &= ~VIDEO_RECALC; | ||
80 | |||
81 | /* Scan for mode based on fixed ID, position, or resolution */ | ||
82 | nmode = 0; | ||
83 | for (card = video_cards; card < video_cards_end; card++) { | ||
84 | mi = card->modes; | ||
85 | for (i = 0; i < card->nmodes; i++, mi++) { | ||
86 | int visible = mi->x || mi->y; | ||
87 | |||
88 | if ((mode == nmode && visible) || | ||
89 | mode == mi->mode || | ||
90 | mode == (mi->y << 8)+mi->x) { | ||
91 | *real_mode = mi->mode; | ||
92 | return card->set_mode(mi); | ||
93 | } | ||
94 | |||
95 | if (visible) | ||
96 | nmode++; | ||
97 | } | ||
98 | } | ||
99 | |||
100 | /* Nothing found? Is it an "exceptional" (unprobed) mode? */ | ||
101 | for (card = video_cards; card < video_cards_end; card++) { | ||
102 | if (mode >= card->xmode_first && | ||
103 | mode < card->xmode_first+card->xmode_n) { | ||
104 | struct mode_info mix; | ||
105 | *real_mode = mix.mode = mode; | ||
106 | mix.x = mix.y = 0; | ||
107 | return card->set_mode(&mix); | ||
108 | } | ||
109 | } | ||
110 | |||
111 | /* Otherwise, failure... */ | ||
112 | return -1; | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * Recalculate the vertical video cutoff (hack!) | ||
117 | */ | ||
118 | static void vga_recalc_vertical(void) | ||
119 | { | ||
120 | unsigned int font_size, rows; | ||
121 | u16 crtc; | ||
122 | u8 pt, ov; | ||
123 | |||
124 | set_fs(0); | ||
125 | font_size = rdfs8(0x485); /* BIOS: font size (pixels) */ | ||
126 | rows = force_y ? force_y : rdfs8(0x484)+1; /* Text rows */ | ||
127 | |||
128 | rows *= font_size; /* Visible scan lines */ | ||
129 | rows--; /* ... minus one */ | ||
130 | |||
131 | crtc = vga_crtc(); | ||
132 | |||
133 | pt = in_idx(crtc, 0x11); | ||
134 | pt &= ~0x80; /* Unlock CR0-7 */ | ||
135 | out_idx(pt, crtc, 0x11); | ||
136 | |||
137 | out_idx((u8)rows, crtc, 0x12); /* Lower height register */ | ||
138 | |||
139 | ov = in_idx(crtc, 0x07); /* Overflow register */ | ||
140 | ov &= 0xbd; | ||
141 | ov |= (rows >> (8-1)) & 0x02; | ||
142 | ov |= (rows >> (9-6)) & 0x40; | ||
143 | out_idx(ov, crtc, 0x07); | ||
144 | } | ||
145 | |||
146 | /* Set mode (with recalc if specified) */ | ||
147 | int set_mode(u16 mode) | ||
148 | { | ||
149 | int rv; | ||
150 | u16 real_mode; | ||
151 | |||
152 | /* Very special mode numbers... */ | ||
153 | if (mode == VIDEO_CURRENT_MODE) | ||
154 | return 0; /* Nothing to do... */ | ||
155 | else if (mode == NORMAL_VGA) | ||
156 | mode = VIDEO_80x25; | ||
157 | else if (mode == EXTENDED_VGA) | ||
158 | mode = VIDEO_8POINT; | ||
159 | |||
160 | rv = raw_set_mode(mode, &real_mode); | ||
161 | if (rv) | ||
162 | return rv; | ||
163 | |||
164 | if (mode & VIDEO_RECALC) | ||
165 | vga_recalc_vertical(); | ||
166 | |||
167 | /* Save the canonical mode number for the kernel, not | ||
168 | an alias, size specification or menu position */ | ||
169 | #ifndef _WAKEUP | ||
170 | boot_params.hdr.vid_mode = real_mode; | ||
171 | #endif | ||
172 | return 0; | ||
173 | } | ||
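
raw_set_mode() above accepts three spellings of a mode: the Nth visible mode in probe order, a driver's fixed mode ID, or a resolution packed as (rows << 8) + cols; vga_recalc_vertical() then splits the 10-bit scanline count across CR12 and two bits of the CRTC overflow register. A small worked example of both computations (standalone, with illustrative values):

#include <stdio.h>

int main(void)
{
	/* (mi->y << 8) + mi->x: an 80x25 text mode can be requested
	 * by its resolution as mode number 0x1950. */
	printf("80x25 -> mode 0x%04x\n", (25 << 8) + 80);

	/* vga_recalc_vertical(): 25 rows of a 16-pixel font give
	 * 400 scan lines; the programmed value is rows*font_size - 1,
	 * split across CR12 (low 8 bits) and overflow bits 1 and 6. */
	unsigned rows = 25 * 16 - 1;		/* 399 = 0x18f */
	unsigned char cr12 = (unsigned char)rows;
	unsigned char ov = 0;
	ov |= (rows >> (8 - 1)) & 0x02;		/* scanline bit 8 */
	ov |= (rows >> (9 - 6)) & 0x40;		/* scanline bit 9 */
	printf("CR12=0x%02x, overflow bits=0x%02x\n", cr12, ov);
	return 0;
}
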
diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c index 419b5c273374..5d5a3f6e8b5c 100644 --- a/arch/x86/boot/video-vesa.c +++ b/arch/x86/boot/video-vesa.c | |||
@@ -24,7 +24,11 @@ static struct vesa_mode_info vminfo; | |||
24 | 24 | ||
25 | __videocard video_vesa; | 25 | __videocard video_vesa; |
26 | 26 | ||
27 | #ifndef _WAKEUP | ||
27 | static void vesa_store_mode_params_graphics(void); | 28 | static void vesa_store_mode_params_graphics(void); |
29 | #else /* _WAKEUP */ | ||
30 | static inline void vesa_store_mode_params_graphics(void) {} | ||
31 | #endif /* _WAKEUP */ | ||
28 | 32 | ||
29 | static int vesa_probe(void) | 33 | static int vesa_probe(void) |
30 | { | 34 | { |
@@ -165,6 +169,8 @@ static int vesa_set_mode(struct mode_info *mode) | |||
165 | } | 169 | } |
166 | 170 | ||
167 | 171 | ||
172 | #ifndef _WAKEUP | ||
173 | |||
168 | /* Switch DAC to 8-bit mode */ | 174 | /* Switch DAC to 8-bit mode */ |
169 | static void vesa_dac_set_8bits(void) | 175 | static void vesa_dac_set_8bits(void) |
170 | { | 176 | { |
@@ -288,6 +294,8 @@ void vesa_store_edid(void) | |||
288 | #endif /* CONFIG_FIRMWARE_EDID */ | 294 | #endif /* CONFIG_FIRMWARE_EDID */ |
289 | } | 295 | } |
290 | 296 | ||
297 | #endif /* not _WAKEUP */ | ||
298 | |||
291 | __videocard video_vesa = | 299 | __videocard video_vesa = |
292 | { | 300 | { |
293 | .card_name = "VESA", | 301 | .card_name = "VESA", |
diff --git a/arch/x86/boot/video-vga.c b/arch/x86/boot/video-vga.c index 7259387b7d19..330d6589a2ad 100644 --- a/arch/x86/boot/video-vga.c +++ b/arch/x86/boot/video-vga.c | |||
@@ -210,6 +210,8 @@ static int vga_set_mode(struct mode_info *mode) | |||
210 | */ | 210 | */ |
211 | static int vga_probe(void) | 211 | static int vga_probe(void) |
212 | { | 212 | { |
213 | u16 ega_bx; | ||
214 | |||
213 | static const char *card_name[] = { | 215 | static const char *card_name[] = { |
214 | "CGA/MDA/HGC", "EGA", "VGA" | 216 | "CGA/MDA/HGC", "EGA", "VGA" |
215 | }; | 217 | }; |
@@ -226,12 +228,16 @@ static int vga_probe(void) | |||
226 | u8 vga_flag; | 228 | u8 vga_flag; |
227 | 229 | ||
228 | asm(INT10 | 230 | asm(INT10 |
229 | : "=b" (boot_params.screen_info.orig_video_ega_bx) | 231 | : "=b" (ega_bx) |
230 | : "a" (0x1200), "b" (0x10) /* Check EGA/VGA */ | 232 | : "a" (0x1200), "b" (0x10) /* Check EGA/VGA */ |
231 | : "ecx", "edx", "esi", "edi"); | 233 | : "ecx", "edx", "esi", "edi"); |
232 | 234 | ||
235 | #ifndef _WAKEUP | ||
236 | boot_params.screen_info.orig_video_ega_bx = ega_bx; | ||
237 | #endif | ||
238 | |||
233 | /* If we have MDA/CGA/HGC then BL will be unchanged at 0x10 */ | 239 | /* If we have MDA/CGA/HGC then BL will be unchanged at 0x10 */ |
234 | if ((u8)boot_params.screen_info.orig_video_ega_bx != 0x10) { | 240 | if ((u8)ega_bx != 0x10) { |
235 | /* EGA/VGA */ | 241 | /* EGA/VGA */ |
236 | asm(INT10 | 242 | asm(INT10 |
237 | : "=a" (vga_flag) | 243 | : "=a" (vga_flag) |
@@ -240,7 +246,9 @@ static int vga_probe(void) | |||
240 | 246 | ||
241 | if (vga_flag == 0x1a) { | 247 | if (vga_flag == 0x1a) { |
242 | adapter = ADAPTER_VGA; | 248 | adapter = ADAPTER_VGA; |
249 | #ifndef _WAKEUP | ||
243 | boot_params.screen_info.orig_video_isVGA = 1; | 250 | boot_params.screen_info.orig_video_isVGA = 1; |
251 | #endif | ||
244 | } else { | 252 | } else { |
245 | adapter = ADAPTER_EGA; | 253 | adapter = ADAPTER_EGA; |
246 | } | 254 | } |
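
The probe logic above reads: INT 10h AH=0x12 BL=0x10 leaves BL unchanged at 0x10 on CGA/MDA/HGC; otherwise a follow-up INT 10h functionality call (AX=0x1a00 in the full file, which fills vga_flag) returns AL=0x1a on VGA. A decision-table sketch of that classification (illustrative only, not the real probe):

#include <stdio.h>

static const char *classify(unsigned char ega_bl, unsigned char vga_al)
{
	if (ega_bl == 0x10)		/* BL unchanged: pre-EGA hardware */
		return "CGA/MDA/HGC";
	return vga_al == 0x1a ? "VGA" : "EGA";
}

int main(void)
{
	printf("%s\n", classify(0x10, 0x00));	/* BL untouched */
	printf("%s\n", classify(0x03, 0x1a));	/* VGA answers 0x1a */
	printf("%s\n", classify(0x03, 0x00));	/* EGA */
	return 0;
}
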
diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c index 696d08f3843c..c1c47ba069ef 100644 --- a/arch/x86/boot/video.c +++ b/arch/x86/boot/video.c | |||
@@ -18,21 +18,6 @@ | |||
18 | #include "video.h" | 18 | #include "video.h" |
19 | #include "vesa.h" | 19 | #include "vesa.h" |
20 | 20 | ||
21 | /* | ||
22 | * Mode list variables | ||
23 | */ | ||
24 | static struct card_info cards[]; /* List of cards to probe for */ | ||
25 | |||
26 | /* | ||
27 | * Common variables | ||
28 | */ | ||
29 | int adapter; /* 0=CGA/MDA/HGC, 1=EGA, 2=VGA+ */ | ||
30 | u16 video_segment; | ||
31 | int force_x, force_y; /* Don't query the BIOS for cols/rows */ | ||
32 | |||
33 | int do_restore = 0; /* Screen contents changed during mode flip */ | ||
34 | int graphic_mode; /* Graphic mode with linear frame buffer */ | ||
35 | |||
36 | static void store_cursor_position(void) | 21 | static void store_cursor_position(void) |
37 | { | 22 | { |
38 | u16 curpos; | 23 | u16 curpos; |
@@ -107,147 +92,6 @@ static void store_mode_params(void) | |||
107 | boot_params.screen_info.orig_video_lines = y; | 92 | boot_params.screen_info.orig_video_lines = y; |
108 | } | 93 | } |
109 | 94 | ||
110 | /* Probe the video drivers and have them generate their mode lists. */ | ||
111 | static void probe_cards(int unsafe) | ||
112 | { | ||
113 | struct card_info *card; | ||
114 | static u8 probed[2]; | ||
115 | |||
116 | if (probed[unsafe]) | ||
117 | return; | ||
118 | |||
119 | probed[unsafe] = 1; | ||
120 | |||
121 | for (card = video_cards; card < video_cards_end; card++) { | ||
122 | if (card->unsafe == unsafe) { | ||
123 | if (card->probe) | ||
124 | card->nmodes = card->probe(); | ||
125 | else | ||
126 | card->nmodes = 0; | ||
127 | } | ||
128 | } | ||
129 | } | ||
130 | |||
131 | /* Test if a mode is defined */ | ||
132 | int mode_defined(u16 mode) | ||
133 | { | ||
134 | struct card_info *card; | ||
135 | struct mode_info *mi; | ||
136 | int i; | ||
137 | |||
138 | for (card = video_cards; card < video_cards_end; card++) { | ||
139 | mi = card->modes; | ||
140 | for (i = 0; i < card->nmodes; i++, mi++) { | ||
141 | if (mi->mode == mode) | ||
142 | return 1; | ||
143 | } | ||
144 | } | ||
145 | |||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | /* Set mode (without recalc) */ | ||
150 | static int raw_set_mode(u16 mode, u16 *real_mode) | ||
151 | { | ||
152 | int nmode, i; | ||
153 | struct card_info *card; | ||
154 | struct mode_info *mi; | ||
155 | |||
156 | /* Drop the recalc bit if set */ | ||
157 | mode &= ~VIDEO_RECALC; | ||
158 | |||
159 | /* Scan for mode based on fixed ID, position, or resolution */ | ||
160 | nmode = 0; | ||
161 | for (card = video_cards; card < video_cards_end; card++) { | ||
162 | mi = card->modes; | ||
163 | for (i = 0; i < card->nmodes; i++, mi++) { | ||
164 | int visible = mi->x || mi->y; | ||
165 | |||
166 | if ((mode == nmode && visible) || | ||
167 | mode == mi->mode || | ||
168 | mode == (mi->y << 8)+mi->x) { | ||
169 | *real_mode = mi->mode; | ||
170 | return card->set_mode(mi); | ||
171 | } | ||
172 | |||
173 | if (visible) | ||
174 | nmode++; | ||
175 | } | ||
176 | } | ||
177 | |||
178 | /* Nothing found? Is it an "exceptional" (unprobed) mode? */ | ||
179 | for (card = video_cards; card < video_cards_end; card++) { | ||
180 | if (mode >= card->xmode_first && | ||
181 | mode < card->xmode_first+card->xmode_n) { | ||
182 | struct mode_info mix; | ||
183 | *real_mode = mix.mode = mode; | ||
184 | mix.x = mix.y = 0; | ||
185 | return card->set_mode(&mix); | ||
186 | } | ||
187 | } | ||
188 | |||
189 | /* Otherwise, failure... */ | ||
190 | return -1; | ||
191 | } | ||
192 | |||
193 | /* | ||
194 | * Recalculate the vertical video cutoff (hack!) | ||
195 | */ | ||
196 | static void vga_recalc_vertical(void) | ||
197 | { | ||
198 | unsigned int font_size, rows; | ||
199 | u16 crtc; | ||
200 | u8 pt, ov; | ||
201 | |||
202 | set_fs(0); | ||
203 | font_size = rdfs8(0x485); /* BIOS: font size (pixels) */ | ||
204 | rows = force_y ? force_y : rdfs8(0x484)+1; /* Text rows */ | ||
205 | |||
206 | rows *= font_size; /* Visible scan lines */ | ||
207 | rows--; /* ... minus one */ | ||
208 | |||
209 | crtc = vga_crtc(); | ||
210 | |||
211 | pt = in_idx(crtc, 0x11); | ||
212 | pt &= ~0x80; /* Unlock CR0-7 */ | ||
213 | out_idx(pt, crtc, 0x11); | ||
214 | |||
215 | out_idx((u8)rows, crtc, 0x12); /* Lower height register */ | ||
216 | |||
217 | ov = in_idx(crtc, 0x07); /* Overflow register */ | ||
218 | ov &= 0xbd; | ||
219 | ov |= (rows >> (8-1)) & 0x02; | ||
220 | ov |= (rows >> (9-6)) & 0x40; | ||
221 | out_idx(ov, crtc, 0x07); | ||
222 | } | ||
223 | |||
224 | /* Set mode (with recalc if specified) */ | ||
225 | static int set_mode(u16 mode) | ||
226 | { | ||
227 | int rv; | ||
228 | u16 real_mode; | ||
229 | |||
230 | /* Very special mode numbers... */ | ||
231 | if (mode == VIDEO_CURRENT_MODE) | ||
232 | return 0; /* Nothing to do... */ | ||
233 | else if (mode == NORMAL_VGA) | ||
234 | mode = VIDEO_80x25; | ||
235 | else if (mode == EXTENDED_VGA) | ||
236 | mode = VIDEO_8POINT; | ||
237 | |||
238 | rv = raw_set_mode(mode, &real_mode); | ||
239 | if (rv) | ||
240 | return rv; | ||
241 | |||
242 | if (mode & VIDEO_RECALC) | ||
243 | vga_recalc_vertical(); | ||
244 | |||
245 | /* Save the canonical mode number for the kernel, not | ||
246 | an alias, size specification or menu position */ | ||
247 | boot_params.hdr.vid_mode = real_mode; | ||
248 | return 0; | ||
249 | } | ||
250 | |||
251 | static unsigned int get_entry(void) | 95 | static unsigned int get_entry(void) |
252 | { | 96 | { |
253 | char entry_buf[4]; | 97 | char entry_buf[4]; |
@@ -486,6 +330,7 @@ void set_video(void) | |||
486 | printf("Undefined video mode number: %x\n", mode); | 330 | printf("Undefined video mode number: %x\n", mode); |
487 | mode = ASK_VGA; | 331 | mode = ASK_VGA; |
488 | } | 332 | } |
333 | boot_params.hdr.vid_mode = mode; | ||
489 | vesa_store_edid(); | 334 | vesa_store_edid(); |
490 | store_mode_params(); | 335 | store_mode_params(); |
491 | 336 | ||
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index 5e7771a3ba2f..05e155d3fb6c 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c | |||
@@ -468,7 +468,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka, | |||
468 | restorer = ka->sa.sa_restorer; | 468 | restorer = ka->sa.sa_restorer; |
469 | } else { | 469 | } else { |
470 | /* Return stub is in 32bit vsyscall page */ | 470 | /* Return stub is in 32bit vsyscall page */ |
471 | if (current->binfmt->hasvdso) | 471 | if (current->mm->context.vdso) |
472 | restorer = VDSO32_SYMBOL(current->mm->context.vdso, | 472 | restorer = VDSO32_SYMBOL(current->mm->context.vdso, |
473 | sigreturn); | 473 | sigreturn); |
474 | else | 474 | else |
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 8022d3c695c0..ae7158bce4d6 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S | |||
@@ -162,12 +162,14 @@ sysenter_tracesys: | |||
162 | SAVE_REST | 162 | SAVE_REST |
163 | CLEAR_RREGS | 163 | CLEAR_RREGS |
164 | movq %r9,R9(%rsp) | 164 | movq %r9,R9(%rsp) |
165 | movq $-ENOSYS,RAX(%rsp) /* really needed? */ | 165 | movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */ |
166 | movq %rsp,%rdi /* &pt_regs -> arg1 */ | 166 | movq %rsp,%rdi /* &pt_regs -> arg1 */ |
167 | call syscall_trace_enter | 167 | call syscall_trace_enter |
168 | LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ | 168 | LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ |
169 | RESTORE_REST | 169 | RESTORE_REST |
170 | xchgl %ebp,%r9d | 170 | xchgl %ebp,%r9d |
171 | cmpl $(IA32_NR_syscalls-1),%eax | ||
172 | ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */ | ||
171 | jmp sysenter_do_call | 173 | jmp sysenter_do_call |
172 | CFI_ENDPROC | 174 | CFI_ENDPROC |
173 | ENDPROC(ia32_sysenter_target) | 175 | ENDPROC(ia32_sysenter_target) |
@@ -261,13 +263,15 @@ cstar_tracesys: | |||
261 | SAVE_REST | 263 | SAVE_REST |
262 | CLEAR_RREGS | 264 | CLEAR_RREGS |
263 | movq %r9,R9(%rsp) | 265 | movq %r9,R9(%rsp) |
264 | movq $-ENOSYS,RAX(%rsp) /* really needed? */ | 266 | movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ |
265 | movq %rsp,%rdi /* &pt_regs -> arg1 */ | 267 | movq %rsp,%rdi /* &pt_regs -> arg1 */ |
266 | call syscall_trace_enter | 268 | call syscall_trace_enter |
267 | LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ | 269 | LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ |
268 | RESTORE_REST | 270 | RESTORE_REST |
269 | xchgl %ebp,%r9d | 271 | xchgl %ebp,%r9d |
270 | movl RSP-ARGOFFSET(%rsp), %r8d | 272 | movl RSP-ARGOFFSET(%rsp), %r8d |
273 | cmpl $(IA32_NR_syscalls-1),%eax | ||
274 | ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */ | ||
271 | jmp cstar_do_call | 275 | jmp cstar_do_call |
272 | END(ia32_cstar_target) | 276 | END(ia32_cstar_target) |
273 | 277 | ||
@@ -325,7 +329,7 @@ ENTRY(ia32_syscall) | |||
325 | jnz ia32_tracesys | 329 | jnz ia32_tracesys |
326 | ia32_do_syscall: | 330 | ia32_do_syscall: |
327 | cmpl $(IA32_NR_syscalls-1),%eax | 331 | cmpl $(IA32_NR_syscalls-1),%eax |
328 | ja ia32_badsys | 332 | ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ |
329 | IA32_ARG_FIXUP | 333 | IA32_ARG_FIXUP |
330 | call *ia32_sys_call_table(,%rax,8) # xxx: rip relative | 334 | call *ia32_sys_call_table(,%rax,8) # xxx: rip relative |
331 | ia32_sysret: | 335 | ia32_sysret: |
@@ -335,7 +339,7 @@ ia32_sysret: | |||
335 | ia32_tracesys: | 339 | ia32_tracesys: |
336 | SAVE_REST | 340 | SAVE_REST |
337 | CLEAR_RREGS | 341 | CLEAR_RREGS |
338 | movq $-ENOSYS,RAX(%rsp) /* really needed? */ | 342 | movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ |
339 | movq %rsp,%rdi /* &pt_regs -> arg1 */ | 343 | movq %rsp,%rdi /* &pt_regs -> arg1 */ |
340 | call syscall_trace_enter | 344 | call syscall_trace_enter |
341 | LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ | 345 | LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ |
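
The three cmpl/ja additions above close the same hole: between the movq $-ENOSYS,RAX(%rsp) store and the dispatch, syscall_trace_enter() lets ptrace rewrite the syscall number, so it must be range-checked again; an out-of-range number falls through to int_ret_from_sys_call with the stored -ENOSYS as the return value. A C-level sketch of that control flow (the syscall count below is an assumed value for illustration):

#include <errno.h>
#include <stdio.h>

#define IA32_NR_syscalls 327	/* assumed count, illustration only */

static long sys_stub(void) { return 42; }
static long (*table[IA32_NR_syscalls])(void) = { sys_stub };

static long traced_dispatch(unsigned int nr)
{
	long rax = -ENOSYS;	/* movq $-ENOSYS,RAX(%rsp) */
	/* ... syscall_trace_enter() runs here; ptrace may change nr ... */
	if (nr > IA32_NR_syscalls - 1)
		return rax;	/* ja int_ret_from_sys_call */
	return table[nr]();	/* call *ia32_sys_call_table(,%rax,8) */
}

int main(void)
{
	printf("%ld %ld\n", traced_dispatch(0), traced_dispatch(100000));
	return 0;
}

The unsigned compare mirrors the assembly's ja: a "negative" syscall number written by the tracer also lands above the limit and keeps the -ENOSYS result.
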
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index abf71d26fc2a..7cede7a9e0dc 100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c | |||
@@ -26,51 +26,27 @@ | |||
26 | #include <linux/file.h> | 26 | #include <linux/file.h> |
27 | #include <linux/signal.h> | 27 | #include <linux/signal.h> |
28 | #include <linux/syscalls.h> | 28 | #include <linux/syscalls.h> |
29 | #include <linux/resource.h> | ||
30 | #include <linux/times.h> | 29 | #include <linux/times.h> |
31 | #include <linux/utsname.h> | 30 | #include <linux/utsname.h> |
32 | #include <linux/smp.h> | ||
33 | #include <linux/smp_lock.h> | 31 | #include <linux/smp_lock.h> |
34 | #include <linux/sem.h> | ||
35 | #include <linux/msg.h> | ||
36 | #include <linux/mm.h> | 32 | #include <linux/mm.h> |
37 | #include <linux/shm.h> | ||
38 | #include <linux/slab.h> | ||
39 | #include <linux/uio.h> | 33 | #include <linux/uio.h> |
40 | #include <linux/nfs_fs.h> | ||
41 | #include <linux/quota.h> | ||
42 | #include <linux/module.h> | ||
43 | #include <linux/sunrpc/svc.h> | ||
44 | #include <linux/nfsd/nfsd.h> | ||
45 | #include <linux/nfsd/cache.h> | ||
46 | #include <linux/nfsd/xdr.h> | ||
47 | #include <linux/nfsd/syscall.h> | ||
48 | #include <linux/poll.h> | 34 | #include <linux/poll.h> |
49 | #include <linux/personality.h> | 35 | #include <linux/personality.h> |
50 | #include <linux/stat.h> | 36 | #include <linux/stat.h> |
51 | #include <linux/ipc.h> | ||
52 | #include <linux/rwsem.h> | 37 | #include <linux/rwsem.h> |
53 | #include <linux/binfmts.h> | ||
54 | #include <linux/init.h> | ||
55 | #include <linux/aio_abi.h> | ||
56 | #include <linux/aio.h> | ||
57 | #include <linux/compat.h> | 38 | #include <linux/compat.h> |
58 | #include <linux/vfs.h> | 39 | #include <linux/vfs.h> |
59 | #include <linux/ptrace.h> | 40 | #include <linux/ptrace.h> |
60 | #include <linux/highuid.h> | 41 | #include <linux/highuid.h> |
61 | #include <linux/vmalloc.h> | ||
62 | #include <linux/fsnotify.h> | ||
63 | #include <linux/sysctl.h> | 42 | #include <linux/sysctl.h> |
64 | #include <asm/mman.h> | 43 | #include <asm/mman.h> |
65 | #include <asm/types.h> | 44 | #include <asm/types.h> |
66 | #include <asm/uaccess.h> | 45 | #include <asm/uaccess.h> |
67 | #include <asm/semaphore.h> | 46 | #include <asm/semaphore.h> |
68 | #include <asm/atomic.h> | 47 | #include <asm/atomic.h> |
69 | #include <asm/ldt.h> | ||
70 | |||
71 | #include <net/scm.h> | ||
72 | #include <net/sock.h> | ||
73 | #include <asm/ia32.h> | 48 | #include <asm/ia32.h> |
49 | #include <asm/vgtod.h> | ||
74 | 50 | ||
75 | #define AA(__x) ((unsigned long)(__x)) | 51 | #define AA(__x) ((unsigned long)(__x)) |
76 | 52 | ||
@@ -804,11 +780,6 @@ asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv, | |||
804 | if (IS_ERR(filename)) | 780 | if (IS_ERR(filename)) |
805 | return error; | 781 | return error; |
806 | error = compat_do_execve(filename, argv, envp, regs); | 782 | error = compat_do_execve(filename, argv, envp, regs); |
807 | if (error == 0) { | ||
808 | task_lock(current); | ||
809 | current->ptrace &= ~PT_DTRACE; | ||
810 | task_unlock(current); | ||
811 | } | ||
812 | putname(filename); | 783 | putname(filename); |
813 | return error; | 784 | return error; |
814 | } | 785 | } |

diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 4eb5ce841106..c3920ea8ac56 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -2,8 +2,7 @@ | |||
2 | # Makefile for the linux kernel. | 2 | # Makefile for the linux kernel. |
3 | # | 3 | # |
4 | 4 | ||
5 | extra-y := head_$(BITS).o init_task.o vmlinux.lds | 5 | extra-y := head_$(BITS).o head$(BITS).o init_task.o vmlinux.lds |
6 | extra-$(CONFIG_X86_64) += head64.o | ||
7 | 6 | ||
8 | CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE) | 7 | CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE) |
9 | 8 | ||
@@ -19,7 +18,7 @@ CFLAGS_tsc_64.o := $(nostackp) | |||
19 | obj-y := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o | 18 | obj-y := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o |
20 | obj-y += traps_$(BITS).o irq_$(BITS).o | 19 | obj-y += traps_$(BITS).o irq_$(BITS).o |
21 | obj-y += time_$(BITS).o ioport.o ldt.o | 20 | obj-y += time_$(BITS).o ioport.o ldt.o |
22 | obj-y += setup_$(BITS).o i8259_$(BITS).o | 21 | obj-y += setup_$(BITS).o i8259_$(BITS).o setup.o |
23 | obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o | 22 | obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o |
24 | obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o | 23 | obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o |
25 | obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o setup64.o | 24 | obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o setup64.o |
@@ -29,6 +28,7 @@ obj-y += alternative.o i8253.o | |||
29 | obj-$(CONFIG_X86_64) += pci-nommu_64.o bugs_64.o | 28 | obj-$(CONFIG_X86_64) += pci-nommu_64.o bugs_64.o |
30 | obj-y += tsc_$(BITS).o io_delay.o rtc.o | 29 | obj-y += tsc_$(BITS).o io_delay.o rtc.o |
31 | 30 | ||
31 | obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o | ||
32 | obj-y += i387.o | 32 | obj-y += i387.o |
33 | obj-y += ptrace.o | 33 | obj-y += ptrace.o |
34 | obj-y += ds.o | 34 | obj-y += ds.o |
@@ -47,11 +47,12 @@ obj-$(CONFIG_MICROCODE) += microcode.o | |||
47 | obj-$(CONFIG_PCI) += early-quirks.o | 47 | obj-$(CONFIG_PCI) += early-quirks.o |
48 | apm-y := apm_32.o | 48 | apm-y := apm_32.o |
49 | obj-$(CONFIG_APM) += apm.o | 49 | obj-$(CONFIG_APM) += apm.o |
50 | obj-$(CONFIG_X86_SMP) += smp_$(BITS).o smpboot_$(BITS).o tsc_sync.o | 50 | obj-$(CONFIG_X86_SMP) += smp.o |
51 | obj-$(CONFIG_X86_32_SMP) += smpcommon_32.o | 51 | obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o tlb_$(BITS).o |
52 | obj-$(CONFIG_X86_64_SMP) += smp_64.o smpboot_64.o tsc_sync.o | 52 | obj-$(CONFIG_X86_32_SMP) += smpcommon.o |
53 | obj-$(CONFIG_X86_64_SMP) += tsc_sync.o smpcommon.o | ||
53 | obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o | 54 | obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o |
54 | obj-$(CONFIG_X86_MPPARSE) += mpparse_$(BITS).o | 55 | obj-$(CONFIG_X86_MPPARSE) += mpparse.o |
55 | obj-$(CONFIG_X86_LOCAL_APIC) += apic_$(BITS).o nmi_$(BITS).o | 56 | obj-$(CONFIG_X86_LOCAL_APIC) += apic_$(BITS).o nmi_$(BITS).o |
56 | obj-$(CONFIG_X86_IO_APIC) += io_apic_$(BITS).o | 57 | obj-$(CONFIG_X86_IO_APIC) += io_apic_$(BITS).o |
57 | obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o | 58 | obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o |
@@ -60,12 +61,13 @@ obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o | |||
60 | obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o | 61 | obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o |
61 | obj-$(CONFIG_X86_NUMAQ) += numaq_32.o | 62 | obj-$(CONFIG_X86_NUMAQ) += numaq_32.o |
62 | obj-$(CONFIG_X86_SUMMIT_NUMA) += summit_32.o | 63 | obj-$(CONFIG_X86_SUMMIT_NUMA) += summit_32.o |
63 | obj-$(CONFIG_X86_VSMP) += vsmp_64.o | 64 | obj-y += vsmp_64.o |
64 | obj-$(CONFIG_KPROBES) += kprobes.o | 65 | obj-$(CONFIG_KPROBES) += kprobes.o |
65 | obj-$(CONFIG_MODULES) += module_$(BITS).o | 66 | obj-$(CONFIG_MODULES) += module_$(BITS).o |
66 | obj-$(CONFIG_ACPI_SRAT) += srat_32.o | 67 | obj-$(CONFIG_ACPI_SRAT) += srat_32.o |
67 | obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o | 68 | obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o |
68 | obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o | 69 | obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o |
70 | obj-$(CONFIG_KGDB) += kgdb.o | ||
69 | obj-$(CONFIG_VM86) += vm86_32.o | 71 | obj-$(CONFIG_VM86) += vm86_32.o |
70 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | 72 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o |
71 | 73 | ||
@@ -89,7 +91,7 @@ scx200-y += scx200_32.o | |||
89 | ### | 91 | ### |
90 | # 64 bit specific files | 92 | # 64 bit specific files |
91 | ifeq ($(CONFIG_X86_64),y) | 93 | ifeq ($(CONFIG_X86_64),y) |
92 | obj-y += genapic_64.o genapic_flat_64.o | 94 | obj-y += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o |
93 | obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o | 95 | obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o |
94 | obj-$(CONFIG_AUDIT) += audit_64.o | 96 | obj-$(CONFIG_AUDIT) += audit_64.o |
95 | 97 | ||
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile index 19d3d6e9d09b..7335959b6aff 100644 --- a/arch/x86/kernel/acpi/Makefile +++ b/arch/x86/kernel/acpi/Makefile | |||
@@ -1,7 +1,14 @@ | |||
1 | subdir- := realmode | ||
2 | |||
1 | obj-$(CONFIG_ACPI) += boot.o | 3 | obj-$(CONFIG_ACPI) += boot.o |
2 | obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_$(BITS).o | 4 | obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_rm.o wakeup_$(BITS).o |
3 | 5 | ||
4 | ifneq ($(CONFIG_ACPI_PROCESSOR),) | 6 | ifneq ($(CONFIG_ACPI_PROCESSOR),) |
5 | obj-y += cstate.o processor.o | 7 | obj-y += cstate.o processor.o |
6 | endif | 8 | endif |
7 | 9 | ||
10 | $(obj)/wakeup_rm.o: $(obj)/realmode/wakeup.bin | ||
11 | |||
12 | $(obj)/realmode/wakeup.bin: FORCE | ||
13 | $(Q)$(MAKE) $(build)=$(obj)/realmode $@ | ||
14 | |||
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 2cdc9de9371d..057ccf1d5ad4 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -39,6 +39,11 @@ | |||
39 | #include <asm/apic.h> | 39 | #include <asm/apic.h> |
40 | #include <asm/io.h> | 40 | #include <asm/io.h> |
41 | #include <asm/mpspec.h> | 41 | #include <asm/mpspec.h> |
42 | #include <asm/smp.h> | ||
43 | |||
44 | #ifdef CONFIG_X86_LOCAL_APIC | ||
45 | # include <mach_apic.h> | ||
46 | #endif | ||
42 | 47 | ||
43 | static int __initdata acpi_force = 0; | 48 | static int __initdata acpi_force = 0; |
44 | 49 | ||
@@ -52,9 +57,7 @@ EXPORT_SYMBOL(acpi_disabled); | |||
52 | #ifdef CONFIG_X86_64 | 57 | #ifdef CONFIG_X86_64 |
53 | 58 | ||
54 | #include <asm/proto.h> | 59 | #include <asm/proto.h> |
55 | | 60 | #include <asm/genapic.h> |
56 | static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; } | ||
57 | |||
58 | 61 | ||
59 | #else /* X86 */ | 62 | #else /* X86 */ |
60 | 63 | ||
@@ -111,7 +114,7 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size) | |||
111 | if (!phys_addr || !size) | 114 | if (!phys_addr || !size) |
112 | return NULL; | 115 | return NULL; |
113 | 116 | ||
114 | if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE) | 117 | if (phys_addr+size <= (max_pfn_mapped << PAGE_SHIFT) + PAGE_SIZE) |
115 | return __va(phys_addr); | 118 | return __va(phys_addr); |
116 | 119 | ||
117 | return NULL; | 120 | return NULL; |
@@ -237,6 +240,16 @@ static int __init acpi_parse_madt(struct acpi_table_header *table) | |||
237 | return 0; | 240 | return 0; |
238 | } | 241 | } |
239 | 242 | ||
243 | static void __cpuinit acpi_register_lapic(int id, u8 enabled) | ||
244 | { | ||
245 | if (!enabled) { | ||
246 | ++disabled_cpus; | ||
247 | return; | ||
248 | } | ||
249 | |||
250 | generic_processor_info(id, 0); | ||
251 | } | ||
252 | |||
240 | static int __init | 253 | static int __init |
241 | acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end) | 254 | acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end) |
242 | { | 255 | { |
@@ -256,8 +269,26 @@ acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end) | |||
256 | * to not preallocating memory for all NR_CPUS | 269 | * to not preallocating memory for all NR_CPUS |
257 | * when we use CPU hotplug. | 270 | * when we use CPU hotplug. |
258 | */ | 271 | */ |
259 | mp_register_lapic(processor->id, /* APIC ID */ | 272 | acpi_register_lapic(processor->id, /* APIC ID */ |
260 | processor->lapic_flags & ACPI_MADT_ENABLED); /* Enabled? */ | 273 | processor->lapic_flags & ACPI_MADT_ENABLED); |
274 | |||
275 | return 0; | ||
276 | } | ||
277 | |||
278 | static int __init | ||
279 | acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end) | ||
280 | { | ||
281 | struct acpi_madt_local_sapic *processor = NULL; | ||
282 | |||
283 | processor = (struct acpi_madt_local_sapic *)header; | ||
284 | |||
285 | if (BAD_MADT_ENTRY(processor, end)) | ||
286 | return -EINVAL; | ||
287 | |||
288 | acpi_table_print_madt_entry(header); | ||
289 | |||
290 | acpi_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */ | ||
291 | processor->lapic_flags & ACPI_MADT_ENABLED); | ||
261 | 292 | ||
262 | return 0; | 293 | return 0; |
263 | } | 294 | } |
@@ -300,6 +331,8 @@ acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long e | |||
300 | 331 | ||
301 | #ifdef CONFIG_X86_IO_APIC | 332 | #ifdef CONFIG_X86_IO_APIC |
302 | 333 | ||
334 | struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS]; | ||
335 | |||
303 | static int __init | 336 | static int __init |
304 | acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end) | 337 | acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end) |
305 | { | 338 | { |
@@ -532,7 +565,7 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu) | |||
532 | buffer.pointer = NULL; | 565 | buffer.pointer = NULL; |
533 | 566 | ||
534 | tmp_map = cpu_present_map; | 567 | tmp_map = cpu_present_map; |
535 | mp_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED); | 568 | acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED); |
536 | 569 | ||
537 | /* | 570 | /* |
538 | * If mp_register_lapic successfully generates a new logical cpu | 571 | * If mp_register_lapic successfully generates a new logical cpu |
@@ -732,6 +765,16 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table) | |||
732 | * Parse LAPIC entries in MADT | 765 | * Parse LAPIC entries in MADT |
733 | * returns 0 on success, < 0 on error | 766 | * returns 0 on success, < 0 on error |
734 | */ | 767 | */ |
768 | |||
769 | static void __init acpi_register_lapic_address(unsigned long address) | ||
770 | { | ||
771 | mp_lapic_addr = address; | ||
772 | |||
773 | set_fixmap_nocache(FIX_APIC_BASE, address); | ||
774 | if (boot_cpu_physical_apicid == -1U) | ||
775 | boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); | ||
776 | } | ||
777 | |||
735 | static int __init acpi_parse_madt_lapic_entries(void) | 778 | static int __init acpi_parse_madt_lapic_entries(void) |
736 | { | 779 | { |
737 | int count; | 780 | int count; |
@@ -753,10 +796,14 @@ static int __init acpi_parse_madt_lapic_entries(void) | |||
753 | return count; | 796 | return count; |
754 | } | 797 | } |
755 | 798 | ||
756 | mp_register_lapic_address(acpi_lapic_addr); | 799 | acpi_register_lapic_address(acpi_lapic_addr); |
800 | |||
801 | count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, | ||
802 | acpi_parse_sapic, MAX_APICS); | ||
757 | 803 | ||
758 | count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic, | 804 | if (!count) |
759 | MAX_APICS); | 805 | count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, |
806 | acpi_parse_lapic, MAX_APICS); | ||
760 | if (!count) { | 807 | if (!count) { |
761 | printk(KERN_ERR PREFIX "No LAPIC entries present\n"); | 808 | printk(KERN_ERR PREFIX "No LAPIC entries present\n"); |
762 | /* TBD: Cleanup to allow fallback to MPS */ | 809 | /* TBD: Cleanup to allow fallback to MPS */ |
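
acpi_parse_sapic() above folds the two MADT fields into one identifier before handing it to the common acpi_register_lapic(): the 8-bit id becomes the high byte and the 8-bit eid the low byte. Trivially, with example field values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t id = 0x12, eid = 0x34;	/* example MADT field values */
	unsigned apic_id = ((unsigned)id << 8) | eid;

	printf("sapic id=0x%02x eid=0x%02x -> apic id 0x%04x\n",
	       id, eid, apic_id);
	return 0;
}
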
diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile new file mode 100644 index 000000000000..092900854acc --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/Makefile | |||
@@ -0,0 +1,57 @@ | |||
1 | # | ||
2 | # arch/x86/kernel/acpi/realmode/Makefile | ||
3 | # | ||
4 | # This file is subject to the terms and conditions of the GNU General Public | ||
5 | # License. See the file "COPYING" in the main directory of this archive | ||
6 | # for more details. | ||
7 | # | ||
8 | |||
9 | targets := wakeup.bin wakeup.elf | ||
10 | |||
11 | wakeup-y += wakeup.o wakemain.o video-mode.o copy.o | ||
12 | |||
13 | # The link order of the video-*.o modules can matter. In particular, | ||
14 | # video-vga.o *must* be listed first, followed by video-vesa.o. | ||
15 | # Hardware-specific drivers should follow in the order they should be | ||
16 | # probed, and video-bios.o should typically be last. | ||
17 | wakeup-y += video-vga.o | ||
18 | wakeup-y += video-vesa.o | ||
19 | wakeup-y += video-bios.o | ||
20 | |||
21 | targets += $(wakeup-y) | ||
22 | |||
23 | bootsrc := $(src)/../../../boot | ||
24 | |||
25 | # --------------------------------------------------------------------------- | ||
26 | |||
27 | # How to compile the 16-bit code. Note we always compile for -march=i386, | ||
28 | # that way we can complain to the user if the CPU is insufficient. | ||
29 | # Compile with _SETUP since this is similar to the boot-time setup code. | ||
30 | KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \ | ||
31 | -I$(srctree)/$(bootsrc) \ | ||
32 | $(cflags-y) \ | ||
33 | -Wall -Wstrict-prototypes \ | ||
34 | -march=i386 -mregparm=3 \ | ||
35 | -include $(srctree)/$(bootsrc)/code16gcc.h \ | ||
36 | -fno-strict-aliasing -fomit-frame-pointer \ | ||
37 | $(call cc-option, -ffreestanding) \ | ||
38 | $(call cc-option, -fno-toplevel-reorder,\ | ||
39 | $(call cc-option, -fno-unit-at-a-time)) \ | ||
40 | $(call cc-option, -fno-stack-protector) \ | ||
41 | $(call cc-option, -mpreferred-stack-boundary=2) | ||
42 | KBUILD_CFLAGS += $(call cc-option, -m32) | ||
43 | KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ | ||
44 | |||
45 | WAKEUP_OBJS = $(addprefix $(obj)/,$(wakeup-y)) | ||
46 | |||
47 | LDFLAGS_wakeup.elf := -T | ||
48 | |||
49 | CPPFLAGS_wakeup.lds += -P -C | ||
50 | |||
51 | $(obj)/wakeup.elf: $(src)/wakeup.lds $(WAKEUP_OBJS) FORCE | ||
52 | $(call if_changed,ld) | ||
53 | |||
54 | OBJCOPYFLAGS_wakeup.bin := -O binary | ||
55 | |||
56 | $(obj)/wakeup.bin: $(obj)/wakeup.elf FORCE | ||
57 | $(call if_changed,objcopy) | ||
diff --git a/arch/x86/kernel/acpi/realmode/copy.S b/arch/x86/kernel/acpi/realmode/copy.S new file mode 100644 index 000000000000..dc59ebee69d8 --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/copy.S | |||
@@ -0,0 +1 @@ | |||
#include "../../../boot/copy.S" | |||
diff --git a/arch/x86/kernel/acpi/realmode/video-bios.c b/arch/x86/kernel/acpi/realmode/video-bios.c new file mode 100644 index 000000000000..7deabc144a27 --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/video-bios.c | |||
@@ -0,0 +1 @@ | |||
#include "../../../boot/video-bios.c" | |||
diff --git a/arch/x86/kernel/acpi/realmode/video-mode.c b/arch/x86/kernel/acpi/realmode/video-mode.c new file mode 100644 index 000000000000..328ad209f113 --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/video-mode.c | |||
@@ -0,0 +1 @@ | |||
#include "../../../boot/video-mode.c" | |||
diff --git a/arch/x86/kernel/acpi/realmode/video-vesa.c b/arch/x86/kernel/acpi/realmode/video-vesa.c new file mode 100644 index 000000000000..9dbb9672226a --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/video-vesa.c | |||
@@ -0,0 +1 @@ | |||
#include "../../../boot/video-vesa.c" | |||
diff --git a/arch/x86/kernel/acpi/realmode/video-vga.c b/arch/x86/kernel/acpi/realmode/video-vga.c new file mode 100644 index 000000000000..bcc81255f374 --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/video-vga.c | |||
@@ -0,0 +1 @@ | |||
#include "../../../boot/video-vga.c" | |||
diff --git a/arch/x86/kernel/acpi/realmode/wakemain.c b/arch/x86/kernel/acpi/realmode/wakemain.c new file mode 100644 index 000000000000..883962d9eef2 --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/wakemain.c | |||
@@ -0,0 +1,81 @@ | |||
1 | #include "wakeup.h" | ||
2 | #include "boot.h" | ||
3 | |||
4 | static void udelay(int loops) | ||
5 | { | ||
6 | while (loops--) | ||
7 | io_delay(); /* Approximately 1 us */ | ||
8 | } | ||
9 | |||
10 | static void beep(unsigned int hz) | ||
11 | { | ||
12 | u8 enable; | ||
13 | |||
14 | if (!hz) { | ||
15 | enable = 0x00; /* Turn off speaker */ | ||
16 | } else { | ||
17 | u16 div = 1193181/hz; | ||
18 | |||
19 | outb(0xb6, 0x43); /* Ctr 2, squarewave, load, binary */ | ||
20 | io_delay(); | ||
21 | outb(div, 0x42); /* LSB of counter */ | ||
22 | io_delay(); | ||
23 | outb(div >> 8, 0x42); /* MSB of counter */ | ||
24 | io_delay(); | ||
25 | |||
26 | enable = 0x03; /* Turn on speaker */ | ||
27 | } | ||
28 | inb(0x61); /* Dummy read of System Control Port B */ | ||
29 | io_delay(); | ||
30 | outb(enable, 0x61); /* Enable timer 2 output to speaker */ | ||
31 | io_delay(); | ||
32 | } | ||
33 | |||
34 | #define DOT_HZ 880 | ||
35 | #define DASH_HZ 587 | ||
36 | #define US_PER_DOT 125000 | ||
37 | |||
38 | /* Okay, this is totally silly, but it's kind of fun. */ | ||
39 | static void send_morse(const char *pattern) | ||
40 | { | ||
41 | char s; | ||
42 | |||
43 | while ((s = *pattern++)) { | ||
44 | switch (s) { | ||
45 | case '.': | ||
46 | beep(DOT_HZ); | ||
47 | udelay(US_PER_DOT); | ||
48 | beep(0); | ||
49 | udelay(US_PER_DOT); | ||
50 | break; | ||
51 | case '-': | ||
52 | beep(DASH_HZ); | ||
53 | udelay(US_PER_DOT * 3); | ||
54 | beep(0); | ||
55 | udelay(US_PER_DOT); | ||
56 | break; | ||
57 | default: /* Assume it's a space */ | ||
58 | udelay(US_PER_DOT * 3); | ||
59 | break; | ||
60 | } | ||
61 | } | ||
62 | } | ||
63 | |||
64 | void main(void) | ||
65 | { | ||
66 | /* Kill machine if structures are wrong */ | ||
67 | if (wakeup_header.real_magic != 0x12345678) | ||
68 | while (1); | ||
69 | |||
70 | if (wakeup_header.realmode_flags & 4) | ||
71 | send_morse("...-"); | ||
72 | |||
73 | if (wakeup_header.realmode_flags & 1) | ||
74 | asm volatile("lcallw $0xc000,$3"); | ||
75 | |||
76 | if (wakeup_header.realmode_flags & 2) { | ||
77 | /* Need to call BIOS */ | ||
78 | probe_cards(0); | ||
79 | set_mode(wakeup_header.video_mode); | ||
80 | } | ||
81 | } | ||
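
beep() above programs PIT channel 2 in square-wave mode with divisor 1193181/hz (the u16 div truncates, which is fine for audible tones), written LSB then MSB to port 0x42. The actual numbers for the two Morse tones:

#include <stdio.h>

int main(void)
{
	const unsigned pit_hz = 1193181;	/* PIT input clock, as above */
	const unsigned tones[] = { 880, 587 };	/* DOT_HZ, DASH_HZ */
	unsigned i;

	for (i = 0; i < 2; i++) {
		unsigned div = pit_hz / tones[i];
		printf("%u Hz -> divisor %u (LSB 0x%02x, MSB 0x%02x)\n",
		       tones[i], div, div & 0xff, (div >> 8) & 0xff);
	}
	return 0;
}

That is, 880 Hz loads 1355 (0x054b) and 587 Hz loads 2032 (0x07f0).
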
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S new file mode 100644 index 000000000000..f9b77fb37e5b --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/wakeup.S | |||
@@ -0,0 +1,113 @@ | |||
1 | /* | ||
2 | * ACPI wakeup real mode startup stub | ||
3 | */ | ||
4 | #include <asm/segment.h> | ||
5 | #include <asm/msr-index.h> | ||
6 | #include <asm/page.h> | ||
7 | #include <asm/pgtable.h> | ||
8 | |||
9 | .code16 | ||
10 | .section ".header", "a" | ||
11 | |||
12 | /* This should match the structure in wakeup.h */ | ||
13 | .globl wakeup_header | ||
14 | wakeup_header: | ||
15 | video_mode: .short 0 /* Video mode number */ | ||
16 | pmode_return: .byte 0x66, 0xea /* ljmpl */ | ||
17 | .long 0 /* offset goes here */ | ||
18 | .short __KERNEL_CS | ||
19 | pmode_cr0: .long 0 /* Saved %cr0 */ | ||
20 | pmode_cr3: .long 0 /* Saved %cr3 */ | ||
21 | pmode_cr4: .long 0 /* Saved %cr4 */ | ||
22 | pmode_efer: .quad 0 /* Saved EFER */ | ||
23 | pmode_gdt: .quad 0 | ||
24 | realmode_flags: .long 0 | ||
25 | real_magic: .long 0 | ||
26 | trampoline_segment: .word 0 | ||
27 | signature: .long 0x51ee1111 | ||
28 | |||
29 | .text | ||
30 | .globl _start | ||
31 | .code16 | ||
32 | wakeup_code: | ||
33 | _start: | ||
34 | cli | ||
35 | cld | ||
36 | |||
37 | /* Set up segments */ | ||
38 | movw %cs, %ax | ||
39 | movw %ax, %ds | ||
40 | movw %ax, %es | ||
41 | movw %ax, %ss | ||
42 | |||
43 | movl $wakeup_stack_end, %esp | ||
44 | |||
45 | /* Clear the EFLAGS */ | ||
46 | pushl $0 | ||
47 | popfl | ||
48 | |||
49 | /* Check header signature... */ | ||
50 | movl signature, %eax | ||
51 | cmpl $0x51ee1111, %eax | ||
52 | jne bogus_real_magic | ||
53 | |||
54 | /* Check we really have everything... */ | ||
55 | movl end_signature, %eax | ||
56 | cmpl $0x65a22c82, %eax | ||
57 | jne bogus_real_magic | ||
58 | |||
59 | /* Call the C code */ | ||
60 | calll main | ||
61 | |||
62 | /* Do any other stuff... */ | ||
63 | |||
64 | #ifndef CONFIG_64BIT | ||
65 | /* This could also be done in C code... */ | ||
66 | movl pmode_cr3, %eax | ||
67 | movl %eax, %cr3 | ||
68 | |||
69 | movl pmode_cr4, %ecx | ||
70 | jecxz 1f | ||
71 | movl %ecx, %cr4 | ||
72 | 1: | ||
73 | movl pmode_efer, %eax | ||
74 | movl pmode_efer + 4, %edx | ||
75 | movl %eax, %ecx | ||
76 | orl %edx, %ecx | ||
77 | jz 1f | ||
78 | movl $0xc0000080, %ecx | ||
79 | wrmsr | ||
80 | 1: | ||
81 | |||
82 | lgdtl pmode_gdt | ||
83 | |||
84 | /* This really couldn't... */ | ||
85 | movl pmode_cr0, %eax | ||
86 | movl %eax, %cr0 | ||
87 | jmp pmode_return | ||
88 | #else | ||
89 | pushw $0 | ||
90 | pushw trampoline_segment | ||
91 | pushw $0 | ||
92 | lret | ||
93 | #endif | ||
94 | |||
95 | bogus_real_magic: | ||
96 | 1: | ||
97 | hlt | ||
98 | jmp 1b | ||
99 | |||
100 | .data | ||
101 | .balign 4 | ||
102 | .globl HEAP, heap_end | ||
103 | HEAP: | ||
104 | .long wakeup_heap | ||
105 | heap_end: | ||
106 | .long wakeup_stack | ||
107 | |||
108 | .bss | ||
109 | wakeup_heap: | ||
110 | .space 2048 | ||
111 | wakeup_stack: | ||
112 | .space 2048 | ||
113 | wakeup_stack_end: | ||
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.h b/arch/x86/kernel/acpi/realmode/wakeup.h new file mode 100644 index 000000000000..ef8166fe8020 --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/wakeup.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * Definitions for the wakeup data structure at the head of the | ||
3 | * wakeup code. | ||
4 | */ | ||
5 | |||
6 | #ifndef ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H | ||
7 | #define ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H | ||
8 | |||
9 | #ifndef __ASSEMBLY__ | ||
10 | #include <linux/types.h> | ||
11 | |||
12 | /* This must match data at wakeup.S */ | ||
13 | struct wakeup_header { | ||
14 | u16 video_mode; /* Video mode number */ | ||
15 | u16 _jmp1; /* ljmpl opcode, 32-bit only */ | ||
16 | u32 pmode_entry; /* Protected mode resume point, 32-bit only */ | ||
17 | u16 _jmp2; /* CS value, 32-bit only */ | ||
18 | u32 pmode_cr0; /* Protected mode cr0 */ | ||
19 | u32 pmode_cr3; /* Protected mode cr3 */ | ||
20 | u32 pmode_cr4; /* Protected mode cr4 */ | ||
21 | u32 pmode_efer_low; /* Protected mode EFER */ | ||
22 | u32 pmode_efer_high; | ||
23 | u64 pmode_gdt; | ||
24 | u32 realmode_flags; | ||
25 | u32 real_magic; | ||
26 | u16 trampoline_segment; /* segment with trampoline code, 64-bit only */ | ||
27 | u32 signature; /* To check we have correct structure */ | ||
28 | } __attribute__((__packed__)); | ||
29 | |||
30 | extern struct wakeup_header wakeup_header; | ||
31 | #endif | ||
32 | |||
33 | #define HEADER_OFFSET 0x3f00 | ||
34 | #define WAKEUP_SIZE 0x4000 | ||
35 | |||
36 | #endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */ | ||
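
The comments on both sides say the struct must match wakeup.S by hand. A hedged sketch of how that could be checked at build time (not part of the patch): with the packed layout above, real_magic sits at byte offset 42 and signature at 48, matching the .short/.byte+.long+.short/... sequence in wakeup.S, and a negative array size breaks the build if either ever drifts:

#include <stddef.h>
#include <stdint.h>

struct wakeup_header {
	uint16_t video_mode;
	uint16_t _jmp1;
	uint32_t pmode_entry;
	uint16_t _jmp2;
	uint32_t pmode_cr0, pmode_cr3, pmode_cr4;
	uint32_t pmode_efer_low, pmode_efer_high;
	uint64_t pmode_gdt;
	uint32_t realmode_flags;
	uint32_t real_magic;
	uint16_t trampoline_segment;
	uint32_t signature;
} __attribute__((__packed__));

/* Compile-time layout checks: a negative array size is an error. */
typedef char check_real_magic[offsetof(struct wakeup_header,
					real_magic) == 42 ? 1 : -1];
typedef char check_signature[offsetof(struct wakeup_header,
					signature) == 48 ? 1 : -1];
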
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.lds.S b/arch/x86/kernel/acpi/realmode/wakeup.lds.S new file mode 100644 index 000000000000..22fab6c4be15 --- /dev/null +++ b/arch/x86/kernel/acpi/realmode/wakeup.lds.S | |||
@@ -0,0 +1,61 @@ | |||
1 | /* | ||
2 | * wakeup.ld | ||
3 | * | ||
4 | * Linker script for the real-mode wakeup code | ||
5 | */ | ||
6 | #undef i386 | ||
7 | #include "wakeup.h" | ||
8 | |||
9 | OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386") | ||
10 | OUTPUT_ARCH(i386) | ||
11 | ENTRY(_start) | ||
12 | |||
13 | SECTIONS | ||
14 | { | ||
15 | . = HEADER_OFFSET; | ||
16 | .header : { | ||
17 | *(.header) | ||
18 | } | ||
19 | |||
20 | . = 0; | ||
21 | .text : { | ||
22 | *(.text*) | ||
23 | } | ||
24 | |||
25 | . = ALIGN(16); | ||
26 | .rodata : { | ||
27 | *(.rodata*) | ||
28 | } | ||
29 | |||
30 | .videocards : { | ||
31 | video_cards = .; | ||
32 | *(.videocards) | ||
33 | video_cards_end = .; | ||
34 | } | ||
35 | |||
36 | . = ALIGN(16); | ||
37 | .data : { | ||
38 | *(.data*) | ||
39 | } | ||
40 | |||
41 | .signature : { | ||
42 | end_signature = .; | ||
43 | LONG(0x65a22c82) | ||
44 | } | ||
45 | |||
46 | . = ALIGN(16); | ||
47 | .bss : { | ||
48 | __bss_start = .; | ||
49 | *(.bss) | ||
50 | __bss_end = .; | ||
51 | } | ||
52 | |||
53 | . = ALIGN(16); | ||
54 | _end = .; | ||
55 | |||
56 | /DISCARD/ : { | ||
57 | *(.note*) | ||
58 | } | ||
59 | |||
60 | . = ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!"); | ||
61 | } | ||
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 6bc815cd8cb3..afc25ee9964b 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c | |||
@@ -10,30 +10,72 @@ | |||
10 | #include <linux/dmi.h> | 10 | #include <linux/dmi.h> |
11 | #include <linux/cpumask.h> | 11 | #include <linux/cpumask.h> |
12 | 12 | ||
13 | #include <asm/smp.h> | 13 | #include "realmode/wakeup.h" |
14 | #include "sleep.h" | ||
14 | 15 | ||
15 | /* address in low memory of the wakeup routine. */ | 16 | unsigned long acpi_wakeup_address; |
16 | unsigned long acpi_wakeup_address = 0; | ||
17 | unsigned long acpi_realmode_flags; | 17 | unsigned long acpi_realmode_flags; |
18 | extern char wakeup_start, wakeup_end; | ||
19 | 18 | ||
20 | extern unsigned long acpi_copy_wakeup_routine(unsigned long); | 19 | /* address in low memory of the wakeup routine. */ |
20 | static unsigned long acpi_realmode; | ||
21 | |||
22 | #ifdef CONFIG_64BIT | ||
23 | static char temp_stack[10240]; | ||
24 | #endif | ||
21 | 25 | ||
22 | /** | 26 | /** |
23 | * acpi_save_state_mem - save kernel state | 27 | * acpi_save_state_mem - save kernel state |
24 | * | 28 | * |
25 | * Create an identity mapped page table and copy the wakeup routine to | 29 | * Create an identity mapped page table and copy the wakeup routine to |
26 | * low memory. | 30 | * low memory. |
31 | * | ||
32 | * Note that this is too late to change acpi_wakeup_address. | ||
27 | */ | 33 | */ |
28 | int acpi_save_state_mem(void) | 34 | int acpi_save_state_mem(void) |
29 | { | 35 | { |
30 | if (!acpi_wakeup_address) { | 36 | struct wakeup_header *header; |
31 | printk(KERN_ERR "Could not allocate memory during boot, S3 disabled\n"); | 37 | |
38 | if (!acpi_realmode) { | ||
39 | printk(KERN_ERR "Could not allocate memory during boot, " | ||
40 | "S3 disabled\n"); | ||
32 | return -ENOMEM; | 41 | return -ENOMEM; |
33 | } | 42 | } |
34 | memcpy((void *)acpi_wakeup_address, &wakeup_start, | 43 | memcpy((void *)acpi_realmode, &wakeup_code_start, WAKEUP_SIZE); |
35 | &wakeup_end - &wakeup_start); | 44 | |
36 | acpi_copy_wakeup_routine(acpi_wakeup_address); | 45 | header = (struct wakeup_header *)(acpi_realmode + HEADER_OFFSET); |
46 | if (header->signature != 0x51ee1111) { | ||
47 | printk(KERN_ERR "wakeup header does not match\n"); | ||
48 | return -EINVAL; | ||
49 | } | ||
50 | |||
51 | header->video_mode = saved_video_mode; | ||
52 | |||
53 | #ifndef CONFIG_64BIT | ||
54 | store_gdt((struct desc_ptr *)&header->pmode_gdt); | ||
55 | |||
56 | header->pmode_efer_low = nx_enabled; | ||
57 | if (header->pmode_efer_low & 1) { | ||
58 | /* This is strange, why not save efer, always? */ | ||
59 | rdmsr(MSR_EFER, header->pmode_efer_low, | ||
60 | header->pmode_efer_high); | ||
61 | } | ||
62 | #endif /* !CONFIG_64BIT */ | ||
63 | |||
64 | header->pmode_cr0 = read_cr0(); | ||
65 | header->pmode_cr4 = read_cr4(); | ||
66 | header->realmode_flags = acpi_realmode_flags; | ||
67 | header->real_magic = 0x12345678; | ||
68 | |||
69 | #ifndef CONFIG_64BIT | ||
70 | header->pmode_entry = (u32)&wakeup_pmode_return; | ||
71 | header->pmode_cr3 = (u32)(swsusp_pg_dir - __PAGE_OFFSET); | ||
72 | saved_magic = 0x12345678; | ||
73 | #else /* CONFIG_64BIT */ | ||
74 | header->trampoline_segment = setup_trampoline() >> 4; | ||
75 | init_rsp = (unsigned long)temp_stack + 4096; | ||
76 | initial_code = (unsigned long)wakeup_long64; | ||
77 | saved_magic = 0x123456789abcdef0; | ||
78 | #endif /* CONFIG_64BIT */ | ||
37 | 79 | ||
38 | return 0; | 80 | return 0; |
39 | } | 81 | } |
@@ -56,15 +98,20 @@ void acpi_restore_state_mem(void) | |||
56 | */ | 98 | */ |
57 | void __init acpi_reserve_bootmem(void) | 99 | void __init acpi_reserve_bootmem(void) |
58 | { | 100 | { |
59 | if ((&wakeup_end - &wakeup_start) > PAGE_SIZE*2) { | 101 | if ((&wakeup_code_end - &wakeup_code_start) > WAKEUP_SIZE) { |
60 | printk(KERN_ERR | 102 | printk(KERN_ERR |
61 | "ACPI: Wakeup code way too big, S3 disabled.\n"); | 103 | "ACPI: Wakeup code way too big, S3 disabled.\n"); |
62 | return; | 104 | return; |
63 | } | 105 | } |
64 | 106 | ||
65 | acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE*2); | 107 | acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE); |
66 | if (!acpi_wakeup_address) | 108 | |
109 | if (!acpi_realmode) { | ||
67 | printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n"); | 110 | printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n"); |
111 | return; | ||
112 | } | ||
113 | |||
114 | acpi_wakeup_address = acpi_realmode; | ||
68 | } | 115 | } |
69 | 116 | ||
70 | 117 | ||
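
acpi_save_state_mem() above is the suspend half of a two-magic handshake: the link-time signature 0x51ee1111 (emitted by wakeup.lds.S) proves the copied blob has the expected layout, and the run-time real_magic 0x12345678 armed here is what main() in wakemain.c insists on after resume. A condensed sketch of that sequence (field order abbreviated and helpers invented; see realmode/wakeup.h for the real layout):

#include <stdint.h>
#include <string.h>

#define HEADER_OFFSET 0x3f00
#define WAKEUP_SIZE   0x4000

/* Abbreviated stand-in for struct wakeup_header. */
struct wakeup_header {
	uint32_t real_magic;
	uint32_t signature;
};

static int arm_wakeup(void *lowmem, const void *blob)
{
	struct wakeup_header *h;

	memcpy(lowmem, blob, WAKEUP_SIZE);	/* blob = wakeup.bin image */
	h = (struct wakeup_header *)((char *)lowmem + HEADER_OFFSET);
	if (h->signature != 0x51ee1111)		/* placed by the linker script */
		return -1;			/* "wakeup header does not match" */
	h->real_magic = 0x12345678;		/* rechecked by wakemain.c */
	return 0;
}

int main(void)
{
	static char low[WAKEUP_SIZE], blob[WAKEUP_SIZE];
	struct wakeup_header *h = (struct wakeup_header *)(blob + HEADER_OFFSET);

	h->signature = 0x51ee1111;	/* pretend the linker put it there */
	return arm_wakeup(low, blob);
}
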
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h new file mode 100644 index 000000000000..adbcbaa6f1df --- /dev/null +++ b/arch/x86/kernel/acpi/sleep.h | |||
@@ -0,0 +1,16 @@ | |||
1 | /* | ||
2 | * Variables and functions used by the code in sleep.c | ||
3 | */ | ||
4 | |||
5 | #include <asm/trampoline.h> | ||
6 | |||
7 | extern char wakeup_code_start, wakeup_code_end; | ||
8 | |||
9 | extern unsigned long saved_video_mode; | ||
10 | extern long saved_magic; | ||
11 | |||
12 | extern int wakeup_pmode_return; | ||
13 | extern char swsusp_pg_dir[PAGE_SIZE]; | ||
14 | |||
15 | extern unsigned long acpi_copy_wakeup_routine(unsigned long); | ||
16 | extern void wakeup_long64(void); | ||
diff --git a/arch/x86/kernel/acpi/sleep_32.c b/arch/x86/kernel/acpi/sleep_32.c deleted file mode 100644 index 63fe5525e026..000000000000 --- a/arch/x86/kernel/acpi/sleep_32.c +++ /dev/null | |||
@@ -1,40 +0,0 @@ | |||
1 | /* | ||
2 | * sleep.c - x86-specific ACPI sleep support. | ||
3 | * | ||
4 | * Copyright (C) 2001-2003 Patrick Mochel | ||
5 | * Copyright (C) 2001-2003 Pavel Machek <pavel@suse.cz> | ||
6 | */ | ||
7 | |||
8 | #include <linux/acpi.h> | ||
9 | #include <linux/bootmem.h> | ||
10 | #include <linux/dmi.h> | ||
11 | #include <linux/cpumask.h> | ||
12 | |||
13 | #include <asm/smp.h> | ||
14 | |||
15 | /* Ouch, we want to delete this. We already have better version in userspace, in | ||
16 | s2ram from suspend.sf.net project */ | ||
17 | static __init int reset_videomode_after_s3(const struct dmi_system_id *d) | ||
18 | { | ||
19 | acpi_realmode_flags |= 2; | ||
20 | return 0; | ||
21 | } | ||
22 | |||
23 | static __initdata struct dmi_system_id acpisleep_dmi_table[] = { | ||
24 | { /* Reset video mode after returning from ACPI S3 sleep */ | ||
25 | .callback = reset_videomode_after_s3, | ||
26 | .ident = "Toshiba Satellite 4030cdt", | ||
27 | .matches = { | ||
28 | DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"), | ||
29 | }, | ||
30 | }, | ||
31 | {} | ||
32 | }; | ||
33 | |||
34 | static int __init acpisleep_dmi_init(void) | ||
35 | { | ||
36 | dmi_check_system(acpisleep_dmi_table); | ||
37 | return 0; | ||
38 | } | ||
39 | |||
40 | core_initcall(acpisleep_dmi_init); | ||
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S index f53e3277f8e5..a12e6a9fb659 100644 --- a/arch/x86/kernel/acpi/wakeup_32.S +++ b/arch/x86/kernel/acpi/wakeup_32.S | |||
@@ -3,178 +3,12 @@ | |||
3 | #include <asm/segment.h> | 3 | #include <asm/segment.h> |
4 | #include <asm/page.h> | 4 | #include <asm/page.h> |
5 | 5 | ||
6 | # | 6 | # Copyright 2003, 2008 Pavel Machek <pavel@suse.cz>, distribute under GPLv2 |
7 | # wakeup_code runs in real mode, and at unknown address (determined at run-time). | ||
8 | # Therefore it must only use relative jumps/calls. | ||
9 | # | ||
10 | # Do we need to deal with A20? It is okay: ACPI specs says A20 must be enabled | ||
11 | # | ||
12 | # If physical address of wakeup_code is 0x12345, BIOS should call us with | ||
13 | # cs = 0x1234, eip = 0x05 | ||
14 | # | ||
15 | |||
16 | #define BEEP \ | ||
17 | inb $97, %al; \ | ||
18 | outb %al, $0x80; \ | ||
19 | movb $3, %al; \ | ||
20 | outb %al, $97; \ | ||
21 | outb %al, $0x80; \ | ||
22 | movb $-74, %al; \ | ||
23 | outb %al, $67; \ | ||
24 | outb %al, $0x80; \ | ||
25 | movb $-119, %al; \ | ||
26 | outb %al, $66; \ | ||
27 | outb %al, $0x80; \ | ||
28 | movb $15, %al; \ | ||
29 | outb %al, $66; | ||
30 | |||
31 | ALIGN | ||
32 | .align 4096 | ||
33 | ENTRY(wakeup_start) | ||
34 | wakeup_code: | ||
35 | wakeup_code_start = . | ||
36 | .code16 | ||
37 | |||
38 | cli | ||
39 | cld | ||
40 | |||
41 | # setup data segment | ||
42 | movw %cs, %ax | ||
43 | movw %ax, %ds # Make ds:0 point to wakeup_start | ||
44 | movw %ax, %ss | ||
45 | |||
46 | testl $4, realmode_flags - wakeup_code | ||
47 | jz 1f | ||
48 | BEEP | ||
49 | 1: | ||
50 | mov $(wakeup_stack - wakeup_code), %sp # Private stack is needed for ASUS board | ||
51 | |||
52 | pushl $0 # Kill any dangerous flags | ||
53 | popfl | ||
54 | |||
55 | movl real_magic - wakeup_code, %eax | ||
56 | cmpl $0x12345678, %eax | ||
57 | jne bogus_real_magic | ||
58 | |||
59 | testl $1, realmode_flags - wakeup_code | ||
60 | jz 1f | ||
61 | lcall $0xc000,$3 | ||
62 | movw %cs, %ax | ||
63 | movw %ax, %ds # Bios might have played with that | ||
64 | movw %ax, %ss | ||
65 | 1: | ||
66 | |||
67 | testl $2, realmode_flags - wakeup_code | ||
68 | jz 1f | ||
69 | mov video_mode - wakeup_code, %ax | ||
70 | call mode_set | ||
71 | 1: | ||
72 | |||
73 | # set up page table | ||
74 | movl $swsusp_pg_dir-__PAGE_OFFSET, %eax | ||
75 | movl %eax, %cr3 | ||
76 | |||
77 | testl $1, real_efer_save_restore - wakeup_code | ||
78 | jz 4f | ||
79 | # restore efer setting | ||
80 | movl real_save_efer_edx - wakeup_code, %edx | ||
81 | movl real_save_efer_eax - wakeup_code, %eax | ||
82 | mov $0xc0000080, %ecx | ||
83 | wrmsr | ||
84 | 4: | ||
85 | # make sure %cr4 is set correctly (features, etc) | ||
86 | movl real_save_cr4 - wakeup_code, %eax | ||
87 | movl %eax, %cr4 | ||
88 | |||
89 | # need a gdt -- use lgdtl to force 32-bit operands, in case | ||
90 | # the GDT is located past 16 megabytes. | ||
91 | lgdtl real_save_gdt - wakeup_code | ||
92 | |||
93 | movl real_save_cr0 - wakeup_code, %eax | ||
94 | movl %eax, %cr0 | ||
95 | jmp 1f | ||
96 | 1: | ||
97 | movl real_magic - wakeup_code, %eax | ||
98 | cmpl $0x12345678, %eax | ||
99 | jne bogus_real_magic | ||
100 | |||
101 | testl $8, realmode_flags - wakeup_code | ||
102 | jz 1f | ||
103 | BEEP | ||
104 | 1: | ||
105 | ljmpl $__KERNEL_CS, $wakeup_pmode_return | ||
106 | |||
107 | real_save_gdt: .word 0 | ||
108 | .long 0 | ||
109 | real_save_cr0: .long 0 | ||
110 | real_save_cr3: .long 0 | ||
111 | real_save_cr4: .long 0 | ||
112 | real_magic: .long 0 | ||
113 | video_mode: .long 0 | ||
114 | realmode_flags: .long 0 | ||
115 | real_efer_save_restore: .long 0 | ||
116 | real_save_efer_edx: .long 0 | ||
117 | real_save_efer_eax: .long 0 | ||
118 | |||
119 | bogus_real_magic: | ||
120 | jmp bogus_real_magic | ||
121 | |||
122 | /* This code uses an extended set of video mode numbers. These include: | ||
123 | * Aliases for standard modes | ||
124 | * NORMAL_VGA (-1) | ||
125 | * EXTENDED_VGA (-2) | ||
126 | * ASK_VGA (-3) | ||
127 | * Video modes numbered by menu position -- NOT RECOMMENDED because of lack | ||
128 | * of compatibility when extending the table. These are between 0x00 and 0xff. | ||
129 | */ | ||
130 | #define VIDEO_FIRST_MENU 0x0000 | ||
131 | |||
132 | /* Standard BIOS video modes (BIOS number + 0x0100) */ | ||
133 | #define VIDEO_FIRST_BIOS 0x0100 | ||
134 | |||
135 | /* VESA BIOS video modes (VESA number + 0x0200) */ | ||
136 | #define VIDEO_FIRST_VESA 0x0200 | ||
137 | |||
138 | /* Video7 special modes (BIOS number + 0x0900) */ | ||
139 | #define VIDEO_FIRST_V7 0x0900 | ||
140 | |||
141 | # Setting of user mode (AX=mode ID) => CF=success | ||
142 | |||
143 | # For now, we only handle VESA modes (0x0200..0x03ff). To handle other | ||
144 | # modes, we should probably compile in the video code from the boot | ||
145 | # directory. | ||
146 | mode_set: | ||
147 | movw %ax, %bx | ||
148 | subb $VIDEO_FIRST_VESA>>8, %bh | ||
149 | cmpb $2, %bh | ||
150 | jb check_vesa | ||
151 | |||
152 | setbad: | ||
153 | clc | ||
154 | ret | ||
155 | |||
156 | check_vesa: | ||
157 | orw $0x4000, %bx # Use linear frame buffer | ||
158 | movw $0x4f02, %ax # VESA BIOS mode set call | ||
159 | int $0x10 | ||
160 | cmpw $0x004f, %ax # AL=4f if implemented | ||
161 | jnz setbad # AH=0 if OK | ||
162 | |||
163 | stc | ||
164 | ret | ||
165 | 7 | ||
166 | .code32 | 8 | .code32 |
167 | ALIGN | 9 | ALIGN |
168 | 10 | ||
169 | .org 0x800 | 11 | ENTRY(wakeup_pmode_return) |
170 | wakeup_stack_begin: # Stack grows down | ||
171 | |||
172 | .org 0xff0 # Just below end of page | ||
173 | wakeup_stack: | ||
174 | ENTRY(wakeup_end) | ||
175 | |||
176 | .org 0x1000 | ||
177 | |||
178 | wakeup_pmode_return: | 12 | wakeup_pmode_return: |
179 | movw $__KERNEL_DS, %ax | 13 | movw $__KERNEL_DS, %ax |
180 | movw %ax, %ss | 14 | movw %ax, %ss |
@@ -187,7 +21,7 @@ wakeup_pmode_return: | |||
187 | lgdt saved_gdt | 21 | lgdt saved_gdt |
188 | lidt saved_idt | 22 | lidt saved_idt |
189 | lldt saved_ldt | 23 | lldt saved_ldt |
190 | ljmp $(__KERNEL_CS),$1f | 24 | ljmp $(__KERNEL_CS), $1f |
191 | 1: | 25 | 1: |
192 | movl %cr3, %eax | 26 | movl %cr3, %eax |
193 | movl %eax, %cr3 | 27 | movl %eax, %cr3 |
@@ -201,82 +35,41 @@ wakeup_pmode_return: | |||
201 | jne bogus_magic | 35 | jne bogus_magic |
202 | 36 | ||
203 | # jump to place where we left off | 37 | # jump to place where we left off |
204 | movl saved_eip,%eax | 38 | movl saved_eip, %eax |
205 | jmp *%eax | 39 | jmp *%eax |
206 | 40 | ||
207 | bogus_magic: | 41 | bogus_magic: |
208 | jmp bogus_magic | 42 | jmp bogus_magic |
209 | 43 | ||
210 | 44 | ||
211 | ## | ||
212 | # acpi_copy_wakeup_routine | ||
213 | # | ||
214 | # Copy the above routine to low memory. | ||
215 | # | ||
216 | # Parameters: | ||
217 | # %eax: place to copy wakeup routine to | ||
218 | # | ||
219 | # Returned address is location of code in low memory (past data and stack) | ||
220 | # | ||
221 | ENTRY(acpi_copy_wakeup_routine) | ||
222 | 45 | ||
223 | pushl %ebx | 46 | save_registers: |
224 | sgdt saved_gdt | 47 | sgdt saved_gdt |
225 | sidt saved_idt | 48 | sidt saved_idt |
226 | sldt saved_ldt | 49 | sldt saved_ldt |
227 | str saved_tss | 50 | str saved_tss |
228 | 51 | ||
229 | movl nx_enabled, %edx | ||
230 | movl %edx, real_efer_save_restore - wakeup_start (%eax) | ||
231 | testl $1, real_efer_save_restore - wakeup_start (%eax) | ||
232 | jz 2f | ||
233 | # save efer setting | ||
234 | pushl %eax | ||
235 | movl %eax, %ebx | ||
236 | mov $0xc0000080, %ecx | ||
237 | rdmsr | ||
238 | movl %edx, real_save_efer_edx - wakeup_start (%ebx) | ||
239 | movl %eax, real_save_efer_eax - wakeup_start (%ebx) | ||
240 | popl %eax | ||
241 | 2: | ||
242 | |||
243 | movl %cr3, %edx | ||
244 | movl %edx, real_save_cr3 - wakeup_start (%eax) | ||
245 | movl %cr4, %edx | ||
246 | movl %edx, real_save_cr4 - wakeup_start (%eax) | ||
247 | movl %cr0, %edx | ||
248 | movl %edx, real_save_cr0 - wakeup_start (%eax) | ||
249 | sgdt real_save_gdt - wakeup_start (%eax) | ||
250 | |||
251 | movl saved_videomode, %edx | ||
252 | movl %edx, video_mode - wakeup_start (%eax) | ||
253 | movl acpi_realmode_flags, %edx | ||
254 | movl %edx, realmode_flags - wakeup_start (%eax) | ||
255 | movl $0x12345678, real_magic - wakeup_start (%eax) | ||
256 | movl $0x12345678, saved_magic | ||
257 | popl %ebx | ||
258 | ret | ||
259 | |||
260 | save_registers: | ||
261 | leal 4(%esp), %eax | 52 | leal 4(%esp), %eax |
262 | movl %eax, saved_context_esp | 53 | movl %eax, saved_context_esp |
263 | movl %ebx, saved_context_ebx | 54 | movl %ebx, saved_context_ebx |
264 | movl %ebp, saved_context_ebp | 55 | movl %ebp, saved_context_ebp |
265 | movl %esi, saved_context_esi | 56 | movl %esi, saved_context_esi |
266 | movl %edi, saved_context_edi | 57 | movl %edi, saved_context_edi |
267 | pushfl ; popl saved_context_eflags | 58 | pushfl |
268 | 59 | popl saved_context_eflags | |
269 | movl $ret_point, saved_eip | 60 | |
61 | movl $ret_point, saved_eip | ||
270 | ret | 62 | ret |
271 | 63 | ||
272 | 64 | ||
273 | restore_registers: | 65 | restore_registers: |
274 | movl saved_context_ebp, %ebp | 66 | movl saved_context_ebp, %ebp |
275 | movl saved_context_ebx, %ebx | 67 | movl saved_context_ebx, %ebx |
276 | movl saved_context_esi, %esi | 68 | movl saved_context_esi, %esi |
277 | movl saved_context_edi, %edi | 69 | movl saved_context_edi, %edi |
278 | pushl saved_context_eflags ; popfl | 70 | pushl saved_context_eflags |
279 | ret | 71 | popfl |
72 | ret | ||
280 | 73 | ||
281 | ENTRY(do_suspend_lowlevel) | 74 | ENTRY(do_suspend_lowlevel) |
282 | call save_processor_state | 75 | call save_processor_state |
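
For orientation, the suspend path these save/restore helpers slot into looks roughly like this in C. Only the save_processor_state() call is visible in the hunk; the acpi_enter_sleep_state() step and the exact ordering are assumptions inferred from the surrounding symbols (saved_eip, ret_point), not from lines shown here.

    /* C rendering of an assembly path; illustrative only */
    void do_suspend_lowlevel(void)
    {
            save_processor_state();
            save_registers();          /* GPRs, eflags, saved_eip = ret_point */
            acpi_enter_sleep_state(3); /* assumed: firmware powers down here */
            /* ret_point: on resume, wakeup_pmode_return has reloaded segments,
             * page tables and descriptor tables, then jumped to saved_eip */
            restore_registers();
            restore_processor_state();
    }
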
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S index 2e1b9e0d0767..bcc293423a70 100644 --- a/arch/x86/kernel/acpi/wakeup_64.S +++ b/arch/x86/kernel/acpi/wakeup_64.S | |||
@@ -7,191 +7,18 @@ | |||
7 | #include <asm/asm-offsets.h> | 7 | #include <asm/asm-offsets.h> |
8 | 8 | ||
9 | # Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2 | 9 | # Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2 |
10 | # | ||
11 | # wakeup_code runs in real mode, and at unknown address (determined at run-time). | ||
12 | # Therefore it must only use relative jumps/calls. | ||
13 | # | ||
14 | # Do we need to deal with A20? It is okay: ACPI spec says A20 must be enabled | ||
15 | # | ||
16 | # If physical address of wakeup_code is 0x12345, BIOS should call us with | ||
17 | # cs = 0x1234, eip = 0x05 | ||
18 | # | ||
19 | |||
20 | #define BEEP \ | ||
21 | inb $97, %al; \ | ||
22 | outb %al, $0x80; \ | ||
23 | movb $3, %al; \ | ||
24 | outb %al, $97; \ | ||
25 | outb %al, $0x80; \ | ||
26 | movb $-74, %al; \ | ||
27 | outb %al, $67; \ | ||
28 | outb %al, $0x80; \ | ||
29 | movb $-119, %al; \ | ||
30 | outb %al, $66; \ | ||
31 | outb %al, $0x80; \ | ||
32 | movb $15, %al; \ | ||
33 | outb %al, $66; | ||
34 | |||
35 | |||
36 | ALIGN | ||
37 | .align 16 | ||
38 | ENTRY(wakeup_start) | ||
39 | wakeup_code: | ||
40 | wakeup_code_start = . | ||
41 | .code16 | ||
42 | |||
43 | # Running in a *copy* of this code, somewhere in the low 1MB. | ||
44 | |||
45 | cli | ||
46 | cld | ||
47 | # setup data segment | ||
48 | movw %cs, %ax | ||
49 | movw %ax, %ds # Make ds:0 point to wakeup_start | ||
50 | movw %ax, %ss | ||
51 | |||
52 | # Data segment must be set up before we can see whether to beep. | ||
53 | testl $4, realmode_flags - wakeup_code | ||
54 | jz 1f | ||
55 | BEEP | ||
56 | 1: | ||
57 | |||
58 | # Private stack is needed for ASUS board | ||
59 | mov $(wakeup_stack - wakeup_code), %sp | ||
60 | |||
61 | pushl $0 # Kill any dangerous flags | ||
62 | popfl | ||
63 | |||
64 | movl real_magic - wakeup_code, %eax | ||
65 | cmpl $0x12345678, %eax | ||
66 | jne bogus_real_magic | ||
67 | |||
68 | testl $1, realmode_flags - wakeup_code | ||
69 | jz 1f | ||
70 | lcall $0xc000,$3 | ||
71 | movw %cs, %ax | ||
72 | movw %ax, %ds # Bios might have played with that | ||
73 | movw %ax, %ss | ||
74 | 1: | ||
75 | |||
76 | testl $2, realmode_flags - wakeup_code | ||
77 | jz 1f | ||
78 | mov video_mode - wakeup_code, %ax | ||
79 | call mode_set | ||
80 | 1: | ||
81 | |||
82 | mov %ds, %ax # Find 32bit wakeup_code addr | ||
83 | movzx %ax, %esi # (Convert %ds:gdt to a linear ptr) | ||
84 | shll $4, %esi | ||
85 | # Fix up the vectors | ||
86 | addl %esi, wakeup_32_vector - wakeup_code | ||
87 | addl %esi, wakeup_long64_vector - wakeup_code | ||
88 | addl %esi, gdt_48a + 2 - wakeup_code # Fixup the gdt pointer | ||
89 | |||
90 | lidtl %ds:idt_48a - wakeup_code | ||
91 | lgdtl %ds:gdt_48a - wakeup_code # load gdt with whatever is | ||
92 | # appropriate | ||
93 | |||
94 | movl $1, %eax # protected mode (PE) bit | ||
95 | lmsw %ax # This is it! | ||
96 | jmp 1f | ||
97 | 1: | ||
98 | |||
99 | ljmpl *(wakeup_32_vector - wakeup_code) | ||
100 | |||
101 | .balign 4 | ||
102 | wakeup_32_vector: | ||
103 | .long wakeup_32 - wakeup_code | ||
104 | .word __KERNEL32_CS, 0 | ||
105 | |||
106 | .code32 | ||
107 | wakeup_32: | ||
108 | # Running in this code, but at low address; paging is not yet turned on. | ||
109 | |||
110 | movl $__KERNEL_DS, %eax | ||
111 | movl %eax, %ds | ||
112 | |||
113 | /* | ||
114 | * Prepare for entering 64bits mode | ||
115 | */ | ||
116 | |||
117 | /* Enable PAE */ | ||
118 | xorl %eax, %eax | ||
119 | btsl $5, %eax | ||
120 | movl %eax, %cr4 | ||
121 | |||
122 | /* Setup early boot stage 4 level pagetables */ | ||
123 | leal (wakeup_level4_pgt - wakeup_code)(%esi), %eax | ||
124 | movl %eax, %cr3 | ||
125 | |||
126 | /* Check if nx is implemented */ | ||
127 | movl $0x80000001, %eax | ||
128 | cpuid | ||
129 | movl %edx,%edi | ||
130 | |||
131 | /* Enable Long Mode */ | ||
132 | xorl %eax, %eax | ||
133 | btsl $_EFER_LME, %eax | ||
134 | |||
135 | /* No Execute supported? */ | ||
136 | btl $20,%edi | ||
137 | jnc 1f | ||
138 | btsl $_EFER_NX, %eax | ||
139 | |||
140 | /* Make changes effective */ | ||
141 | 1: movl $MSR_EFER, %ecx | ||
142 | xorl %edx, %edx | ||
143 | wrmsr | ||
144 | |||
145 | xorl %eax, %eax | ||
146 | btsl $31, %eax /* Enable paging and in turn activate Long Mode */ | ||
147 | btsl $0, %eax /* Enable protected mode */ | ||
148 | |||
149 | /* Make changes effective */ | ||
150 | movl %eax, %cr0 | ||
151 | |||
152 | /* At this point: | ||
153 | CR4.PAE must be 1 | ||
154 | CS.L must be 0 | ||
155 | CR3 must point to PML4 | ||
156 | Next instruction must be a branch | ||
157 | This must be on identity-mapped page | ||
158 | */ | ||
159 | /* | ||
160 | * At this point we're in long mode but in 32bit compatibility mode | ||
161 | * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn | ||
162 | * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we load | ||
163 | * the new gdt/idt that has __KERNEL_CS with CS.L = 1. | ||
164 | */ | ||
165 | |||
166 | /* Finally jump in 64bit mode */ | ||
167 | ljmp *(wakeup_long64_vector - wakeup_code)(%esi) | ||
168 | |||
169 | .balign 4 | ||
170 | wakeup_long64_vector: | ||
171 | .long wakeup_long64 - wakeup_code | ||
172 | .word __KERNEL_CS, 0 | ||
173 | 10 | ||
174 | .code64 | 11 | .code64 |
175 | |||
176 | /* Hooray, we are in Long 64-bit mode (but still running in | ||
177 | * low memory) | ||
178 | */ | ||
179 | wakeup_long64: | ||
180 | /* | 12 | /* |
181 | * We must switch to a new descriptor in kernel space for the GDT | 13 | * Hooray, we are in Long 64-bit mode (but still running in low memory) |
182 | * because soon the kernel won't have access anymore to the userspace | ||
183 | * addresses where we're currently running on. We have to do that here | ||
184 | * because in 32bit we couldn't load a 64bit linear address. | ||
185 | */ | 14 | */ |
186 | lgdt cpu_gdt_descr | 15 | ENTRY(wakeup_long64) |
187 | 16 | wakeup_long64: | |
188 | movq saved_magic, %rax | 17 | movq saved_magic, %rax |
189 | movq $0x123456789abcdef0, %rdx | 18 | movq $0x123456789abcdef0, %rdx |
190 | cmpq %rdx, %rax | 19 | cmpq %rdx, %rax |
191 | jne bogus_64_magic | 20 | jne bogus_64_magic |
192 | 21 | ||
193 | nop | ||
194 | nop | ||
195 | movw $__KERNEL_DS, %ax | 22 | movw $__KERNEL_DS, %ax |
196 | movw %ax, %ss | 23 | movw %ax, %ss |
197 | movw %ax, %ds | 24 | movw %ax, %ds |
@@ -208,130 +35,8 @@ wakeup_long64: | |||
208 | movq saved_rip, %rax | 35 | movq saved_rip, %rax |
209 | jmp *%rax | 36 | jmp *%rax |
210 | 37 | ||
211 | .code32 | ||
212 | |||
213 | .align 64 | ||
214 | gdta: | ||
215 | /* It's good to keep the gdt in sync with the one in trampoline.S */ | ||
216 | .word 0, 0, 0, 0 # dummy | ||
217 | /* ??? Why do I need the accessed bit set in order for this to work? */ | ||
218 | .quad 0x00cf9b000000ffff # __KERNEL32_CS | ||
219 | .quad 0x00af9b000000ffff # __KERNEL_CS | ||
220 | .quad 0x00cf93000000ffff # __KERNEL_DS | ||
221 | |||
222 | idt_48a: | ||
223 | .word 0 # idt limit = 0 | ||
224 | .word 0, 0 # idt base = 0L | ||
225 | |||
226 | gdt_48a: | ||
227 | .word 0x800 # gdt limit=2048, | ||
228 | # 256 GDT entries | ||
229 | .long gdta - wakeup_code # gdt base (relocated in later) | ||
230 | |||
231 | real_magic: .quad 0 | ||
232 | video_mode: .quad 0 | ||
233 | realmode_flags: .quad 0 | ||
234 | |||
235 | .code16 | ||
236 | bogus_real_magic: | ||
237 | jmp bogus_real_magic | ||
238 | |||
239 | .code64 | ||
240 | bogus_64_magic: | 38 | bogus_64_magic: |
241 | jmp bogus_64_magic | 39 | jmp bogus_64_magic |
242 | |||
243 | /* This code uses an extended set of video mode numbers. These include: | ||
244 | * Aliases for standard modes | ||
245 | * NORMAL_VGA (-1) | ||
246 | * EXTENDED_VGA (-2) | ||
247 | * ASK_VGA (-3) | ||
248 | * Video modes numbered by menu position -- NOT RECOMMENDED because of lack | ||
249 | * of compatibility when extending the table. These are between 0x00 and 0xff. | ||
250 | */ | ||
251 | #define VIDEO_FIRST_MENU 0x0000 | ||
252 | |||
253 | /* Standard BIOS video modes (BIOS number + 0x0100) */ | ||
254 | #define VIDEO_FIRST_BIOS 0x0100 | ||
255 | |||
256 | /* VESA BIOS video modes (VESA number + 0x0200) */ | ||
257 | #define VIDEO_FIRST_VESA 0x0200 | ||
258 | |||
259 | /* Video7 special modes (BIOS number + 0x0900) */ | ||
260 | #define VIDEO_FIRST_V7 0x0900 | ||
261 | |||
262 | # Setting of user mode (AX=mode ID) => CF=success | ||
263 | |||
264 | # For now, we only handle VESA modes (0x0200..0x03ff). To handle other | ||
265 | # modes, we should probably compile in the video code from the boot | ||
266 | # directory. | ||
267 | .code16 | ||
268 | mode_set: | ||
269 | movw %ax, %bx | ||
270 | subb $VIDEO_FIRST_VESA>>8, %bh | ||
271 | cmpb $2, %bh | ||
272 | jb check_vesa | ||
273 | |||
274 | setbad: | ||
275 | clc | ||
276 | ret | ||
277 | |||
278 | check_vesa: | ||
279 | orw $0x4000, %bx # Use linear frame buffer | ||
280 | movw $0x4f02, %ax # VESA BIOS mode set call | ||
281 | int $0x10 | ||
282 | cmpw $0x004f, %ax # AL=4f if implemented | ||
283 | jnz setbad # AH=0 if OK | ||
284 | |||
285 | stc | ||
286 | ret | ||
287 | |||
288 | wakeup_stack_begin: # Stack grows down | ||
289 | |||
290 | .org 0xff0 | ||
291 | wakeup_stack: # Just below end of page | ||
292 | |||
293 | .org 0x1000 | ||
294 | ENTRY(wakeup_level4_pgt) | ||
295 | .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE | ||
296 | .fill 510,8,0 | ||
297 | /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ | ||
298 | .quad level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE | ||
299 | |||
300 | ENTRY(wakeup_end) | ||
301 | |||
302 | ## | ||
303 | # acpi_copy_wakeup_routine | ||
304 | # | ||
305 | # Copy the above routine to low memory. | ||
306 | # | ||
307 | # Parameters: | ||
308 | # %rdi: place to copy wakeup routine to | ||
309 | # | ||
310 | # Returned address is location of code in low memory (past data and stack) | ||
311 | # | ||
312 | .code64 | ||
313 | ENTRY(acpi_copy_wakeup_routine) | ||
314 | pushq %rax | ||
315 | pushq %rdx | ||
316 | |||
317 | movl saved_video_mode, %edx | ||
318 | movl %edx, video_mode - wakeup_start (,%rdi) | ||
319 | movl acpi_realmode_flags, %edx | ||
320 | movl %edx, realmode_flags - wakeup_start (,%rdi) | ||
321 | movq $0x12345678, real_magic - wakeup_start (,%rdi) | ||
322 | movq $0x123456789abcdef0, %rdx | ||
323 | movq %rdx, saved_magic | ||
324 | |||
325 | movq saved_magic, %rax | ||
326 | movq $0x123456789abcdef0, %rdx | ||
327 | cmpq %rdx, %rax | ||
328 | jne bogus_64_magic | ||
329 | |||
330 | # restore the regs we used | ||
331 | popq %rdx | ||
332 | popq %rax | ||
333 | ENTRY(do_suspend_lowlevel_s4bios) | ||
334 | ret | ||
335 | 40 | ||
336 | .align 2 | 41 | .align 2 |
337 | .p2align 4,,15 | 42 | .p2align 4,,15 |
@@ -414,7 +119,7 @@ do_suspend_lowlevel: | |||
414 | jmp restore_processor_state | 119 | jmp restore_processor_state |
415 | .LFE5: | 120 | .LFE5: |
416 | .Lfe5: | 121 | .Lfe5: |
417 | .size do_suspend_lowlevel,.Lfe5-do_suspend_lowlevel | 122 | .size do_suspend_lowlevel, .Lfe5-do_suspend_lowlevel |
418 | 123 | ||
419 | .data | 124 | .data |
420 | ALIGN | 125 | ALIGN |
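
The saved_magic handshake that survives the trim above, rendered as C for clarity; the constant is taken from the visible lines, the helper itself is a sketch.

    /* what wakeup_long64 checks first thing after resume */
    static void check_wakeup_magic(void)
    {
            if (saved_magic != 0x123456789abcdef0UL)
                    for (;;)
                            ;       /* bogus_64_magic: state is junk, spin */
    }
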
diff --git a/arch/x86/kernel/acpi/wakeup_rm.S b/arch/x86/kernel/acpi/wakeup_rm.S new file mode 100644 index 000000000000..6ff3b5730575 --- /dev/null +++ b/arch/x86/kernel/acpi/wakeup_rm.S | |||
@@ -0,0 +1,10 @@ | |||
1 | /* | ||
2 | * Wrapper for the realmode binary, used as a transport object | ||
3 | * before it is copied to low memory. | ||
4 | */ | ||
5 | .section ".rodata","a" | ||
6 | .globl wakeup_code_start, wakeup_code_end | ||
7 | wakeup_code_start: | ||
8 | .incbin "arch/x86/kernel/acpi/realmode/wakeup.bin" | ||
9 | wakeup_code_end: | ||
10 | .size wakeup_code_start, .-wakeup_code_start | ||
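
With the realmode image carried as an .incbin blob in .rodata, C code can copy it wholesale into the low-memory buffer reserved earlier. A hedged sketch using the markers declared in sleep.h (the helper name is hypothetical):

    extern char wakeup_code_start, wakeup_code_end;

    /* hypothetical helper: install the realmode image at acpi_realmode */
    static void copy_wakeup_image(unsigned long acpi_realmode)
    {
            memcpy((void *)acpi_realmode, &wakeup_code_start,
                   &wakeup_code_end - &wakeup_code_start);
    }
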
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 5fed98ca0e1f..df4099dc1c68 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -11,6 +11,8 @@ | |||
11 | #include <asm/mce.h> | 11 | #include <asm/mce.h> |
12 | #include <asm/nmi.h> | 12 | #include <asm/nmi.h> |
13 | #include <asm/vsyscall.h> | 13 | #include <asm/vsyscall.h> |
14 | #include <asm/cacheflush.h> | ||
15 | #include <asm/io.h> | ||
14 | 16 | ||
15 | #define MAX_PATCH_LEN (255-1) | 17 | #define MAX_PATCH_LEN (255-1) |
16 | 18 | ||
@@ -177,7 +179,7 @@ static const unsigned char*const * find_nop_table(void) | |||
177 | #endif /* CONFIG_X86_64 */ | 179 | #endif /* CONFIG_X86_64 */ |
178 | 180 | ||
179 | /* Use this to add nops to a buffer, then text_poke the whole buffer. */ | 181 | /* Use this to add nops to a buffer, then text_poke the whole buffer. */ |
180 | static void add_nops(void *insns, unsigned int len) | 182 | void add_nops(void *insns, unsigned int len) |
181 | { | 183 | { |
182 | const unsigned char *const *noptable = find_nop_table(); | 184 | const unsigned char *const *noptable = find_nop_table(); |
183 | 185 | ||
@@ -190,6 +192,7 @@ static void add_nops(void *insns, unsigned int len) | |||
190 | len -= noplen; | 192 | len -= noplen; |
191 | } | 193 | } |
192 | } | 194 | } |
195 | EXPORT_SYMBOL_GPL(add_nops); | ||
193 | 196 | ||
194 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; | 197 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; |
195 | extern u8 *__smp_locks[], *__smp_locks_end[]; | 198 | extern u8 *__smp_locks[], *__smp_locks_end[]; |
@@ -205,7 +208,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end) | |||
205 | struct alt_instr *a; | 208 | struct alt_instr *a; |
206 | char insnbuf[MAX_PATCH_LEN]; | 209 | char insnbuf[MAX_PATCH_LEN]; |
207 | 210 | ||
208 | DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end); | 211 | DPRINTK("%s: alt table %p -> %p\n", __func__, start, end); |
209 | for (a = start; a < end; a++) { | 212 | for (a = start; a < end; a++) { |
210 | u8 *instr = a->instr; | 213 | u8 *instr = a->instr; |
211 | BUG_ON(a->replacementlen > a->instrlen); | 214 | BUG_ON(a->replacementlen > a->instrlen); |
@@ -217,13 +220,13 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end) | |||
217 | if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) { | 220 | if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) { |
218 | instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0)); | 221 | instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0)); |
219 | DPRINTK("%s: vsyscall fixup: %p => %p\n", | 222 | DPRINTK("%s: vsyscall fixup: %p => %p\n", |
220 | __FUNCTION__, a->instr, instr); | 223 | __func__, a->instr, instr); |
221 | } | 224 | } |
222 | #endif | 225 | #endif |
223 | memcpy(insnbuf, a->replacement, a->replacementlen); | 226 | memcpy(insnbuf, a->replacement, a->replacementlen); |
224 | add_nops(insnbuf + a->replacementlen, | 227 | add_nops(insnbuf + a->replacementlen, |
225 | a->instrlen - a->replacementlen); | 228 | a->instrlen - a->replacementlen); |
226 | text_poke(instr, insnbuf, a->instrlen); | 229 | text_poke_early(instr, insnbuf, a->instrlen); |
227 | } | 230 | } |
228 | } | 231 | } |
229 | 232 | ||
@@ -284,7 +287,6 @@ void alternatives_smp_module_add(struct module *mod, char *name, | |||
284 | void *text, void *text_end) | 287 | void *text, void *text_end) |
285 | { | 288 | { |
286 | struct smp_alt_module *smp; | 289 | struct smp_alt_module *smp; |
287 | unsigned long flags; | ||
288 | 290 | ||
289 | if (noreplace_smp) | 291 | if (noreplace_smp) |
290 | return; | 292 | return; |
@@ -307,42 +309,40 @@ void alternatives_smp_module_add(struct module *mod, char *name, | |||
307 | smp->text = text; | 309 | smp->text = text; |
308 | smp->text_end = text_end; | 310 | smp->text_end = text_end; |
309 | DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n", | 311 | DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n", |
310 | __FUNCTION__, smp->locks, smp->locks_end, | 312 | __func__, smp->locks, smp->locks_end, |
311 | smp->text, smp->text_end, smp->name); | 313 | smp->text, smp->text_end, smp->name); |
312 | 314 | ||
313 | spin_lock_irqsave(&smp_alt, flags); | 315 | spin_lock(&smp_alt); |
314 | list_add_tail(&smp->next, &smp_alt_modules); | 316 | list_add_tail(&smp->next, &smp_alt_modules); |
315 | if (boot_cpu_has(X86_FEATURE_UP)) | 317 | if (boot_cpu_has(X86_FEATURE_UP)) |
316 | alternatives_smp_unlock(smp->locks, smp->locks_end, | 318 | alternatives_smp_unlock(smp->locks, smp->locks_end, |
317 | smp->text, smp->text_end); | 319 | smp->text, smp->text_end); |
318 | spin_unlock_irqrestore(&smp_alt, flags); | 320 | spin_unlock(&smp_alt); |
319 | } | 321 | } |
320 | 322 | ||
321 | void alternatives_smp_module_del(struct module *mod) | 323 | void alternatives_smp_module_del(struct module *mod) |
322 | { | 324 | { |
323 | struct smp_alt_module *item; | 325 | struct smp_alt_module *item; |
324 | unsigned long flags; | ||
325 | 326 | ||
326 | if (smp_alt_once || noreplace_smp) | 327 | if (smp_alt_once || noreplace_smp) |
327 | return; | 328 | return; |
328 | 329 | ||
329 | spin_lock_irqsave(&smp_alt, flags); | 330 | spin_lock(&smp_alt); |
330 | list_for_each_entry(item, &smp_alt_modules, next) { | 331 | list_for_each_entry(item, &smp_alt_modules, next) { |
331 | if (mod != item->mod) | 332 | if (mod != item->mod) |
332 | continue; | 333 | continue; |
333 | list_del(&item->next); | 334 | list_del(&item->next); |
334 | spin_unlock_irqrestore(&smp_alt, flags); | 335 | spin_unlock(&smp_alt); |
335 | DPRINTK("%s: %s\n", __FUNCTION__, item->name); | 336 | DPRINTK("%s: %s\n", __func__, item->name); |
336 | kfree(item); | 337 | kfree(item); |
337 | return; | 338 | return; |
338 | } | 339 | } |
339 | spin_unlock_irqrestore(&smp_alt, flags); | 340 | spin_unlock(&smp_alt); |
340 | } | 341 | } |
341 | 342 | ||
342 | void alternatives_smp_switch(int smp) | 343 | void alternatives_smp_switch(int smp) |
343 | { | 344 | { |
344 | struct smp_alt_module *mod; | 345 | struct smp_alt_module *mod; |
345 | unsigned long flags; | ||
346 | 346 | ||
347 | #ifdef CONFIG_LOCKDEP | 347 | #ifdef CONFIG_LOCKDEP |
348 | /* | 348 | /* |
@@ -359,7 +359,7 @@ void alternatives_smp_switch(int smp) | |||
359 | return; | 359 | return; |
360 | BUG_ON(!smp && (num_online_cpus() > 1)); | 360 | BUG_ON(!smp && (num_online_cpus() > 1)); |
361 | 361 | ||
362 | spin_lock_irqsave(&smp_alt, flags); | 362 | spin_lock(&smp_alt); |
363 | 363 | ||
364 | /* | 364 | /* |
365 | * Avoid unnecessary switches because it forces JIT based VMs to | 365 | * Avoid unnecessary switches because it forces JIT based VMs to |
@@ -383,7 +383,7 @@ void alternatives_smp_switch(int smp) | |||
383 | mod->text, mod->text_end); | 383 | mod->text, mod->text_end); |
384 | } | 384 | } |
385 | smp_mode = smp; | 385 | smp_mode = smp; |
386 | spin_unlock_irqrestore(&smp_alt, flags); | 386 | spin_unlock(&smp_alt); |
387 | } | 387 | } |
388 | 388 | ||
389 | #endif | 389 | #endif |
@@ -411,7 +411,7 @@ void apply_paravirt(struct paravirt_patch_site *start, | |||
411 | 411 | ||
412 | /* Pad the rest with nops */ | 412 | /* Pad the rest with nops */ |
413 | add_nops(insnbuf + used, p->len - used); | 413 | add_nops(insnbuf + used, p->len - used); |
414 | text_poke(p->instr, insnbuf, p->len); | 414 | text_poke_early(p->instr, insnbuf, p->len); |
415 | } | 415 | } |
416 | } | 416 | } |
417 | extern struct paravirt_patch_site __start_parainstructions[], | 417 | extern struct paravirt_patch_site __start_parainstructions[], |
@@ -420,8 +420,6 @@ extern struct paravirt_patch_site __start_parainstructions[], | |||
420 | 420 | ||
421 | void __init alternative_instructions(void) | 421 | void __init alternative_instructions(void) |
422 | { | 422 | { |
423 | unsigned long flags; | ||
424 | |||
425 | /* The patching is not fully atomic, so try to avoid local interruptions | 423 | /* The patching is not fully atomic, so try to avoid local interruptions |
426 | that might execute the to be patched code. | 424 | that might execute the to be patched code. |
427 | Other CPUs are not running. */ | 425 | Other CPUs are not running. */ |
@@ -430,7 +428,6 @@ void __init alternative_instructions(void) | |||
430 | stop_mce(); | 428 | stop_mce(); |
431 | #endif | 429 | #endif |
432 | 430 | ||
433 | local_irq_save(flags); | ||
434 | apply_alternatives(__alt_instructions, __alt_instructions_end); | 431 | apply_alternatives(__alt_instructions, __alt_instructions_end); |
435 | 432 | ||
436 | /* switch to patch-once-at-boottime-only mode and free the | 433 | /* switch to patch-once-at-boottime-only mode and free the |
@@ -462,7 +459,6 @@ void __init alternative_instructions(void) | |||
462 | } | 459 | } |
463 | #endif | 460 | #endif |
464 | apply_paravirt(__parainstructions, __parainstructions_end); | 461 | apply_paravirt(__parainstructions, __parainstructions_end); |
465 | local_irq_restore(flags); | ||
466 | 462 | ||
467 | if (smp_alt_once) | 463 | if (smp_alt_once) |
468 | free_init_pages("SMP alternatives", | 464 | free_init_pages("SMP alternatives", |
@@ -475,18 +471,71 @@ void __init alternative_instructions(void) | |||
475 | #endif | 471 | #endif |
476 | } | 472 | } |
477 | 473 | ||
478 | /* | 474 | /** |
479 | * Warning: | 475 | * text_poke_early - Update instructions on a live kernel at boot time |
476 | * @addr: address to modify | ||
477 | * @opcode: source of the copy | ||
478 | * @len: length to copy | ||
479 | * | ||
480 | * When you use this code to patch more than one byte of an instruction | 480 | * When you use this code to patch more than one byte of an instruction |
481 | * you need to make sure that other CPUs cannot execute this code in parallel. | 481 | * you need to make sure that other CPUs cannot execute this code in parallel. |
482 | * Also no thread must be currently preempted in the middle of these instructions. | 482 | * Also no thread must be currently preempted in the middle of these |
483 | * And on the local CPU you need to be protected against NMI or MCE | 483 | * instructions. And on the local CPU you need to be protected against NMI or MCE |
484 | * seeing an inconsistent instruction while you patch. | 484 | * handlers seeing an inconsistent instruction while you patch. |
485 | */ | 485 | */ |
486 | void __kprobes text_poke(void *addr, unsigned char *opcode, int len) | 486 | void *text_poke_early(void *addr, const void *opcode, size_t len) |
487 | { | 487 | { |
488 | unsigned long flags; | ||
489 | local_irq_save(flags); | ||
488 | memcpy(addr, opcode, len); | 490 | memcpy(addr, opcode, len); |
491 | local_irq_restore(flags); | ||
492 | sync_core(); | ||
493 | /* Could also do a CLFLUSH here to speed up CPU recovery; but | ||
494 | that causes hangs on some VIA CPUs. */ | ||
495 | return addr; | ||
496 | } | ||
497 | |||
498 | /** | ||
499 | * text_poke - Update instructions on a live kernel | ||
500 | * @addr: address to modify | ||
501 | * @opcode: source of the copy | ||
502 | * @len: length to copy | ||
503 | * | ||
504 | * Only atomic text poke/set should be allowed when not doing early patching. | ||
505 | * It means the size must be writable atomically and the address must be aligned | ||
506 | * in a way that permits an atomic write. It also makes sure we fit on a single | ||
507 | * page. | ||
508 | */ | ||
509 | void *__kprobes text_poke(void *addr, const void *opcode, size_t len) | ||
510 | { | ||
511 | unsigned long flags; | ||
512 | char *vaddr; | ||
513 | int nr_pages = 2; | ||
514 | |||
515 | BUG_ON(len > sizeof(long)); | ||
516 | BUG_ON((((long)addr + len - 1) & ~(sizeof(long) - 1)) | ||
517 | - ((long)addr & ~(sizeof(long) - 1))); | ||
518 | if (kernel_text_address((unsigned long)addr)) { | ||
519 | struct page *pages[2] = { virt_to_page(addr), | ||
520 | virt_to_page(addr + PAGE_SIZE) }; | ||
521 | if (!pages[1]) | ||
522 | nr_pages = 1; | ||
523 | vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL); | ||
524 | BUG_ON(!vaddr); | ||
525 | local_irq_save(flags); | ||
526 | memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len); | ||
527 | local_irq_restore(flags); | ||
528 | vunmap(vaddr); | ||
529 | } else { | ||
530 | /* | ||
531 | * modules are in vmalloc'ed memory, always writable. | ||
532 | */ | ||
533 | local_irq_save(flags); | ||
534 | memcpy(addr, opcode, len); | ||
535 | local_irq_restore(flags); | ||
536 | } | ||
489 | sync_core(); | 537 | sync_core(); |
490 | /* Could also do a CLFLUSH here to speed up CPU recovery; but | 538 | /* Could also do a CLFLUSH here to speed up CPU recovery; but |
491 | that causes hangs on some VIA CPUs. */ | 539 | that causes hangs on some VIA CPUs. */ |
540 | return addr; | ||
492 | } | 541 | } |
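
Given the new pair of primitives - text_poke_early() for boot time, before other CPUs run, and text_poke() for atomic-sized pokes into live kernel text - a minimal hedged usage sketch (the helper and its caller-supplied address are hypothetical):

    /* patch one byte of kernel text with a single-byte x86 NOP */
    static void patch_nop(void *addr)
    {
            unsigned char nop = 0x90;

            /* len must be <= sizeof(long) and must not straddle that
             * alignment boundary, per the BUG_ON checks above */
            text_poke(addr, &nop, 1);
    }
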
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 00df126169b4..479926d9e004 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c | |||
@@ -27,11 +27,11 @@ | |||
27 | #include <asm/k8.h> | 27 | #include <asm/k8.h> |
28 | 28 | ||
29 | int gart_iommu_aperture; | 29 | int gart_iommu_aperture; |
30 | int gart_iommu_aperture_disabled __initdata = 0; | 30 | int gart_iommu_aperture_disabled __initdata; |
31 | int gart_iommu_aperture_allowed __initdata = 0; | 31 | int gart_iommu_aperture_allowed __initdata; |
32 | 32 | ||
33 | int fallback_aper_order __initdata = 1; /* 64MB */ | 33 | int fallback_aper_order __initdata = 1; /* 64MB */ |
34 | int fallback_aper_force __initdata = 0; | 34 | int fallback_aper_force __initdata; |
35 | 35 | ||
36 | int fix_aperture __initdata = 1; | 36 | int fix_aperture __initdata = 1; |
37 | 37 | ||
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index 35a568ea8400..687208190b06 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c | |||
@@ -50,6 +50,11 @@ | |||
50 | # error SPURIOUS_APIC_VECTOR definition error | 50 | # error SPURIOUS_APIC_VECTOR definition error |
51 | #endif | 51 | #endif |
52 | 52 | ||
53 | unsigned long mp_lapic_addr; | ||
54 | |||
55 | DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; | ||
56 | EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); | ||
57 | |||
53 | /* | 58 | /* |
54 | * Knob to control our willingness to enable the local APIC. | 59 | * Knob to control our willingness to enable the local APIC. |
55 | * | 60 | * |
@@ -621,6 +626,35 @@ int setup_profiling_timer(unsigned int multiplier) | |||
621 | } | 626 | } |
622 | 627 | ||
623 | /* | 628 | /* |
629 | * Setup extended LVT, AMD specific (K8, family 10h) | ||
630 | * | ||
631 | * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and | ||
632 | * MCE interrupts are supported. Thus MCE offset must be set to 0. | ||
633 | */ | ||
634 | |||
635 | #define APIC_EILVT_LVTOFF_MCE 0 | ||
636 | #define APIC_EILVT_LVTOFF_IBS 1 | ||
637 | |||
638 | static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask) | ||
639 | { | ||
640 | unsigned long reg = (lvt_off << 4) + APIC_EILVT0; | ||
641 | unsigned int v = (mask << 16) | (msg_type << 8) | vector; | ||
642 | apic_write(reg, v); | ||
643 | } | ||
644 | |||
645 | u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask) | ||
646 | { | ||
647 | setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask); | ||
648 | return APIC_EILVT_LVTOFF_MCE; | ||
649 | } | ||
650 | |||
651 | u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask) | ||
652 | { | ||
653 | setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask); | ||
654 | return APIC_EILVT_LVTOFF_IBS; | ||
655 | } | ||
656 | |||
657 | /* | ||
624 | * Local APIC start and shutdown | 658 | * Local APIC start and shutdown |
625 | */ | 659 | */ |
626 | 660 | ||
@@ -868,12 +902,50 @@ void __init init_bsp_APIC(void) | |||
868 | apic_write_around(APIC_LVT1, value); | 902 | apic_write_around(APIC_LVT1, value); |
869 | } | 903 | } |
870 | 904 | ||
905 | void __cpuinit lapic_setup_esr(void) | ||
906 | { | ||
907 | unsigned long oldvalue, value, maxlvt; | ||
908 | if (lapic_is_integrated() && !esr_disable) { | ||
909 | /* !82489DX */ | ||
910 | maxlvt = lapic_get_maxlvt(); | ||
911 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ | ||
912 | apic_write(APIC_ESR, 0); | ||
913 | oldvalue = apic_read(APIC_ESR); | ||
914 | |||
915 | /* enables sending errors */ | ||
916 | value = ERROR_APIC_VECTOR; | ||
917 | apic_write_around(APIC_LVTERR, value); | ||
918 | /* | ||
919 | * spec says clear errors after enabling vector. | ||
920 | */ | ||
921 | if (maxlvt > 3) | ||
922 | apic_write(APIC_ESR, 0); | ||
923 | value = apic_read(APIC_ESR); | ||
924 | if (value != oldvalue) | ||
925 | apic_printk(APIC_VERBOSE, "ESR value before enabling " | ||
926 | "vector: 0x%08lx after: 0x%08lx\n", | ||
927 | oldvalue, value); | ||
928 | } else { | ||
929 | if (esr_disable) | ||
930 | /* | ||
931 | * Something untraceable is creating bad interrupts on | ||
932 | * secondary quads ... for the moment, just leave the | ||
933 | * ESR disabled - we can't do anything useful with the | ||
934 | * errors anyway - mbligh | ||
935 | */ | ||
936 | printk(KERN_INFO "Leaving ESR disabled.\n"); | ||
937 | else | ||
938 | printk(KERN_INFO "No ESR for 82489DX.\n"); | ||
939 | } | ||
940 | } | ||
941 | |||
942 | |||
871 | /** | 943 | /** |
872 | * setup_local_APIC - setup the local APIC | 944 | * setup_local_APIC - setup the local APIC |
873 | */ | 945 | */ |
874 | void __cpuinit setup_local_APIC(void) | 946 | void __cpuinit setup_local_APIC(void) |
875 | { | 947 | { |
876 | unsigned long oldvalue, value, maxlvt, integrated; | 948 | unsigned long value, integrated; |
877 | int i, j; | 949 | int i, j; |
878 | 950 | ||
879 | /* Pound the ESR really hard over the head with a big hammer - mbligh */ | 951 | /* Pound the ESR really hard over the head with a big hammer - mbligh */ |
@@ -997,40 +1069,13 @@ void __cpuinit setup_local_APIC(void) | |||
997 | if (!integrated) /* 82489DX */ | 1069 | if (!integrated) /* 82489DX */ |
998 | value |= APIC_LVT_LEVEL_TRIGGER; | 1070 | value |= APIC_LVT_LEVEL_TRIGGER; |
999 | apic_write_around(APIC_LVT1, value); | 1071 | apic_write_around(APIC_LVT1, value); |
1072 | } | ||
1000 | 1073 | ||
1001 | if (integrated && !esr_disable) { | 1074 | void __cpuinit end_local_APIC_setup(void) |
1002 | /* !82489DX */ | 1075 | { |
1003 | maxlvt = lapic_get_maxlvt(); | 1076 | unsigned long value; |
1004 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ | ||
1005 | apic_write(APIC_ESR, 0); | ||
1006 | oldvalue = apic_read(APIC_ESR); | ||
1007 | |||
1008 | /* enables sending errors */ | ||
1009 | value = ERROR_APIC_VECTOR; | ||
1010 | apic_write_around(APIC_LVTERR, value); | ||
1011 | /* | ||
1012 | * spec says clear errors after enabling vector. | ||
1013 | */ | ||
1014 | if (maxlvt > 3) | ||
1015 | apic_write(APIC_ESR, 0); | ||
1016 | value = apic_read(APIC_ESR); | ||
1017 | if (value != oldvalue) | ||
1018 | apic_printk(APIC_VERBOSE, "ESR value before enabling " | ||
1019 | "vector: 0x%08lx after: 0x%08lx\n", | ||
1020 | oldvalue, value); | ||
1021 | } else { | ||
1022 | if (esr_disable) | ||
1023 | /* | ||
1024 | * Something untraceable is creating bad interrupts on | ||
1025 | * secondary quads ... for the moment, just leave the | ||
1026 | * ESR disabled - we can't do anything useful with the | ||
1027 | * errors anyway - mbligh | ||
1028 | */ | ||
1029 | printk(KERN_INFO "Leaving ESR disabled.\n"); | ||
1030 | else | ||
1031 | printk(KERN_INFO "No ESR for 82489DX.\n"); | ||
1032 | } | ||
1033 | 1077 | ||
1078 | lapic_setup_esr(); | ||
1034 | /* Disable the local apic timer */ | 1079 | /* Disable the local apic timer */ |
1035 | value = apic_read(APIC_LVTT); | 1080 | value = apic_read(APIC_LVTT); |
1036 | value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); | 1081 | value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); |
@@ -1147,7 +1192,7 @@ void __init init_apic_mappings(void) | |||
1147 | * default configuration (or the MP table is broken). | 1192 | * default configuration (or the MP table is broken). |
1148 | */ | 1193 | */ |
1149 | if (boot_cpu_physical_apicid == -1U) | 1194 | if (boot_cpu_physical_apicid == -1U) |
1150 | boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); | 1195 | boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); |
1151 | 1196 | ||
1152 | #ifdef CONFIG_X86_IO_APIC | 1197 | #ifdef CONFIG_X86_IO_APIC |
1153 | { | 1198 | { |
@@ -1185,6 +1230,9 @@ fake_ioapic_page: | |||
1185 | * This initializes the IO-APIC and APIC hardware if this is | 1230 | * This initializes the IO-APIC and APIC hardware if this is |
1186 | * a UP kernel. | 1231 | * a UP kernel. |
1187 | */ | 1232 | */ |
1233 | |||
1234 | int apic_version[MAX_APICS]; | ||
1235 | |||
1188 | int __init APIC_init_uniprocessor(void) | 1236 | int __init APIC_init_uniprocessor(void) |
1189 | { | 1237 | { |
1190 | if (enable_local_apic < 0) | 1238 | if (enable_local_apic < 0) |
@@ -1214,12 +1262,13 @@ int __init APIC_init_uniprocessor(void) | |||
1214 | * might be zero if read from MP tables. Get it from LAPIC. | 1262 | * might be zero if read from MP tables. Get it from LAPIC. |
1215 | */ | 1263 | */ |
1216 | #ifdef CONFIG_CRASH_DUMP | 1264 | #ifdef CONFIG_CRASH_DUMP |
1217 | boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); | 1265 | boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); |
1218 | #endif | 1266 | #endif |
1219 | phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); | 1267 | phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); |
1220 | 1268 | ||
1221 | setup_local_APIC(); | 1269 | setup_local_APIC(); |
1222 | 1270 | ||
1271 | end_local_APIC_setup(); | ||
1223 | #ifdef CONFIG_X86_IO_APIC | 1272 | #ifdef CONFIG_X86_IO_APIC |
1224 | if (smp_found_config) | 1273 | if (smp_found_config) |
1225 | if (!skip_ioapic_setup && nr_ioapics) | 1274 | if (!skip_ioapic_setup && nr_ioapics) |
@@ -1288,6 +1337,29 @@ void smp_error_interrupt(struct pt_regs *regs) | |||
1288 | irq_exit(); | 1337 | irq_exit(); |
1289 | } | 1338 | } |
1290 | 1339 | ||
1340 | #ifdef CONFIG_SMP | ||
1341 | void __init smp_intr_init(void) | ||
1342 | { | ||
1343 | /* | ||
1344 | * IRQ0 must be given a fixed assignment and initialized, | ||
1345 | * because it's used before the IO-APIC is set up. | ||
1346 | */ | ||
1347 | set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]); | ||
1348 | |||
1349 | /* | ||
1350 | * The reschedule interrupt is a CPU-to-CPU reschedule-helper | ||
1351 | * IPI, driven by wakeup. | ||
1352 | */ | ||
1353 | set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); | ||
1354 | |||
1355 | /* IPI for invalidation */ | ||
1356 | set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt); | ||
1357 | |||
1358 | /* IPI for generic function call */ | ||
1359 | set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); | ||
1360 | } | ||
1361 | #endif | ||
1362 | |||
1291 | /* | 1363 | /* |
1292 | * Initialize APIC interrupts | 1364 | * Initialize APIC interrupts |
1293 | */ | 1365 | */ |
@@ -1394,6 +1466,88 @@ void disconnect_bsp_APIC(int virt_wire_setup) | |||
1394 | } | 1466 | } |
1395 | } | 1467 | } |
1396 | 1468 | ||
1469 | unsigned int __cpuinitdata maxcpus = NR_CPUS; | ||
1470 | |||
1471 | void __cpuinit generic_processor_info(int apicid, int version) | ||
1472 | { | ||
1473 | int cpu; | ||
1474 | cpumask_t tmp_map; | ||
1475 | physid_mask_t phys_cpu; | ||
1476 | |||
1477 | /* | ||
1478 | * Validate version | ||
1479 | */ | ||
1480 | if (version == 0x0) { | ||
1481 | printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! " | ||
1482 | "fixing up to 0x10. (tell your hw vendor)\n", | ||
1483 | apicid); | ||
1484 | version = 0x10; | ||
1485 | } | ||
1486 | apic_version[apicid] = version; | ||
1487 | |||
1488 | phys_cpu = apicid_to_cpu_present(apicid); | ||
1489 | physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu); | ||
1490 | |||
1491 | if (num_processors >= NR_CPUS) { | ||
1492 | printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." | ||
1493 | " Processor ignored.\n", NR_CPUS); | ||
1494 | return; | ||
1495 | } | ||
1496 | |||
1497 | if (num_processors >= maxcpus) { | ||
1498 | printk(KERN_WARNING "WARNING: maxcpus limit of %i reached." | ||
1499 | " Processor ignored.\n", maxcpus); | ||
1500 | return; | ||
1501 | } | ||
1502 | |||
1503 | num_processors++; | ||
1504 | cpus_complement(tmp_map, cpu_present_map); | ||
1505 | cpu = first_cpu(tmp_map); | ||
1506 | |||
1507 | if (apicid == boot_cpu_physical_apicid) | ||
1508 | /* | ||
1509 | * x86_bios_cpu_apicid is required to have processors listed | ||
1510 | * in same order as logical cpu numbers. Hence the first | ||
1511 | * entry is BSP, and so on. | ||
1512 | */ | ||
1513 | cpu = 0; | ||
1514 | |||
1515 | /* | ||
1516 | * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y | ||
1517 | * but we need to work other dependencies like SMP_SUSPEND etc | ||
1518 | * before this can be done without some confusion. | ||
1519 | * if (CPU_HOTPLUG_ENABLED || num_processors > 8) | ||
1520 | * - Ashok Raj <ashok.raj@intel.com> | ||
1521 | */ | ||
1522 | if (num_processors > 8) { | ||
1523 | switch (boot_cpu_data.x86_vendor) { | ||
1524 | case X86_VENDOR_INTEL: | ||
1525 | if (!APIC_XAPIC(version)) { | ||
1526 | def_to_bigsmp = 0; | ||
1527 | break; | ||
1528 | } | ||
1529 | /* If P4 and above fall through */ | ||
1530 | case X86_VENDOR_AMD: | ||
1531 | def_to_bigsmp = 1; | ||
1532 | } | ||
1533 | } | ||
1534 | #ifdef CONFIG_SMP | ||
1535 | /* are we being called early in kernel startup? */ | ||
1536 | if (x86_cpu_to_apicid_early_ptr) { | ||
1537 | u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr; | ||
1538 | u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; | ||
1539 | |||
1540 | cpu_to_apicid[cpu] = apicid; | ||
1541 | bios_cpu_apicid[cpu] = apicid; | ||
1542 | } else { | ||
1543 | per_cpu(x86_cpu_to_apicid, cpu) = apicid; | ||
1544 | per_cpu(x86_bios_cpu_apicid, cpu) = apicid; | ||
1545 | } | ||
1546 | #endif | ||
1547 | cpu_set(cpu, cpu_possible_map); | ||
1548 | cpu_set(cpu, cpu_present_map); | ||
1549 | } | ||
1550 | |||
1397 | /* | 1551 | /* |
1398 | * Power management | 1552 | * Power management |
1399 | */ | 1553 | */ |
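
The extended-LVT helpers added above are aimed at AMD-specific consumers such as IBS profiling. A hedged call sketch; the vector and message-type values a real caller would pass are not shown in this diff:

    /* hypothetical caller: claim the IBS extended-LVT entry, unmasked */
    static void enable_ibs_eilvt(u8 vector, u8 msg_type)
    {
            u8 offset = setup_APIC_eilvt_ibs(vector, msg_type, 0);
            /* offset says which extended LVT register was programmed */
            (void)offset;
    }
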
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index d8d03e09dea2..9e8e5c050c55 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c | |||
@@ -34,13 +34,15 @@ | |||
34 | #include <asm/mpspec.h> | 34 | #include <asm/mpspec.h> |
35 | #include <asm/hpet.h> | 35 | #include <asm/hpet.h> |
36 | #include <asm/pgalloc.h> | 36 | #include <asm/pgalloc.h> |
37 | #include <asm/mach_apic.h> | ||
38 | #include <asm/nmi.h> | 37 | #include <asm/nmi.h> |
39 | #include <asm/idle.h> | 38 | #include <asm/idle.h> |
40 | #include <asm/proto.h> | 39 | #include <asm/proto.h> |
41 | #include <asm/timex.h> | 40 | #include <asm/timex.h> |
42 | #include <asm/apic.h> | 41 | #include <asm/apic.h> |
43 | 42 | ||
43 | #include <mach_ipi.h> | ||
44 | #include <mach_apic.h> | ||
45 | |||
44 | int disable_apic_timer __cpuinitdata; | 46 | int disable_apic_timer __cpuinitdata; |
45 | static int apic_calibrate_pmtmr __initdata; | 47 | static int apic_calibrate_pmtmr __initdata; |
46 | int disable_apic; | 48 | int disable_apic; |
@@ -83,6 +85,12 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events); | |||
83 | 85 | ||
84 | static unsigned long apic_phys; | 86 | static unsigned long apic_phys; |
85 | 87 | ||
88 | unsigned long mp_lapic_addr; | ||
89 | |||
90 | DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; | ||
91 | EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); | ||
92 | |||
93 | unsigned int __cpuinitdata maxcpus = NR_CPUS; | ||
86 | /* | 94 | /* |
87 | * Get the LAPIC version | 95 | * Get the LAPIC version |
88 | */ | 96 | */ |
@@ -431,7 +439,8 @@ void __cpuinit check_boot_apic_timer_broadcast(void) | |||
431 | lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY; | 439 | lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY; |
432 | 440 | ||
433 | local_irq_enable(); | 441 | local_irq_enable(); |
434 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &boot_cpu_id); | 442 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, |
443 | &boot_cpu_physical_apicid); | ||
435 | local_irq_disable(); | 444 | local_irq_disable(); |
436 | } | 445 | } |
437 | 446 | ||
@@ -640,10 +649,10 @@ int __init verify_local_APIC(void) | |||
640 | /* | 649 | /* |
641 | * The ID register is read/write in a real APIC. | 650 | * The ID register is read/write in a real APIC. |
642 | */ | 651 | */ |
643 | reg0 = apic_read(APIC_ID); | 652 | reg0 = read_apic_id(); |
644 | apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0); | 653 | apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0); |
645 | apic_write(APIC_ID, reg0 ^ APIC_ID_MASK); | 654 | apic_write(APIC_ID, reg0 ^ APIC_ID_MASK); |
646 | reg1 = apic_read(APIC_ID); | 655 | reg1 = read_apic_id(); |
647 | apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1); | 656 | apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1); |
648 | apic_write(APIC_ID, reg0); | 657 | apic_write(APIC_ID, reg0); |
649 | if (reg1 != (reg0 ^ APIC_ID_MASK)) | 658 | if (reg1 != (reg0 ^ APIC_ID_MASK)) |
@@ -728,6 +737,7 @@ void __cpuinit setup_local_APIC(void) | |||
728 | unsigned int value; | 737 | unsigned int value; |
729 | int i, j; | 738 | int i, j; |
730 | 739 | ||
740 | preempt_disable(); | ||
731 | value = apic_read(APIC_LVR); | 741 | value = apic_read(APIC_LVR); |
732 | 742 | ||
733 | BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f); | 743 | BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f); |
@@ -821,6 +831,7 @@ void __cpuinit setup_local_APIC(void) | |||
821 | else | 831 | else |
822 | value = APIC_DM_NMI | APIC_LVT_MASKED; | 832 | value = APIC_DM_NMI | APIC_LVT_MASKED; |
823 | apic_write(APIC_LVT1, value); | 833 | apic_write(APIC_LVT1, value); |
834 | preempt_enable(); | ||
824 | } | 835 | } |
825 | 836 | ||
826 | void __cpuinit lapic_setup_esr(void) | 837 | void __cpuinit lapic_setup_esr(void) |
@@ -857,10 +868,34 @@ static int __init detect_init_APIC(void) | |||
857 | } | 868 | } |
858 | 869 | ||
859 | mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; | 870 | mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; |
860 | boot_cpu_id = 0; | 871 | boot_cpu_physical_apicid = 0; |
861 | return 0; | 872 | return 0; |
862 | } | 873 | } |
863 | 874 | ||
875 | void __init early_init_lapic_mapping(void) | ||
876 | { | ||
877 | unsigned long apic_phys; | ||
878 | |||
879 | /* | ||
880 | * If no local APIC can be found then bail out: | ||
881 | * it means there is neither an MP table nor an MADT | ||
882 | */ | ||
883 | if (!smp_found_config) | ||
884 | return; | ||
885 | |||
886 | apic_phys = mp_lapic_addr; | ||
887 | |||
888 | set_fixmap_nocache(FIX_APIC_BASE, apic_phys); | ||
889 | apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", | ||
890 | APIC_BASE, apic_phys); | ||
891 | |||
892 | /* | ||
893 | * Fetch the APIC ID of the BSP in case we have a | ||
894 | * default configuration (or the MP table is broken). | ||
895 | */ | ||
896 | boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); | ||
897 | } | ||
898 | |||
864 | /** | 899 | /** |
865 | * init_apic_mappings - initialize APIC mappings | 900 | * init_apic_mappings - initialize APIC mappings |
866 | */ | 901 | */ |
@@ -881,16 +916,11 @@ void __init init_apic_mappings(void) | |||
881 | apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", | 916 | apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", |
882 | APIC_BASE, apic_phys); | 917 | APIC_BASE, apic_phys); |
883 | 918 | ||
884 | /* Put local APIC into the resource map. */ | ||
885 | lapic_resource.start = apic_phys; | ||
886 | lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1; | ||
887 | insert_resource(&iomem_resource, &lapic_resource); | ||
888 | |||
889 | /* | 919 | /* |
890 | * Fetch the APIC ID of the BSP in case we have a | 920 | * Fetch the APIC ID of the BSP in case we have a |
891 | * default configuration (or the MP table is broken). | 921 | * default configuration (or the MP table is broken). |
892 | */ | 922 | */ |
893 | boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID)); | 923 | boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); |
894 | } | 924 | } |
895 | 925 | ||
896 | /* | 926 | /* |
@@ -911,8 +941,8 @@ int __init APIC_init_uniprocessor(void) | |||
911 | 941 | ||
912 | verify_local_APIC(); | 942 | verify_local_APIC(); |
913 | 943 | ||
914 | phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id); | 944 | phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); |
915 | apic_write(APIC_ID, SET_APIC_ID(boot_cpu_id)); | 945 | apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid)); |
916 | 946 | ||
917 | setup_local_APIC(); | 947 | setup_local_APIC(); |
918 | 948 | ||
@@ -1029,6 +1059,52 @@ void disconnect_bsp_APIC(int virt_wire_setup) | |||
1029 | apic_write(APIC_LVT1, value); | 1059 | apic_write(APIC_LVT1, value); |
1030 | } | 1060 | } |
1031 | 1061 | ||
1062 | void __cpuinit generic_processor_info(int apicid, int version) | ||
1063 | { | ||
1064 | int cpu; | ||
1065 | cpumask_t tmp_map; | ||
1066 | |||
1067 | if (num_processors >= NR_CPUS) { | ||
1068 | printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." | ||
1069 | " Processor ignored.\n", NR_CPUS); | ||
1070 | return; | ||
1071 | } | ||
1072 | |||
1073 | if (num_processors >= maxcpus) { | ||
1074 | printk(KERN_WARNING "WARNING: maxcpus limit of %i reached." | ||
1075 | " Processor ignored.\n", maxcpus); | ||
1076 | return; | ||
1077 | } | ||
1078 | |||
1079 | num_processors++; | ||
1080 | cpus_complement(tmp_map, cpu_present_map); | ||
1081 | cpu = first_cpu(tmp_map); | ||
1082 | |||
1083 | physid_set(apicid, phys_cpu_present_map); | ||
1084 | if (apicid == boot_cpu_physical_apicid) { | ||
1085 | /* | ||
1086 | * x86_bios_cpu_apicid is required to have processors listed | ||
1087 | * in the same order as logical cpu numbers. Hence the first | ||
1088 | * entry is BSP, and so on. | ||
1089 | */ | ||
1090 | cpu = 0; | ||
1091 | } | ||
1092 | /* are we being called early in kernel startup? */ | ||
1093 | if (x86_cpu_to_apicid_early_ptr) { | ||
1094 | u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr; | ||
1095 | u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; | ||
1096 | |||
1097 | cpu_to_apicid[cpu] = apicid; | ||
1098 | bios_cpu_apicid[cpu] = apicid; | ||
1099 | } else { | ||
1100 | per_cpu(x86_cpu_to_apicid, cpu) = apicid; | ||
1101 | per_cpu(x86_bios_cpu_apicid, cpu) = apicid; | ||
1102 | } | ||
1103 | |||
1104 | cpu_set(cpu, cpu_possible_map); | ||
1105 | cpu_set(cpu, cpu_present_map); | ||
1106 | } | ||
1107 | |||
1032 | /* | 1108 | /* |
1033 | * Power management | 1109 | * Power management |
1034 | */ | 1110 | */ |
@@ -1065,7 +1141,7 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state) | |||
1065 | 1141 | ||
1066 | maxlvt = lapic_get_maxlvt(); | 1142 | maxlvt = lapic_get_maxlvt(); |
1067 | 1143 | ||
1068 | apic_pm_state.apic_id = apic_read(APIC_ID); | 1144 | apic_pm_state.apic_id = read_apic_id(); |
1069 | apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI); | 1145 | apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI); |
1070 | apic_pm_state.apic_ldr = apic_read(APIC_LDR); | 1146 | apic_pm_state.apic_ldr = apic_read(APIC_LDR); |
1071 | apic_pm_state.apic_dfr = apic_read(APIC_DFR); | 1147 | apic_pm_state.apic_dfr = apic_read(APIC_DFR); |
@@ -1180,9 +1256,19 @@ __cpuinit int apic_is_clustered_box(void) | |||
1180 | { | 1256 | { |
1181 | int i, clusters, zeros; | 1257 | int i, clusters, zeros; |
1182 | unsigned id; | 1258 | unsigned id; |
1183 | u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; | 1259 | u16 *bios_cpu_apicid; |
1184 | DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS); | 1260 | DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS); |
1185 | 1261 | ||
1262 | /* | ||
1263 | * There are no boxes of this kind with AMD CPUs yet. | ||
1264 | * Some AMD boxes with quad-core CPUs and 8 sockets have APIC IDs | ||
1265 | * like [4, 0x23] or [8, 0x27] and could be mistaken for vSMP | ||
1266 | * boxes; this still needs checking... | ||
1267 | */ | ||
1268 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box()) | ||
1269 | return 0; | ||
1270 | |||
1271 | bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; | ||
1186 | bitmap_zero(clustermap, NUM_APIC_CLUSTERS); | 1272 | bitmap_zero(clustermap, NUM_APIC_CLUSTERS); |
1187 | 1273 | ||
1188 | for (i = 0; i < NR_CPUS; i++) { | 1274 | for (i = 0; i < NR_CPUS; i++) { |
@@ -1219,6 +1305,12 @@ __cpuinit int apic_is_clustered_box(void) | |||
1219 | ++zeros; | 1305 | ++zeros; |
1220 | } | 1306 | } |
1221 | 1307 | ||
1308 | /* ScaleMP vSMPowered boxes have one cluster per board and TSCs are | ||
1309 | * not guaranteed to be synced between boards | ||
1310 | */ | ||
1311 | if (is_vsmp_box() && clusters > 1) | ||
1312 | return 1; | ||
1313 | |||
1222 | /* | 1314 | /* |
1223 | * If clusters > 2, then should be multi-chassis. | 1315 | * If clusters > 2, then should be multi-chassis. |
1224 | * May have to revisit this when multi-core + hyperthreaded CPUs come | 1316 | * May have to revisit this when multi-core + hyperthreaded CPUs come |
@@ -1290,3 +1382,21 @@ static __init int setup_apicpmtimer(char *s) | |||
1290 | } | 1382 | } |
1291 | __setup("apicpmtimer", setup_apicpmtimer); | 1383 | __setup("apicpmtimer", setup_apicpmtimer); |
1292 | 1384 | ||
1385 | static int __init lapic_insert_resource(void) | ||
1386 | { | ||
1387 | if (!apic_phys) | ||
1388 | return -1; | ||
1389 | |||
1390 | /* Put local APIC into the resource map. */ | ||
1391 | lapic_resource.start = apic_phys; | ||
1392 | lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1; | ||
1393 | insert_resource(&iomem_resource, &lapic_resource); | ||
1394 | |||
1395 | return 0; | ||
1396 | } | ||
1397 | |||
1398 | /* | ||
1399 | * The insertion must happen after e820_reserve_resources(), | ||
1400 | * which uses request_resource(). | ||
1401 | */ | ||
1402 | late_initcall(lapic_insert_resource); | ||
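The generic_processor_info() helper added above gives each enumerated APIC ID the first free logical CPU slot, pinning the boot processor to slot 0 so x86_bios_cpu_apicid stays ordered by logical CPU number. A minimal user-space sketch of that slot-assignment logic, with a plain bitmask standing in for cpumask_t and all names hypothetical, not kernel API:

#include <stdio.h>

#define NR_CPUS 8

static unsigned int cpu_present_map;	/* bit n set == logical cpu n present */
static int num_processors;
static int boot_apicid = 5;		/* assumed BSP APIC ID for the demo */

/* Sketch of the slot assignment done by generic_processor_info(). */
static void enumerate_processor(int apicid)
{
    int cpu;

    if (num_processors >= NR_CPUS) {
        printf("APIC ID %#x ignored: NR_CPUS limit\n", apicid);
        return;
    }
    num_processors++;

    /* first_cpu(complement of cpu_present_map): the lowest free slot */
    for (cpu = 0; cpu < NR_CPUS; cpu++)
        if (!(cpu_present_map & (1u << cpu)))
            break;

    if (apicid == boot_apicid)
        cpu = 0;	/* the BSP always becomes logical cpu 0 */

    cpu_present_map |= 1u << cpu;
    printf("APIC ID %#x -> logical cpu %d\n", apicid, cpu);
}

int main(void)
{
    int ids[] = { 5, 0, 1, 4 };	/* BSP is enumerated first at boot */
    int i;

    for (i = 0; i < 4; i++)
        enumerate_processor(ids[i]);
    return 0;
}

The early-pointer branch in the real function only changes where the cpu-to-apicid mapping is stored; the slot choice is the part modeled here.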
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index d4438ef296d8..f0030a0999c7 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c | |||
@@ -2217,7 +2217,6 @@ static struct dmi_system_id __initdata apm_dmi_table[] = { | |||
2217 | */ | 2217 | */ |
2218 | static int __init apm_init(void) | 2218 | static int __init apm_init(void) |
2219 | { | 2219 | { |
2220 | struct proc_dir_entry *apm_proc; | ||
2221 | struct desc_struct *gdt; | 2220 | struct desc_struct *gdt; |
2222 | int err; | 2221 | int err; |
2223 | 2222 | ||
@@ -2322,9 +2321,7 @@ static int __init apm_init(void) | |||
2322 | set_base(gdt[APM_DS >> 3], | 2321 | set_base(gdt[APM_DS >> 3], |
2323 | __va((unsigned long)apm_info.bios.dseg << 4)); | 2322 | __va((unsigned long)apm_info.bios.dseg << 4)); |
2324 | 2323 | ||
2325 | apm_proc = create_proc_entry("apm", 0, NULL); | 2324 | proc_create("apm", 0, NULL, &apm_file_ops); |
2326 | if (apm_proc) | ||
2327 | apm_proc->proc_fops = &apm_file_ops; | ||
2328 | 2325 | ||
2329 | kapmd_task = kthread_create(apm, NULL, "kapmd"); | 2326 | kapmd_task = kthread_create(apm, NULL, "kapmd"); |
2330 | if (IS_ERR(kapmd_task)) { | 2327 | if (IS_ERR(kapmd_task)) { |
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index 8ea040124f7d..670c3c311289 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <linux/personality.h> | 10 | #include <linux/personality.h> |
11 | #include <linux/suspend.h> | 11 | #include <linux/suspend.h> |
12 | #include <asm/ucontext.h> | 12 | #include <asm/ucontext.h> |
13 | #include "sigframe_32.h" | 13 | #include "sigframe.h" |
14 | #include <asm/pgtable.h> | 14 | #include <asm/pgtable.h> |
15 | #include <asm/fixmap.h> | 15 | #include <asm/fixmap.h> |
16 | #include <asm/processor.h> | 16 | #include <asm/processor.h> |
diff --git a/arch/x86/kernel/bugs_64.c b/arch/x86/kernel/bugs_64.c index 8f520f93ffd4..9a3ed0649d4e 100644 --- a/arch/x86/kernel/bugs_64.c +++ b/arch/x86/kernel/bugs_64.c | |||
@@ -9,13 +9,25 @@ | |||
9 | #include <asm/bugs.h> | 9 | #include <asm/bugs.h> |
10 | #include <asm/processor.h> | 10 | #include <asm/processor.h> |
11 | #include <asm/mtrr.h> | 11 | #include <asm/mtrr.h> |
12 | #include <asm/cacheflush.h> | ||
12 | 13 | ||
13 | void __init check_bugs(void) | 14 | void __init check_bugs(void) |
14 | { | 15 | { |
15 | identify_cpu(&boot_cpu_data); | 16 | identify_boot_cpu(); |
16 | #if !defined(CONFIG_SMP) | 17 | #if !defined(CONFIG_SMP) |
17 | printk("CPU: "); | 18 | printk("CPU: "); |
18 | print_cpu_info(&boot_cpu_data); | 19 | print_cpu_info(&boot_cpu_data); |
19 | #endif | 20 | #endif |
20 | alternative_instructions(); | 21 | alternative_instructions(); |
22 | |||
23 | /* | ||
24 | * Make sure the first 2MB area is not mapped by huge pages | ||
25 | * There are typically fixed size MTRRs in there and overlapping | ||
26 | * MTRRs into large pages causes slow downs. | ||
27 | * | ||
28 | * Right now we don't do that with gbpages because there seems | ||
29 | * very little benefit for that case. | ||
30 | */ | ||
31 | if (!direct_gbpages) | ||
32 | set_memory_4k((unsigned long)__va(0), 1); | ||
21 | } | 33 | } |
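The new set_memory_4k() call is motivated by the fixed-range MTRRs, which architecturally cover the first megabyte (0-512K in 64K chunks, 512K-768K in 16K chunks, 768K-1M in 4K chunks), so a single 2MB kernel mapping of physical address 0 always overlaps them. A small stand-alone sketch of that overlap, assuming only those architectural ranges and nothing kernel-specific:

#include <stdio.h>

struct range { unsigned long start, end; const char *name; };

/* x86 fixed-range MTRR coverage: all of it sits in the first 1MB. */
static const struct range fixed_mtrrs[] = {
    { 0x00000, 0x7ffff, "MTRR_FIX64K_00000" },
    { 0x80000, 0xbffff, "MTRR_FIX16K_80000/A0000" },
    { 0xc0000, 0xfffff, "MTRR_FIX4K_C0000..F8000" },
};

int main(void)
{
    unsigned long huge_start = 0;		/* 2MB page mapping phys 0 */
    unsigned long huge_end = (2UL << 20) - 1;
    int i;

    for (i = 0; i < 3; i++)
        if (fixed_mtrrs[i].start <= huge_end &&
            fixed_mtrrs[i].end >= huge_start)
            printf("%s overlaps the 2MB mapping\n", fixed_mtrrs[i].name);

    /* after set_memory_4k() forces a split: 2MB / 4KB small mappings */
    printf("split yields %lu 4K mappings\n", (huge_end + 1) / 4096);
    return 0;
}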
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index a0c4d7c5dbd7..ee7c45235e54 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile | |||
@@ -3,9 +3,9 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := intel_cacheinfo.o addon_cpuid_features.o | 5 | obj-y := intel_cacheinfo.o addon_cpuid_features.o |
6 | obj-y += feature_names.o | 6 | obj-y += proc.o feature_names.o |
7 | 7 | ||
8 | obj-$(CONFIG_X86_32) += common.o proc.o bugs.o | 8 | obj-$(CONFIG_X86_32) += common.o bugs.o |
9 | obj-$(CONFIG_X86_32) += amd.o | 9 | obj-$(CONFIG_X86_32) += amd.o |
10 | obj-$(CONFIG_X86_32) += cyrix.o | 10 | obj-$(CONFIG_X86_32) += cyrix.o |
11 | obj-$(CONFIG_X86_32) += centaur.o | 11 | obj-$(CONFIG_X86_32) += centaur.o |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 693e353999cd..0173065dc3b7 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -4,8 +4,8 @@ | |||
4 | #include <asm/io.h> | 4 | #include <asm/io.h> |
5 | #include <asm/processor.h> | 5 | #include <asm/processor.h> |
6 | #include <asm/apic.h> | 6 | #include <asm/apic.h> |
7 | #include <asm/mach_apic.h> | ||
8 | 7 | ||
8 | #include <mach_apic.h> | ||
9 | #include "cpu.h" | 9 | #include "cpu.h" |
10 | 10 | ||
11 | /* | 11 | /* |
@@ -20,7 +20,7 @@ | |||
20 | * the chip setting when fixing the bug but they also tweaked some | 20 | * the chip setting when fixing the bug but they also tweaked some |
21 | * performance at the same time.. | 21 | * performance at the same time.. |
22 | */ | 22 | */ |
23 | 23 | ||
24 | extern void vide(void); | 24 | extern void vide(void); |
25 | __asm__(".align 4\nvide: ret"); | 25 | __asm__(".align 4\nvide: ret"); |
26 | 26 | ||
@@ -63,12 +63,12 @@ static __cpuinit int amd_apic_timer_broken(void) | |||
63 | 63 | ||
64 | int force_mwait __cpuinitdata; | 64 | int force_mwait __cpuinitdata; |
65 | 65 | ||
66 | void __cpuinit early_init_amd(struct cpuinfo_x86 *c) | 66 | static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) |
67 | { | 67 | { |
68 | if (cpuid_eax(0x80000000) >= 0x80000007) { | 68 | if (cpuid_eax(0x80000000) >= 0x80000007) { |
69 | c->x86_power = cpuid_edx(0x80000007); | 69 | c->x86_power = cpuid_edx(0x80000007); |
70 | if (c->x86_power & (1<<8)) | 70 | if (c->x86_power & (1<<8)) |
71 | set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); | 71 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
72 | } | 72 | } |
73 | } | 73 | } |
74 | 74 | ||
@@ -81,7 +81,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
81 | #ifdef CONFIG_SMP | 81 | #ifdef CONFIG_SMP |
82 | unsigned long long value; | 82 | unsigned long long value; |
83 | 83 | ||
84 | /* Disable TLB flush filter by setting HWCR.FFDIS on K8 | 84 | /* |
85 | * Disable TLB flush filter by setting HWCR.FFDIS on K8 | ||
85 | * bit 6 of msr C001_0015 | 86 | * bit 6 of msr C001_0015 |
86 | * | 87 | * |
87 | * Errata 63 for SH-B3 steppings | 88 | * Errata 63 for SH-B3 steppings |
@@ -102,15 +103,16 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
102 | * no bus pipeline) | 103 | * no bus pipeline) |
103 | */ | 104 | */ |
104 | 105 | ||
105 | /* Bit 31 in normal CPUID used for nonstandard 3DNow ID; | 106 | /* |
106 | 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ | 107 | * Bit 31 in normal CPUID used for nonstandard 3DNow ID; |
107 | clear_bit(0*32+31, c->x86_capability); | 108 | * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway |
108 | 109 | */ | |
110 | clear_cpu_cap(c, 0*32+31); | ||
111 | |||
109 | r = get_model_name(c); | 112 | r = get_model_name(c); |
110 | 113 | ||
111 | switch(c->x86) | 114 | switch (c->x86) { |
112 | { | 115 | case 4: |
113 | case 4: | ||
114 | /* | 116 | /* |
115 | * General Systems BIOSen alias the cpu frequency registers | 117 | * General Systems BIOSen alias the cpu frequency registers |
116 | * of the Elan at 0x000df000. Unfortunately, one of the Linux | 118 | * of the Elan at 0x000df000. Unfortunately, one of the Linux |
@@ -120,61 +122,60 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
120 | #define CBAR (0xfffc) /* Configuration Base Address (32-bit) */ | 122 | #define CBAR (0xfffc) /* Configuration Base Address (32-bit) */ |
121 | #define CBAR_ENB (0x80000000) | 123 | #define CBAR_ENB (0x80000000) |
122 | #define CBAR_KEY (0X000000CB) | 124 | #define CBAR_KEY (0X000000CB) |
123 | if (c->x86_model==9 || c->x86_model == 10) { | 125 | if (c->x86_model == 9 || c->x86_model == 10) { |
124 | if (inl (CBAR) & CBAR_ENB) | 126 | if (inl (CBAR) & CBAR_ENB) |
125 | outl (0 | CBAR_KEY, CBAR); | 127 | outl (0 | CBAR_KEY, CBAR); |
126 | } | 128 | } |
127 | break; | 129 | break; |
128 | case 5: | 130 | case 5: |
129 | if( c->x86_model < 6 ) | 131 | if (c->x86_model < 6) { |
130 | { | ||
131 | /* Based on AMD doc 20734R - June 2000 */ | 132 | /* Based on AMD doc 20734R - June 2000 */ |
132 | if ( c->x86_model == 0 ) { | 133 | if (c->x86_model == 0) { |
133 | clear_bit(X86_FEATURE_APIC, c->x86_capability); | 134 | clear_cpu_cap(c, X86_FEATURE_APIC); |
134 | set_bit(X86_FEATURE_PGE, c->x86_capability); | 135 | set_cpu_cap(c, X86_FEATURE_PGE); |
135 | } | 136 | } |
136 | break; | 137 | break; |
137 | } | 138 | } |
138 | 139 | ||
139 | if ( c->x86_model == 6 && c->x86_mask == 1 ) { | 140 | if (c->x86_model == 6 && c->x86_mask == 1) { |
140 | const int K6_BUG_LOOP = 1000000; | 141 | const int K6_BUG_LOOP = 1000000; |
141 | int n; | 142 | int n; |
142 | void (*f_vide)(void); | 143 | void (*f_vide)(void); |
143 | unsigned long d, d2; | 144 | unsigned long d, d2; |
144 | 145 | ||
145 | printk(KERN_INFO "AMD K6 stepping B detected - "); | 146 | printk(KERN_INFO "AMD K6 stepping B detected - "); |
146 | 147 | ||
147 | /* | 148 | /* |
148 | * It looks like AMD fixed the 2.6.2 bug and improved indirect | 149 | * It looks like AMD fixed the 2.6.2 bug and improved indirect |
149 | * calls at the same time. | 150 | * calls at the same time. |
150 | */ | 151 | */ |
151 | 152 | ||
152 | n = K6_BUG_LOOP; | 153 | n = K6_BUG_LOOP; |
153 | f_vide = vide; | 154 | f_vide = vide; |
154 | rdtscl(d); | 155 | rdtscl(d); |
155 | while (n--) | 156 | while (n--) |
156 | f_vide(); | 157 | f_vide(); |
157 | rdtscl(d2); | 158 | rdtscl(d2); |
158 | d = d2-d; | 159 | d = d2-d; |
159 | 160 | ||
160 | if (d > 20*K6_BUG_LOOP) | 161 | if (d > 20*K6_BUG_LOOP) |
161 | printk("system stability may be impaired when more than 32 MB are used.\n"); | 162 | printk("system stability may be impaired when more than 32 MB are used.\n"); |
162 | else | 163 | else |
163 | printk("probably OK (after B9730xxxx).\n"); | 164 | printk("probably OK (after B9730xxxx).\n"); |
164 | printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n"); | 165 | printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n"); |
165 | } | 166 | } |
166 | 167 | ||
167 | /* K6 with old style WHCR */ | 168 | /* K6 with old style WHCR */ |
168 | if (c->x86_model < 8 || | 169 | if (c->x86_model < 8 || |
169 | (c->x86_model== 8 && c->x86_mask < 8)) { | 170 | (c->x86_model == 8 && c->x86_mask < 8)) { |
170 | /* We can only write allocate on the low 508Mb */ | 171 | /* We can only write allocate on the low 508Mb */ |
171 | if(mbytes>508) | 172 | if (mbytes > 508) |
172 | mbytes=508; | 173 | mbytes = 508; |
173 | 174 | ||
174 | rdmsr(MSR_K6_WHCR, l, h); | 175 | rdmsr(MSR_K6_WHCR, l, h); |
175 | if ((l&0x0000FFFF)==0) { | 176 | if ((l&0x0000FFFF) == 0) { |
176 | unsigned long flags; | 177 | unsigned long flags; |
177 | l=(1<<0)|((mbytes/4)<<1); | 178 | l = (1<<0)|((mbytes/4)<<1); |
178 | local_irq_save(flags); | 179 | local_irq_save(flags); |
179 | wbinvd(); | 180 | wbinvd(); |
180 | wrmsr(MSR_K6_WHCR, l, h); | 181 | wrmsr(MSR_K6_WHCR, l, h); |
@@ -185,17 +186,17 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
185 | break; | 186 | break; |
186 | } | 187 | } |
187 | 188 | ||
188 | if ((c->x86_model == 8 && c->x86_mask >7) || | 189 | if ((c->x86_model == 8 && c->x86_mask > 7) || |
189 | c->x86_model == 9 || c->x86_model == 13) { | 190 | c->x86_model == 9 || c->x86_model == 13) { |
190 | /* The more serious chips .. */ | 191 | /* The more serious chips .. */ |
191 | 192 | ||
192 | if(mbytes>4092) | 193 | if (mbytes > 4092) |
193 | mbytes=4092; | 194 | mbytes = 4092; |
194 | 195 | ||
195 | rdmsr(MSR_K6_WHCR, l, h); | 196 | rdmsr(MSR_K6_WHCR, l, h); |
196 | if ((l&0xFFFF0000)==0) { | 197 | if ((l&0xFFFF0000) == 0) { |
197 | unsigned long flags; | 198 | unsigned long flags; |
198 | l=((mbytes>>2)<<22)|(1<<16); | 199 | l = ((mbytes>>2)<<22)|(1<<16); |
199 | local_irq_save(flags); | 200 | local_irq_save(flags); |
200 | wbinvd(); | 201 | wbinvd(); |
201 | wrmsr(MSR_K6_WHCR, l, h); | 202 | wrmsr(MSR_K6_WHCR, l, h); |
@@ -207,7 +208,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
207 | /* Set MTRR capability flag if appropriate */ | 208 | /* Set MTRR capability flag if appropriate */ |
208 | if (c->x86_model == 13 || c->x86_model == 9 || | 209 | if (c->x86_model == 13 || c->x86_model == 9 || |
209 | (c->x86_model == 8 && c->x86_mask >= 8)) | 210 | (c->x86_model == 8 && c->x86_mask >= 8)) |
210 | set_bit(X86_FEATURE_K6_MTRR, c->x86_capability); | 211 | set_cpu_cap(c, X86_FEATURE_K6_MTRR); |
211 | break; | 212 | break; |
212 | } | 213 | } |
213 | 214 | ||
@@ -217,10 +218,11 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
217 | break; | 218 | break; |
218 | } | 219 | } |
219 | break; | 220 | break; |
220 | case 6: /* An Athlon/Duron */ | 221 | case 6: /* An Athlon/Duron */ |
221 | 222 | ||
222 | /* Bit 15 of Athlon specific MSR 15 needs to be 0 | 223 | /* |
223 | * to enable SSE on Palomino/Morgan/Barton CPUs. | 224 | * Bit 15 of Athlon specific MSR 15 needs to be 0 |
225 | * to enable SSE on Palomino/Morgan/Barton CPUs. | ||
224 | * If the BIOS didn't enable it already, enable it here. | 226 | * If the BIOS didn't enable it already, enable it here. |
225 | */ | 227 | */ |
226 | if (c->x86_model >= 6 && c->x86_model <= 10) { | 228 | if (c->x86_model >= 6 && c->x86_model <= 10) { |
@@ -229,15 +231,16 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
229 | rdmsr(MSR_K7_HWCR, l, h); | 231 | rdmsr(MSR_K7_HWCR, l, h); |
230 | l &= ~0x00008000; | 232 | l &= ~0x00008000; |
231 | wrmsr(MSR_K7_HWCR, l, h); | 233 | wrmsr(MSR_K7_HWCR, l, h); |
232 | set_bit(X86_FEATURE_XMM, c->x86_capability); | 234 | set_cpu_cap(c, X86_FEATURE_XMM); |
233 | } | 235 | } |
234 | } | 236 | } |
235 | 237 | ||
236 | /* It's been determined by AMD that Athlons since model 8 stepping 1 | 238 | /* |
239 | * It's been determined by AMD that Athlons since model 8 stepping 1 | ||
237 | * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx | 240 | * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx |
238 | * As per AMD technical note 27212 0.2 | 241 | * As per AMD technical note 27212 0.2 |
239 | */ | 242 | */ |
240 | if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) { | 243 | if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { |
241 | rdmsr(MSR_K7_CLK_CTL, l, h); | 244 | rdmsr(MSR_K7_CLK_CTL, l, h); |
242 | if ((l & 0xfff00000) != 0x20000000) { | 245 | if ((l & 0xfff00000) != 0x20000000) { |
243 | printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l, | 246 | printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l, |
@@ -253,20 +256,19 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
253 | /* Use K8 tuning for Fam10h and Fam11h */ | 256 | /* Use K8 tuning for Fam10h and Fam11h */ |
254 | case 0x10: | 257 | case 0x10: |
255 | case 0x11: | 258 | case 0x11: |
256 | set_bit(X86_FEATURE_K8, c->x86_capability); | 259 | set_cpu_cap(c, X86_FEATURE_K8); |
257 | break; | 260 | break; |
258 | case 6: | 261 | case 6: |
259 | set_bit(X86_FEATURE_K7, c->x86_capability); | 262 | set_cpu_cap(c, X86_FEATURE_K7); |
260 | break; | 263 | break; |
261 | } | 264 | } |
262 | if (c->x86 >= 6) | 265 | if (c->x86 >= 6) |
263 | set_bit(X86_FEATURE_FXSAVE_LEAK, c->x86_capability); | 266 | set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK); |
264 | 267 | ||
265 | display_cacheinfo(c); | 268 | display_cacheinfo(c); |
266 | 269 | ||
267 | if (cpuid_eax(0x80000000) >= 0x80000008) { | 270 | if (cpuid_eax(0x80000000) >= 0x80000008) |
268 | c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; | 271 | c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; |
269 | } | ||
270 | 272 | ||
271 | #ifdef CONFIG_X86_HT | 273 | #ifdef CONFIG_X86_HT |
272 | /* | 274 | /* |
@@ -302,20 +304,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
302 | 304 | ||
303 | /* K6s report MCEs but don't actually have all the MSRs */ | 305 | /* K6s report MCEs but don't actually have all the MSRs */ |
304 | if (c->x86 < 6) | 306 | if (c->x86 < 6) |
305 | clear_bit(X86_FEATURE_MCE, c->x86_capability); | 307 | clear_cpu_cap(c, X86_FEATURE_MCE); |
306 | 308 | ||
307 | if (cpu_has_xmm2) | 309 | if (cpu_has_xmm2) |
308 | set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability); | 310 | set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); |
309 | } | 311 | } |
310 | 312 | ||
311 | static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size) | 313 | static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) |
312 | { | 314 | { |
313 | /* AMD errata T13 (order #21922) */ | 315 | /* AMD errata T13 (order #21922) */ |
314 | if ((c->x86 == 6)) { | 316 | if ((c->x86 == 6)) { |
315 | if (c->x86_model == 3 && c->x86_mask == 0) /* Duron Rev A0 */ | 317 | if (c->x86_model == 3 && c->x86_mask == 0) /* Duron Rev A0 */ |
316 | size = 64; | 318 | size = 64; |
317 | if (c->x86_model == 4 && | 319 | if (c->x86_model == 4 && |
318 | (c->x86_mask==0 || c->x86_mask==1)) /* Tbird rev A1/A2 */ | 320 | (c->x86_mask == 0 || c->x86_mask == 1)) /* Tbird rev A1/A2 */ |
319 | size = 256; | 321 | size = 256; |
320 | } | 322 | } |
321 | return size; | 323 | return size; |
@@ -323,19 +325,20 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned in | |||
323 | 325 | ||
324 | static struct cpu_dev amd_cpu_dev __cpuinitdata = { | 326 | static struct cpu_dev amd_cpu_dev __cpuinitdata = { |
325 | .c_vendor = "AMD", | 327 | .c_vendor = "AMD", |
326 | .c_ident = { "AuthenticAMD" }, | 328 | .c_ident = { "AuthenticAMD" }, |
327 | .c_models = { | 329 | .c_models = { |
328 | { .vendor = X86_VENDOR_AMD, .family = 4, .model_names = | 330 | { .vendor = X86_VENDOR_AMD, .family = 4, .model_names = |
329 | { | 331 | { |
330 | [3] = "486 DX/2", | 332 | [3] = "486 DX/2", |
331 | [7] = "486 DX/2-WB", | 333 | [7] = "486 DX/2-WB", |
332 | [8] = "486 DX/4", | 334 | [8] = "486 DX/4", |
333 | [9] = "486 DX/4-WB", | 335 | [9] = "486 DX/4-WB", |
334 | [14] = "Am5x86-WT", | 336 | [14] = "Am5x86-WT", |
335 | [15] = "Am5x86-WB" | 337 | [15] = "Am5x86-WB" |
336 | } | 338 | } |
337 | }, | 339 | }, |
338 | }, | 340 | }, |
341 | .c_early_init = early_init_amd, | ||
339 | .c_init = init_amd, | 342 | .c_init = init_amd, |
340 | .c_size_cache = amd_size_cache, | 343 | .c_size_cache = amd_size_cache, |
341 | }; | 344 | }; |
@@ -345,3 +348,5 @@ int __init amd_init_cpu(void) | |||
345 | cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev; | 348 | cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev; |
346 | return 0; | 349 | return 0; |
347 | } | 350 | } |
351 | |||
352 | cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev); | ||
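The cpu_vendor_dev_register() line replaces the old explicit amd_init_cpu() registration and, together with the new .c_early_init hook, lets early_cpu_detect() in common.c dispatch through cpu_devs[] instead of a hard-coded vendor switch. A user-space model of that dispatch, with made-up vendor constants and a stub hook:

#include <stdio.h>

#define X86_VENDOR_NUM 3
enum { VENDOR_INTEL, VENDOR_AMD, VENDOR_UNKNOWN };

struct cpu_dev {
    const char *c_vendor;
    void (*c_early_init)(void);	/* optional early hook */
};

static struct cpu_dev *cpu_devs[X86_VENDOR_NUM];

static void amd_early(void) { printf("early_init_amd() runs\n"); }

static struct cpu_dev amd_dev = {
    .c_vendor = "AMD",
    .c_early_init = amd_early,
};

int main(void)
{
    int vendor = VENDOR_AMD;	/* pretend get_cpu_vendor() found AMD */

    /* what cpu_vendor_dev_register() arranges at build/init time */
    cpu_devs[VENDOR_AMD] = &amd_dev;

    /* the generic dispatch that replaced the vendor switch */
    if (vendor != VENDOR_UNKNOWN && cpu_devs[vendor] &&
        cpu_devs[vendor]->c_early_init)
        cpu_devs[vendor]->c_early_init();
    return 0;
}

The design win is that adding a vendor no longer touches common.c: the vendor file registers its own struct cpu_dev and hooks.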
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 9681fa15ddf0..e0f45edd6a55 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c | |||
@@ -1,31 +1,34 @@ | |||
1 | #include <linux/kernel.h> | 1 | #include <linux/kernel.h> |
2 | #include <linux/init.h> | 2 | #include <linux/init.h> |
3 | #include <linux/bitops.h> | 3 | #include <linux/bitops.h> |
4 | |||
4 | #include <asm/processor.h> | 5 | #include <asm/processor.h> |
5 | #include <asm/msr.h> | 6 | #include <asm/msr.h> |
6 | #include <asm/e820.h> | 7 | #include <asm/e820.h> |
7 | #include <asm/mtrr.h> | 8 | #include <asm/mtrr.h> |
9 | |||
8 | #include "cpu.h" | 10 | #include "cpu.h" |
9 | 11 | ||
10 | #ifdef CONFIG_X86_OOSTORE | 12 | #ifdef CONFIG_X86_OOSTORE |
11 | 13 | ||
12 | static u32 __cpuinit power2(u32 x) | 14 | static u32 __cpuinit power2(u32 x) |
13 | { | 15 | { |
14 | u32 s=1; | 16 | u32 s = 1; |
15 | while(s<=x) | 17 | |
16 | s<<=1; | 18 | while (s <= x) |
17 | return s>>=1; | 19 | s <<= 1; |
20 | |||
21 | return s >>= 1; | ||
18 | } | 22 | } |
19 | 23 | ||
20 | 24 | ||
21 | /* | 25 | /* |
22 | * Set up an actual MCR | 26 | * Set up an actual MCR |
23 | */ | 27 | */ |
24 | |||
25 | static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key) | 28 | static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key) |
26 | { | 29 | { |
27 | u32 lo, hi; | 30 | u32 lo, hi; |
28 | 31 | ||
29 | hi = base & ~0xFFF; | 32 | hi = base & ~0xFFF; |
30 | lo = ~(size-1); /* Size is a power of 2 so this makes a mask */ | 33 | lo = ~(size-1); /* Size is a power of 2 so this makes a mask */ |
31 | lo &= ~0xFFF; /* Remove the ctrl value bits */ | 34 | lo &= ~0xFFF; /* Remove the ctrl value bits */ |
@@ -35,30 +38,28 @@ static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key) | |||
35 | } | 38 | } |
36 | 39 | ||
37 | /* | 40 | /* |
38 | * Figure out what we can cover with MCRs | 41 | * Figure out what we can cover with MCRs |
39 | * | 42 | * |
40 | * Shortcut: We know you can't put 4Gig of RAM on a winchip | 43 | * Shortcut: We know you can't put 4Gig of RAM on a winchip |
41 | */ | 44 | */ |
42 | 45 | static u32 __cpuinit ramtop(void) | |
43 | static u32 __cpuinit ramtop(void) /* 16388 */ | ||
44 | { | 46 | { |
45 | int i; | ||
46 | u32 top = 0; | ||
47 | u32 clip = 0xFFFFFFFFUL; | 47 | u32 clip = 0xFFFFFFFFUL; |
48 | 48 | u32 top = 0; | |
49 | int i; | ||
50 | |||
49 | for (i = 0; i < e820.nr_map; i++) { | 51 | for (i = 0; i < e820.nr_map; i++) { |
50 | unsigned long start, end; | 52 | unsigned long start, end; |
51 | 53 | ||
52 | if (e820.map[i].addr > 0xFFFFFFFFUL) | 54 | if (e820.map[i].addr > 0xFFFFFFFFUL) |
53 | continue; | 55 | continue; |
54 | /* | 56 | /* |
55 | * Don't MCR over reserved space. Ignore the ISA hole | 57 | * Don't MCR over reserved space. Ignore the ISA hole |
56 | * we frob around that catastrophe already | 58 | * we frob around that catastrophe already |
57 | */ | 59 | */ |
58 | 60 | if (e820.map[i].type == E820_RESERVED) { | |
59 | if (e820.map[i].type == E820_RESERVED) | 61 | if (e820.map[i].addr >= 0x100000UL && |
60 | { | 62 | e820.map[i].addr < clip) |
61 | if(e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip) | ||
62 | clip = e820.map[i].addr; | 63 | clip = e820.map[i].addr; |
63 | continue; | 64 | continue; |
64 | } | 65 | } |
@@ -69,28 +70,27 @@ static u32 __cpuinit ramtop(void) /* 16388 */ | |||
69 | if (end > top) | 70 | if (end > top) |
70 | top = end; | 71 | top = end; |
71 | } | 72 | } |
72 | /* Everything below 'top' should be RAM except for the ISA hole. | 73 | /* |
73 | Because of the limited MCRs we want to map NV/ACPI into our | 74 | * Everything below 'top' should be RAM except for the ISA hole. |
74 | MCR range for gunk in RAM | 75 | * Because of the limited MCRs we want to map NV/ACPI into our |
75 | 76 | * MCR range for gunk in RAM | |
76 | Clip might cause us to MCR insufficient RAM but that is an | 77 | * |
77 | acceptable failure mode and should only bite obscure boxes with | 78 | * Clip might cause us to MCR insufficient RAM but that is an |
78 | a VESA hole at 15Mb | 79 | * acceptable failure mode and should only bite obscure boxes with |
79 | 80 | * a VESA hole at 15Mb | |
80 | The second case where clip kicks in is when the EBDA is marked | 81 | * |
81 | as reserved. Again we fail safe with reasonable results | 82 | * The second case where clip kicks in is when the EBDA is marked |
82 | */ | 83 | * as reserved. Again we fail safe with reasonable results |
83 | 84 | */ | |
84 | if(top>clip) | 85 | if (top > clip) |
85 | top=clip; | 86 | top = clip; |
86 | 87 | ||
87 | return top; | 88 | return top; |
88 | } | 89 | } |
89 | 90 | ||
90 | /* | 91 | /* |
91 | * Compute a set of MCR's to give maximum coverage | 92 | * Compute a set of MCR's to give maximum coverage |
92 | */ | 93 | */ |
93 | |||
94 | static int __cpuinit centaur_mcr_compute(int nr, int key) | 94 | static int __cpuinit centaur_mcr_compute(int nr, int key) |
95 | { | 95 | { |
96 | u32 mem = ramtop(); | 96 | u32 mem = ramtop(); |
@@ -99,141 +99,131 @@ static int __cpuinit centaur_mcr_compute(int nr, int key) | |||
99 | u32 top = root; | 99 | u32 top = root; |
100 | u32 floor = 0; | 100 | u32 floor = 0; |
101 | int ct = 0; | 101 | int ct = 0; |
102 | 102 | ||
103 | while(ct<nr) | 103 | while (ct < nr) { |
104 | { | ||
105 | u32 fspace = 0; | 104 | u32 fspace = 0; |
105 | u32 high; | ||
106 | u32 low; | ||
106 | 107 | ||
107 | /* | 108 | /* |
108 | * Find the largest block we will fill going upwards | 109 | * Find the largest block we will fill going upwards |
109 | */ | 110 | */ |
110 | 111 | high = power2(mem-top); | |
111 | u32 high = power2(mem-top); | ||
112 | 112 | ||
113 | /* | 113 | /* |
114 | * Find the largest block we will fill going downwards | 114 | * Find the largest block we will fill going downwards |
115 | */ | 115 | */ |
116 | 116 | low = base/2; | |
117 | u32 low = base/2; | ||
118 | 117 | ||
119 | /* | 118 | /* |
120 | * Don't fill below 1Mb going downwards as there | 119 | * Don't fill below 1Mb going downwards as there |
121 | * is an ISA hole in the way. | 120 | * is an ISA hole in the way. |
122 | */ | 121 | */ |
123 | 122 | if (base <= 1024*1024) | |
124 | if(base <= 1024*1024) | ||
125 | low = 0; | 123 | low = 0; |
126 | 124 | ||
127 | /* | 125 | /* |
128 | * See how much space we could cover by filling below | 126 | * See how much space we could cover by filling below |
129 | * the ISA hole | 127 | * the ISA hole |
130 | */ | 128 | */ |
131 | 129 | ||
132 | if(floor == 0) | 130 | if (floor == 0) |
133 | fspace = 512*1024; | 131 | fspace = 512*1024; |
134 | else if(floor ==512*1024) | 132 | else if (floor == 512*1024) |
135 | fspace = 128*1024; | 133 | fspace = 128*1024; |
136 | 134 | ||
137 | /* And forget ROM space */ | 135 | /* And forget ROM space */ |
138 | 136 | ||
139 | /* | 137 | /* |
140 | * Now install the largest coverage we get | 138 | * Now install the largest coverage we get |
141 | */ | 139 | */ |
142 | 140 | if (fspace > high && fspace > low) { | |
143 | if(fspace > high && fspace > low) | ||
144 | { | ||
145 | centaur_mcr_insert(ct, floor, fspace, key); | 141 | centaur_mcr_insert(ct, floor, fspace, key); |
146 | floor += fspace; | 142 | floor += fspace; |
147 | } | 143 | } else if (high > low) { |
148 | else if(high > low) | ||
149 | { | ||
150 | centaur_mcr_insert(ct, top, high, key); | 144 | centaur_mcr_insert(ct, top, high, key); |
151 | top += high; | 145 | top += high; |
152 | } | 146 | } else if (low > 0) { |
153 | else if(low > 0) | ||
154 | { | ||
155 | base -= low; | 147 | base -= low; |
156 | centaur_mcr_insert(ct, base, low, key); | 148 | centaur_mcr_insert(ct, base, low, key); |
157 | } | 149 | } else |
158 | else break; | 150 | break; |
159 | ct++; | 151 | ct++; |
160 | } | 152 | } |
161 | /* | 153 | /* |
162 | * We loaded ct values. We now need to set the mask. The caller | 154 | * We loaded ct values. We now need to set the mask. The caller |
163 | * must do this bit. | 155 | * must do this bit. |
164 | */ | 156 | */ |
165 | |||
166 | return ct; | 157 | return ct; |
167 | } | 158 | } |
168 | 159 | ||
169 | static void __cpuinit centaur_create_optimal_mcr(void) | 160 | static void __cpuinit centaur_create_optimal_mcr(void) |
170 | { | 161 | { |
162 | int used; | ||
171 | int i; | 163 | int i; |
164 | |||
172 | /* | 165 | /* |
173 | * Allocate up to 6 mcrs to mark as much of ram as possible | 166 | * Allocate up to 6 mcrs to mark as much of ram as possible |
174 | * as write combining and weak write ordered. | 167 | * as write combining and weak write ordered. |
175 | * | 168 | * |
176 | * To experiment with: Linux never uses stack operations for | 169 | * To experiment with: Linux never uses stack operations for |
177 | * mmio spaces so we could globally enable stack operation wc | 170 | * mmio spaces so we could globally enable stack operation wc |
178 | * | 171 | * |
179 | * Load the registers with type 31 - full write combining, all | 172 | * Load the registers with type 31 - full write combining, all |
180 | * writes weakly ordered. | 173 | * writes weakly ordered. |
181 | */ | 174 | */ |
182 | int used = centaur_mcr_compute(6, 31); | 175 | used = centaur_mcr_compute(6, 31); |
183 | 176 | ||
184 | /* | 177 | /* |
185 | * Wipe unused MCRs | 178 | * Wipe unused MCRs |
186 | */ | 179 | */ |
187 | 180 | for (i = used; i < 8; i++) | |
188 | for(i=used;i<8;i++) | ||
189 | wrmsr(MSR_IDT_MCR0+i, 0, 0); | 181 | wrmsr(MSR_IDT_MCR0+i, 0, 0); |
190 | } | 182 | } |
191 | 183 | ||
192 | static void __cpuinit winchip2_create_optimal_mcr(void) | 184 | static void __cpuinit winchip2_create_optimal_mcr(void) |
193 | { | 185 | { |
194 | u32 lo, hi; | 186 | u32 lo, hi; |
187 | int used; | ||
195 | int i; | 188 | int i; |
196 | 189 | ||
197 | /* | 190 | /* |
198 | * Allocate up to 6 mcrs to mark as much of ram as possible | 191 | * Allocate up to 6 mcrs to mark as much of ram as possible |
199 | * as write combining, weak store ordered. | 192 | * as write combining, weak store ordered. |
200 | * | 193 | * |
201 | * Load the registers with type 25 | 194 | * Load the registers with type 25 |
202 | * 8 - weak write ordering | 195 | * 8 - weak write ordering |
203 | * 16 - weak read ordering | 196 | * 16 - weak read ordering |
204 | * 1 - write combining | 197 | * 1 - write combining |
205 | */ | 198 | */ |
199 | used = centaur_mcr_compute(6, 25); | ||
206 | 200 | ||
207 | int used = centaur_mcr_compute(6, 25); | ||
208 | |||
209 | /* | 201 | /* |
210 | * Mark the registers we are using. | 202 | * Mark the registers we are using. |
211 | */ | 203 | */ |
212 | |||
213 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); | 204 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); |
214 | for(i=0;i<used;i++) | 205 | for (i = 0; i < used; i++) |
215 | lo|=1<<(9+i); | 206 | lo |= 1<<(9+i); |
216 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | 207 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); |
217 | 208 | ||
218 | /* | 209 | /* |
219 | * Wipe unused MCRs | 210 | * Wipe unused MCRs |
220 | */ | 211 | */ |
221 | 212 | ||
222 | for(i=used;i<8;i++) | 213 | for (i = used; i < 8; i++) |
223 | wrmsr(MSR_IDT_MCR0+i, 0, 0); | 214 | wrmsr(MSR_IDT_MCR0+i, 0, 0); |
224 | } | 215 | } |
225 | 216 | ||
226 | /* | 217 | /* |
227 | * Handle the MCR key on the Winchip 2. | 218 | * Handle the MCR key on the Winchip 2. |
228 | */ | 219 | */ |
229 | |||
230 | static void __cpuinit winchip2_unprotect_mcr(void) | 220 | static void __cpuinit winchip2_unprotect_mcr(void) |
231 | { | 221 | { |
232 | u32 lo, hi; | 222 | u32 lo, hi; |
233 | u32 key; | 223 | u32 key; |
234 | 224 | ||
235 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); | 225 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); |
236 | lo&=~0x1C0; /* blank bits 8-6 */ | 226 | lo &= ~0x1C0; /* blank bits 8-6 */ |
237 | key = (lo>>17) & 7; | 227 | key = (lo>>17) & 7; |
238 | lo |= key<<6; /* replace with unlock key */ | 228 | lo |= key<<6; /* replace with unlock key */ |
239 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | 229 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); |
@@ -242,9 +232,9 @@ static void __cpuinit winchip2_unprotect_mcr(void) | |||
242 | static void __cpuinit winchip2_protect_mcr(void) | 232 | static void __cpuinit winchip2_protect_mcr(void) |
243 | { | 233 | { |
244 | u32 lo, hi; | 234 | u32 lo, hi; |
245 | 235 | ||
246 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); | 236 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); |
247 | lo&=~0x1C0; /* blank bits 8-6 */ | 237 | lo &= ~0x1C0; /* blank bits 8-6 */ |
248 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | 238 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); |
249 | } | 239 | } |
250 | #endif /* CONFIG_X86_OOSTORE */ | 240 | #endif /* CONFIG_X86_OOSTORE */ |
@@ -267,17 +257,17 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c) | |||
267 | 257 | ||
268 | /* enable ACE unit, if present and disabled */ | 258 | /* enable ACE unit, if present and disabled */ |
269 | if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) { | 259 | if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) { |
270 | rdmsr (MSR_VIA_FCR, lo, hi); | 260 | rdmsr(MSR_VIA_FCR, lo, hi); |
271 | lo |= ACE_FCR; /* enable ACE unit */ | 261 | lo |= ACE_FCR; /* enable ACE unit */ |
272 | wrmsr (MSR_VIA_FCR, lo, hi); | 262 | wrmsr(MSR_VIA_FCR, lo, hi); |
273 | printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n"); | 263 | printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n"); |
274 | } | 264 | } |
275 | 265 | ||
276 | /* enable RNG unit, if present and disabled */ | 266 | /* enable RNG unit, if present and disabled */ |
277 | if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) { | 267 | if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) { |
278 | rdmsr (MSR_VIA_RNG, lo, hi); | 268 | rdmsr(MSR_VIA_RNG, lo, hi); |
279 | lo |= RNG_ENABLE; /* enable RNG unit */ | 269 | lo |= RNG_ENABLE; /* enable RNG unit */ |
280 | wrmsr (MSR_VIA_RNG, lo, hi); | 270 | wrmsr(MSR_VIA_RNG, lo, hi); |
281 | printk(KERN_INFO "CPU: Enabled h/w RNG\n"); | 271 | printk(KERN_INFO "CPU: Enabled h/w RNG\n"); |
282 | } | 272 | } |
283 | 273 | ||
@@ -288,171 +278,183 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c) | |||
288 | } | 278 | } |
289 | 279 | ||
290 | /* Cyrix III family needs CX8 & PGE explicitly enabled. */ | 280 | /* Cyrix III family needs CX8 & PGE explicitly enabled. */ |
291 | if (c->x86_model >=6 && c->x86_model <= 9) { | 281 | if (c->x86_model >= 6 && c->x86_model <= 9) { |
292 | rdmsr (MSR_VIA_FCR, lo, hi); | 282 | rdmsr(MSR_VIA_FCR, lo, hi); |
293 | lo |= (1<<1 | 1<<7); | 283 | lo |= (1<<1 | 1<<7); |
294 | wrmsr (MSR_VIA_FCR, lo, hi); | 284 | wrmsr(MSR_VIA_FCR, lo, hi); |
295 | set_bit(X86_FEATURE_CX8, c->x86_capability); | 285 | set_cpu_cap(c, X86_FEATURE_CX8); |
296 | } | 286 | } |
297 | 287 | ||
298 | /* Before Nehemiah, the C3's had 3dNOW! */ | 288 | /* Before Nehemiah, the C3's had 3dNOW! */ |
299 | if (c->x86_model >=6 && c->x86_model <9) | 289 | if (c->x86_model >= 6 && c->x86_model < 9) |
300 | set_bit(X86_FEATURE_3DNOW, c->x86_capability); | 290 | set_cpu_cap(c, X86_FEATURE_3DNOW); |
301 | 291 | ||
302 | get_model_name(c); | 292 | get_model_name(c); |
303 | display_cacheinfo(c); | 293 | display_cacheinfo(c); |
304 | } | 294 | } |
305 | 295 | ||
296 | enum { | ||
297 | ECX8 = 1<<1, | ||
298 | EIERRINT = 1<<2, | ||
299 | DPM = 1<<3, | ||
300 | DMCE = 1<<4, | ||
301 | DSTPCLK = 1<<5, | ||
302 | ELINEAR = 1<<6, | ||
303 | DSMC = 1<<7, | ||
304 | DTLOCK = 1<<8, | ||
305 | EDCTLB = 1<<8, | ||
306 | EMMX = 1<<9, | ||
307 | DPDC = 1<<11, | ||
308 | EBRPRED = 1<<12, | ||
309 | DIC = 1<<13, | ||
310 | DDC = 1<<14, | ||
311 | DNA = 1<<15, | ||
312 | ERETSTK = 1<<16, | ||
313 | E2MMX = 1<<19, | ||
314 | EAMD3D = 1<<20, | ||
315 | }; | ||
316 | |||
306 | static void __cpuinit init_centaur(struct cpuinfo_x86 *c) | 317 | static void __cpuinit init_centaur(struct cpuinfo_x86 *c) |
307 | { | 318 | { |
308 | enum { | ||
309 | ECX8=1<<1, | ||
310 | EIERRINT=1<<2, | ||
311 | DPM=1<<3, | ||
312 | DMCE=1<<4, | ||
313 | DSTPCLK=1<<5, | ||
314 | ELINEAR=1<<6, | ||
315 | DSMC=1<<7, | ||
316 | DTLOCK=1<<8, | ||
317 | EDCTLB=1<<8, | ||
318 | EMMX=1<<9, | ||
319 | DPDC=1<<11, | ||
320 | EBRPRED=1<<12, | ||
321 | DIC=1<<13, | ||
322 | DDC=1<<14, | ||
323 | DNA=1<<15, | ||
324 | ERETSTK=1<<16, | ||
325 | E2MMX=1<<19, | ||
326 | EAMD3D=1<<20, | ||
327 | }; | ||
328 | 319 | ||
329 | char *name; | 320 | char *name; |
330 | u32 fcr_set=0; | 321 | u32 fcr_set = 0; |
331 | u32 fcr_clr=0; | 322 | u32 fcr_clr = 0; |
332 | u32 lo,hi,newlo; | 323 | u32 lo, hi, newlo; |
333 | u32 aa,bb,cc,dd; | 324 | u32 aa, bb, cc, dd; |
334 | 325 | ||
335 | /* Bit 31 in normal CPUID used for nonstandard 3DNow ID; | 326 | /* |
336 | 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ | 327 | * Bit 31 in normal CPUID used for nonstandard 3DNow ID; |
337 | clear_bit(0*32+31, c->x86_capability); | 328 | * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway |
329 | */ | ||
330 | clear_cpu_cap(c, 0*32+31); | ||
338 | 331 | ||
339 | switch (c->x86) { | 332 | switch (c->x86) { |
340 | 333 | case 5: | |
341 | case 5: | 334 | switch (c->x86_model) { |
342 | switch(c->x86_model) { | 335 | case 4: |
343 | case 4: | 336 | name = "C6"; |
344 | name="C6"; | 337 | fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK; |
345 | fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK; | 338 | fcr_clr = DPDC; |
346 | fcr_clr=DPDC; | 339 | printk(KERN_NOTICE "Disabling bugged TSC.\n"); |
347 | printk(KERN_NOTICE "Disabling bugged TSC.\n"); | 340 | clear_cpu_cap(c, X86_FEATURE_TSC); |
348 | clear_bit(X86_FEATURE_TSC, c->x86_capability); | ||
349 | #ifdef CONFIG_X86_OOSTORE | 341 | #ifdef CONFIG_X86_OOSTORE |
350 | centaur_create_optimal_mcr(); | 342 | centaur_create_optimal_mcr(); |
351 | /* Enable | 343 | /* |
352 | write combining on non-stack, non-string | 344 | * Enable: |
353 | write combining on string, all types | 345 | * write combining on non-stack, non-string |
354 | weak write ordering | 346 | * write combining on string, all types |
355 | 347 | * weak write ordering | |
356 | The C6 original lacks weak read order | 348 | * |
357 | 349 | * The C6 original lacks weak read order | |
358 | Note 0x120 is write only on Winchip 1 */ | 350 | * |
359 | 351 | * Note 0x120 is write only on Winchip 1 | |
360 | wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0); | 352 | */ |
361 | #endif | 353 | wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0); |
354 | #endif | ||
355 | break; | ||
356 | case 8: | ||
357 | switch (c->x86_mask) { | ||
358 | default: | ||
359 | name = "2"; | ||
360 | break; | ||
361 | case 7 ... 9: | ||
362 | name = "2A"; | ||
362 | break; | 363 | break; |
363 | case 8: | 364 | case 10 ... 15: |
364 | switch(c->x86_mask) { | 365 | name = "2B"; |
365 | default: | 366 | break; |
366 | name="2"; | 367 | } |
367 | break; | 368 | fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| |
368 | case 7 ... 9: | 369 | E2MMX|EAMD3D; |
369 | name="2A"; | 370 | fcr_clr = DPDC; |
370 | break; | ||
371 | case 10 ... 15: | ||
372 | name="2B"; | ||
373 | break; | ||
374 | } | ||
375 | fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D; | ||
376 | fcr_clr=DPDC; | ||
377 | #ifdef CONFIG_X86_OOSTORE | 371 | #ifdef CONFIG_X86_OOSTORE |
378 | winchip2_unprotect_mcr(); | 372 | winchip2_unprotect_mcr(); |
379 | winchip2_create_optimal_mcr(); | 373 | winchip2_create_optimal_mcr(); |
380 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); | 374 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); |
381 | /* Enable | 375 | /* |
382 | write combining on non-stack, non-string | 376 | * Enable: |
383 | write combining on string, all types | 377 | * write combining on non-stack, non-string |
384 | weak write ordering | 378 | * write combining on string, all types |
385 | */ | 379 | * weak write ordering |
386 | lo|=31; | 380 | */ |
387 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | 381 | lo |= 31; |
388 | winchip2_protect_mcr(); | 382 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); |
383 | winchip2_protect_mcr(); | ||
389 | #endif | 384 | #endif |
390 | break; | 385 | break; |
391 | case 9: | 386 | case 9: |
392 | name="3"; | 387 | name = "3"; |
393 | fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D; | 388 | fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| |
394 | fcr_clr=DPDC; | 389 | E2MMX|EAMD3D; |
390 | fcr_clr = DPDC; | ||
395 | #ifdef CONFIG_X86_OOSTORE | 391 | #ifdef CONFIG_X86_OOSTORE |
396 | winchip2_unprotect_mcr(); | 392 | winchip2_unprotect_mcr(); |
397 | winchip2_create_optimal_mcr(); | 393 | winchip2_create_optimal_mcr(); |
398 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); | 394 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); |
399 | /* Enable | 395 | /* |
400 | write combining on non-stack, non-string | 396 | * Enable: |
401 | write combining on string, all types | 397 | * write combining on non-stack, non-string |
402 | weak write ordering | 398 | * write combining on string, all types |
403 | */ | 399 | * weak write ordering |
404 | lo|=31; | 400 | */ |
405 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | 401 | lo |= 31; |
406 | winchip2_protect_mcr(); | 402 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); |
403 | winchip2_protect_mcr(); | ||
407 | #endif | 404 | #endif |
408 | break; | 405 | break; |
409 | default: | 406 | default: |
410 | name="??"; | 407 | name = "??"; |
411 | } | 408 | } |
412 | 409 | ||
413 | rdmsr(MSR_IDT_FCR1, lo, hi); | 410 | rdmsr(MSR_IDT_FCR1, lo, hi); |
414 | newlo=(lo|fcr_set) & (~fcr_clr); | 411 | newlo = (lo|fcr_set) & (~fcr_clr); |
415 | 412 | ||
416 | if (newlo!=lo) { | 413 | if (newlo != lo) { |
417 | printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo ); | 414 | printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", |
418 | wrmsr(MSR_IDT_FCR1, newlo, hi ); | 415 | lo, newlo); |
419 | } else { | 416 | wrmsr(MSR_IDT_FCR1, newlo, hi); |
420 | printk(KERN_INFO "Centaur FCR is 0x%X\n",lo); | 417 | } else { |
421 | } | 418 | printk(KERN_INFO "Centaur FCR is 0x%X\n", lo); |
422 | /* Emulate MTRRs using Centaur's MCR. */ | 419 | } |
423 | set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability); | 420 | /* Emulate MTRRs using Centaur's MCR. */ |
424 | /* Report CX8 */ | 421 | set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR); |
425 | set_bit(X86_FEATURE_CX8, c->x86_capability); | 422 | /* Report CX8 */ |
426 | /* Set 3DNow! on Winchip 2 and above. */ | 423 | set_cpu_cap(c, X86_FEATURE_CX8); |
427 | if (c->x86_model >=8) | 424 | /* Set 3DNow! on Winchip 2 and above. */ |
428 | set_bit(X86_FEATURE_3DNOW, c->x86_capability); | 425 | if (c->x86_model >= 8) |
429 | /* See if we can find out some more. */ | 426 | set_cpu_cap(c, X86_FEATURE_3DNOW); |
430 | if ( cpuid_eax(0x80000000) >= 0x80000005 ) { | 427 | /* See if we can find out some more. */ |
431 | /* Yes, we can. */ | 428 | if (cpuid_eax(0x80000000) >= 0x80000005) { |
432 | cpuid(0x80000005,&aa,&bb,&cc,&dd); | 429 | /* Yes, we can. */ |
433 | /* Add L1 data and code cache sizes. */ | 430 | cpuid(0x80000005, &aa, &bb, &cc, &dd); |
434 | c->x86_cache_size = (cc>>24)+(dd>>24); | 431 | /* Add L1 data and code cache sizes. */ |
435 | } | 432 | c->x86_cache_size = (cc>>24)+(dd>>24); |
436 | sprintf( c->x86_model_id, "WinChip %s", name ); | 433 | } |
437 | break; | 434 | sprintf(c->x86_model_id, "WinChip %s", name); |
435 | break; | ||
438 | 436 | ||
439 | case 6: | 437 | case 6: |
440 | init_c3(c); | 438 | init_c3(c); |
441 | break; | 439 | break; |
442 | } | 440 | } |
443 | } | 441 | } |
444 | 442 | ||
445 | static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size) | 443 | static unsigned int __cpuinit |
444 | centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) | ||
446 | { | 445 | { |
447 | /* VIA C3 CPUs (670-68F) need further shifting. */ | 446 | /* VIA C3 CPUs (670-68F) need further shifting. */ |
448 | if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8))) | 447 | if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8))) |
449 | size >>= 8; | 448 | size >>= 8; |
450 | 449 | ||
451 | /* VIA also screwed up Nehemiah stepping 1, and made | 450 | /* |
452 | it return '65KB' instead of '64KB' | 451 | * There's also an erratum in Nehemiah stepping 1, which |
453 | - Note, it seems this may only be in engineering samples. */ | 452 | * returns '65KB' instead of '64KB' |
454 | if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65)) | 453 | * - Note, it seems this may only be in engineering samples. |
455 | size -=1; | 454 | */ |
455 | if ((c->x86 == 6) && (c->x86_model == 9) && | ||
456 | (c->x86_mask == 1) && (size == 65)) | ||
457 | size -= 1; | ||
456 | 458 | ||
457 | return size; | 459 | return size; |
458 | } | 460 | } |
@@ -464,8 +466,4 @@ static struct cpu_dev centaur_cpu_dev __cpuinitdata = { | |||
464 | .c_size_cache = centaur_size_cache, | 466 | .c_size_cache = centaur_size_cache, |
465 | }; | 467 | }; |
466 | 468 | ||
467 | int __init centaur_init_cpu(void) | 469 | cpu_vendor_dev_register(X86_VENDOR_CENTAUR, ¢aur_cpu_dev); |
468 | { | ||
469 | cpu_devs[X86_VENDOR_CENTAUR] = ¢aur_cpu_dev; | ||
470 | return 0; | ||
471 | } | ||
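centaur_mcr_compute(), restyled above, is a greedy fill: each iteration takes the largest power-of-two block available either above 'top' or below 'base', with a special case for the space under the ISA hole. The loop can be traced in user space with a printf standing in for centaur_mcr_insert() and an assumed 96MB ramtop():

#include <stdio.h>

/* Round down to the largest power of two <= x (0 for x == 0). */
static unsigned int power2(unsigned int x)
{
    unsigned int s = 1;

    while (s <= x)
        s <<= 1;
    return s >> 1;
}

int main(void)
{
    unsigned int mem = 96u << 20;	/* pretend ramtop() returned 96MB */
    unsigned int base = power2(mem);
    unsigned int top = base, floor = 0;
    int ct;

    for (ct = 0; ct < 6; ct++) {
        unsigned int fspace = 0;
        unsigned int high = power2(mem - top);	/* block above 'top' */
        unsigned int low = base / 2;		/* block below 'base' */

        if (base <= 1024 * 1024)
            low = 0;		/* never fill across the ISA hole */
        if (floor == 0)
            fspace = 512 * 1024;
        else if (floor == 512 * 1024)
            fspace = 128 * 1024;

        if (fspace > high && fspace > low) {
            printf("MCR%d: base %#010x size %#010x (below ISA hole)\n",
                   ct, floor, fspace);
            floor += fspace;
        } else if (high > low) {
            printf("MCR%d: base %#010x size %#010x (upwards)\n",
                   ct, top, high);
            top += high;
        } else if (low > 0) {
            base -= low;
            printf("MCR%d: base %#010x size %#010x (downwards)\n",
                   ct, base, low);
        } else {
            break;
        }
    }
    return 0;
}

With 96MB the six MCRs end up covering 2MB-96MB, which matches the comment's point that the caller still has to program the mask bits.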
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index a38aafaefc23..d999d7833bc2 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -62,9 +62,9 @@ __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; | |||
62 | static int cachesize_override __cpuinitdata = -1; | 62 | static int cachesize_override __cpuinitdata = -1; |
63 | static int disable_x86_serial_nr __cpuinitdata = 1; | 63 | static int disable_x86_serial_nr __cpuinitdata = 1; |
64 | 64 | ||
65 | struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {}; | 65 | struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; |
66 | 66 | ||
67 | static void __cpuinit default_init(struct cpuinfo_x86 * c) | 67 | static void __cpuinit default_init(struct cpuinfo_x86 *c) |
68 | { | 68 | { |
69 | /* Not much we can do here... */ | 69 | /* Not much we can do here... */ |
70 | /* Check if at least it has cpuid */ | 70 | /* Check if at least it has cpuid */ |
@@ -81,11 +81,11 @@ static struct cpu_dev __cpuinitdata default_cpu = { | |||
81 | .c_init = default_init, | 81 | .c_init = default_init, |
82 | .c_vendor = "Unknown", | 82 | .c_vendor = "Unknown", |
83 | }; | 83 | }; |
84 | static struct cpu_dev * this_cpu __cpuinitdata = &default_cpu; | 84 | static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; |
85 | 85 | ||
86 | static int __init cachesize_setup(char *str) | 86 | static int __init cachesize_setup(char *str) |
87 | { | 87 | { |
88 | get_option (&str, &cachesize_override); | 88 | get_option(&str, &cachesize_override); |
89 | return 1; | 89 | return 1; |
90 | } | 90 | } |
91 | __setup("cachesize=", cachesize_setup); | 91 | __setup("cachesize=", cachesize_setup); |
@@ -107,12 +107,12 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c) | |||
107 | /* Intel chips right-justify this string for some dumb reason; | 107 | /* Intel chips right-justify this string for some dumb reason; |
108 | undo that brain damage */ | 108 | undo that brain damage */ |
109 | p = q = &c->x86_model_id[0]; | 109 | p = q = &c->x86_model_id[0]; |
110 | while ( *p == ' ' ) | 110 | while (*p == ' ') |
111 | p++; | 111 | p++; |
112 | if ( p != q ) { | 112 | if (p != q) { |
113 | while ( *p ) | 113 | while (*p) |
114 | *q++ = *p++; | 114 | *q++ = *p++; |
115 | while ( q <= &c->x86_model_id[48] ) | 115 | while (q <= &c->x86_model_id[48]) |
116 | *q++ = '\0'; /* Zero-pad the rest */ | 116 | *q++ = '\0'; /* Zero-pad the rest */ |
117 | } | 117 | } |
118 | 118 | ||
@@ -130,7 +130,7 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) | |||
130 | cpuid(0x80000005, &dummy, &dummy, &ecx, &edx); | 130 | cpuid(0x80000005, &dummy, &dummy, &ecx, &edx); |
131 | printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n", | 131 | printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n", |
132 | edx>>24, edx&0xFF, ecx>>24, ecx&0xFF); | 132 | edx>>24, edx&0xFF, ecx>>24, ecx&0xFF); |
133 | c->x86_cache_size=(ecx>>24)+(edx>>24); | 133 | c->x86_cache_size = (ecx>>24)+(edx>>24); |
134 | } | 134 | } |
135 | 135 | ||
136 | if (n < 0x80000006) /* Some chips just have a large L1. */ | 136 | if (n < 0x80000006) /* Some chips just have a large L1. */ |
@@ -138,16 +138,16 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) | |||
138 | 138 | ||
139 | ecx = cpuid_ecx(0x80000006); | 139 | ecx = cpuid_ecx(0x80000006); |
140 | l2size = ecx >> 16; | 140 | l2size = ecx >> 16; |
141 | 141 | ||
142 | /* do processor-specific cache resizing */ | 142 | /* do processor-specific cache resizing */ |
143 | if (this_cpu->c_size_cache) | 143 | if (this_cpu->c_size_cache) |
144 | l2size = this_cpu->c_size_cache(c,l2size); | 144 | l2size = this_cpu->c_size_cache(c, l2size); |
145 | 145 | ||
146 | /* Allow user to override all this if necessary. */ | 146 | /* Allow user to override all this if necessary. */ |
147 | if (cachesize_override != -1) | 147 | if (cachesize_override != -1) |
148 | l2size = cachesize_override; | 148 | l2size = cachesize_override; |
149 | 149 | ||
150 | if ( l2size == 0 ) | 150 | if (l2size == 0) |
151 | return; /* Again, no L2 cache is possible */ | 151 | return; /* Again, no L2 cache is possible */ |
152 | 152 | ||
153 | c->x86_cache_size = l2size; | 153 | c->x86_cache_size = l2size; |
@@ -156,16 +156,19 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) | |||
156 | l2size, ecx & 0xFF); | 156 | l2size, ecx & 0xFF); |
157 | } | 157 | } |
158 | 158 | ||
159 | /* Naming convention should be: <Name> [(<Codename>)] */ | 159 | /* |
160 | /* This table is only used if init_<vendor>() below doesn't set it; */ | 160 | * Naming convention should be: <Name> [(<Codename>)] |
161 | /* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */ | 161 | * This table is only used if init_<vendor>() below doesn't set it; |
162 | * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used | ||
163 | * | ||
164 | */ | ||
162 | 165 | ||
163 | /* Look up CPU names by table lookup. */ | 166 | /* Look up CPU names by table lookup. */ |
164 | static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) | 167 | static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) |
165 | { | 168 | { |
166 | struct cpu_model_info *info; | 169 | struct cpu_model_info *info; |
167 | 170 | ||
168 | if ( c->x86_model >= 16 ) | 171 | if (c->x86_model >= 16) |
169 | return NULL; /* Range check */ | 172 | return NULL; /* Range check */ |
170 | 173 | ||
171 | if (!this_cpu) | 174 | if (!this_cpu) |
@@ -190,9 +193,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) | |||
190 | 193 | ||
191 | for (i = 0; i < X86_VENDOR_NUM; i++) { | 194 | for (i = 0; i < X86_VENDOR_NUM; i++) { |
192 | if (cpu_devs[i]) { | 195 | if (cpu_devs[i]) { |
193 | if (!strcmp(v,cpu_devs[i]->c_ident[0]) || | 196 | if (!strcmp(v, cpu_devs[i]->c_ident[0]) || |
194 | (cpu_devs[i]->c_ident[1] && | 197 | (cpu_devs[i]->c_ident[1] && |
195 | !strcmp(v,cpu_devs[i]->c_ident[1]))) { | 198 | !strcmp(v, cpu_devs[i]->c_ident[1]))) { |
196 | c->x86_vendor = i; | 199 | c->x86_vendor = i; |
197 | if (!early) | 200 | if (!early) |
198 | this_cpu = cpu_devs[i]; | 201 | this_cpu = cpu_devs[i]; |
@@ -210,7 +213,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) | |||
210 | } | 213 | } |
211 | 214 | ||
212 | 215 | ||
213 | static int __init x86_fxsr_setup(char * s) | 216 | static int __init x86_fxsr_setup(char *s) |
214 | { | 217 | { |
215 | setup_clear_cpu_cap(X86_FEATURE_FXSR); | 218 | setup_clear_cpu_cap(X86_FEATURE_FXSR); |
216 | setup_clear_cpu_cap(X86_FEATURE_XMM); | 219 | setup_clear_cpu_cap(X86_FEATURE_XMM); |
@@ -219,7 +222,7 @@ static int __init x86_fxsr_setup(char * s) | |||
219 | __setup("nofxsr", x86_fxsr_setup); | 222 | __setup("nofxsr", x86_fxsr_setup); |
220 | 223 | ||
221 | 224 | ||
222 | static int __init x86_sep_setup(char * s) | 225 | static int __init x86_sep_setup(char *s) |
223 | { | 226 | { |
224 | setup_clear_cpu_cap(X86_FEATURE_SEP); | 227 | setup_clear_cpu_cap(X86_FEATURE_SEP); |
225 | return 1; | 228 | return 1; |
@@ -306,14 +309,30 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c) | |||
306 | 309 | ||
307 | } | 310 | } |
308 | 311 | ||
309 | } | 312 | clear_cpu_cap(c, X86_FEATURE_PAT); |
313 | |||
314 | switch (c->x86_vendor) { | ||
315 | case X86_VENDOR_AMD: | ||
316 | if (c->x86 >= 0xf && c->x86 <= 0x11) | ||
317 | set_cpu_cap(c, X86_FEATURE_PAT); | ||
318 | break; | ||
319 | case X86_VENDOR_INTEL: | ||
320 | if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15)) | ||
321 | set_cpu_cap(c, X86_FEATURE_PAT); | ||
322 | break; | ||
323 | } | ||
310 | 324 | ||
311 | /* Do minimum CPU detection early. | 325 | } |
312 | Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment. | ||
313 | The others are not touched to avoid unwanted side effects. | ||
314 | 326 | ||
315 | WARNING: this function is only called on the BP. Don't add code here | 327 | /* |
316 | that is supposed to run on all CPUs. */ | 328 | * Do minimum CPU detection early. |
329 | * Fields really needed: vendor, cpuid_level, family, model, mask, | ||
330 | * cache alignment. | ||
331 | * The others are not touched to avoid unwanted side effects. | ||
332 | * | ||
333 | * WARNING: this function is only called on the BP. Don't add code here | ||
334 | * that is supposed to run on all CPUs. | ||
335 | */ | ||
317 | static void __init early_cpu_detect(void) | 336 | static void __init early_cpu_detect(void) |
318 | { | 337 | { |
319 | struct cpuinfo_x86 *c = &boot_cpu_data; | 338 | struct cpuinfo_x86 *c = &boot_cpu_data; |
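Both early_get_cap() and generic_identify() now apply the same PAT whitelist: AMD families 0xf through 0x11, and Intel family 0xF or family 6 from model 15 up. As a stand-alone predicate, with vendor constants standing in for the kernel's X86_VENDOR_* values:

#include <stdio.h>

enum vendor { VENDOR_AMD, VENDOR_INTEL, VENDOR_OTHER };

/* Same whitelist as the clear_cpu_cap()/set_cpu_cap() sequences above. */
static int cpu_has_working_pat(enum vendor v, int family, int model)
{
    switch (v) {
    case VENDOR_AMD:
        return family >= 0xf && family <= 0x11;
    case VENDOR_INTEL:
        return family == 0xF || (family == 6 && model >= 15);
    default:
        return 0;
    }
}

int main(void)
{
    printf("AMD fam 0x10: %d\n", cpu_has_working_pat(VENDOR_AMD, 0x10, 2));
    printf("Intel fam 6 model 13: %d\n",
           cpu_has_working_pat(VENDOR_INTEL, 6, 13));
    return 0;
}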
@@ -328,19 +347,14 @@ static void __init early_cpu_detect(void) | |||
328 | 347 | ||
329 | get_cpu_vendor(c, 1); | 348 | get_cpu_vendor(c, 1); |
330 | 349 | ||
331 | switch (c->x86_vendor) { | 350 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && |
332 | case X86_VENDOR_AMD: | 351 | cpu_devs[c->x86_vendor]->c_early_init) |
333 | early_init_amd(c); | 352 | cpu_devs[c->x86_vendor]->c_early_init(c); |
334 | break; | ||
335 | case X86_VENDOR_INTEL: | ||
336 | early_init_intel(c); | ||
337 | break; | ||
338 | } | ||
339 | 353 | ||
340 | early_get_cap(c); | 354 | early_get_cap(c); |
341 | } | 355 | } |
342 | 356 | ||
343 | static void __cpuinit generic_identify(struct cpuinfo_x86 * c) | 357 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) |
344 | { | 358 | { |
345 | u32 tfms, xlvl; | 359 | u32 tfms, xlvl; |
346 | unsigned int ebx; | 360 | unsigned int ebx; |
@@ -351,13 +365,12 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c) | |||
351 | (unsigned int *)&c->x86_vendor_id[0], | 365 | (unsigned int *)&c->x86_vendor_id[0], |
352 | (unsigned int *)&c->x86_vendor_id[8], | 366 | (unsigned int *)&c->x86_vendor_id[8], |
353 | (unsigned int *)&c->x86_vendor_id[4]); | 367 | (unsigned int *)&c->x86_vendor_id[4]); |
354 | 368 | ||
355 | get_cpu_vendor(c, 0); | 369 | get_cpu_vendor(c, 0); |
356 | /* Initialize the standard set of capabilities */ | 370 | /* Initialize the standard set of capabilities */ |
357 | /* Note that the vendor-specific code below might override */ | 371 | /* Note that the vendor-specific code below might override */ |
358 | |||
359 | /* Intel-defined flags: level 0x00000001 */ | 372 | /* Intel-defined flags: level 0x00000001 */ |
360 | if ( c->cpuid_level >= 0x00000001 ) { | 373 | if (c->cpuid_level >= 0x00000001) { |
361 | u32 capability, excap; | 374 | u32 capability, excap; |
362 | cpuid(0x00000001, &tfms, &ebx, &excap, &capability); | 375 | cpuid(0x00000001, &tfms, &ebx, &excap, &capability); |
363 | c->x86_capability[0] = capability; | 376 | c->x86_capability[0] = capability; |
@@ -369,12 +382,14 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c) | |||
369 | if (c->x86 >= 0x6) | 382 | if (c->x86 >= 0x6) |
370 | c->x86_model += ((tfms >> 16) & 0xF) << 4; | 383 | c->x86_model += ((tfms >> 16) & 0xF) << 4; |
371 | c->x86_mask = tfms & 15; | 384 | c->x86_mask = tfms & 15; |
385 | c->initial_apicid = (ebx >> 24) & 0xFF; | ||
372 | #ifdef CONFIG_X86_HT | 386 | #ifdef CONFIG_X86_HT |
373 | c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0); | 387 | c->apicid = phys_pkg_id(c->initial_apicid, 0); |
388 | c->phys_proc_id = c->initial_apicid; | ||
374 | #else | 389 | #else |
375 | c->apicid = (ebx >> 24) & 0xFF; | 390 | c->apicid = c->initial_apicid; |
376 | #endif | 391 | #endif |
377 | if (c->x86_capability[0] & (1<<19)) | 392 | if (test_cpu_cap(c, X86_FEATURE_CLFLSH)) |
378 | c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8; | 393 | c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8; |
379 | } else { | 394 | } else { |
380 | /* Have CPUID level 0 only - unheard of */ | 395 | /* Have CPUID level 0 only - unheard of */ |
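
The hunk above also caches the boot-time APIC ID in the new c->initial_apicid field and sizes CLFLUSH from the same CPUID leaf 1 registers. For reference, a sketch of the leaf-1 bit layout this code relies on; only the model-extension and CLFLUSH lines are visible in the hunk itself, the family-extension rule follows the usual CPUID convention:

	/* Sketch: the CPUID leaf 1 fields used by generic_identify(). */
	struct leaf1 {
		unsigned family, model, stepping;
		unsigned initial_apicid, clflush_bytes;
	};

	static struct leaf1 decode_leaf1(unsigned eax, unsigned ebx)
	{
		struct leaf1 d;

		d.family   = (eax >> 8) & 0xf;
		d.model    = (eax >> 4) & 0xf;
		d.stepping = eax & 0xf;
		if (d.family == 0xf)		/* extended family */
			d.family += (eax >> 20) & 0xff;
		if (d.family >= 0x6)		/* extended model */
			d.model += ((eax >> 16) & 0xf) << 4;
		d.initial_apicid = (ebx >> 24) & 0xff;
		d.clflush_bytes  = ((ebx >> 8) & 0xff) * 8;	/* reported in qwords */
		return d;
	}
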
@@ -383,33 +398,42 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c) | |||
383 | 398 | ||
384 | /* AMD-defined flags: level 0x80000001 */ | 399 | /* AMD-defined flags: level 0x80000001 */ |
385 | xlvl = cpuid_eax(0x80000000); | 400 | xlvl = cpuid_eax(0x80000000); |
386 | if ( (xlvl & 0xffff0000) == 0x80000000 ) { | 401 | if ((xlvl & 0xffff0000) == 0x80000000) { |
387 | if ( xlvl >= 0x80000001 ) { | 402 | if (xlvl >= 0x80000001) { |
388 | c->x86_capability[1] = cpuid_edx(0x80000001); | 403 | c->x86_capability[1] = cpuid_edx(0x80000001); |
389 | c->x86_capability[6] = cpuid_ecx(0x80000001); | 404 | c->x86_capability[6] = cpuid_ecx(0x80000001); |
390 | } | 405 | } |
391 | if ( xlvl >= 0x80000004 ) | 406 | if (xlvl >= 0x80000004) |
392 | get_model_name(c); /* Default name */ | 407 | get_model_name(c); /* Default name */ |
393 | } | 408 | } |
394 | 409 | ||
395 | init_scattered_cpuid_features(c); | 410 | init_scattered_cpuid_features(c); |
396 | } | 411 | } |
397 | 412 | ||
398 | #ifdef CONFIG_X86_HT | 413 | clear_cpu_cap(c, X86_FEATURE_PAT); |
399 | c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff; | 414 | |
400 | #endif | 415 | switch (c->x86_vendor) { |
416 | case X86_VENDOR_AMD: | ||
417 | if (c->x86 >= 0xf && c->x86 <= 0x11) | ||
418 | set_cpu_cap(c, X86_FEATURE_PAT); | ||
419 | break; | ||
420 | case X86_VENDOR_INTEL: | ||
421 | if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15)) | ||
422 | set_cpu_cap(c, X86_FEATURE_PAT); | ||
423 | break; | ||
424 | } | ||
401 | } | 425 | } |
402 | 426 | ||
403 | static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | 427 | static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) |
404 | { | 428 | { |
405 | if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) { | 429 | if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) { |
406 | /* Disable processor serial number */ | 430 | /* Disable processor serial number */ |
407 | unsigned long lo,hi; | 431 | unsigned long lo, hi; |
408 | rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi); | 432 | rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi); |
409 | lo |= 0x200000; | 433 | lo |= 0x200000; |
410 | wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi); | 434 | wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); |
411 | printk(KERN_NOTICE "CPU serial number disabled.\n"); | 435 | printk(KERN_NOTICE "CPU serial number disabled.\n"); |
412 | clear_bit(X86_FEATURE_PN, c->x86_capability); | 436 | clear_cpu_cap(c, X86_FEATURE_PN); |
413 | 437 | ||
414 | /* Disabling the serial number may affect the cpuid level */ | 438 | /* Disabling the serial number may affect the cpuid level */ |
415 | c->cpuid_level = cpuid_eax(0); | 439 | c->cpuid_level = cpuid_eax(0); |
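
The PAT handling added above is a deliberate whitelist: X86_FEATURE_PAT is cleared unconditionally, then re-enabled only for vendor/family combinations known to implement it reliably (AMD family 0xf-0x11, Intel P4 and Core-class parts). The same policy as a standalone predicate (a sketch; the helper name is illustrative):

	/* Sketch: which CPUs keep X86_FEATURE_PAT under the whitelist above. */
	static int pat_whitelisted(int vendor, int family, int model)
	{
		switch (vendor) {
		case X86_VENDOR_AMD:
			return family >= 0xf && family <= 0x11;
		case X86_VENDOR_INTEL:
			return family == 0xf || (family == 6 && model >= 15);
		default:
			return 0;	/* everyone else: PAT stays masked */
		}
	}
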
@@ -444,9 +468,11 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
444 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | 468 | memset(&c->x86_capability, 0, sizeof c->x86_capability); |
445 | 469 | ||
446 | if (!have_cpuid_p()) { | 470 | if (!have_cpuid_p()) { |
447 | /* First of all, decide if this is a 486 or higher */ | 471 | /* |
448 | /* It's a 486 if we can modify the AC flag */ | 472 | * First of all, decide if this is a 486 or higher |
449 | if ( flag_is_changeable_p(X86_EFLAGS_AC) ) | 473 | * It's a 486 if we can modify the AC flag |
474 | */ | ||
475 | if (flag_is_changeable_p(X86_EFLAGS_AC)) | ||
450 | c->x86 = 4; | 476 | c->x86 = 4; |
451 | else | 477 | else |
452 | c->x86 = 3; | 478 | c->x86 = 3; |
@@ -479,10 +505,10 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
479 | */ | 505 | */ |
480 | 506 | ||
481 | /* If the model name is still unset, do table lookup. */ | 507 | /* If the model name is still unset, do table lookup. */ |
482 | if ( !c->x86_model_id[0] ) { | 508 | if (!c->x86_model_id[0]) { |
483 | char *p; | 509 | char *p; |
484 | p = table_lookup_model(c); | 510 | p = table_lookup_model(c); |
485 | if ( p ) | 511 | if (p) |
486 | strcpy(c->x86_model_id, p); | 512 | strcpy(c->x86_model_id, p); |
487 | else | 513 | else |
488 | /* Last resort... */ | 514 | /* Last resort... */ |
@@ -496,9 +522,9 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
496 | * common between the CPUs. The first time this routine gets | 522 | * common between the CPUs. The first time this routine gets |
497 | * executed, c == &boot_cpu_data. | 523 | * executed, c == &boot_cpu_data. |
498 | */ | 524 | */ |
499 | if ( c != &boot_cpu_data ) { | 525 | if (c != &boot_cpu_data) { |
500 | /* AND the already accumulated flags with these */ | 526 | /* AND the already accumulated flags with these */ |
501 | for ( i = 0 ; i < NCAPINTS ; i++ ) | 527 | for (i = 0 ; i < NCAPINTS ; i++) |
502 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; | 528 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; |
503 | } | 529 | } |
504 | 530 | ||
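
The loop above intersects each secondary CPU's capability words into boot_cpu_data, so the system-wide feature set is the AND of what every CPU reports. A short illustration with made-up capability words:

	/* Illustration: AND-ing feature words (values are hypothetical). */
	u32 boot_caps = 0x0383fbff;	/* boot CPU, capability word 0   */
	u32 this_caps = 0x0183f3ff;	/* a secondary CPU, same word    */
	boot_caps &= this_caps;		/* -> 0x0183f3ff, the common set */
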
@@ -542,7 +568,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) | |||
542 | 568 | ||
543 | if (smp_num_siblings == 1) { | 569 | if (smp_num_siblings == 1) { |
544 | printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); | 570 | printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); |
545 | } else if (smp_num_siblings > 1 ) { | 571 | } else if (smp_num_siblings > 1) { |
546 | 572 | ||
547 | if (smp_num_siblings > NR_CPUS) { | 573 | if (smp_num_siblings > NR_CPUS) { |
548 | printk(KERN_WARNING "CPU: Unsupported number of the " | 574 | printk(KERN_WARNING "CPU: Unsupported number of the " |
@@ -552,7 +578,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) | |||
552 | } | 578 | } |
553 | 579 | ||
554 | index_msb = get_count_order(smp_num_siblings); | 580 | index_msb = get_count_order(smp_num_siblings); |
555 | c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); | 581 | c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb); |
556 | 582 | ||
557 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", | 583 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", |
558 | c->phys_proc_id); | 584 | c->phys_proc_id); |
@@ -563,7 +589,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) | |||
563 | 589 | ||
564 | core_bits = get_count_order(c->x86_max_cores); | 590 | core_bits = get_count_order(c->x86_max_cores); |
565 | 591 | ||
566 | c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) & | 592 | c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) & |
567 | ((1 << core_bits) - 1); | 593 | ((1 << core_bits) - 1); |
568 | 594 | ||
569 | if (c->x86_max_cores > 1) | 595 | if (c->x86_max_cores > 1) |
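
With c->initial_apicid in place, detect_ht() derives the topology purely by shifting the APIC ID: the top bits above index_msb give the package id, and the next core_bits bits give the core id. A worked example for a hypothetical 2-core, 2-thread package:

	/* Worked example of the detect_ht() arithmetic above. */
	int apicid = 6;				/* 0b0110, hypothetical   */
	int index_msb = get_count_order(4);	/* 4 siblings -> 2 bits   */
	int phys_proc_id = apicid >> index_msb;	/* 6 >> 2 = package 1     */

	index_msb = get_count_order(2);		/* 2 threads/core -> 1    */
	int core_bits = get_count_order(2);	/* 2 cores/pkg -> 1 bit   */
	int cpu_core_id = (apicid >> index_msb) & ((1 << core_bits) - 1);
						/* (6 >> 1) & 1 = core 1  */
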
@@ -597,7 +623,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | |||
597 | else | 623 | else |
598 | printk("%s", c->x86_model_id); | 624 | printk("%s", c->x86_model_id); |
599 | 625 | ||
600 | if (c->x86_mask || c->cpuid_level >= 0) | 626 | if (c->x86_mask || c->cpuid_level >= 0) |
601 | printk(" stepping %02x\n", c->x86_mask); | 627 | printk(" stepping %02x\n", c->x86_mask); |
602 | else | 628 | else |
603 | printk("\n"); | 629 | printk("\n"); |
@@ -616,23 +642,15 @@ __setup("clearcpuid=", setup_disablecpuid); | |||
616 | 642 | ||
617 | cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; | 643 | cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; |
618 | 644 | ||
619 | /* This is hacky. :) | ||
620 | * We're emulating future behavior. | ||
621 | * In the future, the cpu-specific init functions will be called implicitly | ||
622 | * via the magic of initcalls. | ||
623 | * They will insert themselves into the cpu_devs structure. | ||
624 | * Then, when cpu_init() is called, we can just iterate over that array. | ||
625 | */ | ||
626 | void __init early_cpu_init(void) | 645 | void __init early_cpu_init(void) |
627 | { | 646 | { |
628 | intel_cpu_init(); | 647 | struct cpu_vendor_dev *cvdev; |
629 | cyrix_init_cpu(); | 648 | |
630 | nsc_init_cpu(); | 649 | for (cvdev = __x86cpuvendor_start ; |
631 | amd_init_cpu(); | 650 | cvdev < __x86cpuvendor_end ; |
632 | centaur_init_cpu(); | 651 | cvdev++) |
633 | transmeta_init_cpu(); | 652 | cpu_devs[cvdev->vendor] = cvdev->cpu_dev; |
634 | nexgen_init_cpu(); | 653 | |
635 | umc_init_cpu(); | ||
636 | early_cpu_detect(); | 654 | early_cpu_detect(); |
637 | } | 655 | } |
638 | 656 | ||
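
early_cpu_init() now realizes the design the deleted comment only promised: instead of calling eight vendor init functions by name, it walks a dedicated .x86cpuvendor.init section that the vendor files populate at compile time (see the cpu_vendor_dev_register() macro in the cpu.h hunk below). The __x86cpuvendor_start/__x86cpuvendor_end bounds must come from the linker script; a fragment along these lines is assumed, shown for illustration only, as the kernel's actual vmlinux.lds entry may differ:

	.x86cpuvendor.init : {
		__x86cpuvendor_start = .;
		*(.x86cpuvendor.init)
		__x86cpuvendor_end = .;
	}
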
@@ -666,7 +684,7 @@ void __cpuinit cpu_init(void) | |||
666 | { | 684 | { |
667 | int cpu = smp_processor_id(); | 685 | int cpu = smp_processor_id(); |
668 | struct task_struct *curr = current; | 686 | struct task_struct *curr = current; |
669 | struct tss_struct * t = &per_cpu(init_tss, cpu); | 687 | struct tss_struct *t = &per_cpu(init_tss, cpu); |
670 | struct thread_struct *thread = &curr->thread; | 688 | struct thread_struct *thread = &curr->thread; |
671 | 689 | ||
672 | if (cpu_test_and_set(cpu, cpu_initialized)) { | 690 | if (cpu_test_and_set(cpu, cpu_initialized)) { |
@@ -692,7 +710,7 @@ void __cpuinit cpu_init(void) | |||
692 | enter_lazy_tlb(&init_mm, curr); | 710 | enter_lazy_tlb(&init_mm, curr); |
693 | 711 | ||
694 | load_sp0(t, thread); | 712 | load_sp0(t, thread); |
695 | set_tss_desc(cpu,t); | 713 | set_tss_desc(cpu, t); |
696 | load_TR_desc(); | 714 | load_TR_desc(); |
697 | load_LDT(&init_mm.context); | 715 | load_LDT(&init_mm.context); |
698 | 716 | ||
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index e0b38c33d842..783691b2a738 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h | |||
@@ -14,6 +14,7 @@ struct cpu_dev { | |||
14 | 14 | ||
15 | struct cpu_model_info c_models[4]; | 15 | struct cpu_model_info c_models[4]; |
16 | 16 | ||
17 | void (*c_early_init)(struct cpuinfo_x86 *c); | ||
17 | void (*c_init)(struct cpuinfo_x86 * c); | 18 | void (*c_init)(struct cpuinfo_x86 * c); |
18 | void (*c_identify)(struct cpuinfo_x86 * c); | 19 | void (*c_identify)(struct cpuinfo_x86 * c); |
19 | unsigned int (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size); | 20 | unsigned int (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size); |
@@ -21,18 +22,17 @@ struct cpu_dev { | |||
21 | 22 | ||
22 | extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM]; | 23 | extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM]; |
23 | 24 | ||
25 | struct cpu_vendor_dev { | ||
26 | int vendor; | ||
27 | struct cpu_dev *cpu_dev; | ||
28 | }; | ||
29 | |||
30 | #define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \ | ||
31 | static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \ | ||
32 | __attribute__((__section__(".x86cpuvendor.init"))) = \ | ||
33 | { cpu_vendor_id, cpu_dev } | ||
34 | |||
35 | extern struct cpu_vendor_dev __x86cpuvendor_start[], __x86cpuvendor_end[]; | ||
36 | |||
24 | extern int get_model_name(struct cpuinfo_x86 *c); | 37 | extern int get_model_name(struct cpuinfo_x86 *c); |
25 | extern void display_cacheinfo(struct cpuinfo_x86 *c); | 38 | extern void display_cacheinfo(struct cpuinfo_x86 *c); |
26 | |||
27 | extern void early_init_intel(struct cpuinfo_x86 *c); | ||
28 | extern void early_init_amd(struct cpuinfo_x86 *c); | ||
29 | |||
30 | /* Specific CPU type init functions */ | ||
31 | int intel_cpu_init(void); | ||
32 | int amd_init_cpu(void); | ||
33 | int cyrix_init_cpu(void); | ||
34 | int nsc_init_cpu(void); | ||
35 | int centaur_init_cpu(void); | ||
36 | int transmeta_init_cpu(void); | ||
37 | int nexgen_init_cpu(void); | ||
38 | int umc_init_cpu(void); | ||
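
On the producer side, a vendor file now registers itself with one macro invocation instead of exporting an init function for common.c to call, as the cyrix.c and intel.c hunks below show. For a made-up vendor it would look like this (X86_VENDOR_EXAMPLE and both hooks are hypothetical):

	/* Illustrative registration; compare the real ones below. */
	static struct cpu_dev example_cpu_dev __cpuinitdata = {
		.c_vendor	= "Example",
		.c_ident	= { "GenuineExample" },
		.c_early_init	= example_early_init,	/* optional hook */
		.c_init		= example_init,
	};

	cpu_vendor_dev_register(X86_VENDOR_EXAMPLE, &example_cpu_dev);
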
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 7139b0262703..3fd7a67bb06a 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c | |||
@@ -19,7 +19,7 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | |||
19 | { | 19 | { |
20 | unsigned char ccr2, ccr3; | 20 | unsigned char ccr2, ccr3; |
21 | unsigned long flags; | 21 | unsigned long flags; |
22 | 22 | ||
23 | /* we test for DEVID by checking whether CCR3 is writable */ | 23 | /* we test for DEVID by checking whether CCR3 is writable */ |
24 | local_irq_save(flags); | 24 | local_irq_save(flags); |
25 | ccr3 = getCx86(CX86_CCR3); | 25 | ccr3 = getCx86(CX86_CCR3); |
@@ -37,8 +37,7 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | |||
37 | setCx86(CX86_CCR2, ccr2); | 37 | setCx86(CX86_CCR2, ccr2); |
38 | *dir0 = 0xfe; | 38 | *dir0 = 0xfe; |
39 | } | 39 | } |
40 | } | 40 | } else { |
41 | else { | ||
42 | setCx86(CX86_CCR3, ccr3); /* restore CCR3 */ | 41 | setCx86(CX86_CCR3, ccr3); /* restore CCR3 */ |
43 | 42 | ||
44 | /* read DIR0 and DIR1 CPU registers */ | 43 | /* read DIR0 and DIR1 CPU registers */ |
@@ -86,7 +85,7 @@ static char cyrix_model_mult2[] __cpuinitdata = "12233445"; | |||
86 | static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c) | 85 | static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c) |
87 | { | 86 | { |
88 | unsigned long flags; | 87 | unsigned long flags; |
89 | 88 | ||
90 | if (Cx86_dir0_msb == 3) { | 89 | if (Cx86_dir0_msb == 3) { |
91 | unsigned char ccr3, ccr5; | 90 | unsigned char ccr3, ccr5; |
92 | 91 | ||
@@ -132,7 +131,7 @@ static void __cpuinit set_cx86_memwb(void) | |||
132 | /* set 'Not Write-through' */ | 131 | /* set 'Not Write-through' */ |
133 | write_cr0(read_cr0() | X86_CR0_NW); | 132 | write_cr0(read_cr0() | X86_CR0_NW); |
134 | /* CCR2 bit 2: lock NW bit and set WT1 */ | 133 | /* CCR2 bit 2: lock NW bit and set WT1 */ |
135 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14 ); | 134 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14); |
136 | } | 135 | } |
137 | 136 | ||
138 | static void __cpuinit set_cx86_inc(void) | 137 | static void __cpuinit set_cx86_inc(void) |
@@ -148,7 +147,7 @@ static void __cpuinit set_cx86_inc(void) | |||
148 | setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02); | 147 | setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02); |
149 | /* PCR0 -- Performance Control */ | 148 | /* PCR0 -- Performance Control */ |
150 | /* Incrementor Margin 10 */ | 149 | /* Incrementor Margin 10 */ |
151 | setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04); | 150 | setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04); |
152 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | 151 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ |
153 | } | 152 | } |
154 | 153 | ||
@@ -167,16 +166,16 @@ static void __cpuinit geode_configure(void) | |||
167 | 166 | ||
168 | ccr3 = getCx86(CX86_CCR3); | 167 | ccr3 = getCx86(CX86_CCR3); |
169 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 168 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ |
170 | 169 | ||
171 | 170 | ||
172 | /* FPU fast, DTE cache, Mem bypass */ | 171 | /* FPU fast, DTE cache, Mem bypass */ |
173 | setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38); | 172 | setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38); |
174 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | 173 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ |
175 | 174 | ||
176 | set_cx86_memwb(); | 175 | set_cx86_memwb(); |
177 | set_cx86_reorder(); | 176 | set_cx86_reorder(); |
178 | set_cx86_inc(); | 177 | set_cx86_inc(); |
179 | 178 | ||
180 | local_irq_restore(flags); | 179 | local_irq_restore(flags); |
181 | } | 180 | } |
182 | 181 | ||
@@ -187,14 +186,16 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
187 | char *buf = c->x86_model_id; | 186 | char *buf = c->x86_model_id; |
188 | const char *p = NULL; | 187 | const char *p = NULL; |
189 | 188 | ||
190 | /* Bit 31 in normal CPUID used for nonstandard 3DNow ID; | 189 | /* |
191 | 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ | 190 | * Bit 31 in normal CPUID used for nonstandard 3DNow ID; |
192 | clear_bit(0*32+31, c->x86_capability); | 191 | * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway |
192 | */ | ||
193 | clear_cpu_cap(c, 0*32+31); | ||
193 | 194 | ||
194 | /* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */ | 195 | /* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */ |
195 | if ( test_bit(1*32+24, c->x86_capability) ) { | 196 | if (test_cpu_cap(c, 1*32+24)) { |
196 | clear_bit(1*32+24, c->x86_capability); | 197 | clear_cpu_cap(c, 1*32+24); |
197 | set_bit(X86_FEATURE_CXMMX, c->x86_capability); | 198 | set_cpu_cap(c, X86_FEATURE_CXMMX); |
198 | } | 199 | } |
199 | 200 | ||
200 | do_cyrix_devid(&dir0, &dir1); | 201 | do_cyrix_devid(&dir0, &dir1); |
@@ -213,7 +214,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
213 | * the model, multiplier and stepping. Black magic included, | 214 | * the model, multiplier and stepping. Black magic included, |
214 | * to make the silicon step/rev numbers match the printed ones. | 215 | * to make the silicon step/rev numbers match the printed ones. |
215 | */ | 216 | */ |
216 | 217 | ||
217 | switch (dir0_msn) { | 218 | switch (dir0_msn) { |
218 | unsigned char tmp; | 219 | unsigned char tmp; |
219 | 220 | ||
@@ -241,7 +242,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
241 | } else /* 686 */ | 242 | } else /* 686 */ |
242 | p = Cx86_cb+1; | 243 | p = Cx86_cb+1; |
243 | /* Emulate MTRRs using Cyrix's ARRs. */ | 244 | /* Emulate MTRRs using Cyrix's ARRs. */ |
244 | set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability); | 245 | set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); |
245 | /* 6x86's contain this bug */ | 246 | /* 6x86's contain this bug */ |
246 | c->coma_bug = 1; | 247 | c->coma_bug = 1; |
247 | break; | 248 | break; |
@@ -250,17 +251,18 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
250 | #ifdef CONFIG_PCI | 251 | #ifdef CONFIG_PCI |
251 | { | 252 | { |
252 | u32 vendor, device; | 253 | u32 vendor, device; |
253 | /* It isn't really a PCI quirk directly, but the cure is the | 254 | /* |
254 | same. The MediaGX has deep magic SMM stuff that handles the | 255 | * It isn't really a PCI quirk directly, but the cure is the |
255 | SB emulation. It throws away the fifo on disable_dma() which | 256 | * same. The MediaGX has deep magic SMM stuff that handles the |
256 | is wrong and ruins the audio. | 257 | * SB emulation. It throws away the fifo on disable_dma() which |
257 | 258 | * is wrong and ruins the audio. | |
258 | Bug2: VSA1 has a wrap bug so that using maximum sized DMA | 259 | * |
259 | causes bad things. According to NatSemi VSA2 has another | 260 | * Bug2: VSA1 has a wrap bug so that using maximum sized DMA |
260 | bug to do with 'hlt'. I've not seen any boards using VSA2 | 261 | * causes bad things. According to NatSemi VSA2 has another |
261 | and X doesn't seem to support it either so who cares 8). | 262 | * bug to do with 'hlt'. I've not seen any boards using VSA2 |
262 | VSA1 we work around however. | 263 | * and X doesn't seem to support it either so who cares 8). |
263 | */ | 264 | * VSA1 we work around however. |
265 | */ | ||
264 | 266 | ||
265 | printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n"); | 267 | printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n"); |
266 | isa_dma_bridge_buggy = 2; | 268 | isa_dma_bridge_buggy = 2; |
@@ -273,55 +275,51 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
273 | 275 | ||
274 | /* | 276 | /* |
275 | * The 5510/5520 companion chips have a funky PIT. | 277 | * The 5510/5520 companion chips have a funky PIT. |
276 | */ | 278 | */ |
277 | if (vendor == PCI_VENDOR_ID_CYRIX && | 279 | if (vendor == PCI_VENDOR_ID_CYRIX && |
278 | (device == PCI_DEVICE_ID_CYRIX_5510 || device == PCI_DEVICE_ID_CYRIX_5520)) | 280 | (device == PCI_DEVICE_ID_CYRIX_5510 || device == PCI_DEVICE_ID_CYRIX_5520)) |
279 | mark_tsc_unstable("cyrix 5510/5520 detected"); | 281 | mark_tsc_unstable("cyrix 5510/5520 detected"); |
280 | } | 282 | } |
281 | #endif | 283 | #endif |
282 | c->x86_cache_size=16; /* Yep 16K integrated cache, that's it */ | 284 | c->x86_cache_size = 16; /* Yep 16K integrated cache, that's it */ |
283 | 285 | ||
284 | /* GXm supports extended cpuid levels 'ala' AMD */ | 286 | /* GXm supports extended cpuid levels 'ala' AMD */ |
285 | if (c->cpuid_level == 2) { | 287 | if (c->cpuid_level == 2) { |
286 | /* Enable cxMMX extensions (GX1 Datasheet 54) */ | 288 | /* Enable cxMMX extensions (GX1 Datasheet 54) */ |
287 | setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1); | 289 | setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1); |
288 | 290 | ||
289 | /* | 291 | /* |
290 | * GXm : 0x30 ... 0x5f GXm datasheet 51 | 292 | * GXm : 0x30 ... 0x5f GXm datasheet 51 |
291 | * GXlv: 0x6x GXlv datasheet 54 | 293 | * GXlv: 0x6x GXlv datasheet 54 |
292 | * ? : 0x7x | 294 | * ? : 0x7x |
293 | * GX1 : 0x8x GX1 datasheet 56 | 295 | * GX1 : 0x8x GX1 datasheet 56 |
294 | */ | 296 | */ |
295 | if((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <=dir1 && dir1 <= 0x8f)) | 297 | if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f)) |
296 | geode_configure(); | 298 | geode_configure(); |
297 | get_model_name(c); /* get CPU marketing name */ | 299 | get_model_name(c); /* get CPU marketing name */ |
298 | return; | 300 | return; |
299 | } | 301 | } else { /* MediaGX */ |
300 | else { /* MediaGX */ | ||
301 | Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4'; | 302 | Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4'; |
302 | p = Cx86_cb+2; | 303 | p = Cx86_cb+2; |
303 | c->x86_model = (dir1 & 0x20) ? 1 : 2; | 304 | c->x86_model = (dir1 & 0x20) ? 1 : 2; |
304 | } | 305 | } |
305 | break; | 306 | break; |
306 | 307 | ||
307 | case 5: /* 6x86MX/M II */ | 308 | case 5: /* 6x86MX/M II */ |
308 | if (dir1 > 7) | 309 | if (dir1 > 7) { |
309 | { | ||
310 | dir0_msn++; /* M II */ | 310 | dir0_msn++; /* M II */ |
311 | /* Enable MMX extensions (App note 108) */ | 311 | /* Enable MMX extensions (App note 108) */ |
312 | setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1); | 312 | setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1); |
313 | } | 313 | } else { |
314 | else | ||
315 | { | ||
316 | c->coma_bug = 1; /* 6x86MX, it has the bug. */ | 314 | c->coma_bug = 1; /* 6x86MX, it has the bug. */ |
317 | } | 315 | } |
318 | tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0; | 316 | tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0; |
319 | Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7]; | 317 | Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7]; |
320 | p = Cx86_cb+tmp; | 318 | p = Cx86_cb+tmp; |
321 | if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20)) | 319 | if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20)) |
322 | (c->x86_model)++; | 320 | (c->x86_model)++; |
323 | /* Emulate MTRRs using Cyrix's ARRs. */ | 321 | /* Emulate MTRRs using Cyrix's ARRs. */ |
324 | set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability); | 322 | set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); |
325 | break; | 323 | break; |
326 | 324 | ||
327 | case 0xf: /* Cyrix 486 without DEVID registers */ | 325 | case 0xf: /* Cyrix 486 without DEVID registers */ |
@@ -343,7 +341,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
343 | break; | 341 | break; |
344 | } | 342 | } |
345 | strcpy(buf, Cx86_model[dir0_msn & 7]); | 343 | strcpy(buf, Cx86_model[dir0_msn & 7]); |
346 | if (p) strcat(buf, p); | 344 | if (p) |
345 | strcat(buf, p); | ||
347 | return; | 346 | return; |
348 | } | 347 | } |
349 | 348 | ||
@@ -352,7 +351,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
352 | */ | 351 | */ |
353 | static void __cpuinit init_nsc(struct cpuinfo_x86 *c) | 352 | static void __cpuinit init_nsc(struct cpuinfo_x86 *c) |
354 | { | 353 | { |
355 | /* There may be GX1 processors in the wild that are branded | 354 | /* |
355 | * There may be GX1 processors in the wild that are branded | ||
356 | * NSC and not Cyrix. | 356 | * NSC and not Cyrix. |
357 | * | 357 | * |
358 | * This function only handles the GX processor, and kicks every | 358 | * This function only handles the GX processor, and kicks every |
@@ -377,7 +377,7 @@ static void __cpuinit init_nsc(struct cpuinfo_x86 *c) | |||
377 | * by the fact that they preserve the flags across the division of 5/2. | 377 | * by the fact that they preserve the flags across the division of 5/2. |
378 | * PII and PPro exhibit this behavior too, but they have cpuid available. | 378 | * PII and PPro exhibit this behavior too, but they have cpuid available. |
379 | */ | 379 | */ |
380 | 380 | ||
381 | /* | 381 | /* |
382 | * Perform the Cyrix 5/2 test. A Cyrix won't change | 382 | * Perform the Cyrix 5/2 test. A Cyrix won't change |
383 | * the flags, while other 486 chips will. | 383 | * the flags, while other 486 chips will. |
@@ -398,27 +398,26 @@ static inline int test_cyrix_52div(void) | |||
398 | return (unsigned char) (test >> 8) == 0x02; | 398 | return (unsigned char) (test >> 8) == 0x02; |
399 | } | 399 | } |
400 | 400 | ||
401 | static void __cpuinit cyrix_identify(struct cpuinfo_x86 * c) | 401 | static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) |
402 | { | 402 | { |
403 | /* Detect Cyrix with disabled CPUID */ | 403 | /* Detect Cyrix with disabled CPUID */ |
404 | if ( c->x86 == 4 && test_cyrix_52div() ) { | 404 | if (c->x86 == 4 && test_cyrix_52div()) { |
405 | unsigned char dir0, dir1; | 405 | unsigned char dir0, dir1; |
406 | 406 | ||
407 | strcpy(c->x86_vendor_id, "CyrixInstead"); | 407 | strcpy(c->x86_vendor_id, "CyrixInstead"); |
408 | c->x86_vendor = X86_VENDOR_CYRIX; | 408 | c->x86_vendor = X86_VENDOR_CYRIX; |
409 | 409 | ||
410 | /* Actually enable cpuid on the older cyrix */ | 410 | /* Actually enable cpuid on the older cyrix */ |
411 | 411 | ||
412 | /* Retrieve CPU revisions */ | 412 | /* Retrieve CPU revisions */ |
413 | 413 | ||
414 | do_cyrix_devid(&dir0, &dir1); | 414 | do_cyrix_devid(&dir0, &dir1); |
415 | 415 | ||
416 | dir0>>=4; | 416 | dir0 >>= 4; |
417 | 417 | ||
418 | /* Check it is an affected model */ | 418 | /* Check it is an affected model */ |
419 | 419 | ||
420 | if (dir0 == 5 || dir0 == 3) | 420 | if (dir0 == 5 || dir0 == 3) { |
421 | { | ||
422 | unsigned char ccr3; | 421 | unsigned char ccr3; |
423 | unsigned long flags; | 422 | unsigned long flags; |
424 | printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n"); | 423 | printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n"); |
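
cyrix_identify() leans on test_cyrix_52div(), whose tail is visible at the top of this hunk: Cyrix 486s with CPUID disabled preserve the arithmetic flags across an integer division of 5 by 2, which other 486-class chips do not. Reconstructed for reference (a sketch based on the surrounding code and comments; the kernel's exact asm constraints may differ slightly):

	static inline int test_cyrix_52div(void)
	{
		unsigned int test;

		__asm__ __volatile__(
		     "sahf\n\t"		/* clear flags (%eax = 0x0005) */
		     "div %b2\n\t"	/* divide 5 by 2 */
		     "lahf"		/* store flags into %ah */
		     : "=a" (test)
		     : "0" (5), "q" (2)
		     : "cc");

		/* AH is 0x02 on a Cyrix after the divide */
		return (unsigned char) (test >> 8) == 0x02;
	}
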
@@ -434,26 +433,17 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 * c) | |||
434 | 433 | ||
435 | static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { | 434 | static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { |
436 | .c_vendor = "Cyrix", | 435 | .c_vendor = "Cyrix", |
437 | .c_ident = { "CyrixInstead" }, | 436 | .c_ident = { "CyrixInstead" }, |
438 | .c_init = init_cyrix, | 437 | .c_init = init_cyrix, |
439 | .c_identify = cyrix_identify, | 438 | .c_identify = cyrix_identify, |
440 | }; | 439 | }; |
441 | 440 | ||
442 | int __init cyrix_init_cpu(void) | 441 | cpu_vendor_dev_register(X86_VENDOR_CYRIX, &cyrix_cpu_dev); |
443 | { | ||
444 | cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev; | ||
445 | return 0; | ||
446 | } | ||
447 | 442 | ||
448 | static struct cpu_dev nsc_cpu_dev __cpuinitdata = { | 443 | static struct cpu_dev nsc_cpu_dev __cpuinitdata = { |
449 | .c_vendor = "NSC", | 444 | .c_vendor = "NSC", |
450 | .c_ident = { "Geode by NSC" }, | 445 | .c_ident = { "Geode by NSC" }, |
451 | .c_init = init_nsc, | 446 | .c_init = init_nsc, |
452 | }; | 447 | }; |
453 | 448 | ||
454 | int __init nsc_init_cpu(void) | 449 | cpu_vendor_dev_register(X86_VENDOR_NSC, &nsc_cpu_dev); |
455 | { | ||
456 | cpu_devs[X86_VENDOR_NSC] = &nsc_cpu_dev; | ||
457 | return 0; | ||
458 | } | ||
459 | |||
diff --git a/arch/x86/kernel/cpu/feature_names.c b/arch/x86/kernel/cpu/feature_names.c index ee975ac6bbcb..e43ad4ad4cba 100644 --- a/arch/x86/kernel/cpu/feature_names.c +++ b/arch/x86/kernel/cpu/feature_names.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * This file must not contain any executable code. | 4 | * This file must not contain any executable code. |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include "asm/cpufeature.h" | 7 | #include <asm/cpufeature.h> |
8 | 8 | ||
9 | /* | 9 | /* |
10 | * These flag bits must match the definitions in <asm/cpufeature.h>. | 10 | * These flag bits must match the definitions in <asm/cpufeature.h>. |
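
The one-character change above is not cosmetic: #include "asm/cpufeature.h" searches relative to the including file before falling back to the include path, while the angle-bracket form goes straight to the compiler's -I directories, where the kernel's asm/ headers actually live. The convention used throughout these files:

	#include "mce.h"		/* local header, next to the .c file */
	#include <asm/cpufeature.h>	/* resolved via the kernel -I paths  */
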
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index fae31ce747bd..fe9224c51d37 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -30,7 +30,7 @@ | |||
30 | struct movsl_mask movsl_mask __read_mostly; | 30 | struct movsl_mask movsl_mask __read_mostly; |
31 | #endif | 31 | #endif |
32 | 32 | ||
33 | void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | 33 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) |
34 | { | 34 | { |
35 | /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ | 35 | /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ |
36 | if (c->x86 == 15 && c->x86_cache_alignment == 64) | 36 | if (c->x86 == 15 && c->x86_cache_alignment == 64) |
@@ -45,7 +45,7 @@ void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | |||
45 | * | 45 | * |
46 | * This is called before we do cpu ident work | 46 | * This is called before we do cpu ident work |
47 | */ | 47 | */ |
48 | 48 | ||
49 | int __cpuinit ppro_with_ram_bug(void) | 49 | int __cpuinit ppro_with_ram_bug(void) |
50 | { | 50 | { |
51 | /* Uses data from early_cpu_detect now */ | 51 | /* Uses data from early_cpu_detect now */ |
@@ -58,7 +58,7 @@ int __cpuinit ppro_with_ram_bug(void) | |||
58 | } | 58 | } |
59 | return 0; | 59 | return 0; |
60 | } | 60 | } |
61 | 61 | ||
62 | 62 | ||
63 | /* | 63 | /* |
64 | * P4 Xeon errata 037 workaround. | 64 | * P4 Xeon errata 037 workaround. |
@@ -69,7 +69,7 @@ static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c) | |||
69 | unsigned long lo, hi; | 69 | unsigned long lo, hi; |
70 | 70 | ||
71 | if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { | 71 | if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { |
72 | rdmsr (MSR_IA32_MISC_ENABLE, lo, hi); | 72 | rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); |
73 | if ((lo & (1<<9)) == 0) { | 73 | if ((lo & (1<<9)) == 0) { |
74 | printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); | 74 | printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); |
75 | printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); | 75 | printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); |
@@ -127,10 +127,10 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
127 | */ | 127 | */ |
128 | c->f00f_bug = 0; | 128 | c->f00f_bug = 0; |
129 | if (!paravirt_enabled() && c->x86 == 5) { | 129 | if (!paravirt_enabled() && c->x86 == 5) { |
130 | static int f00f_workaround_enabled = 0; | 130 | static int f00f_workaround_enabled; |
131 | 131 | ||
132 | c->f00f_bug = 1; | 132 | c->f00f_bug = 1; |
133 | if ( !f00f_workaround_enabled ) { | 133 | if (!f00f_workaround_enabled) { |
134 | trap_init_f00f_bug(); | 134 | trap_init_f00f_bug(); |
135 | printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); | 135 | printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); |
136 | f00f_workaround_enabled = 1; | 136 | f00f_workaround_enabled = 1; |
@@ -139,20 +139,22 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
139 | #endif | 139 | #endif |
140 | 140 | ||
141 | l2 = init_intel_cacheinfo(c); | 141 | l2 = init_intel_cacheinfo(c); |
142 | if (c->cpuid_level > 9 ) { | 142 | if (c->cpuid_level > 9) { |
143 | unsigned eax = cpuid_eax(10); | 143 | unsigned eax = cpuid_eax(10); |
144 | /* Check for version and the number of counters */ | 144 | /* Check for version and the number of counters */ |
145 | if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) | 145 | if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) |
146 | set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability); | 146 | set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); |
147 | } | 147 | } |
148 | 148 | ||
149 | /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */ | 149 | /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */ |
150 | if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) | 150 | if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) |
151 | clear_bit(X86_FEATURE_SEP, c->x86_capability); | 151 | clear_cpu_cap(c, X86_FEATURE_SEP); |
152 | 152 | ||
153 | /* Names for the Pentium II/Celeron processors | 153 | /* |
154 | detectable only by also checking the cache size. | 154 | * Names for the Pentium II/Celeron processors |
155 | Dixon is NOT a Celeron. */ | 155 | * detectable only by also checking the cache size. |
156 | * Dixon is NOT a Celeron. | ||
157 | */ | ||
156 | if (c->x86 == 6) { | 158 | if (c->x86 == 6) { |
157 | switch (c->x86_model) { | 159 | switch (c->x86_model) { |
158 | case 5: | 160 | case 5: |
@@ -163,14 +165,14 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
163 | p = "Mobile Pentium II (Dixon)"; | 165 | p = "Mobile Pentium II (Dixon)"; |
164 | } | 166 | } |
165 | break; | 167 | break; |
166 | 168 | ||
167 | case 6: | 169 | case 6: |
168 | if (l2 == 128) | 170 | if (l2 == 128) |
169 | p = "Celeron (Mendocino)"; | 171 | p = "Celeron (Mendocino)"; |
170 | else if (c->x86_mask == 0 || c->x86_mask == 5) | 172 | else if (c->x86_mask == 0 || c->x86_mask == 5) |
171 | p = "Celeron-A"; | 173 | p = "Celeron-A"; |
172 | break; | 174 | break; |
173 | 175 | ||
174 | case 8: | 176 | case 8: |
175 | if (l2 == 128) | 177 | if (l2 == 128) |
176 | p = "Celeron (Coppermine)"; | 178 | p = "Celeron (Coppermine)"; |
@@ -178,9 +180,9 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
178 | } | 180 | } |
179 | } | 181 | } |
180 | 182 | ||
181 | if ( p ) | 183 | if (p) |
182 | strcpy(c->x86_model_id, p); | 184 | strcpy(c->x86_model_id, p); |
183 | 185 | ||
184 | c->x86_max_cores = num_cpu_cores(c); | 186 | c->x86_max_cores = num_cpu_cores(c); |
185 | 187 | ||
186 | detect_ht(c); | 188 | detect_ht(c); |
@@ -207,28 +209,29 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
207 | #endif | 209 | #endif |
208 | 210 | ||
209 | if (cpu_has_xmm2) | 211 | if (cpu_has_xmm2) |
210 | set_bit(X86_FEATURE_LFENCE_RDTSC, c->x86_capability); | 212 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); |
211 | if (c->x86 == 15) { | 213 | if (c->x86 == 15) { |
212 | set_bit(X86_FEATURE_P4, c->x86_capability); | 214 | set_cpu_cap(c, X86_FEATURE_P4); |
213 | } | 215 | } |
214 | if (c->x86 == 6) | 216 | if (c->x86 == 6) |
215 | set_bit(X86_FEATURE_P3, c->x86_capability); | 217 | set_cpu_cap(c, X86_FEATURE_P3); |
216 | if (cpu_has_ds) { | 218 | if (cpu_has_ds) { |
217 | unsigned int l1; | 219 | unsigned int l1; |
218 | rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); | 220 | rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); |
219 | if (!(l1 & (1<<11))) | 221 | if (!(l1 & (1<<11))) |
220 | set_bit(X86_FEATURE_BTS, c->x86_capability); | 222 | set_cpu_cap(c, X86_FEATURE_BTS); |
221 | if (!(l1 & (1<<12))) | 223 | if (!(l1 & (1<<12))) |
222 | set_bit(X86_FEATURE_PEBS, c->x86_capability); | 224 | set_cpu_cap(c, X86_FEATURE_PEBS); |
223 | } | 225 | } |
224 | 226 | ||
225 | if (cpu_has_bts) | 227 | if (cpu_has_bts) |
226 | ds_init_intel(c); | 228 | ds_init_intel(c); |
227 | } | 229 | } |
228 | 230 | ||
229 | static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned int size) | 231 | static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) |
230 | { | 232 | { |
231 | /* Intel PIII Tualatin. This comes in two flavours. | 233 | /* |
234 | * Intel PIII Tualatin. This comes in two flavours. | ||
232 | * One has 256kb of cache, the other 512. We have no way | 235 | * One has 256kb of cache, the other 512. We have no way |
233 | * to determine which, so we use a boottime override | 236 | * to determine which, so we use a boottime override |
234 | * for the 512kb model, and assume 256 otherwise. | 237 | * for the 512kb model, and assume 256 otherwise. |
@@ -240,42 +243,42 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned | |||
240 | 243 | ||
241 | static struct cpu_dev intel_cpu_dev __cpuinitdata = { | 244 | static struct cpu_dev intel_cpu_dev __cpuinitdata = { |
242 | .c_vendor = "Intel", | 245 | .c_vendor = "Intel", |
243 | .c_ident = { "GenuineIntel" }, | 246 | .c_ident = { "GenuineIntel" }, |
244 | .c_models = { | 247 | .c_models = { |
245 | { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = | 248 | { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = |
246 | { | 249 | { |
247 | [0] = "486 DX-25/33", | 250 | [0] = "486 DX-25/33", |
248 | [1] = "486 DX-50", | 251 | [1] = "486 DX-50", |
249 | [2] = "486 SX", | 252 | [2] = "486 SX", |
250 | [3] = "486 DX/2", | 253 | [3] = "486 DX/2", |
251 | [4] = "486 SL", | 254 | [4] = "486 SL", |
252 | [5] = "486 SX/2", | 255 | [5] = "486 SX/2", |
253 | [7] = "486 DX/2-WB", | 256 | [7] = "486 DX/2-WB", |
254 | [8] = "486 DX/4", | 257 | [8] = "486 DX/4", |
255 | [9] = "486 DX/4-WB" | 258 | [9] = "486 DX/4-WB" |
256 | } | 259 | } |
257 | }, | 260 | }, |
258 | { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names = | 261 | { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names = |
259 | { | 262 | { |
260 | [0] = "Pentium 60/66 A-step", | 263 | [0] = "Pentium 60/66 A-step", |
261 | [1] = "Pentium 60/66", | 264 | [1] = "Pentium 60/66", |
262 | [2] = "Pentium 75 - 200", | 265 | [2] = "Pentium 75 - 200", |
263 | [3] = "OverDrive PODP5V83", | 266 | [3] = "OverDrive PODP5V83", |
264 | [4] = "Pentium MMX", | 267 | [4] = "Pentium MMX", |
265 | [7] = "Mobile Pentium 75 - 200", | 268 | [7] = "Mobile Pentium 75 - 200", |
266 | [8] = "Mobile Pentium MMX" | 269 | [8] = "Mobile Pentium MMX" |
267 | } | 270 | } |
268 | }, | 271 | }, |
269 | { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names = | 272 | { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names = |
270 | { | 273 | { |
271 | [0] = "Pentium Pro A-step", | 274 | [0] = "Pentium Pro A-step", |
272 | [1] = "Pentium Pro", | 275 | [1] = "Pentium Pro", |
273 | [3] = "Pentium II (Klamath)", | 276 | [3] = "Pentium II (Klamath)", |
274 | [4] = "Pentium II (Deschutes)", | 277 | [4] = "Pentium II (Deschutes)", |
275 | [5] = "Pentium II (Deschutes)", | 278 | [5] = "Pentium II (Deschutes)", |
276 | [6] = "Mobile Pentium II", | 279 | [6] = "Mobile Pentium II", |
277 | [7] = "Pentium III (Katmai)", | 280 | [7] = "Pentium III (Katmai)", |
278 | [8] = "Pentium III (Coppermine)", | 281 | [8] = "Pentium III (Coppermine)", |
279 | [10] = "Pentium III (Cascades)", | 282 | [10] = "Pentium III (Cascades)", |
280 | [11] = "Pentium III (Tualatin)", | 283 | [11] = "Pentium III (Tualatin)", |
281 | } | 284 | } |
@@ -290,15 +293,12 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = { | |||
290 | } | 293 | } |
291 | }, | 294 | }, |
292 | }, | 295 | }, |
296 | .c_early_init = early_init_intel, | ||
293 | .c_init = init_intel, | 297 | .c_init = init_intel, |
294 | .c_size_cache = intel_size_cache, | 298 | .c_size_cache = intel_size_cache, |
295 | }; | 299 | }; |
296 | 300 | ||
297 | __init int intel_cpu_init(void) | 301 | cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev); |
298 | { | ||
299 | cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev; | ||
300 | return 0; | ||
301 | } | ||
302 | 302 | ||
303 | #ifndef CONFIG_X86_CMPXCHG | 303 | #ifndef CONFIG_X86_CMPXCHG |
304 | unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new) | 304 | unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new) |
@@ -364,5 +364,5 @@ unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new) | |||
364 | EXPORT_SYMBOL(cmpxchg_486_u64); | 364 | EXPORT_SYMBOL(cmpxchg_486_u64); |
365 | #endif | 365 | #endif |
366 | 366 | ||
367 | // arch_initcall(intel_cpu_init); | 367 | /* arch_initcall(intel_cpu_init); */ |
368 | 368 | ||
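
Among the init_intel() cleanups above, the debug-store probe is worth spelling out: in MSR_IA32_MISC_ENABLE, bit 11 means "BTS unavailable" and bit 12 means "PEBS unavailable", so the feature bits are set only when the corresponding disable bit reads clear. Isolated as a sketch (the helper name is illustrative):

	/* Sketch of the BTS/PEBS probe from init_intel() above. */
	static void probe_ds_features(struct cpuinfo_x86 *c)
	{
		unsigned int lo, hi;

		if (!cpu_has_ds)
			return;
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if (!(lo & (1 << 11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(lo & (1 << 12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}
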
diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c index a5182dcd94ae..774d87cfd8cd 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_32.c +++ b/arch/x86/kernel/cpu/mcheck/mce_32.c | |||
@@ -10,20 +10,20 @@ | |||
10 | #include <linux/smp.h> | 10 | #include <linux/smp.h> |
11 | #include <linux/thread_info.h> | 11 | #include <linux/thread_info.h> |
12 | 12 | ||
13 | #include <asm/processor.h> | 13 | #include <asm/processor.h> |
14 | #include <asm/system.h> | 14 | #include <asm/system.h> |
15 | #include <asm/mce.h> | 15 | #include <asm/mce.h> |
16 | 16 | ||
17 | #include "mce.h" | 17 | #include "mce.h" |
18 | 18 | ||
19 | int mce_disabled = 0; | 19 | int mce_disabled; |
20 | int nr_mce_banks; | 20 | int nr_mce_banks; |
21 | 21 | ||
22 | EXPORT_SYMBOL_GPL(nr_mce_banks); /* non-fatal.o */ | 22 | EXPORT_SYMBOL_GPL(nr_mce_banks); /* non-fatal.o */ |
23 | 23 | ||
24 | /* Handle unconfigured int18 (should never happen) */ | 24 | /* Handle unconfigured int18 (should never happen) */ |
25 | static void unexpected_machine_check(struct pt_regs * regs, long error_code) | 25 | static void unexpected_machine_check(struct pt_regs *regs, long error_code) |
26 | { | 26 | { |
27 | printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n", smp_processor_id()); | 27 | printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n", smp_processor_id()); |
28 | } | 28 | } |
29 | 29 | ||
@@ -33,30 +33,30 @@ void (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_mac | |||
33 | /* This has to be run for each processor */ | 33 | /* This has to be run for each processor */ |
34 | void mcheck_init(struct cpuinfo_x86 *c) | 34 | void mcheck_init(struct cpuinfo_x86 *c) |
35 | { | 35 | { |
36 | if (mce_disabled==1) | 36 | if (mce_disabled == 1) |
37 | return; | 37 | return; |
38 | 38 | ||
39 | switch (c->x86_vendor) { | 39 | switch (c->x86_vendor) { |
40 | case X86_VENDOR_AMD: | 40 | case X86_VENDOR_AMD: |
41 | amd_mcheck_init(c); | 41 | amd_mcheck_init(c); |
42 | break; | 42 | break; |
43 | 43 | ||
44 | case X86_VENDOR_INTEL: | 44 | case X86_VENDOR_INTEL: |
45 | if (c->x86==5) | 45 | if (c->x86 == 5) |
46 | intel_p5_mcheck_init(c); | 46 | intel_p5_mcheck_init(c); |
47 | if (c->x86==6) | 47 | if (c->x86 == 6) |
48 | intel_p6_mcheck_init(c); | 48 | intel_p6_mcheck_init(c); |
49 | if (c->x86==15) | 49 | if (c->x86 == 15) |
50 | intel_p4_mcheck_init(c); | 50 | intel_p4_mcheck_init(c); |
51 | break; | 51 | break; |
52 | 52 | ||
53 | case X86_VENDOR_CENTAUR: | 53 | case X86_VENDOR_CENTAUR: |
54 | if (c->x86==5) | 54 | if (c->x86 == 5) |
55 | winchip_mcheck_init(c); | 55 | winchip_mcheck_init(c); |
56 | break; | 56 | break; |
57 | 57 | ||
58 | default: | 58 | default: |
59 | break; | 59 | break; |
60 | } | 60 | } |
61 | } | 61 | } |
62 | 62 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c index bf39409b3838..00ccb6c14ec2 100644 --- a/arch/x86/kernel/cpu/mcheck/non-fatal.c +++ b/arch/x86/kernel/cpu/mcheck/non-fatal.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/smp.h> | 16 | #include <linux/smp.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | 18 | ||
19 | #include <asm/processor.h> | 19 | #include <asm/processor.h> |
20 | #include <asm/system.h> | 20 | #include <asm/system.h> |
21 | #include <asm/msr.h> | 21 | #include <asm/msr.h> |
22 | 22 | ||
@@ -26,23 +26,26 @@ static int firstbank; | |||
26 | 26 | ||
27 | #define MCE_RATE 15*HZ /* timer rate is 15s */ | 27 | #define MCE_RATE 15*HZ /* timer rate is 15s */ |
28 | 28 | ||
29 | static void mce_checkregs (void *info) | 29 | static void mce_checkregs(void *info) |
30 | { | 30 | { |
31 | u32 low, high; | 31 | u32 low, high; |
32 | int i; | 32 | int i; |
33 | 33 | ||
34 | for (i=firstbank; i<nr_mce_banks; i++) { | 34 | for (i = firstbank; i < nr_mce_banks; i++) { |
35 | rdmsr (MSR_IA32_MC0_STATUS+i*4, low, high); | 35 | rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high); |
36 | 36 | ||
37 | if (high & (1<<31)) { | 37 | if (high & (1<<31)) { |
38 | printk(KERN_INFO "MCE: The hardware reports a non " | 38 | printk(KERN_INFO "MCE: The hardware reports a non " |
39 | "fatal, correctable incident occurred on " | 39 | "fatal, correctable incident occurred on " |
40 | "CPU %d.\n", | 40 | "CPU %d.\n", |
41 | smp_processor_id()); | 41 | smp_processor_id()); |
42 | printk (KERN_INFO "Bank %d: %08x%08x\n", i, high, low); | 42 | printk(KERN_INFO "Bank %d: %08x%08x\n", i, high, low); |
43 | 43 | ||
44 | /* Scrub the error so we don't pick it up in MCE_RATE seconds time. */ | 44 | /* |
45 | wrmsr (MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL); | 45 | * Scrub the error so we don't pick it up in MCE_RATE |
46 | * seconds time. | ||
47 | */ | ||
48 | wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL); | ||
46 | 49 | ||
47 | /* Serialize */ | 50 | /* Serialize */ |
48 | wmb(); | 51 | wmb(); |
@@ -55,10 +58,10 @@ static void mce_work_fn(struct work_struct *work); | |||
55 | static DECLARE_DELAYED_WORK(mce_work, mce_work_fn); | 58 | static DECLARE_DELAYED_WORK(mce_work, mce_work_fn); |
56 | 59 | ||
57 | static void mce_work_fn(struct work_struct *work) | 60 | static void mce_work_fn(struct work_struct *work) |
58 | { | 61 | { |
59 | on_each_cpu(mce_checkregs, NULL, 1, 1); | 62 | on_each_cpu(mce_checkregs, NULL, 1, 1); |
60 | schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE)); | 63 | schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE)); |
61 | } | 64 | } |
62 | 65 | ||
63 | static int __init init_nonfatal_mce_checker(void) | 66 | static int __init init_nonfatal_mce_checker(void) |
64 | { | 67 | { |
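
The non-fatal checker polls rather than relying on the exception path: mce_work_fn() sweeps the status banks on every CPU, then re-queues itself, and round_jiffies_relative() aligns the wakeup to a whole jiffy so periodic timers across the system can batch. The self-rearming idiom in isolation (a sketch; POLL_RATE and poll_banks() are stand-ins, and on_each_cpu() takes the four-argument form of this kernel generation):

	#define POLL_RATE (15 * HZ)

	static void poll_banks(void *info)
	{
		/* read and clear the MCi_STATUS banks, as mce_checkregs() does */
	}

	static void poll_fn(struct work_struct *work);
	static DECLARE_DELAYED_WORK(poll_work, poll_fn);

	static void poll_fn(struct work_struct *work)
	{
		on_each_cpu(poll_banks, NULL, 1, 1);
		schedule_delayed_work(&poll_work,
				      round_jiffies_relative(POLL_RATE));
	}
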
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c index a18310aaae0c..bfa5817afdda 100644 --- a/arch/x86/kernel/cpu/mcheck/p5.c +++ b/arch/x86/kernel/cpu/mcheck/p5.c | |||
@@ -9,20 +9,20 @@ | |||
9 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
10 | #include <linux/smp.h> | 10 | #include <linux/smp.h> |
11 | 11 | ||
12 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
13 | #include <asm/system.h> | 13 | #include <asm/system.h> |
14 | #include <asm/msr.h> | 14 | #include <asm/msr.h> |
15 | 15 | ||
16 | #include "mce.h" | 16 | #include "mce.h" |
17 | 17 | ||
18 | /* Machine check handler for Pentium class Intel */ | 18 | /* Machine check handler for Pentium class Intel */ |
19 | static void pentium_machine_check(struct pt_regs * regs, long error_code) | 19 | static void pentium_machine_check(struct pt_regs *regs, long error_code) |
20 | { | 20 | { |
21 | u32 loaddr, hi, lotype; | 21 | u32 loaddr, hi, lotype; |
22 | rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi); | 22 | rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi); |
23 | rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi); | 23 | rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi); |
24 | printk(KERN_EMERG "CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n", smp_processor_id(), loaddr, lotype); | 24 | printk(KERN_EMERG "CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n", smp_processor_id(), loaddr, lotype); |
25 | if(lotype&(1<<5)) | 25 | if (lotype&(1<<5)) |
26 | printk(KERN_EMERG "CPU#%d: Possible thermal failure (CPU on fire ?).\n", smp_processor_id()); | 26 | printk(KERN_EMERG "CPU#%d: Possible thermal failure (CPU on fire ?).\n", smp_processor_id()); |
27 | add_taint(TAINT_MACHINE_CHECK); | 27 | add_taint(TAINT_MACHINE_CHECK); |
28 | } | 28 | } |
@@ -31,13 +31,13 @@ static void pentium_machine_check(struct pt_regs * regs, long error_code) | |||
31 | void intel_p5_mcheck_init(struct cpuinfo_x86 *c) | 31 | void intel_p5_mcheck_init(struct cpuinfo_x86 *c) |
32 | { | 32 | { |
33 | u32 l, h; | 33 | u32 l, h; |
34 | 34 | ||
35 | /* Check for MCE support */ | 35 | /* Check for MCE support */ |
36 | if( !cpu_has(c, X86_FEATURE_MCE) ) | 36 | if (!cpu_has(c, X86_FEATURE_MCE)) |
37 | return; | 37 | return; |
38 | 38 | ||
39 | /* Default P5 to off as its often misconnected */ | 39 | /* Default P5 to off as its often misconnected */ |
40 | if(mce_disabled != -1) | 40 | if (mce_disabled != -1) |
41 | return; | 41 | return; |
42 | machine_check_vector = pentium_machine_check; | 42 | machine_check_vector = pentium_machine_check; |
43 | wmb(); | 43 | wmb(); |
@@ -47,7 +47,7 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c) | |||
47 | rdmsr(MSR_IA32_P5_MC_TYPE, l, h); | 47 | rdmsr(MSR_IA32_P5_MC_TYPE, l, h); |
48 | printk(KERN_INFO "Intel old style machine check architecture supported.\n"); | 48 | printk(KERN_INFO "Intel old style machine check architecture supported.\n"); |
49 | 49 | ||
50 | /* Enable MCE */ | 50 | /* Enable MCE */ |
51 | set_in_cr4(X86_CR4_MCE); | 51 | set_in_cr4(X86_CR4_MCE); |
52 | printk(KERN_INFO "Intel old style machine check reporting enabled on CPU#%d.\n", smp_processor_id()); | 52 | printk(KERN_INFO "Intel old style machine check reporting enabled on CPU#%d.\n", smp_processor_id()); |
53 | } | 53 | } |
diff --git a/arch/x86/kernel/cpu/mcheck/p6.c b/arch/x86/kernel/cpu/mcheck/p6.c index 74342604d30e..62efc9c2b3af 100644 --- a/arch/x86/kernel/cpu/mcheck/p6.c +++ b/arch/x86/kernel/cpu/mcheck/p6.c | |||
@@ -9,23 +9,23 @@ | |||
9 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
10 | #include <linux/smp.h> | 10 | #include <linux/smp.h> |
11 | 11 | ||
12 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
13 | #include <asm/system.h> | 13 | #include <asm/system.h> |
14 | #include <asm/msr.h> | 14 | #include <asm/msr.h> |
15 | 15 | ||
16 | #include "mce.h" | 16 | #include "mce.h" |
17 | 17 | ||
18 | /* Machine Check Handler For PII/PIII */ | 18 | /* Machine Check Handler For PII/PIII */ |
19 | static void intel_machine_check(struct pt_regs * regs, long error_code) | 19 | static void intel_machine_check(struct pt_regs *regs, long error_code) |
20 | { | 20 | { |
21 | int recover=1; | 21 | int recover = 1; |
22 | u32 alow, ahigh, high, low; | 22 | u32 alow, ahigh, high, low; |
23 | u32 mcgstl, mcgsth; | 23 | u32 mcgstl, mcgsth; |
24 | int i; | 24 | int i; |
25 | 25 | ||
26 | rdmsr (MSR_IA32_MCG_STATUS, mcgstl, mcgsth); | 26 | rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); |
27 | if (mcgstl & (1<<0)) /* Recoverable ? */ | 27 | if (mcgstl & (1<<0)) /* Recoverable ? */ |
28 | recover=0; | 28 | recover = 0; |
29 | 29 | ||
30 | printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n", | 30 | printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n", |
31 | smp_processor_id(), mcgsth, mcgstl); | 31 | smp_processor_id(), mcgsth, mcgstl); |
@@ -55,30 +55,30 @@ static void intel_machine_check(struct pt_regs * regs, long error_code) | |||
55 | } | 55 | } |
56 | 56 | ||
57 | if (recover & 2) | 57 | if (recover & 2) |
58 | panic ("CPU context corrupt"); | 58 | panic("CPU context corrupt"); |
59 | if (recover & 1) | 59 | if (recover & 1) |
60 | panic ("Unable to continue"); | 60 | panic("Unable to continue"); |
61 | 61 | ||
62 | printk (KERN_EMERG "Attempting to continue.\n"); | 62 | printk(KERN_EMERG "Attempting to continue.\n"); |
63 | /* | 63 | /* |
64 | * Do not clear the MSR_IA32_MCi_STATUS if the error is not | 64 | * Do not clear the MSR_IA32_MCi_STATUS if the error is not |
65 | * recoverable/continuable. This will allow BIOS to look at the MSRs | 65 | * recoverable/continuable. This will allow BIOS to look at the MSRs |
66 | * for errors if the OS could not log the error. | 66 | * for errors if the OS could not log the error. |
67 | */ | 67 | */ |
68 | for (i=0; i<nr_mce_banks; i++) { | 68 | for (i = 0; i < nr_mce_banks; i++) { |
69 | unsigned int msr; | 69 | unsigned int msr; |
70 | msr = MSR_IA32_MC0_STATUS+i*4; | 70 | msr = MSR_IA32_MC0_STATUS+i*4; |
71 | rdmsr (msr,low, high); | 71 | rdmsr(msr, low, high); |
72 | if (high & (1<<31)) { | 72 | if (high & (1<<31)) { |
73 | /* Clear it */ | 73 | /* Clear it */ |
74 | wrmsr (msr, 0UL, 0UL); | 74 | wrmsr(msr, 0UL, 0UL); |
75 | /* Serialize */ | 75 | /* Serialize */ |
76 | wmb(); | 76 | wmb(); |
77 | add_taint(TAINT_MACHINE_CHECK); | 77 | add_taint(TAINT_MACHINE_CHECK); |
78 | } | 78 | } |
79 | } | 79 | } |
80 | mcgstl &= ~(1<<2); | 80 | mcgstl &= ~(1<<2); |
81 | wrmsr (MSR_IA32_MCG_STATUS,mcgstl, mcgsth); | 81 | wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); |
82 | } | 82 | } |
83 | 83 | ||
84 | /* Set up machine check reporting for processors with Intel style MCE */ | 84 | /* Set up machine check reporting for processors with Intel style MCE */ |
@@ -86,21 +86,21 @@ void intel_p6_mcheck_init(struct cpuinfo_x86 *c) | |||
86 | { | 86 | { |
87 | u32 l, h; | 87 | u32 l, h; |
88 | int i; | 88 | int i; |
89 | 89 | ||
90 | /* Check for MCE support */ | 90 | /* Check for MCE support */ |
91 | if (!cpu_has(c, X86_FEATURE_MCE)) | 91 | if (!cpu_has(c, X86_FEATURE_MCE)) |
92 | return; | 92 | return; |
93 | 93 | ||
94 | /* Check for PPro style MCA */ | 94 | /* Check for PPro style MCA */ |
95 | if (!cpu_has(c, X86_FEATURE_MCA)) | 95 | if (!cpu_has(c, X86_FEATURE_MCA)) |
96 | return; | 96 | return; |
97 | 97 | ||
98 | /* Ok machine check is available */ | 98 | /* Ok machine check is available */ |
99 | machine_check_vector = intel_machine_check; | 99 | machine_check_vector = intel_machine_check; |
100 | wmb(); | 100 | wmb(); |
101 | 101 | ||
102 | printk (KERN_INFO "Intel machine check architecture supported.\n"); | 102 | printk(KERN_INFO "Intel machine check architecture supported.\n"); |
103 | rdmsr (MSR_IA32_MCG_CAP, l, h); | 103 | rdmsr(MSR_IA32_MCG_CAP, l, h); |
104 | if (l & (1<<8)) /* Control register present ? */ | 104 | if (l & (1<<8)) /* Control register present ? */ |
105 | wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); | 105 | wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); |
106 | nr_mce_banks = l & 0xff; | 106 | nr_mce_banks = l & 0xff; |
@@ -110,13 +110,13 @@ void intel_p6_mcheck_init(struct cpuinfo_x86 *c) | |||
110 | * - MC0_CTL should not be written | 110 | * - MC0_CTL should not be written |
111 | * - Status registers on all banks should be cleared on reset | 111 | * - Status registers on all banks should be cleared on reset |
112 | */ | 112 | */ |
113 | for (i=1; i<nr_mce_banks; i++) | 113 | for (i = 1; i < nr_mce_banks; i++) |
114 | wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff); | 114 | wrmsr(MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff); |
115 | 115 | ||
116 | for (i=0; i<nr_mce_banks; i++) | 116 | for (i = 0; i < nr_mce_banks; i++) |
117 | wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0); | 117 | wrmsr(MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0); |
118 | 118 | ||
119 | set_in_cr4 (X86_CR4_MCE); | 119 | set_in_cr4(X86_CR4_MCE); |
120 | printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n", | 120 | printk(KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n", |
121 | smp_processor_id()); | 121 | smp_processor_id()); |
122 | } | 122 | } |
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c index 3d428d5afc52..f2be3e190c6b 100644 --- a/arch/x86/kernel/cpu/mcheck/winchip.c +++ b/arch/x86/kernel/cpu/mcheck/winchip.c | |||
@@ -8,14 +8,14 @@ | |||
8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
9 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
10 | 10 | ||
11 | #include <asm/processor.h> | 11 | #include <asm/processor.h> |
12 | #include <asm/system.h> | 12 | #include <asm/system.h> |
13 | #include <asm/msr.h> | 13 | #include <asm/msr.h> |
14 | 14 | ||
15 | #include "mce.h" | 15 | #include "mce.h" |
16 | 16 | ||
17 | /* Machine check handler for WinChip C6 */ | 17 | /* Machine check handler for WinChip C6 */ |
18 | static void winchip_machine_check(struct pt_regs * regs, long error_code) | 18 | static void winchip_machine_check(struct pt_regs *regs, long error_code) |
19 | { | 19 | { |
20 | printk(KERN_EMERG "CPU0: Machine Check Exception.\n"); | 20 | printk(KERN_EMERG "CPU0: Machine Check Exception.\n"); |
21 | add_taint(TAINT_MACHINE_CHECK); | 21 | add_taint(TAINT_MACHINE_CHECK); |
@@ -28,8 +28,8 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c) | |||
28 | machine_check_vector = winchip_machine_check; | 28 | machine_check_vector = winchip_machine_check; |
29 | wmb(); | 29 | wmb(); |
30 | rdmsr(MSR_IDT_FCR1, lo, hi); | 30 | rdmsr(MSR_IDT_FCR1, lo, hi); |
31 | lo|= (1<<2); /* Enable EIERRINT (int 18 MCE) */ | 31 | lo |= (1<<2); /* Enable EIERRINT (int 18 MCE) */ |
32 | lo&= ~(1<<4); /* Enable MCE */ | 32 | lo &= ~(1<<4); /* Enable MCE */ |
33 | wrmsr(MSR_IDT_FCR1, lo, hi); | 33 | wrmsr(MSR_IDT_FCR1, lo, hi); |
34 | set_in_cr4(X86_CR4_MCE); | 34 | set_in_cr4(X86_CR4_MCE); |
35 | printk(KERN_INFO "Winchip machine check reporting enabled on CPU#0.\n"); | 35 | printk(KERN_INFO "Winchip machine check reporting enabled on CPU#0.\n"); |
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 3e18db4cefee..353efe4f5017 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <asm/cpufeature.h> | 11 | #include <asm/cpufeature.h> |
12 | #include <asm/processor-flags.h> | 12 | #include <asm/processor-flags.h> |
13 | #include <asm/tlbflush.h> | 13 | #include <asm/tlbflush.h> |
14 | #include <asm/pat.h> | ||
14 | #include "mtrr.h" | 15 | #include "mtrr.h" |
15 | 16 | ||
16 | struct mtrr_state { | 17 | struct mtrr_state { |
@@ -35,6 +36,8 @@ static struct fixed_range_block fixed_range_blocks[] = { | |||
35 | 36 | ||
36 | static unsigned long smp_changes_mask; | 37 | static unsigned long smp_changes_mask; |
37 | static struct mtrr_state mtrr_state = {}; | 38 | static struct mtrr_state mtrr_state = {}; |
39 | static int mtrr_state_set; | ||
40 | static u64 tom2; | ||
38 | 41 | ||
39 | #undef MODULE_PARAM_PREFIX | 42 | #undef MODULE_PARAM_PREFIX |
40 | #define MODULE_PARAM_PREFIX "mtrr." | 43 | #define MODULE_PARAM_PREFIX "mtrr." |
@@ -42,6 +45,111 @@ static struct mtrr_state mtrr_state = {}; | |||
42 | static int mtrr_show; | 45 | static int mtrr_show; |
43 | module_param_named(show, mtrr_show, bool, 0); | 46 | module_param_named(show, mtrr_show, bool, 0); |
44 | 47 | ||
48 | /* | ||
49 | * Returns the effective MTRR type for the region | ||
50 | * Error returns: | ||
51 | * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR | ||
52 | * - 0xFF - when MTRR is not enabled | ||
53 | */ | ||
54 | u8 mtrr_type_lookup(u64 start, u64 end) | ||
55 | { | ||
56 | int i; | ||
57 | u64 base, mask; | ||
58 | u8 prev_match, curr_match; | ||
59 | |||
60 | if (!mtrr_state_set) | ||
61 | return 0xFF; | ||
62 | |||
63 | if (!mtrr_state.enabled) | ||
64 | return 0xFF; | ||
65 | |||
66 | /* Make end inclusive, instead of exclusive */ | ||
67 | end--; | ||
68 | |||
69 | /* Look in fixed ranges. Just return the type as per start */ | ||
70 | if (mtrr_state.have_fixed && (start < 0x100000)) { | ||
71 | int idx; | ||
72 | |||
73 | if (start < 0x80000) { | ||
74 | idx = 0; | ||
75 | idx += (start >> 16); | ||
76 | return mtrr_state.fixed_ranges[idx]; | ||
77 | } else if (start < 0xC0000) { | ||
78 | idx = 1 * 8; | ||
79 | idx += ((start - 0x80000) >> 14); | ||
80 | return mtrr_state.fixed_ranges[idx]; | ||
81 | } else if (start < 0x1000000) { | ||
82 | idx = 3 * 8; | ||
83 | idx += ((start - 0xC0000) >> 12); | ||
84 | return mtrr_state.fixed_ranges[idx]; | ||
85 | } | ||
86 | } | ||
87 | |||
88 | /* | ||
89 | * Look in variable ranges | ||
90 | * Look for multiple ranges matching this address and pick type | ||
91 | * as per MTRR precedence | ||
92 | */ | ||
93 | if (!(mtrr_state.enabled & 2)) { | ||
94 | return mtrr_state.def_type; | ||
95 | } | ||
96 | |||
97 | prev_match = 0xFF; | ||
98 | for (i = 0; i < num_var_ranges; ++i) { | ||
99 | unsigned short start_state, end_state; | ||
100 | |||
101 | if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11))) | ||
102 | continue; | ||
103 | |||
104 | base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) + | ||
105 | (mtrr_state.var_ranges[i].base_lo & PAGE_MASK); | ||
106 | mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) + | ||
107 | (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK); | ||
108 | |||
109 | start_state = ((start & mask) == (base & mask)); | ||
110 | end_state = ((end & mask) == (base & mask)); | ||
111 | if (start_state != end_state) | ||
112 | return 0xFE; | ||
113 | |||
114 | if ((start & mask) != (base & mask)) { | ||
115 | continue; | ||
116 | } | ||
117 | |||
118 | curr_match = mtrr_state.var_ranges[i].base_lo & 0xff; | ||
119 | if (prev_match == 0xFF) { | ||
120 | prev_match = curr_match; | ||
121 | continue; | ||
122 | } | ||
123 | |||
124 | if (prev_match == MTRR_TYPE_UNCACHABLE || | ||
125 | curr_match == MTRR_TYPE_UNCACHABLE) { | ||
126 | return MTRR_TYPE_UNCACHABLE; | ||
127 | } | ||
128 | |||
129 | if ((prev_match == MTRR_TYPE_WRBACK && | ||
130 | curr_match == MTRR_TYPE_WRTHROUGH) || | ||
131 | (prev_match == MTRR_TYPE_WRTHROUGH && | ||
132 | curr_match == MTRR_TYPE_WRBACK)) { | ||
133 | prev_match = MTRR_TYPE_WRTHROUGH; | ||
134 | curr_match = MTRR_TYPE_WRTHROUGH; | ||
135 | } | ||
136 | |||
137 | if (prev_match != curr_match) { | ||
138 | return MTRR_TYPE_UNCACHABLE; | ||
139 | } | ||
140 | } | ||
141 | |||
142 | if (tom2) { | ||
143 | if (start >= (1ULL<<32) && (end < tom2)) | ||
144 | return MTRR_TYPE_WRBACK; | ||
145 | } | ||
146 | |||
147 | if (prev_match != 0xFF) | ||
148 | return prev_match; | ||
149 | |||
150 | return mtrr_state.def_type; | ||
151 | } | ||
152 | |||
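The variable-range loop above resolves overlapping MTRR types by fixed precedence: UC dominates everything, WB and WT collapse to WT, and any other mismatch degrades to UC. The combining step in isolation, as a hedged sketch (type encodings are the architectural MTRR values; this is an illustration, not kernel code):

/* Illustration of the MTRR type-combining rules implemented above. */
#include <stdio.h>

#define MTRR_TYPE_UNCACHABLE 0
#define MTRR_TYPE_WRTHROUGH  4
#define MTRR_TYPE_WRBACK     6

static unsigned char combine_types(unsigned char a, unsigned char b)
{
	if (a == MTRR_TYPE_UNCACHABLE || b == MTRR_TYPE_UNCACHABLE)
		return MTRR_TYPE_UNCACHABLE;		/* UC always wins */
	if ((a == MTRR_TYPE_WRBACK && b == MTRR_TYPE_WRTHROUGH) ||
	    (a == MTRR_TYPE_WRTHROUGH && b == MTRR_TYPE_WRBACK))
		return MTRR_TYPE_WRTHROUGH;		/* WB+WT -> WT */
	return (a == b) ? a : MTRR_TYPE_UNCACHABLE;	/* mismatch -> UC */
}

int main(void)
{
	printf("%u\n", combine_types(MTRR_TYPE_WRBACK, MTRR_TYPE_WRTHROUGH)); /* 4 */
	printf("%u\n", combine_types(MTRR_TYPE_WRBACK, MTRR_TYPE_UNCACHABLE)); /* 0 */
	return 0;
}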
45 | /* Get the MSR pair relating to a var range */ | 153 | /* Get the MSR pair relating to a var range */ |
46 | static void | 154 | static void |
47 | get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) | 155 | get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) |
@@ -79,12 +187,16 @@ static void print_fixed(unsigned base, unsigned step, const mtrr_type*types) | |||
79 | base, base + step - 1, mtrr_attrib_to_str(*types)); | 187 | base, base + step - 1, mtrr_attrib_to_str(*types)); |
80 | } | 188 | } |
81 | 189 | ||
190 | static void prepare_set(void); | ||
191 | static void post_set(void); | ||
192 | |||
82 | /* Grab all of the MTRR state for this CPU into *state */ | 193 | /* Grab all of the MTRR state for this CPU into *state */ |
83 | void __init get_mtrr_state(void) | 194 | void __init get_mtrr_state(void) |
84 | { | 195 | { |
85 | unsigned int i; | 196 | unsigned int i; |
86 | struct mtrr_var_range *vrs; | 197 | struct mtrr_var_range *vrs; |
87 | unsigned lo, dummy; | 198 | unsigned lo, dummy; |
199 | unsigned long flags; | ||
88 | 200 | ||
89 | vrs = mtrr_state.var_ranges; | 201 | vrs = mtrr_state.var_ranges; |
90 | 202 | ||
@@ -100,6 +212,15 @@ void __init get_mtrr_state(void) | |||
100 | mtrr_state.def_type = (lo & 0xff); | 212 | mtrr_state.def_type = (lo & 0xff); |
101 | mtrr_state.enabled = (lo & 0xc00) >> 10; | 213 | mtrr_state.enabled = (lo & 0xc00) >> 10; |
102 | 214 | ||
215 | if (amd_special_default_mtrr()) { | ||
216 | unsigned lo, hi; | ||
217 | /* TOP_MEM2 */ | ||
218 | rdmsr(MSR_K8_TOP_MEM2, lo, hi); | ||
219 | tom2 = hi; | ||
220 | tom2 <<= 32; | ||
221 | tom2 |= lo; | ||
222 | tom2 &= 0xffffff8000000ULL; | ||
223 | } | ||
103 | if (mtrr_show) { | 224 | if (mtrr_show) { |
104 | int high_width; | 225 | int high_width; |
105 | 226 | ||
@@ -130,7 +251,22 @@ void __init get_mtrr_state(void) | |||
130 | else | 251 | else |
131 | printk(KERN_INFO "MTRR %u disabled\n", i); | 252 | printk(KERN_INFO "MTRR %u disabled\n", i); |
132 | } | 253 | } |
254 | if (tom2) { | ||
255 | printk(KERN_INFO "TOM2: %016llx aka %lldM\n", | ||
256 | tom2, tom2>>20); | ||
257 | } | ||
133 | } | 258 | } |
259 | mtrr_state_set = 1; | ||
260 | |||
261 | /* PAT setup for the boot processor (BP). We need to go through the sync steps here */ | ||
262 | local_irq_save(flags); | ||
263 | prepare_set(); | ||
264 | |||
265 | pat_init(); | ||
266 | |||
267 | post_set(); | ||
268 | local_irq_restore(flags); | ||
269 | |||
134 | } | 270 | } |
135 | 271 | ||
136 | /* Some BIOSes are broken and don't set all MTRRs the same! */ | 272 | /* Some BIOSes are broken and don't set all MTRRs the same! */ |
@@ -397,6 +533,9 @@ static void generic_set_all(void) | |||
397 | /* Actually set the state */ | 533 | /* Actually set the state */ |
398 | mask = set_mtrr_state(); | 534 | mask = set_mtrr_state(); |
399 | 535 | ||
536 | /* also set PAT */ | ||
537 | pat_init(); | ||
538 | |||
400 | post_set(); | 539 | post_set(); |
401 | local_irq_restore(flags); | 540 | local_irq_restore(flags); |
402 | 541 | ||
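The TOM2 block added to get_mtrr_state() assembles a 64-bit top-of-memory value from the two 32-bit halves rdmsr returns, then masks it down to the MSR's alignment. The same hi/lo assembly in isolation; the sample halves are invented and the mask is copied from the patch rather than re-derived:

/* Illustration: building a 64-bit MSR value from rdmsr's lo/hi halves. */
#include <stdio.h>

static unsigned long long msr_to_u64(unsigned int lo, unsigned int hi)
{
	unsigned long long val = hi;

	val <<= 32;
	val |= lo;
	return val;
}

int main(void)
{
	/* made-up sample: TOP_MEM2 at 4.75 GB */
	unsigned long long tom2 = msr_to_u64(0x30000000, 0x1);

	tom2 &= 0xffffff8000000ULL;	/* alignment mask used by the patch */
	printf("tom2 = %#llx\n", tom2);	/* 0x130000000 */
	return 0;
}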
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 91e150acb46c..1960f1985e5e 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c | |||
@@ -424,11 +424,10 @@ static int __init mtrr_if_init(void) | |||
424 | return -ENODEV; | 424 | return -ENODEV; |
425 | 425 | ||
426 | proc_root_mtrr = | 426 | proc_root_mtrr = |
427 | create_proc_entry("mtrr", S_IWUSR | S_IRUGO, &proc_root); | 427 | proc_create("mtrr", S_IWUSR | S_IRUGO, &proc_root, &mtrr_fops); |
428 | if (proc_root_mtrr) { | 428 | |
429 | if (proc_root_mtrr) | ||
429 | proc_root_mtrr->owner = THIS_MODULE; | 430 | proc_root_mtrr->owner = THIS_MODULE; |
430 | proc_root_mtrr->proc_fops = &mtrr_fops; | ||
431 | } | ||
432 | return 0; | 431 | return 0; |
433 | } | 432 | } |
434 | 433 | ||
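The if.c hunk is part of a tree-wide conversion to proc_create(), which takes the file_operations at creation time and so closes the window where /proc/mtrr existed without its fops. A sketch of the converted pattern against the procfs API of this era; "example" and my_fops are placeholders:

/* Sketch only: proc_create() registers the fops atomically with the entry. */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>

static const struct file_operations my_fops = {
	.owner = THIS_MODULE,
	/* .open/.read/... would be filled in by a real user */
};

static int __init example_init(void)
{
	struct proc_dir_entry *e;

	e = proc_create("example", S_IRUGO, &proc_root, &my_fops);
	if (!e)
		return -ENOMEM;
	e->owner = THIS_MODULE;
	return 0;
}
module_init(example_init);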
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index a6450b3ae759..6a1e278d9323 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
@@ -627,7 +627,7 @@ early_param("disable_mtrr_trim", disable_mtrr_trim_setup); | |||
627 | #define Tom2Enabled (1U << 21) | 627 | #define Tom2Enabled (1U << 21) |
628 | #define Tom2ForceMemTypeWB (1U << 22) | 628 | #define Tom2ForceMemTypeWB (1U << 22) |
629 | 629 | ||
630 | static __init int amd_special_default_mtrr(void) | 630 | int __init amd_special_default_mtrr(void) |
631 | { | 631 | { |
632 | u32 l, h; | 632 | u32 l, h; |
633 | 633 | ||
diff --git a/arch/x86/kernel/cpu/mtrr/state.c b/arch/x86/kernel/cpu/mtrr/state.c index 9f8ba923d1c9..7f7e2753685b 100644 --- a/arch/x86/kernel/cpu/mtrr/state.c +++ b/arch/x86/kernel/cpu/mtrr/state.c | |||
@@ -19,13 +19,15 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) | |||
19 | if (use_intel() || is_cpu(CYRIX)) { | 19 | if (use_intel() || is_cpu(CYRIX)) { |
20 | 20 | ||
21 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ | 21 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ |
22 | if ( cpu_has_pge ) { | 22 | if (cpu_has_pge) { |
23 | ctxt->cr4val = read_cr4(); | 23 | ctxt->cr4val = read_cr4(); |
24 | write_cr4(ctxt->cr4val & ~X86_CR4_PGE); | 24 | write_cr4(ctxt->cr4val & ~X86_CR4_PGE); |
25 | } | 25 | } |
26 | 26 | ||
27 | /* Disable and flush caches. Note that wbinvd flushes the TLBs as | 27 | /* |
28 | a side-effect */ | 28 | * Disable and flush caches. Note that wbinvd flushes the TLBs |
29 | * as a side-effect | ||
30 | */ | ||
29 | cr0 = read_cr0() | X86_CR0_CD; | 31 | cr0 = read_cr0() | X86_CR0_CD; |
30 | wbinvd(); | 32 | wbinvd(); |
31 | write_cr0(cr0); | 33 | write_cr0(cr0); |
@@ -42,7 +44,7 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) | |||
42 | 44 | ||
43 | void set_mtrr_cache_disable(struct set_mtrr_context *ctxt) | 45 | void set_mtrr_cache_disable(struct set_mtrr_context *ctxt) |
44 | { | 46 | { |
45 | if (use_intel()) | 47 | if (use_intel()) |
46 | /* Disable MTRRs, and set the default type to uncached */ | 48 | /* Disable MTRRs, and set the default type to uncached */ |
47 | mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, | 49 | mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, |
48 | ctxt->deftype_hi); | 50 | ctxt->deftype_hi); |
@@ -66,12 +68,12 @@ void set_mtrr_done(struct set_mtrr_context *ctxt) | |||
66 | else | 68 | else |
67 | /* Cyrix ARRs - everything else was excluded at the top */ | 69 | /* Cyrix ARRs - everything else was excluded at the top */ |
68 | setCx86(CX86_CCR3, ctxt->ccr3); | 70 | setCx86(CX86_CCR3, ctxt->ccr3); |
69 | 71 | ||
70 | /* Enable caches */ | 72 | /* Enable caches */ |
71 | write_cr0(read_cr0() & 0xbfffffff); | 73 | write_cr0(read_cr0() & 0xbfffffff); |
72 | 74 | ||
73 | /* Restore value of CR4 */ | 75 | /* Restore value of CR4 */ |
74 | if ( cpu_has_pge ) | 76 | if (cpu_has_pge) |
75 | write_cr4(ctxt->cr4val); | 77 | write_cr4(ctxt->cr4val); |
76 | } | 78 | } |
77 | /* Re-enable interrupts locally (if enabled previously) */ | 79 | /* Re-enable interrupts locally (if enabled previously) */ |
diff --git a/arch/x86/kernel/cpu/nexgen.c b/arch/x86/kernel/cpu/nexgen.c index 961fbe1a748f..5d5e1c134123 100644 --- a/arch/x86/kernel/cpu/nexgen.c +++ b/arch/x86/kernel/cpu/nexgen.c | |||
@@ -9,11 +9,11 @@ | |||
9 | * Detect a NexGen CPU running without BIOS hypercode new enough | 9 | * Detect a NexGen CPU running without BIOS hypercode new enough |
10 | * to have CPUID. (Thanks to Herbert Oppmann) | 10 | * to have CPUID. (Thanks to Herbert Oppmann) |
11 | */ | 11 | */ |
12 | 12 | ||
13 | static int __cpuinit deep_magic_nexgen_probe(void) | 13 | static int __cpuinit deep_magic_nexgen_probe(void) |
14 | { | 14 | { |
15 | int ret; | 15 | int ret; |
16 | 16 | ||
17 | __asm__ __volatile__ ( | 17 | __asm__ __volatile__ ( |
18 | " movw $0x5555, %%ax\n" | 18 | " movw $0x5555, %%ax\n" |
19 | " xorw %%dx,%%dx\n" | 19 | " xorw %%dx,%%dx\n" |
@@ -22,22 +22,21 @@ static int __cpuinit deep_magic_nexgen_probe(void) | |||
22 | " movl $0, %%eax\n" | 22 | " movl $0, %%eax\n" |
23 | " jnz 1f\n" | 23 | " jnz 1f\n" |
24 | " movl $1, %%eax\n" | 24 | " movl $1, %%eax\n" |
25 | "1:\n" | 25 | "1:\n" |
26 | : "=a" (ret) : : "cx", "dx" ); | 26 | : "=a" (ret) : : "cx", "dx"); |
27 | return ret; | 27 | return ret; |
28 | } | 28 | } |
29 | 29 | ||
30 | static void __cpuinit init_nexgen(struct cpuinfo_x86 * c) | 30 | static void __cpuinit init_nexgen(struct cpuinfo_x86 *c) |
31 | { | 31 | { |
32 | c->x86_cache_size = 256; /* A few had 1 MB... */ | 32 | c->x86_cache_size = 256; /* A few had 1 MB... */ |
33 | } | 33 | } |
34 | 34 | ||
35 | static void __cpuinit nexgen_identify(struct cpuinfo_x86 * c) | 35 | static void __cpuinit nexgen_identify(struct cpuinfo_x86 *c) |
36 | { | 36 | { |
37 | /* Detect NexGen with old hypercode */ | 37 | /* Detect NexGen with old hypercode */ |
38 | if ( deep_magic_nexgen_probe() ) { | 38 | if (deep_magic_nexgen_probe()) |
39 | strcpy(c->x86_vendor_id, "NexGenDriven"); | 39 | strcpy(c->x86_vendor_id, "NexGenDriven"); |
40 | } | ||
41 | } | 40 | } |
42 | 41 | ||
43 | static struct cpu_dev nexgen_cpu_dev __cpuinitdata = { | 42 | static struct cpu_dev nexgen_cpu_dev __cpuinitdata = { |
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index af11d31dce0a..0978a4a39418 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c | |||
@@ -8,78 +8,139 @@ | |||
8 | /* | 8 | /* |
9 | * Get CPU information for use by the procfs. | 9 | * Get CPU information for use by the procfs. |
10 | */ | 10 | */ |
11 | #ifdef CONFIG_X86_32 | ||
12 | static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, | ||
13 | unsigned int cpu) | ||
14 | { | ||
15 | #ifdef CONFIG_X86_HT | ||
16 | if (c->x86_max_cores * smp_num_siblings > 1) { | ||
17 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); | ||
18 | seq_printf(m, "siblings\t: %d\n", | ||
19 | cpus_weight(per_cpu(cpu_core_map, cpu))); | ||
20 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); | ||
21 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); | ||
22 | seq_printf(m, "apicid\t\t: %d\n", c->apicid); | ||
23 | seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid); | ||
24 | } | ||
25 | #endif | ||
26 | } | ||
27 | |||
28 | static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) | ||
29 | { | ||
30 | /* | ||
31 | * We use exception 16 if we have hardware math and we've either seen | ||
32 | * it or the CPU claims it is internal | ||
33 | */ | ||
34 | int fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu); | ||
35 | seq_printf(m, | ||
36 | "fdiv_bug\t: %s\n" | ||
37 | "hlt_bug\t\t: %s\n" | ||
38 | "f00f_bug\t: %s\n" | ||
39 | "coma_bug\t: %s\n" | ||
40 | "fpu\t\t: %s\n" | ||
41 | "fpu_exception\t: %s\n" | ||
42 | "cpuid level\t: %d\n" | ||
43 | "wp\t\t: %s\n", | ||
44 | c->fdiv_bug ? "yes" : "no", | ||
45 | c->hlt_works_ok ? "no" : "yes", | ||
46 | c->f00f_bug ? "yes" : "no", | ||
47 | c->coma_bug ? "yes" : "no", | ||
48 | c->hard_math ? "yes" : "no", | ||
49 | fpu_exception ? "yes" : "no", | ||
50 | c->cpuid_level, | ||
51 | c->wp_works_ok ? "yes" : "no"); | ||
52 | } | ||
53 | #else | ||
54 | static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, | ||
55 | unsigned int cpu) | ||
56 | { | ||
57 | #ifdef CONFIG_SMP | ||
58 | if (c->x86_max_cores * smp_num_siblings > 1) { | ||
59 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); | ||
60 | seq_printf(m, "siblings\t: %d\n", | ||
61 | cpus_weight(per_cpu(cpu_core_map, cpu))); | ||
62 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); | ||
63 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); | ||
64 | seq_printf(m, "apicid\t\t: %d\n", c->apicid); | ||
65 | seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid); | ||
66 | } | ||
67 | #endif | ||
68 | } | ||
69 | |||
70 | static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) | ||
71 | { | ||
72 | seq_printf(m, | ||
73 | "fpu\t\t: yes\n" | ||
74 | "fpu_exception\t: yes\n" | ||
75 | "cpuid level\t: %d\n" | ||
76 | "wp\t\t: yes\n", | ||
77 | c->cpuid_level); | ||
78 | } | ||
79 | #endif | ||
80 | |||
11 | static int show_cpuinfo(struct seq_file *m, void *v) | 81 | static int show_cpuinfo(struct seq_file *m, void *v) |
12 | { | 82 | { |
13 | struct cpuinfo_x86 *c = v; | 83 | struct cpuinfo_x86 *c = v; |
14 | int i, n = 0; | 84 | unsigned int cpu = 0; |
15 | int fpu_exception; | 85 | int i; |
16 | 86 | ||
17 | #ifdef CONFIG_SMP | 87 | #ifdef CONFIG_SMP |
18 | n = c->cpu_index; | 88 | cpu = c->cpu_index; |
19 | #endif | 89 | #endif |
20 | seq_printf(m, "processor\t: %d\n" | 90 | seq_printf(m, "processor\t: %u\n" |
21 | "vendor_id\t: %s\n" | 91 | "vendor_id\t: %s\n" |
22 | "cpu family\t: %d\n" | 92 | "cpu family\t: %d\n" |
23 | "model\t\t: %d\n" | 93 | "model\t\t: %u\n" |
24 | "model name\t: %s\n", | 94 | "model name\t: %s\n", |
25 | n, | 95 | cpu, |
26 | c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", | 96 | c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", |
27 | c->x86, | 97 | c->x86, |
28 | c->x86_model, | 98 | c->x86_model, |
29 | c->x86_model_id[0] ? c->x86_model_id : "unknown"); | 99 | c->x86_model_id[0] ? c->x86_model_id : "unknown"); |
30 | 100 | ||
31 | if (c->x86_mask || c->cpuid_level >= 0) | 101 | if (c->x86_mask || c->cpuid_level >= 0) |
32 | seq_printf(m, "stepping\t: %d\n", c->x86_mask); | 102 | seq_printf(m, "stepping\t: %d\n", c->x86_mask); |
33 | else | 103 | else |
34 | seq_printf(m, "stepping\t: unknown\n"); | 104 | seq_printf(m, "stepping\t: unknown\n"); |
35 | 105 | ||
36 | if ( cpu_has(c, X86_FEATURE_TSC) ) { | 106 | if (cpu_has(c, X86_FEATURE_TSC)) { |
37 | unsigned int freq = cpufreq_quick_get(n); | 107 | unsigned int freq = cpufreq_quick_get(cpu); |
108 | |||
38 | if (!freq) | 109 | if (!freq) |
39 | freq = cpu_khz; | 110 | freq = cpu_khz; |
40 | seq_printf(m, "cpu MHz\t\t: %u.%03u\n", | 111 | seq_printf(m, "cpu MHz\t\t: %u.%03u\n", |
41 | freq / 1000, (freq % 1000)); | 112 | freq / 1000, (freq % 1000)); |
42 | } | 113 | } |
43 | 114 | ||
44 | /* Cache size */ | 115 | /* Cache size */ |
45 | if (c->x86_cache_size >= 0) | 116 | if (c->x86_cache_size >= 0) |
46 | seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); | 117 | seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); |
47 | #ifdef CONFIG_X86_HT | 118 | |
48 | if (c->x86_max_cores * smp_num_siblings > 1) { | 119 | show_cpuinfo_core(m, c, cpu); |
49 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); | 120 | show_cpuinfo_misc(m, c); |
50 | seq_printf(m, "siblings\t: %d\n", | 121 | |
51 | cpus_weight(per_cpu(cpu_core_map, n))); | 122 | seq_printf(m, "flags\t\t:"); |
52 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); | 123 | for (i = 0; i < 32*NCAPINTS; i++) |
53 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); | 124 | if (cpu_has(c, i) && x86_cap_flags[i] != NULL) |
54 | } | ||
55 | #endif | ||
56 | |||
57 | /* We use exception 16 if we have hardware math and we've either seen it or the CPU claims it is internal */ | ||
58 | fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu); | ||
59 | seq_printf(m, "fdiv_bug\t: %s\n" | ||
60 | "hlt_bug\t\t: %s\n" | ||
61 | "f00f_bug\t: %s\n" | ||
62 | "coma_bug\t: %s\n" | ||
63 | "fpu\t\t: %s\n" | ||
64 | "fpu_exception\t: %s\n" | ||
65 | "cpuid level\t: %d\n" | ||
66 | "wp\t\t: %s\n" | ||
67 | "flags\t\t:", | ||
68 | c->fdiv_bug ? "yes" : "no", | ||
69 | c->hlt_works_ok ? "no" : "yes", | ||
70 | c->f00f_bug ? "yes" : "no", | ||
71 | c->coma_bug ? "yes" : "no", | ||
72 | c->hard_math ? "yes" : "no", | ||
73 | fpu_exception ? "yes" : "no", | ||
74 | c->cpuid_level, | ||
75 | c->wp_works_ok ? "yes" : "no"); | ||
76 | |||
77 | for ( i = 0 ; i < 32*NCAPINTS ; i++ ) | ||
78 | if ( test_bit(i, c->x86_capability) && | ||
79 | x86_cap_flags[i] != NULL ) | ||
80 | seq_printf(m, " %s", x86_cap_flags[i]); | 125 | seq_printf(m, " %s", x86_cap_flags[i]); |
81 | 126 | ||
82 | for (i = 0; i < 32; i++) | 127 | seq_printf(m, "\nbogomips\t: %lu.%02lu\n", |
128 | c->loops_per_jiffy/(500000/HZ), | ||
129 | (c->loops_per_jiffy/(5000/HZ)) % 100); | ||
130 | |||
131 | #ifdef CONFIG_X86_64 | ||
132 | if (c->x86_tlbsize > 0) | ||
133 | seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize); | ||
134 | #endif | ||
135 | seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size); | ||
136 | #ifdef CONFIG_X86_64 | ||
137 | seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment); | ||
138 | seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", | ||
139 | c->x86_phys_bits, c->x86_virt_bits); | ||
140 | #endif | ||
141 | |||
142 | seq_printf(m, "power management:"); | ||
143 | for (i = 0; i < 32; i++) { | ||
83 | if (c->x86_power & (1 << i)) { | 144 | if (c->x86_power & (1 << i)) { |
84 | if (i < ARRAY_SIZE(x86_power_flags) && | 145 | if (i < ARRAY_SIZE(x86_power_flags) && |
85 | x86_power_flags[i]) | 146 | x86_power_flags[i]) |
@@ -89,11 +150,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
89 | else | 150 | else |
90 | seq_printf(m, " [%d]", i); | 151 | seq_printf(m, " [%d]", i); |
91 | } | 152 | } |
153 | } | ||
92 | 154 | ||
93 | seq_printf(m, "\nbogomips\t: %lu.%02lu\n", | 155 | seq_printf(m, "\n\n"); |
94 | c->loops_per_jiffy/(500000/HZ), | ||
95 | (c->loops_per_jiffy/(5000/HZ)) % 100); | ||
96 | seq_printf(m, "clflush size\t: %u\n\n", c->x86_clflush_size); | ||
97 | 156 | ||
98 | return 0; | 157 | return 0; |
99 | } | 158 | } |
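The bogomips line above formats loops_per_jiffy to two decimals with integer math only: dividing by (500000/HZ) gives the whole part of loops_per_jiffy*HZ/500000, and dividing by (5000/HZ) modulo 100 gives the hundredths. Worked with invented numbers:

/* Illustration of the integer-only BogoMIPS formatting used above. */
#include <stdio.h>

int main(void)
{
	unsigned long hz = 250;				/* sample CONFIG_HZ */
	unsigned long loops_per_jiffy = 4000000;	/* invented value */

	printf("bogomips\t: %lu.%02lu\n",
	       loops_per_jiffy / (500000 / hz),
	       (loops_per_jiffy / (5000 / hz)) % 100);
	/* 4000000/2000 = 2000; (4000000/20) % 100 = 0 -> "2000.00" */
	return 0;
}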
@@ -106,14 +165,17 @@ static void *c_start(struct seq_file *m, loff_t *pos) | |||
106 | return &cpu_data(*pos); | 165 | return &cpu_data(*pos); |
107 | return NULL; | 166 | return NULL; |
108 | } | 167 | } |
168 | |||
109 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | 169 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) |
110 | { | 170 | { |
111 | *pos = next_cpu(*pos, cpu_online_map); | 171 | *pos = next_cpu(*pos, cpu_online_map); |
112 | return c_start(m, pos); | 172 | return c_start(m, pos); |
113 | } | 173 | } |
174 | |||
114 | static void c_stop(struct seq_file *m, void *v) | 175 | static void c_stop(struct seq_file *m, void *v) |
115 | { | 176 | { |
116 | } | 177 | } |
178 | |||
117 | const struct seq_operations cpuinfo_op = { | 179 | const struct seq_operations cpuinfo_op = { |
118 | .start = c_start, | 180 | .start = c_start, |
119 | .next = c_next, | 181 | .next = c_next, |
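c_start/c_next/c_stop above, together with show_cpuinfo, follow the standard seq_file iterator contract: start positions a cursor for *pos, next advances it, stop releases anything start took, and show formats one element. The same four-callback shape over a trivial dataset, as a hedged sketch (the ex_* names and the items array are invented):

/* Sketch of the seq_file iterator contract used by cpuinfo_op. */
#include <linux/kernel.h>
#include <linux/seq_file.h>

static int items[] = { 1, 2, 3 };

static void *ex_start(struct seq_file *m, loff_t *pos)
{
	return (*pos < ARRAY_SIZE(items)) ? &items[*pos] : NULL;
}

static void *ex_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return ex_start(m, pos);	/* same bounds check as c_start */
}

static void ex_stop(struct seq_file *m, void *v)
{
	/* nothing to release in this sketch */
}

static int ex_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", *(int *)v);
	return 0;
}

static const struct seq_operations ex_ops = {
	.start = ex_start,
	.next  = ex_next,
	.stop  = ex_stop,
	.show  = ex_show,
};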
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index e8b422c1c512..b911a2c61b8f 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c | |||
@@ -18,8 +18,8 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | |||
18 | /* Print CMS and CPU revision */ | 18 | /* Print CMS and CPU revision */ |
19 | max = cpuid_eax(0x80860000); | 19 | max = cpuid_eax(0x80860000); |
20 | cpu_rev = 0; | 20 | cpu_rev = 0; |
21 | if ( max >= 0x80860001 ) { | 21 | if (max >= 0x80860001) { |
22 | cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags); | 22 | cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags); |
23 | if (cpu_rev != 0x02000000) { | 23 | if (cpu_rev != 0x02000000) { |
24 | printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n", | 24 | printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n", |
25 | (cpu_rev >> 24) & 0xff, | 25 | (cpu_rev >> 24) & 0xff, |
@@ -29,7 +29,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | |||
29 | cpu_freq); | 29 | cpu_freq); |
30 | } | 30 | } |
31 | } | 31 | } |
32 | if ( max >= 0x80860002 ) { | 32 | if (max >= 0x80860002) { |
33 | cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy); | 33 | cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy); |
34 | if (cpu_rev == 0x02000000) { | 34 | if (cpu_rev == 0x02000000) { |
35 | printk(KERN_INFO "CPU: Processor revision %08X, %u MHz\n", | 35 | printk(KERN_INFO "CPU: Processor revision %08X, %u MHz\n", |
@@ -42,7 +42,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | |||
42 | cms_rev1 & 0xff, | 42 | cms_rev1 & 0xff, |
43 | cms_rev2); | 43 | cms_rev2); |
44 | } | 44 | } |
45 | if ( max >= 0x80860006 ) { | 45 | if (max >= 0x80860006) { |
46 | cpuid(0x80860003, | 46 | cpuid(0x80860003, |
47 | (void *)&cpu_info[0], | 47 | (void *)&cpu_info[0], |
48 | (void *)&cpu_info[4], | 48 | (void *)&cpu_info[4], |
@@ -74,23 +74,25 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | |||
74 | wrmsr(0x80860004, cap_mask, uk); | 74 | wrmsr(0x80860004, cap_mask, uk); |
75 | 75 | ||
76 | /* All Transmeta CPUs have a constant TSC */ | 76 | /* All Transmeta CPUs have a constant TSC */ |
77 | set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); | 77 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
78 | 78 | ||
79 | #ifdef CONFIG_SYSCTL | 79 | #ifdef CONFIG_SYSCTL |
80 | /* randomize_va_space slows us down enormously; | 80 | /* |
81 | it probably triggers retranslation of x86->native bytecode */ | 81 | * randomize_va_space slows us down enormously; |
82 | * it probably triggers retranslation of x86->native bytecode | ||
83 | */ | ||
82 | randomize_va_space = 0; | 84 | randomize_va_space = 0; |
83 | #endif | 85 | #endif |
84 | } | 86 | } |
85 | 87 | ||
86 | static void __cpuinit transmeta_identify(struct cpuinfo_x86 * c) | 88 | static void __cpuinit transmeta_identify(struct cpuinfo_x86 *c) |
87 | { | 89 | { |
88 | u32 xlvl; | 90 | u32 xlvl; |
89 | 91 | ||
90 | /* Transmeta-defined flags: level 0x80860001 */ | 92 | /* Transmeta-defined flags: level 0x80860001 */ |
91 | xlvl = cpuid_eax(0x80860000); | 93 | xlvl = cpuid_eax(0x80860000); |
92 | if ( (xlvl & 0xffff0000) == 0x80860000 ) { | 94 | if ((xlvl & 0xffff0000) == 0x80860000) { |
93 | if ( xlvl >= 0x80860001 ) | 95 | if (xlvl >= 0x80860001) |
94 | c->x86_capability[2] = cpuid_edx(0x80860001); | 96 | c->x86_capability[2] = cpuid_edx(0x80860001); |
95 | } | 97 | } |
96 | } | 98 | } |
@@ -102,8 +104,4 @@ static struct cpu_dev transmeta_cpu_dev __cpuinitdata = { | |||
102 | .c_identify = transmeta_identify, | 104 | .c_identify = transmeta_identify, |
103 | }; | 105 | }; |
104 | 106 | ||
105 | int __init transmeta_init_cpu(void) | 107 | cpu_vendor_dev_register(X86_VENDOR_TRANSMETA, &transmeta_cpu_dev); |
106 | { | ||
107 | cpu_devs[X86_VENDOR_TRANSMETA] = &transmeta_cpu_dev; | ||
108 | return 0; | ||
109 | } | ||
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c index a7a4e75bdcd7..b1fc90989d75 100644 --- a/arch/x86/kernel/cpu/umc.c +++ b/arch/x86/kernel/cpu/umc.c | |||
@@ -3,24 +3,23 @@ | |||
3 | #include <asm/processor.h> | 3 | #include <asm/processor.h> |
4 | #include "cpu.h" | 4 | #include "cpu.h" |
5 | 5 | ||
6 | /* UMC chips appear to be only either 386 or 486, so no special init takes place. | 6 | /* |
7 | * UMC chips appear to be only either 386 or 486, | ||
8 | * so no special init takes place. | ||
7 | */ | 9 | */ |
8 | 10 | ||
9 | static struct cpu_dev umc_cpu_dev __cpuinitdata = { | 11 | static struct cpu_dev umc_cpu_dev __cpuinitdata = { |
10 | .c_vendor = "UMC", | 12 | .c_vendor = "UMC", |
11 | .c_ident = { "UMC UMC UMC" }, | 13 | .c_ident = { "UMC UMC UMC" }, |
12 | .c_models = { | 14 | .c_models = { |
13 | { .vendor = X86_VENDOR_UMC, .family = 4, .model_names = | 15 | { .vendor = X86_VENDOR_UMC, .family = 4, .model_names = |
14 | { | 16 | { |
15 | [1] = "U5D", | 17 | [1] = "U5D", |
16 | [2] = "U5S", | 18 | [2] = "U5S", |
17 | } | 19 | } |
18 | }, | 20 | }, |
19 | }, | 21 | }, |
20 | }; | 22 | }; |
21 | 23 | ||
22 | int __init umc_init_cpu(void) | 24 | cpu_vendor_dev_register(X86_VENDOR_UMC, &umc_cpu_dev); |
23 | { | 25 | |
24 | cpu_devs[X86_VENDOR_UMC] = &umc_cpu_dev; | ||
25 | return 0; | ||
26 | } | ||
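The transmeta.c and umc.c hunks retire the per-vendor init functions in favor of cpu_vendor_dev_register(), which records the (vendor, cpu_dev) pair at compile time for generic setup code to walk. One plausible shape for such a macro, shown only to illustrate the dedicated-linker-section technique; the section name, struct layout, and attributes here are reconstructions, not the patch's actual definition:

/* Reconstruction for illustration; details are assumptions. */
struct cpu_dev;

struct cpu_vendor_dev {
	int vendor;
	struct cpu_dev *cpu_dev;
};

#define cpu_vendor_dev_register(vid, cdev)				\
	static struct cpu_vendor_dev __cpu_vendor_dev_##vid		\
	__attribute__((used, section(".x86cpuvendor.init"))) =		\
	{ vid, cdev }

Boot code can then iterate the section between linker-provided start/end symbols and fill cpu_devs[] without every vendor file exporting an init hook.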
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 9a5fa0abfcc7..2251d0ae9570 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c | |||
@@ -26,11 +26,7 @@ | |||
26 | #include <linux/kdebug.h> | 26 | #include <linux/kdebug.h> |
27 | #include <asm/smp.h> | 27 | #include <asm/smp.h> |
28 | 28 | ||
29 | #ifdef CONFIG_X86_32 | ||
30 | #include <mach_ipi.h> | 29 | #include <mach_ipi.h> |
31 | #else | ||
32 | #include <asm/mach_apic.h> | ||
33 | #endif | ||
34 | 30 | ||
35 | /* This keeps a track of which one is crashing cpu. */ | 31 | /* This keeps a track of which one is crashing cpu. */ |
36 | static int crashing_cpu; | 32 | static int crashing_cpu; |
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c index dcd918c1580d..11c11b8ec48d 100644 --- a/arch/x86/kernel/ds.c +++ b/arch/x86/kernel/ds.c | |||
@@ -220,11 +220,11 @@ int ds_allocate(void **dsp, size_t bts_size_in_bytes) | |||
220 | 220 | ||
221 | int ds_free(void **dsp) | 221 | int ds_free(void **dsp) |
222 | { | 222 | { |
223 | if (*dsp) | 223 | if (*dsp) { |
224 | kfree((void *)get_bts_buffer_base(*dsp)); | 224 | kfree((void *)get_bts_buffer_base(*dsp)); |
225 | kfree(*dsp); | 225 | kfree(*dsp); |
226 | *dsp = NULL; | 226 | *dsp = NULL; |
227 | 227 | } | |
228 | return 0; | 228 | return 0; |
229 | } | 229 | } |
230 | 230 | ||
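The ds.c hunk is a pure brace fix: without braces only the first kfree() was guarded, so the trailing kfree(*dsp) and the NULL store ran unconditionally (harmless for kfree(NULL), but not what the indentation promised). The pitfall distilled into a user-space sketch with generic names:

/* Distilled version of the ds_free() brace fix above. */
#include <stdlib.h>

struct ctx { void *buf; };

/* With the braces, all three statements are conditional, matching
 * what the original indentation implied. */
static void free_ctx(struct ctx **cp)
{
	if (*cp) {
		free((*cp)->buf);
		free(*cp);
		*cp = NULL;
	}
}

int main(void)
{
	struct ctx *c = calloc(1, sizeof(*c));

	c->buf = malloc(16);
	free_ctx(&c);	/* frees both allocations and NULLs the pointer */
	free_ctx(&c);	/* now a guaranteed no-op */
	return 0;
}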
diff --git a/arch/x86/kernel/e820_32.c b/arch/x86/kernel/e820_32.c index 80444c5c9b14..0240cd778365 100644 --- a/arch/x86/kernel/e820_32.c +++ b/arch/x86/kernel/e820_32.c | |||
@@ -450,38 +450,25 @@ int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map) | |||
450 | * thinkpad 560x, for example, does not cooperate with the memory | 450 | * thinkpad 560x, for example, does not cooperate with the memory |
451 | * detection code.) | 451 | * detection code.) |
452 | */ | 452 | */ |
453 | int __init copy_e820_map(struct e820entry * biosmap, int nr_map) | 453 | int __init copy_e820_map(struct e820entry *biosmap, int nr_map) |
454 | { | 454 | { |
455 | /* Only one memory region (or negative)? Ignore it */ | 455 | /* Only one memory region (or negative)? Ignore it */ |
456 | if (nr_map < 2) | 456 | if (nr_map < 2) |
457 | return -1; | 457 | return -1; |
458 | 458 | ||
459 | do { | 459 | do { |
460 | unsigned long long start = biosmap->addr; | 460 | u64 start = biosmap->addr; |
461 | unsigned long long size = biosmap->size; | 461 | u64 size = biosmap->size; |
462 | unsigned long long end = start + size; | 462 | u64 end = start + size; |
463 | unsigned long type = biosmap->type; | 463 | u32 type = biosmap->type; |
464 | 464 | ||
465 | /* Overflow in 64 bits? Ignore the memory map. */ | 465 | /* Overflow in 64 bits? Ignore the memory map. */ |
466 | if (start > end) | 466 | if (start > end) |
467 | return -1; | 467 | return -1; |
468 | 468 | ||
469 | /* | ||
470 | * Some BIOSes claim RAM in the 640k - 1M region. | ||
471 | * Not right. Fix it up. | ||
472 | */ | ||
473 | if (type == E820_RAM) { | ||
474 | if (start < 0x100000ULL && end > 0xA0000ULL) { | ||
475 | if (start < 0xA0000ULL) | ||
476 | add_memory_region(start, 0xA0000ULL-start, type); | ||
477 | if (end <= 0x100000ULL) | ||
478 | continue; | ||
479 | start = 0x100000ULL; | ||
480 | size = end - start; | ||
481 | } | ||
482 | } | ||
483 | add_memory_region(start, size, type); | 469 | add_memory_region(start, size, type); |
484 | } while (biosmap++,--nr_map); | 470 | } while (biosmap++, --nr_map); |
471 | |||
485 | return 0; | 472 | return 0; |
486 | } | 473 | } |
487 | 474 | ||
diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c index 9be697126013..7f6c0c85c8f6 100644 --- a/arch/x86/kernel/e820_64.c +++ b/arch/x86/kernel/e820_64.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <asm/setup.h> | 27 | #include <asm/setup.h> |
28 | #include <asm/sections.h> | 28 | #include <asm/sections.h> |
29 | #include <asm/kdebug.h> | 29 | #include <asm/kdebug.h> |
30 | #include <asm/trampoline.h> | ||
30 | 31 | ||
31 | struct e820map e820; | 32 | struct e820map e820; |
32 | 33 | ||
@@ -36,11 +37,11 @@ struct e820map e820; | |||
36 | unsigned long end_pfn; | 37 | unsigned long end_pfn; |
37 | 38 | ||
38 | /* | 39 | /* |
39 | * end_pfn only includes RAM, while end_pfn_map includes all e820 entries. | 40 | * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. |
40 | * The direct mapping extends to end_pfn_map, so that we can directly access | 41 | * The direct mapping extends to max_pfn_mapped, so that we can directly access |
41 | * apertures, ACPI and other tables without having to play with fixmaps. | 42 | * apertures, ACPI and other tables without having to play with fixmaps. |
42 | */ | 43 | */ |
43 | unsigned long end_pfn_map; | 44 | unsigned long max_pfn_mapped; |
44 | 45 | ||
45 | /* | 46 | /* |
46 | * Last pfn which the user wants to use. | 47 | * Last pfn which the user wants to use. |
@@ -58,8 +59,8 @@ struct early_res { | |||
58 | }; | 59 | }; |
59 | static struct early_res early_res[MAX_EARLY_RES] __initdata = { | 60 | static struct early_res early_res[MAX_EARLY_RES] __initdata = { |
60 | { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */ | 61 | { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */ |
61 | #ifdef CONFIG_SMP | 62 | #ifdef CONFIG_X86_TRAMPOLINE |
62 | { SMP_TRAMPOLINE_BASE, SMP_TRAMPOLINE_BASE + 2*PAGE_SIZE, "SMP_TRAMPOLINE" }, | 63 | { TRAMPOLINE_BASE, TRAMPOLINE_BASE + 2 * PAGE_SIZE, "TRAMPOLINE" }, |
63 | #endif | 64 | #endif |
64 | {} | 65 | {} |
65 | }; | 66 | }; |
@@ -95,7 +96,8 @@ void __init early_res_to_bootmem(void) | |||
95 | } | 96 | } |
96 | 97 | ||
97 | /* Check for already reserved areas */ | 98 | /* Check for already reserved areas */ |
98 | static inline int bad_addr(unsigned long *addrp, unsigned long size) | 99 | static inline int |
100 | bad_addr(unsigned long *addrp, unsigned long size, unsigned long align) | ||
99 | { | 101 | { |
100 | int i; | 102 | int i; |
101 | unsigned long addr = *addrp, last; | 103 | unsigned long addr = *addrp, last; |
@@ -105,7 +107,7 @@ again: | |||
105 | for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) { | 107 | for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) { |
106 | struct early_res *r = &early_res[i]; | 108 | struct early_res *r = &early_res[i]; |
107 | if (last >= r->start && addr < r->end) { | 109 | if (last >= r->start && addr < r->end) { |
108 | *addrp = addr = r->end; | 110 | *addrp = addr = round_up(r->end, align); |
109 | changed = 1; | 111 | changed = 1; |
110 | goto again; | 112 | goto again; |
111 | } | 113 | } |
@@ -113,6 +115,40 @@ again: | |||
113 | return changed; | 115 | return changed; |
114 | } | 116 | } |
115 | 117 | ||
118 | /* Check for already reserved areas */ | ||
119 | static inline int | ||
120 | bad_addr_size(unsigned long *addrp, unsigned long *sizep, unsigned long align) | ||
121 | { | ||
122 | int i; | ||
123 | unsigned long addr = *addrp, last; | ||
124 | unsigned long size = *sizep; | ||
125 | int changed = 0; | ||
126 | again: | ||
127 | last = addr + size; | ||
128 | for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) { | ||
129 | struct early_res *r = &early_res[i]; | ||
130 | if (last > r->start && addr < r->start) { | ||
131 | size = r->start - addr; | ||
132 | changed = 1; | ||
133 | goto again; | ||
134 | } | ||
135 | if (last > r->end && addr < r->end) { | ||
136 | addr = round_up(r->end, align); | ||
137 | size = last - addr; | ||
138 | changed = 1; | ||
139 | goto again; | ||
140 | } | ||
141 | if (last <= r->end && addr >= r->start) { | ||
142 | (*sizep)++; | ||
143 | return 0; | ||
144 | } | ||
145 | } | ||
146 | if (changed) { | ||
147 | *addrp = addr; | ||
148 | *sizep = size; | ||
149 | } | ||
150 | return changed; | ||
151 | } | ||
116 | /* | 152 | /* |
117 | * This function checks if any part of the range <start,end> is mapped | 153 | * This function checks if any part of the range <start,end> is mapped |
118 | * with type. | 154 | * with type. |
@@ -174,26 +210,27 @@ int __init e820_all_mapped(unsigned long start, unsigned long end, | |||
174 | * Find a free area with specified alignment in a specific range. | 210 | * Find a free area with specified alignment in a specific range. |
175 | */ | 211 | */ |
176 | unsigned long __init find_e820_area(unsigned long start, unsigned long end, | 212 | unsigned long __init find_e820_area(unsigned long start, unsigned long end, |
177 | unsigned size, unsigned long align) | 213 | unsigned long size, unsigned long align) |
178 | { | 214 | { |
179 | int i; | 215 | int i; |
180 | unsigned long mask = ~(align - 1); | ||
181 | 216 | ||
182 | for (i = 0; i < e820.nr_map; i++) { | 217 | for (i = 0; i < e820.nr_map; i++) { |
183 | struct e820entry *ei = &e820.map[i]; | 218 | struct e820entry *ei = &e820.map[i]; |
184 | unsigned long addr = ei->addr, last; | 219 | unsigned long addr, last; |
220 | unsigned long ei_last; | ||
185 | 221 | ||
186 | if (ei->type != E820_RAM) | 222 | if (ei->type != E820_RAM) |
187 | continue; | 223 | continue; |
224 | addr = round_up(ei->addr, align); | ||
225 | ei_last = ei->addr + ei->size; | ||
188 | if (addr < start) | 226 | if (addr < start) |
189 | addr = start; | 227 | addr = round_up(start, align); |
190 | if (addr > ei->addr + ei->size) | 228 | if (addr >= ei_last) |
191 | continue; | 229 | continue; |
192 | while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size) | 230 | while (bad_addr(&addr, size, align) && addr+size <= ei_last) |
193 | ; | 231 | ; |
194 | addr = (addr + align - 1) & mask; | ||
195 | last = addr + size; | 232 | last = addr + size; |
196 | if (last > ei->addr + ei->size) | 233 | if (last > ei_last) |
197 | continue; | 234 | continue; |
198 | if (last > end) | 235 | if (last > end) |
199 | continue; | 236 | continue; |
@@ -203,6 +240,40 @@ unsigned long __init find_e820_area(unsigned long start, unsigned long end, | |||
203 | } | 240 | } |
204 | 241 | ||
205 | /* | 242 | /* |
243 | * Find next free range after *start | ||
244 | */ | ||
245 | unsigned long __init find_e820_area_size(unsigned long start, | ||
246 | unsigned long *sizep, | ||
247 | unsigned long align) | ||
248 | { | ||
249 | int i; | ||
250 | |||
251 | for (i = 0; i < e820.nr_map; i++) { | ||
252 | struct e820entry *ei = &e820.map[i]; | ||
253 | unsigned long addr, last; | ||
254 | unsigned long ei_last; | ||
255 | |||
256 | if (ei->type != E820_RAM) | ||
257 | continue; | ||
258 | addr = round_up(ei->addr, align); | ||
259 | ei_last = ei->addr + ei->size; | ||
260 | if (addr < start) | ||
261 | addr = round_up(start, align); | ||
262 | if (addr >= ei_last) | ||
263 | continue; | ||
264 | *sizep = ei_last - addr; | ||
265 | while (bad_addr_size(&addr, sizep, align) && | ||
266 | addr + *sizep <= ei_last) | ||
267 | ; | ||
268 | last = addr + *sizep; | ||
269 | if (last > ei_last) | ||
270 | continue; | ||
271 | return addr; | ||
272 | } | ||
273 | return -1UL; | ||
274 | |||
275 | } | ||
276 | /* | ||
206 | * Find the highest page frame number we have available | 277 | * Find the highest page frame number we have available |
207 | */ | 278 | */ |
208 | unsigned long __init e820_end_of_ram(void) | 279 | unsigned long __init e820_end_of_ram(void) |
@@ -211,29 +282,29 @@ unsigned long __init e820_end_of_ram(void) | |||
211 | 282 | ||
212 | end_pfn = find_max_pfn_with_active_regions(); | 283 | end_pfn = find_max_pfn_with_active_regions(); |
213 | 284 | ||
214 | if (end_pfn > end_pfn_map) | 285 | if (end_pfn > max_pfn_mapped) |
215 | end_pfn_map = end_pfn; | 286 | max_pfn_mapped = end_pfn; |
216 | if (end_pfn_map > MAXMEM>>PAGE_SHIFT) | 287 | if (max_pfn_mapped > MAXMEM>>PAGE_SHIFT) |
217 | end_pfn_map = MAXMEM>>PAGE_SHIFT; | 288 | max_pfn_mapped = MAXMEM>>PAGE_SHIFT; |
218 | if (end_pfn > end_user_pfn) | 289 | if (end_pfn > end_user_pfn) |
219 | end_pfn = end_user_pfn; | 290 | end_pfn = end_user_pfn; |
220 | if (end_pfn > end_pfn_map) | 291 | if (end_pfn > max_pfn_mapped) |
221 | end_pfn = end_pfn_map; | 292 | end_pfn = max_pfn_mapped; |
222 | 293 | ||
223 | printk(KERN_INFO "end_pfn_map = %lu\n", end_pfn_map); | 294 | printk(KERN_INFO "max_pfn_mapped = %lu\n", max_pfn_mapped); |
224 | return end_pfn; | 295 | return end_pfn; |
225 | } | 296 | } |
226 | 297 | ||
227 | /* | 298 | /* |
228 | * Mark e820 reserved areas as busy for the resource manager. | 299 | * Mark e820 reserved areas as busy for the resource manager. |
229 | */ | 300 | */ |
230 | void __init e820_reserve_resources(struct resource *code_resource, | 301 | void __init e820_reserve_resources(void) |
231 | struct resource *data_resource, struct resource *bss_resource) | ||
232 | { | 302 | { |
233 | int i; | 303 | int i; |
304 | struct resource *res; | ||
305 | |||
306 | res = alloc_bootmem_low(sizeof(struct resource) * e820.nr_map); | ||
234 | for (i = 0; i < e820.nr_map; i++) { | 307 | for (i = 0; i < e820.nr_map; i++) { |
235 | struct resource *res; | ||
236 | res = alloc_bootmem_low(sizeof(struct resource)); | ||
237 | switch (e820.map[i].type) { | 308 | switch (e820.map[i].type) { |
238 | case E820_RAM: res->name = "System RAM"; break; | 309 | case E820_RAM: res->name = "System RAM"; break; |
239 | case E820_ACPI: res->name = "ACPI Tables"; break; | 310 | case E820_ACPI: res->name = "ACPI Tables"; break; |
@@ -243,21 +314,8 @@ void __init e820_reserve_resources(struct resource *code_resource, | |||
243 | res->start = e820.map[i].addr; | 314 | res->start = e820.map[i].addr; |
244 | res->end = res->start + e820.map[i].size - 1; | 315 | res->end = res->start + e820.map[i].size - 1; |
245 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 316 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
246 | request_resource(&iomem_resource, res); | 317 | insert_resource(&iomem_resource, res); |
247 | if (e820.map[i].type == E820_RAM) { | 318 | res++; |
248 | /* | ||
249 | * We don't know which RAM region contains kernel data, | ||
250 | * so we try it repeatedly and let the resource manager | ||
251 | * test it. | ||
252 | */ | ||
253 | request_resource(res, code_resource); | ||
254 | request_resource(res, data_resource); | ||
255 | request_resource(res, bss_resource); | ||
256 | #ifdef CONFIG_KEXEC | ||
257 | if (crashk_res.start != crashk_res.end) | ||
258 | request_resource(res, &crashk_res); | ||
259 | #endif | ||
260 | } | ||
261 | } | 319 | } |
262 | } | 320 | } |
263 | 321 | ||
@@ -309,9 +367,9 @@ static int __init e820_find_active_region(const struct e820entry *ei, | |||
309 | if (*ei_startpfn >= *ei_endpfn) | 367 | if (*ei_startpfn >= *ei_endpfn) |
310 | return 0; | 368 | return 0; |
311 | 369 | ||
312 | /* Check if end_pfn_map should be updated */ | 370 | /* Check if max_pfn_mapped should be updated */ |
313 | if (ei->type != E820_RAM && *ei_endpfn > end_pfn_map) | 371 | if (ei->type != E820_RAM && *ei_endpfn > max_pfn_mapped) |
314 | end_pfn_map = *ei_endpfn; | 372 | max_pfn_mapped = *ei_endpfn; |
315 | 373 | ||
316 | /* Skip if map is outside the node */ | 374 | /* Skip if map is outside the node */ |
317 | if (ei->type != E820_RAM || *ei_endpfn <= start_pfn || | 375 | if (ei->type != E820_RAM || *ei_endpfn <= start_pfn || |
@@ -634,10 +692,10 @@ static int __init copy_e820_map(struct e820entry *biosmap, int nr_map) | |||
634 | return -1; | 692 | return -1; |
635 | 693 | ||
636 | do { | 694 | do { |
637 | unsigned long start = biosmap->addr; | 695 | u64 start = biosmap->addr; |
638 | unsigned long size = biosmap->size; | 696 | u64 size = biosmap->size; |
639 | unsigned long end = start + size; | 697 | u64 end = start + size; |
640 | unsigned long type = biosmap->type; | 698 | u32 type = biosmap->type; |
641 | 699 | ||
642 | /* Overflow in 64 bits? Ignore the memory map. */ | 700 | /* Overflow in 64 bits? Ignore the memory map. */ |
643 | if (start > end) | 701 | if (start > end) |
@@ -702,7 +760,7 @@ static int __init parse_memmap_opt(char *p) | |||
702 | saved_max_pfn = e820_end_of_ram(); | 760 | saved_max_pfn = e820_end_of_ram(); |
703 | remove_all_active_ranges(); | 761 | remove_all_active_ranges(); |
704 | #endif | 762 | #endif |
705 | end_pfn_map = 0; | 763 | max_pfn_mapped = 0; |
706 | e820.nr_map = 0; | 764 | e820.nr_map = 0; |
707 | userdef = 1; | 765 | userdef = 1; |
708 | return 0; | 766 | return 0; |
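find_e820_area() now rounds the candidate address to the requested alignment before the reserved-range check and re-rounds after every bump past a reservation, instead of aligning once at the end. The scan reduced to plain C over a toy memory map; the RAM and reserved ranges below are invented:

/* Toy version of the aligned free-area scan above. */
#include <stdio.h>

#define ROUND_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* a: power of two */

struct range { unsigned long start, end; };		/* [start, end) */

static const struct range ram[]      = { { 0x1000, 0x9f000 } };
static const struct range reserved[] = { { 0x2000, 0x3000 } };

static unsigned long find_area(unsigned long size, unsigned long align)
{
	for (unsigned int i = 0; i < sizeof(ram) / sizeof(ram[0]); i++) {
		unsigned long addr = ROUND_UP(ram[i].start, align);
again:
		for (unsigned int j = 0; j < sizeof(reserved) / sizeof(reserved[0]); j++) {
			if (addr + size > reserved[j].start &&
			    addr < reserved[j].end) {
				/* skip the reservation, then re-round */
				addr = ROUND_UP(reserved[j].end, align);
				goto again;
			}
		}
		if (addr + size <= ram[i].end)
			return addr;
	}
	return -1UL;
}

int main(void)
{
	printf("%#lx\n", find_area(0x2000, 0x1000));	/* prints 0x3000 */
	return 0;
}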
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index cff84cd9987f..643fd861b724 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c | |||
@@ -13,7 +13,7 @@ | |||
13 | #define VGABASE (__ISA_IO_base + 0xb8000) | 13 | #define VGABASE (__ISA_IO_base + 0xb8000) |
14 | 14 | ||
15 | static int max_ypos = 25, max_xpos = 80; | 15 | static int max_ypos = 25, max_xpos = 80; |
16 | static int current_ypos = 25, current_xpos = 0; | 16 | static int current_ypos = 25, current_xpos; |
17 | 17 | ||
18 | static void early_vga_write(struct console *con, const char *str, unsigned n) | 18 | static void early_vga_write(struct console *con, const char *str, unsigned n) |
19 | { | 19 | { |
@@ -108,12 +108,12 @@ static __init void early_serial_init(char *s) | |||
108 | 108 | ||
109 | if (*s) { | 109 | if (*s) { |
110 | unsigned port; | 110 | unsigned port; |
111 | if (!strncmp(s,"0x",2)) { | 111 | if (!strncmp(s, "0x", 2)) { |
112 | early_serial_base = simple_strtoul(s, &e, 16); | 112 | early_serial_base = simple_strtoul(s, &e, 16); |
113 | } else { | 113 | } else { |
114 | static int bases[] = { 0x3f8, 0x2f8 }; | 114 | static int bases[] = { 0x3f8, 0x2f8 }; |
115 | 115 | ||
116 | if (!strncmp(s,"ttyS",4)) | 116 | if (!strncmp(s, "ttyS", 4)) |
117 | s += 4; | 117 | s += 4; |
118 | port = simple_strtoul(s, &e, 10); | 118 | port = simple_strtoul(s, &e, 10); |
119 | if (port > 1 || s == e) | 119 | if (port > 1 || s == e) |
@@ -194,7 +194,7 @@ static struct console simnow_console = { | |||
194 | 194 | ||
195 | /* Direct interface for emergencies */ | 195 | /* Direct interface for emergencies */ |
196 | static struct console *early_console = &early_vga_console; | 196 | static struct console *early_console = &early_vga_console; |
197 | static int early_console_initialized = 0; | 197 | static int early_console_initialized; |
198 | 198 | ||
199 | void early_printk(const char *fmt, ...) | 199 | void early_printk(const char *fmt, ...) |
200 | { | 200 | { |
@@ -202,9 +202,9 @@ void early_printk(const char *fmt, ...) | |||
202 | int n; | 202 | int n; |
203 | va_list ap; | 203 | va_list ap; |
204 | 204 | ||
205 | va_start(ap,fmt); | 205 | va_start(ap, fmt); |
206 | n = vscnprintf(buf,512,fmt,ap); | 206 | n = vscnprintf(buf, 512, fmt, ap); |
207 | early_console->write(early_console,buf,n); | 207 | early_console->write(early_console, buf, n); |
208 | va_end(ap); | 208 | va_end(ap); |
209 | } | 209 | } |
210 | 210 | ||
@@ -229,15 +229,15 @@ static int __init setup_early_printk(char *buf) | |||
229 | early_serial_init(buf); | 229 | early_serial_init(buf); |
230 | early_console = &early_serial_console; | 230 | early_console = &early_serial_console; |
231 | } else if (!strncmp(buf, "vga", 3) | 231 | } else if (!strncmp(buf, "vga", 3) |
232 | && boot_params.screen_info.orig_video_isVGA == 1) { | 232 | && boot_params.screen_info.orig_video_isVGA == 1) { |
233 | max_xpos = boot_params.screen_info.orig_video_cols; | 233 | max_xpos = boot_params.screen_info.orig_video_cols; |
234 | max_ypos = boot_params.screen_info.orig_video_lines; | 234 | max_ypos = boot_params.screen_info.orig_video_lines; |
235 | current_ypos = boot_params.screen_info.orig_y; | 235 | current_ypos = boot_params.screen_info.orig_y; |
236 | early_console = &early_vga_console; | 236 | early_console = &early_vga_console; |
237 | } else if (!strncmp(buf, "simnow", 6)) { | 237 | } else if (!strncmp(buf, "simnow", 6)) { |
238 | simnow_init(buf + 6); | 238 | simnow_init(buf + 6); |
239 | early_console = &simnow_console; | 239 | early_console = &simnow_console; |
240 | keep_early = 1; | 240 | keep_early = 1; |
241 | #ifdef CONFIG_HVC_XEN | 241 | #ifdef CONFIG_HVC_XEN |
242 | } else if (!strncmp(buf, "xen", 3)) { | 242 | } else if (!strncmp(buf, "xen", 3)) { |
243 | early_console = &xenboot_console; | 243 | early_console = &xenboot_console; |
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 4b87c32b639f..9ba49a26dff8 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <asm/desc.h> | 51 | #include <asm/desc.h> |
52 | #include <asm/percpu.h> | 52 | #include <asm/percpu.h> |
53 | #include <asm/dwarf2.h> | 53 | #include <asm/dwarf2.h> |
54 | #include <asm/processor-flags.h> | ||
54 | #include "irq_vectors.h" | 55 | #include "irq_vectors.h" |
55 | 56 | ||
56 | /* | 57 | /* |
@@ -68,13 +69,6 @@ | |||
68 | 69 | ||
69 | #define nr_syscalls ((syscall_table_size)/4) | 70 | #define nr_syscalls ((syscall_table_size)/4) |
70 | 71 | ||
71 | CF_MASK = 0x00000001 | ||
72 | TF_MASK = 0x00000100 | ||
73 | IF_MASK = 0x00000200 | ||
74 | DF_MASK = 0x00000400 | ||
75 | NT_MASK = 0x00004000 | ||
76 | VM_MASK = 0x00020000 | ||
77 | |||
78 | #ifdef CONFIG_PREEMPT | 72 | #ifdef CONFIG_PREEMPT |
79 | #define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF | 73 | #define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF |
80 | #else | 74 | #else |
@@ -84,7 +78,7 @@ VM_MASK = 0x00020000 | |||
84 | 78 | ||
85 | .macro TRACE_IRQS_IRET | 79 | .macro TRACE_IRQS_IRET |
86 | #ifdef CONFIG_TRACE_IRQFLAGS | 80 | #ifdef CONFIG_TRACE_IRQFLAGS |
87 | testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off? | 81 | testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off? |
88 | jz 1f | 82 | jz 1f |
89 | TRACE_IRQS_ON | 83 | TRACE_IRQS_ON |
90 | 1: | 84 | 1: |
@@ -246,7 +240,7 @@ ret_from_intr: | |||
246 | check_userspace: | 240 | check_userspace: |
247 | movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS | 241 | movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS |
248 | movb PT_CS(%esp), %al | 242 | movb PT_CS(%esp), %al |
249 | andl $(VM_MASK | SEGMENT_RPL_MASK), %eax | 243 | andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax |
250 | cmpl $USER_RPL, %eax | 244 | cmpl $USER_RPL, %eax |
251 | jb resume_kernel # not returning to v8086 or userspace | 245 | jb resume_kernel # not returning to v8086 or userspace |
252 | 246 | ||
@@ -271,7 +265,7 @@ need_resched: | |||
271 | movl TI_flags(%ebp), %ecx # need_resched set ? | 265 | movl TI_flags(%ebp), %ecx # need_resched set ? |
272 | testb $_TIF_NEED_RESCHED, %cl | 266 | testb $_TIF_NEED_RESCHED, %cl |
273 | jz restore_all | 267 | jz restore_all |
274 | testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off (exception path) ? | 268 | testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ? |
275 | jz restore_all | 269 | jz restore_all |
276 | call preempt_schedule_irq | 270 | call preempt_schedule_irq |
277 | jmp need_resched | 271 | jmp need_resched |
@@ -291,10 +285,10 @@ ENTRY(ia32_sysenter_target) | |||
291 | movl TSS_sysenter_sp0(%esp),%esp | 285 | movl TSS_sysenter_sp0(%esp),%esp |
292 | sysenter_past_esp: | 286 | sysenter_past_esp: |
293 | /* | 287 | /* |
294 | * No need to follow this irqs on/off section: the syscall | 288 | * Interrupts are disabled here, but we can't trace that until |
295 | * disabled irqs and here we enable it straight after entry: | 289 | * we have enough kernel state to call TRACE_IRQS_OFF - and we |
296 | 290 | * enable interrupts again immediately at that point anyway. | ||
296 | */ | 291 | */ |
297 | ENABLE_INTERRUPTS(CLBR_NONE) | ||
298 | pushl $(__USER_DS) | 292 | pushl $(__USER_DS) |
299 | CFI_ADJUST_CFA_OFFSET 4 | 293 | CFI_ADJUST_CFA_OFFSET 4 |
300 | /*CFI_REL_OFFSET ss, 0*/ | 294 | /*CFI_REL_OFFSET ss, 0*/ |
@@ -302,6 +296,7 @@ sysenter_past_esp: | |||
302 | CFI_ADJUST_CFA_OFFSET 4 | 296 | CFI_ADJUST_CFA_OFFSET 4 |
303 | CFI_REL_OFFSET esp, 0 | 297 | CFI_REL_OFFSET esp, 0 |
304 | pushfl | 298 | pushfl |
299 | orl $X86_EFLAGS_IF, (%esp) | ||
305 | CFI_ADJUST_CFA_OFFSET 4 | 300 | CFI_ADJUST_CFA_OFFSET 4 |
306 | pushl $(__USER_CS) | 301 | pushl $(__USER_CS) |
307 | CFI_ADJUST_CFA_OFFSET 4 | 302 | CFI_ADJUST_CFA_OFFSET 4 |
@@ -315,6 +310,11 @@ sysenter_past_esp: | |||
315 | CFI_ADJUST_CFA_OFFSET 4 | 310 | CFI_ADJUST_CFA_OFFSET 4 |
316 | CFI_REL_OFFSET eip, 0 | 311 | CFI_REL_OFFSET eip, 0 |
317 | 312 | ||
313 | pushl %eax | ||
314 | CFI_ADJUST_CFA_OFFSET 4 | ||
315 | SAVE_ALL | ||
316 | ENABLE_INTERRUPTS(CLBR_NONE) | ||
317 | |||
318 | /* | 318 | /* |
319 | * Load the potential sixth argument from user stack. | 319 | * Load the potential sixth argument from user stack. |
320 | * Careful about security. | 320 | * Careful about security. |
@@ -322,14 +322,12 @@ sysenter_past_esp: | |||
322 | cmpl $__PAGE_OFFSET-3,%ebp | 322 | cmpl $__PAGE_OFFSET-3,%ebp |
323 | jae syscall_fault | 323 | jae syscall_fault |
324 | 1: movl (%ebp),%ebp | 324 | 1: movl (%ebp),%ebp |
325 | movl %ebp,PT_EBP(%esp) | ||
325 | .section __ex_table,"a" | 326 | .section __ex_table,"a" |
326 | .align 4 | 327 | .align 4 |
327 | .long 1b,syscall_fault | 328 | .long 1b,syscall_fault |
328 | .previous | 329 | .previous |
329 | 330 | ||
330 | pushl %eax | ||
331 | CFI_ADJUST_CFA_OFFSET 4 | ||
332 | SAVE_ALL | ||
333 | GET_THREAD_INFO(%ebp) | 331 | GET_THREAD_INFO(%ebp) |
334 | 332 | ||
335 | /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ | 333 | /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ |
@@ -384,7 +382,7 @@ syscall_exit: | |||
384 | # setting need_resched or sigpending | 382 | # setting need_resched or sigpending |
385 | # between sampling and the iret | 383 | # between sampling and the iret |
386 | TRACE_IRQS_OFF | 384 | TRACE_IRQS_OFF |
387 | testl $TF_MASK,PT_EFLAGS(%esp) # If tracing set singlestep flag on exit | 385 | testl $X86_EFLAGS_TF,PT_EFLAGS(%esp) # If tracing set singlestep flag on exit |
388 | jz no_singlestep | 386 | jz no_singlestep |
389 | orl $_TIF_SINGLESTEP,TI_flags(%ebp) | 387 | orl $_TIF_SINGLESTEP,TI_flags(%ebp) |
390 | no_singlestep: | 388 | no_singlestep: |
@@ -399,7 +397,7 @@ restore_all: | |||
399 | # See comments in process.c:copy_thread() for details. | 397 | # See comments in process.c:copy_thread() for details. |
400 | movb PT_OLDSS(%esp), %ah | 398 | movb PT_OLDSS(%esp), %ah |
401 | movb PT_CS(%esp), %al | 399 | movb PT_CS(%esp), %al |
402 | andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax | 400 | andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax |
403 | cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax | 401 | cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax |
404 | CFI_REMEMBER_STATE | 402 | CFI_REMEMBER_STATE |
405 | je ldt_ss # returning to user-space with LDT SS | 403 | je ldt_ss # returning to user-space with LDT SS |
@@ -486,7 +484,7 @@ work_resched: | |||
486 | work_notifysig: # deal with pending signals and | 484 | work_notifysig: # deal with pending signals and |
487 | # notify-resume requests | 485 | # notify-resume requests |
488 | #ifdef CONFIG_VM86 | 486 | #ifdef CONFIG_VM86 |
489 | testl $VM_MASK, PT_EFLAGS(%esp) | 487 | testl $X86_EFLAGS_VM, PT_EFLAGS(%esp) |
490 | movl %esp, %eax | 488 | movl %esp, %eax |
491 | jne work_notifysig_v86 # returning to kernel-space or | 489 | jne work_notifysig_v86 # returning to kernel-space or |
492 | # vm86-space | 490 | # vm86-space |
@@ -543,9 +541,6 @@ END(syscall_exit_work) | |||
543 | 541 | ||
544 | RING0_INT_FRAME # can't unwind into user space anyway | 542 | RING0_INT_FRAME # can't unwind into user space anyway |
545 | syscall_fault: | 543 | syscall_fault: |
546 | pushl %eax # save orig_eax | ||
547 | CFI_ADJUST_CFA_OFFSET 4 | ||
548 | SAVE_ALL | ||
549 | GET_THREAD_INFO(%ebp) | 544 | GET_THREAD_INFO(%ebp) |
550 | movl $-EFAULT,PT_EAX(%esp) | 545 | movl $-EFAULT,PT_EAX(%esp) |
551 | jmp resume_userspace | 546 | jmp resume_userspace |
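The entry_32.S changes drop the file-local CF_MASK/TF_MASK/IF_MASK/... definitions for the shared X86_EFLAGS_* names from <asm/processor-flags.h>, so assembly and C test the same constants. The equivalent bit tests in C; the values are the architectural EFLAGS bit positions:

/* Illustration: testing a saved EFLAGS word with the shared names. */
#include <stdio.h>

#define X86_EFLAGS_TF 0x00000100	/* single-step trap flag */
#define X86_EFLAGS_IF 0x00000200	/* interrupts enabled */
#define X86_EFLAGS_VM 0x00020000	/* virtual-8086 mode */

int main(void)
{
	unsigned long eflags = 0x246;	/* invented saved value */

	printf("irqs on: %d\n", !!(eflags & X86_EFLAGS_IF));	/* 1 */
	printf("tracing: %d\n", !!(eflags & X86_EFLAGS_TF));	/* 0 */
	printf("vm86:    %d\n", !!(eflags & X86_EFLAGS_VM));	/* 0 */
	return 0;
}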
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index c20c9e7e08dd..556a8df522a7 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -319,19 +319,17 @@ badsys: | |||
319 | /* Do syscall tracing */ | 319 | /* Do syscall tracing */ |
320 | tracesys: | 320 | tracesys: |
321 | SAVE_REST | 321 | SAVE_REST |
322 | movq $-ENOSYS,RAX(%rsp) | 322 | movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ |
323 | FIXUP_TOP_OF_STACK %rdi | 323 | FIXUP_TOP_OF_STACK %rdi |
324 | movq %rsp,%rdi | 324 | movq %rsp,%rdi |
325 | call syscall_trace_enter | 325 | call syscall_trace_enter |
326 | LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */ | 326 | LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */ |
327 | RESTORE_REST | 327 | RESTORE_REST |
328 | cmpq $__NR_syscall_max,%rax | 328 | cmpq $__NR_syscall_max,%rax |
329 | movq $-ENOSYS,%rcx | 329 | ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */ |
330 | cmova %rcx,%rax | ||
331 | ja 1f | ||
332 | movq %r10,%rcx /* fixup for C */ | 330 | movq %r10,%rcx /* fixup for C */ |
333 | call *sys_call_table(,%rax,8) | 331 | call *sys_call_table(,%rax,8) |
334 | 1: movq %rax,RAX-ARGOFFSET(%rsp) | 332 | movq %rax,RAX-ARGOFFSET(%rsp) |
335 | /* Use IRET because user could have changed frame */ | 333 | /* Use IRET because user could have changed frame */ |
336 | 334 | ||
337 | /* | 335 | /* |
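tracesys above replaces the cmova fixup with a single unsigned bounds check that jumps straight to the exit path, relying on RAX(%rsp) already holding -ENOSYS. The same control flow in C; the table, handlers, and NR_SYSCALL_MAX are invented:

/* C rendering of the tracesys dispatch above. */
#include <errno.h>
#include <stdio.h>

static long sys_a(long x) { return x + 1; }
static long sys_b(long x) { return x * 2; }

static long (*const table[])(long) = { sys_a, sys_b };
#define NR_SYSCALL_MAX 1	/* highest valid index, like __NR_syscall_max */

static long dispatch(unsigned long nr, long arg)
{
	long ret = -ENOSYS;		/* movq $-ENOSYS,RAX(%rsp) */

	if (nr <= NR_SYSCALL_MAX)	/* cmpq ...; ja int_ret_from_sys_call */
		ret = table[nr](arg);
	return ret;
}

int main(void)
{
	printf("%ld %ld\n", dispatch(1, 21), dispatch(99, 0));	/* 42, then -ENOSYS */
	return 0;
}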
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index 4ae7b6440260..9546ef408b92 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/ctype.h> | 16 | #include <linux/ctype.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/hardirq.h> | ||
18 | 19 | ||
19 | #include <asm/smp.h> | 20 | #include <asm/smp.h> |
20 | #include <asm/ipi.h> | 21 | #include <asm/ipi.h> |
@@ -24,20 +25,20 @@ | |||
24 | #include <acpi/acpi_bus.h> | 25 | #include <acpi/acpi_bus.h> |
25 | #endif | 26 | #endif |
26 | 27 | ||
27 | /* which logical CPU number maps to which CPU (physical APIC ID) */ | 28 | DEFINE_PER_CPU(int, x2apic_extra_bits); |
28 | u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata | ||
29 | = { [0 ... NR_CPUS-1] = BAD_APICID }; | ||
30 | void *x86_cpu_to_apicid_early_ptr; | ||
31 | DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID; | ||
32 | EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); | ||
33 | 29 | ||
34 | struct genapic __read_mostly *genapic = &apic_flat; | 30 | struct genapic __read_mostly *genapic = &apic_flat; |
35 | 31 | ||
32 | static enum uv_system_type uv_system_type; | ||
33 | |||
36 | /* | 34 | /* |
37 | * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. | 35 | * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. |
38 | */ | 36 | */ |
39 | void __init setup_apic_routing(void) | 37 | void __init setup_apic_routing(void) |
40 | { | 38 | { |
39 | if (uv_system_type == UV_NON_UNIQUE_APIC) | ||
40 | genapic = &apic_x2apic_uv_x; | ||
41 | else | ||
41 | #ifdef CONFIG_ACPI | 42 | #ifdef CONFIG_ACPI |
42 | /* | 43 | /* |
43 | * Quirk: some x86_64 machines can only use physical APIC mode | 44 | * Quirk: some x86_64 machines can only use physical APIC mode |
@@ -64,3 +65,37 @@ void send_IPI_self(int vector) | |||
64 | { | 65 | { |
65 | __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); | 66 | __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); |
66 | } | 67 | } |
68 | |||
69 | int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
70 | { | ||
71 | if (!strcmp(oem_id, "SGI")) { | ||
72 | if (!strcmp(oem_table_id, "UVL")) | ||
73 | uv_system_type = UV_LEGACY_APIC; | ||
74 | else if (!strcmp(oem_table_id, "UVX")) | ||
75 | uv_system_type = UV_X2APIC; | ||
76 | else if (!strcmp(oem_table_id, "UVH")) | ||
77 | uv_system_type = UV_NON_UNIQUE_APIC; | ||
78 | } | ||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | unsigned int read_apic_id(void) | ||
83 | { | ||
84 | unsigned int id; | ||
85 | |||
86 | WARN_ON(preemptible()); | ||
87 | id = apic_read(APIC_ID); | ||
88 | if (uv_system_type >= UV_X2APIC) | ||
89 | id |= __get_cpu_var(x2apic_extra_bits); | ||
90 | return id; | ||
91 | } | ||
92 | |||
93 | enum uv_system_type get_uv_system_type(void) | ||
94 | { | ||
95 | return uv_system_type; | ||
96 | } | ||
97 | |||
98 | int is_uv_system(void) | ||
99 | { | ||
100 | return uv_system_type != UV_NONE; | ||
101 | } | ||
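acpi_madt_oem_check() keys the UV mode off the MADT OEM strings: oem_id "SGI" combined with table id "UVL", "UVX" or "UVH". A self-contained mirror of that decode for reference (the enum values are copied from the diff; the function name is hypothetical):

    #include <string.h>

    enum fake_uv_type { UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC };

    enum fake_uv_type decode_uv_oem(const char *oem_id, const char *oem_table_id)
    {
        if (strcmp(oem_id, "SGI"))              /* only SGI tables qualify */
            return UV_NONE;
        if (!strcmp(oem_table_id, "UVL"))
            return UV_LEGACY_APIC;
        if (!strcmp(oem_table_id, "UVX"))
            return UV_X2APIC;
        if (!strcmp(oem_table_id, "UVH"))
            return UV_NON_UNIQUE_APIC;
        return UV_NONE;
    }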
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 07352b74bda6..1a9c68845ee8 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c | |||
@@ -97,7 +97,7 @@ static void flat_send_IPI_all(int vector) | |||
97 | 97 | ||
98 | static int flat_apic_id_registered(void) | 98 | static int flat_apic_id_registered(void) |
99 | { | 99 | { |
100 | return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); | 100 | return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map); |
101 | } | 101 | } |
102 | 102 | ||
103 | static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask) | 103 | static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask) |
@@ -138,12 +138,9 @@ static cpumask_t physflat_target_cpus(void) | |||
138 | 138 | ||
139 | static cpumask_t physflat_vector_allocation_domain(int cpu) | 139 | static cpumask_t physflat_vector_allocation_domain(int cpu) |
140 | { | 140 | { |
141 | cpumask_t domain = CPU_MASK_NONE; | 141 | return cpumask_of_cpu(cpu); |
142 | cpu_set(cpu, domain); | ||
143 | return domain; | ||
144 | } | 142 | } |
145 | 143 | ||
146 | |||
147 | static void physflat_send_IPI_mask(cpumask_t cpumask, int vector) | 144 | static void physflat_send_IPI_mask(cpumask_t cpumask, int vector) |
148 | { | 145 | { |
149 | send_IPI_mask_sequence(cpumask, vector); | 146 | send_IPI_mask_sequence(cpumask, vector); |
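physflat_vector_allocation_domain() now builds its single-CPU mask with cpumask_of_cpu() instead of clearing a mask and setting one bit by hand. The equivalence, modeled on a plain word-sized bitmask (a sketch; the kernel's cpumask_t can be wider than one word):

    typedef unsigned long fake_mask_t;

    fake_mask_t domain_old_way(int cpu)
    {
        fake_mask_t domain = 0;        /* cpumask_t domain = CPU_MASK_NONE; */
        domain |= 1UL << cpu;          /* cpu_set(cpu, domain);             */
        return domain;
    }

    fake_mask_t domain_new_way(int cpu)
    {
        return 1UL << cpu;             /* return cpumask_of_cpu(cpu);       */
    }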
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c new file mode 100644 index 000000000000..5d77c9cd8e15 --- /dev/null +++ b/arch/x86/kernel/genx2apic_uv_x.c | |||
@@ -0,0 +1,245 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * SGI UV APIC functions (note: not an Intel compatible APIC) | ||
7 | * | ||
8 | * Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved. | ||
9 | */ | ||
10 | |||
11 | #include <linux/threads.h> | ||
12 | #include <linux/cpumask.h> | ||
13 | #include <linux/string.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/ctype.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/sched.h> | ||
18 | #include <linux/bootmem.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <asm/smp.h> | ||
21 | #include <asm/ipi.h> | ||
22 | #include <asm/genapic.h> | ||
23 | #include <asm/uv/uv_mmrs.h> | ||
24 | #include <asm/uv/uv_hub.h> | ||
25 | |||
26 | DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); | ||
27 | EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info); | ||
28 | |||
29 | struct uv_blade_info *uv_blade_info; | ||
30 | EXPORT_SYMBOL_GPL(uv_blade_info); | ||
31 | |||
32 | short *uv_node_to_blade; | ||
33 | EXPORT_SYMBOL_GPL(uv_node_to_blade); | ||
34 | |||
35 | short *uv_cpu_to_blade; | ||
36 | EXPORT_SYMBOL_GPL(uv_cpu_to_blade); | ||
37 | |||
38 | short uv_possible_blades; | ||
39 | EXPORT_SYMBOL_GPL(uv_possible_blades); | ||
40 | |||
41 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ | ||
42 | |||
43 | static cpumask_t uv_target_cpus(void) | ||
44 | { | ||
45 | return cpumask_of_cpu(0); | ||
46 | } | ||
47 | |||
48 | static cpumask_t uv_vector_allocation_domain(int cpu) | ||
49 | { | ||
50 | cpumask_t domain = CPU_MASK_NONE; | ||
51 | cpu_set(cpu, domain); | ||
52 | return domain; | ||
53 | } | ||
54 | |||
55 | int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) | ||
56 | { | ||
57 | unsigned long val; | ||
58 | int nasid; | ||
59 | |||
60 | nasid = uv_apicid_to_nasid(phys_apicid); | ||
61 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | | ||
62 | (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | | ||
63 | (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | | ||
64 | (6 << UVH_IPI_INT_DELIVERY_MODE_SHFT); | ||
65 | uv_write_global_mmr64(nasid, UVH_IPI_INT, val); | ||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static void uv_send_IPI_one(int cpu, int vector) | ||
70 | { | ||
71 | unsigned long val, apicid; | ||
72 | int nasid; | ||
73 | |||
74 | apicid = per_cpu(x86_cpu_to_apicid, cpu); /* ZZZ - cache node-local ? */ | ||
75 | nasid = uv_apicid_to_nasid(apicid); | ||
76 | val = | ||
77 | (1UL << UVH_IPI_INT_SEND_SHFT) | | ||
78 | (apicid << UVH_IPI_INT_APIC_ID_SHFT) | | ||
79 | (vector << UVH_IPI_INT_VECTOR_SHFT); | ||
80 | uv_write_global_mmr64(nasid, UVH_IPI_INT, val); | ||
81 | printk(KERN_DEBUG | ||
82 | "UV: IPI to cpu %d, apicid 0x%lx, vec %d, nasid%d, val 0x%lx\n", | ||
83 | cpu, apicid, vector, nasid, val); | ||
84 | } | ||
85 | |||
86 | static void uv_send_IPI_mask(cpumask_t mask, int vector) | ||
87 | { | ||
88 | unsigned int cpu; | ||
89 | |||
90 | for (cpu = 0; cpu < NR_CPUS; ++cpu) | ||
91 | if (cpu_isset(cpu, mask)) | ||
92 | uv_send_IPI_one(cpu, vector); | ||
93 | } | ||
94 | |||
95 | static void uv_send_IPI_allbutself(int vector) | ||
96 | { | ||
97 | cpumask_t mask = cpu_online_map; | ||
98 | |||
99 | cpu_clear(smp_processor_id(), mask); | ||
100 | |||
101 | if (!cpus_empty(mask)) | ||
102 | uv_send_IPI_mask(mask, vector); | ||
103 | } | ||
104 | |||
105 | static void uv_send_IPI_all(int vector) | ||
106 | { | ||
107 | uv_send_IPI_mask(cpu_online_map, vector); | ||
108 | } | ||
109 | |||
110 | static int uv_apic_id_registered(void) | ||
111 | { | ||
112 | return 1; | ||
113 | } | ||
114 | |||
115 | static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) | ||
116 | { | ||
117 | int cpu; | ||
118 | |||
119 | /* | ||
120 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | ||
121 | * May as well be the first. | ||
122 | */ | ||
123 | cpu = first_cpu(cpumask); | ||
124 | if ((unsigned)cpu < NR_CPUS) | ||
125 | return per_cpu(x86_cpu_to_apicid, cpu); | ||
126 | else | ||
127 | return BAD_APICID; | ||
128 | } | ||
129 | |||
130 | static unsigned int phys_pkg_id(int index_msb) | ||
131 | { | ||
132 | return GET_APIC_ID(read_apic_id()) >> index_msb; | ||
133 | } | ||
134 | |||
135 | #ifdef ZZZ /* Needs x2apic patch */ | ||
136 | static void uv_send_IPI_self(int vector) | ||
137 | { | ||
138 | apic_write(APIC_SELF_IPI, vector); | ||
139 | } | ||
140 | #endif | ||
141 | |||
142 | struct genapic apic_x2apic_uv_x = { | ||
143 | .name = "UV large system", | ||
144 | .int_delivery_mode = dest_Fixed, | ||
145 | .int_dest_mode = (APIC_DEST_PHYSICAL != 0), | ||
146 | .target_cpus = uv_target_cpus, | ||
147 | .vector_allocation_domain = uv_vector_allocation_domain,/* Fixme ZZZ */ | ||
148 | .apic_id_registered = uv_apic_id_registered, | ||
149 | .send_IPI_all = uv_send_IPI_all, | ||
150 | .send_IPI_allbutself = uv_send_IPI_allbutself, | ||
151 | .send_IPI_mask = uv_send_IPI_mask, | ||
152 | /* ZZZ.send_IPI_self = uv_send_IPI_self, */ | ||
153 | .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, | ||
154 | .phys_pkg_id = phys_pkg_id, /* Fixme ZZZ */ | ||
155 | }; | ||
156 | |||
157 | static __cpuinit void set_x2apic_extra_bits(int nasid) | ||
158 | { | ||
159 | __get_cpu_var(x2apic_extra_bits) = ((nasid >> 1) << 6); | ||
160 | } | ||
161 | |||
162 | /* | ||
163 | * Called on boot cpu. | ||
164 | */ | ||
165 | static __init void uv_system_init(void) | ||
166 | { | ||
167 | union uvh_si_addr_map_config_u m_n_config; | ||
168 | int bytes, nid, cpu, lcpu, nasid, last_nasid, blade; | ||
169 | unsigned long mmr_base; | ||
170 | |||
171 | m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); | ||
172 | mmr_base = | ||
173 | uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & | ||
174 | ~UV_MMR_ENABLE; | ||
175 | printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base); | ||
176 | |||
177 | last_nasid = -1; | ||
178 | for_each_possible_cpu(cpu) { | ||
179 | nid = cpu_to_node(cpu); | ||
180 | nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu)); | ||
181 | if (nasid != last_nasid) | ||
182 | uv_possible_blades++; | ||
183 | last_nasid = nasid; | ||
184 | } | ||
185 | printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); | ||
186 | |||
187 | bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); | ||
188 | uv_blade_info = alloc_bootmem_pages(bytes); | ||
189 | |||
190 | bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes(); | ||
191 | uv_node_to_blade = alloc_bootmem_pages(bytes); | ||
192 | memset(uv_node_to_blade, 255, bytes); | ||
193 | |||
194 | bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus(); | ||
195 | uv_cpu_to_blade = alloc_bootmem_pages(bytes); | ||
196 | memset(uv_cpu_to_blade, 255, bytes); | ||
197 | |||
198 | last_nasid = -1; | ||
199 | blade = -1; | ||
200 | lcpu = -1; | ||
201 | for_each_possible_cpu(cpu) { | ||
202 | nid = cpu_to_node(cpu); | ||
203 | nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu)); | ||
204 | if (nasid != last_nasid) { | ||
205 | blade++; | ||
206 | lcpu = -1; | ||
207 | uv_blade_info[blade].nr_possible_cpus = 0; | ||
208 | uv_blade_info[blade].nr_online_cpus = 0; | ||
209 | } | ||
210 | last_nasid = nasid; | ||
211 | lcpu++; | ||
212 | |||
213 | uv_cpu_hub_info(cpu)->m_val = m_n_config.s.m_skt; | ||
214 | uv_cpu_hub_info(cpu)->n_val = m_n_config.s.n_skt; | ||
215 | uv_cpu_hub_info(cpu)->numa_blade_id = blade; | ||
216 | uv_cpu_hub_info(cpu)->blade_processor_id = lcpu; | ||
217 | uv_cpu_hub_info(cpu)->local_nasid = nasid; | ||
218 | uv_cpu_hub_info(cpu)->gnode_upper = | ||
219 | nasid & ~((1 << uv_hub_info->n_val) - 1); | ||
220 | uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base; | ||
221 | uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */ | ||
222 | uv_blade_info[blade].nasid = nasid; | ||
223 | uv_blade_info[blade].nr_possible_cpus++; | ||
224 | uv_node_to_blade[nid] = blade; | ||
225 | uv_cpu_to_blade[cpu] = blade; | ||
226 | |||
227 | printk(KERN_DEBUG "UV cpu %d, apicid 0x%x, nasid %d, nid %d\n", | ||
228 | cpu, per_cpu(x86_cpu_to_apicid, cpu), nasid, nid); | ||
229 | printk(KERN_DEBUG "UV lcpu %d, blade %d\n", lcpu, blade); | ||
230 | } | ||
231 | } | ||
232 | |||
233 | /* | ||
234 | * Called on each cpu to initialize the per_cpu UV data area. | ||
235 | */ | ||
236 | void __cpuinit uv_cpu_init(void) | ||
237 | { | ||
238 | if (!uv_node_to_blade) | ||
239 | uv_system_init(); | ||
240 | |||
241 | uv_blade_info[uv_numa_blade_id()].nr_online_cpus++; | ||
242 | |||
243 | if (get_uv_system_type() == UV_NON_UNIQUE_APIC) | ||
244 | set_x2apic_extra_bits(uv_hub_info->local_nasid); | ||
245 | } | ||
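On UV_NON_UNIQUE_APIC systems the local APIC ID alone is ambiguous, so uv_cpu_init() stores ((nasid >> 1) << 6) in the per-cpu x2apic_extra_bits and read_apic_id() ORs it back into the hardware value. A worked example of that composition (the input values are illustrative):

    #include <stdio.h>

    unsigned int uv_effective_id(unsigned int local_id, int nasid)
    {
        return local_id | ((unsigned int)(nasid >> 1) << 6);
    }

    int main(void)
    {
        /* local APIC ID 5 on nasid 4: extra bits are (4 >> 1) << 6 = 0x80 */
        printf("0x%x\n", uv_effective_id(5, 4));    /* prints 0x85 */
        return 0;
    }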
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c new file mode 100644 index 000000000000..3db059058927 --- /dev/null +++ b/arch/x86/kernel/head32.c | |||
@@ -0,0 +1,14 @@ | |||
1 | /* | ||
2 | * linux/arch/x86/kernel/head32.c -- prepare to run common code | ||
3 | * | ||
4 | * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE | ||
5 | * Copyright (C) 2007 Eric Biederman <ebiederm@xmission.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/init.h> | ||
9 | #include <linux/start_kernel.h> | ||
10 | |||
11 | void __init i386_start_kernel(void) | ||
12 | { | ||
13 | start_kernel(); | ||
14 | } | ||
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index ad2440832de0..d6d54faa84df 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
@@ -49,39 +49,75 @@ static void __init copy_bootdata(char *real_mode_data) | |||
49 | } | 49 | } |
50 | } | 50 | } |
51 | 51 | ||
52 | #define EBDA_ADDR_POINTER 0x40E | 52 | #define BIOS_EBDA_SEGMENT 0x40E |
53 | #define BIOS_LOWMEM_KILOBYTES 0x413 | ||
53 | 54 | ||
54 | static __init void reserve_ebda(void) | 55 | /* |
56 | * The BIOS places the EBDA/XBDA at the top of conventional | ||
57 | * memory, and usually decreases the reported amount of | ||
58 | * conventional memory (int 0x12) too. This also contains a | ||
59 | * workaround for Dell systems that neglect to reserve EBDA. | ||
60 | * The same workaround also avoids a problem with the AMD768MPX | ||
61 | * chipset: reserve a page before VGA to prevent PCI prefetch | ||
62 | * into it (errata #56). Usually the page is reserved anyways, | ||
63 | * unless you have no PS/2 mouse plugged in. | ||
64 | */ | ||
65 | static void __init reserve_ebda_region(void) | ||
55 | { | 66 | { |
56 | unsigned ebda_addr, ebda_size; | 67 | unsigned int lowmem, ebda_addr; |
68 | |||
69 | /* | ||
70 | * To determine the position of the EBDA and the end of | ||
71 | * conventional memory, we need to look at the BIOS data area. | ||
72 | * In a paravirtual environment that area is absent; assume the | ||
73 | * paravirt case handles memory setup correctly without our help. | ||
74 | */ | ||
75 | if (paravirt_enabled()) | ||
76 | return; | ||
57 | 77 | ||
58 | /* | 78 | /* end of low (conventional) memory */ |
59 | * there is a real-mode segmented pointer pointing to the | 79 | lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES); |
60 | * 4K EBDA area at 0x40E | 80 | lowmem <<= 10; |
61 | */ | 81 | |
62 | ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER); | 82 | /* start of EBDA area */ |
83 | ebda_addr = *(unsigned short *)__va(BIOS_EBDA_SEGMENT); | ||
63 | ebda_addr <<= 4; | 84 | ebda_addr <<= 4; |
64 | 85 | ||
65 | if (!ebda_addr) | 86 | /* Fixup: bios puts an EBDA in the top 64K segment |
66 | return; | 87 | * of conventional memory, but does not adjust lowmem. */ |
88 | if ((lowmem - ebda_addr) <= 0x10000) | ||
89 | lowmem = ebda_addr; | ||
67 | 90 | ||
68 | ebda_size = *(unsigned short *)__va(ebda_addr); | 91 | /* Fixup: bios does not report an EBDA at all. |
92 | * Some old Dells seem to need 4k anyhow (bugzilla 2990). */ | ||
93 | if ((ebda_addr == 0) && (lowmem >= 0x9f000)) | ||
94 | lowmem = 0x9f000; | ||
69 | 95 | ||
70 | /* Round EBDA up to pages */ | 96 | /* Paranoia: should never happen, but... */ |
71 | if (ebda_size == 0) | 97 | if ((lowmem == 0) || (lowmem >= 0x100000)) |
72 | ebda_size = 1; | 98 | lowmem = 0x9f000; |
73 | ebda_size <<= 10; | ||
74 | ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE); | ||
75 | if (ebda_size > 64*1024) | ||
76 | ebda_size = 64*1024; | ||
77 | 99 | ||
78 | reserve_early(ebda_addr, ebda_addr + ebda_size, "EBDA"); | 100 | /* reserve all memory between lowmem and the 1MB mark */ |
101 | reserve_early(lowmem, 0x100000, "BIOS reserved"); | ||
79 | } | 102 | } |
80 | 103 | ||
81 | void __init x86_64_start_kernel(char * real_mode_data) | 104 | void __init x86_64_start_kernel(char * real_mode_data) |
82 | { | 105 | { |
83 | int i; | 106 | int i; |
84 | 107 | ||
108 | /* | ||
109 | * Build-time sanity checks on the kernel image and module | ||
110 | * area mappings. (these are purely build-time and produce no code) | ||
111 | */ | ||
112 | BUILD_BUG_ON(MODULES_VADDR < KERNEL_IMAGE_START); | ||
113 | BUILD_BUG_ON(MODULES_VADDR-KERNEL_IMAGE_START < KERNEL_IMAGE_SIZE); | ||
114 | BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE); | ||
115 | BUILD_BUG_ON((KERNEL_IMAGE_START & ~PMD_MASK) != 0); | ||
116 | BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0); | ||
117 | BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL)); | ||
118 | BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == | ||
119 | (__START_KERNEL & PGDIR_MASK))); | ||
120 | |||
85 | /* clear bss before set_intr_gate with early_idt_handler */ | 121 | /* clear bss before set_intr_gate with early_idt_handler */ |
86 | clear_bss(); | 122 | clear_bss(); |
87 | 123 | ||
@@ -91,7 +127,7 @@ void __init x86_64_start_kernel(char * real_mode_data) | |||
91 | /* Cleanup the over mapped high alias */ | 127 | /* Cleanup the over mapped high alias */ |
92 | cleanup_highmap(); | 128 | cleanup_highmap(); |
93 | 129 | ||
94 | for (i = 0; i < IDT_ENTRIES; i++) { | 130 | for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) { |
95 | #ifdef CONFIG_EARLY_PRINTK | 131 | #ifdef CONFIG_EARLY_PRINTK |
96 | set_intr_gate(i, &early_idt_handlers[i]); | 132 | set_intr_gate(i, &early_idt_handlers[i]); |
97 | #else | 133 | #else |
@@ -118,7 +154,7 @@ void __init x86_64_start_kernel(char * real_mode_data) | |||
118 | reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); | 154 | reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); |
119 | } | 155 | } |
120 | 156 | ||
121 | reserve_ebda(); | 157 | reserve_ebda_region(); |
122 | 158 | ||
123 | /* | 159 | /* |
124 | * At this point everything still needed from the boot loader | 160 | * At this point everything still needed from the boot loader |
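Worked numbers for the reserve_ebda_region() fixups above (the BIOS readings are illustrative): with 639 KB reported at 0x413 and EBDA segment 0x9FC0 at 0x40E, both addresses come out at 0x9FC00, the top-64K fixup leaves lowmem at the EBDA base, and everything from there up to 1 MB gets reserved.

    #include <stdio.h>

    int main(void)
    {
        unsigned int lowmem    = 639u << 10;    /* 0x9FC00, from 0x413        */
        unsigned int ebda_addr = 0x9FC0u << 4;  /* 0x9FC00, EBDA segment * 16 */

        if ((lowmem - ebda_addr) <= 0x10000)    /* EBDA in top 64K of lowmem  */
            lowmem = ebda_addr;
        if (ebda_addr == 0 && lowmem >= 0x9f000)    /* no EBDA reported       */
            lowmem = 0x9f000;
        if (lowmem == 0 || lowmem >= 0x100000)      /* paranoia clamp         */
            lowmem = 0x9f000;

        printf("reserve_early(0x%x, 0x100000, \"BIOS reserved\")\n", lowmem);
        return 0;                               /* prints 0x9fc00             */
    }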
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 74d87ea85b5c..826988a6e964 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S | |||
@@ -450,7 +450,7 @@ is386: movl $2,%ecx # set MP | |||
450 | jmp initialize_secondary # all other CPUs call initialize_secondary | 450 | jmp initialize_secondary # all other CPUs call initialize_secondary |
451 | 1: | 451 | 1: |
452 | #endif /* CONFIG_SMP */ | 452 | #endif /* CONFIG_SMP */ |
453 | jmp start_kernel | 453 | jmp i386_start_kernel |
454 | 454 | ||
455 | /* | 455 | /* |
456 | * We depend on ET to be correct. This checks for 287/387. | 456 | * We depend on ET to be correct. This checks for 287/387. |
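Together with the new head32.c above, this one-line change routes 32-bit boot through a C entry point, i386_start_kernel(), mirroring x86_64_start_kernel(). The payoff is a place for pre-start_kernel work; a standalone model of the hook (the reservation helper is hypothetical, not part of this commit):

    #include <stdio.h>

    static void start_kernel_stub(void)     { puts("start_kernel()"); }
    static void reserve_early_regions(void) { puts("early reservations (hypothetical)"); }

    /* model of i386_start_kernel(): early work slots in before start_kernel(),
     * just as x86_64_start_kernel() already does for the EBDA */
    static void i386_start_kernel_model(void)
    {
        reserve_early_regions();
        start_kernel_stub();
    }

    int main(void) { i386_start_kernel_model(); return 0; }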
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index a007454133a3..10a1955bb1d1 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S | |||
@@ -132,10 +132,6 @@ ident_complete: | |||
132 | addq %rbp, trampoline_level4_pgt + 0(%rip) | 132 | addq %rbp, trampoline_level4_pgt + 0(%rip) |
133 | addq %rbp, trampoline_level4_pgt + (511*8)(%rip) | 133 | addq %rbp, trampoline_level4_pgt + (511*8)(%rip) |
134 | #endif | 134 | #endif |
135 | #ifdef CONFIG_ACPI_SLEEP | ||
136 | addq %rbp, wakeup_level4_pgt + 0(%rip) | ||
137 | addq %rbp, wakeup_level4_pgt + (511*8)(%rip) | ||
138 | #endif | ||
139 | 135 | ||
140 | /* Due to ENTRY(), sometimes the empty space gets filled with | 136 | /* Due to ENTRY(), sometimes the empty space gets filled with |
141 | * zeros. Better take a jmp than relying on empty space being | 137 | * zeros. Better take a jmp than relying on empty space being |
@@ -267,21 +263,16 @@ ENTRY(secondary_startup_64) | |||
267 | bad_address: | 263 | bad_address: |
268 | jmp bad_address | 264 | jmp bad_address |
269 | 265 | ||
266 | .section ".init.text","ax" | ||
270 | #ifdef CONFIG_EARLY_PRINTK | 267 | #ifdef CONFIG_EARLY_PRINTK |
271 | .macro early_idt_tramp first, last | ||
272 | .ifgt \last-\first | ||
273 | early_idt_tramp \first, \last-1 | ||
274 | .endif | ||
275 | movl $\last,%esi | ||
276 | jmp early_idt_handler | ||
277 | .endm | ||
278 | |||
279 | .globl early_idt_handlers | 268 | .globl early_idt_handlers |
280 | early_idt_handlers: | 269 | early_idt_handlers: |
281 | early_idt_tramp 0, 63 | 270 | i = 0 |
282 | early_idt_tramp 64, 127 | 271 | .rept NUM_EXCEPTION_VECTORS |
283 | early_idt_tramp 128, 191 | 272 | movl $i, %esi |
284 | early_idt_tramp 192, 255 | 273 | jmp early_idt_handler |
274 | i = i + 1 | ||
275 | .endr | ||
285 | #endif | 276 | #endif |
286 | 277 | ||
287 | ENTRY(early_idt_handler) | 278 | ENTRY(early_idt_handler) |
@@ -327,6 +318,7 @@ early_idt_msg: | |||
327 | early_idt_ripmsg: | 318 | early_idt_ripmsg: |
328 | .asciz "RIP %s\n" | 319 | .asciz "RIP %s\n" |
329 | #endif /* CONFIG_EARLY_PRINTK */ | 320 | #endif /* CONFIG_EARLY_PRINTK */ |
321 | .previous | ||
330 | 322 | ||
331 | .balign PAGE_SIZE | 323 | .balign PAGE_SIZE |
332 | 324 | ||
@@ -383,12 +375,12 @@ NEXT_PAGE(level2_ident_pgt) | |||
383 | 375 | ||
384 | NEXT_PAGE(level2_kernel_pgt) | 376 | NEXT_PAGE(level2_kernel_pgt) |
385 | /* | 377 | /* |
386 | * 128 MB kernel mapping. We spend a full page on this pagetable | 378 | * 512 MB kernel mapping. We spend a full page on this pagetable |
387 | * anyway. | 379 | * anyway. |
388 | * | 380 | * |
389 | * The kernel code+data+bss must not be bigger than that. | 381 | * The kernel code+data+bss must not be bigger than that. |
390 | * | 382 | * |
391 | * (NOTE: at +128MB starts the module area, see MODULES_VADDR. | 383 | * (NOTE: at +512MB starts the module area, see MODULES_VADDR. |
392 | * If you want to increase this then increase MODULES_VADDR | 384 | * If you want to increase this then increase MODULES_VADDR |
393 | * too.) | 385 | * too.) |
394 | */ | 386 | */ |
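Arithmetic behind the corrected 512 MB comment (a standalone check, not kernel code): one page of PMD entries maps up to 1 GB in 2 MB steps, and the enlarged kernel-image mapping claims 512 MB of that, which is why the module area now begins at +512 MB.

    #include <stdio.h>

    int main(void)
    {
        unsigned long entries = 4096 / 8;       /* PMD entries per page: 512 */
        unsigned long per_pmd = 2UL << 20;      /* each maps 2 MB            */
        unsigned long image   = 512UL << 20;    /* kernel mapping size       */

        printf("full page maps %lu MB\n", (entries * per_pmd) >> 20); /* 1024 */
        printf("image uses %lu PMD entries\n", image / per_pmd);      /* 256  */
        return 0;
    }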
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c index 061627806a2d..deb43785e923 100644 --- a/arch/x86/kernel/i386_ksyms_32.c +++ b/arch/x86/kernel/i386_ksyms_32.c | |||
@@ -1,13 +1,8 @@ | |||
1 | #include <linux/module.h> | 1 | #include <linux/module.h> |
2 | #include <asm/semaphore.h> | ||
3 | #include <asm/checksum.h> | 2 | #include <asm/checksum.h> |
4 | #include <asm/desc.h> | 3 | #include <asm/desc.h> |
5 | #include <asm/pgtable.h> | 4 | #include <asm/pgtable.h> |
6 | 5 | ||
7 | EXPORT_SYMBOL(__down_failed); | ||
8 | EXPORT_SYMBOL(__down_failed_interruptible); | ||
9 | EXPORT_SYMBOL(__down_failed_trylock); | ||
10 | EXPORT_SYMBOL(__up_wakeup); | ||
11 | /* Networking helper routines. */ | 6 | /* Networking helper routines. */ |
12 | EXPORT_SYMBOL(csum_partial_copy_generic); | 7 | EXPORT_SYMBOL(csum_partial_copy_generic); |
13 | 8 | ||
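With the __down_failed*/__up_wakeup thunks removed, modules resolve down()/up() from the generic C semaphore implementation instead of per-arch assembly; callers are unchanged. A kernel-context usage sketch in the API of the era (not standalone code):

    #include <linux/errno.h>
    #include <asm/semaphore.h>

    static DECLARE_MUTEX(example_sem);      /* semaphore with count 1 */

    int guarded_op(void)
    {
        if (down_interruptible(&example_sem))
            return -EINTR;                  /* interrupted while sleeping */
        /* ... critical section ... */
        up(&example_sem);
        return 0;
    }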
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index d2e39e69aaf8..8f8102d967b3 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
@@ -5,45 +5,41 @@ | |||
5 | * General FPU state handling cleanups | 5 | * General FPU state handling cleanups |
6 | * Gareth Hughes <gareth@valinux.com>, May 2000 | 6 | * Gareth Hughes <gareth@valinux.com>, May 2000 |
7 | */ | 7 | */ |
8 | |||
9 | #include <linux/sched.h> | ||
10 | #include <linux/module.h> | 8 | #include <linux/module.h> |
11 | #include <linux/regset.h> | 9 | #include <linux/regset.h> |
10 | #include <linux/sched.h> | ||
11 | |||
12 | #include <asm/sigcontext.h> | ||
12 | #include <asm/processor.h> | 13 | #include <asm/processor.h> |
13 | #include <asm/i387.h> | ||
14 | #include <asm/math_emu.h> | 14 | #include <asm/math_emu.h> |
15 | #include <asm/sigcontext.h> | ||
16 | #include <asm/user.h> | ||
17 | #include <asm/ptrace.h> | ||
18 | #include <asm/uaccess.h> | 15 | #include <asm/uaccess.h> |
16 | #include <asm/ptrace.h> | ||
17 | #include <asm/i387.h> | ||
18 | #include <asm/user.h> | ||
19 | 19 | ||
20 | #ifdef CONFIG_X86_64 | 20 | #ifdef CONFIG_X86_64 |
21 | 21 | # include <asm/sigcontext32.h> | |
22 | #include <asm/sigcontext32.h> | 22 | # include <asm/user32.h> |
23 | #include <asm/user32.h> | ||
24 | |||
25 | #else | 23 | #else |
26 | 24 | # define save_i387_ia32 save_i387 | |
27 | #define save_i387_ia32 save_i387 | 25 | # define restore_i387_ia32 restore_i387 |
28 | #define restore_i387_ia32 restore_i387 | 26 | # define _fpstate_ia32 _fpstate |
29 | 27 | # define user_i387_ia32_struct user_i387_struct | |
30 | #define _fpstate_ia32 _fpstate | 28 | # define user32_fxsr_struct user_fxsr_struct |
31 | #define user_i387_ia32_struct user_i387_struct | ||
32 | #define user32_fxsr_struct user_fxsr_struct | ||
33 | |||
34 | #endif | 29 | #endif |
35 | 30 | ||
36 | #ifdef CONFIG_MATH_EMULATION | 31 | #ifdef CONFIG_MATH_EMULATION |
37 | #define HAVE_HWFP (boot_cpu_data.hard_math) | 32 | # define HAVE_HWFP (boot_cpu_data.hard_math) |
38 | #else | 33 | #else |
39 | #define HAVE_HWFP 1 | 34 | # define HAVE_HWFP 1 |
40 | #endif | 35 | #endif |
41 | 36 | ||
42 | static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; | 37 | static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; |
43 | 38 | ||
44 | void mxcsr_feature_mask_init(void) | 39 | void mxcsr_feature_mask_init(void) |
45 | { | 40 | { |
46 | unsigned long mask = 0; | 41 | unsigned long mask = 0; |
42 | |||
47 | clts(); | 43 | clts(); |
48 | if (cpu_has_fxsr) { | 44 | if (cpu_has_fxsr) { |
49 | memset(¤t->thread.i387.fxsave, 0, | 45 | memset(¤t->thread.i387.fxsave, 0, |
@@ -69,10 +65,11 @@ void __cpuinit fpu_init(void) | |||
69 | 65 | ||
70 | if (offsetof(struct task_struct, thread.i387.fxsave) & 15) | 66 | if (offsetof(struct task_struct, thread.i387.fxsave) & 15) |
71 | __bad_fxsave_alignment(); | 67 | __bad_fxsave_alignment(); |
68 | |||
72 | set_in_cr4(X86_CR4_OSFXSR); | 69 | set_in_cr4(X86_CR4_OSFXSR); |
73 | set_in_cr4(X86_CR4_OSXMMEXCPT); | 70 | set_in_cr4(X86_CR4_OSXMMEXCPT); |
74 | 71 | ||
75 | write_cr0(oldcr0 & ~((1UL<<3)|(1UL<<2))); /* clear TS and EM */ | 72 | write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */ |
76 | 73 | ||
77 | mxcsr_feature_mask_init(); | 74 | mxcsr_feature_mask_init(); |
78 | /* clean state in init */ | 75 | /* clean state in init */ |
@@ -178,6 +175,7 @@ static inline unsigned short twd_i387_to_fxsr(unsigned short twd) | |||
178 | tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */ | 175 | tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */ |
179 | tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */ | 176 | tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */ |
180 | tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */ | 177 | tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */ |
178 | |||
181 | return tmp; | 179 | return tmp; |
182 | } | 180 | } |
183 | 181 | ||
@@ -232,8 +230,8 @@ static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave) | |||
232 | * FXSR floating point environment conversions. | 230 | * FXSR floating point environment conversions. |
233 | */ | 231 | */ |
234 | 232 | ||
235 | static void convert_from_fxsr(struct user_i387_ia32_struct *env, | 233 | static void |
236 | struct task_struct *tsk) | 234 | convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk) |
237 | { | 235 | { |
238 | struct i387_fxsave_struct *fxsave = &tsk->thread.i387.fxsave; | 236 | struct i387_fxsave_struct *fxsave = &tsk->thread.i387.fxsave; |
239 | struct _fpreg *to = (struct _fpreg *) &env->st_space[0]; | 237 | struct _fpreg *to = (struct _fpreg *) &env->st_space[0]; |
@@ -252,10 +250,11 @@ static void convert_from_fxsr(struct user_i387_ia32_struct *env, | |||
252 | * should be actually ds/cs at fpu exception time, but | 250 | * should be actually ds/cs at fpu exception time, but |
253 | * that information is not available in 64bit mode. | 251 | * that information is not available in 64bit mode. |
254 | */ | 252 | */ |
255 | asm("mov %%ds,%0" : "=r" (env->fos)); | 253 | asm("mov %%ds, %[fos]" : [fos] "=r" (env->fos)); |
256 | asm("mov %%cs,%0" : "=r" (env->fcs)); | 254 | asm("mov %%cs, %[fcs]" : [fcs] "=r" (env->fcs)); |
257 | } else { | 255 | } else { |
258 | struct pt_regs *regs = task_pt_regs(tsk); | 256 | struct pt_regs *regs = task_pt_regs(tsk); |
257 | |||
259 | env->fos = 0xffff0000 | tsk->thread.ds; | 258 | env->fos = 0xffff0000 | tsk->thread.ds; |
260 | env->fcs = regs->cs; | 259 | env->fcs = regs->cs; |
261 | } | 260 | } |
@@ -309,9 +308,10 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset, | |||
309 | 308 | ||
310 | init_fpu(target); | 309 | init_fpu(target); |
311 | 310 | ||
312 | if (!cpu_has_fxsr) | 311 | if (!cpu_has_fxsr) { |
313 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 312 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
314 | &target->thread.i387.fsave, 0, -1); | 313 | &target->thread.i387.fsave, 0, -1); |
314 | } | ||
315 | 315 | ||
316 | if (kbuf && pos == 0 && count == sizeof(env)) { | 316 | if (kbuf && pos == 0 && count == sizeof(env)) { |
317 | convert_from_fxsr(kbuf, target); | 317 | convert_from_fxsr(kbuf, target); |
@@ -319,6 +319,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset, | |||
319 | } | 319 | } |
320 | 320 | ||
321 | convert_from_fxsr(&env, target); | 321 | convert_from_fxsr(&env, target); |
322 | |||
322 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1); | 323 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1); |
323 | } | 324 | } |
324 | 325 | ||
@@ -335,9 +336,10 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, | |||
335 | init_fpu(target); | 336 | init_fpu(target); |
336 | set_stopped_child_used_math(target); | 337 | set_stopped_child_used_math(target); |
337 | 338 | ||
338 | if (!cpu_has_fxsr) | 339 | if (!cpu_has_fxsr) { |
339 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, | 340 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
340 | &target->thread.i387.fsave, 0, -1); | 341 | &target->thread.i387.fsave, 0, -1); |
342 | } | ||
341 | 343 | ||
342 | if (pos > 0 || count < sizeof(env)) | 344 | if (pos > 0 || count < sizeof(env)) |
343 | convert_from_fxsr(&env, target); | 345 | convert_from_fxsr(&env, target); |
@@ -392,28 +394,28 @@ int save_i387_ia32(struct _fpstate_ia32 __user *buf) | |||
392 | { | 394 | { |
393 | if (!used_math()) | 395 | if (!used_math()) |
394 | return 0; | 396 | return 0; |
395 | 397 | /* | |
396 | /* This will cause a "finit" to be triggered by the next | 398 | * This will cause a "finit" to be triggered by the next |
397 | * attempted FPU operation by the 'current' process. | 399 | * attempted FPU operation by the 'current' process. |
398 | */ | 400 | */ |
399 | clear_used_math(); | 401 | clear_used_math(); |
400 | 402 | ||
401 | if (HAVE_HWFP) { | 403 | if (!HAVE_HWFP) { |
402 | if (cpu_has_fxsr) { | ||
403 | return save_i387_fxsave(buf); | ||
404 | } else { | ||
405 | return save_i387_fsave(buf); | ||
406 | } | ||
407 | } else { | ||
408 | return fpregs_soft_get(current, NULL, | 404 | return fpregs_soft_get(current, NULL, |
409 | 0, sizeof(struct user_i387_ia32_struct), | 405 | 0, sizeof(struct user_i387_ia32_struct), |
410 | NULL, buf) ? -1 : 1; | 406 | NULL, buf) ? -1 : 1; |
411 | } | 407 | } |
408 | |||
409 | if (cpu_has_fxsr) | ||
410 | return save_i387_fxsave(buf); | ||
411 | else | ||
412 | return save_i387_fsave(buf); | ||
412 | } | 413 | } |
413 | 414 | ||
414 | static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) | 415 | static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) |
415 | { | 416 | { |
416 | struct task_struct *tsk = current; | 417 | struct task_struct *tsk = current; |
418 | |||
417 | clear_fpu(tsk); | 419 | clear_fpu(tsk); |
418 | return __copy_from_user(&tsk->thread.i387.fsave, buf, | 420 | return __copy_from_user(&tsk->thread.i387.fsave, buf, |
419 | sizeof(struct i387_fsave_struct)); | 421 | sizeof(struct i387_fsave_struct)); |
@@ -421,9 +423,10 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) | |||
421 | 423 | ||
422 | static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf) | 424 | static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf) |
423 | { | 425 | { |
424 | int err; | ||
425 | struct task_struct *tsk = current; | 426 | struct task_struct *tsk = current; |
426 | struct user_i387_ia32_struct env; | 427 | struct user_i387_ia32_struct env; |
428 | int err; | ||
429 | |||
427 | clear_fpu(tsk); | 430 | clear_fpu(tsk); |
428 | err = __copy_from_user(&tsk->thread.i387.fxsave, &buf->_fxsr_env[0], | 431 | err = __copy_from_user(&tsk->thread.i387.fxsave, &buf->_fxsr_env[0], |
429 | sizeof(struct i387_fxsave_struct)); | 432 | sizeof(struct i387_fxsave_struct)); |
@@ -432,6 +435,7 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf) | |||
432 | if (err || __copy_from_user(&env, buf, sizeof(env))) | 435 | if (err || __copy_from_user(&env, buf, sizeof(env))) |
433 | return 1; | 436 | return 1; |
434 | convert_to_fxsr(tsk, &env); | 437 | convert_to_fxsr(tsk, &env); |
438 | |||
435 | return 0; | 439 | return 0; |
436 | } | 440 | } |
437 | 441 | ||
@@ -440,17 +444,17 @@ int restore_i387_ia32(struct _fpstate_ia32 __user *buf) | |||
440 | int err; | 444 | int err; |
441 | 445 | ||
442 | if (HAVE_HWFP) { | 446 | if (HAVE_HWFP) { |
443 | if (cpu_has_fxsr) { | 447 | if (cpu_has_fxsr) |
444 | err = restore_i387_fxsave(buf); | 448 | err = restore_i387_fxsave(buf); |
445 | } else { | 449 | else |
446 | err = restore_i387_fsave(buf); | 450 | err = restore_i387_fsave(buf); |
447 | } | ||
448 | } else { | 451 | } else { |
449 | err = fpregs_soft_set(current, NULL, | 452 | err = fpregs_soft_set(current, NULL, |
450 | 0, sizeof(struct user_i387_ia32_struct), | 453 | 0, sizeof(struct user_i387_ia32_struct), |
451 | NULL, buf) != 0; | 454 | NULL, buf) != 0; |
452 | } | 455 | } |
453 | set_used_math(); | 456 | set_used_math(); |
457 | |||
454 | return err; | 458 | return err; |
455 | } | 459 | } |
456 | 460 | ||
@@ -463,8 +467,8 @@ int restore_i387_ia32(struct _fpstate_ia32 __user *buf) | |||
463 | */ | 467 | */ |
464 | int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu) | 468 | int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu) |
465 | { | 469 | { |
466 | int fpvalid; | ||
467 | struct task_struct *tsk = current; | 470 | struct task_struct *tsk = current; |
471 | int fpvalid; | ||
468 | 472 | ||
469 | fpvalid = !!used_math(); | 473 | fpvalid = !!used_math(); |
470 | if (fpvalid) | 474 | if (fpvalid) |
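The save_i387_ia32() rework above is a pure control-flow transformation: the software-FPU case becomes an early return, flattening the nested hardware branches. The shape of it, as a standalone sketch (predicates and return codes are stand-ins):

    /* 1 = fxsave path, 2 = fsave path, 3 = soft-fpu path (markers only) */
    int save_shape_after(int have_hwfp, int has_fxsr)
    {
        if (!have_hwfp)
            return 3;       /* fpregs_soft_get(), handled first and gone */
        if (has_fxsr)
            return 1;       /* save_i387_fxsave() */
        return 2;           /* save_i387_fsave()  */
    }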
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index 4ca548632c8d..2e2f42074e18 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c | |||
@@ -71,6 +71,16 @@ int sis_apic_bug = -1; | |||
71 | */ | 71 | */ |
72 | int nr_ioapic_registers[MAX_IO_APICS]; | 72 | int nr_ioapic_registers[MAX_IO_APICS]; |
73 | 73 | ||
74 | /* I/O APIC entries */ | ||
75 | struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; | ||
76 | int nr_ioapics; | ||
77 | |||
78 | /* MP IRQ source entries */ | ||
79 | struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; | ||
80 | |||
81 | /* # of MP IRQ source entries */ | ||
82 | int mp_irq_entries; | ||
83 | |||
74 | static int disable_timer_pin_1 __initdata; | 84 | static int disable_timer_pin_1 __initdata; |
75 | 85 | ||
76 | /* | 86 | /* |
@@ -810,10 +820,7 @@ static int __init find_isa_irq_pin(int irq, int type) | |||
810 | for (i = 0; i < mp_irq_entries; i++) { | 820 | for (i = 0; i < mp_irq_entries; i++) { |
811 | int lbus = mp_irqs[i].mpc_srcbus; | 821 | int lbus = mp_irqs[i].mpc_srcbus; |
812 | 822 | ||
813 | if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA || | 823 | if (test_bit(lbus, mp_bus_not_pci) && |
814 | mp_bus_id_to_type[lbus] == MP_BUS_EISA || | ||
815 | mp_bus_id_to_type[lbus] == MP_BUS_MCA | ||
816 | ) && | ||
817 | (mp_irqs[i].mpc_irqtype == type) && | 824 | (mp_irqs[i].mpc_irqtype == type) && |
818 | (mp_irqs[i].mpc_srcbusirq == irq)) | 825 | (mp_irqs[i].mpc_srcbusirq == irq)) |
819 | 826 | ||
@@ -829,10 +836,7 @@ static int __init find_isa_irq_apic(int irq, int type) | |||
829 | for (i = 0; i < mp_irq_entries; i++) { | 836 | for (i = 0; i < mp_irq_entries; i++) { |
830 | int lbus = mp_irqs[i].mpc_srcbus; | 837 | int lbus = mp_irqs[i].mpc_srcbus; |
831 | 838 | ||
832 | if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA || | 839 | if (test_bit(lbus, mp_bus_not_pci) && |
833 | mp_bus_id_to_type[lbus] == MP_BUS_EISA || | ||
834 | mp_bus_id_to_type[lbus] == MP_BUS_MCA | ||
835 | ) && | ||
836 | (mp_irqs[i].mpc_irqtype == type) && | 840 | (mp_irqs[i].mpc_irqtype == type) && |
837 | (mp_irqs[i].mpc_srcbusirq == irq)) | 841 | (mp_irqs[i].mpc_srcbusirq == irq)) |
838 | break; | 842 | break; |
@@ -872,7 +876,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin) | |||
872 | mp_irqs[i].mpc_dstapic == MP_APIC_ALL) | 876 | mp_irqs[i].mpc_dstapic == MP_APIC_ALL) |
873 | break; | 877 | break; |
874 | 878 | ||
875 | if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) && | 879 | if (!test_bit(lbus, mp_bus_not_pci) && |
876 | !mp_irqs[i].mpc_irqtype && | 880 | !mp_irqs[i].mpc_irqtype && |
877 | (bus == lbus) && | 881 | (bus == lbus) && |
878 | (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) { | 882 | (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) { |
@@ -921,6 +925,7 @@ void __init setup_ioapic_dest(void) | |||
921 | } | 925 | } |
922 | #endif | 926 | #endif |
923 | 927 | ||
928 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) | ||
924 | /* | 929 | /* |
925 | * EISA Edge/Level control register, ELCR | 930 | * EISA Edge/Level control register, ELCR |
926 | */ | 931 | */ |
@@ -934,6 +939,13 @@ static int EISA_ELCR(unsigned int irq) | |||
934 | "Broken MPtable reports ISA irq %d\n", irq); | 939 | "Broken MPtable reports ISA irq %d\n", irq); |
935 | return 0; | 940 | return 0; |
936 | } | 941 | } |
942 | #endif | ||
943 | |||
944 | /* ISA interrupts are always polarity zero edge triggered, | ||
945 | * when listed as conforming in the MP table. */ | ||
946 | |||
947 | #define default_ISA_trigger(idx) (0) | ||
948 | #define default_ISA_polarity(idx) (0) | ||
937 | 949 | ||
938 | /* EISA interrupts are always polarity zero and can be edge or level | 950 | /* EISA interrupts are always polarity zero and can be edge or level |
939 | * trigger depending on the ELCR value. If an interrupt is listed as | 951 | * trigger depending on the ELCR value. If an interrupt is listed as |
@@ -941,13 +953,7 @@ static int EISA_ELCR(unsigned int irq) | |||
941 | * be read in from the ELCR */ | 953 | * be read in from the ELCR */ |
942 | 954 | ||
943 | #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq)) | 955 | #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq)) |
944 | #define default_EISA_polarity(idx) (0) | 956 | #define default_EISA_polarity(idx) default_ISA_polarity(idx) |
945 | |||
946 | /* ISA interrupts are always polarity zero edge triggered, | ||
947 | * when listed as conforming in the MP table. */ | ||
948 | |||
949 | #define default_ISA_trigger(idx) (0) | ||
950 | #define default_ISA_polarity(idx) (0) | ||
951 | 957 | ||
952 | /* PCI interrupts are always polarity one level triggered, | 958 | /* PCI interrupts are always polarity one level triggered, |
953 | * when listed as conforming in the MP table. */ | 959 | * when listed as conforming in the MP table. */ |
@@ -959,7 +965,7 @@ static int EISA_ELCR(unsigned int irq) | |||
959 | * when listed as conforming in the MP table. */ | 965 | * when listed as conforming in the MP table. */ |
960 | 966 | ||
961 | #define default_MCA_trigger(idx) (1) | 967 | #define default_MCA_trigger(idx) (1) |
962 | #define default_MCA_polarity(idx) (0) | 968 | #define default_MCA_polarity(idx) default_ISA_polarity(idx) |
963 | 969 | ||
964 | static int MPBIOS_polarity(int idx) | 970 | static int MPBIOS_polarity(int idx) |
965 | { | 971 | { |
@@ -973,35 +979,9 @@ static int MPBIOS_polarity(int idx) | |||
973 | { | 979 | { |
974 | case 0: /* conforms, ie. bus-type dependent polarity */ | 980 | case 0: /* conforms, ie. bus-type dependent polarity */ |
975 | { | 981 | { |
976 | switch (mp_bus_id_to_type[bus]) | 982 | polarity = test_bit(bus, mp_bus_not_pci) ? |
977 | { | 983 | default_ISA_polarity(idx) : |
978 | case MP_BUS_ISA: /* ISA pin */ | 984 | default_PCI_polarity(idx); |
979 | { | ||
980 | polarity = default_ISA_polarity(idx); | ||
981 | break; | ||
982 | } | ||
983 | case MP_BUS_EISA: /* EISA pin */ | ||
984 | { | ||
985 | polarity = default_EISA_polarity(idx); | ||
986 | break; | ||
987 | } | ||
988 | case MP_BUS_PCI: /* PCI pin */ | ||
989 | { | ||
990 | polarity = default_PCI_polarity(idx); | ||
991 | break; | ||
992 | } | ||
993 | case MP_BUS_MCA: /* MCA pin */ | ||
994 | { | ||
995 | polarity = default_MCA_polarity(idx); | ||
996 | break; | ||
997 | } | ||
998 | default: | ||
999 | { | ||
1000 | printk(KERN_WARNING "broken BIOS!!\n"); | ||
1001 | polarity = 1; | ||
1002 | break; | ||
1003 | } | ||
1004 | } | ||
1005 | break; | 985 | break; |
1006 | } | 986 | } |
1007 | case 1: /* high active */ | 987 | case 1: /* high active */ |
@@ -1042,11 +1022,15 @@ static int MPBIOS_trigger(int idx) | |||
1042 | { | 1022 | { |
1043 | case 0: /* conforms, ie. bus-type dependent */ | 1023 | case 0: /* conforms, ie. bus-type dependent */ |
1044 | { | 1024 | { |
1025 | trigger = test_bit(bus, mp_bus_not_pci) ? | ||
1026 | default_ISA_trigger(idx) : | ||
1027 | default_PCI_trigger(idx); | ||
1028 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) | ||
1045 | switch (mp_bus_id_to_type[bus]) | 1029 | switch (mp_bus_id_to_type[bus]) |
1046 | { | 1030 | { |
1047 | case MP_BUS_ISA: /* ISA pin */ | 1031 | case MP_BUS_ISA: /* ISA pin */ |
1048 | { | 1032 | { |
1049 | trigger = default_ISA_trigger(idx); | 1033 | /* set before the switch */ |
1050 | break; | 1034 | break; |
1051 | } | 1035 | } |
1052 | case MP_BUS_EISA: /* EISA pin */ | 1036 | case MP_BUS_EISA: /* EISA pin */ |
@@ -1056,7 +1040,7 @@ static int MPBIOS_trigger(int idx) | |||
1056 | } | 1040 | } |
1057 | case MP_BUS_PCI: /* PCI pin */ | 1041 | case MP_BUS_PCI: /* PCI pin */ |
1058 | { | 1042 | { |
1059 | trigger = default_PCI_trigger(idx); | 1043 | /* set before the switch */ |
1060 | break; | 1044 | break; |
1061 | } | 1045 | } |
1062 | case MP_BUS_MCA: /* MCA pin */ | 1046 | case MP_BUS_MCA: /* MCA pin */ |
@@ -1071,6 +1055,7 @@ static int MPBIOS_trigger(int idx) | |||
1071 | break; | 1055 | break; |
1072 | } | 1056 | } |
1073 | } | 1057 | } |
1058 | #endif | ||
1074 | break; | 1059 | break; |
1075 | } | 1060 | } |
1076 | case 1: /* edge */ | 1061 | case 1: /* edge */ |
@@ -1120,39 +1105,22 @@ static int pin_2_irq(int idx, int apic, int pin) | |||
1120 | if (mp_irqs[idx].mpc_dstirq != pin) | 1105 | if (mp_irqs[idx].mpc_dstirq != pin) |
1121 | printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n"); | 1106 | printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n"); |
1122 | 1107 | ||
1123 | switch (mp_bus_id_to_type[bus]) | 1108 | if (test_bit(bus, mp_bus_not_pci)) |
1124 | { | 1109 | irq = mp_irqs[idx].mpc_srcbusirq; |
1125 | case MP_BUS_ISA: /* ISA pin */ | 1110 | else { |
1126 | case MP_BUS_EISA: | 1111 | /* |
1127 | case MP_BUS_MCA: | 1112 | * PCI IRQs are mapped in order |
1128 | { | 1113 | */ |
1129 | irq = mp_irqs[idx].mpc_srcbusirq; | 1114 | i = irq = 0; |
1130 | break; | 1115 | while (i < apic) |
1131 | } | 1116 | irq += nr_ioapic_registers[i++]; |
1132 | case MP_BUS_PCI: /* PCI pin */ | 1117 | irq += pin; |
1133 | { | ||
1134 | /* | ||
1135 | * PCI IRQs are mapped in order | ||
1136 | */ | ||
1137 | i = irq = 0; | ||
1138 | while (i < apic) | ||
1139 | irq += nr_ioapic_registers[i++]; | ||
1140 | irq += pin; | ||
1141 | |||
1142 | /* | ||
1143 | * For MPS mode, so far only needed by ES7000 platform | ||
1144 | */ | ||
1145 | if (ioapic_renumber_irq) | ||
1146 | irq = ioapic_renumber_irq(apic, irq); | ||
1147 | 1118 | ||
1148 | break; | 1119 | /* |
1149 | } | 1120 | * For MPS mode, so far only needed by ES7000 platform |
1150 | default: | 1121 | */ |
1151 | { | 1122 | if (ioapic_renumber_irq) |
1152 | printk(KERN_ERR "unknown bus type %d.\n",bus); | 1123 | irq = ioapic_renumber_irq(apic, irq); |
1153 | irq = 0; | ||
1154 | break; | ||
1155 | } | ||
1156 | } | 1124 | } |
1157 | 1125 | ||
1158 | /* | 1126 | /* |
@@ -1260,7 +1228,6 @@ static void __init setup_IO_APIC_irqs(void) | |||
1260 | { | 1228 | { |
1261 | struct IO_APIC_route_entry entry; | 1229 | struct IO_APIC_route_entry entry; |
1262 | int apic, pin, idx, irq, first_notcon = 1, vector; | 1230 | int apic, pin, idx, irq, first_notcon = 1, vector; |
1263 | unsigned long flags; | ||
1264 | 1231 | ||
1265 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); | 1232 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); |
1266 | 1233 | ||
@@ -1326,9 +1293,7 @@ static void __init setup_IO_APIC_irqs(void) | |||
1326 | if (!apic && (irq < 16)) | 1293 | if (!apic && (irq < 16)) |
1327 | disable_8259A_irq(irq); | 1294 | disable_8259A_irq(irq); |
1328 | } | 1295 | } |
1329 | spin_lock_irqsave(&ioapic_lock, flags); | 1296 | ioapic_write_entry(apic, pin, entry); |
1330 | __ioapic_write_entry(apic, pin, entry); | ||
1331 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
1332 | } | 1297 | } |
1333 | } | 1298 | } |
1334 | 1299 | ||
@@ -1524,8 +1489,8 @@ void /*__init*/ print_local_APIC(void * dummy) | |||
1524 | 1489 | ||
1525 | printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", | 1490 | printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", |
1526 | smp_processor_id(), hard_smp_processor_id()); | 1491 | smp_processor_id(), hard_smp_processor_id()); |
1527 | v = apic_read(APIC_ID); | 1492 | v = read_apic_id(); |
1528 | printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v)); | 1493 | printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v)); |
1529 | v = apic_read(APIC_LVR); | 1494 | v = apic_read(APIC_LVR); |
1530 | printk(KERN_INFO "... APIC VERSION: %08x\n", v); | 1495 | printk(KERN_INFO "... APIC VERSION: %08x\n", v); |
1531 | ver = GET_APIC_VERSION(v); | 1496 | ver = GET_APIC_VERSION(v); |
@@ -1734,7 +1699,7 @@ void disable_IO_APIC(void) | |||
1734 | entry.delivery_mode = dest_ExtINT; /* ExtInt */ | 1699 | entry.delivery_mode = dest_ExtINT; /* ExtInt */ |
1735 | entry.vector = 0; | 1700 | entry.vector = 0; |
1736 | entry.dest.physical.physical_dest = | 1701 | entry.dest.physical.physical_dest = |
1737 | GET_APIC_ID(apic_read(APIC_ID)); | 1702 | GET_APIC_ID(read_apic_id()); |
1738 | 1703 | ||
1739 | /* | 1704 | /* |
1740 | * Add it to the IO-APIC irq-routing table: | 1705 | * Add it to the IO-APIC irq-routing table: |
@@ -2031,8 +1996,7 @@ static inline void init_IO_APIC_traps(void) | |||
2031 | * 0x80, because int 0x80 is hm, kind of importantish. ;) | 1996 | * 0x80, because int 0x80 is hm, kind of importantish. ;) |
2032 | */ | 1997 | */ |
2033 | for (irq = 0; irq < NR_IRQS ; irq++) { | 1998 | for (irq = 0; irq < NR_IRQS ; irq++) { |
2034 | int tmp = irq; | 1999 | if (IO_APIC_IRQ(irq) && !irq_vector[irq]) { |
2035 | if (IO_APIC_IRQ(tmp) && !irq_vector[tmp]) { | ||
2036 | /* | 2000 | /* |
2037 | * Hmm.. We don't have an entry for this, | 2001 | * Hmm.. We don't have an entry for this, |
2038 | * so default to an old-fashioned 8259 | 2002 | * so default to an old-fashioned 8259 |
@@ -2156,8 +2120,6 @@ static inline void unlock_ExtINT_logic(void) | |||
2156 | ioapic_write_entry(apic, pin, entry0); | 2120 | ioapic_write_entry(apic, pin, entry0); |
2157 | } | 2121 | } |
2158 | 2122 | ||
2159 | int timer_uses_ioapic_pin_0; | ||
2160 | |||
2161 | /* | 2123 | /* |
2162 | * This code may look a bit paranoid, but it's supposed to cooperate with | 2124 | * This code may look a bit paranoid, but it's supposed to cooperate with |
2163 | * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ | 2125 | * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ |
@@ -2168,10 +2130,14 @@ static inline void __init check_timer(void) | |||
2168 | { | 2130 | { |
2169 | int apic1, pin1, apic2, pin2; | 2131 | int apic1, pin1, apic2, pin2; |
2170 | int vector; | 2132 | int vector; |
2133 | unsigned int ver; | ||
2171 | unsigned long flags; | 2134 | unsigned long flags; |
2172 | 2135 | ||
2173 | local_irq_save(flags); | 2136 | local_irq_save(flags); |
2174 | 2137 | ||
2138 | ver = apic_read(APIC_LVR); | ||
2139 | ver = GET_APIC_VERSION(ver); | ||
2140 | |||
2175 | /* | 2141 | /* |
2176 | * get/set the timer IRQ vector: | 2142 | * get/set the timer IRQ vector: |
2177 | */ | 2143 | */ |
@@ -2184,11 +2150,15 @@ static inline void __init check_timer(void) | |||
2184 | * mode for the 8259A whenever interrupts are routed | 2150 | * mode for the 8259A whenever interrupts are routed |
2185 | * through I/O APICs. Also IRQ0 has to be enabled in | 2151 | * through I/O APICs. Also IRQ0 has to be enabled in |
2186 | * the 8259A which implies the virtual wire has to be | 2152 | * the 8259A which implies the virtual wire has to be |
2187 | * disabled in the local APIC. | 2153 | * disabled in the local APIC. Finally timer interrupts |
2154 | * need to be acknowledged manually in the 8259A for | ||
2155 | * timer_interrupt() and for the i82489DX when using | ||
2156 | * the NMI watchdog. | ||
2188 | */ | 2157 | */ |
2189 | apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); | 2158 | apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); |
2190 | init_8259A(1); | 2159 | init_8259A(1); |
2191 | timer_ack = 1; | 2160 | timer_ack = !cpu_has_tsc; |
2161 | timer_ack |= (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver)); | ||
2192 | if (timer_over_8254 > 0) | 2162 | if (timer_over_8254 > 0) |
2193 | enable_8259A_irq(0); | 2163 | enable_8259A_irq(0); |
2194 | 2164 | ||
@@ -2197,9 +2167,6 @@ static inline void __init check_timer(void) | |||
2197 | pin2 = ioapic_i8259.pin; | 2167 | pin2 = ioapic_i8259.pin; |
2198 | apic2 = ioapic_i8259.apic; | 2168 | apic2 = ioapic_i8259.apic; |
2199 | 2169 | ||
2200 | if (pin1 == 0) | ||
2201 | timer_uses_ioapic_pin_0 = 1; | ||
2202 | |||
2203 | printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n", | 2170 | printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n", |
2204 | vector, apic1, pin1, apic2, pin2); | 2171 | vector, apic1, pin1, apic2, pin2); |
2205 | 2172 | ||
@@ -2789,7 +2756,6 @@ int __init io_apic_get_redir_entries (int ioapic) | |||
2789 | int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low) | 2756 | int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low) |
2790 | { | 2757 | { |
2791 | struct IO_APIC_route_entry entry; | 2758 | struct IO_APIC_route_entry entry; |
2792 | unsigned long flags; | ||
2793 | 2759 | ||
2794 | if (!IO_APIC_IRQ(irq)) { | 2760 | if (!IO_APIC_IRQ(irq)) { |
2795 | printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", | 2761 | printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", |
@@ -2830,9 +2796,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a | |||
2830 | if (!ioapic && (irq < 16)) | 2796 | if (!ioapic && (irq < 16)) |
2831 | disable_8259A_irq(irq); | 2797 | disable_8259A_irq(irq); |
2832 | 2798 | ||
2833 | spin_lock_irqsave(&ioapic_lock, flags); | 2799 | ioapic_write_entry(ioapic, pin, entry); |
2834 | __ioapic_write_entry(ioapic, pin, entry); | ||
2835 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
2836 | 2800 | ||
2837 | return 0; | 2801 | return 0; |
2838 | } | 2802 | } |
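The thread running through this file: every switch over mp_bus_id_to_type[] collapses into one test of the mp_bus_not_pci bitmap, since ISA, EISA and MCA all share the ISA defaults (polarity 0) and only PCI differs (polarity 1, level triggered). A standalone model of the predicate (bit assignments illustrative):

    #include <stdio.h>

    static unsigned long mp_bus_not_pci_map;    /* bit set: ISA/EISA/MCA bus */

    static int bus_test_bit(int nr, const unsigned long *addr)
    {
        return (*addr >> nr) & 1;
    }

    static int default_polarity(int bus)
    {
        return bus_test_bit(bus, &mp_bus_not_pci_map) ?
            0 :     /* ISA-style: active high        */
            1;      /* PCI: active low, per the diff */
    }

    int main(void)
    {
        mp_bus_not_pci_map |= 1UL << 0;         /* bus 0 is ISA */
        printf("bus0=%d bus1=%d\n", default_polarity(0), default_polarity(1));
        return 0;                               /* prints bus0=0 bus1=1 */
    }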
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index 1627c0d53e0b..b54464b26658 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c | |||
@@ -43,13 +43,15 @@ | |||
43 | #include <asm/smp.h> | 43 | #include <asm/smp.h> |
44 | #include <asm/desc.h> | 44 | #include <asm/desc.h> |
45 | #include <asm/proto.h> | 45 | #include <asm/proto.h> |
46 | #include <asm/mach_apic.h> | ||
47 | #include <asm/acpi.h> | 46 | #include <asm/acpi.h> |
48 | #include <asm/dma.h> | 47 | #include <asm/dma.h> |
49 | #include <asm/nmi.h> | 48 | #include <asm/nmi.h> |
50 | #include <asm/msidef.h> | 49 | #include <asm/msidef.h> |
51 | #include <asm/hypertransport.h> | 50 | #include <asm/hypertransport.h> |
52 | 51 | ||
52 | #include <mach_ipi.h> | ||
53 | #include <mach_apic.h> | ||
54 | |||
53 | struct irq_cfg { | 55 | struct irq_cfg { |
54 | cpumask_t domain; | 56 | cpumask_t domain; |
55 | cpumask_t old_domain; | 57 | cpumask_t old_domain; |
@@ -101,6 +103,16 @@ DEFINE_SPINLOCK(vector_lock); | |||
101 | */ | 103 | */ |
102 | int nr_ioapic_registers[MAX_IO_APICS]; | 104 | int nr_ioapic_registers[MAX_IO_APICS]; |
103 | 105 | ||
106 | /* I/O APIC entries */ | ||
107 | struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; | ||
108 | int nr_ioapics; | ||
109 | |||
110 | /* MP IRQ source entries */ | ||
111 | struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; | ||
112 | |||
113 | /* # of MP IRQ source entries */ | ||
114 | int mp_irq_entries; | ||
115 | |||
104 | /* | 116 | /* |
105 | * Rough estimation of how many shared IRQs there are, can | 117 | * Rough estimation of how many shared IRQs there are, can |
106 | * be changed anytime. | 118 | * be changed anytime. |
@@ -155,11 +167,10 @@ static inline void io_apic_modify(unsigned int apic, unsigned int value) | |||
155 | writel(value, &io_apic->data); | 167 | writel(value, &io_apic->data); |
156 | } | 168 | } |
157 | 169 | ||
158 | static int io_apic_level_ack_pending(unsigned int irq) | 170 | static bool io_apic_level_ack_pending(unsigned int irq) |
159 | { | 171 | { |
160 | struct irq_pin_list *entry; | 172 | struct irq_pin_list *entry; |
161 | unsigned long flags; | 173 | unsigned long flags; |
162 | int pending = 0; | ||
163 | 174 | ||
164 | spin_lock_irqsave(&ioapic_lock, flags); | 175 | spin_lock_irqsave(&ioapic_lock, flags); |
165 | entry = irq_2_pin + irq; | 176 | entry = irq_2_pin + irq; |
@@ -172,13 +183,17 @@ static int io_apic_level_ack_pending(unsigned int irq) | |||
172 | break; | 183 | break; |
173 | reg = io_apic_read(entry->apic, 0x10 + pin*2); | 184 | reg = io_apic_read(entry->apic, 0x10 + pin*2); |
174 | /* Is the remote IRR bit set? */ | 185 | /* Is the remote IRR bit set? */ |
175 | pending |= (reg >> 14) & 1; | 186 | if ((reg >> 14) & 1) { |
187 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
188 | return true; | ||
189 | } | ||
176 | if (!entry->next) | 190 | if (!entry->next) |
177 | break; | 191 | break; |
178 | entry = irq_2_pin + entry->next; | 192 | entry = irq_2_pin + entry->next; |
179 | } | 193 | } |
180 | spin_unlock_irqrestore(&ioapic_lock, flags); | 194 | spin_unlock_irqrestore(&ioapic_lock, flags); |
181 | return pending; | 195 | |
196 | return false; | ||
182 | } | 197 | } |
183 | 198 | ||
184 | /* | 199 | /* |
@@ -902,9 +917,8 @@ static void __init setup_IO_APIC_irqs(void) | |||
902 | static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector) | 917 | static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector) |
903 | { | 918 | { |
904 | struct IO_APIC_route_entry entry; | 919 | struct IO_APIC_route_entry entry; |
905 | unsigned long flags; | ||
906 | 920 | ||
907 | memset(&entry,0,sizeof(entry)); | 921 | memset(&entry, 0, sizeof(entry)); |
908 | 922 | ||
909 | disable_8259A_irq(0); | 923 | disable_8259A_irq(0); |
910 | 924 | ||
@@ -932,10 +946,7 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in | |||
932 | /* | 946 | /* |
933 | * Add it to the IO-APIC irq-routing table: | 947 | * Add it to the IO-APIC irq-routing table: |
934 | */ | 948 | */ |
935 | spin_lock_irqsave(&ioapic_lock, flags); | 949 | ioapic_write_entry(apic, pin, entry); |
936 | io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1)); | ||
937 | io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0)); | ||
938 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
939 | 950 | ||
940 | enable_8259A_irq(0); | 951 | enable_8259A_irq(0); |
941 | } | 952 | } |
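This hunk, like the two later ones in unlock_ExtINT_logic(), replaces open-coded pairs of io_apic_write() calls with ioapic_write_entry(), centralizing both the locking and the 64-bit-entry-as-two-words access. A plausible sketch of that helper, assuming a union is used for the type punning (the real definition lives elsewhere in the file):

	/* Plausible shape of the helper, not a verbatim copy: a route entry is
	 * 64 bits wide but the I/O APIC window is 32 bits, so it is written as
	 * two words at registers 0x10+2*pin and 0x11+2*pin, under ioapic_lock,
	 * exactly as the removed open-coded sequence did. */
	union entry_union {
		struct { u32 w1, w2; };
		struct IO_APIC_route_entry entry;
	};

	static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
	{
		union entry_union eu;
		unsigned long flags;

		eu.entry = e;
		spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(apic, 0x11 + 2 * pin, eu.w2);
		io_apic_write(apic, 0x10 + 2 * pin, eu.w1);
		spin_unlock_irqrestore(&ioapic_lock, flags);
	}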
@@ -1066,8 +1077,7 @@ void __apicdebuginit print_local_APIC(void * dummy) | |||
1066 | 1077 | ||
1067 | printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", | 1078 | printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", |
1068 | smp_processor_id(), hard_smp_processor_id()); | 1079 | smp_processor_id(), hard_smp_processor_id()); |
1069 | v = apic_read(APIC_ID); | 1080 | printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(read_apic_id())); |
1070 | printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v)); | ||
1071 | v = apic_read(APIC_LVR); | 1081 | v = apic_read(APIC_LVR); |
1072 | printk(KERN_INFO "... APIC VERSION: %08x\n", v); | 1082 | printk(KERN_INFO "... APIC VERSION: %08x\n", v); |
1073 | ver = GET_APIC_VERSION(v); | 1083 | ver = GET_APIC_VERSION(v); |
@@ -1261,7 +1271,7 @@ void disable_IO_APIC(void) | |||
1261 | entry.dest_mode = 0; /* Physical */ | 1271 | entry.dest_mode = 0; /* Physical */ |
1262 | entry.delivery_mode = dest_ExtINT; /* ExtInt */ | 1272 | entry.delivery_mode = dest_ExtINT; /* ExtInt */ |
1263 | entry.vector = 0; | 1273 | entry.vector = 0; |
1264 | entry.dest = GET_APIC_ID(apic_read(APIC_ID)); | 1274 | entry.dest = GET_APIC_ID(read_apic_id()); |
1265 | 1275 | ||
1266 | /* | 1276 | /* |
1267 | * Add it to the IO-APIC irq-routing table: | 1277 | * Add it to the IO-APIC irq-routing table: |
@@ -1352,9 +1362,7 @@ static int ioapic_retrigger_irq(unsigned int irq) | |||
1352 | unsigned long flags; | 1362 | unsigned long flags; |
1353 | 1363 | ||
1354 | spin_lock_irqsave(&vector_lock, flags); | 1364 | spin_lock_irqsave(&vector_lock, flags); |
1355 | cpus_clear(mask); | 1365 | mask = cpumask_of_cpu(first_cpu(cfg->domain)); |
1356 | cpu_set(first_cpu(cfg->domain), mask); | ||
1357 | |||
1358 | send_IPI_mask(mask, cfg->vector); | 1366 | send_IPI_mask(mask, cfg->vector); |
1359 | spin_unlock_irqrestore(&vector_lock, flags); | 1367 | spin_unlock_irqrestore(&vector_lock, flags); |
1360 | 1368 | ||
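cpus_clear() plus cpu_set() built a single-CPU mask by hand; cpumask_of_cpu() expresses the same thing in one step:

	cpumask_t mask;

	/* old form: start empty, then set one bit */
	cpus_clear(mask);
	cpu_set(first_cpu(cfg->domain), mask);

	/* new form: the identical single-bit mask in one expression */
	mask = cpumask_of_cpu(first_cpu(cfg->domain));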
@@ -1517,8 +1525,7 @@ static inline void init_IO_APIC_traps(void) | |||
1517 | * 0x80, because int 0x80 is hm, kind of importantish. ;) | 1525 | * 0x80, because int 0x80 is hm, kind of importantish. ;) |
1518 | */ | 1526 | */ |
1519 | for (irq = 0; irq < NR_IRQS ; irq++) { | 1527 | for (irq = 0; irq < NR_IRQS ; irq++) { |
1520 | int tmp = irq; | 1528 | if (IO_APIC_IRQ(irq) && !irq_cfg[irq].vector) { |
1521 | if (IO_APIC_IRQ(tmp) && !irq_cfg[tmp].vector) { | ||
1522 | /* | 1529 | /* |
1523 | * Hmm.. We don't have an entry for this, | 1530 | * Hmm.. We don't have an entry for this, |
1524 | * so default to an old-fashioned 8259 | 1531 | * so default to an old-fashioned 8259 |
@@ -1597,17 +1604,14 @@ static inline void unlock_ExtINT_logic(void) | |||
1597 | int apic, pin, i; | 1604 | int apic, pin, i; |
1598 | struct IO_APIC_route_entry entry0, entry1; | 1605 | struct IO_APIC_route_entry entry0, entry1; |
1599 | unsigned char save_control, save_freq_select; | 1606 | unsigned char save_control, save_freq_select; |
1600 | unsigned long flags; | ||
1601 | 1607 | ||
1602 | pin = find_isa_irq_pin(8, mp_INT); | 1608 | pin = find_isa_irq_pin(8, mp_INT); |
1603 | apic = find_isa_irq_apic(8, mp_INT); | 1609 | apic = find_isa_irq_apic(8, mp_INT); |
1604 | if (pin == -1) | 1610 | if (pin == -1) |
1605 | return; | 1611 | return; |
1606 | 1612 | ||
1607 | spin_lock_irqsave(&ioapic_lock, flags); | 1613 | entry0 = ioapic_read_entry(apic, pin); |
1608 | *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin); | 1614 | |
1609 | *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin); | ||
1610 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
1611 | clear_IO_APIC_pin(apic, pin); | 1615 | clear_IO_APIC_pin(apic, pin); |
1612 | 1616 | ||
1613 | memset(&entry1, 0, sizeof(entry1)); | 1617 | memset(&entry1, 0, sizeof(entry1)); |
@@ -1620,10 +1624,7 @@ static inline void unlock_ExtINT_logic(void) | |||
1620 | entry1.trigger = 0; | 1624 | entry1.trigger = 0; |
1621 | entry1.vector = 0; | 1625 | entry1.vector = 0; |
1622 | 1626 | ||
1623 | spin_lock_irqsave(&ioapic_lock, flags); | 1627 | ioapic_write_entry(apic, pin, entry1); |
1624 | io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1)); | ||
1625 | io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0)); | ||
1626 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
1627 | 1628 | ||
1628 | save_control = CMOS_READ(RTC_CONTROL); | 1629 | save_control = CMOS_READ(RTC_CONTROL); |
1629 | save_freq_select = CMOS_READ(RTC_FREQ_SELECT); | 1630 | save_freq_select = CMOS_READ(RTC_FREQ_SELECT); |
@@ -1642,10 +1643,7 @@ static inline void unlock_ExtINT_logic(void) | |||
1642 | CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); | 1643 | CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); |
1643 | clear_IO_APIC_pin(apic, pin); | 1644 | clear_IO_APIC_pin(apic, pin); |
1644 | 1645 | ||
1645 | spin_lock_irqsave(&ioapic_lock, flags); | 1646 | ioapic_write_entry(apic, pin, entry0); |
1646 | io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1)); | ||
1647 | io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0)); | ||
1648 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
1649 | } | 1647 | } |
1650 | 1648 | ||
1651 | /* | 1649 | /* |
@@ -2314,7 +2312,6 @@ static struct resource * __init ioapic_setup_resources(void) | |||
2314 | res = (void *)mem; | 2312 | res = (void *)mem; |
2315 | 2313 | ||
2316 | if (mem != NULL) { | 2314 | if (mem != NULL) { |
2317 | memset(mem, 0, n); | ||
2318 | mem += sizeof(struct resource) * nr_ioapics; | 2315 | mem += sizeof(struct resource) * nr_ioapics; |
2319 | 2316 | ||
2320 | for (i = 0; i < nr_ioapics; i++) { | 2317 | for (i = 0; i < nr_ioapics; i++) { |
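The deleted memset() is presumably redundant rather than wrong: the buffer comes from the boot-time allocator, and alloc_bootmem() already returns zero-filled memory. This is an assumption about the allocation site, which sits above the hunk:

	/* Assumed allocation site (not shown in the hunk): */
	mem = alloc_bootmem(n);		/* already zero-filled */
	/* ... so memset(mem, 0, n) duplicated work and can go. */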
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c new file mode 100644 index 000000000000..c0df7b89ca23 --- /dev/null +++ b/arch/x86/kernel/ipi.c | |||
@@ -0,0 +1,178 @@ | |||
1 | #include <linux/cpumask.h> | ||
2 | #include <linux/interrupt.h> | ||
3 | #include <linux/init.h> | ||
4 | |||
5 | #include <linux/mm.h> | ||
6 | #include <linux/delay.h> | ||
7 | #include <linux/spinlock.h> | ||
8 | #include <linux/kernel_stat.h> | ||
9 | #include <linux/mc146818rtc.h> | ||
10 | #include <linux/cache.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/cpu.h> | ||
13 | #include <linux/module.h> | ||
14 | |||
15 | #include <asm/smp.h> | ||
16 | #include <asm/mtrr.h> | ||
17 | #include <asm/tlbflush.h> | ||
18 | #include <asm/mmu_context.h> | ||
19 | #include <asm/apic.h> | ||
20 | #include <asm/proto.h> | ||
21 | |||
22 | #ifdef CONFIG_X86_32 | ||
23 | #include <mach_apic.h> | ||
24 | /* | ||
25 | * the following functions deal with sending IPIs between CPUs. | ||
26 | * | ||
27 | * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. | ||
28 | */ | ||
29 | |||
30 | static inline int __prepare_ICR(unsigned int shortcut, int vector) | ||
31 | { | ||
32 | unsigned int icr = shortcut | APIC_DEST_LOGICAL; | ||
33 | |||
34 | switch (vector) { | ||
35 | default: | ||
36 | icr |= APIC_DM_FIXED | vector; | ||
37 | break; | ||
38 | case NMI_VECTOR: | ||
39 | icr |= APIC_DM_NMI; | ||
40 | break; | ||
41 | } | ||
42 | return icr; | ||
43 | } | ||
44 | |||
45 | static inline int __prepare_ICR2(unsigned int mask) | ||
46 | { | ||
47 | return SET_APIC_DEST_FIELD(mask); | ||
48 | } | ||
49 | |||
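A worked example of what the two helpers above produce for an ordinary fixed IPI, using constant values from apicdef.h (destination byte in ICR2 bits 24-31, delivery mode and vector in ICR's low word; flat logical mode assumed):

	/*   __prepare_ICR(0, 0x30)
	 *     = APIC_DEST_LOGICAL | APIC_DM_FIXED | 0x30
	 *     = 0x800             | 0x0           | 0x30 = 0x830
	 *
	 *   __prepare_ICR2(0x03)
	 *     = SET_APIC_DEST_FIELD(0x03) = 0x03 << 24   = 0x03000000
	 *
	 * i.e. "deliver fixed vector 0x30 to logical CPUs 0 and 1"
	 * (vector and mask values are illustrative). */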
50 | void __send_IPI_shortcut(unsigned int shortcut, int vector) | ||
51 | { | ||
52 | /* | ||
53 | * Subtle. In the case of the 'never do double writes' workaround | ||
54 | * we have to lock out interrupts to be safe. As we don't care | ||
55 | * about the value read we use an atomic rmw access to avoid costly | ||
56 | * cli/sti. Otherwise we use an even cheaper single atomic write | ||
57 | * to the APIC. | ||
58 | */ | ||
59 | unsigned int cfg; | ||
60 | |||
61 | /* | ||
62 | * Wait for idle. | ||
63 | */ | ||
64 | apic_wait_icr_idle(); | ||
65 | |||
66 | /* | ||
67 | * No need to touch the target chip field | ||
68 | */ | ||
69 | cfg = __prepare_ICR(shortcut, vector); | ||
70 | |||
71 | /* | ||
72 | * Send the IPI. The write to APIC_ICR fires this off. | ||
73 | */ | ||
74 | apic_write_around(APIC_ICR, cfg); | ||
75 | } | ||
76 | |||
77 | void send_IPI_self(int vector) | ||
78 | { | ||
79 | __send_IPI_shortcut(APIC_DEST_SELF, vector); | ||
80 | } | ||
81 | |||
82 | /* | ||
83 | * This is used to send an IPI with no shorthand notation (the destination is | ||
84 | * specified in bits 56 to 63 of the ICR). | ||
85 | */ | ||
86 | static inline void __send_IPI_dest_field(unsigned long mask, int vector) | ||
87 | { | ||
88 | unsigned long cfg; | ||
89 | |||
90 | /* | ||
91 | * Wait for idle. | ||
92 | */ | ||
93 | if (unlikely(vector == NMI_VECTOR)) | ||
94 | safe_apic_wait_icr_idle(); | ||
95 | else | ||
96 | apic_wait_icr_idle(); | ||
97 | |||
98 | /* | ||
99 | * prepare target chip field | ||
100 | */ | ||
101 | cfg = __prepare_ICR2(mask); | ||
102 | apic_write_around(APIC_ICR2, cfg); | ||
103 | |||
104 | /* | ||
105 | * program the ICR | ||
106 | */ | ||
107 | cfg = __prepare_ICR(0, vector); | ||
108 | |||
109 | /* | ||
110 | * Send the IPI. The write to APIC_ICR fires this off. | ||
111 | */ | ||
112 | apic_write_around(APIC_ICR, cfg); | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * This is only used on smaller machines. | ||
117 | */ | ||
118 | void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) | ||
119 | { | ||
120 | unsigned long mask = cpus_addr(cpumask)[0]; | ||
121 | unsigned long flags; | ||
122 | |||
123 | local_irq_save(flags); | ||
124 | WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]); | ||
125 | __send_IPI_dest_field(mask, vector); | ||
126 | local_irq_restore(flags); | ||
127 | } | ||
128 | |||
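The cpus_addr(cpumask)[0] access and the WARN_ON() make the limitation explicit: only the first long of the mask is sent, so this path addresses at most BITS_PER_LONG CPUs, the "smaller machines" of the comment. A hypothetical call:

	/* Kick CPU 2 with an illustrative vector 0xfb (both values made up);
	 * valid only while every online CPU fits in the mask's first word. */
	send_IPI_mask_bitmask(cpumask_of_cpu(2), 0xfb);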
129 | void send_IPI_mask_sequence(cpumask_t mask, int vector) | ||
130 | { | ||
131 | unsigned long flags; | ||
132 | unsigned int query_cpu; | ||
133 | |||
134 | /* | ||
135 | * Hack. The clustered APIC addressing mode doesn't allow us to send | ||
136 | * to an arbitrary mask, so I do a unicast to each CPU instead. This | ||
137 | * should be modified to do 1 message per cluster ID - mbligh | ||
138 | */ | ||
139 | |||
140 | local_irq_save(flags); | ||
141 | for_each_possible_cpu(query_cpu) { | ||
142 | if (cpu_isset(query_cpu, mask)) { | ||
143 | __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), | ||
144 | vector); | ||
145 | } | ||
146 | } | ||
147 | local_irq_restore(flags); | ||
148 | } | ||
149 | |||
150 | /* must come after the send_IPI functions above for inlining */ | ||
151 | #include <mach_ipi.h> | ||
152 | static int convert_apicid_to_cpu(int apic_id) | ||
153 | { | ||
154 | int i; | ||
155 | |||
156 | for_each_possible_cpu(i) { | ||
157 | if (per_cpu(x86_cpu_to_apicid, i) == apic_id) | ||
158 | return i; | ||
159 | } | ||
160 | return -1; | ||
161 | } | ||
162 | |||
163 | int safe_smp_processor_id(void) | ||
164 | { | ||
165 | int apicid, cpuid; | ||
166 | |||
167 | if (!boot_cpu_has(X86_FEATURE_APIC)) | ||
168 | return 0; | ||
169 | |||
170 | apicid = hard_smp_processor_id(); | ||
171 | if (apicid == BAD_APICID) | ||
172 | return 0; | ||
173 | |||
174 | cpuid = convert_apicid_to_cpu(apicid); | ||
175 | |||
176 | return cpuid >= 0 ? cpuid : 0; | ||
177 | } | ||
178 | #endif | ||
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index cef054b09d27..6ea67b76a214 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -79,7 +79,7 @@ unsigned int do_IRQ(struct pt_regs *regs) | |||
79 | 79 | ||
80 | if (unlikely((unsigned)irq >= NR_IRQS)) { | 80 | if (unlikely((unsigned)irq >= NR_IRQS)) { |
81 | printk(KERN_EMERG "%s: cannot handle IRQ %d\n", | 81 | printk(KERN_EMERG "%s: cannot handle IRQ %d\n", |
82 | __FUNCTION__, irq); | 82 | __func__, irq); |
83 | BUG(); | 83 | BUG(); |
84 | } | 84 | } |
85 | 85 | ||
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c new file mode 100644 index 000000000000..8c7e555f6d39 --- /dev/null +++ b/arch/x86/kernel/kgdb.c | |||
@@ -0,0 +1,571 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License as published by the | ||
4 | * Free Software Foundation; either version 2, or (at your option) any | ||
5 | * later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, but | ||
8 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
10 | * General Public License for more details. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | /* | ||
15 | * Copyright (C) 2004 Amit S. Kale <amitkale@linsyssoft.com> | ||
16 | * Copyright (C) 2000-2001 VERITAS Software Corporation. | ||
17 | * Copyright (C) 2002 Andi Kleen, SuSE Labs | ||
18 | * Copyright (C) 2004 LinSysSoft Technologies Pvt. Ltd. | ||
19 | * Copyright (C) 2007 MontaVista Software, Inc. | ||
20 | * Copyright (C) 2007-2008 Jason Wessel, Wind River Systems, Inc. | ||
21 | */ | ||
22 | /**************************************************************************** | ||
23 | * Contributor: Lake Stevens Instrument Division$ | ||
24 | * Written by: Glenn Engel $ | ||
25 | * Updated by: Amit Kale <akale@veritas.com> | ||
26 | * Updated by: Tom Rini <trini@kernel.crashing.org> | ||
27 | * Updated by: Jason Wessel <jason.wessel@windriver.com> | ||
28 | * Modified for 386 by Jim Kingdon, Cygnus Support. | ||
29 | * Original kgdb, compatibility with 2.1.xx kernel by | ||
30 | * David Grothe <dave@gcom.com> | ||
31 | * Integrated into 2.2.5 kernel by Tigran Aivazian <tigran@sco.com> | ||
32 | * X86_64 changes from Andi Kleen's patch merged by Jim Houston | ||
33 | */ | ||
34 | #include <linux/spinlock.h> | ||
35 | #include <linux/kdebug.h> | ||
36 | #include <linux/string.h> | ||
37 | #include <linux/kernel.h> | ||
38 | #include <linux/ptrace.h> | ||
39 | #include <linux/sched.h> | ||
40 | #include <linux/delay.h> | ||
41 | #include <linux/kgdb.h> | ||
42 | #include <linux/init.h> | ||
43 | #include <linux/smp.h> | ||
44 | #include <linux/nmi.h> | ||
45 | |||
46 | #include <asm/apicdef.h> | ||
47 | #include <asm/system.h> | ||
48 | |||
49 | #ifdef CONFIG_X86_32 | ||
50 | # include <mach_ipi.h> | ||
51 | #else | ||
52 | # include <asm/mach_apic.h> | ||
53 | #endif | ||
54 | |||
55 | /* | ||
56 | * Put the error code here just in case the user cares: | ||
57 | */ | ||
58 | static int gdb_x86errcode; | ||
59 | |||
60 | /* | ||
61 | * Likewise, the vector number here (since GDB only gets the signal | ||
62 | * number through the usual means, and that's not very specific): | ||
63 | */ | ||
64 | static int gdb_x86vector = -1; | ||
65 | |||
66 | /** | ||
67 | * pt_regs_to_gdb_regs - Convert ptrace regs to GDB regs | ||
68 | * @gdb_regs: A pointer to hold the registers in the order GDB wants. | ||
69 | * @regs: The &struct pt_regs of the current process. | ||
70 | * | ||
71 | * Convert the pt_regs in @regs into the format for registers that | ||
72 | * GDB expects, stored in @gdb_regs. | ||
73 | */ | ||
74 | void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | ||
75 | { | ||
76 | gdb_regs[GDB_AX] = regs->ax; | ||
77 | gdb_regs[GDB_BX] = regs->bx; | ||
78 | gdb_regs[GDB_CX] = regs->cx; | ||
79 | gdb_regs[GDB_DX] = regs->dx; | ||
80 | gdb_regs[GDB_SI] = regs->si; | ||
81 | gdb_regs[GDB_DI] = regs->di; | ||
82 | gdb_regs[GDB_BP] = regs->bp; | ||
83 | gdb_regs[GDB_PS] = regs->flags; | ||
84 | gdb_regs[GDB_PC] = regs->ip; | ||
85 | #ifdef CONFIG_X86_32 | ||
86 | gdb_regs[GDB_DS] = regs->ds; | ||
87 | gdb_regs[GDB_ES] = regs->es; | ||
88 | gdb_regs[GDB_CS] = regs->cs; | ||
89 | gdb_regs[GDB_SS] = __KERNEL_DS; | ||
90 | gdb_regs[GDB_FS] = 0xFFFF; | ||
91 | gdb_regs[GDB_GS] = 0xFFFF; | ||
92 | #else | ||
93 | gdb_regs[GDB_R8] = regs->r8; | ||
94 | gdb_regs[GDB_R9] = regs->r9; | ||
95 | gdb_regs[GDB_R10] = regs->r10; | ||
96 | gdb_regs[GDB_R11] = regs->r11; | ||
97 | gdb_regs[GDB_R12] = regs->r12; | ||
98 | gdb_regs[GDB_R13] = regs->r13; | ||
99 | gdb_regs[GDB_R14] = regs->r14; | ||
100 | gdb_regs[GDB_R15] = regs->r15; | ||
101 | #endif | ||
102 | gdb_regs[GDB_SP] = regs->sp; | ||
103 | } | ||
104 | |||
105 | /** | ||
106 | * sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs | ||
107 | * @gdb_regs: A pointer to hold the registers in the order GDB wants. | ||
108 | * @p: The &struct task_struct of the desired process. | ||
109 | * | ||
110 | * Convert the register values of the sleeping process in @p to | ||
111 | * the format that GDB expects. | ||
112 | * This function is called when kgdb does not have access to the | ||
113 | * &struct pt_regs and therefore it should fill the gdb registers | ||
114 | * @gdb_regs with what has been saved in &struct thread_struct | ||
115 | * thread field during switch_to. | ||
116 | */ | ||
117 | void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) | ||
118 | { | ||
119 | gdb_regs[GDB_AX] = 0; | ||
120 | gdb_regs[GDB_BX] = 0; | ||
121 | gdb_regs[GDB_CX] = 0; | ||
122 | gdb_regs[GDB_DX] = 0; | ||
123 | gdb_regs[GDB_SI] = 0; | ||
124 | gdb_regs[GDB_DI] = 0; | ||
125 | gdb_regs[GDB_BP] = *(unsigned long *)p->thread.sp; | ||
126 | #ifdef CONFIG_X86_32 | ||
127 | gdb_regs[GDB_DS] = __KERNEL_DS; | ||
128 | gdb_regs[GDB_ES] = __KERNEL_DS; | ||
129 | gdb_regs[GDB_PS] = 0; | ||
130 | gdb_regs[GDB_CS] = __KERNEL_CS; | ||
131 | gdb_regs[GDB_PC] = p->thread.ip; | ||
132 | gdb_regs[GDB_SS] = __KERNEL_DS; | ||
133 | gdb_regs[GDB_FS] = 0xFFFF; | ||
134 | gdb_regs[GDB_GS] = 0xFFFF; | ||
135 | #else | ||
136 | gdb_regs[GDB_PS] = *(unsigned long *)(p->thread.sp + 8); | ||
137 | gdb_regs[GDB_PC] = 0; | ||
138 | gdb_regs[GDB_R8] = 0; | ||
139 | gdb_regs[GDB_R9] = 0; | ||
140 | gdb_regs[GDB_R10] = 0; | ||
141 | gdb_regs[GDB_R11] = 0; | ||
142 | gdb_regs[GDB_R12] = 0; | ||
143 | gdb_regs[GDB_R13] = 0; | ||
144 | gdb_regs[GDB_R14] = 0; | ||
145 | gdb_regs[GDB_R15] = 0; | ||
146 | #endif | ||
147 | gdb_regs[GDB_SP] = p->thread.sp; | ||
148 | } | ||
149 | |||
150 | /** | ||
151 | * gdb_regs_to_pt_regs - Convert GDB regs to ptrace regs. | ||
152 | * @gdb_regs: A pointer to hold the registers we've received from GDB. | ||
153 | * @regs: A pointer to a &struct pt_regs to hold these values in. | ||
154 | * | ||
155 | * Convert the GDB regs in @gdb_regs into the pt_regs, and store them | ||
156 | * in @regs. | ||
157 | */ | ||
158 | void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) | ||
159 | { | ||
160 | regs->ax = gdb_regs[GDB_AX]; | ||
161 | regs->bx = gdb_regs[GDB_BX]; | ||
162 | regs->cx = gdb_regs[GDB_CX]; | ||
163 | regs->dx = gdb_regs[GDB_DX]; | ||
164 | regs->si = gdb_regs[GDB_SI]; | ||
165 | regs->di = gdb_regs[GDB_DI]; | ||
166 | regs->bp = gdb_regs[GDB_BP]; | ||
167 | regs->flags = gdb_regs[GDB_PS]; | ||
168 | regs->ip = gdb_regs[GDB_PC]; | ||
169 | #ifdef CONFIG_X86_32 | ||
170 | regs->ds = gdb_regs[GDB_DS]; | ||
171 | regs->es = gdb_regs[GDB_ES]; | ||
172 | regs->cs = gdb_regs[GDB_CS]; | ||
173 | #else | ||
174 | regs->r8 = gdb_regs[GDB_R8]; | ||
175 | regs->r9 = gdb_regs[GDB_R9]; | ||
176 | regs->r10 = gdb_regs[GDB_R10]; | ||
177 | regs->r11 = gdb_regs[GDB_R11]; | ||
178 | regs->r12 = gdb_regs[GDB_R12]; | ||
179 | regs->r13 = gdb_regs[GDB_R13]; | ||
180 | regs->r14 = gdb_regs[GDB_R14]; | ||
181 | regs->r15 = gdb_regs[GDB_R15]; | ||
182 | #endif | ||
183 | } | ||
184 | |||
185 | static struct hw_breakpoint { | ||
186 | unsigned enabled; | ||
187 | unsigned type; | ||
188 | unsigned len; | ||
189 | unsigned long addr; | ||
190 | } breakinfo[4]; | ||
191 | |||
192 | static void kgdb_correct_hw_break(void) | ||
193 | { | ||
194 | unsigned long dr7; | ||
195 | int correctit = 0; | ||
196 | int breakbit; | ||
197 | int breakno; | ||
198 | |||
199 | get_debugreg(dr7, 7); | ||
200 | for (breakno = 0; breakno < 4; breakno++) { | ||
201 | breakbit = 2 << (breakno << 1); | ||
202 | if (!(dr7 & breakbit) && breakinfo[breakno].enabled) { | ||
203 | correctit = 1; | ||
204 | dr7 |= breakbit; | ||
205 | dr7 &= ~(0xf0000 << (breakno << 2)); | ||
206 | dr7 |= ((breakinfo[breakno].len << 2) | | ||
207 | breakinfo[breakno].type) << | ||
208 | ((breakno << 2) + 16); | ||
209 | if (breakno >= 0 && breakno <= 3) | ||
210 | set_debugreg(breakinfo[breakno].addr, breakno); | ||
211 | |||
212 | } else { | ||
213 | if ((dr7 & breakbit) && !breakinfo[breakno].enabled) { | ||
214 | correctit = 1; | ||
215 | dr7 &= ~breakbit; | ||
216 | dr7 &= ~(0xf0000 << (breakno << 2)); | ||
217 | } | ||
218 | } | ||
219 | } | ||
220 | if (correctit) | ||
221 | set_debugreg(dr7, 7); | ||
222 | } | ||
223 | |||
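Worked example of the DR7 packing above, for slot 1 holding a 4-byte write watchpoint (breakinfo[1].len = 3, breakinfo[1].type = 1):

	/*   enable bit : 2 << (1 << 1)                     = 0x00000008  (G1)
	 *   len/type   : ((3 << 2) | 1) << ((1 << 2) + 16)
	 *              = 0xd << 20                         = 0x00d00000
	 *   DR7        = 0x00d00000 | 0x00000008           = 0x00d00008
	 *
	 * set_debugreg(breakinfo[1].addr, 1) then loads DR1 with the
	 * watched address. */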
224 | static int | ||
225 | kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype) | ||
226 | { | ||
227 | int i; | ||
228 | |||
229 | for (i = 0; i < 4; i++) | ||
230 | if (breakinfo[i].addr == addr && breakinfo[i].enabled) | ||
231 | break; | ||
232 | if (i == 4) | ||
233 | return -1; | ||
234 | |||
235 | breakinfo[i].enabled = 0; | ||
236 | |||
237 | return 0; | ||
238 | } | ||
239 | |||
240 | static void kgdb_remove_all_hw_break(void) | ||
241 | { | ||
242 | int i; | ||
243 | |||
244 | for (i = 0; i < 4; i++) | ||
245 | memset(&breakinfo[i], 0, sizeof(struct hw_breakpoint)); | ||
246 | } | ||
247 | |||
248 | static int | ||
249 | kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype) | ||
250 | { | ||
251 | unsigned type; | ||
252 | int i; | ||
253 | |||
254 | for (i = 0; i < 4; i++) | ||
255 | if (!breakinfo[i].enabled) | ||
256 | break; | ||
257 | if (i == 4) | ||
258 | return -1; | ||
259 | |||
260 | switch (bptype) { | ||
261 | case BP_HARDWARE_BREAKPOINT: | ||
262 | type = 0; | ||
263 | len = 1; | ||
264 | break; | ||
265 | case BP_WRITE_WATCHPOINT: | ||
266 | type = 1; | ||
267 | break; | ||
268 | case BP_ACCESS_WATCHPOINT: | ||
269 | type = 3; | ||
270 | break; | ||
271 | default: | ||
272 | return -1; | ||
273 | } | ||
274 | |||
275 | if (len == 1 || len == 2 || len == 4) | ||
276 | breakinfo[i].len = len - 1; | ||
277 | else | ||
278 | return -1; | ||
279 | |||
280 | breakinfo[i].enabled = 1; | ||
281 | breakinfo[i].addr = addr; | ||
282 | breakinfo[i].type = type; | ||
283 | |||
284 | return 0; | ||
285 | } | ||
286 | |||
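Worked call for the encoding above: a GDB watchpoint on a 4-byte variable arrives as kgdb_set_hw_break(addr, 4, BP_WRITE_WATCHPOINT) and is recorded in the first free slot as:

	/*   breakinfo[i].type    = 1     (write watchpoint)
	 *   breakinfo[i].len     = 3     (4 bytes, stored as len - 1)
	 *   breakinfo[i].addr    = addr
	 *   breakinfo[i].enabled = 1
	 * kgdb_correct_hw_break() later folds this into DR7 and DR0-DR3. */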
287 | /** | ||
288 | * kgdb_disable_hw_debug - Disable hardware debugging while we are in kgdb. | ||
289 | * @regs: Current &struct pt_regs. | ||
290 | * | ||
291 | * This function will be called if the particular architecture must | ||
292 | * disable hardware debugging while it is processing gdb packets or | ||
293 | * handling exceptions. | ||
294 | */ | ||
295 | void kgdb_disable_hw_debug(struct pt_regs *regs) | ||
296 | { | ||
297 | /* Disable hardware debugging while we are in kgdb: */ | ||
298 | set_debugreg(0UL, 7); | ||
299 | } | ||
300 | |||
301 | /** | ||
302 | * kgdb_post_primary_code - Save error vector/code numbers. | ||
303 | * @regs: Original pt_regs. | ||
304 | * @e_vector: Original error vector. | ||
305 | * @err_code: Original error code. | ||
306 | * | ||
307 | * This is needed on architectures which support SMP and KGDB. | ||
308 | * This function is called after all the slave cpus have been put | ||
309 | * to a known spin state and the primary CPU has control over KGDB. | ||
310 | */ | ||
311 | void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code) | ||
312 | { | ||
313 | /* primary processor is completely in the debugger */ | ||
314 | gdb_x86vector = e_vector; | ||
315 | gdb_x86errcode = err_code; | ||
316 | } | ||
317 | |||
318 | #ifdef CONFIG_SMP | ||
319 | /** | ||
320 | * kgdb_roundup_cpus - Get other CPUs into a holding pattern | ||
321 | * @flags: Current IRQ state | ||
322 | * | ||
323 | * On SMP systems, we need to get the attention of the other CPUs | ||
324 | * and get them to be in a known state. This should do what is needed | ||
325 | * to get the other CPUs to call kgdb_wait(). Note that on some arches, | ||
326 | * the NMI approach is not used for rounding up all the CPUs. For example, | ||
327 | * in case of MIPS, smp_call_function() is used to round up CPUs. In | ||
328 | * this case, we have to make sure that interrupts are enabled before | ||
329 | * calling smp_call_function(). The argument to this function is | ||
330 | * the flags that will be used when restoring the interrupts. There is | ||
331 | * local_irq_save() call before kgdb_roundup_cpus(). | ||
332 | * | ||
333 | * On non-SMP systems, this is not called. | ||
334 | */ | ||
335 | void kgdb_roundup_cpus(unsigned long flags) | ||
336 | { | ||
337 | send_IPI_allbutself(APIC_DM_NMI); | ||
338 | } | ||
339 | #endif | ||
340 | |||
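For contrast with the NMI shortcut above, a sketch of the smp_call_function() roundup style the comment attributes to MIPS, assuming the 2.6.25-era four-argument smp_call_function(); illustrative only, not the x86 implementation:

	static void kgdb_call_nmi_hook(void *ignored)
	{
		kgdb_nmicallback(raw_smp_processor_id(), NULL);
	}

	static void kgdb_roundup_cpus_alt(unsigned long flags)
	{
		local_irq_enable();	/* IPIs must be deliverable first */
		smp_call_function(kgdb_call_nmi_hook, NULL, 0, 0);
		local_irq_disable();
	}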
341 | /** | ||
342 | * kgdb_arch_handle_exception - Handle architecture specific GDB packets. | ||
343 | * @vector: The error vector of the exception that happened. | ||
344 | * @signo: The signal number of the exception that happened. | ||
345 | * @err_code: The error code of the exception that happened. | ||
346 | * @remcom_in_buffer: The buffer of the packet we have read. | ||
347 | * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into. | ||
348 | * @regs: The &struct pt_regs of the current process. | ||
349 | * | ||
350 | * This function MUST handle the 'c' and 's' command packets, | ||
351 | * as well as packets to set / remove a hardware breakpoint, if used. | ||
352 | * If there are additional packets which the hardware needs to handle, | ||
353 | * they are handled here. The code should return -1 if it wants to | ||
354 | * process more packets, and a %0 or %1 if it wants to exit from the | ||
355 | * kgdb callback. | ||
356 | */ | ||
357 | int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, | ||
358 | char *remcomInBuffer, char *remcomOutBuffer, | ||
359 | struct pt_regs *linux_regs) | ||
360 | { | ||
361 | unsigned long addr; | ||
362 | unsigned long dr6; | ||
363 | char *ptr; | ||
364 | int newPC; | ||
365 | |||
366 | switch (remcomInBuffer[0]) { | ||
367 | case 'c': | ||
368 | case 's': | ||
369 | /* try to read optional parameter, pc unchanged if no parm */ | ||
370 | ptr = &remcomInBuffer[1]; | ||
371 | if (kgdb_hex2long(&ptr, &addr)) | ||
372 | linux_regs->ip = addr; | ||
373 | case 'D': | ||
374 | case 'k': | ||
375 | newPC = linux_regs->ip; | ||
376 | |||
377 | /* clear the trace bit */ | ||
378 | linux_regs->flags &= ~TF_MASK; | ||
379 | atomic_set(&kgdb_cpu_doing_single_step, -1); | ||
380 | |||
381 | /* set the trace bit if we're stepping */ | ||
382 | if (remcomInBuffer[0] == 's') { | ||
383 | linux_regs->flags |= TF_MASK; | ||
384 | kgdb_single_step = 1; | ||
385 | if (kgdb_contthread) { | ||
386 | atomic_set(&kgdb_cpu_doing_single_step, | ||
387 | raw_smp_processor_id()); | ||
388 | } | ||
389 | } | ||
390 | |||
391 | get_debugreg(dr6, 6); | ||
392 | if (!(dr6 & 0x4000)) { | ||
393 | int breakno; | ||
394 | |||
395 | for (breakno = 0; breakno < 4; breakno++) { | ||
396 | if (dr6 & (1 << breakno) && | ||
397 | breakinfo[breakno].type == 0) { | ||
398 | /* Set restore flag: */ | ||
399 | linux_regs->flags |= X86_EFLAGS_RF; | ||
400 | break; | ||
401 | } | ||
402 | } | ||
403 | } | ||
404 | set_debugreg(0UL, 6); | ||
405 | kgdb_correct_hw_break(); | ||
406 | |||
407 | return 0; | ||
408 | } | ||
409 | |||
410 | /* this means that we do not want to exit from the handler: */ | ||
411 | return -1; | ||
412 | } | ||
413 | |||
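The dr6 tests in the handler above decode the debug status register; the relevant bits, from the x86 debug architecture, are:

	/*   dr6 & 0x4000    - BS: the trap came from TF single-stepping
	 *   dr6 & (1 << n)  - Bn: hardware breakpoint n (n = 0..3) was hit
	 *
	 * For an execution breakpoint (type 0) the handler sets EFLAGS.RF so
	 * the CPU skips that breakpoint exactly once on resume instead of
	 * re-faulting on the same instruction. */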
414 | static inline int | ||
415 | single_step_cont(struct pt_regs *regs, struct die_args *args) | ||
416 | { | ||
417 | /* | ||
418 | * Single step exception from kernel space to user space so | ||
419 | * eat the exception and continue the process: | ||
420 | */ | ||
421 | printk(KERN_ERR "KGDB: trap/step from kernel to user space, " | ||
422 | "resuming...\n"); | ||
423 | kgdb_arch_handle_exception(args->trapnr, args->signr, | ||
424 | args->err, "c", "", regs); | ||
425 | |||
426 | return NOTIFY_STOP; | ||
427 | } | ||
428 | |||
429 | static int was_in_debug_nmi[NR_CPUS]; | ||
430 | |||
431 | static int __kgdb_notify(struct die_args *args, unsigned long cmd) | ||
432 | { | ||
433 | struct pt_regs *regs = args->regs; | ||
434 | |||
435 | switch (cmd) { | ||
436 | case DIE_NMI: | ||
437 | if (atomic_read(&kgdb_active) != -1) { | ||
438 | /* KGDB CPU roundup */ | ||
439 | kgdb_nmicallback(raw_smp_processor_id(), regs); | ||
440 | was_in_debug_nmi[raw_smp_processor_id()] = 1; | ||
441 | touch_nmi_watchdog(); | ||
442 | return NOTIFY_STOP; | ||
443 | } | ||
444 | return NOTIFY_DONE; | ||
445 | |||
446 | case DIE_NMI_IPI: | ||
447 | if (atomic_read(&kgdb_active) != -1) { | ||
448 | /* KGDB CPU roundup */ | ||
449 | kgdb_nmicallback(raw_smp_processor_id(), regs); | ||
450 | was_in_debug_nmi[raw_smp_processor_id()] = 1; | ||
451 | touch_nmi_watchdog(); | ||
452 | } | ||
453 | return NOTIFY_DONE; | ||
454 | |||
455 | case DIE_NMIUNKNOWN: | ||
456 | if (was_in_debug_nmi[raw_smp_processor_id()]) { | ||
457 | was_in_debug_nmi[raw_smp_processor_id()] = 0; | ||
458 | return NOTIFY_STOP; | ||
459 | } | ||
460 | return NOTIFY_DONE; | ||
461 | |||
462 | case DIE_NMIWATCHDOG: | ||
463 | if (atomic_read(&kgdb_active) != -1) { | ||
464 | /* KGDB CPU roundup: */ | ||
465 | kgdb_nmicallback(raw_smp_processor_id(), regs); | ||
466 | return NOTIFY_STOP; | ||
467 | } | ||
468 | /* Enter debugger: */ | ||
469 | break; | ||
470 | |||
471 | case DIE_DEBUG: | ||
472 | if (atomic_read(&kgdb_cpu_doing_single_step) == | ||
473 | raw_smp_processor_id() && | ||
474 | user_mode(regs)) | ||
475 | return single_step_cont(regs, args); | ||
476 | /* fall through */ | ||
477 | default: | ||
478 | if (user_mode(regs)) | ||
479 | return NOTIFY_DONE; | ||
480 | } | ||
481 | |||
482 | if (kgdb_handle_exception(args->trapnr, args->signr, args->err, regs)) | ||
483 | return NOTIFY_DONE; | ||
484 | |||
485 | /* Must touch watchdog before return to normal operation */ | ||
486 | touch_nmi_watchdog(); | ||
487 | return NOTIFY_STOP; | ||
488 | } | ||
489 | |||
490 | static int | ||
491 | kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr) | ||
492 | { | ||
493 | unsigned long flags; | ||
494 | int ret; | ||
495 | |||
496 | local_irq_save(flags); | ||
497 | ret = __kgdb_notify(ptr, cmd); | ||
498 | local_irq_restore(flags); | ||
499 | |||
500 | return ret; | ||
501 | } | ||
502 | |||
503 | static struct notifier_block kgdb_notifier = { | ||
504 | .notifier_call = kgdb_notify, | ||
505 | |||
506 | /* | ||
507 | * Lowest-prio notifier priority, we want to be notified last: | ||
508 | */ | ||
509 | .priority = -INT_MAX, | ||
510 | }; | ||
511 | |||
512 | /** | ||
513 | * kgdb_arch_init - Perform any architecture specific initialization. | ||
514 | * | ||
515 | * This function will handle the initialization of any architecture | ||
516 | * specific callbacks. | ||
517 | */ | ||
518 | int kgdb_arch_init(void) | ||
519 | { | ||
520 | return register_die_notifier(&kgdb_notifier); | ||
521 | } | ||
522 | |||
523 | /** | ||
524 | * kgdb_arch_exit - Perform any architecture specific uninitialization. | ||
525 | * | ||
526 | * This function will handle the uninitialization of any architecture | ||
527 | * specific callbacks, for dynamic registration and unregistration. | ||
528 | */ | ||
529 | void kgdb_arch_exit(void) | ||
530 | { | ||
531 | unregister_die_notifier(&kgdb_notifier); | ||
532 | } | ||
533 | |||
534 | /** | ||
535 | * | ||
536 | * kgdb_skipexception - Bail out of KGDB when we've been triggered. | ||
537 | * @exception: Exception vector number | ||
538 | * @regs: Current &struct pt_regs. | ||
539 | * | ||
540 | * On some architectures we need to skip a breakpoint exception when | ||
541 | * it occurs after a breakpoint has been removed. | ||
542 | * | ||
543 | * Skip an int3 exception when it occurs after a breakpoint has been | ||
544 | * removed. Backtrack eip by 1 since the int3 would have caused it to | ||
545 | * increment by 1. | ||
546 | */ | ||
547 | int kgdb_skipexception(int exception, struct pt_regs *regs) | ||
548 | { | ||
549 | if (exception == 3 && kgdb_isremovedbreak(regs->ip - 1)) { | ||
550 | regs->ip -= 1; | ||
551 | return 1; | ||
552 | } | ||
553 | return 0; | ||
554 | } | ||
555 | |||
556 | unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs) | ||
557 | { | ||
558 | if (exception == 3) | ||
559 | return instruction_pointer(regs) - 1; | ||
560 | return instruction_pointer(regs); | ||
561 | } | ||
562 | |||
563 | struct kgdb_arch arch_kgdb_ops = { | ||
564 | /* Breakpoint instruction: */ | ||
565 | .gdb_bpt_instr = { 0xcc }, | ||
566 | .flags = KGDB_HW_BREAKPOINT, | ||
567 | .set_hw_breakpoint = kgdb_set_hw_break, | ||
568 | .remove_hw_breakpoint = kgdb_remove_hw_break, | ||
569 | .remove_all_hw_break = kgdb_remove_all_hw_break, | ||
570 | .correct_hw_break = kgdb_correct_hw_break, | ||
571 | }; | ||
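gdb_bpt_instr above is 0xcc, the one-byte int3 opcode, which is also why kgdb_skipexception() backs ip up by one. A sketch of how the generic kgdb core is expected to plant a software breakpoint with it (names hypothetical; the real core goes through safe kernel-text accessors rather than raw stores):

	static unsigned char saved_byte;

	static void plant_sw_breakpoint(unsigned char *addr)
	{
		saved_byte = *addr;	/* remember the original opcode byte */
		*addr = 0xcc;		/* int3: traps into the debugger */
	}

	static void remove_sw_breakpoint(unsigned char *addr)
	{
		*addr = saved_byte;
	}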
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 34a591283f5d..b8c6743a13da 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c | |||
@@ -410,13 +410,13 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, | |||
410 | static void __kprobes clear_btf(void) | 410 | static void __kprobes clear_btf(void) |
411 | { | 411 | { |
412 | if (test_thread_flag(TIF_DEBUGCTLMSR)) | 412 | if (test_thread_flag(TIF_DEBUGCTLMSR)) |
413 | wrmsrl(MSR_IA32_DEBUGCTLMSR, 0); | 413 | update_debugctlmsr(0); |
414 | } | 414 | } |
415 | 415 | ||
416 | static void __kprobes restore_btf(void) | 416 | static void __kprobes restore_btf(void) |
417 | { | 417 | { |
418 | if (test_thread_flag(TIF_DEBUGCTLMSR)) | 418 | if (test_thread_flag(TIF_DEBUGCTLMSR)) |
419 | wrmsrl(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr); | 419 | update_debugctlmsr(current->thread.debugctlmsr); |
420 | } | 420 | } |
421 | 421 | ||
422 | static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) | 422 | static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) |
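The raw wrmsrl() calls become update_debugctlmsr(), presumably so CPUs without a DEBUGCTL MSR are filtered in one place. A plausible shape for the wrapper (an assumption, not a copy of the real header):

	/* Assumed definition, for illustration only: */
	static inline void update_debugctlmsr(unsigned long debugctlmsr)
	{
	#ifndef CONFIG_X86_DEBUGCTLMSR
		if (boot_cpu_data.x86 < 6)
			return;		/* no DEBUGCTL MSR on this family */
	#endif
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
	}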
@@ -489,7 +489,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, | |||
489 | break; | 489 | break; |
490 | case KPROBE_HIT_SS: | 490 | case KPROBE_HIT_SS: |
491 | if (p == kprobe_running()) { | 491 | if (p == kprobe_running()) { |
492 | regs->flags &= ~TF_MASK; | 492 | regs->flags &= ~X86_EFLAGS_TF; |
493 | regs->flags |= kcb->kprobe_saved_flags; | 493 | regs->flags |= kcb->kprobe_saved_flags; |
494 | return 0; | 494 | return 0; |
495 | } else { | 495 | } else { |
@@ -858,15 +858,15 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs) | |||
858 | if (!cur) | 858 | if (!cur) |
859 | return 0; | 859 | return 0; |
860 | 860 | ||
861 | resume_execution(cur, regs, kcb); | ||
862 | regs->flags |= kcb->kprobe_saved_flags; | ||
863 | trace_hardirqs_fixup_flags(regs->flags); | ||
864 | |||
861 | if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { | 865 | if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { |
862 | kcb->kprobe_status = KPROBE_HIT_SSDONE; | 866 | kcb->kprobe_status = KPROBE_HIT_SSDONE; |
863 | cur->post_handler(cur, regs, 0); | 867 | cur->post_handler(cur, regs, 0); |
864 | } | 868 | } |
865 | 869 | ||
866 | resume_execution(cur, regs, kcb); | ||
867 | regs->flags |= kcb->kprobe_saved_flags; | ||
868 | trace_hardirqs_fixup_flags(regs->flags); | ||
869 | |||
870 | /* Restore back the original saved kprobes variables and continue. */ | 870 | /* Restore back the original saved kprobes variables and continue. */ |
871 | if (kcb->kprobe_status == KPROBE_REENTER) { | 871 | if (kcb->kprobe_status == KPROBE_REENTER) { |
872 | restore_previous_kprobe(kcb); | 872 | restore_previous_kprobe(kcb); |
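Hoisting resume_execution() and the flags restore means a post_handler now runs against the fixed-up register state: regs->ip already points past the probed instruction rather than into the single-step copy. A hypothetical post_handler that observes the difference:

	static void my_post_handler(struct kprobe *p, struct pt_regs *regs,
				    unsigned long flags)
	{
		/* With the reordering, regs->ip is the real resume address. */
		printk(KERN_INFO "kprobe at %p done, resuming at %lx\n",
		       p->addr, regs->ip);
	}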
diff --git a/arch/x86/kernel/mca_32.c b/arch/x86/kernel/mca_32.c index 9482033ed0fe..2dc183758be3 100644 --- a/arch/x86/kernel/mca_32.c +++ b/arch/x86/kernel/mca_32.c | |||
@@ -53,9 +53,9 @@ | |||
53 | #include <linux/init.h> | 53 | #include <linux/init.h> |
54 | #include <asm/arch_hooks.h> | 54 | #include <asm/arch_hooks.h> |
55 | 55 | ||
56 | static unsigned char which_scsi = 0; | 56 | static unsigned char which_scsi; |
57 | 57 | ||
58 | int MCA_bus = 0; | 58 | int MCA_bus; |
59 | EXPORT_SYMBOL(MCA_bus); | 59 | EXPORT_SYMBOL(MCA_bus); |
60 | 60 | ||
61 | /* | 61 | /* |
@@ -68,15 +68,17 @@ static DEFINE_SPINLOCK(mca_lock); | |||
68 | 68 | ||
69 | /* Build the status info for the adapter */ | 69 | /* Build the status info for the adapter */ |
70 | 70 | ||
71 | static void mca_configure_adapter_status(struct mca_device *mca_dev) { | 71 | static void mca_configure_adapter_status(struct mca_device *mca_dev) |
72 | { | ||
72 | mca_dev->status = MCA_ADAPTER_NONE; | 73 | mca_dev->status = MCA_ADAPTER_NONE; |
73 | 74 | ||
74 | mca_dev->pos_id = mca_dev->pos[0] | 75 | mca_dev->pos_id = mca_dev->pos[0] |
75 | + (mca_dev->pos[1] << 8); | 76 | + (mca_dev->pos[1] << 8); |
76 | 77 | ||
77 | if(!mca_dev->pos_id && mca_dev->slot < MCA_MAX_SLOT_NR) { | 78 | if (!mca_dev->pos_id && mca_dev->slot < MCA_MAX_SLOT_NR) { |
78 | 79 | ||
79 | /* id = 0x0000 usually indicates hardware failure, | 80 | /* |
81 | * id = 0x0000 usually indicates hardware failure, | ||
80 | * however, ZP Gu (zpg@castle.net) reports that his 9556 | 82 | * however, ZP Gu (zpg@castle.net) reports that his 9556 |
81 | * has 0x0000 as id and everything still works. There | 83 | * has 0x0000 as id and everything still works. There |
82 | * also seems to be an adapter with id = 0x0000; the | 84 | * also seems to be an adapter with id = 0x0000; the |
@@ -87,9 +89,10 @@ static void mca_configure_adapter_status(struct mca_device *mca_dev) { | |||
87 | mca_dev->status = MCA_ADAPTER_ERROR; | 89 | mca_dev->status = MCA_ADAPTER_ERROR; |
88 | 90 | ||
89 | return; | 91 | return; |
90 | } else if(mca_dev->pos_id != 0xffff) { | 92 | } else if (mca_dev->pos_id != 0xffff) { |
91 | 93 | ||
92 | /* 0xffff usually indicates that there's no adapter, | 94 | /* |
95 | * 0xffff usually indicates that there's no adapter, | ||
93 | * however, some integrated adapters may have 0xffff as | 96 | * however, some integrated adapters may have 0xffff as |
94 | * their id and still be valid. Examples are on-board | 97 | * their id and still be valid. Examples are on-board |
95 | * VGA of the 55sx, the integrated SCSI of the 56 & 57, | 98 | * VGA of the 55sx, the integrated SCSI of the 56 & 57, |
@@ -99,19 +102,19 @@ static void mca_configure_adapter_status(struct mca_device *mca_dev) { | |||
99 | mca_dev->status = MCA_ADAPTER_NORMAL; | 102 | mca_dev->status = MCA_ADAPTER_NORMAL; |
100 | } | 103 | } |
101 | 104 | ||
102 | if((mca_dev->pos_id == 0xffff || | 105 | if ((mca_dev->pos_id == 0xffff || |
103 | mca_dev->pos_id == 0x0000) && mca_dev->slot >= MCA_MAX_SLOT_NR) { | 106 | mca_dev->pos_id == 0x0000) && mca_dev->slot >= MCA_MAX_SLOT_NR) { |
104 | int j; | 107 | int j; |
105 | 108 | ||
106 | for(j = 2; j < 8; j++) { | 109 | for (j = 2; j < 8; j++) { |
107 | if(mca_dev->pos[j] != 0xff) { | 110 | if (mca_dev->pos[j] != 0xff) { |
108 | mca_dev->status = MCA_ADAPTER_NORMAL; | 111 | mca_dev->status = MCA_ADAPTER_NORMAL; |
109 | break; | 112 | break; |
110 | } | 113 | } |
111 | } | 114 | } |
112 | } | 115 | } |
113 | 116 | ||
114 | if(!(mca_dev->pos[2] & MCA_ENABLED)) { | 117 | if (!(mca_dev->pos[2] & MCA_ENABLED)) { |
115 | 118 | ||
116 | /* enabled bit is in POS 2 */ | 119 | /* enabled bit is in POS 2 */ |
117 | 120 | ||
@@ -133,7 +136,7 @@ static struct resource mca_standard_resources[] = { | |||
133 | 136 | ||
134 | #define MCA_STANDARD_RESOURCES ARRAY_SIZE(mca_standard_resources) | 137 | #define MCA_STANDARD_RESOURCES ARRAY_SIZE(mca_standard_resources) |
135 | 138 | ||
136 | /** | 139 | /* |
137 | * mca_read_and_store_pos - read the POS registers into a memory buffer | 140 | * mca_read_and_store_pos - read the POS registers into a memory buffer |
138 | * @pos: a char pointer to 8 bytes, contains the POS register value on | 141 | * @pos: a char pointer to 8 bytes, contains the POS register value on |
139 | * successful return | 142 | * successful return |
@@ -141,12 +144,14 @@ static struct resource mca_standard_resources[] = { | |||
141 | * Returns 1 if a card actually exists (i.e. the pos isn't | 144 | * Returns 1 if a card actually exists (i.e. the pos isn't |
142 | * all 0xff) or 0 otherwise | 145 | * all 0xff) or 0 otherwise |
143 | */ | 146 | */ |
144 | static int mca_read_and_store_pos(unsigned char *pos) { | 147 | static int mca_read_and_store_pos(unsigned char *pos) |
148 | { | ||
145 | int j; | 149 | int j; |
146 | int found = 0; | 150 | int found = 0; |
147 | 151 | ||
148 | for(j=0; j<8; j++) { | 152 | for (j = 0; j < 8; j++) { |
149 | if((pos[j] = inb_p(MCA_POS_REG(j))) != 0xff) { | 153 | pos[j] = inb_p(MCA_POS_REG(j)); |
154 | if (pos[j] != 0xff) { | ||
150 | /* 0xff all across means no device. 0x00 means | 155 | /* 0xff all across means no device. 0x00 means |
151 | * something's broken, but a device is | 156 | * something's broken, but a device is |
152 | * probably there. However, if you get 0x00 | 157 | * probably there. However, if you get 0x00 |
@@ -167,11 +172,11 @@ static unsigned char mca_pc_read_pos(struct mca_device *mca_dev, int reg) | |||
167 | unsigned char byte; | 172 | unsigned char byte; |
168 | unsigned long flags; | 173 | unsigned long flags; |
169 | 174 | ||
170 | if(reg < 0 || reg >= 8) | 175 | if (reg < 0 || reg >= 8) |
171 | return 0; | 176 | return 0; |
172 | 177 | ||
173 | spin_lock_irqsave(&mca_lock, flags); | 178 | spin_lock_irqsave(&mca_lock, flags); |
174 | if(mca_dev->pos_register) { | 179 | if (mca_dev->pos_register) { |
175 | /* Disable adapter setup, enable motherboard setup */ | 180 | /* Disable adapter setup, enable motherboard setup */ |
176 | 181 | ||
177 | outb_p(0, MCA_ADAPTER_SETUP_REG); | 182 | outb_p(0, MCA_ADAPTER_SETUP_REG); |
@@ -203,7 +208,7 @@ static void mca_pc_write_pos(struct mca_device *mca_dev, int reg, | |||
203 | { | 208 | { |
204 | unsigned long flags; | 209 | unsigned long flags; |
205 | 210 | ||
206 | if(reg < 0 || reg >= 8) | 211 | if (reg < 0 || reg >= 8) |
207 | return; | 212 | return; |
208 | 213 | ||
209 | spin_lock_irqsave(&mca_lock, flags); | 214 | spin_lock_irqsave(&mca_lock, flags); |
@@ -227,17 +232,17 @@ static void mca_pc_write_pos(struct mca_device *mca_dev, int reg, | |||
227 | } | 232 | } |
228 | 233 | ||
229 | /* for the primary MCA bus, we have identity transforms */ | 234 | /* for the primary MCA bus, we have identity transforms */ |
230 | static int mca_dummy_transform_irq(struct mca_device * mca_dev, int irq) | 235 | static int mca_dummy_transform_irq(struct mca_device *mca_dev, int irq) |
231 | { | 236 | { |
232 | return irq; | 237 | return irq; |
233 | } | 238 | } |
234 | 239 | ||
235 | static int mca_dummy_transform_ioport(struct mca_device * mca_dev, int port) | 240 | static int mca_dummy_transform_ioport(struct mca_device *mca_dev, int port) |
236 | { | 241 | { |
237 | return port; | 242 | return port; |
238 | } | 243 | } |
239 | 244 | ||
240 | static void *mca_dummy_transform_memory(struct mca_device * mca_dev, void *mem) | 245 | static void *mca_dummy_transform_memory(struct mca_device *mca_dev, void *mem) |
241 | { | 246 | { |
242 | return mem; | 247 | return mem; |
243 | } | 248 | } |
@@ -251,7 +256,8 @@ static int __init mca_init(void) | |||
251 | short mca_builtin_scsi_ports[] = {0xf7, 0xfd, 0x00}; | 256 | short mca_builtin_scsi_ports[] = {0xf7, 0xfd, 0x00}; |
252 | struct mca_bus *bus; | 257 | struct mca_bus *bus; |
253 | 258 | ||
254 | /* WARNING: Be careful when making changes here. Putting an adapter | 259 | /* |
260 | * WARNING: Be careful when making changes here. Putting an adapter | ||
255 | * and the motherboard simultaneously into setup mode may result in | 261 | * and the motherboard simultaneously into setup mode may result in |
256 | * damage to chips (according to The Indispensable PC Hardware Book | 262 | * damage to chips (according to The Indispensable PC Hardware Book |
257 | * by Hans-Peter Messmer). Also, we disable system interrupts (so | 263 | * by Hans-Peter Messmer). Also, we disable system interrupts (so |
@@ -283,7 +289,7 @@ static int __init mca_init(void) | |||
283 | 289 | ||
284 | /* get the motherboard device */ | 290 | /* get the motherboard device */ |
285 | mca_dev = kzalloc(sizeof(struct mca_device), GFP_KERNEL); | 291 | mca_dev = kzalloc(sizeof(struct mca_device), GFP_KERNEL); |
286 | if(unlikely(!mca_dev)) | 292 | if (unlikely(!mca_dev)) |
287 | goto out_nomem; | 293 | goto out_nomem; |
288 | 294 | ||
289 | /* | 295 | /* |
@@ -309,7 +315,7 @@ static int __init mca_init(void) | |||
309 | mca_register_device(MCA_PRIMARY_BUS, mca_dev); | 315 | mca_register_device(MCA_PRIMARY_BUS, mca_dev); |
310 | 316 | ||
311 | mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC); | 317 | mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC); |
312 | if(unlikely(!mca_dev)) | 318 | if (unlikely(!mca_dev)) |
313 | goto out_unlock_nomem; | 319 | goto out_unlock_nomem; |
314 | 320 | ||
315 | /* Put motherboard into video setup mode, read integrated video | 321 | /* Put motherboard into video setup mode, read integrated video |
@@ -326,7 +332,8 @@ static int __init mca_init(void) | |||
326 | mca_dev->slot = MCA_INTEGVIDEO; | 332 | mca_dev->slot = MCA_INTEGVIDEO; |
327 | mca_register_device(MCA_PRIMARY_BUS, mca_dev); | 333 | mca_register_device(MCA_PRIMARY_BUS, mca_dev); |
328 | 334 | ||
329 | /* Put motherboard into scsi setup mode, read integrated scsi | 335 | /* |
336 | * Put motherboard into scsi setup mode, read integrated scsi | ||
330 | * POS registers, and turn motherboard setup off. | 337 | * POS registers, and turn motherboard setup off. |
331 | * | 338 | * |
332 | * It seems there are two possible SCSI registers. Martin says that | 339 | * It seems there are two possible SCSI registers. Martin says that |
@@ -338,18 +345,18 @@ static int __init mca_init(void) | |||
338 | * machine. | 345 | * machine. |
339 | */ | 346 | */ |
340 | 347 | ||
341 | for(i = 0; (which_scsi = mca_builtin_scsi_ports[i]) != 0; i++) { | 348 | for (i = 0; (which_scsi = mca_builtin_scsi_ports[i]) != 0; i++) { |
342 | outb_p(which_scsi, MCA_MOTHERBOARD_SETUP_REG); | 349 | outb_p(which_scsi, MCA_MOTHERBOARD_SETUP_REG); |
343 | if(mca_read_and_store_pos(pos)) | 350 | if (mca_read_and_store_pos(pos)) |
344 | break; | 351 | break; |
345 | } | 352 | } |
346 | if(which_scsi) { | 353 | if (which_scsi) { |
347 | /* found a scsi card */ | 354 | /* found a scsi card */ |
348 | mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC); | 355 | mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC); |
349 | if(unlikely(!mca_dev)) | 356 | if (unlikely(!mca_dev)) |
350 | goto out_unlock_nomem; | 357 | goto out_unlock_nomem; |
351 | 358 | ||
352 | for(j = 0; j < 8; j++) | 359 | for (j = 0; j < 8; j++) |
353 | mca_dev->pos[j] = pos[j]; | 360 | mca_dev->pos[j] = pos[j]; |
354 | 361 | ||
355 | mca_configure_adapter_status(mca_dev); | 362 | mca_configure_adapter_status(mca_dev); |
@@ -364,21 +371,22 @@ static int __init mca_init(void) | |||
364 | 371 | ||
365 | outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG); | 372 | outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG); |
366 | 373 | ||
367 | /* Now loop over MCA slots: put each adapter into setup mode, and | 374 | /* |
375 | * Now loop over MCA slots: put each adapter into setup mode, and | ||
368 | * read its POS registers. Then put adapter setup off. | 376 | * read its POS registers. Then put adapter setup off. |
369 | */ | 377 | */ |
370 | 378 | ||
371 | for(i=0; i<MCA_MAX_SLOT_NR; i++) { | 379 | for (i = 0; i < MCA_MAX_SLOT_NR; i++) { |
372 | outb_p(0x8|(i&0xf), MCA_ADAPTER_SETUP_REG); | 380 | outb_p(0x8|(i&0xf), MCA_ADAPTER_SETUP_REG); |
373 | if(!mca_read_and_store_pos(pos)) | 381 | if (!mca_read_and_store_pos(pos)) |
374 | continue; | 382 | continue; |
375 | 383 | ||
376 | mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC); | 384 | mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC); |
377 | if(unlikely(!mca_dev)) | 385 | if (unlikely(!mca_dev)) |
378 | goto out_unlock_nomem; | 386 | goto out_unlock_nomem; |
379 | 387 | ||
380 | for(j=0; j<8; j++) | 388 | for (j = 0; j < 8; j++) |
381 | mca_dev->pos[j]=pos[j]; | 389 | mca_dev->pos[j] = pos[j]; |
382 | 390 | ||
383 | mca_dev->driver_loaded = 0; | 391 | mca_dev->driver_loaded = 0; |
384 | mca_dev->slot = i; | 392 | mca_dev->slot = i; |
@@ -414,20 +422,20 @@ mca_handle_nmi_device(struct mca_device *mca_dev, int check_flag) | |||
414 | { | 422 | { |
415 | int slot = mca_dev->slot; | 423 | int slot = mca_dev->slot; |
416 | 424 | ||
417 | if(slot == MCA_INTEGSCSI) { | 425 | if (slot == MCA_INTEGSCSI) { |
418 | printk(KERN_CRIT "NMI: caused by MCA integrated SCSI adapter (%s)\n", | 426 | printk(KERN_CRIT "NMI: caused by MCA integrated SCSI adapter (%s)\n", |
419 | mca_dev->name); | 427 | mca_dev->name); |
420 | } else if(slot == MCA_INTEGVIDEO) { | 428 | } else if (slot == MCA_INTEGVIDEO) { |
421 | printk(KERN_CRIT "NMI: caused by MCA integrated video adapter (%s)\n", | 429 | printk(KERN_CRIT "NMI: caused by MCA integrated video adapter (%s)\n", |
422 | mca_dev->name); | 430 | mca_dev->name); |
423 | } else if(slot == MCA_MOTHERBOARD) { | 431 | } else if (slot == MCA_MOTHERBOARD) { |
424 | printk(KERN_CRIT "NMI: caused by motherboard (%s)\n", | 432 | printk(KERN_CRIT "NMI: caused by motherboard (%s)\n", |
425 | mca_dev->name); | 433 | mca_dev->name); |
426 | } | 434 | } |
427 | 435 | ||
428 | /* More info available in POS 6 and 7? */ | 436 | /* More info available in POS 6 and 7? */ |
429 | 437 | ||
430 | if(check_flag) { | 438 | if (check_flag) { |
431 | unsigned char pos6, pos7; | 439 | unsigned char pos6, pos7; |
432 | 440 | ||
433 | pos6 = mca_device_read_pos(mca_dev, 6); | 441 | pos6 = mca_device_read_pos(mca_dev, 6); |
@@ -447,8 +455,9 @@ static int __kprobes mca_handle_nmi_callback(struct device *dev, void *data) | |||
447 | 455 | ||
448 | pos5 = mca_device_read_pos(mca_dev, 5); | 456 | pos5 = mca_device_read_pos(mca_dev, 5); |
449 | 457 | ||
450 | if(!(pos5 & 0x80)) { | 458 | if (!(pos5 & 0x80)) { |
451 | /* Bit 7 of POS 5 is reset when this adapter has a hardware | 459 | /* |
460 | * Bit 7 of POS 5 is reset when this adapter has a hardware | ||
452 | * error. Bit 7 is reset if there's error information | 461 | * error. Bit 7 is reset if there's error information |
453 | * available in POS 6 and 7. | 462 | * available in POS 6 and 7. |
454 | */ | 463 | */ |
@@ -460,7 +469,8 @@ static int __kprobes mca_handle_nmi_callback(struct device *dev, void *data) | |||
460 | 469 | ||
461 | void __kprobes mca_handle_nmi(void) | 470 | void __kprobes mca_handle_nmi(void) |
462 | { | 471 | { |
463 | /* First try - scan the various adapters and see if a specific | 472 | /* |
473 | * First try - scan the various adapters and see if a specific | ||
464 | * adapter was responsible for the error. | 474 | * adapter was responsible for the error. |
465 | */ | 475 | */ |
466 | bus_for_each_dev(&mca_bus_type, NULL, NULL, mca_handle_nmi_callback); | 476 | bus_for_each_dev(&mca_bus_type, NULL, NULL, mca_handle_nmi_callback); |
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c index f2702d01b8a8..25cf6dee4e56 100644 --- a/arch/x86/kernel/microcode.c +++ b/arch/x86/kernel/microcode.c | |||
@@ -290,7 +290,7 @@ static int get_maching_microcode(void *mc, int cpu) | |||
290 | } | 290 | } |
291 | return 0; | 291 | return 0; |
292 | find: | 292 | find: |
293 | pr_debug("microcode: CPU %d found a matching microcode update with" | 293 | pr_debug("microcode: CPU%d found a matching microcode update with" |
294 | " version 0x%x (current=0x%x)\n", cpu, mc_header->rev,uci->rev); | 294 | " version 0x%x (current=0x%x)\n", cpu, mc_header->rev,uci->rev); |
295 | new_mc = vmalloc(total_size); | 295 | new_mc = vmalloc(total_size); |
296 | if (!new_mc) { | 296 | if (!new_mc) { |
@@ -336,11 +336,11 @@ static void apply_microcode(int cpu) | |||
336 | 336 | ||
337 | spin_unlock_irqrestore(µcode_update_lock, flags); | 337 | spin_unlock_irqrestore(µcode_update_lock, flags); |
338 | if (val[1] != uci->mc->hdr.rev) { | 338 | if (val[1] != uci->mc->hdr.rev) { |
339 | printk(KERN_ERR "microcode: CPU%d updated from revision " | 339 | printk(KERN_ERR "microcode: CPU%d update from revision " |
340 | "0x%x to 0x%x failed\n", cpu_num, uci->rev, val[1]); | 340 | "0x%x to 0x%x failed\n", cpu_num, uci->rev, val[1]); |
341 | return; | 341 | return; |
342 | } | 342 | } |
343 | pr_debug("microcode: CPU%d updated from revision " | 343 | printk(KERN_INFO "microcode: CPU%d updated from revision " |
344 | "0x%x to 0x%x, date = %08x \n", | 344 | "0x%x to 0x%x, date = %08x \n", |
345 | cpu_num, uci->rev, val[1], uci->mc->hdr.date); | 345 | cpu_num, uci->rev, val[1], uci->mc->hdr.date); |
346 | uci->rev = val[1]; | 346 | uci->rev = val[1]; |
@@ -534,7 +534,7 @@ static int cpu_request_microcode(int cpu) | |||
534 | c->x86, c->x86_model, c->x86_mask); | 534 | c->x86, c->x86_model, c->x86_mask); |
535 | error = request_firmware(&firmware, name, µcode_pdev->dev); | 535 | error = request_firmware(&firmware, name, µcode_pdev->dev); |
536 | if (error) { | 536 | if (error) { |
537 | pr_debug("ucode data file %s load failed\n", name); | 537 | pr_debug("microcode: ucode data file %s load failed\n", name); |
538 | return error; | 538 | return error; |
539 | } | 539 | } |
540 | buf = firmware->data; | 540 | buf = firmware->data; |
@@ -709,7 +709,7 @@ static int __mc_sysdev_add(struct sys_device *sys_dev, int resume) | |||
709 | if (!cpu_online(cpu)) | 709 | if (!cpu_online(cpu)) |
710 | return 0; | 710 | return 0; |
711 | 711 | ||
712 | pr_debug("Microcode:CPU %d added\n", cpu); | 712 | pr_debug("microcode: CPU%d added\n", cpu); |
713 | memset(uci, 0, sizeof(*uci)); | 713 | memset(uci, 0, sizeof(*uci)); |
714 | 714 | ||
715 | err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group); | 715 | err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group); |
@@ -733,7 +733,7 @@ static int mc_sysdev_remove(struct sys_device *sys_dev) | |||
733 | if (!cpu_online(cpu)) | 733 | if (!cpu_online(cpu)) |
734 | return 0; | 734 | return 0; |
735 | 735 | ||
736 | pr_debug("Microcode:CPU %d removed\n", cpu); | 736 | pr_debug("microcode: CPU%d removed\n", cpu); |
737 | microcode_fini_cpu(cpu); | 737 | microcode_fini_cpu(cpu); |
738 | sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); | 738 | sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); |
739 | return 0; | 739 | return 0; |
@@ -745,7 +745,7 @@ static int mc_sysdev_resume(struct sys_device *dev) | |||
745 | 745 | ||
746 | if (!cpu_online(cpu)) | 746 | if (!cpu_online(cpu)) |
747 | return 0; | 747 | return 0; |
748 | pr_debug("Microcode:CPU %d resumed\n", cpu); | 748 | pr_debug("microcode: CPU%d resumed\n", cpu); |
749 | /* only CPU 0 will apply ucode here */ | 749 | /* only CPU 0 will apply ucode here */ |
750 | apply_microcode(0); | 750 | apply_microcode(0); |
751 | return 0; | 751 | return 0; |
@@ -783,7 +783,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) | |||
783 | } | 783 | } |
784 | case CPU_DOWN_FAILED_FROZEN: | 784 | case CPU_DOWN_FAILED_FROZEN: |
785 | if (sysfs_create_group(&sys_dev->kobj, &mc_attr_group)) | 785 | if (sysfs_create_group(&sys_dev->kobj, &mc_attr_group)) |
786 | printk(KERN_ERR "Microcode: Failed to create the sysfs " | 786 | printk(KERN_ERR "microcode: Failed to create the sysfs " |
787 | "group for CPU%d\n", cpu); | 787 | "group for CPU%d\n", cpu); |
788 | break; | 788 | break; |
789 | case CPU_DOWN_PREPARE: | 789 | case CPU_DOWN_PREPARE: |
diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse.c index f349e68e45a0..70744e344fa1 100644 --- a/arch/x86/kernel/mpparse_32.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -4,82 +4,56 @@ | |||
4 | * | 4 | * |
5 | * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> | 5 | * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> |
6 | * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com> | 6 | * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com> |
7 | * | 7 | * (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de> |
8 | * Fixes | ||
9 | * Erich Boleyn : MP v1.4 and additional changes. | ||
10 | * Alan Cox : Added EBDA scanning | ||
11 | * Ingo Molnar : various cleanups and rewrites | ||
12 | * Maciej W. Rozycki: Bits for default MP configurations | ||
13 | * Paul Diefenbaugh: Added full ACPI support | ||
14 | */ | 8 | */ |
15 | 9 | ||
16 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
17 | #include <linux/init.h> | 11 | #include <linux/init.h> |
18 | #include <linux/acpi.h> | ||
19 | #include <linux/delay.h> | 12 | #include <linux/delay.h> |
20 | #include <linux/bootmem.h> | 13 | #include <linux/bootmem.h> |
21 | #include <linux/kernel_stat.h> | 14 | #include <linux/kernel_stat.h> |
22 | #include <linux/mc146818rtc.h> | 15 | #include <linux/mc146818rtc.h> |
23 | #include <linux/bitops.h> | 16 | #include <linux/bitops.h> |
17 | #include <linux/acpi.h> | ||
18 | #include <linux/module.h> | ||
24 | 19 | ||
25 | #include <asm/smp.h> | 20 | #include <asm/smp.h> |
26 | #include <asm/acpi.h> | ||
27 | #include <asm/mtrr.h> | 21 | #include <asm/mtrr.h> |
28 | #include <asm/mpspec.h> | 22 | #include <asm/mpspec.h> |
23 | #include <asm/pgalloc.h> | ||
29 | #include <asm/io_apic.h> | 24 | #include <asm/io_apic.h> |
25 | #include <asm/proto.h> | ||
26 | #include <asm/acpi.h> | ||
27 | #include <asm/bios_ebda.h> | ||
30 | 28 | ||
31 | #include <mach_apic.h> | 29 | #include <mach_apic.h> |
30 | #ifdef CONFIG_X86_32 | ||
32 | #include <mach_apicdef.h> | 31 | #include <mach_apicdef.h> |
33 | #include <mach_mpparse.h> | 32 | #include <mach_mpparse.h> |
34 | #include <bios_ebda.h> | 33 | #endif |
35 | 34 | ||
36 | /* Have we found an MP table */ | 35 | /* Have we found an MP table */ |
37 | int smp_found_config; | 36 | int smp_found_config; |
38 | unsigned int __cpuinitdata maxcpus = NR_CPUS; | ||
39 | 37 | ||
40 | /* | 38 | /* |
41 | * Various Linux-internal data structures created from the | 39 | * Various Linux-internal data structures created from the |
42 | * MP-table. | 40 | * MP-table. |
43 | */ | 41 | */ |
44 | int apic_version [MAX_APICS]; | 42 | #if defined (CONFIG_MCA) || defined (CONFIG_EISA) |
45 | int mp_bus_id_to_type [MAX_MP_BUSSES]; | 43 | int mp_bus_id_to_type[MAX_MP_BUSSES]; |
46 | int mp_bus_id_to_node [MAX_MP_BUSSES]; | 44 | #endif |
47 | int mp_bus_id_to_local [MAX_MP_BUSSES]; | ||
48 | int quad_local_to_mp_bus_id [NR_CPUS/4][4]; | ||
49 | int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 }; | ||
50 | static int mp_current_pci_id; | ||
51 | |||
52 | /* I/O APIC entries */ | ||
53 | struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; | ||
54 | |||
55 | /* # of MP IRQ source entries */ | ||
56 | struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; | ||
57 | 45 | ||
58 | /* MP IRQ source entries */ | 46 | DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); |
59 | int mp_irq_entries; | 47 | int mp_bus_id_to_pci_bus[MAX_MP_BUSSES] = {[0 ... MAX_MP_BUSSES - 1] = -1 }; |
60 | 48 | ||
61 | int nr_ioapics; | 49 | static int mp_current_pci_id; |
62 | 50 | ||
63 | int pic_mode; | 51 | int pic_mode; |
64 | unsigned long mp_lapic_addr; | ||
65 | |||
66 | unsigned int def_to_bigsmp = 0; | ||
67 | |||
68 | /* Processor that is doing the boot up */ | ||
69 | unsigned int boot_cpu_physical_apicid = -1U; | ||
70 | /* Internal processor count */ | ||
71 | unsigned int num_processors; | ||
72 | |||
73 | /* Bitmask of physically existing CPUs */ | ||
74 | physid_mask_t phys_cpu_present_map; | ||
75 | |||
76 | u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID }; | ||
77 | 52 | ||
78 | /* | 53 | /* |
79 | * Intel MP BIOS table parsing routines: | 54 | * Intel MP BIOS table parsing routines: |
80 | */ | 55 | */ |
81 | 56 | ||
82 | |||
83 | /* | 57 | /* |
84 | * Checksum an MP configuration block. | 58 | * Checksum an MP configuration block. |
85 | */ | 59 | */ |
@@ -94,216 +68,153 @@ static int __init mpf_checksum(unsigned char *mp, int len) | |||
94 | return sum & 0xFF; | 68 | return sum & 0xFF; |
95 | } | 69 | } |
96 | 70 | ||
71 | #ifdef CONFIG_X86_NUMAQ | ||
97 | /* | 72 | /* |
98 | * Have to match translation table entries to main table entries by counter | 73 | * Have to match translation table entries to main table entries by counter |
99 | * hence the mpc_record variable .... can't see a less disgusting way of | 74 | * hence the mpc_record variable .... can't see a less disgusting way of |
100 | * doing this .... | 75 | * doing this .... |
101 | */ | 76 | */ |
102 | 77 | ||
103 | static int mpc_record; | 78 | static int mpc_record; |
104 | static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __cpuinitdata; | 79 | static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] |
80 | __cpuinitdata; | ||
81 | #endif | ||
105 | 82 | ||
106 | static void __cpuinit MP_processor_info (struct mpc_config_processor *m) | 83 | static void __cpuinit MP_processor_info(struct mpc_config_processor *m) |
107 | { | 84 | { |
108 | int ver, apicid; | 85 | int apicid; |
109 | physid_mask_t phys_cpu; | 86 | char *bootup_cpu = ""; |
110 | |||
111 | if (!(m->mpc_cpuflag & CPU_ENABLED)) | ||
112 | return; | ||
113 | 87 | ||
88 | if (!(m->mpc_cpuflag & CPU_ENABLED)) { | ||
89 | disabled_cpus++; | ||
90 | return; | ||
91 | } | ||
92 | #ifdef CONFIG_X86_NUMAQ | ||
114 | apicid = mpc_apic_id(m, translation_table[mpc_record]); | 93 | apicid = mpc_apic_id(m, translation_table[mpc_record]); |
115 | 94 | #else | |
116 | if (m->mpc_featureflag&(1<<0)) | 95 | apicid = m->mpc_apicid; |
117 | Dprintk(" Floating point unit present.\n"); | 96 | #endif |
118 | if (m->mpc_featureflag&(1<<7)) | ||
119 | Dprintk(" Machine Exception supported.\n"); | ||
120 | if (m->mpc_featureflag&(1<<8)) | ||
121 | Dprintk(" 64 bit compare & exchange supported.\n"); | ||
122 | if (m->mpc_featureflag&(1<<9)) | ||
123 | Dprintk(" Internal APIC present.\n"); | ||
124 | if (m->mpc_featureflag&(1<<11)) | ||
125 | Dprintk(" SEP present.\n"); | ||
126 | if (m->mpc_featureflag&(1<<12)) | ||
127 | Dprintk(" MTRR present.\n"); | ||
128 | if (m->mpc_featureflag&(1<<13)) | ||
129 | Dprintk(" PGE present.\n"); | ||
130 | if (m->mpc_featureflag&(1<<14)) | ||
131 | Dprintk(" MCA present.\n"); | ||
132 | if (m->mpc_featureflag&(1<<15)) | ||
133 | Dprintk(" CMOV present.\n"); | ||
134 | if (m->mpc_featureflag&(1<<16)) | ||
135 | Dprintk(" PAT present.\n"); | ||
136 | if (m->mpc_featureflag&(1<<17)) | ||
137 | Dprintk(" PSE present.\n"); | ||
138 | if (m->mpc_featureflag&(1<<18)) | ||
139 | Dprintk(" PSN present.\n"); | ||
140 | if (m->mpc_featureflag&(1<<19)) | ||
141 | Dprintk(" Cache Line Flush Instruction present.\n"); | ||
142 | /* 20 Reserved */ | ||
143 | if (m->mpc_featureflag&(1<<21)) | ||
144 | Dprintk(" Debug Trace and EMON Store present.\n"); | ||
145 | if (m->mpc_featureflag&(1<<22)) | ||
146 | Dprintk(" ACPI Thermal Throttle Registers present.\n"); | ||
147 | if (m->mpc_featureflag&(1<<23)) | ||
148 | Dprintk(" MMX present.\n"); | ||
149 | if (m->mpc_featureflag&(1<<24)) | ||
150 | Dprintk(" FXSR present.\n"); | ||
151 | if (m->mpc_featureflag&(1<<25)) | ||
152 | Dprintk(" XMM present.\n"); | ||
153 | if (m->mpc_featureflag&(1<<26)) | ||
154 | Dprintk(" Willamette New Instructions present.\n"); | ||
155 | if (m->mpc_featureflag&(1<<27)) | ||
156 | Dprintk(" Self Snoop present.\n"); | ||
157 | if (m->mpc_featureflag&(1<<28)) | ||
158 | Dprintk(" HT present.\n"); | ||
159 | if (m->mpc_featureflag&(1<<29)) | ||
160 | Dprintk(" Thermal Monitor present.\n"); | ||
161 | /* 30, 31 Reserved */ | ||
162 | |||
163 | |||
164 | if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { | 97 | if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { |
165 | Dprintk(" Bootup CPU\n"); | 98 | bootup_cpu = " (Bootup-CPU)"; |
166 | boot_cpu_physical_apicid = m->mpc_apicid; | 99 | boot_cpu_physical_apicid = m->mpc_apicid; |
167 | } | 100 | } |
168 | 101 | ||
169 | ver = m->mpc_apicver; | 102 | printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu); |
170 | 103 | generic_processor_info(apicid, m->mpc_apicver); | |
171 | /* | ||
172 | * Validate version | ||
173 | */ | ||
174 | if (ver == 0x0) { | ||
175 | printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! " | ||
176 | "fixing up to 0x10. (tell your hw vendor)\n", | ||
177 | m->mpc_apicid); | ||
178 | ver = 0x10; | ||
179 | } | ||
180 | apic_version[m->mpc_apicid] = ver; | ||
181 | |||
182 | phys_cpu = apicid_to_cpu_present(apicid); | ||
183 | physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu); | ||
184 | |||
185 | if (num_processors >= NR_CPUS) { | ||
186 | printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." | ||
187 | " Processor ignored.\n", NR_CPUS); | ||
188 | return; | ||
189 | } | ||
190 | |||
191 | if (num_processors >= maxcpus) { | ||
192 | printk(KERN_WARNING "WARNING: maxcpus limit of %i reached." | ||
193 | " Processor ignored.\n", maxcpus); | ||
194 | return; | ||
195 | } | ||
196 | |||
197 | cpu_set(num_processors, cpu_possible_map); | ||
198 | num_processors++; | ||
199 | |||
200 | /* | ||
201 | * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y | ||
202 | * but we need to work other dependencies like SMP_SUSPEND etc | ||
203 | * before this can be done without some confusion. | ||
204 | * if (CPU_HOTPLUG_ENABLED || num_processors > 8) | ||
205 | * - Ashok Raj <ashok.raj@intel.com> | ||
206 | */ | ||
207 | if (num_processors > 8) { | ||
208 | switch (boot_cpu_data.x86_vendor) { | ||
209 | case X86_VENDOR_INTEL: | ||
210 | if (!APIC_XAPIC(ver)) { | ||
211 | def_to_bigsmp = 0; | ||
212 | break; | ||
213 | } | ||
214 | /* If P4 and above fall through */ | ||
215 | case X86_VENDOR_AMD: | ||
216 | def_to_bigsmp = 1; | ||
217 | } | ||
218 | } | ||
219 | bios_cpu_apicid[num_processors - 1] = m->mpc_apicid; | ||
220 | } | 104 | } |
221 | 105 | ||
222 | static void __init MP_bus_info (struct mpc_config_bus *m) | 106 | static void __init MP_bus_info(struct mpc_config_bus *m) |
223 | { | 107 | { |
224 | char str[7]; | 108 | char str[7]; |
225 | 109 | ||
226 | memcpy(str, m->mpc_bustype, 6); | 110 | memcpy(str, m->mpc_bustype, 6); |
227 | str[6] = 0; | 111 | str[6] = 0; |
228 | 112 | ||
113 | #ifdef CONFIG_X86_NUMAQ | ||
229 | mpc_oem_bus_info(m, str, translation_table[mpc_record]); | 114 | mpc_oem_bus_info(m, str, translation_table[mpc_record]); |
115 | #else | ||
116 | Dprintk("Bus #%d is %s\n", m->mpc_busid, str); | ||
117 | #endif | ||
230 | 118 | ||
231 | #if MAX_MP_BUSSES < 256 | 119 | #if MAX_MP_BUSSES < 256 |
232 | if (m->mpc_busid >= MAX_MP_BUSSES) { | 120 | if (m->mpc_busid >= MAX_MP_BUSSES) { |
233 | printk(KERN_WARNING "MP table busid value (%d) for bustype %s " | 121 | printk(KERN_WARNING "MP table busid value (%d) for bustype %s " |
234 | " is too large, max. supported is %d\n", | 122 | " is too large, max. supported is %d\n", |
235 | m->mpc_busid, str, MAX_MP_BUSSES - 1); | 123 | m->mpc_busid, str, MAX_MP_BUSSES - 1); |
236 | return; | 124 | return; |
237 | } | 125 | } |
238 | #endif | 126 | #endif |
239 | 127 | ||
240 | if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) { | 128 | if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { |
129 | set_bit(m->mpc_busid, mp_bus_not_pci); | ||
130 | #if defined(CONFIG_EISA) || defined (CONFIG_MCA) | ||
241 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; | 131 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; |
242 | } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) { | 132 | #endif |
243 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; | 133 | } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) { |
244 | } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) { | 134 | #ifdef CONFIG_X86_NUMAQ |
245 | mpc_oem_pci_bus(m, translation_table[mpc_record]); | 135 | mpc_oem_pci_bus(m, translation_table[mpc_record]); |
246 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; | 136 | #endif |
137 | clear_bit(m->mpc_busid, mp_bus_not_pci); | ||
247 | mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id; | 138 | mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id; |
248 | mp_current_pci_id++; | 139 | mp_current_pci_id++; |
249 | } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) { | 140 | #if defined(CONFIG_EISA) || defined (CONFIG_MCA) |
141 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; | ||
142 | } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) { | ||
143 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; | ||
144 | } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA) - 1) == 0) { | ||
250 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; | 145 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; |
251 | } else { | 146 | #endif |
147 | } else | ||
252 | printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); | 148 | printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); |
149 | } | ||
150 | |||
151 | #ifdef CONFIG_X86_IO_APIC | ||
152 | |||
153 | static int bad_ioapic(unsigned long address) | ||
154 | { | ||
155 | if (nr_ioapics >= MAX_IO_APICS) { | ||
156 | printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " | ||
157 | "(found %d)\n", MAX_IO_APICS, nr_ioapics); | ||
158 | panic("Recompile kernel with bigger MAX_IO_APICS!\n"); | ||
159 | } | ||
160 | if (!address) { | ||
161 | printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" | ||
162 | " found in table, skipping!\n"); | ||
163 | return 1; | ||
253 | } | 164 | } |
165 | return 0; | ||
254 | } | 166 | } |
255 | 167 | ||
256 | static void __init MP_ioapic_info (struct mpc_config_ioapic *m) | 168 | static void __init MP_ioapic_info(struct mpc_config_ioapic *m) |
257 | { | 169 | { |
258 | if (!(m->mpc_flags & MPC_APIC_USABLE)) | 170 | if (!(m->mpc_flags & MPC_APIC_USABLE)) |
259 | return; | 171 | return; |
260 | 172 | ||
261 | printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n", | 173 | printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n", |
262 | m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr); | 174 | m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr); |
263 | if (nr_ioapics >= MAX_IO_APICS) { | 175 | |
264 | printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n", | 176 | if (bad_ioapic(m->mpc_apicaddr)) |
265 | MAX_IO_APICS, nr_ioapics); | ||
266 | panic("Recompile kernel with bigger MAX_IO_APICS!.\n"); | ||
267 | } | ||
268 | if (!m->mpc_apicaddr) { | ||
269 | printk(KERN_ERR "WARNING: bogus zero I/O APIC address" | ||
270 | " found in MP table, skipping!\n"); | ||
271 | return; | 177 | return; |
272 | } | 178 | |
273 | mp_ioapics[nr_ioapics] = *m; | 179 | mp_ioapics[nr_ioapics] = *m; |
274 | nr_ioapics++; | 180 | nr_ioapics++; |
275 | } | 181 | } |
276 | 182 | ||
277 | static void __init MP_intsrc_info (struct mpc_config_intsrc *m) | 183 | static void __init MP_intsrc_info(struct mpc_config_intsrc *m) |
278 | { | 184 | { |
279 | mp_irqs [mp_irq_entries] = *m; | 185 | mp_irqs[mp_irq_entries] = *m; |
280 | Dprintk("Int: type %d, pol %d, trig %d, bus %d," | 186 | Dprintk("Int: type %d, pol %d, trig %d, bus %d," |
281 | " IRQ %02x, APIC ID %x, APIC INT %02x\n", | 187 | " IRQ %02x, APIC ID %x, APIC INT %02x\n", |
282 | m->mpc_irqtype, m->mpc_irqflag & 3, | 188 | m->mpc_irqtype, m->mpc_irqflag & 3, |
283 | (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, | 189 | (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, |
284 | m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq); | 190 | m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq); |
285 | if (++mp_irq_entries == MAX_IRQ_SOURCES) | 191 | if (++mp_irq_entries == MAX_IRQ_SOURCES) |
286 | panic("Max # of irq sources exceeded!!\n"); | 192 | panic("Max # of irq sources exceeded!!\n"); |
287 | } | 193 | } |
288 | 194 | ||
289 | static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m) | 195 | #endif |
196 | |||
197 | static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m) | ||
290 | { | 198 | { |
291 | Dprintk("Lint: type %d, pol %d, trig %d, bus %d," | 199 | Dprintk("Lint: type %d, pol %d, trig %d, bus %d," |
292 | " IRQ %02x, APIC ID %x, APIC LINT %02x\n", | 200 | " IRQ %02x, APIC ID %x, APIC LINT %02x\n", |
293 | m->mpc_irqtype, m->mpc_irqflag & 3, | 201 | m->mpc_irqtype, m->mpc_irqflag & 3, |
294 | (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid, | 202 | (m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid, |
295 | m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint); | 203 | m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint); |
296 | } | 204 | } |
297 | 205 | ||
298 | #ifdef CONFIG_X86_NUMAQ | 206 | #ifdef CONFIG_X86_NUMAQ |
299 | static void __init MP_translation_info (struct mpc_config_translation *m) | 207 | static void __init MP_translation_info(struct mpc_config_translation *m) |
300 | { | 208 | { |
301 | printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local); | 209 | printk(KERN_INFO |
210 | "Translation: record %d, type %d, quad %d, global %d, local %d\n", | ||
211 | mpc_record, m->trans_type, m->trans_quad, m->trans_global, | ||
212 | m->trans_local); | ||
302 | 213 | ||
303 | if (mpc_record >= MAX_MPC_ENTRY) | 214 | if (mpc_record >= MAX_MPC_ENTRY) |
304 | printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n"); | 215 | printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n"); |
305 | else | 216 | else |
306 | translation_table[mpc_record] = m; /* stash this for later */ | 217 | translation_table[mpc_record] = m; /* stash this for later */ |
307 | if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad)) | 218 | if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad)) |
308 | node_set_online(m->trans_quad); | 219 | node_set_online(m->trans_quad); |
309 | } | 220 | } |
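Note on the bus bookkeeping above: the common "is this bus PCI?" question is now answered by one bit per bus, with the full mp_bus_id_to_type[] array kept only for EISA/MCA kernels. A minimal sketch of the resulting test, with an assumed helper name:

    #include <linux/bitmap.h>
    #include <linux/bitops.h>

    DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

    /* PCI is the default; the bitmap records the exceptions. */
    static inline int mp_bus_is_pci(int busid)
    {
            return !test_bit(busid, mp_bus_not_pci);
    }
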
@@ -312,118 +223,124 @@ static void __init MP_translation_info (struct mpc_config_translation *m) | |||
312 | * Read/parse the MPC oem tables | 223 | * Read/parse the MPC oem tables |
313 | */ | 224 | */ |
314 | 225 | ||
315 | static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \ | 226 | static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, |
316 | unsigned short oemsize) | 227 | unsigned short oemsize) |
317 | { | 228 | { |
318 | int count = sizeof (*oemtable); /* the header size */ | 229 | int count = sizeof(*oemtable); /* the header size */ |
319 | unsigned char *oemptr = ((unsigned char *)oemtable)+count; | 230 | unsigned char *oemptr = ((unsigned char *)oemtable) + count; |
320 | 231 | ||
321 | mpc_record = 0; | 232 | mpc_record = 0; |
322 | printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable); | 233 | printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", |
323 | if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4)) | 234 | oemtable); |
324 | { | 235 | if (memcmp(oemtable->oem_signature, MPC_OEM_SIGNATURE, 4)) { |
325 | printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n", | 236 | printk(KERN_WARNING |
326 | oemtable->oem_signature[0], | 237 | "SMP mpc oemtable: bad signature [%c%c%c%c]!\n", |
327 | oemtable->oem_signature[1], | 238 | oemtable->oem_signature[0], oemtable->oem_signature[1], |
328 | oemtable->oem_signature[2], | 239 | oemtable->oem_signature[2], oemtable->oem_signature[3]); |
329 | oemtable->oem_signature[3]); | ||
330 | return; | 240 | return; |
331 | } | 241 | } |
332 | if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length)) | 242 | if (mpf_checksum((unsigned char *)oemtable, oemtable->oem_length)) { |
333 | { | ||
334 | printk(KERN_WARNING "SMP oem mptable: checksum error!\n"); | 243 | printk(KERN_WARNING "SMP oem mptable: checksum error!\n"); |
335 | return; | 244 | return; |
336 | } | 245 | } |
337 | while (count < oemtable->oem_length) { | 246 | while (count < oemtable->oem_length) { |
338 | switch (*oemptr) { | 247 | switch (*oemptr) { |
339 | case MP_TRANSLATION: | 248 | case MP_TRANSLATION: |
340 | { | 249 | { |
341 | struct mpc_config_translation *m= | 250 | struct mpc_config_translation *m = |
342 | (struct mpc_config_translation *)oemptr; | 251 | (struct mpc_config_translation *)oemptr; |
343 | MP_translation_info(m); | 252 | MP_translation_info(m); |
344 | oemptr += sizeof(*m); | 253 | oemptr += sizeof(*m); |
345 | count += sizeof(*m); | 254 | count += sizeof(*m); |
346 | ++mpc_record; | 255 | ++mpc_record; |
347 | break; | 256 | break; |
348 | } | 257 | } |
349 | default: | 258 | default: |
350 | { | 259 | { |
351 | printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr); | 260 | printk(KERN_WARNING |
261 | "Unrecognised OEM table entry type! - %d\n", | ||
262 | (int)*oemptr); | ||
352 | return; | 263 | return; |
353 | } | 264 | } |
354 | } | 265 | } |
355 | } | 266 | } |
356 | } | 267 | } |
357 | 268 | ||
358 | static inline void mps_oem_check(struct mp_config_table *mpc, char *oem, | 269 | static inline void mps_oem_check(struct mp_config_table *mpc, char *oem, |
359 | char *productid) | 270 | char *productid) |
360 | { | 271 | { |
361 | if (strncmp(oem, "IBM NUMA", 8)) | 272 | if (strncmp(oem, "IBM NUMA", 8)) |
362 | printk("Warning! May not be a NUMA-Q system!\n"); | 273 | printk("Warning! May not be a NUMA-Q system!\n"); |
363 | if (mpc->mpc_oemptr) | 274 | if (mpc->mpc_oemptr) |
364 | smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr, | 275 | smp_read_mpc_oem((struct mp_config_oemtable *)mpc->mpc_oemptr, |
365 | mpc->mpc_oemsize); | 276 | mpc->mpc_oemsize); |
366 | } | 277 | } |
367 | #endif /* CONFIG_X86_NUMAQ */ | 278 | #endif /* CONFIG_X86_NUMAQ */ |
368 | 279 | ||
369 | /* | 280 | /* |
370 | * Read/parse the MPC | 281 | * Read/parse the MPC |
371 | */ | 282 | */ |
372 | 283 | ||
373 | static int __init smp_read_mpc(struct mp_config_table *mpc) | 284 | static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early) |
374 | { | 285 | { |
375 | char str[16]; | 286 | char str[16]; |
376 | char oem[10]; | 287 | char oem[10]; |
377 | int count=sizeof(*mpc); | 288 | int count = sizeof(*mpc); |
378 | unsigned char *mpt=((unsigned char *)mpc)+count; | 289 | unsigned char *mpt = ((unsigned char *)mpc) + count; |
379 | 290 | ||
380 | if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) { | 291 | if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) { |
381 | printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n", | 292 | printk(KERN_ERR "MPTABLE: bad signature [%c%c%c%c]!\n", |
382 | *(u32 *)mpc->mpc_signature); | 293 | mpc->mpc_signature[0], mpc->mpc_signature[1], |
294 | mpc->mpc_signature[2], mpc->mpc_signature[3]); | ||
383 | return 0; | 295 | return 0; |
384 | } | 296 | } |
385 | if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) { | 297 | if (mpf_checksum((unsigned char *)mpc, mpc->mpc_length)) { |
386 | printk(KERN_ERR "SMP mptable: checksum error!\n"); | 298 | printk(KERN_ERR "MPTABLE: checksum error!\n"); |
387 | return 0; | 299 | return 0; |
388 | } | 300 | } |
389 | if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) { | 301 | if (mpc->mpc_spec != 0x01 && mpc->mpc_spec != 0x04) { |
390 | printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n", | 302 | printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n", |
391 | mpc->mpc_spec); | 303 | mpc->mpc_spec); |
392 | return 0; | 304 | return 0; |
393 | } | 305 | } |
394 | if (!mpc->mpc_lapic) { | 306 | if (!mpc->mpc_lapic) { |
395 | printk(KERN_ERR "SMP mptable: null local APIC address!\n"); | 307 | printk(KERN_ERR "MPTABLE: null local APIC address!\n"); |
396 | return 0; | 308 | return 0; |
397 | } | 309 | } |
398 | memcpy(oem,mpc->mpc_oem,8); | 310 | memcpy(oem, mpc->mpc_oem, 8); |
399 | oem[8]=0; | 311 | oem[8] = 0; |
400 | printk(KERN_INFO "OEM ID: %s ",oem); | 312 | printk(KERN_INFO "MPTABLE: OEM ID: %s ", oem); |
401 | 313 | ||
402 | memcpy(str,mpc->mpc_productid,12); | 314 | memcpy(str, mpc->mpc_productid, 12); |
403 | str[12]=0; | 315 | str[12] = 0; |
404 | printk("Product ID: %s ",str); | 316 | printk("Product ID: %s ", str); |
405 | 317 | ||
318 | #ifdef CONFIG_X86_32 | ||
406 | mps_oem_check(mpc, oem, str); | 319 | mps_oem_check(mpc, oem, str); |
320 | #endif | ||
321 | printk(KERN_INFO "MPTABLE: Product ID: %s ", str); | ||
407 | 322 | ||
408 | printk("APIC at: 0x%X\n", mpc->mpc_lapic); | 323 | printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->mpc_lapic); |
409 | 324 | ||
410 | /* | 325 | /* save the local APIC address; it might be non-default */ |
411 | * Save the local APIC address (it might be non-default) -- but only | ||
412 | * if we're not using ACPI. | ||
413 | */ | ||
414 | if (!acpi_lapic) | 326 | if (!acpi_lapic) |
415 | mp_lapic_addr = mpc->mpc_lapic; | 327 | mp_lapic_addr = mpc->mpc_lapic; |
416 | 328 | ||
329 | if (early) | ||
330 | return 1; | ||
331 | |||
417 | /* | 332 | /* |
418 | * Now process the configuration blocks. | 333 | * Now process the configuration blocks. |
419 | */ | 334 | */ |
335 | #ifdef CONFIG_X86_NUMAQ | ||
420 | mpc_record = 0; | 336 | mpc_record = 0; |
337 | #endif | ||
421 | while (count < mpc->mpc_length) { | 338 | while (count < mpc->mpc_length) { |
422 | switch(*mpt) { | 339 | switch (*mpt) { |
423 | case MP_PROCESSOR: | 340 | case MP_PROCESSOR: |
424 | { | 341 | { |
425 | struct mpc_config_processor *m= | 342 | struct mpc_config_processor *m = |
426 | (struct mpc_config_processor *)mpt; | 343 | (struct mpc_config_processor *)mpt; |
427 | /* ACPI may have already provided this data */ | 344 | /* ACPI may have already provided this data */ |
428 | if (!acpi_lapic) | 345 | if (!acpi_lapic) |
429 | MP_processor_info(m); | 346 | MP_processor_info(m); |
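The new `early` argument to smp_read_mpc() splits table parsing into two passes; a sketch of the call pattern, assuming the wrappers defined further down in this file:

    /*
     *   early_get_smp_config();  __get_smp_config(1): validate the header
     *                            and record mp_lapic_addr, then stop
     *   get_smp_config();        __get_smp_config(0): walk the processor,
     *                            bus, I/O APIC and intsrc entries
     */
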
@@ -431,57 +348,68 @@ static int __init smp_read_mpc(struct mp_config_table *mpc) | |||
431 | count += sizeof(*m); | 348 | count += sizeof(*m); |
432 | break; | 349 | break; |
433 | } | 350 | } |
434 | case MP_BUS: | 351 | case MP_BUS: |
435 | { | 352 | { |
436 | struct mpc_config_bus *m= | 353 | struct mpc_config_bus *m = |
437 | (struct mpc_config_bus *)mpt; | 354 | (struct mpc_config_bus *)mpt; |
438 | MP_bus_info(m); | 355 | MP_bus_info(m); |
439 | mpt += sizeof(*m); | 356 | mpt += sizeof(*m); |
440 | count += sizeof(*m); | 357 | count += sizeof(*m); |
441 | break; | 358 | break; |
442 | } | 359 | } |
443 | case MP_IOAPIC: | 360 | case MP_IOAPIC: |
444 | { | 361 | { |
445 | struct mpc_config_ioapic *m= | 362 | #ifdef CONFIG_X86_IO_APIC |
446 | (struct mpc_config_ioapic *)mpt; | 363 | struct mpc_config_ioapic *m = |
364 | (struct mpc_config_ioapic *)mpt; | ||
447 | MP_ioapic_info(m); | 365 | MP_ioapic_info(m); |
448 | mpt+=sizeof(*m); | 366 | #endif |
449 | count+=sizeof(*m); | 367 | mpt += sizeof(struct mpc_config_ioapic); |
368 | count += sizeof(struct mpc_config_ioapic); | ||
450 | break; | 369 | break; |
451 | } | 370 | } |
452 | case MP_INTSRC: | 371 | case MP_INTSRC: |
453 | { | 372 | { |
454 | struct mpc_config_intsrc *m= | 373 | #ifdef CONFIG_X86_IO_APIC |
455 | (struct mpc_config_intsrc *)mpt; | 374 | struct mpc_config_intsrc *m = |
375 | (struct mpc_config_intsrc *)mpt; | ||
456 | 376 | ||
457 | MP_intsrc_info(m); | 377 | MP_intsrc_info(m); |
458 | mpt+=sizeof(*m); | 378 | #endif |
459 | count+=sizeof(*m); | 379 | mpt += sizeof(struct mpc_config_intsrc); |
380 | count += sizeof(struct mpc_config_intsrc); | ||
460 | break; | 381 | break; |
461 | } | 382 | } |
462 | case MP_LINTSRC: | 383 | case MP_LINTSRC: |
463 | { | 384 | { |
464 | struct mpc_config_lintsrc *m= | 385 | struct mpc_config_lintsrc *m = |
465 | (struct mpc_config_lintsrc *)mpt; | 386 | (struct mpc_config_lintsrc *)mpt; |
466 | MP_lintsrc_info(m); | 387 | MP_lintsrc_info(m); |
467 | mpt+=sizeof(*m); | 388 | mpt += sizeof(*m); |
468 | count+=sizeof(*m); | 389 | count += sizeof(*m); |
469 | break; | ||
470 | } | ||
471 | default: | ||
472 | { | ||
473 | count = mpc->mpc_length; | ||
474 | break; | 390 | break; |
475 | } | 391 | } |
392 | default: | ||
393 | /* wrong mptable */ | ||
394 | printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n"); | ||
395 | printk(KERN_ERR "type %x\n", *mpt); | ||
396 | print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16, | ||
397 | 1, mpc, mpc->mpc_length, 1); | ||
398 | count = mpc->mpc_length; | ||
399 | break; | ||
476 | } | 400 | } |
401 | #ifdef CONFIG_X86_NUMAQ | ||
477 | ++mpc_record; | 402 | ++mpc_record; |
403 | #endif | ||
478 | } | 404 | } |
479 | setup_apic_routing(); | 405 | setup_apic_routing(); |
480 | if (!num_processors) | 406 | if (!num_processors) |
481 | printk(KERN_ERR "SMP mptable: no processors registered!\n"); | 407 | printk(KERN_ERR "MPTABLE: no processors registered!\n"); |
482 | return num_processors; | 408 | return num_processors; |
483 | } | 409 | } |
484 | 410 | ||
411 | #ifdef CONFIG_X86_IO_APIC | ||
412 | |||
485 | static int __init ELCR_trigger(unsigned int irq) | 413 | static int __init ELCR_trigger(unsigned int irq) |
486 | { | 414 | { |
487 | unsigned int port; | 415 | unsigned int port; |
@@ -497,7 +425,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type) | |||
497 | int ELCR_fallback = 0; | 425 | int ELCR_fallback = 0; |
498 | 426 | ||
499 | intsrc.mpc_type = MP_INTSRC; | 427 | intsrc.mpc_type = MP_INTSRC; |
500 | intsrc.mpc_irqflag = 0; /* conforming */ | 428 | intsrc.mpc_irqflag = 0; /* conforming */ |
501 | intsrc.mpc_srcbus = 0; | 429 | intsrc.mpc_srcbus = 0; |
502 | intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid; | 430 | intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid; |
503 | 431 | ||
@@ -512,12 +440,16 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type) | |||
512 | * If it does, we assume it's valid. | 440 | * If it does, we assume it's valid. |
513 | */ | 441 | */ |
514 | if (mpc_default_type == 5) { | 442 | if (mpc_default_type == 5) { |
515 | printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n"); | 443 | printk(KERN_INFO "ISA/PCI bus type with no IRQ information... " |
444 | "falling back to ELCR\n"); | ||
516 | 445 | ||
517 | if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13)) | 446 | if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || |
518 | printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n"); | 447 | ELCR_trigger(13)) |
448 | printk(KERN_ERR "ELCR contains invalid data... " | ||
449 | "not using ELCR\n"); | ||
519 | else { | 450 | else { |
520 | printk(KERN_INFO "Using ELCR to identify PCI interrupts\n"); | 451 | printk(KERN_INFO |
452 | "Using ELCR to identify PCI interrupts\n"); | ||
521 | ELCR_fallback = 1; | 453 | ELCR_fallback = 1; |
522 | } | 454 | } |
523 | } | 455 | } |
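For reference, the ELCR probe used above reads the EISA Edge/Level Control Registers; a sketch equivalent to ELCR_trigger(), with the port layout from the EISA spec:

    #include <asm/io.h>     /* inb() */

    /* Port 0x4d0 covers IRQ 0-7, port 0x4d1 covers IRQ 8-15; a set bit
     * means level-triggered. IRQ 0, 1, 2 and 13 can never legally be
     * level-triggered, which is why a set bit there marks the ELCR
     * contents as invalid. */
    static int __init elcr_trigger(unsigned int irq)
    {
            unsigned int port = 0x4d0 + (irq >> 3);

            return (inb(port) >> (irq & 7)) & 1;
    }
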
@@ -546,21 +478,25 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type) | |||
546 | } | 478 | } |
547 | 479 | ||
548 | intsrc.mpc_srcbusirq = i; | 480 | intsrc.mpc_srcbusirq = i; |
549 | intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */ | 481 | intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */ |
550 | MP_intsrc_info(&intsrc); | 482 | MP_intsrc_info(&intsrc); |
551 | } | 483 | } |
552 | 484 | ||
553 | intsrc.mpc_irqtype = mp_ExtINT; | 485 | intsrc.mpc_irqtype = mp_ExtINT; |
554 | intsrc.mpc_srcbusirq = 0; | 486 | intsrc.mpc_srcbusirq = 0; |
555 | intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */ | 487 | intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */ |
556 | MP_intsrc_info(&intsrc); | 488 | MP_intsrc_info(&intsrc); |
557 | } | 489 | } |
558 | 490 | ||
491 | #endif | ||
492 | |||
559 | static inline void __init construct_default_ISA_mptable(int mpc_default_type) | 493 | static inline void __init construct_default_ISA_mptable(int mpc_default_type) |
560 | { | 494 | { |
561 | struct mpc_config_processor processor; | 495 | struct mpc_config_processor processor; |
562 | struct mpc_config_bus bus; | 496 | struct mpc_config_bus bus; |
497 | #ifdef CONFIG_X86_IO_APIC | ||
563 | struct mpc_config_ioapic ioapic; | 498 | struct mpc_config_ioapic ioapic; |
499 | #endif | ||
564 | struct mpc_config_lintsrc lintsrc; | 500 | struct mpc_config_lintsrc lintsrc; |
565 | int linttypes[2] = { mp_ExtINT, mp_NMI }; | 501 | int linttypes[2] = { mp_ExtINT, mp_NMI }; |
566 | int i; | 502 | int i; |
@@ -578,8 +514,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) | |||
578 | processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; | 514 | processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; |
579 | processor.mpc_cpuflag = CPU_ENABLED; | 515 | processor.mpc_cpuflag = CPU_ENABLED; |
580 | processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | | 516 | processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | |
581 | (boot_cpu_data.x86_model << 4) | | 517 | (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; |
582 | boot_cpu_data.x86_mask; | ||
583 | processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; | 518 | processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; |
584 | processor.mpc_reserved[0] = 0; | 519 | processor.mpc_reserved[0] = 0; |
585 | processor.mpc_reserved[1] = 0; | 520 | processor.mpc_reserved[1] = 0; |
@@ -591,23 +526,22 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) | |||
591 | bus.mpc_type = MP_BUS; | 526 | bus.mpc_type = MP_BUS; |
592 | bus.mpc_busid = 0; | 527 | bus.mpc_busid = 0; |
593 | switch (mpc_default_type) { | 528 | switch (mpc_default_type) { |
594 | default: | 529 | default: |
595 | printk("???\n"); | 530 | printk(KERN_ERR "???\nUnknown standard configuration %d\n", |
596 | printk(KERN_ERR "Unknown standard configuration %d\n", | 531 | mpc_default_type); |
597 | mpc_default_type); | 532 | /* fall through */ |
598 | /* fall through */ | 533 | case 1: |
599 | case 1: | 534 | case 5: |
600 | case 5: | 535 | memcpy(bus.mpc_bustype, "ISA ", 6); |
601 | memcpy(bus.mpc_bustype, "ISA ", 6); | 536 | break; |
602 | break; | 537 | case 2: |
603 | case 2: | 538 | case 6: |
604 | case 6: | 539 | case 3: |
605 | case 3: | 540 | memcpy(bus.mpc_bustype, "EISA ", 6); |
606 | memcpy(bus.mpc_bustype, "EISA ", 6); | 541 | break; |
607 | break; | 542 | case 4: |
608 | case 4: | 543 | case 7: |
609 | case 7: | 544 | memcpy(bus.mpc_bustype, "MCA ", 6); |
610 | memcpy(bus.mpc_bustype, "MCA ", 6); | ||
611 | } | 545 | } |
612 | MP_bus_info(&bus); | 546 | MP_bus_info(&bus); |
613 | if (mpc_default_type > 4) { | 547 | if (mpc_default_type > 4) { |
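Taken together with the `mpc_default_type > 4` branch that follows, the reflowed switch encodes the MP 1.4 specification's default configurations:

    /*
     * Default config type:  1, 5 -> ISA    2, 3, 6 -> EISA    4, 7 -> MCA
     * Per the spec, types 5-7 additionally pair the base bus with a PCI
     * bus, which is what the mpc_default_type > 4 branch registers.
     */
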
@@ -616,6 +550,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) | |||
616 | MP_bus_info(&bus); | 550 | MP_bus_info(&bus); |
617 | } | 551 | } |
618 | 552 | ||
553 | #ifdef CONFIG_X86_IO_APIC | ||
619 | ioapic.mpc_type = MP_IOAPIC; | 554 | ioapic.mpc_type = MP_IOAPIC; |
620 | ioapic.mpc_apicid = 2; | 555 | ioapic.mpc_apicid = 2; |
621 | ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; | 556 | ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; |
@@ -627,9 +562,9 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) | |||
627 | * We set up most of the low 16 IO-APIC pins according to MPS rules. | 562 | * We set up most of the low 16 IO-APIC pins according to MPS rules. |
628 | */ | 563 | */ |
629 | construct_default_ioirq_mptable(mpc_default_type); | 564 | construct_default_ioirq_mptable(mpc_default_type); |
630 | 565 | #endif | |
631 | lintsrc.mpc_type = MP_LINTSRC; | 566 | lintsrc.mpc_type = MP_LINTSRC; |
632 | lintsrc.mpc_irqflag = 0; /* conforming */ | 567 | lintsrc.mpc_irqflag = 0; /* conforming */ |
633 | lintsrc.mpc_srcbusid = 0; | 568 | lintsrc.mpc_srcbusid = 0; |
634 | lintsrc.mpc_srcbusirq = 0; | 569 | lintsrc.mpc_srcbusirq = 0; |
635 | lintsrc.mpc_destapic = MP_APIC_ALL; | 570 | lintsrc.mpc_destapic = MP_APIC_ALL; |
@@ -645,36 +580,49 @@ static struct intel_mp_floating *mpf_found; | |||
645 | /* | 580 | /* |
646 | * Scan the memory blocks for an SMP configuration block. | 581 | * Scan the memory blocks for an SMP configuration block. |
647 | */ | 582 | */ |
648 | void __init get_smp_config (void) | 583 | static void __init __get_smp_config(unsigned early) |
649 | { | 584 | { |
650 | struct intel_mp_floating *mpf = mpf_found; | 585 | struct intel_mp_floating *mpf = mpf_found; |
651 | 586 | ||
587 | if (acpi_lapic && early) | ||
588 | return; | ||
652 | /* | 589 | /* |
653 | * ACPI supports both logical (e.g. Hyper-Threading) and physical | 590 | * ACPI supports both logical (e.g. Hyper-Threading) and physical |
654 | * processors, where MPS only supports physical. | 591 | * processors, where MPS only supports physical. |
655 | */ | 592 | */ |
656 | if (acpi_lapic && acpi_ioapic) { | 593 | if (acpi_lapic && acpi_ioapic) { |
657 | printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n"); | 594 | printk(KERN_INFO "Using ACPI (MADT) for SMP configuration " |
595 | "information\n"); | ||
658 | return; | 596 | return; |
659 | } | 597 | } else if (acpi_lapic) |
660 | else if (acpi_lapic) | 598 | printk(KERN_INFO "Using ACPI for processor (LAPIC) " |
661 | printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n"); | 599 | "configuration information\n"); |
662 | 600 | ||
663 | printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); | 601 | printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", |
664 | if (mpf->mpf_feature2 & (1<<7)) { | 602 | mpf->mpf_specification); |
603 | #ifdef CONFIG_X86_32 | ||
604 | if (mpf->mpf_feature2 & (1 << 7)) { | ||
665 | printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); | 605 | printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); |
666 | pic_mode = 1; | 606 | pic_mode = 1; |
667 | } else { | 607 | } else { |
668 | printk(KERN_INFO " Virtual Wire compatibility mode.\n"); | 608 | printk(KERN_INFO " Virtual Wire compatibility mode.\n"); |
669 | pic_mode = 0; | 609 | pic_mode = 0; |
670 | } | 610 | } |
671 | 611 | #endif | |
672 | /* | 612 | /* |
673 | * Now see if we need to read further. | 613 | * Now see if we need to read further. |
674 | */ | 614 | */ |
675 | if (mpf->mpf_feature1 != 0) { | 615 | if (mpf->mpf_feature1 != 0) { |
616 | if (early) { | ||
617 | /* | ||
618 | * local APIC has default address | ||
619 | */ | ||
620 | mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; | ||
621 | return; | ||
622 | } | ||
676 | 623 | ||
677 | printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1); | 624 | printk(KERN_INFO "Default MP configuration #%d\n", |
625 | mpf->mpf_feature1); | ||
678 | construct_default_ISA_mptable(mpf->mpf_feature1); | 626 | construct_default_ISA_mptable(mpf->mpf_feature1); |
679 | 627 | ||
680 | } else if (mpf->mpf_physptr) { | 628 | } else if (mpf->mpf_physptr) { |
@@ -683,12 +631,18 @@ void __init get_smp_config (void) | |||
683 | * Read the physical hardware table. Anything here will | 631 | * Read the physical hardware table. Anything here will |
684 | * override the defaults. | 632 | * override the defaults. |
685 | */ | 633 | */ |
686 | if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) { | 634 | if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) { |
687 | smp_found_config = 0; | 635 | smp_found_config = 0; |
688 | printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"); | 636 | printk(KERN_ERR |
689 | printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n"); | 637 | "BIOS bug, MP table errors detected!...\n"); |
638 | printk(KERN_ERR "... disabling SMP support. " | ||
639 | "(tell your hw vendor)\n"); | ||
690 | return; | 640 | return; |
691 | } | 641 | } |
642 | |||
643 | if (early) | ||
644 | return; | ||
645 | #ifdef CONFIG_X86_IO_APIC | ||
692 | /* | 646 | /* |
693 | * If there are no explicit MP IRQ entries, then we are | 647 | * If there are no explicit MP IRQ entries, then we are |
694 | * broken. We set up most of the low 16 IO-APIC pins to | 648 | * broken. We set up most of the low 16 IO-APIC pins to |
@@ -697,7 +651,9 @@ void __init get_smp_config (void) | |||
697 | if (!mp_irq_entries) { | 651 | if (!mp_irq_entries) { |
698 | struct mpc_config_bus bus; | 652 | struct mpc_config_bus bus; |
699 | 653 | ||
700 | printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n"); | 654 | printk(KERN_ERR "BIOS bug, no explicit IRQ entries, " |
655 | "using default mptable. " | ||
656 | "(tell your hw vendor)\n"); | ||
701 | 657 | ||
702 | bus.mpc_type = MP_BUS; | 658 | bus.mpc_type = MP_BUS; |
703 | bus.mpc_busid = 0; | 659 | bus.mpc_busid = 0; |
@@ -706,36 +662,51 @@ void __init get_smp_config (void) | |||
706 | 662 | ||
707 | construct_default_ioirq_mptable(0); | 663 | construct_default_ioirq_mptable(0); |
708 | } | 664 | } |
709 | 665 | #endif | |
710 | } else | 666 | } else |
711 | BUG(); | 667 | BUG(); |
712 | 668 | ||
713 | printk(KERN_INFO "Processors: %d\n", num_processors); | 669 | if (!early) |
670 | printk(KERN_INFO "Processors: %d\n", num_processors); | ||
714 | /* | 671 | /* |
715 | * Only use the first configuration found. | 672 | * Only use the first configuration found. |
716 | */ | 673 | */ |
717 | } | 674 | } |
718 | 675 | ||
719 | static int __init smp_scan_config (unsigned long base, unsigned long length) | 676 | void __init early_get_smp_config(void) |
677 | { | ||
678 | __get_smp_config(1); | ||
679 | } | ||
680 | |||
681 | void __init get_smp_config(void) | ||
720 | { | 682 | { |
721 | unsigned long *bp = phys_to_virt(base); | 683 | __get_smp_config(0); |
684 | } | ||
685 | |||
686 | static int __init smp_scan_config(unsigned long base, unsigned long length, | ||
687 | unsigned reserve) | ||
688 | { | ||
689 | extern void __bad_mpf_size(void); | ||
690 | unsigned int *bp = phys_to_virt(base); | ||
722 | struct intel_mp_floating *mpf; | 691 | struct intel_mp_floating *mpf; |
723 | 692 | ||
724 | printk(KERN_INFO "Scan SMP from %p for %ld bytes.\n", bp,length); | 693 | Dprintk("Scan SMP from %p for %ld bytes.\n", bp, length); |
725 | if (sizeof(*mpf) != 16) | 694 | if (sizeof(*mpf) != 16) |
726 | printk("Error: MPF size\n"); | 695 | __bad_mpf_size(); |
727 | 696 | ||
728 | while (length > 0) { | 697 | while (length > 0) { |
729 | mpf = (struct intel_mp_floating *)bp; | 698 | mpf = (struct intel_mp_floating *)bp; |
730 | if ((*bp == SMP_MAGIC_IDENT) && | 699 | if ((*bp == SMP_MAGIC_IDENT) && |
731 | (mpf->mpf_length == 1) && | 700 | (mpf->mpf_length == 1) && |
732 | !mpf_checksum((unsigned char *)bp, 16) && | 701 | !mpf_checksum((unsigned char *)bp, 16) && |
733 | ((mpf->mpf_specification == 1) | 702 | ((mpf->mpf_specification == 1) |
734 | || (mpf->mpf_specification == 4)) ) { | 703 | || (mpf->mpf_specification == 4))) { |
735 | 704 | ||
736 | smp_found_config = 1; | 705 | smp_found_config = 1; |
706 | mpf_found = mpf; | ||
707 | #ifdef CONFIG_X86_32 | ||
737 | printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n", | 708 | printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n", |
738 | mpf, virt_to_phys(mpf)); | 709 | mpf, virt_to_phys(mpf)); |
739 | reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE, | 710 | reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE, |
740 | BOOTMEM_DEFAULT); | 711 | BOOTMEM_DEFAULT); |
741 | if (mpf->mpf_physptr) { | 712 | if (mpf->mpf_physptr) { |
@@ -756,8 +727,16 @@ static int __init smp_scan_config (unsigned long base, unsigned long length) | |||
756 | BOOTMEM_DEFAULT); | 727 | BOOTMEM_DEFAULT); |
757 | } | 728 | } |
758 | 729 | ||
759 | mpf_found = mpf; | 730 | #else |
760 | return 1; | 731 | if (!reserve) |
732 | return 1; | ||
733 | |||
734 | reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE); | ||
735 | if (mpf->mpf_physptr) | ||
736 | reserve_bootmem_generic(mpf->mpf_physptr, | ||
737 | PAGE_SIZE); | ||
738 | #endif | ||
739 | return 1; | ||
761 | } | 740 | } |
762 | bp += 4; | 741 | bp += 4; |
763 | length -= 16; | 742 | length -= 16; |
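The scan loop above walks memory in 16-byte steps (bp is an unsigned int pointer, so bp += 4) looking for the MP Floating Pointer Structure, whose signature is the ASCII string "_MP_". On little-endian x86 that is exactly the 32-bit compare against SMP_MAGIC_IDENT:

    /* "_MP_" read as a little-endian 32-bit word. */
    #define SMP_MAGIC_IDENT (('_' << 24) | ('P' << 16) | ('M' << 8) | '_')
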
@@ -765,7 +744,7 @@ static int __init smp_scan_config (unsigned long base, unsigned long length) | |||
765 | return 0; | 744 | return 0; |
766 | } | 745 | } |
767 | 746 | ||
768 | void __init find_smp_config (void) | 747 | static void __init __find_smp_config(unsigned reserve) |
769 | { | 748 | { |
770 | unsigned int address; | 749 | unsigned int address; |
771 | 750 | ||
@@ -777,9 +756,9 @@ void __init find_smp_config (void) | |||
777 | * 2) Scan the top 1K of base RAM | 756 | * 2) Scan the top 1K of base RAM |
778 | * 3) Scan the 64K of bios | 757 | * 3) Scan the 64K of bios |
779 | */ | 758 | */ |
780 | if (smp_scan_config(0x0,0x400) || | 759 | if (smp_scan_config(0x0, 0x400, reserve) || |
781 | smp_scan_config(639*0x400,0x400) || | 760 | smp_scan_config(639 * 0x400, 0x400, reserve) || |
782 | smp_scan_config(0xF0000,0x10000)) | 761 | smp_scan_config(0xF0000, 0x10000, reserve)) |
783 | return; | 762 | return; |
784 | /* | 763 | /* |
785 | * If it is an SMP machine we should know now, unless the | 764 | * If it is an SMP machine we should know now, unless the |
@@ -800,144 +779,113 @@ void __init find_smp_config (void) | |||
800 | 779 | ||
801 | address = get_bios_ebda(); | 780 | address = get_bios_ebda(); |
802 | if (address) | 781 | if (address) |
803 | smp_scan_config(address, 0x400); | 782 | smp_scan_config(address, 0x400, reserve); |
804 | } | 783 | } |
805 | 784 | ||
806 | int es7000_plat; | 785 | void __init early_find_smp_config(void) |
807 | |||
808 | /* -------------------------------------------------------------------------- | ||
809 | ACPI-based MP Configuration | ||
810 | -------------------------------------------------------------------------- */ | ||
811 | |||
812 | #ifdef CONFIG_ACPI | ||
813 | |||
814 | void __init mp_register_lapic_address(u64 address) | ||
815 | { | 786 | { |
816 | mp_lapic_addr = (unsigned long) address; | 787 | __find_smp_config(0); |
817 | |||
818 | set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); | ||
819 | |||
820 | if (boot_cpu_physical_apicid == -1U) | ||
821 | boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); | ||
822 | |||
823 | Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid); | ||
824 | } | 788 | } |
825 | 789 | ||
826 | void __cpuinit mp_register_lapic (u8 id, u8 enabled) | 790 | void __init find_smp_config(void) |
827 | { | 791 | { |
828 | struct mpc_config_processor processor; | 792 | __find_smp_config(1); |
829 | int boot_cpu = 0; | 793 | } |
830 | |||
831 | if (MAX_APICS - id <= 0) { | ||
832 | printk(KERN_WARNING "Processor #%d invalid (max %d)\n", | ||
833 | id, MAX_APICS); | ||
834 | return; | ||
835 | } | ||
836 | |||
837 | if (id == boot_cpu_physical_apicid) | ||
838 | boot_cpu = 1; | ||
839 | 794 | ||
840 | processor.mpc_type = MP_PROCESSOR; | 795 | /* -------------------------------------------------------------------------- |
841 | processor.mpc_apicid = id; | 796 | ACPI-based MP Configuration |
842 | processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR)); | 797 | -------------------------------------------------------------------------- */ |
843 | processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0); | ||
844 | processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0); | ||
845 | processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | | ||
846 | (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; | ||
847 | processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; | ||
848 | processor.mpc_reserved[0] = 0; | ||
849 | processor.mpc_reserved[1] = 0; | ||
850 | 798 | ||
851 | MP_processor_info(&processor); | 799 | #ifdef CONFIG_ACPI |
852 | } | ||
853 | 800 | ||
854 | #ifdef CONFIG_X86_IO_APIC | 801 | #ifdef CONFIG_X86_IO_APIC |
855 | 802 | ||
856 | #define MP_ISA_BUS 0 | 803 | #define MP_ISA_BUS 0 |
857 | #define MP_MAX_IOAPIC_PIN 127 | 804 | #define MP_MAX_IOAPIC_PIN 127 |
858 | 805 | ||
859 | static struct mp_ioapic_routing { | 806 | extern struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS]; |
860 | int apic_id; | ||
861 | int gsi_base; | ||
862 | int gsi_end; | ||
863 | u32 pin_programmed[4]; | ||
864 | } mp_ioapic_routing[MAX_IO_APICS]; | ||
865 | 807 | ||
866 | static int mp_find_ioapic (int gsi) | 808 | static int mp_find_ioapic(int gsi) |
867 | { | 809 | { |
868 | int i = 0; | 810 | int i = 0; |
869 | 811 | ||
870 | /* Find the IOAPIC that manages this GSI. */ | 812 | /* Find the IOAPIC that manages this GSI. */ |
871 | for (i = 0; i < nr_ioapics; i++) { | 813 | for (i = 0; i < nr_ioapics; i++) { |
872 | if ((gsi >= mp_ioapic_routing[i].gsi_base) | 814 | if ((gsi >= mp_ioapic_routing[i].gsi_base) |
873 | && (gsi <= mp_ioapic_routing[i].gsi_end)) | 815 | && (gsi <= mp_ioapic_routing[i].gsi_end)) |
874 | return i; | 816 | return i; |
875 | } | 817 | } |
876 | 818 | ||
877 | printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); | 819 | printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); |
878 | |||
879 | return -1; | 820 | return -1; |
880 | } | 821 | } |
881 | 822 | ||
882 | void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base) | 823 | static u8 uniq_ioapic_id(u8 id) |
824 | { | ||
825 | #ifdef CONFIG_X86_32 | ||
826 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && | ||
827 | !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | ||
828 | return io_apic_get_unique_id(nr_ioapics, id); | ||
829 | else | ||
830 | return id; | ||
831 | #else | ||
832 | int i; | ||
833 | DECLARE_BITMAP(used, 256); | ||
834 | bitmap_zero(used, 256); | ||
835 | for (i = 0; i < nr_ioapics; i++) { | ||
836 | struct mpc_config_ioapic *ia = &mp_ioapics[i]; | ||
837 | __set_bit(ia->mpc_apicid, used); | ||
838 | } | ||
839 | if (!test_bit(id, used)) | ||
840 | return id; | ||
841 | return find_first_zero_bit(used, 256); | ||
842 | #endif | ||
843 | } | ||
844 | |||
845 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) | ||
883 | { | 846 | { |
884 | int idx = 0; | 847 | int idx = 0; |
885 | int tmpid; | ||
886 | 848 | ||
887 | if (nr_ioapics >= MAX_IO_APICS) { | 849 | if (bad_ioapic(address)) |
888 | printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " | ||
889 | "(found %d)\n", MAX_IO_APICS, nr_ioapics); | ||
890 | panic("Recompile kernel with bigger MAX_IO_APICS!\n"); | ||
891 | } | ||
892 | if (!address) { | ||
893 | printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" | ||
894 | " found in MADT table, skipping!\n"); | ||
895 | return; | 850 | return; |
896 | } | ||
897 | 851 | ||
898 | idx = nr_ioapics++; | 852 | idx = nr_ioapics; |
899 | 853 | ||
900 | mp_ioapics[idx].mpc_type = MP_IOAPIC; | 854 | mp_ioapics[idx].mpc_type = MP_IOAPIC; |
901 | mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE; | 855 | mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE; |
902 | mp_ioapics[idx].mpc_apicaddr = address; | 856 | mp_ioapics[idx].mpc_apicaddr = address; |
903 | 857 | ||
904 | set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); | 858 | set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); |
905 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) | 859 | mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id); |
906 | && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | 860 | #ifdef CONFIG_X86_32 |
907 | tmpid = io_apic_get_unique_id(idx, id); | ||
908 | else | ||
909 | tmpid = id; | ||
910 | if (tmpid == -1) { | ||
911 | nr_ioapics--; | ||
912 | return; | ||
913 | } | ||
914 | mp_ioapics[idx].mpc_apicid = tmpid; | ||
915 | mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); | 861 | mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); |
916 | 862 | #else | |
917 | /* | 863 | mp_ioapics[idx].mpc_apicver = 0; |
864 | #endif | ||
865 | /* | ||
918 | * Build basic GSI lookup table to facilitate gsi->io_apic lookups | 866 | * Build basic GSI lookup table to facilitate gsi->io_apic lookups |
919 | * and to prevent reprogramming of IOAPIC pins (PCI GSIs). | 867 | * and to prevent reprogramming of IOAPIC pins (PCI GSIs). |
920 | */ | 868 | */ |
921 | mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid; | 869 | mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid; |
922 | mp_ioapic_routing[idx].gsi_base = gsi_base; | 870 | mp_ioapic_routing[idx].gsi_base = gsi_base; |
923 | mp_ioapic_routing[idx].gsi_end = gsi_base + | 871 | mp_ioapic_routing[idx].gsi_end = gsi_base + |
924 | io_apic_get_redir_entries(idx); | 872 | io_apic_get_redir_entries(idx); |
925 | 873 | ||
926 | printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " | 874 | printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " |
927 | "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, | 875 | "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, |
928 | mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr, | 876 | mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr, |
929 | mp_ioapic_routing[idx].gsi_base, | 877 | mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end); |
930 | mp_ioapic_routing[idx].gsi_end); | 878 | |
879 | nr_ioapics++; | ||
931 | } | 880 | } |
932 | 881 | ||
933 | void __init | 882 | void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) |
934 | mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) | ||
935 | { | 883 | { |
936 | struct mpc_config_intsrc intsrc; | 884 | struct mpc_config_intsrc intsrc; |
937 | int ioapic = -1; | 885 | int ioapic = -1; |
938 | int pin = -1; | 886 | int pin = -1; |
939 | 887 | ||
940 | /* | 888 | /* |
941 | * Convert 'gsi' to 'ioapic.pin'. | 889 | * Convert 'gsi' to 'ioapic.pin'. |
942 | */ | 890 | */ |
943 | ioapic = mp_find_ioapic(gsi); | 891 | ioapic = mp_find_ioapic(gsi); |
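With gsi_base recorded per I/O APIC in mp_register_ioapic(), the "Convert 'gsi' to 'ioapic.pin'" step above reduces to one lookup and one subtraction; a sketch with an assumed helper name:

    static int gsi_to_pin(u32 gsi)
    {
            int ioapic = mp_find_ioapic(gsi);

            if (ioapic < 0)
                    return -1;      /* no I/O APIC routes this GSI */
            return gsi - mp_ioapic_routing[ioapic].gsi_base;
    }
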
@@ -947,7 +895,7 @@ mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) | |||
947 | 895 | ||
948 | /* | 896 | /* |
949 | * TBD: This check is for faulty timer entries, where the override | 897 | * TBD: This check is for faulty timer entries, where the override |
950 | * erroneously sets the trigger to level, resulting in a HUGE | 898 | * erroneously sets the trigger to level, resulting in a HUGE |
951 | * increase of timer interrupts! | 899 | * increase of timer interrupts! |
952 | */ | 900 | */ |
953 | if ((bus_irq == 0) && (trigger == 3)) | 901 | if ((bus_irq == 0) && (trigger == 3)) |
@@ -957,13 +905,13 @@ mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) | |||
957 | intsrc.mpc_irqtype = mp_INT; | 905 | intsrc.mpc_irqtype = mp_INT; |
958 | intsrc.mpc_irqflag = (trigger << 2) | polarity; | 906 | intsrc.mpc_irqflag = (trigger << 2) | polarity; |
959 | intsrc.mpc_srcbus = MP_ISA_BUS; | 907 | intsrc.mpc_srcbus = MP_ISA_BUS; |
960 | intsrc.mpc_srcbusirq = bus_irq; /* IRQ */ | 908 | intsrc.mpc_srcbusirq = bus_irq; /* IRQ */ |
961 | intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */ | 909 | intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */ |
962 | intsrc.mpc_dstirq = pin; /* INTIN# */ | 910 | intsrc.mpc_dstirq = pin; /* INTIN# */ |
963 | 911 | ||
964 | Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", | 912 | Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", |
965 | intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, | 913 | intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, |
966 | (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, | 914 | (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, |
967 | intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); | 915 | intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); |
968 | 916 | ||
969 | mp_irqs[mp_irq_entries] = intsrc; | 917 | mp_irqs[mp_irq_entries] = intsrc; |
@@ -971,16 +919,21 @@ mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) | |||
971 | panic("Max # of irq sources exceeded!\n"); | 919 | panic("Max # of irq sources exceeded!\n"); |
972 | } | 920 | } |
973 | 921 | ||
974 | void __init mp_config_acpi_legacy_irqs (void) | 922 | int es7000_plat; |
923 | |||
924 | void __init mp_config_acpi_legacy_irqs(void) | ||
975 | { | 925 | { |
976 | struct mpc_config_intsrc intsrc; | 926 | struct mpc_config_intsrc intsrc; |
977 | int i = 0; | 927 | int i = 0; |
978 | int ioapic = -1; | 928 | int ioapic = -1; |
979 | 929 | ||
980 | /* | 930 | #if defined (CONFIG_MCA) || defined (CONFIG_EISA) |
931 | /* | ||
981 | * Fabricate the legacy ISA bus (bus #31). | 932 | * Fabricate the legacy ISA bus (bus #31). |
982 | */ | 933 | */ |
983 | mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; | 934 | mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; |
935 | #endif | ||
936 | set_bit(MP_ISA_BUS, mp_bus_not_pci); | ||
984 | Dprintk("Bus #%d is ISA\n", MP_ISA_BUS); | 937 | Dprintk("Bus #%d is ISA\n", MP_ISA_BUS); |
985 | 938 | ||
986 | /* | 939 | /* |
@@ -989,19 +942,20 @@ void __init mp_config_acpi_legacy_irqs (void) | |||
989 | if (es7000_plat == 1) | 942 | if (es7000_plat == 1) |
990 | return; | 943 | return; |
991 | 944 | ||
992 | /* | 945 | /* |
993 | * Locate the IOAPIC that manages the ISA IRQs (0-15). | 946 | * Locate the IOAPIC that manages the ISA IRQs (0-15). |
994 | */ | 947 | */ |
995 | ioapic = mp_find_ioapic(0); | 948 | ioapic = mp_find_ioapic(0); |
996 | if (ioapic < 0) | 949 | if (ioapic < 0) |
997 | return; | 950 | return; |
998 | 951 | ||
999 | intsrc.mpc_type = MP_INTSRC; | 952 | intsrc.mpc_type = MP_INTSRC; |
1000 | intsrc.mpc_irqflag = 0; /* Conforming */ | 953 | intsrc.mpc_irqflag = 0; /* Conforming */ |
1001 | intsrc.mpc_srcbus = MP_ISA_BUS; | 954 | intsrc.mpc_srcbus = MP_ISA_BUS; |
955 | #ifdef CONFIG_X86_IO_APIC | ||
1002 | intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; | 956 | intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; |
1003 | 957 | #endif | |
1004 | /* | 958 | /* |
1005 | * Use the default configuration for the IRQs 0-15. Unless | 959 | * Use the default configuration for the IRQs 0-15. Unless |
1006 | * overridden by (MADT) interrupt source override entries. | 960 | * overridden by (MADT) interrupt source override entries. |
1007 | */ | 961 | */ |
@@ -1012,28 +966,29 @@ void __init mp_config_acpi_legacy_irqs (void) | |||
1012 | struct mpc_config_intsrc *irq = mp_irqs + idx; | 966 | struct mpc_config_intsrc *irq = mp_irqs + idx; |
1013 | 967 | ||
1014 | /* Do we already have a mapping for this ISA IRQ? */ | 968 | /* Do we already have a mapping for this ISA IRQ? */ |
1015 | if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i) | 969 | if (irq->mpc_srcbus == MP_ISA_BUS |
970 | && irq->mpc_srcbusirq == i) | ||
1016 | break; | 971 | break; |
1017 | 972 | ||
1018 | /* Do we already have a mapping for this IOAPIC pin */ | 973 | /* Do we already have a mapping for this IOAPIC pin */ |
1019 | if ((irq->mpc_dstapic == intsrc.mpc_dstapic) && | 974 | if ((irq->mpc_dstapic == intsrc.mpc_dstapic) && |
1020 | (irq->mpc_dstirq == i)) | 975 | (irq->mpc_dstirq == i)) |
1021 | break; | 976 | break; |
1022 | } | 977 | } |
1023 | 978 | ||
1024 | if (idx != mp_irq_entries) { | 979 | if (idx != mp_irq_entries) { |
1025 | printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i); | 980 | printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i); |
1026 | continue; /* IRQ already used */ | 981 | continue; /* IRQ already used */ |
1027 | } | 982 | } |
1028 | 983 | ||
1029 | intsrc.mpc_irqtype = mp_INT; | 984 | intsrc.mpc_irqtype = mp_INT; |
1030 | intsrc.mpc_srcbusirq = i; /* Identity mapped */ | 985 | intsrc.mpc_srcbusirq = i; /* Identity mapped */ |
1031 | intsrc.mpc_dstirq = i; | 986 | intsrc.mpc_dstirq = i; |
1032 | 987 | ||
1033 | Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, " | 988 | Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, " |
1034 | "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, | 989 | "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, |
1035 | (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, | 990 | (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, |
1036 | intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, | 991 | intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, |
1037 | intsrc.mpc_dstirq); | 992 | intsrc.mpc_dstirq); |
1038 | 993 | ||
1039 | mp_irqs[mp_irq_entries] = intsrc; | 994 | mp_irqs[mp_irq_entries] = intsrc; |
@@ -1042,21 +997,27 @@ void __init mp_config_acpi_legacy_irqs (void) | |||
1042 | } | 997 | } |
1043 | } | 998 | } |
1044 | 999 | ||
1045 | #define MAX_GSI_NUM 4096 | ||
1046 | #define IRQ_COMPRESSION_START 64 | ||
1047 | |||
1048 | int mp_register_gsi(u32 gsi, int triggering, int polarity) | 1000 | int mp_register_gsi(u32 gsi, int triggering, int polarity) |
1049 | { | 1001 | { |
1050 | int ioapic = -1; | 1002 | int ioapic = -1; |
1051 | int ioapic_pin = 0; | 1003 | int ioapic_pin = 0; |
1052 | int idx, bit = 0; | 1004 | int idx, bit = 0; |
1005 | #ifdef CONFIG_X86_32 | ||
1006 | #define MAX_GSI_NUM 4096 | ||
1007 | #define IRQ_COMPRESSION_START 64 | ||
1008 | |||
1053 | static int pci_irq = IRQ_COMPRESSION_START; | 1009 | static int pci_irq = IRQ_COMPRESSION_START; |
1054 | /* | 1010 | /* |
1055 | * Mapping between Global System Interrupts, which | 1011 | * Mapping between Global System Interrupts, which |
1056 | * represent all possible interrupts, and IRQs | 1012 | * represent all possible interrupts, and IRQs |
1057 | * assigned to actual devices. | 1013 | * assigned to actual devices. |
1058 | */ | 1014 | */ |
1059 | static int gsi_to_irq[MAX_GSI_NUM]; | 1015 | static int gsi_to_irq[MAX_GSI_NUM]; |
1016 | #else | ||
1017 | |||
1018 | if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC) | ||
1019 | return gsi; | ||
1020 | #endif | ||
1060 | 1021 | ||
1061 | /* Don't set up the ACPI SCI because it's already set up */ | 1022 | /* Don't set up the ACPI SCI because it's already set up */ |
1062 | if (acpi_gbl_FADT.sci_interrupt == gsi) | 1023 | if (acpi_gbl_FADT.sci_interrupt == gsi) |
@@ -1070,11 +1031,13 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity) | |||
1070 | 1031 | ||
1071 | ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base; | 1032 | ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base; |
1072 | 1033 | ||
1034 | #ifdef CONFIG_X86_32 | ||
1073 | if (ioapic_renumber_irq) | 1035 | if (ioapic_renumber_irq) |
1074 | gsi = ioapic_renumber_irq(ioapic, gsi); | 1036 | gsi = ioapic_renumber_irq(ioapic, gsi); |
1037 | #endif | ||
1075 | 1038 | ||
1076 | /* | 1039 | /* |
1077 | * Avoid pin reprogramming. PRTs typically include entries | 1040 | * Avoid pin reprogramming. PRTs typically include entries |
1078 | * with redundant pin->gsi mappings (but unique PCI devices); | 1041 | * with redundant pin->gsi mappings (but unique PCI devices); |
1079 | * we only program the IOAPIC on the first. | 1042 | * we only program the IOAPIC on the first. |
1080 | */ | 1043 | */ |
@@ -1082,23 +1045,27 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity) | |||
1082 | idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32); | 1045 | idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32); |
1083 | if (idx > 3) { | 1046 | if (idx > 3) { |
1084 | printk(KERN_ERR "Invalid reference to IOAPIC pin " | 1047 | printk(KERN_ERR "Invalid reference to IOAPIC pin " |
1085 | "%d-%d\n", mp_ioapic_routing[ioapic].apic_id, | 1048 | "%d-%d\n", mp_ioapic_routing[ioapic].apic_id, |
1086 | ioapic_pin); | 1049 | ioapic_pin); |
1087 | return gsi; | 1050 | return gsi; |
1088 | } | 1051 | } |
1089 | if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) { | 1052 | if ((1 << bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) { |
1090 | Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n", | 1053 | Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n", |
1091 | mp_ioapic_routing[ioapic].apic_id, ioapic_pin); | 1054 | mp_ioapic_routing[ioapic].apic_id, ioapic_pin); |
1055 | #ifdef CONFIG_X86_32 | ||
1092 | return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]); | 1056 | return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]); |
1057 | #else | ||
1058 | return gsi; | ||
1059 | #endif | ||
1093 | } | 1060 | } |
1094 | 1061 | ||
1095 | mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit); | 1062 | mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1 << bit); |
1096 | 1063 | #ifdef CONFIG_X86_32 | |
1097 | /* | 1064 | /* |
1098 | * For GSI >= 64, use IRQ compression | 1065 | * For GSI >= 64, use IRQ compression |
1099 | */ | 1066 | */ |
1100 | if ((gsi >= IRQ_COMPRESSION_START) | 1067 | if ((gsi >= IRQ_COMPRESSION_START) |
1101 | && (triggering == ACPI_LEVEL_SENSITIVE)) { | 1068 | && (triggering == ACPI_LEVEL_SENSITIVE)) { |
1102 | /* | 1069 | /* |
1103 | * For PCI devices assign IRQs in order, avoiding gaps | 1070 | * For PCI devices assign IRQs in order, avoiding gaps |
1104 | * due to unused I/O APIC pins. | 1071 | * due to unused I/O APIC pins. |
@@ -1115,8 +1082,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity) | |||
1115 | * So test for this condition, and if necessary, avoid | 1082 | * So test for this condition, and if necessary, avoid |
1116 | * the pin collision. | 1083 | * the pin collision. |
1117 | */ | 1084 | */ |
1118 | if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0)) | 1085 | gsi = pci_irq++; |
1119 | gsi = pci_irq++; | ||
1120 | /* | 1086 | /* |
1121 | * Don't assign IRQ used by ACPI SCI | 1087 | * Don't assign IRQ used by ACPI SCI |
1122 | */ | 1088 | */ |
@@ -1128,10 +1094,10 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity) | |||
1128 | return gsi; | 1094 | return gsi; |
1129 | } | 1095 | } |
1130 | } | 1096 | } |
1131 | 1097 | #endif | |
1132 | io_apic_set_pci_routing(ioapic, ioapic_pin, gsi, | 1098 | io_apic_set_pci_routing(ioapic, ioapic_pin, gsi, |
1133 | triggering == ACPI_EDGE_SENSITIVE ? 0 : 1, | 1099 | triggering == ACPI_EDGE_SENSITIVE ? 0 : 1, |
1134 | polarity == ACPI_ACTIVE_HIGH ? 0 : 1); | 1100 | polarity == ACPI_ACTIVE_HIGH ? 0 : 1); |
1135 | return gsi; | 1101 | return gsi; |
1136 | } | 1102 | } |
1137 | 1103 | ||
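Review note on the mp_register_gsi() hunks above: the IRQ-compression path is now compiled only on 32-bit. Level-triggered GSIs at or above IRQ_COMPRESSION_START (64) are remapped to consecutive IRQ numbers and remembered in gsi_to_irq[], so sparse I/O APIC pins leave no holes in the IRQ space; 64-bit just returns the GSI. A minimal user-space sketch of that mapping, using the constants from the hunk and omitting the ACPI SCI special case:

#include <stdio.h>

#define MAX_GSI_NUM 4096
#define IRQ_COMPRESSION_START 64

static int gsi_to_irq[MAX_GSI_NUM];
static int pci_irq = IRQ_COMPRESSION_START;

/* Level-triggered GSIs >= 64 get the next sequential IRQ so unused
 * I/O APIC pins burn no IRQ slots; low GSIs stay identity mapped. */
static int compress_gsi(unsigned int gsi, int level_triggered)
{
	if (gsi < IRQ_COMPRESSION_START || !level_triggered)
		return (int)gsi;
	gsi_to_irq[gsi] = pci_irq;	/* remember the assignment */
	return pci_irq++;
}

int main(void)
{
	printf("GSI 72  -> IRQ %d\n", compress_gsi(72, 1));	/* 64 */
	printf("GSI 200 -> IRQ %d\n", compress_gsi(200, 1));	/* 65 */
	printf("GSI 5   -> IRQ %d\n", compress_gsi(5, 0));	/* 5  */
	return 0;
}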
diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c deleted file mode 100644 index 72ab1403fed7..000000000000 --- a/arch/x86/kernel/mpparse_64.c +++ /dev/null | |||
@@ -1,867 +0,0 @@ | |||
1 | /* | ||
2 | * Intel Multiprocessor Specification 1.1 and 1.4 | ||
3 | * compliant MP-table parsing routines. | ||
4 | * | ||
5 | * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> | ||
6 | * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com> | ||
7 | * | ||
8 | * Fixes | ||
9 | * Erich Boleyn : MP v1.4 and additional changes. | ||
10 | * Alan Cox : Added EBDA scanning | ||
11 | * Ingo Molnar : various cleanups and rewrites | ||
12 | * Maciej W. Rozycki: Bits for default MP configurations | ||
13 | * Paul Diefenbaugh: Added full ACPI support | ||
14 | */ | ||
15 | |||
16 | #include <linux/mm.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/delay.h> | ||
19 | #include <linux/bootmem.h> | ||
20 | #include <linux/kernel_stat.h> | ||
21 | #include <linux/mc146818rtc.h> | ||
22 | #include <linux/acpi.h> | ||
23 | #include <linux/module.h> | ||
24 | |||
25 | #include <asm/smp.h> | ||
26 | #include <asm/mtrr.h> | ||
27 | #include <asm/mpspec.h> | ||
28 | #include <asm/pgalloc.h> | ||
29 | #include <asm/io_apic.h> | ||
30 | #include <asm/proto.h> | ||
31 | #include <asm/acpi.h> | ||
32 | |||
33 | /* Have we found an MP table */ | ||
34 | int smp_found_config; | ||
35 | |||
36 | /* | ||
37 | * Various Linux-internal data structures created from the | ||
38 | * MP-table. | ||
39 | */ | ||
40 | DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); | ||
41 | int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 }; | ||
42 | |||
43 | static int mp_current_pci_id = 0; | ||
44 | /* I/O APIC entries */ | ||
45 | struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; | ||
46 | |||
47 | /* # of MP IRQ source entries */ | ||
48 | struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; | ||
49 | |||
50 | /* MP IRQ source entries */ | ||
51 | int mp_irq_entries; | ||
52 | |||
53 | int nr_ioapics; | ||
54 | unsigned long mp_lapic_addr = 0; | ||
55 | |||
56 | |||
57 | |||
58 | /* Processor that is doing the boot up */ | ||
59 | unsigned int boot_cpu_id = -1U; | ||
60 | EXPORT_SYMBOL(boot_cpu_id); | ||
61 | |||
62 | /* Internal processor count */ | ||
63 | unsigned int num_processors; | ||
64 | |||
65 | unsigned disabled_cpus __cpuinitdata; | ||
66 | |||
67 | /* Bitmask of physically existing CPUs */ | ||
68 | physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE; | ||
69 | |||
70 | u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata | ||
71 | = { [0 ... NR_CPUS-1] = BAD_APICID }; | ||
72 | void *x86_bios_cpu_apicid_early_ptr; | ||
73 | DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID; | ||
74 | EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid); | ||
75 | |||
76 | |||
77 | /* | ||
78 | * Intel MP BIOS table parsing routines: | ||
79 | */ | ||
80 | |||
81 | /* | ||
82 | * Checksum an MP configuration block. | ||
83 | */ | ||
84 | |||
85 | static int __init mpf_checksum(unsigned char *mp, int len) | ||
86 | { | ||
87 | int sum = 0; | ||
88 | |||
89 | while (len--) | ||
90 | sum += *mp++; | ||
91 | |||
92 | return sum & 0xFF; | ||
93 | } | ||
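mpf_checksum() accepts a block whose byte sum is zero modulo 256; the BIOS chooses the table's checksum field to make that true. A small stand-alone sketch of how such a field would be computed (the slot offset is hypothetical, only the arithmetic matters):

#include <stdio.h>

/* Pick the checksum byte so the byte sum of the whole block,
 * checksum included, is 0 modulo 256: the condition that
 * mpf_checksum() above tests for. */
static unsigned char make_checksum(const unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;
	return (unsigned char)(0x100 - (sum & 0xFF));
}

int main(void)
{
	unsigned char block[16] = { '_', 'M', 'P', '_', 1, 4 };

	block[10] = 0;		/* hypothetical checksum slot, zeroed first */
	block[10] = make_checksum(block, sizeof(block));
	printf("checksum byte = 0x%02x\n", block[10]);
	return 0;
}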
94 | |||
95 | static void __cpuinit MP_processor_info(struct mpc_config_processor *m) | ||
96 | { | ||
97 | int cpu; | ||
98 | cpumask_t tmp_map; | ||
99 | char *bootup_cpu = ""; | ||
100 | |||
101 | if (!(m->mpc_cpuflag & CPU_ENABLED)) { | ||
102 | disabled_cpus++; | ||
103 | return; | ||
104 | } | ||
105 | if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { | ||
106 | bootup_cpu = " (Bootup-CPU)"; | ||
107 | boot_cpu_id = m->mpc_apicid; | ||
108 | } | ||
109 | |||
110 | printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu); | ||
111 | |||
112 | if (num_processors >= NR_CPUS) { | ||
113 | printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." | ||
114 | " Processor ignored.\n", NR_CPUS); | ||
115 | return; | ||
116 | } | ||
117 | |||
118 | num_processors++; | ||
119 | cpus_complement(tmp_map, cpu_present_map); | ||
120 | cpu = first_cpu(tmp_map); | ||
121 | |||
122 | physid_set(m->mpc_apicid, phys_cpu_present_map); | ||
123 | if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { | ||
124 | /* | ||
125 | * x86_bios_cpu_apicid is required to have processors listed | ||
126 | * in same order as logical cpu numbers. Hence the first | ||
127 | * entry is BSP, and so on. | ||
128 | */ | ||
129 | cpu = 0; | ||
130 | } | ||
131 | /* are we being called early in kernel startup? */ | ||
132 | if (x86_cpu_to_apicid_early_ptr) { | ||
133 | u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr; | ||
134 | u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr; | ||
135 | |||
136 | cpu_to_apicid[cpu] = m->mpc_apicid; | ||
137 | bios_cpu_apicid[cpu] = m->mpc_apicid; | ||
138 | } else { | ||
139 | per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid; | ||
140 | per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid; | ||
141 | } | ||
142 | |||
143 | cpu_set(cpu, cpu_possible_map); | ||
144 | cpu_set(cpu, cpu_present_map); | ||
145 | } | ||
146 | |||
147 | static void __init MP_bus_info (struct mpc_config_bus *m) | ||
148 | { | ||
149 | char str[7]; | ||
150 | |||
151 | memcpy(str, m->mpc_bustype, 6); | ||
152 | str[6] = 0; | ||
153 | Dprintk("Bus #%d is %s\n", m->mpc_busid, str); | ||
154 | |||
155 | if (strncmp(str, "ISA", 3) == 0) { | ||
156 | set_bit(m->mpc_busid, mp_bus_not_pci); | ||
157 | } else if (strncmp(str, "PCI", 3) == 0) { | ||
158 | clear_bit(m->mpc_busid, mp_bus_not_pci); | ||
159 | mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id; | ||
160 | mp_current_pci_id++; | ||
161 | } else { | ||
162 | printk(KERN_ERR "Unknown bustype %s\n", str); | ||
163 | } | ||
164 | } | ||
165 | |||
166 | static int bad_ioapic(unsigned long address) | ||
167 | { | ||
168 | if (nr_ioapics >= MAX_IO_APICS) { | ||
169 | printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " | ||
170 | "(found %d)\n", MAX_IO_APICS, nr_ioapics); | ||
171 | panic("Recompile kernel with bigger MAX_IO_APICS!\n"); | ||
172 | } | ||
173 | if (!address) { | ||
174 | printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" | ||
175 | " found in table, skipping!\n"); | ||
176 | return 1; | ||
177 | } | ||
178 | return 0; | ||
179 | } | ||
180 | |||
181 | static void __init MP_ioapic_info (struct mpc_config_ioapic *m) | ||
182 | { | ||
183 | if (!(m->mpc_flags & MPC_APIC_USABLE)) | ||
184 | return; | ||
185 | |||
186 | printk("I/O APIC #%d at 0x%X.\n", | ||
187 | m->mpc_apicid, m->mpc_apicaddr); | ||
188 | |||
189 | if (bad_ioapic(m->mpc_apicaddr)) | ||
190 | return; | ||
191 | |||
192 | mp_ioapics[nr_ioapics] = *m; | ||
193 | nr_ioapics++; | ||
194 | } | ||
195 | |||
196 | static void __init MP_intsrc_info (struct mpc_config_intsrc *m) | ||
197 | { | ||
198 | mp_irqs [mp_irq_entries] = *m; | ||
199 | Dprintk("Int: type %d, pol %d, trig %d, bus %d," | ||
200 | " IRQ %02x, APIC ID %x, APIC INT %02x\n", | ||
201 | m->mpc_irqtype, m->mpc_irqflag & 3, | ||
202 | (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, | ||
203 | m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq); | ||
204 | if (++mp_irq_entries >= MAX_IRQ_SOURCES) | ||
205 | panic("Max # of irq sources exceeded!!\n"); | ||
206 | } | ||
207 | |||
208 | static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m) | ||
209 | { | ||
210 | Dprintk("Lint: type %d, pol %d, trig %d, bus %d," | ||
211 | " IRQ %02x, APIC ID %x, APIC LINT %02x\n", | ||
212 | m->mpc_irqtype, m->mpc_irqflag & 3, | ||
213 | (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid, | ||
214 | m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint); | ||
215 | } | ||
216 | |||
217 | /* | ||
218 | * Read/parse the MPC | ||
219 | */ | ||
220 | |||
221 | static int __init smp_read_mpc(struct mp_config_table *mpc) | ||
222 | { | ||
223 | char str[16]; | ||
224 | int count=sizeof(*mpc); | ||
225 | unsigned char *mpt=((unsigned char *)mpc)+count; | ||
226 | |||
227 | if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) { | ||
228 | printk("MPTABLE: bad signature [%c%c%c%c]!\n", | ||
229 | mpc->mpc_signature[0], | ||
230 | mpc->mpc_signature[1], | ||
231 | mpc->mpc_signature[2], | ||
232 | mpc->mpc_signature[3]); | ||
233 | return 0; | ||
234 | } | ||
235 | if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) { | ||
236 | printk("MPTABLE: checksum error!\n"); | ||
237 | return 0; | ||
238 | } | ||
239 | if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) { | ||
240 | printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n", | ||
241 | mpc->mpc_spec); | ||
242 | return 0; | ||
243 | } | ||
244 | if (!mpc->mpc_lapic) { | ||
245 | printk(KERN_ERR "MPTABLE: null local APIC address!\n"); | ||
246 | return 0; | ||
247 | } | ||
248 | memcpy(str,mpc->mpc_oem,8); | ||
249 | str[8] = 0; | ||
250 | printk(KERN_INFO "MPTABLE: OEM ID: %s ",str); | ||
251 | |||
252 | memcpy(str,mpc->mpc_productid,12); | ||
253 | str[12] = 0; | ||
254 | printk("MPTABLE: Product ID: %s ",str); | ||
255 | |||
256 | printk("MPTABLE: APIC at: 0x%X\n",mpc->mpc_lapic); | ||
257 | |||
258 | /* save the local APIC address, it might be non-default */ | ||
259 | if (!acpi_lapic) | ||
260 | mp_lapic_addr = mpc->mpc_lapic; | ||
261 | |||
262 | /* | ||
263 | * Now process the configuration blocks. | ||
264 | */ | ||
265 | while (count < mpc->mpc_length) { | ||
266 | switch(*mpt) { | ||
267 | case MP_PROCESSOR: | ||
268 | { | ||
269 | struct mpc_config_processor *m= | ||
270 | (struct mpc_config_processor *)mpt; | ||
271 | if (!acpi_lapic) | ||
272 | MP_processor_info(m); | ||
273 | mpt += sizeof(*m); | ||
274 | count += sizeof(*m); | ||
275 | break; | ||
276 | } | ||
277 | case MP_BUS: | ||
278 | { | ||
279 | struct mpc_config_bus *m= | ||
280 | (struct mpc_config_bus *)mpt; | ||
281 | MP_bus_info(m); | ||
282 | mpt += sizeof(*m); | ||
283 | count += sizeof(*m); | ||
284 | break; | ||
285 | } | ||
286 | case MP_IOAPIC: | ||
287 | { | ||
288 | struct mpc_config_ioapic *m= | ||
289 | (struct mpc_config_ioapic *)mpt; | ||
290 | MP_ioapic_info(m); | ||
291 | mpt += sizeof(*m); | ||
292 | count += sizeof(*m); | ||
293 | break; | ||
294 | } | ||
295 | case MP_INTSRC: | ||
296 | { | ||
297 | struct mpc_config_intsrc *m= | ||
298 | (struct mpc_config_intsrc *)mpt; | ||
299 | |||
300 | MP_intsrc_info(m); | ||
301 | mpt += sizeof(*m); | ||
302 | count += sizeof(*m); | ||
303 | break; | ||
304 | } | ||
305 | case MP_LINTSRC: | ||
306 | { | ||
307 | struct mpc_config_lintsrc *m= | ||
308 | (struct mpc_config_lintsrc *)mpt; | ||
309 | MP_lintsrc_info(m); | ||
310 | mpt += sizeof(*m); | ||
311 | count += sizeof(*m); | ||
312 | break; | ||
313 | } | ||
314 | } | ||
315 | } | ||
316 | setup_apic_routing(); | ||
317 | if (!num_processors) | ||
318 | printk(KERN_ERR "MPTABLE: no processors registered!\n"); | ||
319 | return num_processors; | ||
320 | } | ||
321 | |||
322 | static int __init ELCR_trigger(unsigned int irq) | ||
323 | { | ||
324 | unsigned int port; | ||
325 | |||
326 | port = 0x4d0 + (irq >> 3); | ||
327 | return (inb(port) >> (irq & 7)) & 1; | ||
328 | } | ||
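ELCR_trigger() indexes the two ELCR registers: port 0x4d0 carries IRQs 0-7 and 0x4d1 carries IRQs 8-15, one bit per IRQ, a set bit meaning level-triggered. For IRQ 9 that is port 0x4d0 + (9 >> 3) = 0x4d1, bit 9 & 7 = 1. A user-space check of the address arithmetic (no real port I/O):

#include <stdio.h>

/* Same addressing as ELCR_trigger() above, minus the inb(). */
static void elcr_decode(unsigned int irq)
{
	unsigned int port = 0x4d0 + (irq >> 3);
	unsigned int bit  = irq & 7;

	printf("IRQ %2u -> ELCR port 0x%x, bit %u\n", irq, port, bit);
}

int main(void)
{
	elcr_decode(3);		/* port 0x4d0, bit 3 */
	elcr_decode(9);		/* port 0x4d1, bit 1 */
	return 0;
}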
329 | |||
330 | static void __init construct_default_ioirq_mptable(int mpc_default_type) | ||
331 | { | ||
332 | struct mpc_config_intsrc intsrc; | ||
333 | int i; | ||
334 | int ELCR_fallback = 0; | ||
335 | |||
336 | intsrc.mpc_type = MP_INTSRC; | ||
337 | intsrc.mpc_irqflag = 0; /* conforming */ | ||
338 | intsrc.mpc_srcbus = 0; | ||
339 | intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid; | ||
340 | |||
341 | intsrc.mpc_irqtype = mp_INT; | ||
342 | |||
343 | /* | ||
344 | * If true, we have an ISA/PCI system with no IRQ entries | ||
345 | * in the MP table. To prevent the PCI interrupts from being set up | ||
346 | * incorrectly, we try to use the ELCR. The sanity check to see if | ||
347 | * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can | ||
348 | * never be level sensitive, so we simply see if the ELCR agrees. | ||
349 | * If it does, we assume it's valid. | ||
350 | */ | ||
351 | if (mpc_default_type == 5) { | ||
352 | printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n"); | ||
353 | |||
354 | if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13)) | ||
355 | printk(KERN_ERR "ELCR contains invalid data... not using ELCR\n"); | ||
356 | else { | ||
357 | printk(KERN_INFO "Using ELCR to identify PCI interrupts\n"); | ||
358 | ELCR_fallback = 1; | ||
359 | } | ||
360 | } | ||
361 | |||
362 | for (i = 0; i < 16; i++) { | ||
363 | switch (mpc_default_type) { | ||
364 | case 2: | ||
365 | if (i == 0 || i == 13) | ||
366 | continue; /* IRQ0 & IRQ13 not connected */ | ||
367 | /* fall through */ | ||
368 | default: | ||
369 | if (i == 2) | ||
370 | continue; /* IRQ2 is never connected */ | ||
371 | } | ||
372 | |||
373 | if (ELCR_fallback) { | ||
374 | /* | ||
375 | * If the ELCR indicates a level-sensitive interrupt, we | ||
376 | * copy that information over to the MP table in the | ||
377 | * irqflag field (level sensitive, active high polarity). | ||
378 | */ | ||
379 | if (ELCR_trigger(i)) | ||
380 | intsrc.mpc_irqflag = 13; | ||
381 | else | ||
382 | intsrc.mpc_irqflag = 0; | ||
383 | } | ||
384 | |||
385 | intsrc.mpc_srcbusirq = i; | ||
386 | intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */ | ||
387 | MP_intsrc_info(&intsrc); | ||
388 | } | ||
389 | |||
390 | intsrc.mpc_irqtype = mp_ExtINT; | ||
391 | intsrc.mpc_srcbusirq = 0; | ||
392 | intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */ | ||
393 | MP_intsrc_info(&intsrc); | ||
394 | } | ||
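The literal 13 stored into mpc_irqflag in the ELCR fallback decodes exactly as the comment promises: polarity lives in bits 1:0 and trigger mode in bits 3:2, so (3 << 2) | 1 = 13 means level sensitive, active high. A quick check, matching the Dprintk decode used throughout this file:

#include <stdio.h>

int main(void)
{
	unsigned int polarity = 1;	/* active high */
	unsigned int trigger  = 3;	/* level sensitive */
	unsigned int irqflag  = (trigger << 2) | polarity;

	/* Prints 13, the literal used in the ELCR fallback; the
	 * Dprintk calls decode it as irqflag & 3 (polarity) and
	 * (irqflag >> 2) & 3 (trigger). */
	printf("irqflag = %u\n", irqflag);
	return 0;
}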
395 | |||
396 | static inline void __init construct_default_ISA_mptable(int mpc_default_type) | ||
397 | { | ||
398 | struct mpc_config_processor processor; | ||
399 | struct mpc_config_bus bus; | ||
400 | struct mpc_config_ioapic ioapic; | ||
401 | struct mpc_config_lintsrc lintsrc; | ||
402 | int linttypes[2] = { mp_ExtINT, mp_NMI }; | ||
403 | int i; | ||
404 | |||
405 | /* | ||
406 | * local APIC has default address | ||
407 | */ | ||
408 | mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; | ||
409 | |||
410 | /* | ||
411 | * 2 CPUs, numbered 0 & 1. | ||
412 | */ | ||
413 | processor.mpc_type = MP_PROCESSOR; | ||
414 | processor.mpc_apicver = 0; | ||
415 | processor.mpc_cpuflag = CPU_ENABLED; | ||
416 | processor.mpc_cpufeature = 0; | ||
417 | processor.mpc_featureflag = 0; | ||
418 | processor.mpc_reserved[0] = 0; | ||
419 | processor.mpc_reserved[1] = 0; | ||
420 | for (i = 0; i < 2; i++) { | ||
421 | processor.mpc_apicid = i; | ||
422 | MP_processor_info(&processor); | ||
423 | } | ||
424 | |||
425 | bus.mpc_type = MP_BUS; | ||
426 | bus.mpc_busid = 0; | ||
427 | switch (mpc_default_type) { | ||
428 | default: | ||
429 | printk(KERN_ERR "???\nUnknown standard configuration %d\n", | ||
430 | mpc_default_type); | ||
431 | /* fall through */ | ||
432 | case 1: | ||
433 | case 5: | ||
434 | memcpy(bus.mpc_bustype, "ISA ", 6); | ||
435 | break; | ||
436 | } | ||
437 | MP_bus_info(&bus); | ||
438 | if (mpc_default_type > 4) { | ||
439 | bus.mpc_busid = 1; | ||
440 | memcpy(bus.mpc_bustype, "PCI ", 6); | ||
441 | MP_bus_info(&bus); | ||
442 | } | ||
443 | |||
444 | ioapic.mpc_type = MP_IOAPIC; | ||
445 | ioapic.mpc_apicid = 2; | ||
446 | ioapic.mpc_apicver = 0; | ||
447 | ioapic.mpc_flags = MPC_APIC_USABLE; | ||
448 | ioapic.mpc_apicaddr = 0xFEC00000; | ||
449 | MP_ioapic_info(&ioapic); | ||
450 | |||
451 | /* | ||
452 | * We set up most of the low 16 IO-APIC pins according to MPS rules. | ||
453 | */ | ||
454 | construct_default_ioirq_mptable(mpc_default_type); | ||
455 | |||
456 | lintsrc.mpc_type = MP_LINTSRC; | ||
457 | lintsrc.mpc_irqflag = 0; /* conforming */ | ||
458 | lintsrc.mpc_srcbusid = 0; | ||
459 | lintsrc.mpc_srcbusirq = 0; | ||
460 | lintsrc.mpc_destapic = MP_APIC_ALL; | ||
461 | for (i = 0; i < 2; i++) { | ||
462 | lintsrc.mpc_irqtype = linttypes[i]; | ||
463 | lintsrc.mpc_destapiclint = i; | ||
464 | MP_lintsrc_info(&lintsrc); | ||
465 | } | ||
466 | } | ||
467 | |||
468 | static struct intel_mp_floating *mpf_found; | ||
469 | |||
470 | /* | ||
471 | * Scan the memory blocks for an SMP configuration block. | ||
472 | */ | ||
473 | void __init get_smp_config (void) | ||
474 | { | ||
475 | struct intel_mp_floating *mpf = mpf_found; | ||
476 | |||
477 | /* | ||
478 | * ACPI supports both logical (e.g. Hyper-Threading) and physical | ||
479 | * processors, where MPS only supports physical. | ||
480 | */ | ||
481 | if (acpi_lapic && acpi_ioapic) { | ||
482 | printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n"); | ||
483 | return; | ||
484 | } | ||
485 | else if (acpi_lapic) | ||
486 | printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n"); | ||
487 | |||
488 | printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); | ||
489 | |||
490 | /* | ||
491 | * Now see if we need to read further. | ||
492 | */ | ||
493 | if (mpf->mpf_feature1 != 0) { | ||
494 | |||
495 | printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1); | ||
496 | construct_default_ISA_mptable(mpf->mpf_feature1); | ||
497 | |||
498 | } else if (mpf->mpf_physptr) { | ||
499 | |||
500 | /* | ||
501 | * Read the physical hardware table. Anything here will | ||
502 | * override the defaults. | ||
503 | */ | ||
504 | if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) { | ||
505 | smp_found_config = 0; | ||
506 | printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"); | ||
507 | printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n"); | ||
508 | return; | ||
509 | } | ||
510 | /* | ||
511 | * If there are no explicit MP IRQ entries, then we are | ||
512 | * broken. We set up most of the low 16 IO-APIC pins to | ||
513 | * ISA defaults and hope it will work. | ||
514 | */ | ||
515 | if (!mp_irq_entries) { | ||
516 | struct mpc_config_bus bus; | ||
517 | |||
518 | printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n"); | ||
519 | |||
520 | bus.mpc_type = MP_BUS; | ||
521 | bus.mpc_busid = 0; | ||
522 | memcpy(bus.mpc_bustype, "ISA ", 6); | ||
523 | MP_bus_info(&bus); | ||
524 | |||
525 | construct_default_ioirq_mptable(0); | ||
526 | } | ||
527 | |||
528 | } else | ||
529 | BUG(); | ||
530 | |||
531 | printk(KERN_INFO "Processors: %d\n", num_processors); | ||
532 | /* | ||
533 | * Only use the first configuration found. | ||
534 | */ | ||
535 | } | ||
536 | |||
537 | static int __init smp_scan_config (unsigned long base, unsigned long length) | ||
538 | { | ||
539 | extern void __bad_mpf_size(void); | ||
540 | unsigned int *bp = phys_to_virt(base); | ||
541 | struct intel_mp_floating *mpf; | ||
542 | |||
543 | Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length); | ||
544 | if (sizeof(*mpf) != 16) | ||
545 | __bad_mpf_size(); | ||
546 | |||
547 | while (length > 0) { | ||
548 | mpf = (struct intel_mp_floating *)bp; | ||
549 | if ((*bp == SMP_MAGIC_IDENT) && | ||
550 | (mpf->mpf_length == 1) && | ||
551 | !mpf_checksum((unsigned char *)bp, 16) && | ||
552 | ((mpf->mpf_specification == 1) | ||
553 | || (mpf->mpf_specification == 4)) ) { | ||
554 | |||
555 | smp_found_config = 1; | ||
556 | reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE); | ||
557 | if (mpf->mpf_physptr) | ||
558 | reserve_bootmem_generic(mpf->mpf_physptr, PAGE_SIZE); | ||
559 | mpf_found = mpf; | ||
560 | return 1; | ||
561 | } | ||
562 | bp += 4; | ||
563 | length -= 16; | ||
564 | } | ||
565 | return 0; | ||
566 | } | ||
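smp_scan_config() steps through the candidate region 16 bytes at a time (bp is an unsigned int pointer, so bp += 4 advances 16 bytes) looking for the _MP_ magic with a zero byte sum; the real code also insists on mpf_length == 1 and spec revision 1 or 4. A simplified, self-contained sketch of that scan over an ordinary buffer (magic value spelled out as in the kernel's SMP_MAGIC_IDENT; little-endian host assumed):

#include <stdio.h>
#include <string.h>

/* "_MP_" read as a little-endian 32-bit word. */
#define SMP_MAGIC_IDENT (('_' << 24) | ('P' << 16) | ('M' << 8) | '_')

static int checksum16(const unsigned char *p)
{
	int i, sum = 0;

	for (i = 0; i < 16; i++)
		sum += p[i];
	return sum & 0xFF;
}

/* Step in 16-byte paragraphs; accept one carrying the magic
 * and a zero byte sum, as the loop above does. */
static long scan_for_mpf(const unsigned char *base, long length)
{
	long off;

	for (off = 0; off + 16 <= length; off += 16) {
		unsigned int magic;

		memcpy(&magic, base + off, 4);
		if (magic == SMP_MAGIC_IDENT && !checksum16(base + off))
			return off;
	}
	return -1;
}

int main(void)
{
	unsigned char buf[64] = { 0 };
	int i, sum = 0;

	memcpy(buf + 32, "_MP_", 4);
	for (i = 0; i < 15; i++)
		sum += buf[32 + i];
	buf[32 + 15] = (unsigned char)(0x100 - (sum & 0xFF));

	printf("floating pointer at offset %ld\n",
	       scan_for_mpf(buf, (long)sizeof(buf)));
	return 0;
}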
567 | |||
568 | void __init find_smp_config(void) | ||
569 | { | ||
570 | unsigned int address; | ||
571 | |||
572 | /* | ||
573 | * FIXME: Linux assumes you have 640K of base ram.. | ||
574 | * this continues the error... | ||
575 | * | ||
576 | * 1) Scan the bottom 1K for a signature | ||
577 | * 2) Scan the top 1K of base RAM | ||
578 | * 3) Scan the 64K of bios | ||
579 | */ | ||
580 | if (smp_scan_config(0x0,0x400) || | ||
581 | smp_scan_config(639*0x400,0x400) || | ||
582 | smp_scan_config(0xF0000,0x10000)) | ||
583 | return; | ||
584 | /* | ||
585 | * If it is an SMP machine we should know now. | ||
586 | * | ||
587 | * there is a real-mode segmented pointer pointing to the | ||
588 | * 4K EBDA area at 0x40E, calculate and scan it here. | ||
589 | * | ||
590 | * NOTE! There are Linux loaders that will corrupt the EBDA | ||
591 | * area, and as such this kind of SMP config may be less | ||
592 | * trustworthy, simply because the SMP table may have been | ||
593 | * stomped on during early boot. These loaders are buggy and | ||
594 | * should be fixed. | ||
595 | */ | ||
596 | |||
597 | address = *(unsigned short *)phys_to_virt(0x40E); | ||
598 | address <<= 4; | ||
599 | if (smp_scan_config(address, 0x1000)) | ||
600 | return; | ||
601 | |||
602 | /* If we have come this far, we did not find an MP table */ | ||
603 | printk(KERN_INFO "No mptable found.\n"); | ||
604 | } | ||
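The EBDA probe at the end of find_smp_config() reads the real-mode segment stored at physical 0x40E and shifts it left four bits: a segment counts 16-byte paragraphs, so a stored value of 0x9FC0, for example, places the EBDA at 0x9FC00, which the following 4K scan then covers. The arithmetic spelled out (segment value is an example only):

#include <stdio.h>

int main(void)
{
	/* The BIOS keeps the EBDA's real-mode segment at physical
	 * 0x40E; a segment counts 16-byte paragraphs, hence << 4. */
	unsigned int segment = 0x9FC0;		/* example value */
	unsigned int address = segment << 4;	/* 0x9FC00 */

	printf("EBDA at 0x%X\n", address);
	return 0;
}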
605 | |||
606 | /* -------------------------------------------------------------------------- | ||
607 | ACPI-based MP Configuration | ||
608 | -------------------------------------------------------------------------- */ | ||
609 | |||
610 | #ifdef CONFIG_ACPI | ||
611 | |||
612 | void __init mp_register_lapic_address(u64 address) | ||
613 | { | ||
614 | mp_lapic_addr = (unsigned long) address; | ||
615 | set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); | ||
616 | if (boot_cpu_id == -1U) | ||
617 | boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID)); | ||
618 | } | ||
619 | |||
620 | void __cpuinit mp_register_lapic (u8 id, u8 enabled) | ||
621 | { | ||
622 | struct mpc_config_processor processor; | ||
623 | int boot_cpu = 0; | ||
624 | |||
625 | if (id == boot_cpu_id) | ||
626 | boot_cpu = 1; | ||
627 | |||
628 | processor.mpc_type = MP_PROCESSOR; | ||
629 | processor.mpc_apicid = id; | ||
630 | processor.mpc_apicver = 0; | ||
631 | processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0); | ||
632 | processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0); | ||
633 | processor.mpc_cpufeature = 0; | ||
634 | processor.mpc_featureflag = 0; | ||
635 | processor.mpc_reserved[0] = 0; | ||
636 | processor.mpc_reserved[1] = 0; | ||
637 | |||
638 | MP_processor_info(&processor); | ||
639 | } | ||
640 | |||
641 | #define MP_ISA_BUS 0 | ||
642 | #define MP_MAX_IOAPIC_PIN 127 | ||
643 | |||
644 | static struct mp_ioapic_routing { | ||
645 | int apic_id; | ||
646 | int gsi_start; | ||
647 | int gsi_end; | ||
648 | u32 pin_programmed[4]; | ||
649 | } mp_ioapic_routing[MAX_IO_APICS]; | ||
650 | |||
651 | static int mp_find_ioapic(int gsi) | ||
652 | { | ||
653 | int i = 0; | ||
654 | |||
655 | /* Find the IOAPIC that manages this GSI. */ | ||
656 | for (i = 0; i < nr_ioapics; i++) { | ||
657 | if ((gsi >= mp_ioapic_routing[i].gsi_start) | ||
658 | && (gsi <= mp_ioapic_routing[i].gsi_end)) | ||
659 | return i; | ||
660 | } | ||
661 | |||
662 | printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); | ||
663 | return -1; | ||
664 | } | ||
665 | |||
666 | static u8 uniq_ioapic_id(u8 id) | ||
667 | { | ||
668 | int i; | ||
669 | DECLARE_BITMAP(used, 256); | ||
670 | bitmap_zero(used, 256); | ||
671 | for (i = 0; i < nr_ioapics; i++) { | ||
672 | struct mpc_config_ioapic *ia = &mp_ioapics[i]; | ||
673 | __set_bit(ia->mpc_apicid, used); | ||
674 | } | ||
675 | if (!test_bit(id, used)) | ||
676 | return id; | ||
677 | return find_first_zero_bit(used, 256); | ||
678 | } | ||
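uniq_ioapic_id() rebuilds a 256-bit bitmap of APIC IDs already taken by registered I/O APICs, keeps the requested ID if it is free, and otherwise falls back to the lowest unclaimed ID. The same policy with a plain array, as a user-space sketch:

#include <stdio.h>

static unsigned char used[256];

/* Keep the requested ID when free, else hand out the lowest
 * unclaimed one: the policy of uniq_ioapic_id() above. */
static unsigned int pick_uniq_id(unsigned int id)
{
	unsigned int i;

	if (!used[id]) {
		used[id] = 1;
		return id;
	}
	for (i = 0; i < 256; i++) {
		if (!used[i]) {
			used[i] = 1;
			return i;
		}
	}
	return id;	/* all 256 taken; nothing sane left to do */
}

int main(void)
{
	printf("%u\n", pick_uniq_id(2));	/* 2: free, kept */
	printf("%u\n", pick_uniq_id(2));	/* 0: collision, remapped */
	return 0;
}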
679 | |||
680 | void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base) | ||
681 | { | ||
682 | int idx = 0; | ||
683 | |||
684 | if (bad_ioapic(address)) | ||
685 | return; | ||
686 | |||
687 | idx = nr_ioapics; | ||
688 | |||
689 | mp_ioapics[idx].mpc_type = MP_IOAPIC; | ||
690 | mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE; | ||
691 | mp_ioapics[idx].mpc_apicaddr = address; | ||
692 | |||
693 | set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); | ||
694 | mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id); | ||
695 | mp_ioapics[idx].mpc_apicver = 0; | ||
696 | |||
697 | /* | ||
698 | * Build basic IRQ lookup table to facilitate gsi->io_apic lookups | ||
699 | * and to prevent reprogramming of IOAPIC pins (PCI IRQs). | ||
700 | */ | ||
701 | mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid; | ||
702 | mp_ioapic_routing[idx].gsi_start = gsi_base; | ||
703 | mp_ioapic_routing[idx].gsi_end = gsi_base + | ||
704 | io_apic_get_redir_entries(idx); | ||
705 | |||
706 | printk(KERN_INFO "IOAPIC[%d]: apic_id %d, address 0x%x, " | ||
707 | "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, | ||
708 | mp_ioapics[idx].mpc_apicaddr, | ||
709 | mp_ioapic_routing[idx].gsi_start, | ||
710 | mp_ioapic_routing[idx].gsi_end); | ||
711 | |||
712 | nr_ioapics++; | ||
713 | } | ||
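Each call to mp_register_ioapic() claims a contiguous GSI window [gsi_start, gsi_end], the end derived from the chip's redirection-entry count, and mp_find_ioapic() above then resolves a GSI with a linear range search. A sketch of that lookup over a hand-built table (APIC IDs and pin counts hypothetical):

#include <stdio.h>

struct routing { int apic_id, gsi_start, gsi_end; };

/* Two hypothetical 24-pin I/O APICs: GSIs 0-23 and 24-47. */
static const struct routing table[] = {
	{ 2, 0, 23 },
	{ 3, 24, 47 },
};

/* Linear range search, as mp_find_ioapic() does above. */
static int find_ioapic(int gsi)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (gsi >= table[i].gsi_start && gsi <= table[i].gsi_end)
			return (int)i;
	return -1;
}

int main(void)
{
	int ioapic = find_ioapic(30);			/* index 1 */
	int pin = 30 - table[ioapic].gsi_start;		/* pin 6 */

	printf("GSI 30 -> ioapic %d, pin %d\n", ioapic, pin);
	return 0;
}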
714 | |||
715 | void __init | ||
716 | mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) | ||
717 | { | ||
718 | struct mpc_config_intsrc intsrc; | ||
719 | int ioapic = -1; | ||
720 | int pin = -1; | ||
721 | |||
722 | /* | ||
723 | * Convert 'gsi' to 'ioapic.pin'. | ||
724 | */ | ||
725 | ioapic = mp_find_ioapic(gsi); | ||
726 | if (ioapic < 0) | ||
727 | return; | ||
728 | pin = gsi - mp_ioapic_routing[ioapic].gsi_start; | ||
729 | |||
730 | /* | ||
731 | * TBD: This check is for faulty timer entries, where the override | ||
732 | * erroneously sets the trigger to level, resulting in a HUGE | ||
733 | * increase of timer interrupts! | ||
734 | */ | ||
735 | if ((bus_irq == 0) && (trigger == 3)) | ||
736 | trigger = 1; | ||
737 | |||
738 | intsrc.mpc_type = MP_INTSRC; | ||
739 | intsrc.mpc_irqtype = mp_INT; | ||
740 | intsrc.mpc_irqflag = (trigger << 2) | polarity; | ||
741 | intsrc.mpc_srcbus = MP_ISA_BUS; | ||
742 | intsrc.mpc_srcbusirq = bus_irq; /* IRQ */ | ||
743 | intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */ | ||
744 | intsrc.mpc_dstirq = pin; /* INTIN# */ | ||
745 | |||
746 | Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", | ||
747 | intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, | ||
748 | (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, | ||
749 | intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); | ||
750 | |||
751 | mp_irqs[mp_irq_entries] = intsrc; | ||
752 | if (++mp_irq_entries == MAX_IRQ_SOURCES) | ||
753 | panic("Max # of irq sources exceeded!\n"); | ||
754 | } | ||
755 | |||
756 | void __init mp_config_acpi_legacy_irqs(void) | ||
757 | { | ||
758 | struct mpc_config_intsrc intsrc; | ||
759 | int i = 0; | ||
760 | int ioapic = -1; | ||
761 | |||
762 | /* | ||
763 | * Fabricate the legacy ISA bus (bus #31). | ||
764 | */ | ||
765 | set_bit(MP_ISA_BUS, mp_bus_not_pci); | ||
766 | |||
767 | /* | ||
768 | * Locate the IOAPIC that manages the ISA IRQs (0-15). | ||
769 | */ | ||
770 | ioapic = mp_find_ioapic(0); | ||
771 | if (ioapic < 0) | ||
772 | return; | ||
773 | |||
774 | intsrc.mpc_type = MP_INTSRC; | ||
775 | intsrc.mpc_irqflag = 0; /* Conforming */ | ||
776 | intsrc.mpc_srcbus = MP_ISA_BUS; | ||
777 | intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; | ||
778 | |||
779 | /* | ||
780 | * Use the default configuration for the IRQs 0-15. Unless | ||
781 | * overridden by (MADT) interrupt source override entries. | ||
782 | */ | ||
783 | for (i = 0; i < 16; i++) { | ||
784 | int idx; | ||
785 | |||
786 | for (idx = 0; idx < mp_irq_entries; idx++) { | ||
787 | struct mpc_config_intsrc *irq = mp_irqs + idx; | ||
788 | |||
789 | /* Do we already have a mapping for this ISA IRQ? */ | ||
790 | if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i) | ||
791 | break; | ||
792 | |||
793 | /* Do we already have a mapping for this IOAPIC pin */ | ||
794 | if ((irq->mpc_dstapic == intsrc.mpc_dstapic) && | ||
795 | (irq->mpc_dstirq == i)) | ||
796 | break; | ||
797 | } | ||
798 | |||
799 | if (idx != mp_irq_entries) { | ||
800 | printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i); | ||
801 | continue; /* IRQ already used */ | ||
802 | } | ||
803 | |||
804 | intsrc.mpc_irqtype = mp_INT; | ||
805 | intsrc.mpc_srcbusirq = i; /* Identity mapped */ | ||
806 | intsrc.mpc_dstirq = i; | ||
807 | |||
808 | Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, " | ||
809 | "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, | ||
810 | (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, | ||
811 | intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, | ||
812 | intsrc.mpc_dstirq); | ||
813 | |||
814 | mp_irqs[mp_irq_entries] = intsrc; | ||
815 | if (++mp_irq_entries == MAX_IRQ_SOURCES) | ||
816 | panic("Max # of irq sources exceeded!\n"); | ||
817 | } | ||
818 | } | ||
819 | |||
820 | int mp_register_gsi(u32 gsi, int triggering, int polarity) | ||
821 | { | ||
822 | int ioapic = -1; | ||
823 | int ioapic_pin = 0; | ||
824 | int idx, bit = 0; | ||
825 | |||
826 | if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC) | ||
827 | return gsi; | ||
828 | |||
829 | /* Don't set up the ACPI SCI because it's already set up */ | ||
830 | if (acpi_gbl_FADT.sci_interrupt == gsi) | ||
831 | return gsi; | ||
832 | |||
833 | ioapic = mp_find_ioapic(gsi); | ||
834 | if (ioapic < 0) { | ||
835 | printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi); | ||
836 | return gsi; | ||
837 | } | ||
838 | |||
839 | ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_start; | ||
840 | |||
841 | /* | ||
842 | * Avoid pin reprogramming. PRTs typically include entries | ||
843 | * with redundant pin->gsi mappings (but unique PCI devices); | ||
844 | * we only program the IOAPIC on the first. | ||
845 | */ | ||
846 | bit = ioapic_pin % 32; | ||
847 | idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32); | ||
848 | if (idx > 3) { | ||
849 | printk(KERN_ERR "Invalid reference to IOAPIC pin " | ||
850 | "%d-%d\n", mp_ioapic_routing[ioapic].apic_id, | ||
851 | ioapic_pin); | ||
852 | return gsi; | ||
853 | } | ||
854 | if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) { | ||
855 | Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n", | ||
856 | mp_ioapic_routing[ioapic].apic_id, ioapic_pin); | ||
857 | return gsi; | ||
858 | } | ||
859 | |||
860 | mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit); | ||
861 | |||
862 | io_apic_set_pci_routing(ioapic, ioapic_pin, gsi, | ||
863 | triggering == ACPI_EDGE_SENSITIVE ? 0 : 1, | ||
864 | polarity == ACPI_ACTIVE_HIGH ? 0 : 1); | ||
865 | return gsi; | ||
866 | } | ||
867 | #endif /*CONFIG_ACPI*/ | ||
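A note on the pin_programmed bookkeeping in mp_register_gsi() above: four 32-bit words cover pins 0-127 (matching MP_MAX_IOAPIC_PIN), with word index pin / 32 and bit pin % 32; for pin 90 that is word 2, bit 26. The first caller sets the bit, and redundant PRT entries then short-circuit. A quick check of the indexing:

#include <stdio.h>

int main(void)
{
	unsigned int pin_programmed[4] = { 0 };		/* pins 0..127 */
	unsigned int pin = 90;
	unsigned int idx = (pin < 32) ? 0 : (pin / 32);	/* word 2 */
	unsigned int bit = pin % 32;			/* bit 26 */

	/* First sighting marks the pin; a redundant PRT entry for the
	 * same pin would now see the bit and skip reprogramming. */
	if (!((1u << bit) & pin_programmed[idx])) {
		pin_programmed[idx] |= 1u << bit;
		printf("pin %u -> word %u, bit %u: programmed\n",
		       pin, idx, bit);
	}
	return 0;
}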
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index af51ea8400b2..4dfb40530057 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c | |||
@@ -65,8 +65,8 @@ static loff_t msr_seek(struct file *file, loff_t offset, int orig) | |||
65 | return ret; | 65 | return ret; |
66 | } | 66 | } |
67 | 67 | ||
68 | static ssize_t msr_read(struct file *file, char __user * buf, | 68 | static ssize_t msr_read(struct file *file, char __user *buf, |
69 | size_t count, loff_t * ppos) | 69 | size_t count, loff_t *ppos) |
70 | { | 70 | { |
71 | u32 __user *tmp = (u32 __user *) buf; | 71 | u32 __user *tmp = (u32 __user *) buf; |
72 | u32 data[2]; | 72 | u32 data[2]; |
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c index 6a0aa7038685..8421d0ac6f22 100644 --- a/arch/x86/kernel/nmi_32.c +++ b/arch/x86/kernel/nmi_32.c | |||
@@ -22,9 +22,11 @@ | |||
22 | #include <linux/cpumask.h> | 22 | #include <linux/cpumask.h> |
23 | #include <linux/kernel_stat.h> | 23 | #include <linux/kernel_stat.h> |
24 | #include <linux/kdebug.h> | 24 | #include <linux/kdebug.h> |
25 | #include <linux/slab.h> | ||
25 | 26 | ||
26 | #include <asm/smp.h> | 27 | #include <asm/smp.h> |
27 | #include <asm/nmi.h> | 28 | #include <asm/nmi.h> |
29 | #include <asm/timer.h> | ||
28 | 30 | ||
29 | #include "mach_traps.h" | 31 | #include "mach_traps.h" |
30 | 32 | ||
@@ -67,7 +69,7 @@ static __init void nmi_cpu_busy(void *data) | |||
67 | } | 69 | } |
68 | #endif | 70 | #endif |
69 | 71 | ||
70 | static int __init check_nmi_watchdog(void) | 72 | int __init check_nmi_watchdog(void) |
71 | { | 73 | { |
72 | unsigned int *prev_nmi_count; | 74 | unsigned int *prev_nmi_count; |
73 | int cpu; | 75 | int cpu; |
@@ -80,7 +82,7 @@ static int __init check_nmi_watchdog(void) | |||
80 | 82 | ||
81 | prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL); | 83 | prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL); |
82 | if (!prev_nmi_count) | 84 | if (!prev_nmi_count) |
83 | return -1; | 85 | goto error; |
84 | 86 | ||
85 | printk(KERN_INFO "Testing NMI watchdog ... "); | 87 | printk(KERN_INFO "Testing NMI watchdog ... "); |
86 | 88 | ||
@@ -117,7 +119,7 @@ static int __init check_nmi_watchdog(void) | |||
117 | if (!atomic_read(&nmi_active)) { | 119 | if (!atomic_read(&nmi_active)) { |
118 | kfree(prev_nmi_count); | 120 | kfree(prev_nmi_count); |
119 | atomic_set(&nmi_active, -1); | 121 | atomic_set(&nmi_active, -1); |
120 | return -1; | 122 | goto error; |
121 | } | 123 | } |
122 | printk("OK.\n"); | 124 | printk("OK.\n"); |
123 | 125 | ||
@@ -128,9 +130,11 @@ static int __init check_nmi_watchdog(void) | |||
128 | 130 | ||
129 | kfree(prev_nmi_count); | 131 | kfree(prev_nmi_count); |
130 | return 0; | 132 | return 0; |
133 | error: | ||
134 | timer_ack = !cpu_has_tsc; | ||
135 | |||
136 | return -1; | ||
131 | } | 137 | } |
132 | /* This needs to happen later in boot so counters are working */ | ||
133 | late_initcall(check_nmi_watchdog); | ||
134 | 138 | ||
135 | static int __init setup_nmi_watchdog(char *str) | 139 | static int __init setup_nmi_watchdog(char *str) |
136 | { | 140 | { |
diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c index 9a4fde74bee1..11f9130ac513 100644 --- a/arch/x86/kernel/nmi_64.c +++ b/arch/x86/kernel/nmi_64.c | |||
@@ -26,6 +26,8 @@ | |||
26 | #include <asm/proto.h> | 26 | #include <asm/proto.h> |
27 | #include <asm/mce.h> | 27 | #include <asm/mce.h> |
28 | 28 | ||
29 | #include <mach_traps.h> | ||
30 | |||
29 | int unknown_nmi_panic; | 31 | int unknown_nmi_panic; |
30 | int nmi_watchdog_enabled; | 32 | int nmi_watchdog_enabled; |
31 | int panic_on_unrecovered_nmi; | 33 | int panic_on_unrecovered_nmi; |
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 075962cc75ab..3733412d1357 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c | |||
@@ -206,13 +206,6 @@ static struct resource reserve_ioports = { | |||
206 | .flags = IORESOURCE_IO | IORESOURCE_BUSY, | 206 | .flags = IORESOURCE_IO | IORESOURCE_BUSY, |
207 | }; | 207 | }; |
208 | 208 | ||
209 | static struct resource reserve_iomem = { | ||
210 | .start = 0, | ||
211 | .end = -1, | ||
212 | .name = "paravirt-iomem", | ||
213 | .flags = IORESOURCE_MEM | IORESOURCE_BUSY, | ||
214 | }; | ||
215 | |||
216 | /* | 209 | /* |
217 | * Reserve the whole legacy IO space to prevent any legacy drivers | 210 | * Reserve the whole legacy IO space to prevent any legacy drivers |
218 | * from wasting time probing for their hardware. This is a fairly | 211 | * from wasting time probing for their hardware. This is a fairly |
@@ -222,16 +215,7 @@ static struct resource reserve_iomem = { | |||
222 | */ | 215 | */ |
223 | int paravirt_disable_iospace(void) | 216 | int paravirt_disable_iospace(void) |
224 | { | 217 | { |
225 | int ret; | 218 | return request_resource(&ioport_resource, &reserve_ioports); |
226 | |||
227 | ret = request_resource(&ioport_resource, &reserve_ioports); | ||
228 | if (ret == 0) { | ||
229 | ret = request_resource(&iomem_resource, &reserve_iomem); | ||
230 | if (ret) | ||
231 | release_resource(&reserve_ioports); | ||
232 | } | ||
233 | |||
234 | return ret; | ||
235 | } | 219 | } |
236 | 220 | ||
237 | static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE; | 221 | static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE; |
diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c index 375cb2bc45be..ada5a0604992 100644 --- a/arch/x86/kernel/pci-dma_64.c +++ b/arch/x86/kernel/pci-dma_64.c | |||
@@ -232,32 +232,32 @@ static __init int iommu_setup(char *p) | |||
232 | return -EINVAL; | 232 | return -EINVAL; |
233 | 233 | ||
234 | while (*p) { | 234 | while (*p) { |
235 | if (!strncmp(p,"off",3)) | 235 | if (!strncmp(p, "off", 3)) |
236 | no_iommu = 1; | 236 | no_iommu = 1; |
237 | /* gart_parse_options has more force support */ | 237 | /* gart_parse_options has more force support */ |
238 | if (!strncmp(p,"force",5)) | 238 | if (!strncmp(p, "force", 5)) |
239 | force_iommu = 1; | 239 | force_iommu = 1; |
240 | if (!strncmp(p,"noforce",7)) { | 240 | if (!strncmp(p, "noforce", 7)) { |
241 | iommu_merge = 0; | 241 | iommu_merge = 0; |
242 | force_iommu = 0; | 242 | force_iommu = 0; |
243 | } | 243 | } |
244 | 244 | ||
245 | if (!strncmp(p, "biomerge",8)) { | 245 | if (!strncmp(p, "biomerge", 8)) { |
246 | iommu_bio_merge = 4096; | 246 | iommu_bio_merge = 4096; |
247 | iommu_merge = 1; | 247 | iommu_merge = 1; |
248 | force_iommu = 1; | 248 | force_iommu = 1; |
249 | } | 249 | } |
250 | if (!strncmp(p, "panic",5)) | 250 | if (!strncmp(p, "panic", 5)) |
251 | panic_on_overflow = 1; | 251 | panic_on_overflow = 1; |
252 | if (!strncmp(p, "nopanic",7)) | 252 | if (!strncmp(p, "nopanic", 7)) |
253 | panic_on_overflow = 0; | 253 | panic_on_overflow = 0; |
254 | if (!strncmp(p, "merge",5)) { | 254 | if (!strncmp(p, "merge", 5)) { |
255 | iommu_merge = 1; | 255 | iommu_merge = 1; |
256 | force_iommu = 1; | 256 | force_iommu = 1; |
257 | } | 257 | } |
258 | if (!strncmp(p, "nomerge",7)) | 258 | if (!strncmp(p, "nomerge", 7)) |
259 | iommu_merge = 0; | 259 | iommu_merge = 0; |
260 | if (!strncmp(p, "forcesac",8)) | 260 | if (!strncmp(p, "forcesac", 8)) |
261 | iommu_sac_force = 1; | 261 | iommu_sac_force = 1; |
262 | if (!strncmp(p, "allowdac", 8)) | 262 | if (!strncmp(p, "allowdac", 8)) |
263 | forbid_dac = 0; | 263 | forbid_dac = 0; |
@@ -265,7 +265,7 @@ static __init int iommu_setup(char *p) | |||
265 | forbid_dac = -1; | 265 | forbid_dac = -1; |
266 | 266 | ||
267 | #ifdef CONFIG_SWIOTLB | 267 | #ifdef CONFIG_SWIOTLB |
268 | if (!strncmp(p, "soft",4)) | 268 | if (!strncmp(p, "soft", 4)) |
269 | swiotlb = 1; | 269 | swiotlb = 1; |
270 | #endif | 270 | #endif |
271 | 271 | ||
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 43930e73f657..3903a8f2eb97 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -113,20 +113,13 @@ void default_idle(void) | |||
113 | 113 | ||
114 | local_irq_disable(); | 114 | local_irq_disable(); |
115 | if (!need_resched()) { | 115 | if (!need_resched()) { |
116 | ktime_t t0, t1; | ||
117 | u64 t0n, t1n; | ||
118 | |||
119 | t0 = ktime_get(); | ||
120 | t0n = ktime_to_ns(t0); | ||
121 | safe_halt(); /* enables interrupts racelessly */ | 116 | safe_halt(); /* enables interrupts racelessly */ |
122 | local_irq_disable(); | 117 | local_irq_disable(); |
123 | t1 = ktime_get(); | ||
124 | t1n = ktime_to_ns(t1); | ||
125 | sched_clock_idle_wakeup_event(t1n - t0n); | ||
126 | } | 118 | } |
127 | local_irq_enable(); | 119 | local_irq_enable(); |
128 | current_thread_info()->status |= TS_POLLING; | 120 | current_thread_info()->status |= TS_POLLING; |
129 | } else { | 121 | } else { |
122 | local_irq_enable(); | ||
130 | /* loop is done by the caller */ | 123 | /* loop is done by the caller */ |
131 | cpu_relax(); | 124 | cpu_relax(); |
132 | } | 125 | } |
@@ -142,6 +135,7 @@ EXPORT_SYMBOL(default_idle); | |||
142 | */ | 135 | */ |
143 | static void poll_idle(void) | 136 | static void poll_idle(void) |
144 | { | 137 | { |
138 | local_irq_enable(); | ||
145 | cpu_relax(); | 139 | cpu_relax(); |
146 | } | 140 | } |
147 | 141 | ||
@@ -248,8 +242,11 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx) | |||
248 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | 242 | __monitor((void *)¤t_thread_info()->flags, 0, 0); |
249 | smp_mb(); | 243 | smp_mb(); |
250 | if (!need_resched()) | 244 | if (!need_resched()) |
251 | __mwait(ax, cx); | 245 | __sti_mwait(ax, cx); |
252 | } | 246 | else |
247 | local_irq_enable(); | ||
248 | } else | ||
249 | local_irq_enable(); | ||
253 | } | 250 | } |
254 | 251 | ||
255 | /* Default MONITOR/MWAIT with no hints, used for default C1 state */ | 252 | /* Default MONITOR/MWAIT with no hints, used for default C1 state */ |
@@ -332,7 +329,7 @@ void __show_registers(struct pt_regs *regs, int all) | |||
332 | init_utsname()->version); | 329 | init_utsname()->version); |
333 | 330 | ||
334 | printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", | 331 | printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", |
335 | 0xffff & regs->cs, regs->ip, regs->flags, | 332 | (u16)regs->cs, regs->ip, regs->flags, |
336 | smp_processor_id()); | 333 | smp_processor_id()); |
337 | print_symbol("EIP is at %s\n", regs->ip); | 334 | print_symbol("EIP is at %s\n", regs->ip); |
338 | 335 | ||
@@ -341,8 +338,7 @@ void __show_registers(struct pt_regs *regs, int all) | |||
341 | printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n", | 338 | printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n", |
342 | regs->si, regs->di, regs->bp, sp); | 339 | regs->si, regs->di, regs->bp, sp); |
343 | printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n", | 340 | printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n", |
344 | regs->ds & 0xffff, regs->es & 0xffff, | 341 | (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss); |
345 | regs->fs & 0xffff, gs, ss); | ||
346 | 342 | ||
347 | if (!all) | 343 | if (!all) |
348 | return; | 344 | return; |
@@ -513,6 +509,21 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, | |||
513 | return err; | 509 | return err; |
514 | } | 510 | } |
515 | 511 | ||
512 | void | ||
513 | start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) | ||
514 | { | ||
515 | __asm__("movl %0, %%gs" :: "r"(0)); | ||
516 | regs->fs = 0; | ||
517 | set_fs(USER_DS); | ||
518 | regs->ds = __USER_DS; | ||
519 | regs->es = __USER_DS; | ||
520 | regs->ss = __USER_DS; | ||
521 | regs->cs = __USER_CS; | ||
522 | regs->ip = new_ip; | ||
523 | regs->sp = new_sp; | ||
524 | } | ||
525 | EXPORT_SYMBOL_GPL(start_thread); | ||
526 | |||
516 | #ifdef CONFIG_SECCOMP | 527 | #ifdef CONFIG_SECCOMP |
517 | static void hard_disable_TSC(void) | 528 | static void hard_disable_TSC(void) |
518 | { | 529 | { |
@@ -550,12 +561,12 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | |||
550 | /* we clear debugctl to make sure DS | 561 | /* we clear debugctl to make sure DS |
551 | * is not in use when we change it */ | 562 | * is not in use when we change it */ |
552 | debugctl = 0; | 563 | debugctl = 0; |
553 | wrmsrl(MSR_IA32_DEBUGCTLMSR, 0); | 564 | update_debugctlmsr(0); |
554 | wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0); | 565 | wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0); |
555 | } | 566 | } |
556 | 567 | ||
557 | if (next->debugctlmsr != debugctl) | 568 | if (next->debugctlmsr != debugctl) |
558 | wrmsr(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr, 0); | 569 | update_debugctlmsr(next->debugctlmsr); |
559 | 570 | ||
560 | if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { | 571 | if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { |
561 | set_debugreg(next->debugreg0, 0); | 572 | set_debugreg(next->debugreg0, 0); |
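The new start_thread() added to process_32.c is the register-frame setup exec performs before returning to user mode: clear %gs and fs, point the data and stack segments at __USER_DS and the code segment at __USER_CS, then load the entry point and stack pointer. A stand-alone sketch with a stand-in frame (the struct is illustrative; 0x73/0x7b are the usual i386 user selectors, shown only as examples):

#include <stdio.h>

/* Stand-in for the kernel's pt_regs; fields mirror what the new
 * start_thread() touches. */
struct regs { unsigned long cs, ds, es, ss, fs, ip, sp; };

#define USER_CS 0x73
#define USER_DS 0x7b

static void demo_start_thread(struct regs *r,
			      unsigned long ip, unsigned long sp)
{
	r->fs = 0;			/* the kernel also clears %gs */
	r->ds = r->es = r->ss = USER_DS;
	r->cs = USER_CS;
	r->ip = ip;			/* user entry point */
	r->sp = sp;			/* user stack */
}

int main(void)
{
	struct regs r;

	demo_start_thread(&r, 0x8048000UL, 0xbffff000UL);
	printf("cs=0x%lx ip=0x%lx sp=0x%lx\n", r.cs, r.ip, r.sp);
	return 0;
}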
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 46c4c546b499..e75ccc8a2b87 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -107,16 +107,8 @@ void default_idle(void) | |||
107 | smp_mb(); | 107 | smp_mb(); |
108 | local_irq_disable(); | 108 | local_irq_disable(); |
109 | if (!need_resched()) { | 109 | if (!need_resched()) { |
110 | ktime_t t0, t1; | ||
111 | u64 t0n, t1n; | ||
112 | |||
113 | t0 = ktime_get(); | ||
114 | t0n = ktime_to_ns(t0); | ||
115 | safe_halt(); /* enables interrupts racelessly */ | 110 | safe_halt(); /* enables interrupts racelessly */ |
116 | local_irq_disable(); | 111 | local_irq_disable(); |
117 | t1 = ktime_get(); | ||
118 | t1n = ktime_to_ns(t1); | ||
119 | sched_clock_idle_wakeup_event(t1n - t0n); | ||
120 | } | 112 | } |
121 | local_irq_enable(); | 113 | local_irq_enable(); |
122 | current_thread_info()->status |= TS_POLLING; | 114 | current_thread_info()->status |= TS_POLLING; |
@@ -528,6 +520,21 @@ out: | |||
528 | return err; | 520 | return err; |
529 | } | 521 | } |
530 | 522 | ||
523 | void | ||
524 | start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) | ||
525 | { | ||
526 | asm volatile("movl %0, %%fs; movl %0, %%es; movl %0, %%ds" :: "r"(0)); | ||
527 | load_gs_index(0); | ||
528 | regs->ip = new_ip; | ||
529 | regs->sp = new_sp; | ||
530 | write_pda(oldrsp, new_sp); | ||
531 | regs->cs = __USER_CS; | ||
532 | regs->ss = __USER_DS; | ||
533 | regs->flags = 0x200; | ||
534 | set_fs(USER_DS); | ||
535 | } | ||
536 | EXPORT_SYMBOL_GPL(start_thread); | ||
537 | |||
531 | /* | 538 | /* |
532 | * This special macro can be used to load a debugging register | 539 | * This special macro can be used to load a debugging register |
533 | */ | 540 | */ |
@@ -548,12 +555,12 @@ static inline void __switch_to_xtra(struct task_struct *prev_p, | |||
548 | /* we clear debugctl to make sure DS | 555 | /* we clear debugctl to make sure DS |
549 | * is not in use when we change it */ | 556 | * is not in use when we change it */ |
550 | debugctl = 0; | 557 | debugctl = 0; |
551 | wrmsrl(MSR_IA32_DEBUGCTLMSR, 0); | 558 | update_debugctlmsr(0); |
552 | wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr); | 559 | wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr); |
553 | } | 560 | } |
554 | 561 | ||
555 | if (next->debugctlmsr != debugctl) | 562 | if (next->debugctlmsr != debugctl) |
556 | wrmsrl(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr); | 563 | update_debugctlmsr(next->debugctlmsr); |
557 | 564 | ||
558 | if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { | 565 | if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { |
559 | loaddebug(next, 0); | 566 | loaddebug(next, 0); |
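The 64-bit start_thread() stores the literal 0x200 into regs->flags; that is just the interrupt-enable flag (IF, bit 9), the one EFLAGS bit a fresh user thread must have set. A quick check:

#include <stdio.h>

int main(void)
{
	unsigned long eflags_if = 1UL << 9;	/* IF, interrupt enable */

	/* Prints 0x200, the literal stored by start_thread() above. */
	printf("IF = 0x%lx\n", eflags_if);
	return 0;
}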
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index eb92ccbb3502..559c1b027417 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -1456,7 +1456,6 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code) | |||
1456 | /* notification of system call entry/exit | 1456 | /* notification of system call entry/exit |
1457 | * - triggered by current->work.syscall_trace | 1457 | * - triggered by current->work.syscall_trace |
1458 | */ | 1458 | */ |
1459 | __attribute__((regparm(3))) | ||
1460 | int do_syscall_trace(struct pt_regs *regs, int entryexit) | 1459 | int do_syscall_trace(struct pt_regs *regs, int entryexit) |
1461 | { | 1460 | { |
1462 | int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU); | 1461 | int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU); |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 484c4a80d38a..9692202d3bfb 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -1,5 +1,4 @@ | |||
1 | #include <linux/module.h> | 1 | #include <linux/module.h> |
2 | #include <linux/init.h> | ||
3 | #include <linux/reboot.h> | 2 | #include <linux/reboot.h> |
4 | #include <linux/init.h> | 3 | #include <linux/init.h> |
5 | #include <linux/pm.h> | 4 | #include <linux/pm.h> |
@@ -412,12 +411,12 @@ static void native_machine_shutdown(void) | |||
412 | #ifdef CONFIG_X86_32 | 411 | #ifdef CONFIG_X86_32 |
413 | /* See if a command line override has been given */ | 412 | /* See if a command line override has been given */ |
414 | if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) && | 413 | if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) && |
415 | cpu_isset(reboot_cpu, cpu_online_map)) | 414 | cpu_online(reboot_cpu)) |
416 | reboot_cpu_id = reboot_cpu; | 415 | reboot_cpu_id = reboot_cpu; |
417 | #endif | 416 | #endif |
418 | 417 | ||
419 | /* Make certain the cpu I'm about to reboot on is online */ | 418 | /* Make certain the cpu I'm about to reboot on is online */ |
420 | if (!cpu_isset(reboot_cpu_id, cpu_online_map)) | 419 | if (!cpu_online(reboot_cpu_id)) |
421 | reboot_cpu_id = smp_processor_id(); | 420 | reboot_cpu_id = smp_processor_id(); |
422 | 421 | ||
423 | /* Make certain I only run on the appropriate processor */ | 422 | /* Make certain I only run on the appropriate processor */ |
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S index f151d6fae462..c30fe25d470d 100644 --- a/arch/x86/kernel/relocate_kernel_32.S +++ b/arch/x86/kernel/relocate_kernel_32.S | |||
@@ -9,18 +9,19 @@ | |||
9 | #include <linux/linkage.h> | 9 | #include <linux/linkage.h> |
10 | #include <asm/page.h> | 10 | #include <asm/page.h> |
11 | #include <asm/kexec.h> | 11 | #include <asm/kexec.h> |
12 | #include <asm/processor-flags.h> | ||
13 | #include <asm/pgtable.h> | ||
12 | 14 | ||
13 | /* | 15 | /* |
14 | * Must be relocatable PIC code callable as a C function | 16 | * Must be relocatable PIC code callable as a C function |
15 | */ | 17 | */ |
16 | 18 | ||
17 | #define PTR(x) (x << 2) | 19 | #define PTR(x) (x << 2) |
18 | #define PAGE_ALIGNED (1 << PAGE_SHIFT) | 20 | #define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) |
19 | #define PAGE_ATTR 0x63 /* _PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY */ | 21 | #define PAE_PGD_ATTR (_PAGE_PRESENT) |
20 | #define PAE_PGD_ATTR 0x01 /* _PAGE_PRESENT */ | ||
21 | 22 | ||
22 | .text | 23 | .text |
23 | .align PAGE_ALIGNED | 24 | .align PAGE_SIZE |
24 | .globl relocate_kernel | 25 | .globl relocate_kernel |
25 | relocate_kernel: | 26 | relocate_kernel: |
26 | movl 8(%esp), %ebp /* list of pages */ | 27 | movl 8(%esp), %ebp /* list of pages */ |
@@ -155,7 +156,7 @@ relocate_new_kernel: | |||
155 | movl %eax, %cr3 | 156 | movl %eax, %cr3 |
156 | 157 | ||
157 | /* setup a new stack at the end of the physical control page */ | 158 | /* setup a new stack at the end of the physical control page */ |
158 | lea 4096(%edi), %esp | 159 | lea PAGE_SIZE(%edi), %esp |
159 | 160 | ||
160 | /* jump to identity mapped page */ | 161 | /* jump to identity mapped page */ |
161 | movl %edi, %eax | 162 | movl %edi, %eax |
@@ -168,16 +169,16 @@ identity_mapped: | |||
168 | pushl %edx | 169 | pushl %edx |
169 | 170 | ||
170 | /* Set cr0 to a known state: | 171 | /* Set cr0 to a known state: |
171 | * 31 0 == Paging disabled | 172 | * - Paging disabled |
172 | * 18 0 == Alignment check disabled | 173 | * - Alignment check disabled |
173 | * 16 0 == Write protect disabled | 174 | * - Write protect disabled |
174 | * 3 0 == No task switch | 175 | * - No task switch |
175 | * 2 0 == Don't do FP software emulation. | 176 | * - Don't do FP software emulation. |
176 | * 0 1 == Protected mode enabled | 177 | * - Protected mode enabled |
177 | */ | 178 | */ |
178 | movl %cr0, %eax | 179 | movl %cr0, %eax |
179 | andl $~((1<<31)|(1<<18)|(1<<16)|(1<<3)|(1<<2)), %eax | 180 | andl $~(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %eax |
180 | orl $(1<<0), %eax | 181 | orl $(X86_CR0_PE), %eax |
181 | movl %eax, %cr0 | 182 | movl %eax, %cr0 |
182 | 183 | ||
183 | /* clear cr4 if applicable */ | 184 | /* clear cr4 if applicable */ |
@@ -186,8 +187,7 @@ identity_mapped: | |||
186 | /* Set cr4 to a known state: | 187 | /* Set cr4 to a known state: |
187 | * Setting everything to zero seems safe. | 188 | * Setting everything to zero seems safe. |
188 | */ | 189 | */ |
189 | movl %cr4, %eax | 190 | xorl %eax, %eax |
190 | andl $0, %eax | ||
191 | movl %eax, %cr4 | 191 | movl %eax, %cr4 |
192 | 192 | ||
193 | jmp 1f | 193 | jmp 1f |
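The identity_mapped changes above trade raw shift expressions for the X86_CR0_* names from the newly included asm/processor-flags.h. A small sketch confirming the named mask clears and sets the same bits as the old literals (bit positions assumed from the architecture manuals, not taken from this diff):

#include <assert.h>
#include <stdint.h>

/* CR0 bit positions assumed from the Intel/AMD manuals; the kernel
 * names come from asm/processor-flags.h. */
#define X86_CR0_PE (1u << 0)    /* protected mode enable */
#define X86_CR0_EM (1u << 2)    /* FPU emulation */
#define X86_CR0_TS (1u << 3)    /* task switched */
#define X86_CR0_WP (1u << 16)   /* write protect */
#define X86_CR0_AM (1u << 18)   /* alignment mask */
#define X86_CR0_PG (1u << 31)   /* paging */

int main(void)
{
        uint32_t old_clear = (1u << 31) | (1u << 18) | (1u << 16) |
                             (1u << 3)  | (1u << 2);

        /* The named mask clears exactly the bits the old literal cleared... */
        assert((X86_CR0_PG | X86_CR0_AM | X86_CR0_WP |
                X86_CR0_TS | X86_CR0_EM) == old_clear);
        /* ...and the bit that stays set is still PE. */
        assert(X86_CR0_PE == (1u << 0));
        return 0;
}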
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index 14e95872c6a3..f5afe665a82b 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S | |||
@@ -9,17 +9,18 @@ | |||
9 | #include <linux/linkage.h> | 9 | #include <linux/linkage.h> |
10 | #include <asm/page.h> | 10 | #include <asm/page.h> |
11 | #include <asm/kexec.h> | 11 | #include <asm/kexec.h> |
12 | #include <asm/processor-flags.h> | ||
13 | #include <asm/pgtable.h> | ||
12 | 14 | ||
13 | /* | 15 | /* |
14 | * Must be relocatable PIC code callable as a C function | 16 | * Must be relocatable PIC code callable as a C function |
15 | */ | 17 | */ |
16 | 18 | ||
17 | #define PTR(x) (x << 3) | 19 | #define PTR(x) (x << 3) |
18 | #define PAGE_ALIGNED (1 << PAGE_SHIFT) | 20 | #define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) |
19 | #define PAGE_ATTR 0x63 /* _PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY */ | ||
20 | 21 | ||
21 | .text | 22 | .text |
22 | .align PAGE_ALIGNED | 23 | .align PAGE_SIZE |
23 | .code64 | 24 | .code64 |
24 | .globl relocate_kernel | 25 | .globl relocate_kernel |
25 | relocate_kernel: | 26 | relocate_kernel: |
@@ -160,7 +161,7 @@ relocate_new_kernel: | |||
160 | movq %r9, %cr3 | 161 | movq %r9, %cr3 |
161 | 162 | ||
162 | /* setup a new stack at the end of the physical control page */ | 163 | /* setup a new stack at the end of the physical control page */ |
163 | lea 4096(%r8), %rsp | 164 | lea PAGE_SIZE(%r8), %rsp |
164 | 165 | ||
165 | /* jump to identity mapped page */ | 166 | /* jump to identity mapped page */ |
166 | addq $(identity_mapped - relocate_kernel), %r8 | 167 | addq $(identity_mapped - relocate_kernel), %r8 |
@@ -172,33 +173,22 @@ identity_mapped: | |||
172 | pushq %rdx | 173 | pushq %rdx |
173 | 174 | ||
174 | /* Set cr0 to a known state: | 175 | /* Set cr0 to a known state: |
175 | * 31 1 == Paging enabled | 176 | * - Paging enabled |
176 | * 18 0 == Alignment check disabled | 177 | * - Alignment check disabled |
177 | * 16 0 == Write protect disabled | 178 | * - Write protect disabled |
178 | * 3 0 == No task switch | 179 | * - No task switch |
179 | * 2 0 == Don't do FP software emulation. | 180 | * - Don't do FP software emulation. |
180 | * 0 1 == Protected mode enabled | 181 | * - Protected mode enabled |
181 | */ | 182 | */ |
182 | movq %cr0, %rax | 183 | movq %cr0, %rax |
183 | andq $~((1<<18)|(1<<16)|(1<<3)|(1<<2)), %rax | 184 | andq $~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax |
184 | orl $((1<<31)|(1<<0)), %eax | 185 | orl $(X86_CR0_PG | X86_CR0_PE), %eax |
185 | movq %rax, %cr0 | 186 | movq %rax, %cr0 |
186 | 187 | ||
187 | /* Set cr4 to a known state: | 188 | /* Set cr4 to a known state: |
188 | * 10 0 == xmm exceptions disabled | 189 | * - physical address extension enabled |
189 | * 9 0 == xmm registers instructions disabled | ||
190 | * 8 0 == performance monitoring counter disabled | ||
191 | * 7 0 == page global disabled | ||
192 | * 6 0 == machine check exceptions disabled | ||
193 | * 5 1 == physical address extension enabled | ||
194 | * 4 0 == page size extensions disabled | ||
195 | * 3 0 == Debug extensions disabled | ||
196 | * 2 0 == Time stamp disable (disabled) | ||
197 | * 1 0 == Protected mode virtual interrupts disabled | ||
198 | * 0 0 == VME disabled | ||
199 | */ | 190 | */ |
200 | 191 | movq $X86_CR4_PAE, %rax | |
201 | movq $((1<<5)), %rax | ||
202 | movq %rax, %cr4 | 192 | movq %rax, %cr4 |
203 | 193 | ||
204 | jmp 1f | 194 | jmp 1f |
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index eb9b1a198f5e..9615eee9b775 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c | |||
@@ -9,7 +9,6 @@ | |||
9 | #include <asm/vsyscall.h> | 9 | #include <asm/vsyscall.h> |
10 | 10 | ||
11 | #ifdef CONFIG_X86_32 | 11 | #ifdef CONFIG_X86_32 |
12 | # define CMOS_YEARS_OFFS 1900 | ||
13 | /* | 12 | /* |
14 | * This is a special lock that is owned by the CPU and holds the index | 13 | * This is a special lock that is owned by the CPU and holds the index |
15 | * register we are working with. It is required for NMI access to the | 14 | * register we are working with. It is required for NMI access to the |
@@ -17,14 +16,11 @@ | |||
17 | */ | 16 | */ |
18 | volatile unsigned long cmos_lock = 0; | 17 | volatile unsigned long cmos_lock = 0; |
19 | EXPORT_SYMBOL(cmos_lock); | 18 | EXPORT_SYMBOL(cmos_lock); |
20 | #else | ||
21 | /* | ||
22 | * x86-64 systems have only existed since 2002. | ||
23 | * This will work up to Dec 31, 2100 | ||
24 | */ | ||
25 | # define CMOS_YEARS_OFFS 2000 | ||
26 | #endif | 19 | #endif |
27 | 20 | ||
21 | /* For two-digit years, assume the time is always after this offset */ |||
22 | #define CMOS_YEARS_OFFS 2000 | ||
23 | |||
28 | DEFINE_SPINLOCK(rtc_lock); | 24 | DEFINE_SPINLOCK(rtc_lock); |
29 | EXPORT_SYMBOL(rtc_lock); | 25 | EXPORT_SYMBOL(rtc_lock); |
30 | 26 | ||
@@ -98,7 +94,7 @@ int mach_set_rtc_mmss(unsigned long nowtime) | |||
98 | 94 | ||
99 | unsigned long mach_get_cmos_time(void) | 95 | unsigned long mach_get_cmos_time(void) |
100 | { | 96 | { |
101 | unsigned int year, mon, day, hour, min, sec, century = 0; | 97 | unsigned int status, year, mon, day, hour, min, sec, century = 0; |
102 | 98 | ||
103 | /* | 99 | /* |
104 | * If UIP is clear, then we have >= 244 microseconds before | 100 | * If UIP is clear, then we have >= 244 microseconds before |
@@ -116,14 +112,16 @@ unsigned long mach_get_cmos_time(void) | |||
116 | mon = CMOS_READ(RTC_MONTH); | 112 | mon = CMOS_READ(RTC_MONTH); |
117 | year = CMOS_READ(RTC_YEAR); | 113 | year = CMOS_READ(RTC_YEAR); |
118 | 114 | ||
119 | #if defined(CONFIG_ACPI) && defined(CONFIG_X86_64) | 115 | #ifdef CONFIG_ACPI |
120 | /* CHECKME: Is this really 64bit only ??? */ | ||
121 | if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID && | 116 | if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID && |
122 | acpi_gbl_FADT.century) | 117 | acpi_gbl_FADT.century) |
123 | century = CMOS_READ(acpi_gbl_FADT.century); | 118 | century = CMOS_READ(acpi_gbl_FADT.century); |
124 | #endif | 119 | #endif |
125 | 120 | ||
126 | if (RTC_ALWAYS_BCD || !(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)) { | 121 | status = CMOS_READ(RTC_CONTROL); |
122 | WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY)); | ||
123 | |||
124 | if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) { | ||
127 | BCD_TO_BIN(sec); | 125 | BCD_TO_BIN(sec); |
128 | BCD_TO_BIN(min); | 126 | BCD_TO_BIN(min); |
129 | BCD_TO_BIN(hour); | 127 | BCD_TO_BIN(hour); |
@@ -136,11 +134,8 @@ unsigned long mach_get_cmos_time(void) | |||
136 | BCD_TO_BIN(century); | 134 | BCD_TO_BIN(century); |
137 | year += century * 100; | 135 | year += century * 100; |
138 | printk(KERN_INFO "Extended CMOS year: %d\n", century * 100); | 136 | printk(KERN_INFO "Extended CMOS year: %d\n", century * 100); |
139 | } else { | 137 | } else |
140 | year += CMOS_YEARS_OFFS; | 138 | year += CMOS_YEARS_OFFS; |
141 | if (year < 1970) | ||
142 | year += 100; | ||
143 | } | ||
144 | 139 | ||
145 | return mktime(year, mon, day, hour, min, sec); | 140 | return mktime(year, mon, day, hour, min, sec); |
146 | } | 141 | } |
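The mach_get_cmos_time() hunk keeps the BCD decoding path but reads RTC_CONTROL once into status and warns if RTC_ALWAYS_BCD disagrees with the hardware mode. A sketch of the conversion BCD_TO_BIN performs, plus the two-digit-year offset (the helper below is an illustrative stand-in; the kernel macro converts its argument in place):

#include <assert.h>

#define CMOS_YEARS_OFFS 2000    /* two-digit years are offset from here */

/* Stand-in for BCD_TO_BIN(): each nibble holds one decimal digit. */
static unsigned int bcd_to_bin(unsigned int val)
{
        return (val & 0x0f) + (val >> 4) * 10;
}

int main(void)
{
        /* An RTC in BCD mode reports 59 seconds as 0x59. */
        assert(bcd_to_bin(0x59) == 59);
        /* Two-digit year 08 plus the offset gives 2008. */
        assert(bcd_to_bin(0x08) + CMOS_YEARS_OFFS == 2008);
        return 0;
}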
@@ -151,8 +146,8 @@ unsigned char rtc_cmos_read(unsigned char addr) | |||
151 | unsigned char val; | 146 | unsigned char val; |
152 | 147 | ||
153 | lock_cmos_prefix(addr); | 148 | lock_cmos_prefix(addr); |
154 | outb_p(addr, RTC_PORT(0)); | 149 | outb(addr, RTC_PORT(0)); |
155 | val = inb_p(RTC_PORT(1)); | 150 | val = inb(RTC_PORT(1)); |
156 | lock_cmos_suffix(addr); | 151 | lock_cmos_suffix(addr); |
157 | return val; | 152 | return val; |
158 | } | 153 | } |
@@ -161,8 +156,8 @@ EXPORT_SYMBOL(rtc_cmos_read); | |||
161 | void rtc_cmos_write(unsigned char val, unsigned char addr) | 156 | void rtc_cmos_write(unsigned char val, unsigned char addr) |
162 | { | 157 | { |
163 | lock_cmos_prefix(addr); | 158 | lock_cmos_prefix(addr); |
164 | outb_p(addr, RTC_PORT(0)); | 159 | outb(addr, RTC_PORT(0)); |
165 | outb_p(val, RTC_PORT(1)); | 160 | outb(val, RTC_PORT(1)); |
166 | lock_cmos_suffix(addr); | 161 | lock_cmos_suffix(addr); |
167 | } | 162 | } |
168 | EXPORT_SYMBOL(rtc_cmos_write); | 163 | EXPORT_SYMBOL(rtc_cmos_write); |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c new file mode 100644 index 000000000000..ed157c90412e --- /dev/null +++ b/arch/x86/kernel/setup.c | |||
@@ -0,0 +1,113 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/module.h> | ||
3 | #include <linux/init.h> | ||
4 | #include <linux/bootmem.h> | ||
5 | #include <linux/percpu.h> | ||
6 | #include <asm/smp.h> | ||
7 | #include <asm/percpu.h> | ||
8 | #include <asm/sections.h> | ||
9 | #include <asm/processor.h> | ||
10 | #include <asm/setup.h> | ||
11 | #include <asm/topology.h> | ||
12 | #include <asm/mpspec.h> | ||
13 | #include <asm/apicdef.h> | ||
14 | |||
15 | unsigned int num_processors; | ||
16 | unsigned disabled_cpus __cpuinitdata; | ||
17 | /* Processor that is doing the boot up */ | ||
18 | unsigned int boot_cpu_physical_apicid = -1U; | ||
19 | EXPORT_SYMBOL(boot_cpu_physical_apicid); | ||
20 | |||
23 | DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID; | ||
24 | EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); | ||
25 | |||
26 | /* Bitmask of physically existing CPUs */ | ||
27 | physid_mask_t phys_cpu_present_map; | ||
28 | |||
29 | #if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_SMP) | ||
30 | /* | ||
31 | * Copy data used in early init routines from the initial arrays to the | ||
32 | * per cpu data areas. These arrays then become expendable and the | ||
33 | * *_early_ptr's are zeroed indicating that the static arrays are gone. | ||
34 | */ | ||
35 | static void __init setup_per_cpu_maps(void) | ||
36 | { | ||
37 | int cpu; | ||
38 | |||
39 | for_each_possible_cpu(cpu) { | ||
40 | per_cpu(x86_cpu_to_apicid, cpu) = x86_cpu_to_apicid_init[cpu]; | ||
41 | per_cpu(x86_bios_cpu_apicid, cpu) = | ||
42 | x86_bios_cpu_apicid_init[cpu]; | ||
43 | #ifdef CONFIG_NUMA | ||
44 | per_cpu(x86_cpu_to_node_map, cpu) = | ||
45 | x86_cpu_to_node_map_init[cpu]; | ||
46 | #endif | ||
47 | } | ||
48 | |||
49 | /* indicate the early static arrays will soon be gone */ | ||
50 | x86_cpu_to_apicid_early_ptr = NULL; | ||
51 | x86_bios_cpu_apicid_early_ptr = NULL; | ||
52 | #ifdef CONFIG_NUMA | ||
53 | x86_cpu_to_node_map_early_ptr = NULL; | ||
54 | #endif | ||
55 | } | ||
56 | |||
57 | #ifdef CONFIG_X86_32 | ||
58 | /* | ||
59 | * Great future not-so-futuristic plan: make i386 and x86_64 do it | ||
60 | * the same way | ||
61 | */ | ||
62 | unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; | ||
63 | EXPORT_SYMBOL(__per_cpu_offset); | ||
64 | #endif | ||
65 | |||
66 | /* | ||
67 | * Great future plan: | ||
68 | * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data. | ||
69 | * Always point %gs to its beginning | ||
70 | */ | ||
71 | void __init setup_per_cpu_areas(void) | ||
72 | { | ||
73 | int i; | ||
74 | unsigned long size; | ||
75 | |||
76 | #ifdef CONFIG_HOTPLUG_CPU | ||
77 | prefill_possible_map(); | ||
78 | #endif | ||
79 | |||
80 | /* Copy section for each CPU (we discard the original) */ | ||
81 | size = PERCPU_ENOUGH_ROOM; | ||
82 | printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", | ||
83 | size); | ||
84 | |||
85 | for_each_possible_cpu(i) { | ||
86 | char *ptr; | ||
87 | #ifndef CONFIG_NEED_MULTIPLE_NODES | ||
88 | ptr = alloc_bootmem_pages(size); | ||
89 | #else | ||
90 | int node = early_cpu_to_node(i); | ||
91 | if (!node_online(node) || !NODE_DATA(node)) { | ||
92 | ptr = alloc_bootmem_pages(size); | ||
93 | printk(KERN_INFO | ||
94 | "cpu %d has no node or node-local memory\n", i); | ||
95 | } | ||
96 | else | ||
97 | ptr = alloc_bootmem_pages_node(NODE_DATA(node), size); | ||
98 | #endif | ||
99 | if (!ptr) | ||
100 | panic("Cannot allocate cpu data for CPU %d\n", i); | ||
101 | #ifdef CONFIG_X86_64 | ||
102 | cpu_pda(i)->data_offset = ptr - __per_cpu_start; | ||
103 | #else | ||
104 | __per_cpu_offset[i] = ptr - __per_cpu_start; | ||
105 | #endif | ||
106 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); | ||
107 | } | ||
108 | |||
109 | /* Setup percpu data maps */ | ||
110 | setup_per_cpu_maps(); | ||
111 | } | ||
112 | |||
113 | #endif | ||
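setup_per_cpu_areas() in the new file above gives every possible CPU its own copy of the prototype per-cpu section and records the copy's offset. A toy user-space model of that copy-and-offset scheme (the array sizes, malloc, and the variable-at-offset-8 example are all illustrative assumptions; the kernel uses linker symbols and bootmem):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NCPUS 4
static char proto[64];           /* stands in for __per_cpu_start..__per_cpu_end */
static long cpu_offset[NCPUS];   /* stands in for __per_cpu_offset[] */

int main(void)
{
        int cpu;

        for (cpu = 0; cpu < NCPUS; cpu++) {
                char *ptr = malloc(sizeof(proto));

                if (!ptr)
                        abort();                    /* the kernel panics here */
                cpu_offset[cpu] = ptr - proto;      /* record this CPU's offset */
                memcpy(ptr, proto, sizeof(proto));  /* start from the prototype */
        }

        /* A per-cpu variable at offset 8 in the section is then reached
         * as base + 8 + cpu_offset[cpu]. */
        printf("cpu1 copy of var@8 lives at %p\n",
               (void *)(proto + 8 + cpu_offset[1]));
        return 0;
}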
diff --git a/arch/x86/kernel/setup64.c b/arch/x86/kernel/setup64.c index e24c45677094..9042fb0e36f5 100644 --- a/arch/x86/kernel/setup64.c +++ b/arch/x86/kernel/setup64.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/bootmem.h> | 11 | #include <linux/bootmem.h> |
12 | #include <linux/bitops.h> | 12 | #include <linux/bitops.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/kgdb.h> | ||
14 | #include <asm/pda.h> | 15 | #include <asm/pda.h> |
15 | #include <asm/pgtable.h> | 16 | #include <asm/pgtable.h> |
16 | #include <asm/processor.h> | 17 | #include <asm/processor.h> |
@@ -23,6 +24,7 @@ | |||
23 | #include <asm/proto.h> | 24 | #include <asm/proto.h> |
24 | #include <asm/sections.h> | 25 | #include <asm/sections.h> |
25 | #include <asm/setup.h> | 26 | #include <asm/setup.h> |
27 | #include <asm/genapic.h> | ||
26 | 28 | ||
27 | #ifndef CONFIG_DEBUG_BOOT_PARAMS | 29 | #ifndef CONFIG_DEBUG_BOOT_PARAMS |
28 | struct boot_params __initdata boot_params; | 30 | struct boot_params __initdata boot_params; |
@@ -85,83 +87,6 @@ static int __init nonx32_setup(char *str) | |||
85 | } | 87 | } |
86 | __setup("noexec32=", nonx32_setup); | 88 | __setup("noexec32=", nonx32_setup); |
87 | 89 | ||
88 | /* | ||
89 | * Copy data used in early init routines from the initial arrays to the | ||
90 | * per cpu data areas. These arrays then become expendable and the | ||
91 | * *_early_ptr's are zeroed indicating that the static arrays are gone. | ||
92 | */ | ||
93 | static void __init setup_per_cpu_maps(void) | ||
94 | { | ||
95 | int cpu; | ||
96 | |||
97 | for_each_possible_cpu(cpu) { | ||
98 | #ifdef CONFIG_SMP | ||
99 | if (per_cpu_offset(cpu)) { | ||
100 | #endif | ||
101 | per_cpu(x86_cpu_to_apicid, cpu) = | ||
102 | x86_cpu_to_apicid_init[cpu]; | ||
103 | per_cpu(x86_bios_cpu_apicid, cpu) = | ||
104 | x86_bios_cpu_apicid_init[cpu]; | ||
105 | #ifdef CONFIG_NUMA | ||
106 | per_cpu(x86_cpu_to_node_map, cpu) = | ||
107 | x86_cpu_to_node_map_init[cpu]; | ||
108 | #endif | ||
109 | #ifdef CONFIG_SMP | ||
110 | } | ||
111 | else | ||
112 | printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n", | ||
113 | cpu); | ||
114 | #endif | ||
115 | } | ||
116 | |||
117 | /* indicate the early static arrays will soon be gone */ | ||
118 | x86_cpu_to_apicid_early_ptr = NULL; | ||
119 | x86_bios_cpu_apicid_early_ptr = NULL; | ||
120 | #ifdef CONFIG_NUMA | ||
121 | x86_cpu_to_node_map_early_ptr = NULL; | ||
122 | #endif | ||
123 | } | ||
124 | |||
125 | /* | ||
126 | * Great future plan: | ||
127 | * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data. | ||
128 | * Always point %gs to its beginning | ||
129 | */ | ||
130 | void __init setup_per_cpu_areas(void) | ||
131 | { | ||
132 | int i; | ||
133 | unsigned long size; | ||
134 | |||
135 | #ifdef CONFIG_HOTPLUG_CPU | ||
136 | prefill_possible_map(); | ||
137 | #endif | ||
138 | |||
139 | /* Copy section for each CPU (we discard the original) */ | ||
140 | size = PERCPU_ENOUGH_ROOM; | ||
141 | |||
142 | printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size); | ||
143 | for_each_cpu_mask (i, cpu_possible_map) { | ||
144 | char *ptr; | ||
145 | #ifndef CONFIG_NEED_MULTIPLE_NODES | ||
146 | ptr = alloc_bootmem_pages(size); | ||
147 | #else | ||
148 | int node = early_cpu_to_node(i); | ||
149 | |||
150 | if (!node_online(node) || !NODE_DATA(node)) | ||
151 | ptr = alloc_bootmem_pages(size); | ||
152 | else | ||
153 | ptr = alloc_bootmem_pages_node(NODE_DATA(node), size); | ||
154 | #endif | ||
155 | if (!ptr) | ||
156 | panic("Cannot allocate cpu data for CPU %d\n", i); | ||
157 | cpu_pda(i)->data_offset = ptr - __per_cpu_start; | ||
158 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); | ||
159 | } | ||
160 | |||
161 | /* setup percpu data maps early */ | ||
162 | setup_per_cpu_maps(); | ||
163 | } | ||
164 | |||
165 | void pda_init(int cpu) | 90 | void pda_init(int cpu) |
166 | { | 91 | { |
167 | struct x8664_pda *pda = cpu_pda(cpu); | 92 | struct x8664_pda *pda = cpu_pda(cpu); |
@@ -327,6 +252,17 @@ void __cpuinit cpu_init (void) | |||
327 | load_TR_desc(); | 252 | load_TR_desc(); |
328 | load_LDT(&init_mm.context); | 253 | load_LDT(&init_mm.context); |
329 | 254 | ||
255 | #ifdef CONFIG_KGDB | ||
256 | /* | ||
257 | * If the kgdb is connected no debug regs should be altered. This | ||
258 | * is only applicable when KGDB and a KGDB I/O module are built | ||
259 | * into the kernel and you are using early debugging with | ||
260 | * kgdbwait. KGDB will control the kernel HW breakpoint registers. | ||
261 | */ | ||
262 | if (kgdb_connected && arch_kgdb_ops.correct_hw_break) | ||
263 | arch_kgdb_ops.correct_hw_break(); | ||
264 | else { | ||
265 | #endif | ||
330 | /* | 266 | /* |
331 | * Clear all 6 debug registers: | 267 | * Clear all 6 debug registers: |
332 | */ | 268 | */ |
@@ -337,8 +273,15 @@ void __cpuinit cpu_init (void) | |||
337 | set_debugreg(0UL, 3); | 273 | set_debugreg(0UL, 3); |
338 | set_debugreg(0UL, 6); | 274 | set_debugreg(0UL, 6); |
339 | set_debugreg(0UL, 7); | 275 | set_debugreg(0UL, 7); |
276 | #ifdef CONFIG_KGDB | ||
277 | /* If the kgdb is connected no debug regs should be altered. */ | ||
278 | } | ||
279 | #endif | ||
340 | 280 | ||
341 | fpu_init(); | 281 | fpu_init(); |
342 | 282 | ||
343 | raw_local_save_flags(kernel_eflags); | 283 | raw_local_save_flags(kernel_eflags); |
284 | |||
285 | if (is_uv_system()) | ||
286 | uv_cpu_init(); | ||
344 | } | 287 | } |
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c index 2b3e5d45176b..5b0bffb7fcc9 100644 --- a/arch/x86/kernel/setup_32.c +++ b/arch/x86/kernel/setup_32.c | |||
@@ -62,8 +62,9 @@ | |||
62 | #include <asm/io.h> | 62 | #include <asm/io.h> |
63 | #include <asm/vmi.h> | 63 | #include <asm/vmi.h> |
64 | #include <setup_arch.h> | 64 | #include <setup_arch.h> |
65 | #include <bios_ebda.h> | 65 | #include <asm/bios_ebda.h> |
66 | #include <asm/cacheflush.h> | 66 | #include <asm/cacheflush.h> |
67 | #include <asm/processor.h> | ||
67 | 68 | ||
68 | /* This value is set up by the early boot code to point to the value | 69 | /* This value is set up by the early boot code to point to the value |
69 | immediately after the boot time page tables. It contains a *physical* | 70 | immediately after the boot time page tables. It contains a *physical* |
@@ -154,6 +155,8 @@ struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; | |||
154 | struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; | 155 | struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; |
155 | EXPORT_SYMBOL(boot_cpu_data); | 156 | EXPORT_SYMBOL(boot_cpu_data); |
156 | 157 | ||
158 | unsigned int def_to_bigsmp; | ||
159 | |||
157 | #ifndef CONFIG_X86_PAE | 160 | #ifndef CONFIG_X86_PAE |
158 | unsigned long mmu_cr4_features; | 161 | unsigned long mmu_cr4_features; |
159 | #else | 162 | #else |
@@ -189,7 +192,7 @@ EXPORT_SYMBOL(ist_info); | |||
189 | extern void early_cpu_init(void); | 192 | extern void early_cpu_init(void); |
190 | extern int root_mountflags; | 193 | extern int root_mountflags; |
191 | 194 | ||
192 | unsigned long saved_videomode; | 195 | unsigned long saved_video_mode; |
193 | 196 | ||
194 | #define RAMDISK_IMAGE_START_MASK 0x07FF | 197 | #define RAMDISK_IMAGE_START_MASK 0x07FF |
195 | #define RAMDISK_PROMPT_FLAG 0x8000 | 198 | #define RAMDISK_PROMPT_FLAG 0x8000 |
@@ -227,7 +230,7 @@ static inline void copy_edd(void) | |||
227 | } | 230 | } |
228 | #endif | 231 | #endif |
229 | 232 | ||
230 | int __initdata user_defined_memmap = 0; | 233 | int __initdata user_defined_memmap; |
231 | 234 | ||
232 | /* | 235 | /* |
233 | * "mem=nopentium" disables the 4MB page tables. | 236 | * "mem=nopentium" disables the 4MB page tables. |
@@ -385,15 +388,56 @@ unsigned long __init find_max_low_pfn(void) | |||
385 | return max_low_pfn; | 388 | return max_low_pfn; |
386 | } | 389 | } |
387 | 390 | ||
391 | #define BIOS_EBDA_SEGMENT 0x40E | ||
392 | #define BIOS_LOWMEM_KILOBYTES 0x413 | ||
393 | |||
388 | /* | 394 | /* |
389 | * workaround for Dell systems that neglect to reserve EBDA | 395 | * The BIOS places the EBDA/XBDA at the top of conventional |
396 | * memory, and usually decreases the reported amount of | ||
397 | * conventional memory (int 0x12) too. This also contains a | ||
398 | * workaround for Dell systems that neglect to reserve EBDA. | ||
399 | * The same workaround also avoids a problem with the AMD768MPX | ||
400 | * chipset: reserve a page before VGA to prevent PCI prefetch | ||
401 | * into it (errata #56). Usually the page is reserved anyway, | ||
402 | * unless you have no PS/2 mouse plugged in. | ||
390 | */ | 403 | */ |
391 | static void __init reserve_ebda_region(void) | 404 | static void __init reserve_ebda_region(void) |
392 | { | 405 | { |
393 | unsigned int addr; | 406 | unsigned int lowmem, ebda_addr; |
394 | addr = get_bios_ebda(); | 407 | |
395 | if (addr) | 408 | /* To determine the position of the EBDA and the */ |
396 | reserve_bootmem(addr, PAGE_SIZE, BOOTMEM_DEFAULT); | 409 | /* end of conventional memory, we need to look at */ |
410 | /* the BIOS data area. In a paravirtual environment */ | ||
411 | /* that area is absent. We'll just have to assume */ | ||
412 | /* that the paravirt case can handle memory setup */ | ||
413 | /* correctly, without our help. */ | ||
414 | if (paravirt_enabled()) | ||
415 | return; | ||
416 | |||
417 | /* end of low (conventional) memory */ | ||
418 | lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES); | ||
419 | lowmem <<= 10; | ||
420 | |||
421 | /* start of EBDA area */ | ||
422 | ebda_addr = *(unsigned short *)__va(BIOS_EBDA_SEGMENT); | ||
423 | ebda_addr <<= 4; | ||
424 | |||
425 | /* Fixup: bios puts an EBDA in the top 64K segment */ | ||
426 | /* of conventional memory, but does not adjust lowmem. */ | ||
427 | if ((lowmem - ebda_addr) <= 0x10000) | ||
428 | lowmem = ebda_addr; | ||
429 | |||
430 | /* Fixup: bios does not report an EBDA at all. */ | ||
431 | /* Some old Dells seem to need 4k anyhow (bugzilla 2990) */ | ||
432 | if ((ebda_addr == 0) && (lowmem >= 0x9f000)) | ||
433 | lowmem = 0x9f000; | ||
434 | |||
435 | /* Paranoia: should never happen, but... */ | ||
436 | if ((lowmem == 0) || (lowmem >= 0x100000)) | ||
437 | lowmem = 0x9f000; | ||
438 | |||
439 | /* reserve all memory between lowmem and the 1MB mark */ | ||
440 | reserve_bootmem(lowmem, 0x100000 - lowmem, BOOTMEM_DEFAULT); | ||
397 | } | 441 | } |
398 | 442 | ||
399 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 443 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
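The rewritten reserve_ebda_region() derives both bounds from the BIOS data area: the word at 0x413 is conventional memory in KiB, the word at 0x40E is the EBDA segment. A worked example of that arithmetic with typical BIOS values (the sample values are assumptions for illustration):

#include <assert.h>

int main(void)
{
        unsigned int lowmem   = 639;     /* word at 0x413: base memory in KiB */
        unsigned int ebda_seg = 0x9fc0;  /* word at 0x40E: EBDA segment */
        unsigned int ebda_addr;

        lowmem <<= 10;                   /* 639 KiB -> 0x9fc00 bytes */
        ebda_addr = ebda_seg << 4;       /* segment -> physical 0x9fc00 */

        /* The EBDA sits within 64K of the reported end of low memory,
         * so the reservation starts at the EBDA itself. */
        assert(lowmem == 0x9fc00 && ebda_addr == 0x9fc00);
        assert((lowmem - ebda_addr) <= 0x10000);
        /* Everything from there up to the 1MB mark is then reserved. */
        return 0;
}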
@@ -617,16 +661,9 @@ void __init setup_bootmem_allocator(void) | |||
617 | */ | 661 | */ |
618 | reserve_bootmem(0, PAGE_SIZE, BOOTMEM_DEFAULT); | 662 | reserve_bootmem(0, PAGE_SIZE, BOOTMEM_DEFAULT); |
619 | 663 | ||
620 | /* reserve EBDA region, it's a 4K region */ | 664 | /* reserve EBDA region */ |
621 | reserve_ebda_region(); | 665 | reserve_ebda_region(); |
622 | 666 | ||
623 | /* could be an AMD 768MPX chipset. Reserve a page before VGA to prevent | ||
624 | PCI prefetch into it (errata #56). Usually the page is reserved anyway, | ||
625 | unless you have no PS/2 mouse plugged in. */ | ||
626 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && | ||
627 | boot_cpu_data.x86 == 6) | ||
628 | reserve_bootmem(0xa0000 - 4096, 4096, BOOTMEM_DEFAULT); | ||
629 | |||
630 | #ifdef CONFIG_SMP | 667 | #ifdef CONFIG_SMP |
631 | /* | 668 | /* |
632 | * But first pinch a few for the stack/trampoline stuff | 669 | * But first pinch a few for the stack/trampoline stuff |
@@ -687,6 +724,18 @@ char * __init __attribute__((weak)) memory_setup(void) | |||
687 | return machine_specific_memory_setup(); | 724 | return machine_specific_memory_setup(); |
688 | } | 725 | } |
689 | 726 | ||
727 | #ifdef CONFIG_NUMA | ||
728 | /* | ||
729 | * One day, when i386 and x86_64 are fully integrated, this | ||
730 | * will not live here | ||
731 | */ | ||
732 | void *x86_cpu_to_node_map_early_ptr; | ||
733 | int x86_cpu_to_node_map_init[NR_CPUS] = { | ||
734 | [0 ... NR_CPUS-1] = NUMA_NO_NODE | ||
735 | }; | ||
736 | DEFINE_PER_CPU(int, x86_cpu_to_node_map) = NUMA_NO_NODE; | ||
737 | #endif | ||
738 | |||
690 | /* | 739 | /* |
691 | * Determine if we were loaded by an EFI loader. If so, then we have also been | 740 | * Determine if we were loaded by an EFI loader. If so, then we have also been |
692 | * passed the efi memmap, systab, etc., so we should use these data structures | 741 | * passed the efi memmap, systab, etc., so we should use these data structures |
@@ -714,7 +763,7 @@ void __init setup_arch(char **cmdline_p) | |||
714 | edid_info = boot_params.edid_info; | 763 | edid_info = boot_params.edid_info; |
715 | apm_info.bios = boot_params.apm_bios_info; | 764 | apm_info.bios = boot_params.apm_bios_info; |
716 | ist_info = boot_params.ist_info; | 765 | ist_info = boot_params.ist_info; |
717 | saved_videomode = boot_params.hdr.vid_mode; | 766 | saved_video_mode = boot_params.hdr.vid_mode; |
718 | if( boot_params.sys_desc_table.length != 0 ) { | 767 | if( boot_params.sys_desc_table.length != 0 ) { |
719 | set_mca_bus(boot_params.sys_desc_table.table[3] & 0x2); | 768 | set_mca_bus(boot_params.sys_desc_table.table[3] & 0x2); |
720 | machine_id = boot_params.sys_desc_table.table[0]; | 769 | machine_id = boot_params.sys_desc_table.table[0]; |
@@ -820,6 +869,18 @@ void __init setup_arch(char **cmdline_p) | |||
820 | 869 | ||
821 | io_delay_init(); | 870 | io_delay_init(); |
822 | 871 | ||
872 | #ifdef CONFIG_X86_SMP | ||
873 | /* | ||
874 | * setup to use the early static init tables during kernel startup | ||
875 | * X86_SMP will exclude sub-arches that don't deal well with it. | ||
876 | */ | ||
877 | x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init; | ||
878 | x86_bios_cpu_apicid_early_ptr = (void *)x86_bios_cpu_apicid_init; | ||
879 | #ifdef CONFIG_NUMA | ||
880 | x86_cpu_to_node_map_early_ptr = (void *)x86_cpu_to_node_map_init; | ||
881 | #endif | ||
882 | #endif | ||
883 | |||
823 | #ifdef CONFIG_X86_GENERICARCH | 884 | #ifdef CONFIG_X86_GENERICARCH |
824 | generic_apic_probe(); | 885 | generic_apic_probe(); |
825 | #endif | 886 | #endif |
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index f4f7ecfb898c..674ef3510cdf 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c | |||
@@ -58,7 +58,6 @@ | |||
58 | #include <asm/mmu_context.h> | 58 | #include <asm/mmu_context.h> |
59 | #include <asm/proto.h> | 59 | #include <asm/proto.h> |
60 | #include <asm/setup.h> | 60 | #include <asm/setup.h> |
61 | #include <asm/mach_apic.h> | ||
62 | #include <asm/numa.h> | 61 | #include <asm/numa.h> |
63 | #include <asm/sections.h> | 62 | #include <asm/sections.h> |
64 | #include <asm/dmi.h> | 63 | #include <asm/dmi.h> |
@@ -66,7 +65,9 @@ | |||
66 | #include <asm/mce.h> | 65 | #include <asm/mce.h> |
67 | #include <asm/ds.h> | 66 | #include <asm/ds.h> |
68 | #include <asm/topology.h> | 67 | #include <asm/topology.h> |
68 | #include <asm/trampoline.h> | ||
69 | 69 | ||
70 | #include <mach_apic.h> | ||
70 | #ifdef CONFIG_PARAVIRT | 71 | #ifdef CONFIG_PARAVIRT |
71 | #include <asm/paravirt.h> | 72 | #include <asm/paravirt.h> |
72 | #else | 73 | #else |
@@ -248,6 +249,7 @@ static void __init reserve_crashkernel(void) | |||
248 | (unsigned long)(total_mem >> 20)); | 249 | (unsigned long)(total_mem >> 20)); |
249 | crashk_res.start = crash_base; | 250 | crashk_res.start = crash_base; |
250 | crashk_res.end = crash_base + crash_size - 1; | 251 | crashk_res.end = crash_base + crash_size - 1; |
252 | insert_resource(&iomem_resource, &crashk_res); | ||
251 | } | 253 | } |
252 | } | 254 | } |
253 | #else | 255 | #else |
@@ -322,6 +324,11 @@ void __init setup_arch(char **cmdline_p) | |||
322 | 324 | ||
323 | finish_e820_parsing(); | 325 | finish_e820_parsing(); |
324 | 326 | ||
327 | /* after parse_early_param, so could debug it */ | ||
328 | insert_resource(&iomem_resource, &code_resource); | ||
329 | insert_resource(&iomem_resource, &data_resource); | ||
330 | insert_resource(&iomem_resource, &bss_resource); | ||
331 | |||
325 | early_gart_iommu_check(); | 332 | early_gart_iommu_check(); |
326 | 333 | ||
327 | e820_register_active_regions(0, 0, -1UL); | 334 | e820_register_active_regions(0, 0, -1UL); |
@@ -341,10 +348,12 @@ void __init setup_arch(char **cmdline_p) | |||
341 | 348 | ||
342 | check_efer(); | 349 | check_efer(); |
343 | 350 | ||
344 | init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT)); | 351 | max_pfn_mapped = init_memory_mapping(0, (max_pfn_mapped << PAGE_SHIFT)); |
345 | if (efi_enabled) | 352 | if (efi_enabled) |
346 | efi_init(); | 353 | efi_init(); |
347 | 354 | ||
355 | vsmp_init(); | ||
356 | |||
348 | dmi_scan_machine(); | 357 | dmi_scan_machine(); |
349 | 358 | ||
350 | io_delay_init(); | 359 | io_delay_init(); |
@@ -450,7 +459,7 @@ void __init setup_arch(char **cmdline_p) | |||
450 | /* | 459 | /* |
451 | * We trust e820 completely. No explicit ROM probing in memory. | 460 | * We trust e820 completely. No explicit ROM probing in memory. |
452 | */ | 461 | */ |
453 | e820_reserve_resources(&code_resource, &data_resource, &bss_resource); | 462 | e820_reserve_resources(); |
454 | e820_mark_nosave_regions(); | 463 | e820_mark_nosave_regions(); |
455 | 464 | ||
456 | /* request I/O space for devices used on all i[345]86 PCs */ | 465 | /* request I/O space for devices used on all i[345]86 PCs */ |
@@ -552,9 +561,9 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) | |||
552 | bits = c->x86_coreid_bits; | 561 | bits = c->x86_coreid_bits; |
553 | 562 | ||
554 | /* Low order bits define the core id (index of core in socket) */ | 563 | /* Low order bits define the core id (index of core in socket) */ |
555 | c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1); | 564 | c->cpu_core_id = c->initial_apicid & ((1 << bits)-1); |
556 | /* Convert the APIC ID into the socket ID */ | 565 | /* Convert the initial APIC ID into the socket ID */ |
557 | c->phys_proc_id = phys_pkg_id(bits); | 566 | c->phys_proc_id = c->initial_apicid >> bits; |
558 | 567 | ||
559 | #ifdef CONFIG_NUMA | 568 | #ifdef CONFIG_NUMA |
560 | node = c->phys_proc_id; | 569 | node = c->phys_proc_id; |
@@ -571,7 +580,7 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) | |||
571 | If that doesn't result in a usable node fall back to the | 580 | If that doesn't result in a usable node fall back to the |
572 | path for the previous case. */ | 581 | path for the previous case. */ |
573 | 582 | ||
574 | int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits); | 583 | int ht_nodeid = c->initial_apicid; |
575 | 584 | ||
576 | if (ht_nodeid >= 0 && | 585 | if (ht_nodeid >= 0 && |
577 | apicid_to_node[ht_nodeid] != NUMA_NO_NODE) | 586 | apicid_to_node[ht_nodeid] != NUMA_NO_NODE) |
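amd_detect_cmp() above now splits c->initial_apicid directly: the low x86_coreid_bits select the core within the socket, the remaining bits the socket. A tiny worked example of that decomposition (the APIC ID and bit width are assumed sample values):

#include <assert.h>

int main(void)
{
        unsigned int initial_apicid = 0x5;  /* sample: binary 101 */
        unsigned int bits = 2;              /* sample x86_coreid_bits */

        unsigned int cpu_core_id  = initial_apicid & ((1 << bits) - 1);
        unsigned int phys_proc_id = initial_apicid >> bits;

        /* Low bits are the core within the socket, the rest the socket. */
        assert(cpu_core_id == 1 && phys_proc_id == 1);
        return 0;
}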
@@ -677,7 +686,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
677 | 686 | ||
678 | /* Bit 31 in normal CPUID used for nonstandard 3DNow ID; | 687 | /* Bit 31 in normal CPUID used for nonstandard 3DNow ID; |
679 | 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ | 688 | 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ |
680 | clear_bit(0*32+31, (unsigned long *)&c->x86_capability); | 689 | clear_cpu_cap(c, 0*32+31); |
681 | 690 | ||
682 | /* On C+ stepping K8 rep microcode works well for copy/memset */ | 691 | /* On C+ stepping K8 rep microcode works well for copy/memset */ |
683 | level = cpuid_eax(1); | 692 | level = cpuid_eax(1); |
@@ -721,6 +730,19 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
721 | 730 | ||
722 | if (amd_apic_timer_broken()) | 731 | if (amd_apic_timer_broken()) |
723 | disable_apic_timer = 1; | 732 | disable_apic_timer = 1; |
733 | |||
734 | if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) { | ||
735 | unsigned long long tseg; | ||
736 | |||
737 | /* | ||
738 | * Split up direct mapping around the TSEG SMM area. | ||
739 | * Don't do it for gbpages because there seems very little | ||
740 | * benefit in doing so. | ||
741 | */ | ||
742 | if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg) && | ||
743 | (tseg >> PMD_SHIFT) < (max_pfn_mapped >> (PMD_SHIFT-PAGE_SHIFT))) | ||
744 | set_memory_4k((unsigned long)__va(tseg), 1); | ||
745 | } | ||
724 | } | 746 | } |
725 | 747 | ||
726 | void __cpuinit detect_ht(struct cpuinfo_x86 *c) | 748 | void __cpuinit detect_ht(struct cpuinfo_x86 *c) |
@@ -813,7 +835,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | |||
813 | { | 835 | { |
814 | if ((c->x86 == 0xf && c->x86_model >= 0x03) || | 836 | if ((c->x86 == 0xf && c->x86_model >= 0x03) || |
815 | (c->x86 == 0x6 && c->x86_model >= 0x0e)) | 837 | (c->x86 == 0x6 && c->x86_model >= 0x0e)) |
816 | set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability); | 838 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
817 | } | 839 | } |
818 | 840 | ||
819 | static void __cpuinit init_intel(struct cpuinfo_x86 *c) | 841 | static void __cpuinit init_intel(struct cpuinfo_x86 *c) |
@@ -856,9 +878,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
856 | 878 | ||
857 | if (c->x86 == 15) | 879 | if (c->x86 == 15) |
858 | c->x86_cache_alignment = c->x86_clflush_size * 2; | 880 | c->x86_cache_alignment = c->x86_clflush_size * 2; |
859 | if ((c->x86 == 0xf && c->x86_model >= 0x03) || | ||
860 | (c->x86 == 0x6 && c->x86_model >= 0x0e)) | ||
861 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | ||
862 | if (c->x86 == 6) | 881 | if (c->x86 == 6) |
863 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | 882 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); |
864 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); | 883 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); |
@@ -867,6 +886,32 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
867 | srat_detect_node(); | 886 | srat_detect_node(); |
868 | } | 887 | } |
869 | 888 | ||
889 | static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c) | ||
890 | { | ||
891 | if (c->x86 == 0x6 && c->x86_model >= 0xf) | ||
892 | set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability); | ||
893 | } | ||
894 | |||
895 | static void __cpuinit init_centaur(struct cpuinfo_x86 *c) | ||
896 | { | ||
897 | /* Cache sizes */ | ||
898 | unsigned n; | ||
899 | |||
900 | n = c->extended_cpuid_level; | ||
901 | if (n >= 0x80000008) { | ||
902 | unsigned eax = cpuid_eax(0x80000008); | ||
903 | c->x86_virt_bits = (eax >> 8) & 0xff; | ||
904 | c->x86_phys_bits = eax & 0xff; | ||
905 | } | ||
906 | |||
907 | if (c->x86 == 0x6 && c->x86_model >= 0xf) { | ||
908 | c->x86_cache_alignment = c->x86_clflush_size * 2; | ||
909 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | ||
910 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | ||
911 | } | ||
912 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); | ||
913 | } | ||
914 | |||
870 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) | 915 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) |
871 | { | 916 | { |
872 | char *v = c->x86_vendor_id; | 917 | char *v = c->x86_vendor_id; |
@@ -875,6 +920,8 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) | |||
875 | c->x86_vendor = X86_VENDOR_AMD; | 920 | c->x86_vendor = X86_VENDOR_AMD; |
876 | else if (!strcmp(v, "GenuineIntel")) | 921 | else if (!strcmp(v, "GenuineIntel")) |
877 | c->x86_vendor = X86_VENDOR_INTEL; | 922 | c->x86_vendor = X86_VENDOR_INTEL; |
923 | else if (!strcmp(v, "CentaurHauls")) | ||
924 | c->x86_vendor = X86_VENDOR_CENTAUR; | ||
878 | else | 925 | else |
879 | c->x86_vendor = X86_VENDOR_UNKNOWN; | 926 | c->x86_vendor = X86_VENDOR_UNKNOWN; |
880 | } | 927 | } |
@@ -922,15 +969,16 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) | |||
922 | c->x86 += (tfms >> 20) & 0xff; | 969 | c->x86 += (tfms >> 20) & 0xff; |
923 | if (c->x86 >= 0x6) | 970 | if (c->x86 >= 0x6) |
924 | c->x86_model += ((tfms >> 16) & 0xF) << 4; | 971 | c->x86_model += ((tfms >> 16) & 0xF) << 4; |
925 | if (c->x86_capability[0] & (1<<19)) | 972 | if (test_cpu_cap(c, X86_FEATURE_CLFLSH)) |
926 | c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; | 973 | c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; |
927 | } else { | 974 | } else { |
928 | /* Have CPUID level 0 only - unheard of */ | 975 | /* Have CPUID level 0 only - unheard of */ |
929 | c->x86 = 4; | 976 | c->x86 = 4; |
930 | } | 977 | } |
931 | 978 | ||
979 | c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff; | ||
932 | #ifdef CONFIG_SMP | 980 | #ifdef CONFIG_SMP |
933 | c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff; | 981 | c->phys_proc_id = c->initial_apicid; |
934 | #endif | 982 | #endif |
935 | /* AMD-defined flags: level 0x80000001 */ | 983 | /* AMD-defined flags: level 0x80000001 */ |
936 | xlvl = cpuid_eax(0x80000000); | 984 | xlvl = cpuid_eax(0x80000000); |
@@ -956,12 +1004,22 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) | |||
956 | if (c->extended_cpuid_level >= 0x80000007) | 1004 | if (c->extended_cpuid_level >= 0x80000007) |
957 | c->x86_power = cpuid_edx(0x80000007); | 1005 | c->x86_power = cpuid_edx(0x80000007); |
958 | 1006 | ||
1007 | |||
1008 | clear_cpu_cap(c, X86_FEATURE_PAT); | ||
1009 | |||
959 | switch (c->x86_vendor) { | 1010 | switch (c->x86_vendor) { |
960 | case X86_VENDOR_AMD: | 1011 | case X86_VENDOR_AMD: |
961 | early_init_amd(c); | 1012 | early_init_amd(c); |
1013 | if (c->x86 >= 0xf && c->x86 <= 0x11) | ||
1014 | set_cpu_cap(c, X86_FEATURE_PAT); | ||
962 | break; | 1015 | break; |
963 | case X86_VENDOR_INTEL: | 1016 | case X86_VENDOR_INTEL: |
964 | early_init_intel(c); | 1017 | early_init_intel(c); |
1018 | if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15)) | ||
1019 | set_cpu_cap(c, X86_FEATURE_PAT); | ||
1020 | break; | ||
1021 | case X86_VENDOR_CENTAUR: | ||
1022 | early_init_centaur(c); | ||
965 | break; | 1023 | break; |
966 | } | 1024 | } |
967 | 1025 | ||
@@ -999,6 +1057,10 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
999 | init_intel(c); | 1057 | init_intel(c); |
1000 | break; | 1058 | break; |
1001 | 1059 | ||
1060 | case X86_VENDOR_CENTAUR: | ||
1061 | init_centaur(c); | ||
1062 | break; | ||
1063 | |||
1002 | case X86_VENDOR_UNKNOWN: | 1064 | case X86_VENDOR_UNKNOWN: |
1003 | default: | 1065 | default: |
1004 | display_cacheinfo(c); | 1066 | display_cacheinfo(c); |
@@ -1028,14 +1090,24 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
1028 | #endif | 1090 | #endif |
1029 | select_idle_routine(c); | 1091 | select_idle_routine(c); |
1030 | 1092 | ||
1031 | if (c != &boot_cpu_data) | ||
1032 | mtrr_ap_init(); | ||
1033 | #ifdef CONFIG_NUMA | 1093 | #ifdef CONFIG_NUMA |
1034 | numa_add_cpu(smp_processor_id()); | 1094 | numa_add_cpu(smp_processor_id()); |
1035 | #endif | 1095 | #endif |
1036 | 1096 | ||
1037 | } | 1097 | } |
1038 | 1098 | ||
1099 | void __cpuinit identify_boot_cpu(void) | ||
1100 | { | ||
1101 | identify_cpu(&boot_cpu_data); | ||
1102 | } | ||
1103 | |||
1104 | void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) | ||
1105 | { | ||
1106 | BUG_ON(c == &boot_cpu_data); | ||
1107 | identify_cpu(c); | ||
1108 | mtrr_ap_init(); | ||
1109 | } | ||
1110 | |||
1039 | static __init int setup_noclflush(char *arg) | 1111 | static __init int setup_noclflush(char *arg) |
1040 | { | 1112 | { |
1041 | setup_clear_cpu_cap(X86_FEATURE_CLFLSH); | 1113 | setup_clear_cpu_cap(X86_FEATURE_CLFLSH); |
@@ -1064,123 +1136,3 @@ static __init int setup_disablecpuid(char *arg) | |||
1064 | return 1; | 1136 | return 1; |
1065 | } | 1137 | } |
1066 | __setup("clearcpuid=", setup_disablecpuid); | 1138 | __setup("clearcpuid=", setup_disablecpuid); |
1067 | |||
1068 | /* | ||
1069 | * Get CPU information for use by the procfs. | ||
1070 | */ | ||
1071 | |||
1072 | static int show_cpuinfo(struct seq_file *m, void *v) | ||
1073 | { | ||
1074 | struct cpuinfo_x86 *c = v; | ||
1075 | int cpu = 0, i; | ||
1076 | |||
1077 | #ifdef CONFIG_SMP | ||
1078 | cpu = c->cpu_index; | ||
1079 | #endif | ||
1080 | |||
1081 | seq_printf(m, "processor\t: %u\n" | ||
1082 | "vendor_id\t: %s\n" | ||
1083 | "cpu family\t: %d\n" | ||
1084 | "model\t\t: %d\n" | ||
1085 | "model name\t: %s\n", | ||
1086 | (unsigned)cpu, | ||
1087 | c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", | ||
1088 | c->x86, | ||
1089 | (int)c->x86_model, | ||
1090 | c->x86_model_id[0] ? c->x86_model_id : "unknown"); | ||
1091 | |||
1092 | if (c->x86_mask || c->cpuid_level >= 0) | ||
1093 | seq_printf(m, "stepping\t: %d\n", c->x86_mask); | ||
1094 | else | ||
1095 | seq_printf(m, "stepping\t: unknown\n"); | ||
1096 | |||
1097 | if (cpu_has(c, X86_FEATURE_TSC)) { | ||
1098 | unsigned int freq = cpufreq_quick_get((unsigned)cpu); | ||
1099 | |||
1100 | if (!freq) | ||
1101 | freq = cpu_khz; | ||
1102 | seq_printf(m, "cpu MHz\t\t: %u.%03u\n", | ||
1103 | freq / 1000, (freq % 1000)); | ||
1104 | } | ||
1105 | |||
1106 | /* Cache size */ | ||
1107 | if (c->x86_cache_size >= 0) | ||
1108 | seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); | ||
1109 | |||
1110 | #ifdef CONFIG_SMP | ||
1111 | if (smp_num_siblings * c->x86_max_cores > 1) { | ||
1112 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); | ||
1113 | seq_printf(m, "siblings\t: %d\n", | ||
1114 | cpus_weight(per_cpu(cpu_core_map, cpu))); | ||
1115 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); | ||
1116 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); | ||
1117 | } | ||
1118 | #endif | ||
1119 | |||
1120 | seq_printf(m, | ||
1121 | "fpu\t\t: yes\n" | ||
1122 | "fpu_exception\t: yes\n" | ||
1123 | "cpuid level\t: %d\n" | ||
1124 | "wp\t\t: yes\n" | ||
1125 | "flags\t\t:", | ||
1126 | c->cpuid_level); | ||
1127 | |||
1128 | for (i = 0; i < 32*NCAPINTS; i++) | ||
1129 | if (cpu_has(c, i) && x86_cap_flags[i] != NULL) | ||
1130 | seq_printf(m, " %s", x86_cap_flags[i]); | ||
1131 | |||
1132 | seq_printf(m, "\nbogomips\t: %lu.%02lu\n", | ||
1133 | c->loops_per_jiffy/(500000/HZ), | ||
1134 | (c->loops_per_jiffy/(5000/HZ)) % 100); | ||
1135 | |||
1136 | if (c->x86_tlbsize > 0) | ||
1137 | seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize); | ||
1138 | seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size); | ||
1139 | seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment); | ||
1140 | |||
1141 | seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", | ||
1142 | c->x86_phys_bits, c->x86_virt_bits); | ||
1143 | |||
1144 | seq_printf(m, "power management:"); | ||
1145 | for (i = 0; i < 32; i++) { | ||
1146 | if (c->x86_power & (1 << i)) { | ||
1147 | if (i < ARRAY_SIZE(x86_power_flags) && | ||
1148 | x86_power_flags[i]) | ||
1149 | seq_printf(m, "%s%s", | ||
1150 | x86_power_flags[i][0]?" ":"", | ||
1151 | x86_power_flags[i]); | ||
1152 | else | ||
1153 | seq_printf(m, " [%d]", i); | ||
1154 | } | ||
1155 | } | ||
1156 | |||
1157 | seq_printf(m, "\n\n"); | ||
1158 | |||
1159 | return 0; | ||
1160 | } | ||
1161 | |||
1162 | static void *c_start(struct seq_file *m, loff_t *pos) | ||
1163 | { | ||
1164 | if (*pos == 0) /* just in case, cpu 0 is not the first */ | ||
1165 | *pos = first_cpu(cpu_online_map); | ||
1166 | if ((*pos) < NR_CPUS && cpu_online(*pos)) | ||
1167 | return &cpu_data(*pos); | ||
1168 | return NULL; | ||
1169 | } | ||
1170 | |||
1171 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | ||
1172 | { | ||
1173 | *pos = next_cpu(*pos, cpu_online_map); | ||
1174 | return c_start(m, pos); | ||
1175 | } | ||
1176 | |||
1177 | static void c_stop(struct seq_file *m, void *v) | ||
1178 | { | ||
1179 | } | ||
1180 | |||
1181 | const struct seq_operations cpuinfo_op = { | ||
1182 | .start = c_start, | ||
1183 | .next = c_next, | ||
1184 | .stop = c_stop, | ||
1185 | .show = show_cpuinfo, | ||
1186 | }; | ||
diff --git a/arch/x86/kernel/sigframe_32.h b/arch/x86/kernel/sigframe.h index 0b2221711dad..72bbb519d2dc 100644 --- a/arch/x86/kernel/sigframe_32.h +++ b/arch/x86/kernel/sigframe.h | |||
@@ -1,5 +1,5 @@ | |||
1 | struct sigframe | 1 | #ifdef CONFIG_X86_32 |
2 | { | 2 | struct sigframe { |
3 | char __user *pretcode; | 3 | char __user *pretcode; |
4 | int sig; | 4 | int sig; |
5 | struct sigcontext sc; | 5 | struct sigcontext sc; |
@@ -8,8 +8,7 @@ struct sigframe | |||
8 | char retcode[8]; | 8 | char retcode[8]; |
9 | }; | 9 | }; |
10 | 10 | ||
11 | struct rt_sigframe | 11 | struct rt_sigframe { |
12 | { | ||
13 | char __user *pretcode; | 12 | char __user *pretcode; |
14 | int sig; | 13 | int sig; |
15 | struct siginfo __user *pinfo; | 14 | struct siginfo __user *pinfo; |
@@ -19,3 +18,10 @@ struct rt_sigframe | |||
19 | struct _fpstate fpstate; | 18 | struct _fpstate fpstate; |
20 | char retcode[8]; | 19 | char retcode[8]; |
21 | }; | 20 | }; |
21 | #else | ||
22 | struct rt_sigframe { | ||
23 | char __user *pretcode; | ||
24 | struct ucontext uc; | ||
25 | struct siginfo info; | ||
26 | }; | ||
27 | #endif | ||
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index 0157a6f0f41f..f1b117930837 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c | |||
@@ -4,32 +4,44 @@ | |||
4 | * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson | 4 | * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson |
5 | * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes | 5 | * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes |
6 | */ | 6 | */ |
7 | #include <linux/list.h> | ||
7 | 8 | ||
8 | #include <linux/sched.h> | 9 | #include <linux/personality.h> |
9 | #include <linux/mm.h> | 10 | #include <linux/binfmts.h> |
10 | #include <linux/smp.h> | 11 | #include <linux/suspend.h> |
11 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/ptrace.h> | ||
12 | #include <linux/signal.h> | 14 | #include <linux/signal.h> |
15 | #include <linux/stddef.h> | ||
16 | #include <linux/unistd.h> | ||
13 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
18 | #include <linux/sched.h> | ||
14 | #include <linux/wait.h> | 19 | #include <linux/wait.h> |
15 | #include <linux/unistd.h> | ||
16 | #include <linux/stddef.h> | ||
17 | #include <linux/personality.h> | ||
18 | #include <linux/suspend.h> | ||
19 | #include <linux/ptrace.h> | ||
20 | #include <linux/elf.h> | 20 | #include <linux/elf.h> |
21 | #include <linux/binfmts.h> | 21 | #include <linux/smp.h> |
22 | #include <linux/mm.h> | ||
23 | |||
22 | #include <asm/processor.h> | 24 | #include <asm/processor.h> |
23 | #include <asm/ucontext.h> | 25 | #include <asm/ucontext.h> |
24 | #include <asm/uaccess.h> | 26 | #include <asm/uaccess.h> |
25 | #include <asm/i387.h> | 27 | #include <asm/i387.h> |
26 | #include <asm/vdso.h> | 28 | #include <asm/vdso.h> |
27 | #include "sigframe_32.h" | ||
28 | 29 | ||
29 | #define DEBUG_SIG 0 | 30 | #include "sigframe.h" |
30 | 31 | ||
31 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 32 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
32 | 33 | ||
34 | #define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \ | ||
35 | X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \ | ||
36 | X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \ | ||
37 | X86_EFLAGS_CF) | ||
38 | |||
39 | #ifdef CONFIG_X86_32 | ||
40 | # define FIX_EFLAGS (__FIX_EFLAGS | X86_EFLAGS_RF) | ||
41 | #else | ||
42 | # define FIX_EFLAGS __FIX_EFLAGS | ||
43 | #endif | ||
44 | |||
33 | /* | 45 | /* |
34 | * Atomically swap in the new signal mask, and wait for a signal. | 46 | * Atomically swap in the new signal mask, and wait for a signal. |
35 | */ | 47 | */ |
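The FIX_EFLAGS mask introduced at the top of this file bounds which EFLAGS bits a signal frame may hand back to the kernel on sigreturn; everything else (IF, IOPL, and so on) keeps its kernel-side value. A sketch of that merge using the common __FIX_EFLAGS set without the 32-bit-only RF bit (bit values assumed from asm/processor-flags.h):

#include <assert.h>
#include <stdint.h>

/* EFLAGS bit values assumed from asm/processor-flags.h. */
#define X86_EFLAGS_CF 0x00000001
#define X86_EFLAGS_PF 0x00000004
#define X86_EFLAGS_AF 0x00000010
#define X86_EFLAGS_ZF 0x00000040
#define X86_EFLAGS_SF 0x00000080
#define X86_EFLAGS_TF 0x00000100
#define X86_EFLAGS_IF 0x00000200
#define X86_EFLAGS_DF 0x00000400
#define X86_EFLAGS_OF 0x00000800
#define X86_EFLAGS_AC 0x00040000

#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | X86_EFLAGS_DF | \
                    X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \
                    X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF)

int main(void)
{
        uint32_t kernel_flags = 0x202;       /* IF set, as the kernel left it */
        uint32_t user_flags   = 0xffffffff;  /* hostile value from the frame */
        uint32_t restored;

        /* Only FIX_EFLAGS bits come from user space; IF, IOPL etc. keep
         * their kernel-side values. */
        restored = (kernel_flags & ~FIX_EFLAGS) | (user_flags & FIX_EFLAGS);
        assert(restored & X86_EFLAGS_IF);
        return 0;
}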
@@ -46,10 +58,11 @@ sys_sigsuspend(int history0, int history1, old_sigset_t mask) | |||
46 | current->state = TASK_INTERRUPTIBLE; | 58 | current->state = TASK_INTERRUPTIBLE; |
47 | schedule(); | 59 | schedule(); |
48 | set_thread_flag(TIF_RESTORE_SIGMASK); | 60 | set_thread_flag(TIF_RESTORE_SIGMASK); |
61 | |||
49 | return -ERESTARTNOHAND; | 62 | return -ERESTARTNOHAND; |
50 | } | 63 | } |
51 | 64 | ||
52 | asmlinkage int | 65 | asmlinkage int |
53 | sys_sigaction(int sig, const struct old_sigaction __user *act, | 66 | sys_sigaction(int sig, const struct old_sigaction __user *act, |
54 | struct old_sigaction __user *oact) | 67 | struct old_sigaction __user *oact) |
55 | { | 68 | { |
@@ -58,10 +71,12 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, | |||
58 | 71 | ||
59 | if (act) { | 72 | if (act) { |
60 | old_sigset_t mask; | 73 | old_sigset_t mask; |
74 | |||
61 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | 75 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || |
62 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || | 76 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || |
63 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) | 77 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) |
64 | return -EFAULT; | 78 | return -EFAULT; |
79 | |||
65 | __get_user(new_ka.sa.sa_flags, &act->sa_flags); | 80 | __get_user(new_ka.sa.sa_flags, &act->sa_flags); |
66 | __get_user(mask, &act->sa_mask); | 81 | __get_user(mask, &act->sa_mask); |
67 | siginitset(&new_ka.sa.sa_mask, mask); | 82 | siginitset(&new_ka.sa.sa_mask, mask); |
@@ -74,6 +89,7 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, | |||
74 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || | 89 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || |
75 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) | 90 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) |
76 | return -EFAULT; | 91 | return -EFAULT; |
92 | |||
77 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags); | 93 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags); |
78 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); | 94 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); |
79 | } | 95 | } |
@@ -81,10 +97,12 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, | |||
81 | return ret; | 97 | return ret; |
82 | } | 98 | } |
83 | 99 | ||
84 | asmlinkage int | 100 | asmlinkage int sys_sigaltstack(unsigned long bx) |
85 | sys_sigaltstack(unsigned long bx) | ||
86 | { | 101 | { |
87 | /* This is needed to make gcc realize it doesn't own the "struct pt_regs" */ | 102 | /* |
103 | * This is needed to make gcc realize it doesn't own the | ||
104 | * "struct pt_regs" | ||
105 | */ | ||
88 | struct pt_regs *regs = (struct pt_regs *)&bx; | 106 | struct pt_regs *regs = (struct pt_regs *)&bx; |
89 | const stack_t __user *uss = (const stack_t __user *)bx; | 107 | const stack_t __user *uss = (const stack_t __user *)bx; |
90 | stack_t __user *uoss = (stack_t __user *)regs->cx; | 108 | stack_t __user *uoss = (stack_t __user *)regs->cx; |
@@ -96,9 +114,9 @@ sys_sigaltstack(unsigned long bx) | |||
96 | /* | 114 | /* |
97 | * Do a signal return; undo the signal stack. | 115 | * Do a signal return; undo the signal stack. |
98 | */ | 116 | */ |
99 | |||
100 | static int | 117 | static int |
101 | restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax) | 118 | restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, |
119 | unsigned long *pax) | ||
102 | { | 120 | { |
103 | unsigned int err = 0; | 121 | unsigned int err = 0; |
104 | 122 | ||
@@ -120,37 +138,29 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax | |||
120 | #define GET_SEG(seg) \ | 138 | #define GET_SEG(seg) \ |
121 | { unsigned short tmp; \ | 139 | { unsigned short tmp; \ |
122 | err |= __get_user(tmp, &sc->seg); \ | 140 | err |= __get_user(tmp, &sc->seg); \ |
123 | loadsegment(seg,tmp); } | 141 | loadsegment(seg, tmp); } |
124 | |||
125 | #define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_RF | \ | ||
126 | X86_EFLAGS_OF | X86_EFLAGS_DF | \ | ||
127 | X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \ | ||
128 | X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF) | ||
129 | 142 | ||
130 | GET_SEG(gs); | 143 | GET_SEG(gs); |
131 | COPY_SEG(fs); | 144 | COPY_SEG(fs); |
132 | COPY_SEG(es); | 145 | COPY_SEG(es); |
133 | COPY_SEG(ds); | 146 | COPY_SEG(ds); |
134 | COPY(di); | 147 | COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); |
135 | COPY(si); | 148 | COPY(dx); COPY(cx); COPY(ip); |
136 | COPY(bp); | ||
137 | COPY(sp); | ||
138 | COPY(bx); | ||
139 | COPY(dx); | ||
140 | COPY(cx); | ||
141 | COPY(ip); | ||
142 | COPY_SEG_STRICT(cs); | 149 | COPY_SEG_STRICT(cs); |
143 | COPY_SEG_STRICT(ss); | 150 | COPY_SEG_STRICT(ss); |
144 | 151 | ||
145 | { | 152 | { |
146 | unsigned int tmpflags; | 153 | unsigned int tmpflags; |
154 | |||
147 | err |= __get_user(tmpflags, &sc->flags); | 155 | err |= __get_user(tmpflags, &sc->flags); |
148 | regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); | 156 | regs->flags = (regs->flags & ~FIX_EFLAGS) | |
157 | (tmpflags & FIX_EFLAGS); | ||
149 | regs->orig_ax = -1; /* disable syscall checks */ | 158 | regs->orig_ax = -1; /* disable syscall checks */ |
150 | } | 159 | } |
151 | 160 | ||
152 | { | 161 | { |
153 | struct _fpstate __user * buf; | 162 | struct _fpstate __user *buf; |
163 | |||
154 | err |= __get_user(buf, &sc->fpstate); | 164 | err |= __get_user(buf, &sc->fpstate); |
155 | if (buf) { | 165 | if (buf) { |
156 | if (!access_ok(VERIFY_READ, buf, sizeof(*buf))) | 166 | if (!access_ok(VERIFY_READ, buf, sizeof(*buf))) |
@@ -158,6 +168,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax | |||
158 | err |= restore_i387(buf); | 168 | err |= restore_i387(buf); |
159 | } else { | 169 | } else { |
160 | struct task_struct *me = current; | 170 | struct task_struct *me = current; |
171 | |||
161 | if (used_math()) { | 172 | if (used_math()) { |
162 | clear_fpu(me); | 173 | clear_fpu(me); |
163 | clear_used_math(); | 174 | clear_used_math(); |
@@ -165,24 +176,26 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax | |||
165 | } | 176 | } |
166 | } | 177 | } |
167 | 178 | ||
168 | err |= __get_user(*peax, &sc->ax); | 179 | err |= __get_user(*pax, &sc->ax); |
169 | return err; | 180 | return err; |
170 | 181 | ||
171 | badframe: | 182 | badframe: |
172 | return 1; | 183 | return 1; |
173 | } | 184 | } |
174 | 185 | ||
175 | asmlinkage int sys_sigreturn(unsigned long __unused) | 186 | asmlinkage unsigned long sys_sigreturn(unsigned long __unused) |
176 | { | 187 | { |
177 | struct pt_regs *regs = (struct pt_regs *) &__unused; | 188 | struct sigframe __user *frame; |
178 | struct sigframe __user *frame = (struct sigframe __user *)(regs->sp - 8); | 189 | struct pt_regs *regs; |
190 | unsigned long ax; | ||
179 | sigset_t set; | 191 | sigset_t set; |
180 | int ax; | 192 | |
193 | regs = (struct pt_regs *) &__unused; | ||
194 | frame = (struct sigframe __user *)(regs->sp - 8); | ||
181 | 195 | ||
182 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | 196 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) |
183 | goto badframe; | 197 | goto badframe; |
184 | if (__get_user(set.sig[0], &frame->sc.oldmask) | 198 | if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1 |
185 | || (_NSIG_WORDS > 1 | ||
186 | && __copy_from_user(&set.sig[1], &frame->extramask, | 199 | && __copy_from_user(&set.sig[1], &frame->extramask, |
187 | sizeof(frame->extramask)))) | 200 | sizeof(frame->extramask)))) |
188 | goto badframe; | 201 | goto badframe; |
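The `regs->sp - 8` above depends on what the return path has already popped by the time sys_sigreturn runs. A layout sketch (offsets inferred from the i386 struct sigframe; illustrative, not part of the patch):

	/*
	 * Stack as seen inside sys_sigreturn (illustrative):
	 *
	 *   frame+0  pretcode   -- popped by the handler's `ret`
	 *   frame+4  sig        -- popped by the trampoline's `popl %eax`
	 *   frame+8  sc ...     -- the user sp now points here
	 *
	 * so the frame base is recovered as sp - 8. An rt frame pops only
	 * pretcode, hence sp - 4 -- i.e. sp - sizeof(long) -- in
	 * sys_rt_sigreturn below.
	 */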
@@ -192,33 +205,35 @@ asmlinkage int sys_sigreturn(unsigned long __unused) | |||
192 | current->blocked = set; | 205 | current->blocked = set; |
193 | recalc_sigpending(); | 206 | recalc_sigpending(); |
194 | spin_unlock_irq(¤t->sighand->siglock); | 207 | spin_unlock_irq(¤t->sighand->siglock); |
195 | 208 | ||
196 | if (restore_sigcontext(regs, &frame->sc, &ax)) | 209 | if (restore_sigcontext(regs, &frame->sc, &ax)) |
197 | goto badframe; | 210 | goto badframe; |
198 | return ax; | 211 | return ax; |
199 | 212 | ||
200 | badframe: | 213 | badframe: |
201 | if (show_unhandled_signals && printk_ratelimit()) { | 214 | if (show_unhandled_signals && printk_ratelimit()) { |
202 | printk("%s%s[%d] bad frame in sigreturn frame:%p ip:%lx" | 215 | printk(KERN_INFO "%s%s[%d] bad frame in sigreturn frame:" |
203 | " sp:%lx oeax:%lx", | 216 | "%p ip:%lx sp:%lx oeax:%lx", |
204 | task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG, | 217 | task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG, |
205 | current->comm, task_pid_nr(current), frame, regs->ip, | 218 | current->comm, task_pid_nr(current), frame, regs->ip, |
206 | regs->sp, regs->orig_ax); | 219 | regs->sp, regs->orig_ax); |
207 | print_vma_addr(" in ", regs->ip); | 220 | print_vma_addr(" in ", regs->ip); |
208 | printk("\n"); | 221 | printk(KERN_CONT "\n"); |
209 | } | 222 | } |
210 | 223 | ||
211 | force_sig(SIGSEGV, current); | 224 | force_sig(SIGSEGV, current); |
225 | |||
212 | return 0; | 226 | return 0; |
213 | } | 227 | } |
214 | 228 | ||
215 | asmlinkage int sys_rt_sigreturn(unsigned long __unused) | 229 | asmlinkage int sys_rt_sigreturn(unsigned long __unused) |
216 | { | 230 | { |
217 | struct pt_regs *regs = (struct pt_regs *) &__unused; | 231 | struct pt_regs *regs = (struct pt_regs *)&__unused; |
218 | struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(regs->sp - 4); | 232 | struct rt_sigframe __user *frame; |
233 | unsigned long ax; | ||
219 | sigset_t set; | 234 | sigset_t set; |
220 | int ax; | ||
221 | 235 | ||
236 | frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long)); | ||
222 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | 237 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) |
223 | goto badframe; | 238 | goto badframe; |
224 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) | 239 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) |
@@ -229,7 +244,7 @@ asmlinkage int sys_rt_sigreturn(unsigned long __unused) | |||
229 | current->blocked = set; | 244 | current->blocked = set; |
230 | recalc_sigpending(); | 245 | recalc_sigpending(); |
231 | spin_unlock_irq(¤t->sighand->siglock); | 246 | spin_unlock_irq(¤t->sighand->siglock); |
232 | 247 | ||
233 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) | 248 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) |
234 | goto badframe; | 249 | goto badframe; |
235 | 250 | ||
@@ -241,12 +256,11 @@ asmlinkage int sys_rt_sigreturn(unsigned long __unused) | |||
241 | badframe: | 256 | badframe: |
242 | force_sig(SIGSEGV, current); | 257 | force_sig(SIGSEGV, current); |
243 | return 0; | 258 | return 0; |
244 | } | 259 | } |
245 | 260 | ||
246 | /* | 261 | /* |
247 | * Set up a signal frame. | 262 | * Set up a signal frame. |
248 | */ | 263 | */ |
249 | |||
250 | static int | 264 | static int |
251 | setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate, | 265 | setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate, |
252 | struct pt_regs *regs, unsigned long mask) | 266 | struct pt_regs *regs, unsigned long mask) |
@@ -277,9 +291,9 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate, | |||
277 | 291 | ||
278 | tmp = save_i387(fpstate); | 292 | tmp = save_i387(fpstate); |
279 | if (tmp < 0) | 293 | if (tmp < 0) |
280 | err = 1; | 294 | err = 1; |
281 | else | 295 | else |
282 | err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate); | 296 | err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate); |
283 | 297 | ||
284 | /* non-iBCS2 extensions.. */ | 298 | /* non-iBCS2 extensions.. */ |
285 | err |= __put_user(mask, &sc->oldmask); | 299 | err |= __put_user(mask, &sc->oldmask); |
@@ -292,7 +306,7 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate, | |||
292 | * Determine which stack to use.. | 306 | * Determine which stack to use.. |
293 | */ | 307 | */ |
294 | static inline void __user * | 308 | static inline void __user * |
295 | get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) | 309 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) |
296 | { | 310 | { |
297 | unsigned long sp; | 311 | unsigned long sp; |
298 | 312 | ||
@@ -310,32 +324,30 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) | |||
310 | if (ka->sa.sa_flags & SA_ONSTACK) { | 324 | if (ka->sa.sa_flags & SA_ONSTACK) { |
311 | if (sas_ss_flags(sp) == 0) | 325 | if (sas_ss_flags(sp) == 0) |
312 | sp = current->sas_ss_sp + current->sas_ss_size; | 326 | sp = current->sas_ss_sp + current->sas_ss_size; |
313 | } | 327 | } else { |
314 | 328 | /* This is the legacy signal stack switching. */ | |
315 | /* This is the legacy signal stack switching. */ | 329 | if ((regs->ss & 0xffff) != __USER_DS && |
316 | else if ((regs->ss & 0xffff) != __USER_DS && | 330 | !(ka->sa.sa_flags & SA_RESTORER) && |
317 | !(ka->sa.sa_flags & SA_RESTORER) && | 331 | ka->sa.sa_restorer) |
318 | ka->sa.sa_restorer) { | 332 | sp = (unsigned long) ka->sa.sa_restorer; |
319 | sp = (unsigned long) ka->sa.sa_restorer; | ||
320 | } | 333 | } |
321 | 334 | ||
322 | sp -= frame_size; | 335 | sp -= frame_size; |
323 | /* Align the stack pointer according to the i386 ABI, | 336 | /* |
324 | * i.e. so that on function entry ((sp + 4) & 15) == 0. */ | 337 | * Align the stack pointer according to the i386 ABI, |
338 | * i.e. so that on function entry ((sp + 4) & 15) == 0. | ||
339 | */ | ||
325 | sp = ((sp + 4) & -16ul) - 4; | 340 | sp = ((sp + 4) & -16ul) - 4; |
341 | |||
326 | return (void __user *) sp; | 342 | return (void __user *) sp; |
327 | } | 343 | } |
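The closing `sp = ((sp + 4) & -16ul) - 4;` is easy to verify in isolation: rounding `sp + 4` down to a 16-byte boundary and backing off one word guarantees `(sp + 4) % 16 == 0` at handler entry and never moves the pointer up the stack. A stand-alone user-space check (hypothetical sketch, not from the patch):

	#include <assert.h>

	static unsigned long align_sigframe_sp(unsigned long sp)
	{
		/* same arithmetic as get_sigframe() above */
		return ((sp + 4) & -16ul) - 4;
	}

	int main(void)
	{
		unsigned long sp;

		for (sp = 0x100000; sp < 0x100040; sp++) {
			unsigned long aligned = align_sigframe_sp(sp);

			assert((aligned + 4) % 16 == 0);	/* i386 ABI entry rule */
			assert(aligned <= sp);			/* stack only grows down */
		}
		return 0;
	}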
328 | 344 | ||
329 | /* These symbols are defined with the addresses in the vsyscall page. | 345 | static int |
330 | See vsyscall-sigreturn.S. */ | 346 | setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, |
331 | extern void __user __kernel_sigreturn; | 347 | struct pt_regs *regs) |
332 | extern void __user __kernel_rt_sigreturn; | ||
333 | |||
334 | static int setup_frame(int sig, struct k_sigaction *ka, | ||
335 | sigset_t *set, struct pt_regs * regs) | ||
336 | { | 348 | { |
337 | void __user *restorer; | ||
338 | struct sigframe __user *frame; | 349 | struct sigframe __user *frame; |
350 | void __user *restorer; | ||
339 | int err = 0; | 351 | int err = 0; |
340 | int usig; | 352 | int usig; |
341 | 353 | ||
@@ -365,7 +377,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
365 | goto give_sigsegv; | 377 | goto give_sigsegv; |
366 | } | 378 | } |
367 | 379 | ||
368 | if (current->binfmt->hasvdso) | 380 | if (current->mm->context.vdso) |
369 | restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); | 381 | restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); |
370 | else | 382 | else |
371 | restorer = &frame->retcode; | 383 | restorer = &frame->retcode; |
@@ -374,9 +386,9 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
374 | 386 | ||
375 | /* Set up to return from userspace. */ | 387 | /* Set up to return from userspace. */ |
376 | err |= __put_user(restorer, &frame->pretcode); | 388 | err |= __put_user(restorer, &frame->pretcode); |
377 | 389 | ||
378 | /* | 390 | /* |
379 | * This is popl %eax ; movl $,%eax ; int $0x80 | 391 | * This is popl %eax ; movl $__NR_sigreturn, %eax ; int $0x80 |
380 | * | 392 | * |
381 | * WE DO NOT USE IT ANY MORE! It's only left here for historical | 393 | * WE DO NOT USE IT ANY MORE! It's only left here for historical |
382 | * reasons and because gdb uses it as a signature to notice | 394 | * reasons and because gdb uses it as a signature to notice |
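For orientation, the bytes this comment describes are stored a few (elided) lines below the hunk; a sketch of those stores as they appear in the i386 tree, with opcodes and offsets quoted from memory, so treat it as illustrative rather than as part of this patch:

	/* popl %eax is 0x58, movl $imm32,%eax is 0xb8 <imm32> (little endian,
	 * so the leading 16-bit store is 0xb858), and int $0x80 is 0xcd 0x80: */
	err |= __put_user(0xb858, (short __user *)(frame->retcode + 0));
	err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode + 2));
	err |= __put_user(0x80cd, (short __user *)(frame->retcode + 6));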
@@ -390,11 +402,11 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
390 | goto give_sigsegv; | 402 | goto give_sigsegv; |
391 | 403 | ||
392 | /* Set up registers for signal handler */ | 404 | /* Set up registers for signal handler */ |
393 | regs->sp = (unsigned long) frame; | 405 | regs->sp = (unsigned long)frame; |
394 | regs->ip = (unsigned long) ka->sa.sa_handler; | 406 | regs->ip = (unsigned long)ka->sa.sa_handler; |
395 | regs->ax = (unsigned long) sig; | 407 | regs->ax = (unsigned long)sig; |
396 | regs->dx = (unsigned long) 0; | 408 | regs->dx = 0; |
397 | regs->cx = (unsigned long) 0; | 409 | regs->cx = 0; |
398 | 410 | ||
399 | regs->ds = __USER_DS; | 411 | regs->ds = __USER_DS; |
400 | regs->es = __USER_DS; | 412 | regs->es = __USER_DS; |
@@ -407,15 +419,10 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
407 | * The tracer may want to single-step inside the | 419 | * The tracer may want to single-step inside the |
408 | * handler too. | 420 | * handler too. |
409 | */ | 421 | */ |
410 | regs->flags &= ~(TF_MASK | X86_EFLAGS_DF); | 422 | regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF); |
411 | if (test_thread_flag(TIF_SINGLESTEP)) | 423 | if (test_thread_flag(TIF_SINGLESTEP)) |
412 | ptrace_notify(SIGTRAP); | 424 | ptrace_notify(SIGTRAP); |
413 | 425 | ||
414 | #if DEBUG_SIG | ||
415 | printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", | ||
416 | current->comm, current->pid, frame, regs->ip, frame->pretcode); | ||
417 | #endif | ||
418 | |||
419 | return 0; | 426 | return 0; |
420 | 427 | ||
421 | give_sigsegv: | 428 | give_sigsegv: |
@@ -424,10 +431,10 @@ give_sigsegv: | |||
424 | } | 431 | } |
425 | 432 | ||
426 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 433 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
427 | sigset_t *set, struct pt_regs * regs) | 434 | sigset_t *set, struct pt_regs *regs) |
428 | { | 435 | { |
429 | void __user *restorer; | ||
430 | struct rt_sigframe __user *frame; | 436 | struct rt_sigframe __user *frame; |
437 | void __user *restorer; | ||
431 | int err = 0; | 438 | int err = 0; |
432 | int usig; | 439 | int usig; |
433 | 440 | ||
@@ -457,7 +464,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
457 | &frame->uc.uc_stack.ss_flags); | 464 | &frame->uc.uc_stack.ss_flags); |
458 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | 465 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); |
459 | err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate, | 466 | err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate, |
460 | regs, set->sig[0]); | 467 | regs, set->sig[0]); |
461 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 468 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
462 | if (err) | 469 | if (err) |
463 | goto give_sigsegv; | 470 | goto give_sigsegv; |
@@ -467,9 +474,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
467 | if (ka->sa.sa_flags & SA_RESTORER) | 474 | if (ka->sa.sa_flags & SA_RESTORER) |
468 | restorer = ka->sa.sa_restorer; | 475 | restorer = ka->sa.sa_restorer; |
469 | err |= __put_user(restorer, &frame->pretcode); | 476 | err |= __put_user(restorer, &frame->pretcode); |
470 | 477 | ||
471 | /* | 478 | /* |
472 | * This is movl $,%ax ; int $0x80 | 479 | * This is movl $__NR_rt_sigreturn, %ax ; int $0x80 |
473 | * | 480 | * |
474 | * WE DO NOT USE IT ANY MORE! It's only left here for historical | 481 | * WE DO NOT USE IT ANY MORE! It's only left here for historical |
475 | * reasons and because gdb uses it as a signature to notice | 482 | * reasons and because gdb uses it as a signature to notice |
@@ -483,11 +490,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
483 | goto give_sigsegv; | 490 | goto give_sigsegv; |
484 | 491 | ||
485 | /* Set up registers for signal handler */ | 492 | /* Set up registers for signal handler */ |
486 | regs->sp = (unsigned long) frame; | 493 | regs->sp = (unsigned long)frame; |
487 | regs->ip = (unsigned long) ka->sa.sa_handler; | 494 | regs->ip = (unsigned long)ka->sa.sa_handler; |
488 | regs->ax = (unsigned long) usig; | 495 | regs->ax = (unsigned long)usig; |
489 | regs->dx = (unsigned long) &frame->info; | 496 | regs->dx = (unsigned long)&frame->info; |
490 | regs->cx = (unsigned long) &frame->uc; | 497 | regs->cx = (unsigned long)&frame->uc; |
491 | 498 | ||
492 | regs->ds = __USER_DS; | 499 | regs->ds = __USER_DS; |
493 | regs->es = __USER_DS; | 500 | regs->es = __USER_DS; |
@@ -500,15 +507,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
500 | * The tracer may want to single-step inside the | 507 | * The tracer may want to single-step inside the |
501 | * handler too. | 508 | * handler too. |
502 | */ | 509 | */ |
503 | regs->flags &= ~(TF_MASK | X86_EFLAGS_DF); | 510 | regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF); |
504 | if (test_thread_flag(TIF_SINGLESTEP)) | 511 | if (test_thread_flag(TIF_SINGLESTEP)) |
505 | ptrace_notify(SIGTRAP); | 512 | ptrace_notify(SIGTRAP); |
506 | 513 | ||
507 | #if DEBUG_SIG | ||
508 | printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", | ||
509 | current->comm, current->pid, frame, regs->ip, frame->pretcode); | ||
510 | #endif | ||
511 | |||
512 | return 0; | 514 | return 0; |
513 | 515 | ||
514 | give_sigsegv: | 516 | give_sigsegv: |
@@ -517,33 +519,33 @@ give_sigsegv: | |||
517 | } | 519 | } |
518 | 520 | ||
519 | /* | 521 | /* |
520 | * OK, we're invoking a handler | 522 | * OK, we're invoking a handler: |
521 | */ | 523 | */ |
522 | |||
523 | static int | 524 | static int |
524 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | 525 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, |
525 | sigset_t *oldset, struct pt_regs * regs) | 526 | sigset_t *oldset, struct pt_regs *regs) |
526 | { | 527 | { |
527 | int ret; | 528 | int ret; |
528 | 529 | ||
529 | /* Are we from a system call? */ | 530 | /* Are we from a system call? */ |
530 | if (regs->orig_ax >= 0) { | 531 | if ((long)regs->orig_ax >= 0) { |
531 | /* If so, check system call restarting.. */ | 532 | /* If so, check system call restarting.. */ |
532 | switch (regs->ax) { | 533 | switch (regs->ax) { |
533 | case -ERESTART_RESTARTBLOCK: | 534 | case -ERESTART_RESTARTBLOCK: |
534 | case -ERESTARTNOHAND: | 535 | case -ERESTARTNOHAND: |
536 | regs->ax = -EINTR; | ||
537 | break; | ||
538 | |||
539 | case -ERESTARTSYS: | ||
540 | if (!(ka->sa.sa_flags & SA_RESTART)) { | ||
535 | regs->ax = -EINTR; | 541 | regs->ax = -EINTR; |
536 | break; | 542 | break; |
537 | 543 | } | |
538 | case -ERESTARTSYS: | 544 | /* fallthrough */ |
539 | if (!(ka->sa.sa_flags & SA_RESTART)) { | 545 | case -ERESTARTNOINTR: |
540 | regs->ax = -EINTR; | 546 | regs->ax = regs->orig_ax; |
541 | break; | 547 | regs->ip -= 2; |
542 | } | 548 | break; |
543 | /* fallthrough */ | ||
544 | case -ERESTARTNOINTR: | ||
545 | regs->ax = regs->orig_ax; | ||
546 | regs->ip -= 2; | ||
547 | } | 549 | } |
548 | } | 550 | } |
549 | 551 | ||
@@ -561,16 +563,17 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | |||
561 | else | 563 | else |
562 | ret = setup_frame(sig, ka, oldset, regs); | 564 | ret = setup_frame(sig, ka, oldset, regs); |
563 | 565 | ||
564 | if (ret == 0) { | 566 | if (ret) |
565 | spin_lock_irq(¤t->sighand->siglock); | 567 | return ret; |
566 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); | ||
567 | if (!(ka->sa.sa_flags & SA_NODEFER)) | ||
568 | sigaddset(¤t->blocked,sig); | ||
569 | recalc_sigpending(); | ||
570 | spin_unlock_irq(¤t->sighand->siglock); | ||
571 | } | ||
572 | 568 | ||
573 | return ret; | 569 | spin_lock_irq(¤t->sighand->siglock); |
570 | sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); | ||
571 | if (!(ka->sa.sa_flags & SA_NODEFER)) | ||
572 | sigaddset(¤t->blocked, sig); | ||
573 | recalc_sigpending(); | ||
574 | spin_unlock_irq(¤t->sighand->siglock); | ||
575 | |||
576 | return 0; | ||
574 | } | 577 | } |
575 | 578 | ||
576 | /* | 579 | /* |
@@ -580,18 +583,17 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | |||
580 | */ | 583 | */ |
581 | static void do_signal(struct pt_regs *regs) | 584 | static void do_signal(struct pt_regs *regs) |
582 | { | 585 | { |
586 | struct k_sigaction ka; | ||
583 | siginfo_t info; | 587 | siginfo_t info; |
584 | int signr; | 588 | int signr; |
585 | struct k_sigaction ka; | ||
586 | sigset_t *oldset; | 589 | sigset_t *oldset; |
587 | 590 | ||
588 | /* | 591 | /* |
589 | * We want the common case to go fast, which | 592 | * We want the common case to go fast, which is why we may in certain |
590 | * is why we may in certain cases get here from | 593 | * cases get here from kernel mode. Just return without doing anything |
591 | * kernel mode. Just return without doing anything | 594 | * if so. |
592 | * if so. vm86 regs switched out by assembly code | 595 | * X86_32: vm86 regs switched out by assembly code before reaching |
593 | * before reaching here, so testing against kernel | 596 | * here, so testing against kernel CS suffices. |
594 | * CS suffices. | ||
595 | */ | 597 | */ |
596 | if (!user_mode(regs)) | 598 | if (!user_mode(regs)) |
597 | return; | 599 | return; |
@@ -603,29 +605,31 @@ static void do_signal(struct pt_regs *regs) | |||
603 | 605 | ||
604 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 606 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
605 | if (signr > 0) { | 607 | if (signr > 0) { |
606 | /* Re-enable any watchpoints before delivering the | 608 | /* |
609 | * Re-enable any watchpoints before delivering the | ||
607 | * signal to user space. The processor register will | 610 | * signal to user space. The processor register will |
608 | * have been cleared if the watchpoint triggered | 611 | * have been cleared if the watchpoint triggered |
609 | * inside the kernel. | 612 | * inside the kernel. |
610 | */ | 613 | */ |
611 | if (unlikely(current->thread.debugreg7)) | 614 | if (current->thread.debugreg7) |
612 | set_debugreg(current->thread.debugreg7, 7); | 615 | set_debugreg(current->thread.debugreg7, 7); |
613 | 616 | ||
614 | /* Whee! Actually deliver the signal. */ | 617 | /* Whee! Actually deliver the signal. */ |
615 | if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { | 618 | if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { |
616 | /* a signal was successfully delivered; the saved | 619 | /* |
620 | * a signal was successfully delivered; the saved | ||
617 | * sigmask will have been stored in the signal frame, | 621 | * sigmask will have been stored in the signal frame, |
618 | * and will be restored by sigreturn, so we can simply | 622 | * and will be restored by sigreturn, so we can simply |
619 | * clear the TIF_RESTORE_SIGMASK flag */ | 623 | * clear the TIF_RESTORE_SIGMASK flag |
624 | */ | ||
620 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | 625 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) |
621 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 626 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
622 | } | 627 | } |
623 | |||
624 | return; | 628 | return; |
625 | } | 629 | } |
626 | 630 | ||
627 | /* Did we come from a system call? */ | 631 | /* Did we come from a system call? */ |
628 | if (regs->orig_ax >= 0) { | 632 | if ((long)regs->orig_ax >= 0) { |
629 | /* Restart the system call - no handlers present */ | 633 | /* Restart the system call - no handlers present */ |
630 | switch (regs->ax) { | 634 | switch (regs->ax) { |
631 | case -ERESTARTNOHAND: | 635 | case -ERESTARTNOHAND: |
@@ -642,8 +646,10 @@ static void do_signal(struct pt_regs *regs) | |||
642 | } | 646 | } |
643 | } | 647 | } |
644 | 648 | ||
645 | /* if there's no signal to deliver, we just put the saved sigmask | 649 | /* |
646 | * back */ | 650 | * If there's no signal to deliver, we just put the saved sigmask |
651 | * back. | ||
652 | */ | ||
647 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { | 653 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { |
648 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 654 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
649 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | 655 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); |
@@ -654,13 +660,12 @@ static void do_signal(struct pt_regs *regs) | |||
654 | * notification of userspace execution resumption | 660 | * notification of userspace execution resumption |
655 | * - triggered by the TIF_WORK_MASK flags | 661 | * - triggered by the TIF_WORK_MASK flags |
656 | */ | 662 | */ |
657 | __attribute__((regparm(3))) | 663 | void |
658 | void do_notify_resume(struct pt_regs *regs, void *_unused, | 664 | do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) |
659 | __u32 thread_info_flags) | ||
660 | { | 665 | { |
661 | /* Pending single-step? */ | 666 | /* Pending single-step? */ |
662 | if (thread_info_flags & _TIF_SINGLESTEP) { | 667 | if (thread_info_flags & _TIF_SINGLESTEP) { |
663 | regs->flags |= TF_MASK; | 668 | regs->flags |= X86_EFLAGS_TF; |
664 | clear_thread_flag(TIF_SINGLESTEP); | 669 | clear_thread_flag(TIF_SINGLESTEP); |
665 | } | 670 | } |
666 | 671 | ||
@@ -670,6 +675,6 @@ void do_notify_resume(struct pt_regs *regs, void *_unused, | |||
670 | 675 | ||
671 | if (thread_info_flags & _TIF_HRTICK_RESCHED) | 676 | if (thread_info_flags & _TIF_HRTICK_RESCHED) |
672 | hrtick_resched(); | 677 | hrtick_resched(); |
673 | 678 | ||
674 | clear_thread_flag(TIF_IRET); | 679 | clear_thread_flag(TIF_IRET); |
675 | } | 680 | } |
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c index 1c83e5124c65..827179c5b32a 100644 --- a/arch/x86/kernel/signal_64.c +++ b/arch/x86/kernel/signal_64.c | |||
@@ -19,17 +19,28 @@ | |||
19 | #include <linux/stddef.h> | 19 | #include <linux/stddef.h> |
20 | #include <linux/personality.h> | 20 | #include <linux/personality.h> |
21 | #include <linux/compiler.h> | 21 | #include <linux/compiler.h> |
22 | #include <asm/processor.h> | ||
22 | #include <asm/ucontext.h> | 23 | #include <asm/ucontext.h> |
23 | #include <asm/uaccess.h> | 24 | #include <asm/uaccess.h> |
24 | #include <asm/i387.h> | 25 | #include <asm/i387.h> |
25 | #include <asm/proto.h> | 26 | #include <asm/proto.h> |
26 | #include <asm/ia32_unistd.h> | 27 | #include <asm/ia32_unistd.h> |
27 | #include <asm/mce.h> | 28 | #include <asm/mce.h> |
28 | 29 | #include "sigframe.h" | |
29 | /* #define DEBUG_SIG 1 */ | ||
30 | 30 | ||
31 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 31 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
32 | 32 | ||
33 | #define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \ | ||
34 | X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \ | ||
35 | X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \ | ||
36 | X86_EFLAGS_CF) | ||
37 | |||
38 | #ifdef CONFIG_X86_32 | ||
39 | # define FIX_EFLAGS (__FIX_EFLAGS | X86_EFLAGS_RF) | ||
40 | #else | ||
41 | # define FIX_EFLAGS __FIX_EFLAGS | ||
42 | #endif | ||
43 | |||
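One way to sanity-check this consolidation: OR-ing the processor-flag constants in `__FIX_EFLAGS` reproduces the magic `0x40DD5` that the 64-bit restore_sigcontext() used before (see the hunk below), with the 32-bit variant adding RF on top. A stand-alone sketch using the numeric values from <asm/processor-flags.h>:

	#include <stdio.h>

	int main(void)
	{
		unsigned long fix = 0x00040000 /* AC */ | 0x00000800 /* OF */
				  | 0x00000400 /* DF */ | 0x00000100 /* TF */
				  | 0x00000080 /* SF */ | 0x00000040 /* ZF */
				  | 0x00000010 /* AF */ | 0x00000004 /* PF */
				  | 0x00000001 /* CF */;

		printf("0x%lx\n", fix);			/* prints 0x40dd5 */
		printf("0x%lx\n", fix | 0x00010000);	/* + RF: 0x50dd5, X86_32 */
		return 0;
	}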
33 | int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 44 | int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
34 | sigset_t *set, struct pt_regs * regs); | 45 | sigset_t *set, struct pt_regs * regs); |
35 | int ia32_setup_frame(int sig, struct k_sigaction *ka, | 46 | int ia32_setup_frame(int sig, struct k_sigaction *ka, |
@@ -46,16 +57,9 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, | |||
46 | /* | 57 | /* |
47 | * Do a signal return; undo the signal stack. | 58 | * Do a signal return; undo the signal stack. |
48 | */ | 59 | */ |
49 | |||
50 | struct rt_sigframe | ||
51 | { | ||
52 | char __user *pretcode; | ||
53 | struct ucontext uc; | ||
54 | struct siginfo info; | ||
55 | }; | ||
56 | |||
57 | static int | 60 | static int |
58 | restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned long *prax) | 61 | restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, |
62 | unsigned long *pax) | ||
59 | { | 63 | { |
60 | unsigned int err = 0; | 64 | unsigned int err = 0; |
61 | 65 | ||
@@ -87,7 +91,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned | |||
87 | { | 91 | { |
88 | unsigned int tmpflags; | 92 | unsigned int tmpflags; |
89 | err |= __get_user(tmpflags, &sc->flags); | 93 | err |= __get_user(tmpflags, &sc->flags); |
90 | regs->flags = (regs->flags & ~0x40DD5) | (tmpflags & 0x40DD5); | 94 | regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); |
91 | regs->orig_ax = -1; /* disable syscall checks */ | 95 | regs->orig_ax = -1; /* disable syscall checks */ |
92 | } | 96 | } |
93 | 97 | ||
@@ -108,7 +112,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned | |||
108 | } | 112 | } |
109 | } | 113 | } |
110 | 114 | ||
111 | err |= __get_user(*prax, &sc->ax); | 115 | err |= __get_user(*pax, &sc->ax); |
112 | return err; | 116 | return err; |
113 | 117 | ||
114 | badframe: | 118 | badframe: |
@@ -121,13 +125,11 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) | |||
121 | sigset_t set; | 125 | sigset_t set; |
122 | unsigned long ax; | 126 | unsigned long ax; |
123 | 127 | ||
124 | frame = (struct rt_sigframe __user *)(regs->sp - 8); | 128 | frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long)); |
125 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) { | 129 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) |
126 | goto badframe; | 130 | goto badframe; |
127 | } | 131 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) |
128 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) { | ||
129 | goto badframe; | 132 | goto badframe; |
130 | } | ||
131 | 133 | ||
132 | sigdelsetmask(&set, ~_BLOCKABLE); | 134 | sigdelsetmask(&set, ~_BLOCKABLE); |
133 | spin_lock_irq(¤t->sighand->siglock); | 135 | spin_lock_irq(¤t->sighand->siglock); |
@@ -138,10 +140,6 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) | |||
138 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) | 140 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) |
139 | goto badframe; | 141 | goto badframe; |
140 | 142 | ||
141 | #ifdef DEBUG_SIG | ||
142 | printk("%d sigreturn ip:%lx sp:%lx frame:%p ax:%lx\n",current->pid,regs->ip,regs->sp,frame,ax); | ||
143 | #endif | ||
144 | |||
145 | if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT) | 143 | if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT) |
146 | goto badframe; | 144 | goto badframe; |
147 | 145 | ||
@@ -270,10 +268,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
270 | if (err) | 268 | if (err) |
271 | goto give_sigsegv; | 269 | goto give_sigsegv; |
272 | 270 | ||
273 | #ifdef DEBUG_SIG | ||
274 | printk("%d old ip %lx old sp %lx old ax %lx\n", current->pid,regs->ip,regs->sp,regs->ax); | ||
275 | #endif | ||
276 | |||
277 | /* Set up registers for signal handler */ | 271 | /* Set up registers for signal handler */ |
278 | regs->di = sig; | 272 | regs->di = sig; |
279 | /* In case the signal handler was declared without prototypes */ | 273 | /* In case the signal handler was declared without prototypes */ |
@@ -298,10 +292,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
298 | regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF); | 292 | regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF); |
299 | if (test_thread_flag(TIF_SINGLESTEP)) | 293 | if (test_thread_flag(TIF_SINGLESTEP)) |
300 | ptrace_notify(SIGTRAP); | 294 | ptrace_notify(SIGTRAP); |
301 | #ifdef DEBUG_SIG | ||
302 | printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%p\n", | ||
303 | current->comm, current->pid, frame, regs->ip, frame->pretcode); | ||
304 | #endif | ||
305 | 295 | ||
306 | return 0; | 296 | return 0; |
307 | 297 | ||
@@ -345,35 +335,29 @@ static long current_syscall_ret(struct pt_regs *regs) | |||
345 | 335 | ||
346 | static int | 336 | static int |
347 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | 337 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, |
348 | sigset_t *oldset, struct pt_regs *regs) | 338 | sigset_t *oldset, struct pt_regs *regs) |
349 | { | 339 | { |
350 | int ret; | 340 | int ret; |
351 | 341 | ||
352 | #ifdef DEBUG_SIG | ||
353 | printk("handle_signal pid:%d sig:%lu ip:%lx sp:%lx regs=%p\n", | ||
354 | current->pid, sig, | ||
355 | regs->ip, regs->sp, regs); | ||
356 | #endif | ||
357 | |||
358 | /* Are we from a system call? */ | 342 | /* Are we from a system call? */ |
359 | if (current_syscall(regs) >= 0) { | 343 | if (current_syscall(regs) >= 0) { |
360 | /* If so, check system call restarting.. */ | 344 | /* If so, check system call restarting.. */ |
361 | switch (current_syscall_ret(regs)) { | 345 | switch (current_syscall_ret(regs)) { |
362 | case -ERESTART_RESTARTBLOCK: | 346 | case -ERESTART_RESTARTBLOCK: |
363 | case -ERESTARTNOHAND: | 347 | case -ERESTARTNOHAND: |
364 | regs->ax = -EINTR; | 348 | regs->ax = -EINTR; |
365 | break; | 349 | break; |
366 | 350 | ||
367 | case -ERESTARTSYS: | 351 | case -ERESTARTSYS: |
368 | if (!(ka->sa.sa_flags & SA_RESTART)) { | 352 | if (!(ka->sa.sa_flags & SA_RESTART)) { |
369 | regs->ax = -EINTR; | 353 | regs->ax = -EINTR; |
370 | break; | ||
371 | } | ||
372 | /* fallthrough */ | ||
373 | case -ERESTARTNOINTR: | ||
374 | regs->ax = regs->orig_ax; | ||
375 | regs->ip -= 2; | ||
376 | break; | 354 | break; |
355 | } | ||
356 | /* fallthrough */ | ||
357 | case -ERESTARTNOINTR: | ||
358 | regs->ax = regs->orig_ax; | ||
359 | regs->ip -= 2; | ||
360 | break; | ||
377 | } | 361 | } |
378 | } | 362 | } |
379 | 363 | ||
@@ -420,10 +404,11 @@ static void do_signal(struct pt_regs *regs) | |||
420 | sigset_t *oldset; | 404 | sigset_t *oldset; |
421 | 405 | ||
422 | /* | 406 | /* |
423 | * We want the common case to go fast, which | 407 | * We want the common case to go fast, which is why we may in certain |
424 | * is why we may in certain cases get here from | 408 | * cases get here from kernel mode. Just return without doing anything |
425 | * kernel mode. Just return without doing anything | ||
426 | * if so. | 409 | * if so. |
410 | * X86_32: vm86 regs switched out by assembly code before reaching | ||
411 | * here, so testing against kernel CS suffices. | ||
427 | */ | 412 | */ |
428 | if (!user_mode(regs)) | 413 | if (!user_mode(regs)) |
429 | return; | 414 | return; |
@@ -473,22 +458,19 @@ static void do_signal(struct pt_regs *regs) | |||
473 | } | 458 | } |
474 | } | 459 | } |
475 | 460 | ||
476 | /* if there's no signal to deliver, we just put the saved sigmask | 461 | /* |
477 | back. */ | 462 | * If there's no signal to deliver, we just put the saved sigmask |
463 | * back. | ||
464 | */ | ||
478 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { | 465 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { |
479 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 466 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
480 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | 467 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); |
481 | } | 468 | } |
482 | } | 469 | } |
483 | 470 | ||
484 | void | 471 | void do_notify_resume(struct pt_regs *regs, void *unused, |
485 | do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) | 472 | __u32 thread_info_flags) |
486 | { | 473 | { |
487 | #ifdef DEBUG_SIG | ||
488 | printk("do_notify_resume flags:%x ip:%lx sp:%lx caller:%p pending:%x\n", | ||
489 | thread_info_flags, regs->ip, regs->sp, __builtin_return_address(0),signal_pending(current)); | ||
490 | #endif | ||
491 | |||
492 | /* Pending single-step? */ | 474 | /* Pending single-step? */ |
493 | if (thread_info_flags & _TIF_SINGLESTEP) { | 475 | if (thread_info_flags & _TIF_SINGLESTEP) { |
494 | regs->flags |= X86_EFLAGS_TF; | 476 | regs->flags |= X86_EFLAGS_TF; |
@@ -502,7 +484,7 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) | |||
502 | #endif /* CONFIG_X86_MCE */ | 484 | #endif /* CONFIG_X86_MCE */ |
503 | 485 | ||
504 | /* deal with pending signal delivery */ | 486 | /* deal with pending signal delivery */ |
505 | if (thread_info_flags & (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK)) | 487 | if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) |
506 | do_signal(regs); | 488 | do_signal(regs); |
507 | 489 | ||
508 | if (thread_info_flags & _TIF_HRTICK_RESCHED) | 490 | if (thread_info_flags & _TIF_HRTICK_RESCHED) |
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c new file mode 100644 index 000000000000..8f75893a6467 --- /dev/null +++ b/arch/x86/kernel/smp.c | |||
@@ -0,0 +1,343 @@ | |||
1 | /* | ||
2 | * Intel SMP support routines. | ||
3 | * | ||
4 | * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> | ||
5 | * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com> | ||
6 | * (c) 2002,2003 Andi Kleen, SuSE Labs. | ||
7 | * | ||
8 | * i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com> | ||
9 | * | ||
10 | * This code is released under the GNU General Public License version 2 or | ||
11 | * later. | ||
12 | */ | ||
13 | |||
14 | #include <linux/init.h> | ||
15 | |||
16 | #include <linux/mm.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | #include <linux/kernel_stat.h> | ||
20 | #include <linux/mc146818rtc.h> | ||
21 | #include <linux/cache.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/cpu.h> | ||
24 | |||
25 | #include <asm/mtrr.h> | ||
26 | #include <asm/tlbflush.h> | ||
27 | #include <asm/mmu_context.h> | ||
28 | #include <asm/proto.h> | ||
29 | #include <mach_ipi.h> | ||
30 | #include <mach_apic.h> | ||
31 | /* | ||
32 | * Some notes on x86 processor bugs affecting SMP operation: | ||
33 | * | ||
34 | * Pentium, Pentium Pro, II, III (and all CPUs) have bugs. | ||
35 | * The Linux implications for SMP are handled as follows: | ||
36 | * | ||
37 | * Pentium III / [Xeon] | ||
38 | * None of the E1AP-E3AP errata are visible to the user. | ||
39 | * | ||
40 | * E1AP. see PII A1AP | ||
41 | * E2AP. see PII A2AP | ||
42 | * E3AP. see PII A3AP | ||
43 | * | ||
44 | * Pentium II / [Xeon] | ||
45 | * None of the A1AP-A3AP errata are visible to the user. | ||
46 | * | ||
47 | * A1AP. see PPro 1AP | ||
48 | * A2AP. see PPro 2AP | ||
49 | * A3AP. see PPro 7AP | ||
50 | * | ||
51 | * Pentium Pro | ||
52 | * None of 1AP-9AP errata are visible to the normal user, | ||
53 | * except occasional delivery of 'spurious interrupt' as trap #15. | ||
54 | * This is very rare and a non-problem. | ||
55 | * | ||
56 | * 1AP. Linux maps APIC as non-cacheable | ||
57 | * 2AP. worked around in hardware | ||
58 | * 3AP. fixed in C0 and above steppings microcode update. | ||
59 | * Linux does not use excessive STARTUP_IPIs. | ||
60 | * 4AP. worked around in hardware | ||
61 | * 5AP. symmetric IO mode (normal Linux operation) not affected. | ||
62 | * 'noapic' mode has vector 0xf filled out properly. | ||
63 | * 6AP. 'noapic' mode might be affected - fixed in later steppings | ||
64 | * 7AP. We do not assume writes to the LVT deasserting IRQs | ||
65 | * 8AP. We do not enable low power mode (deep sleep) during MP bootup | ||
66 | * 9AP. We do not use mixed mode | ||
67 | * | ||
68 | * Pentium | ||
69 | * There is a marginal case where REP MOVS on 100MHz SMP | ||
70 | * machines with B stepping processors can fail. XXX should provide | ||
71 | * an L1cache=Writethrough or L1cache=off option. | ||
72 | * | ||
73 | * B stepping CPUs may hang. There are hardware work arounds | ||
74 | * for this. We warn about it in case your board doesn't have the work | ||
75 | * arounds. Basically that's so I can tell anyone with a B stepping | ||
76 | * CPU and SMP problems "tough". | ||
77 | * | ||
78 | * Specific items [From Pentium Processor Specification Update] | ||
79 | * | ||
80 | * 1AP. Linux doesn't use remote read | ||
81 | * 2AP. Linux doesn't trust APIC errors | ||
82 | * 3AP. We work around this | ||
83 | * 4AP. Linux never generated 3 interrupts of the same priority | ||
84 | * to cause a lost local interrupt. | ||
85 | * 5AP. Remote read is never used | ||
86 | * 6AP. not affected - worked around in hardware | ||
87 | * 7AP. not affected - worked around in hardware | ||
88 | * 8AP. worked around in hardware - we get explicit CS errors if not | ||
89 | * 9AP. only 'noapic' mode affected. Might generate spurious | ||
90 | * interrupts, we log only the first one and count the | ||
91 | * rest silently. | ||
92 | * 10AP. not affected - worked around in hardware | ||
93 | * 11AP. Linux reads the APIC between writes to avoid this, as per | ||
94 | * the documentation. Make sure you preserve this as it affects | ||
95 | * the C stepping chips too. | ||
96 | * 12AP. not affected - worked around in hardware | ||
97 | * 13AP. not affected - worked around in hardware | ||
98 | * 14AP. we always deassert INIT during bootup | ||
99 | * 15AP. not affected - worked around in hardware | ||
100 | * 16AP. not affected - worked around in hardware | ||
101 | * 17AP. not affected - worked around in hardware | ||
102 | * 18AP. not affected - worked around in hardware | ||
103 | * 19AP. not affected - worked around in BIOS | ||
104 | * | ||
105 | * If this sounds worrying believe me these bugs are either ___RARE___, | ||
106 | * or are signal timing bugs worked around in hardware and there's | ||
107 | * about nothing of note with C stepping upwards. | ||
108 | */ | ||
109 | |||
110 | /* | ||
111 | * this function sends a 'reschedule' IPI to another CPU. | ||
112 | * it goes straight through and wastes no time serializing | ||
113 | * anything. Worst case is that we lose a reschedule ... | ||
114 | */ | ||
115 | static void native_smp_send_reschedule(int cpu) | ||
116 | { | ||
117 | if (unlikely(cpu_is_offline(cpu))) { | ||
118 | WARN_ON(1); | ||
119 | return; | ||
120 | } | ||
121 | send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * Structure and data for smp_call_function(). This is designed to minimise | ||
126 | * static memory requirements. It also looks cleaner. | ||
127 | */ | ||
128 | static DEFINE_SPINLOCK(call_lock); | ||
129 | |||
130 | struct call_data_struct { | ||
131 | void (*func) (void *info); | ||
132 | void *info; | ||
133 | atomic_t started; | ||
134 | atomic_t finished; | ||
135 | int wait; | ||
136 | }; | ||
137 | |||
138 | void lock_ipi_call_lock(void) | ||
139 | { | ||
140 | spin_lock_irq(&call_lock); | ||
141 | } | ||
142 | |||
143 | void unlock_ipi_call_lock(void) | ||
144 | { | ||
145 | spin_unlock_irq(&call_lock); | ||
146 | } | ||
147 | |||
148 | static struct call_data_struct *call_data; | ||
149 | |||
150 | static void __smp_call_function(void (*func) (void *info), void *info, | ||
151 | int nonatomic, int wait) | ||
152 | { | ||
153 | struct call_data_struct data; | ||
154 | int cpus = num_online_cpus() - 1; | ||
155 | |||
156 | if (!cpus) | ||
157 | return; | ||
158 | |||
159 | data.func = func; | ||
160 | data.info = info; | ||
161 | atomic_set(&data.started, 0); | ||
162 | data.wait = wait; | ||
163 | if (wait) | ||
164 | atomic_set(&data.finished, 0); | ||
165 | |||
166 | call_data = &data; | ||
167 | mb(); | ||
168 | |||
169 | /* Send a message to all other CPUs and wait for them to respond */ | ||
170 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | ||
171 | |||
172 | /* Wait for response */ | ||
173 | while (atomic_read(&data.started) != cpus) | ||
174 | cpu_relax(); | ||
175 | |||
176 | if (wait) | ||
177 | while (atomic_read(&data.finished) != cpus) | ||
178 | cpu_relax(); | ||
179 | } | ||
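The ordering here is the whole trick: call_data must be globally visible (the mb()) before the IPI lands, and the two counters give the initiator its rendezvous points. An illustrative timeline (names from this file):

	/*
	 *   initiator                        each target CPU
	 *   ---------                        ---------------
	 *   call_data = &data; mb();
	 *   send_IPI_allbutself() ------->   ack_APIC_irq()
	 *   spin until started == cpus       mb(); atomic_inc(&started)
	 *     (data/info must stay live      func(info), in irq context
	 *      past this point only if
	 *      wait was set)                 if (wait) mb(),
	 *   if (wait)                            atomic_inc(&finished)
	 *     spin until finished == cpus
	 */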
180 | |||
181 | |||
182 | /** | ||
183 | * smp_call_function_mask(): Run a function on a set of other CPUs. | ||
184 | * @mask: The set of cpus to run on. Must not include the current cpu. | ||
185 | * @func: The function to run. This must be fast and non-blocking. | ||
186 | * @info: An arbitrary pointer to pass to the function. | ||
187 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
188 | * | ||
189 | * Returns 0 on success, else a negative status code. | ||
190 | * | ||
191 | * If @wait is true, then returns once @func has returned; otherwise | ||
192 | * it returns just before the target cpu calls @func. | ||
193 | * | ||
194 | * You must not call this function with disabled interrupts or from a | ||
195 | * hardware interrupt handler or from a bottom half handler. | ||
196 | */ | ||
197 | static int | ||
198 | native_smp_call_function_mask(cpumask_t mask, | ||
199 | void (*func)(void *), void *info, | ||
200 | int wait) | ||
201 | { | ||
202 | struct call_data_struct data; | ||
203 | cpumask_t allbutself; | ||
204 | int cpus; | ||
205 | |||
206 | /* Can deadlock when called with interrupts disabled */ | ||
207 | WARN_ON(irqs_disabled()); | ||
208 | |||
209 | /* Holding any lock stops cpus from going down. */ | ||
210 | spin_lock(&call_lock); | ||
211 | |||
212 | allbutself = cpu_online_map; | ||
213 | cpu_clear(smp_processor_id(), allbutself); | ||
214 | |||
215 | cpus_and(mask, mask, allbutself); | ||
216 | cpus = cpus_weight(mask); | ||
217 | |||
218 | if (!cpus) { | ||
219 | spin_unlock(&call_lock); | ||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | data.func = func; | ||
224 | data.info = info; | ||
225 | atomic_set(&data.started, 0); | ||
226 | data.wait = wait; | ||
227 | if (wait) | ||
228 | atomic_set(&data.finished, 0); | ||
229 | |||
230 | call_data = &data; | ||
231 | wmb(); | ||
232 | |||
233 | /* Send a message to other CPUs */ | ||
234 | if (cpus_equal(mask, allbutself)) | ||
235 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | ||
236 | else | ||
237 | send_IPI_mask(mask, CALL_FUNCTION_VECTOR); | ||
238 | |||
239 | /* Wait for response */ | ||
240 | while (atomic_read(&data.started) != cpus) | ||
241 | cpu_relax(); | ||
242 | |||
243 | if (wait) | ||
244 | while (atomic_read(&data.finished) != cpus) | ||
245 | cpu_relax(); | ||
246 | spin_unlock(&call_lock); | ||
247 | |||
248 | return 0; | ||
249 | } | ||
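A minimal caller sketch for the mask variant, going through the smp_call_function_mask() wrapper that dispatches via smp_ops (the wrapper name is assumed from <asm/smp.h>; `bump` and `hits` are hypothetical):

	static void bump(void *info)
	{
		atomic_inc((atomic_t *)info);	/* fast and non-blocking, per the rules above */
	}

	static int poke_others(atomic_t *hits)
	{
		cpumask_t mask = cpu_online_map;

		cpu_clear(smp_processor_id(), mask);	/* must not include self */
		return smp_call_function_mask(mask, bump, hits, 1 /* wait */);
	}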
250 | |||
251 | static void stop_this_cpu(void *dummy) | ||
252 | { | ||
253 | local_irq_disable(); | ||
254 | /* | ||
255 | * Remove this CPU: | ||
256 | */ | ||
257 | cpu_clear(smp_processor_id(), cpu_online_map); | ||
258 | disable_local_APIC(); | ||
259 | if (hlt_works(smp_processor_id())) | ||
260 | for (;;) halt(); | ||
261 | for (;;); | ||
262 | } | ||
263 | |||
264 | /* | ||
265 | * this function calls the 'stop' function on all other CPUs in the system. | ||
266 | */ | ||
267 | |||
268 | static void native_smp_send_stop(void) | ||
269 | { | ||
270 | int nolock; | ||
271 | unsigned long flags; | ||
272 | |||
273 | if (reboot_force) | ||
274 | return; | ||
275 | |||
276 | /* Don't deadlock on the call lock in panic */ | ||
277 | nolock = !spin_trylock(&call_lock); | ||
278 | local_irq_save(flags); | ||
279 | __smp_call_function(stop_this_cpu, NULL, 0, 0); | ||
280 | if (!nolock) | ||
281 | spin_unlock(&call_lock); | ||
282 | disable_local_APIC(); | ||
283 | local_irq_restore(flags); | ||
284 | } | ||
285 | |||
286 | /* | ||
287 | * Reschedule call back. Nothing to do, | ||
288 | * all the work is done automatically when | ||
289 | * we return from the interrupt. | ||
290 | */ | ||
291 | void smp_reschedule_interrupt(struct pt_regs *regs) | ||
292 | { | ||
293 | ack_APIC_irq(); | ||
294 | #ifdef CONFIG_X86_32 | ||
295 | __get_cpu_var(irq_stat).irq_resched_count++; | ||
296 | #else | ||
297 | add_pda(irq_resched_count, 1); | ||
298 | #endif | ||
299 | } | ||
300 | |||
301 | void smp_call_function_interrupt(struct pt_regs *regs) | ||
302 | { | ||
303 | void (*func) (void *info) = call_data->func; | ||
304 | void *info = call_data->info; | ||
305 | int wait = call_data->wait; | ||
306 | |||
307 | ack_APIC_irq(); | ||
308 | /* | ||
309 | * Notify initiating CPU that I've grabbed the data and am | ||
310 | * about to execute the function | ||
311 | */ | ||
312 | mb(); | ||
313 | atomic_inc(&call_data->started); | ||
314 | /* | ||
315 | * At this point the info structure may be out of scope unless wait==1 | ||
316 | */ | ||
317 | irq_enter(); | ||
318 | (*func)(info); | ||
319 | #ifdef CONFIG_X86_32 | ||
320 | __get_cpu_var(irq_stat).irq_call_count++; | ||
321 | #else | ||
322 | add_pda(irq_call_count, 1); | ||
323 | #endif | ||
324 | irq_exit(); | ||
325 | |||
326 | if (wait) { | ||
327 | mb(); | ||
328 | atomic_inc(&call_data->finished); | ||
329 | } | ||
330 | } | ||
331 | |||
332 | struct smp_ops smp_ops = { | ||
333 | .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, | ||
334 | .smp_prepare_cpus = native_smp_prepare_cpus, | ||
335 | .cpu_up = native_cpu_up, | ||
336 | .smp_cpus_done = native_smp_cpus_done, | ||
337 | |||
338 | .smp_send_stop = native_smp_send_stop, | ||
339 | .smp_send_reschedule = native_smp_send_reschedule, | ||
340 | .smp_call_function_mask = native_smp_call_function_mask, | ||
341 | }; | ||
342 | EXPORT_SYMBOL_GPL(smp_ops); | ||
343 | |||
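The point of the ops table is that generic and paravirt code call through it rather than the native_* functions directly; the wrappers in <asm/smp.h> are one-liners of roughly this shape (sketch, wrapper name assumed):

	static inline void smp_send_reschedule(int cpu)
	{
		smp_ops.smp_send_reschedule(cpu);	/* native_* here, or a paravirt hook */
	}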
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c deleted file mode 100644 index dc0cde9d16fb..000000000000 --- a/arch/x86/kernel/smp_32.c +++ /dev/null | |||
@@ -1,712 +0,0 @@ | |||
1 | /* | ||
2 | * Intel SMP support routines. | ||
3 | * | ||
4 | * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> | ||
5 | * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com> | ||
6 | * | ||
7 | * This code is released under the GNU General Public License version 2 or | ||
8 | * later. | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | |||
13 | #include <linux/mm.h> | ||
14 | #include <linux/delay.h> | ||
15 | #include <linux/spinlock.h> | ||
16 | #include <linux/kernel_stat.h> | ||
17 | #include <linux/mc146818rtc.h> | ||
18 | #include <linux/cache.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/cpu.h> | ||
21 | #include <linux/module.h> | ||
22 | |||
23 | #include <asm/mtrr.h> | ||
24 | #include <asm/tlbflush.h> | ||
25 | #include <asm/mmu_context.h> | ||
26 | #include <mach_apic.h> | ||
27 | |||
28 | /* | ||
29 | * Some notes on x86 processor bugs affecting SMP operation: | ||
30 | * | ||
31 | * Pentium, Pentium Pro, II, III (and all CPUs) have bugs. | ||
32 | * The Linux implications for SMP are handled as follows: | ||
33 | * | ||
34 | * Pentium III / [Xeon] | ||
35 | * None of the E1AP-E3AP errata are visible to the user. | ||
36 | * | ||
37 | * E1AP. see PII A1AP | ||
38 | * E2AP. see PII A2AP | ||
39 | * E3AP. see PII A3AP | ||
40 | * | ||
41 | * Pentium II / [Xeon] | ||
42 | * None of the A1AP-A3AP errata are visible to the user. | ||
43 | * | ||
44 | * A1AP. see PPro 1AP | ||
45 | * A2AP. see PPro 2AP | ||
46 | * A3AP. see PPro 7AP | ||
47 | * | ||
48 | * Pentium Pro | ||
49 | * None of 1AP-9AP errata are visible to the normal user, | ||
50 | * except occasional delivery of 'spurious interrupt' as trap #15. | ||
51 | * This is very rare and a non-problem. | ||
52 | * | ||
53 | * 1AP. Linux maps APIC as non-cacheable | ||
54 | * 2AP. worked around in hardware | ||
55 | * 3AP. fixed in C0 and above steppings microcode update. | ||
56 | * Linux does not use excessive STARTUP_IPIs. | ||
57 | * 4AP. worked around in hardware | ||
58 | * 5AP. symmetric IO mode (normal Linux operation) not affected. | ||
59 | * 'noapic' mode has vector 0xf filled out properly. | ||
60 | * 6AP. 'noapic' mode might be affected - fixed in later steppings | ||
61 | * 7AP. We do not assume writes to the LVT deasserting IRQs | ||
62 | * 8AP. We do not enable low power mode (deep sleep) during MP bootup | ||
63 | * 9AP. We do not use mixed mode | ||
64 | * | ||
65 | * Pentium | ||
66 | * There is a marginal case where REP MOVS on 100MHz SMP | ||
67 | * machines with B stepping processors can fail. XXX should provide | ||
68 | * an L1cache=Writethrough or L1cache=off option. | ||
69 | * | ||
70 | * B stepping CPUs may hang. There are hardware work arounds | ||
71 | * for this. We warn about it in case your board doesn't have the work | ||
72 | * arounds. Basically that's so I can tell anyone with a B stepping | ||
73 | * CPU and SMP problems "tough". | ||
74 | * | ||
75 | * Specific items [From Pentium Processor Specification Update] | ||
76 | * | ||
77 | * 1AP. Linux doesn't use remote read | ||
78 | * 2AP. Linux doesn't trust APIC errors | ||
79 | * 3AP. We work around this | ||
80 | * 4AP. Linux never generated 3 interrupts of the same priority | ||
81 | * to cause a lost local interrupt. | ||
82 | * 5AP. Remote read is never used | ||
83 | * 6AP. not affected - worked around in hardware | ||
84 | * 7AP. not affected - worked around in hardware | ||
85 | * 8AP. worked around in hardware - we get explicit CS errors if not | ||
86 | * 9AP. only 'noapic' mode affected. Might generate spurious | ||
87 | * interrupts, we log only the first one and count the | ||
88 | * rest silently. | ||
89 | * 10AP. not affected - worked around in hardware | ||
90 | * 11AP. Linux reads the APIC between writes to avoid this, as per | ||
91 | * the documentation. Make sure you preserve this as it affects | ||
92 | * the C stepping chips too. | ||
93 | * 12AP. not affected - worked around in hardware | ||
94 | * 13AP. not affected - worked around in hardware | ||
95 | * 14AP. we always deassert INIT during bootup | ||
96 | * 15AP. not affected - worked around in hardware | ||
97 | * 16AP. not affected - worked around in hardware | ||
98 | * 17AP. not affected - worked around in hardware | ||
99 | * 18AP. not affected - worked around in hardware | ||
100 | * 19AP. not affected - worked around in BIOS | ||
101 | * | ||
102 | * If this sounds worrying believe me these bugs are either ___RARE___, | ||
103 | * or are signal timing bugs worked around in hardware and there's | ||
104 | * about nothing of note with C stepping upwards. | ||
105 | */ | ||
106 | |||
107 | DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, }; | ||
108 | |||
109 | /* | ||
110 | * the following functions deal with sending IPIs between CPUs. | ||
111 | * | ||
112 | * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. | ||
113 | */ | ||
114 | |||
115 | static inline int __prepare_ICR (unsigned int shortcut, int vector) | ||
116 | { | ||
117 | unsigned int icr = shortcut | APIC_DEST_LOGICAL; | ||
118 | |||
119 | switch (vector) { | ||
120 | default: | ||
121 | icr |= APIC_DM_FIXED | vector; | ||
122 | break; | ||
123 | case NMI_VECTOR: | ||
124 | icr |= APIC_DM_NMI; | ||
125 | break; | ||
126 | } | ||
127 | return icr; | ||
128 | } | ||
129 | |||
130 | static inline int __prepare_ICR2 (unsigned int mask) | ||
131 | { | ||
132 | return SET_APIC_DEST_FIELD(mask); | ||
133 | } | ||
134 | |||
135 | void __send_IPI_shortcut(unsigned int shortcut, int vector) | ||
136 | { | ||
137 | /* | ||
138 | * Subtle. In the case of the 'never do double writes' workaround | ||
139 | * we have to lock out interrupts to be safe. As we don't care | ||
140 | * about the value read, we use an atomic rmw access to avoid costly | ||
141 | * cli/sti. Otherwise we use an even cheaper single atomic write | ||
142 | * to the APIC. | ||
143 | */ | ||
144 | unsigned int cfg; | ||
145 | |||
146 | /* | ||
147 | * Wait for idle. | ||
148 | */ | ||
149 | apic_wait_icr_idle(); | ||
150 | |||
151 | /* | ||
152 | * No need to touch the target chip field | ||
153 | */ | ||
154 | cfg = __prepare_ICR(shortcut, vector); | ||
155 | |||
156 | /* | ||
157 | * Send the IPI. The write to APIC_ICR fires this off. | ||
158 | */ | ||
159 | apic_write_around(APIC_ICR, cfg); | ||
160 | } | ||
161 | |||
162 | void send_IPI_self(int vector) | ||
163 | { | ||
164 | __send_IPI_shortcut(APIC_DEST_SELF, vector); | ||
165 | } | ||
166 | |||
167 | /* | ||
168 | * This is used to send an IPI with no shorthand notation (the destination is | ||
169 | * specified in bits 56 to 63 of the ICR). | ||
170 | */ | ||
171 | static inline void __send_IPI_dest_field(unsigned long mask, int vector) | ||
172 | { | ||
173 | unsigned long cfg; | ||
174 | |||
175 | /* | ||
176 | * Wait for idle. | ||
177 | */ | ||
178 | if (unlikely(vector == NMI_VECTOR)) | ||
179 | safe_apic_wait_icr_idle(); | ||
180 | else | ||
181 | apic_wait_icr_idle(); | ||
182 | |||
183 | /* | ||
184 | * prepare target chip field | ||
185 | */ | ||
186 | cfg = __prepare_ICR2(mask); | ||
187 | apic_write_around(APIC_ICR2, cfg); | ||
188 | |||
189 | /* | ||
190 | * program the ICR | ||
191 | */ | ||
192 | cfg = __prepare_ICR(0, vector); | ||
193 | |||
194 | /* | ||
195 | * Send the IPI. The write to APIC_ICR fires this off. | ||
196 | */ | ||
197 | apic_write_around(APIC_ICR, cfg); | ||
198 | } | ||
199 | |||
200 | /* | ||
201 | * This is only used on smaller machines. | ||
202 | */ | ||
203 | void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) | ||
204 | { | ||
205 | unsigned long mask = cpus_addr(cpumask)[0]; | ||
206 | unsigned long flags; | ||
207 | |||
208 | local_irq_save(flags); | ||
209 | WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]); | ||
210 | __send_IPI_dest_field(mask, vector); | ||
211 | local_irq_restore(flags); | ||
212 | } | ||
213 | |||
214 | void send_IPI_mask_sequence(cpumask_t mask, int vector) | ||
215 | { | ||
216 | unsigned long flags; | ||
217 | unsigned int query_cpu; | ||
218 | |||
219 | /* | ||
220 | * Hack. The clustered APIC addressing mode doesn't allow us to send | ||
221 | * to an arbitrary mask, so I do unicasts to each CPU instead. This | ||
222 | * should be modified to do 1 message per cluster ID - mbligh | ||
223 | */ | ||
224 | |||
225 | local_irq_save(flags); | ||
226 | for_each_possible_cpu(query_cpu) { | ||
227 | if (cpu_isset(query_cpu, mask)) { | ||
228 | __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), | ||
229 | vector); | ||
230 | } | ||
231 | } | ||
232 | local_irq_restore(flags); | ||
233 | } | ||
234 | |||
235 | #include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */ | ||
236 | |||
237 | /* | ||
238 | * Smarter SMP flushing macros. | ||
239 | * c/o Linus Torvalds. | ||
240 | * | ||
241 | * These mean you can really definitely utterly forget about | ||
242 | * writing to user space from interrupts. (It's not allowed anyway.) | ||
243 | * | ||
244 | * Optimizations Manfred Spraul <manfred@colorfullife.com> | ||
245 | */ | ||
246 | |||
247 | static cpumask_t flush_cpumask; | ||
248 | static struct mm_struct * flush_mm; | ||
249 | static unsigned long flush_va; | ||
250 | static DEFINE_SPINLOCK(tlbstate_lock); | ||
251 | |||
252 | /* | ||
253 | * We cannot call mmdrop() because we are in interrupt context, | ||
254 | * instead update mm->cpu_vm_mask. | ||
255 | * | ||
256 | * We need to reload %cr3 since the page tables may be going | ||
257 | * away from under us.. | ||
258 | */ | ||
259 | void leave_mm(int cpu) | ||
260 | { | ||
261 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) | ||
262 | BUG(); | ||
263 | cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask); | ||
264 | load_cr3(swapper_pg_dir); | ||
265 | } | ||
266 | EXPORT_SYMBOL_GPL(leave_mm); | ||
267 | |||
268 | /* | ||
269 | * | ||
270 | * The flush IPI assumes that a thread switch happens in this order: | ||
271 | * [cpu0: the cpu that switches] | ||
272 | * 1) switch_mm() either 1a) or 1b) | ||
273 | * 1a) thread switch to a different mm | ||
274 | * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask); | ||
275 | * Stop ipi delivery for the old mm. This is not synchronized with | ||
276 | * the other cpus, but smp_invalidate_interrupt ignore flush ipis | ||
277 | * for the wrong mm, and in the worst case we perform a superfluous | ||
278 | * tlb flush. | ||
279 | * 1a2) set cpu_tlbstate to TLBSTATE_OK | ||
280 | * Now the smp_invalidate_interrupt won't call leave_mm if cpu0 | ||
281 | * was in lazy tlb mode. | ||
282 | * 1a3) update cpu_tlbstate[].active_mm | ||
283 | * Now cpu0 accepts tlb flushes for the new mm. | ||
284 | * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask); | ||
285 | * Now the other cpus will send tlb flush ipis. | ||
286 | * 1a4) change cr3. | ||
287 | * 1b) thread switch without mm change | ||
288 | * cpu_tlbstate[].active_mm is correct, cpu0 already handles | ||
289 | * flush ipis. | ||
290 | * 1b1) set cpu_tlbstate to TLBSTATE_OK | ||
291 | * 1b2) test_and_set the cpu bit in cpu_vm_mask. | ||
292 | * Atomically set the bit [other cpus will start sending flush ipis], | ||
293 | * and test the bit. | ||
294 | * 1b3) if the bit was 0: leave_mm was called, flush the tlb. | ||
295 | * 2) switch %%esp, i.e. current | ||
296 | * | ||
297 | * The interrupt must handle 2 special cases: | ||
298 | * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm. | ||
299 | * - the cpu performs speculative tlb reads, i.e. even if the cpu only | ||
300 | * runs in kernel space, the cpu could load tlb entries for user space | ||
301 | * pages. | ||
302 | * | ||
303 | * The good news is that cpu_tlbstate is local to each cpu, so there are | ||
304 | * no write/read ordering problems. | ||
305 | */ | ||
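To make the 1b ordering concrete, here is a minimal sketch (not the real
switch_mm) of steps 1b1-1b3 under the assumptions stated above:

	/*
	 * Illustrative only: re-activate a lazy-tlb mm on this cpu.
	 * TLBSTATE_OK must be visible before the cpu_vm_mask bit is set,
	 * so an incoming flush IPI in the window is handled, not lost.
	 */
	static inline void lazy_tlb_reactivate(struct mm_struct *mm, int cpu)
	{
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;	/* 1b1 */
		if (!cpu_test_and_set(cpu, mm->cpu_vm_mask))	/* 1b2 */
			local_flush_tlb();			/* 1b3 */
	}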
306 | |||
307 | /* | ||
308 | * TLB flush IPI: | ||
309 | * | ||
310 | * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. | ||
311 | * 2) Leave the mm if we are in the lazy tlb mode. | ||
312 | */ | ||
313 | |||
314 | void smp_invalidate_interrupt(struct pt_regs *regs) | ||
315 | { | ||
316 | unsigned long cpu; | ||
317 | |||
318 | cpu = get_cpu(); | ||
319 | |||
320 | if (!cpu_isset(cpu, flush_cpumask)) | ||
321 | goto out; | ||
322 | /* | ||
323 | * This was a BUG() but until someone can quote me the | ||
324 | * line from the Intel manual that guarantees an IPI to | ||
325 | * multiple CPUs is retried _only_ on the erroring CPUs, | ||
326 | * it's staying as a return. | ||
327 | * | ||
328 | * BUG(); | ||
329 | */ | ||
330 | |||
331 | if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) { | ||
332 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) { | ||
333 | if (flush_va == TLB_FLUSH_ALL) | ||
334 | local_flush_tlb(); | ||
335 | else | ||
336 | __flush_tlb_one(flush_va); | ||
337 | } else | ||
338 | leave_mm(cpu); | ||
339 | } | ||
340 | ack_APIC_irq(); | ||
341 | smp_mb__before_clear_bit(); | ||
342 | cpu_clear(cpu, flush_cpumask); | ||
343 | smp_mb__after_clear_bit(); | ||
344 | out: | ||
345 | put_cpu_no_resched(); | ||
346 | __get_cpu_var(irq_stat).irq_tlb_count++; | ||
347 | } | ||
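Note the barrier pairing above: smp_mb__before_clear_bit() orders the TLB
flush and the reads of flush_mm/flush_va before the cpu_clear() becomes
visible, because the initiator spinning in native_flush_tlb_others() is free
to reset flush_mm/flush_va and drop tlbstate_lock as soon as it sees this
CPU's bit leave flush_cpumask.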
348 | |||
349 | void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, | ||
350 | unsigned long va) | ||
351 | { | ||
352 | cpumask_t cpumask = *cpumaskp; | ||
353 | |||
354 | /* | ||
355 | * A couple of (to be removed) sanity checks: | ||
356 | * | ||
357 | * - current CPU must not be in mask | ||
358 | * - mask must exist :) | ||
359 | */ | ||
360 | BUG_ON(cpus_empty(cpumask)); | ||
361 | BUG_ON(cpu_isset(smp_processor_id(), cpumask)); | ||
362 | BUG_ON(!mm); | ||
363 | |||
364 | #ifdef CONFIG_HOTPLUG_CPU | ||
365 | /* If a CPU which we ran on has gone down, OK. */ | ||
366 | cpus_and(cpumask, cpumask, cpu_online_map); | ||
367 | if (unlikely(cpus_empty(cpumask))) | ||
368 | return; | ||
369 | #endif | ||
370 | |||
371 | /* | ||
372 | * I'm not happy about this global shared spinlock in the | ||
373 | * MM hot path, but we'll see how contended it is. | ||
374 | * AK: x86-64 has a faster method that could be ported. | ||
375 | */ | ||
376 | spin_lock(&tlbstate_lock); | ||
377 | |||
378 | flush_mm = mm; | ||
379 | flush_va = va; | ||
380 | cpus_or(flush_cpumask, cpumask, flush_cpumask); | ||
381 | /* | ||
382 | * We have to send the IPI only to the | ||
383 | * CPUs affected. | ||
384 | */ | ||
385 | send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR); | ||
386 | |||
387 | while (!cpus_empty(flush_cpumask)) | ||
388 | /* nothing. lockup detection does not belong here */ | ||
389 | cpu_relax(); | ||
390 | |||
391 | flush_mm = NULL; | ||
392 | flush_va = 0; | ||
393 | spin_unlock(&tlbstate_lock); | ||
394 | } | ||
395 | |||
396 | void flush_tlb_current_task(void) | ||
397 | { | ||
398 | struct mm_struct *mm = current->mm; | ||
399 | cpumask_t cpu_mask; | ||
400 | |||
401 | preempt_disable(); | ||
402 | cpu_mask = mm->cpu_vm_mask; | ||
403 | cpu_clear(smp_processor_id(), cpu_mask); | ||
404 | |||
405 | local_flush_tlb(); | ||
406 | if (!cpus_empty(cpu_mask)) | ||
407 | flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); | ||
408 | preempt_enable(); | ||
409 | } | ||
410 | |||
411 | void flush_tlb_mm(struct mm_struct *mm) | ||
412 | { | ||
413 | cpumask_t cpu_mask; | ||
414 | |||
415 | preempt_disable(); | ||
416 | cpu_mask = mm->cpu_vm_mask; | ||
417 | cpu_clear(smp_processor_id(), cpu_mask); | ||
418 | |||
419 | if (current->active_mm == mm) { | ||
420 | if (current->mm) | ||
421 | local_flush_tlb(); | ||
422 | else | ||
423 | leave_mm(smp_processor_id()); | ||
424 | } | ||
425 | if (!cpus_empty(cpu_mask)) | ||
426 | flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); | ||
427 | |||
428 | preempt_enable(); | ||
429 | } | ||
430 | |||
431 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) | ||
432 | { | ||
433 | struct mm_struct *mm = vma->vm_mm; | ||
434 | cpumask_t cpu_mask; | ||
435 | |||
436 | preempt_disable(); | ||
437 | cpu_mask = mm->cpu_vm_mask; | ||
438 | cpu_clear(smp_processor_id(), cpu_mask); | ||
439 | |||
440 | if (current->active_mm == mm) { | ||
441 | if (current->mm) | ||
442 | __flush_tlb_one(va); | ||
443 | else | ||
444 | leave_mm(smp_processor_id()); | ||
445 | } | ||
446 | |||
447 | if (!cpus_empty(cpu_mask)) | ||
448 | flush_tlb_others(cpu_mask, mm, va); | ||
449 | |||
450 | preempt_enable(); | ||
451 | } | ||
452 | EXPORT_SYMBOL(flush_tlb_page); | ||
453 | |||
454 | static void do_flush_tlb_all(void *info) | ||
455 | { | ||
456 | unsigned long cpu = smp_processor_id(); | ||
457 | |||
458 | __flush_tlb_all(); | ||
459 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY) | ||
460 | leave_mm(cpu); | ||
461 | } | ||
462 | |||
463 | void flush_tlb_all(void) | ||
464 | { | ||
465 | on_each_cpu(do_flush_tlb_all, NULL, 1, 1); | ||
466 | } | ||
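In this kernel on_each_cpu() still takes four arguments, so the (1, 1) above
passes the historical retry flag and asks to wait until every CPU has run the
callback. A hedged usage sketch with a made-up callback:

	static void drain_local_state(void *unused)
	{
		/* hypothetical per-cpu work; must be fast and must not sleep */
	}

	static void drain_everywhere(void)
	{
		/* runs drain_local_state on each online cpu and waits */
		on_each_cpu(drain_local_state, NULL, 1, 1);
	}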
467 | |||
468 | /* | ||
469 | * This function sends a 'reschedule' IPI to another CPU. | ||
470 | * It goes straight through and wastes no time serializing | ||
471 | * anything. Worst case is that we lose a reschedule ... | ||
472 | */ | ||
473 | static void native_smp_send_reschedule(int cpu) | ||
474 | { | ||
475 | WARN_ON(cpu_is_offline(cpu)); | ||
476 | send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); | ||
477 | } | ||
478 | |||
479 | /* | ||
480 | * Structure and data for smp_call_function(). This is designed to minimise | ||
481 | * static memory requirements. It also looks cleaner. | ||
482 | */ | ||
483 | static DEFINE_SPINLOCK(call_lock); | ||
484 | |||
485 | struct call_data_struct { | ||
486 | void (*func) (void *info); | ||
487 | void *info; | ||
488 | atomic_t started; | ||
489 | atomic_t finished; | ||
490 | int wait; | ||
491 | }; | ||
492 | |||
493 | void lock_ipi_call_lock(void) | ||
494 | { | ||
495 | spin_lock_irq(&call_lock); | ||
496 | } | ||
497 | |||
498 | void unlock_ipi_call_lock(void) | ||
499 | { | ||
500 | spin_unlock_irq(&call_lock); | ||
501 | } | ||
502 | |||
503 | static struct call_data_struct *call_data; | ||
504 | |||
505 | static void __smp_call_function(void (*func) (void *info), void *info, | ||
506 | int nonatomic, int wait) | ||
507 | { | ||
508 | struct call_data_struct data; | ||
509 | int cpus = num_online_cpus() - 1; | ||
510 | |||
511 | if (!cpus) | ||
512 | return; | ||
513 | |||
514 | data.func = func; | ||
515 | data.info = info; | ||
516 | atomic_set(&data.started, 0); | ||
517 | data.wait = wait; | ||
518 | if (wait) | ||
519 | atomic_set(&data.finished, 0); | ||
520 | |||
521 | call_data = &data; | ||
522 | mb(); | ||
523 | |||
524 | /* Send a message to all other CPUs and wait for them to respond */ | ||
525 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | ||
526 | |||
527 | /* Wait for response */ | ||
528 | while (atomic_read(&data.started) != cpus) | ||
529 | cpu_relax(); | ||
530 | |||
531 | if (wait) | ||
532 | while (atomic_read(&data.finished) != cpus) | ||
533 | cpu_relax(); | ||
534 | } | ||
535 | |||
536 | |||
537 | /** | ||
538 | * smp_call_function_mask(): Run a function on a set of other CPUs. | ||
539 | * @mask: The set of cpus to run on. Must not include the current cpu. | ||
540 | * @func: The function to run. This must be fast and non-blocking. | ||
541 | * @info: An arbitrary pointer to pass to the function. | ||
542 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
543 | * | ||
544 | * Returns 0 on success, else a negative status code. | ||
545 | * | ||
546 | * If @wait is true, then returns once @func has returned; otherwise | ||
547 | * it returns just before the target cpu calls @func. | ||
548 | * | ||
549 | * You must not call this function with disabled interrupts or from a | ||
550 | * hardware interrupt handler or from a bottom half handler. | ||
551 | */ | ||
552 | static int | ||
553 | native_smp_call_function_mask(cpumask_t mask, | ||
554 | void (*func)(void *), void *info, | ||
555 | int wait) | ||
556 | { | ||
557 | struct call_data_struct data; | ||
558 | cpumask_t allbutself; | ||
559 | int cpus; | ||
560 | |||
561 | /* Can deadlock when called with interrupts disabled */ | ||
562 | WARN_ON(irqs_disabled()); | ||
563 | |||
564 | /* Holding any lock stops cpus from going down. */ | ||
565 | spin_lock(&call_lock); | ||
566 | |||
567 | allbutself = cpu_online_map; | ||
568 | cpu_clear(smp_processor_id(), allbutself); | ||
569 | |||
570 | cpus_and(mask, mask, allbutself); | ||
571 | cpus = cpus_weight(mask); | ||
572 | |||
573 | if (!cpus) { | ||
574 | spin_unlock(&call_lock); | ||
575 | return 0; | ||
576 | } | ||
577 | |||
578 | data.func = func; | ||
579 | data.info = info; | ||
580 | atomic_set(&data.started, 0); | ||
581 | data.wait = wait; | ||
582 | if (wait) | ||
583 | atomic_set(&data.finished, 0); | ||
584 | |||
585 | call_data = &data; | ||
586 | mb(); | ||
587 | |||
588 | /* Send a message to other CPUs */ | ||
589 | if (cpus_equal(mask, allbutself)) | ||
590 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | ||
591 | else | ||
592 | send_IPI_mask(mask, CALL_FUNCTION_VECTOR); | ||
593 | |||
594 | /* Wait for response */ | ||
595 | while (atomic_read(&data.started) != cpus) | ||
596 | cpu_relax(); | ||
597 | |||
598 | if (wait) | ||
599 | while (atomic_read(&data.finished) != cpus) | ||
600 | cpu_relax(); | ||
601 | spin_unlock(&call_lock); | ||
602 | |||
603 | return 0; | ||
604 | } | ||
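Callers normally reach this through the smp_ops hook, e.g. via the
smp_call_function_mask() wrapper. A hedged usage sketch; the callback and the
CPU numbers are made up for illustration:

	static void bump(void *info)
	{
		atomic_inc((atomic_t *)info);	/* fast and non-blocking */
	}

	static void example(void)
	{
		static atomic_t hits = ATOMIC_INIT(0);
		cpumask_t mask = CPU_MASK_NONE;

		cpu_set(1, mask);
		cpu_set(2, mask);
		/* wait=1: returns only after bump() ran on cpus 1 and 2 */
		smp_call_function_mask(mask, bump, &hits, 1);
	}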
605 | |||
606 | static void stop_this_cpu(void *dummy) | ||
607 | { | ||
608 | local_irq_disable(); | ||
609 | /* | ||
610 | * Remove this CPU: | ||
611 | */ | ||
612 | cpu_clear(smp_processor_id(), cpu_online_map); | ||
613 | disable_local_APIC(); | ||
614 | if (cpu_data(smp_processor_id()).hlt_works_ok) | ||
615 | for (;;) halt(); | ||
616 | for (;;); | ||
617 | } | ||
618 | |||
619 | /* | ||
620 | * This function calls the 'stop' function on all other CPUs in the system. | ||
621 | */ | ||
622 | |||
623 | static void native_smp_send_stop(void) | ||
624 | { | ||
625 | /* Don't deadlock on the call lock in panic */ | ||
626 | int nolock = !spin_trylock(&call_lock); | ||
627 | unsigned long flags; | ||
628 | |||
629 | local_irq_save(flags); | ||
630 | __smp_call_function(stop_this_cpu, NULL, 0, 0); | ||
631 | if (!nolock) | ||
632 | spin_unlock(&call_lock); | ||
633 | disable_local_APIC(); | ||
634 | local_irq_restore(flags); | ||
635 | } | ||
636 | |||
637 | /* | ||
638 | * Reschedule callback. Nothing to do, | ||
639 | * all the work is done automatically when | ||
640 | * we return from the interrupt. | ||
641 | */ | ||
642 | void smp_reschedule_interrupt(struct pt_regs *regs) | ||
643 | { | ||
644 | ack_APIC_irq(); | ||
645 | __get_cpu_var(irq_stat).irq_resched_count++; | ||
646 | } | ||
647 | |||
648 | void smp_call_function_interrupt(struct pt_regs *regs) | ||
649 | { | ||
650 | void (*func) (void *info) = call_data->func; | ||
651 | void *info = call_data->info; | ||
652 | int wait = call_data->wait; | ||
653 | |||
654 | ack_APIC_irq(); | ||
655 | /* | ||
656 | * Notify initiating CPU that I've grabbed the data and am | ||
657 | * about to execute the function | ||
658 | */ | ||
659 | mb(); | ||
660 | atomic_inc(&call_data->started); | ||
661 | /* | ||
662 | * At this point the info structure may be out of scope unless wait==1 | ||
663 | */ | ||
664 | irq_enter(); | ||
665 | (*func)(info); | ||
666 | __get_cpu_var(irq_stat).irq_call_count++; | ||
667 | irq_exit(); | ||
668 | |||
669 | if (wait) { | ||
670 | mb(); | ||
671 | atomic_inc(&call_data->finished); | ||
672 | } | ||
673 | } | ||
674 | |||
675 | static int convert_apicid_to_cpu(int apic_id) | ||
676 | { | ||
677 | int i; | ||
678 | |||
679 | for_each_possible_cpu(i) { | ||
680 | if (per_cpu(x86_cpu_to_apicid, i) == apic_id) | ||
681 | return i; | ||
682 | } | ||
683 | return -1; | ||
684 | } | ||
685 | |||
686 | int safe_smp_processor_id(void) | ||
687 | { | ||
688 | int apicid, cpuid; | ||
689 | |||
690 | if (!boot_cpu_has(X86_FEATURE_APIC)) | ||
691 | return 0; | ||
692 | |||
693 | apicid = hard_smp_processor_id(); | ||
694 | if (apicid == BAD_APICID) | ||
695 | return 0; | ||
696 | |||
697 | cpuid = convert_apicid_to_cpu(apicid); | ||
698 | |||
699 | return cpuid >= 0 ? cpuid : 0; | ||
700 | } | ||
701 | |||
702 | struct smp_ops smp_ops = { | ||
703 | .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, | ||
704 | .smp_prepare_cpus = native_smp_prepare_cpus, | ||
705 | .cpu_up = native_cpu_up, | ||
706 | .smp_cpus_done = native_smp_cpus_done, | ||
707 | |||
708 | .smp_send_stop = native_smp_send_stop, | ||
709 | .smp_send_reschedule = native_smp_send_reschedule, | ||
710 | .smp_call_function_mask = native_smp_call_function_mask, | ||
711 | }; | ||
712 | EXPORT_SYMBOL_GPL(smp_ops); | ||
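Because smp_ops is an ordinary exported struct, a paravirtualized port can
override individual hooks at init time rather than replacing the whole table;
a hedged sketch with a hypothetical function name:

	/* e.g. from a hypervisor's early setup code (name is illustrative) */
	smp_ops.smp_send_reschedule = my_pv_send_reschedule;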
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot.c index 579b9b740c7c..e6abe8a49b1f 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * | 3 | * |
4 | * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> | 4 | * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> |
5 | * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com> | 5 | * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com> |
6 | * Copyright 2001 Andi Kleen, SuSE Labs. | ||
6 | * | 7 | * |
7 | * Much of the core SMP work is based on previous work by Thomas Radke, to | 8 | * Much of the core SMP work is based on previous work by Thomas Radke, to |
8 | * whom a great many thanks are extended. | 9 | * whom a great many thanks are extended. |
@@ -29,53 +30,90 @@ | |||
29 | * Ingo Molnar : various cleanups and rewrites | 30 | * Ingo Molnar : various cleanups and rewrites |
30 | * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug. | 31 | * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug. |
31 | * Maciej W. Rozycki : Bits for genuine 82489DX APICs | 32 | * Maciej W. Rozycki : Bits for genuine 82489DX APICs |
33 | * Andi Kleen : Changed for SMP boot into long mode. | ||
32 | * Martin J. Bligh : Added support for multi-quad systems | 34 | * Martin J. Bligh : Added support for multi-quad systems |
33 | * Dave Jones : Report invalid combinations of Athlon CPUs. | 35 | * Dave Jones : Report invalid combinations of Athlon CPUs. |
34 | * Rusty Russell : Hacked into shape for new "hotplug" boot process. */ | 36 | * Rusty Russell : Hacked into shape for new "hotplug" boot process. |
37 | * Andi Kleen : Converted to new state machine. | ||
38 | * Ashok Raj : CPU hotplug support | ||
39 | * Glauber Costa : i386 and x86_64 integration | ||
40 | */ | ||
35 | 41 | ||
36 | #include <linux/module.h> | ||
37 | #include <linux/init.h> | 42 | #include <linux/init.h> |
38 | #include <linux/kernel.h> | 43 | #include <linux/smp.h> |
39 | 44 | #include <linux/module.h> | |
40 | #include <linux/mm.h> | ||
41 | #include <linux/sched.h> | 45 | #include <linux/sched.h> |
42 | #include <linux/kernel_stat.h> | ||
43 | #include <linux/bootmem.h> | ||
44 | #include <linux/notifier.h> | ||
45 | #include <linux/cpu.h> | ||
46 | #include <linux/percpu.h> | 46 | #include <linux/percpu.h> |
47 | #include <linux/bootmem.h> | ||
48 | #include <linux/err.h> | ||
47 | #include <linux/nmi.h> | 49 | #include <linux/nmi.h> |
48 | 50 | ||
49 | #include <linux/delay.h> | 51 | #include <asm/acpi.h> |
50 | #include <linux/mc146818rtc.h> | ||
51 | #include <asm/tlbflush.h> | ||
52 | #include <asm/desc.h> | 52 | #include <asm/desc.h> |
53 | #include <asm/arch_hooks.h> | ||
54 | #include <asm/nmi.h> | 53 | #include <asm/nmi.h> |
54 | #include <asm/irq.h> | ||
55 | #include <asm/smp.h> | ||
56 | #include <asm/trampoline.h> | ||
57 | #include <asm/cpu.h> | ||
58 | #include <asm/numa.h> | ||
59 | #include <asm/pgtable.h> | ||
60 | #include <asm/tlbflush.h> | ||
61 | #include <asm/mtrr.h> | ||
62 | #include <asm/nmi.h> | ||
63 | #include <asm/vmi.h> | ||
64 | #include <linux/mc146818rtc.h> | ||
55 | 65 | ||
56 | #include <mach_apic.h> | 66 | #include <mach_apic.h> |
57 | #include <mach_wakecpu.h> | 67 | #include <mach_wakecpu.h> |
58 | #include <smpboot_hooks.h> | 68 | #include <smpboot_hooks.h> |
59 | #include <asm/vmi.h> | ||
60 | #include <asm/mtrr.h> | ||
61 | 69 | ||
62 | /* Set if we find a B stepping CPU */ | 70 | /* |
63 | static int __cpuinitdata smp_b_stepping; | 71 | * FIXME: For x86_64, those are defined in other files. But moving them here |
72 | * would make the setup areas dependent on smp, which is a loss. When we | ||
73 | * integrate apic between arches, we can probably do a better job, but | ||
74 | * right now, they'll stay here -- glommer | ||
75 | */ | ||
76 | |||
77 | /* which logical CPU number maps to which CPU (physical APIC ID) */ | ||
78 | u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata = | ||
79 | { [0 ... NR_CPUS-1] = BAD_APICID }; | ||
80 | void *x86_cpu_to_apicid_early_ptr; | ||
81 | |||
82 | u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata | ||
83 | = { [0 ... NR_CPUS-1] = BAD_APICID }; | ||
84 | void *x86_bios_cpu_apicid_early_ptr; | ||
85 | |||
86 | #ifdef CONFIG_X86_32 | ||
87 | u8 apicid_2_node[MAX_APICID]; | ||
88 | #endif | ||
89 | |||
90 | /* State of each CPU */ | ||
91 | DEFINE_PER_CPU(int, cpu_state) = { 0 }; | ||
92 | |||
93 | /* Store all idle threads; they can be reused instead of creating | ||
94 | * a new thread. Also avoids complicated thread destroy functionality | ||
95 | * for idle threads. | ||
96 | */ | ||
97 | #ifdef CONFIG_HOTPLUG_CPU | ||
98 | /* | ||
99 | * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is | ||
100 | * removed after init for !CONFIG_HOTPLUG_CPU. | ||
101 | */ | ||
102 | static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); | ||
103 | #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) | ||
104 | #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) | ||
105 | #else | ||
106 | struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata; | ||
107 | #define get_idle_for_cpu(x) (idle_thread_array[(x)]) | ||
108 | #define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) | ||
109 | #endif | ||
64 | 110 | ||
65 | /* Number of siblings per CPU package */ | 111 | /* Number of siblings per CPU package */ |
66 | int smp_num_siblings = 1; | 112 | int smp_num_siblings = 1; |
67 | EXPORT_SYMBOL(smp_num_siblings); | 113 | EXPORT_SYMBOL(smp_num_siblings); |
68 | 114 | ||
69 | /* Last level cache ID of each logical CPU */ | 115 | /* Last level cache ID of each logical CPU */ |
70 | DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID; | 116 | DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; |
71 | |||
72 | /* representing HT siblings of each logical CPU */ | ||
73 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); | ||
74 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); | ||
75 | |||
76 | /* representing HT and core siblings of each logical CPU */ | ||
77 | DEFINE_PER_CPU(cpumask_t, cpu_core_map); | ||
78 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); | ||
79 | 117 | ||
80 | /* bitmap of online cpus */ | 118 | /* bitmap of online cpus */ |
81 | cpumask_t cpu_online_map __read_mostly; | 119 | cpumask_t cpu_online_map __read_mostly; |
@@ -85,126 +123,94 @@ cpumask_t cpu_callin_map; | |||
85 | cpumask_t cpu_callout_map; | 123 | cpumask_t cpu_callout_map; |
86 | cpumask_t cpu_possible_map; | 124 | cpumask_t cpu_possible_map; |
87 | EXPORT_SYMBOL(cpu_possible_map); | 125 | EXPORT_SYMBOL(cpu_possible_map); |
88 | static cpumask_t smp_commenced_mask; | 126 | |
127 | /* representing HT siblings of each logical CPU */ | ||
128 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); | ||
129 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); | ||
130 | |||
131 | /* representing HT and core siblings of each logical CPU */ | ||
132 | DEFINE_PER_CPU(cpumask_t, cpu_core_map); | ||
133 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); | ||
89 | 134 | ||
90 | /* Per CPU bogomips and other parameters */ | 135 | /* Per CPU bogomips and other parameters */ |
91 | DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); | 136 | DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); |
92 | EXPORT_PER_CPU_SYMBOL(cpu_info); | 137 | EXPORT_PER_CPU_SYMBOL(cpu_info); |
93 | 138 | ||
94 | /* which logical CPU number maps to which CPU (physical APIC ID) */ | 139 | static atomic_t init_deasserted; |
95 | u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata = | ||
96 | { [0 ... NR_CPUS-1] = BAD_APICID }; | ||
97 | void *x86_cpu_to_apicid_early_ptr; | ||
98 | DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID; | ||
99 | EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid); | ||
100 | |||
101 | u8 apicid_2_node[MAX_APICID]; | ||
102 | 140 | ||
103 | /* | 141 | static int boot_cpu_logical_apicid; |
104 | * Trampoline 80x86 program as an array. | ||
105 | */ | ||
106 | 142 | ||
107 | extern const unsigned char trampoline_data []; | 143 | /* representing cpus for which sibling maps can be computed */ |
108 | extern const unsigned char trampoline_end []; | 144 | static cpumask_t cpu_sibling_setup_map; |
109 | static unsigned char *trampoline_base; | ||
110 | 145 | ||
111 | static void map_cpu_to_logical_apicid(void); | 146 | /* Set if we find a B stepping CPU */ |
147 | int __cpuinitdata smp_b_stepping; | ||
112 | 148 | ||
113 | /* State of each CPU. */ | 149 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) |
114 | DEFINE_PER_CPU(int, cpu_state) = { 0 }; | ||
115 | 150 | ||
116 | /* | 151 | /* which logical CPUs are on which nodes */ |
117 | * Currently trivial. Write the real->protected mode | 152 | cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly = |
118 | * bootstrap into the page concerned. The caller | 153 | { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE }; |
119 | * has made sure it's suitably aligned. | 154 | EXPORT_SYMBOL(node_to_cpumask_map); |
120 | */ | 155 | /* which node each logical CPU is on */ |
156 | int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 }; | ||
157 | EXPORT_SYMBOL(cpu_to_node_map); | ||
121 | 158 | ||
122 | static unsigned long __cpuinit setup_trampoline(void) | 159 | /* set up a mapping between cpu and node. */ |
160 | static void map_cpu_to_node(int cpu, int node) | ||
123 | { | 161 | { |
124 | memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data); | 162 | printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node); |
125 | return virt_to_phys(trampoline_base); | 163 | cpu_set(cpu, node_to_cpumask_map[node]); |
164 | cpu_to_node_map[cpu] = node; | ||
126 | } | 165 | } |
127 | 166 | ||
128 | /* | 167 | /* undo a mapping between cpu and node. */ |
129 | * We are called very early to get the low memory for the | 168 | static void unmap_cpu_to_node(int cpu) |
130 | * SMP bootup trampoline page. | ||
131 | */ | ||
132 | void __init smp_alloc_memory(void) | ||
133 | { | 169 | { |
134 | trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE); | 170 | int node; |
135 | /* | 171 | |
136 | * Has to be in very low memory so we can execute | 172 | printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu); |
137 | * real-mode AP code. | 173 | for (node = 0; node < MAX_NUMNODES; node++) |
138 | */ | 174 | cpu_clear(cpu, node_to_cpumask_map[node]); |
139 | if (__pa(trampoline_base) >= 0x9F000) | 175 | cpu_to_node_map[cpu] = 0; |
140 | BUG(); | ||
141 | } | 176 | } |
177 | #else /* !(CONFIG_NUMA && CONFIG_X86_32) */ | ||
178 | #define map_cpu_to_node(cpu, node) ({}) | ||
179 | #define unmap_cpu_to_node(cpu) ({}) | ||
180 | #endif | ||
142 | 181 | ||
143 | /* | 182 | #ifdef CONFIG_X86_32 |
144 | * The bootstrap kernel entry code has set these up. Save them for | 183 | u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = |
145 | * a given CPU | 184 | { [0 ... NR_CPUS-1] = BAD_APICID }; |
146 | */ | ||
147 | 185 | ||
148 | void __cpuinit smp_store_cpu_info(int id) | 186 | void map_cpu_to_logical_apicid(void) |
149 | { | 187 | { |
150 | struct cpuinfo_x86 *c = &cpu_data(id); | 188 | int cpu = smp_processor_id(); |
151 | 189 | int apicid = logical_smp_processor_id(); | |
152 | *c = boot_cpu_data; | 190 | int node = apicid_to_node(apicid); |
153 | c->cpu_index = id; | ||
154 | if (id!=0) | ||
155 | identify_secondary_cpu(c); | ||
156 | /* | ||
157 | * Mask B, Pentium, but not Pentium MMX | ||
158 | */ | ||
159 | if (c->x86_vendor == X86_VENDOR_INTEL && | ||
160 | c->x86 == 5 && | ||
161 | c->x86_mask >= 1 && c->x86_mask <= 4 && | ||
162 | c->x86_model <= 3) | ||
163 | /* | ||
164 | * Remember we have B step Pentia with bugs | ||
165 | */ | ||
166 | smp_b_stepping = 1; | ||
167 | |||
168 | /* | ||
169 | * Certain Athlons might work (for various values of 'work') in SMP | ||
170 | * but they are not certified as MP capable. | ||
171 | */ | ||
172 | if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) { | ||
173 | |||
174 | if (num_possible_cpus() == 1) | ||
175 | goto valid_k7; | ||
176 | |||
177 | /* Athlon 660/661 is valid. */ | ||
178 | if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1))) | ||
179 | goto valid_k7; | ||
180 | |||
181 | /* Duron 670 is valid */ | ||
182 | if ((c->x86_model==7) && (c->x86_mask==0)) | ||
183 | goto valid_k7; | ||
184 | |||
185 | /* | ||
186 | * Athlon 662, Duron 671, and Athlon >model 7 have capability bit. | ||
187 | * It's worth noting that the A5 stepping (662) of some Athlon XP's | ||
188 | * have the MP bit set. | ||
189 | * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more. | ||
190 | */ | ||
191 | if (((c->x86_model==6) && (c->x86_mask>=2)) || | ||
192 | ((c->x86_model==7) && (c->x86_mask>=1)) || | ||
193 | (c->x86_model> 7)) | ||
194 | if (cpu_has_mp) | ||
195 | goto valid_k7; | ||
196 | 191 | ||
197 | /* If we get here, it's not a certified SMP capable AMD system. */ | 192 | if (!node_online(node)) |
198 | add_taint(TAINT_UNSAFE_SMP); | 193 | node = first_online_node; |
199 | } | ||
200 | 194 | ||
201 | valid_k7: | 195 | cpu_2_logical_apicid[cpu] = apicid; |
202 | ; | 196 | map_cpu_to_node(cpu, node); |
203 | } | 197 | } |
204 | 198 | ||
205 | static atomic_t init_deasserted; | 199 | void unmap_cpu_to_logical_apicid(int cpu) |
200 | { | ||
201 | cpu_2_logical_apicid[cpu] = BAD_APICID; | ||
202 | unmap_cpu_to_node(cpu); | ||
203 | } | ||
204 | #else | ||
205 | #define unmap_cpu_to_logical_apicid(cpu) do {} while (0) | ||
206 | #define map_cpu_to_logical_apicid() do {} while (0) | ||
207 | #endif | ||
206 | 208 | ||
207 | static void __cpuinit smp_callin(void) | 209 | /* |
210 | * Report back to the Boot Processor. | ||
211 | * Running on AP. | ||
212 | */ | ||
213 | void __cpuinit smp_callin(void) | ||
208 | { | 214 | { |
209 | int cpuid, phys_id; | 215 | int cpuid, phys_id; |
210 | unsigned long timeout; | 216 | unsigned long timeout; |
@@ -220,12 +226,11 @@ static void __cpuinit smp_callin(void) | |||
220 | /* | 226 | /* |
221 | * (This works even if the APIC is not enabled.) | 227 | * (This works even if the APIC is not enabled.) |
222 | */ | 228 | */ |
223 | phys_id = GET_APIC_ID(apic_read(APIC_ID)); | 229 | phys_id = GET_APIC_ID(read_apic_id()); |
224 | cpuid = smp_processor_id(); | 230 | cpuid = smp_processor_id(); |
225 | if (cpu_isset(cpuid, cpu_callin_map)) { | 231 | if (cpu_isset(cpuid, cpu_callin_map)) { |
226 | printk("huh, phys CPU#%d, CPU#%d already present??\n", | 232 | panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__, |
227 | phys_id, cpuid); | 233 | phys_id, cpuid); |
228 | BUG(); | ||
229 | } | 234 | } |
230 | Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id); | 235 | Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id); |
231 | 236 | ||
@@ -247,13 +252,12 @@ static void __cpuinit smp_callin(void) | |||
247 | */ | 252 | */ |
248 | if (cpu_isset(cpuid, cpu_callout_map)) | 253 | if (cpu_isset(cpuid, cpu_callout_map)) |
249 | break; | 254 | break; |
250 | rep_nop(); | 255 | cpu_relax(); |
251 | } | 256 | } |
252 | 257 | ||
253 | if (!time_before(jiffies, timeout)) { | 258 | if (!time_before(jiffies, timeout)) { |
254 | printk("BUG: CPU%d started up but did not get a callout!\n", | 259 | panic("%s: CPU%d started up but did not get a callout!\n", |
255 | cpuid); | 260 | __func__, cpuid); |
256 | BUG(); | ||
257 | } | 261 | } |
258 | 262 | ||
259 | /* | 263 | /* |
@@ -266,13 +270,19 @@ static void __cpuinit smp_callin(void) | |||
266 | Dprintk("CALLIN, before setup_local_APIC().\n"); | 270 | Dprintk("CALLIN, before setup_local_APIC().\n"); |
267 | smp_callin_clear_local_apic(); | 271 | smp_callin_clear_local_apic(); |
268 | setup_local_APIC(); | 272 | setup_local_APIC(); |
273 | end_local_APIC_setup(); | ||
269 | map_cpu_to_logical_apicid(); | 274 | map_cpu_to_logical_apicid(); |
270 | 275 | ||
271 | /* | 276 | /* |
272 | * Get our bogomips. | 277 | * Get our bogomips. |
278 | * | ||
279 | * Need to enable IRQs because it can take longer and then | ||
280 | * the NMI watchdog might kill us. | ||
273 | */ | 281 | */ |
282 | local_irq_enable(); | ||
274 | calibrate_delay(); | 283 | calibrate_delay(); |
275 | Dprintk("Stack at about %p\n",&cpuid); | 284 | local_irq_disable(); |
285 | Dprintk("Stack at about %p\n", &cpuid); | ||
276 | 286 | ||
277 | /* | 287 | /* |
278 | * Save our processor parameters | 288 | * Save our processor parameters |
@@ -285,91 +295,10 @@ static void __cpuinit smp_callin(void) | |||
285 | cpu_set(cpuid, cpu_callin_map); | 295 | cpu_set(cpuid, cpu_callin_map); |
286 | } | 296 | } |
287 | 297 | ||
288 | static int cpucount; | ||
289 | |||
290 | /* maps the cpu to the sched domain representing multi-core */ | ||
291 | cpumask_t cpu_coregroup_map(int cpu) | ||
292 | { | ||
293 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
294 | /* | ||
295 | * For perf, we return last level cache shared map. | ||
296 | * And for power savings, we return cpu_core_map | ||
297 | */ | ||
298 | if (sched_mc_power_savings || sched_smt_power_savings) | ||
299 | return per_cpu(cpu_core_map, cpu); | ||
300 | else | ||
301 | return c->llc_shared_map; | ||
302 | } | ||
303 | |||
304 | /* representing cpus for which sibling maps can be computed */ | ||
305 | static cpumask_t cpu_sibling_setup_map; | ||
306 | |||
307 | void __cpuinit set_cpu_sibling_map(int cpu) | ||
308 | { | ||
309 | int i; | ||
310 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
311 | |||
312 | cpu_set(cpu, cpu_sibling_setup_map); | ||
313 | |||
314 | if (smp_num_siblings > 1) { | ||
315 | for_each_cpu_mask(i, cpu_sibling_setup_map) { | ||
316 | if (c->phys_proc_id == cpu_data(i).phys_proc_id && | ||
317 | c->cpu_core_id == cpu_data(i).cpu_core_id) { | ||
318 | cpu_set(i, per_cpu(cpu_sibling_map, cpu)); | ||
319 | cpu_set(cpu, per_cpu(cpu_sibling_map, i)); | ||
320 | cpu_set(i, per_cpu(cpu_core_map, cpu)); | ||
321 | cpu_set(cpu, per_cpu(cpu_core_map, i)); | ||
322 | cpu_set(i, c->llc_shared_map); | ||
323 | cpu_set(cpu, cpu_data(i).llc_shared_map); | ||
324 | } | ||
325 | } | ||
326 | } else { | ||
327 | cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); | ||
328 | } | ||
329 | |||
330 | cpu_set(cpu, c->llc_shared_map); | ||
331 | |||
332 | if (current_cpu_data.x86_max_cores == 1) { | ||
333 | per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu); | ||
334 | c->booted_cores = 1; | ||
335 | return; | ||
336 | } | ||
337 | |||
338 | for_each_cpu_mask(i, cpu_sibling_setup_map) { | ||
339 | if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && | ||
340 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { | ||
341 | cpu_set(i, c->llc_shared_map); | ||
342 | cpu_set(cpu, cpu_data(i).llc_shared_map); | ||
343 | } | ||
344 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { | ||
345 | cpu_set(i, per_cpu(cpu_core_map, cpu)); | ||
346 | cpu_set(cpu, per_cpu(cpu_core_map, i)); | ||
347 | /* | ||
348 | * Does this new cpu bringup a new core? | ||
349 | */ | ||
350 | if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) { | ||
351 | /* | ||
352 | * for each core in package, increment | ||
353 | * the booted_cores for this new cpu | ||
354 | */ | ||
355 | if (first_cpu(per_cpu(cpu_sibling_map, i)) == i) | ||
356 | c->booted_cores++; | ||
357 | /* | ||
358 | * increment the core count for all | ||
359 | * the other cpus in this package | ||
360 | */ | ||
361 | if (i != cpu) | ||
362 | cpu_data(i).booted_cores++; | ||
363 | } else if (i != cpu && !c->booted_cores) | ||
364 | c->booted_cores = cpu_data(i).booted_cores; | ||
365 | } | ||
366 | } | ||
367 | } | ||
368 | |||
369 | /* | 298 | /* |
370 | * Activate a secondary processor. | 299 | * Activate a secondary processor. |
371 | */ | 300 | */ |
372 | static void __cpuinit start_secondary(void *unused) | 301 | void __cpuinit start_secondary(void *unused) |
373 | { | 302 | { |
374 | /* | 303 | /* |
375 | * Don't put *anything* before cpu_init(), SMP booting is too | 304 | * Don't put *anything* before cpu_init(), SMP booting is too |
@@ -382,24 +311,19 @@ static void __cpuinit start_secondary(void *unused) | |||
382 | cpu_init(); | 311 | cpu_init(); |
383 | preempt_disable(); | 312 | preempt_disable(); |
384 | smp_callin(); | 313 | smp_callin(); |
385 | while (!cpu_isset(smp_processor_id(), smp_commenced_mask)) | 314 | |
386 | rep_nop(); | 315 | /* otherwise gcc will move up smp_processor_id before the cpu_init */ |
316 | barrier(); | ||
387 | /* | 317 | /* |
388 | * Check TSC synchronization with the BP: | 318 | * Check TSC synchronization with the BP: |
389 | */ | 319 | */ |
390 | check_tsc_sync_target(); | 320 | check_tsc_sync_target(); |
391 | 321 | ||
392 | setup_secondary_clock(); | ||
393 | if (nmi_watchdog == NMI_IO_APIC) { | 322 | if (nmi_watchdog == NMI_IO_APIC) { |
394 | disable_8259A_irq(0); | 323 | disable_8259A_irq(0); |
395 | enable_NMI_through_LVT0(); | 324 | enable_NMI_through_LVT0(); |
396 | enable_8259A_irq(0); | 325 | enable_8259A_irq(0); |
397 | } | 326 | } |
398 | /* | ||
399 | * low-memory mappings have been cleared, flush them from | ||
400 | * the local TLBs too. | ||
401 | */ | ||
402 | local_flush_tlb(); | ||
403 | 327 | ||
404 | /* This must be done before setting cpu_online_map */ | 328 | /* This must be done before setting cpu_online_map */ |
405 | set_cpu_sibling_map(raw_smp_processor_id()); | 329 | set_cpu_sibling_map(raw_smp_processor_id()); |
@@ -414,17 +338,27 @@ static void __cpuinit start_secondary(void *unused) | |||
414 | * smp_call_function(). | 338 | * smp_call_function(). |
415 | */ | 339 | */ |
416 | lock_ipi_call_lock(); | 340 | lock_ipi_call_lock(); |
341 | #ifdef CONFIG_X86_64 | ||
342 | spin_lock(&vector_lock); | ||
343 | |||
344 | /* Setup the per cpu irq handling data structures */ | ||
345 | __setup_vector_irq(smp_processor_id()); | ||
346 | /* | ||
347 | * Allow the master to continue. | ||
348 | */ | ||
349 | spin_unlock(&vector_lock); | ||
350 | #endif | ||
417 | cpu_set(smp_processor_id(), cpu_online_map); | 351 | cpu_set(smp_processor_id(), cpu_online_map); |
418 | unlock_ipi_call_lock(); | 352 | unlock_ipi_call_lock(); |
419 | per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; | 353 | per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; |
420 | 354 | ||
421 | /* We can take interrupts now: we're officially "up". */ | 355 | setup_secondary_clock(); |
422 | local_irq_enable(); | ||
423 | 356 | ||
424 | wmb(); | 357 | wmb(); |
425 | cpu_idle(); | 358 | cpu_idle(); |
426 | } | 359 | } |
427 | 360 | ||
361 | #ifdef CONFIG_X86_32 | ||
428 | /* | 362 | /* |
429 | * Everything has been set up for the secondary | 363 | * Everything has been set up for the secondary |
430 | * CPUs - they just need to reload everything | 364 | * CPUs - they just need to reload everything |
@@ -442,89 +376,233 @@ void __devinit initialize_secondary(void) | |||
442 | "movl %0,%%esp\n\t" | 376 | "movl %0,%%esp\n\t" |
443 | "jmp *%1" | 377 | "jmp *%1" |
444 | : | 378 | : |
445 | :"m" (current->thread.sp),"m" (current->thread.ip)); | 379 | :"m" (current->thread.sp), "m" (current->thread.ip)); |
446 | } | 380 | } |
381 | #endif | ||
447 | 382 | ||
448 | /* Static state in head.S used to set up a CPU */ | 383 | static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c) |
449 | extern struct { | 384 | { |
450 | void * sp; | 385 | #ifdef CONFIG_X86_32 |
451 | unsigned short ss; | 386 | /* |
452 | } stack_start; | 387 | * Mask B, Pentium, but not Pentium MMX |
388 | */ | ||
389 | if (c->x86_vendor == X86_VENDOR_INTEL && | ||
390 | c->x86 == 5 && | ||
391 | c->x86_mask >= 1 && c->x86_mask <= 4 && | ||
392 | c->x86_model <= 3) | ||
393 | /* | ||
394 | * Remember we have B step Pentia with bugs | ||
395 | */ | ||
396 | smp_b_stepping = 1; | ||
453 | 397 | ||
454 | #ifdef CONFIG_NUMA | 398 | /* |
399 | * Certain Athlons might work (for various values of 'work') in SMP | ||
400 | * but they are not certified as MP capable. | ||
401 | */ | ||
402 | if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) { | ||
455 | 403 | ||
456 | /* which logical CPUs are on which nodes */ | 404 | if (num_possible_cpus() == 1) |
457 | cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly = | 405 | goto valid_k7; |
458 | { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE }; | ||
459 | EXPORT_SYMBOL(node_to_cpumask_map); | ||
460 | /* which node each logical CPU is on */ | ||
461 | int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 }; | ||
462 | EXPORT_SYMBOL(cpu_to_node_map); | ||
463 | 406 | ||
464 | /* set up a mapping between cpu and node. */ | 407 | /* Athlon 660/661 is valid. */ |
465 | static inline void map_cpu_to_node(int cpu, int node) | 408 | if ((c->x86_model == 6) && ((c->x86_mask == 0) || |
466 | { | 409 | (c->x86_mask == 1))) |
467 | printk("Mapping cpu %d to node %d\n", cpu, node); | 410 | goto valid_k7; |
468 | cpu_set(cpu, node_to_cpumask_map[node]); | 411 | |
469 | cpu_to_node_map[cpu] = node; | 412 | /* Duron 670 is valid */ |
413 | if ((c->x86_model == 7) && (c->x86_mask == 0)) | ||
414 | goto valid_k7; | ||
415 | |||
416 | /* | ||
417 | * Athlon 662, Duron 671, and Athlon >model 7 have capability | ||
418 | * bit. It's worth noting that the A5 stepping (662) of some | ||
419 | * Athlon XP's have the MP bit set. | ||
420 | * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for | ||
421 | * more. | ||
422 | */ | ||
423 | if (((c->x86_model == 6) && (c->x86_mask >= 2)) || | ||
424 | ((c->x86_model == 7) && (c->x86_mask >= 1)) || | ||
425 | (c->x86_model > 7)) | ||
426 | if (cpu_has_mp) | ||
427 | goto valid_k7; | ||
428 | |||
429 | /* If we get here, not a certified SMP capable AMD system. */ | ||
430 | add_taint(TAINT_UNSAFE_SMP); | ||
431 | } | ||
432 | |||
433 | valid_k7: | ||
434 | ; | ||
435 | #endif | ||
470 | } | 436 | } |
471 | 437 | ||
472 | /* undo a mapping between cpu and node. */ | 438 | void __cpuinit smp_checks(void) |
473 | static inline void unmap_cpu_to_node(int cpu) | ||
474 | { | 439 | { |
475 | int node; | 440 | if (smp_b_stepping) |
441 | printk(KERN_WARNING "WARNING: SMP operation may be unreliable " | ||
442 | "with B stepping processors.\n"); | ||
476 | 443 | ||
477 | printk("Unmapping cpu %d from all nodes\n", cpu); | 444 | /* |
478 | for (node = 0; node < MAX_NUMNODES; node ++) | 445 | * Don't taint if we are running SMP kernel on a single non-MP |
479 | cpu_clear(cpu, node_to_cpumask_map[node]); | 446 | * approved Athlon |
480 | cpu_to_node_map[cpu] = 0; | 447 | */ |
448 | if (tainted & TAINT_UNSAFE_SMP) { | ||
449 | if (num_online_cpus()) | ||
450 | printk(KERN_INFO "WARNING: This combination of AMD " | ||
451 | "processors is not suitable for SMP.\n"); | ||
452 | else | ||
453 | tainted &= ~TAINT_UNSAFE_SMP; | ||
454 | } | ||
481 | } | 455 | } |
482 | #else /* !CONFIG_NUMA */ | ||
483 | 456 | ||
484 | #define map_cpu_to_node(cpu, node) ({}) | 457 | /* |
485 | #define unmap_cpu_to_node(cpu) ({}) | 458 | * The bootstrap kernel entry code has set these up. Save them for |
459 | * a given CPU | ||
460 | */ | ||
486 | 461 | ||
487 | #endif /* CONFIG_NUMA */ | 462 | void __cpuinit smp_store_cpu_info(int id) |
463 | { | ||
464 | struct cpuinfo_x86 *c = &cpu_data(id); | ||
465 | |||
466 | *c = boot_cpu_data; | ||
467 | c->cpu_index = id; | ||
468 | if (id != 0) | ||
469 | identify_secondary_cpu(c); | ||
470 | smp_apply_quirks(c); | ||
471 | } | ||
488 | 472 | ||
489 | u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID }; | ||
490 | 473 | ||
491 | static void map_cpu_to_logical_apicid(void) | 474 | void __cpuinit set_cpu_sibling_map(int cpu) |
492 | { | 475 | { |
493 | int cpu = smp_processor_id(); | 476 | int i; |
494 | int apicid = logical_smp_processor_id(); | 477 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
495 | int node = apicid_to_node(apicid); | ||
496 | 478 | ||
497 | if (!node_online(node)) | 479 | cpu_set(cpu, cpu_sibling_setup_map); |
498 | node = first_online_node; | ||
499 | 480 | ||
500 | cpu_2_logical_apicid[cpu] = apicid; | 481 | if (smp_num_siblings > 1) { |
501 | map_cpu_to_node(cpu, node); | 482 | for_each_cpu_mask(i, cpu_sibling_setup_map) { |
483 | if (c->phys_proc_id == cpu_data(i).phys_proc_id && | ||
484 | c->cpu_core_id == cpu_data(i).cpu_core_id) { | ||
485 | cpu_set(i, per_cpu(cpu_sibling_map, cpu)); | ||
486 | cpu_set(cpu, per_cpu(cpu_sibling_map, i)); | ||
487 | cpu_set(i, per_cpu(cpu_core_map, cpu)); | ||
488 | cpu_set(cpu, per_cpu(cpu_core_map, i)); | ||
489 | cpu_set(i, c->llc_shared_map); | ||
490 | cpu_set(cpu, cpu_data(i).llc_shared_map); | ||
491 | } | ||
492 | } | ||
493 | } else { | ||
494 | cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); | ||
495 | } | ||
496 | |||
497 | cpu_set(cpu, c->llc_shared_map); | ||
498 | |||
499 | if (current_cpu_data.x86_max_cores == 1) { | ||
500 | per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu); | ||
501 | c->booted_cores = 1; | ||
502 | return; | ||
503 | } | ||
504 | |||
505 | for_each_cpu_mask(i, cpu_sibling_setup_map) { | ||
506 | if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && | ||
507 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { | ||
508 | cpu_set(i, c->llc_shared_map); | ||
509 | cpu_set(cpu, cpu_data(i).llc_shared_map); | ||
510 | } | ||
511 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { | ||
512 | cpu_set(i, per_cpu(cpu_core_map, cpu)); | ||
513 | cpu_set(cpu, per_cpu(cpu_core_map, i)); | ||
514 | /* | ||
515 | * Does this new cpu bringup a new core? | ||
516 | */ | ||
517 | if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) { | ||
518 | /* | ||
519 | * for each core in package, increment | ||
520 | * the booted_cores for this new cpu | ||
521 | */ | ||
522 | if (first_cpu(per_cpu(cpu_sibling_map, i)) == i) | ||
523 | c->booted_cores++; | ||
524 | /* | ||
525 | * increment the core count for all | ||
526 | * the other cpus in this package | ||
527 | */ | ||
528 | if (i != cpu) | ||
529 | cpu_data(i).booted_cores++; | ||
530 | } else if (i != cpu && !c->booted_cores) | ||
531 | c->booted_cores = cpu_data(i).booted_cores; | ||
532 | } | ||
533 | } | ||
502 | } | 534 | } |
503 | 535 | ||
504 | static void unmap_cpu_to_logical_apicid(int cpu) | 536 | /* maps the cpu to the sched domain representing multi-core */ |
537 | cpumask_t cpu_coregroup_map(int cpu) | ||
505 | { | 538 | { |
506 | cpu_2_logical_apicid[cpu] = BAD_APICID; | 539 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
507 | unmap_cpu_to_node(cpu); | 540 | /* |
541 | * For perf, we return last level cache shared map. | ||
542 | * And for power savings, we return cpu_core_map | ||
543 | */ | ||
544 | if (sched_mc_power_savings || sched_smt_power_savings) | ||
545 | return per_cpu(cpu_core_map, cpu); | ||
546 | else | ||
547 | return c->llc_shared_map; | ||
548 | } | ||
549 | |||
550 | #ifdef CONFIG_X86_32 | ||
551 | /* | ||
552 | * We are called very early to get the low memory for the | ||
553 | * SMP bootup trampoline page. | ||
554 | */ | ||
555 | void __init smp_alloc_memory(void) | ||
556 | { | ||
557 | trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE); | ||
558 | /* | ||
559 | * Has to be in very low memory so we can execute | ||
560 | * real-mode AP code. | ||
561 | */ | ||
562 | if (__pa(trampoline_base) >= 0x9F000) | ||
563 | BUG(); | ||
564 | } | ||
565 | #endif | ||
566 | |||
567 | void impress_friends(void) | ||
568 | { | ||
569 | int cpu; | ||
570 | unsigned long bogosum = 0; | ||
571 | /* | ||
572 | * Allow the user to impress friends. | ||
573 | */ | ||
574 | Dprintk("Before bogomips.\n"); | ||
575 | for_each_possible_cpu(cpu) | ||
576 | if (cpu_isset(cpu, cpu_callout_map)) | ||
577 | bogosum += cpu_data(cpu).loops_per_jiffy; | ||
578 | printk(KERN_INFO | ||
579 | "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", | ||
580 | num_online_cpus(), | ||
581 | bogosum/(500000/HZ), | ||
582 | (bogosum/(5000/HZ))%100); | ||
583 | |||
584 | Dprintk("Before bogocount - setting activated=1.\n"); | ||
508 | } | 585 | } |
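The printk above converts the summed loops_per_jiffy into BogoMIPS: one
BogoMIPS is 500000 delay-loop iterations per second, so the integer part is
bogosum/(500000/HZ) and the two decimals come from (bogosum/(5000/HZ)) % 100.
With made-up numbers, HZ = 250 and four CPUs at loops_per_jiffy = 2000000
each give bogosum = 8000000, which the message reports as "4000.00 BogoMIPS".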
509 | 586 | ||
510 | static inline void __inquire_remote_apic(int apicid) | 587 | static inline void __inquire_remote_apic(int apicid) |
511 | { | 588 | { |
512 | int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; | 589 | unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; |
513 | char *names[] = { "ID", "VERSION", "SPIV" }; | 590 | char *names[] = { "ID", "VERSION", "SPIV" }; |
514 | int timeout; | 591 | int timeout; |
515 | unsigned long status; | 592 | u32 status; |
516 | 593 | ||
517 | printk("Inquiring remote APIC #%d...\n", apicid); | 594 | printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid); |
518 | 595 | ||
519 | for (i = 0; i < ARRAY_SIZE(regs); i++) { | 596 | for (i = 0; i < ARRAY_SIZE(regs); i++) { |
520 | printk("... APIC #%d %s: ", apicid, names[i]); | 597 | printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]); |
521 | 598 | ||
522 | /* | 599 | /* |
523 | * Wait for idle. | 600 | * Wait for idle. |
524 | */ | 601 | */ |
525 | status = safe_apic_wait_icr_idle(); | 602 | status = safe_apic_wait_icr_idle(); |
526 | if (status) | 603 | if (status) |
527 | printk("a previous APIC delivery may have failed\n"); | 604 | printk(KERN_CONT |
605 | "a previous APIC delivery may have failed\n"); | ||
528 | 606 | ||
529 | apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid)); | 607 | apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid)); |
530 | apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]); | 608 | apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]); |
@@ -538,16 +616,16 @@ static inline void __inquire_remote_apic(int apicid) | |||
538 | switch (status) { | 616 | switch (status) { |
539 | case APIC_ICR_RR_VALID: | 617 | case APIC_ICR_RR_VALID: |
540 | status = apic_read(APIC_RRR); | 618 | status = apic_read(APIC_RRR); |
541 | printk("%lx\n", status); | 619 | printk(KERN_CONT "%08x\n", status); |
542 | break; | 620 | break; |
543 | default: | 621 | default: |
544 | printk("failed\n"); | 622 | printk(KERN_CONT "failed\n"); |
545 | } | 623 | } |
546 | } | 624 | } |
547 | } | 625 | } |
548 | 626 | ||
549 | #ifdef WAKE_SECONDARY_VIA_NMI | 627 | #ifdef WAKE_SECONDARY_VIA_NMI |
550 | /* | 628 | /* |
551 | * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal | 629 | * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal |
552 | * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this | 630 | * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this |
553 | * won't ... remember to clear down the APIC, etc. later. | 631 |
@@ -584,9 +662,9 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip) | |||
584 | Dprintk("NMI sent.\n"); | 662 | Dprintk("NMI sent.\n"); |
585 | 663 | ||
586 | if (send_status) | 664 | if (send_status) |
587 | printk("APIC never delivered???\n"); | 665 | printk(KERN_ERR "APIC never delivered???\n"); |
588 | if (accept_status) | 666 | if (accept_status) |
589 | printk("APIC delivery error (%lx).\n", accept_status); | 667 | printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status); |
590 | 668 | ||
591 | return (send_status | accept_status); | 669 | return (send_status | accept_status); |
592 | } | 670 | } |
@@ -637,6 +715,7 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) | |||
637 | Dprintk("Waiting for send to finish...\n"); | 715 | Dprintk("Waiting for send to finish...\n"); |
638 | send_status = safe_apic_wait_icr_idle(); | 716 | send_status = safe_apic_wait_icr_idle(); |
639 | 717 | ||
718 | mb(); | ||
640 | atomic_set(&init_deasserted, 1); | 719 | atomic_set(&init_deasserted, 1); |
641 | 720 | ||
642 | /* | 721 | /* |
@@ -655,7 +734,11 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) | |||
655 | * target processor state. | 734 | * target processor state. |
656 | */ | 735 | */ |
657 | startup_ipi_hook(phys_apicid, (unsigned long) start_secondary, | 736 | startup_ipi_hook(phys_apicid, (unsigned long) start_secondary, |
658 | (unsigned long) stack_start.sp); | 737 | #ifdef CONFIG_X86_64 |
738 | (unsigned long)init_rsp); | ||
739 | #else | ||
740 | (unsigned long)stack_start.sp); | ||
741 | #endif | ||
659 | 742 | ||
660 | /* | 743 | /* |
661 | * Run STARTUP IPI loop. | 744 | * Run STARTUP IPI loop. |
@@ -665,7 +748,7 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) | |||
665 | maxlvt = lapic_get_maxlvt(); | 748 | maxlvt = lapic_get_maxlvt(); |
666 | 749 | ||
667 | for (j = 1; j <= num_starts; j++) { | 750 | for (j = 1; j <= num_starts; j++) { |
668 | Dprintk("Sending STARTUP #%d.\n",j); | 751 | Dprintk("Sending STARTUP #%d.\n", j); |
669 | apic_read_around(APIC_SPIV); | 752 | apic_read_around(APIC_SPIV); |
670 | apic_write(APIC_ESR, 0); | 753 | apic_write(APIC_ESR, 0); |
671 | apic_read(APIC_ESR); | 754 | apic_read(APIC_ESR); |
@@ -711,49 +794,29 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) | |||
711 | Dprintk("After Startup.\n"); | 794 | Dprintk("After Startup.\n"); |
712 | 795 | ||
713 | if (send_status) | 796 | if (send_status) |
714 | printk("APIC never delivered???\n"); | 797 | printk(KERN_ERR "APIC never delivered???\n"); |
715 | if (accept_status) | 798 | if (accept_status) |
716 | printk("APIC delivery error (%lx).\n", accept_status); | 799 | printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status); |
717 | 800 | ||
718 | return (send_status | accept_status); | 801 | return (send_status | accept_status); |
719 | } | 802 | } |
720 | #endif /* WAKE_SECONDARY_VIA_INIT */ | 803 | #endif /* WAKE_SECONDARY_VIA_INIT */ |
721 | 804 | ||
722 | extern cpumask_t cpu_initialized; | 805 | struct create_idle { |
723 | static inline int alloc_cpu_id(void) | 806 | struct work_struct work; |
724 | { | 807 | struct task_struct *idle; |
725 | cpumask_t tmp_map; | 808 | struct completion done; |
726 | int cpu; | 809 | int cpu; |
727 | cpus_complement(tmp_map, cpu_present_map); | 810 | }; |
728 | cpu = first_cpu(tmp_map); | ||
729 | if (cpu >= NR_CPUS) | ||
730 | return -ENODEV; | ||
731 | return cpu; | ||
732 | } | ||
733 | 811 | ||
734 | #ifdef CONFIG_HOTPLUG_CPU | 812 | static void __cpuinit do_fork_idle(struct work_struct *work) |
735 | static struct task_struct * __cpuinitdata cpu_idle_tasks[NR_CPUS]; | ||
736 | static inline struct task_struct * __cpuinit alloc_idle_task(int cpu) | ||
737 | { | 813 | { |
738 | struct task_struct *idle; | 814 | struct create_idle *c_idle = |
815 | container_of(work, struct create_idle, work); | ||
739 | 816 | ||
740 | if ((idle = cpu_idle_tasks[cpu]) != NULL) { | 817 | c_idle->idle = fork_idle(c_idle->cpu); |
741 | /* initialize thread_struct. we really want to avoid destroying | 818 | complete(&c_idle->done);
742 | * the idle thread | ||
743 | */ | ||
744 | idle->thread.sp = (unsigned long)task_pt_regs(idle); | ||
745 | init_idle(idle, cpu); | ||
746 | return idle; | ||
747 | } | ||
748 | idle = fork_idle(cpu); | ||
749 | |||
750 | if (!IS_ERR(idle)) | ||
751 | cpu_idle_tasks[cpu] = idle; | ||
752 | return idle; | ||
753 | } | 819 | } |
754 | #else | ||
755 | #define alloc_idle_task(cpu) fork_idle(cpu) | ||
756 | #endif | ||
757 | 820 | ||
758 | static int __cpuinit do_boot_cpu(int apicid, int cpu) | 821 | static int __cpuinit do_boot_cpu(int apicid, int cpu) |
759 | /* | 822 | /* |
@@ -762,45 +825,92 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) | |||
762 | * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu. | 825 | * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu. |
763 | */ | 826 | */ |
764 | { | 827 | { |
765 | struct task_struct *idle; | 828 | unsigned long boot_error = 0; |
766 | unsigned long boot_error; | ||
767 | int timeout; | 829 | int timeout; |
768 | unsigned long start_eip; | 830 | unsigned long start_ip; |
769 | unsigned short nmi_high = 0, nmi_low = 0; | 831 | unsigned short nmi_high = 0, nmi_low = 0; |
832 | struct create_idle c_idle = { | ||
833 | .cpu = cpu, | ||
834 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), | ||
835 | }; | ||
836 | INIT_WORK(&c_idle.work, do_fork_idle); | ||
837 | #ifdef CONFIG_X86_64 | ||
838 | /* allocate memory for gdts of secondary cpus. Hotplug is considered */ | ||
839 | if (!cpu_gdt_descr[cpu].address && | ||
840 | !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) { | ||
841 | printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu); | ||
842 | return -1; | ||
843 | } | ||
770 | 844 | ||
771 | /* | 845 | /* Allocate node local memory for AP pdas */ |
772 | * Save current MTRR state in case it was changed since early boot | 846 | if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) { |
773 | * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync: | 847 | struct x8664_pda *newpda, *pda; |
774 | */ | 848 | int node = cpu_to_node(cpu); |
775 | mtrr_save_state(); | 849 | pda = cpu_pda(cpu); |
850 | newpda = kmalloc_node(sizeof(struct x8664_pda), GFP_ATOMIC, | ||
851 | node); | ||
852 | if (newpda) { | ||
853 | memcpy(newpda, pda, sizeof(struct x8664_pda)); | ||
854 | cpu_pda(cpu) = newpda; | ||
855 | } else | ||
856 | printk(KERN_ERR | ||
857 | "Could not allocate node local PDA for CPU %d on node %d\n", | ||
858 | cpu, node); | ||
859 | } | ||
860 | #endif | ||
861 | |||
862 | alternatives_smp_switch(1); | ||
863 | |||
864 | c_idle.idle = get_idle_for_cpu(cpu); | ||
776 | 865 | ||
777 | /* | 866 | /* |
778 | * We can't use kernel_thread since we must avoid to | 867 | * We can't use kernel_thread since we must avoid to |
779 | * reschedule the child. | 868 | * reschedule the child. |
780 | */ | 869 | */ |
781 | idle = alloc_idle_task(cpu); | 870 | if (c_idle.idle) { |
782 | if (IS_ERR(idle)) | 871 | c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *) |
783 | panic("failed fork for CPU %d", cpu); | 872 | (THREAD_SIZE + task_stack_page(c_idle.idle))) - 1); |
873 | init_idle(c_idle.idle, cpu); | ||
874 | goto do_rest; | ||
875 | } | ||
784 | 876 | ||
877 | if (!keventd_up() || current_is_keventd()) | ||
878 | c_idle.work.func(&c_idle.work); | ||
879 | else { | ||
880 | schedule_work(&c_idle.work); | ||
881 | wait_for_completion(&c_idle.done); | ||
882 | } | ||
883 | |||
884 | if (IS_ERR(c_idle.idle)) { | ||
885 | printk("failed fork for CPU %d\n", cpu); | ||
886 | return PTR_ERR(c_idle.idle); | ||
887 | } | ||
888 | |||
889 | set_idle_for_cpu(cpu, c_idle.idle); | ||
890 | do_rest: | ||
891 | #ifdef CONFIG_X86_32 | ||
892 | per_cpu(current_task, cpu) = c_idle.idle; | ||
785 | init_gdt(cpu); | 893 | init_gdt(cpu); |
786 | per_cpu(current_task, cpu) = idle; | ||
787 | early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); | 894 | early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); |
895 | c_idle.idle->thread.ip = (unsigned long) start_secondary; | ||
896 | /* Stack for startup_32 can be just as for start_secondary onwards */ | ||
897 | stack_start.sp = (void *) c_idle.idle->thread.sp; | ||
898 | irq_ctx_init(cpu); | ||
899 | #else | ||
900 | cpu_pda(cpu)->pcurrent = c_idle.idle; | ||
901 | init_rsp = c_idle.idle->thread.sp; | ||
902 | load_sp0(&per_cpu(init_tss, cpu), &c_idle.idle->thread); | ||
903 | initial_code = (unsigned long)start_secondary; | ||
904 | clear_tsk_thread_flag(c_idle.idle, TIF_FORK); | ||
905 | #endif | ||
788 | 906 | ||
789 | idle->thread.ip = (unsigned long) start_secondary; | 907 | /* start_ip had better be page-aligned! */ |
790 | /* start_eip had better be page-aligned! */ | 908 | start_ip = setup_trampoline(); |
791 | start_eip = setup_trampoline(); | ||
792 | |||
793 | ++cpucount; | ||
794 | alternatives_smp_switch(1); | ||
795 | 909 | ||
796 | /* So we see what's up */ | 910 | /* So we see what's up */ |
797 | printk("Booting processor %d/%d ip %lx\n", cpu, apicid, start_eip); | 911 | printk(KERN_INFO "Booting processor %d/%d ip %lx\n", |
798 | /* Stack for startup_32 can be just as for start_secondary onwards */ | 912 | cpu, apicid, start_ip); |
799 | stack_start.sp = (void *) idle->thread.sp; | ||
800 | 913 | ||
801 | irq_ctx_init(cpu); | ||
802 | |||
803 | per_cpu(x86_cpu_to_apicid, cpu) = apicid; | ||
804 | /* | 914 | /* |
805 | * This grunge runs the startup process for | 915 | * This grunge runs the startup process for |
806 | * the targeted processor. | 916 | * the targeted processor. |
@@ -812,12 +922,17 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) | |||
812 | 922 | ||
813 | store_NMI_vector(&nmi_high, &nmi_low); | 923 | store_NMI_vector(&nmi_high, &nmi_low); |
814 | 924 | ||
815 | smpboot_setup_warm_reset_vector(start_eip); | 925 | smpboot_setup_warm_reset_vector(start_ip); |
926 | /* | ||
927 | * Be paranoid about clearing APIC errors. | ||
928 | */ | ||
929 | apic_write(APIC_ESR, 0); | ||
930 | apic_read(APIC_ESR); | ||
816 | 931 | ||
817 | /* | 932 | /* |
818 | * Starting actual IPI sequence... | 933 | * Starting actual IPI sequence... |
819 | */ | 934 | */ |
820 | boot_error = wakeup_secondary_cpu(apicid, start_eip); | 935 | boot_error = wakeup_secondary_cpu(apicid, start_ip); |
821 | 936 | ||
822 | if (!boot_error) { | 937 | if (!boot_error) { |
823 | /* | 938 | /* |
@@ -839,18 +954,18 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) | |||
839 | if (cpu_isset(cpu, cpu_callin_map)) { | 954 | if (cpu_isset(cpu, cpu_callin_map)) { |
840 | /* number CPUs logically, starting from 1 (BSP is 0) */ | 955 | /* number CPUs logically, starting from 1 (BSP is 0) */ |
841 | Dprintk("OK.\n"); | 956 | Dprintk("OK.\n"); |
842 | printk("CPU%d: ", cpu); | 957 | printk(KERN_INFO "CPU%d: ", cpu); |
843 | print_cpu_info(&cpu_data(cpu)); | 958 | print_cpu_info(&cpu_data(cpu)); |
844 | Dprintk("CPU has booted.\n"); | 959 | Dprintk("CPU has booted.\n"); |
845 | } else { | 960 | } else { |
846 | boot_error= 1; | 961 | boot_error = 1; |
847 | if (*((volatile unsigned char *)trampoline_base) | 962 | if (*((volatile unsigned char *)trampoline_base) |
848 | == 0xA5) | 963 | == 0xA5) |
849 | /* trampoline started but...? */ | 964 | /* trampoline started but...? */ |
850 | printk("Stuck ??\n"); | 965 | printk(KERN_ERR "Stuck ??\n"); |
851 | else | 966 | else |
852 | /* trampoline code not run */ | 967 | /* trampoline code not run */ |
853 | printk("Not responding.\n"); | 968 | printk(KERN_ERR "Not responding.\n"); |
854 | inquire_remote_apic(apicid); | 969 | inquire_remote_apic(apicid); |
855 | } | 970 | } |
856 | } | 971 | } |
@@ -858,156 +973,159 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) | |||
858 | if (boot_error) { | 973 | if (boot_error) { |
859 | /* Try to put things back the way they were before ... */ | 974 | /* Try to put things back the way they were before ... */ |
860 | unmap_cpu_to_logical_apicid(cpu); | 975 | unmap_cpu_to_logical_apicid(cpu); |
861 | cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */ | 976 | #ifdef CONFIG_X86_64 |
977 | clear_node_cpumask(cpu); /* was set by numa_add_cpu */ | ||
978 | #endif | ||
979 | cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */ | ||
862 | cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ | 980 | cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ |
863 | cpucount--; | 981 | cpu_clear(cpu, cpu_possible_map); |
864 | } else { | 982 | cpu_clear(cpu, cpu_present_map); |
865 | per_cpu(x86_cpu_to_apicid, cpu) = apicid; | 983 | per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID; |
866 | cpu_set(cpu, cpu_present_map); | ||
867 | } | 984 | } |
868 | 985 | ||
869 | /* mark "stuck" area as not stuck */ | 986 | /* mark "stuck" area as not stuck */ |
870 | *((volatile unsigned long *)trampoline_base) = 0; | 987 | *((volatile unsigned long *)trampoline_base) = 0; |
871 | 988 | ||
989 | /* | ||
990 | * Cleanup possible dangling ends... | ||
991 | */ | ||
992 | smpboot_restore_warm_reset_vector(); | ||
993 | |||
872 | return boot_error; | 994 | return boot_error; |
873 | } | 995 | } |
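
The handshake that do_boot_cpu() drives above is worth seeing outside the kernel. Below is a minimal user-space sketch of the callout/callin protocol, assuming nothing but C11 threads and atomics; the flag names and delays are illustrative stand-ins for cpu_callout_map, cpu_callin_map, and the 50000 * 100us wait loop.

    /* Build with: cc -std=c11 -pthread handshake.c */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int callout, callin;      /* stand-ins for the cpumasks */

    static void *ap_thread(void *arg)
    {
            while (!atomic_load(&callout))  /* AP waits for the BSP's callout */
                    ;                       /* cpu_relax() stand-in */
            usleep(1000);                   /* AP-side init work */
            atomic_store(&callin, 1);       /* report back: "callin" */
            return NULL;
    }

    int main(void)
    {
            pthread_t ap;
            int timeout;

            pthread_create(&ap, NULL, ap_thread, NULL);
            atomic_store(&callout, 1);      /* allow the AP to start initializing */

            /* Wait 5s total for a response, as do_boot_cpu() does. */
            for (timeout = 0; timeout < 50000; timeout++) {
                    if (atomic_load(&callin))
                            break;
                    usleep(100);
            }
            puts(atomic_load(&callin) ? "CPU has booted" : "Not responding.");
            pthread_join(ap, NULL);
            return 0;
    }
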
874 | 996 | ||
875 | #ifdef CONFIG_HOTPLUG_CPU | 997 | int __cpuinit native_cpu_up(unsigned int cpu) |
876 | void cpu_exit_clear(void) | ||
877 | { | 998 | { |
878 | int cpu = raw_smp_processor_id(); | 999 | int apicid = cpu_present_to_apicid(cpu); |
879 | 1000 | unsigned long flags; | |
880 | idle_task_exit(); | 1001 | int err; |
881 | |||
882 | cpucount --; | ||
883 | cpu_uninit(); | ||
884 | irq_ctx_exit(cpu); | ||
885 | |||
886 | cpu_clear(cpu, cpu_callout_map); | ||
887 | cpu_clear(cpu, cpu_callin_map); | ||
888 | 1002 | ||
889 | cpu_clear(cpu, smp_commenced_mask); | 1003 | WARN_ON(irqs_disabled()); |
890 | unmap_cpu_to_logical_apicid(cpu); | ||
891 | } | ||
892 | 1004 | ||
893 | struct warm_boot_cpu_info { | 1005 | Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu); |
894 | struct completion *complete; | ||
895 | struct work_struct task; | ||
896 | int apicid; | ||
897 | int cpu; | ||
898 | }; | ||
899 | 1006 | ||
900 | static void __cpuinit do_warm_boot_cpu(struct work_struct *work) | 1007 | if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid || |
901 | { | 1008 | !physid_isset(apicid, phys_cpu_present_map)) { |
902 | struct warm_boot_cpu_info *info = | 1009 | printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu); |
903 | container_of(work, struct warm_boot_cpu_info, task); | 1010 | return -EINVAL; |
904 | do_boot_cpu(info->apicid, info->cpu); | 1011 | } |
905 | complete(info->complete); | ||
906 | } | ||
907 | 1012 | ||
908 | static int __cpuinit __smp_prepare_cpu(int cpu) | 1013 | /* |
909 | { | 1014 | * Already booted CPU? |
910 | DECLARE_COMPLETION_ONSTACK(done); | 1015 | */ |
911 | struct warm_boot_cpu_info info; | 1016 | if (cpu_isset(cpu, cpu_callin_map)) { |
912 | int apicid, ret; | 1017 | Dprintk("do_boot_cpu %d Already started\n", cpu); |
913 | 1018 | return -ENOSYS; | |
914 | apicid = per_cpu(x86_cpu_to_apicid, cpu); | ||
915 | if (apicid == BAD_APICID) { | ||
916 | ret = -ENODEV; | ||
917 | goto exit; | ||
918 | } | 1019 | } |
919 | 1020 | ||
920 | info.complete = &done; | 1021 | /* |
921 | info.apicid = apicid; | 1022 | * Save current MTRR state in case it was changed since early boot |
922 | info.cpu = cpu; | 1023 | * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync: |
923 | INIT_WORK(&info.task, do_warm_boot_cpu); | 1024 | */ |
1025 | mtrr_save_state(); | ||
1026 | |||
1027 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; | ||
924 | 1028 | ||
1029 | #ifdef CONFIG_X86_32 | ||
925 | /* init low mem mapping */ | 1030 | /* init low mem mapping */ |
926 | clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, | 1031 | clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, |
927 | min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS)); | 1032 | min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS)); |
928 | flush_tlb_all(); | 1033 | flush_tlb_all(); |
929 | schedule_work(&info.task); | ||
930 | wait_for_completion(&done); | ||
931 | |||
932 | zap_low_mappings(); | ||
933 | ret = 0; | ||
934 | exit: | ||
935 | return ret; | ||
936 | } | ||
937 | #endif | ||
938 | |||
939 | /* | ||
940 | * Cycle through the processors sending APIC IPIs to boot each. | ||
941 | */ | ||
942 | |||
943 | static int boot_cpu_logical_apicid; | ||
944 | /* Where the IO area was mapped on multiquad, always 0 otherwise */ | ||
945 | void *xquad_portio; | ||
946 | #ifdef CONFIG_X86_NUMAQ | ||
947 | EXPORT_SYMBOL(xquad_portio); | ||
948 | #endif | 1034 | #endif |
949 | 1035 | ||
950 | static void __init smp_boot_cpus(unsigned int max_cpus) | 1036 | err = do_boot_cpu(apicid, cpu); |
951 | { | 1037 | if (err < 0) { |
952 | int apicid, cpu, bit, kicked; | 1038 | Dprintk("do_boot_cpu failed %d\n", err); |
953 | unsigned long bogosum = 0; | 1039 | return err; |
1040 | } | ||
954 | 1041 | ||
955 | /* | 1042 | /* |
956 | * Setup boot CPU information | 1043 | * Check TSC synchronization with the AP (keep irqs disabled |
1044 | * while doing so): | ||
957 | */ | 1045 | */ |
958 | smp_store_cpu_info(0); /* Final full version of the data */ | 1046 | local_irq_save(flags); |
959 | printk("CPU%d: ", 0); | 1047 | check_tsc_sync_source(cpu); |
960 | print_cpu_info(&cpu_data(0)); | 1048 | local_irq_restore(flags); |
961 | 1049 | ||
962 | boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); | 1050 | while (!cpu_isset(cpu, cpu_online_map)) { |
963 | boot_cpu_logical_apicid = logical_smp_processor_id(); | 1051 | cpu_relax(); |
964 | per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid; | 1052 | touch_nmi_watchdog(); |
1053 | } | ||
965 | 1054 | ||
966 | current_thread_info()->cpu = 0; | 1055 | return 0; |
1056 | } | ||
967 | 1057 | ||
968 | set_cpu_sibling_map(0); | 1058 | /* |
1059 | * Fall back to non SMP mode after errors. | ||
1060 | * | ||
1061 | * RED-PEN audit/test this more. I bet there is more state messed up here. | ||
1062 | */ | ||
1063 | static __init void disable_smp(void) | ||
1064 | { | ||
1065 | cpu_present_map = cpumask_of_cpu(0); | ||
1066 | cpu_possible_map = cpumask_of_cpu(0); | ||
1067 | #ifdef CONFIG_X86_32 | ||
1068 | smpboot_clear_io_apic_irqs(); | ||
1069 | #endif | ||
1070 | if (smp_found_config) | ||
1071 | phys_cpu_present_map = | ||
1072 | physid_mask_of_physid(boot_cpu_physical_apicid); | ||
1073 | else | ||
1074 | phys_cpu_present_map = physid_mask_of_physid(0); | ||
1075 | map_cpu_to_logical_apicid(); | ||
1076 | cpu_set(0, per_cpu(cpu_sibling_map, 0)); | ||
1077 | cpu_set(0, per_cpu(cpu_core_map, 0)); | ||
1078 | } | ||
1079 | |||
1080 | /* | ||
1081 | * Various sanity checks. | ||
1082 | */ | ||
1083 | static int __init smp_sanity_check(unsigned max_cpus) | ||
1084 | { | ||
1085 | preempt_disable(); | ||
1086 | if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) { | ||
1087 | printk(KERN_WARNING "weird, boot CPU (#%d) not listed " | ||
1088 | "by the BIOS.\n", hard_smp_processor_id()); | ||
1089 | physid_set(hard_smp_processor_id(), phys_cpu_present_map); | ||
1090 | } | ||
969 | 1091 | ||
970 | /* | 1092 | /* |
971 | * If we couldn't find an SMP configuration at boot time, | 1093 | * If we couldn't find an SMP configuration at boot time, |
972 | * get out of here now! | 1094 | * get out of here now! |
973 | */ | 1095 | */ |
974 | if (!smp_found_config && !acpi_lapic) { | 1096 | if (!smp_found_config && !acpi_lapic) { |
1097 | preempt_enable(); | ||
975 | printk(KERN_NOTICE "SMP motherboard not detected.\n"); | 1098 | printk(KERN_NOTICE "SMP motherboard not detected.\n"); |
976 | smpboot_clear_io_apic_irqs(); | 1099 | disable_smp(); |
977 | phys_cpu_present_map = physid_mask_of_physid(0); | ||
978 | if (APIC_init_uniprocessor()) | 1100 | if (APIC_init_uniprocessor()) |
979 | printk(KERN_NOTICE "Local APIC not detected." | 1101 | printk(KERN_NOTICE "Local APIC not detected." |
980 | " Using dummy APIC emulation.\n"); | 1102 | " Using dummy APIC emulation.\n"); |
981 | map_cpu_to_logical_apicid(); | 1103 | return -1; |
982 | cpu_set(0, per_cpu(cpu_sibling_map, 0)); | ||
983 | cpu_set(0, per_cpu(cpu_core_map, 0)); | ||
984 | return; | ||
985 | } | 1104 | } |
986 | 1105 | ||
987 | /* | 1106 | /* |
988 | * Should not be necessary because the MP table should list the boot | 1107 | * Should not be necessary because the MP table should list the boot |
989 | * CPU too, but we do it for the sake of robustness anyway. | 1108 | * CPU too, but we do it for the sake of robustness anyway. |
990 | * Makes no sense to do this check in clustered apic mode, so skip it | ||
991 | */ | 1109 | */ |
992 | if (!check_phys_apicid_present(boot_cpu_physical_apicid)) { | 1110 | if (!check_phys_apicid_present(boot_cpu_physical_apicid)) { |
993 | printk("weird, boot CPU (#%d) not listed by the BIOS.\n", | 1111 | printk(KERN_NOTICE |
994 | boot_cpu_physical_apicid); | 1112 | "weird, boot CPU (#%d) not listed by the BIOS.\n", |
1113 | boot_cpu_physical_apicid); | ||
995 | physid_set(hard_smp_processor_id(), phys_cpu_present_map); | 1114 | physid_set(hard_smp_processor_id(), phys_cpu_present_map); |
996 | } | 1115 | } |
1116 | preempt_enable(); | ||
997 | 1117 | ||
998 | /* | 1118 | /* |
999 | * If we couldn't find a local APIC, then get out of here now! | 1119 | * If we couldn't find a local APIC, then get out of here now! |
1000 | */ | 1120 | */ |
1001 | if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) { | 1121 | if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && |
1122 | !cpu_has_apic) { | ||
1002 | printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", | 1123 | printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", |
1003 | boot_cpu_physical_apicid); | 1124 | boot_cpu_physical_apicid); |
1004 | printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n"); | 1125 | printk(KERN_ERR "... forcing use of dummy APIC emulation." |
1005 | smpboot_clear_io_apic_irqs(); | 1126 | "(tell your hw vendor)\n"); |
1006 | phys_cpu_present_map = physid_mask_of_physid(0); | 1127 | smpboot_clear_io_apic(); |
1007 | map_cpu_to_logical_apicid(); | 1128 | return -1; |
1008 | cpu_set(0, per_cpu(cpu_sibling_map, 0)); | ||
1009 | cpu_set(0, per_cpu(cpu_core_map, 0)); | ||
1010 | return; | ||
1011 | } | 1129 | } |
1012 | 1130 | ||
1013 | verify_local_APIC(); | 1131 | verify_local_APIC(); |
@@ -1016,137 +1134,148 @@ static void __init smp_boot_cpus(unsigned int max_cpus) | |||
1016 | * If SMP should be disabled, then really disable it! | 1134 | * If SMP should be disabled, then really disable it! |
1017 | */ | 1135 | */ |
1018 | if (!max_cpus) { | 1136 | if (!max_cpus) { |
1019 | smp_found_config = 0; | 1137 | printk(KERN_INFO "SMP mode deactivated, " |
1020 | printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n"); | 1138 | "forcing use of dummy APIC emulation.\n"); |
1021 | 1139 | smpboot_clear_io_apic(); | |
1140 | #ifdef CONFIG_X86_32 | ||
1022 | if (nmi_watchdog == NMI_LOCAL_APIC) { | 1141 | if (nmi_watchdog == NMI_LOCAL_APIC) { |
1023 | printk(KERN_INFO "activating minimal APIC for NMI watchdog use.\n"); | 1142 | printk(KERN_INFO "activating minimal APIC for" |
1143 | "NMI watchdog use.\n"); | ||
1024 | connect_bsp_APIC(); | 1144 | connect_bsp_APIC(); |
1025 | setup_local_APIC(); | 1145 | setup_local_APIC(); |
1146 | end_local_APIC_setup(); | ||
1026 | } | 1147 | } |
1027 | smpboot_clear_io_apic_irqs(); | 1148 | #endif |
1028 | phys_cpu_present_map = physid_mask_of_physid(0); | 1149 | return -1; |
1029 | map_cpu_to_logical_apicid(); | ||
1030 | cpu_set(0, per_cpu(cpu_sibling_map, 0)); | ||
1031 | cpu_set(0, per_cpu(cpu_core_map, 0)); | ||
1032 | return; | ||
1033 | } | 1150 | } |
1034 | 1151 | ||
1035 | connect_bsp_APIC(); | 1152 | return 0; |
1036 | setup_local_APIC(); | 1153 | } |
1037 | map_cpu_to_logical_apicid(); | ||
1038 | 1154 | ||
1155 | static void __init smp_cpu_index_default(void) | ||
1156 | { | ||
1157 | int i; | ||
1158 | struct cpuinfo_x86 *c; | ||
1039 | 1159 | ||
1040 | setup_portio_remap(); | 1160 | for_each_cpu_mask(i, cpu_possible_map) { |
1161 | c = &cpu_data(i); | ||
1162 | /* mark all as hotpluggable */ | ||
1163 | c->cpu_index = NR_CPUS; | ||
1164 | } | ||
1165 | } | ||
1041 | 1166 | ||
1167 | /* | ||
1168 | * Prepare for SMP bootup. The MP table or ACPI has been read | ||
1169 | * earlier. Just do some sanity checking here and enable APIC mode. | ||
1170 | */ | ||
1171 | void __init native_smp_prepare_cpus(unsigned int max_cpus) | ||
1172 | { | ||
1173 | nmi_watchdog_default(); | ||
1174 | smp_cpu_index_default(); | ||
1175 | current_cpu_data = boot_cpu_data; | ||
1176 | cpu_callin_map = cpumask_of_cpu(0); | ||
1177 | mb(); | ||
1042 | /* | 1178 | /* |
1043 | * Scan the CPU present map and fire up the other CPUs via do_boot_cpu | 1179 | * Setup boot CPU information |
1044 | * | ||
1045 | * In clustered apic mode, phys_cpu_present_map is a constructed thus: | ||
1046 | * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the | ||
1047 | * clustered apic ID. | ||
1048 | */ | 1180 | */ |
1049 | Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map)); | 1181 | smp_store_cpu_info(0); /* Final full version of the data */ |
1050 | 1182 | boot_cpu_logical_apicid = logical_smp_processor_id(); | |
1051 | kicked = 1; | 1183 | current_thread_info()->cpu = 0; /* needed? */ |
1052 | for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) { | 1184 | set_cpu_sibling_map(0); |
1053 | apicid = cpu_present_to_apicid(bit); | ||
1054 | /* | ||
1055 | * Don't even attempt to start the boot CPU! | ||
1056 | */ | ||
1057 | if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID)) | ||
1058 | continue; | ||
1059 | 1185 | ||
1060 | if (!check_apicid_present(bit)) | 1186 | if (smp_sanity_check(max_cpus) < 0) { |
1061 | continue; | 1187 | printk(KERN_INFO "SMP disabled\n"); |
1062 | if (max_cpus <= cpucount+1) | 1188 | disable_smp(); |
1063 | continue; | 1189 | return; |
1190 | } | ||
1064 | 1191 | ||
1065 | if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu)) | 1192 | preempt_disable(); |
1066 | printk("CPU #%d not responding - cannot use it.\n", | 1193 | if (GET_APIC_ID(read_apic_id()) != boot_cpu_physical_apicid) { |
1067 | apicid); | 1194 | panic("Boot APIC ID in local APIC unexpected (%d vs %d)", |
1068 | else | 1195 | GET_APIC_ID(read_apic_id()), boot_cpu_physical_apicid); |
1069 | ++kicked; | 1196 | /* Or can we switch back to PIC here? */ |
1070 | } | 1197 | } |
1198 | preempt_enable(); | ||
1071 | 1199 | ||
1200 | #ifdef CONFIG_X86_32 | ||
1201 | connect_bsp_APIC(); | ||
1202 | #endif | ||
1072 | /* | 1203 | /* |
1073 | * Cleanup possible dangling ends... | 1204 | * Switch from PIC to APIC mode. |
1074 | */ | 1205 | */ |
1075 | smpboot_restore_warm_reset_vector(); | 1206 | setup_local_APIC(); |
1076 | 1207 | ||
1208 | #ifdef CONFIG_X86_64 | ||
1077 | /* | 1209 | /* |
1078 | * Allow the user to impress friends. | 1210 | * Enable IO APIC before setting up error vector |
1079 | */ | 1211 | */ |
1080 | Dprintk("Before bogomips.\n"); | 1212 | if (!skip_ioapic_setup && nr_ioapics) |
1081 | for_each_possible_cpu(cpu) | 1213 | enable_IO_APIC(); |
1082 | if (cpu_isset(cpu, cpu_callout_map)) | 1214 | #endif |
1083 | bogosum += cpu_data(cpu).loops_per_jiffy; | 1215 | end_local_APIC_setup(); |
1084 | printk(KERN_INFO | ||
1085 | "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", | ||
1086 | cpucount+1, | ||
1087 | bogosum/(500000/HZ), | ||
1088 | (bogosum/(5000/HZ))%100); | ||
1089 | |||
1090 | Dprintk("Before bogocount - setting activated=1.\n"); | ||
1091 | |||
1092 | if (smp_b_stepping) | ||
1093 | printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n"); | ||
1094 | 1216 | ||
1095 | /* | 1217 | map_cpu_to_logical_apicid(); |
1096 | * Don't taint if we are running SMP kernel on a single non-MP | ||
1097 | * approved Athlon | ||
1098 | */ | ||
1099 | if (tainted & TAINT_UNSAFE_SMP) { | ||
1100 | if (cpucount) | ||
1101 | printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n"); | ||
1102 | else | ||
1103 | tainted &= ~TAINT_UNSAFE_SMP; | ||
1104 | } | ||
1105 | 1218 | ||
1106 | Dprintk("Boot done.\n"); | 1219 | setup_portio_remap(); |
1107 | 1220 | ||
1221 | smpboot_setup_io_apic(); | ||
1108 | /* | 1222 | /* |
1109 | * construct cpu_sibling_map, so that we can tell sibling CPUs | 1223 | * Set up local APIC timer on boot CPU. |
1110 | * efficiently. | ||
1111 | */ | 1224 | */ |
1112 | for_each_possible_cpu(cpu) { | ||
1113 | cpus_clear(per_cpu(cpu_sibling_map, cpu)); | ||
1114 | cpus_clear(per_cpu(cpu_core_map, cpu)); | ||
1115 | } | ||
1116 | |||
1117 | cpu_set(0, per_cpu(cpu_sibling_map, 0)); | ||
1118 | cpu_set(0, per_cpu(cpu_core_map, 0)); | ||
1119 | |||
1120 | smpboot_setup_io_apic(); | ||
1121 | 1225 | ||
1226 | printk(KERN_INFO "CPU%d: ", 0); | ||
1227 | print_cpu_info(&cpu_data(0)); | ||
1122 | setup_boot_clock(); | 1228 | setup_boot_clock(); |
1123 | } | 1229 | } |
1230 | /* | ||
1231 | * Early setup to make printk work. | ||
1232 | */ | ||
1233 | void __init native_smp_prepare_boot_cpu(void) | ||
1234 | { | ||
1235 | int me = smp_processor_id(); | ||
1236 | #ifdef CONFIG_X86_32 | ||
1237 | init_gdt(me); | ||
1238 | switch_to_new_gdt(); | ||
1239 | #endif | ||
1240 | /* already set me in cpu_online_map in boot_cpu_init() */ | ||
1241 | cpu_set(me, cpu_callout_map); | ||
1242 | per_cpu(cpu_state, me) = CPU_ONLINE; | ||
1243 | } | ||
1124 | 1244 | ||
1125 | /* These are wrappers to interface to the new boot process. Someone | 1245 | void __init native_smp_cpus_done(unsigned int max_cpus) |
1126 | who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */ | ||
1127 | void __init native_smp_prepare_cpus(unsigned int max_cpus) | ||
1128 | { | 1246 | { |
1129 | smp_commenced_mask = cpumask_of_cpu(0); | 1247 | Dprintk("Boot done.\n"); |
1130 | cpu_callin_map = cpumask_of_cpu(0); | 1248 | |
1131 | mb(); | 1249 | impress_friends(); |
1132 | smp_boot_cpus(max_cpus); | 1250 | smp_checks(); |
1251 | #ifdef CONFIG_X86_IO_APIC | ||
1252 | setup_ioapic_dest(); | ||
1253 | #endif | ||
1254 | check_nmi_watchdog(); | ||
1255 | #ifdef CONFIG_X86_32 | ||
1256 | zap_low_mappings(); | ||
1257 | #endif | ||
1133 | } | 1258 | } |
1134 | 1259 | ||
1135 | void __init native_smp_prepare_boot_cpu(void) | 1260 | #ifdef CONFIG_HOTPLUG_CPU |
1261 | |||
1262 | # ifdef CONFIG_X86_32 | ||
1263 | void cpu_exit_clear(void) | ||
1136 | { | 1264 | { |
1137 | unsigned int cpu = smp_processor_id(); | 1265 | int cpu = raw_smp_processor_id(); |
1138 | 1266 | ||
1139 | init_gdt(cpu); | 1267 | idle_task_exit(); |
1140 | switch_to_new_gdt(); | 1268 | |
1269 | cpu_uninit(); | ||
1270 | irq_ctx_exit(cpu); | ||
1271 | |||
1272 | cpu_clear(cpu, cpu_callout_map); | ||
1273 | cpu_clear(cpu, cpu_callin_map); | ||
1141 | 1274 | ||
1142 | cpu_set(cpu, cpu_online_map); | 1275 | unmap_cpu_to_logical_apicid(cpu); |
1143 | cpu_set(cpu, cpu_callout_map); | ||
1144 | cpu_set(cpu, cpu_present_map); | ||
1145 | cpu_set(cpu, cpu_possible_map); | ||
1146 | __get_cpu_var(cpu_state) = CPU_ONLINE; | ||
1147 | } | 1276 | } |
1277 | # endif /* CONFIG_X86_32 */ | ||
1148 | 1278 | ||
1149 | #ifdef CONFIG_HOTPLUG_CPU | ||
1150 | void remove_siblinginfo(int cpu) | 1279 | void remove_siblinginfo(int cpu) |
1151 | { | 1280 | { |
1152 | int sibling; | 1281 | int sibling; |
@@ -1160,7 +1289,7 @@ void remove_siblinginfo(int cpu) | |||
1160 | if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) | 1289 | if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) |
1161 | cpu_data(sibling).booted_cores--; | 1290 | cpu_data(sibling).booted_cores--; |
1162 | } | 1291 | } |
1163 | 1292 | ||
1164 | for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) | 1293 | for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) |
1165 | cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); | 1294 | cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); |
1166 | cpus_clear(per_cpu(cpu_sibling_map, cpu)); | 1295 | cpus_clear(per_cpu(cpu_sibling_map, cpu)); |
@@ -1170,35 +1299,99 @@ void remove_siblinginfo(int cpu) | |||
1170 | cpu_clear(cpu, cpu_sibling_setup_map); | 1299 | cpu_clear(cpu, cpu_sibling_setup_map); |
1171 | } | 1300 | } |
1172 | 1301 | ||
1302 | int additional_cpus __initdata = -1; | ||
1303 | |||
1304 | static __init int setup_additional_cpus(char *s) | ||
1305 | { | ||
1306 | return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL; | ||
1307 | } | ||
1308 | early_param("additional_cpus", setup_additional_cpus); | ||
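
For context, get_option() parses one integer out of the option string and advances the cursor, and the handler returns 0 on success. A rough user-space approximation of that parse (get_option_sketch() is a made-up stand-in, not the kernel helper):

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-in for the kernel's get_option(): parse one int,
     * advance the cursor, return nonzero on success. */
    static int get_option_sketch(char **s, int *val)
    {
            char *end;
            long v = strtol(*s, &end, 0);

            if (end == *s)
                    return 0;               /* no integer found */
            *s = end;
            *val = (int)v;
            return 1;
    }

    int main(void)
    {
            char arg[] = "4";               /* as in booting with additional_cpus=4 */
            char *p = arg;
            int additional_cpus = -1;

            if (get_option_sketch(&p, &additional_cpus))
                    printf("additional_cpus = %d\n", additional_cpus);
            return 0;
    }
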
1309 | |||
1310 | /* | ||
1311 | * cpu_possible_map should be static; it cannot change as CPUs | ||
1312 | * are onlined or offlined. The reason is that per-cpu data structures | ||
1313 | * are allocated by some modules at init time, and those modules don't | ||
1314 | * expect to do this dynamically on cpu arrival/departure. | ||
1315 | * cpu_present_map, on the other hand, can change dynamically. | ||
1316 | * When cpu_hotplug is not compiled in, we resort to the current | ||
1317 | * behaviour, which is cpu_possible == cpu_present. | ||
1318 | * - Ashok Raj | ||
1319 | * | ||
1320 | * Three ways to find out the number of additional hotplug CPUs: | ||
1321 | * - If the BIOS specified disabled CPUs in ACPI/mptables use that. | ||
1322 | * - The user can overwrite it with additional_cpus=NUM | ||
1323 | * - Otherwise don't reserve additional CPUs. | ||
1324 | * We do this because additional CPUs waste a lot of memory. | ||
1325 | * -AK | ||
1326 | */ | ||
1327 | __init void prefill_possible_map(void) | ||
1328 | { | ||
1329 | int i; | ||
1330 | int possible; | ||
1331 | |||
1332 | if (additional_cpus == -1) { | ||
1333 | if (disabled_cpus > 0) | ||
1334 | additional_cpus = disabled_cpus; | ||
1335 | else | ||
1336 | additional_cpus = 0; | ||
1337 | } | ||
1338 | possible = num_processors + additional_cpus; | ||
1339 | if (possible > NR_CPUS) | ||
1340 | possible = NR_CPUS; | ||
1341 | |||
1342 | printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", | ||
1343 | possible, max_t(int, possible - num_processors, 0)); | ||
1344 | |||
1345 | for (i = 0; i < possible; i++) | ||
1346 | cpu_set(i, cpu_possible_map); | ||
1347 | } | ||
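
The sizing policy in prefill_possible_map() is simple enough to check in isolation. A stand-alone sketch of the same arithmetic follows; the NR_CPUS value here is illustrative, not the kernel's Kconfig constant.

    #include <stdio.h>

    #define NR_CPUS 8       /* illustrative; a Kconfig constant in the kernel */

    static int possible_cpus(int num_processors, int disabled_cpus,
                             int additional_cpus)
    {
            if (additional_cpus == -1)      /* no additional_cpus= override */
                    additional_cpus = disabled_cpus > 0 ? disabled_cpus : 0;

            int possible = num_processors + additional_cpus;
            return possible > NR_CPUS ? NR_CPUS : possible;
    }

    int main(void)
    {
            /* 4 CPUs booted, 2 marked disabled in ACPI, no command-line option */
            printf("SMP: Allowing %d CPUs\n", possible_cpus(4, 2, -1)); /* 6 */
            return 0;
    }
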
1348 | |||
1349 | static void __ref remove_cpu_from_maps(int cpu) | ||
1350 | { | ||
1351 | cpu_clear(cpu, cpu_online_map); | ||
1352 | #ifdef CONFIG_X86_64 | ||
1353 | cpu_clear(cpu, cpu_callout_map); | ||
1354 | cpu_clear(cpu, cpu_callin_map); | ||
1355 | /* was set by cpu_init() */ | ||
1356 | clear_bit(cpu, (unsigned long *)&cpu_initialized); | ||
1357 | clear_node_cpumask(cpu); | ||
1358 | #endif | ||
1359 | } | ||
1360 | |||
1173 | int __cpu_disable(void) | 1361 | int __cpu_disable(void) |
1174 | { | 1362 | { |
1175 | cpumask_t map = cpu_online_map; | ||
1176 | int cpu = smp_processor_id(); | 1363 | int cpu = smp_processor_id(); |
1177 | 1364 | ||
1178 | /* | 1365 | /* |
1179 | * Perhaps use cpufreq to drop frequency, but that could go | 1366 | * Perhaps use cpufreq to drop frequency, but that could go |
1180 | * into generic code. | 1367 | * into generic code. |
1181 | * | 1368 | * |
1182 | * We won't take down the boot processor on i386 due to some | 1369 | * We won't take down the boot processor on i386 due to some |
1183 | * interrupts only being able to be serviced by the BSP. | 1370 | * interrupts only being able to be serviced by the BSP. |
1184 | * Especially so if we're not using an IOAPIC -zwane | 1371 | * Especially so if we're not using an IOAPIC -zwane |
1185 | */ | 1372 | */ |
1186 | if (cpu == 0) | 1373 | if (cpu == 0) |
1187 | return -EBUSY; | 1374 | return -EBUSY; |
1375 | |||
1188 | if (nmi_watchdog == NMI_LOCAL_APIC) | 1376 | if (nmi_watchdog == NMI_LOCAL_APIC) |
1189 | stop_apic_nmi_watchdog(NULL); | 1377 | stop_apic_nmi_watchdog(NULL); |
1190 | clear_local_APIC(); | 1378 | clear_local_APIC(); |
1191 | /* Allow any queued timer interrupts to get serviced */ | 1379 | |
1380 | /* | ||
1381 | * HACK: | ||
1382 | * Allow any queued timer interrupts to get serviced | ||
1383 | * This is only a temporary solution until we cleanup | ||
1384 | * fixup_irqs as we do for IA64. | ||
1385 | */ | ||
1192 | local_irq_enable(); | 1386 | local_irq_enable(); |
1193 | mdelay(1); | 1387 | mdelay(1); |
1194 | local_irq_disable(); | ||
1195 | 1388 | ||
1389 | local_irq_disable(); | ||
1196 | remove_siblinginfo(cpu); | 1390 | remove_siblinginfo(cpu); |
1197 | 1391 | ||
1198 | cpu_clear(cpu, map); | ||
1199 | fixup_irqs(map); | ||
1200 | /* It's now safe to remove this processor from the online map */ | 1392 | /* It's now safe to remove this processor from the online map */ |
1201 | cpu_clear(cpu, cpu_online_map); | 1393 | remove_cpu_from_maps(cpu); |
1394 | fixup_irqs(cpu_online_map); | ||
1202 | return 0; | 1395 | return 0; |
1203 | } | 1396 | } |
1204 | 1397 | ||
@@ -1210,14 +1403,14 @@ void __cpu_die(unsigned int cpu) | |||
1210 | for (i = 0; i < 10; i++) { | 1403 | for (i = 0; i < 10; i++) { |
1211 | /* They ack this in play_dead by setting CPU_DEAD */ | 1404 | /* They ack this in play_dead by setting CPU_DEAD */ |
1212 | if (per_cpu(cpu_state, cpu) == CPU_DEAD) { | 1405 | if (per_cpu(cpu_state, cpu) == CPU_DEAD) { |
1213 | printk ("CPU %d is now offline\n", cpu); | 1406 | printk(KERN_INFO "CPU %d is now offline\n", cpu); |
1214 | if (1 == num_online_cpus()) | 1407 | if (1 == num_online_cpus()) |
1215 | alternatives_smp_switch(0); | 1408 | alternatives_smp_switch(0); |
1216 | return; | 1409 | return; |
1217 | } | 1410 | } |
1218 | msleep(100); | 1411 | msleep(100); |
1219 | } | 1412 | } |
1220 | printk(KERN_ERR "CPU %u didn't die...\n", cpu); | 1413 | printk(KERN_ERR "CPU %u didn't die...\n", cpu); |
1221 | } | 1414 | } |
1222 | #else /* ... !CONFIG_HOTPLUG_CPU */ | 1415 | #else /* ... !CONFIG_HOTPLUG_CPU */ |
1223 | int __cpu_disable(void) | 1416 | int __cpu_disable(void) |
@@ -1230,81 +1423,7 @@ void __cpu_die(unsigned int cpu) | |||
1230 | /* We said "no" in __cpu_disable */ | 1423 | /* We said "no" in __cpu_disable */ |
1231 | BUG(); | 1424 | BUG(); |
1232 | } | 1425 | } |
1233 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
1234 | |||
1235 | int __cpuinit native_cpu_up(unsigned int cpu) | ||
1236 | { | ||
1237 | unsigned long flags; | ||
1238 | #ifdef CONFIG_HOTPLUG_CPU | ||
1239 | int ret = 0; | ||
1240 | |||
1241 | /* | ||
1242 | * We do warm boot only on cpus that had booted earlier | ||
1243 | * Otherwise cold boot is all handled from smp_boot_cpus(). | ||
1244 | * cpu_callin_map is set during AP kickstart process. Its reset | ||
1245 | * when a cpu is taken offline from cpu_exit_clear(). | ||
1246 | */ | ||
1247 | if (!cpu_isset(cpu, cpu_callin_map)) | ||
1248 | ret = __smp_prepare_cpu(cpu); | ||
1249 | |||
1250 | if (ret) | ||
1251 | return -EIO; | ||
1252 | #endif | ||
1253 | |||
1254 | /* In case one didn't come up */ | ||
1255 | if (!cpu_isset(cpu, cpu_callin_map)) { | ||
1256 | printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu); | ||
1257 | return -EIO; | ||
1258 | } | ||
1259 | |||
1260 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; | ||
1261 | /* Unleash the CPU! */ | ||
1262 | cpu_set(cpu, smp_commenced_mask); | ||
1263 | |||
1264 | /* | ||
1265 | * Check TSC synchronization with the AP (keep irqs disabled | ||
1266 | * while doing so): | ||
1267 | */ | ||
1268 | local_irq_save(flags); | ||
1269 | check_tsc_sync_source(cpu); | ||
1270 | local_irq_restore(flags); | ||
1271 | |||
1272 | while (!cpu_isset(cpu, cpu_online_map)) { | ||
1273 | cpu_relax(); | ||
1274 | touch_nmi_watchdog(); | ||
1275 | } | ||
1276 | |||
1277 | return 0; | ||
1278 | } | ||
1279 | |||
1280 | void __init native_smp_cpus_done(unsigned int max_cpus) | ||
1281 | { | ||
1282 | #ifdef CONFIG_X86_IO_APIC | ||
1283 | setup_ioapic_dest(); | ||
1284 | #endif | 1426 | #endif |
1285 | zap_low_mappings(); | ||
1286 | } | ||
1287 | |||
1288 | void __init smp_intr_init(void) | ||
1289 | { | ||
1290 | /* | ||
1291 | * IRQ0 must be given a fixed assignment and initialized, | ||
1292 | * because it's used before the IO-APIC is set up. | ||
1293 | */ | ||
1294 | set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]); | ||
1295 | |||
1296 | /* | ||
1297 | * The reschedule interrupt is a CPU-to-CPU reschedule-helper | ||
1298 | * IPI, driven by wakeup. | ||
1299 | */ | ||
1300 | set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); | ||
1301 | |||
1302 | /* IPI for invalidation */ | ||
1303 | set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt); | ||
1304 | |||
1305 | /* IPI for generic function call */ | ||
1306 | set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); | ||
1307 | } | ||
1308 | 1427 | ||
1309 | /* | 1428 | /* |
1310 | * If the BIOS enumerates physical processors before logical, | 1429 | * If the BIOS enumerates physical processors before logical, |
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c deleted file mode 100644 index 0880f2c388a9..000000000000 --- a/arch/x86/kernel/smpboot_64.c +++ /dev/null | |||
@@ -1,1108 +0,0 @@ | |||
1 | /* | ||
2 | * x86 SMP booting functions | ||
3 | * | ||
4 | * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> | ||
5 | * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com> | ||
6 | * Copyright 2001 Andi Kleen, SuSE Labs. | ||
7 | * | ||
8 | * Much of the core SMP work is based on previous work by Thomas Radke, to | ||
9 | * whom a great many thanks are extended. | ||
10 | * | ||
11 | * Thanks to Intel for making available several different Pentium, | ||
12 | * Pentium Pro and Pentium-II/Xeon MP machines. | ||
13 | * Original development of Linux SMP code supported by Caldera. | ||
14 | * | ||
15 | * This code is released under the GNU General Public License version 2 | ||
16 | * | ||
17 | * Fixes | ||
18 | * Felix Koop : NR_CPUS used properly | ||
19 | * Jose Renau : Handle single CPU case. | ||
20 | * Alan Cox : By repeated request 8) - Total BogoMIP report. | ||
21 | * Greg Wright : Fix for kernel stacks panic. | ||
22 | * Erich Boleyn : MP v1.4 and additional changes. | ||
23 | * Matthias Sattler : Changes for 2.1 kernel map. | ||
24 | * Michel Lespinasse : Changes for 2.1 kernel map. | ||
25 | * Michael Chastain : Change trampoline.S to gnu as. | ||
26 | * Alan Cox : Dumb bug: 'B' step PPro's are fine | ||
27 | * Ingo Molnar : Added APIC timers, based on code | ||
28 | * from Jose Renau | ||
29 | * Ingo Molnar : various cleanups and rewrites | ||
30 | * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug. | ||
31 | * Maciej W. Rozycki : Bits for genuine 82489DX APICs | ||
32 | * Andi Kleen : Changed for SMP boot into long mode. | ||
33 | * Rusty Russell : Hacked into shape for new "hotplug" boot process. | ||
34 | * Andi Kleen : Converted to new state machine. | ||
35 | * Various cleanups. | ||
36 | * Probably mostly hotplug CPU ready now. | ||
37 | * Ashok Raj : CPU hotplug support | ||
38 | */ | ||
39 | |||
40 | |||
41 | #include <linux/init.h> | ||
42 | |||
43 | #include <linux/mm.h> | ||
44 | #include <linux/kernel_stat.h> | ||
45 | #include <linux/bootmem.h> | ||
46 | #include <linux/thread_info.h> | ||
47 | #include <linux/module.h> | ||
48 | #include <linux/delay.h> | ||
49 | #include <linux/mc146818rtc.h> | ||
50 | #include <linux/smp.h> | ||
51 | #include <linux/kdebug.h> | ||
52 | |||
53 | #include <asm/mtrr.h> | ||
54 | #include <asm/pgalloc.h> | ||
55 | #include <asm/desc.h> | ||
56 | #include <asm/tlbflush.h> | ||
57 | #include <asm/proto.h> | ||
58 | #include <asm/nmi.h> | ||
59 | #include <asm/irq.h> | ||
60 | #include <asm/hw_irq.h> | ||
61 | #include <asm/numa.h> | ||
62 | |||
63 | /* Number of siblings per CPU package */ | ||
64 | int smp_num_siblings = 1; | ||
65 | EXPORT_SYMBOL(smp_num_siblings); | ||
66 | |||
67 | /* Last level cache ID of each logical CPU */ | ||
68 | DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; | ||
69 | |||
70 | /* Bitmask of currently online CPUs */ | ||
71 | cpumask_t cpu_online_map __read_mostly; | ||
72 | |||
73 | EXPORT_SYMBOL(cpu_online_map); | ||
74 | |||
75 | /* | ||
76 | * Private maps to synchronize booting between AP and BP. | ||
77 | * Probably not needed anymore, but it makes for easier debugging. -AK | ||
78 | */ | ||
79 | cpumask_t cpu_callin_map; | ||
80 | cpumask_t cpu_callout_map; | ||
81 | cpumask_t cpu_possible_map; | ||
82 | EXPORT_SYMBOL(cpu_possible_map); | ||
83 | |||
84 | /* Per CPU bogomips and other parameters */ | ||
85 | DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); | ||
86 | EXPORT_PER_CPU_SYMBOL(cpu_info); | ||
87 | |||
88 | /* Set when the idlers are all forked */ | ||
89 | int smp_threads_ready; | ||
90 | |||
91 | /* representing HT siblings of each logical CPU */ | ||
92 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); | ||
93 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); | ||
94 | |||
95 | /* representing HT and core siblings of each logical CPU */ | ||
96 | DEFINE_PER_CPU(cpumask_t, cpu_core_map); | ||
97 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); | ||
98 | |||
99 | /* | ||
100 | * Trampoline 80x86 program as an array. | ||
101 | */ | ||
102 | |||
103 | extern const unsigned char trampoline_data[]; | ||
104 | extern const unsigned char trampoline_end[]; | ||
105 | |||
106 | /* State of each CPU */ | ||
107 | DEFINE_PER_CPU(int, cpu_state) = { 0 }; | ||
108 | |||
109 | /* | ||
110 | * Store all idle threads, this can be reused instead of creating | ||
111 | * a new thread. Also avoids complicated thread destroy functionality | ||
112 | * for idle threads. | ||
113 | */ | ||
114 | #ifdef CONFIG_HOTPLUG_CPU | ||
115 | /* | ||
116 | * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is | ||
117 | * removed after init for !CONFIG_HOTPLUG_CPU. | ||
118 | */ | ||
119 | static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); | ||
120 | #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) | ||
121 | #define set_idle_for_cpu(x,p) (per_cpu(idle_thread_array, x) = (p)) | ||
122 | #else | ||
123 | struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata; | ||
124 | #define get_idle_for_cpu(x) (idle_thread_array[(x)]) | ||
125 | #define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p)) | ||
126 | #endif | ||
127 | |||
128 | |||
129 | /* | ||
130 | * Currently trivial. Write the real->protected mode | ||
131 | * bootstrap into the page concerned. The caller | ||
132 | * has made sure it's suitably aligned. | ||
133 | */ | ||
134 | |||
135 | static unsigned long __cpuinit setup_trampoline(void) | ||
136 | { | ||
137 | void *tramp = __va(SMP_TRAMPOLINE_BASE); | ||
138 | memcpy(tramp, trampoline_data, trampoline_end - trampoline_data); | ||
139 | return virt_to_phys(tramp); | ||
140 | } | ||
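
The returned address must be page-aligned because a STARTUP IPI can only encode a 4KiB-aligned real-mode entry point: the vector field is start_rip >> 12, as wakeup_secondary_via_INIT() shows further down. A quick stand-alone illustration, with a made-up base address:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long start_rip = 0x6000;       /* illustrative trampoline base */

            assert((start_rip & 0xfff) == 0);       /* must be page-aligned */
            /* The SIPI vector byte selects physical page start_rip >> 12. */
            printf("SIPI vector 0x%02lx -> entry 0x%05lx\n",
                   start_rip >> 12, (start_rip >> 12) << 12);
            return 0;
    }
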
141 | |||
142 | /* | ||
143 | * The bootstrap kernel entry code has set these up. Save them for | ||
144 | * a given CPU | ||
145 | */ | ||
146 | |||
147 | static void __cpuinit smp_store_cpu_info(int id) | ||
148 | { | ||
149 | struct cpuinfo_x86 *c = &cpu_data(id); | ||
150 | |||
151 | *c = boot_cpu_data; | ||
152 | c->cpu_index = id; | ||
153 | identify_cpu(c); | ||
154 | print_cpu_info(c); | ||
155 | } | ||
156 | |||
157 | static atomic_t init_deasserted __cpuinitdata; | ||
158 | |||
159 | /* | ||
160 | * Report back to the Boot Processor. | ||
161 | * Running on AP. | ||
162 | */ | ||
163 | void __cpuinit smp_callin(void) | ||
164 | { | ||
165 | int cpuid, phys_id; | ||
166 | unsigned long timeout; | ||
167 | |||
168 | /* | ||
169 | * If woken up by an INIT in an 82489DX configuration | ||
170 | * we may get here before an INIT-deassert IPI reaches | ||
171 | * our local APIC. We have to wait for the IPI or we'll | ||
172 | * lock up on an APIC access. | ||
173 | */ | ||
174 | while (!atomic_read(&init_deasserted)) | ||
175 | cpu_relax(); | ||
176 | |||
177 | /* | ||
178 | * (This works even if the APIC is not enabled.) | ||
179 | */ | ||
180 | phys_id = GET_APIC_ID(apic_read(APIC_ID)); | ||
181 | cpuid = smp_processor_id(); | ||
182 | if (cpu_isset(cpuid, cpu_callin_map)) { | ||
183 | panic("smp_callin: phys CPU#%d, CPU#%d already present??\n", | ||
184 | phys_id, cpuid); | ||
185 | } | ||
186 | Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id); | ||
187 | |||
188 | /* | ||
189 | * STARTUP IPIs are fragile beasts as they might sometimes | ||
190 | * trigger some glue motherboard logic. Complete APIC bus | ||
191 | * silence for 1 second; this overestimates the time the | ||
192 | * boot CPU spends sending the up to 2 STARTUP IPIs | ||
193 | * by a factor of two. This should be enough. | ||
194 | */ | ||
195 | |||
196 | /* | ||
197 | * Waiting 2s total for startup (udelay is not yet working) | ||
198 | */ | ||
199 | timeout = jiffies + 2*HZ; | ||
200 | while (time_before(jiffies, timeout)) { | ||
201 | /* | ||
202 | * Has the boot CPU finished its STARTUP sequence? | ||
203 | */ | ||
204 | if (cpu_isset(cpuid, cpu_callout_map)) | ||
205 | break; | ||
206 | cpu_relax(); | ||
207 | } | ||
208 | |||
209 | if (!time_before(jiffies, timeout)) { | ||
210 | panic("smp_callin: CPU%d started up but did not get a callout!\n", | ||
211 | cpuid); | ||
212 | } | ||
213 | |||
214 | /* | ||
215 | * the boot CPU has finished the init stage and is spinning | ||
216 | * on callin_map until we finish. We are free to set up this | ||
217 | * CPU, first the APIC. (this is probably redundant on most | ||
218 | * boards) | ||
219 | */ | ||
220 | |||
221 | Dprintk("CALLIN, before setup_local_APIC().\n"); | ||
222 | setup_local_APIC(); | ||
223 | end_local_APIC_setup(); | ||
224 | |||
225 | /* | ||
226 | * Get our bogomips. | ||
227 | * | ||
228 | * Need to enable IRQs because calibration can take a while, and | ||
229 | * the NMI watchdog might otherwise kill us. | ||
230 | */ | ||
231 | local_irq_enable(); | ||
232 | calibrate_delay(); | ||
233 | local_irq_disable(); | ||
234 | Dprintk("Stack at about %p\n",&cpuid); | ||
235 | |||
236 | /* | ||
237 | * Save our processor parameters | ||
238 | */ | ||
239 | smp_store_cpu_info(cpuid); | ||
240 | |||
241 | /* | ||
242 | * Allow the master to continue. | ||
243 | */ | ||
244 | cpu_set(cpuid, cpu_callin_map); | ||
245 | } | ||
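
The 2s timeout loop above depends on time_before(), which stays correct across a jiffies wraparound by comparing through a signed difference. A user-space model of that idiom:

    #include <stdio.h>

    /* Wraparound-safe "a is before b", as the kernel's time_before() does it. */
    #define time_before(a, b) ((long)((a) - (b)) < 0)

    int main(void)
    {
            unsigned long jiffies = ~0UL - 0x10;    /* counter about to wrap */
            unsigned long timeout = jiffies + 0x20; /* wraps past zero */

            /* A naive "jiffies < timeout" gives the wrong answer here. */
            printf("naive: %d, time_before: %d\n",
                   jiffies < timeout, time_before(jiffies, timeout));
            return 0;
    }
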
246 | |||
247 | /* maps the cpu to the sched domain representing multi-core */ | ||
248 | cpumask_t cpu_coregroup_map(int cpu) | ||
249 | { | ||
250 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
251 | /* | ||
252 | * For perf, we return the last-level-cache shared map, | ||
253 | * and for power savings, we return cpu_core_map. | ||
254 | */ | ||
255 | if (sched_mc_power_savings || sched_smt_power_savings) | ||
256 | return per_cpu(cpu_core_map, cpu); | ||
257 | else | ||
258 | return c->llc_shared_map; | ||
259 | } | ||
260 | |||
261 | /* representing cpus for which sibling maps can be computed */ | ||
262 | static cpumask_t cpu_sibling_setup_map; | ||
263 | |||
264 | static inline void set_cpu_sibling_map(int cpu) | ||
265 | { | ||
266 | int i; | ||
267 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
268 | |||
269 | cpu_set(cpu, cpu_sibling_setup_map); | ||
270 | |||
271 | if (smp_num_siblings > 1) { | ||
272 | for_each_cpu_mask(i, cpu_sibling_setup_map) { | ||
273 | if (c->phys_proc_id == cpu_data(i).phys_proc_id && | ||
274 | c->cpu_core_id == cpu_data(i).cpu_core_id) { | ||
275 | cpu_set(i, per_cpu(cpu_sibling_map, cpu)); | ||
276 | cpu_set(cpu, per_cpu(cpu_sibling_map, i)); | ||
277 | cpu_set(i, per_cpu(cpu_core_map, cpu)); | ||
278 | cpu_set(cpu, per_cpu(cpu_core_map, i)); | ||
279 | cpu_set(i, c->llc_shared_map); | ||
280 | cpu_set(cpu, cpu_data(i).llc_shared_map); | ||
281 | } | ||
282 | } | ||
283 | } else { | ||
284 | cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); | ||
285 | } | ||
286 | |||
287 | cpu_set(cpu, c->llc_shared_map); | ||
288 | |||
289 | if (current_cpu_data.x86_max_cores == 1) { | ||
290 | per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu); | ||
291 | c->booted_cores = 1; | ||
292 | return; | ||
293 | } | ||
294 | |||
295 | for_each_cpu_mask(i, cpu_sibling_setup_map) { | ||
296 | if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && | ||
297 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { | ||
298 | cpu_set(i, c->llc_shared_map); | ||
299 | cpu_set(cpu, cpu_data(i).llc_shared_map); | ||
300 | } | ||
301 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { | ||
302 | cpu_set(i, per_cpu(cpu_core_map, cpu)); | ||
303 | cpu_set(cpu, per_cpu(cpu_core_map, i)); | ||
304 | /* | ||
305 | * Does this new cpu bring up a new core? | ||
306 | */ | ||
307 | if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) { | ||
308 | /* | ||
309 | * for each core in package, increment | ||
310 | * the booted_cores for this new cpu | ||
311 | */ | ||
312 | if (first_cpu(per_cpu(cpu_sibling_map, i)) == i) | ||
313 | c->booted_cores++; | ||
314 | /* | ||
315 | * increment the core count for all | ||
316 | * the other cpus in this package | ||
317 | */ | ||
318 | if (i != cpu) | ||
319 | cpu_data(i).booted_cores++; | ||
320 | } else if (i != cpu && !c->booted_cores) | ||
321 | c->booted_cores = cpu_data(i).booted_cores; | ||
322 | } | ||
323 | } | ||
324 | } | ||
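
The loops above derive the maps pairwise from the topology IDs filled in during CPU identification. A toy model of the same derivation, using plain bitmasks and made-up (package, core) IDs:

    #include <stdio.h>

    /* Toy topology: CPUs sharing (pkg, core) are SMT siblings; CPUs sharing
     * pkg are core siblings. All values below are illustrative. */
    struct cpu { int pkg, core; };

    int main(void)
    {
            struct cpu cpus[] = { {0, 0}, {0, 0}, {0, 1}, {1, 0} };
            int n = 4, i, j;

            for (i = 0; i < n; i++) {
                    unsigned int sibling = 0, core_mask = 0;

                    for (j = 0; j < n; j++) {
                            if (cpus[j].pkg != cpus[i].pkg)
                                    continue;
                            core_mask |= 1u << j;       /* same package */
                            if (cpus[j].core == cpus[i].core)
                                    sibling |= 1u << j; /* same core: SMT */
                    }
                    printf("cpu%d: sibling=%#x core=%#x\n", i, sibling, core_mask);
            }
            return 0;
    }
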
325 | |||
326 | /* | ||
327 | * Setup code for the secondary processor (after coming out of the trampoline) | ||
328 | */ | ||
329 | void __cpuinit start_secondary(void) | ||
330 | { | ||
331 | /* | ||
332 | * Don't put anything before smp_callin(); SMP | ||
333 | * booting is so fragile that we want to limit the | ||
334 | * things done here to the most necessary things. | ||
335 | */ | ||
336 | cpu_init(); | ||
337 | preempt_disable(); | ||
338 | smp_callin(); | ||
339 | |||
340 | /* otherwise gcc will move up smp_processor_id() before cpu_init() */ | ||
341 | barrier(); | ||
342 | |||
343 | /* | ||
344 | * Check TSC sync first: | ||
345 | */ | ||
346 | check_tsc_sync_target(); | ||
347 | |||
348 | if (nmi_watchdog == NMI_IO_APIC) { | ||
349 | disable_8259A_irq(0); | ||
350 | enable_NMI_through_LVT0(); | ||
351 | enable_8259A_irq(0); | ||
352 | } | ||
353 | |||
354 | /* | ||
355 | * The sibling maps must be set before turning the online map on for | ||
356 | * this cpu | ||
357 | */ | ||
358 | set_cpu_sibling_map(smp_processor_id()); | ||
359 | |||
360 | /* | ||
361 | * We need to hold call_lock, so there is no inconsistency | ||
362 | * between the time smp_call_function() determines number of | ||
363 | * IPI recipients, and the time when the determination is made | ||
364 | * for which cpus receive the IPI in genapic_flat.c. Holding this | ||
365 | * lock helps us to not include this cpu in a currently in progress | ||
366 | * smp_call_function(). | ||
367 | */ | ||
368 | lock_ipi_call_lock(); | ||
369 | spin_lock(&vector_lock); | ||
370 | |||
371 | /* Setup the per cpu irq handling data structures */ | ||
372 | __setup_vector_irq(smp_processor_id()); | ||
373 | /* | ||
374 | * Allow the master to continue. | ||
375 | */ | ||
376 | cpu_set(smp_processor_id(), cpu_online_map); | ||
377 | per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; | ||
378 | spin_unlock(&vector_lock); | ||
379 | |||
380 | unlock_ipi_call_lock(); | ||
381 | |||
382 | setup_secondary_clock(); | ||
383 | |||
384 | cpu_idle(); | ||
385 | } | ||
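
Note the ordering enforced here: the sibling maps and per-cpu vectors are set up before this cpu appears in cpu_online_map, so any observer that sees the CPU online also sees its initialized state. A user-space sketch of the same publish-after-init pattern, using C11 acquire/release in place of the kernel's locks:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static int sibling_data;                /* stands in for the sibling maps */
    static atomic_int online;               /* stands in for cpu_online_map */

    static void *secondary(void *arg)
    {
            sibling_data = 42;              /* initialize state first ... */
            atomic_store_explicit(&online, 1, memory_order_release);
            return NULL;                    /* ... then publish "online" */
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, secondary, NULL);
            while (!atomic_load_explicit(&online, memory_order_acquire))
                    ;                       /* cpu_relax() stand-in */
            /* Acquire pairs with release: sibling_data is visible here. */
            printf("sibling_data = %d\n", sibling_data);
            pthread_join(t, NULL);
            return 0;
    }
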
386 | |||
387 | extern volatile unsigned long init_rsp; | ||
388 | extern void (*initial_code)(void); | ||
389 | |||
390 | #ifdef APIC_DEBUG | ||
391 | static void inquire_remote_apic(int apicid) | ||
392 | { | ||
393 | unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; | ||
394 | char *names[] = { "ID", "VERSION", "SPIV" }; | ||
395 | int timeout; | ||
396 | u32 status; | ||
397 | |||
398 | printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid); | ||
399 | |||
400 | for (i = 0; i < ARRAY_SIZE(regs); i++) { | ||
401 | printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]); | ||
402 | |||
403 | /* | ||
404 | * Wait for idle. | ||
405 | */ | ||
406 | status = safe_apic_wait_icr_idle(); | ||
407 | if (status) | ||
408 | printk(KERN_CONT | ||
409 | "a previous APIC delivery may have failed\n"); | ||
410 | |||
411 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid)); | ||
412 | apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]); | ||
413 | |||
414 | timeout = 0; | ||
415 | do { | ||
416 | udelay(100); | ||
417 | status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK; | ||
418 | } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000); | ||
419 | |||
420 | switch (status) { | ||
421 | case APIC_ICR_RR_VALID: | ||
422 | status = apic_read(APIC_RRR); | ||
423 | printk(KERN_CONT "%08x\n", status); | ||
424 | break; | ||
425 | default: | ||
426 | printk(KERN_CONT "failed\n"); | ||
427 | } | ||
428 | } | ||
429 | } | ||
430 | #endif | ||
431 | |||
432 | /* | ||
433 | * Kick the secondary to wake up. | ||
434 | */ | ||
435 | static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip) | ||
436 | { | ||
437 | unsigned long send_status, accept_status = 0; | ||
438 | int maxlvt, num_starts, j; | ||
439 | |||
440 | Dprintk("Asserting INIT.\n"); | ||
441 | |||
442 | /* | ||
443 | * Turn INIT on target chip | ||
444 | */ | ||
445 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); | ||
446 | |||
447 | /* | ||
448 | * Send IPI | ||
449 | */ | ||
450 | apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT | ||
451 | | APIC_DM_INIT); | ||
452 | |||
453 | Dprintk("Waiting for send to finish...\n"); | ||
454 | send_status = safe_apic_wait_icr_idle(); | ||
455 | |||
456 | mdelay(10); | ||
457 | |||
458 | Dprintk("Deasserting INIT.\n"); | ||
459 | |||
460 | /* Target chip */ | ||
461 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); | ||
462 | |||
463 | /* Send IPI */ | ||
464 | apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT); | ||
465 | |||
466 | Dprintk("Waiting for send to finish...\n"); | ||
467 | send_status = safe_apic_wait_icr_idle(); | ||
468 | |||
469 | mb(); | ||
470 | atomic_set(&init_deasserted, 1); | ||
471 | |||
472 | num_starts = 2; | ||
473 | |||
474 | /* | ||
475 | * Run STARTUP IPI loop. | ||
476 | */ | ||
477 | Dprintk("#startup loops: %d.\n", num_starts); | ||
478 | |||
479 | maxlvt = lapic_get_maxlvt(); | ||
480 | |||
481 | for (j = 1; j <= num_starts; j++) { | ||
482 | Dprintk("Sending STARTUP #%d.\n",j); | ||
483 | apic_write(APIC_ESR, 0); | ||
484 | apic_read(APIC_ESR); | ||
485 | Dprintk("After apic_write.\n"); | ||
486 | |||
487 | /* | ||
488 | * STARTUP IPI | ||
489 | */ | ||
490 | |||
491 | /* Target chip */ | ||
492 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); | ||
493 | |||
494 | /* Boot on the stack */ | ||
495 | /* Kick the second */ | ||
496 | apic_write(APIC_ICR, APIC_DM_STARTUP | (start_rip >> 12)); | ||
497 | |||
498 | /* | ||
499 | * Give the other CPU some time to accept the IPI. | ||
500 | */ | ||
501 | udelay(300); | ||
502 | |||
503 | Dprintk("Startup point 1.\n"); | ||
504 | |||
505 | Dprintk("Waiting for send to finish...\n"); | ||
506 | send_status = safe_apic_wait_icr_idle(); | ||
507 | |||
508 | /* | ||
509 | * Give the other CPU some time to accept the IPI. | ||
510 | */ | ||
511 | udelay(200); | ||
512 | /* | ||
513 | * Due to the Pentium erratum 3AP. | ||
514 | */ | ||
515 | if (maxlvt > 3) { | ||
516 | apic_write(APIC_ESR, 0); | ||
517 | } | ||
518 | accept_status = (apic_read(APIC_ESR) & 0xEF); | ||
519 | if (send_status || accept_status) | ||
520 | break; | ||
521 | } | ||
522 | Dprintk("After Startup.\n"); | ||
523 | |||
524 | if (send_status) | ||
525 | printk(KERN_ERR "APIC never delivered???\n"); | ||
526 | if (accept_status) | ||
527 | printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status); | ||
528 | |||
529 | return (send_status | accept_status); | ||
530 | } | ||
531 | |||
532 | struct create_idle { | ||
533 | struct work_struct work; | ||
534 | struct task_struct *idle; | ||
535 | struct completion done; | ||
536 | int cpu; | ||
537 | }; | ||
538 | |||
539 | static void __cpuinit do_fork_idle(struct work_struct *work) | ||
540 | { | ||
541 | struct create_idle *c_idle = | ||
542 | container_of(work, struct create_idle, work); | ||
543 | |||
544 | c_idle->idle = fork_idle(c_idle->cpu); | ||
545 | complete(&c_idle->done); | ||
546 | } | ||
547 | |||
548 | /* | ||
549 | * Boot one CPU. | ||
550 | */ | ||
551 | static int __cpuinit do_boot_cpu(int cpu, int apicid) | ||
552 | { | ||
553 | unsigned long boot_error; | ||
554 | int timeout; | ||
555 | unsigned long start_rip; | ||
556 | struct create_idle c_idle = { | ||
557 | .cpu = cpu, | ||
558 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), | ||
559 | }; | ||
560 | INIT_WORK(&c_idle.work, do_fork_idle); | ||
561 | |||
562 | /* allocate memory for the GDTs of secondary cpus; hotplug is taken into account */ | ||
563 | if (!cpu_gdt_descr[cpu].address && | ||
564 | !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) { | ||
565 | printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu); | ||
566 | return -1; | ||
567 | } | ||
568 | |||
569 | /* Allocate node local memory for AP pdas */ | ||
570 | if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) { | ||
571 | struct x8664_pda *newpda, *pda; | ||
572 | int node = cpu_to_node(cpu); | ||
573 | pda = cpu_pda(cpu); | ||
574 | newpda = kmalloc_node(sizeof (struct x8664_pda), GFP_ATOMIC, | ||
575 | node); | ||
576 | if (newpda) { | ||
577 | memcpy(newpda, pda, sizeof (struct x8664_pda)); | ||
578 | cpu_pda(cpu) = newpda; | ||
579 | } else | ||
580 | printk(KERN_ERR | ||
581 | "Could not allocate node local PDA for CPU %d on node %d\n", | ||
582 | cpu, node); | ||
583 | } | ||
584 | |||
585 | alternatives_smp_switch(1); | ||
586 | |||
587 | c_idle.idle = get_idle_for_cpu(cpu); | ||
588 | |||
589 | if (c_idle.idle) { | ||
590 | c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *) | ||
591 | (THREAD_SIZE + task_stack_page(c_idle.idle))) - 1); | ||
592 | init_idle(c_idle.idle, cpu); | ||
593 | goto do_rest; | ||
594 | } | ||
595 | |||
596 | /* | ||
597 | * During the cold boot process, the keventd thread is not spun up yet. | ||
598 | * When we do cpu hot-add, we create idle threads on the fly, and they | ||
599 | * should not acquire any attributes from the calling context. Hence the | ||
600 | * clean way to create kernel threads is to do that from keventd(). | ||
601 | * We check current_is_keventd() because the ACPI notifier also queues | ||
602 | * work to keventd(), and if the caller were already running in the | ||
603 | * context of keventd(), we would end up locking up the keventd | ||
604 | * thread. | ||
605 | */ | ||
606 | if (!keventd_up() || current_is_keventd()) | ||
607 | c_idle.work.func(&c_idle.work); | ||
608 | else { | ||
609 | schedule_work(&c_idle.work); | ||
610 | wait_for_completion(&c_idle.done); | ||
611 | } | ||
612 | |||
613 | if (IS_ERR(c_idle.idle)) { | ||
614 | printk("failed fork for CPU %d\n", cpu); | ||
615 | return PTR_ERR(c_idle.idle); | ||
616 | } | ||
617 | |||
618 | set_idle_for_cpu(cpu, c_idle.idle); | ||
619 | |||
620 | do_rest: | ||
621 | |||
622 | cpu_pda(cpu)->pcurrent = c_idle.idle; | ||
623 | |||
624 | start_rip = setup_trampoline(); | ||
625 | |||
626 | init_rsp = c_idle.idle->thread.sp; | ||
627 | load_sp0(&per_cpu(init_tss, cpu), &c_idle.idle->thread); | ||
628 | initial_code = start_secondary; | ||
629 | clear_tsk_thread_flag(c_idle.idle, TIF_FORK); | ||
630 | |||
631 | printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu, | ||
632 | cpus_weight(cpu_present_map), | ||
633 | apicid); | ||
634 | |||
635 | /* | ||
636 | * This grunge runs the startup process for | ||
637 | * the targeted processor. | ||
638 | */ | ||
639 | |||
640 | atomic_set(&init_deasserted, 0); | ||
641 | |||
642 | Dprintk("Setting warm reset code and vector.\n"); | ||
643 | |||
644 | CMOS_WRITE(0xa, 0xf); | ||
645 | local_flush_tlb(); | ||
646 | Dprintk("1.\n"); | ||
647 | *((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4; | ||
648 | Dprintk("2.\n"); | ||
649 | *((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf; | ||
650 | Dprintk("3.\n"); | ||
651 | |||
652 | /* | ||
653 | * Be paranoid about clearing APIC errors. | ||
654 | */ | ||
655 | apic_write(APIC_ESR, 0); | ||
656 | apic_read(APIC_ESR); | ||
657 | |||
658 | /* | ||
659 | * Status is now clean | ||
660 | */ | ||
661 | boot_error = 0; | ||
662 | |||
663 | /* | ||
664 | * Starting actual IPI sequence... | ||
665 | */ | ||
666 | boot_error = wakeup_secondary_via_INIT(apicid, start_rip); | ||
667 | |||
668 | if (!boot_error) { | ||
669 | /* | ||
670 | * allow APs to start initializing. | ||
671 | */ | ||
672 | Dprintk("Before Callout %d.\n", cpu); | ||
673 | cpu_set(cpu, cpu_callout_map); | ||
674 | Dprintk("After Callout %d.\n", cpu); | ||
675 | |||
676 | /* | ||
677 | * Wait 5s total for a response | ||
678 | */ | ||
679 | for (timeout = 0; timeout < 50000; timeout++) { | ||
680 | if (cpu_isset(cpu, cpu_callin_map)) | ||
681 | break; /* It has booted */ | ||
682 | udelay(100); | ||
683 | } | ||
684 | |||
685 | if (cpu_isset(cpu, cpu_callin_map)) { | ||
686 | /* number CPUs logically, starting from 1 (BSP is 0) */ | ||
687 | Dprintk("CPU has booted.\n"); | ||
688 | } else { | ||
689 | boot_error = 1; | ||
690 | if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE)) | ||
691 | == 0xA5) | ||
692 | /* trampoline started but...? */ | ||
693 | printk("Stuck ??\n"); | ||
694 | else | ||
695 | /* trampoline code not run */ | ||
696 | printk("Not responding.\n"); | ||
697 | #ifdef APIC_DEBUG | ||
698 | inquire_remote_apic(apicid); | ||
699 | #endif | ||
700 | } | ||
701 | } | ||
702 | if (boot_error) { | ||
703 | cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */ | ||
704 | clear_bit(cpu, (unsigned long *)&cpu_initialized); /* was set by cpu_init() */ | ||
705 | clear_node_cpumask(cpu); /* was set by numa_add_cpu */ | ||
706 | cpu_clear(cpu, cpu_present_map); | ||
707 | cpu_clear(cpu, cpu_possible_map); | ||
708 | per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID; | ||
709 | return -EIO; | ||
710 | } | ||
711 | |||
712 | return 0; | ||
713 | } | ||
714 | |||
715 | cycles_t cacheflush_time; | ||
716 | unsigned long cache_decay_ticks; | ||
717 | |||
718 | /* | ||
719 | * Cleanup possible dangling ends... | ||
720 | */ | ||
721 | static __cpuinit void smp_cleanup_boot(void) | ||
722 | { | ||
723 | /* | ||
724 | * Paranoid: Set warm reset code and vector here back | ||
725 | * to default values. | ||
726 | */ | ||
727 | CMOS_WRITE(0, 0xf); | ||
728 | |||
729 | /* | ||
730 | * Reset trampoline flag | ||
731 | */ | ||
732 | *((volatile int *) phys_to_virt(0x467)) = 0; | ||
733 | } | ||
734 | |||
735 | /* | ||
736 | * Fall back to non SMP mode after errors. | ||
737 | * | ||
738 | * RED-PEN audit/test this more. I bet there is more state messed up here. | ||
739 | */ | ||
740 | static __init void disable_smp(void) | ||
741 | { | ||
742 | cpu_present_map = cpumask_of_cpu(0); | ||
743 | cpu_possible_map = cpumask_of_cpu(0); | ||
744 | if (smp_found_config) | ||
745 | phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id); | ||
746 | else | ||
747 | phys_cpu_present_map = physid_mask_of_physid(0); | ||
748 | cpu_set(0, per_cpu(cpu_sibling_map, 0)); | ||
749 | cpu_set(0, per_cpu(cpu_core_map, 0)); | ||
750 | } | ||
751 | |||
752 | #ifdef CONFIG_HOTPLUG_CPU | ||
753 | |||
754 | int additional_cpus __initdata = -1; | ||
755 | |||
756 | /* | ||
757 | * cpu_possible_map should be static: it cannot change as CPUs are | ||
758 | * onlined or offlined. The reason is that some modules allocate | ||
759 | * per-cpu data structures at init time and do not expect to do so | ||
760 | * dynamically on CPU arrival/departure. | ||
761 | * cpu_present_map, on the other hand, can change dynamically. | ||
762 | * When CPU hotplug is not compiled in, we fall back to the current | ||
763 | * behaviour: cpu_possible == cpu_present. | ||
764 | * - Ashok Raj | ||
765 | * | ||
766 | * Three ways to find out the number of additional hotplug CPUs: | ||
767 | * - If the BIOS specified disabled CPUs in ACPI/mptables, use that. | ||
768 | * - The user can override it with additional_cpus=NUM. | ||
769 | * - Otherwise don't reserve additional CPUs. | ||
770 | * We do this because additional CPUs waste a lot of memory. | ||
771 | * -AK | ||
772 | */ | ||
773 | __init void prefill_possible_map(void) | ||
774 | { | ||
775 | int i; | ||
776 | int possible; | ||
777 | |||
778 | if (additional_cpus == -1) { | ||
779 | if (disabled_cpus > 0) | ||
780 | additional_cpus = disabled_cpus; | ||
781 | else | ||
782 | additional_cpus = 0; | ||
783 | } | ||
784 | possible = num_processors + additional_cpus; | ||
785 | if (possible > NR_CPUS) | ||
786 | possible = NR_CPUS; | ||
787 | |||
788 | printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", | ||
789 | possible, | ||
790 | max_t(int, possible - num_processors, 0)); | ||
791 | |||
792 | for (i = 0; i < possible; i++) | ||
793 | cpu_set(i, cpu_possible_map); | ||
794 | } | ||
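A worked example of the sizing policy above, with hypothetical numbers (num_processors = 2, disabled_cpus = 2, NR_CPUS = 8):

    /*
     * no option:           additional_cpus = disabled_cpus = 2 -> possible = 4
     * additional_cpus=4:   possible = 2 + 4 = 6
     * additional_cpus=32:  2 + 32 > NR_CPUS, so possible is clamped to 8
     *
     * The printk above would then report, e.g.:
     *   SMP: Allowing 4 CPUs, 2 hotplug CPUs
     */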
795 | #endif | ||
796 | |||
797 | /* | ||
798 | * Various sanity checks. | ||
799 | */ | ||
800 | static int __init smp_sanity_check(unsigned max_cpus) | ||
801 | { | ||
802 | if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) { | ||
803 | printk("weird, boot CPU (#%d) not listed by the BIOS.\n", | ||
804 | hard_smp_processor_id()); | ||
805 | physid_set(hard_smp_processor_id(), phys_cpu_present_map); | ||
806 | } | ||
807 | |||
808 | /* | ||
809 | * If we couldn't find an SMP configuration at boot time, | ||
810 | * get out of here now! | ||
811 | */ | ||
812 | if (!smp_found_config) { | ||
813 | printk(KERN_NOTICE "SMP motherboard not detected.\n"); | ||
814 | disable_smp(); | ||
815 | if (APIC_init_uniprocessor()) | ||
816 | printk(KERN_NOTICE "Local APIC not detected." | ||
817 | " Using dummy APIC emulation.\n"); | ||
818 | return -1; | ||
819 | } | ||
820 | |||
821 | /* | ||
822 | * Should not be necessary because the MP table should list the boot | ||
823 | * CPU too, but we do it for the sake of robustness anyway. | ||
824 | */ | ||
825 | if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) { | ||
826 | printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n", | ||
827 | boot_cpu_id); | ||
828 | physid_set(hard_smp_processor_id(), phys_cpu_present_map); | ||
829 | } | ||
830 | |||
831 | /* | ||
832 | * If we couldn't find a local APIC, then get out of here now! | ||
833 | */ | ||
834 | if (!cpu_has_apic) { | ||
835 | printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", | ||
836 | boot_cpu_id); | ||
837 | printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n"); | ||
838 | nr_ioapics = 0; | ||
839 | return -1; | ||
840 | } | ||
841 | |||
842 | /* | ||
843 | * If SMP should be disabled, then really disable it! | ||
844 | */ | ||
845 | if (!max_cpus) { | ||
846 | printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n"); | ||
847 | nr_ioapics = 0; | ||
848 | return -1; | ||
849 | } | ||
850 | |||
851 | return 0; | ||
852 | } | ||
853 | |||
854 | static void __init smp_cpu_index_default(void) | ||
855 | { | ||
856 | int i; | ||
857 | struct cpuinfo_x86 *c; | ||
858 | |||
859 | for_each_cpu_mask(i, cpu_possible_map) { | ||
860 | c = &cpu_data(i); | ||
861 | /* mark all entries as hotpluggable by default */ | ||
862 | c->cpu_index = NR_CPUS; | ||
863 | } | ||
864 | } | ||
865 | |||
866 | /* | ||
867 | * Prepare for SMP bootup. The MP table or ACPI has been read | ||
868 | * earlier. Just do some sanity checking here and enable APIC mode. | ||
869 | */ | ||
870 | void __init smp_prepare_cpus(unsigned int max_cpus) | ||
871 | { | ||
872 | nmi_watchdog_default(); | ||
873 | smp_cpu_index_default(); | ||
874 | current_cpu_data = boot_cpu_data; | ||
875 | current_thread_info()->cpu = 0; /* needed? */ | ||
876 | set_cpu_sibling_map(0); | ||
877 | |||
878 | if (smp_sanity_check(max_cpus) < 0) { | ||
879 | printk(KERN_INFO "SMP disabled\n"); | ||
880 | disable_smp(); | ||
881 | return; | ||
882 | } | ||
883 | |||
884 | |||
885 | /* | ||
886 | * Switch from PIC to APIC mode. | ||
887 | */ | ||
888 | setup_local_APIC(); | ||
889 | |||
890 | /* | ||
891 | * Enable IO APIC before setting up error vector | ||
892 | */ | ||
893 | if (!skip_ioapic_setup && nr_ioapics) | ||
894 | enable_IO_APIC(); | ||
895 | end_local_APIC_setup(); | ||
896 | |||
897 | if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) { | ||
898 | panic("Boot APIC ID in local APIC unexpected (%d vs %d)", | ||
899 | GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id); | ||
900 | /* Or can we switch back to PIC here? */ | ||
901 | } | ||
902 | |||
903 | /* | ||
904 | * Now start the IO-APICs | ||
905 | */ | ||
906 | if (!skip_ioapic_setup && nr_ioapics) | ||
907 | setup_IO_APIC(); | ||
908 | else | ||
909 | nr_ioapics = 0; | ||
910 | |||
911 | /* | ||
912 | * Set up local APIC timer on boot CPU. | ||
913 | */ | ||
914 | |||
915 | setup_boot_clock(); | ||
916 | } | ||
917 | |||
918 | /* | ||
919 | * Early setup to make printk work. | ||
920 | */ | ||
921 | void __init smp_prepare_boot_cpu(void) | ||
922 | { | ||
923 | int me = smp_processor_id(); | ||
924 | /* already set me in cpu_online_map in boot_cpu_init() */ | ||
925 | cpu_set(me, cpu_callout_map); | ||
926 | per_cpu(cpu_state, me) = CPU_ONLINE; | ||
927 | } | ||
928 | |||
929 | /* | ||
930 | * Entry point to boot a CPU. | ||
931 | */ | ||
932 | int __cpuinit __cpu_up(unsigned int cpu) | ||
933 | { | ||
934 | int apicid = cpu_present_to_apicid(cpu); | ||
935 | unsigned long flags; | ||
936 | int err; | ||
937 | |||
938 | WARN_ON(irqs_disabled()); | ||
939 | |||
940 | Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu); | ||
941 | |||
942 | if (apicid == BAD_APICID || apicid == boot_cpu_id || | ||
943 | !physid_isset(apicid, phys_cpu_present_map)) { | ||
944 | printk("__cpu_up: bad cpu %d\n", cpu); | ||
945 | return -EINVAL; | ||
946 | } | ||
947 | |||
948 | /* | ||
949 | * Already booted CPU? | ||
950 | */ | ||
951 | if (cpu_isset(cpu, cpu_callin_map)) { | ||
952 | Dprintk("do_boot_cpu %d Already started\n", cpu); | ||
953 | return -ENOSYS; | ||
954 | } | ||
955 | |||
956 | /* | ||
957 | * Save current MTRR state in case it was changed since early boot | ||
958 | * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync: | ||
959 | */ | ||
960 | mtrr_save_state(); | ||
961 | |||
962 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; | ||
963 | /* Boot it! */ | ||
964 | err = do_boot_cpu(cpu, apicid); | ||
965 | if (err < 0) { | ||
966 | Dprintk("do_boot_cpu failed %d\n", err); | ||
967 | return err; | ||
968 | } | ||
969 | |||
970 | /* Unleash the CPU! */ | ||
971 | Dprintk("waiting for cpu %d\n", cpu); | ||
972 | |||
973 | /* | ||
974 | * Make sure and check TSC sync: | ||
975 | */ | ||
976 | local_irq_save(flags); | ||
977 | check_tsc_sync_source(cpu); | ||
978 | local_irq_restore(flags); | ||
979 | |||
980 | while (!cpu_isset(cpu, cpu_online_map)) | ||
981 | cpu_relax(); | ||
982 | err = 0; | ||
983 | |||
984 | return err; | ||
985 | } | ||
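Condensing the bring-up handshake that __cpu_up() and do_boot_cpu() implement (a sketch of the code above, not text from the patch):

    /*
     * BSP: __cpu_up(cpu)                     AP
     *   do_boot_cpu(cpu, apicid)
     *     setup_trampoline(), warm reset
     *     wakeup_secondary_via_INIT()  --->  trampoline -> start_secondary()
     *     poll cpu_callin_map (<= 5s)  <---  AP sets itself in cpu_callin_map
     *   check_tsc_sync_source(cpu)     <-->  AP-side TSC sync check
     *   spin until cpu_online_map set  <---  AP marks itself online
     */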
986 | |||
987 | /* | ||
988 | * Finish the SMP boot. | ||
989 | */ | ||
990 | void __init smp_cpus_done(unsigned int max_cpus) | ||
991 | { | ||
992 | smp_cleanup_boot(); | ||
993 | setup_ioapic_dest(); | ||
994 | check_nmi_watchdog(); | ||
995 | } | ||
996 | |||
997 | #ifdef CONFIG_HOTPLUG_CPU | ||
998 | |||
999 | static void remove_siblinginfo(int cpu) | ||
1000 | { | ||
1001 | int sibling; | ||
1002 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
1003 | |||
1004 | for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) { | ||
1005 | cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); | ||
1006 | /* | ||
1007 | * last thread sibling in this cpu core going down | ||
1008 | */ | ||
1009 | if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) | ||
1010 | cpu_data(sibling).booted_cores--; | ||
1011 | } | ||
1012 | |||
1013 | for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) | ||
1014 | cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); | ||
1015 | cpus_clear(per_cpu(cpu_sibling_map, cpu)); | ||
1016 | cpus_clear(per_cpu(cpu_core_map, cpu)); | ||
1017 | c->phys_proc_id = 0; | ||
1018 | c->cpu_core_id = 0; | ||
1019 | cpu_clear(cpu, cpu_sibling_setup_map); | ||
1020 | } | ||
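A hypothetical walk-through of the booted_cores accounting above, for an HT pair {2,3} sharing one core:

    /*
     * offline CPU 3: sibling_map(3) is still {2,3}, weight 2 -> no decrement
     * offline CPU 2: sibling_map(2) has shrunk to {2}, weight 1
     *                -> booted_cores-- for the surviving core-map entries
     */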
1021 | |||
1022 | static void __ref remove_cpu_from_maps(void) | ||
1023 | { | ||
1024 | int cpu = smp_processor_id(); | ||
1025 | |||
1026 | cpu_clear(cpu, cpu_callout_map); | ||
1027 | cpu_clear(cpu, cpu_callin_map); | ||
1028 | clear_bit(cpu, (unsigned long *)&cpu_initialized); /* was set by cpu_init() */ | ||
1029 | clear_node_cpumask(cpu); | ||
1030 | } | ||
1031 | |||
1032 | int __cpu_disable(void) | ||
1033 | { | ||
1034 | int cpu = smp_processor_id(); | ||
1035 | |||
1036 | /* | ||
1037 | * Perhaps use cpufreq to drop frequency, but that could go | ||
1038 | * into generic code. | ||
1039 | * | ||
1040 | * We won't take down the boot processor on i386, because some | ||
1041 | * interrupts can only be serviced by the BSP -- especially | ||
1042 | * if we're not using an IOAPIC. -zwane | ||
1043 | */ | ||
1044 | if (cpu == 0) | ||
1045 | return -EBUSY; | ||
1046 | |||
1047 | if (nmi_watchdog == NMI_LOCAL_APIC) | ||
1048 | stop_apic_nmi_watchdog(NULL); | ||
1049 | clear_local_APIC(); | ||
1050 | |||
1051 | /* | ||
1052 | * HACK: | ||
1053 | * Allow any queued timer interrupts to get serviced. | ||
1054 | * This is only a temporary solution until we clean up | ||
1055 | * fixup_irqs as is done for IA64. | ||
1056 | */ | ||
1057 | local_irq_enable(); | ||
1058 | mdelay(1); | ||
1059 | |||
1060 | local_irq_disable(); | ||
1061 | remove_siblinginfo(cpu); | ||
1062 | |||
1063 | spin_lock(&vector_lock); | ||
1064 | /* It's now safe to remove this processor from the online map */ | ||
1065 | cpu_clear(cpu, cpu_online_map); | ||
1066 | spin_unlock(&vector_lock); | ||
1067 | remove_cpu_from_maps(); | ||
1068 | fixup_irqs(cpu_online_map); | ||
1069 | return 0; | ||
1070 | } | ||
1071 | |||
1072 | void __cpu_die(unsigned int cpu) | ||
1073 | { | ||
1074 | /* We don't do anything here: idle task is faking death itself. */ | ||
1075 | unsigned int i; | ||
1076 | |||
1077 | for (i = 0; i < 10; i++) { | ||
1078 | /* They ack this in play_dead by setting CPU_DEAD */ | ||
1079 | if (per_cpu(cpu_state, cpu) == CPU_DEAD) { | ||
1080 | printk("CPU %d is now offline\n", cpu); | ||
1081 | if (num_online_cpus() == 1) | ||
1082 | alternatives_smp_switch(0); | ||
1083 | return; | ||
1084 | } | ||
1085 | msleep(100); | ||
1086 | } | ||
1087 | printk(KERN_ERR "CPU %u didn't die...\n", cpu); | ||
1088 | } | ||
1089 | |||
1090 | static __init int setup_additional_cpus(char *s) | ||
1091 | { | ||
1092 | return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL; | ||
1093 | } | ||
1094 | early_param("additional_cpus", setup_additional_cpus); | ||
1095 | |||
1096 | #else /* ... !CONFIG_HOTPLUG_CPU */ | ||
1097 | |||
1098 | int __cpu_disable(void) | ||
1099 | { | ||
1100 | return -ENOSYS; | ||
1101 | } | ||
1102 | |||
1103 | void __cpu_die(unsigned int cpu) | ||
1104 | { | ||
1105 | /* We said "no" in __cpu_disable */ | ||
1106 | BUG(); | ||
1107 | } | ||
1108 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c new file mode 100644 index 000000000000..3449064d141a --- /dev/null +++ b/arch/x86/kernel/smpcommon.c | |||
@@ -0,0 +1,83 @@ | |||
1 | /* | ||
2 | * SMP stuff which is common to all sub-architectures. | ||
3 | */ | ||
4 | #include <linux/module.h> | ||
5 | #include <asm/smp.h> | ||
6 | |||
7 | #ifdef CONFIG_X86_32 | ||
8 | DEFINE_PER_CPU(unsigned long, this_cpu_off); | ||
9 | EXPORT_PER_CPU_SYMBOL(this_cpu_off); | ||
10 | |||
11 | /* Initialize the CPU's GDT. This is either the boot CPU doing it for | ||
12 | itself (still using the master per-cpu area), or a CPU doing it for a | ||
13 | secondary which will soon come up. */ | ||
14 | __cpuinit void init_gdt(int cpu) | ||
15 | { | ||
16 | struct desc_struct *gdt = get_cpu_gdt_table(cpu); | ||
17 | |||
18 | pack_descriptor(&gdt[GDT_ENTRY_PERCPU], | ||
19 | __per_cpu_offset[cpu], 0xFFFFF, | ||
20 | 0x2 | DESCTYPE_S, 0x8); | ||
21 | |||
22 | gdt[GDT_ENTRY_PERCPU].s = 1; | ||
23 | |||
24 | per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu]; | ||
25 | per_cpu(cpu_number, cpu) = cpu; | ||
26 | } | ||
27 | #endif | ||
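Decoding the pack_descriptor() arguments above (a reading of the x86 descriptor format, not text from the patch): the entry is a flat, writable data segment whose base is this CPU's per-cpu offset:

    /*
     * base  = __per_cpu_offset[cpu]  segment-relative address 0 reaches this
     *                                CPU's per-cpu area (the segment is
     *                                loaded into %fs on 32-bit)
     * limit = 0xFFFFF, flags = 0x8   page granularity, i.e. a 4 GiB span
     * type  = 0x2 | DESCTYPE_S       writable data segment
     */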
28 | |||
29 | /** | ||
30 | * smp_call_function(): Run a function on all other CPUs. | ||
31 | * @func: The function to run. This must be fast and non-blocking. | ||
32 | * @info: An arbitrary pointer to pass to the function. | ||
33 | * @nonatomic: Unused. | ||
34 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
35 | * | ||
36 | * Returns 0 on success, else a negative status code. | ||
37 | * | ||
38 | * If @wait is true, then returns once @func has returned; otherwise | ||
39 | * it returns just before the target cpu calls @func. | ||
40 | * | ||
41 | * You must not call this function with disabled interrupts or from a | ||
42 | * hardware interrupt handler or from a bottom half handler. | ||
43 | */ | ||
44 | int smp_call_function(void (*func) (void *info), void *info, int nonatomic, | ||
45 | int wait) | ||
46 | { | ||
47 | return smp_call_function_mask(cpu_online_map, func, info, wait); | ||
48 | } | ||
49 | EXPORT_SYMBOL(smp_call_function); | ||
50 | |||
51 | /** | ||
52 | * smp_call_function_single - Run a function on a specific CPU | ||
53 | * @cpu: The target CPU; if it is the calling CPU, @func runs locally. | ||
54 | * @func: The function to run. This must be fast and non-blocking. | ||
55 | * @info: An arbitrary pointer to pass to the function. | ||
56 | * @nonatomic: Unused. | ||
57 | * @wait: If true, wait until the function has completed on the target CPU. | ||
58 | * | ||
59 | * Returns 0 on success, else a negative status code. | ||
60 | * | ||
61 | * If @wait is true, then returns once @func has returned; otherwise | ||
62 | * it returns just before the target cpu calls @func. | ||
63 | */ | ||
64 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
65 | int nonatomic, int wait) | ||
66 | { | ||
67 | /* prevent preemption and reschedule on another processor */ | ||
68 | int ret; | ||
69 | int me = get_cpu(); | ||
70 | if (cpu == me) { | ||
71 | local_irq_disable(); | ||
72 | func(info); | ||
73 | local_irq_enable(); | ||
74 | put_cpu(); | ||
75 | return 0; | ||
76 | } | ||
77 | |||
78 | ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); | ||
79 | |||
80 | put_cpu(); | ||
81 | return ret; | ||
82 | } | ||
83 | EXPORT_SYMBOL(smp_call_function_single); | ||
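A minimal usage sketch of the two exported helpers (the callback, counter, and CPU number are illustrative; call from process context with interrupts enabled):

    static void bump(void *info)
    {
        atomic_inc(info);        /* must be fast and non-blocking */
    }

    static void example(void)
    {
        static atomic_t hits = ATOMIC_INIT(0);

        /* run on every other online CPU and wait for completion */
        smp_call_function(bump, &hits, 0, 1);

        /* run on CPU 1 (assuming it is online), also waiting */
        smp_call_function_single(1, bump, &hits, 0, 1);
    }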
diff --git a/arch/x86/kernel/smpcommon_32.c b/arch/x86/kernel/smpcommon_32.c index 8bc38af29aef..8b137891791f 100644 --- a/arch/x86/kernel/smpcommon_32.c +++ b/arch/x86/kernel/smpcommon_32.c | |||
@@ -1,82 +1 @@ | |||
1 | /* | ||
2 | * SMP stuff which is common to all sub-architectures. | ||
3 | */ | ||
4 | #include <linux/module.h> | ||
5 | #include <asm/smp.h> | ||
6 | |||
7 | DEFINE_PER_CPU(unsigned long, this_cpu_off); | ||
8 | EXPORT_PER_CPU_SYMBOL(this_cpu_off); | ||
9 | |||
10 | /* Initialize the CPU's GDT. This is either the boot CPU doing itself | ||
11 | (still using the master per-cpu area), or a CPU doing it for a | ||
12 | secondary which will soon come up. */ | ||
13 | __cpuinit void init_gdt(int cpu) | ||
14 | { | ||
15 | struct desc_struct *gdt = get_cpu_gdt_table(cpu); | ||
16 | |||
17 | pack_descriptor(&gdt[GDT_ENTRY_PERCPU], | ||
18 | __per_cpu_offset[cpu], 0xFFFFF, | ||
19 | 0x2 | DESCTYPE_S, 0x8); | ||
20 | |||
21 | gdt[GDT_ENTRY_PERCPU].s = 1; | ||
22 | |||
23 | per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu]; | ||
24 | per_cpu(cpu_number, cpu) = cpu; | ||
25 | } | ||
26 | |||
27 | |||
28 | /** | ||
29 | * smp_call_function(): Run a function on all other CPUs. | ||
30 | * @func: The function to run. This must be fast and non-blocking. | ||
31 | * @info: An arbitrary pointer to pass to the function. | ||
32 | * @nonatomic: Unused. | ||
33 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
34 | * | ||
35 | * Returns 0 on success, else a negative status code. | ||
36 | * | ||
37 | * If @wait is true, then returns once @func has returned; otherwise | ||
38 | * it returns just before the target cpu calls @func. | ||
39 | * | ||
40 | * You must not call this function with disabled interrupts or from a | ||
41 | * hardware interrupt handler or from a bottom half handler. | ||
42 | */ | ||
43 | int smp_call_function(void (*func) (void *info), void *info, int nonatomic, | ||
44 | int wait) | ||
45 | { | ||
46 | return smp_call_function_mask(cpu_online_map, func, info, wait); | ||
47 | } | ||
48 | EXPORT_SYMBOL(smp_call_function); | ||
49 | |||
50 | /** | ||
51 | * smp_call_function_single - Run a function on a specific CPU | ||
52 | * @cpu: The target CPU. Cannot be the calling CPU. | ||
53 | * @func: The function to run. This must be fast and non-blocking. | ||
54 | * @info: An arbitrary pointer to pass to the function. | ||
55 | * @nonatomic: Unused. | ||
56 | * @wait: If true, wait until function has completed on other CPUs. | ||
57 | * | ||
58 | * Returns 0 on success, else a negative status code. | ||
59 | * | ||
60 | * If @wait is true, then returns once @func has returned; otherwise | ||
61 | * it returns just before the target cpu calls @func. | ||
62 | */ | ||
63 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
64 | int nonatomic, int wait) | ||
65 | { | ||
66 | /* prevent preemption and reschedule on another processor */ | ||
67 | int ret; | ||
68 | int me = get_cpu(); | ||
69 | if (cpu == me) { | ||
70 | local_irq_disable(); | ||
71 | func(info); | ||
72 | local_irq_enable(); | ||
73 | put_cpu(); | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); | ||
78 | |||
79 | put_cpu(); | ||
80 | return ret; | ||
81 | } | ||
82 | EXPORT_SYMBOL(smp_call_function_single); | ||
diff --git a/arch/x86/kernel/srat_32.c b/arch/x86/kernel/srat_32.c index b72e61359c36..70e4a374b4e8 100644 --- a/arch/x86/kernel/srat_32.c +++ b/arch/x86/kernel/srat_32.c | |||
@@ -277,14 +277,14 @@ int __init get_memcfg_from_srat(void) | |||
277 | rsdp_address = acpi_os_get_root_pointer(); | 277 | rsdp_address = acpi_os_get_root_pointer(); |
278 | if (!rsdp_address) { | 278 | if (!rsdp_address) { |
279 | printk("%s: System description tables not found\n", | 279 | printk("%s: System description tables not found\n", |
280 | __FUNCTION__); | 280 | __func__); |
281 | goto out_err; | 281 | goto out_err; |
282 | } | 282 | } |
283 | 283 | ||
284 | printk("%s: assigning address to rsdp\n", __FUNCTION__); | 284 | printk("%s: assigning address to rsdp\n", __func__); |
285 | rsdp = (struct acpi_table_rsdp *)(u32)rsdp_address; | 285 | rsdp = (struct acpi_table_rsdp *)(u32)rsdp_address; |
286 | if (!rsdp) { | 286 | if (!rsdp) { |
287 | printk("%s: Didn't find ACPI root!\n", __FUNCTION__); | 287 | printk("%s: Didn't find ACPI root!\n", __func__); |
288 | goto out_err; | 288 | goto out_err; |
289 | } | 289 | } |
290 | 290 | ||
@@ -292,7 +292,7 @@ int __init get_memcfg_from_srat(void) | |||
292 | rsdp->oem_id); | 292 | rsdp->oem_id); |
293 | 293 | ||
294 | if (strncmp(rsdp->signature, ACPI_SIG_RSDP,strlen(ACPI_SIG_RSDP))) { | 294 | if (strncmp(rsdp->signature, ACPI_SIG_RSDP,strlen(ACPI_SIG_RSDP))) { |
295 | printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __FUNCTION__); | 295 | printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __func__); |
296 | goto out_err; | 296 | goto out_err; |
297 | } | 297 | } |
298 | 298 | ||
@@ -302,7 +302,7 @@ int __init get_memcfg_from_srat(void) | |||
302 | if (!rsdt) { | 302 | if (!rsdt) { |
303 | printk(KERN_WARNING | 303 | printk(KERN_WARNING |
304 | "%s: ACPI: Invalid root system description tables (RSDT)\n", | 304 | "%s: ACPI: Invalid root system description tables (RSDT)\n", |
305 | __FUNCTION__); | 305 | __func__); |
306 | goto out_err; | 306 | goto out_err; |
307 | } | 307 | } |
308 | 308 | ||
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index 071ff4798236..92c20fee6781 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c | |||
@@ -148,7 +148,7 @@ static void write_debugctlmsr(struct task_struct *child, unsigned long val) | |||
148 | if (child != current) | 148 | if (child != current) |
149 | return; | 149 | return; |
150 | 150 | ||
151 | wrmsrl(MSR_IA32_DEBUGCTLMSR, val); | 151 | update_debugctlmsr(val); |
152 | } | 152 | } |
153 | 153 | ||
154 | /* | 154 | /* |
diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c index 72f463401592..6878a9c2df5d 100644 --- a/arch/x86/kernel/summit_32.c +++ b/arch/x86/kernel/summit_32.c | |||
@@ -35,43 +35,47 @@ static struct rio_table_hdr *rio_table_hdr __initdata; | |||
35 | static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata; | 35 | static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata; |
36 | static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata; | 36 | static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata; |
37 | 37 | ||
38 | static int mp_bus_id_to_node[MAX_MP_BUSSES] __initdata; | ||
39 | |||
38 | static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) | 40 | static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) |
39 | { | 41 | { |
40 | int twister = 0, node = 0; | 42 | int twister = 0, node = 0; |
41 | int i, bus, num_buses; | 43 | int i, bus, num_buses; |
42 | 44 | ||
43 | for(i = 0; i < rio_table_hdr->num_rio_dev; i++){ | 45 | for (i = 0; i < rio_table_hdr->num_rio_dev; i++) { |
44 | if (rio_devs[i]->node_id == rio_devs[wpeg_num]->owner_id){ | 46 | if (rio_devs[i]->node_id == rio_devs[wpeg_num]->owner_id) { |
45 | twister = rio_devs[i]->owner_id; | 47 | twister = rio_devs[i]->owner_id; |
46 | break; | 48 | break; |
47 | } | 49 | } |
48 | } | 50 | } |
49 | if (i == rio_table_hdr->num_rio_dev){ | 51 | if (i == rio_table_hdr->num_rio_dev) { |
50 | printk(KERN_ERR "%s: Couldn't find owner Cyclone for Winnipeg!\n", __FUNCTION__); | 52 | printk(KERN_ERR "%s: Couldn't find owner Cyclone for Winnipeg!\n", __func__); |
51 | return last_bus; | 53 | return last_bus; |
52 | } | 54 | } |
53 | 55 | ||
54 | for(i = 0; i < rio_table_hdr->num_scal_dev; i++){ | 56 | for (i = 0; i < rio_table_hdr->num_scal_dev; i++) { |
55 | if (scal_devs[i]->node_id == twister){ | 57 | if (scal_devs[i]->node_id == twister) { |
56 | node = scal_devs[i]->node_id; | 58 | node = scal_devs[i]->node_id; |
57 | break; | 59 | break; |
58 | } | 60 | } |
59 | } | 61 | } |
60 | if (i == rio_table_hdr->num_scal_dev){ | 62 | if (i == rio_table_hdr->num_scal_dev) { |
61 | printk(KERN_ERR "%s: Couldn't find owner Twister for Cyclone!\n", __FUNCTION__); | 63 | printk(KERN_ERR "%s: Couldn't find owner Twister for Cyclone!\n", __func__); |
62 | return last_bus; | 64 | return last_bus; |
63 | } | 65 | } |
64 | 66 | ||
65 | switch (rio_devs[wpeg_num]->type){ | 67 | switch (rio_devs[wpeg_num]->type) { |
66 | case CompatWPEG: | 68 | case CompatWPEG: |
67 | /* The Compatibility Winnipeg controls the 2 legacy buses, | 69 | /* |
70 | * The Compatibility Winnipeg controls the 2 legacy buses, | ||
68 | * the 66MHz PCI bus [2 slots] and the 2 "extra" buses in case | 71 | * the 66MHz PCI bus [2 slots] and the 2 "extra" buses in case |
69 | * a PCI-PCI bridge card is used in either slot: total 5 buses. | 72 | * a PCI-PCI bridge card is used in either slot: total 5 buses. |
70 | */ | 73 | */ |
71 | num_buses = 5; | 74 | num_buses = 5; |
72 | break; | 75 | break; |
73 | case AltWPEG: | 76 | case AltWPEG: |
74 | /* The Alternate Winnipeg controls the 2 133MHz buses [1 slot | 77 | /* |
78 | * The Alternate Winnipeg controls the 2 133MHz buses [1 slot | ||
75 | * each], their 2 "extra" buses, the 100MHz bus [2 slots] and | 79 | * each], their 2 "extra" buses, the 100MHz bus [2 slots] and |
76 | * the "extra" buses for each of those slots: total 7 buses. | 80 | * the "extra" buses for each of those slots: total 7 buses. |
77 | */ | 81 | */ |
@@ -79,17 +83,18 @@ static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) | |||
79 | break; | 83 | break; |
80 | case LookOutAWPEG: | 84 | case LookOutAWPEG: |
81 | case LookOutBWPEG: | 85 | case LookOutBWPEG: |
82 | /* A Lookout Winnipeg controls 3 100MHz buses [2 slots each] | 86 | /* |
87 | * A Lookout Winnipeg controls 3 100MHz buses [2 slots each] | ||
83 | * & the "extra" buses for each of those slots: total 9 buses. | 88 | * & the "extra" buses for each of those slots: total 9 buses. |
84 | */ | 89 | */ |
85 | num_buses = 9; | 90 | num_buses = 9; |
86 | break; | 91 | break; |
87 | default: | 92 | default: |
88 | printk(KERN_INFO "%s: Unsupported Winnipeg type!\n", __FUNCTION__); | 93 | printk(KERN_INFO "%s: Unsupported Winnipeg type!\n", __func__); |
89 | return last_bus; | 94 | return last_bus; |
90 | } | 95 | } |
91 | 96 | ||
92 | for(bus = last_bus; bus < last_bus + num_buses; bus++) | 97 | for (bus = last_bus; bus < last_bus + num_buses; bus++) |
93 | mp_bus_id_to_node[bus] = node; | 98 | mp_bus_id_to_node[bus] = node; |
94 | return bus; | 99 | return bus; |
95 | } | 100 | } |
@@ -99,14 +104,14 @@ static int __init build_detail_arrays(void) | |||
99 | unsigned long ptr; | 104 | unsigned long ptr; |
100 | int i, scal_detail_size, rio_detail_size; | 105 | int i, scal_detail_size, rio_detail_size; |
101 | 106 | ||
102 | if (rio_table_hdr->num_scal_dev > MAX_NUMNODES){ | 107 | if (rio_table_hdr->num_scal_dev > MAX_NUMNODES) { |
103 | printk(KERN_WARNING "%s: MAX_NUMNODES too low! Defined as %d, but system has %d nodes.\n", __FUNCTION__, MAX_NUMNODES, rio_table_hdr->num_scal_dev); | 108 | printk(KERN_WARNING "%s: MAX_NUMNODES too low! Defined as %d, but system has %d nodes.\n", __func__, MAX_NUMNODES, rio_table_hdr->num_scal_dev); |
104 | return 0; | 109 | return 0; |
105 | } | 110 | } |
106 | 111 | ||
107 | switch (rio_table_hdr->version){ | 112 | switch (rio_table_hdr->version) { |
108 | default: | 113 | default: |
109 | printk(KERN_WARNING "%s: Invalid Rio Grande Table Version: %d\n", __FUNCTION__, rio_table_hdr->version); | 114 | printk(KERN_WARNING "%s: Invalid Rio Grande Table Version: %d\n", __func__, rio_table_hdr->version); |
110 | return 0; | 115 | return 0; |
111 | case 2: | 116 | case 2: |
112 | scal_detail_size = 11; | 117 | scal_detail_size = 11; |
@@ -119,10 +124,10 @@ static int __init build_detail_arrays(void) | |||
119 | } | 124 | } |
120 | 125 | ||
121 | ptr = (unsigned long)rio_table_hdr + 3; | 126 | ptr = (unsigned long)rio_table_hdr + 3; |
122 | for(i = 0; i < rio_table_hdr->num_scal_dev; i++, ptr += scal_detail_size) | 127 | for (i = 0; i < rio_table_hdr->num_scal_dev; i++, ptr += scal_detail_size) |
123 | scal_devs[i] = (struct scal_detail *)ptr; | 128 | scal_devs[i] = (struct scal_detail *)ptr; |
124 | 129 | ||
125 | for(i = 0; i < rio_table_hdr->num_rio_dev; i++, ptr += rio_detail_size) | 130 | for (i = 0; i < rio_table_hdr->num_rio_dev; i++, ptr += rio_detail_size) |
126 | rio_devs[i] = (struct rio_detail *)ptr; | 131 | rio_devs[i] = (struct rio_detail *)ptr; |
127 | 132 | ||
128 | return 1; | 133 | return 1; |
@@ -140,9 +145,9 @@ void __init setup_summit(void) | |||
140 | 145 | ||
141 | rio_table_hdr = NULL; | 146 | rio_table_hdr = NULL; |
142 | offset = 0x180; | 147 | offset = 0x180; |
143 | while (offset){ | 148 | while (offset) { |
144 | /* The block id is stored in the 2nd word */ | 149 | /* The block id is stored in the 2nd word */ |
145 | if (*((unsigned short *)(ptr + offset + 2)) == 0x4752){ | 150 | if (*((unsigned short *)(ptr + offset + 2)) == 0x4752) { |
146 | /* set the pointer past the offset & block id */ | 151 | /* set the pointer past the offset & block id */ |
147 | rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4); | 152 | rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4); |
148 | break; | 153 | break; |
@@ -150,8 +155,8 @@ void __init setup_summit(void) | |||
150 | /* The next offset is stored in the 1st word. 0 means no more */ | 155 | /* The next offset is stored in the 1st word. 0 means no more */ |
151 | offset = *((unsigned short *)(ptr + offset)); | 156 | offset = *((unsigned short *)(ptr + offset)); |
152 | } | 157 | } |
153 | if (!rio_table_hdr){ | 158 | if (!rio_table_hdr) { |
154 | printk(KERN_ERR "%s: Unable to locate Rio Grande Table in EBDA - bailing!\n", __FUNCTION__); | 159 | printk(KERN_ERR "%s: Unable to locate Rio Grande Table in EBDA - bailing!\n", __func__); |
155 | return; | 160 | return; |
156 | } | 161 | } |
157 | 162 | ||
@@ -161,8 +166,8 @@ void __init setup_summit(void) | |||
161 | /* The first Winnipeg we're looking for has an index of 0 */ | 166 | /* The first Winnipeg we're looking for has an index of 0 */ |
162 | next_wpeg = 0; | 167 | next_wpeg = 0; |
163 | do { | 168 | do { |
164 | for(i = 0; i < rio_table_hdr->num_rio_dev; i++){ | 169 | for (i = 0; i < rio_table_hdr->num_rio_dev; i++) { |
165 | if (is_WPEG(rio_devs[i]) && rio_devs[i]->WP_index == next_wpeg){ | 170 | if (is_WPEG(rio_devs[i]) && rio_devs[i]->WP_index == next_wpeg) { |
166 | /* It's the Winnipeg we're looking for! */ | 171 | /* It's the Winnipeg we're looking for! */ |
167 | next_bus = setup_pci_node_map_for_wpeg(i, next_bus); | 172 | next_bus = setup_pci_node_map_for_wpeg(i, next_bus); |
168 | next_wpeg++; | 173 | next_wpeg++; |
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c index 9d498c2f8eea..170d43c17487 100644 --- a/arch/x86/kernel/syscall_64.c +++ b/arch/x86/kernel/syscall_64.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* System call table for x86-64. */ | 1 | /* System call table for x86-64. */ |
2 | 2 | ||
3 | #include <linux/linkage.h> | 3 | #include <linux/linkage.h> |
4 | #include <linux/sys.h> | 4 | #include <linux/sys.h> |
@@ -7,20 +7,23 @@ | |||
7 | 7 | ||
8 | #define __NO_STUBS | 8 | #define __NO_STUBS |
9 | 9 | ||
10 | #define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ; | 10 | #define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ; |
11 | #undef _ASM_X86_64_UNISTD_H_ | 11 | #undef _ASM_X86_64_UNISTD_H_ |
12 | #include <asm/unistd_64.h> | 12 | #include <asm/unistd_64.h> |
13 | 13 | ||
14 | #undef __SYSCALL | 14 | #undef __SYSCALL |
15 | #define __SYSCALL(nr, sym) [ nr ] = sym, | 15 | #define __SYSCALL(nr, sym) [nr] = sym, |
16 | #undef _ASM_X86_64_UNISTD_H_ | 16 | #undef _ASM_X86_64_UNISTD_H_ |
17 | 17 | ||
18 | typedef void (*sys_call_ptr_t)(void); | 18 | typedef void (*sys_call_ptr_t)(void); |
19 | 19 | ||
20 | extern void sys_ni_syscall(void); | 20 | extern void sys_ni_syscall(void); |
21 | 21 | ||
22 | const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = { | 22 | const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = { |
23 | /* Smells like a like a compiler bug -- it doesn't work when the & below is removed. */ | 23 | /* |
24 | * Smells like a compiler bug -- it doesn't work | ||
25 | * when the & below is removed. | ||
26 | */ | ||
24 | [0 ... __NR_syscall_max] = &sys_ni_syscall, | 27 | [0 ... __NR_syscall_max] = &sys_ni_syscall, |
25 | #include <asm/unistd_64.h> | 28 | #include <asm/unistd_64.h> |
26 | }; | 29 | }; |
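The table above is filled by including unistd_64.h twice under two different definitions of __SYSCALL; a sketch of the expansion (the entry shown is illustrative):

    /*
     * unistd_64.h contains lines such as:
     *   __SYSCALL(__NR_read, sys_read)
     *
     * first include:   extern asmlinkage void sys_read(void);
     * second include (the #undef defeats the header guard):
     *   [__NR_read] = sys_read,
     * overriding the [0 ... __NR_syscall_max] = &sys_ni_syscall default
     * for every implemented entry.
     */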
diff --git a/arch/x86/kernel/test_nx.c b/arch/x86/kernel/test_nx.c index 10b8a6f69f84..787a5e499dd1 100644 --- a/arch/x86/kernel/test_nx.c +++ b/arch/x86/kernel/test_nx.c | |||
@@ -11,6 +11,8 @@ | |||
11 | */ | 11 | */ |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/sort.h> | 13 | #include <linux/sort.h> |
14 | #include <linux/slab.h> | ||
15 | |||
14 | #include <asm/uaccess.h> | 16 | #include <asm/uaccess.h> |
15 | #include <asm/asm.h> | 17 | #include <asm/asm.h> |
16 | 18 | ||
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c new file mode 100644 index 000000000000..9bb2363851af --- /dev/null +++ b/arch/x86/kernel/tlb_32.c | |||
@@ -0,0 +1,243 @@ | |||
1 | #include <linux/spinlock.h> | ||
2 | #include <linux/cpu.h> | ||
3 | #include <linux/interrupt.h> | ||
4 | |||
5 | #include <asm/tlbflush.h> | ||
6 | |||
7 | DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) | ||
8 | ____cacheline_aligned = { &init_mm, 0, }; | ||
9 | |||
10 | /* must come after the send_IPI functions above for inlining */ | ||
11 | #include <mach_ipi.h> | ||
12 | |||
13 | /* | ||
14 | * Smarter SMP flushing macros. | ||
15 | * c/o Linus Torvalds. | ||
16 | * | ||
17 | * These mean you can really definitely utterly forget about | ||
18 | * writing to user space from interrupts. (It's not allowed anyway.) | ||
19 | * | ||
20 | * Optimizations Manfred Spraul <manfred@colorfullife.com> | ||
21 | */ | ||
22 | |||
23 | static cpumask_t flush_cpumask; | ||
24 | static struct mm_struct *flush_mm; | ||
25 | static unsigned long flush_va; | ||
26 | static DEFINE_SPINLOCK(tlbstate_lock); | ||
27 | |||
28 | /* | ||
29 | * We cannot call mmdrop() because we are in interrupt context; | ||
30 | * instead, update mm->cpu_vm_mask. | ||
31 | * | ||
32 | * We need to reload %cr3 since the page tables may be going | ||
33 | * away from under us. | ||
34 | */ | ||
35 | void leave_mm(int cpu) | ||
36 | { | ||
37 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) | ||
38 | BUG(); | ||
39 | cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask); | ||
40 | load_cr3(swapper_pg_dir); | ||
41 | } | ||
42 | EXPORT_SYMBOL_GPL(leave_mm); | ||
43 | |||
44 | /* | ||
45 | * | ||
46 | * The flush IPI assumes that a thread switch happens in this order: | ||
47 | * [cpu0: the cpu that switches] | ||
48 | * 1) switch_mm() either 1a) or 1b) | ||
49 | * 1a) thread switch to a different mm | ||
50 | * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask); | ||
51 | * Stop ipi delivery for the old mm. This is not synchronized with | ||
52 | * the other cpus, but smp_invalidate_interrupt ignore flush ipis | ||
53 | * for the wrong mm, and in the worst case we perform a superfluous | ||
54 | * tlb flush. | ||
55 | * 1a2) set cpu_tlbstate to TLBSTATE_OK | ||
56 | * Now the smp_invalidate_interrupt won't call leave_mm if cpu0 | ||
57 | * was in lazy tlb mode. | ||
58 | * 1a3) update cpu_tlbstate[].active_mm | ||
59 | * Now cpu0 accepts tlb flushes for the new mm. | ||
60 | * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask); | ||
61 | * Now the other cpus will send tlb flush ipis. | ||
62 | * 1a5) change cr3. | ||
63 | * 1b) thread switch without mm change | ||
64 | * cpu_tlbstate[].active_mm is correct, cpu0 already handles | ||
65 | * flush ipis. | ||
66 | * 1b1) set cpu_tlbstate to TLBSTATE_OK | ||
67 | * 1b2) test_and_set the cpu bit in cpu_vm_mask. | ||
68 | * Atomically set the bit [other cpus will start sending flush ipis], | ||
69 | * and test the bit. | ||
70 | * 1b3) if the bit was 0: leave_mm was called, flush the tlb. | ||
71 | * 2) switch %%esp, i.e. current | ||
72 | * | ||
73 | * The interrupt must handle 2 special cases: | ||
74 | * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm. | ||
75 | * - the cpu performs speculative tlb reads, i.e. even if the cpu only | ||
76 | * runs in kernel space, the cpu could load tlb entries for user space | ||
77 | * pages. | ||
78 | * | ||
79 | * The good news is that cpu_tlbstate is local to each cpu, no | ||
80 | * write/read ordering problems. | ||
81 | */ | ||
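A concrete instance of case 1b3 (an illustration, assuming CPU0 earlier called leave_mm() for mm M, clearing itself from M->cpu_vm_mask and reloading swapper_pg_dir), when a thread of M is switched back in on CPU0 with no mm change:

    /*
     * 1b1) cpu_tlbstate[0].state = TLBSTATE_OK
     * 1b2) test_and_set_bit(0, M->cpu_vm_mask) returns 0
     * 1b3) the bit was 0, so flush IPIs for M were being skipped while
     *      CPU0 was lazy; CPU0 must flush its own TLB before touching
     *      M's user addresses.
     */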
82 | |||
83 | /* | ||
84 | * TLB flush IPI: | ||
85 | * | ||
86 | * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. | ||
87 | * 2) Leave the mm if we are in the lazy tlb mode. | ||
88 | */ | ||
89 | |||
90 | void smp_invalidate_interrupt(struct pt_regs *regs) | ||
91 | { | ||
92 | unsigned long cpu; | ||
93 | |||
94 | cpu = get_cpu(); | ||
95 | |||
96 | if (!cpu_isset(cpu, flush_cpumask)) | ||
97 | goto out; | ||
98 | /* | ||
99 | * This was a BUG(), but until someone can quote me the | ||
100 | * line from the Intel manual that guarantees an IPI to | ||
101 | * multiple CPUs is retried _only_ on the erroring CPUs, | ||
102 | * it's staying as a return. | ||
103 | * | ||
104 | * BUG(); | ||
105 | */ | ||
106 | |||
107 | if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) { | ||
108 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) { | ||
109 | if (flush_va == TLB_FLUSH_ALL) | ||
110 | local_flush_tlb(); | ||
111 | else | ||
112 | __flush_tlb_one(flush_va); | ||
113 | } else | ||
114 | leave_mm(cpu); | ||
115 | } | ||
116 | ack_APIC_irq(); | ||
117 | smp_mb__before_clear_bit(); | ||
118 | cpu_clear(cpu, flush_cpumask); | ||
119 | smp_mb__after_clear_bit(); | ||
120 | out: | ||
121 | put_cpu_no_resched(); | ||
122 | __get_cpu_var(irq_stat).irq_tlb_count++; | ||
123 | } | ||
124 | |||
125 | void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, | ||
126 | unsigned long va) | ||
127 | { | ||
128 | cpumask_t cpumask = *cpumaskp; | ||
129 | |||
130 | /* | ||
131 | * A couple of (to be removed) sanity checks: | ||
132 | * | ||
133 | * - current CPU must not be in mask | ||
134 | * - mask must exist :) | ||
135 | */ | ||
136 | BUG_ON(cpus_empty(cpumask)); | ||
137 | BUG_ON(cpu_isset(smp_processor_id(), cpumask)); | ||
138 | BUG_ON(!mm); | ||
139 | |||
140 | #ifdef CONFIG_HOTPLUG_CPU | ||
141 | /* If a CPU which we ran on has gone down, OK. */ | ||
142 | cpus_and(cpumask, cpumask, cpu_online_map); | ||
143 | if (unlikely(cpus_empty(cpumask))) | ||
144 | return; | ||
145 | #endif | ||
146 | |||
147 | /* | ||
148 | * I'm not happy about this global shared spinlock in the | ||
149 | * MM hot path, but we'll see how contended it is. | ||
150 | * AK: x86-64 has a faster method that could be ported. | ||
151 | */ | ||
152 | spin_lock(&tlbstate_lock); | ||
153 | |||
154 | flush_mm = mm; | ||
155 | flush_va = va; | ||
156 | cpus_or(flush_cpumask, cpumask, flush_cpumask); | ||
157 | /* | ||
158 | * We have to send the IPI only to | ||
159 | * CPUs affected. | ||
160 | */ | ||
161 | send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR); | ||
162 | |||
163 | while (!cpus_empty(flush_cpumask)) | ||
164 | /* nothing. lockup detection does not belong here */ | ||
165 | cpu_relax(); | ||
166 | |||
167 | flush_mm = NULL; | ||
168 | flush_va = 0; | ||
169 | spin_unlock(&tlbstate_lock); | ||
170 | } | ||
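Condensed, the initiator/receiver handshake between this function and smp_invalidate_interrupt() above (a sketch of the code, not text from the patch):

    /*
     * initiator                            receiver (INVALIDATE_TLB_VECTOR)
     * spin_lock(&tlbstate_lock)
     * flush_mm = mm; flush_va = va
     * cpus_or(flush_cpumask, cpumask, ...)
     * send_IPI_mask(cpumask, ...)    --->  flush or leave_mm() as needed
     *                                      cpu_clear(cpu, flush_cpumask)
     * spin on !cpus_empty(flush_cpumask)
     * spin_unlock(&tlbstate_lock)
     */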
171 | |||
172 | void flush_tlb_current_task(void) | ||
173 | { | ||
174 | struct mm_struct *mm = current->mm; | ||
175 | cpumask_t cpu_mask; | ||
176 | |||
177 | preempt_disable(); | ||
178 | cpu_mask = mm->cpu_vm_mask; | ||
179 | cpu_clear(smp_processor_id(), cpu_mask); | ||
180 | |||
181 | local_flush_tlb(); | ||
182 | if (!cpus_empty(cpu_mask)) | ||
183 | flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); | ||
184 | preempt_enable(); | ||
185 | } | ||
186 | |||
187 | void flush_tlb_mm(struct mm_struct *mm) | ||
188 | { | ||
189 | cpumask_t cpu_mask; | ||
190 | |||
191 | preempt_disable(); | ||
192 | cpu_mask = mm->cpu_vm_mask; | ||
193 | cpu_clear(smp_processor_id(), cpu_mask); | ||
194 | |||
195 | if (current->active_mm == mm) { | ||
196 | if (current->mm) | ||
197 | local_flush_tlb(); | ||
198 | else | ||
199 | leave_mm(smp_processor_id()); | ||
200 | } | ||
201 | if (!cpus_empty(cpu_mask)) | ||
202 | flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); | ||
203 | |||
204 | preempt_enable(); | ||
205 | } | ||
206 | |||
207 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) | ||
208 | { | ||
209 | struct mm_struct *mm = vma->vm_mm; | ||
210 | cpumask_t cpu_mask; | ||
211 | |||
212 | preempt_disable(); | ||
213 | cpu_mask = mm->cpu_vm_mask; | ||
214 | cpu_clear(smp_processor_id(), cpu_mask); | ||
215 | |||
216 | if (current->active_mm == mm) { | ||
217 | if (current->mm) | ||
218 | __flush_tlb_one(va); | ||
219 | else | ||
220 | leave_mm(smp_processor_id()); | ||
221 | } | ||
222 | |||
223 | if (!cpus_empty(cpu_mask)) | ||
224 | flush_tlb_others(cpu_mask, mm, va); | ||
225 | |||
226 | preempt_enable(); | ||
227 | } | ||
228 | EXPORT_SYMBOL(flush_tlb_page); | ||
229 | |||
230 | static void do_flush_tlb_all(void *info) | ||
231 | { | ||
232 | unsigned long cpu = smp_processor_id(); | ||
233 | |||
234 | __flush_tlb_all(); | ||
235 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY) | ||
236 | leave_mm(cpu); | ||
237 | } | ||
238 | |||
239 | void flush_tlb_all(void) | ||
240 | { | ||
241 | on_each_cpu(do_flush_tlb_all, NULL, 1, 1); | ||
242 | } | ||
243 | |||
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/tlb_64.c index 2fd74b06db67..1558e513757e 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/tlb_64.c | |||
@@ -1,14 +1,3 @@ | |||
1 | /* | ||
2 | * Intel SMP support routines. | ||
3 | * | ||
4 | * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> | ||
5 | * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com> | ||
6 | * (c) 2002,2003 Andi Kleen, SuSE Labs. | ||
7 | * | ||
8 | * This code is released under the GNU General Public License version 2 or | ||
9 | * later. | ||
10 | */ | ||
11 | |||
12 | #include <linux/init.h> | 1 | #include <linux/init.h> |
13 | 2 | ||
14 | #include <linux/mm.h> | 3 | #include <linux/mm.h> |
@@ -22,12 +11,12 @@ | |||
22 | #include <asm/mtrr.h> | 11 | #include <asm/mtrr.h> |
23 | #include <asm/pgalloc.h> | 12 | #include <asm/pgalloc.h> |
24 | #include <asm/tlbflush.h> | 13 | #include <asm/tlbflush.h> |
25 | #include <asm/mach_apic.h> | ||
26 | #include <asm/mmu_context.h> | 14 | #include <asm/mmu_context.h> |
27 | #include <asm/proto.h> | 15 | #include <asm/proto.h> |
28 | #include <asm/apicdef.h> | 16 | #include <asm/apicdef.h> |
29 | #include <asm/idle.h> | 17 | #include <asm/idle.h> |
30 | 18 | ||
19 | #include <mach_ipi.h> | ||
31 | /* | 20 | /* |
32 | * Smarter SMP flushing macros. | 21 | * Smarter SMP flushing macros. |
33 | * c/o Linus Torvalds. | 22 | * c/o Linus Torvalds. |
@@ -228,7 +217,7 @@ void flush_tlb_current_task(void) | |||
228 | preempt_enable(); | 217 | preempt_enable(); |
229 | } | 218 | } |
230 | 219 | ||
231 | void flush_tlb_mm (struct mm_struct * mm) | 220 | void flush_tlb_mm(struct mm_struct *mm) |
232 | { | 221 | { |
233 | cpumask_t cpu_mask; | 222 | cpumask_t cpu_mask; |
234 | 223 | ||
@@ -248,7 +237,7 @@ void flush_tlb_mm (struct mm_struct * mm) | |||
248 | preempt_enable(); | 237 | preempt_enable(); |
249 | } | 238 | } |
250 | 239 | ||
251 | void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) | 240 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) |
252 | { | 241 | { |
253 | struct mm_struct *mm = vma->vm_mm; | 242 | struct mm_struct *mm = vma->vm_mm; |
254 | cpumask_t cpu_mask; | 243 | cpumask_t cpu_mask; |
@@ -258,7 +247,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) | |||
258 | cpu_clear(smp_processor_id(), cpu_mask); | 247 | cpu_clear(smp_processor_id(), cpu_mask); |
259 | 248 | ||
260 | if (current->active_mm == mm) { | 249 | if (current->active_mm == mm) { |
261 | if(current->mm) | 250 | if (current->mm) |
262 | __flush_tlb_one(va); | 251 | __flush_tlb_one(va); |
263 | else | 252 | else |
264 | leave_mm(smp_processor_id()); | 253 | leave_mm(smp_processor_id()); |
@@ -270,7 +259,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) | |||
270 | preempt_enable(); | 259 | preempt_enable(); |
271 | } | 260 | } |
272 | 261 | ||
273 | static void do_flush_tlb_all(void* info) | 262 | static void do_flush_tlb_all(void *info) |
274 | { | 263 | { |
275 | unsigned long cpu = smp_processor_id(); | 264 | unsigned long cpu = smp_processor_id(); |
276 | 265 | ||
@@ -283,248 +272,3 @@ void flush_tlb_all(void) | |||
283 | { | 272 | { |
284 | on_each_cpu(do_flush_tlb_all, NULL, 1, 1); | 273 | on_each_cpu(do_flush_tlb_all, NULL, 1, 1); |
285 | } | 274 | } |
286 | |||
287 | /* | ||
288 | * this function sends a 'reschedule' IPI to another CPU. | ||
289 | * it goes straight through and wastes no time serializing | ||
290 | * anything. Worst case is that we lose a reschedule ... | ||
291 | */ | ||
292 | |||
293 | void smp_send_reschedule(int cpu) | ||
294 | { | ||
295 | send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); | ||
296 | } | ||
297 | |||
298 | /* | ||
299 | * Structure and data for smp_call_function(). This is designed to minimise | ||
300 | * static memory requirements. It also looks cleaner. | ||
301 | */ | ||
302 | static DEFINE_SPINLOCK(call_lock); | ||
303 | |||
304 | struct call_data_struct { | ||
305 | void (*func) (void *info); | ||
306 | void *info; | ||
307 | atomic_t started; | ||
308 | atomic_t finished; | ||
309 | int wait; | ||
310 | }; | ||
311 | |||
312 | static struct call_data_struct * call_data; | ||
313 | |||
314 | void lock_ipi_call_lock(void) | ||
315 | { | ||
316 | spin_lock_irq(&call_lock); | ||
317 | } | ||
318 | |||
319 | void unlock_ipi_call_lock(void) | ||
320 | { | ||
321 | spin_unlock_irq(&call_lock); | ||
322 | } | ||
323 | |||
324 | /* | ||
325 | * this function sends a 'generic call function' IPI to all other CPU | ||
326 | * of the system defined in the mask. | ||
327 | */ | ||
328 | static int __smp_call_function_mask(cpumask_t mask, | ||
329 | void (*func)(void *), void *info, | ||
330 | int wait) | ||
331 | { | ||
332 | struct call_data_struct data; | ||
333 | cpumask_t allbutself; | ||
334 | int cpus; | ||
335 | |||
336 | allbutself = cpu_online_map; | ||
337 | cpu_clear(smp_processor_id(), allbutself); | ||
338 | |||
339 | cpus_and(mask, mask, allbutself); | ||
340 | cpus = cpus_weight(mask); | ||
341 | |||
342 | if (!cpus) | ||
343 | return 0; | ||
344 | |||
345 | data.func = func; | ||
346 | data.info = info; | ||
347 | atomic_set(&data.started, 0); | ||
348 | data.wait = wait; | ||
349 | if (wait) | ||
350 | atomic_set(&data.finished, 0); | ||
351 | |||
352 | call_data = &data; | ||
353 | wmb(); | ||
354 | |||
355 | /* Send a message to other CPUs */ | ||
356 | if (cpus_equal(mask, allbutself)) | ||
357 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | ||
358 | else | ||
359 | send_IPI_mask(mask, CALL_FUNCTION_VECTOR); | ||
360 | |||
361 | /* Wait for response */ | ||
362 | while (atomic_read(&data.started) != cpus) | ||
363 | cpu_relax(); | ||
364 | |||
365 | if (!wait) | ||
366 | return 0; | ||
367 | |||
368 | while (atomic_read(&data.finished) != cpus) | ||
369 | cpu_relax(); | ||
370 | |||
371 | return 0; | ||
372 | } | ||
373 | /** | ||
374 | * smp_call_function_mask(): Run a function on a set of other CPUs. | ||
375 | * @mask: The set of cpus to run on. Must not include the current cpu. | ||
376 | * @func: The function to run. This must be fast and non-blocking. | ||
377 | * @info: An arbitrary pointer to pass to the function. | ||
378 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
379 | * | ||
380 | * Returns 0 on success, else a negative status code. | ||
381 | * | ||
382 | * If @wait is true, then returns once @func has returned; otherwise | ||
383 | * it returns just before the target cpu calls @func. | ||
384 | * | ||
385 | * You must not call this function with disabled interrupts or from a | ||
386 | * hardware interrupt handler or from a bottom half handler. | ||
387 | */ | ||
388 | int smp_call_function_mask(cpumask_t mask, | ||
389 | void (*func)(void *), void *info, | ||
390 | int wait) | ||
391 | { | ||
392 | int ret; | ||
393 | |||
394 | /* Can deadlock when called with interrupts disabled */ | ||
395 | WARN_ON(irqs_disabled()); | ||
396 | |||
397 | spin_lock(&call_lock); | ||
398 | ret = __smp_call_function_mask(mask, func, info, wait); | ||
399 | spin_unlock(&call_lock); | ||
400 | return ret; | ||
401 | } | ||
402 | EXPORT_SYMBOL(smp_call_function_mask); | ||
403 | |||
404 | /* | ||
405 | * smp_call_function_single - Run a function on a specific CPU | ||
406 | * @func: The function to run. This must be fast and non-blocking. | ||
407 | * @info: An arbitrary pointer to pass to the function. | ||
408 | * @nonatomic: Currently unused. | ||
409 | * @wait: If true, wait until function has completed on other CPUs. | ||
410 | * | ||
411 | * Retrurns 0 on success, else a negative status code. | ||
412 | * | ||
413 | * Does not return until the remote CPU is nearly ready to execute <func> | ||
414 | * or is or has executed. | ||
415 | */ | ||
416 | |||
417 | int smp_call_function_single (int cpu, void (*func) (void *info), void *info, | ||
418 | int nonatomic, int wait) | ||
419 | { | ||
420 | /* prevent preemption and reschedule on another processor */ | ||
421 | int ret, me = get_cpu(); | ||
422 | |||
423 | /* Can deadlock when called with interrupts disabled */ | ||
424 | WARN_ON(irqs_disabled()); | ||
425 | |||
426 | if (cpu == me) { | ||
427 | local_irq_disable(); | ||
428 | func(info); | ||
429 | local_irq_enable(); | ||
430 | put_cpu(); | ||
431 | return 0; | ||
432 | } | ||
433 | |||
434 | ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); | ||
435 | |||
436 | put_cpu(); | ||
437 | return ret; | ||
438 | } | ||
439 | EXPORT_SYMBOL(smp_call_function_single); | ||
440 | |||
441 | /* | ||
442 | * smp_call_function - run a function on all other CPUs. | ||
443 | * @func: The function to run. This must be fast and non-blocking. | ||
444 | * @info: An arbitrary pointer to pass to the function. | ||
445 | * @nonatomic: currently unused. | ||
446 | * @wait: If true, wait (atomically) until function has completed on other | ||
447 | * CPUs. | ||
448 | * | ||
449 | * Returns 0 on success, else a negative status code. Does not return until | ||
450 | * remote CPUs are nearly ready to execute func or are or have executed. | ||
451 | * | ||
452 | * You must not call this function with disabled interrupts or from a | ||
453 | * hardware interrupt handler or from a bottom half handler. | ||
454 | * Actually there are a few legal cases, like panic. | ||
455 | */ | ||
456 | int smp_call_function (void (*func) (void *info), void *info, int nonatomic, | ||
457 | int wait) | ||
458 | { | ||
459 | return smp_call_function_mask(cpu_online_map, func, info, wait); | ||
460 | } | ||
461 | EXPORT_SYMBOL(smp_call_function); | ||
462 | |||
463 | static void stop_this_cpu(void *dummy) | ||
464 | { | ||
465 | local_irq_disable(); | ||
466 | /* | ||
467 | * Remove this CPU: | ||
468 | */ | ||
469 | cpu_clear(smp_processor_id(), cpu_online_map); | ||
470 | disable_local_APIC(); | ||
471 | for (;;) | ||
472 | halt(); | ||
473 | } | ||
474 | |||
475 | void smp_send_stop(void) | ||
476 | { | ||
477 | int nolock; | ||
478 | unsigned long flags; | ||
479 | |||
480 | if (reboot_force) | ||
481 | return; | ||
482 | |||
483 | /* Don't deadlock on the call lock in panic */ | ||
484 | nolock = !spin_trylock(&call_lock); | ||
485 | local_irq_save(flags); | ||
486 | __smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0); | ||
487 | if (!nolock) | ||
488 | spin_unlock(&call_lock); | ||
489 | disable_local_APIC(); | ||
490 | local_irq_restore(flags); | ||
491 | } | ||
492 | |||
493 | /* | ||
494 | * Reschedule call back. Nothing to do, | ||
495 | * all the work is done automatically when | ||
496 | * we return from the interrupt. | ||
497 | */ | ||
498 | asmlinkage void smp_reschedule_interrupt(void) | ||
499 | { | ||
500 | ack_APIC_irq(); | ||
501 | add_pda(irq_resched_count, 1); | ||
502 | } | ||
503 | |||
504 | asmlinkage void smp_call_function_interrupt(void) | ||
505 | { | ||
506 | void (*func) (void *info) = call_data->func; | ||
507 | void *info = call_data->info; | ||
508 | int wait = call_data->wait; | ||
509 | |||
510 | ack_APIC_irq(); | ||
511 | /* | ||
512 | * Notify initiating CPU that I've grabbed the data and am | ||
513 | * about to execute the function | ||
514 | */ | ||
515 | mb(); | ||
516 | atomic_inc(&call_data->started); | ||
517 | /* | ||
518 | * At this point the info structure may be out of scope unless wait==1 | ||
519 | */ | ||
520 | exit_idle(); | ||
521 | irq_enter(); | ||
522 | (*func)(info); | ||
523 | add_pda(irq_call_count, 1); | ||
524 | irq_exit(); | ||
525 | if (wait) { | ||
526 | mb(); | ||
527 | atomic_inc(&call_data->finished); | ||
528 | } | ||
529 | } | ||
530 | |||
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c new file mode 100644 index 000000000000..abbf199adebb --- /dev/null +++ b/arch/x86/kernel/trampoline.c | |||
@@ -0,0 +1,18 @@ | |||
1 | #include <linux/io.h> | ||
2 | |||
3 | #include <asm/trampoline.h> | ||
4 | |||
5 | /* ready for x86_64; no harm for x86, since it will be overwritten after alloc */ | ||
6 | unsigned char *trampoline_base = __va(TRAMPOLINE_BASE); | ||
7 | |||
8 | /* | ||
9 | * Currently trivial. Write the real->protected mode | ||
10 | * bootstrap into the page concerned. The caller | ||
11 | * has made sure it's suitably aligned. | ||
12 | */ | ||
13 | unsigned long setup_trampoline(void) | ||
14 | { | ||
15 | memcpy(trampoline_base, trampoline_data, | ||
16 | trampoline_end - trampoline_data); | ||
17 | return virt_to_phys(trampoline_base); | ||
18 | } | ||
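As wired up earlier in this patch, do_boot_cpu() uses the returned physical address as the AP's real-mode entry point (a sketch; the trampoline page conventionally sits in low memory so the AP can execute it in real mode):

    /*
     * start_rip = setup_trampoline();
     * ...program the 40:67 warm-reset vector and send the INIT/STARTUP
     *    IPIs so the AP begins executing at start_rip...
     */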
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S index 4aedd0bcee4c..894293c598db 100644 --- a/arch/x86/kernel/trampoline_64.S +++ b/arch/x86/kernel/trampoline_64.S | |||
@@ -30,12 +30,7 @@ | |||
30 | #include <asm/msr.h> | 30 | #include <asm/msr.h> |
31 | #include <asm/segment.h> | 31 | #include <asm/segment.h> |
32 | 32 | ||
33 | /* We can free up trampoline after bootup if cpu hotplug is not supported. */ | ||
34 | #ifndef CONFIG_HOTPLUG_CPU | ||
35 | .section .init.data, "aw", @progbits | ||
36 | #else | ||
37 | .section .rodata, "a", @progbits | 33 | .section .rodata, "a", @progbits |
38 | #endif | ||
39 | 34 | ||
40 | .code16 | 35 | .code16 |
41 | 36 | ||
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c index b22c01e05a18..65791ca2824a 100644 --- a/arch/x86/kernel/traps_32.c +++ b/arch/x86/kernel/traps_32.c | |||
@@ -9,26 +9,28 @@ | |||
9 | * 'Traps.c' handles hardware traps and faults after we have saved some | 9 | * 'Traps.c' handles hardware traps and faults after we have saved some |
10 | * state in 'asm.s'. | 10 | * state in 'asm.s'. |
11 | */ | 11 | */ |
12 | #include <linux/sched.h> | 12 | #include <linux/interrupt.h> |
13 | #include <linux/kallsyms.h> | ||
14 | #include <linux/spinlock.h> | ||
15 | #include <linux/highmem.h> | ||
16 | #include <linux/kprobes.h> | ||
17 | #include <linux/uaccess.h> | ||
18 | #include <linux/utsname.h> | ||
19 | #include <linux/kdebug.h> | ||
13 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/module.h> | ||
22 | #include <linux/ptrace.h> | ||
14 | #include <linux/string.h> | 23 | #include <linux/string.h> |
24 | #include <linux/unwind.h> | ||
25 | #include <linux/delay.h> | ||
15 | #include <linux/errno.h> | 26 | #include <linux/errno.h> |
27 | #include <linux/kexec.h> | ||
28 | #include <linux/sched.h> | ||
16 | #include <linux/timer.h> | 29 | #include <linux/timer.h> |
17 | #include <linux/mm.h> | ||
18 | #include <linux/init.h> | 30 | #include <linux/init.h> |
19 | #include <linux/delay.h> | ||
20 | #include <linux/spinlock.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/highmem.h> | ||
23 | #include <linux/kallsyms.h> | ||
24 | #include <linux/ptrace.h> | ||
25 | #include <linux/utsname.h> | ||
26 | #include <linux/kprobes.h> | ||
27 | #include <linux/kexec.h> | ||
28 | #include <linux/unwind.h> | ||
29 | #include <linux/uaccess.h> | ||
30 | #include <linux/nmi.h> | ||
31 | #include <linux/bug.h> | 31 | #include <linux/bug.h> |
32 | #include <linux/nmi.h> | ||
33 | #include <linux/mm.h> | ||
32 | 34 | ||
33 | #ifdef CONFIG_EISA | 35 | #ifdef CONFIG_EISA |
34 | #include <linux/ioport.h> | 36 | #include <linux/ioport.h> |
@@ -43,21 +45,18 @@ | |||
43 | #include <linux/edac.h> | 45 | #include <linux/edac.h> |
44 | #endif | 46 | #endif |
45 | 47 | ||
48 | #include <asm/arch_hooks.h> | ||
49 | #include <asm/stacktrace.h> | ||
46 | #include <asm/processor.h> | 50 | #include <asm/processor.h> |
47 | #include <asm/system.h> | ||
48 | #include <asm/io.h> | ||
49 | #include <asm/atomic.h> | ||
50 | #include <asm/debugreg.h> | 51 | #include <asm/debugreg.h> |
52 | #include <asm/atomic.h> | ||
53 | #include <asm/system.h> | ||
54 | #include <asm/unwind.h> | ||
51 | #include <asm/desc.h> | 55 | #include <asm/desc.h> |
52 | #include <asm/i387.h> | 56 | #include <asm/i387.h> |
53 | #include <asm/nmi.h> | 57 | #include <asm/nmi.h> |
54 | #include <asm/unwind.h> | ||
55 | #include <asm/smp.h> | 58 | #include <asm/smp.h> |
56 | #include <asm/arch_hooks.h> | 59 | #include <asm/io.h> |
57 | #include <linux/kdebug.h> | ||
58 | #include <asm/stacktrace.h> | ||
59 | |||
60 | #include <linux/module.h> | ||
61 | 60 | ||
62 | #include "mach_traps.h" | 61 | #include "mach_traps.h" |
63 | 62 | ||
@@ -69,7 +68,7 @@ EXPORT_SYMBOL_GPL(used_vectors); | |||
69 | asmlinkage int system_call(void); | 68 | asmlinkage int system_call(void); |
70 | 69 | ||
71 | /* Do we ignore FPU interrupts ? */ | 70 | /* Do we ignore FPU interrupts ? */ |
72 | char ignore_fpu_irq = 0; | 71 | char ignore_fpu_irq; |
73 | 72 | ||
74 | /* | 73 | /* |
75 | * The IDT has to be page-aligned to simplify the Pentium | 74 | * The IDT has to be page-aligned to simplify the Pentium |
@@ -105,12 +104,13 @@ static unsigned int code_bytes = 64; | |||
105 | void printk_address(unsigned long address, int reliable) | 104 | void printk_address(unsigned long address, int reliable) |
106 | { | 105 | { |
107 | #ifdef CONFIG_KALLSYMS | 106 | #ifdef CONFIG_KALLSYMS |
108 | unsigned long offset = 0, symsize; | 107 | char namebuf[KSYM_NAME_LEN]; |
108 | unsigned long offset = 0; | ||
109 | unsigned long symsize; | ||
109 | const char *symname; | 110 | const char *symname; |
110 | char *modname; | ||
111 | char *delim = ":"; | ||
112 | char namebuf[128]; | ||
113 | char reliab[4] = ""; | 111 | char reliab[4] = ""; |
112 | char *delim = ":"; | ||
113 | char *modname; | ||
114 | 114 | ||
115 | symname = kallsyms_lookup(address, &symsize, &offset, | 115 | symname = kallsyms_lookup(address, &symsize, &offset, |
116 | &modname, namebuf); | 116 | &modname, namebuf); |
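kallsyms_lookup() resolves an address to its enclosing symbol, filling namebuf and reporting the symbol's size, the offset into it, and the owning module. A sketch of the lookup-and-print pattern this function builds on (printk_address() additionally prints the reliability marker handled via 'reliab' above):

char namebuf[KSYM_NAME_LEN];
unsigned long symsize, offset;
char *modname;
const char *symname;

symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);
if (!symname) {
	printk(" [<%08lx>]\n", address);
} else if (modname) {
	printk(" [<%08lx>] %s+0x%lx/0x%lx [%s]\n",
	       address, symname, offset, symsize, modname);
} else {
	printk(" [<%08lx>] %s+0x%lx/0x%lx\n",
	       address, symname, offset, symsize);
}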
@@ -138,13 +138,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p, unsigned s | |||
138 | 138 | ||
139 | /* The form of the top of the frame on the stack */ | 139 | /* The form of the top of the frame on the stack */ |
140 | struct stack_frame { | 140 | struct stack_frame { |
141 | struct stack_frame *next_frame; | 141 | struct stack_frame *next_frame; |
142 | unsigned long return_address; | 142 | unsigned long return_address; |
143 | }; | 143 | }; |
144 | 144 | ||
145 | static inline unsigned long print_context_stack(struct thread_info *tinfo, | 145 | static inline unsigned long |
146 | unsigned long *stack, unsigned long bp, | 146 | print_context_stack(struct thread_info *tinfo, |
147 | const struct stacktrace_ops *ops, void *data) | 147 | unsigned long *stack, unsigned long bp, |
148 | const struct stacktrace_ops *ops, void *data) | ||
148 | { | 149 | { |
149 | struct stack_frame *frame = (struct stack_frame *)bp; | 150 | struct stack_frame *frame = (struct stack_frame *)bp; |
150 | 151 | ||
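struct stack_frame mirrors what the compiler emits with frame pointers enabled: at each saved EBP sits the caller's EBP, with the return address immediately above it. A stripped-down walker over that layout (a sketch; the real print_context_stack also validates each pointer against the stack bounds via valid_stack_ptr):

struct stack_frame {
	struct stack_frame *next_frame;	/* the caller's saved EBP  */
	unsigned long return_address;	/* pushed by the call insn */
};

static void walk_frames(unsigned long bp, void (*emit)(unsigned long))
{
	struct stack_frame *frame = (struct stack_frame *)bp;

	while (frame) {
		emit(frame->return_address);
		frame = frame->next_frame;
	}
}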
@@ -166,7 +167,7 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo, | |||
166 | return bp; | 167 | return bp; |
167 | } | 168 | } |
168 | 169 | ||
169 | #define MSG(msg) ops->warning(data, msg) | 170 | #define MSG(msg) ops->warning(data, msg) |
170 | 171 | ||
171 | void dump_trace(struct task_struct *task, struct pt_regs *regs, | 172 | void dump_trace(struct task_struct *task, struct pt_regs *regs, |
172 | unsigned long *stack, unsigned long bp, | 173 | unsigned long *stack, unsigned long bp, |
@@ -177,6 +178,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
177 | 178 | ||
178 | if (!stack) { | 179 | if (!stack) { |
179 | unsigned long dummy; | 180 | unsigned long dummy; |
181 | |||
180 | stack = &dummy; | 182 | stack = &dummy; |
181 | if (task != current) | 183 | if (task != current) |
182 | stack = (unsigned long *)task->thread.sp; | 184 | stack = (unsigned long *)task->thread.sp; |
@@ -186,7 +188,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
186 | if (!bp) { | 188 | if (!bp) { |
187 | if (task == current) { | 189 | if (task == current) { |
188 | /* Grab bp right from our regs */ | 190 | /* Grab bp right from our regs */ |
189 | asm ("movl %%ebp, %0" : "=r" (bp) : ); | 191 | asm("movl %%ebp, %0" : "=r" (bp) :); |
190 | } else { | 192 | } else { |
191 | /* bp is the last reg pushed by switch_to */ | 193 | /* bp is the last reg pushed by switch_to */ |
192 | bp = *(unsigned long *) task->thread.sp; | 194 | bp = *(unsigned long *) task->thread.sp; |
@@ -196,15 +198,18 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
196 | 198 | ||
197 | while (1) { | 199 | while (1) { |
198 | struct thread_info *context; | 200 | struct thread_info *context; |
201 | |||
199 | context = (struct thread_info *) | 202 | context = (struct thread_info *) |
200 | ((unsigned long)stack & (~(THREAD_SIZE - 1))); | 203 | ((unsigned long)stack & (~(THREAD_SIZE - 1))); |
201 | bp = print_context_stack(context, stack, bp, ops, data); | 204 | bp = print_context_stack(context, stack, bp, ops, data); |
202 | /* Should be after the line below, but somewhere | 205 | /* |
203 | in early boot context comes out corrupted and we | 206 | * Should be after the line below, but somewhere |
204 | can't reference it -AK */ | 207 | * in early boot context comes out corrupted and we |
208 | * can't reference it: | ||
209 | */ | ||
205 | if (ops->stack(data, "IRQ") < 0) | 210 | if (ops->stack(data, "IRQ") < 0) |
206 | break; | 211 | break; |
207 | stack = (unsigned long*)context->previous_esp; | 212 | stack = (unsigned long *)context->previous_esp; |
208 | if (!stack) | 213 | if (!stack) |
209 | break; | 214 | break; |
210 | touch_nmi_watchdog(); | 215 | touch_nmi_watchdog(); |
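The masking in the loop above works because every kernel stack is a THREAD_SIZE-aligned allocation with struct thread_info at its base, so clearing the low bits of any in-stack address yields that stack's thread_info:

/* How the loop recovers thread_info from an arbitrary stack address. */
static inline struct thread_info *stack_to_thread_info(unsigned long *stack)
{
	return (struct thread_info *)
		((unsigned long)stack & ~(THREAD_SIZE - 1));
}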
@@ -243,15 +248,15 @@ static void print_trace_address(void *data, unsigned long addr, int reliable) | |||
243 | } | 248 | } |
244 | 249 | ||
245 | static const struct stacktrace_ops print_trace_ops = { | 250 | static const struct stacktrace_ops print_trace_ops = { |
246 | .warning = print_trace_warning, | 251 | .warning = print_trace_warning, |
247 | .warning_symbol = print_trace_warning_symbol, | 252 | .warning_symbol = print_trace_warning_symbol, |
248 | .stack = print_trace_stack, | 253 | .stack = print_trace_stack, |
249 | .address = print_trace_address, | 254 | .address = print_trace_address, |
250 | }; | 255 | }; |
251 | 256 | ||
252 | static void | 257 | static void |
253 | show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, | 258 | show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, |
254 | unsigned long *stack, unsigned long bp, char *log_lvl) | 259 | unsigned long *stack, unsigned long bp, char *log_lvl) |
255 | { | 260 | { |
256 | dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); | 261 | dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); |
257 | printk("%s =======================\n", log_lvl); | 262 | printk("%s =======================\n", log_lvl); |
@@ -263,21 +268,22 @@ void show_trace(struct task_struct *task, struct pt_regs *regs, | |||
263 | show_trace_log_lvl(task, regs, stack, bp, ""); | 268 | show_trace_log_lvl(task, regs, stack, bp, ""); |
264 | } | 269 | } |
265 | 270 | ||
266 | static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, | 271 | static void |
267 | unsigned long *sp, unsigned long bp, char *log_lvl) | 272 | show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, |
273 | unsigned long *sp, unsigned long bp, char *log_lvl) | ||
268 | { | 274 | { |
269 | unsigned long *stack; | 275 | unsigned long *stack; |
270 | int i; | 276 | int i; |
271 | 277 | ||
272 | if (sp == NULL) { | 278 | if (sp == NULL) { |
273 | if (task) | 279 | if (task) |
274 | sp = (unsigned long*)task->thread.sp; | 280 | sp = (unsigned long *)task->thread.sp; |
275 | else | 281 | else |
276 | sp = (unsigned long *)&sp; | 282 | sp = (unsigned long *)&sp; |
277 | } | 283 | } |
278 | 284 | ||
279 | stack = sp; | 285 | stack = sp; |
280 | for(i = 0; i < kstack_depth_to_print; i++) { | 286 | for (i = 0; i < kstack_depth_to_print; i++) { |
281 | if (kstack_end(stack)) | 287 | if (kstack_end(stack)) |
282 | break; | 288 | break; |
283 | if (i && ((i % 8) == 0)) | 289 | if (i && ((i % 8) == 0)) |
@@ -285,6 +291,7 @@ static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, | |||
285 | printk("%08lx ", *stack++); | 291 | printk("%08lx ", *stack++); |
286 | } | 292 | } |
287 | printk("\n%sCall Trace:\n", log_lvl); | 293 | printk("\n%sCall Trace:\n", log_lvl); |
294 | |||
288 | show_trace_log_lvl(task, regs, sp, bp, log_lvl); | 295 | show_trace_log_lvl(task, regs, sp, bp, log_lvl); |
289 | } | 296 | } |
290 | 297 | ||
@@ -299,8 +306,8 @@ void show_stack(struct task_struct *task, unsigned long *sp) | |||
299 | */ | 306 | */ |
300 | void dump_stack(void) | 307 | void dump_stack(void) |
301 | { | 308 | { |
302 | unsigned long stack; | ||
303 | unsigned long bp = 0; | 309 | unsigned long bp = 0; |
310 | unsigned long stack; | ||
304 | 311 | ||
305 | #ifdef CONFIG_FRAME_POINTER | 312 | #ifdef CONFIG_FRAME_POINTER |
306 | if (!bp) | 313 | if (!bp) |
@@ -312,6 +319,7 @@ void dump_stack(void) | |||
312 | init_utsname()->release, | 319 | init_utsname()->release, |
313 | (int)strcspn(init_utsname()->version, " "), | 320 | (int)strcspn(init_utsname()->version, " "), |
314 | init_utsname()->version); | 321 | init_utsname()->version); |
322 | |||
315 | show_trace(current, NULL, &stack, bp); | 323 | show_trace(current, NULL, &stack, bp); |
316 | } | 324 | } |
317 | 325 | ||
@@ -323,6 +331,7 @@ void show_registers(struct pt_regs *regs) | |||
323 | 331 | ||
324 | print_modules(); | 332 | print_modules(); |
325 | __show_registers(regs, 0); | 333 | __show_registers(regs, 0); |
334 | |||
326 | printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)", | 335 | printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)", |
327 | TASK_COMM_LEN, current->comm, task_pid_nr(current), | 336 | TASK_COMM_LEN, current->comm, task_pid_nr(current), |
328 | current_thread_info(), current, task_thread_info(current)); | 337 | current_thread_info(), current, task_thread_info(current)); |
@@ -331,10 +340,10 @@ void show_registers(struct pt_regs *regs) | |||
331 | * time of the fault.. | 340 | * time of the fault.. |
332 | */ | 341 | */ |
333 | if (!user_mode_vm(regs)) { | 342 | if (!user_mode_vm(regs)) { |
334 | u8 *ip; | ||
335 | unsigned int code_prologue = code_bytes * 43 / 64; | 343 | unsigned int code_prologue = code_bytes * 43 / 64; |
336 | unsigned int code_len = code_bytes; | 344 | unsigned int code_len = code_bytes; |
337 | unsigned char c; | 345 | unsigned char c; |
346 | u8 *ip; | ||
338 | 347 | ||
339 | printk("\n" KERN_EMERG "Stack: "); | 348 | printk("\n" KERN_EMERG "Stack: "); |
340 | show_stack_log_lvl(NULL, regs, ®s->sp, 0, KERN_EMERG); | 349 | show_stack_log_lvl(NULL, regs, ®s->sp, 0, KERN_EMERG); |
@@ -361,7 +370,7 @@ void show_registers(struct pt_regs *regs) | |||
361 | } | 370 | } |
362 | } | 371 | } |
363 | printk("\n"); | 372 | printk("\n"); |
364 | } | 373 | } |
365 | 374 | ||
366 | int is_valid_bugaddr(unsigned long ip) | 375 | int is_valid_bugaddr(unsigned long ip) |
367 | { | 376 | { |
@@ -377,10 +386,10 @@ int is_valid_bugaddr(unsigned long ip) | |||
377 | 386 | ||
378 | static int die_counter; | 387 | static int die_counter; |
379 | 388 | ||
380 | int __kprobes __die(const char * str, struct pt_regs * regs, long err) | 389 | int __kprobes __die(const char *str, struct pt_regs *regs, long err) |
381 | { | 390 | { |
382 | unsigned long sp; | ||
383 | unsigned short ss; | 391 | unsigned short ss; |
392 | unsigned long sp; | ||
384 | 393 | ||
385 | printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); | 394 | printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); |
386 | #ifdef CONFIG_PREEMPT | 395 | #ifdef CONFIG_PREEMPT |
@@ -395,8 +404,8 @@ int __kprobes __die(const char * str, struct pt_regs * regs, long err) | |||
395 | printk("\n"); | 404 | printk("\n"); |
396 | 405 | ||
397 | if (notify_die(DIE_OOPS, str, regs, err, | 406 | if (notify_die(DIE_OOPS, str, regs, err, |
398 | current->thread.trap_no, SIGSEGV) != | 407 | current->thread.trap_no, SIGSEGV) != NOTIFY_STOP) { |
399 | NOTIFY_STOP) { | 408 | |
400 | show_registers(regs); | 409 | show_registers(regs); |
401 | /* Executive summary in case the oops scrolled away */ | 410 | /* Executive summary in case the oops scrolled away */ |
402 | sp = (unsigned long) (®s->sp); | 411 | sp = (unsigned long) (®s->sp); |
@@ -408,17 +417,18 @@ int __kprobes __die(const char * str, struct pt_regs * regs, long err) | |||
408 | printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip); | 417 | printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip); |
409 | print_symbol("%s", regs->ip); | 418 | print_symbol("%s", regs->ip); |
410 | printk(" SS:ESP %04x:%08lx\n", ss, sp); | 419 | printk(" SS:ESP %04x:%08lx\n", ss, sp); |
420 | |||
411 | return 0; | 421 | return 0; |
412 | } else { | ||
413 | return 1; | ||
414 | } | 422 | } |
423 | |||
424 | return 1; | ||
415 | } | 425 | } |
416 | 426 | ||
417 | /* | 427 | /* |
418 | * This is gone through when something in the kernel has done something bad and | 428 | * This is gone through when something in the kernel has done something bad |
419 | * is about to be terminated. | 429 | * and is about to be terminated: |
420 | */ | 430 | */ |
421 | void die(const char * str, struct pt_regs * regs, long err) | 431 | void die(const char *str, struct pt_regs *regs, long err) |
422 | { | 432 | { |
423 | static struct { | 433 | static struct { |
424 | raw_spinlock_t lock; | 434 | raw_spinlock_t lock; |
@@ -440,8 +450,9 @@ void die(const char * str, struct pt_regs * regs, long err) | |||
440 | die.lock_owner = smp_processor_id(); | 450 | die.lock_owner = smp_processor_id(); |
441 | die.lock_owner_depth = 0; | 451 | die.lock_owner_depth = 0; |
442 | bust_spinlocks(1); | 452 | bust_spinlocks(1); |
443 | } else | 453 | } else { |
444 | raw_local_irq_save(flags); | 454 | raw_local_irq_save(flags); |
455 | } | ||
445 | 456 | ||
446 | if (++die.lock_owner_depth < 3) { | 457 | if (++die.lock_owner_depth < 3) { |
447 | report_bug(regs->ip, regs); | 458 | report_bug(regs->ip, regs); |
@@ -474,19 +485,20 @@ void die(const char * str, struct pt_regs * regs, long err) | |||
474 | do_exit(SIGSEGV); | 485 | do_exit(SIGSEGV); |
475 | } | 486 | } |
476 | 487 | ||
477 | static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err) | 488 | static inline void |
489 | die_if_kernel(const char *str, struct pt_regs *regs, long err) | ||
478 | { | 490 | { |
479 | if (!user_mode_vm(regs)) | 491 | if (!user_mode_vm(regs)) |
480 | die(str, regs, err); | 492 | die(str, regs, err); |
481 | } | 493 | } |
482 | 494 | ||
483 | static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86, | 495 | static void __kprobes |
484 | struct pt_regs * regs, long error_code, | 496 | do_trap(int trapnr, int signr, char *str, int vm86, struct pt_regs *regs, |
485 | siginfo_t *info) | 497 | long error_code, siginfo_t *info) |
486 | { | 498 | { |
487 | struct task_struct *tsk = current; | 499 | struct task_struct *tsk = current; |
488 | 500 | ||
489 | if (regs->flags & VM_MASK) { | 501 | if (regs->flags & X86_VM_MASK) { |
490 | if (vm86) | 502 | if (vm86) |
491 | goto vm86_trap; | 503 | goto vm86_trap; |
492 | goto trap_signal; | 504 | goto trap_signal; |
@@ -495,111 +507,112 @@ static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86, | |||
495 | if (!user_mode(regs)) | 507 | if (!user_mode(regs)) |
496 | goto kernel_trap; | 508 | goto kernel_trap; |
497 | 509 | ||
498 | trap_signal: { | 510 | trap_signal: |
499 | /* | 511 | /* |
500 | * We want error_code and trap_no set for userspace faults and | 512 | * We want error_code and trap_no set for userspace faults and |
501 | * kernelspace faults which result in die(), but not | 513 | * kernelspace faults which result in die(), but not |
502 | * kernelspace faults which are fixed up. die() gives the | 514 | * kernelspace faults which are fixed up. die() gives the |
503 | * process no chance to handle the signal and notice the | 515 | * process no chance to handle the signal and notice the |
504 | * kernel fault information, so that won't result in polluting | 516 | * kernel fault information, so that won't result in polluting |
505 | * the information about previously queued, but not yet | 517 | * the information about previously queued, but not yet |
506 | * delivered, faults. See also do_general_protection below. | 518 | * delivered, faults. See also do_general_protection below. |
507 | */ | 519 | */ |
508 | tsk->thread.error_code = error_code; | 520 | tsk->thread.error_code = error_code; |
509 | tsk->thread.trap_no = trapnr; | 521 | tsk->thread.trap_no = trapnr; |
510 | 522 | ||
511 | if (info) | 523 | if (info) |
512 | force_sig_info(signr, info, tsk); | 524 | force_sig_info(signr, info, tsk); |
513 | else | 525 | else |
514 | force_sig(signr, tsk); | 526 | force_sig(signr, tsk); |
515 | return; | 527 | return; |
516 | } | ||
517 | 528 | ||
518 | kernel_trap: { | 529 | kernel_trap: |
519 | if (!fixup_exception(regs)) { | 530 | if (!fixup_exception(regs)) { |
520 | tsk->thread.error_code = error_code; | 531 | tsk->thread.error_code = error_code; |
521 | tsk->thread.trap_no = trapnr; | 532 | tsk->thread.trap_no = trapnr; |
522 | die(str, regs, error_code); | 533 | die(str, regs, error_code); |
523 | } | ||
524 | return; | ||
525 | } | 534 | } |
535 | return; | ||
526 | 536 | ||
527 | vm86_trap: { | 537 | vm86_trap: |
528 | int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr); | 538 | if (handle_vm86_trap((struct kernel_vm86_regs *) regs, |
529 | if (ret) goto trap_signal; | 539 | error_code, trapnr)) |
530 | return; | 540 | goto trap_signal; |
531 | } | 541 | return; |
532 | } | 542 | } |
533 | 543 | ||
534 | #define DO_ERROR(trapnr, signr, str, name) \ | 544 | #define DO_ERROR(trapnr, signr, str, name) \ |
535 | void do_##name(struct pt_regs * regs, long error_code) \ | 545 | void do_##name(struct pt_regs *regs, long error_code) \ |
536 | { \ | 546 | { \ |
537 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 547 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
538 | == NOTIFY_STOP) \ | 548 | == NOTIFY_STOP) \ |
539 | return; \ | 549 | return; \ |
540 | do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \ | 550 | do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \ |
541 | } | 551 | } |
542 | 552 | ||
543 | #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr, irq) \ | 553 | #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr, irq) \ |
544 | void do_##name(struct pt_regs * regs, long error_code) \ | 554 | void do_##name(struct pt_regs *regs, long error_code) \ |
545 | { \ | 555 | { \ |
546 | siginfo_t info; \ | 556 | siginfo_t info; \ |
547 | if (irq) \ | 557 | if (irq) \ |
548 | local_irq_enable(); \ | 558 | local_irq_enable(); \ |
549 | info.si_signo = signr; \ | 559 | info.si_signo = signr; \ |
550 | info.si_errno = 0; \ | 560 | info.si_errno = 0; \ |
551 | info.si_code = sicode; \ | 561 | info.si_code = sicode; \ |
552 | info.si_addr = (void __user *)siaddr; \ | 562 | info.si_addr = (void __user *)siaddr; \ |
553 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 563 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
554 | == NOTIFY_STOP) \ | 564 | == NOTIFY_STOP) \ |
555 | return; \ | 565 | return; \ |
556 | do_trap(trapnr, signr, str, 0, regs, error_code, &info); \ | 566 | do_trap(trapnr, signr, str, 0, regs, error_code, &info); \ |
557 | } | 567 | } |
558 | 568 | ||
559 | #define DO_VM86_ERROR(trapnr, signr, str, name) \ | 569 | #define DO_VM86_ERROR(trapnr, signr, str, name) \ |
560 | void do_##name(struct pt_regs * regs, long error_code) \ | 570 | void do_##name(struct pt_regs *regs, long error_code) \ |
561 | { \ | 571 | { \ |
562 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 572 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
563 | == NOTIFY_STOP) \ | 573 | == NOTIFY_STOP) \ |
564 | return; \ | 574 | return; \ |
565 | do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \ | 575 | do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \ |
566 | } | 576 | } |
567 | 577 | ||
568 | #define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ | 578 | #define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ |
569 | void do_##name(struct pt_regs * regs, long error_code) \ | 579 | void do_##name(struct pt_regs *regs, long error_code) \ |
570 | { \ | 580 | { \ |
571 | siginfo_t info; \ | 581 | siginfo_t info; \ |
572 | info.si_signo = signr; \ | 582 | info.si_signo = signr; \ |
573 | info.si_errno = 0; \ | 583 | info.si_errno = 0; \ |
574 | info.si_code = sicode; \ | 584 | info.si_code = sicode; \ |
575 | info.si_addr = (void __user *)siaddr; \ | 585 | info.si_addr = (void __user *)siaddr; \ |
576 | trace_hardirqs_fixup(); \ | 586 | trace_hardirqs_fixup(); \ |
577 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 587 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
578 | == NOTIFY_STOP) \ | 588 | == NOTIFY_STOP) \ |
579 | return; \ | 589 | return; \ |
580 | do_trap(trapnr, signr, str, 1, regs, error_code, &info); \ | 590 | do_trap(trapnr, signr, str, 1, regs, error_code, &info); \ |
581 | } | 591 | } |
582 | 592 | ||
583 | DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip) | 593 | DO_VM86_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip) |
584 | #ifndef CONFIG_KPROBES | 594 | #ifndef CONFIG_KPROBES |
585 | DO_VM86_ERROR( 3, SIGTRAP, "int3", int3) | 595 | DO_VM86_ERROR(3, SIGTRAP, "int3", int3) |
586 | #endif | 596 | #endif |
587 | DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow) | 597 | DO_VM86_ERROR(4, SIGSEGV, "overflow", overflow) |
588 | DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds) | 598 | DO_VM86_ERROR(5, SIGSEGV, "bounds", bounds) |
589 | DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip, 0) | 599 | DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip, 0) |
590 | DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) | 600 | DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) |
591 | DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) | 601 | DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) |
592 | DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) | 602 | DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) |
593 | DO_ERROR(12, SIGBUS, "stack segment", stack_segment) | 603 | DO_ERROR(12, SIGBUS, "stack segment", stack_segment) |
594 | DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0) | 604 | DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0) |
595 | DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1) | 605 | DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1) |
596 | 606 | ||
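Each DO_ERROR* line above stamps out a complete trap handler from the corresponding macro. For example, DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) expands to roughly:

/* Approximate expansion of DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS): */
void do_invalid_TSS(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code, 10, SIGSEGV)
			== NOTIFY_STOP)
		return;
	do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL);
}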
597 | void __kprobes do_general_protection(struct pt_regs * regs, | 607 | void __kprobes do_general_protection(struct pt_regs *regs, long error_code) |
598 | long error_code) | ||
599 | { | 608 | { |
600 | int cpu = get_cpu(); | 609 | struct thread_struct *thread; |
601 | struct tss_struct *tss = &per_cpu(init_tss, cpu); | 610 | struct tss_struct *tss; |
602 | struct thread_struct *thread = ¤t->thread; | 611 | int cpu; |
612 | |||
613 | cpu = get_cpu(); | ||
614 | tss = &per_cpu(init_tss, cpu); | ||
615 | thread = ¤t->thread; | ||
603 | 616 | ||
604 | /* | 617 | /* |
605 | * Perform the lazy TSS's I/O bitmap copy. If the TSS has an | 618 | * Perform the lazy TSS's I/O bitmap copy. If the TSS has an |
@@ -616,19 +629,21 @@ void __kprobes do_general_protection(struct pt_regs * regs, | |||
616 | * If the previously set map was extending to higher ports | 629 | * If the previously set map was extending to higher ports |
617 | * than the current one, pad extra space with 0xff (no access). | 630 | * than the current one, pad extra space with 0xff (no access). |
618 | */ | 631 | */ |
619 | if (thread->io_bitmap_max < tss->io_bitmap_max) | 632 | if (thread->io_bitmap_max < tss->io_bitmap_max) { |
620 | memset((char *) tss->io_bitmap + | 633 | memset((char *) tss->io_bitmap + |
621 | thread->io_bitmap_max, 0xff, | 634 | thread->io_bitmap_max, 0xff, |
622 | tss->io_bitmap_max - thread->io_bitmap_max); | 635 | tss->io_bitmap_max - thread->io_bitmap_max); |
636 | } | ||
623 | tss->io_bitmap_max = thread->io_bitmap_max; | 637 | tss->io_bitmap_max = thread->io_bitmap_max; |
624 | tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; | 638 | tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; |
625 | tss->io_bitmap_owner = thread; | 639 | tss->io_bitmap_owner = thread; |
626 | put_cpu(); | 640 | put_cpu(); |
641 | |||
627 | return; | 642 | return; |
628 | } | 643 | } |
629 | put_cpu(); | 644 | put_cpu(); |
630 | 645 | ||
631 | if (regs->flags & VM_MASK) | 646 | if (regs->flags & X86_VM_MASK) |
632 | goto gp_in_vm86; | 647 | goto gp_in_vm86; |
633 | 648 | ||
634 | if (!user_mode(regs)) | 649 | if (!user_mode(regs)) |
@@ -636,6 +651,7 @@ void __kprobes do_general_protection(struct pt_regs * regs, | |||
636 | 651 | ||
637 | current->thread.error_code = error_code; | 652 | current->thread.error_code = error_code; |
638 | current->thread.trap_no = 13; | 653 | current->thread.trap_no = 13; |
654 | |||
639 | if (show_unhandled_signals && unhandled_signal(current, SIGSEGV) && | 655 | if (show_unhandled_signals && unhandled_signal(current, SIGSEGV) && |
640 | printk_ratelimit()) { | 656 | printk_ratelimit()) { |
641 | printk(KERN_INFO | 657 | printk(KERN_INFO |
@@ -666,21 +682,24 @@ gp_in_kernel: | |||
666 | } | 682 | } |
667 | 683 | ||
668 | static __kprobes void | 684 | static __kprobes void |
669 | mem_parity_error(unsigned char reason, struct pt_regs * regs) | 685 | mem_parity_error(unsigned char reason, struct pt_regs *regs) |
670 | { | 686 | { |
671 | printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on " | 687 | printk(KERN_EMERG |
672 | "CPU %d.\n", reason, smp_processor_id()); | 688 | "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", |
673 | printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n"); | 689 | reason, smp_processor_id()); |
690 | |||
691 | printk(KERN_EMERG | ||
692 | "You have some hardware problem, likely on the PCI bus.\n"); | ||
674 | 693 | ||
675 | #if defined(CONFIG_EDAC) | 694 | #if defined(CONFIG_EDAC) |
676 | if(edac_handler_set()) { | 695 | if (edac_handler_set()) { |
677 | edac_atomic_assert_error(); | 696 | edac_atomic_assert_error(); |
678 | return; | 697 | return; |
679 | } | 698 | } |
680 | #endif | 699 | #endif |
681 | 700 | ||
682 | if (panic_on_unrecovered_nmi) | 701 | if (panic_on_unrecovered_nmi) |
683 | panic("NMI: Not continuing"); | 702 | panic("NMI: Not continuing"); |
684 | 703 | ||
685 | printk(KERN_EMERG "Dazed and confused, but trying to continue\n"); | 704 | printk(KERN_EMERG "Dazed and confused, but trying to continue\n"); |
686 | 705 | ||
@@ -689,7 +708,7 @@ mem_parity_error(unsigned char reason, struct pt_regs * regs) | |||
689 | } | 708 | } |
690 | 709 | ||
691 | static __kprobes void | 710 | static __kprobes void |
692 | io_check_error(unsigned char reason, struct pt_regs * regs) | 711 | io_check_error(unsigned char reason, struct pt_regs *regs) |
693 | { | 712 | { |
694 | unsigned long i; | 713 | unsigned long i; |
695 | 714 | ||
@@ -699,28 +718,37 @@ io_check_error(unsigned char reason, struct pt_regs * regs) | |||
699 | /* Re-enable the IOCK line, wait for a few seconds */ | 718 | /* Re-enable the IOCK line, wait for a few seconds */ |
700 | reason = (reason & 0xf) | 8; | 719 | reason = (reason & 0xf) | 8; |
701 | outb(reason, 0x61); | 720 | outb(reason, 0x61); |
721 | |||
702 | i = 2000; | 722 | i = 2000; |
703 | while (--i) udelay(1000); | 723 | while (--i) |
724 | udelay(1000); | ||
725 | |||
704 | reason &= ~8; | 726 | reason &= ~8; |
705 | outb(reason, 0x61); | 727 | outb(reason, 0x61); |
706 | } | 728 | } |
707 | 729 | ||
708 | static __kprobes void | 730 | static __kprobes void |
709 | unknown_nmi_error(unsigned char reason, struct pt_regs * regs) | 731 | unknown_nmi_error(unsigned char reason, struct pt_regs *regs) |
710 | { | 732 | { |
733 | if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) | ||
734 | return; | ||
711 | #ifdef CONFIG_MCA | 735 | #ifdef CONFIG_MCA |
712 | /* Might actually be able to figure out what the guilty party | 736 | /* |
713 | * is. */ | 737 | * Might actually be able to figure out what the guilty party |
714 | if( MCA_bus ) { | 738 | * is: |
739 | */ | ||
740 | if (MCA_bus) { | ||
715 | mca_handle_nmi(); | 741 | mca_handle_nmi(); |
716 | return; | 742 | return; |
717 | } | 743 | } |
718 | #endif | 744 | #endif |
719 | printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on " | 745 | printk(KERN_EMERG |
720 | "CPU %d.\n", reason, smp_processor_id()); | 746 | "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", |
747 | reason, smp_processor_id()); | ||
748 | |||
721 | printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n"); | 749 | printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n"); |
722 | if (panic_on_unrecovered_nmi) | 750 | if (panic_on_unrecovered_nmi) |
723 | panic("NMI: Not continuing"); | 751 | panic("NMI: Not continuing"); |
724 | 752 | ||
725 | printk(KERN_EMERG "Dazed and confused, but trying to continue\n"); | 753 | printk(KERN_EMERG "Dazed and confused, but trying to continue\n"); |
726 | } | 754 | } |
@@ -729,14 +757,13 @@ static DEFINE_SPINLOCK(nmi_print_lock); | |||
729 | 757 | ||
730 | void __kprobes die_nmi(struct pt_regs *regs, const char *msg) | 758 | void __kprobes die_nmi(struct pt_regs *regs, const char *msg) |
731 | { | 759 | { |
732 | if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) == | 760 | if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) == NOTIFY_STOP) |
733 | NOTIFY_STOP) | ||
734 | return; | 761 | return; |
735 | 762 | ||
736 | spin_lock(&nmi_print_lock); | 763 | spin_lock(&nmi_print_lock); |
737 | /* | 764 | /* |
738 | * We are in trouble anyway, let's at least try | 765 | * We are in trouble anyway, let's at least try |
739 | * to get a message out. | 766 | * to get a message out: |
740 | */ | 767 | */ |
741 | bust_spinlocks(1); | 768 | bust_spinlocks(1); |
742 | printk(KERN_EMERG "%s", msg); | 769 | printk(KERN_EMERG "%s", msg); |
@@ -747,9 +774,10 @@ void __kprobes die_nmi(struct pt_regs *regs, const char *msg) | |||
747 | spin_unlock(&nmi_print_lock); | 774 | spin_unlock(&nmi_print_lock); |
748 | bust_spinlocks(0); | 775 | bust_spinlocks(0); |
749 | 776 | ||
750 | /* If we are in kernel we are probably nested up pretty bad | 777 | /* |
751 | * and might as well get out now while we still can. | 778 | * If we are in kernel we are probably nested up pretty bad |
752 | */ | 779 | * and might as well get out now while we still can: |
780 | */ | ||
753 | if (!user_mode_vm(regs)) { | 781 | if (!user_mode_vm(regs)) { |
754 | current->thread.trap_no = 2; | 782 | current->thread.trap_no = 2; |
755 | crash_kexec(regs); | 783 | crash_kexec(regs); |
@@ -758,14 +786,14 @@ void __kprobes die_nmi(struct pt_regs *regs, const char *msg) | |||
758 | do_exit(SIGSEGV); | 786 | do_exit(SIGSEGV); |
759 | } | 787 | } |
760 | 788 | ||
761 | static __kprobes void default_do_nmi(struct pt_regs * regs) | 789 | static __kprobes void default_do_nmi(struct pt_regs *regs) |
762 | { | 790 | { |
763 | unsigned char reason = 0; | 791 | unsigned char reason = 0; |
764 | 792 | ||
765 | /* Only the BSP gets external NMIs from the system. */ | 793 | /* Only the BSP gets external NMIs from the system: */ |
766 | if (!smp_processor_id()) | 794 | if (!smp_processor_id()) |
767 | reason = get_nmi_reason(); | 795 | reason = get_nmi_reason(); |
768 | 796 | ||
769 | if (!(reason & 0xc0)) { | 797 | if (!(reason & 0xc0)) { |
770 | if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT) | 798 | if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT) |
771 | == NOTIFY_STOP) | 799 | == NOTIFY_STOP) |
@@ -778,8 +806,10 @@ static __kprobes void default_do_nmi(struct pt_regs * regs) | |||
778 | if (nmi_watchdog_tick(regs, reason)) | 806 | if (nmi_watchdog_tick(regs, reason)) |
779 | return; | 807 | return; |
780 | if (!do_nmi_callback(regs, smp_processor_id())) | 808 | if (!do_nmi_callback(regs, smp_processor_id())) |
781 | #endif | ||
782 | unknown_nmi_error(reason, regs); | 809 | unknown_nmi_error(reason, regs); |
810 | #else | ||
811 | unknown_nmi_error(reason, regs); | ||
812 | #endif | ||
783 | 813 | ||
784 | return; | 814 | return; |
785 | } | 815 | } |
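The dispatch in default_do_nmi() keys off the NMI status byte read from port 0x61: on classic PCs bit 7 reports a memory (SERR#) parity error and bit 6 an I/O channel check, which is why !(reason & 0xc0) selects the unknown-NMI path. A sketch of the bit meanings (the symbolic names are assumptions; this file tests the raw 0x80/0x40 bits):

#define NMI_REASON_PARITY	0x80	/* bit 7: memory parity / SERR# */
#define NMI_REASON_IOCHK	0x40	/* bit 6: I/O channel check     */

static void dispatch_nmi_reason(unsigned char reason, struct pt_regs *regs)
{
	if (reason & NMI_REASON_PARITY)
		mem_parity_error(reason, regs);
	if (reason & NMI_REASON_IOCHK)
		io_check_error(reason, regs);
}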
@@ -791,14 +821,14 @@ static __kprobes void default_do_nmi(struct pt_regs * regs) | |||
791 | io_check_error(reason, regs); | 821 | io_check_error(reason, regs); |
792 | /* | 822 | /* |
793 | * Reassert NMI in case it became active meanwhile | 823 | * Reassert NMI in case it became active meanwhile |
794 | * as it's edge-triggered. | 824 | * as it's edge-triggered: |
795 | */ | 825 | */ |
796 | reassert_nmi(); | 826 | reassert_nmi(); |
797 | } | 827 | } |
798 | 828 | ||
799 | static int ignore_nmis; | 829 | static int ignore_nmis; |
800 | 830 | ||
801 | __kprobes void do_nmi(struct pt_regs * regs, long error_code) | 831 | __kprobes void do_nmi(struct pt_regs *regs, long error_code) |
802 | { | 832 | { |
803 | int cpu; | 833 | int cpu; |
804 | 834 | ||
@@ -834,9 +864,12 @@ void __kprobes do_int3(struct pt_regs *regs, long error_code) | |||
834 | if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) | 864 | if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) |
835 | == NOTIFY_STOP) | 865 | == NOTIFY_STOP) |
836 | return; | 866 | return; |
837 | /* This is an interrupt gate, because kprobes wants interrupts | 867 | /* |
838 | disabled. Normal trap handlers don't. */ | 868 | * This is an interrupt gate, because kprobes wants interrupts |
869 | * disabled. Normal trap handlers don't. | ||
870 | */ | ||
839 | restore_interrupts(regs); | 871 | restore_interrupts(regs); |
872 | |||
840 | do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL); | 873 | do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL); |
841 | } | 874 | } |
842 | #endif | 875 | #endif |
@@ -851,7 +884,7 @@ void __kprobes do_int3(struct pt_regs *regs, long error_code) | |||
851 | * from user space. Such code must not hold kernel locks (since it | 884 | * from user space. Such code must not hold kernel locks (since it |
852 | * can equally take a page fault), therefore it is safe to call | 885 | * can equally take a page fault), therefore it is safe to call |
853 | * force_sig_info even though that claims and releases locks. | 886 | * force_sig_info even though that claims and releases locks. |
854 | * | 887 | * |
855 | * Code in ./signal.c ensures that the debug control register | 888 | * Code in ./signal.c ensures that the debug control register |
856 | * is restored before we deliver any signal, and therefore that | 889 | * is restored before we deliver any signal, and therefore that |
857 | * user code runs with the correct debug control register even though | 890 | * user code runs with the correct debug control register even though |
@@ -863,10 +896,10 @@ void __kprobes do_int3(struct pt_regs *regs, long error_code) | |||
863 | * find every occurrence of the TF bit that could be saved away even | 896 | * find every occurrence of the TF bit that could be saved away even |
864 | * by user code) | 897 | * by user code) |
865 | */ | 898 | */ |
866 | void __kprobes do_debug(struct pt_regs * regs, long error_code) | 899 | void __kprobes do_debug(struct pt_regs *regs, long error_code) |
867 | { | 900 | { |
868 | unsigned int condition; | ||
869 | struct task_struct *tsk = current; | 901 | struct task_struct *tsk = current; |
902 | unsigned int condition; | ||
870 | 903 | ||
871 | trace_hardirqs_fixup(); | 904 | trace_hardirqs_fixup(); |
872 | 905 | ||
@@ -891,7 +924,7 @@ void __kprobes do_debug(struct pt_regs * regs, long error_code) | |||
891 | goto clear_dr7; | 924 | goto clear_dr7; |
892 | } | 925 | } |
893 | 926 | ||
894 | if (regs->flags & VM_MASK) | 927 | if (regs->flags & X86_VM_MASK) |
895 | goto debug_vm86; | 928 | goto debug_vm86; |
896 | 929 | ||
897 | /* Save debug status register where ptrace can see it */ | 930 | /* Save debug status register where ptrace can see it */ |
@@ -914,7 +947,8 @@ void __kprobes do_debug(struct pt_regs * regs, long error_code) | |||
914 | /* Ok, finally something we can handle */ | 947 | /* Ok, finally something we can handle */ |
915 | send_sigtrap(tsk, regs, error_code); | 948 | send_sigtrap(tsk, regs, error_code); |
916 | 949 | ||
917 | /* Disable additional traps. They'll be re-enabled when | 950 | /* |
951 | * Disable additional traps. They'll be re-enabled when | ||
918 | * the signal is delivered. | 952 | * the signal is delivered. |
919 | */ | 953 | */ |
920 | clear_dr7: | 954 | clear_dr7: |
@@ -927,7 +961,7 @@ debug_vm86: | |||
927 | 961 | ||
928 | clear_TF_reenable: | 962 | clear_TF_reenable: |
929 | set_tsk_thread_flag(tsk, TIF_SINGLESTEP); | 963 | set_tsk_thread_flag(tsk, TIF_SINGLESTEP); |
930 | regs->flags &= ~TF_MASK; | 964 | regs->flags &= ~X86_EFLAGS_TF; |
931 | return; | 965 | return; |
932 | } | 966 | } |
933 | 967 | ||
@@ -938,9 +972,10 @@ clear_TF_reenable: | |||
938 | */ | 972 | */ |
939 | void math_error(void __user *ip) | 973 | void math_error(void __user *ip) |
940 | { | 974 | { |
941 | struct task_struct * task; | 975 | struct task_struct *task; |
976 | unsigned short cwd; | ||
977 | unsigned short swd; | ||
942 | siginfo_t info; | 978 | siginfo_t info; |
943 | unsigned short cwd, swd; | ||
944 | 979 | ||
945 | /* | 980 | /* |
946 | * Save the info for the exception handler and clear the error. | 981 | * Save the info for the exception handler and clear the error. |
@@ -966,36 +1001,36 @@ void math_error(void __user *ip) | |||
966 | cwd = get_fpu_cwd(task); | 1001 | cwd = get_fpu_cwd(task); |
967 | swd = get_fpu_swd(task); | 1002 | swd = get_fpu_swd(task); |
968 | switch (swd & ~cwd & 0x3f) { | 1003 | switch (swd & ~cwd & 0x3f) { |
969 | case 0x000: /* No unmasked exception */ | 1004 | case 0x000: /* No unmasked exception */ |
970 | return; | 1005 | return; |
971 | default: /* Multiple exceptions */ | 1006 | default: /* Multiple exceptions */ |
972 | break; | 1007 | break; |
973 | case 0x001: /* Invalid Op */ | 1008 | case 0x001: /* Invalid Op */ |
974 | /* | 1009 | /* |
975 | * swd & 0x240 == 0x040: Stack Underflow | 1010 | * swd & 0x240 == 0x040: Stack Underflow |
976 | * swd & 0x240 == 0x240: Stack Overflow | 1011 | * swd & 0x240 == 0x240: Stack Overflow |
977 | * User must clear the SF bit (0x40) if set | 1012 | * User must clear the SF bit (0x40) if set |
978 | */ | 1013 | */ |
979 | info.si_code = FPE_FLTINV; | 1014 | info.si_code = FPE_FLTINV; |
980 | break; | 1015 | break; |
981 | case 0x002: /* Denormalize */ | 1016 | case 0x002: /* Denormalize */ |
982 | case 0x010: /* Underflow */ | 1017 | case 0x010: /* Underflow */ |
983 | info.si_code = FPE_FLTUND; | 1018 | info.si_code = FPE_FLTUND; |
984 | break; | 1019 | break; |
985 | case 0x004: /* Zero Divide */ | 1020 | case 0x004: /* Zero Divide */ |
986 | info.si_code = FPE_FLTDIV; | 1021 | info.si_code = FPE_FLTDIV; |
987 | break; | 1022 | break; |
988 | case 0x008: /* Overflow */ | 1023 | case 0x008: /* Overflow */ |
989 | info.si_code = FPE_FLTOVF; | 1024 | info.si_code = FPE_FLTOVF; |
990 | break; | 1025 | break; |
991 | case 0x020: /* Precision */ | 1026 | case 0x020: /* Precision */ |
992 | info.si_code = FPE_FLTRES; | 1027 | info.si_code = FPE_FLTRES; |
993 | break; | 1028 | break; |
994 | } | 1029 | } |
995 | force_sig_info(SIGFPE, &info, task); | 1030 | force_sig_info(SIGFPE, &info, task); |
996 | } | 1031 | } |
997 | 1032 | ||
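The switch above keys on swd & ~cwd & 0x3f: the low six bits of the FPU status word are the exception flags, the same bit positions in the control word are their mask bits, so the expression isolates exceptions that are both raised and unmasked. A worked example:

/* Zero-divide unmasked and raised -> FPE_FLTDIV via case 0x004. */
unsigned short cwd = 0x037b;	/* default 0x037f with ZE mask (bit 2) cleared */
unsigned short swd = 0x0004;	/* ZE (zero-divide) exception flag set         */

/* ~cwd & 0x3f == 0x04, so swd & ~cwd & 0x3f == 0x04
 * -> case 0x004 above sets info.si_code = FPE_FLTDIV. */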
998 | void do_coprocessor_error(struct pt_regs * regs, long error_code) | 1033 | void do_coprocessor_error(struct pt_regs *regs, long error_code) |
999 | { | 1034 | { |
1000 | ignore_fpu_irq = 1; | 1035 | ignore_fpu_irq = 1; |
1001 | math_error((void __user *)regs->ip); | 1036 | math_error((void __user *)regs->ip); |
@@ -1003,9 +1038,9 @@ void do_coprocessor_error(struct pt_regs * regs, long error_code) | |||
1003 | 1038 | ||
1004 | static void simd_math_error(void __user *ip) | 1039 | static void simd_math_error(void __user *ip) |
1005 | { | 1040 | { |
1006 | struct task_struct * task; | 1041 | struct task_struct *task; |
1007 | siginfo_t info; | ||
1008 | unsigned short mxcsr; | 1042 | unsigned short mxcsr; |
1043 | siginfo_t info; | ||
1009 | 1044 | ||
1010 | /* | 1045 | /* |
1011 | * Save the info for the exception handler and clear the error. | 1046 | * Save the info for the exception handler and clear the error. |
@@ -1026,82 +1061,80 @@ static void simd_math_error(void __user *ip) | |||
1026 | */ | 1061 | */ |
1027 | mxcsr = get_fpu_mxcsr(task); | 1062 | mxcsr = get_fpu_mxcsr(task); |
1028 | switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) { | 1063 | switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) { |
1029 | case 0x000: | 1064 | case 0x000: |
1030 | default: | 1065 | default: |
1031 | break; | 1066 | break; |
1032 | case 0x001: /* Invalid Op */ | 1067 | case 0x001: /* Invalid Op */ |
1033 | info.si_code = FPE_FLTINV; | 1068 | info.si_code = FPE_FLTINV; |
1034 | break; | 1069 | break; |
1035 | case 0x002: /* Denormalize */ | 1070 | case 0x002: /* Denormalize */ |
1036 | case 0x010: /* Underflow */ | 1071 | case 0x010: /* Underflow */ |
1037 | info.si_code = FPE_FLTUND; | 1072 | info.si_code = FPE_FLTUND; |
1038 | break; | 1073 | break; |
1039 | case 0x004: /* Zero Divide */ | 1074 | case 0x004: /* Zero Divide */ |
1040 | info.si_code = FPE_FLTDIV; | 1075 | info.si_code = FPE_FLTDIV; |
1041 | break; | 1076 | break; |
1042 | case 0x008: /* Overflow */ | 1077 | case 0x008: /* Overflow */ |
1043 | info.si_code = FPE_FLTOVF; | 1078 | info.si_code = FPE_FLTOVF; |
1044 | break; | 1079 | break; |
1045 | case 0x020: /* Precision */ | 1080 | case 0x020: /* Precision */ |
1046 | info.si_code = FPE_FLTRES; | 1081 | info.si_code = FPE_FLTRES; |
1047 | break; | 1082 | break; |
1048 | } | 1083 | } |
1049 | force_sig_info(SIGFPE, &info, task); | 1084 | force_sig_info(SIGFPE, &info, task); |
1050 | } | 1085 | } |
1051 | 1086 | ||
1052 | void do_simd_coprocessor_error(struct pt_regs * regs, | 1087 | void do_simd_coprocessor_error(struct pt_regs *regs, long error_code) |
1053 | long error_code) | ||
1054 | { | 1088 | { |
1055 | if (cpu_has_xmm) { | 1089 | if (cpu_has_xmm) { |
1056 | /* Handle SIMD FPU exceptions on PIII+ processors. */ | 1090 | /* Handle SIMD FPU exceptions on PIII+ processors. */ |
1057 | ignore_fpu_irq = 1; | 1091 | ignore_fpu_irq = 1; |
1058 | simd_math_error((void __user *)regs->ip); | 1092 | simd_math_error((void __user *)regs->ip); |
1059 | } else { | 1093 | return; |
1060 | /* | 1094 | } |
1061 | * Handle strange cache flush from user space exception | 1095 | /* |
1062 | * in all other cases. This is undocumented behaviour. | 1096 | * Handle strange cache flush from user space exception |
1063 | */ | 1097 | * in all other cases. This is undocumented behaviour. |
1064 | if (regs->flags & VM_MASK) { | 1098 | */ |
1065 | handle_vm86_fault((struct kernel_vm86_regs *)regs, | 1099 | if (regs->flags & X86_VM_MASK) { |
1066 | error_code); | 1100 | handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code); |
1067 | return; | 1101 | return; |
1068 | } | ||
1069 | current->thread.trap_no = 19; | ||
1070 | current->thread.error_code = error_code; | ||
1071 | die_if_kernel("cache flush denied", regs, error_code); | ||
1072 | force_sig(SIGSEGV, current); | ||
1073 | } | 1102 | } |
1103 | current->thread.trap_no = 19; | ||
1104 | current->thread.error_code = error_code; | ||
1105 | die_if_kernel("cache flush denied", regs, error_code); | ||
1106 | force_sig(SIGSEGV, current); | ||
1074 | } | 1107 | } |
1075 | 1108 | ||
1076 | void do_spurious_interrupt_bug(struct pt_regs * regs, | 1109 | void do_spurious_interrupt_bug(struct pt_regs *regs, long error_code) |
1077 | long error_code) | ||
1078 | { | 1110 | { |
1079 | #if 0 | 1111 | #if 0 |
1080 | /* No need to warn about this any longer. */ | 1112 | /* No need to warn about this any longer. */ |
1081 | printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n"); | 1113 | printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n"); |
1082 | #endif | 1114 | #endif |
1083 | } | 1115 | } |
1084 | 1116 | ||
1085 | unsigned long patch_espfix_desc(unsigned long uesp, | 1117 | unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp) |
1086 | unsigned long kesp) | ||
1087 | { | 1118 | { |
1088 | struct desc_struct *gdt = __get_cpu_var(gdt_page).gdt; | 1119 | struct desc_struct *gdt = __get_cpu_var(gdt_page).gdt; |
1089 | unsigned long base = (kesp - uesp) & -THREAD_SIZE; | 1120 | unsigned long base = (kesp - uesp) & -THREAD_SIZE; |
1090 | unsigned long new_kesp = kesp - base; | 1121 | unsigned long new_kesp = kesp - base; |
1091 | unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT; | 1122 | unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT; |
1092 | __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS]; | 1123 | __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS]; |
1124 | |||
1093 | /* Set up base for espfix segment */ | 1125 | /* Set up base for espfix segment */ |
1094 | desc &= 0x00f0ff0000000000ULL; | 1126 | desc &= 0x00f0ff0000000000ULL; |
1095 | desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) | | 1127 | desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) | |
1096 | ((((__u64)base) << 32) & 0xff00000000000000ULL) | | 1128 | ((((__u64)base) << 32) & 0xff00000000000000ULL) | |
1097 | ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) | | 1129 | ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) | |
1098 | (lim_pages & 0xffff); | 1130 | (lim_pages & 0xffff); |
1099 | *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc; | 1131 | *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc; |
1132 | |||
1100 | return new_kesp; | 1133 | return new_kesp; |
1101 | } | 1134 | } |
1102 | 1135 | ||
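The masking in patch_espfix_desc() follows the standard x86 segment-descriptor layout: limit[15:0] occupies descriptor bits 0-15, base[23:0] bits 16-39, limit[19:16] bits 48-51, and base[31:24] bits 56-63. The same packing, spelled out as a helper:

/* Sketch of the descriptor packing above; type/flag bits preserved. */
static __u64 pack_desc_base_limit(__u64 desc, __u64 base, __u64 lim_pages)
{
	desc &= 0x00f0ff0000000000ULL;			   /* keep type/flags      */
	desc |= (base << 16) & 0x000000ffffff0000ULL;	   /* base[23:0]  -> 16-39 */
	desc |= (base << 32) & 0xff00000000000000ULL;	   /* base[31:24] -> 56-63 */
	desc |= (lim_pages << 32) & 0x000f000000000000ULL; /* lim[19:16]  -> 48-51 */
	desc |= lim_pages & 0xffff;			   /* lim[15:0]   -> 0-15  */
	return desc;
}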
1103 | /* | 1136 | /* |
1104 | * 'math_state_restore()' saves the current math information in the | 1137 | * 'math_state_restore()' saves the current math information in the |
1105 | * old math state array, and gets the new ones from the current task | 1138 | * old math state array, and gets the new ones from the current task |
1106 | * | 1139 | * |
1107 | * Careful.. There are problems with IBM-designed IRQ13 behaviour. | 1140 | * Careful.. There are problems with IBM-designed IRQ13 behaviour. |
@@ -1115,7 +1148,7 @@ asmlinkage void math_state_restore(void) | |||
1115 | struct thread_info *thread = current_thread_info(); | 1148 | struct thread_info *thread = current_thread_info(); |
1116 | struct task_struct *tsk = thread->task; | 1149 | struct task_struct *tsk = thread->task; |
1117 | 1150 | ||
1118 | clts(); /* Allow maths ops (or we recurse) */ | 1151 | clts(); /* Allow maths ops (or we recurse) */ |
1119 | if (!tsk_used_math(tsk)) | 1152 | if (!tsk_used_math(tsk)) |
1120 | init_fpu(tsk); | 1153 | init_fpu(tsk); |
1121 | restore_fpu(tsk); | 1154 | restore_fpu(tsk); |
@@ -1128,53 +1161,52 @@ EXPORT_SYMBOL_GPL(math_state_restore); | |||
1128 | 1161 | ||
1129 | asmlinkage void math_emulate(long arg) | 1162 | asmlinkage void math_emulate(long arg) |
1130 | { | 1163 | { |
1131 | printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n"); | 1164 | printk(KERN_EMERG |
1132 | printk(KERN_EMERG "killing %s.\n",current->comm); | 1165 | "math-emulation not enabled and no coprocessor found.\n"); |
1133 | force_sig(SIGFPE,current); | 1166 | printk(KERN_EMERG "killing %s.\n", current->comm); |
1167 | force_sig(SIGFPE, current); | ||
1134 | schedule(); | 1168 | schedule(); |
1135 | } | 1169 | } |
1136 | 1170 | ||
1137 | #endif /* CONFIG_MATH_EMULATION */ | 1171 | #endif /* CONFIG_MATH_EMULATION */ |
1138 | 1172 | ||
1139 | |||
1140 | void __init trap_init(void) | 1173 | void __init trap_init(void) |
1141 | { | 1174 | { |
1142 | int i; | 1175 | int i; |
1143 | 1176 | ||
1144 | #ifdef CONFIG_EISA | 1177 | #ifdef CONFIG_EISA |
1145 | void __iomem *p = early_ioremap(0x0FFFD9, 4); | 1178 | void __iomem *p = early_ioremap(0x0FFFD9, 4); |
1146 | if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) { | 1179 | |
1180 | if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24)) | ||
1147 | EISA_bus = 1; | 1181 | EISA_bus = 1; |
1148 | } | ||
1149 | early_iounmap(p, 4); | 1182 | early_iounmap(p, 4); |
1150 | #endif | 1183 | #endif |
1151 | 1184 | ||
1152 | #ifdef CONFIG_X86_LOCAL_APIC | 1185 | #ifdef CONFIG_X86_LOCAL_APIC |
1153 | init_apic_mappings(); | 1186 | init_apic_mappings(); |
1154 | #endif | 1187 | #endif |
1155 | 1188 | set_trap_gate(0, &divide_error); |
1156 | set_trap_gate(0,&divide_error); | 1189 | set_intr_gate(1, &debug); |
1157 | set_intr_gate(1,&debug); | 1190 | set_intr_gate(2, &nmi); |
1158 | set_intr_gate(2,&nmi); | ||
1159 | set_system_intr_gate(3, &int3); /* int3/4 can be called from all */ | 1191 | set_system_intr_gate(3, &int3); /* int3/4 can be called from all */ |
1160 | set_system_gate(4,&overflow); | 1192 | set_system_gate(4, &overflow); |
1161 | set_trap_gate(5,&bounds); | 1193 | set_trap_gate(5, &bounds); |
1162 | set_trap_gate(6,&invalid_op); | 1194 | set_trap_gate(6, &invalid_op); |
1163 | set_trap_gate(7,&device_not_available); | 1195 | set_trap_gate(7, &device_not_available); |
1164 | set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS); | 1196 | set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS); |
1165 | set_trap_gate(9,&coprocessor_segment_overrun); | 1197 | set_trap_gate(9, &coprocessor_segment_overrun); |
1166 | set_trap_gate(10,&invalid_TSS); | 1198 | set_trap_gate(10, &invalid_TSS); |
1167 | set_trap_gate(11,&segment_not_present); | 1199 | set_trap_gate(11, &segment_not_present); |
1168 | set_trap_gate(12,&stack_segment); | 1200 | set_trap_gate(12, &stack_segment); |
1169 | set_trap_gate(13,&general_protection); | 1201 | set_trap_gate(13, &general_protection); |
1170 | set_intr_gate(14,&page_fault); | 1202 | set_intr_gate(14, &page_fault); |
1171 | set_trap_gate(15,&spurious_interrupt_bug); | 1203 | set_trap_gate(15, &spurious_interrupt_bug); |
1172 | set_trap_gate(16,&coprocessor_error); | 1204 | set_trap_gate(16, &coprocessor_error); |
1173 | set_trap_gate(17,&alignment_check); | 1205 | set_trap_gate(17, &alignment_check); |
1174 | #ifdef CONFIG_X86_MCE | 1206 | #ifdef CONFIG_X86_MCE |
1175 | set_trap_gate(18,&machine_check); | 1207 | set_trap_gate(18, &machine_check); |
1176 | #endif | 1208 | #endif |
1177 | set_trap_gate(19,&simd_coprocessor_error); | 1209 | set_trap_gate(19, &simd_coprocessor_error); |
1178 | 1210 | ||
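The choice of setter per vector above matters. As conventionally implemented on 32-bit x86 Linux of this era (an assumption; the definitions live in desc.h, not in this hunk), trap gates leave interrupts enabled on entry, interrupt gates clear IF, and the "system" variants set DPL 3 so userspace int instructions can reach them:

/* Assumed gate semantics behind the calls above:                   */
/*   set_trap_gate(n, fn)        DPL 0, trap gate (IF unchanged)    */
/*   set_intr_gate(n, fn)        DPL 0, interrupt gate (IF cleared) */
/*   set_system_gate(n, fn)      DPL 3 trap gate, e.g. int $0x80    */
/*   set_system_intr_gate(n, fn) DPL 3 interrupt gate (int3)        */
/*   set_task_gate(n, gdt_idx)   task gate, e.g. double-fault TSS   */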
1179 | /* | 1211 | /* |
1180 | * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned. | 1212 | * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned. |
@@ -1187,21 +1219,22 @@ void __init trap_init(void) | |||
1187 | printk("done.\n"); | 1219 | printk("done.\n"); |
1188 | } | 1220 | } |
1189 | if (cpu_has_xmm) { | 1221 | if (cpu_has_xmm) { |
1190 | printk(KERN_INFO "Enabling unmasked SIMD FPU exception " | 1222 | printk(KERN_INFO |
1191 | "support... "); | 1223 | "Enabling unmasked SIMD FPU exception support... "); |
1192 | set_in_cr4(X86_CR4_OSXMMEXCPT); | 1224 | set_in_cr4(X86_CR4_OSXMMEXCPT); |
1193 | printk("done.\n"); | 1225 | printk("done.\n"); |
1194 | } | 1226 | } |
1195 | 1227 | ||
1196 | set_system_gate(SYSCALL_VECTOR,&system_call); | 1228 | set_system_gate(SYSCALL_VECTOR, &system_call); |
1197 | 1229 | ||
1198 | /* Reserve all the builtin and the syscall vector. */ | 1230 | /* Reserve all the builtin and the syscall vector: */ |
1199 | for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) | 1231 | for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) |
1200 | set_bit(i, used_vectors); | 1232 | set_bit(i, used_vectors); |
1233 | |||
1201 | set_bit(SYSCALL_VECTOR, used_vectors); | 1234 | set_bit(SYSCALL_VECTOR, used_vectors); |
1202 | 1235 | ||
1203 | /* | 1236 | /* |
1204 | * Should be a barrier for any external CPU state. | 1237 | * Should be a barrier for any external CPU state: |
1205 | */ | 1238 | */ |
1206 | cpu_init(); | 1239 | cpu_init(); |
1207 | 1240 | ||
@@ -1211,6 +1244,7 @@ void __init trap_init(void) | |||
1211 | static int __init kstack_setup(char *s) | 1244 | static int __init kstack_setup(char *s) |
1212 | { | 1245 | { |
1213 | kstack_depth_to_print = simple_strtoul(s, NULL, 0); | 1246 | kstack_depth_to_print = simple_strtoul(s, NULL, 0); |
1247 | |||
1214 | return 1; | 1248 | return 1; |
1215 | } | 1249 | } |
1216 | __setup("kstack=", kstack_setup); | 1250 | __setup("kstack=", kstack_setup); |
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c index 045466681911..79aa6fc0815c 100644 --- a/arch/x86/kernel/traps_64.c +++ b/arch/x86/kernel/traps_64.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #include <linux/kdebug.h> | 33 | #include <linux/kdebug.h> |
34 | #include <linux/utsname.h> | 34 | #include <linux/utsname.h> |
35 | 35 | ||
36 | #include <mach_traps.h> | ||
37 | |||
36 | #if defined(CONFIG_EDAC) | 38 | #if defined(CONFIG_EDAC) |
37 | #include <linux/edac.h> | 39 | #include <linux/edac.h> |
38 | #endif | 40 | #endif |
@@ -600,8 +602,13 @@ void die(const char * str, struct pt_regs * regs, long err) | |||
600 | 602 | ||
601 | void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic) | 603 | void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic) |
602 | { | 604 | { |
603 | unsigned long flags = oops_begin(); | 605 | unsigned long flags; |
604 | 606 | ||
607 | if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == | ||
608 | NOTIFY_STOP) | ||
609 | return; | ||
610 | |||
611 | flags = oops_begin(); | ||
605 | /* | 612 | /* |
606 | * We are in trouble anyway, let's at least try | 613 | * We are in trouble anyway, let's at least try |
607 | * to get a message out. | 614 | * to get a message out. |
@@ -806,6 +813,8 @@ io_check_error(unsigned char reason, struct pt_regs * regs) | |||
806 | static __kprobes void | 813 | static __kprobes void |
807 | unknown_nmi_error(unsigned char reason, struct pt_regs * regs) | 814 | unknown_nmi_error(unsigned char reason, struct pt_regs * regs) |
808 | { | 815 | { |
816 | if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) | ||
817 | return; | ||
809 | printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n", | 818 | printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n", |
810 | reason); | 819 | reason); |
811 | printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n"); | 820 | printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n"); |
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c index c2241e04ea5f..3d7e6e9fa6c2 100644 --- a/arch/x86/kernel/tsc_32.c +++ b/arch/x86/kernel/tsc_32.c | |||
@@ -84,8 +84,8 @@ DEFINE_PER_CPU(unsigned long, cyc2ns); | |||
84 | 84 | ||
85 | static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) | 85 | static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) |
86 | { | 86 | { |
87 | unsigned long flags, prev_scale, *scale; | ||
88 | unsigned long long tsc_now, ns_now; | 87 | unsigned long long tsc_now, ns_now; |
88 | unsigned long flags, *scale; | ||
89 | 89 | ||
90 | local_irq_save(flags); | 90 | local_irq_save(flags); |
91 | sched_clock_idle_sleep_event(); | 91 | sched_clock_idle_sleep_event(); |
@@ -95,7 +95,6 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) | |||
95 | rdtscll(tsc_now); | 95 | rdtscll(tsc_now); |
96 | ns_now = __cycles_2_ns(tsc_now); | 96 | ns_now = __cycles_2_ns(tsc_now); |
97 | 97 | ||
98 | prev_scale = *scale; | ||
99 | if (cpu_khz) | 98 | if (cpu_khz) |
100 | *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz; | 99 | *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz; |
101 | 100 | ||
@@ -392,13 +391,15 @@ void __init tsc_init(void) | |||
392 | int cpu; | 391 | int cpu; |
393 | 392 | ||
394 | if (!cpu_has_tsc) | 393 | if (!cpu_has_tsc) |
395 | goto out_no_tsc; | 394 | return; |
396 | 395 | ||
397 | cpu_khz = calculate_cpu_khz(); | 396 | cpu_khz = calculate_cpu_khz(); |
398 | tsc_khz = cpu_khz; | 397 | tsc_khz = cpu_khz; |
399 | 398 | ||
400 | if (!cpu_khz) | 399 | if (!cpu_khz) { |
401 | goto out_no_tsc; | 400 | mark_tsc_unstable("could not calculate TSC khz"); |
401 | return; | ||
402 | } | ||
402 | 403 | ||
403 | printk("Detected %lu.%03lu MHz processor.\n", | 404 | printk("Detected %lu.%03lu MHz processor.\n", |
404 | (unsigned long)cpu_khz / 1000, | 405 | (unsigned long)cpu_khz / 1000, |
@@ -431,9 +432,4 @@ void __init tsc_init(void) | |||
431 | tsc_enabled = 1; | 432 | tsc_enabled = 1; |
432 | 433 | ||
433 | clocksource_register(&clocksource_tsc); | 434 | clocksource_register(&clocksource_tsc); |
434 | |||
435 | return; | ||
436 | |||
437 | out_no_tsc: | ||
438 | setup_clear_cpu_cap(X86_FEATURE_TSC); | ||
439 | } | 435 | } |
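Both tsc files keep the per-CPU fixed-point scale these hunks touch: scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) / cpu_khz, with the factor shifted back out when converting cycles to nanoseconds. A worked example, assuming CYC2NS_SCALE_FACTOR = 10 as in the kernel headers of this era; the surrounding program is illustration only:

#include <stdio.h>

#define NSEC_PER_MSEC		1000000UL
#define CYC2NS_SCALE_FACTOR	10	/* 2^10, ~0.1% resolution */

int main(void)
{
	unsigned long cpu_khz = 2000000;	/* a 2 GHz TSC */
	unsigned long scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) / cpu_khz;
	unsigned long long cycles = 3000;

	/* cycles -> ns: multiply by the scale, shift the factor back out */
	unsigned long long ns = (cycles * scale) >> CYC2NS_SCALE_FACTOR;

	printf("scale=%lu, %llu cycles = %llu ns\n", scale, cycles, ns);
	/* prints: scale=512, 3000 cycles = 1500 ns (0.5 ns/cycle at 2 GHz) */
	return 0;
}

The removed prev_scale read was dead: the old value was loaded and never used, so dropping it changes nothing in the conversion.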
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c index d3bebaaad842..ceeba01e7f47 100644 --- a/arch/x86/kernel/tsc_64.c +++ b/arch/x86/kernel/tsc_64.c | |||
@@ -44,8 +44,8 @@ DEFINE_PER_CPU(unsigned long, cyc2ns); | |||
44 | 44 | ||
45 | static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) | 45 | static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) |
46 | { | 46 | { |
47 | unsigned long flags, prev_scale, *scale; | ||
48 | unsigned long long tsc_now, ns_now; | 47 | unsigned long long tsc_now, ns_now; |
48 | unsigned long flags, *scale; | ||
49 | 49 | ||
50 | local_irq_save(flags); | 50 | local_irq_save(flags); |
51 | sched_clock_idle_sleep_event(); | 51 | sched_clock_idle_sleep_event(); |
@@ -55,7 +55,6 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) | |||
55 | rdtscll(tsc_now); | 55 | rdtscll(tsc_now); |
56 | ns_now = __cycles_2_ns(tsc_now); | 56 | ns_now = __cycles_2_ns(tsc_now); |
57 | 57 | ||
58 | prev_scale = *scale; | ||
59 | if (cpu_khz) | 58 | if (cpu_khz) |
60 | *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz; | 59 | *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz; |
61 | 60 | ||
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 738c2104df30..38f566fa27d2 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c | |||
@@ -64,7 +64,7 @@ | |||
64 | 64 | ||
65 | 65 | ||
66 | #define KVM86 ((struct kernel_vm86_struct *)regs) | 66 | #define KVM86 ((struct kernel_vm86_struct *)regs) |
67 | #define VMPI KVM86->vm86plus | 67 | #define VMPI KVM86->vm86plus |
68 | 68 | ||
69 | 69 | ||
70 | /* | 70 | /* |
@@ -81,7 +81,7 @@ | |||
81 | #define VFLAGS (*(unsigned short *)&(current->thread.v86flags)) | 81 | #define VFLAGS (*(unsigned short *)&(current->thread.v86flags)) |
82 | #define VEFLAGS (current->thread.v86flags) | 82 | #define VEFLAGS (current->thread.v86flags) |
83 | 83 | ||
84 | #define set_flags(X,new,mask) \ | 84 | #define set_flags(X, new, mask) \ |
85 | ((X) = ((X) & ~(mask)) | ((new) & (mask))) | 85 | ((X) = ((X) & ~(mask)) | ((new) & (mask))) |
86 | 86 | ||
87 | #define SAFE_MASK (0xDD5) | 87 | #define SAFE_MASK (0xDD5) |
@@ -93,8 +93,10 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user, | |||
93 | { | 93 | { |
94 | int ret = 0; | 94 | int ret = 0; |
95 | 95 | ||
96 | /* kernel_vm86_regs is missing gs, so copy everything up to | 96 | /* |
97 | (but not including) orig_eax, and then rest including orig_eax. */ | 97 | * kernel_vm86_regs is missing gs, so copy everything up to |
98 | * (but not including) orig_eax, and then rest including orig_eax. | ||
99 | */ | ||
98 | ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax)); | 100 | ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax)); |
99 | ret += copy_to_user(&user->orig_eax, ®s->pt.orig_ax, | 101 | ret += copy_to_user(&user->orig_eax, ®s->pt.orig_ax, |
100 | sizeof(struct kernel_vm86_regs) - | 102 | sizeof(struct kernel_vm86_regs) - |
@@ -120,7 +122,7 @@ static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs, | |||
120 | return ret; | 122 | return ret; |
121 | } | 123 | } |
122 | 124 | ||
123 | struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs) | 125 | struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs) |
124 | { | 126 | { |
125 | struct tss_struct *tss; | 127 | struct tss_struct *tss; |
126 | struct pt_regs *ret; | 128 | struct pt_regs *ret; |
@@ -137,9 +139,9 @@ struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs) | |||
137 | printk("no vm86_info: BAD\n"); | 139 | printk("no vm86_info: BAD\n"); |
138 | do_exit(SIGSEGV); | 140 | do_exit(SIGSEGV); |
139 | } | 141 | } |
140 | set_flags(regs->pt.flags, VEFLAGS, VIF_MASK | current->thread.v86mask); | 142 | set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask); |
141 | tmp = copy_vm86_regs_to_user(¤t->thread.vm86_info->regs,regs); | 143 | tmp = copy_vm86_regs_to_user(¤t->thread.vm86_info->regs, regs); |
142 | tmp += put_user(current->thread.screen_bitmap,¤t->thread.vm86_info->screen_bitmap); | 144 | tmp += put_user(current->thread.screen_bitmap, ¤t->thread.vm86_info->screen_bitmap); |
143 | if (tmp) { | 145 | if (tmp) { |
144 | printk("vm86: could not access userspace vm86_info\n"); | 146 | printk("vm86: could not access userspace vm86_info\n"); |
145 | do_exit(SIGSEGV); | 147 | do_exit(SIGSEGV); |
@@ -237,20 +239,21 @@ asmlinkage int sys_vm86(struct pt_regs regs) | |||
237 | 239 | ||
238 | tsk = current; | 240 | tsk = current; |
239 | switch (regs.bx) { | 241 | switch (regs.bx) { |
240 | case VM86_REQUEST_IRQ: | 242 | case VM86_REQUEST_IRQ: |
241 | case VM86_FREE_IRQ: | 243 | case VM86_FREE_IRQ: |
242 | case VM86_GET_IRQ_BITS: | 244 | case VM86_GET_IRQ_BITS: |
243 | case VM86_GET_AND_RESET_IRQ: | 245 | case VM86_GET_AND_RESET_IRQ: |
244 | ret = do_vm86_irq_handling(regs.bx, (int)regs.cx); | 246 | ret = do_vm86_irq_handling(regs.bx, (int)regs.cx); |
245 | goto out; | 247 | goto out; |
246 | case VM86_PLUS_INSTALL_CHECK: | 248 | case VM86_PLUS_INSTALL_CHECK: |
247 | /* NOTE: on old vm86 stuff this will return the error | 249 | /* |
248 | from access_ok(), because the subfunction is | 250 | * NOTE: on old vm86 stuff this will return the error |
249 | interpreted as (invalid) address to vm86_struct. | 251 | * from access_ok(), because the subfunction is |
250 | So the installation check works. | 252 | * interpreted as (invalid) address to vm86_struct. |
251 | */ | 253 | * So the installation check works. |
252 | ret = 0; | 254 | */ |
253 | goto out; | 255 | ret = 0; |
256 | goto out; | ||
254 | } | 257 | } |
255 | 258 | ||
256 | /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */ | 259 | /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */ |
@@ -296,21 +299,21 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk | |||
296 | VEFLAGS = info->regs.pt.flags; | 299 | VEFLAGS = info->regs.pt.flags; |
297 | info->regs.pt.flags &= SAFE_MASK; | 300 | info->regs.pt.flags &= SAFE_MASK; |
298 | info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK; | 301 | info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK; |
299 | info->regs.pt.flags |= VM_MASK; | 302 | info->regs.pt.flags |= X86_VM_MASK; |
300 | 303 | ||
301 | switch (info->cpu_type) { | 304 | switch (info->cpu_type) { |
302 | case CPU_286: | 305 | case CPU_286: |
303 | tsk->thread.v86mask = 0; | 306 | tsk->thread.v86mask = 0; |
304 | break; | 307 | break; |
305 | case CPU_386: | 308 | case CPU_386: |
306 | tsk->thread.v86mask = NT_MASK | IOPL_MASK; | 309 | tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL; |
307 | break; | 310 | break; |
308 | case CPU_486: | 311 | case CPU_486: |
309 | tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK; | 312 | tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; |
310 | break; | 313 | break; |
311 | default: | 314 | default: |
312 | tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK; | 315 | tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; |
313 | break; | 316 | break; |
314 | } | 317 | } |
315 | 318 | ||
316 | /* | 319 | /* |
@@ -346,9 +349,9 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk | |||
346 | /* we never return here */ | 349 | /* we never return here */ |
347 | } | 350 | } |
348 | 351 | ||
349 | static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval) | 352 | static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval) |
350 | { | 353 | { |
351 | struct pt_regs * regs32; | 354 | struct pt_regs *regs32; |
352 | 355 | ||
353 | regs32 = save_v86_state(regs16); | 356 | regs32 = save_v86_state(regs16); |
354 | regs32->ax = retval; | 357 | regs32->ax = retval; |
@@ -358,29 +361,30 @@ static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval) | |||
358 | : : "r" (regs32), "r" (current_thread_info())); | 361 | : : "r" (regs32), "r" (current_thread_info())); |
359 | } | 362 | } |
360 | 363 | ||
361 | static inline void set_IF(struct kernel_vm86_regs * regs) | 364 | static inline void set_IF(struct kernel_vm86_regs *regs) |
362 | { | 365 | { |
363 | VEFLAGS |= VIF_MASK; | 366 | VEFLAGS |= X86_EFLAGS_VIF; |
364 | if (VEFLAGS & VIP_MASK) | 367 | if (VEFLAGS & X86_EFLAGS_VIP) |
365 | return_to_32bit(regs, VM86_STI); | 368 | return_to_32bit(regs, VM86_STI); |
366 | } | 369 | } |
367 | 370 | ||
368 | static inline void clear_IF(struct kernel_vm86_regs * regs) | 371 | static inline void clear_IF(struct kernel_vm86_regs *regs) |
369 | { | 372 | { |
370 | VEFLAGS &= ~VIF_MASK; | 373 | VEFLAGS &= ~X86_EFLAGS_VIF; |
371 | } | 374 | } |
372 | 375 | ||
373 | static inline void clear_TF(struct kernel_vm86_regs * regs) | 376 | static inline void clear_TF(struct kernel_vm86_regs *regs) |
374 | { | 377 | { |
375 | regs->pt.flags &= ~TF_MASK; | 378 | regs->pt.flags &= ~X86_EFLAGS_TF; |
376 | } | 379 | } |
377 | 380 | ||
378 | static inline void clear_AC(struct kernel_vm86_regs * regs) | 381 | static inline void clear_AC(struct kernel_vm86_regs *regs) |
379 | { | 382 | { |
380 | regs->pt.flags &= ~AC_MASK; | 383 | regs->pt.flags &= ~X86_EFLAGS_AC; |
381 | } | 384 | } |
382 | 385 | ||
383 | /* It is correct to call set_IF(regs) from the set_vflags_* | 386 | /* |
387 | * It is correct to call set_IF(regs) from the set_vflags_* | ||
384 | * functions. However someone forgot to call clear_IF(regs) | 388 | * functions. However someone forgot to call clear_IF(regs) |
385 | * in the opposite case. | 389 | * in the opposite case. |
386 | * After the command sequence CLI PUSHF STI POPF you should | 390 | * After the command sequence CLI PUSHF STI POPF you should |
@@ -391,41 +395,41 @@ static inline void clear_AC(struct kernel_vm86_regs * regs) | |||
391 | * [KD] | 395 | * [KD] |
392 | */ | 396 | */ |
393 | 397 | ||
394 | static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs * regs) | 398 | static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs) |
395 | { | 399 | { |
396 | set_flags(VEFLAGS, flags, current->thread.v86mask); | 400 | set_flags(VEFLAGS, flags, current->thread.v86mask); |
397 | set_flags(regs->pt.flags, flags, SAFE_MASK); | 401 | set_flags(regs->pt.flags, flags, SAFE_MASK); |
398 | if (flags & IF_MASK) | 402 | if (flags & X86_EFLAGS_IF) |
399 | set_IF(regs); | 403 | set_IF(regs); |
400 | else | 404 | else |
401 | clear_IF(regs); | 405 | clear_IF(regs); |
402 | } | 406 | } |
403 | 407 | ||
404 | static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs) | 408 | static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs) |
405 | { | 409 | { |
406 | set_flags(VFLAGS, flags, current->thread.v86mask); | 410 | set_flags(VFLAGS, flags, current->thread.v86mask); |
407 | set_flags(regs->pt.flags, flags, SAFE_MASK); | 411 | set_flags(regs->pt.flags, flags, SAFE_MASK); |
408 | if (flags & IF_MASK) | 412 | if (flags & X86_EFLAGS_IF) |
409 | set_IF(regs); | 413 | set_IF(regs); |
410 | else | 414 | else |
411 | clear_IF(regs); | 415 | clear_IF(regs); |
412 | } | 416 | } |
413 | 417 | ||
414 | static inline unsigned long get_vflags(struct kernel_vm86_regs * regs) | 418 | static inline unsigned long get_vflags(struct kernel_vm86_regs *regs) |
415 | { | 419 | { |
416 | unsigned long flags = regs->pt.flags & RETURN_MASK; | 420 | unsigned long flags = regs->pt.flags & RETURN_MASK; |
417 | 421 | ||
418 | if (VEFLAGS & VIF_MASK) | 422 | if (VEFLAGS & X86_EFLAGS_VIF) |
419 | flags |= IF_MASK; | 423 | flags |= X86_EFLAGS_IF; |
420 | flags |= IOPL_MASK; | 424 | flags |= X86_EFLAGS_IOPL; |
421 | return flags | (VEFLAGS & current->thread.v86mask); | 425 | return flags | (VEFLAGS & current->thread.v86mask); |
422 | } | 426 | } |
423 | 427 | ||
424 | static inline int is_revectored(int nr, struct revectored_struct * bitmap) | 428 | static inline int is_revectored(int nr, struct revectored_struct *bitmap) |
425 | { | 429 | { |
426 | __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0" | 430 | __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0" |
427 | :"=r" (nr) | 431 | :"=r" (nr) |
428 | :"m" (*bitmap),"r" (nr)); | 432 | :"m" (*bitmap), "r" (nr)); |
429 | return nr; | 433 | return nr; |
430 | } | 434 | } |
431 | 435 | ||
@@ -437,7 +441,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap) | |||
437 | ptr--; \ | 441 | ptr--; \ |
438 | if (put_user(__val, base + ptr) < 0) \ | 442 | if (put_user(__val, base + ptr) < 0) \ |
439 | goto err_label; \ | 443 | goto err_label; \ |
440 | } while(0) | 444 | } while (0) |
441 | 445 | ||
442 | #define pushw(base, ptr, val, err_label) \ | 446 | #define pushw(base, ptr, val, err_label) \ |
443 | do { \ | 447 | do { \ |
@@ -448,7 +452,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap) | |||
448 | ptr--; \ | 452 | ptr--; \ |
449 | if (put_user(val_byte(__val, 0), base + ptr) < 0) \ | 453 | if (put_user(val_byte(__val, 0), base + ptr) < 0) \ |
450 | goto err_label; \ | 454 | goto err_label; \ |
451 | } while(0) | 455 | } while (0) |
452 | 456 | ||
453 | #define pushl(base, ptr, val, err_label) \ | 457 | #define pushl(base, ptr, val, err_label) \ |
454 | do { \ | 458 | do { \ |
@@ -465,7 +469,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap) | |||
465 | ptr--; \ | 469 | ptr--; \ |
466 | if (put_user(val_byte(__val, 0), base + ptr) < 0) \ | 470 | if (put_user(val_byte(__val, 0), base + ptr) < 0) \ |
467 | goto err_label; \ | 471 | goto err_label; \ |
468 | } while(0) | 472 | } while (0) |
469 | 473 | ||
470 | #define popb(base, ptr, err_label) \ | 474 | #define popb(base, ptr, err_label) \ |
471 | ({ \ | 475 | ({ \ |
@@ -512,7 +516,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap) | |||
512 | * in userspace is always better than an Oops anyway.) [KD] | 516 | * in userspace is always better than an Oops anyway.) [KD] |
513 | */ | 517 | */ |
514 | static void do_int(struct kernel_vm86_regs *regs, int i, | 518 | static void do_int(struct kernel_vm86_regs *regs, int i, |
515 | unsigned char __user * ssp, unsigned short sp) | 519 | unsigned char __user *ssp, unsigned short sp) |
516 | { | 520 | { |
517 | unsigned long __user *intr_ptr; | 521 | unsigned long __user *intr_ptr; |
518 | unsigned long segoffs; | 522 | unsigned long segoffs; |
@@ -521,7 +525,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i, | |||
521 | goto cannot_handle; | 525 | goto cannot_handle; |
522 | if (is_revectored(i, &KVM86->int_revectored)) | 526 | if (is_revectored(i, &KVM86->int_revectored)) |
523 | goto cannot_handle; | 527 | goto cannot_handle; |
524 | if (i==0x21 && is_revectored(AH(regs),&KVM86->int21_revectored)) | 528 | if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored)) |
525 | goto cannot_handle; | 529 | goto cannot_handle; |
526 | intr_ptr = (unsigned long __user *) (i << 2); | 530 | intr_ptr = (unsigned long __user *) (i << 2); |
527 | if (get_user(segoffs, intr_ptr)) | 531 | if (get_user(segoffs, intr_ptr)) |
@@ -543,30 +547,23 @@ cannot_handle: | |||
543 | return_to_32bit(regs, VM86_INTx + (i << 8)); | 547 | return_to_32bit(regs, VM86_INTx + (i << 8)); |
544 | } | 548 | } |
545 | 549 | ||
546 | int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno) | 550 | int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno) |
547 | { | 551 | { |
548 | if (VMPI.is_vm86pus) { | 552 | if (VMPI.is_vm86pus) { |
549 | if ( (trapno==3) || (trapno==1) ) | 553 | if ((trapno == 3) || (trapno == 1)) |
550 | return_to_32bit(regs, VM86_TRAP + (trapno << 8)); | 554 | return_to_32bit(regs, VM86_TRAP + (trapno << 8)); |
551 | do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs)); | 555 | do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs)); |
552 | return 0; | 556 | return 0; |
553 | } | 557 | } |
554 | if (trapno !=1) | 558 | if (trapno != 1) |
555 | return 1; /* we let this be handled by the calling routine */ | 559 | return 1; /* we let this be handled by the calling routine */ |
556 | if (current->ptrace & PT_PTRACED) { | ||
557 | unsigned long flags; | ||
558 | spin_lock_irqsave(¤t->sighand->siglock, flags); | ||
559 | sigdelset(¤t->blocked, SIGTRAP); | ||
560 | recalc_sigpending(); | ||
561 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); | ||
562 | } | ||
563 | send_sig(SIGTRAP, current, 1); | ||
564 | current->thread.trap_no = trapno; | 560 | current->thread.trap_no = trapno; |
565 | current->thread.error_code = error_code; | 561 | current->thread.error_code = error_code; |
562 | force_sig(SIGTRAP, current); | ||
566 | return 0; | 563 | return 0; |
567 | } | 564 | } |
568 | 565 | ||
569 | void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code) | 566 | void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) |
570 | { | 567 | { |
571 | unsigned char opcode; | 568 | unsigned char opcode; |
572 | unsigned char __user *csp; | 569 | unsigned char __user *csp; |
@@ -576,11 +573,11 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code) | |||
576 | 573 | ||
577 | #define CHECK_IF_IN_TRAP \ | 574 | #define CHECK_IF_IN_TRAP \ |
578 | if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \ | 575 | if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \ |
579 | newflags |= TF_MASK | 576 | newflags |= X86_EFLAGS_TF |
580 | #define VM86_FAULT_RETURN do { \ | 577 | #define VM86_FAULT_RETURN do { \ |
581 | if (VMPI.force_return_for_pic && (VEFLAGS & (IF_MASK | VIF_MASK))) \ | 578 | if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \ |
582 | return_to_32bit(regs, VM86_PICRETURN); \ | 579 | return_to_32bit(regs, VM86_PICRETURN); \ |
583 | if (orig_flags & TF_MASK) \ | 580 | if (orig_flags & X86_EFLAGS_TF) \ |
584 | handle_vm86_trap(regs, 0, 1); \ | 581 | handle_vm86_trap(regs, 0, 1); \ |
585 | return; } while (0) | 582 | return; } while (0) |
586 | 583 | ||
@@ -595,17 +592,17 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code) | |||
595 | pref_done = 0; | 592 | pref_done = 0; |
596 | do { | 593 | do { |
597 | switch (opcode = popb(csp, ip, simulate_sigsegv)) { | 594 | switch (opcode = popb(csp, ip, simulate_sigsegv)) { |
598 | case 0x66: /* 32-bit data */ data32=1; break; | 595 | case 0x66: /* 32-bit data */ data32 = 1; break; |
599 | case 0x67: /* 32-bit address */ break; | 596 | case 0x67: /* 32-bit address */ break; |
600 | case 0x2e: /* CS */ break; | 597 | case 0x2e: /* CS */ break; |
601 | case 0x3e: /* DS */ break; | 598 | case 0x3e: /* DS */ break; |
602 | case 0x26: /* ES */ break; | 599 | case 0x26: /* ES */ break; |
603 | case 0x36: /* SS */ break; | 600 | case 0x36: /* SS */ break; |
604 | case 0x65: /* GS */ break; | 601 | case 0x65: /* GS */ break; |
605 | case 0x64: /* FS */ break; | 602 | case 0x64: /* FS */ break; |
606 | case 0xf2: /* repnz */ break; | 603 | case 0xf2: /* repnz */ break; |
607 | case 0xf3: /* rep */ break; | 604 | case 0xf3: /* rep */ break; |
608 | default: pref_done = 1; | 605 | default: pref_done = 1; |
609 | } | 606 | } |
610 | } while (!pref_done); | 607 | } while (!pref_done); |
611 | 608 | ||
@@ -628,7 +625,7 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code) | |||
628 | { | 625 | { |
629 | unsigned long newflags; | 626 | unsigned long newflags; |
630 | if (data32) { | 627 | if (data32) { |
631 | newflags=popl(ssp, sp, simulate_sigsegv); | 628 | newflags = popl(ssp, sp, simulate_sigsegv); |
632 | SP(regs) += 4; | 629 | SP(regs) += 4; |
633 | } else { | 630 | } else { |
634 | newflags = popw(ssp, sp, simulate_sigsegv); | 631 | newflags = popw(ssp, sp, simulate_sigsegv); |
@@ -636,20 +633,20 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code) | |||
636 | } | 633 | } |
637 | IP(regs) = ip; | 634 | IP(regs) = ip; |
638 | CHECK_IF_IN_TRAP; | 635 | CHECK_IF_IN_TRAP; |
639 | if (data32) { | 636 | if (data32) |
640 | set_vflags_long(newflags, regs); | 637 | set_vflags_long(newflags, regs); |
641 | } else { | 638 | else |
642 | set_vflags_short(newflags, regs); | 639 | set_vflags_short(newflags, regs); |
643 | } | 640 | |
644 | VM86_FAULT_RETURN; | 641 | VM86_FAULT_RETURN; |
645 | } | 642 | } |
646 | 643 | ||
647 | /* int xx */ | 644 | /* int xx */ |
648 | case 0xcd: { | 645 | case 0xcd: { |
649 | int intno=popb(csp, ip, simulate_sigsegv); | 646 | int intno = popb(csp, ip, simulate_sigsegv); |
650 | IP(regs) = ip; | 647 | IP(regs) = ip; |
651 | if (VMPI.vm86dbg_active) { | 648 | if (VMPI.vm86dbg_active) { |
652 | if ( (1 << (intno &7)) & VMPI.vm86dbg_intxxtab[intno >> 3] ) | 649 | if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3]) |
653 | return_to_32bit(regs, VM86_INTx + (intno << 8)); | 650 | return_to_32bit(regs, VM86_INTx + (intno << 8)); |
654 | } | 651 | } |
655 | do_int(regs, intno, ssp, sp); | 652 | do_int(regs, intno, ssp, sp); |
@@ -663,9 +660,9 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code) | |||
663 | unsigned long newcs; | 660 | unsigned long newcs; |
664 | unsigned long newflags; | 661 | unsigned long newflags; |
665 | if (data32) { | 662 | if (data32) { |
666 | newip=popl(ssp, sp, simulate_sigsegv); | 663 | newip = popl(ssp, sp, simulate_sigsegv); |
667 | newcs=popl(ssp, sp, simulate_sigsegv); | 664 | newcs = popl(ssp, sp, simulate_sigsegv); |
668 | newflags=popl(ssp, sp, simulate_sigsegv); | 665 | newflags = popl(ssp, sp, simulate_sigsegv); |
669 | SP(regs) += 12; | 666 | SP(regs) += 12; |
670 | } else { | 667 | } else { |
671 | newip = popw(ssp, sp, simulate_sigsegv); | 668 | newip = popw(ssp, sp, simulate_sigsegv); |
@@ -734,18 +731,18 @@ static struct vm86_irqs { | |||
734 | static DEFINE_SPINLOCK(irqbits_lock); | 731 | static DEFINE_SPINLOCK(irqbits_lock); |
735 | static int irqbits; | 732 | static int irqbits; |
736 | 733 | ||
737 | #define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \ | 734 | #define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \ |
738 | | (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \ | 735 | | (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \ |
739 | | (1 << SIGUNUSED) ) | 736 | | (1 << SIGUNUSED)) |
740 | 737 | ||
741 | static irqreturn_t irq_handler(int intno, void *dev_id) | 738 | static irqreturn_t irq_handler(int intno, void *dev_id) |
742 | { | 739 | { |
743 | int irq_bit; | 740 | int irq_bit; |
744 | unsigned long flags; | 741 | unsigned long flags; |
745 | 742 | ||
746 | spin_lock_irqsave(&irqbits_lock, flags); | 743 | spin_lock_irqsave(&irqbits_lock, flags); |
747 | irq_bit = 1 << intno; | 744 | irq_bit = 1 << intno; |
748 | if ((irqbits & irq_bit) || ! vm86_irqs[intno].tsk) | 745 | if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk) |
749 | goto out; | 746 | goto out; |
750 | irqbits |= irq_bit; | 747 | irqbits |= irq_bit; |
751 | if (vm86_irqs[intno].sig) | 748 | if (vm86_irqs[intno].sig) |
@@ -759,7 +756,7 @@ static irqreturn_t irq_handler(int intno, void *dev_id) | |||
759 | return IRQ_HANDLED; | 756 | return IRQ_HANDLED; |
760 | 757 | ||
761 | out: | 758 | out: |
762 | spin_unlock_irqrestore(&irqbits_lock, flags); | 759 | spin_unlock_irqrestore(&irqbits_lock, flags); |
763 | return IRQ_NONE; | 760 | return IRQ_NONE; |
764 | } | 761 | } |
765 | 762 | ||
@@ -770,9 +767,9 @@ static inline void free_vm86_irq(int irqnumber) | |||
770 | free_irq(irqnumber, NULL); | 767 | free_irq(irqnumber, NULL); |
771 | vm86_irqs[irqnumber].tsk = NULL; | 768 | vm86_irqs[irqnumber].tsk = NULL; |
772 | 769 | ||
773 | spin_lock_irqsave(&irqbits_lock, flags); | 770 | spin_lock_irqsave(&irqbits_lock, flags); |
774 | irqbits &= ~(1 << irqnumber); | 771 | irqbits &= ~(1 << irqnumber); |
775 | spin_unlock_irqrestore(&irqbits_lock, flags); | 772 | spin_unlock_irqrestore(&irqbits_lock, flags); |
776 | } | 773 | } |
777 | 774 | ||
778 | void release_vm86_irqs(struct task_struct *task) | 775 | void release_vm86_irqs(struct task_struct *task) |
@@ -788,10 +785,10 @@ static inline int get_and_reset_irq(int irqnumber) | |||
788 | int bit; | 785 | int bit; |
789 | unsigned long flags; | 786 | unsigned long flags; |
790 | int ret = 0; | 787 | int ret = 0; |
791 | 788 | ||
792 | if (invalid_vm86_irq(irqnumber)) return 0; | 789 | if (invalid_vm86_irq(irqnumber)) return 0; |
793 | if (vm86_irqs[irqnumber].tsk != current) return 0; | 790 | if (vm86_irqs[irqnumber].tsk != current) return 0; |
794 | spin_lock_irqsave(&irqbits_lock, flags); | 791 | spin_lock_irqsave(&irqbits_lock, flags); |
795 | bit = irqbits & (1 << irqnumber); | 792 | bit = irqbits & (1 << irqnumber); |
796 | irqbits &= ~bit; | 793 | irqbits &= ~bit; |
797 | if (bit) { | 794 | if (bit) { |
@@ -799,7 +796,7 @@ static inline int get_and_reset_irq(int irqnumber) | |||
799 | ret = 1; | 796 | ret = 1; |
800 | } | 797 | } |
801 | 798 | ||
802 | spin_unlock_irqrestore(&irqbits_lock, flags); | 799 | spin_unlock_irqrestore(&irqbits_lock, flags); |
803 | return ret; | 800 | return ret; |
804 | } | 801 | } |
805 | 802 | ||
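The vm86 hunks above replace the file-private IF_MASK/VIF_MASK/IOPL_MASK names with the shared X86_EFLAGS_* constants; the set_flags() merge itself is unchanged. A small runnable walk-through of that merge, using the macro verbatim from the patch and EFLAGS bit values from the architecture manual; the scenario is invented for illustration:

#include <stdio.h>

#define X86_EFLAGS_IF	0x00000200	/* interrupt enable */
#define X86_EFLAGS_IOPL	0x00003000	/* I/O privilege level */
#define X86_EFLAGS_NT	0x00004000	/* nested task */

#define set_flags(X, new, mask) \
	((X) = ((X) & ~(mask)) | ((new) & (mask)))

int main(void)
{
	unsigned long eflags = X86_EFLAGS_IF;		/* IF set, IOPL 0 */
	unsigned long guest  = X86_EFLAGS_IOPL;		/* guest wants IOPL 3 */

	/* Only bits inside the mask are taken from the guest value; IF is
	 * outside the mask here, so it survives untouched. */
	set_flags(eflags, guest, X86_EFLAGS_IOPL | X86_EFLAGS_NT);

	printf("eflags=%#lx\n", eflags);	/* 0x3200: IF kept, IOPL merged */
	return 0;
}

This is why current->thread.v86mask is built from per-CPU-type X86_EFLAGS_* combinations: it bounds which guest flag bits set_vflags_long/short may ever let through.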
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S index 2ffa9656fe7a..ce5ed083a1e9 100644 --- a/arch/x86/kernel/vmlinux_32.lds.S +++ b/arch/x86/kernel/vmlinux_32.lds.S | |||
@@ -149,6 +149,11 @@ SECTIONS | |||
149 | *(.con_initcall.init) | 149 | *(.con_initcall.init) |
150 | __con_initcall_end = .; | 150 | __con_initcall_end = .; |
151 | } | 151 | } |
152 | .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) { | ||
153 | __x86cpuvendor_start = .; | ||
154 | *(.x86cpuvendor.init) | ||
155 | __x86cpuvendor_end = .; | ||
156 | } | ||
152 | SECURITY_INIT | 157 | SECURITY_INIT |
153 | . = ALIGN(4); | 158 | . = ALIGN(4); |
154 | .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { | 159 | .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { |
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S index fab132299735..b7ab3c335fae 100644 --- a/arch/x86/kernel/vmlinux_64.lds.S +++ b/arch/x86/kernel/vmlinux_64.lds.S | |||
@@ -177,6 +177,11 @@ SECTIONS | |||
177 | *(.con_initcall.init) | 177 | *(.con_initcall.init) |
178 | } | 178 | } |
179 | __con_initcall_end = .; | 179 | __con_initcall_end = .; |
180 | __x86cpuvendor_start = .; | ||
181 | .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) { | ||
182 | *(.x86cpuvendor.init) | ||
183 | } | ||
184 | __x86cpuvendor_end = .; | ||
180 | SECURITY_INIT | 185 | SECURITY_INIT |
181 | 186 | ||
182 | . = ALIGN(8); | 187 | . = ALIGN(8); |
@@ -247,3 +252,9 @@ SECTIONS | |||
247 | 252 | ||
248 | DWARF_DEBUG | 253 | DWARF_DEBUG |
249 | } | 254 | } |
255 | |||
256 | /* | ||
257 | * Build-time check on the image size: | ||
258 | */ | ||
259 | ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), | ||
260 | "kernel image bigger than KERNEL_IMAGE_SIZE") | ||
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index d971210a6d36..caf2a26f5cfd 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c | |||
@@ -8,6 +8,8 @@ | |||
8 | * | 8 | * |
9 | * Ravikiran Thirumalai <kiran@scalemp.com>, | 9 | * Ravikiran Thirumalai <kiran@scalemp.com>, |
10 | * Shai Fultheim <shai@scalemp.com> | 10 | * Shai Fultheim <shai@scalemp.com> |
11 | * Paravirt ops integration: Glauber de Oliveira Costa <gcosta@redhat.com>, | ||
12 | * Ravikiran Thirumalai <kiran@scalemp.com> | ||
11 | */ | 13 | */ |
12 | 14 | ||
13 | #include <linux/init.h> | 15 | #include <linux/init.h> |
@@ -15,38 +17,137 @@ | |||
15 | #include <linux/pci_regs.h> | 17 | #include <linux/pci_regs.h> |
16 | #include <asm/pci-direct.h> | 18 | #include <asm/pci-direct.h> |
17 | #include <asm/io.h> | 19 | #include <asm/io.h> |
20 | #include <asm/paravirt.h> | ||
18 | 21 | ||
19 | static int __init vsmp_init(void) | 22 | #if defined CONFIG_PCI && defined CONFIG_PARAVIRT |
23 | /* | ||
24 | * Interrupt control on vSMPowered systems: | ||
25 | * ~AC is a shadow of IF. If IF is 'on' AC should be 'off' | ||
26 | * and vice versa. | ||
27 | */ | ||
28 | |||
29 | static unsigned long vsmp_save_fl(void) | ||
20 | { | 30 | { |
21 | void *address; | 31 | unsigned long flags = native_save_fl(); |
22 | unsigned int cap, ctl; | ||
23 | 32 | ||
24 | if (!early_pci_allowed()) | 33 | if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC)) |
25 | return 0; | 34 | flags &= ~X86_EFLAGS_IF; |
35 | return flags; | ||
36 | } | ||
26 | 37 | ||
27 | /* Check if we are running on a ScaleMP vSMP box */ | 38 | static void vsmp_restore_fl(unsigned long flags) |
28 | if ((read_pci_config_16(0, 0x1f, 0, PCI_VENDOR_ID) != | 39 | { |
29 | PCI_VENDOR_ID_SCALEMP) || | 40 | if (flags & X86_EFLAGS_IF) |
30 | (read_pci_config_16(0, 0x1f, 0, PCI_DEVICE_ID) != | 41 | flags &= ~X86_EFLAGS_AC; |
31 | PCI_DEVICE_ID_SCALEMP_VSMP_CTL)) | 42 | else |
32 | return 0; | 43 | flags |= X86_EFLAGS_AC; |
44 | native_restore_fl(flags); | ||
45 | } | ||
46 | |||
47 | static void vsmp_irq_disable(void) | ||
48 | { | ||
49 | unsigned long flags = native_save_fl(); | ||
50 | |||
51 | native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC); | ||
52 | } | ||
53 | |||
54 | static void vsmp_irq_enable(void) | ||
55 | { | ||
56 | unsigned long flags = native_save_fl(); | ||
57 | |||
58 | native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); | ||
59 | } | ||
60 | |||
61 | static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf, | ||
62 | unsigned long addr, unsigned len) | ||
63 | { | ||
64 | switch (type) { | ||
65 | case PARAVIRT_PATCH(pv_irq_ops.irq_enable): | ||
66 | case PARAVIRT_PATCH(pv_irq_ops.irq_disable): | ||
67 | case PARAVIRT_PATCH(pv_irq_ops.save_fl): | ||
68 | case PARAVIRT_PATCH(pv_irq_ops.restore_fl): | ||
69 | return paravirt_patch_default(type, clobbers, ibuf, addr, len); | ||
70 | default: | ||
71 | return native_patch(type, clobbers, ibuf, addr, len); | ||
72 | } | ||
73 | |||
74 | } | ||
75 | |||
76 | static void __init set_vsmp_pv_ops(void) | ||
77 | { | ||
78 | void *address; | ||
79 | unsigned int cap, ctl, cfg; | ||
33 | 80 | ||
34 | /* set vSMP magic bits to indicate vSMP capable kernel */ | 81 | /* set vSMP magic bits to indicate vSMP capable kernel */ |
35 | address = ioremap(read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0), 8); | 82 | cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0); |
83 | address = early_ioremap(cfg, 8); | ||
36 | cap = readl(address); | 84 | cap = readl(address); |
37 | ctl = readl(address + 4); | 85 | ctl = readl(address + 4); |
38 | printk(KERN_INFO "vSMP CTL: capabilities:0x%08x control:0x%08x\n", | 86 | printk(KERN_INFO "vSMP CTL: capabilities:0x%08x control:0x%08x\n", |
39 | cap, ctl); | 87 | cap, ctl); |
40 | if (cap & ctl & (1 << 4)) { | 88 | if (cap & ctl & (1 << 4)) { |
41 | /* Turn on vSMP IRQ fastpath handling (see system.h) */ | 89 | /* Setup irq ops and turn on vSMP IRQ fastpath handling */ |
90 | pv_irq_ops.irq_disable = vsmp_irq_disable; | ||
91 | pv_irq_ops.irq_enable = vsmp_irq_enable; | ||
92 | pv_irq_ops.save_fl = vsmp_save_fl; | ||
93 | pv_irq_ops.restore_fl = vsmp_restore_fl; | ||
94 | pv_init_ops.patch = vsmp_patch; | ||
95 | |||
42 | ctl &= ~(1 << 4); | 96 | ctl &= ~(1 << 4); |
43 | writel(ctl, address + 4); | 97 | writel(ctl, address + 4); |
44 | ctl = readl(address + 4); | 98 | ctl = readl(address + 4); |
45 | printk(KERN_INFO "vSMP CTL: control set to:0x%08x\n", ctl); | 99 | printk(KERN_INFO "vSMP CTL: control set to:0x%08x\n", ctl); |
46 | } | 100 | } |
47 | 101 | ||
48 | iounmap(address); | 102 | early_iounmap(address, 8); |
103 | } | ||
104 | #else | ||
105 | static void __init set_vsmp_pv_ops(void) | ||
106 | { | ||
107 | } | ||
108 | #endif | ||
109 | |||
110 | #ifdef CONFIG_PCI | ||
111 | static int is_vsmp = -1; | ||
112 | |||
113 | static void __init detect_vsmp_box(void) | ||
114 | { | ||
115 | is_vsmp = 0; | ||
116 | |||
117 | if (!early_pci_allowed()) | ||
118 | return; | ||
119 | |||
120 | /* Check if we are running on a ScaleMP vSMPowered box */ | ||
121 | if (read_pci_config(0, 0x1f, 0, PCI_VENDOR_ID) == | ||
122 | (PCI_VENDOR_ID_SCALEMP | (PCI_DEVICE_ID_SCALEMP_VSMP_CTL << 16))) | ||
123 | is_vsmp = 1; | ||
124 | } | ||
125 | |||
126 | int is_vsmp_box(void) | ||
127 | { | ||
128 | if (is_vsmp != -1) | ||
129 | return is_vsmp; | ||
130 | else { | ||
131 | WARN_ON_ONCE(1); | ||
132 | return 0; | ||
133 | } | ||
134 | } | ||
135 | #else | ||
136 | static void __init detect_vsmp_box(void) ||
137 | { | ||
138 | } | ||
139 | int is_vsmp_box(void) | ||
140 | { | ||
49 | return 0; | 141 | return 0; |
50 | } | 142 | } |
143 | #endif | ||
51 | 144 | ||
52 | core_initcall(vsmp_init); | 145 | void __init vsmp_init(void) |
146 | { | ||
147 | detect_vsmp_box(); | ||
148 | if (!is_vsmp_box()) | ||
149 | return; | ||
150 | |||
151 | set_vsmp_pv_ops(); | ||
152 | return; | ||
153 | } | ||
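The vSMP pv_irq_ops above maintain one invariant: ~AC shadows IF, so with interrupts logically on AC must be clear, and vice versa. A userspace walk-through of those flag transforms, with the EFLAGS bit values from the architecture manual and the native_*_fl calls simulated by a plain variable; this is a sketch, not the kernel's paravirt machinery:

#include <stdio.h>

#define X86_EFLAGS_IF	0x00000200
#define X86_EFLAGS_AC	0x00040000

static unsigned long hw_flags;	/* stand-in for the real EFLAGS register */

static unsigned long native_save_fl(void) { return hw_flags; }
static void native_restore_fl(unsigned long f) { hw_flags = f; }

static void vsmp_irq_disable(void)
{
	unsigned long flags = native_save_fl();

	/* clear IF, set AC: both encodings now say "interrupts off" */
	native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
}

static void vsmp_irq_enable(void)
{
	unsigned long flags = native_save_fl();

	/* set IF, clear AC: both encodings now say "interrupts on" */
	native_restore_fl((flags | X86_EFLAGS_IF) & ~X86_EFLAGS_AC);
}

int main(void)
{
	vsmp_irq_disable();
	printf("disabled: IF=%d AC=%d\n",
	       !!(hw_flags & X86_EFLAGS_IF), !!(hw_flags & X86_EFLAGS_AC));
	vsmp_irq_enable();
	printf("enabled:  IF=%d AC=%d\n",
	       !!(hw_flags & X86_EFLAGS_IF), !!(hw_flags & X86_EFLAGS_AC));
	return 0;
}

vsmp_save_fl() in the patch is the read side of the same rule: if either IF is clear or AC is set, it reports interrupts as disabled.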
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c index a66e9c1a0537..58882f9f2637 100644 --- a/arch/x86/kernel/x8664_ksyms_64.c +++ b/arch/x86/kernel/x8664_ksyms_64.c | |||
@@ -4,7 +4,6 @@ | |||
4 | #include <linux/module.h> | 4 | #include <linux/module.h> |
5 | #include <linux/smp.h> | 5 | #include <linux/smp.h> |
6 | 6 | ||
7 | #include <asm/semaphore.h> | ||
8 | #include <asm/processor.h> | 7 | #include <asm/processor.h> |
9 | #include <asm/uaccess.h> | 8 | #include <asm/uaccess.h> |
10 | #include <asm/pgtable.h> | 9 | #include <asm/pgtable.h> |
@@ -12,11 +11,6 @@ | |||
12 | 11 | ||
13 | EXPORT_SYMBOL(kernel_thread); | 12 | EXPORT_SYMBOL(kernel_thread); |
14 | 13 | ||
15 | EXPORT_SYMBOL(__down_failed); | ||
16 | EXPORT_SYMBOL(__down_failed_interruptible); | ||
17 | EXPORT_SYMBOL(__down_failed_trylock); | ||
18 | EXPORT_SYMBOL(__up_wakeup); | ||
19 | |||
20 | EXPORT_SYMBOL(__get_user_1); | 14 | EXPORT_SYMBOL(__get_user_1); |
21 | EXPORT_SYMBOL(__get_user_2); | 15 | EXPORT_SYMBOL(__get_user_2); |
22 | EXPORT_SYMBOL(__get_user_4); | 16 | EXPORT_SYMBOL(__get_user_4); |
@@ -35,15 +29,17 @@ EXPORT_SYMBOL(__copy_from_user_inatomic); | |||
35 | EXPORT_SYMBOL(copy_page); | 29 | EXPORT_SYMBOL(copy_page); |
36 | EXPORT_SYMBOL(clear_page); | 30 | EXPORT_SYMBOL(clear_page); |
37 | 31 | ||
38 | /* Export string functions. We normally rely on gcc builtin for most of these, | 32 | /* |
39 | but gcc sometimes decides not to inline them. */ | 33 | * Export string functions. We normally rely on gcc builtin for most of these, |
34 | * but gcc sometimes decides not to inline them. | ||
35 | */ | ||
40 | #undef memcpy | 36 | #undef memcpy |
41 | #undef memset | 37 | #undef memset |
42 | #undef memmove | 38 | #undef memmove |
43 | 39 | ||
44 | extern void * memset(void *,int,__kernel_size_t); | 40 | extern void *memset(void *, int, __kernel_size_t); |
45 | extern void * memcpy(void *,const void *,__kernel_size_t); | 41 | extern void *memcpy(void *, const void *, __kernel_size_t); |
46 | extern void * __memcpy(void *,const void *,__kernel_size_t); | 42 | extern void *__memcpy(void *, const void *, __kernel_size_t); |
47 | 43 | ||
48 | EXPORT_SYMBOL(memset); | 44 | EXPORT_SYMBOL(memset); |
49 | EXPORT_SYMBOL(memcpy); | 45 | EXPORT_SYMBOL(memcpy); |
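The #undef memcpy/memset/memmove dance kept above exists because headers may map these names to compiler builtins via macros, which would rewrite the extern declarations and EXPORT_SYMBOL lines. A minimal userspace illustration of that effect, with the macro invented to stand in for the header-provided one:

#include <stdio.h>
#include <string.h>

#define memcpy(d, s, n) __builtin_memcpy(d, s, n)	/* header-style macro */

#undef memcpy	/* get the real function name back for the declaration */
extern void *memcpy(void *dest, const void *src, size_t n);

int main(void)
{
	char dst[8];

	memcpy(dst, "hi", 3);	/* now resolves to the out-of-line function */
	printf("%s\n", dst);
	return 0;
}

Without the #undef, the declaration line itself would expand into nonsense, and the exported symbol could silently differ from what modules link against.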
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 3335b4595efd..af65b2da3ba0 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -661,7 +661,7 @@ static int lguest_clockevent_set_next_event(unsigned long delta, | |||
661 | if (delta < LG_CLOCK_MIN_DELTA) { | 661 | if (delta < LG_CLOCK_MIN_DELTA) { |
662 | if (printk_ratelimit()) | 662 | if (printk_ratelimit()) |
663 | printk(KERN_DEBUG "%s: small delta %lu ns\n", | 663 | printk(KERN_DEBUG "%s: small delta %lu ns\n", |
664 | __FUNCTION__, delta); | 664 | __func__, delta); |
665 | return -ETIME; | 665 | return -ETIME; |
666 | } | 666 | } |
667 | 667 | ||
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c index 37756b6fb329..5415a9d06f53 100644 --- a/arch/x86/lib/memcpy_32.c +++ b/arch/x86/lib/memcpy_32.c | |||
@@ -25,7 +25,7 @@ void *memmove(void *dest, const void *src, size_t n) | |||
25 | int d0, d1, d2; | 25 | int d0, d1, d2; |
26 | 26 | ||
27 | if (dest < src) { | 27 | if (dest < src) { |
28 | memcpy(dest,src,n); | 28 | memcpy(dest, src, n); |
29 | } else { | 29 | } else { |
30 | __asm__ __volatile__( | 30 | __asm__ __volatile__( |
31 | "std\n\t" | 31 | "std\n\t" |
diff --git a/arch/x86/lib/memmove_64.c b/arch/x86/lib/memmove_64.c index 80175e47b190..0a33909bf122 100644 --- a/arch/x86/lib/memmove_64.c +++ b/arch/x86/lib/memmove_64.c | |||
@@ -6,10 +6,10 @@ | |||
6 | #include <linux/module.h> | 6 | #include <linux/module.h> |
7 | 7 | ||
8 | #undef memmove | 8 | #undef memmove |
9 | void *memmove(void * dest,const void *src,size_t count) | 9 | void *memmove(void *dest, const void *src, size_t count) |
10 | { | 10 | { |
11 | if (dest < src) { | 11 | if (dest < src) { |
12 | return memcpy(dest,src,count); | 12 | return memcpy(dest, src, count); |
13 | } else { | 13 | } else { |
14 | char *p = dest + count; | 14 | char *p = dest + count; |
15 | const char *s = src + count; | 15 | const char *s = src + count; |
@@ -17,5 +17,5 @@ void *memmove(void * dest,const void *src,size_t count) | |||
17 | *--p = *--s; | 17 | *--p = *--s; |
18 | } | 18 | } |
19 | return dest; | 19 | return dest; |
20 | } | 20 | } |
21 | EXPORT_SYMBOL(memmove); | 21 | EXPORT_SYMBOL(memmove); |
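memmove_64.c keeps the classic overlap rule: a forward copy is safe when dest < src, otherwise the copy must walk backwards from the tail so overlapping bytes are read before they are overwritten. A small demonstration, illustrative only:

#include <stdio.h>
#include <string.h>

static void *memmove_sketch(void *dest, const void *src, size_t count)
{
	if ((char *)dest < (const char *)src) {
		/* no byte of src is clobbered before it is read */
		return memcpy(dest, src, count);
	} else {
		char *p = (char *)dest + count;
		const char *s = (const char *)src + count;

		while (count--)
			*--p = *--s;	/* tail-first copy for overlap */
		return dest;
	}
}

int main(void)
{
	char buf[] = "abcdef";

	memmove_sketch(buf + 2, buf, 4);	/* overlapping shift right */
	printf("%s\n", buf);			/* prints "ababcd" */
	return 0;
}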
diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c index cc9b4a4450f3..c9f2d9ba8dd8 100644 --- a/arch/x86/lib/mmx_32.c +++ b/arch/x86/lib/mmx_32.c | |||
@@ -1,32 +1,30 @@ | |||
1 | #include <linux/types.h> | ||
2 | #include <linux/string.h> | ||
3 | #include <linux/sched.h> | ||
4 | #include <linux/hardirq.h> | ||
5 | #include <linux/module.h> | ||
6 | |||
7 | #include <asm/asm.h> | ||
8 | #include <asm/i387.h> | ||
9 | |||
10 | |||
11 | /* | 1 | /* |
12 | * MMX 3DNow! library helper functions | 2 | * MMX 3DNow! library helper functions |
13 | * | 3 | * |
14 | * To do: | 4 | * To do: |
15 | * We can use MMX just for prefetch in IRQ's. This may be a win. | 5 | * We can use MMX just for prefetch in IRQ's. This may be a win. |
16 | * (reported so on K6-III) | 6 | * (reported so on K6-III) |
17 | * We should use a better code neutral filler for the short jump | 7 | * We should use a better code neutral filler for the short jump |
18 | * leal ebx. [ebx] is apparently best for K6-2, but Cyrix ?? | 8 | * leal ebx. [ebx] is apparently best for K6-2, but Cyrix ?? |
19 | * We also want to clobber the filler register so we don't get any | 9 | * We also want to clobber the filler register so we don't get any |
20 | * register forwarding stalls on the filler. | 10 | * register forwarding stalls on the filler. |
21 | * | 11 | * |
22 | * Add *user handling. Checksums are not a win with MMX on any CPU | 12 | * Add *user handling. Checksums are not a win with MMX on any CPU |
23 | * tested so far for any MMX solution figured. | 13 | * tested so far for any MMX solution figured. |
24 | * | 14 | * |
25 | * 22/09/2000 - Arjan van de Ven | 15 | * 22/09/2000 - Arjan van de Ven |
26 | * Improved for non-engineering-sample Athlons | 16 | * Improved for non-engineering-sample Athlons |
27 | * | 17 | * |
28 | */ | 18 | */ |
29 | 19 | #include <linux/hardirq.h> | |
20 | #include <linux/string.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/types.h> | ||
24 | |||
25 | #include <asm/i387.h> | ||
26 | #include <asm/asm.h> | ||
27 | |||
30 | void *_mmx_memcpy(void *to, const void *from, size_t len) | 28 | void *_mmx_memcpy(void *to, const void *from, size_t len) |
31 | { | 29 | { |
32 | void *p; | 30 | void *p; |
@@ -51,12 +49,10 @@ void *_mmx_memcpy(void *to, const void *from, size_t len) | |||
51 | "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ | 49 | "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ |
52 | " jmp 2b\n" | 50 | " jmp 2b\n" |
53 | ".previous\n" | 51 | ".previous\n" |
54 | _ASM_EXTABLE(1b,3b) | 52 | _ASM_EXTABLE(1b, 3b) |
55 | : : "r" (from) ); | 53 | : : "r" (from)); |
56 | 54 | ||
57 | 55 | for ( ; i > 5; i--) { | |
58 | for(; i>5; i--) | ||
59 | { | ||
60 | __asm__ __volatile__ ( | 56 | __asm__ __volatile__ ( |
61 | "1: prefetch 320(%0)\n" | 57 | "1: prefetch 320(%0)\n" |
62 | "2: movq (%0), %%mm0\n" | 58 | "2: movq (%0), %%mm0\n" |
@@ -79,14 +75,14 @@ void *_mmx_memcpy(void *to, const void *from, size_t len) | |||
79 | "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ | 75 | "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ |
80 | " jmp 2b\n" | 76 | " jmp 2b\n" |
81 | ".previous\n" | 77 | ".previous\n" |
82 | _ASM_EXTABLE(1b,3b) | 78 | _ASM_EXTABLE(1b, 3b) |
83 | : : "r" (from), "r" (to) : "memory"); | 79 | : : "r" (from), "r" (to) : "memory"); |
84 | from+=64; | 80 | |
85 | to+=64; | 81 | from += 64; |
82 | to += 64; | ||
86 | } | 83 | } |
87 | 84 | ||
88 | for(; i>0; i--) | 85 | for ( ; i > 0; i--) { |
89 | { | ||
90 | __asm__ __volatile__ ( | 86 | __asm__ __volatile__ ( |
91 | " movq (%0), %%mm0\n" | 87 | " movq (%0), %%mm0\n" |
92 | " movq 8(%0), %%mm1\n" | 88 | " movq 8(%0), %%mm1\n" |
@@ -104,17 +100,20 @@ void *_mmx_memcpy(void *to, const void *from, size_t len) | |||
104 | " movq %%mm1, 40(%1)\n" | 100 | " movq %%mm1, 40(%1)\n" |
105 | " movq %%mm2, 48(%1)\n" | 101 | " movq %%mm2, 48(%1)\n" |
106 | " movq %%mm3, 56(%1)\n" | 102 | " movq %%mm3, 56(%1)\n" |
107 | : : "r" (from), "r" (to) : "memory"); | 103 | : : "r" (from), "r" (to) : "memory"); |
108 | from+=64; | 104 | |
109 | to+=64; | 105 | from += 64; |
106 | to += 64; | ||
110 | } | 107 | } |
111 | /* | 108 | /* |
112 | * Now do the tail of the block | 109 | * Now do the tail of the block: |
113 | */ | 110 | */ |
114 | __memcpy(to, from, len&63); | 111 | __memcpy(to, from, len & 63); |
115 | kernel_fpu_end(); | 112 | kernel_fpu_end(); |
113 | |||
116 | return p; | 114 | return p; |
117 | } | 115 | } |
116 | EXPORT_SYMBOL(_mmx_memcpy); | ||
118 | 117 | ||
119 | #ifdef CONFIG_MK7 | 118 | #ifdef CONFIG_MK7 |
120 | 119 | ||
@@ -128,13 +127,12 @@ static void fast_clear_page(void *page) | |||
128 | int i; | 127 | int i; |
129 | 128 | ||
130 | kernel_fpu_begin(); | 129 | kernel_fpu_begin(); |
131 | 130 | ||
132 | __asm__ __volatile__ ( | 131 | __asm__ __volatile__ ( |
133 | " pxor %%mm0, %%mm0\n" : : | 132 | " pxor %%mm0, %%mm0\n" : : |
134 | ); | 133 | ); |
135 | 134 | ||
136 | for(i=0;i<4096/64;i++) | 135 | for (i = 0; i < 4096/64; i++) { |
137 | { | ||
138 | __asm__ __volatile__ ( | 136 | __asm__ __volatile__ ( |
139 | " movntq %%mm0, (%0)\n" | 137 | " movntq %%mm0, (%0)\n" |
140 | " movntq %%mm0, 8(%0)\n" | 138 | " movntq %%mm0, 8(%0)\n" |
@@ -145,14 +143,15 @@ static void fast_clear_page(void *page) | |||
145 | " movntq %%mm0, 48(%0)\n" | 143 | " movntq %%mm0, 48(%0)\n" |
146 | " movntq %%mm0, 56(%0)\n" | 144 | " movntq %%mm0, 56(%0)\n" |
147 | : : "r" (page) : "memory"); | 145 | : : "r" (page) : "memory"); |
148 | page+=64; | 146 | page += 64; |
149 | } | 147 | } |
150 | /* since movntq is weakly-ordered, a "sfence" is needed to become | 148 | |
151 | * ordered again. | 149 | /* |
150 | * Since movntq is weakly-ordered, a "sfence" is needed to become | ||
151 | * ordered again: | ||
152 | */ | 152 | */ |
153 | __asm__ __volatile__ ( | 153 | __asm__ __volatile__("sfence\n"::); |
154 | " sfence \n" : : | 154 | |
155 | ); | ||
156 | kernel_fpu_end(); | 155 | kernel_fpu_end(); |
157 | } | 156 | } |
158 | 157 | ||
@@ -162,10 +161,11 @@ static void fast_copy_page(void *to, void *from) | |||
162 | 161 | ||
163 | kernel_fpu_begin(); | 162 | kernel_fpu_begin(); |
164 | 163 | ||
165 | /* maybe the prefetch stuff can go before the expensive fnsave... | 164 | /* |
165 | * maybe the prefetch stuff can go before the expensive fnsave... | ||
166 | * but that is for later. -AV | 166 | * but that is for later. -AV |
167 | */ | 167 | */ |
168 | __asm__ __volatile__ ( | 168 | __asm__ __volatile__( |
169 | "1: prefetch (%0)\n" | 169 | "1: prefetch (%0)\n" |
170 | " prefetch 64(%0)\n" | 170 | " prefetch 64(%0)\n" |
171 | " prefetch 128(%0)\n" | 171 | " prefetch 128(%0)\n" |
@@ -176,11 +176,9 @@ static void fast_copy_page(void *to, void *from) | |||
176 | "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ | 176 | "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ |
177 | " jmp 2b\n" | 177 | " jmp 2b\n" |
178 | ".previous\n" | 178 | ".previous\n" |
179 | _ASM_EXTABLE(1b,3b) | 179 | _ASM_EXTABLE(1b, 3b) : : "r" (from)); |
180 | : : "r" (from) ); | ||
181 | 180 | ||
182 | for(i=0; i<(4096-320)/64; i++) | 181 | for (i = 0; i < (4096-320)/64; i++) { |
183 | { | ||
184 | __asm__ __volatile__ ( | 182 | __asm__ __volatile__ ( |
185 | "1: prefetch 320(%0)\n" | 183 | "1: prefetch 320(%0)\n" |
186 | "2: movq (%0), %%mm0\n" | 184 | "2: movq (%0), %%mm0\n" |
@@ -203,13 +201,13 @@ static void fast_copy_page(void *to, void *from) | |||
203 | "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ | 201 | "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ |
204 | " jmp 2b\n" | 202 | " jmp 2b\n" |
205 | ".previous\n" | 203 | ".previous\n" |
206 | _ASM_EXTABLE(1b,3b) | 204 | _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory"); |
207 | : : "r" (from), "r" (to) : "memory"); | 205 | |
208 | from+=64; | 206 | from += 64; |
209 | to+=64; | 207 | to += 64; |
210 | } | 208 | } |
211 | for(i=(4096-320)/64; i<4096/64; i++) | 209 | |
212 | { | 210 | for (i = (4096-320)/64; i < 4096/64; i++) { |
213 | __asm__ __volatile__ ( | 211 | __asm__ __volatile__ ( |
214 | "2: movq (%0), %%mm0\n" | 212 | "2: movq (%0), %%mm0\n" |
215 | " movntq %%mm0, (%1)\n" | 213 | " movntq %%mm0, (%1)\n" |
@@ -227,37 +225,34 @@ static void fast_copy_page(void *to, void *from) | |||
227 | " movntq %%mm6, 48(%1)\n" | 225 | " movntq %%mm6, 48(%1)\n" |
228 | " movq 56(%0), %%mm7\n" | 226 | " movq 56(%0), %%mm7\n" |
229 | " movntq %%mm7, 56(%1)\n" | 227 | " movntq %%mm7, 56(%1)\n" |
230 | : : "r" (from), "r" (to) : "memory"); | 228 | : : "r" (from), "r" (to) : "memory"); |
231 | from+=64; | 229 | from += 64; |
232 | to+=64; | 230 | to += 64; |
233 | } | 231 | } |
234 | /* since movntq is weakly-ordered, a "sfence" is needed to become | 232 | /* |
235 | * ordered again. | 233 | * Since movntq is weakly-ordered, a "sfence" is needed to become |
234 | * ordered again: | ||
236 | */ | 235 | */ |
237 | __asm__ __volatile__ ( | 236 | __asm__ __volatile__("sfence \n"::); |
238 | " sfence \n" : : | ||
239 | ); | ||
240 | kernel_fpu_end(); | 237 | kernel_fpu_end(); |
241 | } | 238 | } |
242 | 239 | ||
243 | #else | 240 | #else /* CONFIG_MK7 */ |
244 | 241 | ||
245 | /* | 242 | /* |
246 | * Generic MMX implementation without K7 specific streaming | 243 | * Generic MMX implementation without K7 specific streaming |
247 | */ | 244 | */ |
248 | |||
249 | static void fast_clear_page(void *page) | 245 | static void fast_clear_page(void *page) |
250 | { | 246 | { |
251 | int i; | 247 | int i; |
252 | 248 | ||
253 | kernel_fpu_begin(); | 249 | kernel_fpu_begin(); |
254 | 250 | ||
255 | __asm__ __volatile__ ( | 251 | __asm__ __volatile__ ( |
256 | " pxor %%mm0, %%mm0\n" : : | 252 | " pxor %%mm0, %%mm0\n" : : |
257 | ); | 253 | ); |
258 | 254 | ||
259 | for(i=0;i<4096/128;i++) | 255 | for (i = 0; i < 4096/128; i++) { |
260 | { | ||
261 | __asm__ __volatile__ ( | 256 | __asm__ __volatile__ ( |
262 | " movq %%mm0, (%0)\n" | 257 | " movq %%mm0, (%0)\n" |
263 | " movq %%mm0, 8(%0)\n" | 258 | " movq %%mm0, 8(%0)\n" |
@@ -275,8 +270,8 @@ static void fast_clear_page(void *page) | |||
275 | " movq %%mm0, 104(%0)\n" | 270 | " movq %%mm0, 104(%0)\n" |
276 | " movq %%mm0, 112(%0)\n" | 271 | " movq %%mm0, 112(%0)\n" |
277 | " movq %%mm0, 120(%0)\n" | 272 | " movq %%mm0, 120(%0)\n" |
278 | : : "r" (page) : "memory"); | 273 | : : "r" (page) : "memory"); |
279 | page+=128; | 274 | page += 128; |
280 | } | 275 | } |
281 | 276 | ||
282 | kernel_fpu_end(); | 277 | kernel_fpu_end(); |
@@ -285,8 +280,7 @@ static void fast_clear_page(void *page) | |||
285 | static void fast_copy_page(void *to, void *from) | 280 | static void fast_copy_page(void *to, void *from) |
286 | { | 281 | { |
287 | int i; | 282 | int i; |
288 | 283 | ||
289 | |||
290 | kernel_fpu_begin(); | 284 | kernel_fpu_begin(); |
291 | 285 | ||
292 | __asm__ __volatile__ ( | 286 | __asm__ __volatile__ ( |
@@ -300,11 +294,9 @@ static void fast_copy_page(void *to, void *from) | |||
300 | "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ | 294 | "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ |
301 | " jmp 2b\n" | 295 | " jmp 2b\n" |
302 | ".previous\n" | 296 | ".previous\n" |
303 | _ASM_EXTABLE(1b,3b) | 297 | _ASM_EXTABLE(1b, 3b) : : "r" (from)); |
304 | : : "r" (from) ); | ||
305 | 298 | ||
306 | for(i=0; i<4096/64; i++) | 299 | for (i = 0; i < 4096/64; i++) { |
307 | { | ||
308 | __asm__ __volatile__ ( | 300 | __asm__ __volatile__ ( |
309 | "1: prefetch 320(%0)\n" | 301 | "1: prefetch 320(%0)\n" |
310 | "2: movq (%0), %%mm0\n" | 302 | "2: movq (%0), %%mm0\n" |
@@ -327,60 +319,59 @@ static void fast_copy_page(void *to, void *from) | |||
327 | "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ | 319 | "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ |
328 | " jmp 2b\n" | 320 | " jmp 2b\n" |
329 | ".previous\n" | 321 | ".previous\n" |
330 | _ASM_EXTABLE(1b,3b) | 322 | _ASM_EXTABLE(1b, 3b) |
331 | : : "r" (from), "r" (to) : "memory"); | 323 | : : "r" (from), "r" (to) : "memory"); |
332 | from+=64; | 324 | |
333 | to+=64; | 325 | from += 64; |
326 | to += 64; | ||
334 | } | 327 | } |
335 | kernel_fpu_end(); | 328 | kernel_fpu_end(); |
336 | } | 329 | } |
337 | 330 | ||
338 | 331 | #endif /* !CONFIG_MK7 */ | |
339 | #endif | ||
340 | 332 | ||
341 | /* | 333 | /* |
342 | * Favour MMX for page clear and copy. | 334 | * Favour MMX for page clear and copy: |
343 | */ | 335 | */ |
344 | 336 | static void slow_zero_page(void *page) | |
345 | static void slow_zero_page(void * page) | ||
346 | { | 337 | { |
347 | int d0, d1; | 338 | int d0, d1; |
348 | __asm__ __volatile__( \ | 339 | |
349 | "cld\n\t" \ | 340 | __asm__ __volatile__( |
350 | "rep ; stosl" \ | 341 | "cld\n\t" |
351 | : "=&c" (d0), "=&D" (d1) | 342 | "rep ; stosl" |
352 | :"a" (0),"1" (page),"0" (1024) | 343 | |
353 | :"memory"); | 344 | : "=&c" (d0), "=&D" (d1) |
345 | :"a" (0), "1" (page), "0" (1024) | ||
346 | :"memory"); | ||
354 | } | 347 | } |
355 | 348 | ||
356 | void mmx_clear_page(void * page) | 349 | void mmx_clear_page(void *page) |
357 | { | 350 | { |
358 | if(unlikely(in_interrupt())) | 351 | if (unlikely(in_interrupt())) |
359 | slow_zero_page(page); | 352 | slow_zero_page(page); |
360 | else | 353 | else |
361 | fast_clear_page(page); | 354 | fast_clear_page(page); |
362 | } | 355 | } |
356 | EXPORT_SYMBOL(mmx_clear_page); | ||
363 | 357 | ||
364 | static void slow_copy_page(void *to, void *from) | 358 | static void slow_copy_page(void *to, void *from) |
365 | { | 359 | { |
366 | int d0, d1, d2; | 360 | int d0, d1, d2; |
367 | __asm__ __volatile__( \ | 361 | |
368 | "cld\n\t" \ | 362 | __asm__ __volatile__( |
369 | "rep ; movsl" \ | 363 | "cld\n\t" |
370 | : "=&c" (d0), "=&D" (d1), "=&S" (d2) \ | 364 | "rep ; movsl" |
371 | : "0" (1024),"1" ((long) to),"2" ((long) from) \ | 365 | : "=&c" (d0), "=&D" (d1), "=&S" (d2) |
366 | : "0" (1024), "1" ((long) to), "2" ((long) from) | ||
372 | : "memory"); | 367 | : "memory"); |
373 | } | 368 | } |
374 | |||
375 | 369 | ||
376 | void mmx_copy_page(void *to, void *from) | 370 | void mmx_copy_page(void *to, void *from) |
377 | { | 371 | { |
378 | if(unlikely(in_interrupt())) | 372 | if (unlikely(in_interrupt())) |
379 | slow_copy_page(to, from); | 373 | slow_copy_page(to, from); |
380 | else | 374 | else |
381 | fast_copy_page(to, from); | 375 | fast_copy_page(to, from); |
382 | } | 376 | } |
383 | |||
384 | EXPORT_SYMBOL(_mmx_memcpy); | ||
385 | EXPORT_SYMBOL(mmx_clear_page); | ||
386 | EXPORT_SYMBOL(mmx_copy_page); | 377 | EXPORT_SYMBOL(mmx_copy_page); |
diff --git a/arch/x86/lib/semaphore_32.S b/arch/x86/lib/semaphore_32.S index 3899bd37fdf0..648fe4741782 100644 --- a/arch/x86/lib/semaphore_32.S +++ b/arch/x86/lib/semaphore_32.S | |||
@@ -30,89 +30,6 @@ | |||
30 | * value or just clobbered.. | 30 | * value or just clobbered.. |
31 | */ | 31 | */ |
32 | .section .sched.text, "ax" | 32 | .section .sched.text, "ax" |
33 | ENTRY(__down_failed) | ||
34 | CFI_STARTPROC | ||
35 | FRAME | ||
36 | pushl %edx | ||
37 | CFI_ADJUST_CFA_OFFSET 4 | ||
38 | CFI_REL_OFFSET edx,0 | ||
39 | pushl %ecx | ||
40 | CFI_ADJUST_CFA_OFFSET 4 | ||
41 | CFI_REL_OFFSET ecx,0 | ||
42 | call __down | ||
43 | popl %ecx | ||
44 | CFI_ADJUST_CFA_OFFSET -4 | ||
45 | CFI_RESTORE ecx | ||
46 | popl %edx | ||
47 | CFI_ADJUST_CFA_OFFSET -4 | ||
48 | CFI_RESTORE edx | ||
49 | ENDFRAME | ||
50 | ret | ||
51 | CFI_ENDPROC | ||
52 | ENDPROC(__down_failed) | ||
53 | |||
54 | ENTRY(__down_failed_interruptible) | ||
55 | CFI_STARTPROC | ||
56 | FRAME | ||
57 | pushl %edx | ||
58 | CFI_ADJUST_CFA_OFFSET 4 | ||
59 | CFI_REL_OFFSET edx,0 | ||
60 | pushl %ecx | ||
61 | CFI_ADJUST_CFA_OFFSET 4 | ||
62 | CFI_REL_OFFSET ecx,0 | ||
63 | call __down_interruptible | ||
64 | popl %ecx | ||
65 | CFI_ADJUST_CFA_OFFSET -4 | ||
66 | CFI_RESTORE ecx | ||
67 | popl %edx | ||
68 | CFI_ADJUST_CFA_OFFSET -4 | ||
69 | CFI_RESTORE edx | ||
70 | ENDFRAME | ||
71 | ret | ||
72 | CFI_ENDPROC | ||
73 | ENDPROC(__down_failed_interruptible) | ||
74 | |||
75 | ENTRY(__down_failed_trylock) | ||
76 | CFI_STARTPROC | ||
77 | FRAME | ||
78 | pushl %edx | ||
79 | CFI_ADJUST_CFA_OFFSET 4 | ||
80 | CFI_REL_OFFSET edx,0 | ||
81 | pushl %ecx | ||
82 | CFI_ADJUST_CFA_OFFSET 4 | ||
83 | CFI_REL_OFFSET ecx,0 | ||
84 | call __down_trylock | ||
85 | popl %ecx | ||
86 | CFI_ADJUST_CFA_OFFSET -4 | ||
87 | CFI_RESTORE ecx | ||
88 | popl %edx | ||
89 | CFI_ADJUST_CFA_OFFSET -4 | ||
90 | CFI_RESTORE edx | ||
91 | ENDFRAME | ||
92 | ret | ||
93 | CFI_ENDPROC | ||
94 | ENDPROC(__down_failed_trylock) | ||
95 | |||
96 | ENTRY(__up_wakeup) | ||
97 | CFI_STARTPROC | ||
98 | FRAME | ||
99 | pushl %edx | ||
100 | CFI_ADJUST_CFA_OFFSET 4 | ||
101 | CFI_REL_OFFSET edx,0 | ||
102 | pushl %ecx | ||
103 | CFI_ADJUST_CFA_OFFSET 4 | ||
104 | CFI_REL_OFFSET ecx,0 | ||
105 | call __up | ||
106 | popl %ecx | ||
107 | CFI_ADJUST_CFA_OFFSET -4 | ||
108 | CFI_RESTORE ecx | ||
109 | popl %edx | ||
110 | CFI_ADJUST_CFA_OFFSET -4 | ||
111 | CFI_RESTORE edx | ||
112 | ENDFRAME | ||
113 | ret | ||
114 | CFI_ENDPROC | ||
115 | ENDPROC(__up_wakeup) | ||
116 | 33 | ||
117 | /* | 34 | /* |
118 | * rw spinlock fallbacks | 35 | * rw spinlock fallbacks |
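The __down_failed/__up_wakeup stubs deleted above became unnecessary once semaphores moved to a generic, lock-protected C implementation shared by all architectures. A minimal userspace analogue of such a counting semaphore using pthreads; this sketches the concept only and is not the kernel's actual generic semaphore code:

#include <pthread.h>
#include <stdio.h>

struct sem_sketch {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	int count;
};

static void sem_down(struct sem_sketch *s)
{
	pthread_mutex_lock(&s->lock);
	while (s->count <= 0)			/* sleep until a unit is free */
		pthread_cond_wait(&s->wait, &s->lock);
	s->count--;
	pthread_mutex_unlock(&s->lock);
}

static void sem_up(struct sem_sketch *s)
{
	pthread_mutex_lock(&s->lock);
	s->count++;
	pthread_cond_signal(&s->wait);		/* wake one waiter */
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct sem_sketch s = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1
	};

	sem_down(&s);
	sem_up(&s);
	printf("count=%d\n", s.count);
	return 0;
}

With the contended paths expressed in portable C, the per-arch register-saving trampolines and their EXPORT_SYMBOL entries can all go, which is exactly what this series does across arch/.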
diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c index c2c0504a3071..94972e7c094d 100644 --- a/arch/x86/lib/string_32.c +++ b/arch/x86/lib/string_32.c | |||
@@ -14,25 +14,25 @@ | |||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | 15 | ||
16 | #ifdef __HAVE_ARCH_STRCPY | 16 | #ifdef __HAVE_ARCH_STRCPY |
17 | char *strcpy(char * dest,const char *src) | 17 | char *strcpy(char *dest, const char *src) |
18 | { | 18 | { |
19 | int d0, d1, d2; | 19 | int d0, d1, d2; |
20 | asm volatile( "1:\tlodsb\n\t" | 20 | asm volatile("1:\tlodsb\n\t" |
21 | "stosb\n\t" | 21 | "stosb\n\t" |
22 | "testb %%al,%%al\n\t" | 22 | "testb %%al,%%al\n\t" |
23 | "jne 1b" | 23 | "jne 1b" |
24 | : "=&S" (d0), "=&D" (d1), "=&a" (d2) | 24 | : "=&S" (d0), "=&D" (d1), "=&a" (d2) |
25 | :"0" (src),"1" (dest) : "memory"); | 25 | :"0" (src), "1" (dest) : "memory"); |
26 | return dest; | 26 | return dest; |
27 | } | 27 | } |
28 | EXPORT_SYMBOL(strcpy); | 28 | EXPORT_SYMBOL(strcpy); |
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | #ifdef __HAVE_ARCH_STRNCPY | 31 | #ifdef __HAVE_ARCH_STRNCPY |
32 | char *strncpy(char * dest,const char *src,size_t count) | 32 | char *strncpy(char *dest, const char *src, size_t count) |
33 | { | 33 | { |
34 | int d0, d1, d2, d3; | 34 | int d0, d1, d2, d3; |
35 | asm volatile( "1:\tdecl %2\n\t" | 35 | asm volatile("1:\tdecl %2\n\t" |
36 | "js 2f\n\t" | 36 | "js 2f\n\t" |
37 | "lodsb\n\t" | 37 | "lodsb\n\t" |
38 | "stosb\n\t" | 38 | "stosb\n\t" |
@@ -42,17 +42,17 @@ char *strncpy(char * dest,const char *src,size_t count) | |||
42 | "stosb\n" | 42 | "stosb\n" |
43 | "2:" | 43 | "2:" |
44 | : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3) | 44 | : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3) |
45 | :"0" (src),"1" (dest),"2" (count) : "memory"); | 45 | :"0" (src), "1" (dest), "2" (count) : "memory"); |
46 | return dest; | 46 | return dest; |
47 | } | 47 | } |
48 | EXPORT_SYMBOL(strncpy); | 48 | EXPORT_SYMBOL(strncpy); |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | #ifdef __HAVE_ARCH_STRCAT | 51 | #ifdef __HAVE_ARCH_STRCAT |
52 | char *strcat(char * dest,const char * src) | 52 | char *strcat(char *dest, const char *src) |
53 | { | 53 | { |
54 | int d0, d1, d2, d3; | 54 | int d0, d1, d2, d3; |
55 | asm volatile( "repne\n\t" | 55 | asm volatile("repne\n\t" |
56 | "scasb\n\t" | 56 | "scasb\n\t" |
57 | "decl %1\n" | 57 | "decl %1\n" |
58 | "1:\tlodsb\n\t" | 58 | "1:\tlodsb\n\t" |
@@ -67,10 +67,10 @@ EXPORT_SYMBOL(strcat); | |||
67 | #endif | 67 | #endif |
68 | 68 | ||
69 | #ifdef __HAVE_ARCH_STRNCAT | 69 | #ifdef __HAVE_ARCH_STRNCAT |
70 | char *strncat(char * dest,const char * src,size_t count) | 70 | char *strncat(char *dest, const char *src, size_t count) |
71 | { | 71 | { |
72 | int d0, d1, d2, d3; | 72 | int d0, d1, d2, d3; |
73 | asm volatile( "repne\n\t" | 73 | asm volatile("repne\n\t" |
74 | "scasb\n\t" | 74 | "scasb\n\t" |
75 | "decl %1\n\t" | 75 | "decl %1\n\t" |
76 | "movl %8,%3\n" | 76 | "movl %8,%3\n" |
@@ -83,7 +83,7 @@ char *strncat(char * dest,const char * src,size_t count) | |||
83 | "2:\txorl %2,%2\n\t" | 83 | "2:\txorl %2,%2\n\t" |
84 | "stosb" | 84 | "stosb" |
85 | : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) | 85 | : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) |
86 | : "0" (src),"1" (dest),"2" (0),"3" (0xffffffffu), "g" (count) | 86 | : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu), "g" (count) |
87 | : "memory"); | 87 | : "memory"); |
88 | return dest; | 88 | return dest; |
89 | } | 89 | } |
@@ -91,11 +91,11 @@ EXPORT_SYMBOL(strncat); | |||
91 | #endif | 91 | #endif |
92 | 92 | ||
93 | #ifdef __HAVE_ARCH_STRCMP | 93 | #ifdef __HAVE_ARCH_STRCMP |
94 | int strcmp(const char * cs,const char * ct) | 94 | int strcmp(const char *cs, const char *ct) |
95 | { | 95 | { |
96 | int d0, d1; | 96 | int d0, d1; |
97 | int res; | 97 | int res; |
98 | asm volatile( "1:\tlodsb\n\t" | 98 | asm volatile("1:\tlodsb\n\t" |
99 | "scasb\n\t" | 99 | "scasb\n\t" |
100 | "jne 2f\n\t" | 100 | "jne 2f\n\t" |
101 | "testb %%al,%%al\n\t" | 101 | "testb %%al,%%al\n\t" |
@@ -106,7 +106,7 @@ int strcmp(const char * cs,const char * ct) | |||
106 | "orb $1,%%al\n" | 106 | "orb $1,%%al\n" |
107 | "3:" | 107 | "3:" |
108 | :"=a" (res), "=&S" (d0), "=&D" (d1) | 108 | :"=a" (res), "=&S" (d0), "=&D" (d1) |
109 | :"1" (cs),"2" (ct) | 109 | :"1" (cs), "2" (ct) |
110 | :"memory"); | 110 | :"memory"); |
111 | return res; | 111 | return res; |
112 | } | 112 | } |
@@ -114,11 +114,11 @@ EXPORT_SYMBOL(strcmp); | |||
114 | #endif | 114 | #endif |
115 | 115 | ||
116 | #ifdef __HAVE_ARCH_STRNCMP | 116 | #ifdef __HAVE_ARCH_STRNCMP |
117 | int strncmp(const char * cs,const char * ct,size_t count) | 117 | int strncmp(const char *cs, const char *ct, size_t count) |
118 | { | 118 | { |
119 | int res; | 119 | int res; |
120 | int d0, d1, d2; | 120 | int d0, d1, d2; |
121 | asm volatile( "1:\tdecl %3\n\t" | 121 | asm volatile("1:\tdecl %3\n\t" |
122 | "js 2f\n\t" | 122 | "js 2f\n\t" |
123 | "lodsb\n\t" | 123 | "lodsb\n\t" |
124 | "scasb\n\t" | 124 | "scasb\n\t" |
@@ -131,7 +131,7 @@ int strncmp(const char * cs,const char * ct,size_t count) | |||
131 | "orb $1,%%al\n" | 131 | "orb $1,%%al\n" |
132 | "4:" | 132 | "4:" |
133 | :"=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2) | 133 | :"=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2) |
134 | :"1" (cs),"2" (ct),"3" (count) | 134 | :"1" (cs), "2" (ct), "3" (count) |
135 | :"memory"); | 135 | :"memory"); |
136 | return res; | 136 | return res; |
137 | } | 137 | } |
@@ -139,11 +139,11 @@ EXPORT_SYMBOL(strncmp); | |||
139 | #endif | 139 | #endif |
140 | 140 | ||
141 | #ifdef __HAVE_ARCH_STRCHR | 141 | #ifdef __HAVE_ARCH_STRCHR |
142 | char *strchr(const char * s, int c) | 142 | char *strchr(const char *s, int c) |
143 | { | 143 | { |
144 | int d0; | 144 | int d0; |
145 | char * res; | 145 | char *res; |
146 | asm volatile( "movb %%al,%%ah\n" | 146 | asm volatile("movb %%al,%%ah\n" |
147 | "1:\tlodsb\n\t" | 147 | "1:\tlodsb\n\t" |
148 | "cmpb %%ah,%%al\n\t" | 148 | "cmpb %%ah,%%al\n\t" |
149 | "je 2f\n\t" | 149 | "je 2f\n\t" |
@@ -153,7 +153,7 @@ char *strchr(const char * s, int c) | |||
153 | "2:\tmovl %1,%0\n\t" | 153 | "2:\tmovl %1,%0\n\t" |
154 | "decl %0" | 154 | "decl %0" |
155 | :"=a" (res), "=&S" (d0) | 155 | :"=a" (res), "=&S" (d0) |
156 | :"1" (s),"0" (c) | 156 | :"1" (s), "0" (c) |
157 | :"memory"); | 157 | :"memory"); |
158 | return res; | 158 | return res; |
159 | } | 159 | } |
@@ -161,16 +161,16 @@ EXPORT_SYMBOL(strchr); | |||
161 | #endif | 161 | #endif |
162 | 162 | ||
163 | #ifdef __HAVE_ARCH_STRLEN | 163 | #ifdef __HAVE_ARCH_STRLEN |
164 | size_t strlen(const char * s) | 164 | size_t strlen(const char *s) |
165 | { | 165 | { |
166 | int d0; | 166 | int d0; |
167 | int res; | 167 | int res; |
168 | asm volatile( "repne\n\t" | 168 | asm volatile("repne\n\t" |
169 | "scasb\n\t" | 169 | "scasb\n\t" |
170 | "notl %0\n\t" | 170 | "notl %0\n\t" |
171 | "decl %0" | 171 | "decl %0" |
172 | :"=c" (res), "=&D" (d0) | 172 | :"=c" (res), "=&D" (d0) |
173 | :"1" (s),"a" (0), "0" (0xffffffffu) | 173 | :"1" (s), "a" (0), "0" (0xffffffffu) |
174 | :"memory"); | 174 | :"memory"); |
175 | return res; | 175 | return res; |
176 | } | 176 | } |
@@ -178,19 +178,19 @@ EXPORT_SYMBOL(strlen); | |||
178 | #endif | 178 | #endif |
179 | 179 | ||
180 | #ifdef __HAVE_ARCH_MEMCHR | 180 | #ifdef __HAVE_ARCH_MEMCHR |
181 | void *memchr(const void *cs,int c,size_t count) | 181 | void *memchr(const void *cs, int c, size_t count) |
182 | { | 182 | { |
183 | int d0; | 183 | int d0; |
184 | void *res; | 184 | void *res; |
185 | if (!count) | 185 | if (!count) |
186 | return NULL; | 186 | return NULL; |
187 | asm volatile( "repne\n\t" | 187 | asm volatile("repne\n\t" |
188 | "scasb\n\t" | 188 | "scasb\n\t" |
189 | "je 1f\n\t" | 189 | "je 1f\n\t" |
190 | "movl $1,%0\n" | 190 | "movl $1,%0\n" |
191 | "1:\tdecl %0" | 191 | "1:\tdecl %0" |
192 | :"=D" (res), "=&c" (d0) | 192 | :"=D" (res), "=&c" (d0) |
193 | :"a" (c),"0" (cs),"1" (count) | 193 | :"a" (c), "0" (cs), "1" (count) |
194 | :"memory"); | 194 | :"memory"); |
195 | return res; | 195 | return res; |
196 | } | 196 | } |
@@ -198,7 +198,7 @@ EXPORT_SYMBOL(memchr); | |||
198 | #endif | 198 | #endif |
199 | 199 | ||
200 | #ifdef __HAVE_ARCH_MEMSCAN | 200 | #ifdef __HAVE_ARCH_MEMSCAN |
201 | void *memscan(void * addr, int c, size_t size) | 201 | void *memscan(void *addr, int c, size_t size) |
202 | { | 202 | { |
203 | if (!size) | 203 | if (!size) |
204 | return addr; | 204 | return addr; |
@@ -219,7 +219,7 @@ size_t strnlen(const char *s, size_t count) | |||
219 | { | 219 | { |
220 | int d0; | 220 | int d0; |
221 | int res; | 221 | int res; |
222 | asm volatile( "movl %2,%0\n\t" | 222 | asm volatile("movl %2,%0\n\t" |
223 | "jmp 2f\n" | 223 | "jmp 2f\n" |
224 | "1:\tcmpb $0,(%0)\n\t" | 224 | "1:\tcmpb $0,(%0)\n\t" |
225 | "je 3f\n\t" | 225 | "je 3f\n\t" |
@@ -229,7 +229,7 @@ size_t strnlen(const char *s, size_t count) | |||
229 | "jne 1b\n" | 229 | "jne 1b\n" |
230 | "3:\tsubl %2,%0" | 230 | "3:\tsubl %2,%0" |
231 | :"=a" (res), "=&d" (d0) | 231 | :"=a" (res), "=&d" (d0) |
232 | :"c" (s),"1" (count) | 232 | :"c" (s), "1" (count) |
233 | :"memory"); | 233 | :"memory"); |
234 | return res; | 234 | return res; |
235 | } | 235 | } |
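
For readers not fluent in x86 string ops: the `strlen()` above preloads %ecx with 0xffffffff and %al with 0, lets `repne; scasb` count down once per byte scanned (terminating NUL included), then `notl`/`decl` turn the remaining counter into the length. A behaviorally equivalent C rendering, offered only to decode the idiom:

    #include <stddef.h>

    static size_t strlen_equiv(const char *s)
    {
        unsigned int ecx = 0xffffffffu;    /* the "0" (0xffffffffu) input */
        while (*s++ != '\0')               /* repne; scasb ...            */
            ecx--;
        ecx--;                             /* ... also consumes the NUL   */
        return ~ecx - 1;                   /* notl %0; decl %0            */
    }
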
diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c index a3dafbf59dae..42e8a50303f3 100644 --- a/arch/x86/lib/strstr_32.c +++ b/arch/x86/lib/strstr_32.c | |||
@@ -1,9 +1,9 @@ | |||
1 | #include <linux/string.h> | 1 | #include <linux/string.h> |
2 | 2 | ||
3 | char * strstr(const char * cs,const char * ct) | 3 | char *strstr(const char *cs, const char *ct) |
4 | { | 4 | { |
5 | int d0, d1; | 5 | int d0, d1; |
6 | register char * __res; | 6 | register char *__res; |
7 | __asm__ __volatile__( | 7 | __asm__ __volatile__( |
8 | "movl %6,%%edi\n\t" | 8 | "movl %6,%%edi\n\t" |
9 | "repne\n\t" | 9 | "repne\n\t" |
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S index 8b92d428ab02..e009251d4e9f 100644 --- a/arch/x86/lib/thunk_64.S +++ b/arch/x86/lib/thunk_64.S | |||
@@ -41,11 +41,6 @@ | |||
41 | thunk rwsem_downgrade_thunk,rwsem_downgrade_wake | 41 | thunk rwsem_downgrade_thunk,rwsem_downgrade_wake |
42 | #endif | 42 | #endif |
43 | 43 | ||
44 | thunk __down_failed,__down | ||
45 | thunk_retrax __down_failed_interruptible,__down_interruptible | ||
46 | thunk_retrax __down_failed_trylock,__down_trylock | ||
47 | thunk __up_wakeup,__up | ||
48 | |||
49 | #ifdef CONFIG_TRACE_IRQFLAGS | 44 | #ifdef CONFIG_TRACE_IRQFLAGS |
50 | thunk trace_hardirqs_on_thunk,trace_hardirqs_on | 45 | thunk trace_hardirqs_on_thunk,trace_hardirqs_on |
51 | thunk trace_hardirqs_off_thunk,trace_hardirqs_off | 46 | thunk trace_hardirqs_off_thunk,trace_hardirqs_off |
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index e849b9998b0e..24e60944971a 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * User address space access functions. | 2 | * User address space access functions. |
3 | * The non inlined parts of asm-i386/uaccess.h are here. | 3 | * The non inlined parts of asm-i386/uaccess.h are here. |
4 | * | 4 | * |
@@ -22,14 +22,14 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon | |||
22 | #endif | 22 | #endif |
23 | return 1; | 23 | return 1; |
24 | } | 24 | } |
25 | #define movsl_is_ok(a1,a2,n) \ | 25 | #define movsl_is_ok(a1, a2, n) \ |
26 | __movsl_is_ok((unsigned long)(a1),(unsigned long)(a2),(n)) | 26 | __movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n)) |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * Copy a null terminated string from userspace. | 29 | * Copy a null terminated string from userspace. |
30 | */ | 30 | */ |
31 | 31 | ||
32 | #define __do_strncpy_from_user(dst,src,count,res) \ | 32 | #define __do_strncpy_from_user(dst, src, count, res) \ |
33 | do { \ | 33 | do { \ |
34 | int __d0, __d1, __d2; \ | 34 | int __d0, __d1, __d2; \ |
35 | might_sleep(); \ | 35 | might_sleep(); \ |
@@ -61,7 +61,7 @@ do { \ | |||
61 | * least @count bytes long. | 61 | * least @count bytes long. |
62 | * @src: Source address, in user space. | 62 | * @src: Source address, in user space. |
63 | * @count: Maximum number of bytes to copy, including the trailing NUL. | 63 | * @count: Maximum number of bytes to copy, including the trailing NUL. |
64 | * | 64 | * |
65 | * Copies a NUL-terminated string from userspace to kernel space. | 65 | * Copies a NUL-terminated string from userspace to kernel space. |
66 | * Caller must check the specified block with access_ok() before calling | 66 | * Caller must check the specified block with access_ok() before calling |
67 | * this function. | 67 | * this function. |
@@ -90,7 +90,7 @@ EXPORT_SYMBOL(__strncpy_from_user); | |||
90 | * least @count bytes long. | 90 | * least @count bytes long. |
91 | * @src: Source address, in user space. | 91 | * @src: Source address, in user space. |
92 | * @count: Maximum number of bytes to copy, including the trailing NUL. | 92 | * @count: Maximum number of bytes to copy, including the trailing NUL. |
93 | * | 93 | * |
94 | * Copies a NUL-terminated string from userspace to kernel space. | 94 | * Copies a NUL-terminated string from userspace to kernel space. |
95 | * | 95 | * |
96 | * On success, returns the length of the string (not including the trailing | 96 | * On success, returns the length of the string (not including the trailing |
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(strncpy_from_user); | |||
120 | do { \ | 120 | do { \ |
121 | int __d0; \ | 121 | int __d0; \ |
122 | might_sleep(); \ | 122 | might_sleep(); \ |
123 | __asm__ __volatile__( \ | 123 | __asm__ __volatile__( \ |
124 | "0: rep; stosl\n" \ | 124 | "0: rep; stosl\n" \ |
125 | " movl %2,%0\n" \ | 125 | " movl %2,%0\n" \ |
126 | "1: rep; stosb\n" \ | 126 | "1: rep; stosb\n" \ |
@@ -333,17 +333,17 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) | |||
333 | __asm__ __volatile__( | 333 | __asm__ __volatile__( |
334 | " .align 2,0x90\n" | 334 | " .align 2,0x90\n" |
335 | "0: movl 32(%4), %%eax\n" | 335 | "0: movl 32(%4), %%eax\n" |
336 | " cmpl $67, %0\n" | 336 | " cmpl $67, %0\n" |
337 | " jbe 2f\n" | 337 | " jbe 2f\n" |
338 | "1: movl 64(%4), %%eax\n" | 338 | "1: movl 64(%4), %%eax\n" |
339 | " .align 2,0x90\n" | 339 | " .align 2,0x90\n" |
340 | "2: movl 0(%4), %%eax\n" | 340 | "2: movl 0(%4), %%eax\n" |
341 | "21: movl 4(%4), %%edx\n" | 341 | "21: movl 4(%4), %%edx\n" |
342 | " movl %%eax, 0(%3)\n" | 342 | " movl %%eax, 0(%3)\n" |
343 | " movl %%edx, 4(%3)\n" | 343 | " movl %%edx, 4(%3)\n" |
344 | "3: movl 8(%4), %%eax\n" | 344 | "3: movl 8(%4), %%eax\n" |
345 | "31: movl 12(%4),%%edx\n" | 345 | "31: movl 12(%4),%%edx\n" |
346 | " movl %%eax, 8(%3)\n" | 346 | " movl %%eax, 8(%3)\n" |
347 | " movl %%edx, 12(%3)\n" | 347 | " movl %%edx, 12(%3)\n" |
348 | "4: movl 16(%4), %%eax\n" | 348 | "4: movl 16(%4), %%eax\n" |
349 | "41: movl 20(%4), %%edx\n" | 349 | "41: movl 20(%4), %%edx\n" |
@@ -369,38 +369,38 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) | |||
369 | "91: movl 60(%4), %%edx\n" | 369 | "91: movl 60(%4), %%edx\n" |
370 | " movl %%eax, 56(%3)\n" | 370 | " movl %%eax, 56(%3)\n" |
371 | " movl %%edx, 60(%3)\n" | 371 | " movl %%edx, 60(%3)\n" |
372 | " addl $-64, %0\n" | 372 | " addl $-64, %0\n" |
373 | " addl $64, %4\n" | 373 | " addl $64, %4\n" |
374 | " addl $64, %3\n" | 374 | " addl $64, %3\n" |
375 | " cmpl $63, %0\n" | 375 | " cmpl $63, %0\n" |
376 | " ja 0b\n" | 376 | " ja 0b\n" |
377 | "5: movl %0, %%eax\n" | 377 | "5: movl %0, %%eax\n" |
378 | " shrl $2, %0\n" | 378 | " shrl $2, %0\n" |
379 | " andl $3, %%eax\n" | 379 | " andl $3, %%eax\n" |
380 | " cld\n" | 380 | " cld\n" |
381 | "6: rep; movsl\n" | 381 | "6: rep; movsl\n" |
382 | " movl %%eax,%0\n" | 382 | " movl %%eax,%0\n" |
383 | "7: rep; movsb\n" | 383 | "7: rep; movsb\n" |
384 | "8:\n" | 384 | "8:\n" |
385 | ".section .fixup,\"ax\"\n" | 385 | ".section .fixup,\"ax\"\n" |
386 | "9: lea 0(%%eax,%0,4),%0\n" | 386 | "9: lea 0(%%eax,%0,4),%0\n" |
387 | "16: pushl %0\n" | 387 | "16: pushl %0\n" |
388 | " pushl %%eax\n" | 388 | " pushl %%eax\n" |
389 | " xorl %%eax,%%eax\n" | 389 | " xorl %%eax,%%eax\n" |
390 | " rep; stosb\n" | 390 | " rep; stosb\n" |
391 | " popl %%eax\n" | 391 | " popl %%eax\n" |
392 | " popl %0\n" | 392 | " popl %0\n" |
393 | " jmp 8b\n" | 393 | " jmp 8b\n" |
394 | ".previous\n" | 394 | ".previous\n" |
395 | ".section __ex_table,\"a\"\n" | 395 | ".section __ex_table,\"a\"\n" |
396 | " .align 4\n" | 396 | " .align 4\n" |
397 | " .long 0b,16b\n" | 397 | " .long 0b,16b\n" |
398 | " .long 1b,16b\n" | 398 | " .long 1b,16b\n" |
399 | " .long 2b,16b\n" | 399 | " .long 2b,16b\n" |
400 | " .long 21b,16b\n" | 400 | " .long 21b,16b\n" |
401 | " .long 3b,16b\n" | 401 | " .long 3b,16b\n" |
402 | " .long 31b,16b\n" | 402 | " .long 31b,16b\n" |
403 | " .long 4b,16b\n" | 403 | " .long 4b,16b\n" |
404 | " .long 41b,16b\n" | 404 | " .long 41b,16b\n" |
405 | " .long 10b,16b\n" | 405 | " .long 10b,16b\n" |
406 | " .long 51b,16b\n" | 406 | " .long 51b,16b\n" |
@@ -412,9 +412,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) | |||
412 | " .long 81b,16b\n" | 412 | " .long 81b,16b\n" |
413 | " .long 14b,16b\n" | 413 | " .long 14b,16b\n" |
414 | " .long 91b,16b\n" | 414 | " .long 91b,16b\n" |
415 | " .long 6b,9b\n" | 415 | " .long 6b,9b\n" |
416 | " .long 7b,16b\n" | 416 | " .long 7b,16b\n" |
417 | ".previous" | 417 | ".previous" |
418 | : "=&c"(size), "=&D" (d0), "=&S" (d1) | 418 | : "=&c"(size), "=&D" (d0), "=&S" (d1) |
419 | : "1"(to), "2"(from), "0"(size) | 419 | : "1"(to), "2"(from), "0"(size) |
420 | : "eax", "edx", "memory"); | 420 | : "eax", "edx", "memory"); |
@@ -429,7 +429,7 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) | |||
429 | static unsigned long __copy_user_zeroing_intel_nocache(void *to, | 429 | static unsigned long __copy_user_zeroing_intel_nocache(void *to, |
430 | const void __user *from, unsigned long size) | 430 | const void __user *from, unsigned long size) |
431 | { | 431 | { |
432 | int d0, d1; | 432 | int d0, d1; |
433 | 433 | ||
434 | __asm__ __volatile__( | 434 | __asm__ __volatile__( |
435 | " .align 2,0x90\n" | 435 | " .align 2,0x90\n" |
@@ -526,7 +526,7 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to, | |||
526 | static unsigned long __copy_user_intel_nocache(void *to, | 526 | static unsigned long __copy_user_intel_nocache(void *to, |
527 | const void __user *from, unsigned long size) | 527 | const void __user *from, unsigned long size) |
528 | { | 528 | { |
529 | int d0, d1; | 529 | int d0, d1; |
530 | 530 | ||
531 | __asm__ __volatile__( | 531 | __asm__ __volatile__( |
532 | " .align 2,0x90\n" | 532 | " .align 2,0x90\n" |
@@ -629,7 +629,7 @@ unsigned long __copy_user_zeroing_intel_nocache(void *to, | |||
629 | #endif /* CONFIG_X86_INTEL_USERCOPY */ | 629 | #endif /* CONFIG_X86_INTEL_USERCOPY */ |
630 | 630 | ||
631 | /* Generic arbitrary sized copy. */ | 631 | /* Generic arbitrary sized copy. */ |
632 | #define __copy_user(to,from,size) \ | 632 | #define __copy_user(to, from, size) \ |
633 | do { \ | 633 | do { \ |
634 | int __d0, __d1, __d2; \ | 634 | int __d0, __d1, __d2; \ |
635 | __asm__ __volatile__( \ | 635 | __asm__ __volatile__( \ |
@@ -665,7 +665,7 @@ do { \ | |||
665 | : "memory"); \ | 665 | : "memory"); \ |
666 | } while (0) | 666 | } while (0) |
667 | 667 | ||
668 | #define __copy_user_zeroing(to,from,size) \ | 668 | #define __copy_user_zeroing(to, from, size) \ |
669 | do { \ | 669 | do { \ |
670 | int __d0, __d1, __d2; \ | 670 | int __d0, __d1, __d2; \ |
671 | __asm__ __volatile__( \ | 671 | __asm__ __volatile__( \ |
@@ -712,7 +712,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, | |||
712 | { | 712 | { |
713 | #ifndef CONFIG_X86_WP_WORKS_OK | 713 | #ifndef CONFIG_X86_WP_WORKS_OK |
714 | if (unlikely(boot_cpu_data.wp_works_ok == 0) && | 714 | if (unlikely(boot_cpu_data.wp_works_ok == 0) && |
715 | ((unsigned long )to) < TASK_SIZE) { | 715 | ((unsigned long)to) < TASK_SIZE) { |
716 | /* | 716 | /* |
717 | * When we are in an atomic section (see | 717 | * When we are in an atomic section (see |
718 | * mm/filemap.c:file_read_actor), return the full | 718 | * mm/filemap.c:file_read_actor), return the full |
@@ -721,26 +721,26 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, | |||
721 | if (in_atomic()) | 721 | if (in_atomic()) |
722 | return n; | 722 | return n; |
723 | 723 | ||
724 | /* | 724 | /* |
725 | * CPU does not honor the WP bit when writing | 725 | * CPU does not honor the WP bit when writing |
726 | * from supervisory mode, and due to preemption or SMP, | 726 | * from supervisory mode, and due to preemption or SMP, |
727 | * the page tables can change at any time. | 727 | * the page tables can change at any time. |
728 | * Do it manually. Manfred <manfred@colorfullife.com> | 728 | * Do it manually. Manfred <manfred@colorfullife.com> |
729 | */ | 729 | */ |
730 | while (n) { | 730 | while (n) { |
731 | unsigned long offset = ((unsigned long)to)%PAGE_SIZE; | 731 | unsigned long offset = ((unsigned long)to)%PAGE_SIZE; |
732 | unsigned long len = PAGE_SIZE - offset; | 732 | unsigned long len = PAGE_SIZE - offset; |
733 | int retval; | 733 | int retval; |
734 | struct page *pg; | 734 | struct page *pg; |
735 | void *maddr; | 735 | void *maddr; |
736 | 736 | ||
737 | if (len > n) | 737 | if (len > n) |
738 | len = n; | 738 | len = n; |
739 | 739 | ||
740 | survive: | 740 | survive: |
741 | down_read(¤t->mm->mmap_sem); | 741 | down_read(¤t->mm->mmap_sem); |
742 | retval = get_user_pages(current, current->mm, | 742 | retval = get_user_pages(current, current->mm, |
743 | (unsigned long )to, 1, 1, 0, &pg, NULL); | 743 | (unsigned long)to, 1, 1, 0, &pg, NULL); |
744 | 744 | ||
745 | if (retval == -ENOMEM && is_global_init(current)) { | 745 | if (retval == -ENOMEM && is_global_init(current)) { |
746 | up_read(¤t->mm->mmap_sem); | 746 | up_read(¤t->mm->mmap_sem); |
@@ -750,8 +750,8 @@ survive: | |||
750 | 750 | ||
751 | if (retval != 1) { | 751 | if (retval != 1) { |
752 | up_read(¤t->mm->mmap_sem); | 752 | up_read(¤t->mm->mmap_sem); |
753 | break; | 753 | break; |
754 | } | 754 | } |
755 | 755 | ||
756 | maddr = kmap_atomic(pg, KM_USER0); | 756 | maddr = kmap_atomic(pg, KM_USER0); |
757 | memcpy(maddr + offset, from, len); | 757 | memcpy(maddr + offset, from, len); |
@@ -802,12 +802,12 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from, | |||
802 | unsigned long n) | 802 | unsigned long n) |
803 | { | 803 | { |
804 | #ifdef CONFIG_X86_INTEL_USERCOPY | 804 | #ifdef CONFIG_X86_INTEL_USERCOPY |
805 | if ( n > 64 && cpu_has_xmm2) | 805 | if (n > 64 && cpu_has_xmm2) |
806 | n = __copy_user_zeroing_intel_nocache(to, from, n); | 806 | n = __copy_user_zeroing_intel_nocache(to, from, n); |
807 | else | 807 | else |
808 | __copy_user_zeroing(to, from, n); | 808 | __copy_user_zeroing(to, from, n); |
809 | #else | 809 | #else |
810 | __copy_user_zeroing(to, from, n); | 810 | __copy_user_zeroing(to, from, n); |
811 | #endif | 811 | #endif |
812 | return n; | 812 | return n; |
813 | } | 813 | } |
@@ -817,12 +817,12 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr | |||
817 | unsigned long n) | 817 | unsigned long n) |
818 | { | 818 | { |
819 | #ifdef CONFIG_X86_INTEL_USERCOPY | 819 | #ifdef CONFIG_X86_INTEL_USERCOPY |
820 | if ( n > 64 && cpu_has_xmm2) | 820 | if (n > 64 && cpu_has_xmm2) |
821 | n = __copy_user_intel_nocache(to, from, n); | 821 | n = __copy_user_intel_nocache(to, from, n); |
822 | else | 822 | else |
823 | __copy_user(to, from, n); | 823 | __copy_user(to, from, n); |
824 | #else | 824 | #else |
825 | __copy_user(to, from, n); | 825 | __copy_user(to, from, n); |
826 | #endif | 826 | #endif |
827 | return n; | 827 | return n; |
828 | } | 828 | } |
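
The `survive:` loop above is worth restating: when the CPU ignores WP from supervisor mode, `__copy_to_user_ll()` cannot just store and rely on the fault handler, so it resolves each destination page by hand. A condensed sketch of that shape, using the same era's APIs as the code above; error handling and the OOM/init special case are trimmed, and this is an illustration rather than a drop-in replacement:

    #include <linux/mm.h>
    #include <linux/highmem.h>
    #include <linux/sched.h>
    #include <linux/string.h>

    static unsigned long copy_via_gup(void __user *to, const void *from,
                                      unsigned long n)
    {
        while (n) {
            unsigned long offset = (unsigned long)to % PAGE_SIZE;
            unsigned long len = min(PAGE_SIZE - offset, n);
            struct page *pg;
            void *maddr;

            down_read(&current->mm->mmap_sem);
            if (get_user_pages(current, current->mm, (unsigned long)to,
                               1, 1, 0, &pg, NULL) != 1) {
                up_read(&current->mm->mmap_sem);
                break;                      /* leftover 'n' stays uncopied */
            }
            maddr = kmap_atomic(pg, KM_USER0);  /* temporary kernel mapping */
            memcpy(maddr + offset, from, len);  /* the actual write */
            kunmap_atomic(maddr, KM_USER0);
            set_page_dirty_lock(pg);
            put_page(pg);
            up_read(&current->mm->mmap_sem);

            to += len;
            from += len;
            n -= len;
        }
        return n;                          /* bytes NOT copied, as usual */
    }
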
diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 292a225edabe..95fc463056d0 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs. | 2 | * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs. |
3 | * Drives the local APIC in "clustered mode". | 3 | * Drives the local APIC in "clustered mode". |
4 | */ | 4 | */ |
@@ -32,26 +32,26 @@ static int hp_ht_bigsmp(const struct dmi_system_id *d) | |||
32 | 32 | ||
33 | 33 | ||
34 | static const struct dmi_system_id bigsmp_dmi_table[] = { | 34 | static const struct dmi_system_id bigsmp_dmi_table[] = { |
35 | { hp_ht_bigsmp, "HP ProLiant DL760 G2", { | 35 | { hp_ht_bigsmp, "HP ProLiant DL760 G2", |
36 | DMI_MATCH(DMI_BIOS_VENDOR, "HP"), | 36 | { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), |
37 | DMI_MATCH(DMI_BIOS_VERSION, "P44-"), | 37 | DMI_MATCH(DMI_BIOS_VERSION, "P44-"),} |
38 | }}, | 38 | }, |
39 | 39 | ||
40 | { hp_ht_bigsmp, "HP ProLiant DL740", { | 40 | { hp_ht_bigsmp, "HP ProLiant DL740", |
41 | DMI_MATCH(DMI_BIOS_VENDOR, "HP"), | 41 | { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), |
42 | DMI_MATCH(DMI_BIOS_VERSION, "P47-"), | 42 | DMI_MATCH(DMI_BIOS_VERSION, "P47-"),} |
43 | }}, | 43 | }, |
44 | { } | 44 | { } |
45 | }; | 45 | }; |
46 | 46 | ||
47 | 47 | ||
48 | static int probe_bigsmp(void) | 48 | static int probe_bigsmp(void) |
49 | { | 49 | { |
50 | if (def_to_bigsmp) | 50 | if (def_to_bigsmp) |
51 | dmi_bigsmp = 1; | 51 | dmi_bigsmp = 1; |
52 | else | 52 | else |
53 | dmi_check_system(bigsmp_dmi_table); | 53 | dmi_check_system(bigsmp_dmi_table); |
54 | return dmi_bigsmp; | 54 | return dmi_bigsmp; |
55 | } | 55 | } |
56 | 56 | ||
57 | struct genapic apic_bigsmp = APIC_INIT("bigsmp", probe_bigsmp); | 57 | struct genapic apic_bigsmp = APIC_INIT("bigsmp", probe_bigsmp); |
diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c index 1af0cc7648f0..9e835a11a13a 100644 --- a/arch/x86/mach-generic/default.c +++ b/arch/x86/mach-generic/default.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * Default generic APIC driver. This handles up to 8 CPUs. | 2 | * Default generic APIC driver. This handles up to 8 CPUs. |
3 | */ | 3 | */ |
4 | #define APIC_DEFINITION 1 | 4 | #define APIC_DEFINITION 1 |
@@ -19,8 +19,8 @@ | |||
19 | 19 | ||
20 | /* should be called last. */ | 20 | /* should be called last. */ |
21 | static int probe_default(void) | 21 | static int probe_default(void) |
22 | { | 22 | { |
23 | return 1; | 23 | return 1; |
24 | } | 24 | } |
25 | 25 | ||
26 | struct genapic apic_default = APIC_INIT("default", probe_default); | 26 | struct genapic apic_default = APIC_INIT("default", probe_default); |
diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c index f410d3cb5659..c5ae751b994a 100644 --- a/arch/x86/mach-generic/probe.c +++ b/arch/x86/mach-generic/probe.c | |||
@@ -1,8 +1,9 @@ | |||
1 | /* Copyright 2003 Andi Kleen, SuSE Labs. | 1 | /* |
2 | * Subject to the GNU Public License, v.2 | 2 | * Copyright 2003 Andi Kleen, SuSE Labs. |
3 | * | 3 | * Subject to the GNU Public License, v.2 |
4 | * | ||
4 | * Generic x86 APIC driver probe layer. | 5 | * Generic x86 APIC driver probe layer. |
5 | */ | 6 | */ |
6 | #include <linux/threads.h> | 7 | #include <linux/threads.h> |
7 | #include <linux/cpumask.h> | 8 | #include <linux/cpumask.h> |
8 | #include <linux/string.h> | 9 | #include <linux/string.h> |
@@ -24,7 +25,7 @@ struct genapic *genapic = &apic_default; | |||
24 | 25 | ||
25 | static struct genapic *apic_probe[] __initdata = { | 26 | static struct genapic *apic_probe[] __initdata = { |
26 | &apic_summit, | 27 | &apic_summit, |
27 | &apic_bigsmp, | 28 | &apic_bigsmp, |
28 | &apic_es7000, | 29 | &apic_es7000, |
29 | &apic_default, /* must be last */ | 30 | &apic_default, /* must be last */ |
30 | NULL, | 31 | NULL, |
@@ -69,7 +70,7 @@ void __init generic_bigsmp_probe(void) | |||
69 | } | 70 | } |
70 | 71 | ||
71 | void __init generic_apic_probe(void) | 72 | void __init generic_apic_probe(void) |
72 | { | 73 | { |
73 | if (!cmdline_apic) { | 74 | if (!cmdline_apic) { |
74 | int i; | 75 | int i; |
75 | for (i = 0; apic_probe[i]; i++) { | 76 | for (i = 0; apic_probe[i]; i++) { |
@@ -83,40 +84,40 @@ void __init generic_apic_probe(void) | |||
83 | panic("Didn't find an APIC driver"); | 84 | panic("Didn't find an APIC driver"); |
84 | } | 85 | } |
85 | printk(KERN_INFO "Using APIC driver %s\n", genapic->name); | 86 | printk(KERN_INFO "Using APIC driver %s\n", genapic->name); |
86 | } | 87 | } |
87 | 88 | ||
88 | /* These functions can switch the APIC even after the initial ->probe() */ | 89 | /* These functions can switch the APIC even after the initial ->probe() */ |
89 | 90 | ||
90 | int __init mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid) | 91 | int __init mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid) |
91 | { | 92 | { |
92 | int i; | 93 | int i; |
93 | for (i = 0; apic_probe[i]; ++i) { | 94 | for (i = 0; apic_probe[i]; ++i) { |
94 | if (apic_probe[i]->mps_oem_check(mpc,oem,productid)) { | 95 | if (apic_probe[i]->mps_oem_check(mpc, oem, productid)) { |
95 | if (!cmdline_apic) { | 96 | if (!cmdline_apic) { |
96 | genapic = apic_probe[i]; | 97 | genapic = apic_probe[i]; |
97 | printk(KERN_INFO "Switched to APIC driver `%s'.\n", | 98 | printk(KERN_INFO "Switched to APIC driver `%s'.\n", |
98 | genapic->name); | 99 | genapic->name); |
99 | } | 100 | } |
100 | return 1; | 101 | return 1; |
101 | } | 102 | } |
102 | } | 103 | } |
103 | return 0; | 104 | return 0; |
104 | } | 105 | } |
105 | 106 | ||
106 | int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) | 107 | int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) |
107 | { | 108 | { |
108 | int i; | 109 | int i; |
109 | for (i = 0; apic_probe[i]; ++i) { | 110 | for (i = 0; apic_probe[i]; ++i) { |
110 | if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { | 111 | if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { |
111 | if (!cmdline_apic) { | 112 | if (!cmdline_apic) { |
112 | genapic = apic_probe[i]; | 113 | genapic = apic_probe[i]; |
113 | printk(KERN_INFO "Switched to APIC driver `%s'.\n", | 114 | printk(KERN_INFO "Switched to APIC driver `%s'.\n", |
114 | genapic->name); | 115 | genapic->name); |
115 | } | 116 | } |
116 | return 1; | 117 | return 1; |
117 | } | 118 | } |
118 | } | 119 | } |
119 | return 0; | 120 | return 0; |
120 | } | 121 | } |
121 | 122 | ||
122 | int hard_smp_processor_id(void) | 123 | int hard_smp_processor_id(void) |
diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 74883ccb8f73..a97ea0f35b1e 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * APIC driver for the IBM "Summit" chipset. | 2 | * APIC driver for the IBM "Summit" chipset. |
3 | */ | 3 | */ |
4 | #define APIC_DEFINITION 1 | 4 | #define APIC_DEFINITION 1 |
@@ -19,9 +19,9 @@ | |||
19 | #include <asm/mach-summit/mach_mpparse.h> | 19 | #include <asm/mach-summit/mach_mpparse.h> |
20 | 20 | ||
21 | static int probe_summit(void) | 21 | static int probe_summit(void) |
22 | { | 22 | { |
23 | /* probed later in mptable/ACPI hooks */ | 23 | /* probed later in mptable/ACPI hooks */ |
24 | return 0; | 24 | return 0; |
25 | } | 25 | } |
26 | 26 | ||
27 | struct genapic apic_summit = APIC_INIT("summit", probe_summit); | 27 | struct genapic apic_summit = APIC_INIT("summit", probe_summit); |
diff --git a/arch/x86/mach-rdc321x/Makefile b/arch/x86/mach-rdc321x/Makefile index 1faac8125e3d..8325b4ca431c 100644 --- a/arch/x86/mach-rdc321x/Makefile +++ b/arch/x86/mach-rdc321x/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | # | 1 | # |
2 | # Makefile for the RDC321x specific parts of the kernel | 2 | # Makefile for the RDC321x specific parts of the kernel |
3 | # | 3 | # |
4 | obj-$(CONFIG_X86_RDC321X) := gpio.o platform.o wdt.o | 4 | obj-$(CONFIG_X86_RDC321X) := gpio.o platform.o |
5 | 5 | ||
diff --git a/arch/x86/mach-rdc321x/wdt.c b/arch/x86/mach-rdc321x/wdt.c deleted file mode 100644 index ec5625ae7061..000000000000 --- a/arch/x86/mach-rdc321x/wdt.c +++ /dev/null | |||
@@ -1,275 +0,0 @@ | |||
1 | /* | ||
2 | * RDC321x watchdog driver | ||
3 | * | ||
4 | * Copyright (C) 2007 Florian Fainelli <florian@openwrt.org> | ||
5 | * | ||
6 | * This driver is highly inspired from the cpu5_wdt driver | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <linux/moduleparam.h> | ||
26 | #include <linux/types.h> | ||
27 | #include <linux/errno.h> | ||
28 | #include <linux/miscdevice.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/ioport.h> | ||
32 | #include <linux/timer.h> | ||
33 | #include <linux/completion.h> | ||
34 | #include <linux/jiffies.h> | ||
35 | #include <linux/platform_device.h> | ||
36 | #include <linux/watchdog.h> | ||
37 | #include <linux/io.h> | ||
38 | #include <linux/uaccess.h> | ||
39 | |||
40 | #include <asm/mach-rdc321x/rdc321x_defs.h> | ||
41 | |||
42 | #define RDC_WDT_MASK 0x80000000 /* Mask */ | ||
43 | #define RDC_WDT_EN 0x00800000 /* Enable bit */ | ||
44 | #define RDC_WDT_WTI 0x00200000 /* Generate CPU reset/NMI/WDT on timeout */ | ||
45 | #define RDC_WDT_RST 0x00100000 /* Reset bit */ | ||
46 | #define RDC_WDT_WIF 0x00040000 /* WDT IRQ Flag */ | ||
47 | #define RDC_WDT_IRT 0x00000100 /* IRQ Routing table */ | ||
48 | #define RDC_WDT_CNT 0x00000001 /* WDT count */ | ||
49 | |||
50 | #define RDC_CLS_TMR 0x80003844 /* Clear timer */ | ||
51 | |||
52 | #define RDC_WDT_INTERVAL (HZ/10+1) | ||
53 | |||
54 | int nowayout = WATCHDOG_NOWAYOUT; | ||
55 | module_param(nowayout, int, 0); | ||
56 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); | ||
57 | |||
58 | static int ticks = 1000; | ||
59 | |||
60 | /* some device data */ | ||
61 | |||
62 | static struct { | ||
63 | struct completion stop; | ||
64 | volatile int running; | ||
65 | struct timer_list timer; | ||
66 | volatile int queue; | ||
67 | int default_ticks; | ||
68 | unsigned long inuse; | ||
69 | } rdc321x_wdt_device; | ||
70 | |||
71 | /* generic helper functions */ | ||
72 | |||
73 | static void rdc321x_wdt_trigger(unsigned long unused) | ||
74 | { | ||
75 | if (rdc321x_wdt_device.running) | ||
76 | ticks--; | ||
77 | |||
78 | /* keep watchdog alive */ | ||
79 | outl(RDC_WDT_EN|inl(RDC3210_CFGREG_DATA), RDC3210_CFGREG_DATA); | ||
80 | |||
81 | /* requeue?? */ | ||
82 | if (rdc321x_wdt_device.queue && ticks) | ||
83 | mod_timer(&rdc321x_wdt_device.timer, | ||
84 | jiffies + RDC_WDT_INTERVAL); | ||
85 | else { | ||
86 | /* ticks doesn't matter anyway */ | ||
87 | complete(&rdc321x_wdt_device.stop); | ||
88 | } | ||
89 | |||
90 | } | ||
91 | |||
92 | static void rdc321x_wdt_reset(void) | ||
93 | { | ||
94 | ticks = rdc321x_wdt_device.default_ticks; | ||
95 | } | ||
96 | |||
97 | static void rdc321x_wdt_start(void) | ||
98 | { | ||
99 | if (!rdc321x_wdt_device.queue) { | ||
100 | rdc321x_wdt_device.queue = 1; | ||
101 | |||
102 | /* Clear the timer */ | ||
103 | outl(RDC_CLS_TMR, RDC3210_CFGREG_ADDR); | ||
104 | |||
105 | /* Enable watchdog and set the timeout to 81.92 us */ | ||
106 | outl(RDC_WDT_EN|RDC_WDT_CNT, RDC3210_CFGREG_DATA); | ||
107 | |||
108 | mod_timer(&rdc321x_wdt_device.timer, | ||
109 | jiffies + RDC_WDT_INTERVAL); | ||
110 | } | ||
111 | |||
112 | /* if process dies, counter is not decremented */ | ||
113 | rdc321x_wdt_device.running++; | ||
114 | } | ||
115 | |||
116 | static int rdc321x_wdt_stop(void) | ||
117 | { | ||
118 | if (rdc321x_wdt_device.running) | ||
119 | rdc321x_wdt_device.running = 0; | ||
120 | |||
121 | ticks = rdc321x_wdt_device.default_ticks; | ||
122 | |||
123 | return -EIO; | ||
124 | } | ||
125 | |||
126 | /* filesystem operations */ | ||
127 | |||
128 | static int rdc321x_wdt_open(struct inode *inode, struct file *file) | ||
129 | { | ||
130 | if (test_and_set_bit(0, &rdc321x_wdt_device.inuse)) | ||
131 | return -EBUSY; | ||
132 | |||
133 | return nonseekable_open(inode, file); | ||
134 | } | ||
135 | |||
136 | static int rdc321x_wdt_release(struct inode *inode, struct file *file) | ||
137 | { | ||
138 | clear_bit(0, &rdc321x_wdt_device.inuse); | ||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | static int rdc321x_wdt_ioctl(struct inode *inode, struct file *file, | ||
143 | unsigned int cmd, unsigned long arg) | ||
144 | { | ||
145 | void __user *argp = (void __user *)arg; | ||
146 | unsigned int value; | ||
147 | static struct watchdog_info ident = { | ||
148 | .options = WDIOF_CARDRESET, | ||
149 | .identity = "RDC321x WDT", | ||
150 | }; | ||
151 | |||
152 | switch (cmd) { | ||
153 | case WDIOC_KEEPALIVE: | ||
154 | rdc321x_wdt_reset(); | ||
155 | break; | ||
156 | case WDIOC_GETSTATUS: | ||
157 | /* Read the value from the DATA register */ | ||
158 | value = inl(RDC3210_CFGREG_DATA); | ||
159 | if (copy_to_user(argp, &value, sizeof(int))) | ||
160 | return -EFAULT; | ||
161 | break; | ||
162 | case WDIOC_GETSUPPORT: | ||
163 | if (copy_to_user(argp, &ident, sizeof(ident))) | ||
164 | return -EFAULT; | ||
165 | break; | ||
166 | case WDIOC_SETOPTIONS: | ||
167 | if (copy_from_user(&value, argp, sizeof(int))) | ||
168 | return -EFAULT; | ||
169 | switch (value) { | ||
170 | case WDIOS_ENABLECARD: | ||
171 | rdc321x_wdt_start(); | ||
172 | break; | ||
173 | case WDIOS_DISABLECARD: | ||
174 | return rdc321x_wdt_stop(); | ||
175 | default: | ||
176 | return -EINVAL; | ||
177 | } | ||
178 | break; | ||
179 | default: | ||
180 | return -ENOTTY; | ||
181 | } | ||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | static ssize_t rdc321x_wdt_write(struct file *file, const char __user *buf, | ||
186 | size_t count, loff_t *ppos) | ||
187 | { | ||
188 | if (!count) | ||
189 | return -EIO; | ||
190 | |||
191 | rdc321x_wdt_reset(); | ||
192 | |||
193 | return count; | ||
194 | } | ||
195 | |||
196 | static const struct file_operations rdc321x_wdt_fops = { | ||
197 | .owner = THIS_MODULE, | ||
198 | .llseek = no_llseek, | ||
199 | .ioctl = rdc321x_wdt_ioctl, | ||
200 | .open = rdc321x_wdt_open, | ||
201 | .write = rdc321x_wdt_write, | ||
202 | .release = rdc321x_wdt_release, | ||
203 | }; | ||
204 | |||
205 | static struct miscdevice rdc321x_wdt_misc = { | ||
206 | .minor = WATCHDOG_MINOR, | ||
207 | .name = "watchdog", | ||
208 | .fops = &rdc321x_wdt_fops, | ||
209 | }; | ||
210 | |||
211 | static int __devinit rdc321x_wdt_probe(struct platform_device *pdev) | ||
212 | { | ||
213 | int err; | ||
214 | |||
215 | err = misc_register(&rdc321x_wdt_misc); | ||
216 | if (err < 0) { | ||
217 | printk(KERN_ERR PFX "watchdog misc_register failed\n"); | ||
218 | return err; | ||
219 | } | ||
220 | |||
221 | /* Reset the watchdog */ | ||
222 | outl(RDC_WDT_RST, RDC3210_CFGREG_DATA); | ||
223 | |||
224 | init_completion(&rdc321x_wdt_device.stop); | ||
225 | rdc321x_wdt_device.queue = 0; | ||
226 | |||
227 | clear_bit(0, &rdc321x_wdt_device.inuse); | ||
228 | |||
229 | setup_timer(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0); | ||
230 | |||
231 | rdc321x_wdt_device.default_ticks = ticks; | ||
232 | |||
233 | printk(KERN_INFO PFX "watchdog init success\n"); | ||
234 | |||
235 | return 0; | ||
236 | } | ||
237 | |||
238 | static int rdc321x_wdt_remove(struct platform_device *pdev) | ||
239 | { | ||
240 | if (rdc321x_wdt_device.queue) { | ||
241 | rdc321x_wdt_device.queue = 0; | ||
242 | wait_for_completion(&rdc321x_wdt_device.stop); | ||
243 | } | ||
244 | |||
245 | misc_deregister(&rdc321x_wdt_misc); | ||
246 | |||
247 | return 0; | ||
248 | } | ||
249 | |||
250 | static struct platform_driver rdc321x_wdt_driver = { | ||
251 | .probe = rdc321x_wdt_probe, | ||
252 | .remove = rdc321x_wdt_remove, | ||
253 | .driver = { | ||
254 | .owner = THIS_MODULE, | ||
255 | .name = "rdc321x-wdt", | ||
256 | }, | ||
257 | }; | ||
258 | |||
259 | static int __init rdc321x_wdt_init(void) | ||
260 | { | ||
261 | return platform_driver_register(&rdc321x_wdt_driver); | ||
262 | } | ||
263 | |||
264 | static void __exit rdc321x_wdt_exit(void) | ||
265 | { | ||
266 | platform_driver_unregister(&rdc321x_wdt_driver); | ||
267 | } | ||
268 | |||
269 | module_init(rdc321x_wdt_init); | ||
270 | module_exit(rdc321x_wdt_exit); | ||
271 | |||
272 | MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); | ||
273 | MODULE_DESCRIPTION("RDC321x watchdog driver"); | ||
274 | MODULE_LICENSE("GPL"); | ||
275 | MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); | ||
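
The driver deleted above speaks the standard Linux watchdog character-device protocol (any write or WDIOC_KEEPALIVE pats the dog; this particular driver stops via WDIOC_SETOPTIONS), so the usual client loop applies. A minimal user-space example; the device path and timings are illustrative:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/watchdog.h>

    int main(void)
    {
        int fd = open("/dev/watchdog", O_WRONLY);
        if (fd < 0) {
            perror("open /dev/watchdog");
            return 1;
        }
        for (int i = 0; i < 10; i++) {
            ioctl(fd, WDIOC_KEEPALIVE, 0); /* reset the countdown */
            sleep(1);
        }
        int opts = WDIOS_DISABLECARD;      /* matches the driver's ioctl */
        ioctl(fd, WDIOC_SETOPTIONS, &opts);
        close(fd);
        return 0;
    }
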
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 3cc8eb2f36a9..be7235bf105d 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <asm/pgalloc.h> | 27 | #include <asm/pgalloc.h> |
28 | #include <asm/tlbflush.h> | 28 | #include <asm/tlbflush.h> |
29 | #include <asm/arch_hooks.h> | 29 | #include <asm/arch_hooks.h> |
30 | #include <asm/trampoline.h> | ||
30 | 31 | ||
31 | /* TLB state -- visible externally, indexed physically */ | 32 | /* TLB state -- visible externally, indexed physically */ |
32 | DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 }; | 33 | DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 }; |
@@ -210,7 +211,7 @@ static int cpucount = 0; | |||
210 | /* steal a page from the bottom of memory for the trampoline and | 211 | /* steal a page from the bottom of memory for the trampoline and |
211 | * squirrel its address away here. This will be in kernel virtual | 212 | * squirrel its address away here. This will be in kernel virtual |
212 | * space */ | 213 | * space */ |
213 | static __u32 trampoline_base; | 214 | unsigned char *trampoline_base; |
214 | 215 | ||
215 | /* The per cpu profile stuff - used in smp_local_timer_interrupt */ | 216 | /* The per cpu profile stuff - used in smp_local_timer_interrupt */ |
216 | static DEFINE_PER_CPU(int, prof_multiplier) = 1; | 217 | static DEFINE_PER_CPU(int, prof_multiplier) = 1; |
@@ -429,15 +430,15 @@ void __init smp_store_cpu_info(int id) | |||
429 | } | 430 | } |
430 | 431 | ||
431 | /* set up the trampoline and return the physical address of the code */ | 432 | /* set up the trampoline and return the physical address of the code */ |
432 | static __u32 __init setup_trampoline(void) | 433 | unsigned long __init setup_trampoline(void) |
433 | { | 434 | { |
434 | /* these two are global symbols in trampoline.S */ | 435 | /* these two are global symbols in trampoline.S */ |
435 | extern const __u8 trampoline_end[]; | 436 | extern const __u8 trampoline_end[]; |
436 | extern const __u8 trampoline_data[]; | 437 | extern const __u8 trampoline_data[]; |
437 | 438 | ||
438 | memcpy((__u8 *) trampoline_base, trampoline_data, | 439 | memcpy(trampoline_base, trampoline_data, |
439 | trampoline_end - trampoline_data); | 440 | trampoline_end - trampoline_data); |
440 | return virt_to_phys((__u8 *) trampoline_base); | 441 | return virt_to_phys(trampoline_base); |
441 | } | 442 | } |
442 | 443 | ||
443 | /* Routine initially called when a non-boot CPU is brought online */ | 444 | /* Routine initially called when a non-boot CPU is brought online */ |
@@ -520,13 +521,6 @@ static void __init do_boot_cpu(__u8 cpu) | |||
520 | & ~(voyager_extended_vic_processors | 521 | & ~(voyager_extended_vic_processors |
521 | & voyager_allowed_boot_processors); | 522 | & voyager_allowed_boot_processors); |
522 | 523 | ||
523 | /* This is an area in head.S which was used to set up the | ||
524 | * initial kernel stack. We need to alter this to give the | ||
525 | * booting CPU a new stack (taken from its idle process) */ | ||
526 | extern struct { | ||
527 | __u8 *sp; | ||
528 | unsigned short ss; | ||
529 | } stack_start; | ||
530 | /* This is the format of the CPI IDT gate (in real mode) which | 524 | /* This is the format of the CPI IDT gate (in real mode) which |
531 | * we're hijacking to boot the CPU */ | 525 | * we're hijacking to boot the CPU */ |
532 | union IDTFormat { | 526 | union IDTFormat { |
@@ -1166,7 +1160,7 @@ void flush_tlb_all(void) | |||
1166 | * is sorted out */ | 1160 | * is sorted out */ |
1167 | void __init smp_alloc_memory(void) | 1161 | void __init smp_alloc_memory(void) |
1168 | { | 1162 | { |
1169 | trampoline_base = (__u32) alloc_bootmem_low_pages(PAGE_SIZE); | 1163 | trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE); |
1170 | if (__pa(trampoline_base) >= 0x93000) | 1164 | if (__pa(trampoline_base) >= 0x93000) |
1171 | BUG(); | 1165 | BUG(); |
1172 | } | 1166 | } |
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c index 760baeea5f07..4bab3b145392 100644 --- a/arch/x86/math-emu/fpu_entry.c +++ b/arch/x86/math-emu/fpu_entry.c | |||
@@ -276,6 +276,7 @@ asmlinkage void math_emulate(long arg) | |||
276 | entry_sel_off.offset = FPU_ORIG_EIP; | 276 | entry_sel_off.offset = FPU_ORIG_EIP; |
277 | entry_sel_off.selector = FPU_CS; | 277 | entry_sel_off.selector = FPU_CS; |
278 | entry_sel_off.opcode = (byte1 << 8) | FPU_modrm; | 278 | entry_sel_off.opcode = (byte1 << 8) | FPU_modrm; |
279 | entry_sel_off.empty = 0; | ||
279 | 280 | ||
280 | FPU_rm = FPU_modrm & 7; | 281 | FPU_rm = FPU_modrm & 7; |
281 | 282 | ||
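
The one-line fix above matters because `entry_sel_off.empty` previously carried stack garbage into the FPU emulation path. Where the struct layout allows it, a designated initializer sidesteps this whole class of bug, since unnamed members are implicitly zero-initialized. A sketch with a hypothetical mirror of the struct (field types guessed from the code above):

    struct sel_off {                       /* hypothetical mirror */
        unsigned int   offset;
        unsigned short selector;
        unsigned short opcode;
        unsigned short empty;
    };

    static struct sel_off make_entry(unsigned int eip, unsigned short cs,
                                     unsigned short opcode)
    {
        /* .empty is not named, so C guarantees it is zeroed */
        struct sel_off e = {
            .offset   = eip,
            .selector = cs,
            .opcode   = opcode,
        };
        return e;
    }
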
diff --git a/arch/x86/math-emu/reg_ld_str.c b/arch/x86/math-emu/reg_ld_str.c index 799d4af5be66..02af772a24db 100644 --- a/arch/x86/math-emu/reg_ld_str.c +++ b/arch/x86/math-emu/reg_ld_str.c | |||
@@ -383,15 +383,15 @@ int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag, double __user *dfloat) | |||
383 | int exp; | 383 | int exp; |
384 | FPU_REG tmp; | 384 | FPU_REG tmp; |
385 | 385 | ||
386 | l[0] = 0; | ||
387 | l[1] = 0; | ||
386 | if (st0_tag == TAG_Valid) { | 388 | if (st0_tag == TAG_Valid) { |
387 | reg_copy(st0_ptr, &tmp); | 389 | reg_copy(st0_ptr, &tmp); |
388 | exp = exponent(&tmp); | 390 | exp = exponent(&tmp); |
389 | 391 | ||
390 | if (exp < DOUBLE_Emin) { /* It may be a denormal */ | 392 | if (exp < DOUBLE_Emin) { /* It may be a denormal */ |
391 | addexponent(&tmp, -DOUBLE_Emin + 52); /* largest exp to be 51 */ | 393 | addexponent(&tmp, -DOUBLE_Emin + 52); /* largest exp to be 51 */ |
392 | 394 | denormal_arg: | |
393 | denormal_arg: | ||
394 | |||
395 | if ((precision_loss = FPU_round_to_int(&tmp, st0_tag))) { | 395 | if ((precision_loss = FPU_round_to_int(&tmp, st0_tag))) { |
396 | #ifdef PECULIAR_486 | 396 | #ifdef PECULIAR_486 |
397 | /* Did it round to a non-denormal ? */ | 397 | /* Did it round to a non-denormal ? */ |
@@ -477,8 +477,7 @@ int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag, double __user *dfloat) | |||
477 | 477 | ||
478 | /* This is a special case: see sec 16.2.5.1 of the 80486 book */ | 478 | /* This is a special case: see sec 16.2.5.1 of the 80486 book */ |
479 | /* Overflow to infinity */ | 479 | /* Overflow to infinity */ |
480 | l[0] = 0x00000000; /* Set to */ | 480 | l[1] = 0x7ff00000; /* Set to + INF */ |
481 | l[1] = 0x7ff00000; /* + INF */ | ||
482 | } else { | 481 | } else { |
483 | if (precision_loss) { | 482 | if (precision_loss) { |
484 | if (increment) | 483 | if (increment) |
@@ -492,8 +491,6 @@ int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag, double __user *dfloat) | |||
492 | } | 491 | } |
493 | } else if (st0_tag == TAG_Zero) { | 492 | } else if (st0_tag == TAG_Zero) { |
494 | /* Number is zero */ | 493 | /* Number is zero */ |
495 | l[0] = 0; | ||
496 | l[1] = 0; | ||
497 | } else if (st0_tag == TAG_Special) { | 494 | } else if (st0_tag == TAG_Special) { |
498 | st0_tag = FPU_Special(st0_ptr); | 495 | st0_tag = FPU_Special(st0_ptr); |
499 | if (st0_tag == TW_Denormal) { | 496 | if (st0_tag == TW_Denormal) { |
@@ -508,7 +505,6 @@ int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag, double __user *dfloat) | |||
508 | reg_copy(st0_ptr, &tmp); | 505 | reg_copy(st0_ptr, &tmp); |
509 | goto denormal_arg; | 506 | goto denormal_arg; |
510 | } else if (st0_tag == TW_Infinity) { | 507 | } else if (st0_tag == TW_Infinity) { |
511 | l[0] = 0; | ||
512 | l[1] = 0x7ff00000; | 508 | l[1] = 0x7ff00000; |
513 | } else if (st0_tag == TW_NaN) { | 509 | } else if (st0_tag == TW_NaN) { |
514 | /* Is it really a NaN ? */ | 510 | /* Is it really a NaN ? */ |
@@ -532,7 +528,6 @@ int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag, double __user *dfloat) | |||
532 | EXCEPTION(EX_Invalid); | 528 | EXCEPTION(EX_Invalid); |
533 | if (!(control_word & CW_Invalid)) | 529 | if (!(control_word & CW_Invalid)) |
534 | return 0; | 530 | return 0; |
535 | l[0] = 0; | ||
536 | l[1] = 0xfff80000; | 531 | l[1] = 0xfff80000; |
537 | } | 532 | } |
538 | } | 533 | } |
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 983291096848..20941d2954e2 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile | |||
@@ -1,5 +1,17 @@ | |||
1 | obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ | ||
2 | pat.o | ||
3 | |||
4 | obj-$(CONFIG_X86_32) += pgtable_32.o | ||
5 | |||
6 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | ||
7 | obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o | ||
8 | |||
9 | obj-$(CONFIG_HIGHMEM) += highmem_32.o | ||
10 | |||
1 | ifeq ($(CONFIG_X86_32),y) | 11 | ifeq ($(CONFIG_X86_32),y) |
2 | include ${srctree}/arch/x86/mm/Makefile_32 | 12 | obj-$(CONFIG_NUMA) += discontig_32.o |
3 | else | 13 | else |
4 | include ${srctree}/arch/x86/mm/Makefile_64 | 14 | obj-$(CONFIG_NUMA) += numa_64.o |
15 | obj-$(CONFIG_K8_NUMA) += k8topology_64.o | ||
16 | obj-$(CONFIG_ACPI_NUMA) += srat_64.o | ||
5 | endif | 17 | endif |
diff --git a/arch/x86/mm/Makefile_32 b/arch/x86/mm/Makefile_32 deleted file mode 100644 index c36ae88bb543..000000000000 --- a/arch/x86/mm/Makefile_32 +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for the linux i386-specific parts of the memory manager. | ||
3 | # | ||
4 | |||
5 | obj-y := init_32.o pgtable_32.o fault.o ioremap.o extable.o pageattr.o mmap.o | ||
6 | |||
7 | obj-$(CONFIG_NUMA) += discontig_32.o | ||
8 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | ||
9 | obj-$(CONFIG_HIGHMEM) += highmem_32.o | ||
diff --git a/arch/x86/mm/Makefile_64 b/arch/x86/mm/Makefile_64 deleted file mode 100644 index 688c8c28ac8f..000000000000 --- a/arch/x86/mm/Makefile_64 +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for the linux x86_64-specific parts of the memory manager. | ||
3 | # | ||
4 | |||
5 | obj-y := init_64.o fault.o ioremap.o extable.o pageattr.o mmap.o | ||
6 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | ||
7 | obj-$(CONFIG_NUMA) += numa_64.o | ||
8 | obj-$(CONFIG_K8_NUMA) += k8topology_64.o | ||
9 | obj-$(CONFIG_ACPI_NUMA) += srat_64.o | ||
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c index 8e25e06ff730..eba0bbede7a6 100644 --- a/arch/x86/mm/discontig_32.c +++ b/arch/x86/mm/discontig_32.c | |||
@@ -37,7 +37,7 @@ | |||
37 | #include <asm/e820.h> | 37 | #include <asm/e820.h> |
38 | #include <asm/setup.h> | 38 | #include <asm/setup.h> |
39 | #include <asm/mmzone.h> | 39 | #include <asm/mmzone.h> |
40 | #include <bios_ebda.h> | 40 | #include <asm/bios_ebda.h> |
41 | 41 | ||
42 | struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; | 42 | struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; |
43 | EXPORT_SYMBOL(node_data); | 43 | EXPORT_SYMBOL(node_data); |
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c new file mode 100644 index 000000000000..6791b8334bc6 --- /dev/null +++ b/arch/x86/mm/dump_pagetables.c | |||
@@ -0,0 +1,354 @@ | |||
1 | /* | ||
2 | * Debug helper to dump the current kernel pagetables of the system | ||
3 | * so that we can see what the various memory ranges are set to. | ||
4 | * | ||
5 | * (C) Copyright 2008 Intel Corporation | ||
6 | * | ||
7 | * Author: Arjan van de Ven <arjan@linux.intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * as published by the Free Software Foundation; version 2 | ||
12 | * of the License. | ||
13 | */ | ||
14 | |||
15 | #include <linux/debugfs.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/seq_file.h> | ||
19 | |||
20 | #include <asm/pgtable.h> | ||
21 | |||
22 | /* | ||
23 | * The dumper groups pagetable entries of the same type into one, and for | ||
24 | * that it needs to keep some state when walking, and flush this state | ||
25 | * when a "break" in the continuity is found. | ||
26 | */ | ||
27 | struct pg_state { | ||
28 | int level; | ||
29 | pgprot_t current_prot; | ||
30 | unsigned long start_address; | ||
31 | unsigned long current_address; | ||
32 | const struct addr_marker *marker; | ||
33 | }; | ||
34 | |||
35 | struct addr_marker { | ||
36 | unsigned long start_address; | ||
37 | const char *name; | ||
38 | }; | ||
39 | |||
40 | /* Address space marker hints */ | ||
41 | static struct addr_marker address_markers[] = { | ||
42 | { 0, "User Space" }, | ||
43 | #ifdef CONFIG_X86_64 | ||
44 | { 0x8000000000000000UL, "Kernel Space" }, | ||
45 | { 0xffff810000000000UL, "Low Kernel Mapping" }, | ||
46 | { VMALLOC_START, "vmalloc() Area" }, | ||
47 | { VMEMMAP_START, "Vmemmap" }, | ||
48 | { __START_KERNEL_map, "High Kernel Mapping" }, | ||
49 | { MODULES_VADDR, "Modules" }, | ||
50 | { MODULES_END, "End Modules" }, | ||
51 | #else | ||
52 | { PAGE_OFFSET, "Kernel Mapping" }, | ||
53 | { 0/* VMALLOC_START */, "vmalloc() Area" }, | ||
54 | { 0/*VMALLOC_END*/, "vmalloc() End" }, | ||
55 | # ifdef CONFIG_HIGHMEM | ||
56 | { 0/*PKMAP_BASE*/, "Persistent kmap() Area" }, | ||
57 | # endif | ||
58 | { 0/*FIXADDR_START*/, "Fixmap Area" }, | ||
59 | #endif | ||
60 | { -1, NULL } /* End of list */ | ||
61 | }; | ||
62 | |||
63 | /* Multipliers for offsets within the PTEs */ | ||
64 | #define PTE_LEVEL_MULT (PAGE_SIZE) | ||
65 | #define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT) | ||
66 | #define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT) | ||
67 | #define PGD_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT) | ||
68 | |||
69 | /* | ||
70 | * Print a readable form of a pgprot_t to the seq_file | ||
71 | */ | ||
72 | static void printk_prot(struct seq_file *m, pgprot_t prot, int level) | ||
73 | { | ||
74 | pgprotval_t pr = pgprot_val(prot); | ||
75 | static const char * const level_name[] = | ||
76 | { "cr3", "pgd", "pud", "pmd", "pte" }; | ||
77 | |||
78 | if (!pgprot_val(prot)) { | ||
79 | /* Not present */ | ||
80 | seq_printf(m, " "); | ||
81 | } else { | ||
82 | if (pr & _PAGE_USER) | ||
83 | seq_printf(m, "USR "); | ||
84 | else | ||
85 | seq_printf(m, " "); | ||
86 | if (pr & _PAGE_RW) | ||
87 | seq_printf(m, "RW "); | ||
88 | else | ||
89 | seq_printf(m, "ro "); | ||
90 | if (pr & _PAGE_PWT) | ||
91 | seq_printf(m, "PWT "); | ||
92 | else | ||
93 | seq_printf(m, " "); | ||
94 | if (pr & _PAGE_PCD) | ||
95 | seq_printf(m, "PCD "); | ||
96 | else | ||
97 | seq_printf(m, " "); | ||
98 | |||
99 | /* Bit 9 has a different meaning on level 3 vs 4 */ | ||
100 | if (level <= 3) { | ||
101 | if (pr & _PAGE_PSE) | ||
102 | seq_printf(m, "PSE "); | ||
103 | else | ||
104 | seq_printf(m, " "); | ||
105 | } else { | ||
106 | if (pr & _PAGE_PAT) | ||
107 | seq_printf(m, "pat "); | ||
108 | else | ||
109 | seq_printf(m, " "); | ||
110 | } | ||
111 | if (pr & _PAGE_GLOBAL) | ||
112 | seq_printf(m, "GLB "); | ||
113 | else | ||
114 | seq_printf(m, " "); | ||
115 | if (pr & _PAGE_NX) | ||
116 | seq_printf(m, "NX "); | ||
117 | else | ||
118 | seq_printf(m, "x "); | ||
119 | } | ||
120 | seq_printf(m, "%s\n", level_name[level]); | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * On 64 bits, sign-extend the 48 bit address to 64 bit | ||
125 | */ | ||
126 | static unsigned long normalize_addr(unsigned long u) | ||
127 | { | ||
128 | #ifdef CONFIG_X86_64 | ||
129 | return (signed long)(u << 16) >> 16; | ||
130 | #else | ||
131 | return u; | ||
132 | #endif | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * This function gets called on a break in a continuous series | ||
137 | * of PTE entries; the next one is different so we need to | ||
138 | * print what we collected so far. | ||
139 | */ | ||
140 | static void note_page(struct seq_file *m, struct pg_state *st, | ||
141 | pgprot_t new_prot, int level) | ||
142 | { | ||
143 | pgprotval_t prot, cur; | ||
144 | static const char units[] = "KMGTPE"; | ||
145 | |||
146 | /* | ||
147 | * If we have a "break" in the series, we need to flush the state that | ||
148 | * we have now. "break" is either changing perms, levels or | ||
149 | * address space marker. | ||
150 | */ | ||
151 | prot = pgprot_val(new_prot) & ~(PTE_MASK); | ||
152 | cur = pgprot_val(st->current_prot) & ~(PTE_MASK); | ||
153 | |||
154 | if (!st->level) { | ||
155 | /* First entry */ | ||
156 | st->current_prot = new_prot; | ||
157 | st->level = level; | ||
158 | st->marker = address_markers; | ||
159 | seq_printf(m, "---[ %s ]---\n", st->marker->name); | ||
160 | } else if (prot != cur || level != st->level || | ||
161 | st->current_address >= st->marker[1].start_address) { | ||
162 | const char *unit = units; | ||
163 | unsigned long delta; | ||
164 | |||
165 | /* | ||
166 | * Now print the actual finished series | ||
167 | */ | ||
168 | seq_printf(m, "0x%p-0x%p ", | ||
169 | (void *)st->start_address, | ||
170 | (void *)st->current_address); | ||
171 | |||
172 | delta = (st->current_address - st->start_address) >> 10; | ||
173 | while (!(delta & 1023) && unit[1]) { | ||
174 | delta >>= 10; | ||
175 | unit++; | ||
176 | } | ||
177 | seq_printf(m, "%9lu%c ", delta, *unit); | ||
178 | printk_prot(m, st->current_prot, st->level); | ||
179 | |||
180 | /* | ||
181 | * We print markers for special areas of the address space, | ||
182 | * such as the start of vmalloc space, to make the | ||
183 | * dump easier to interpret. | ||
184 | */ | ||
185 | if (st->current_address >= st->marker[1].start_address) { | ||
186 | st->marker++; | ||
187 | seq_printf(m, "---[ %s ]---\n", st->marker->name); | ||
188 | } | ||
189 | |||
190 | st->start_address = st->current_address; | ||
191 | st->current_prot = new_prot; | ||
192 | st->level = level; | ||
193 | } | ||
194 | } | ||
195 | |||
196 | static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr, | ||
197 | unsigned long P) | ||
198 | { | ||
199 | int i; | ||
200 | pte_t *start; | ||
201 | |||
202 | start = (pte_t *) pmd_page_vaddr(addr); | ||
203 | for (i = 0; i < PTRS_PER_PTE; i++) { | ||
204 | pgprot_t prot = pte_pgprot(*start); | ||
205 | |||
206 | st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT); | ||
207 | note_page(m, st, prot, 4); | ||
208 | start++; | ||
209 | } | ||
210 | } | ||
211 | |||
212 | #if PTRS_PER_PMD > 1 | ||
213 | |||
214 | static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr, | ||
215 | unsigned long P) | ||
216 | { | ||
217 | int i; | ||
218 | pmd_t *start; | ||
219 | |||
220 | start = (pmd_t *) pud_page_vaddr(addr); | ||
221 | for (i = 0; i < PTRS_PER_PMD; i++) { | ||
222 | st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT); | ||
223 | if (!pmd_none(*start)) { | ||
224 | pgprotval_t prot = pmd_val(*start) & ~PTE_MASK; | ||
225 | |||
226 | if (pmd_large(*start) || !pmd_present(*start)) | ||
227 | note_page(m, st, __pgprot(prot), 3); | ||
228 | else | ||
229 | walk_pte_level(m, st, *start, | ||
230 | P + i * PMD_LEVEL_MULT); | ||
231 | } else | ||
232 | note_page(m, st, __pgprot(0), 3); | ||
233 | start++; | ||
234 | } | ||
235 | } | ||
236 | |||
237 | #else | ||
238 | #define walk_pmd_level(m,s,a,p) walk_pte_level(m,s,__pmd(pud_val(a)),p) | ||
239 | #define pud_large(a) pmd_large(__pmd(pud_val(a))) | ||
240 | #define pud_none(a) pmd_none(__pmd(pud_val(a))) | ||
241 | #endif | ||
242 | |||
243 | #if PTRS_PER_PUD > 1 | ||
244 | |||
245 | static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr, | ||
246 | unsigned long P) | ||
247 | { | ||
248 | int i; | ||
249 | pud_t *start; | ||
250 | |||
251 | start = (pud_t *) pgd_page_vaddr(addr); | ||
252 | |||
253 | for (i = 0; i < PTRS_PER_PUD; i++) { | ||
254 | st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT); | ||
255 | if (!pud_none(*start)) { | ||
256 | pgprotval_t prot = pud_val(*start) & ~PTE_MASK; | ||
257 | |||
258 | if (pud_large(*start) || !pud_present(*start)) | ||
259 | note_page(m, st, __pgprot(prot), 2); | ||
260 | else | ||
261 | walk_pmd_level(m, st, *start, | ||
262 | P + i * PUD_LEVEL_MULT); | ||
263 | } else | ||
264 | note_page(m, st, __pgprot(0), 2); | ||
265 | |||
266 | start++; | ||
267 | } | ||
268 | } | ||
269 | |||
270 | #else | ||
271 | #define walk_pud_level(m,s,a,p) walk_pmd_level(m,s,__pud(pgd_val(a)),p) | ||
272 | #define pgd_large(a) pud_large(__pud(pgd_val(a))) | ||
273 | #define pgd_none(a) pud_none(__pud(pgd_val(a))) | ||
274 | #endif | ||
275 | |||
276 | static void walk_pgd_level(struct seq_file *m) | ||
277 | { | ||
278 | #ifdef CONFIG_X86_64 | ||
279 | pgd_t *start = (pgd_t *) &init_level4_pgt; | ||
280 | #else | ||
281 | pgd_t *start = swapper_pg_dir; | ||
282 | #endif | ||
283 | int i; | ||
284 | struct pg_state st; | ||
285 | |||
286 | memset(&st, 0, sizeof(st)); | ||
287 | |||
288 | for (i = 0; i < PTRS_PER_PGD; i++) { | ||
289 | st.current_address = normalize_addr(i * PGD_LEVEL_MULT); | ||
290 | if (!pgd_none(*start)) { | ||
291 | pgprotval_t prot = pgd_val(*start) & ~PTE_MASK; | ||
292 | |||
293 | if (pgd_large(*start) || !pgd_present(*start)) | ||
294 | note_page(m, &st, __pgprot(prot), 1); | ||
295 | else | ||
296 | walk_pud_level(m, &st, *start, | ||
297 | i * PGD_LEVEL_MULT); | ||
298 | } else | ||
299 | note_page(m, &st, __pgprot(0), 1); | ||
300 | |||
301 | start++; | ||
302 | } | ||
303 | |||
304 | /* Flush out the last page */ | ||
305 | st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT); | ||
306 | note_page(m, &st, __pgprot(0), 0); | ||
307 | } | ||
308 | |||
309 | static int ptdump_show(struct seq_file *m, void *v) | ||
310 | { | ||
311 | walk_pgd_level(m); | ||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | static int ptdump_open(struct inode *inode, struct file *filp) | ||
316 | { | ||
317 | return single_open(filp, ptdump_show, NULL); | ||
318 | } | ||
319 | |||
320 | static const struct file_operations ptdump_fops = { | ||
321 | .open = ptdump_open, | ||
322 | .read = seq_read, | ||
323 | .llseek = seq_lseek, | ||
324 | .release = single_release, | ||
325 | }; | ||
326 | |||
327 | int pt_dump_init(void) | ||
328 | { | ||
329 | struct dentry *pe; | ||
330 | |||
331 | #ifdef CONFIG_X86_32 | ||
332 | /* Not a compile-time constant on x86-32 */ | ||
333 | address_markers[2].start_address = VMALLOC_START; | ||
334 | address_markers[3].start_address = VMALLOC_END; | ||
335 | # ifdef CONFIG_HIGHMEM | ||
336 | address_markers[4].start_address = PKMAP_BASE; | ||
337 | address_markers[5].start_address = FIXADDR_START; | ||
338 | # else | ||
339 | address_markers[4].start_address = FIXADDR_START; | ||
340 | # endif | ||
341 | #endif | ||
342 | |||
343 | pe = debugfs_create_file("kernel_page_tables", 0600, NULL, NULL, | ||
344 | &ptdump_fops); | ||
345 | if (!pe) | ||
346 | return -ENOMEM; | ||
347 | |||
348 | return 0; | ||
349 | } | ||
350 | |||
351 | __initcall(pt_dump_init); | ||
352 | MODULE_LICENSE("GPL"); | ||
353 | MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>"); | ||
354 | MODULE_DESCRIPTION("Kernel debugging helper that dumps pagetables"); | ||
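A note on the range printing in note_page() above: the delta/units loop starts from a KiB count and promotes it to the largest unit that divides it exactly, so a 2MB large page prints as "2M" rather than "2048K". A minimal user-space sketch of the same loop (the helper name human_units is made up for illustration):

    #include <stdio.h>

    static void human_units(unsigned long bytes)
    {
            static const char units[] = "KMGTPE";
            const char *unit = units;
            unsigned long delta = bytes >> 10;      /* start in KiB */

            /* promote only while the count divides evenly by 1024 */
            while (!(delta & 1023) && unit[1]) {
                    delta >>= 10;
                    unit++;
            }
            printf("%9lu%c\n", delta, *unit);
    }

    int main(void)
    {
            human_units(4096);              /* prints "        4K" */
            human_units(2UL << 20);         /* prints "        2M" */
            human_units(1UL << 30);         /* prints "        1G" */
            return 0;
    }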
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index ec08d8389850..fd7e1798c75a 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -639,7 +639,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
639 | #ifdef CONFIG_X86_32 | 639 | #ifdef CONFIG_X86_32 |
640 | /* It's safe to allow irq's after cr2 has been saved and the vmalloc | 640 | /* It's safe to allow irq's after cr2 has been saved and the vmalloc |
641 | fault has been handled. */ | 641 | fault has been handled. */ |
642 | if (regs->flags & (X86_EFLAGS_IF|VM_MASK)) | 642 | if (regs->flags & (X86_EFLAGS_IF | X86_VM_MASK)) |
643 | local_irq_enable(); | 643 | local_irq_enable(); |
644 | 644 | ||
645 | /* | 645 | /* |
@@ -976,9 +976,5 @@ void vmalloc_sync_all(void) | |||
976 | if (address == start) | 976 | if (address == start) |
977 | start = address + PGDIR_SIZE; | 977 | start = address + PGDIR_SIZE; |
978 | } | 978 | } |
979 | /* Check that there is no need to do the same for the modules area. */ | ||
980 | BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL)); | ||
981 | BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == | ||
982 | (__START_KERNEL & PGDIR_MASK))); | ||
983 | #endif | 979 | #endif |
984 | } | 980 | } |
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index ee1091a46964..1500dc8d63e4 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -51,6 +51,8 @@ | |||
51 | 51 | ||
52 | unsigned int __VMALLOC_RESERVE = 128 << 20; | 52 | unsigned int __VMALLOC_RESERVE = 128 << 20; |
53 | 53 | ||
54 | unsigned long max_pfn_mapped; | ||
55 | |||
54 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | 56 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); |
55 | unsigned long highstart_pfn, highend_pfn; | 57 | unsigned long highstart_pfn, highend_pfn; |
56 | 58 | ||
@@ -179,8 +181,13 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) | |||
179 | /* | 181 | /* |
180 | * Map with big pages if possible, otherwise | 182 | * Map with big pages if possible, otherwise |
181 | * create normal page tables: | 183 | * create normal page tables: |
184 | * | ||
185 | * Don't use a large page for the first 2/4MB of memory | ||
186 | * because there are often fixed-size MTRRs in there, | ||
187 | * and MTRRs overlapping large pages can cause | ||
188 | * slowdowns. | ||
182 | */ | 189 | */ |
183 | if (cpu_has_pse) { | 190 | if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) { |
184 | unsigned int addr2; | 191 | unsigned int addr2; |
185 | pgprot_t prot = PAGE_KERNEL_LARGE; | 192 | pgprot_t prot = PAGE_KERNEL_LARGE; |
186 | 193 | ||
@@ -194,6 +201,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) | |||
194 | set_pmd(pmd, pfn_pmd(pfn, prot)); | 201 | set_pmd(pmd, pfn_pmd(pfn, prot)); |
195 | 202 | ||
196 | pfn += PTRS_PER_PTE; | 203 | pfn += PTRS_PER_PTE; |
204 | max_pfn_mapped = pfn; | ||
197 | continue; | 205 | continue; |
198 | } | 206 | } |
199 | pte = one_page_table_init(pmd); | 207 | pte = one_page_table_init(pmd); |
@@ -208,6 +216,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) | |||
208 | 216 | ||
209 | set_pte(pte, pfn_pte(pfn, prot)); | 217 | set_pte(pte, pfn_pte(pfn, prot)); |
210 | } | 218 | } |
219 | max_pfn_mapped = pfn; | ||
211 | } | 220 | } |
212 | } | 221 | } |
213 | } | 222 | } |
@@ -723,25 +732,17 @@ void mark_rodata_ro(void) | |||
723 | unsigned long start = PFN_ALIGN(_text); | 732 | unsigned long start = PFN_ALIGN(_text); |
724 | unsigned long size = PFN_ALIGN(_etext) - start; | 733 | unsigned long size = PFN_ALIGN(_etext) - start; |
725 | 734 | ||
726 | #ifndef CONFIG_KPROBES | 735 | set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); |
727 | #ifdef CONFIG_HOTPLUG_CPU | 736 | printk(KERN_INFO "Write protecting the kernel text: %luk\n", |
728 | /* It must still be possible to apply SMP alternatives. */ | 737 | size >> 10); |
729 | if (num_possible_cpus() <= 1) | ||
730 | #endif | ||
731 | { | ||
732 | set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); | ||
733 | printk(KERN_INFO "Write protecting the kernel text: %luk\n", | ||
734 | size >> 10); | ||
735 | 738 | ||
736 | #ifdef CONFIG_CPA_DEBUG | 739 | #ifdef CONFIG_CPA_DEBUG |
737 | printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", | 740 | printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", |
738 | start, start+size); | 741 | start, start+size); |
739 | set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT); | 742 | set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT); |
740 | 743 | ||
741 | printk(KERN_INFO "Testing CPA: write protecting again\n"); | 744 | printk(KERN_INFO "Testing CPA: write protecting again\n"); |
742 | set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT); | 745 | set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT); |
743 | #endif | ||
744 | } | ||
745 | #endif | 746 | #endif |
746 | start += size; | 747 | start += size; |
747 | size = (unsigned long)__end_rodata - start; | 748 | size = (unsigned long)__end_rodata - start; |
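The new PSE condition above boils down to a simple predicate: use a large page whenever the CPU supports PSE, except in the very first PGD/PMD slot, which covers the first 2/4MB where the fixed-range MTRRs live. A sketch of that decision, with the helper name use_large_page made up:

    /* assumed helper, mirroring the cpu_has_pse check in the hunk above */
    static int use_large_page(int pgd_idx, int pmd_idx, int has_pse)
    {
            /* never map the first 2/4MB with a large page: fixed MTRRs
             * overlapping a large page can cause slowdowns */
            if (pgd_idx == 0 && pmd_idx == 0)
                    return 0;
            return has_pse;
    }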
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index a02a14f0f324..1076097dcab2 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -54,6 +54,26 @@ static unsigned long dma_reserve __initdata; | |||
54 | 54 | ||
55 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | 55 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); |
56 | 56 | ||
57 | int direct_gbpages __meminitdata | ||
58 | #ifdef CONFIG_DIRECT_GBPAGES | ||
59 | = 1 | ||
60 | #endif | ||
61 | ; | ||
62 | |||
63 | static int __init parse_direct_gbpages_off(char *arg) | ||
64 | { | ||
65 | direct_gbpages = 0; | ||
66 | return 0; | ||
67 | } | ||
68 | early_param("nogbpages", parse_direct_gbpages_off); | ||
69 | |||
70 | static int __init parse_direct_gbpages_on(char *arg) | ||
71 | { | ||
72 | direct_gbpages = 1; | ||
73 | return 0; | ||
74 | } | ||
75 | early_param("gbpages", parse_direct_gbpages_on); | ||
76 | |||
57 | /* | 77 | /* |
58 | * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in | 78 |
59 | * physical space so we can cache the place of the first one and move | 79 | * physical space so we can cache the place of the first one and move |
@@ -69,9 +89,6 @@ void show_mem(void) | |||
69 | 89 | ||
70 | printk(KERN_INFO "Mem-info:\n"); | 90 | printk(KERN_INFO "Mem-info:\n"); |
71 | show_free_areas(); | 91 | show_free_areas(); |
72 | printk(KERN_INFO "Free swap: %6ldkB\n", | ||
73 | nr_swap_pages << (PAGE_SHIFT-10)); | ||
74 | |||
75 | for_each_online_pgdat(pgdat) { | 92 | for_each_online_pgdat(pgdat) { |
76 | for (i = 0; i < pgdat->node_spanned_pages; ++i) { | 93 | for (i = 0; i < pgdat->node_spanned_pages; ++i) { |
77 | /* | 94 | /* |
@@ -296,7 +313,7 @@ __meminit void early_iounmap(void *addr, unsigned long size) | |||
296 | __flush_tlb_all(); | 313 | __flush_tlb_all(); |
297 | } | 314 | } |
298 | 315 | ||
299 | static void __meminit | 316 | static unsigned long __meminit |
300 | phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end) | 317 | phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end) |
301 | { | 318 | { |
302 | int i = pmd_index(address); | 319 | int i = pmd_index(address); |
@@ -318,21 +335,26 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end) | |||
318 | set_pte((pte_t *)pmd, | 335 | set_pte((pte_t *)pmd, |
319 | pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); | 336 | pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); |
320 | } | 337 | } |
338 | return address; | ||
321 | } | 339 | } |
322 | 340 | ||
323 | static void __meminit | 341 | static unsigned long __meminit |
324 | phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end) | 342 | phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end) |
325 | { | 343 | { |
326 | pmd_t *pmd = pmd_offset(pud, 0); | 344 | pmd_t *pmd = pmd_offset(pud, 0); |
345 | unsigned long last_map_addr; | ||
346 | |||
327 | spin_lock(&init_mm.page_table_lock); | 347 | spin_lock(&init_mm.page_table_lock); |
328 | phys_pmd_init(pmd, address, end); | 348 | last_map_addr = phys_pmd_init(pmd, address, end); |
329 | spin_unlock(&init_mm.page_table_lock); | 349 | spin_unlock(&init_mm.page_table_lock); |
330 | __flush_tlb_all(); | 350 | __flush_tlb_all(); |
351 | return last_map_addr; | ||
331 | } | 352 | } |
332 | 353 | ||
333 | static void __meminit | 354 | static unsigned long __meminit |
334 | phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end) | 355 | phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end) |
335 | { | 356 | { |
357 | unsigned long last_map_addr = end; | ||
336 | int i = pud_index(addr); | 358 | int i = pud_index(addr); |
337 | 359 | ||
338 | for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) { | 360 | for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) { |
@@ -350,7 +372,15 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end) | |||
350 | } | 372 | } |
351 | 373 | ||
352 | if (pud_val(*pud)) { | 374 | if (pud_val(*pud)) { |
353 | phys_pmd_update(pud, addr, end); | 375 | if (!pud_large(*pud)) |
376 | last_map_addr = phys_pmd_update(pud, addr, end); | ||
377 | continue; | ||
378 | } | ||
379 | |||
380 | if (direct_gbpages) { | ||
381 | set_pte((pte_t *)pud, | ||
382 | pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); | ||
383 | last_map_addr = (addr & PUD_MASK) + PUD_SIZE; | ||
354 | continue; | 384 | continue; |
355 | } | 385 | } |
356 | 386 | ||
@@ -358,12 +388,14 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end) | |||
358 | 388 | ||
359 | spin_lock(&init_mm.page_table_lock); | 389 | spin_lock(&init_mm.page_table_lock); |
360 | set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE)); | 390 | set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE)); |
361 | phys_pmd_init(pmd, addr, end); | 391 | last_map_addr = phys_pmd_init(pmd, addr, end); |
362 | spin_unlock(&init_mm.page_table_lock); | 392 | spin_unlock(&init_mm.page_table_lock); |
363 | 393 | ||
364 | unmap_low_page(pmd); | 394 | unmap_low_page(pmd); |
365 | } | 395 | } |
366 | __flush_tlb_all(); | 396 | __flush_tlb_all(); |
397 | |||
398 | return last_map_addr >> PAGE_SHIFT; | ||
367 | } | 399 | } |
368 | 400 | ||
369 | static void __init find_early_table_space(unsigned long end) | 401 | static void __init find_early_table_space(unsigned long end) |
@@ -371,9 +403,11 @@ static void __init find_early_table_space(unsigned long end) | |||
371 | unsigned long puds, pmds, tables, start; | 403 | unsigned long puds, pmds, tables, start; |
372 | 404 | ||
373 | puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; | 405 | puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; |
374 | pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; | 406 | tables = round_up(puds * sizeof(pud_t), PAGE_SIZE); |
375 | tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) + | 407 | if (!direct_gbpages) { |
376 | round_up(pmds * sizeof(pmd_t), PAGE_SIZE); | 408 | pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; |
409 | tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE); | ||
410 | } | ||
377 | 411 | ||
378 | /* | 412 | /* |
379 | * RED-PEN putting page tables only on node 0 could | 413 | * RED-PEN putting page tables only on node 0 could |
@@ -393,16 +427,135 @@ static void __init find_early_table_space(unsigned long end) | |||
393 | (table_start << PAGE_SHIFT) + tables); | 427 | (table_start << PAGE_SHIFT) + tables); |
394 | } | 428 | } |
395 | 429 | ||
430 | static void __init init_gbpages(void) | ||
431 | { | ||
432 | if (direct_gbpages && cpu_has_gbpages) | ||
433 | printk(KERN_INFO "Using GB pages for direct mapping\n"); | ||
434 | else | ||
435 | direct_gbpages = 0; | ||
436 | } | ||
437 | |||
438 | #ifdef CONFIG_MEMTEST_BOOTPARAM | ||
439 | |||
440 | static void __init memtest(unsigned long start_phys, unsigned long size, | ||
441 | unsigned pattern) | ||
442 | { | ||
443 | unsigned long i; | ||
444 | unsigned long *start; | ||
445 | unsigned long start_bad; | ||
446 | unsigned long last_bad; | ||
447 | unsigned long val; | ||
448 | unsigned long start_phys_aligned; | ||
449 | unsigned long count; | ||
450 | unsigned long incr; | ||
451 | |||
452 | switch (pattern) { | ||
453 | case 0: | ||
454 | val = 0UL; | ||
455 | break; | ||
456 | case 1: | ||
457 | val = -1UL; | ||
458 | break; | ||
459 | case 2: | ||
460 | val = 0x5555555555555555UL; | ||
461 | break; | ||
462 | case 3: | ||
463 | val = 0xaaaaaaaaaaaaaaaaUL; | ||
464 | break; | ||
465 | default: | ||
466 | return; | ||
467 | } | ||
468 | |||
469 | incr = sizeof(unsigned long); | ||
470 | start_phys_aligned = ALIGN(start_phys, incr); | ||
471 | count = (size - (start_phys_aligned - start_phys))/incr; | ||
472 | start = __va(start_phys_aligned); | ||
473 | start_bad = 0; | ||
474 | last_bad = 0; | ||
475 | |||
476 | for (i = 0; i < count; i++) | ||
477 | start[i] = val; | ||
478 | for (i = 0; i < count; i++, start++, start_phys_aligned += incr) { | ||
479 | if (*start != val) { | ||
480 | if (start_phys_aligned == last_bad + incr) { | ||
481 | last_bad += incr; | ||
482 | } else { | ||
483 | if (start_bad) { | ||
484 | printk(KERN_CONT "\n %016lx bad mem addr %016lx - %016lx reserved", | ||
485 | val, start_bad, last_bad + incr); | ||
486 | reserve_early(start_bad, last_bad - start_bad, "BAD RAM"); | ||
487 | } | ||
488 | start_bad = last_bad = start_phys_aligned; | ||
489 | } | ||
490 | } | ||
491 | } | ||
492 | if (start_bad) { | ||
493 | printk(KERN_CONT "\n %016lx bad mem addr %016lx - %016lx reserved", | ||
494 | val, start_bad, last_bad + incr); | ||
495 | reserve_early(start_bad, last_bad - start_bad, "BAD RAM"); | ||
496 | } | ||
497 | |||
498 | } | ||
499 | |||
500 | static int memtest_pattern __initdata = CONFIG_MEMTEST_BOOTPARAM_VALUE; | ||
501 | |||
502 | static int __init parse_memtest(char *arg) | ||
503 | { | ||
504 | if (arg) | ||
505 | memtest_pattern = simple_strtoul(arg, NULL, 0); | ||
506 | return 0; | ||
507 | } | ||
508 | |||
509 | early_param("memtest", parse_memtest); | ||
510 | |||
511 | static void __init early_memtest(unsigned long start, unsigned long end) | ||
512 | { | ||
513 | unsigned long t_start, t_size; | ||
514 | unsigned pattern; | ||
515 | |||
516 | if (!memtest_pattern) | ||
517 | return; | ||
518 | |||
519 | printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern); | ||
520 | for (pattern = 0; pattern < memtest_pattern; pattern++) { | ||
521 | t_start = start; | ||
522 | t_size = 0; | ||
523 | while (t_start < end) { | ||
524 | t_start = find_e820_area_size(t_start, &t_size, 1); | ||
525 | |||
526 | /* done ? */ | ||
527 | if (t_start >= end) | ||
528 | break; | ||
529 | if (t_start + t_size > end) | ||
530 | t_size = end - t_start; | ||
531 | |||
532 | printk(KERN_CONT "\n %016lx - %016lx pattern %d", | ||
533 | t_start, t_start + t_size, pattern); | ||
534 | |||
535 | memtest(t_start, t_size, pattern); | ||
536 | |||
537 | t_start += t_size; | ||
538 | } | ||
539 | } | ||
540 | printk(KERN_CONT "\n"); | ||
541 | } | ||
542 | #else | ||
543 | static void __init early_memtest(unsigned long start, unsigned long end) | ||
544 | { | ||
545 | } | ||
546 | #endif | ||
547 | |||
396 | /* | 548 | /* |
397 | * Setup the direct mapping of the physical memory at PAGE_OFFSET. | 549 | * Setup the direct mapping of the physical memory at PAGE_OFFSET. |
398 | * This runs before bootmem is initialized and gets pages directly from | 550 | * This runs before bootmem is initialized and gets pages directly from |
399 | * the physical memory. To access them they are temporarily mapped. | 551 | * the physical memory. To access them they are temporarily mapped. |
400 | */ | 552 | */ |
401 | void __init_refok init_memory_mapping(unsigned long start, unsigned long end) | 553 | unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned long end) |
402 | { | 554 | { |
403 | unsigned long next; | 555 | unsigned long next, last_map_addr = end; |
556 | unsigned long start_phys = start, end_phys = end; | ||
404 | 557 | ||
405 | pr_debug("init_memory_mapping\n"); | 558 | printk(KERN_INFO "init_memory_mapping\n"); |
406 | 559 | ||
407 | /* | 560 | /* |
408 | * Find space for the kernel direct mapping tables. | 561 | * Find space for the kernel direct mapping tables. |
@@ -411,8 +564,10 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end) | |||
411 | * memory mapped. Unfortunately this is done currently before the | 564 | * memory mapped. Unfortunately this is done currently before the |
412 | * nodes are discovered. | 565 | * nodes are discovered. |
413 | */ | 566 | */ |
414 | if (!after_bootmem) | 567 | if (!after_bootmem) { |
568 | init_gbpages(); | ||
415 | find_early_table_space(end); | 569 | find_early_table_space(end); |
570 | } | ||
416 | 571 | ||
417 | start = (unsigned long)__va(start); | 572 | start = (unsigned long)__va(start); |
418 | end = (unsigned long)__va(end); | 573 | end = (unsigned long)__va(end); |
@@ -430,7 +585,7 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end) | |||
430 | next = start + PGDIR_SIZE; | 585 | next = start + PGDIR_SIZE; |
431 | if (next > end) | 586 | if (next > end) |
432 | next = end; | 587 | next = end; |
433 | phys_pud_init(pud, __pa(start), __pa(next)); | 588 | last_map_addr = phys_pud_init(pud, __pa(start), __pa(next)); |
434 | if (!after_bootmem) | 589 | if (!after_bootmem) |
435 | set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys)); | 590 | set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys)); |
436 | unmap_low_page(pud); | 591 | unmap_low_page(pud); |
@@ -443,6 +598,11 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end) | |||
443 | if (!after_bootmem) | 598 | if (!after_bootmem) |
444 | reserve_early(table_start << PAGE_SHIFT, | 599 | reserve_early(table_start << PAGE_SHIFT, |
445 | table_end << PAGE_SHIFT, "PGTABLE"); | 600 | table_end << PAGE_SHIFT, "PGTABLE"); |
601 | |||
602 | if (!after_bootmem) | ||
603 | early_memtest(start_phys, end_phys); | ||
604 | |||
605 | return last_map_addr; | ||
446 | } | 606 | } |
447 | 607 | ||
448 | #ifndef CONFIG_NUMA | 608 | #ifndef CONFIG_NUMA |
@@ -482,11 +642,13 @@ int arch_add_memory(int nid, u64 start, u64 size) | |||
482 | { | 642 | { |
483 | struct pglist_data *pgdat = NODE_DATA(nid); | 643 | struct pglist_data *pgdat = NODE_DATA(nid); |
484 | struct zone *zone = pgdat->node_zones + ZONE_NORMAL; | 644 | struct zone *zone = pgdat->node_zones + ZONE_NORMAL; |
485 | unsigned long start_pfn = start >> PAGE_SHIFT; | 645 | unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT; |
486 | unsigned long nr_pages = size >> PAGE_SHIFT; | 646 | unsigned long nr_pages = size >> PAGE_SHIFT; |
487 | int ret; | 647 | int ret; |
488 | 648 | ||
489 | init_memory_mapping(start, start + size-1); | 649 | last_mapped_pfn = init_memory_mapping(start, start + size-1); |
650 | if (last_mapped_pfn > max_pfn_mapped) | ||
651 | max_pfn_mapped = last_mapped_pfn; | ||
490 | 652 | ||
491 | ret = __add_pages(zone, start_pfn, nr_pages); | 653 | ret = __add_pages(zone, start_pfn, nr_pages); |
492 | WARN_ON(1); | 654 | WARN_ON(1); |
@@ -596,24 +758,7 @@ EXPORT_SYMBOL_GPL(rodata_test_data); | |||
596 | 758 | ||
597 | void mark_rodata_ro(void) | 759 | void mark_rodata_ro(void) |
598 | { | 760 | { |
599 | unsigned long start = (unsigned long)_stext, end; | 761 | unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata); |
600 | |||
601 | #ifdef CONFIG_HOTPLUG_CPU | ||
602 | /* It must still be possible to apply SMP alternatives. */ | ||
603 | if (num_possible_cpus() > 1) | ||
604 | start = (unsigned long)_etext; | ||
605 | #endif | ||
606 | |||
607 | #ifdef CONFIG_KPROBES | ||
608 | start = (unsigned long)__start_rodata; | ||
609 | #endif | ||
610 | |||
611 | end = (unsigned long)__end_rodata; | ||
612 | start = (start + PAGE_SIZE - 1) & PAGE_MASK; | ||
613 | end &= PAGE_MASK; | ||
614 | if (end <= start) | ||
615 | return; | ||
616 | |||
617 | 762 | ||
618 | printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", | 763 | printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", |
619 | (end - start) >> 10); | 764 | (end - start) >> 10); |
@@ -636,6 +781,7 @@ void mark_rodata_ro(void) | |||
636 | set_memory_ro(start, (end-start) >> PAGE_SHIFT); | 781 | set_memory_ro(start, (end-start) >> PAGE_SHIFT); |
637 | #endif | 782 | #endif |
638 | } | 783 | } |
784 | |||
639 | #endif | 785 | #endif |
640 | 786 | ||
641 | #ifdef CONFIG_BLK_DEV_INITRD | 787 | #ifdef CONFIG_BLK_DEV_INITRD |
@@ -657,7 +803,7 @@ void __init reserve_bootmem_generic(unsigned long phys, unsigned len) | |||
657 | * This can happen with kdump kernels when accessing | 803 | * This can happen with kdump kernels when accessing |
658 | * firmware tables: | 804 | * firmware tables: |
659 | */ | 805 | */ |
660 | if (pfn < end_pfn_map) | 806 | if (pfn < max_pfn_mapped) |
661 | return; | 807 | return; |
662 | 808 | ||
663 | printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n", | 809 | printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n", |
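The verify pass in memtest() above coalesces consecutive bad words into a single [start_bad, last_bad + incr) range before reserving it, so one reserve_early() call covers a whole run of failures. A user-space sketch of just that coalescing logic (verify_pattern is a made-up name; the kernel version reserves the range instead of printing it):

    #include <stdio.h>

    static void verify_pattern(unsigned long *start, unsigned long count,
                               unsigned long phys, unsigned long val)
    {
            unsigned long incr = sizeof(unsigned long);
            unsigned long start_bad = 0, last_bad = 0, i;

            for (i = 0; i < count; i++, phys += incr) {
                    if (start[i] == val)
                            continue;
                    if (phys == last_bad + incr) {
                            last_bad += incr;       /* extend the open range */
                    } else {
                            if (start_bad)          /* close the previous range */
                                    printf("bad mem %016lx - %016lx\n",
                                           start_bad, last_bad + incr);
                            start_bad = last_bad = phys;
                    }
            }
            if (start_bad)
                    printf("bad mem %016lx - %016lx\n",
                           start_bad, last_bad + incr);
    }

    int main(void)
    {
            unsigned long buf[8] = { 0, 0, 1, 1, 0, 1, 0, 0 };
            /* expect two ranges: words 2-3 and word 5 */
            verify_pattern(buf, 8, 0x1000, 0);
            return 0;
    }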
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 794895c6dcc9..c590fd200e29 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -19,11 +19,7 @@ | |||
19 | #include <asm/pgtable.h> | 19 | #include <asm/pgtable.h> |
20 | #include <asm/tlbflush.h> | 20 | #include <asm/tlbflush.h> |
21 | #include <asm/pgalloc.h> | 21 | #include <asm/pgalloc.h> |
22 | 22 | #include <asm/pat.h> | |
23 | enum ioremap_mode { | ||
24 | IOR_MODE_UNCACHED, | ||
25 | IOR_MODE_CACHED, | ||
26 | }; | ||
27 | 23 | ||
28 | #ifdef CONFIG_X86_64 | 24 | #ifdef CONFIG_X86_64 |
29 | 25 | ||
@@ -35,11 +31,23 @@ unsigned long __phys_addr(unsigned long x) | |||
35 | } | 31 | } |
36 | EXPORT_SYMBOL(__phys_addr); | 32 | EXPORT_SYMBOL(__phys_addr); |
37 | 33 | ||
34 | static inline int phys_addr_valid(unsigned long addr) | ||
35 | { | ||
36 | return addr < (1UL << boot_cpu_data.x86_phys_bits); | ||
37 | } | ||
38 | |||
39 | #else | ||
40 | |||
41 | static inline int phys_addr_valid(unsigned long addr) | ||
42 | { | ||
43 | return 1; | ||
44 | } | ||
45 | |||
38 | #endif | 46 | #endif |
39 | 47 | ||
40 | int page_is_ram(unsigned long pagenr) | 48 | int page_is_ram(unsigned long pagenr) |
41 | { | 49 | { |
42 | unsigned long addr, end; | 50 | resource_size_t addr, end; |
43 | int i; | 51 | int i; |
44 | 52 | ||
45 | /* | 53 | /* |
@@ -78,19 +86,22 @@ int page_is_ram(unsigned long pagenr) | |||
78 | * Fix up the linear direct mapping of the kernel to avoid cache attribute | 86 | * Fix up the linear direct mapping of the kernel to avoid cache attribute |
79 | * conflicts. | 87 | * conflicts. |
80 | */ | 88 | */ |
81 | static int ioremap_change_attr(unsigned long vaddr, unsigned long size, | 89 | int ioremap_change_attr(unsigned long vaddr, unsigned long size, |
82 | enum ioremap_mode mode) | 90 | unsigned long prot_val) |
83 | { | 91 | { |
84 | unsigned long nrpages = size >> PAGE_SHIFT; | 92 | unsigned long nrpages = size >> PAGE_SHIFT; |
85 | int err; | 93 | int err; |
86 | 94 | ||
87 | switch (mode) { | 95 | switch (prot_val) { |
88 | case IOR_MODE_UNCACHED: | 96 | case _PAGE_CACHE_UC: |
89 | default: | 97 | default: |
90 | err = set_memory_uc(vaddr, nrpages); | 98 | err = _set_memory_uc(vaddr, nrpages); |
99 | break; | ||
100 | case _PAGE_CACHE_WC: | ||
101 | err = _set_memory_wc(vaddr, nrpages); | ||
91 | break; | 102 | break; |
92 | case IOR_MODE_CACHED: | 103 | case _PAGE_CACHE_WB: |
93 | err = set_memory_wb(vaddr, nrpages); | 104 | err = _set_memory_wb(vaddr, nrpages); |
94 | break; | 105 | break; |
95 | } | 106 | } |
96 | 107 | ||
@@ -107,17 +118,27 @@ static int ioremap_change_attr(unsigned long vaddr, unsigned long size, | |||
107 | * caller shouldn't need to know that small detail. | 118 | * caller shouldn't need to know that small detail. |
108 | */ | 119 | */ |
109 | static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, | 120 | static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, |
110 | enum ioremap_mode mode) | 121 | unsigned long prot_val) |
111 | { | 122 | { |
112 | unsigned long pfn, offset, last_addr, vaddr; | 123 | unsigned long pfn, offset, vaddr; |
124 | resource_size_t last_addr; | ||
113 | struct vm_struct *area; | 125 | struct vm_struct *area; |
126 | unsigned long new_prot_val; | ||
114 | pgprot_t prot; | 127 | pgprot_t prot; |
128 | int retval; | ||
115 | 129 | ||
116 | /* Don't allow wraparound or zero size */ | 130 | /* Don't allow wraparound or zero size */ |
117 | last_addr = phys_addr + size - 1; | 131 | last_addr = phys_addr + size - 1; |
118 | if (!size || last_addr < phys_addr) | 132 | if (!size || last_addr < phys_addr) |
119 | return NULL; | 133 | return NULL; |
120 | 134 | ||
135 | if (!phys_addr_valid(phys_addr)) { | ||
136 | printk(KERN_WARNING "ioremap: invalid physical address %llx\n", | ||
137 | phys_addr); | ||
138 | WARN_ON_ONCE(1); | ||
139 | return NULL; | ||
140 | } | ||
141 | |||
121 | /* | 142 | /* |
122 | * Don't remap the low PCI/ISA area, it's always mapped.. | 143 | * Don't remap the low PCI/ISA area, it's always mapped.. |
123 | */ | 144 | */ |
@@ -127,25 +148,14 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, | |||
127 | /* | 148 | /* |
128 | * Don't allow anybody to remap normal RAM that we're using.. | 149 | * Don't allow anybody to remap normal RAM that we're using.. |
129 | */ | 150 | */ |
130 | for (pfn = phys_addr >> PAGE_SHIFT; pfn < max_pfn_mapped && | 151 | for (pfn = phys_addr >> PAGE_SHIFT; |
131 | (pfn << PAGE_SHIFT) < last_addr; pfn++) { | 152 | (pfn << PAGE_SHIFT) < last_addr; pfn++) { |
132 | if (page_is_ram(pfn) && pfn_valid(pfn) && | ||
133 | !PageReserved(pfn_to_page(pfn))) | ||
134 | return NULL; | ||
135 | } | ||
136 | 153 | ||
137 | switch (mode) { | 154 | int is_ram = page_is_ram(pfn); |
138 | case IOR_MODE_UNCACHED: | 155 | |
139 | default: | 156 | if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn))) |
140 | /* | 157 | return NULL; |
141 | * FIXME: we will use UC MINUS for now, as video fb drivers | 158 | WARN_ON_ONCE(is_ram); |
142 | * depend on it. Upcoming ioremap_wc() will fix this behavior. | ||
143 | */ | ||
144 | prot = PAGE_KERNEL_UC_MINUS; | ||
145 | break; | ||
146 | case IOR_MODE_CACHED: | ||
147 | prot = PAGE_KERNEL; | ||
148 | break; | ||
149 | } | 159 | } |
150 | 160 | ||
151 | /* | 161 | /* |
@@ -155,6 +165,49 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, | |||
155 | phys_addr &= PAGE_MASK; | 165 | phys_addr &= PAGE_MASK; |
156 | size = PAGE_ALIGN(last_addr+1) - phys_addr; | 166 | size = PAGE_ALIGN(last_addr+1) - phys_addr; |
157 | 167 | ||
168 | retval = reserve_memtype(phys_addr, phys_addr + size, | ||
169 | prot_val, &new_prot_val); | ||
170 | if (retval) { | ||
171 | pr_debug("Warning: reserve_memtype returned %d\n", retval); | ||
172 | return NULL; | ||
173 | } | ||
174 | |||
175 | if (prot_val != new_prot_val) { | ||
176 | /* | ||
177 | * Do not fall back to certain memory types with certain | ||
178 | * requested type: | ||
179 | * - request is uncached, return cannot be write-back | ||
180 | * - request is uncached, return cannot be write-combine | ||
181 | * - request is write-combine, return cannot be write-back | ||
182 | */ | ||
183 | if ((prot_val == _PAGE_CACHE_UC && | ||
184 | (new_prot_val == _PAGE_CACHE_WB || | ||
185 | new_prot_val == _PAGE_CACHE_WC)) || | ||
186 | (prot_val == _PAGE_CACHE_WC && | ||
187 | new_prot_val == _PAGE_CACHE_WB)) { | ||
188 | pr_debug( | ||
189 | "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n", | ||
190 | phys_addr, phys_addr + size, | ||
191 | prot_val, new_prot_val); | ||
192 | free_memtype(phys_addr, phys_addr + size); | ||
193 | return NULL; | ||
194 | } | ||
195 | prot_val = new_prot_val; | ||
196 | } | ||
197 | |||
198 | switch (prot_val) { | ||
199 | case _PAGE_CACHE_UC: | ||
200 | default: | ||
201 | prot = PAGE_KERNEL_NOCACHE; | ||
202 | break; | ||
203 | case _PAGE_CACHE_WC: | ||
204 | prot = PAGE_KERNEL_WC; | ||
205 | break; | ||
206 | case _PAGE_CACHE_WB: | ||
207 | prot = PAGE_KERNEL; | ||
208 | break; | ||
209 | } | ||
210 | |||
158 | /* | 211 | /* |
159 | * Ok, go for it.. | 212 | * Ok, go for it.. |
160 | */ | 213 | */ |
@@ -164,11 +217,13 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, | |||
164 | area->phys_addr = phys_addr; | 217 | area->phys_addr = phys_addr; |
165 | vaddr = (unsigned long) area->addr; | 218 | vaddr = (unsigned long) area->addr; |
166 | if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) { | 219 | if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) { |
220 | free_memtype(phys_addr, phys_addr + size); | ||
167 | free_vm_area(area); | 221 | free_vm_area(area); |
168 | return NULL; | 222 | return NULL; |
169 | } | 223 | } |
170 | 224 | ||
171 | if (ioremap_change_attr(vaddr, size, mode) < 0) { | 225 | if (ioremap_change_attr(vaddr, size, prot_val) < 0) { |
226 | free_memtype(phys_addr, phys_addr + size); | ||
172 | vunmap(area->addr); | 227 | vunmap(area->addr); |
173 | return NULL; | 228 | return NULL; |
174 | } | 229 | } |
@@ -199,13 +254,32 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, | |||
199 | */ | 254 | */ |
200 | void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size) | 255 | void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size) |
201 | { | 256 | { |
202 | return __ioremap(phys_addr, size, IOR_MODE_UNCACHED); | 257 | return __ioremap(phys_addr, size, _PAGE_CACHE_UC); |
203 | } | 258 | } |
204 | EXPORT_SYMBOL(ioremap_nocache); | 259 | EXPORT_SYMBOL(ioremap_nocache); |
205 | 260 | ||
261 | /** | ||
262 | * ioremap_wc - map memory into CPU space write combined | ||
263 | * @offset: bus address of the memory | ||
264 | * @size: size of the resource to map | ||
265 | * | ||
266 | * This version of ioremap ensures that the memory is marked write combining. | ||
267 | * Write combining allows faster writes to some hardware devices. | ||
268 | * | ||
269 | * Must be freed with iounmap. | ||
270 | */ | ||
271 | void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size) | ||
272 | { | ||
273 | if (pat_wc_enabled) | ||
274 | return __ioremap(phys_addr, size, _PAGE_CACHE_WC); | ||
275 | else | ||
276 | return ioremap_nocache(phys_addr, size); | ||
277 | } | ||
278 | EXPORT_SYMBOL(ioremap_wc); | ||
279 | |||
206 | void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size) | 280 | void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size) |
207 | { | 281 | { |
208 | return __ioremap(phys_addr, size, IOR_MODE_CACHED); | 282 | return __ioremap(phys_addr, size, _PAGE_CACHE_WB); |
209 | } | 283 | } |
210 | EXPORT_SYMBOL(ioremap_cache); | 284 | EXPORT_SYMBOL(ioremap_cache); |
211 | 285 | ||
@@ -252,6 +326,8 @@ void iounmap(volatile void __iomem *addr) | |||
252 | return; | 326 | return; |
253 | } | 327 | } |
254 | 328 | ||
329 | free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p)); | ||
330 | |||
255 | /* Finally remove it */ | 331 | /* Finally remove it */ |
256 | o = remove_vm_area((void *)addr); | 332 | o = remove_vm_area((void *)addr); |
257 | BUG_ON(p != o || o == NULL); | 333 | BUG_ON(p != o || o == NULL); |
@@ -272,8 +348,8 @@ static int __init early_ioremap_debug_setup(char *str) | |||
272 | early_param("early_ioremap_debug", early_ioremap_debug_setup); | 348 | early_param("early_ioremap_debug", early_ioremap_debug_setup); |
273 | 349 | ||
274 | static __initdata int after_paging_init; | 350 | static __initdata int after_paging_init; |
275 | static __initdata pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] | 351 | static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] |
276 | __attribute__((aligned(PAGE_SIZE))); | 352 | __section(.bss.page_aligned); |
277 | 353 | ||
278 | static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) | 354 | static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) |
279 | { | 355 | { |
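With ioremap_wc() exported above, a driver mapping a prefetchable framebuffer BAR can ask for write-combining explicitly; when PAT is unavailable the call quietly degrades to uncached. A hypothetical driver fragment (map_framebuffer and the BAR index are assumptions, not part of this patch):

    /* sketch of a caller; assumes <linux/pci.h> and <linux/io.h> */
    static void __iomem *map_framebuffer(struct pci_dev *pdev)
    {
            /* BAR 0 assumed to hold the prefetchable framebuffer */
            return ioremap_wc(pci_resource_start(pdev, 0),
                              pci_resource_len(pdev, 0));
    }

Streaming writes through the returned mapping can then be combined in the CPU's WC buffers; the mapping is released with iounmap() as usual.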
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 16b82ad34b96..2ea56f48f29b 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c | |||
@@ -31,13 +31,15 @@ bootmem_data_t plat_node_bdata[MAX_NUMNODES]; | |||
31 | 31 | ||
32 | struct memnode memnode; | 32 | struct memnode memnode; |
33 | 33 | ||
34 | #ifdef CONFIG_SMP | ||
34 | int x86_cpu_to_node_map_init[NR_CPUS] = { | 35 | int x86_cpu_to_node_map_init[NR_CPUS] = { |
35 | [0 ... NR_CPUS-1] = NUMA_NO_NODE | 36 | [0 ... NR_CPUS-1] = NUMA_NO_NODE |
36 | }; | 37 | }; |
37 | void *x86_cpu_to_node_map_early_ptr; | 38 | void *x86_cpu_to_node_map_early_ptr; |
39 | EXPORT_SYMBOL(x86_cpu_to_node_map_early_ptr); | ||
40 | #endif | ||
38 | DEFINE_PER_CPU(int, x86_cpu_to_node_map) = NUMA_NO_NODE; | 41 | DEFINE_PER_CPU(int, x86_cpu_to_node_map) = NUMA_NO_NODE; |
39 | EXPORT_PER_CPU_SYMBOL(x86_cpu_to_node_map); | 42 | EXPORT_PER_CPU_SYMBOL(x86_cpu_to_node_map); |
40 | EXPORT_SYMBOL(x86_cpu_to_node_map_early_ptr); | ||
41 | 43 | ||
42 | s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = { | 44 | s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = { |
43 | [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE | 45 | [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE |
@@ -548,8 +550,6 @@ void __cpuinit numa_set_node(int cpu, int node) | |||
548 | { | 550 | { |
549 | int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr; | 551 | int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr; |
550 | 552 | ||
551 | cpu_pda(cpu)->nodenumber = node; | ||
552 | |||
553 | if(cpu_to_node_map) | 553 | if(cpu_to_node_map) |
554 | cpu_to_node_map[cpu] = node; | 554 | cpu_to_node_map[cpu] = node; |
555 | else if(per_cpu_offset(cpu)) | 555 | else if(per_cpu_offset(cpu)) |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 7b79f6be4e7d..7d9517abc9af 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -9,6 +9,8 @@ | |||
9 | #include <linux/slab.h> | 9 | #include <linux/slab.h> |
10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
11 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
12 | #include <linux/seq_file.h> | ||
13 | #include <linux/debugfs.h> | ||
12 | 14 | ||
13 | #include <asm/e820.h> | 15 | #include <asm/e820.h> |
14 | #include <asm/processor.h> | 16 | #include <asm/processor.h> |
@@ -17,6 +19,7 @@ | |||
17 | #include <asm/uaccess.h> | 19 | #include <asm/uaccess.h> |
18 | #include <asm/pgalloc.h> | 20 | #include <asm/pgalloc.h> |
19 | #include <asm/proto.h> | 21 | #include <asm/proto.h> |
22 | #include <asm/pat.h> | ||
20 | 23 | ||
21 | /* | 24 | /* |
22 | * The current flushing context - we pass it instead of 5 arguments: | 25 | * The current flushing context - we pass it instead of 5 arguments: |
@@ -28,6 +31,7 @@ struct cpa_data { | |||
28 | int numpages; | 31 | int numpages; |
29 | int flushtlb; | 32 | int flushtlb; |
30 | unsigned long pfn; | 33 | unsigned long pfn; |
34 | unsigned force_split : 1; | ||
31 | }; | 35 | }; |
32 | 36 | ||
33 | #ifdef CONFIG_X86_64 | 37 | #ifdef CONFIG_X86_64 |
@@ -259,6 +263,9 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, | |||
259 | int i, do_split = 1; | 263 | int i, do_split = 1; |
260 | unsigned int level; | 264 | unsigned int level; |
261 | 265 | ||
266 | if (cpa->force_split) | ||
267 | return 1; | ||
268 | |||
262 | spin_lock_irqsave(&pgd_lock, flags); | 269 | spin_lock_irqsave(&pgd_lock, flags); |
263 | /* | 270 | /* |
264 | * Check for races, another CPU might have split this page | 271 | * Check for races, another CPU might have split this page |
@@ -693,7 +700,8 @@ static inline int cache_attr(pgprot_t attr) | |||
693 | } | 700 | } |
694 | 701 | ||
695 | static int change_page_attr_set_clr(unsigned long addr, int numpages, | 702 | static int change_page_attr_set_clr(unsigned long addr, int numpages, |
696 | pgprot_t mask_set, pgprot_t mask_clr) | 703 | pgprot_t mask_set, pgprot_t mask_clr, |
704 | int force_split) | ||
697 | { | 705 | { |
698 | struct cpa_data cpa; | 706 | struct cpa_data cpa; |
699 | int ret, cache, checkalias; | 707 | int ret, cache, checkalias; |
@@ -704,7 +712,7 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages, | |||
704 | */ | 712 | */ |
705 | mask_set = canon_pgprot(mask_set); | 713 | mask_set = canon_pgprot(mask_set); |
706 | mask_clr = canon_pgprot(mask_clr); | 714 | mask_clr = canon_pgprot(mask_clr); |
707 | if (!pgprot_val(mask_set) && !pgprot_val(mask_clr)) | 715 | if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split) |
708 | return 0; | 716 | return 0; |
709 | 717 | ||
710 | /* Ensure we are PAGE_SIZE aligned */ | 718 | /* Ensure we are PAGE_SIZE aligned */ |
@@ -721,6 +729,7 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages, | |||
721 | cpa.mask_set = mask_set; | 729 | cpa.mask_set = mask_set; |
722 | cpa.mask_clr = mask_clr; | 730 | cpa.mask_clr = mask_clr; |
723 | cpa.flushtlb = 0; | 731 | cpa.flushtlb = 0; |
732 | cpa.force_split = force_split; | ||
724 | 733 | ||
725 | /* No alias checking for _NX bit modifications */ | 734 | /* No alias checking for _NX bit modifications */ |
726 | checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX; | 735 | checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX; |
@@ -759,26 +768,61 @@ out: | |||
759 | static inline int change_page_attr_set(unsigned long addr, int numpages, | 768 | static inline int change_page_attr_set(unsigned long addr, int numpages, |
760 | pgprot_t mask) | 769 | pgprot_t mask) |
761 | { | 770 | { |
762 | return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0)); | 771 | return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0); |
763 | } | 772 | } |
764 | 773 | ||
765 | static inline int change_page_attr_clear(unsigned long addr, int numpages, | 774 | static inline int change_page_attr_clear(unsigned long addr, int numpages, |
766 | pgprot_t mask) | 775 | pgprot_t mask) |
767 | { | 776 | { |
768 | return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask); | 777 | return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0); |
769 | } | 778 | } |
770 | 779 | ||
771 | int set_memory_uc(unsigned long addr, int numpages) | 780 | int _set_memory_uc(unsigned long addr, int numpages) |
772 | { | 781 | { |
773 | return change_page_attr_set(addr, numpages, | 782 | return change_page_attr_set(addr, numpages, |
774 | __pgprot(_PAGE_PCD)); | 783 | __pgprot(_PAGE_CACHE_UC)); |
784 | } | ||
785 | |||
786 | int set_memory_uc(unsigned long addr, int numpages) | ||
787 | { | ||
788 | if (reserve_memtype(addr, addr + numpages * PAGE_SIZE, | ||
789 | _PAGE_CACHE_UC, NULL)) | ||
790 | return -EINVAL; | ||
791 | |||
792 | return _set_memory_uc(addr, numpages); | ||
775 | } | 793 | } |
776 | EXPORT_SYMBOL(set_memory_uc); | 794 | EXPORT_SYMBOL(set_memory_uc); |
777 | 795 | ||
778 | int set_memory_wb(unsigned long addr, int numpages) | 796 | int _set_memory_wc(unsigned long addr, int numpages) |
797 | { | ||
798 | return change_page_attr_set(addr, numpages, | ||
799 | __pgprot(_PAGE_CACHE_WC)); | ||
800 | } | ||
801 | |||
802 | int set_memory_wc(unsigned long addr, int numpages) | ||
803 | { | ||
804 | if (!pat_wc_enabled) | ||
805 | return set_memory_uc(addr, numpages); | ||
806 | |||
807 | if (reserve_memtype(addr, addr + numpages * PAGE_SIZE, | ||
808 | _PAGE_CACHE_WC, NULL)) | ||
809 | return -EINVAL; | ||
810 | |||
811 | return _set_memory_wc(addr, numpages); | ||
812 | } | ||
813 | EXPORT_SYMBOL(set_memory_wc); | ||
814 | |||
815 | int _set_memory_wb(unsigned long addr, int numpages) | ||
779 | { | 816 | { |
780 | return change_page_attr_clear(addr, numpages, | 817 | return change_page_attr_clear(addr, numpages, |
781 | __pgprot(_PAGE_PCD | _PAGE_PWT)); | 818 | __pgprot(_PAGE_CACHE_MASK)); |
819 | } | ||
820 | |||
821 | int set_memory_wb(unsigned long addr, int numpages) | ||
822 | { | ||
823 | free_memtype(addr, addr + numpages * PAGE_SIZE); | ||
824 | |||
825 | return _set_memory_wb(addr, numpages); | ||
782 | } | 826 | } |
783 | EXPORT_SYMBOL(set_memory_wb); | 827 | EXPORT_SYMBOL(set_memory_wb); |
784 | 828 | ||
@@ -809,6 +853,12 @@ int set_memory_np(unsigned long addr, int numpages) | |||
809 | return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT)); | 853 | return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT)); |
810 | } | 854 | } |
811 | 855 | ||
856 | int set_memory_4k(unsigned long addr, int numpages) | ||
857 | { | ||
858 | return change_page_attr_set_clr(addr, numpages, __pgprot(0), | ||
859 | __pgprot(0), 1); | ||
860 | } | ||
861 | |||
812 | int set_pages_uc(struct page *page, int numpages) | 862 | int set_pages_uc(struct page *page, int numpages) |
813 | { | 863 | { |
814 | unsigned long addr = (unsigned long)page_address(page); | 864 | unsigned long addr = (unsigned long)page_address(page); |
@@ -918,6 +968,45 @@ void kernel_map_pages(struct page *page, int numpages, int enable) | |||
918 | cpa_fill_pool(NULL); | 968 | cpa_fill_pool(NULL); |
919 | } | 969 | } |
920 | 970 | ||
971 | #ifdef CONFIG_DEBUG_FS | ||
972 | static int dpa_show(struct seq_file *m, void *v) | ||
973 | { | ||
974 | seq_puts(m, "DEBUG_PAGEALLOC\n"); | ||
975 | seq_printf(m, "pool_size : %lu\n", pool_size); | ||
976 | seq_printf(m, "pool_pages : %lu\n", pool_pages); | ||
977 | seq_printf(m, "pool_low : %lu\n", pool_low); | ||
978 | seq_printf(m, "pool_used : %lu\n", pool_used); | ||
979 | seq_printf(m, "pool_failed : %lu\n", pool_failed); | ||
980 | |||
981 | return 0; | ||
982 | } | ||
983 | |||
984 | static int dpa_open(struct inode *inode, struct file *filp) | ||
985 | { | ||
986 | return single_open(filp, dpa_show, NULL); | ||
987 | } | ||
988 | |||
989 | static const struct file_operations dpa_fops = { | ||
990 | .open = dpa_open, | ||
991 | .read = seq_read, | ||
992 | .llseek = seq_lseek, | ||
993 | .release = single_release, | ||
994 | }; | ||
995 | |||
996 | int __init debug_pagealloc_proc_init(void) | ||
997 | { | ||
998 | struct dentry *de; | ||
999 | |||
1000 | de = debugfs_create_file("debug_pagealloc", 0600, NULL, NULL, | ||
1001 | &dpa_fops); | ||
1002 | if (!de) | ||
1003 | return -ENOMEM; | ||
1004 | |||
1005 | return 0; | ||
1006 | } | ||
1007 | __initcall(debug_pagealloc_proc_init); | ||
1008 | #endif | ||
1009 | |||
921 | #ifdef CONFIG_HIBERNATION | 1010 | #ifdef CONFIG_HIBERNATION |
922 | 1011 | ||
923 | bool kernel_page_present(struct page *page) | 1012 | bool kernel_page_present(struct page *page) |
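Because set_memory_uc() and set_memory_wc() now reserve a memtype for the range and set_memory_wb() frees it, callers must treat them as a bracket pair over the same address range or the reservation leaks. A sketch of the expected calling pattern (use_range_uncached is a made-up caller, not part of this patch):

    /* sketch of a caller */
    static int use_range_uncached(unsigned long vaddr, int numpages)
    {
            int ret;

            ret = set_memory_uc(vaddr, numpages);   /* reserves the memtype */
            if (ret)
                    return ret;

            /* ... access the range with UC semantics ... */

            return set_memory_wb(vaddr, numpages);  /* frees the memtype */
    }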
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c new file mode 100644 index 000000000000..72c0f6097402 --- /dev/null +++ b/arch/x86/mm/pat.c | |||
@@ -0,0 +1,421 @@ | |||
1 | /* | ||
2 | * Handle caching attributes in page tables (PAT) | ||
3 | * | ||
4 | * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | ||
5 | * Suresh B Siddha <suresh.b.siddha@intel.com> | ||
6 | * | ||
7 | * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen. | ||
8 | */ | ||
9 | |||
10 | #include <linux/mm.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/gfp.h> | ||
13 | #include <linux/fs.h> | ||
14 | |||
15 | #include <asm/msr.h> | ||
16 | #include <asm/tlbflush.h> | ||
17 | #include <asm/processor.h> | ||
18 | #include <asm/pgtable.h> | ||
19 | #include <asm/pat.h> | ||
20 | #include <asm/e820.h> | ||
21 | #include <asm/cacheflush.h> | ||
22 | #include <asm/fcntl.h> | ||
23 | #include <asm/mtrr.h> | ||
24 | |||
25 | int pat_wc_enabled = 1; | ||
26 | |||
27 | static u64 __read_mostly boot_pat_state; | ||
28 | |||
29 | static int nopat(char *str) | ||
30 | { | ||
31 | pat_wc_enabled = 0; | ||
32 | printk(KERN_INFO "x86: PAT support disabled.\n"); | ||
33 | |||
34 | return 0; | ||
35 | } | ||
36 | early_param("nopat", nopat); | ||
37 | |||
38 | static int pat_known_cpu(void) | ||
39 | { | ||
40 | if (!pat_wc_enabled) | ||
41 | return 0; | ||
42 | |||
43 | if (cpu_has_pat) | ||
44 | return 1; | ||
45 | |||
46 | pat_wc_enabled = 0; | ||
47 | printk(KERN_INFO "CPU and/or kernel does not support PAT.\n"); | ||
48 | return 0; | ||
49 | } | ||
50 | |||
51 | enum { | ||
52 | PAT_UC = 0, /* uncached */ | ||
53 | PAT_WC = 1, /* Write combining */ | ||
54 | PAT_WT = 4, /* Write Through */ | ||
55 | PAT_WP = 5, /* Write Protected */ | ||
56 | PAT_WB = 6, /* Write Back (default) */ | ||
57 | PAT_UC_MINUS = 7, /* UC, but can be overridden by MTRR */ | ||
58 | }; | ||
59 | |||
60 | #define PAT(x,y) ((u64)PAT_ ## y << ((x)*8)) | ||
61 | |||
62 | void pat_init(void) | ||
63 | { | ||
64 | u64 pat; | ||
65 | |||
66 | #ifndef CONFIG_X86_PAT | ||
67 | nopat(NULL); | ||
68 | #endif | ||
69 | |||
70 | /* Boot CPU enables PAT based on CPU feature */ | ||
71 | if (!smp_processor_id() && !pat_known_cpu()) | ||
72 | return; | ||
73 | |||
74 | /* APs enable PAT iff boot CPU has enabled it before */ | ||
75 | if (smp_processor_id() && !pat_wc_enabled) | ||
76 | return; | ||
77 | |||
78 | /* Set PWT to Write-Combining. All other bits stay the same */ | ||
79 | /* | ||
80 | * PTE encoding used in Linux: | ||
81 | * PAT | ||
82 | * |PCD | ||
83 | * ||PWT | ||
84 | * ||| | ||
85 | * 000 WB _PAGE_CACHE_WB | ||
86 | * 001 WC _PAGE_CACHE_WC | ||
87 | * 010 UC- _PAGE_CACHE_UC_MINUS | ||
88 | * 011 UC _PAGE_CACHE_UC | ||
89 | * PAT bit unused | ||
90 | */ | ||
91 | pat = PAT(0,WB) | PAT(1,WC) | PAT(2,UC_MINUS) | PAT(3,UC) | | ||
92 | PAT(4,WB) | PAT(5,WC) | PAT(6,UC_MINUS) | PAT(7,UC); | ||
93 | |||
94 | /* Boot CPU check */ | ||
95 | if (!smp_processor_id()) { | ||
96 | rdmsrl(MSR_IA32_CR_PAT, boot_pat_state); | ||
97 | } | ||
98 | |||
99 | wrmsrl(MSR_IA32_CR_PAT, pat); | ||
100 | printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n", | ||
101 | smp_processor_id(), boot_pat_state, pat); | ||
102 | } | ||
103 | |||
104 | #undef PAT | ||
105 | |||
106 | static char *cattr_name(unsigned long flags) | ||
107 | { | ||
108 | switch (flags & _PAGE_CACHE_MASK) { | ||
109 | case _PAGE_CACHE_UC: return "uncached"; | ||
110 | case _PAGE_CACHE_UC_MINUS: return "uncached-minus"; | ||
111 | case _PAGE_CACHE_WB: return "write-back"; | ||
112 | case _PAGE_CACHE_WC: return "write-combining"; | ||
113 | default: return "broken"; | ||
114 | } | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * The global memtype list keeps track of memory type for specific | ||
119 | * physical memory areas. Conflicting memory types in different | ||
120 | * mappings can cause CPU cache corruption. To avoid this we keep track. | ||
121 | * | ||
122 | * The list is sorted based on starting address and can contain multiple | ||
123 | * entries for each address (this allows reference counting for overlapping | ||
124 | * areas). All the aliases have the same cache attributes of course. | ||
125 | * Zero attributes are represented as holes. | ||
126 | * | ||
127 | * Currently the data structure is a list because the number of mappings | ||
128 | * is expected to be relatively small. If this should become a problem | ||
129 | * it could be changed to an rbtree or similar. | ||
130 | * | ||
131 | * memtype_lock protects the whole list. | ||
132 | */ | ||
133 | |||
134 | struct memtype { | ||
135 | u64 start; | ||
136 | u64 end; | ||
137 | unsigned long type; | ||
138 | struct list_head nd; | ||
139 | }; | ||
140 | |||
141 | static LIST_HEAD(memtype_list); | ||
142 | static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */ | ||
143 | |||
144 | /* | ||
145 | * Intersects the PAT memory type with the MTRR memory type and returns | ||
146 | * the resulting memory type as PAT understands it. | ||
147 | * (The type values used by PAT and MTRR are not the same.) | ||
148 | * The intersection is based on the "Effective Memory Type" tables in the | ||
149 | * IA-32 SDM, vol 3a. | ||
150 | */ | ||
151 | static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot, | ||
152 | unsigned long *ret_prot) | ||
153 | { | ||
154 | unsigned long pat_type; | ||
155 | u8 mtrr_type; | ||
156 | |||
157 | mtrr_type = mtrr_type_lookup(start, end); | ||
158 | if (mtrr_type == 0xFF) { /* MTRR not enabled */ | ||
159 | *ret_prot = prot; | ||
160 | return 0; | ||
161 | } | ||
162 | if (mtrr_type == 0xFE) { /* MTRR match error */ | ||
163 | *ret_prot = _PAGE_CACHE_UC; | ||
164 | return -1; | ||
165 | } | ||
166 | if (mtrr_type != MTRR_TYPE_UNCACHABLE && | ||
167 | mtrr_type != MTRR_TYPE_WRBACK && | ||
168 | mtrr_type != MTRR_TYPE_WRCOMB) { /* MTRR type unhandled */ | ||
169 | *ret_prot = _PAGE_CACHE_UC; | ||
170 | return -1; | ||
171 | } | ||
172 | |||
173 | pat_type = prot & _PAGE_CACHE_MASK; | ||
174 | prot &= (~_PAGE_CACHE_MASK); | ||
175 | |||
176 | /* Currently doing intersection by hand. Optimize it later. */ | ||
177 | if (pat_type == _PAGE_CACHE_WC) { | ||
178 | *ret_prot = prot | _PAGE_CACHE_WC; | ||
179 | } else if (pat_type == _PAGE_CACHE_UC_MINUS) { | ||
180 | *ret_prot = prot | _PAGE_CACHE_UC_MINUS; | ||
181 | } else if (pat_type == _PAGE_CACHE_UC || | ||
182 | mtrr_type == MTRR_TYPE_UNCACHABLE) { | ||
183 | *ret_prot = prot | _PAGE_CACHE_UC; | ||
184 | } else if (mtrr_type == MTRR_TYPE_WRCOMB) { | ||
185 | *ret_prot = prot | _PAGE_CACHE_WC; | ||
186 | } else { | ||
187 | *ret_prot = prot | _PAGE_CACHE_WB; | ||
188 | } | ||
189 | |||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | int reserve_memtype(u64 start, u64 end, unsigned long req_type, | ||
194 | unsigned long *ret_type) | ||
195 | { | ||
196 | struct memtype *new_entry = NULL; | ||
197 | struct memtype *parse; | ||
198 | unsigned long actual_type; | ||
199 | int err = 0; | ||
200 | |||
201 | /* Only track when pat_wc_enabled */ | ||
202 | if (!pat_wc_enabled) { | ||
203 | if (ret_type) | ||
204 | *ret_type = req_type; | ||
205 | |||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | /* Low ISA region is always mapped WB in page table. No need to track */ | ||
210 | if (start >= ISA_START_ADDRESS && (end - 1) <= ISA_END_ADDRESS) { | ||
211 | if (ret_type) | ||
212 | *ret_type = _PAGE_CACHE_WB; | ||
213 | |||
214 | return 0; | ||
215 | } | ||
216 | |||
217 | req_type &= _PAGE_CACHE_MASK; | ||
218 | err = pat_x_mtrr_type(start, end, req_type, &actual_type); | ||
219 | if (err) { | ||
220 | if (ret_type) | ||
221 | *ret_type = actual_type; | ||
222 | |||
223 | return -EINVAL; | ||
224 | } | ||
225 | |||
226 | new_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL); | ||
227 | if (!new_entry) | ||
228 | return -ENOMEM; | ||
229 | |||
230 | new_entry->start = start; | ||
231 | new_entry->end = end; | ||
232 | new_entry->type = actual_type; | ||
233 | |||
234 | if (ret_type) | ||
235 | *ret_type = actual_type; | ||
236 | |||
237 | spin_lock(&memtype_lock); | ||
238 | |||
239 | /* Search for existing mapping that overlaps the current range */ | ||
240 | list_for_each_entry(parse, &memtype_list, nd) { | ||
241 | struct memtype *saved_ptr; | ||
242 | |||
243 | if (parse->start >= end) { | ||
244 | printk("New Entry\n"); | ||
245 | list_add(&new_entry->nd, parse->nd.prev); | ||
246 | new_entry = NULL; | ||
247 | break; | ||
248 | } | ||
249 | |||
250 | if (start <= parse->start && end >= parse->start) { | ||
251 | if (actual_type != parse->type && ret_type) { | ||
252 | actual_type = parse->type; | ||
253 | *ret_type = actual_type; | ||
254 | new_entry->type = actual_type; | ||
255 | } | ||
256 | |||
257 | if (actual_type != parse->type) { | ||
258 | printk( | ||
259 | KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n", | ||
260 | current->comm, current->pid, | ||
261 | start, end, | ||
262 | cattr_name(actual_type), | ||
263 | cattr_name(parse->type)); | ||
264 | err = -EBUSY; | ||
265 | break; | ||
266 | } | ||
267 | |||
268 | saved_ptr = parse; | ||
269 | /* | ||
270 | * Check to see whether the request overlaps more | ||
271 | * than one entry in the list | ||
272 | */ | ||
273 | list_for_each_entry_continue(parse, &memtype_list, nd) { | ||
274 | if (end <= parse->start) { | ||
275 | break; | ||
276 | } | ||
277 | |||
278 | if (actual_type != parse->type) { | ||
279 | printk( | ||
280 | KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n", | ||
281 | current->comm, current->pid, | ||
282 | start, end, | ||
283 | cattr_name(actual_type), | ||
284 | cattr_name(parse->type)); | ||
285 | err = -EBUSY; | ||
286 | break; | ||
287 | } | ||
288 | } | ||
289 | |||
290 | if (err) { | ||
291 | break; | ||
292 | } | ||
293 | |||
294 | printk("Overlap at 0x%Lx-0x%Lx\n", | ||
295 | saved_ptr->start, saved_ptr->end); | ||
296 | /* No conflict. Go ahead and add this new entry */ | ||
297 | list_add(&new_entry->nd, saved_ptr->nd.prev); | ||
298 | new_entry = NULL; | ||
299 | break; | ||
300 | } | ||
301 | |||
302 | if (start < parse->end) { | ||
303 | if (actual_type != parse->type && ret_type) { | ||
304 | actual_type = parse->type; | ||
305 | *ret_type = actual_type; | ||
306 | new_entry->type = actual_type; | ||
307 | } | ||
308 | |||
309 | if (actual_type != parse->type) { | ||
310 | printk( | ||
311 | KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n", | ||
312 | current->comm, current->pid, | ||
313 | start, end, | ||
314 | cattr_name(actual_type), | ||
315 | cattr_name(parse->type)); | ||
316 | err = -EBUSY; | ||
317 | break; | ||
318 | } | ||
319 | |||
320 | saved_ptr = parse; | ||
321 | /* | ||
322 | * Check to see whether the request overlaps more | ||
323 | * than one entry in the list | ||
324 | */ | ||
325 | list_for_each_entry_continue(parse, &memtype_list, nd) { | ||
326 | if (end <= parse->start) { | ||
327 | break; | ||
328 | } | ||
329 | |||
330 | if (actual_type != parse->type) { | ||
331 | printk( | ||
332 | KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n", | ||
333 | current->comm, current->pid, | ||
334 | start, end, | ||
335 | cattr_name(actual_type), | ||
336 | cattr_name(parse->type)); | ||
337 | err = -EBUSY; | ||
338 | break; | ||
339 | } | ||
340 | } | ||
341 | |||
342 | if (err) { | ||
343 | break; | ||
344 | } | ||
345 | |||
346 | printk("Overlap at 0x%Lx-0x%Lx\n", | ||
347 | saved_ptr->start, saved_ptr->end); | ||
348 | /* No conflict. Go ahead and add this new entry */ | ||
349 | list_add(&new_entry->nd, &saved_ptr->nd); | ||
350 | new_entry = NULL; | ||
351 | break; | ||
352 | } | ||
353 | } | ||
354 | |||
355 | if (err) { | ||
356 | printk( | ||
357 | "reserve_memtype failed 0x%Lx-0x%Lx, track %s, req %s\n", | ||
358 | start, end, cattr_name(new_entry->type), | ||
359 | cattr_name(req_type)); | ||
360 | kfree(new_entry); | ||
361 | spin_unlock(&memtype_lock); | ||
362 | return err; | ||
363 | } | ||
364 | |||
365 | if (new_entry) { | ||
366 | /* No conflict. Not yet added to the list. Add to the tail */ | ||
367 | list_add_tail(&new_entry->nd, &memtype_list); | ||
368 | printk("New Entry\n"); | ||
369 | } | ||
370 | |||
371 | if (ret_type) { | ||
372 | printk( | ||
373 | "reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n", | ||
374 | start, end, cattr_name(actual_type), | ||
375 | cattr_name(req_type), cattr_name(*ret_type)); | ||
376 | } else { | ||
377 | printk( | ||
378 | "reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s\n", | ||
379 | start, end, cattr_name(actual_type), | ||
380 | cattr_name(req_type)); | ||
381 | } | ||
382 | |||
383 | spin_unlock(&memtype_lock); | ||
384 | return err; | ||
385 | } | ||
386 | |||
387 | int free_memtype(u64 start, u64 end) | ||
388 | { | ||
389 | struct memtype *ml; | ||
390 | int err = -EINVAL; | ||
391 | |||
392 | /* Only track when pat_wc_enabled */ | ||
393 | if (!pat_wc_enabled) { | ||
394 | return 0; | ||
395 | } | ||
396 | |||
397 | /* Low ISA region is always mapped WB. No need to track */ | ||
398 | if (start >= ISA_START_ADDRESS && end <= ISA_END_ADDRESS) { | ||
399 | return 0; | ||
400 | } | ||
401 | |||
402 | spin_lock(&memtype_lock); | ||
403 | list_for_each_entry(ml, &memtype_list, nd) { | ||
404 | if (ml->start == start && ml->end == end) { | ||
405 | list_del(&ml->nd); | ||
406 | kfree(ml); | ||
407 | err = 0; | ||
408 | break; | ||
409 | } | ||
410 | } | ||
411 | spin_unlock(&memtype_lock); | ||
412 | |||
413 | if (err) { | ||
414 | printk(KERN_DEBUG "%s:%d freeing invalid memtype %Lx-%Lx\n", | ||
415 | current->comm, current->pid, start, end); | ||
416 | } | ||
417 | |||
418 | printk("free_memtype request 0x%Lx-0x%Lx\n", start, end); | ||
419 | return err; | ||
420 | } | ||
421 | |||
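From the caller's side, the contract of the two entry points above is: reserve before mapping, honor the possibly-downgraded type handed back through ret_type, and free the exact same [start, end) range on teardown, since free_memtype() only matches entries whose start and end are both equal. A sketch of such a caller (track_aperture, phys, and size are hypothetical names, not from the patch):

	static int track_aperture(u64 phys, unsigned long size)
	{
		unsigned long got;
		int ret;

		ret = reserve_memtype(phys, phys + size, _PAGE_CACHE_WC, &got);
		if (ret)
			return ret;	/* conflicts with an existing range */

		if (got != _PAGE_CACHE_WC)
			printk(KERN_INFO "WC refused, fell back to %#lx\n", got);

		/* ... establish and use the mapping with type 'got' ... */

		free_memtype(phys, phys + size);	/* exact range, as reserved */
		return 0;
	}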
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index 2f9e9afcb9f4..3165ec0672bd 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c | |||
@@ -36,7 +36,6 @@ void show_mem(void) | |||
36 | 36 | ||
37 | printk(KERN_INFO "Mem-info:\n"); | 37 | printk(KERN_INFO "Mem-info:\n"); |
38 | show_free_areas(); | 38 | show_free_areas(); |
39 | printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); | ||
40 | for_each_online_pgdat(pgdat) { | 39 | for_each_online_pgdat(pgdat) { |
41 | pgdat_resize_lock(pgdat, &flags); | 40 | pgdat_resize_lock(pgdat, &flags); |
42 | for (i = 0; i < pgdat->node_spanned_pages; ++i) { | 41 | for (i = 0; i < pgdat->node_spanned_pages; ++i) { |
@@ -381,3 +380,10 @@ void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) | |||
381 | } | 380 | } |
382 | 381 | ||
383 | #endif | 382 | #endif |
383 | |||
384 | int pmd_bad(pmd_t pmd) | ||
385 | { | ||
386 | WARN_ON_ONCE(pmd_bad_v1(pmd) != pmd_bad_v2(pmd)); | ||
387 | |||
388 | return pmd_bad_v1(pmd); | ||
389 | } | ||
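The pmd_bad() wrapper above is a transition trick: run the old and new predicates side by side, warn once if they ever disagree, and keep returning the old result until the rewrite has earned trust. The same pattern in miniature, with toy old_impl/new_impl standing in for pmd_bad_v1/pmd_bad_v2 (they genuinely disagree on negative inputs, which is the point of the check):

	#include <stdio.h>

	static int old_impl(int x) { return x & 1; }	/* trusted behavior */
	static int new_impl(int x) { return x % 2; }	/* candidate rewrite */

	static int checked(int x)
	{
		int o = old_impl(x), n = new_impl(x);

		if (o != n)	/* the WARN_ON_ONCE() analogue */
			fprintf(stderr, "mismatch at %d: %d vs %d\n", x, o, n);
		return o;	/* keep trusting the old result */
	}

	int main(void)
	{
		return checked(3) - checked(-3);	/* -3 trips the mismatch */
	}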
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 845001c617cc..1bae9c855ceb 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <asm/proto.h> | 20 | #include <asm/proto.h> |
21 | #include <asm/numa.h> | 21 | #include <asm/numa.h> |
22 | #include <asm/e820.h> | 22 | #include <asm/e820.h> |
23 | #include <asm/genapic.h> | ||
23 | 24 | ||
24 | int acpi_numa __initdata; | 25 | int acpi_numa __initdata; |
25 | 26 | ||
@@ -132,7 +133,6 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) | |||
132 | int pxm, node; | 133 | int pxm, node; |
133 | int apic_id; | 134 | int apic_id; |
134 | 135 | ||
135 | apic_id = pa->apic_id; | ||
136 | if (srat_disabled()) | 136 | if (srat_disabled()) |
137 | return; | 137 | return; |
138 | if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) { | 138 | if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) { |
@@ -148,6 +148,11 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) | |||
148 | bad_srat(); | 148 | bad_srat(); |
149 | return; | 149 | return; |
150 | } | 150 | } |
151 | |||
152 | if (is_uv_system()) | ||
153 | apic_id = (pa->apic_id << 8) | pa->local_sapic_eid; | ||
154 | else | ||
155 | apic_id = pa->apic_id; | ||
151 | apicid_to_node[apic_id] = node; | 156 | apicid_to_node[apic_id] = node; |
152 | acpi_numa = 1; | 157 | acpi_numa = 1; |
153 | printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n", | 158 | printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n", |
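On UV systems the node-visible APIC id is wider than the 8-bit value in the SRAT entry, so the hunk above composes it from two fields: the APIC id in the high byte and the local SAPIC EID in the low byte. A self-contained illustration of the packing (the values are made up):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t apic_id = 0x12, sapic_eid = 0x34;
		uint32_t uv_id = ((uint32_t)apic_id << 8) | sapic_eid;

		printf("0x%x\n", uv_id);	/* prints 0x1234 */
		return 0;
	}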
diff --git a/arch/x86/oprofile/init.c b/arch/x86/oprofile/init.c index 5341d481d92f..cdfe4c54deca 100644 --- a/arch/x86/oprofile/init.c +++ b/arch/x86/oprofile/init.c | |||
@@ -10,18 +10,19 @@ | |||
10 | #include <linux/oprofile.h> | 10 | #include <linux/oprofile.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | 13 | ||
14 | /* We support CPUs that have performance counters like the Pentium Pro | 14 | /* |
15 | * We support CPUs that have performance counters like the Pentium Pro | ||
15 | * with the NMI mode driver. | 16 | * with the NMI mode driver. |
16 | */ | 17 | */ |
17 | 18 | ||
18 | extern int op_nmi_init(struct oprofile_operations * ops); | 19 | extern int op_nmi_init(struct oprofile_operations *ops); |
19 | extern int op_nmi_timer_init(struct oprofile_operations * ops); | 20 | extern int op_nmi_timer_init(struct oprofile_operations *ops); |
20 | extern void op_nmi_exit(void); | 21 | extern void op_nmi_exit(void); |
21 | extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth); | 22 | extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth); |
22 | 23 | ||
23 | 24 | ||
24 | int __init oprofile_arch_init(struct oprofile_operations * ops) | 25 | int __init oprofile_arch_init(struct oprofile_operations *ops) |
25 | { | 26 | { |
26 | int ret; | 27 | int ret; |
27 | 28 | ||
diff --git a/arch/x86/oprofile/nmi_timer_int.c b/arch/x86/oprofile/nmi_timer_int.c index 1418e36ae7ab..e3ecb71b5790 100644 --- a/arch/x86/oprofile/nmi_timer_int.c +++ b/arch/x86/oprofile/nmi_timer_int.c | |||
@@ -17,14 +17,14 @@ | |||
17 | #include <asm/nmi.h> | 17 | #include <asm/nmi.h> |
18 | #include <asm/apic.h> | 18 | #include <asm/apic.h> |
19 | #include <asm/ptrace.h> | 19 | #include <asm/ptrace.h> |
20 | 20 | ||
21 | static int profile_timer_exceptions_notify(struct notifier_block *self, | 21 | static int profile_timer_exceptions_notify(struct notifier_block *self, |
22 | unsigned long val, void *data) | 22 | unsigned long val, void *data) |
23 | { | 23 | { |
24 | struct die_args *args = (struct die_args *)data; | 24 | struct die_args *args = (struct die_args *)data; |
25 | int ret = NOTIFY_DONE; | 25 | int ret = NOTIFY_DONE; |
26 | 26 | ||
27 | switch(val) { | 27 | switch (val) { |
28 | case DIE_NMI: | 28 | case DIE_NMI: |
29 | oprofile_add_sample(args->regs, 0); | 29 | oprofile_add_sample(args->regs, 0); |
30 | ret = NOTIFY_STOP; | 30 | ret = NOTIFY_STOP; |
@@ -56,7 +56,7 @@ static void timer_stop(void) | |||
56 | } | 56 | } |
57 | 57 | ||
58 | 58 | ||
59 | int __init op_nmi_timer_init(struct oprofile_operations * ops) | 59 | int __init op_nmi_timer_init(struct oprofile_operations *ops) |
60 | { | 60 | { |
61 | if ((nmi_watchdog != NMI_IO_APIC) || (atomic_read(&nmi_active) <= 0)) | 61 | if ((nmi_watchdog != NMI_IO_APIC) || (atomic_read(&nmi_active) <= 0)) |
62 | return -ENODEV; | 62 | return -ENODEV; |
diff --git a/arch/x86/oprofile/op_model_athlon.c b/arch/x86/oprofile/op_model_athlon.c index c3ee43333f26..3d534879a9dc 100644 --- a/arch/x86/oprofile/op_model_athlon.c +++ b/arch/x86/oprofile/op_model_athlon.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /** | 1 | /* |
2 | * @file op_model_athlon.h | 2 | * @file op_model_athlon.h |
3 | * athlon / K7 / K8 / Family 10h model-specific MSR operations | 3 | * athlon / K7 / K8 / Family 10h model-specific MSR operations |
4 | * | 4 | * |
@@ -14,28 +14,28 @@ | |||
14 | #include <asm/ptrace.h> | 14 | #include <asm/ptrace.h> |
15 | #include <asm/msr.h> | 15 | #include <asm/msr.h> |
16 | #include <asm/nmi.h> | 16 | #include <asm/nmi.h> |
17 | 17 | ||
18 | #include "op_x86_model.h" | 18 | #include "op_x86_model.h" |
19 | #include "op_counter.h" | 19 | #include "op_counter.h" |
20 | 20 | ||
21 | #define NUM_COUNTERS 4 | 21 | #define NUM_COUNTERS 4 |
22 | #define NUM_CONTROLS 4 | 22 | #define NUM_CONTROLS 4 |
23 | 23 | ||
24 | #define CTR_IS_RESERVED(msrs,c) (msrs->counters[(c)].addr ? 1 : 0) | 24 | #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) |
25 | #define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters[(c)].addr, (l), (h));} while (0) | 25 | #define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) |
26 | #define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1);} while (0) | 26 | #define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0) |
27 | #define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) | 27 | #define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) |
28 | 28 | ||
29 | #define CTRL_IS_RESERVED(msrs,c) (msrs->controls[(c)].addr ? 1 : 0) | 29 | #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) |
30 | #define CTRL_READ(l,h,msrs,c) do {rdmsr(msrs->controls[(c)].addr, (l), (h));} while (0) | 30 | #define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0) |
31 | #define CTRL_WRITE(l,h,msrs,c) do {wrmsr(msrs->controls[(c)].addr, (l), (h));} while (0) | 31 | #define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0) |
32 | #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) | 32 | #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) |
33 | #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) | 33 | #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) |
34 | #define CTRL_CLEAR_LO(x) (x &= (1<<21)) | 34 | #define CTRL_CLEAR_LO(x) (x &= (1<<21)) |
35 | #define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0) | 35 | #define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0) |
36 | #define CTRL_SET_ENABLE(val) (val |= 1<<20) | 36 | #define CTRL_SET_ENABLE(val) (val |= 1<<20) |
37 | #define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16)) | 37 | #define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16)) |
38 | #define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17)) | 38 | #define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17)) |
39 | #define CTRL_SET_UM(val, m) (val |= (m << 8)) | 39 | #define CTRL_SET_UM(val, m) (val |= (m << 8)) |
40 | #define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff)) | 40 | #define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff)) |
41 | #define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf)) | 41 | #define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf)) |
@@ -43,19 +43,19 @@ | |||
43 | #define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8)) | 43 | #define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8)) |
44 | 44 | ||
45 | static unsigned long reset_value[NUM_COUNTERS]; | 45 | static unsigned long reset_value[NUM_COUNTERS]; |
46 | 46 | ||
47 | static void athlon_fill_in_addresses(struct op_msrs * const msrs) | 47 | static void athlon_fill_in_addresses(struct op_msrs * const msrs) |
48 | { | 48 | { |
49 | int i; | 49 | int i; |
50 | 50 | ||
51 | for (i=0; i < NUM_COUNTERS; i++) { | 51 | for (i = 0; i < NUM_COUNTERS; i++) { |
52 | if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i)) | 52 | if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i)) |
53 | msrs->counters[i].addr = MSR_K7_PERFCTR0 + i; | 53 | msrs->counters[i].addr = MSR_K7_PERFCTR0 + i; |
54 | else | 54 | else |
55 | msrs->counters[i].addr = 0; | 55 | msrs->counters[i].addr = 0; |
56 | } | 56 | } |
57 | 57 | ||
58 | for (i=0; i < NUM_CONTROLS; i++) { | 58 | for (i = 0; i < NUM_CONTROLS; i++) { |
59 | if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) | 59 | if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) |
60 | msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i; | 60 | msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i; |
61 | else | 61 | else |
@@ -63,15 +63,15 @@ static void athlon_fill_in_addresses(struct op_msrs * const msrs) | |||
63 | } | 63 | } |
64 | } | 64 | } |
65 | 65 | ||
66 | 66 | ||
67 | static void athlon_setup_ctrs(struct op_msrs const * const msrs) | 67 | static void athlon_setup_ctrs(struct op_msrs const * const msrs) |
68 | { | 68 | { |
69 | unsigned int low, high; | 69 | unsigned int low, high; |
70 | int i; | 70 | int i; |
71 | 71 | ||
72 | /* clear all counters */ | 72 | /* clear all counters */ |
73 | for (i = 0 ; i < NUM_CONTROLS; ++i) { | 73 | for (i = 0 ; i < NUM_CONTROLS; ++i) { |
74 | if (unlikely(!CTRL_IS_RESERVED(msrs,i))) | 74 | if (unlikely(!CTRL_IS_RESERVED(msrs, i))) |
75 | continue; | 75 | continue; |
76 | CTRL_READ(low, high, msrs, i); | 76 | CTRL_READ(low, high, msrs, i); |
77 | CTRL_CLEAR_LO(low); | 77 | CTRL_CLEAR_LO(low); |
@@ -81,14 +81,14 @@ static void athlon_setup_ctrs(struct op_msrs const * const msrs) | |||
81 | 81 | ||
82 | /* avoid a false detection of ctr overflows in NMI handler */ | 82 | /* avoid a false detection of ctr overflows in NMI handler */ |
83 | for (i = 0; i < NUM_COUNTERS; ++i) { | 83 | for (i = 0; i < NUM_COUNTERS; ++i) { |
84 | if (unlikely(!CTR_IS_RESERVED(msrs,i))) | 84 | if (unlikely(!CTR_IS_RESERVED(msrs, i))) |
85 | continue; | 85 | continue; |
86 | CTR_WRITE(1, msrs, i); | 86 | CTR_WRITE(1, msrs, i); |
87 | } | 87 | } |
88 | 88 | ||
89 | /* enable active counters */ | 89 | /* enable active counters */ |
90 | for (i = 0; i < NUM_COUNTERS; ++i) { | 90 | for (i = 0; i < NUM_COUNTERS; ++i) { |
91 | if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs,i))) { | 91 | if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { |
92 | reset_value[i] = counter_config[i].count; | 92 | reset_value[i] = counter_config[i].count; |
93 | 93 | ||
94 | CTR_WRITE(counter_config[i].count, msrs, i); | 94 | CTR_WRITE(counter_config[i].count, msrs, i); |
@@ -112,7 +112,7 @@ static void athlon_setup_ctrs(struct op_msrs const * const msrs) | |||
112 | } | 112 | } |
113 | } | 113 | } |
114 | 114 | ||
115 | 115 | ||
116 | static int athlon_check_ctrs(struct pt_regs * const regs, | 116 | static int athlon_check_ctrs(struct pt_regs * const regs, |
117 | struct op_msrs const * const msrs) | 117 | struct op_msrs const * const msrs) |
118 | { | 118 | { |
@@ -133,7 +133,7 @@ static int athlon_check_ctrs(struct pt_regs * const regs, | |||
133 | return 1; | 133 | return 1; |
134 | } | 134 | } |
135 | 135 | ||
136 | 136 | ||
137 | static void athlon_start(struct op_msrs const * const msrs) | 137 | static void athlon_start(struct op_msrs const * const msrs) |
138 | { | 138 | { |
139 | unsigned int low, high; | 139 | unsigned int low, high; |
@@ -150,7 +150,7 @@ static void athlon_start(struct op_msrs const * const msrs) | |||
150 | 150 | ||
151 | static void athlon_stop(struct op_msrs const * const msrs) | 151 | static void athlon_stop(struct op_msrs const * const msrs) |
152 | { | 152 | { |
153 | unsigned int low,high; | 153 | unsigned int low, high; |
154 | int i; | 154 | int i; |
155 | 155 | ||
156 | /* Subtle: stop on all counters to avoid race with | 156 | /* Subtle: stop on all counters to avoid race with |
@@ -169,11 +169,11 @@ static void athlon_shutdown(struct op_msrs const * const msrs) | |||
169 | int i; | 169 | int i; |
170 | 170 | ||
171 | for (i = 0 ; i < NUM_COUNTERS ; ++i) { | 171 | for (i = 0 ; i < NUM_COUNTERS ; ++i) { |
172 | if (CTR_IS_RESERVED(msrs,i)) | 172 | if (CTR_IS_RESERVED(msrs, i)) |
173 | release_perfctr_nmi(MSR_K7_PERFCTR0 + i); | 173 | release_perfctr_nmi(MSR_K7_PERFCTR0 + i); |
174 | } | 174 | } |
175 | for (i = 0 ; i < NUM_CONTROLS ; ++i) { | 175 | for (i = 0 ; i < NUM_CONTROLS ; ++i) { |
176 | if (CTRL_IS_RESERVED(msrs,i)) | 176 | if (CTRL_IS_RESERVED(msrs, i)) |
177 | release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); | 177 | release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); |
178 | } | 178 | } |
179 | } | 179 | } |
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index c554f52cb808..eff431f6c57b 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /** | 1 | /* |
2 | * @file op_model_ppro.h | 2 | * @file op_model_ppro.h |
3 | * pentium pro / P6 model-specific MSR operations | 3 | * pentium pro / P6 model-specific MSR operations |
4 | * | 4 | * |
@@ -15,45 +15,45 @@ | |||
15 | #include <asm/msr.h> | 15 | #include <asm/msr.h> |
16 | #include <asm/apic.h> | 16 | #include <asm/apic.h> |
17 | #include <asm/nmi.h> | 17 | #include <asm/nmi.h> |
18 | 18 | ||
19 | #include "op_x86_model.h" | 19 | #include "op_x86_model.h" |
20 | #include "op_counter.h" | 20 | #include "op_counter.h" |
21 | 21 | ||
22 | #define NUM_COUNTERS 2 | 22 | #define NUM_COUNTERS 2 |
23 | #define NUM_CONTROLS 2 | 23 | #define NUM_CONTROLS 2 |
24 | 24 | ||
25 | #define CTR_IS_RESERVED(msrs,c) (msrs->counters[(c)].addr ? 1 : 0) | 25 | #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) |
26 | #define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters[(c)].addr, (l), (h));} while (0) | 26 | #define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) |
27 | #define CTR_32BIT_WRITE(l,msrs,c) \ | 27 | #define CTR_32BIT_WRITE(l, msrs, c) \ |
28 | do {wrmsr(msrs->counters[(c)].addr, -(u32)(l), 0);} while (0) | 28 | do {wrmsr(msrs->counters[(c)].addr, -(u32)(l), 0); } while (0) |
29 | #define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) | 29 | #define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) |
30 | 30 | ||
31 | #define CTRL_IS_RESERVED(msrs,c) (msrs->controls[(c)].addr ? 1 : 0) | 31 | #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) |
32 | #define CTRL_READ(l,h,msrs,c) do {rdmsr((msrs->controls[(c)].addr), (l), (h));} while (0) | 32 | #define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) |
33 | #define CTRL_WRITE(l,h,msrs,c) do {wrmsr((msrs->controls[(c)].addr), (l), (h));} while (0) | 33 | #define CTRL_WRITE(l, h, msrs, c) do {wrmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) |
34 | #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) | 34 | #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) |
35 | #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) | 35 | #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) |
36 | #define CTRL_CLEAR(x) (x &= (1<<21)) | 36 | #define CTRL_CLEAR(x) (x &= (1<<21)) |
37 | #define CTRL_SET_ENABLE(val) (val |= 1<<20) | 37 | #define CTRL_SET_ENABLE(val) (val |= 1<<20) |
38 | #define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16)) | 38 | #define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16)) |
39 | #define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17)) | 39 | #define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17)) |
40 | #define CTRL_SET_UM(val, m) (val |= (m << 8)) | 40 | #define CTRL_SET_UM(val, m) (val |= (m << 8)) |
41 | #define CTRL_SET_EVENT(val, e) (val |= e) | 41 | #define CTRL_SET_EVENT(val, e) (val |= e) |
42 | 42 | ||
43 | static unsigned long reset_value[NUM_COUNTERS]; | 43 | static unsigned long reset_value[NUM_COUNTERS]; |
44 | 44 | ||
45 | static void ppro_fill_in_addresses(struct op_msrs * const msrs) | 45 | static void ppro_fill_in_addresses(struct op_msrs * const msrs) |
46 | { | 46 | { |
47 | int i; | 47 | int i; |
48 | 48 | ||
49 | for (i=0; i < NUM_COUNTERS; i++) { | 49 | for (i = 0; i < NUM_COUNTERS; i++) { |
50 | if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i)) | 50 | if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i)) |
51 | msrs->counters[i].addr = MSR_P6_PERFCTR0 + i; | 51 | msrs->counters[i].addr = MSR_P6_PERFCTR0 + i; |
52 | else | 52 | else |
53 | msrs->counters[i].addr = 0; | 53 | msrs->counters[i].addr = 0; |
54 | } | 54 | } |
55 | 55 | ||
56 | for (i=0; i < NUM_CONTROLS; i++) { | 56 | for (i = 0; i < NUM_CONTROLS; i++) { |
57 | if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) | 57 | if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) |
58 | msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i; | 58 | msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i; |
59 | else | 59 | else |
@@ -69,23 +69,23 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs) | |||
69 | 69 | ||
70 | /* clear all counters */ | 70 | /* clear all counters */ |
71 | for (i = 0 ; i < NUM_CONTROLS; ++i) { | 71 | for (i = 0 ; i < NUM_CONTROLS; ++i) { |
72 | if (unlikely(!CTRL_IS_RESERVED(msrs,i))) | 72 | if (unlikely(!CTRL_IS_RESERVED(msrs, i))) |
73 | continue; | 73 | continue; |
74 | CTRL_READ(low, high, msrs, i); | 74 | CTRL_READ(low, high, msrs, i); |
75 | CTRL_CLEAR(low); | 75 | CTRL_CLEAR(low); |
76 | CTRL_WRITE(low, high, msrs, i); | 76 | CTRL_WRITE(low, high, msrs, i); |
77 | } | 77 | } |
78 | 78 | ||
79 | /* avoid a false detection of ctr overflows in NMI handler */ | 79 | /* avoid a false detection of ctr overflows in NMI handler */ |
80 | for (i = 0; i < NUM_COUNTERS; ++i) { | 80 | for (i = 0; i < NUM_COUNTERS; ++i) { |
81 | if (unlikely(!CTR_IS_RESERVED(msrs,i))) | 81 | if (unlikely(!CTR_IS_RESERVED(msrs, i))) |
82 | continue; | 82 | continue; |
83 | CTR_32BIT_WRITE(1, msrs, i); | 83 | CTR_32BIT_WRITE(1, msrs, i); |
84 | } | 84 | } |
85 | 85 | ||
86 | /* enable active counters */ | 86 | /* enable active counters */ |
87 | for (i = 0; i < NUM_COUNTERS; ++i) { | 87 | for (i = 0; i < NUM_COUNTERS; ++i) { |
88 | if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs,i))) { | 88 | if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { |
89 | reset_value[i] = counter_config[i].count; | 89 | reset_value[i] = counter_config[i].count; |
90 | 90 | ||
91 | CTR_32BIT_WRITE(counter_config[i].count, msrs, i); | 91 | CTR_32BIT_WRITE(counter_config[i].count, msrs, i); |
@@ -104,13 +104,13 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs) | |||
104 | } | 104 | } |
105 | } | 105 | } |
106 | 106 | ||
107 | 107 | ||
108 | static int ppro_check_ctrs(struct pt_regs * const regs, | 108 | static int ppro_check_ctrs(struct pt_regs * const regs, |
109 | struct op_msrs const * const msrs) | 109 | struct op_msrs const * const msrs) |
110 | { | 110 | { |
111 | unsigned int low, high; | 111 | unsigned int low, high; |
112 | int i; | 112 | int i; |
113 | 113 | ||
114 | for (i = 0 ; i < NUM_COUNTERS; ++i) { | 114 | for (i = 0 ; i < NUM_COUNTERS; ++i) { |
115 | if (!reset_value[i]) | 115 | if (!reset_value[i]) |
116 | continue; | 116 | continue; |
@@ -135,10 +135,10 @@ static int ppro_check_ctrs(struct pt_regs * const regs, | |||
135 | return 1; | 135 | return 1; |
136 | } | 136 | } |
137 | 137 | ||
138 | 138 | ||
139 | static void ppro_start(struct op_msrs const * const msrs) | 139 | static void ppro_start(struct op_msrs const * const msrs) |
140 | { | 140 | { |
141 | unsigned int low,high; | 141 | unsigned int low, high; |
142 | int i; | 142 | int i; |
143 | 143 | ||
144 | for (i = 0; i < NUM_COUNTERS; ++i) { | 144 | for (i = 0; i < NUM_COUNTERS; ++i) { |
@@ -153,7 +153,7 @@ static void ppro_start(struct op_msrs const * const msrs) | |||
153 | 153 | ||
154 | static void ppro_stop(struct op_msrs const * const msrs) | 154 | static void ppro_stop(struct op_msrs const * const msrs) |
155 | { | 155 | { |
156 | unsigned int low,high; | 156 | unsigned int low, high; |
157 | int i; | 157 | int i; |
158 | 158 | ||
159 | for (i = 0; i < NUM_COUNTERS; ++i) { | 159 | for (i = 0; i < NUM_COUNTERS; ++i) { |
@@ -170,11 +170,11 @@ static void ppro_shutdown(struct op_msrs const * const msrs) | |||
170 | int i; | 170 | int i; |
171 | 171 | ||
172 | for (i = 0 ; i < NUM_COUNTERS ; ++i) { | 172 | for (i = 0 ; i < NUM_COUNTERS ; ++i) { |
173 | if (CTR_IS_RESERVED(msrs,i)) | 173 | if (CTR_IS_RESERVED(msrs, i)) |
174 | release_perfctr_nmi(MSR_P6_PERFCTR0 + i); | 174 | release_perfctr_nmi(MSR_P6_PERFCTR0 + i); |
175 | } | 175 | } |
176 | for (i = 0 ; i < NUM_CONTROLS ; ++i) { | 176 | for (i = 0 ; i < NUM_CONTROLS ; ++i) { |
177 | if (CTRL_IS_RESERVED(msrs,i)) | 177 | if (CTRL_IS_RESERVED(msrs, i)) |
178 | release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); | 178 | release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); |
179 | } | 179 | } |
180 | } | 180 | } |
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 103b9dff1213..2ead72363077 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
@@ -30,6 +30,9 @@ | |||
30 | #include <linux/init.h> | 30 | #include <linux/init.h> |
31 | #include <linux/ioport.h> | 31 | #include <linux/ioport.h> |
32 | #include <linux/errno.h> | 32 | #include <linux/errno.h> |
33 | #include <linux/bootmem.h> | ||
34 | |||
35 | #include <asm/pat.h> | ||
33 | 36 | ||
34 | #include "pci.h" | 37 | #include "pci.h" |
35 | 38 | ||
@@ -297,10 +300,35 @@ void pcibios_set_master(struct pci_dev *dev) | |||
297 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); | 300 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); |
298 | } | 301 | } |
299 | 302 | ||
303 | static void pci_unmap_page_range(struct vm_area_struct *vma) | ||
304 | { | ||
305 | u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT; | ||
306 | free_memtype(addr, addr + vma->vm_end - vma->vm_start); | ||
307 | } | ||
308 | |||
309 | static void pci_track_mmap_page_range(struct vm_area_struct *vma) | ||
310 | { | ||
311 | u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT; | ||
312 | unsigned long flags = pgprot_val(vma->vm_page_prot) | ||
313 | & _PAGE_CACHE_MASK; | ||
314 | |||
315 | reserve_memtype(addr, addr + vma->vm_end - vma->vm_start, flags, NULL); | ||
316 | } | ||
317 | |||
318 | static struct vm_operations_struct pci_mmap_ops = { | ||
319 | .open = pci_track_mmap_page_range, | ||
320 | .close = pci_unmap_page_range, | ||
321 | }; | ||
322 | |||
300 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | 323 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, |
301 | enum pci_mmap_state mmap_state, int write_combine) | 324 | enum pci_mmap_state mmap_state, int write_combine) |
302 | { | 325 | { |
303 | unsigned long prot; | 326 | unsigned long prot; |
327 | u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT; | ||
328 | unsigned long len = vma->vm_end - vma->vm_start; | ||
329 | unsigned long flags; | ||
330 | unsigned long new_flags; | ||
331 | int retval; | ||
304 | 332 | ||
305 | /* I/O space cannot be accessed via normal processor loads and | 333 | /* I/O space cannot be accessed via normal processor loads and |
306 | * stores on this platform. | 334 | * stores on this platform. |
@@ -308,21 +336,50 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | |||
308 | if (mmap_state == pci_mmap_io) | 336 | if (mmap_state == pci_mmap_io) |
309 | return -EINVAL; | 337 | return -EINVAL; |
310 | 338 | ||
311 | /* Leave vm_pgoff as-is, the PCI space address is the physical | ||
312 | * address on this platform. | ||
313 | */ | ||
314 | prot = pgprot_val(vma->vm_page_prot); | 339 | prot = pgprot_val(vma->vm_page_prot); |
315 | if (boot_cpu_data.x86 > 3) | 340 | if (pat_wc_enabled && write_combine) |
316 | prot |= _PAGE_PCD | _PAGE_PWT; | 341 | prot |= _PAGE_CACHE_WC; |
342 | else if (boot_cpu_data.x86 > 3) | ||
343 | prot |= _PAGE_CACHE_UC; | ||
344 | |||
317 | vma->vm_page_prot = __pgprot(prot); | 345 | vma->vm_page_prot = __pgprot(prot); |
318 | 346 | ||
319 | /* Write-combine setting is ignored, it is changed via the mtrr | 347 | flags = pgprot_val(vma->vm_page_prot) & _PAGE_CACHE_MASK; |
320 | * interfaces on this platform. | 348 | retval = reserve_memtype(addr, addr + len, flags, &new_flags); |
321 | */ | 349 | if (retval) |
350 | return retval; | ||
351 | |||
352 | if (flags != new_flags) { | ||
353 | /* | ||
354 | * Do not fall back to certain memory types when a | ||
355 | * specific type was requested: | ||
356 | * - request is uncached, return cannot be write-back | ||
357 | * - request is uncached, return cannot be write-combine | ||
358 | * - request is write-combine, return cannot be write-back | ||
359 | */ | ||
360 | if ((flags == _PAGE_CACHE_UC && | ||
361 | (new_flags == _PAGE_CACHE_WB || | ||
362 | new_flags == _PAGE_CACHE_WC)) || | ||
363 | (flags == _PAGE_CACHE_WC && | ||
364 | new_flags == _PAGE_CACHE_WB)) { | ||
365 | free_memtype(addr, addr+len); | ||
366 | return -EINVAL; | ||
367 | } | ||
368 | flags = new_flags; | ||
369 | } | ||
370 | |||
371 | if (vma->vm_pgoff <= max_pfn_mapped && | ||
372 | ioremap_change_attr((unsigned long)__va(addr), len, flags)) { | ||
373 | free_memtype(addr, addr + len); | ||
374 | return -EINVAL; | ||
375 | } | ||
376 | |||
322 | if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | 377 | if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, |
323 | vma->vm_end - vma->vm_start, | 378 | vma->vm_end - vma->vm_start, |
324 | vma->vm_page_prot)) | 379 | vma->vm_page_prot)) |
325 | return -EAGAIN; | 380 | return -EAGAIN; |
326 | 381 | ||
382 | vma->vm_ops = &pci_mmap_ops; | ||
383 | |||
327 | return 0; | 384 | return 0; |
328 | } | 385 | } |
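The fallback test in the middle of pci_mmap_page_range() encodes a one-way lattice: a reservation may come back stricter than requested, never weaker. Restated as a standalone predicate with illustrative enum values (not the kernel's _PAGE_CACHE_* constants; UC- is omitted for brevity):

	enum ct { CT_WB, CT_WC, CT_UC };

	static int fallback_ok(enum ct want, enum ct got)
	{
		if (want == got)
			return 1;
		if (want == CT_UC)	/* UC must not weaken to WB or WC */
			return got != CT_WB && got != CT_WC;
		if (want == CT_WC)	/* WC must not weaken to WB */
			return got != CT_WB;
		return 1;		/* a WB request accepts anything stricter */
	}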
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index a8715861877e..579745ca6b66 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c | |||
@@ -200,7 +200,7 @@ static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq) | |||
200 | { | 200 | { |
201 | static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 }; | 201 | static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 }; |
202 | 202 | ||
203 | WARN_ON_ONCE(pirq >= 16); | 203 | WARN_ON_ONCE(pirq > 16); |
204 | return irqmap[read_config_nybble(router, 0x48, pirq-1)]; | 204 | return irqmap[read_config_nybble(router, 0x48, pirq-1)]; |
205 | } | 205 | } |
206 | 206 | ||
@@ -209,7 +209,7 @@ static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i | |||
209 | static const unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 }; | 209 | static const unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 }; |
210 | unsigned int val = irqmap[irq]; | 210 | unsigned int val = irqmap[irq]; |
211 | 211 | ||
212 | WARN_ON_ONCE(pirq >= 16); | 212 | WARN_ON_ONCE(pirq > 16); |
213 | if (val) { | 213 | if (val) { |
214 | write_config_nybble(router, 0x48, pirq-1, val); | 214 | write_config_nybble(router, 0x48, pirq-1, val); |
215 | return 1; | 215 | return 1; |
@@ -260,7 +260,7 @@ static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq | |||
260 | { | 260 | { |
261 | static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 }; | 261 | static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 }; |
262 | 262 | ||
263 | WARN_ON_ONCE(pirq >= 5); | 263 | WARN_ON_ONCE(pirq > 5); |
264 | return read_config_nybble(router, 0x55, pirqmap[pirq-1]); | 264 | return read_config_nybble(router, 0x55, pirqmap[pirq-1]); |
265 | } | 265 | } |
266 | 266 | ||
@@ -268,7 +268,7 @@ static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq | |||
268 | { | 268 | { |
269 | static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 }; | 269 | static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 }; |
270 | 270 | ||
271 | WARN_ON_ONCE(pirq >= 5); | 271 | WARN_ON_ONCE(pirq > 5); |
272 | write_config_nybble(router, 0x55, pirqmap[pirq-1], irq); | 272 | write_config_nybble(router, 0x55, pirqmap[pirq-1], irq); |
273 | return 1; | 273 | return 1; |
274 | } | 274 | } |
@@ -282,7 +282,7 @@ static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq) | |||
282 | { | 282 | { |
283 | static const unsigned char pirqmap[4] = { 1, 0, 2, 3 }; | 283 | static const unsigned char pirqmap[4] = { 1, 0, 2, 3 }; |
284 | 284 | ||
285 | WARN_ON_ONCE(pirq >= 4); | 285 | WARN_ON_ONCE(pirq > 4); |
286 | return read_config_nybble(router,0x43, pirqmap[pirq-1]); | 286 | return read_config_nybble(router,0x43, pirqmap[pirq-1]); |
287 | } | 287 | } |
288 | 288 | ||
@@ -290,7 +290,7 @@ static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i | |||
290 | { | 290 | { |
291 | static const unsigned char pirqmap[4] = { 1, 0, 2, 3 }; | 291 | static const unsigned char pirqmap[4] = { 1, 0, 2, 3 }; |
292 | 292 | ||
293 | WARN_ON_ONCE(pirq >= 4); | 293 | WARN_ON_ONCE(pirq > 4); |
294 | write_config_nybble(router, 0x43, pirqmap[pirq-1], irq); | 294 | write_config_nybble(router, 0x43, pirqmap[pirq-1], irq); |
295 | return 1; | 295 | return 1; |
296 | } | 296 | } |
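The WARN_ON_ONCE() bound changes above all fix the same off-by-one: pirq is 1-based and indexes an N-entry table via pirq-1, so pirq == N is still legal and only pirq > N deserves a warning. In miniature (the table is truncated to its first entries):

	#include <assert.h>

	static const unsigned char irqmap[16] = { 0, 9, 3, 10 };	/* truncated */

	static unsigned char lookup(int pirq)
	{
		assert(pirq >= 1 && pirq <= 16);	/* pirq == 16 -> irqmap[15] */
		return irqmap[pirq - 1];
	}

	int main(void)
	{
		return lookup(16);	/* valid under >, would have warned under >= */
	}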
diff --git a/arch/x86/pci/numa.c b/arch/x86/pci/numa.c index 55270c26237c..d9afbae5092b 100644 --- a/arch/x86/pci/numa.c +++ b/arch/x86/pci/numa.c | |||
@@ -11,11 +11,41 @@ | |||
11 | #define XQUAD_PORTIO_BASE 0xfe400000 | 11 | #define XQUAD_PORTIO_BASE 0xfe400000 |
12 | #define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */ | 12 | #define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */ |
13 | 13 | ||
14 | int mp_bus_id_to_node[MAX_MP_BUSSES]; | ||
14 | #define BUS2QUAD(global) (mp_bus_id_to_node[global]) | 15 | #define BUS2QUAD(global) (mp_bus_id_to_node[global]) |
16 | |||
17 | int mp_bus_id_to_local[MAX_MP_BUSSES]; | ||
15 | #define BUS2LOCAL(global) (mp_bus_id_to_local[global]) | 18 | #define BUS2LOCAL(global) (mp_bus_id_to_local[global]) |
19 | |||
20 | void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, | ||
21 | struct mpc_config_translation *translation) | ||
22 | { | ||
23 | int quad = translation->trans_quad; | ||
24 | int local = translation->trans_local; | ||
25 | |||
26 | mp_bus_id_to_node[m->mpc_busid] = quad; | ||
27 | mp_bus_id_to_local[m->mpc_busid] = local; | ||
28 | printk(KERN_INFO "Bus #%d is %s (node %d)\n", | ||
29 | m->mpc_busid, name, quad); | ||
30 | } | ||
31 | |||
32 | int quad_local_to_mp_bus_id [NR_CPUS/4][4]; | ||
16 | #define QUADLOCAL2BUS(quad,local) (quad_local_to_mp_bus_id[quad][local]) | 33 | #define QUADLOCAL2BUS(quad,local) (quad_local_to_mp_bus_id[quad][local]) |
34 | void mpc_oem_pci_bus(struct mpc_config_bus *m, | ||
35 | struct mpc_config_translation *translation) | ||
36 | { | ||
37 | int quad = translation->trans_quad; | ||
38 | int local = translation->trans_local; | ||
39 | |||
40 | quad_local_to_mp_bus_id[quad][local] = m->mpc_busid; | ||
41 | } | ||
42 | |||
43 | /* Where the IO area was mapped on multiquad, always 0 otherwise */ | ||
44 | void *xquad_portio; | ||
45 | #ifdef CONFIG_X86_NUMAQ | ||
46 | EXPORT_SYMBOL(xquad_portio); | ||
47 | #endif | ||
17 | 48 | ||
18 | extern void *xquad_portio; /* Where the IO area was mapped */ | ||
19 | #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) | 49 | #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) |
20 | 50 | ||
21 | #define PCI_CONF1_MQ_ADDRESS(bus, devfn, reg) \ | 51 | #define PCI_CONF1_MQ_ADDRESS(bus, devfn, reg) \ |
diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c index 7f9c6da04a4c..7dc5d5cf50a2 100644 --- a/arch/x86/power/cpu_32.c +++ b/arch/x86/power/cpu_32.c | |||
@@ -27,17 +27,17 @@ static void __save_processor_state(struct saved_context *ctxt) | |||
27 | /* | 27 | /* |
28 | * descriptor tables | 28 | * descriptor tables |
29 | */ | 29 | */ |
30 | store_gdt(&ctxt->gdt); | 30 | store_gdt(&ctxt->gdt); |
31 | store_idt(&ctxt->idt); | 31 | store_idt(&ctxt->idt); |
32 | store_tr(ctxt->tr); | 32 | store_tr(ctxt->tr); |
33 | 33 | ||
34 | /* | 34 | /* |
35 | * segment registers | 35 | * segment registers |
36 | */ | 36 | */ |
37 | savesegment(es, ctxt->es); | 37 | savesegment(es, ctxt->es); |
38 | savesegment(fs, ctxt->fs); | 38 | savesegment(fs, ctxt->fs); |
39 | savesegment(gs, ctxt->gs); | 39 | savesegment(gs, ctxt->gs); |
40 | savesegment(ss, ctxt->ss); | 40 | savesegment(ss, ctxt->ss); |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * control registers | 43 | * control registers |
@@ -48,10 +48,12 @@ static void __save_processor_state(struct saved_context *ctxt) | |||
48 | ctxt->cr4 = read_cr4(); | 48 | ctxt->cr4 = read_cr4(); |
49 | } | 49 | } |
50 | 50 | ||
51 | /* Needed by apm.c */ | ||
51 | void save_processor_state(void) | 52 | void save_processor_state(void) |
52 | { | 53 | { |
53 | __save_processor_state(&saved_context); | 54 | __save_processor_state(&saved_context); |
54 | } | 55 | } |
56 | EXPORT_SYMBOL(save_processor_state); | ||
55 | 57 | ||
56 | static void do_fpu_end(void) | 58 | static void do_fpu_end(void) |
57 | { | 59 | { |
@@ -64,9 +66,14 @@ static void do_fpu_end(void) | |||
64 | static void fix_processor_context(void) | 66 | static void fix_processor_context(void) |
65 | { | 67 | { |
66 | int cpu = smp_processor_id(); | 68 | int cpu = smp_processor_id(); |
67 | struct tss_struct * t = &per_cpu(init_tss, cpu); | 69 | struct tss_struct *t = &per_cpu(init_tss, cpu); |
68 | 70 | ||
69 | set_tss_desc(cpu,t); /* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has concept of busy TSS or some similar stupidity. */ | 71 | set_tss_desc(cpu, t); /* |
72 | * This just modifies memory; should not be | ||
73 | * necessary. But... This is necessary, because | ||
74 | * 386 hardware has concept of busy TSS or some | ||
75 | * similar stupidity. | ||
76 | */ | ||
70 | 77 | ||
71 | load_TR_desc(); /* This does ltr */ | 78 | load_TR_desc(); /* This does ltr */ |
72 | load_LDT(¤t->active_mm->context); /* This does lldt */ | 79 | load_LDT(¤t->active_mm->context); /* This does lldt */ |
@@ -100,16 +107,16 @@ static void __restore_processor_state(struct saved_context *ctxt) | |||
100 | * now restore the descriptor tables to their proper values | 107 | * now restore the descriptor tables to their proper values |
101 | * ltr is done in fix_processor_context(). | 108 | * ltr is done in fix_processor_context(). |
102 | */ | 109 | */ |
103 | load_gdt(&ctxt->gdt); | 110 | load_gdt(&ctxt->gdt); |
104 | load_idt(&ctxt->idt); | 111 | load_idt(&ctxt->idt); |
105 | 112 | ||
106 | /* | 113 | /* |
107 | * segment registers | 114 | * segment registers |
108 | */ | 115 | */ |
109 | loadsegment(es, ctxt->es); | 116 | loadsegment(es, ctxt->es); |
110 | loadsegment(fs, ctxt->fs); | 117 | loadsegment(fs, ctxt->fs); |
111 | loadsegment(gs, ctxt->gs); | 118 | loadsegment(gs, ctxt->gs); |
112 | loadsegment(ss, ctxt->ss); | 119 | loadsegment(ss, ctxt->ss); |
113 | 120 | ||
114 | /* | 121 | /* |
115 | * sysenter MSRs | 122 | * sysenter MSRs |
@@ -123,11 +130,9 @@ static void __restore_processor_state(struct saved_context *ctxt) | |||
123 | mcheck_init(&boot_cpu_data); | 130 | mcheck_init(&boot_cpu_data); |
124 | } | 131 | } |
125 | 132 | ||
133 | /* Needed by apm.c */ | ||
126 | void restore_processor_state(void) | 134 | void restore_processor_state(void) |
127 | { | 135 | { |
128 | __restore_processor_state(&saved_context); | 136 | __restore_processor_state(&saved_context); |
129 | } | 137 | } |
130 | |||
131 | /* Needed by apm.c */ | ||
132 | EXPORT_SYMBOL(save_processor_state); | ||
133 | EXPORT_SYMBOL(restore_processor_state); | 138 | EXPORT_SYMBOL(restore_processor_state); |
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile index 0a8f4742ef51..17a6b057856b 100644 --- a/arch/x86/vdso/Makefile +++ b/arch/x86/vdso/Makefile | |||
@@ -39,7 +39,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE | |||
39 | 39 | ||
40 | CFL := $(PROFILING) -mcmodel=small -fPIC -g0 -O2 -fasynchronous-unwind-tables -m64 | 40 | CFL := $(PROFILING) -mcmodel=small -fPIC -g0 -O2 -fasynchronous-unwind-tables -m64 |
41 | 41 | ||
42 | $(vobjs): KBUILD_CFLAGS = $(CFL) | 42 | $(vobjs): KBUILD_CFLAGS += $(CFL) |
43 | 43 | ||
44 | targets += vdso-syms.lds | 44 | targets += vdso-syms.lds |
45 | obj-$(VDSO64-y) += vdso-syms.lds | 45 | obj-$(VDSO64-y) += vdso-syms.lds |
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c index 348f1341e1c8..e2af8eee80e3 100644 --- a/arch/x86/vdso/vdso32-setup.c +++ b/arch/x86/vdso/vdso32-setup.c | |||
@@ -210,8 +210,12 @@ static int use_sysenter __read_mostly = -1; | |||
210 | /* May not be __init: called during resume */ | 210 | /* May not be __init: called during resume */ |
211 | void syscall32_cpu_init(void) | 211 | void syscall32_cpu_init(void) |
212 | { | 212 | { |
213 | if (use_sysenter < 0) | 213 | if (use_sysenter < 0) { |
214 | use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL); | 214 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) |
215 | use_sysenter = 1; | ||
216 | if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) | ||
217 | use_sysenter = 1; | ||
218 | } | ||
215 | 219 | ||
216 | /* Load these always in case some future AMD CPU supports | 220 | /* Load these always in case some future AMD CPU supports |
217 | SYSENTER from compat mode too. */ | 221 | SYSENTER from compat mode too. */ |
@@ -325,6 +329,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack) | |||
325 | int ret = 0; | 329 | int ret = 0; |
326 | bool compat; | 330 | bool compat; |
327 | 331 | ||
332 | if (vdso_enabled == VDSO_DISABLED) | ||
333 | return 0; | ||
334 | |||
328 | down_write(&mm->mmap_sem); | 335 | down_write(&mm->mmap_sem); |
329 | 336 | ||
330 | /* Test compat mode once here, in case someone | 337 | /* Test compat mode once here, in case someone |
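use_sysenter is a lazy tri-state: negative means undecided, and the rewritten branch above only ever promotes it to 1, so CPUs that are neither Intel nor Centaur stay on the int $0x80 path and the vendor test is simply re-run on the next call. The shape of the pattern, with the vendor checks reduced to plain flags (illustrative, not the kernel's cpu-data accessors):

	static int use_sysenter = -1;	/* < 0: not yet decided */

	static void pick_syscall_insn(int is_intel, int is_centaur)
	{
		if (use_sysenter < 0) {
			if (is_intel || is_centaur)
				use_sysenter = 1;
			/* otherwise stays negative and is re-tested next call */
		}
	}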
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 27ee26aedf94..c0388220cf97 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
26 | #include <linux/page-flags.h> | 26 | #include <linux/page-flags.h> |
27 | #include <linux/highmem.h> | 27 | #include <linux/highmem.h> |
28 | #include <linux/console.h> | ||
28 | 29 | ||
29 | #include <xen/interface/xen.h> | 30 | #include <xen/interface/xen.h> |
30 | #include <xen/interface/physdev.h> | 31 | #include <xen/interface/physdev.h> |
@@ -889,7 +890,6 @@ void __init xen_setup_vcpu_info_placement(void) | |||
889 | pv_irq_ops.irq_disable = xen_irq_disable_direct; | 890 | pv_irq_ops.irq_disable = xen_irq_disable_direct; |
890 | pv_irq_ops.irq_enable = xen_irq_enable_direct; | 891 | pv_irq_ops.irq_enable = xen_irq_enable_direct; |
891 | pv_mmu_ops.read_cr2 = xen_read_cr2_direct; | 892 | pv_mmu_ops.read_cr2 = xen_read_cr2_direct; |
892 | pv_cpu_ops.iret = xen_iret_direct; | ||
893 | } | 893 | } |
894 | } | 894 | } |
895 | 895 | ||
@@ -993,7 +993,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = { | |||
993 | .read_tsc = native_read_tsc, | 993 | .read_tsc = native_read_tsc, |
994 | .read_pmc = native_read_pmc, | 994 | .read_pmc = native_read_pmc, |
995 | 995 | ||
996 | .iret = (void *)&hypercall_page[__HYPERVISOR_iret], | 996 | .iret = xen_iret, |
997 | .irq_enable_syscall_ret = NULL, /* never called */ | 997 | .irq_enable_syscall_ret = NULL, /* never called */ |
998 | 998 | ||
999 | .load_tr_desc = paravirt_nop, | 999 | .load_tr_desc = paravirt_nop, |
@@ -1228,6 +1228,9 @@ asmlinkage void __init xen_start_kernel(void) | |||
1228 | ? __pa(xen_start_info->mod_start) : 0; | 1228 | ? __pa(xen_start_info->mod_start) : 0; |
1229 | boot_params.hdr.ramdisk_size = xen_start_info->mod_len; | 1229 | boot_params.hdr.ramdisk_size = xen_start_info->mod_len; |
1230 | 1230 | ||
1231 | if (!is_initial_xendomain()) | ||
1232 | add_preferred_console("hvc", 0, NULL); | ||
1233 | |||
1231 | /* Start the world */ | 1234 | /* Start the world */ |
1232 | start_kernel(); | 1235 | start_kernel(); |
1233 | } | 1236 | } |
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c index 5e6f36f6d876..5791eb2e3750 100644 --- a/arch/x86/xen/multicalls.c +++ b/arch/x86/xen/multicalls.c | |||
@@ -76,7 +76,7 @@ void xen_mc_flush(void) | |||
76 | if (ret) { | 76 | if (ret) { |
77 | printk(KERN_ERR "%d multicall(s) failed: cpu %d\n", | 77 | printk(KERN_ERR "%d multicall(s) failed: cpu %d\n", |
78 | ret, smp_processor_id()); | 78 | ret, smp_processor_id()); |
79 | for(i = 0; i < b->mcidx; i++) { | 79 | for (i = 0; i < b->mcidx; i++) { |
80 | printk(" call %2d/%d: op=%lu arg=[%lx] result=%ld\n", | 80 | printk(" call %2d/%d: op=%lu arg=[%lx] result=%ld\n", |
81 | i+1, b->mcidx, | 81 | i+1, b->mcidx, |
82 | b->debug[i].op, | 82 | b->debug[i].op, |
@@ -93,7 +93,7 @@ void xen_mc_flush(void) | |||
93 | 93 | ||
94 | local_irq_restore(flags); | 94 | local_irq_restore(flags); |
95 | 95 | ||
96 | for(i = 0; i < b->cbidx; i++) { | 96 | for (i = 0; i < b->cbidx; i++) { |
97 | struct callback *cb = &b->callbacks[i]; | 97 | struct callback *cb = &b->callbacks[i]; |
98 | 98 | ||
99 | (*cb->fn)(cb->data); | 99 | (*cb->fn)(cb->data); |
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index aafc54437403..e340ff92f6b6 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -35,7 +35,7 @@ | |||
35 | #include "xen-ops.h" | 35 | #include "xen-ops.h" |
36 | #include "mmu.h" | 36 | #include "mmu.h" |
37 | 37 | ||
38 | static cpumask_t cpu_initialized_map; | 38 | static cpumask_t xen_cpu_initialized_map; |
39 | static DEFINE_PER_CPU(int, resched_irq); | 39 | static DEFINE_PER_CPU(int, resched_irq); |
40 | static DEFINE_PER_CPU(int, callfunc_irq); | 40 | static DEFINE_PER_CPU(int, callfunc_irq); |
41 | 41 | ||
@@ -179,7 +179,7 @@ void __init xen_smp_prepare_cpus(unsigned int max_cpus) | |||
179 | if (xen_smp_intr_init(0)) | 179 | if (xen_smp_intr_init(0)) |
180 | BUG(); | 180 | BUG(); |
181 | 181 | ||
182 | cpu_initialized_map = cpumask_of_cpu(0); | 182 | xen_cpu_initialized_map = cpumask_of_cpu(0); |
183 | 183 | ||
184 | /* Restrict the possible_map according to max_cpus. */ | 184 | /* Restrict the possible_map according to max_cpus. */ |
185 | while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { | 185 | while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { |
@@ -210,7 +210,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) | |||
210 | struct vcpu_guest_context *ctxt; | 210 | struct vcpu_guest_context *ctxt; |
211 | struct gdt_page *gdt = &per_cpu(gdt_page, cpu); | 211 | struct gdt_page *gdt = &per_cpu(gdt_page, cpu); |
212 | 212 | ||
213 | if (cpu_test_and_set(cpu, cpu_initialized_map)) | 213 | if (cpu_test_and_set(cpu, xen_cpu_initialized_map)) |
214 | return 0; | 214 | return 0; |
215 | 215 | ||
216 | ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); | 216 | ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); |
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S index 6b7190449d07..fe161ed4b01e 100644 --- a/arch/x86/xen/xen-asm.S +++ b/arch/x86/xen/xen-asm.S | |||
@@ -135,13 +135,8 @@ ENDPATCH(xen_restore_fl_direct) | |||
135 | current stack state in whatever form its in, we keep things | 135 | current stack state in whatever form its in, we keep things |
136 | simple by only using a single register which is pushed/popped | 136 | simple by only using a single register which is pushed/popped |
137 | on the stack. | 137 | on the stack. |
138 | |||
139 | Non-direct iret could be done in the same way, but it would | ||
140 | require an annoying amount of code duplication. We'll assume | ||
141 | that direct mode will be the common case once the hypervisor | ||
142 | support becomes commonplace. | ||
143 | */ | 138 | */ |
144 | ENTRY(xen_iret_direct) | 139 | ENTRY(xen_iret) |
145 | /* test eflags for special cases */ | 140 | /* test eflags for special cases */ |
146 | testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp) | 141 | testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp) |
147 | jnz hyper_iret | 142 | jnz hyper_iret |
@@ -155,9 +150,9 @@ ENTRY(xen_iret_direct) | |||
155 | GET_THREAD_INFO(%eax) | 150 | GET_THREAD_INFO(%eax) |
156 | movl TI_cpu(%eax),%eax | 151 | movl TI_cpu(%eax),%eax |
157 | movl __per_cpu_offset(,%eax,4),%eax | 152 | movl __per_cpu_offset(,%eax,4),%eax |
158 | lea per_cpu__xen_vcpu_info(%eax),%eax | 153 | mov per_cpu__xen_vcpu(%eax),%eax |
159 | #else | 154 | #else |
160 | movl $per_cpu__xen_vcpu_info, %eax | 155 | movl per_cpu__xen_vcpu, %eax |
161 | #endif | 156 | #endif |
162 | 157 | ||
163 | /* check IF state we're restoring */ | 158 | /* check IF state we're restoring */ |
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index b02a909bfd4c..956a491ea998 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h | |||
@@ -63,5 +63,5 @@ DECL_ASM(void, xen_irq_disable_direct, void); | |||
63 | DECL_ASM(unsigned long, xen_save_fl_direct, void); | 63 | DECL_ASM(unsigned long, xen_save_fl_direct, void); |
64 | DECL_ASM(void, xen_restore_fl_direct, unsigned long); | 64 | DECL_ASM(void, xen_restore_fl_direct, unsigned long); |
65 | 65 | ||
66 | void xen_iret_direct(void); | 66 | void xen_iret(void); |
67 | #endif /* XEN_OPS_H */ | 67 | #endif /* XEN_OPS_H */ |
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile index f582d6a24ec2..7419dbccf027 100644 --- a/arch/xtensa/kernel/Makefile +++ b/arch/xtensa/kernel/Makefile | |||
@@ -5,7 +5,7 @@ | |||
5 | extra-y := head.o vmlinux.lds | 5 | extra-y := head.o vmlinux.lds |
6 | 6 | ||
7 | 7 | ||
8 | obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \ | 8 | obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o \ |
9 | setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \ | 9 | setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \ |
10 | pci-dma.o init_task.o io.o | 10 | pci-dma.o init_task.o io.o |
11 | 11 | ||
diff --git a/arch/xtensa/kernel/semaphore.c b/arch/xtensa/kernel/semaphore.c deleted file mode 100644 index 995c6410ae10..000000000000 --- a/arch/xtensa/kernel/semaphore.c +++ /dev/null | |||
@@ -1,226 +0,0 @@ | |||
1 | /* | ||
2 | * arch/xtensa/kernel/semaphore.c | ||
3 | * | ||
4 | * Generic semaphore code. Buyer beware. Do your own specific changes | ||
5 | * in <asm/semaphore-helper.h> | ||
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | * | ||
11 | * Copyright (C) 2001 - 2005 Tensilica Inc. | ||
12 | * | ||
13 | * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> | ||
14 | * Chris Zankel <chris@zankel.net> | ||
15 | * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca> | ||
16 | * Kevin Chea | ||
17 | */ | ||
18 | |||
19 | #include <linux/sched.h> | ||
20 | #include <linux/wait.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <asm/semaphore.h> | ||
23 | #include <asm/errno.h> | ||
24 | |||
25 | /* | ||
26 | * These two _must_ execute atomically wrt each other. | ||
27 | */ | ||
28 | |||
29 | static __inline__ void wake_one_more(struct semaphore * sem) | ||
30 | { | ||
31 | atomic_inc((atomic_t *)&sem->sleepers); | ||
32 | } | ||
33 | |||
34 | static __inline__ int waking_non_zero(struct semaphore *sem) | ||
35 | { | ||
36 | unsigned long flags; | ||
37 | int ret = 0; | ||
38 | |||
39 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
40 | if (sem->sleepers > 0) { | ||
41 | sem->sleepers--; | ||
42 | ret = 1; | ||
43 | } | ||
44 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
45 | return ret; | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * waking_non_zero_interruptible: | ||
50 | * 1 got the lock | ||
51 | * 0 go to sleep | ||
52 | * -EINTR interrupted | ||
53 | * | ||
54 | * We must undo the sem->count down_interruptible() increment while we are | ||
55 | * protected by the spinlock in order to make atomic this atomic_inc() with the | ||
56 | * atomic_read() in wake_one_more(), otherwise we can race. -arca | ||
57 | */ | ||
58 | |||
59 | static __inline__ int waking_non_zero_interruptible(struct semaphore *sem, | ||
60 | struct task_struct *tsk) | ||
61 | { | ||
62 | unsigned long flags; | ||
63 | int ret = 0; | ||
64 | |||
65 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
66 | if (sem->sleepers > 0) { | ||
67 | sem->sleepers--; | ||
68 | ret = 1; | ||
69 | } else if (signal_pending(tsk)) { | ||
70 | atomic_inc(&sem->count); | ||
71 | ret = -EINTR; | ||
72 | } | ||
73 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
74 | return ret; | ||
75 | } | ||
76 | |||
77 | /* | ||
78 | * waking_non_zero_trylock: | ||
79 | * 1 failed to lock | ||
80 | * 0 got the lock | ||
81 | * | ||
82 | * We must undo the sem->count down_trylock() increment while we are | ||
83 | * protected by the spinlock in order to make atomic this atomic_inc() with the | ||
84 | * atomic_read() in wake_one_more(), otherwise we can race. -arca | ||
85 | */ | ||
86 | |||
87 | static __inline__ int waking_non_zero_trylock(struct semaphore *sem) | ||
88 | { | ||
89 | unsigned long flags; | ||
90 | int ret = 1; | ||
91 | |||
92 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
93 | if (sem->sleepers <= 0) | ||
94 | atomic_inc(&sem->count); | ||
95 | else { | ||
96 | sem->sleepers--; | ||
97 | ret = 0; | ||
98 | } | ||
99 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
100 | return ret; | ||
101 | } | ||
102 | |||
103 | DEFINE_SPINLOCK(semaphore_wake_lock); | ||
104 | |||
105 | /* | ||
106 | * Semaphores are implemented using a two-way counter: | ||
107 | * The "count" variable is decremented for each process | ||
108 | * that tries to sleep, while the "waking" variable is | ||
109 | * incremented when the "up()" code goes to wake up waiting | ||
110 | * processes. | ||
111 | * | ||
112 | * Notably, the inline "up()" and "down()" functions can | ||
113 | * efficiently test if they need to do any extra work (up | ||
114 | * needs to do something only if count was negative before | ||
115 | * the increment operation.) | ||
116 | * | ||
117 | * waking_non_zero() (defined above in this file) must execute | ||
118 | * atomically. | ||
119 | * | ||
120 | * When __up() is called, the count was negative before | ||
121 | * incrementing it, and we need to wake up somebody. | ||
122 | * | ||
123 | * This routine adds one to the count of processes that need to | ||
124 | * wake up and exit. ALL waiting processes actually wake up but | ||
125 | * only the one that gets to the "waking" field first will gate | ||
126 | * through and acquire the semaphore. The others will go back | ||
127 | * to sleep. | ||
128 | * | ||
129 | * Note that these functions are only called when there is | ||
130 | * contention on the lock, and as such all this is the | ||
131 | * "non-critical" part of the whole semaphore business. The | ||
132 | * critical part is the inline stuff in <asm/semaphore.h> | ||
133 | * where we want to avoid any extra jumps and calls. | ||
134 | */ | ||
135 | |||
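The comment above refers to the inline fast paths in <asm/semaphore.h>, which are not part of this file. As a minimal sketch only — the names down_sketch()/up_sketch() are illustrative and the real header may differ — the pairing with the slow paths below looks roughly like this:

static inline void down_sketch(struct semaphore *sem)
{
	/* Fast path: the semaphore is ours if count stays non-negative. */
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);		/* contended: sleep on sem->wait */
}

static inline void up_sketch(struct semaphore *sem)
{
	/* Fast path: a non-positive new count means somebody is waiting. */
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);		/* contended: credit a wakeup, wake sleepers */
}
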
136 | void __up(struct semaphore *sem) | ||
137 | { | ||
138 | wake_one_more(sem); | ||
139 | wake_up(&sem->wait); | ||
140 | } | ||
141 | |||
142 | /* | ||
143 | * Perform the "down" function. Return zero for semaphore acquired, | ||
144 | * return negative if signalled out of the function. | ||
145 | * | ||
146 | * If called from __down, the return is ignored and the wait loop is | ||
147 | * not interruptible. This means that a task waiting on a semaphore | ||
148 | * using "down()" cannot be killed until someone does an "up()" on | ||
149 | * the semaphore. | ||
150 | * | ||
151 | * If called from __down_interruptible, the return value gets checked | ||
152 | * upon return. If the return value is negative then the task continues | ||
153 | * with the negative value in the return register (it can be tested by | ||
154 | * the caller). | ||
155 | * | ||
156 | * Either form may be used in conjunction with "up()". | ||
157 | * | ||
158 | */ | ||
159 | |||
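As context for the macros and slow paths that follow, here is a hypothetical caller's view of the three entry points (a sketch under the usual semaphore API of this era; example_caller() and the DECLARE_MUTEX() initializer are assumptions, not code from this file):

static DECLARE_MUTEX(sem);		/* counting semaphore initialized to 1 */

static int example_caller(void)
{
	down(&sem);			/* uninterruptible: sleeps in __down() */
	/* ... critical section ... */
	up(&sem);

	if (down_interruptible(&sem))	/* 0 on acquire, -EINTR if signalled */
		return -EINTR;
	up(&sem);

	if (down_trylock(&sem) == 0)	/* inverted convention: 0 == acquired */
		up(&sem);
	return 0;
}
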
160 | #define DOWN_VAR \ | ||
161 | struct task_struct *tsk = current; \ | ||
162 | wait_queue_t wait; \ | ||
163 | init_waitqueue_entry(&wait, tsk); | ||
164 | |||
165 | #define DOWN_HEAD(task_state) \ | ||
166 | \ | ||
167 | \ | ||
168 | tsk->state = (task_state); \ | ||
169 | add_wait_queue(&sem->wait, &wait); \ | ||
170 | \ | ||
171 | /* \ | ||
172 | * Ok, we're set up. sem->count is known to be less than zero \ | ||
173 | * so we must wait. \ | ||
174 | * \ | ||
175 | * We can let go the lock for purposes of waiting. \ | ||
176 | * We re-acquire it after awaking so as to protect \ | ||
177 | * all semaphore operations. \ | ||
178 | * \ | ||
179 | * If "up()" is called before we call waking_non_zero() then \ | ||
180 | * we will catch it right away. If it is called later then \ | ||
181 | * we will have to go through a wakeup cycle to catch it. \ | ||
182 | * \ | ||
183 | * Multiple waiters contend for the semaphore lock to see \ | ||
184 | * who gets to gate through and who has to wait some more. \ | ||
185 | */ \ | ||
186 | for (;;) { | ||
187 | |||
188 | #define DOWN_TAIL(task_state) \ | ||
189 | tsk->state = (task_state); \ | ||
190 | } \ | ||
191 | tsk->state = TASK_RUNNING; \ | ||
192 | remove_wait_queue(&sem->wait, &wait); | ||
193 | |||
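The macros above exist only so the two sleep paths can share one wait loop. Expanded by hand (a readability sketch, not code from this file; it assumes the same headers included at the top), __down() below is equivalent to roughly:

void __down_expanded(struct semaphore *sem)
{
	struct task_struct *tsk = current;		/* DOWN_VAR */
	wait_queue_t wait;
	init_waitqueue_entry(&wait, tsk);

	tsk->state = TASK_UNINTERRUPTIBLE;		/* DOWN_HEAD */
	add_wait_queue(&sem->wait, &wait);
	for (;;) {
		if (waking_non_zero(sem))		/* consumed a wakeup credit */
			break;
		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;	/* DOWN_TAIL re-arms */
	}
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
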
194 | void __sched __down(struct semaphore * sem) | ||
195 | { | ||
196 | DOWN_VAR | ||
197 | DOWN_HEAD(TASK_UNINTERRUPTIBLE) | ||
198 | if (waking_non_zero(sem)) | ||
199 | break; | ||
200 | schedule(); | ||
201 | DOWN_TAIL(TASK_UNINTERRUPTIBLE) | ||
202 | } | ||
203 | |||
204 | int __sched __down_interruptible(struct semaphore * sem) | ||
205 | { | ||
206 | int ret = 0; | ||
207 | DOWN_VAR | ||
208 | DOWN_HEAD(TASK_INTERRUPTIBLE) | ||
209 | |||
210 | ret = waking_non_zero_interruptible(sem, tsk); | ||
211 | if (ret) | ||
212 | { | ||
213 | if (ret == 1) | ||
214 | /* nonzero return is reserved for interruption; we got the lock -arca */ | ||
215 | ret = 0; | ||
216 | break; | ||
217 | } | ||
218 | schedule(); | ||
219 | DOWN_TAIL(TASK_INTERRUPTIBLE) | ||
220 | return ret; | ||
221 | } | ||
222 | |||
223 | int __down_trylock(struct semaphore * sem) | ||
224 | { | ||
225 | return waking_non_zero_trylock(sem); | ||
226 | } | ||
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index 60dbdb43fb4c..6e52cdd6166f 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <asm/io.h> | 26 | #include <asm/io.h> |
27 | #include <asm/page.h> | 27 | #include <asm/page.h> |
28 | #include <asm/pgalloc.h> | 28 | #include <asm/pgalloc.h> |
29 | #include <asm/semaphore.h> | ||
30 | #ifdef CONFIG_BLK_DEV_FD | 29 | #ifdef CONFIG_BLK_DEV_FD |
31 | #include <asm/floppy.h> | 30 | #include <asm/floppy.h> |
32 | #endif | 31 | #endif |
@@ -71,14 +70,6 @@ EXPORT_SYMBOL(__umodsi3); | |||
71 | EXPORT_SYMBOL(__udivdi3); | 70 | EXPORT_SYMBOL(__udivdi3); |
72 | EXPORT_SYMBOL(__umoddi3); | 71 | EXPORT_SYMBOL(__umoddi3); |
73 | 72 | ||
74 | /* | ||
75 | * Semaphore operations | ||
76 | */ | ||
77 | EXPORT_SYMBOL(__down); | ||
78 | EXPORT_SYMBOL(__down_interruptible); | ||
79 | EXPORT_SYMBOL(__down_trylock); | ||
80 | EXPORT_SYMBOL(__up); | ||
81 | |||
82 | #ifdef CONFIG_NET | 73 | #ifdef CONFIG_NET |
83 | /* | 74 | /* |
84 | * Networking support | 75 | * Networking support |