Diffstat (limited to 'include')
243 files changed, 6514 insertions, 8420 deletions
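Every per-architecture header below collapses into a single line that pulls in the new generic implementation. As a point of reference, here is a minimal usage sketch of that shared API (assuming the 2.6.26-era <linux/semaphore.h> interface; the function names are real, the demo_* identifiers are hypothetical):

#include <linux/semaphore.h>

static struct semaphore demo_sem;       /* hypothetical example semaphore */

static int demo(void)
{
        sema_init(&demo_sem, 1);        /* count of 1 = mutex-like */

        if (down_interruptible(&demo_sem))
                return -EINTR;          /* woken by a signal, not the lock */
        /* ... critical section ... */
        up(&demo_sem);

        /* down_trylock() keeps the old convention: 0 = acquired, 1 = busy */
        if (!down_trylock(&demo_sem))
                up(&demo_sem);
        return 0;
}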
diff --git a/include/asm-alpha/semaphore.h b/include/asm-alpha/semaphore.h
index f1e9278a9fe2..d9b2034ed1d2 100644
--- a/include/asm-alpha/semaphore.h
+++ b/include/asm-alpha/semaphore.h
@@ -1,149 +1 @@
-#ifndef _ALPHA_SEMAPHORE_H
-#define _ALPHA_SEMAPHORE_H
-
-/*
- * SMP- and interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1996, 2000 Richard Henderson
- */
-
-#include <asm/current.h>
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <linux/compiler.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
-        atomic_t count;
-        wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
-        .count = ATOMIC_INIT(n), \
-        .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-        struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
-        /*
-         * Logically,
-         *   *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
-         * except that gcc produces better initializing by parts yet.
-         */
-
-        atomic_set(&sem->count, val);
-        init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-        sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-        sema_init(sem, 0);
-}
-
-extern void down(struct semaphore *);
-extern void __down_failed(struct semaphore *);
-extern int down_interruptible(struct semaphore *);
-extern int __down_failed_interruptible(struct semaphore *);
-extern int down_trylock(struct semaphore *);
-extern void up(struct semaphore *);
-extern void __up_wakeup(struct semaphore *);
-
-/*
- * Hidden out of line code is fun, but extremely messy. Rely on newer
- * compilers to do a respectable job with this. The contention cases
- * are handled out of line in arch/alpha/kernel/semaphore.c.
- */
-
-static inline void __down(struct semaphore *sem)
-{
-        long count;
-        might_sleep();
-        count = atomic_dec_return(&sem->count);
-        if (unlikely(count < 0))
-                __down_failed(sem);
-}
-
-static inline int __down_interruptible(struct semaphore *sem)
-{
-        long count;
-        might_sleep();
-        count = atomic_dec_return(&sem->count);
-        if (unlikely(count < 0))
-                return __down_failed_interruptible(sem);
-        return 0;
-}
-
-/*
- * down_trylock returns 0 on success, 1 if we failed to get the lock.
- */
-
-static inline int __down_trylock(struct semaphore *sem)
-{
-        long ret;
-
-        /* "Equivalent" C:
-
-           do {
-                ret = ldl_l;
-                --ret;
-                if (ret < 0)
-                        break;
-                ret = stl_c = ret;
-           } while (ret == 0);
-        */
-        __asm__ __volatile__(
-                "1: ldl_l %0,%1\n"
-                " subl %0,1,%0\n"
-                " blt %0,2f\n"
-                " stl_c %0,%1\n"
-                " beq %0,3f\n"
-                " mb\n"
-                "2:\n"
-                ".subsection 2\n"
-                "3: br 1b\n"
-                ".previous"
-                : "=&r" (ret), "=m" (sem->count)
-                : "m" (sem->count));
-
-        return ret < 0;
-}
-
-static inline void __up(struct semaphore *sem)
-{
-        if (unlikely(atomic_inc_return(&sem->count) <= 0))
-                __up_wakeup(sem);
-}
-
-#if !defined(CONFIG_DEBUG_SEMAPHORE)
-extern inline void down(struct semaphore *sem)
-{
-        __down(sem);
-}
-extern inline int down_interruptible(struct semaphore *sem)
-{
-        return __down_interruptible(sem);
-}
-extern inline int down_trylock(struct semaphore *sem)
-{
-        return __down_trylock(sem);
-}
-extern inline void up(struct semaphore *sem)
-{
-        __up(sem);
-}
-#endif
-
-#endif
+#include <linux/semaphore.h>
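The alpha fast paths above encode contention in the sign of count: down() going negative means a holder exists, and up() landing at or below zero means sleepers remain. A hedged, architecture-neutral restatement of that protocol (illustrative sketch only; old_sem and the demo_* slow paths are hypothetical):

#include <asm/atomic.h>

struct old_sem {
        atomic_t count;                 /* >0 free, <=0 held/contended */
};

extern void demo_down_slow(struct old_sem *sem);   /* hypothetical */
extern void demo_wake_slow(struct old_sem *sem);   /* hypothetical */

static inline void demo_down(struct old_sem *sem)
{
        if (atomic_dec_return(&sem->count) < 0)
                demo_down_slow(sem);    /* went negative: must sleep */
}

static inline void demo_up(struct old_sem *sem)
{
        if (atomic_inc_return(&sem->count) <= 0)
                demo_wake_slow(sem);    /* still <= 0: a sleeper waits */
}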
diff --git a/include/asm-arm/semaphore-helper.h b/include/asm-arm/semaphore-helper.h
deleted file mode 100644
index 1d7f1987edb9..000000000000
--- a/include/asm-arm/semaphore-helper.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef ASMARM_SEMAPHORE_HELPER_H
-#define ASMARM_SEMAPHORE_HELPER_H
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        if (atomic_read(&sem->count) <= 0)
-                sem->waking++;
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
-        unsigned long flags;
-        int ret = 0;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        if (sem->waking > 0) {
-                sem->waking--;
-                ret = 1;
-        }
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-        return ret;
-}
-
-/*
- * waking non zero interruptible
- *      1       got the lock
- *      0       go to sleep
- *      -EINTR  interrupted
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
-                                                struct task_struct *tsk)
-{
-        unsigned long flags;
-        int ret = 0;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        if (sem->waking > 0) {
-                sem->waking--;
-                ret = 1;
-        } else if (signal_pending(tsk)) {
-                atomic_inc(&sem->count);
-                ret = -EINTR;
-        }
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-        return ret;
-}
-
-/*
- * waking_non_zero_try_lock:
- *      1       failed to lock
- *      0       got the lock
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
-        unsigned long flags;
-        int ret = 1;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        if (sem->waking <= 0)
-                atomic_inc(&sem->count);
-        else {
-                sem->waking--;
-                ret = 0;
-        }
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-        return ret;
-}
-
-#endif
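wake_one_more() and waking_non_zero() above implement a wakeup-token handshake: up() banks a token under the lock, and each sleeper must withdraw one before it may claim the semaphore. A reconstructed sketch of the sleeper loop that would consume those tokens (the real slow path lived in arch/arm/kernel/semaphore.c; this version and the demo_ name are illustrative only):

#include <linux/sched.h>
#include <linux/wait.h>

static void demo_down_slow(struct semaphore *sem)   /* hypothetical name */
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&sem->wait, &wait);
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (waking_non_zero(sem))
                        break;          /* withdrew a wakeup token */
                schedule();             /* wake_one_more() will rouse us */
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&sem->wait, &wait);
}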
diff --git a/include/asm-arm/semaphore.h b/include/asm-arm/semaphore.h
index 1c8b441f89e3..d9b2034ed1d2 100644
--- a/include/asm-arm/semaphore.h
+++ b/include/asm-arm/semaphore.h
@@ -1,98 +1 @@
-/*
- * linux/include/asm-arm/semaphore.h
- */
-#ifndef __ASM_ARM_SEMAPHORE_H
-#define __ASM_ARM_SEMAPHORE_H
-
-#include <linux/linkage.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-#include <asm/atomic.h>
-#include <asm/locks.h>
-
-struct semaphore {
-        atomic_t count;
-        int sleepers;
-        wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INIT(name, cnt) \
-{ \
-        .count = ATOMIC_INIT(cnt), \
-        .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-        struct semaphore name = __SEMAPHORE_INIT(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
-        atomic_set(&sem->count, val);
-        sem->sleepers = 0;
-        init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX(struct semaphore *sem)
-{
-        sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED(struct semaphore *sem)
-{
-        sema_init(sem, 0);
-}
-
-/*
- * special register calling convention
- */
-asmlinkage void __down_failed(void);
-asmlinkage int __down_interruptible_failed(void);
-asmlinkage int __down_trylock_failed(void);
-asmlinkage void __up_wakeup(void);
-
-extern void __down(struct semaphore * sem);
-extern int __down_interruptible(struct semaphore * sem);
-extern int __down_trylock(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down" is the actual routine that waits...
- */
-static inline void down(struct semaphore * sem)
-{
-        might_sleep();
-        __down_op(sem, __down_failed);
-}
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_interruptible" is the actual routine that waits...
- */
-static inline int down_interruptible (struct semaphore * sem)
-{
-        might_sleep();
-        return __down_op_ret(sem, __down_interruptible_failed);
-}
-
-static inline int down_trylock(struct semaphore *sem)
-{
-        return __down_op_ret(sem, __down_trylock_failed);
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
-        __up_op(sem, __up_wakeup);
-}
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-avr32/semaphore.h b/include/asm-avr32/semaphore.h
index feaf1d453386..d9b2034ed1d2 100644
--- a/include/asm-avr32/semaphore.h
+++ b/include/asm-avr32/semaphore.h
@@ -1,108 +1 @@
-/*
- * SMP- and interrupt-safe semaphores.
- *
- * Copyright (C) 2006 Atmel Corporation
- *
- * Based on include/asm-i386/semaphore.h
- * Copyright (C) 1996 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_AVR32_SEMAPHORE_H
-#define __ASM_AVR32_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
-        atomic_t count;
-        int sleepers;
-        wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
-        .count = ATOMIC_INIT(n), \
-        .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-        struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-        atomic_set(&sem->count, val);
-        sem->sleepers = 0;
-        init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-        sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-        sema_init(sem, 0);
-}
-
-void __down(struct semaphore * sem);
-int __down_interruptible(struct semaphore * sem);
-void __up(struct semaphore * sem);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/i386/kernel/semaphore.c
- */
-static inline void down(struct semaphore * sem)
-{
-        might_sleep();
-        if (unlikely(atomic_dec_return (&sem->count) < 0))
-                __down (sem);
-}
-
-/*
- * Interruptible try to acquire a semaphore. If we obtained
- * it, return zero. If we were interrupted, returns -EINTR
- */
-static inline int down_interruptible(struct semaphore * sem)
-{
-        int ret = 0;
-
-        might_sleep();
-        if (unlikely(atomic_dec_return (&sem->count) < 0))
-                ret = __down_interruptible (sem);
-        return ret;
-}
-
-/*
- * Non-blockingly attempt to down() a semaphore.
- * Returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore * sem)
-{
-        return atomic_dec_if_positive(&sem->count) < 0;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
-        if (unlikely(atomic_inc_return (&sem->count) <= 0))
-                __up (sem);
-}
-
-#endif /*__ASM_AVR32_SEMAPHORE_H */
+#include <linux/semaphore.h>
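avr32 is the only header here whose trylock needs no slow-path helper: atomic_dec_if_positive() decrements only when the result would stay non-negative and returns the would-be new value either way, so a negative return means nothing was taken. A sketch of the same check in isolation (illustrative; demo_trylock is a hypothetical name):

#include <asm/atomic.h>

/* 0 = acquired, 1 = busy -- the same convention as down_trylock() */
static inline int demo_trylock(atomic_t *count)
{
        return atomic_dec_if_positive(count) < 0;
}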
diff --git a/include/asm-blackfin/semaphore-helper.h b/include/asm-blackfin/semaphore-helper.h
deleted file mode 100644
index 9082b0dc3eb5..000000000000
--- a/include/asm-blackfin/semaphore-helper.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* Based on M68K version, Lineo Inc. May 2001 */
-
-#ifndef _BFIN_SEMAPHORE_HELPER_H
-#define _BFIN_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- */
-
-#include <asm/errno.h>
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore *sem)
-{
-        atomic_inc(&sem->waking);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
-        int ret;
-        unsigned long flags = 0;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        ret = 0;
-        if (atomic_read(&sem->waking) > 0) {
-                atomic_dec(&sem->waking);
-                ret = 1;
-        }
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-        return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- *      1       got the lock
- *      0       go to sleep
- *      -EINTR  interrupted
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
-                                                struct task_struct *tsk)
-{
-        int ret = 0;
-        unsigned long flags = 0;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        if (atomic_read(&sem->waking) > 0) {
-                atomic_dec(&sem->waking);
-                ret = 1;
-        } else if (signal_pending(tsk)) {
-                atomic_inc(&sem->count);
-                ret = -EINTR;
-        }
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-        return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- *      1       failed to lock
- *      0       got the lock
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
-        int ret = 1;
-        unsigned long flags = 0;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        if (atomic_read(&sem->waking) > 0) {
-                atomic_dec(&sem->waking);
-                ret = 0;
-        } else
-                atomic_inc(&sem->count);
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-        return ret;
-}
-
-#endif /* _BFIN_SEMAPHORE_HELPER_H */
diff --git a/include/asm-blackfin/semaphore.h b/include/asm-blackfin/semaphore.h
index 533f90fb2e4e..d9b2034ed1d2 100644
--- a/include/asm-blackfin/semaphore.h
+++ b/include/asm-blackfin/semaphore.h
@@ -1,105 +1 @@
-#ifndef _BFIN_SEMAPHORE_H
-#define _BFIN_SEMAPHORE_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-#include <asm/atomic.h>
-
-/*
- * Interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * BFIN version by akbar hussain Lineo Inc April 2001
- *
- */
-
-struct semaphore {
-        atomic_t count;
-        int sleepers;
-        wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
-        .count = ATOMIC_INIT(n), \
-        .sleepers = 0, \
-        .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-        struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init(struct semaphore *sem, int val)
-{
-        *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX(struct semaphore *sem)
-{
-        sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED(struct semaphore *sem)
-{
-        sema_init(sem, 0);
-}
-
-asmlinkage void __down(struct semaphore *sem);
-asmlinkage int __down_interruptible(struct semaphore *sem);
-asmlinkage int __down_trylock(struct semaphore *sem);
-asmlinkage void __up(struct semaphore *sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "down_failed" is a special asm handler that calls the C
- * routine that actually waits.
- */
-static inline void down(struct semaphore *sem)
-{
-        might_sleep();
-        if (atomic_dec_return(&sem->count) < 0)
-                __down(sem);
-}
-
-static inline int down_interruptible(struct semaphore *sem)
-{
-        int ret = 0;
-
-        might_sleep();
-        if (atomic_dec_return(&sem->count) < 0)
-                ret = __down_interruptible(sem);
-        return (ret);
-}
-
-static inline int down_trylock(struct semaphore *sem)
-{
-        int ret = 0;
-
-        if (atomic_dec_return(&sem->count) < 0)
-                ret = __down_trylock(sem);
-        return ret;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore *sem)
-{
-        if (atomic_inc_return(&sem->count) <= 0)
-                __up(sem);
-}
-
-#endif /* __ASSEMBLY__ */
-#endif /* _BFIN_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-cris/semaphore-helper.h b/include/asm-cris/semaphore-helper.h
deleted file mode 100644
index 27bfeca1b981..000000000000
--- a/include/asm-cris/semaphore-helper.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* $Id: semaphore-helper.h,v 1.3 2001/03/26 15:00:33 orjanf Exp $
- *
- * SMP- and interrupt-safe semaphores helper functions. Generic versions, no
- * optimizations whatsoever...
- *
- */
-
-#ifndef _ASM_SEMAPHORE_HELPER_H
-#define _ASM_SEMAPHORE_HELPER_H
-
-#include <asm/atomic.h>
-#include <linux/errno.h>
-
-#define read(a) ((a)->counter)
-#define inc(a) (((a)->counter)++)
-#define dec(a) (((a)->counter)--)
-
-#define count_inc(a) ((*(a))++)
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
-        atomic_inc(&sem->waking);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
-        unsigned long flags;
-        int ret = 0;
-
-        local_irq_save(flags);
-        if (read(&sem->waking) > 0) {
-                dec(&sem->waking);
-                ret = 1;
-        }
-        local_irq_restore(flags);
-        return ret;
-}
-
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
-                                                struct task_struct *tsk)
-{
-        int ret = 0;
-        unsigned long flags;
-
-        local_irq_save(flags);
-        if (read(&sem->waking) > 0) {
-                dec(&sem->waking);
-                ret = 1;
-        } else if (signal_pending(tsk)) {
-                inc(&sem->count);
-                ret = -EINTR;
-        }
-        local_irq_restore(flags);
-        return ret;
-}
-
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
-        int ret = 1;
-        unsigned long flags;
-
-        local_irq_save(flags);
-        if (read(&sem->waking) <= 0)
-                inc(&sem->count);
-        else {
-                dec(&sem->waking);
-                ret = 0;
-        }
-        local_irq_restore(flags);
-        return ret;
-}
-
-#endif /* _ASM_SEMAPHORE_HELPER_H */
-
-
@@ -1,133 +1 @@ | |||
1 | /* $Id: semaphore.h,v 1.3 2001/05/08 13:54:09 bjornw Exp $ */ | #include <linux/semaphore.h> | |
2 | |||
3 | /* On the i386 these are coded in asm, perhaps we should as well. Later.. */ | ||
4 | |||
5 | #ifndef _CRIS_SEMAPHORE_H | ||
6 | #define _CRIS_SEMAPHORE_H | ||
7 | |||
8 | #define RW_LOCK_BIAS 0x01000000 | ||
9 | |||
10 | #include <linux/wait.h> | ||
11 | #include <linux/spinlock.h> | ||
12 | #include <linux/rwsem.h> | ||
13 | |||
14 | #include <asm/system.h> | ||
15 | #include <asm/atomic.h> | ||
16 | |||
17 | /* | ||
18 | * CRIS semaphores, implemented in C-only so far. | ||
19 | */ | ||
20 | |||
21 | struct semaphore { | ||
22 | atomic_t count; | ||
23 | atomic_t waking; | ||
24 | wait_queue_head_t wait; | ||
25 | }; | ||
26 | |||
27 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
28 | { \ | ||
29 | .count = ATOMIC_INIT(n), \ | ||
30 | .waking = ATOMIC_INIT(0), \ | ||
31 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
32 | } | ||
33 | |||
34 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
35 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
36 | |||
37 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
38 | |||
39 | static inline void sema_init(struct semaphore *sem, int val) | ||
40 | { | ||
41 | *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); | ||
42 | } | ||
43 | |||
44 | static inline void init_MUTEX (struct semaphore *sem) | ||
45 | { | ||
46 | sema_init(sem, 1); | ||
47 | } | ||
48 | |||
49 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
50 | { | ||
51 | sema_init(sem, 0); | ||
52 | } | ||
53 | |||
54 | extern void __down(struct semaphore * sem); | ||
55 | extern int __down_interruptible(struct semaphore * sem); | ||
56 | extern int __down_trylock(struct semaphore * sem); | ||
57 | extern void __up(struct semaphore * sem); | ||
58 | |||
59 | /* notice - we probably can do cli/sti here instead of saving */ | ||
60 | |||
61 | static inline void down(struct semaphore * sem) | ||
62 | { | ||
63 | unsigned long flags; | ||
64 | int failed; | ||
65 | |||
66 | might_sleep(); | ||
67 | |||
68 | /* atomically decrement the semaphores count, and if its negative, we wait */ | ||
69 | cris_atomic_save(sem, flags); | ||
70 | failed = --(sem->count.counter) < 0; | ||
71 | cris_atomic_restore(sem, flags); | ||
72 | if(failed) { | ||
73 | __down(sem); | ||
74 | } | ||
75 | } | ||
76 | |||
77 | /* | ||
78 | * This version waits in interruptible state so that the waiting | ||
79 | * process can be killed. The down_interruptible routine | ||
80 | * returns negative for signalled and zero for semaphore acquired. | ||
81 | */ | ||
82 | |||
83 | static inline int down_interruptible(struct semaphore * sem) | ||
84 | { | ||
85 | unsigned long flags; | ||
86 | int failed; | ||
87 | |||
88 | might_sleep(); | ||
89 | |||
90 | /* atomically decrement the semaphores count, and if its negative, we wait */ | ||
91 | cris_atomic_save(sem, flags); | ||
92 | failed = --(sem->count.counter) < 0; | ||
93 | cris_atomic_restore(sem, flags); | ||
94 | if(failed) | ||
95 | failed = __down_interruptible(sem); | ||
96 | return(failed); | ||
97 | } | ||
98 | |||
99 | static inline int down_trylock(struct semaphore * sem) | ||
100 | { | ||
101 | unsigned long flags; | ||
102 | int failed; | ||
103 | |||
104 | cris_atomic_save(sem, flags); | ||
105 | failed = --(sem->count.counter) < 0; | ||
106 | cris_atomic_restore(sem, flags); | ||
107 | if(failed) | ||
108 | failed = __down_trylock(sem); | ||
109 | return(failed); | ||
110 | |||
111 | } | ||
112 | |||
113 | /* | ||
114 | * Note! This is subtle. We jump to wake people up only if | ||
115 | * the semaphore was negative (== somebody was waiting on it). | ||
116 | * The default case (no contention) will result in NO | ||
117 | * jumps for both down() and up(). | ||
118 | */ | ||
119 | static inline void up(struct semaphore * sem) | ||
120 | { | ||
121 | unsigned long flags; | ||
122 | int wakeup; | ||
123 | |||
124 | /* atomically increment the semaphores count, and if it was negative, we wake people */ | ||
125 | cris_atomic_save(sem, flags); | ||
126 | wakeup = ++(sem->count.counter) <= 0; | ||
127 | cris_atomic_restore(sem, flags); | ||
128 | if(wakeup) { | ||
129 | __up(sem); | ||
130 | } | ||
131 | } | ||
132 | |||
133 | #endif | ||
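CRIS has no atomic read-modify-write instructions to lean on here, so its "atomic" updates bracket a plain C decrement with cris_atomic_save()/cris_atomic_restore(), which boil down to interrupt masking (plus a lock on SMP). The same pattern on a generic uniprocessor would look like this sketch (illustrative; the demo_ name is hypothetical):

#include <linux/irqflags.h>

static int demo_dec_and_test_negative(int *counter)
{
        unsigned long flags;
        int failed;

        local_irq_save(flags);          /* no IRQ can interleave the RMW */
        failed = --(*counter) < 0;
        local_irq_restore(flags);
        return failed;
}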
diff --git a/include/asm-frv/semaphore.h b/include/asm-frv/semaphore.h
index d7aaa1911a1a..d9b2034ed1d2 100644
--- a/include/asm-frv/semaphore.h
+++ b/include/asm-frv/semaphore.h
@@ -1,155 +1 @@
-/* semaphore.h: semaphores for the FR-V
- *
- * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef _ASM_SEMAPHORE_H
-#define _ASM_SEMAPHORE_H
-
-#define RW_LOCK_BIAS 0x01000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-/*
- * the semaphore definition
- * - if counter is >0 then there are tokens available on the semaphore for down to collect
- * - if counter is <=0 then there are no spare tokens, and anyone that wants one must wait
- * - if wait_list is not empty, then there are processes waiting for the semaphore
- */
-struct semaphore {
-        unsigned counter;
-        spinlock_t wait_lock;
-        struct list_head wait_list;
-#ifdef CONFIG_DEBUG_SEMAPHORE
-        unsigned __magic;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-# define __SEM_DEBUG_INIT(name) , (long)&(name).__magic
-#else
-# define __SEM_DEBUG_INIT(name)
-#endif
-
-
-#define __SEMAPHORE_INITIALIZER(name,count) \
-{ count, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __SEM_DEBUG_INIT(name) }
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-        struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-        *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-        sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-        sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore *sem, unsigned long flags);
-extern int __down_interruptible(struct semaphore *sem, unsigned long flags);
-extern void __up(struct semaphore *sem);
-
-static inline void down(struct semaphore *sem)
-{
-        unsigned long flags;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-        CHECK_MAGIC(sem->__magic);
-#endif
-
-        spin_lock_irqsave(&sem->wait_lock, flags);
-        if (likely(sem->counter > 0)) {
-                sem->counter--;
-                spin_unlock_irqrestore(&sem->wait_lock, flags);
-        }
-        else {
-                __down(sem, flags);
-        }
-}
-
-static inline int down_interruptible(struct semaphore *sem)
-{
-        unsigned long flags;
-        int ret = 0;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-        CHECK_MAGIC(sem->__magic);
-#endif
-
-        spin_lock_irqsave(&sem->wait_lock, flags);
-        if (likely(sem->counter > 0)) {
-                sem->counter--;
-                spin_unlock_irqrestore(&sem->wait_lock, flags);
-        }
-        else {
-                ret = __down_interruptible(sem, flags);
-        }
-        return ret;
-}
-
-/*
- * non-blockingly attempt to down() a semaphore.
- * - returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore *sem)
-{
-        unsigned long flags;
-        int success = 0;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-        CHECK_MAGIC(sem->__magic);
-#endif
-
-        spin_lock_irqsave(&sem->wait_lock, flags);
-        if (sem->counter > 0) {
-                sem->counter--;
-                success = 1;
-        }
-        spin_unlock_irqrestore(&sem->wait_lock, flags);
-        return !success;
-}
-
-static inline void up(struct semaphore *sem)
-{
-        unsigned long flags;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-        CHECK_MAGIC(sem->__magic);
-#endif
-
-        spin_lock_irqsave(&sem->wait_lock, flags);
-        if (!list_empty(&sem->wait_list))
-                __up(sem);
-        else
-                sem->counter++;
-        spin_unlock_irqrestore(&sem->wait_lock, flags);
-}
-
-static inline int sem_getcount(struct semaphore *sem)
-{
-        return sem->counter;
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
+#include <linux/semaphore.h>
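Of all the headers removed here, frv's is closest to its replacement: the generic semaphore is likewise a plain counter plus a spinlock-guarded wait list rather than an atomic_t fast path. For comparison, the approximate shape of the shared structure everyone now gets (recalled from the 2.6.26-era linux/semaphore.h; treat this as a sketch, not a quotation):

#include <linux/spinlock.h>
#include <linux/list.h>

/* Approximate 2.6.26-era layout; check linux/semaphore.h for the real one. */
struct semaphore {
        spinlock_t lock;                /* guards count and wait_list */
        unsigned int count;             /* tokens available to down() */
        struct list_head wait_list;     /* sleepers, woken FIFO by up() */
};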
diff --git a/include/asm-h8300/semaphore-helper.h b/include/asm-h8300/semaphore-helper.h
deleted file mode 100644
index 4fea36be5fd8..000000000000
--- a/include/asm-h8300/semaphore-helper.h
+++ /dev/null
@@ -1,85 +0,0 @@
-#ifndef _H8300_SEMAPHORE_HELPER_H
-#define _H8300_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * based on
- * m68k version by Andreas Schwab
- */
-
-#include <linux/errno.h>
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
-        atomic_inc((atomic_t *)&sem->sleepers);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
-        int ret;
-        unsigned long flags;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        ret = 0;
-        if (sem->sleepers > 0) {
-                sem->sleepers--;
-                ret = 1;
-        }
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-        return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- *      1       got the lock
- *      0       go to sleep
- *      -EINTR  interrupted
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
-                                                struct task_struct *tsk)
-{
-        int ret;
-        unsigned long flags;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        ret = 0;
-        if (sem->sleepers > 0) {
-                sem->sleepers--;
-                ret = 1;
-        } else if (signal_pending(tsk)) {
-                atomic_inc(&sem->count);
-                ret = -EINTR;
-        }
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-        return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- *      1       failed to lock
- *      0       got the lock
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
-        int ret;
-        unsigned long flags;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        ret = 1;
-        if (sem->sleepers <= 0)
-                atomic_inc(&sem->count);
-        else {
-                sem->sleepers--;
-                ret = 0;
-        }
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-        return ret;
-}
-
-#endif
diff --git a/include/asm-h8300/semaphore.h b/include/asm-h8300/semaphore.h
index f3ffff83ff09..d9b2034ed1d2 100644
--- a/include/asm-h8300/semaphore.h
+++ b/include/asm-h8300/semaphore.h
@@ -1,190 +1 @@
-#ifndef _H8300_SEMAPHORE_H
-#define _H8300_SEMAPHORE_H
-
-#define RW_LOCK_BIAS 0x01000000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-/*
- * Interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * H8/300 version by Yoshinori Sato
- */
-
-
-struct semaphore {
-        atomic_t count;
-        int sleepers;
-        wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
-        .count = ATOMIC_INIT(n), \
-        .sleepers = 0, \
-        .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-        struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-        *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-        sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-        sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/m68k/lib/semaphore.S
- */
-static inline void down(struct semaphore * sem)
-{
-        register atomic_t *count asm("er0");
-
-        might_sleep();
-
-        count = &(sem->count);
-        __asm__ __volatile__(
-                "stc ccr,r3l\n\t"
-                "orc #0x80,ccr\n\t"
-                "mov.l %2, er1\n\t"
-                "dec.l #1,er1\n\t"
-                "mov.l er1,%0\n\t"
-                "bpl 1f\n\t"
-                "ldc r3l,ccr\n\t"
-                "mov.l %1,er0\n\t"
-                "jsr @___down\n\t"
-                "bra 2f\n"
-                "1:\n\t"
-                "ldc r3l,ccr\n"
-                "2:"
-                : "=m"(*count)
-                : "g"(sem),"m"(*count)
-                : "cc", "er1", "er2", "er3");
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
-        register atomic_t *count asm("er0");
-
-        might_sleep();
-
-        count = &(sem->count);
-        __asm__ __volatile__(
-                "stc ccr,r1l\n\t"
-                "orc #0x80,ccr\n\t"
-                "mov.l %3, er2\n\t"
-                "dec.l #1,er2\n\t"
-                "mov.l er2,%1\n\t"
-                "bpl 1f\n\t"
-                "ldc r1l,ccr\n\t"
-                "mov.l %2,er0\n\t"
-                "jsr @___down_interruptible\n\t"
-                "bra 2f\n"
-                "1:\n\t"
-                "ldc r1l,ccr\n\t"
-                "sub.l %0,%0\n\t"
-                "2:\n\t"
-                : "=r" (count),"=m" (*count)
-                : "g"(sem),"m"(*count)
-                : "cc", "er1", "er2", "er3");
-        return (int)count;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
-        register atomic_t *count asm("er0");
-
-        count = &(sem->count);
-        __asm__ __volatile__(
-                "stc ccr,r3l\n\t"
-                "orc #0x80,ccr\n\t"
-                "mov.l %3,er2\n\t"
-                "dec.l #1,er2\n\t"
-                "mov.l er2,%0\n\t"
-                "bpl 1f\n\t"
-                "ldc r3l,ccr\n\t"
-                "jmp @3f\n\t"
-                LOCK_SECTION_START(".align 2\n\t")
-                "3:\n\t"
-                "mov.l %2,er0\n\t"
-                "jsr @___down_trylock\n\t"
-                "jmp @2f\n\t"
-                LOCK_SECTION_END
-                "1:\n\t"
-                "ldc r3l,ccr\n\t"
-                "sub.l %1,%1\n"
-                "2:"
-                : "=m" (*count),"=r"(count)
-                : "g"(sem),"m"(*count)
-                : "cc", "er1","er2", "er3");
-        return (int)count;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
-        register atomic_t *count asm("er0");
-
-        count = &(sem->count);
-        __asm__ __volatile__(
-                "stc ccr,r3l\n\t"
-                "orc #0x80,ccr\n\t"
-                "mov.l %2,er1\n\t"
-                "inc.l #1,er1\n\t"
-                "mov.l er1,%0\n\t"
-                "ldc r3l,ccr\n\t"
-                "sub.l er2,er2\n\t"
-                "cmp.l er2,er1\n\t"
-                "bgt 1f\n\t"
-                "mov.l %1,er0\n\t"
-                "jsr @___up\n"
-                "1:"
-                : "=m"(*count)
-                : "g"(sem),"m"(*count)
-                : "cc", "er1", "er2", "er3");
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h
index d8393d11288d..d9b2034ed1d2 100644
--- a/include/asm-ia64/semaphore.h
+++ b/include/asm-ia64/semaphore.h
@@ -1,99 +1 @@
-#ifndef _ASM_IA64_SEMAPHORE_H
-#define _ASM_IA64_SEMAPHORE_H
-
-/*
- * Copyright (C) 1998-2000 Hewlett-Packard Co
- * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-#include <asm/atomic.h>
-
-struct semaphore {
-        atomic_t count;
-        int sleepers;
-        wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
-        .count = ATOMIC_INIT(n), \
-        .sleepers = 0, \
-        .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-        struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
-
-static inline void
-sema_init (struct semaphore *sem, int val)
-{
-        *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
-}
-
-static inline void
-init_MUTEX (struct semaphore *sem)
-{
-        sema_init(sem, 1);
-}
-
-static inline void
-init_MUTEX_LOCKED (struct semaphore *sem)
-{
-        sema_init(sem, 0);
-}
-
-extern void __down (struct semaphore * sem);
-extern int __down_interruptible (struct semaphore * sem);
-extern int __down_trylock (struct semaphore * sem);
-extern void __up (struct semaphore * sem);
-
-/*
- * Atomically decrement the semaphore's count. If it goes negative,
- * block the calling thread in the TASK_UNINTERRUPTIBLE state.
- */
-static inline void
-down (struct semaphore *sem)
-{
-        might_sleep();
-        if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
-                __down(sem);
-}
-
-/*
- * Atomically decrement the semaphore's count. If it goes negative,
- * block the calling thread in the TASK_INTERRUPTIBLE state.
- */
-static inline int
-down_interruptible (struct semaphore * sem)
-{
-        int ret = 0;
-
-        might_sleep();
-        if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
-                ret = __down_interruptible(sem);
-        return ret;
-}
-
-static inline int
-down_trylock (struct semaphore *sem)
-{
-        int ret = 0;
-
-        if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
-                ret = __down_trylock(sem);
-        return ret;
-}
-
-static inline void
-up (struct semaphore * sem)
-{
-        if (ia64_fetchadd(1, &sem->count.counter, rel) <= -1)
-                __up(sem);
-}
-
-#endif /* _ASM_IA64_SEMAPHORE_H */
+#include <linux/semaphore.h>
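ia64_fetchadd() returns the old value and carries an explicit ordering: acq on the down() side keeps the critical section from being reordered before the acquire, rel on up() makes prior stores visible before the count is released. A hedged mapping onto C11 atomics (illustrative only, not kernel code; the demo_ names are hypothetical):

#include <stdatomic.h>

void demo_down(atomic_int *count)
{
        /* old value < 1 means the count went negative: contended */
        if (atomic_fetch_sub_explicit(count, 1, memory_order_acquire) < 1)
                ;       /* slow path: block (elided) */
}

void demo_up(atomic_int *count)
{
        /* old value <= -1 means at least one sleeper remains to wake */
        if (atomic_fetch_add_explicit(count, 1, memory_order_release) <= -1)
                ;       /* slow path: wake (elided) */
}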
diff --git a/include/asm-m32r/semaphore.h b/include/asm-m32r/semaphore.h
index b5bf95a6f2b4..d9b2034ed1d2 100644
--- a/include/asm-m32r/semaphore.h
+++ b/include/asm-m32r/semaphore.h
@@ -1,144 +1 @@
-#ifndef _ASM_M32R_SEMAPHORE_H
-#define _ASM_M32R_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#ifdef __KERNEL__
-
-/*
- * SMP- and interrupt-safe semaphores..
- *
- * Copyright (C) 1996 Linus Torvalds
- * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
- */
-
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-#include <asm/assembler.h>
-#include <asm/system.h>
-#include <asm/atomic.h>
-
-struct semaphore {
-        atomic_t count;
-        int sleepers;
-        wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
-        .count = ATOMIC_INIT(n), \
-        .sleepers = 0, \
-        .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-        struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-        /*
-         * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
-         *
-         * i'd rather use the more flexible initialization above, but sadly
-         * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well.
-         */
-        atomic_set(&sem->count, val);
-        sem->sleepers = 0;
-        init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-        sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-        sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-/*
- * Atomically decrement the semaphore's count. If it goes negative,
- * block the calling thread in the TASK_UNINTERRUPTIBLE state.
- */
-static inline void down(struct semaphore * sem)
-{
-        might_sleep();
-        if (unlikely(atomic_dec_return(&sem->count) < 0))
-                __down(sem);
-}
-
-/*
- * Interruptible try to acquire a semaphore. If we obtained
- * it, return zero. If we were interrupted, returns -EINTR
- */
-static inline int down_interruptible(struct semaphore * sem)
-{
-        int result = 0;
-
-        might_sleep();
-        if (unlikely(atomic_dec_return(&sem->count) < 0))
-                result = __down_interruptible(sem);
-
-        return result;
-}
-
-/*
- * Non-blockingly attempt to down() a semaphore.
- * Returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore * sem)
-{
-        unsigned long flags;
-        long count;
-        int result = 0;
-
-        local_irq_save(flags);
-        __asm__ __volatile__ (
-                "# down_trylock \n\t"
-                DCACHE_CLEAR("%0", "r4", "%1")
-                M32R_LOCK" %0, @%1; \n\t"
-                "addi %0, #-1; \n\t"
-                M32R_UNLOCK" %0, @%1; \n\t"
-                : "=&r" (count)
-                : "r" (&sem->count)
-                : "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
-                , "r4"
-#endif /* CONFIG_CHIP_M32700_TS1 */
-        );
-        local_irq_restore(flags);
-
-        if (unlikely(count < 0))
-                result = __down_trylock(sem);
-
-        return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
-        if (unlikely(atomic_inc_return(&sem->count) <= 0))
-                __up(sem);
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_M32R_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/asm-m68k/semaphore-helper.h b/include/asm-m68k/semaphore-helper.h
deleted file mode 100644
index eef30ba0b499..000000000000
--- a/include/asm-m68k/semaphore-helper.h
+++ /dev/null
@@ -1,142 +0,0 @@
-#ifndef _M68K_SEMAPHORE_HELPER_H
-#define _M68K_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * m68k version by Andreas Schwab
- */
-
-#include <linux/errno.h>
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
-        atomic_inc(&sem->waking);
-}
-
-#ifndef CONFIG_RMW_INSNS
-extern spinlock_t semaphore_wake_lock;
-#endif
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
-        int ret;
-#ifndef CONFIG_RMW_INSNS
-        unsigned long flags;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        ret = 0;
-        if (atomic_read(&sem->waking) > 0) {
-                atomic_dec(&sem->waking);
-                ret = 1;
-        }
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-#else
-        int tmp1, tmp2;
-
-        __asm__ __volatile__
-          ("1: movel %1,%2\n"
-           " jle 2f\n"
-           " subql #1,%2\n"
-           " casl %1,%2,%3\n"
-           " jne 1b\n"
-           " moveq #1,%0\n"
-           "2:"
-           : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
-           : "m" (sem->waking), "0" (0), "1" (sem->waking));
-#endif
-
-        return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- *      1       got the lock
- *      0       go to sleep
- *      -EINTR  interrupted
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
-                                                struct task_struct *tsk)
-{
-        int ret;
-#ifndef CONFIG_RMW_INSNS
-        unsigned long flags;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        ret = 0;
-        if (atomic_read(&sem->waking) > 0) {
-                atomic_dec(&sem->waking);
-                ret = 1;
-        } else if (signal_pending(tsk)) {
-                atomic_inc(&sem->count);
-                ret = -EINTR;
-        }
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-#else
-        int tmp1, tmp2;
-
-        __asm__ __volatile__
-          ("1: movel %1,%2\n"
-           " jle 2f\n"
-           " subql #1,%2\n"
-           " casl %1,%2,%3\n"
-           " jne 1b\n"
-           " moveq #1,%0\n"
-           " jra %a4\n"
-           "2:"
-           : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
-           : "m" (sem->waking), "i" (&&next), "0" (0), "1" (sem->waking));
-        if (signal_pending(tsk)) {
-                atomic_inc(&sem->count);
-                ret = -EINTR;
-        }
-next:
-#endif
-
-        return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- *      1       failed to lock
- *      0       got the lock
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
-        int ret;
-#ifndef CONFIG_RMW_INSNS
-        unsigned long flags;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        ret = 1;
-        if (atomic_read(&sem->waking) > 0) {
-                atomic_dec(&sem->waking);
-                ret = 0;
-        } else
-                atomic_inc(&sem->count);
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-#else
-        int tmp1, tmp2;
-
-        __asm__ __volatile__
-          ("1: movel %1,%2\n"
-           " jle 2f\n"
-           " subql #1,%2\n"
-           " casl %1,%2,%3\n"
-           " jne 1b\n"
-           " moveq #0,%0\n"
-           "2:"
-           : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
-           : "m" (sem->waking), "0" (1), "1" (sem->waking));
-        if (ret)
-                atomic_inc(&sem->count);
-#endif
-        return ret;
-}
-
-#endif
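With CONFIG_RMW_INSNS, the casl loops above implement "decrement waking only if it is positive" without taking the global spinlock. The same logic expressed with the generic cmpxchg primitive, as a sketch (illustrative; the demo_ name is hypothetical):

#include <asm/atomic.h>

static int demo_waking_non_zero(atomic_t *waking)
{
        int old;

        do {
                old = atomic_read(waking);
                if (old <= 0)
                        return 0;       /* no token: keep sleeping */
        } while (atomic_cmpxchg(waking, old, old - 1) != old);
        return 1;                       /* consumed a wakeup token */
}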
diff --git a/include/asm-m68k/semaphore.h b/include/asm-m68k/semaphore.h index 64d6b119bb0a..d9b2034ed1d2 100644 --- a/include/asm-m68k/semaphore.h +++ b/include/asm-m68k/semaphore.h | |||
@@ -1,163 +1 @@ | |||
1 | #ifndef _M68K_SEMAPHORE_H | #include <linux/semaphore.h> | |
2 | #define _M68K_SEMAPHORE_H | ||
3 | |||
4 | #define RW_LOCK_BIAS 0x01000000 | ||
5 | |||
6 | #ifndef __ASSEMBLY__ | ||
7 | |||
8 | #include <linux/linkage.h> | ||
9 | #include <linux/wait.h> | ||
10 | #include <linux/spinlock.h> | ||
11 | #include <linux/rwsem.h> | ||
12 | #include <linux/stringify.h> | ||
13 | |||
14 | #include <asm/system.h> | ||
15 | #include <asm/atomic.h> | ||
16 | |||
17 | /* | ||
18 | * Interrupt-safe semaphores.. | ||
19 | * | ||
20 | * (C) Copyright 1996 Linus Torvalds | ||
21 | * | ||
22 | * m68k version by Andreas Schwab | ||
23 | */ | ||
24 | |||
25 | |||
26 | struct semaphore { | ||
27 | atomic_t count; | ||
28 | atomic_t waking; | ||
29 | wait_queue_head_t wait; | ||
30 | }; | ||
31 | |||
32 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
33 | { \ | ||
34 | .count = ATOMIC_INIT(n), \ | ||
35 | .waking = ATOMIC_INIT(0), \ | ||
36 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
37 | } | ||
38 | |||
39 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
40 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
41 | |||
42 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
43 | |||
44 | static inline void sema_init(struct semaphore *sem, int val) | ||
45 | { | ||
46 | *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val); | ||
47 | } | ||
48 | |||
49 | static inline void init_MUTEX (struct semaphore *sem) | ||
50 | { | ||
51 | sema_init(sem, 1); | ||
52 | } | ||
53 | |||
54 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
55 | { | ||
56 | sema_init(sem, 0); | ||
57 | } | ||
58 | |||
59 | asmlinkage void __down_failed(void /* special register calling convention */); | ||
60 | asmlinkage int __down_failed_interruptible(void /* params in registers */); | ||
61 | asmlinkage int __down_failed_trylock(void /* params in registers */); | ||
62 | asmlinkage void __up_wakeup(void /* special register calling convention */); | ||
63 | |||
64 | asmlinkage void __down(struct semaphore * sem); | ||
65 | asmlinkage int __down_interruptible(struct semaphore * sem); | ||
66 | asmlinkage int __down_trylock(struct semaphore * sem); | ||
67 | asmlinkage void __up(struct semaphore * sem); | ||
68 | |||
69 | /* | ||
70 | * This is ugly, but we want the default case to fall through. | ||
71 | * "down_failed" is a special asm handler that calls the C | ||
72 | * routine that actually waits. See arch/m68k/lib/semaphore.S | ||
73 | */ | ||
74 | static inline void down(struct semaphore *sem) | ||
75 | { | ||
76 | register struct semaphore *sem1 __asm__ ("%a1") = sem; | ||
77 | |||
78 | might_sleep(); | ||
79 | __asm__ __volatile__( | ||
80 | "| atomic down operation\n\t" | ||
81 | "subql #1,%0@\n\t" | ||
82 | "jmi 2f\n\t" | ||
83 | "1:\n" | ||
84 | LOCK_SECTION_START(".even\n\t") | ||
85 | "2:\tpea 1b\n\t" | ||
86 | "jbra __down_failed\n" | ||
87 | LOCK_SECTION_END | ||
88 | : /* no outputs */ | ||
89 | : "a" (sem1) | ||
90 | : "memory"); | ||
91 | } | ||
92 | |||
93 | static inline int down_interruptible(struct semaphore *sem) | ||
94 | { | ||
95 | register struct semaphore *sem1 __asm__ ("%a1") = sem; | ||
96 | register int result __asm__ ("%d0"); | ||
97 | |||
98 | might_sleep(); | ||
99 | __asm__ __volatile__( | ||
100 | "| atomic interruptible down operation\n\t" | ||
101 | "subql #1,%1@\n\t" | ||
102 | "jmi 2f\n\t" | ||
103 | "clrl %0\n" | ||
104 | "1:\n" | ||
105 | LOCK_SECTION_START(".even\n\t") | ||
106 | "2:\tpea 1b\n\t" | ||
107 | "jbra __down_failed_interruptible\n" | ||
108 | LOCK_SECTION_END | ||
109 | : "=d" (result) | ||
110 | : "a" (sem1) | ||
111 | : "memory"); | ||
112 | return result; | ||
113 | } | ||
114 | |||
115 | static inline int down_trylock(struct semaphore *sem) | ||
116 | { | ||
117 | register struct semaphore *sem1 __asm__ ("%a1") = sem; | ||
118 | register int result __asm__ ("%d0"); | ||
119 | |||
120 | __asm__ __volatile__( | ||
121 | "| atomic down trylock operation\n\t" | ||
122 | "subql #1,%1@\n\t" | ||
123 | "jmi 2f\n\t" | ||
124 | "clrl %0\n" | ||
125 | "1:\n" | ||
126 | LOCK_SECTION_START(".even\n\t") | ||
127 | "2:\tpea 1b\n\t" | ||
128 | "jbra __down_failed_trylock\n" | ||
129 | LOCK_SECTION_END | ||
130 | : "=d" (result) | ||
131 | : "a" (sem1) | ||
132 | : "memory"); | ||
133 | return result; | ||
134 | } | ||
135 | |||
136 | /* | ||
137 | * Note! This is subtle. We jump to wake people up only if | ||
138 | * the semaphore was negative (== somebody was waiting on it). | ||
139 | * The default case (no contention) will result in NO | ||
140 | * jumps for both down() and up(). | ||
141 | */ | ||
142 | static inline void up(struct semaphore *sem) | ||
143 | { | ||
144 | register struct semaphore *sem1 __asm__ ("%a1") = sem; | ||
145 | |||
146 | __asm__ __volatile__( | ||
147 | "| atomic up operation\n\t" | ||
148 | "addql #1,%0@\n\t" | ||
149 | "jle 2f\n" | ||
150 | "1:\n" | ||
151 | LOCK_SECTION_START(".even\n\t") | ||
152 | "2:\t" | ||
153 | "pea 1b\n\t" | ||
154 | "jbra __up_wakeup\n" | ||
155 | LOCK_SECTION_END | ||
156 | : /* no outputs */ | ||
157 | : "a" (sem1) | ||
158 | : "memory"); | ||
159 | } | ||
160 | |||
161 | #endif /* __ASSEMBLY__ */ | ||
162 | |||
163 | #endif | ||
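Stripped of the LOCK_SECTION plumbing, the asm above encodes one idea: the
uncontended path is a single atomic decrement with no taken branch, and only a
negative result falls into the out-of-line handler. A sketch in plain C11, with
__down_failed_sketch() as a hypothetical stand-in for the asm glue:

    #include <stdatomic.h>

    struct sem_sketch {
            atomic_int count;
    };

    void __down_failed_sketch(struct sem_sketch *sem);  /* out-of-line waiter */

    static inline void down_sketch(struct sem_sketch *sem)
    {
            /* subql #1,count; jmi <slow path> */
            if (atomic_fetch_sub(&sem->count, 1) - 1 < 0)
                    __down_failed_sketch(sem);
    }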
diff --git a/include/asm-m68knommu/semaphore-helper.h b/include/asm-m68knommu/semaphore-helper.h deleted file mode 100644 index 43da7bc483c7..000000000000 --- a/include/asm-m68knommu/semaphore-helper.h +++ /dev/null | |||
@@ -1,82 +0,0 @@ | |||
1 | #ifndef _M68K_SEMAPHORE_HELPER_H | ||
2 | #define _M68K_SEMAPHORE_HELPER_H | ||
3 | |||
4 | /* | ||
5 | * SMP- and interrupt-safe semaphores helper functions. | ||
6 | * | ||
7 | * (C) Copyright 1996 Linus Torvalds | ||
8 | * | ||
9 | * m68k version by Andreas Schwab | ||
10 | */ | ||
11 | |||
12 | |||
13 | /* | ||
14 | * These two _must_ execute atomically wrt each other. | ||
15 | */ | ||
16 | static inline void wake_one_more(struct semaphore * sem) | ||
17 | { | ||
18 | atomic_inc(&sem->waking); | ||
19 | } | ||
20 | |||
21 | static inline int waking_non_zero(struct semaphore *sem) | ||
22 | { | ||
23 | int ret; | ||
24 | unsigned long flags; | ||
25 | |||
26 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
27 | ret = 0; | ||
28 | if (atomic_read(&sem->waking) > 0) { | ||
29 | atomic_dec(&sem->waking); | ||
30 | ret = 1; | ||
31 | } | ||
32 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
33 | return ret; | ||
34 | } | ||
35 | |||
36 | /* | ||
37 | * waking_non_zero_interruptible: | ||
38 | * 1 got the lock | ||
39 | * 0 go to sleep | ||
40 | * -EINTR interrupted | ||
41 | */ | ||
42 | static inline int waking_non_zero_interruptible(struct semaphore *sem, | ||
43 | struct task_struct *tsk) | ||
44 | { | ||
45 | int ret; | ||
46 | unsigned long flags; | ||
47 | |||
48 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
49 | ret = 0; | ||
50 | if (atomic_read(&sem->waking) > 0) { | ||
51 | atomic_dec(&sem->waking); | ||
52 | ret = 1; | ||
53 | } else if (signal_pending(tsk)) { | ||
54 | atomic_inc(&sem->count); | ||
55 | ret = -EINTR; | ||
56 | } | ||
57 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
58 | return ret; | ||
59 | } | ||
60 | |||
61 | /* | ||
62 | * waking_non_zero_trylock: | ||
63 | * 1 failed to lock | ||
64 | * 0 got the lock | ||
65 | */ | ||
66 | static inline int waking_non_zero_trylock(struct semaphore *sem) | ||
67 | { | ||
68 | int ret; | ||
69 | unsigned long flags; | ||
70 | |||
71 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
72 | ret = 1; | ||
73 | if (atomic_read(&sem->waking) > 0) { | ||
74 | atomic_dec(&sem->waking); | ||
75 | ret = 0; | ||
76 | } else | ||
77 | atomic_inc(&sem->count); | ||
78 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
79 | return ret; | ||
80 | } | ||
81 | |||
82 | #endif | ||
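These helpers only make sense next to the sleep loop they serve: up() grants a
token through wake_one_more(), and every sleeper retries waking_non_zero()
until it claims one. Schematically, with sleep_until_woken() as a hypothetical
stand-in for the real add_wait_queue()/schedule() sequence:

    struct semaphore;                               /* as declared above */

    int waking_non_zero(struct semaphore *sem);     /* 1: claimed a token */
    void sleep_until_woken(struct semaphore *sem);  /* hypothetical block/wake */

    static void __down_sketch(struct semaphore *sem)
    {
            /* Losing the race just means going back to sleep; the token
             * stays available for whichever sleeper claims it first. */
            while (!waking_non_zero(sem))
                    sleep_until_woken(sem);
    }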
diff --git a/include/asm-m68knommu/semaphore.h b/include/asm-m68knommu/semaphore.h index 5779eb6c0689..d9b2034ed1d2 100644 --- a/include/asm-m68knommu/semaphore.h +++ b/include/asm-m68knommu/semaphore.h | |||
@@ -1,153 +1 @@ | |||
1 | #ifndef _M68K_SEMAPHORE_H | #include <linux/semaphore.h> | |
2 | #define _M68K_SEMAPHORE_H | ||
3 | |||
4 | #define RW_LOCK_BIAS 0x01000000 | ||
5 | |||
6 | #ifndef __ASSEMBLY__ | ||
7 | |||
8 | #include <linux/linkage.h> | ||
9 | #include <linux/wait.h> | ||
10 | #include <linux/spinlock.h> | ||
11 | #include <linux/rwsem.h> | ||
12 | |||
13 | #include <asm/system.h> | ||
14 | #include <asm/atomic.h> | ||
15 | |||
16 | /* | ||
17 | * Interrupt-safe semaphores.. | ||
18 | * | ||
19 | * (C) Copyright 1996 Linus Torvalds | ||
20 | * | ||
21 | * m68k version by Andreas Schwab | ||
22 | */ | ||
23 | |||
24 | |||
25 | struct semaphore { | ||
26 | atomic_t count; | ||
27 | atomic_t waking; | ||
28 | wait_queue_head_t wait; | ||
29 | }; | ||
30 | |||
31 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
32 | { \ | ||
33 | .count = ATOMIC_INIT(n), \ | ||
34 | .waking = ATOMIC_INIT(0), \ | ||
35 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
36 | } | ||
37 | |||
38 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
39 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
40 | |||
41 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
42 | |||
43 | static inline void sema_init (struct semaphore *sem, int val) | ||
44 | { | ||
45 | *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val); | ||
46 | } | ||
47 | |||
48 | static inline void init_MUTEX (struct semaphore *sem) | ||
49 | { | ||
50 | sema_init(sem, 1); | ||
51 | } | ||
52 | |||
53 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
54 | { | ||
55 | sema_init(sem, 0); | ||
56 | } | ||
57 | |||
58 | asmlinkage void __down_failed(void /* special register calling convention */); | ||
59 | asmlinkage int __down_failed_interruptible(void /* params in registers */); | ||
60 | asmlinkage int __down_failed_trylock(void /* params in registers */); | ||
61 | asmlinkage void __up_wakeup(void /* special register calling convention */); | ||
62 | |||
63 | asmlinkage void __down(struct semaphore * sem); | ||
64 | asmlinkage int __down_interruptible(struct semaphore * sem); | ||
65 | asmlinkage int __down_trylock(struct semaphore * sem); | ||
66 | asmlinkage void __up(struct semaphore * sem); | ||
67 | |||
68 | extern spinlock_t semaphore_wake_lock; | ||
69 | |||
70 | /* | ||
71 | * This is ugly, but we want the default case to fall through. | ||
72 | * "down_failed" is a special asm handler that calls the C | ||
73 | * routine that actually waits. See arch/m68k/lib/semaphore.S | ||
74 | */ | ||
75 | static inline void down(struct semaphore * sem) | ||
76 | { | ||
77 | might_sleep(); | ||
78 | __asm__ __volatile__( | ||
79 | "| atomic down operation\n\t" | ||
80 | "movel %0, %%a1\n\t" | ||
81 | "lea %%pc@(1f), %%a0\n\t" | ||
82 | "subql #1, %%a1@\n\t" | ||
83 | "jmi __down_failed\n" | ||
84 | "1:" | ||
85 | : /* no outputs */ | ||
86 | : "g" (sem) | ||
87 | : "cc", "%a0", "%a1", "memory"); | ||
88 | } | ||
89 | |||
90 | static inline int down_interruptible(struct semaphore * sem) | ||
91 | { | ||
92 | int ret; | ||
93 | |||
94 | might_sleep(); | ||
95 | __asm__ __volatile__( | ||
96 | "| atomic down operation\n\t" | ||
97 | "movel %1, %%a1\n\t" | ||
98 | "lea %%pc@(1f), %%a0\n\t" | ||
99 | "subql #1, %%a1@\n\t" | ||
100 | "jmi __down_failed_interruptible\n\t" | ||
101 | "clrl %%d0\n" | ||
102 | "1: movel %%d0, %0\n" | ||
103 | : "=d" (ret) | ||
104 | : "g" (sem) | ||
105 | : "cc", "%d0", "%a0", "%a1", "memory"); | ||
106 | return(ret); | ||
107 | } | ||
108 | |||
109 | static inline int down_trylock(struct semaphore * sem) | ||
110 | { | ||
111 | register struct semaphore *sem1 __asm__ ("%a1") = sem; | ||
112 | register int result __asm__ ("%d0"); | ||
113 | |||
114 | __asm__ __volatile__( | ||
115 | "| atomic down trylock operation\n\t" | ||
116 | "subql #1,%1@\n\t" | ||
117 | "jmi 2f\n\t" | ||
118 | "clrl %0\n" | ||
119 | "1:\n" | ||
120 | ".section .text.lock,\"ax\"\n" | ||
121 | ".even\n" | ||
122 | "2:\tpea 1b\n\t" | ||
123 | "jbra __down_failed_trylock\n" | ||
124 | ".previous" | ||
125 | : "=d" (result) | ||
126 | : "a" (sem1) | ||
127 | : "memory"); | ||
128 | return result; | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * Note! This is subtle. We jump to wake people up only if | ||
133 | * the semaphore was negative (== somebody was waiting on it). | ||
134 | * The default case (no contention) will result in NO | ||
135 | * jumps for both down() and up(). | ||
136 | */ | ||
137 | static inline void up(struct semaphore * sem) | ||
138 | { | ||
139 | __asm__ __volatile__( | ||
140 | "| atomic up operation\n\t" | ||
141 | "movel %0, %%a1\n\t" | ||
142 | "lea %%pc@(1f), %%a0\n\t" | ||
143 | "addql #1, %%a1@\n\t" | ||
144 | "jle __up_wakeup\n" | ||
145 | "1:" | ||
146 | : /* no outputs */ | ||
147 | : "g" (sem) | ||
148 | : "cc", "%a0", "%a1", "memory"); | ||
149 | } | ||
150 | |||
151 | #endif /* __ASSEMBLY__ */ | ||
152 | |||
153 | #endif | ||
diff --git a/include/asm-mips/semaphore.h b/include/asm-mips/semaphore.h index fdf8042b784b..d9b2034ed1d2 100644 --- a/include/asm-mips/semaphore.h +++ b/include/asm-mips/semaphore.h | |||
@@ -1,108 +1 @@ | |||
1 | /* | #include <linux/semaphore.h> | |
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1996 Linus Torvalds | ||
7 | * Copyright (C) 1998, 99, 2000, 01, 04 Ralf Baechle | ||
8 | * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc. | ||
9 | * Copyright (C) 2000, 01 MIPS Technologies, Inc. | ||
10 | * | ||
11 | * In all honesty, little of the old MIPS code is left - the PPC64 variant was | ||
12 | * just looking nice and portable so I ripped it. Credits to whoever wrote | ||
13 | * it. | ||
14 | */ | ||
15 | #ifndef __ASM_SEMAPHORE_H | ||
16 | #define __ASM_SEMAPHORE_H | ||
17 | |||
18 | /* | ||
19 | * Remove spinlock-based RW semaphores; RW semaphore definitions are | ||
20 | * now in rwsem.h and we use the generic lib/rwsem.c implementation. | ||
21 | * Rework semaphores to use atomic_dec_if_positive. | ||
22 | * -- Paul Mackerras (paulus@samba.org) | ||
23 | */ | ||
24 | |||
25 | #ifdef __KERNEL__ | ||
26 | |||
27 | #include <asm/atomic.h> | ||
28 | #include <asm/system.h> | ||
29 | #include <linux/wait.h> | ||
30 | #include <linux/rwsem.h> | ||
31 | |||
32 | struct semaphore { | ||
33 | /* | ||
34 | * Note that any negative value of count is equivalent to 0, | ||
35 | * but additionally indicates that some process(es) might be | ||
36 | * sleeping on `wait'. | ||
37 | */ | ||
38 | atomic_t count; | ||
39 | wait_queue_head_t wait; | ||
40 | }; | ||
41 | |||
42 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
43 | { \ | ||
44 | .count = ATOMIC_INIT(n), \ | ||
45 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
46 | } | ||
47 | |||
48 | #define __DECLARE_SEMAPHORE_GENERIC(name, count) \ | ||
49 | struct semaphore name = __SEMAPHORE_INITIALIZER(name, count) | ||
50 | |||
51 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) | ||
52 | |||
53 | static inline void sema_init(struct semaphore *sem, int val) | ||
54 | { | ||
55 | atomic_set(&sem->count, val); | ||
56 | init_waitqueue_head(&sem->wait); | ||
57 | } | ||
58 | |||
59 | static inline void init_MUTEX(struct semaphore *sem) | ||
60 | { | ||
61 | sema_init(sem, 1); | ||
62 | } | ||
63 | |||
64 | static inline void init_MUTEX_LOCKED(struct semaphore *sem) | ||
65 | { | ||
66 | sema_init(sem, 0); | ||
67 | } | ||
68 | |||
69 | extern void __down(struct semaphore * sem); | ||
70 | extern int __down_interruptible(struct semaphore * sem); | ||
71 | extern void __up(struct semaphore * sem); | ||
72 | |||
73 | static inline void down(struct semaphore * sem) | ||
74 | { | ||
75 | might_sleep(); | ||
76 | |||
77 | /* | ||
78 | * Try to get the semaphore, take the slow path if we fail. | ||
79 | */ | ||
80 | if (unlikely(atomic_dec_return(&sem->count) < 0)) | ||
81 | __down(sem); | ||
82 | } | ||
83 | |||
84 | static inline int down_interruptible(struct semaphore * sem) | ||
85 | { | ||
86 | int ret = 0; | ||
87 | |||
88 | might_sleep(); | ||
89 | |||
90 | if (unlikely(atomic_dec_return(&sem->count) < 0)) | ||
91 | ret = __down_interruptible(sem); | ||
92 | return ret; | ||
93 | } | ||
94 | |||
95 | static inline int down_trylock(struct semaphore * sem) | ||
96 | { | ||
97 | return atomic_dec_if_positive(&sem->count) < 0; | ||
98 | } | ||
99 | |||
100 | static inline void up(struct semaphore * sem) | ||
101 | { | ||
102 | if (unlikely(atomic_inc_return(&sem->count) <= 0)) | ||
103 | __up(sem); | ||
104 | } | ||
105 | |||
106 | #endif /* __KERNEL__ */ | ||
107 | |||
108 | #endif /* __ASM_SEMAPHORE_H */ | ||
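down_trylock() above leans entirely on atomic_dec_if_positive(): decrement only
when the result stays non-negative, and return the (possibly negative) would-be
result for the caller to test. A portable C11 sketch of that primitive, not the
MIPS LL/SC code:

    #include <stdatomic.h>

    /* Decrement *v only if the result would be >= 0; always return the
     * value the decrement produced (or would have produced). */
    static int atomic_dec_if_positive_sketch(atomic_int *v)
    {
            int old = atomic_load(v);

            while (old > 0) {
                    if (atomic_compare_exchange_weak(v, &old, old - 1))
                            return old - 1;         /* decremented */
            }
            return old - 1;     /* < 0: the trylock reports failure */
    }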
diff --git a/include/asm-mn10300/semaphore.h b/include/asm-mn10300/semaphore.h index 5a9e1ad0b253..d9b2034ed1d2 100644 --- a/include/asm-mn10300/semaphore.h +++ b/include/asm-mn10300/semaphore.h | |||
@@ -1,169 +1 @@ | |||
1 | /* MN10300 Semaphores | #include <linux/semaphore.h> | |
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | #ifndef _ASM_SEMAPHORE_H | ||
12 | #define _ASM_SEMAPHORE_H | ||
13 | |||
14 | #ifndef __ASSEMBLY__ | ||
15 | |||
16 | #include <linux/linkage.h> | ||
17 | #include <linux/wait.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | #include <linux/rwsem.h> | ||
20 | |||
21 | #define SEMAPHORE_DEBUG 0 | ||
22 | |||
23 | /* | ||
24 | * the semaphore definition | ||
25 | * - if count is >0 then there are tokens available on the semaphore for down | ||
26 | * to collect | ||
27 | * - if count is <=0 then there are no spare tokens, and anyone that wants one | ||
28 | * must wait | ||
29 | * - if wait_list is not empty, then there are processes waiting for the | ||
30 | * semaphore | ||
31 | */ | ||
32 | struct semaphore { | ||
33 | atomic_t count; /* it's not really atomic, it's | ||
34 | * just that certain modules | ||
35 | * expect to be able to access | ||
36 | * it directly */ | ||
37 | spinlock_t wait_lock; | ||
38 | struct list_head wait_list; | ||
39 | #if SEMAPHORE_DEBUG | ||
40 | unsigned __magic; | ||
41 | #endif | ||
42 | }; | ||
43 | |||
44 | #if SEMAPHORE_DEBUG | ||
45 | # define __SEM_DEBUG_INIT(name) , (long)&(name).__magic | ||
46 | #else | ||
47 | # define __SEM_DEBUG_INIT(name) | ||
48 | #endif | ||
49 | |||
50 | |||
51 | #define __SEMAPHORE_INITIALIZER(name, init_count) \ | ||
52 | { \ | ||
53 | .count = ATOMIC_INIT(init_count), \ | ||
54 | .wait_lock = __SPIN_LOCK_UNLOCKED((name).wait_lock), \ | ||
55 | .wait_list = LIST_HEAD_INIT((name).wait_list) \ | ||
56 | __SEM_DEBUG_INIT(name) \ | ||
57 | } | ||
58 | |||
59 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
60 | struct semaphore name = __SEMAPHORE_INITIALIZER(name, count) | ||
61 | |||
62 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) | ||
63 | #define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0) | ||
64 | |||
65 | static inline void sema_init(struct semaphore *sem, int val) | ||
66 | { | ||
67 | *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val); | ||
68 | } | ||
69 | |||
70 | static inline void init_MUTEX(struct semaphore *sem) | ||
71 | { | ||
72 | sema_init(sem, 1); | ||
73 | } | ||
74 | |||
75 | static inline void init_MUTEX_LOCKED(struct semaphore *sem) | ||
76 | { | ||
77 | sema_init(sem, 0); | ||
78 | } | ||
79 | |||
80 | extern void __down(struct semaphore *sem, unsigned long flags); | ||
81 | extern int __down_interruptible(struct semaphore *sem, unsigned long flags); | ||
82 | extern void __up(struct semaphore *sem); | ||
83 | |||
84 | static inline void down(struct semaphore *sem) | ||
85 | { | ||
86 | unsigned long flags; | ||
87 | int count; | ||
88 | |||
89 | #if SEMAPHORE_DEBUG | ||
90 | CHECK_MAGIC(sem->__magic); | ||
91 | #endif | ||
92 | |||
93 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
94 | count = atomic_read(&sem->count); | ||
95 | if (likely(count > 0)) { | ||
96 | atomic_set(&sem->count, count - 1); | ||
97 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
98 | } else { | ||
99 | __down(sem, flags); | ||
100 | } | ||
101 | } | ||
102 | |||
103 | static inline int down_interruptible(struct semaphore *sem) | ||
104 | { | ||
105 | unsigned long flags; | ||
106 | int count, ret = 0; | ||
107 | |||
108 | #if SEMAPHORE_DEBUG | ||
109 | CHECK_MAGIC(sem->__magic); | ||
110 | #endif | ||
111 | |||
112 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
113 | count = atomic_read(&sem->count); | ||
114 | if (likely(count > 0)) { | ||
115 | atomic_set(&sem->count, count - 1); | ||
116 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
117 | } else { | ||
118 | ret = __down_interruptible(sem, flags); | ||
119 | } | ||
120 | return ret; | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * attempt to down() a semaphore without blocking. | ||
125 | * - returns zero if we acquired it | ||
126 | */ | ||
127 | static inline int down_trylock(struct semaphore *sem) | ||
128 | { | ||
129 | unsigned long flags; | ||
130 | int count, success = 0; | ||
131 | |||
132 | #if SEMAPHORE_DEBUG | ||
133 | CHECK_MAGIC(sem->__magic); | ||
134 | #endif | ||
135 | |||
136 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
137 | count = atomic_read(&sem->count); | ||
138 | if (likely(count > 0)) { | ||
139 | atomic_set(&sem->count, count - 1); | ||
140 | success = 1; | ||
141 | } | ||
142 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
143 | return !success; | ||
144 | } | ||
145 | |||
146 | static inline void up(struct semaphore *sem) | ||
147 | { | ||
148 | unsigned long flags; | ||
149 | |||
150 | #if SEMAPHORE_DEBUG | ||
151 | CHECK_MAGIC(sem->__magic); | ||
152 | #endif | ||
153 | |||
154 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
155 | if (!list_empty(&sem->wait_list)) | ||
156 | __up(sem); | ||
157 | else | ||
158 | atomic_set(&sem->count, atomic_read(&sem->count) + 1); | ||
159 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
160 | } | ||
161 | |||
162 | static inline int sem_getcount(struct semaphore *sem) | ||
163 | { | ||
164 | return atomic_read(&sem->count); | ||
165 | } | ||
166 | |||
167 | #endif /* __ASSEMBLY__ */ | ||
168 | |||
169 | #endif | ||
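Unlike the counter-only variants elsewhere in this patch, this layout keeps an
explicit wait_list, and its up() hands the token straight to the head waiter
instead of letting count go positive over a queued task. A rough sketch of that
hand-off, with the list_head bookkeeping and the task wakeup elided:

    #include <stdbool.h>

    struct waiter {
            struct waiter *next;
            bool granted;           /* token passed directly by up() */
    };

    struct sem_sketch {
            int count;              /* guarded by wait_lock, as above */
            struct waiter *wait_list;
    };

    /* Called with wait_lock held and wait_list non-empty: because count
     * never goes positive while somebody is queued, a late down() cannot
     * steal the token from the waiter at the head of the list. */
    static void __up_sketch(struct sem_sketch *sem)
    {
            struct waiter *w = sem->wait_list;

            sem->wait_list = w->next;
            w->granted = true;      /* the real code also wakes the task */
    }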
diff --git a/include/asm-parisc/semaphore-helper.h b/include/asm-parisc/semaphore-helper.h deleted file mode 100644 index 387f7c1277a2..000000000000 --- a/include/asm-parisc/semaphore-helper.h +++ /dev/null | |||
@@ -1,89 +0,0 @@ | |||
1 | #ifndef _ASM_PARISC_SEMAPHORE_HELPER_H | ||
2 | #define _ASM_PARISC_SEMAPHORE_HELPER_H | ||
3 | |||
4 | /* | ||
5 | * SMP- and interrupt-safe semaphores helper functions. | ||
6 | * | ||
7 | * (C) Copyright 1996 Linus Torvalds | ||
8 | * (C) Copyright 1999 Andrea Arcangeli | ||
9 | */ | ||
10 | |||
11 | /* | ||
12 | * These two _must_ execute atomically wrt each other. | ||
13 | * | ||
14 | * This is trivially done with load_locked/store_cond, | ||
15 | * which we have. Let the rest of the losers suck eggs. | ||
16 | */ | ||
17 | static __inline__ void wake_one_more(struct semaphore * sem) | ||
18 | { | ||
19 | atomic_inc((atomic_t *)&sem->waking); | ||
20 | } | ||
21 | |||
22 | static __inline__ int waking_non_zero(struct semaphore *sem) | ||
23 | { | ||
24 | unsigned long flags; | ||
25 | int ret = 0; | ||
26 | |||
27 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
28 | if (sem->waking > 0) { | ||
29 | sem->waking--; | ||
30 | ret = 1; | ||
31 | } | ||
32 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
33 | return ret; | ||
34 | } | ||
35 | |||
36 | /* | ||
37 | * waking_non_zero_interruptible: | ||
38 | * 1 got the lock | ||
39 | * 0 go to sleep | ||
40 | * -EINTR interrupted | ||
41 | * | ||
42 | * We must undo the sem->count down_interruptible() increment while we are | ||
43 | * protected by the spinlock, in order to make this atomic_inc() atomic with | ||
44 | * the atomic_read() in wake_one_more(); otherwise we can race. -arca | ||
45 | */ | ||
46 | static __inline__ int waking_non_zero_interruptible(struct semaphore *sem, | ||
47 | struct task_struct *tsk) | ||
48 | { | ||
49 | unsigned long flags; | ||
50 | int ret = 0; | ||
51 | |||
52 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
53 | if (sem->waking > 0) { | ||
54 | sem->waking--; | ||
55 | ret = 1; | ||
56 | } else if (signal_pending(tsk)) { | ||
57 | atomic_inc(&sem->count); | ||
58 | ret = -EINTR; | ||
59 | } | ||
60 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
61 | return ret; | ||
62 | } | ||
63 | |||
64 | /* | ||
65 | * waking_non_zero_trylock: | ||
66 | * 1 failed to lock | ||
67 | * 0 got the lock | ||
68 | * | ||
69 | * We must undo the sem->count down_trylock() increment while we are | ||
70 | * protected by the spinlock, in order to make this atomic_inc() atomic with | ||
71 | * the atomic_read() in wake_one_more(); otherwise we can race. -arca | ||
72 | */ | ||
73 | static __inline__ int waking_non_zero_trylock(struct semaphore *sem) | ||
74 | { | ||
75 | unsigned long flags; | ||
76 | int ret = 1; | ||
77 | |||
78 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
79 | if (sem->waking <= 0) | ||
80 | atomic_inc(&sem->count); | ||
81 | else { | ||
82 | sem->waking--; | ||
83 | ret = 0; | ||
84 | } | ||
85 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
86 | return ret; | ||
87 | } | ||
88 | |||
89 | #endif /* _ASM_PARISC_SEMAPHORE_HELPER_H */ | ||
diff --git a/include/asm-parisc/semaphore.h b/include/asm-parisc/semaphore.h index a16271cdc748..d9b2034ed1d2 100644 --- a/include/asm-parisc/semaphore.h +++ b/include/asm-parisc/semaphore.h | |||
@@ -1,145 +1 @@ | |||
1 | /* SMP- and interrupt-safe semaphores. | #include <linux/semaphore.h> | |
2 | * PA-RISC version by Matthew Wilcox | ||
3 | * | ||
4 | * Linux/PA-RISC Project (http://www.parisc-linux.org/) | ||
5 | * Copyright (C) 1996 Linus Torvalds | ||
6 | * Copyright (C) 1999-2001 Matthew Wilcox < willy at debian d0T org > | ||
7 | * Copyright (C) 2000 Grant Grundler < grundler a debian org > | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
22 | */ | ||
23 | |||
24 | #ifndef _ASM_PARISC_SEMAPHORE_H | ||
25 | #define _ASM_PARISC_SEMAPHORE_H | ||
26 | |||
27 | #include <linux/spinlock.h> | ||
28 | #include <linux/wait.h> | ||
29 | #include <linux/rwsem.h> | ||
30 | |||
31 | #include <asm/system.h> | ||
32 | |||
33 | /* | ||
34 | * The `count' is initialised to the number of people who are allowed to | ||
35 | * take the lock. (Normally we want a mutex, so this is `1'). If | ||
36 | * `count' is positive, the lock can be taken. If it's 0, no-one is | ||
37 | * waiting on it. If it's -1, at least one task is waiting. | ||
38 | */ | ||
39 | struct semaphore { | ||
40 | spinlock_t sentry; | ||
41 | int count; | ||
42 | wait_queue_head_t wait; | ||
43 | }; | ||
44 | |||
45 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
46 | { \ | ||
47 | .sentry = SPIN_LOCK_UNLOCKED, \ | ||
48 | .count = n, \ | ||
49 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
50 | } | ||
51 | |||
52 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
53 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
54 | |||
55 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
56 | |||
57 | static inline void sema_init (struct semaphore *sem, int val) | ||
58 | { | ||
59 | *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); | ||
60 | } | ||
61 | |||
62 | static inline void init_MUTEX (struct semaphore *sem) | ||
63 | { | ||
64 | sema_init(sem, 1); | ||
65 | } | ||
66 | |||
67 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
68 | { | ||
69 | sema_init(sem, 0); | ||
70 | } | ||
71 | |||
72 | static inline int sem_getcount(struct semaphore *sem) | ||
73 | { | ||
74 | return sem->count; | ||
75 | } | ||
76 | |||
77 | asmlinkage void __down(struct semaphore * sem); | ||
78 | asmlinkage int __down_interruptible(struct semaphore * sem); | ||
79 | asmlinkage void __up(struct semaphore * sem); | ||
80 | |||
81 | /* Semaphores can be `tried' from irq context. So we have to disable | ||
82 | * interrupts while we're messing with the semaphore. Sorry. | ||
83 | */ | ||
84 | |||
85 | static inline void down(struct semaphore * sem) | ||
86 | { | ||
87 | might_sleep(); | ||
88 | spin_lock_irq(&sem->sentry); | ||
89 | if (sem->count > 0) { | ||
90 | sem->count--; | ||
91 | } else { | ||
92 | __down(sem); | ||
93 | } | ||
94 | spin_unlock_irq(&sem->sentry); | ||
95 | } | ||
96 | |||
97 | static inline int down_interruptible(struct semaphore * sem) | ||
98 | { | ||
99 | int ret = 0; | ||
100 | might_sleep(); | ||
101 | spin_lock_irq(&sem->sentry); | ||
102 | if (sem->count > 0) { | ||
103 | sem->count--; | ||
104 | } else { | ||
105 | ret = __down_interruptible(sem); | ||
106 | } | ||
107 | spin_unlock_irq(&sem->sentry); | ||
108 | return ret; | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * down_trylock returns 0 on success, 1 if we failed to get the lock. | ||
113 | * May not sleep, but must preserve irq state | ||
114 | */ | ||
115 | static inline int down_trylock(struct semaphore * sem) | ||
116 | { | ||
117 | unsigned long flags; | ||
118 | int count; | ||
119 | |||
120 | spin_lock_irqsave(&sem->sentry, flags); | ||
121 | count = sem->count - 1; | ||
122 | if (count >= 0) | ||
123 | sem->count = count; | ||
124 | spin_unlock_irqrestore(&sem->sentry, flags); | ||
125 | return (count < 0); | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * Note! This is subtle. We jump to wake people up only if | ||
130 | * the semaphore was negative (== somebody was waiting on it). | ||
131 | */ | ||
132 | static inline void up(struct semaphore * sem) | ||
133 | { | ||
134 | unsigned long flags; | ||
135 | |||
136 | spin_lock_irqsave(&sem->sentry, flags); | ||
137 | if (sem->count < 0) { | ||
138 | __up(sem); | ||
139 | } else { | ||
140 | sem->count++; | ||
141 | } | ||
142 | spin_unlock_irqrestore(&sem->sentry, flags); | ||
143 | } | ||
144 | |||
145 | #endif /* _ASM_PARISC_SEMAPHORE_H */ | ||
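Whatever the implementation, every header in this patch exposes the same
calling convention, which is what lets them all collapse into the one-line
include: down()/up() for sleeping acquire/release and down_trylock() returning
0 on success. A hypothetical caller (probe_sem and do_probe are made-up names):

    #include <linux/errno.h>
    #include <linux/semaphore.h>    /* the include each header reduces to */

    static DECLARE_MUTEX(probe_sem);        /* binary semaphore, count = 1 */

    static int do_probe(void)
    {
            if (down_trylock(&probe_sem))
                    return -EBUSY;          /* nonzero: failed to take it */
            /* ... hardware access serialized by probe_sem ... */
            up(&probe_sem);
            return 0;
    }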
diff --git a/include/asm-powerpc/semaphore.h b/include/asm-powerpc/semaphore.h index 48dd32e07749..d9b2034ed1d2 100644 --- a/include/asm-powerpc/semaphore.h +++ b/include/asm-powerpc/semaphore.h | |||
@@ -1,94 +1 @@ | |||
1 | #ifndef _ASM_POWERPC_SEMAPHORE_H | #include <linux/semaphore.h> | |
2 | #define _ASM_POWERPC_SEMAPHORE_H | ||
3 | |||
4 | /* | ||
5 | * Remove spinlock-based RW semaphores; RW semaphore definitions are | ||
6 | * now in rwsem.h and we use the generic lib/rwsem.c implementation. | ||
7 | * Rework semaphores to use atomic_dec_if_positive. | ||
8 | * -- Paul Mackerras (paulus@samba.org) | ||
9 | */ | ||
10 | |||
11 | #ifdef __KERNEL__ | ||
12 | |||
13 | #include <asm/atomic.h> | ||
14 | #include <asm/system.h> | ||
15 | #include <linux/wait.h> | ||
16 | #include <linux/rwsem.h> | ||
17 | |||
18 | struct semaphore { | ||
19 | /* | ||
20 | * Note that any negative value of count is equivalent to 0, | ||
21 | * but additionally indicates that some process(es) might be | ||
22 | * sleeping on `wait'. | ||
23 | */ | ||
24 | atomic_t count; | ||
25 | wait_queue_head_t wait; | ||
26 | }; | ||
27 | |||
28 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
29 | { \ | ||
30 | .count = ATOMIC_INIT(n), \ | ||
31 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
32 | } | ||
33 | |||
34 | #define __DECLARE_SEMAPHORE_GENERIC(name, count) \ | ||
35 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
36 | |||
37 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) | ||
38 | |||
39 | static inline void sema_init (struct semaphore *sem, int val) | ||
40 | { | ||
41 | atomic_set(&sem->count, val); | ||
42 | init_waitqueue_head(&sem->wait); | ||
43 | } | ||
44 | |||
45 | static inline void init_MUTEX (struct semaphore *sem) | ||
46 | { | ||
47 | sema_init(sem, 1); | ||
48 | } | ||
49 | |||
50 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
51 | { | ||
52 | sema_init(sem, 0); | ||
53 | } | ||
54 | |||
55 | extern void __down(struct semaphore * sem); | ||
56 | extern int __down_interruptible(struct semaphore * sem); | ||
57 | extern void __up(struct semaphore * sem); | ||
58 | |||
59 | static inline void down(struct semaphore * sem) | ||
60 | { | ||
61 | might_sleep(); | ||
62 | |||
63 | /* | ||
64 | * Try to get the semaphore, take the slow path if we fail. | ||
65 | */ | ||
66 | if (unlikely(atomic_dec_return(&sem->count) < 0)) | ||
67 | __down(sem); | ||
68 | } | ||
69 | |||
70 | static inline int down_interruptible(struct semaphore * sem) | ||
71 | { | ||
72 | int ret = 0; | ||
73 | |||
74 | might_sleep(); | ||
75 | |||
76 | if (unlikely(atomic_dec_return(&sem->count) < 0)) | ||
77 | ret = __down_interruptible(sem); | ||
78 | return ret; | ||
79 | } | ||
80 | |||
81 | static inline int down_trylock(struct semaphore * sem) | ||
82 | { | ||
83 | return atomic_dec_if_positive(&sem->count) < 0; | ||
84 | } | ||
85 | |||
86 | static inline void up(struct semaphore * sem) | ||
87 | { | ||
88 | if (unlikely(atomic_inc_return(&sem->count) <= 0)) | ||
89 | __up(sem); | ||
90 | } | ||
91 | |||
92 | #endif /* __KERNEL__ */ | ||
93 | |||
94 | #endif /* _ASM_POWERPC_SEMAPHORE_H */ | ||
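The up() fast path above mirrors down(): if the count is still <= 0 after the
increment, at least one task must have slept on the decrement side, and only
then is the wakeup slow path taken. As a C11 sketch, with __up_slow_sketch()
standing in for __up():

    #include <stdatomic.h>

    struct psem_sketch {
            atomic_int count;
    };

    void __up_slow_sketch(struct psem_sketch *sem);     /* wakes one sleeper */

    static inline void up_sketch(struct psem_sketch *sem)
    {
            /* old + 1 <= 0: a sleeper is accounted for in the count */
            if (atomic_fetch_add(&sem->count, 1) + 1 <= 0)
                    __up_slow_sketch(sem);
    }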
diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h index 123b557c3ff4..0818ecd30ca6 100644 --- a/include/asm-s390/cio.h +++ b/include/asm-s390/cio.h | |||
@@ -397,6 +397,10 @@ struct cio_iplinfo { | |||
397 | 397 | ||
398 | extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo); | 398 | extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo); |
399 | 399 | ||
400 | /* Function from drivers/s390/cio/chsc.c */ | ||
401 | int chsc_sstpc(void *page, unsigned int op, u16 ctrl); | ||
402 | int chsc_sstpi(void *page, void *result, size_t size); | ||
403 | |||
400 | #endif | 404 | #endif |
401 | 405 | ||
402 | #endif | 406 | #endif |
diff --git a/include/asm-s390/cpu.h b/include/asm-s390/cpu.h index 352dde194f3c..e5a6a9ba3adf 100644 --- a/include/asm-s390/cpu.h +++ b/include/asm-s390/cpu.h | |||
@@ -22,4 +22,12 @@ struct s390_idle_data { | |||
22 | 22 | ||
23 | DECLARE_PER_CPU(struct s390_idle_data, s390_idle); | 23 | DECLARE_PER_CPU(struct s390_idle_data, s390_idle); |
24 | 24 | ||
25 | void s390_idle_leave(void); | ||
26 | |||
27 | static inline void s390_idle_check(void) | ||
28 | { | ||
29 | if ((&__get_cpu_var(s390_idle))->in_idle) | ||
30 | s390_idle_leave(); | ||
31 | } | ||
32 | |||
25 | #endif /* _ASM_S390_CPU_H_ */ | 33 | #endif /* _ASM_S390_CPU_H_ */ |
diff --git a/include/asm-s390/debug.h b/include/asm-s390/debug.h index c00dd2b3dc50..335baf4fc64f 100644 --- a/include/asm-s390/debug.h +++ b/include/asm-s390/debug.h | |||
@@ -73,6 +73,7 @@ typedef struct debug_info { | |||
73 | struct dentry* debugfs_entries[DEBUG_MAX_VIEWS]; | 73 | struct dentry* debugfs_entries[DEBUG_MAX_VIEWS]; |
74 | struct debug_view* views[DEBUG_MAX_VIEWS]; | 74 | struct debug_view* views[DEBUG_MAX_VIEWS]; |
75 | char name[DEBUG_MAX_NAME_LEN]; | 75 | char name[DEBUG_MAX_NAME_LEN]; |
76 | mode_t mode; | ||
76 | } debug_info_t; | 77 | } debug_info_t; |
77 | 78 | ||
78 | typedef int (debug_header_proc_t) (debug_info_t* id, | 79 | typedef int (debug_header_proc_t) (debug_info_t* id, |
@@ -122,6 +123,10 @@ debug_entry_t* debug_exception_common(debug_info_t* id, int level, | |||
122 | debug_info_t* debug_register(char* name, int pages, int nr_areas, | 123 | debug_info_t* debug_register(char* name, int pages, int nr_areas, |
123 | int buf_size); | 124 | int buf_size); |
124 | 125 | ||
126 | debug_info_t *debug_register_mode(char *name, int pages, int nr_areas, | ||
127 | int buf_size, mode_t mode, uid_t uid, | ||
128 | gid_t gid); | ||
129 | |||
125 | void debug_unregister(debug_info_t* id); | 130 | void debug_unregister(debug_info_t* id); |
126 | 131 | ||
127 | void debug_set_level(debug_info_t* id, int new_level); | 132 | void debug_set_level(debug_info_t* id, int new_level); |
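The new debug_register_mode() differs from debug_register() only in letting the
caller pick the mode and ownership of the debugfs entries at registration time.
A hypothetical call site (the driver name and sizes are made up):

    #include <asm/debug.h>
    #include <linux/errno.h>
    #include <linux/stat.h>

    static debug_info_t *my_dbf;

    static int my_debug_setup(void)
    {
            /* 4 pages, 1 area, 16-byte entries; the debugfs files come up
             * readable and writable by root only instead of the default */
            my_dbf = debug_register_mode("mydrv", 4, 1, 16,
                                         S_IRUSR | S_IWUSR, 0, 0);
            return my_dbf ? 0 : -ENOMEM;
    }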
diff --git a/include/asm-s390/extmem.h b/include/asm-s390/extmem.h index c8802c934b74..33837d756184 100644 --- a/include/asm-s390/extmem.h +++ b/include/asm-s390/extmem.h | |||
@@ -22,11 +22,12 @@ | |||
22 | #define SEGMENT_SHARED 0 | 22 | #define SEGMENT_SHARED 0 |
23 | #define SEGMENT_EXCLUSIVE 1 | 23 | #define SEGMENT_EXCLUSIVE 1 |
24 | 24 | ||
25 | extern int segment_load (char *name,int segtype,unsigned long *addr,unsigned long *length); | 25 | int segment_load (char *name, int segtype, unsigned long *addr, unsigned long *length); |
26 | extern void segment_unload(char *name); | 26 | void segment_unload(char *name); |
27 | extern void segment_save(char *name); | 27 | void segment_save(char *name); |
28 | extern int segment_type (char* name); | 28 | int segment_type (char* name); |
29 | extern int segment_modify_shared (char *name, int do_nonshared); | 29 | int segment_modify_shared (char *name, int do_nonshared); |
30 | void segment_warning(int rc, char *seg_name); | ||
30 | 31 | ||
31 | #endif | 32 | #endif |
32 | #endif | 33 | #endif |
diff --git a/include/asm-s390/hardirq.h b/include/asm-s390/hardirq.h index 31beb18cb3d1..4b7cb964ff35 100644 --- a/include/asm-s390/hardirq.h +++ b/include/asm-s390/hardirq.h | |||
@@ -32,6 +32,6 @@ typedef struct { | |||
32 | 32 | ||
33 | #define HARDIRQ_BITS 8 | 33 | #define HARDIRQ_BITS 8 |
34 | 34 | ||
35 | extern void account_ticks(u64 time); | 35 | void clock_comparator_work(void); |
36 | 36 | ||
37 | #endif /* __ASM_HARDIRQ_H */ | 37 | #endif /* __ASM_HARDIRQ_H */ |
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h index 801a6fd35b5b..5de3efb31445 100644 --- a/include/asm-s390/lowcore.h +++ b/include/asm-s390/lowcore.h | |||
@@ -56,6 +56,8 @@ | |||
56 | #define __LC_IO_INT_WORD 0x0C0 | 56 | #define __LC_IO_INT_WORD 0x0C0 |
57 | #define __LC_MCCK_CODE 0x0E8 | 57 | #define __LC_MCCK_CODE 0x0E8 |
58 | 58 | ||
59 | #define __LC_LAST_BREAK 0x110 | ||
60 | |||
59 | #define __LC_RETURN_PSW 0x200 | 61 | #define __LC_RETURN_PSW 0x200 |
60 | 62 | ||
61 | #define __LC_SAVE_AREA 0xC00 | 63 | #define __LC_SAVE_AREA 0xC00 |
@@ -80,7 +82,6 @@ | |||
80 | #define __LC_CPUID 0xC60 | 82 | #define __LC_CPUID 0xC60 |
81 | #define __LC_CPUADDR 0xC68 | 83 | #define __LC_CPUADDR 0xC68 |
82 | #define __LC_IPLDEV 0xC7C | 84 | #define __LC_IPLDEV 0xC7C |
83 | #define __LC_JIFFY_TIMER 0xC80 | ||
84 | #define __LC_CURRENT 0xC90 | 85 | #define __LC_CURRENT 0xC90 |
85 | #define __LC_INT_CLOCK 0xC98 | 86 | #define __LC_INT_CLOCK 0xC98 |
86 | #else /* __s390x__ */ | 87 | #else /* __s390x__ */ |
@@ -103,7 +104,6 @@ | |||
103 | #define __LC_CPUID 0xD80 | 104 | #define __LC_CPUID 0xD80 |
104 | #define __LC_CPUADDR 0xD88 | 105 | #define __LC_CPUADDR 0xD88 |
105 | #define __LC_IPLDEV 0xDB8 | 106 | #define __LC_IPLDEV 0xDB8 |
106 | #define __LC_JIFFY_TIMER 0xDC0 | ||
107 | #define __LC_CURRENT 0xDD8 | 107 | #define __LC_CURRENT 0xDD8 |
108 | #define __LC_INT_CLOCK 0xDE8 | 108 | #define __LC_INT_CLOCK 0xDE8 |
109 | #endif /* __s390x__ */ | 109 | #endif /* __s390x__ */ |
@@ -276,7 +276,7 @@ struct _lowcore | |||
276 | /* entry.S sensitive area end */ | 276 | /* entry.S sensitive area end */ |
277 | 277 | ||
278 | /* SMP info area: defined by DJB */ | 278 | /* SMP info area: defined by DJB */ |
279 | __u64 jiffy_timer; /* 0xc80 */ | 279 | __u64 clock_comparator; /* 0xc80 */ |
280 | __u32 ext_call_fast; /* 0xc88 */ | 280 | __u32 ext_call_fast; /* 0xc88 */ |
281 | __u32 percpu_offset; /* 0xc8c */ | 281 | __u32 percpu_offset; /* 0xc8c */ |
282 | __u32 current_task; /* 0xc90 */ | 282 | __u32 current_task; /* 0xc90 */ |
@@ -368,11 +368,12 @@ struct _lowcore | |||
368 | /* entry.S sensitive area end */ | 368 | /* entry.S sensitive area end */ |
369 | 369 | ||
370 | /* SMP info area: defined by DJB */ | 370 | /* SMP info area: defined by DJB */ |
371 | __u64 jiffy_timer; /* 0xdc0 */ | 371 | __u64 clock_comparator; /* 0xdc0 */ |
372 | __u64 ext_call_fast; /* 0xdc8 */ | 372 | __u64 ext_call_fast; /* 0xdc8 */ |
373 | __u64 percpu_offset; /* 0xdd0 */ | 373 | __u64 percpu_offset; /* 0xdd0 */ |
374 | __u64 current_task; /* 0xdd8 */ | 374 | __u64 current_task; /* 0xdd8 */ |
375 | __u64 softirq_pending; /* 0xde0 */ | 375 | __u32 softirq_pending; /* 0xde0 */ |
376 | __u32 pad_0x0de4; /* 0xde4 */ | ||
376 | __u64 int_clock; /* 0xde8 */ | 377 | __u64 int_clock; /* 0xde8 */ |
377 | __u8 pad12[0xe00-0xdf0]; /* 0xdf0 */ | 378 | __u8 pad12[0xe00-0xdf0]; /* 0xdf0 */ |
378 | 379 | ||
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h index 51d88912aa20..8eaf343a12a8 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h | |||
@@ -175,6 +175,13 @@ extern void task_show_regs(struct seq_file *m, struct task_struct *task); | |||
175 | extern void show_registers(struct pt_regs *regs); | 175 | extern void show_registers(struct pt_regs *regs); |
176 | extern void show_code(struct pt_regs *regs); | 176 | extern void show_code(struct pt_regs *regs); |
177 | extern void show_trace(struct task_struct *task, unsigned long *sp); | 177 | extern void show_trace(struct task_struct *task, unsigned long *sp); |
178 | #ifdef CONFIG_64BIT | ||
179 | extern void show_last_breaking_event(struct pt_regs *regs); | ||
180 | #else | ||
181 | static inline void show_last_breaking_event(struct pt_regs *regs) | ||
182 | { | ||
183 | } | ||
184 | #endif | ||
178 | 185 | ||
179 | unsigned long get_wchan(struct task_struct *p); | 186 | unsigned long get_wchan(struct task_struct *p); |
180 | #define task_pt_regs(tsk) ((struct pt_regs *) \ | 187 | #define task_pt_regs(tsk) ((struct pt_regs *) \ |
diff --git a/include/asm-s390/semaphore.h b/include/asm-s390/semaphore.h index 0e7001ad8392..d9b2034ed1d2 100644 --- a/include/asm-s390/semaphore.h +++ b/include/asm-s390/semaphore.h | |||
@@ -1,107 +1 @@ | |||
1 | /* | #include <linux/semaphore.h> | |
2 | * include/asm-s390/semaphore.h | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * | ||
7 | * Derived from "include/asm-i386/semaphore.h" | ||
8 | * (C) Copyright 1996 Linus Torvalds | ||
9 | */ | ||
10 | |||
11 | #ifndef _S390_SEMAPHORE_H | ||
12 | #define _S390_SEMAPHORE_H | ||
13 | |||
14 | #include <asm/system.h> | ||
15 | #include <asm/atomic.h> | ||
16 | #include <linux/wait.h> | ||
17 | #include <linux/rwsem.h> | ||
18 | |||
19 | struct semaphore { | ||
20 | /* | ||
21 | * Note that any negative value of count is equivalent to 0, | ||
22 | * but additionally indicates that some process(es) might be | ||
23 | * sleeping on `wait'. | ||
24 | */ | ||
25 | atomic_t count; | ||
26 | wait_queue_head_t wait; | ||
27 | }; | ||
28 | |||
29 | #define __SEMAPHORE_INITIALIZER(name,count) \ | ||
30 | { ATOMIC_INIT(count), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) } | ||
31 | |||
32 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
33 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
34 | |||
35 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
36 | |||
37 | static inline void sema_init (struct semaphore *sem, int val) | ||
38 | { | ||
39 | atomic_set(&sem->count, val); | ||
40 | init_waitqueue_head(&sem->wait); | ||
41 | } | ||
42 | |||
43 | static inline void init_MUTEX (struct semaphore *sem) | ||
44 | { | ||
45 | sema_init(sem, 1); | ||
46 | } | ||
47 | |||
48 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
49 | { | ||
50 | sema_init(sem, 0); | ||
51 | } | ||
52 | |||
53 | asmlinkage void __down(struct semaphore * sem); | ||
54 | asmlinkage int __down_interruptible(struct semaphore * sem); | ||
55 | asmlinkage int __down_trylock(struct semaphore * sem); | ||
56 | asmlinkage void __up(struct semaphore * sem); | ||
57 | |||
58 | static inline void down(struct semaphore * sem) | ||
59 | { | ||
60 | might_sleep(); | ||
61 | if (atomic_dec_return(&sem->count) < 0) | ||
62 | __down(sem); | ||
63 | } | ||
64 | |||
65 | static inline int down_interruptible(struct semaphore * sem) | ||
66 | { | ||
67 | int ret = 0; | ||
68 | |||
69 | might_sleep(); | ||
70 | if (atomic_dec_return(&sem->count) < 0) | ||
71 | ret = __down_interruptible(sem); | ||
72 | return ret; | ||
73 | } | ||
74 | |||
75 | static inline int down_trylock(struct semaphore * sem) | ||
76 | { | ||
77 | int old_val, new_val; | ||
78 | |||
79 | /* | ||
80 | * This inline assembly atomically implements the equivalent | ||
81 | * to the following C code: | ||
82 | * old_val = sem->count.counter; | ||
83 | * if ((new_val = old_val) > 0) | ||
84 | * sem->count.counter = --new_val; | ||
85 | * In the ppc code this is called atomic_dec_if_positive. | ||
86 | */ | ||
87 | asm volatile( | ||
88 | " l %0,0(%3)\n" | ||
89 | "0: ltr %1,%0\n" | ||
90 | " jle 1f\n" | ||
91 | " ahi %1,-1\n" | ||
92 | " cs %0,%1,0(%3)\n" | ||
93 | " jl 0b\n" | ||
94 | "1:" | ||
95 | : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count.counter) | ||
96 | : "a" (&sem->count.counter), "m" (sem->count.counter) | ||
97 | : "cc", "memory"); | ||
98 | return old_val <= 0; | ||
99 | } | ||
100 | |||
101 | static inline void up(struct semaphore * sem) | ||
102 | { | ||
103 | if (atomic_inc_return(&sem->count) <= 0) | ||
104 | __up(sem); | ||
105 | } | ||
106 | |||
107 | #endif | ||
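With the retry that the cs/jl pair provides made explicit, the comment's C
fragment becomes the loop below; a sketch that uses a GCC builtin in place of
the CS instruction, not the s390 asm itself:

    /* Returns 1 if the semaphore was not taken, matching down_trylock(). */
    static int down_trylock_sketch(int *counter)
    {
            int old = *counter;                     /* l       */

            while (old > 0) {                       /* ltr/jle */
                    int seen = __sync_val_compare_and_swap(counter, old,
                                                           old - 1);
                    if (seen == old)                /* cs      */
                            return 0;               /* got it  */
                    old = seen;                     /* jl 0b   */
            }
            return 1;                               /* old_val <= 0 */
    }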
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h index c7b74326a527..6f3821a6a902 100644 --- a/include/asm-s390/smp.h +++ b/include/asm-s390/smp.h | |||
@@ -90,6 +90,9 @@ extern void __cpu_die (unsigned int cpu); | |||
90 | extern void cpu_die (void) __attribute__ ((noreturn)); | 90 | extern void cpu_die (void) __attribute__ ((noreturn)); |
91 | extern int __cpu_up (unsigned int cpu); | 91 | extern int __cpu_up (unsigned int cpu); |
92 | 92 | ||
93 | extern struct mutex smp_cpu_state_mutex; | ||
94 | extern int smp_cpu_polarization[]; | ||
95 | |||
93 | extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), | 96 | extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), |
94 | void *info, int wait); | 97 | void *info, int wait); |
95 | #endif | 98 | #endif |
diff --git a/include/asm-s390/sysinfo.h b/include/asm-s390/sysinfo.h new file mode 100644 index 000000000000..abe10ae15e46 --- /dev/null +++ b/include/asm-s390/sysinfo.h | |||
@@ -0,0 +1,116 @@ | |||
1 | /* | ||
2 | * definition for store system information stsi | ||
3 | * | ||
4 | * Copyright IBM Corp. 2001,2008 | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License (version 2 only) | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * Author(s): Ulrich Weigand <weigand@de.ibm.com> | ||
11 | * Christian Borntraeger <borntraeger@de.ibm.com> | ||
12 | */ | ||
13 | |||
14 | struct sysinfo_1_1_1 { | ||
15 | char reserved_0[32]; | ||
16 | char manufacturer[16]; | ||
17 | char type[4]; | ||
18 | char reserved_1[12]; | ||
19 | char model_capacity[16]; | ||
20 | char sequence[16]; | ||
21 | char plant[4]; | ||
22 | char model[16]; | ||
23 | char model_perm_cap[16]; | ||
24 | char model_temp_cap[16]; | ||
25 | char model_cap_rating[4]; | ||
26 | char model_perm_cap_rating[4]; | ||
27 | char model_temp_cap_rating[4]; | ||
28 | }; | ||
29 | |||
30 | struct sysinfo_1_2_1 { | ||
31 | char reserved_0[80]; | ||
32 | char sequence[16]; | ||
33 | char plant[4]; | ||
34 | char reserved_1[2]; | ||
35 | unsigned short cpu_address; | ||
36 | }; | ||
37 | |||
38 | struct sysinfo_1_2_2 { | ||
39 | char format; | ||
40 | char reserved_0[1]; | ||
41 | unsigned short acc_offset; | ||
42 | char reserved_1[24]; | ||
43 | unsigned int secondary_capability; | ||
44 | unsigned int capability; | ||
45 | unsigned short cpus_total; | ||
46 | unsigned short cpus_configured; | ||
47 | unsigned short cpus_standby; | ||
48 | unsigned short cpus_reserved; | ||
49 | unsigned short adjustment[0]; | ||
50 | }; | ||
51 | |||
52 | struct sysinfo_1_2_2_extension { | ||
53 | unsigned int alt_capability; | ||
54 | unsigned short alt_adjustment[0]; | ||
55 | }; | ||
56 | |||
57 | struct sysinfo_2_2_1 { | ||
58 | char reserved_0[80]; | ||
59 | char sequence[16]; | ||
60 | char plant[4]; | ||
61 | unsigned short cpu_id; | ||
62 | unsigned short cpu_address; | ||
63 | }; | ||
64 | |||
65 | struct sysinfo_2_2_2 { | ||
66 | char reserved_0[32]; | ||
67 | unsigned short lpar_number; | ||
68 | char reserved_1; | ||
69 | unsigned char characteristics; | ||
70 | unsigned short cpus_total; | ||
71 | unsigned short cpus_configured; | ||
72 | unsigned short cpus_standby; | ||
73 | unsigned short cpus_reserved; | ||
74 | char name[8]; | ||
75 | unsigned int caf; | ||
76 | char reserved_2[16]; | ||
77 | unsigned short cpus_dedicated; | ||
78 | unsigned short cpus_shared; | ||
79 | }; | ||
80 | |||
81 | #define LPAR_CHAR_DEDICATED (1 << 7) | ||
82 | #define LPAR_CHAR_SHARED (1 << 6) | ||
83 | #define LPAR_CHAR_LIMITED (1 << 5) | ||
84 | |||
85 | struct sysinfo_3_2_2 { | ||
86 | char reserved_0[31]; | ||
87 | unsigned char count; | ||
88 | struct { | ||
89 | char reserved_0[4]; | ||
90 | unsigned short cpus_total; | ||
91 | unsigned short cpus_configured; | ||
92 | unsigned short cpus_standby; | ||
93 | unsigned short cpus_reserved; | ||
94 | char name[8]; | ||
95 | unsigned int caf; | ||
96 | char cpi[16]; | ||
97 | char reserved_1[24]; | ||
98 | |||
99 | } vm[8]; | ||
100 | }; | ||
101 | |||
102 | static inline int stsi(void *sysinfo, int fc, int sel1, int sel2) | ||
103 | { | ||
104 | register int r0 asm("0") = (fc << 28) | sel1; | ||
105 | register int r1 asm("1") = sel2; | ||
106 | |||
107 | asm volatile( | ||
108 | " stsi 0(%2)\n" | ||
109 | "0: jz 2f\n" | ||
110 | "1: lhi %0,%3\n" | ||
111 | "2:\n" | ||
112 | EX_TABLE(0b, 1b) | ||
113 | : "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS) | ||
114 | : "cc", "memory"); | ||
115 | return r0; | ||
116 | } | ||
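A plausible caller of the stsi() wrapper above: the function code and first
selector travel in r0, the second selector in r1, the buffer is a full page,
and -ENOSYS reports that the requested level is not supported. The names below
are illustrative, not part of the patch:

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/kernel.h>

    static void print_machine_type(void)
    {
            struct sysinfo_1_1_1 *info;

            info = (void *) get_zeroed_page(GFP_KERNEL);
            if (!info)
                    return;
            /* fc=1, sel1=1, sel2=1: basic-machine configuration data */
            if (stsi(info, 1, 1, 1) != -ENOSYS)
                    printk(KERN_INFO "machine type: %.4s\n", info->type);
            free_page((unsigned long) info);
    }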
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h index 15aba30601a3..92098df4d6e3 100644 --- a/include/asm-s390/system.h +++ b/include/asm-s390/system.h | |||
@@ -406,6 +406,8 @@ __set_psw_mask(unsigned long mask) | |||
406 | #define local_mcck_enable() __set_psw_mask(psw_kernel_bits) | 406 | #define local_mcck_enable() __set_psw_mask(psw_kernel_bits) |
407 | #define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK) | 407 | #define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK) |
408 | 408 | ||
409 | int stfle(unsigned long long *list, int doublewords); | ||
410 | |||
409 | #ifdef CONFIG_SMP | 411 | #ifdef CONFIG_SMP |
410 | 412 | ||
411 | extern void smp_ctl_set_bit(int cr, int bit); | 413 | extern void smp_ctl_set_bit(int cr, int bit); |
diff --git a/include/asm-s390/timex.h b/include/asm-s390/timex.h index 98229db24314..d744c3d62de5 100644 --- a/include/asm-s390/timex.h +++ b/include/asm-s390/timex.h | |||
@@ -62,16 +62,18 @@ static inline unsigned long long get_clock (void) | |||
62 | return clk; | 62 | return clk; |
63 | } | 63 | } |
64 | 64 | ||
65 | static inline void get_clock_extended(void *dest) | 65 | static inline unsigned long long get_clock_xt(void) |
66 | { | 66 | { |
67 | typedef struct { unsigned long long clk[2]; } __clock_t; | 67 | unsigned char clk[16]; |
68 | 68 | ||
69 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | 69 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) |
70 | asm volatile("stcke %0" : "=Q" (*((__clock_t *)dest)) : : "cc"); | 70 | asm volatile("stcke %0" : "=Q" (clk) : : "cc"); |
71 | #else /* __GNUC__ */ | 71 | #else /* __GNUC__ */ |
72 | asm volatile("stcke 0(%1)" : "=m" (*((__clock_t *)dest)) | 72 | asm volatile("stcke 0(%1)" : "=m" (clk) |
73 | : "a" ((__clock_t *)dest) : "cc"); | 73 | : "a" (clk) : "cc"); |
74 | #endif /* __GNUC__ */ | 74 | #endif /* __GNUC__ */ |
75 | |||
76 | return *((unsigned long long *)&clk[1]); | ||
75 | } | 77 | } |
76 | 78 | ||
77 | static inline cycles_t get_cycles(void) | 79 | static inline cycles_t get_cycles(void) |
@@ -81,5 +83,6 @@ static inline cycles_t get_cycles(void) | |||
81 | 83 | ||
82 | int get_sync_clock(unsigned long long *clock); | 84 | int get_sync_clock(unsigned long long *clock); |
83 | void init_cpu_timer(void); | 85 | void init_cpu_timer(void); |
86 | unsigned long long monotonic_clock(void); | ||
84 | 87 | ||
85 | #endif | 88 | #endif |
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h index 35fb4f9127b2..9e57a93d7de1 100644 --- a/include/asm-s390/tlbflush.h +++ b/include/asm-s390/tlbflush.h | |||
@@ -13,12 +13,14 @@ static inline void __tlb_flush_local(void) | |||
13 | asm volatile("ptlb" : : : "memory"); | 13 | asm volatile("ptlb" : : : "memory"); |
14 | } | 14 | } |
15 | 15 | ||
16 | #ifdef CONFIG_SMP | ||
16 | /* | 17 | /* |
17 | * Flush all tlb entries on all cpus. | 18 | * Flush all tlb entries on all cpus. |
18 | */ | 19 | */ |
20 | void smp_ptlb_all(void); | ||
21 | |||
19 | static inline void __tlb_flush_global(void) | 22 | static inline void __tlb_flush_global(void) |
20 | { | 23 | { |
21 | extern void smp_ptlb_all(void); | ||
22 | register unsigned long reg2 asm("2"); | 24 | register unsigned long reg2 asm("2"); |
23 | register unsigned long reg3 asm("3"); | 25 | register unsigned long reg3 asm("3"); |
24 | register unsigned long reg4 asm("4"); | 26 | register unsigned long reg4 asm("4"); |
@@ -39,6 +41,25 @@ static inline void __tlb_flush_global(void) | |||
39 | : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" ); | 41 | : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" ); |
40 | } | 42 | } |
41 | 43 | ||
44 | static inline void __tlb_flush_full(struct mm_struct *mm) | ||
45 | { | ||
46 | cpumask_t local_cpumask; | ||
47 | |||
48 | preempt_disable(); | ||
49 | /* | ||
50 | * If the process only ran on the local cpu, do a local flush. | ||
51 | */ | ||
52 | local_cpumask = cpumask_of_cpu(smp_processor_id()); | ||
53 | if (cpus_equal(mm->cpu_vm_mask, local_cpumask)) | ||
54 | __tlb_flush_local(); | ||
55 | else | ||
56 | __tlb_flush_global(); | ||
57 | preempt_enable(); | ||
58 | } | ||
59 | #else | ||
60 | #define __tlb_flush_full(mm) __tlb_flush_local() | ||
61 | #endif | ||
62 | |||
42 | /* | 63 | /* |
43 | * Flush all tlb entries of a page table on all cpus. | 64 | * Flush all tlb entries of a page table on all cpus. |
44 | */ | 65 | */ |
@@ -51,8 +72,6 @@ static inline void __tlb_flush_idte(unsigned long asce) | |||
51 | 72 | ||
52 | static inline void __tlb_flush_mm(struct mm_struct * mm) | 73 | static inline void __tlb_flush_mm(struct mm_struct * mm) |
53 | { | 74 | { |
54 | cpumask_t local_cpumask; | ||
55 | |||
56 | if (unlikely(cpus_empty(mm->cpu_vm_mask))) | 75 | if (unlikely(cpus_empty(mm->cpu_vm_mask))) |
57 | return; | 76 | return; |
58 | /* | 77 | /* |
@@ -69,16 +88,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm) | |||
69 | mm->context.asce_bits); | 88 | mm->context.asce_bits); |
70 | return; | 89 | return; |
71 | } | 90 | } |
72 | preempt_disable(); | 91 | __tlb_flush_full(mm); |
73 | /* | ||
74 | * If the process only ran on the local cpu, do a local flush. | ||
75 | */ | ||
76 | local_cpumask = cpumask_of_cpu(smp_processor_id()); | ||
77 | if (cpus_equal(mm->cpu_vm_mask, local_cpumask)) | ||
78 | __tlb_flush_local(); | ||
79 | else | ||
80 | __tlb_flush_global(); | ||
81 | preempt_enable(); | ||
82 | } | 92 | } |
83 | 93 | ||
84 | static inline void __tlb_flush_mm_cond(struct mm_struct * mm) | 94 | static inline void __tlb_flush_mm_cond(struct mm_struct * mm) |
diff --git a/include/asm-s390/topology.h b/include/asm-s390/topology.h index 613aa64019da..8e97b06f298a 100644 --- a/include/asm-s390/topology.h +++ b/include/asm-s390/topology.h | |||
@@ -1,6 +1,29 @@ | |||
1 | #ifndef _ASM_S390_TOPOLOGY_H | 1 | #ifndef _ASM_S390_TOPOLOGY_H |
2 | #define _ASM_S390_TOPOLOGY_H | 2 | #define _ASM_S390_TOPOLOGY_H |
3 | 3 | ||
4 | #include <linux/cpumask.h> | ||
5 | |||
6 | #define mc_capable() (1) | ||
7 | |||
8 | cpumask_t cpu_coregroup_map(unsigned int cpu); | ||
9 | |||
10 | int topology_set_cpu_management(int fc); | ||
11 | void topology_schedule_update(void); | ||
12 | |||
13 | #define POLARIZATION_UNKNWN (-1) | ||
14 | #define POLARIZATION_HRZ (0) | ||
15 | #define POLARIZATION_VL (1) | ||
16 | #define POLARIZATION_VM (2) | ||
17 | #define POLARIZATION_VH (3) | ||
18 | |||
19 | #ifdef CONFIG_SMP | ||
20 | void s390_init_cpu_topology(void); | ||
21 | #else | ||
22 | static inline void s390_init_cpu_topology(void) | ||
23 | { | ||
24 | } | ||
25 | #endif | ||
26 | |||
4 | #include <asm-generic/topology.h> | 27 | #include <asm-generic/topology.h> |
5 | 28 | ||
6 | #endif /* _ASM_S390_TOPOLOGY_H */ | 29 | #endif /* _ASM_S390_TOPOLOGY_H */ |
diff --git a/include/asm-sh/semaphore-helper.h b/include/asm-sh/semaphore-helper.h deleted file mode 100644 index bd8230c369ca..000000000000 --- a/include/asm-sh/semaphore-helper.h +++ /dev/null | |||
@@ -1,89 +0,0 @@ | |||
1 | #ifndef __ASM_SH_SEMAPHORE_HELPER_H | ||
2 | #define __ASM_SH_SEMAPHORE_HELPER_H | ||
3 | |||
4 | /* | ||
5 | * SMP- and interrupt-safe semaphores helper functions. | ||
6 | * | ||
7 | * (C) Copyright 1996 Linus Torvalds | ||
8 | * (C) Copyright 1999 Andrea Arcangeli | ||
9 | */ | ||
10 | |||
11 | /* | ||
12 | * These two _must_ execute atomically wrt each other. | ||
13 | * | ||
14 | * This is trivially done with load_locked/store_cond, | ||
15 | * which we have. Let the rest of the losers suck eggs. | ||
16 | */ | ||
17 | static __inline__ void wake_one_more(struct semaphore * sem) | ||
18 | { | ||
19 | atomic_inc((atomic_t *)&sem->sleepers); | ||
20 | } | ||
21 | |||
22 | static __inline__ int waking_non_zero(struct semaphore *sem) | ||
23 | { | ||
24 | unsigned long flags; | ||
25 | int ret = 0; | ||
26 | |||
27 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
28 | if (sem->sleepers > 0) { | ||
29 | sem->sleepers--; | ||
30 | ret = 1; | ||
31 | } | ||
32 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
33 | return ret; | ||
34 | } | ||
35 | |||
36 | /* | ||
37 | * waking_non_zero_interruptible: | ||
38 | * 1 got the lock | ||
39 | * 0 go to sleep | ||
40 | * -EINTR interrupted | ||
41 | * | ||
42 | * We must undo the sem->count decrement done by down_interruptible() while | ||
43 | * we are protected by the spinlock, so that this atomic_inc() is atomic with | ||
44 | * the atomic_read() in wake_one_more(); otherwise we can race. -arca | ||
45 | */ | ||
46 | static __inline__ int waking_non_zero_interruptible(struct semaphore *sem, | ||
47 | struct task_struct *tsk) | ||
48 | { | ||
49 | unsigned long flags; | ||
50 | int ret = 0; | ||
51 | |||
52 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
53 | if (sem->sleepers > 0) { | ||
54 | sem->sleepers--; | ||
55 | ret = 1; | ||
56 | } else if (signal_pending(tsk)) { | ||
57 | atomic_inc(&sem->count); | ||
58 | ret = -EINTR; | ||
59 | } | ||
60 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
61 | return ret; | ||
62 | } | ||
63 | |||
64 | /* | ||
65 | * waking_non_zero_trylock: | ||
66 | * 1 failed to lock | ||
67 | * 0 got the lock | ||
68 | * | ||
69 | * We must undo the sem->count decrement done by down_trylock() while we are | ||
70 | * protected by the spinlock, so that this atomic_inc() is atomic with the | ||
71 | * atomic_read() in wake_one_more(); otherwise we can race. -arca | ||
72 | */ | ||
73 | static __inline__ int waking_non_zero_trylock(struct semaphore *sem) | ||
74 | { | ||
75 | unsigned long flags; | ||
76 | int ret = 1; | ||
77 | |||
78 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
79 | if (sem->sleepers <= 0) | ||
80 | atomic_inc(&sem->count); | ||
81 | else { | ||
82 | sem->sleepers--; | ||
83 | ret = 0; | ||
84 | } | ||
85 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
86 | return ret; | ||
87 | } | ||
88 | |||
89 | #endif /* __ASM_SH_SEMAPHORE_HELPER_H */ | ||
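The deleted helpers implement a wakeup-token protocol: up() banks a token with wake_one_more(), and a woken sleeper claims it in waking_non_zero() under semaphore_wake_lock. A user-space model of the token accounting, with a pthread mutex standing in for the spinlock (a sketch of the idea only; the original mixes an unlocked atomic increment with locked reads, as its comments explain):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t wake_lock = PTHREAD_MUTEX_INITIALIZER;
    static int sleepers;                 /* pending wakeups, like sem->sleepers */

    static void wake_one_more(void)      /* called from up()'s slow path */
    {
        pthread_mutex_lock(&wake_lock);
        sleepers++;
        pthread_mutex_unlock(&wake_lock);
    }

    static int waking_non_zero(void)     /* 1: got the lock, 0: sleep again */
    {
        int ret = 0;

        pthread_mutex_lock(&wake_lock);
        if (sleepers > 0) {
            sleepers--;                  /* consume one banked wakeup */
            ret = 1;
        }
        pthread_mutex_unlock(&wake_lock);
        return ret;
    }

    int main(void)
    {
        printf("%d\n", waking_non_zero());  /* 0: nothing pending */
        wake_one_more();
        printf("%d\n", waking_non_zero());  /* 1: token consumed */
        return 0;
    }

With semaphores now implemented generically in linux/semaphore.h, this per-arch protocol becomes dead code, which is why the whole file can go.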
diff --git a/include/asm-sh/semaphore.h b/include/asm-sh/semaphore.h index 9e5a37c4dce2..d9b2034ed1d2 100644 --- a/include/asm-sh/semaphore.h +++ b/include/asm-sh/semaphore.h | |||
@@ -1,115 +1 @@ | |||
1 | #ifndef __ASM_SH_SEMAPHORE_H | #include <linux/semaphore.h> | |
2 | #define __ASM_SH_SEMAPHORE_H | ||
3 | |||
4 | #include <linux/linkage.h> | ||
5 | |||
6 | #ifdef __KERNEL__ | ||
7 | /* | ||
8 | * SMP- and interrupt-safe semaphores. | ||
9 | * | ||
10 | * (C) Copyright 1996 Linus Torvalds | ||
11 | * | ||
12 | * SuperH version by Niibe Yutaka | ||
13 | * (Currently no asm implementation but generic C code...) | ||
14 | */ | ||
15 | |||
16 | #include <linux/spinlock.h> | ||
17 | #include <linux/rwsem.h> | ||
18 | #include <linux/wait.h> | ||
19 | |||
20 | #include <asm/system.h> | ||
21 | #include <asm/atomic.h> | ||
22 | |||
23 | struct semaphore { | ||
24 | atomic_t count; | ||
25 | int sleepers; | ||
26 | wait_queue_head_t wait; | ||
27 | }; | ||
28 | |||
29 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
30 | { \ | ||
31 | .count = ATOMIC_INIT(n), \ | ||
32 | .sleepers = 0, \ | ||
33 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
34 | } | ||
35 | |||
36 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
37 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
38 | |||
39 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
40 | |||
41 | static inline void sema_init (struct semaphore *sem, int val) | ||
42 | { | ||
43 | /* | ||
44 | * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); | ||
45 | * | ||
46 | * I'd rather use the more flexible initialization above, but sadly | ||
47 | * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well. | ||
48 | */ | ||
49 | atomic_set(&sem->count, val); | ||
50 | sem->sleepers = 0; | ||
51 | init_waitqueue_head(&sem->wait); | ||
52 | } | ||
53 | |||
54 | static inline void init_MUTEX (struct semaphore *sem) | ||
55 | { | ||
56 | sema_init(sem, 1); | ||
57 | } | ||
58 | |||
59 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
60 | { | ||
61 | sema_init(sem, 0); | ||
62 | } | ||
63 | |||
64 | #if 0 | ||
65 | asmlinkage void __down_failed(void /* special register calling convention */); | ||
66 | asmlinkage int __down_failed_interruptible(void /* params in registers */); | ||
67 | asmlinkage int __down_failed_trylock(void /* params in registers */); | ||
68 | asmlinkage void __up_wakeup(void /* special register calling convention */); | ||
69 | #endif | ||
70 | |||
71 | asmlinkage void __down(struct semaphore * sem); | ||
72 | asmlinkage int __down_interruptible(struct semaphore * sem); | ||
73 | asmlinkage int __down_trylock(struct semaphore * sem); | ||
74 | asmlinkage void __up(struct semaphore * sem); | ||
75 | |||
76 | extern spinlock_t semaphore_wake_lock; | ||
77 | |||
78 | static inline void down(struct semaphore * sem) | ||
79 | { | ||
80 | might_sleep(); | ||
81 | if (atomic_dec_return(&sem->count) < 0) | ||
82 | __down(sem); | ||
83 | } | ||
84 | |||
85 | static inline int down_interruptible(struct semaphore * sem) | ||
86 | { | ||
87 | int ret = 0; | ||
88 | |||
89 | might_sleep(); | ||
90 | if (atomic_dec_return(&sem->count) < 0) | ||
91 | ret = __down_interruptible(sem); | ||
92 | return ret; | ||
93 | } | ||
94 | |||
95 | static inline int down_trylock(struct semaphore * sem) | ||
96 | { | ||
97 | int ret = 0; | ||
98 | |||
99 | if (atomic_dec_return(&sem->count) < 0) | ||
100 | ret = __down_trylock(sem); | ||
101 | return ret; | ||
102 | } | ||
103 | |||
104 | /* | ||
105 | * Note! This is subtle. We jump to wake people up only if | ||
106 | * the semaphore was negative (== somebody was waiting on it). | ||
107 | */ | ||
108 | static inline void up(struct semaphore * sem) | ||
109 | { | ||
110 | if (atomic_inc_return(&sem->count) <= 0) | ||
111 | __up(sem); | ||
112 | } | ||
113 | |||
114 | #endif | ||
115 | #endif /* __ASM_SH_SEMAPHORE_H */ | ||
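All of these per-arch headers share the same fast path, visible in the down()/up() bodies above: waiters are encoded in the sign of the count. down() enters the slow path only when the decrement drives the count negative, and up() wakes someone only when the increment shows the count had been negative. A runnable C11 model with the contended paths stubbed out (stdatomic stands in for atomic_t):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int count = 1;      /* DECLARE_MUTEX(): count starts at 1 */

    static void slow_down(void) { puts("contended: would sleep"); }
    static void slow_up(void)   { puts("contended: would wake a sleeper"); }

    static void down(void)
    {
        if (atomic_fetch_sub(&count, 1) - 1 < 0)   /* atomic_dec_return() < 0 */
            slow_down();
    }

    static void up(void)
    {
        if (atomic_fetch_add(&count, 1) + 1 <= 0)  /* atomic_inc_return() <= 0 */
            slow_up();
    }

    int main(void)
    {
        down();  /* 1 -> 0: taken, uncontended */
        down();  /* 0 -> -1: a waiter appears */
        up();    /* -1 -> 0: wake the waiter */
        up();    /* 0 -> 1: nothing to wake */
        return 0;
    }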
diff --git a/include/asm-sparc/semaphore.h b/include/asm-sparc/semaphore.h index 8018f9f4d497..d9b2034ed1d2 100644 --- a/include/asm-sparc/semaphore.h +++ b/include/asm-sparc/semaphore.h | |||
@@ -1,192 +1 @@ | |||
1 | #ifndef _SPARC_SEMAPHORE_H | #include <linux/semaphore.h> | |
2 | #define _SPARC_SEMAPHORE_H | ||
3 | |||
4 | /* Dinky, good for nothing, just barely irq safe, Sparc semaphores. */ | ||
5 | |||
6 | #ifdef __KERNEL__ | ||
7 | |||
8 | #include <asm/atomic.h> | ||
9 | #include <linux/wait.h> | ||
10 | #include <linux/rwsem.h> | ||
11 | |||
12 | struct semaphore { | ||
13 | atomic24_t count; | ||
14 | int sleepers; | ||
15 | wait_queue_head_t wait; | ||
16 | }; | ||
17 | |||
18 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
19 | { \ | ||
20 | .count = ATOMIC24_INIT(n), \ | ||
21 | .sleepers = 0, \ | ||
22 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
23 | } | ||
24 | |||
25 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
26 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
27 | |||
28 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
29 | |||
30 | static inline void sema_init (struct semaphore *sem, int val) | ||
31 | { | ||
32 | atomic24_set(&sem->count, val); | ||
33 | sem->sleepers = 0; | ||
34 | init_waitqueue_head(&sem->wait); | ||
35 | } | ||
36 | |||
37 | static inline void init_MUTEX (struct semaphore *sem) | ||
38 | { | ||
39 | sema_init(sem, 1); | ||
40 | } | ||
41 | |||
42 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
43 | { | ||
44 | sema_init(sem, 0); | ||
45 | } | ||
46 | |||
47 | extern void __down(struct semaphore * sem); | ||
48 | extern int __down_interruptible(struct semaphore * sem); | ||
49 | extern int __down_trylock(struct semaphore * sem); | ||
50 | extern void __up(struct semaphore * sem); | ||
51 | |||
52 | static inline void down(struct semaphore * sem) | ||
53 | { | ||
54 | register volatile int *ptr asm("g1"); | ||
55 | register int increment asm("g2"); | ||
56 | |||
57 | might_sleep(); | ||
58 | |||
59 | ptr = &(sem->count.counter); | ||
60 | increment = 1; | ||
61 | |||
62 | __asm__ __volatile__( | ||
63 | "mov %%o7, %%g4\n\t" | ||
64 | "call ___atomic24_sub\n\t" | ||
65 | " add %%o7, 8, %%o7\n\t" | ||
66 | "tst %%g2\n\t" | ||
67 | "bl 2f\n\t" | ||
68 | " nop\n" | ||
69 | "1:\n\t" | ||
70 | ".subsection 2\n" | ||
71 | "2:\n\t" | ||
72 | "save %%sp, -64, %%sp\n\t" | ||
73 | "mov %%g1, %%l1\n\t" | ||
74 | "mov %%g5, %%l5\n\t" | ||
75 | "call %3\n\t" | ||
76 | " mov %%g1, %%o0\n\t" | ||
77 | "mov %%l1, %%g1\n\t" | ||
78 | "ba 1b\n\t" | ||
79 | " restore %%l5, %%g0, %%g5\n\t" | ||
80 | ".previous\n" | ||
81 | : "=&r" (increment) | ||
82 | : "0" (increment), "r" (ptr), "i" (__down) | ||
83 | : "g3", "g4", "g7", "memory", "cc"); | ||
84 | } | ||
85 | |||
86 | static inline int down_interruptible(struct semaphore * sem) | ||
87 | { | ||
88 | register volatile int *ptr asm("g1"); | ||
89 | register int increment asm("g2"); | ||
90 | |||
91 | might_sleep(); | ||
92 | |||
93 | ptr = &(sem->count.counter); | ||
94 | increment = 1; | ||
95 | |||
96 | __asm__ __volatile__( | ||
97 | "mov %%o7, %%g4\n\t" | ||
98 | "call ___atomic24_sub\n\t" | ||
99 | " add %%o7, 8, %%o7\n\t" | ||
100 | "tst %%g2\n\t" | ||
101 | "bl 2f\n\t" | ||
102 | " clr %%g2\n" | ||
103 | "1:\n\t" | ||
104 | ".subsection 2\n" | ||
105 | "2:\n\t" | ||
106 | "save %%sp, -64, %%sp\n\t" | ||
107 | "mov %%g1, %%l1\n\t" | ||
108 | "mov %%g5, %%l5\n\t" | ||
109 | "call %3\n\t" | ||
110 | " mov %%g1, %%o0\n\t" | ||
111 | "mov %%l1, %%g1\n\t" | ||
112 | "mov %%l5, %%g5\n\t" | ||
113 | "ba 1b\n\t" | ||
114 | " restore %%o0, %%g0, %%g2\n\t" | ||
115 | ".previous\n" | ||
116 | : "=&r" (increment) | ||
117 | : "0" (increment), "r" (ptr), "i" (__down_interruptible) | ||
118 | : "g3", "g4", "g7", "memory", "cc"); | ||
119 | |||
120 | return increment; | ||
121 | } | ||
122 | |||
123 | static inline int down_trylock(struct semaphore * sem) | ||
124 | { | ||
125 | register volatile int *ptr asm("g1"); | ||
126 | register int increment asm("g2"); | ||
127 | |||
128 | ptr = &(sem->count.counter); | ||
129 | increment = 1; | ||
130 | |||
131 | __asm__ __volatile__( | ||
132 | "mov %%o7, %%g4\n\t" | ||
133 | "call ___atomic24_sub\n\t" | ||
134 | " add %%o7, 8, %%o7\n\t" | ||
135 | "tst %%g2\n\t" | ||
136 | "bl 2f\n\t" | ||
137 | " clr %%g2\n" | ||
138 | "1:\n\t" | ||
139 | ".subsection 2\n" | ||
140 | "2:\n\t" | ||
141 | "save %%sp, -64, %%sp\n\t" | ||
142 | "mov %%g1, %%l1\n\t" | ||
143 | "mov %%g5, %%l5\n\t" | ||
144 | "call %3\n\t" | ||
145 | " mov %%g1, %%o0\n\t" | ||
146 | "mov %%l1, %%g1\n\t" | ||
147 | "mov %%l5, %%g5\n\t" | ||
148 | "ba 1b\n\t" | ||
149 | " restore %%o0, %%g0, %%g2\n\t" | ||
150 | ".previous\n" | ||
151 | : "=&r" (increment) | ||
152 | : "0" (increment), "r" (ptr), "i" (__down_trylock) | ||
153 | : "g3", "g4", "g7", "memory", "cc"); | ||
154 | |||
155 | return increment; | ||
156 | } | ||
157 | |||
158 | static inline void up(struct semaphore * sem) | ||
159 | { | ||
160 | register volatile int *ptr asm("g1"); | ||
161 | register int increment asm("g2"); | ||
162 | |||
163 | ptr = &(sem->count.counter); | ||
164 | increment = 1; | ||
165 | |||
166 | __asm__ __volatile__( | ||
167 | "mov %%o7, %%g4\n\t" | ||
168 | "call ___atomic24_add\n\t" | ||
169 | " add %%o7, 8, %%o7\n\t" | ||
170 | "tst %%g2\n\t" | ||
171 | "ble 2f\n\t" | ||
172 | " nop\n" | ||
173 | "1:\n\t" | ||
174 | ".subsection 2\n" | ||
175 | "2:\n\t" | ||
176 | "save %%sp, -64, %%sp\n\t" | ||
177 | "mov %%g1, %%l1\n\t" | ||
178 | "mov %%g5, %%l5\n\t" | ||
179 | "call %3\n\t" | ||
180 | " mov %%g1, %%o0\n\t" | ||
181 | "mov %%l1, %%g1\n\t" | ||
182 | "ba 1b\n\t" | ||
183 | " restore %%l5, %%g0, %%g5\n\t" | ||
184 | ".previous\n" | ||
185 | : "=&r" (increment) | ||
186 | : "0" (increment), "r" (ptr), "i" (__up) | ||
187 | : "g3", "g4", "g7", "memory", "cc"); | ||
188 | } | ||
189 | |||
190 | #endif /* __KERNEL__ */ | ||
191 | |||
192 | #endif /* !(_SPARC_SEMAPHORE_H) */ | ||
diff --git a/include/asm-sparc64/semaphore.h b/include/asm-sparc64/semaphore.h index 7f7c0c4e024f..d9b2034ed1d2 100644 --- a/include/asm-sparc64/semaphore.h +++ b/include/asm-sparc64/semaphore.h | |||
@@ -1,53 +1 @@ | |||
1 | #ifndef _SPARC64_SEMAPHORE_H | #include <linux/semaphore.h> | |
2 | #define _SPARC64_SEMAPHORE_H | ||
3 | |||
4 | /* These are actually reasonable on the V9. | ||
5 | * | ||
6 | * See asm-ppc/semaphore.h for implementation commentary, | ||
7 | * only sparc64-specific issues are commented here. | ||
8 | */ | ||
9 | #ifdef __KERNEL__ | ||
10 | |||
11 | #include <asm/atomic.h> | ||
12 | #include <asm/system.h> | ||
13 | #include <linux/wait.h> | ||
14 | #include <linux/rwsem.h> | ||
15 | |||
16 | struct semaphore { | ||
17 | atomic_t count; | ||
18 | wait_queue_head_t wait; | ||
19 | }; | ||
20 | |||
21 | #define __SEMAPHORE_INITIALIZER(name, count) \ | ||
22 | { ATOMIC_INIT(count), \ | ||
23 | __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) } | ||
24 | |||
25 | #define __DECLARE_SEMAPHORE_GENERIC(name, count) \ | ||
26 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
27 | |||
28 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) | ||
29 | |||
30 | static inline void sema_init (struct semaphore *sem, int val) | ||
31 | { | ||
32 | atomic_set(&sem->count, val); | ||
33 | init_waitqueue_head(&sem->wait); | ||
34 | } | ||
35 | |||
36 | static inline void init_MUTEX (struct semaphore *sem) | ||
37 | { | ||
38 | sema_init(sem, 1); | ||
39 | } | ||
40 | |||
41 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
42 | { | ||
43 | sema_init(sem, 0); | ||
44 | } | ||
45 | |||
46 | extern void up(struct semaphore *sem); | ||
47 | extern void down(struct semaphore *sem); | ||
48 | extern int down_trylock(struct semaphore *sem); | ||
49 | extern int down_interruptible(struct semaphore *sem); | ||
50 | |||
51 | #endif /* __KERNEL__ */ | ||
52 | |||
53 | #endif /* !(_SPARC64_SEMAPHORE_H) */ | ||
diff --git a/include/asm-um/semaphore.h b/include/asm-um/semaphore.h index ff13c34de421..d9b2034ed1d2 100644 --- a/include/asm-um/semaphore.h +++ b/include/asm-um/semaphore.h | |||
@@ -1,6 +1 @@ | |||
1 | #ifndef __UM_SEMAPHORE_H | #include <linux/semaphore.h> | |
2 | #define __UM_SEMAPHORE_H | ||
3 | |||
4 | #include "asm/arch/semaphore.h" | ||
5 | |||
6 | #endif | ||
diff --git a/include/asm-v850/semaphore.h b/include/asm-v850/semaphore.h index 10ed0ccf37df..d9b2034ed1d2 100644 --- a/include/asm-v850/semaphore.h +++ b/include/asm-v850/semaphore.h | |||
@@ -1,84 +1 @@ | |||
1 | #ifndef __V850_SEMAPHORE_H__ | #include <linux/semaphore.h> | |
2 | #define __V850_SEMAPHORE_H__ | ||
3 | |||
4 | #include <linux/linkage.h> | ||
5 | #include <linux/spinlock.h> | ||
6 | #include <linux/wait.h> | ||
7 | #include <linux/rwsem.h> | ||
8 | |||
9 | #include <asm/atomic.h> | ||
10 | |||
11 | struct semaphore { | ||
12 | atomic_t count; | ||
13 | int sleepers; | ||
14 | wait_queue_head_t wait; | ||
15 | }; | ||
16 | |||
17 | #define __SEMAPHORE_INITIALIZER(name,count) \ | ||
18 | { ATOMIC_INIT (count), 0, \ | ||
19 | __WAIT_QUEUE_HEAD_INITIALIZER ((name).wait) } | ||
20 | |||
21 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
22 | struct semaphore name = __SEMAPHORE_INITIALIZER (name,count) | ||
23 | |||
24 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC (name,1) | ||
25 | |||
26 | static inline void sema_init (struct semaphore *sem, int val) | ||
27 | { | ||
28 | *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); | ||
29 | } | ||
30 | |||
31 | static inline void init_MUTEX (struct semaphore *sem) | ||
32 | { | ||
33 | sema_init (sem, 1); | ||
34 | } | ||
35 | |||
36 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
37 | { | ||
38 | sema_init (sem, 0); | ||
39 | } | ||
40 | |||
41 | /* | ||
42 | * special register calling convention | ||
43 | */ | ||
44 | asmlinkage void __down_failed (void); | ||
45 | asmlinkage int __down_interruptible_failed (void); | ||
46 | asmlinkage int __down_trylock_failed (void); | ||
47 | asmlinkage void __up_wakeup (void); | ||
48 | |||
49 | extern void __down (struct semaphore * sem); | ||
50 | extern int __down_interruptible (struct semaphore * sem); | ||
51 | extern int __down_trylock (struct semaphore * sem); | ||
52 | extern void __up (struct semaphore * sem); | ||
53 | |||
54 | static inline void down (struct semaphore * sem) | ||
55 | { | ||
56 | might_sleep(); | ||
57 | if (atomic_dec_return (&sem->count) < 0) | ||
58 | __down (sem); | ||
59 | } | ||
60 | |||
61 | static inline int down_interruptible (struct semaphore * sem) | ||
62 | { | ||
63 | int ret = 0; | ||
64 | might_sleep(); | ||
65 | if (atomic_dec_return (&sem->count) < 0) | ||
66 | ret = __down_interruptible (sem); | ||
67 | return ret; | ||
68 | } | ||
69 | |||
70 | static inline int down_trylock (struct semaphore *sem) | ||
71 | { | ||
72 | int ret = 0; | ||
73 | if (atomic_dec_return (&sem->count) < 0) | ||
74 | ret = __down_trylock (sem); | ||
75 | return ret; | ||
76 | } | ||
77 | |||
78 | static inline void up (struct semaphore * sem) | ||
79 | { | ||
80 | if (atomic_inc_return (&sem->count) <= 0) | ||
81 | __up (sem); | ||
82 | } | ||
83 | |||
84 | #endif /* __V850_SEMAPHORE_H__ */ | ||
diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild index 3b8160a2b47e..1e3554596f72 100644 --- a/include/asm-x86/Kbuild +++ b/include/asm-x86/Kbuild | |||
@@ -10,6 +10,7 @@ header-y += prctl.h | |||
10 | header-y += ptrace-abi.h | 10 | header-y += ptrace-abi.h |
11 | header-y += sigcontext32.h | 11 | header-y += sigcontext32.h |
12 | header-y += ucontext.h | 12 | header-y += ucontext.h |
13 | header-y += processor-flags.h | ||
13 | 14 | ||
14 | unifdef-y += e820.h | 15 | unifdef-y += e820.h |
15 | unifdef-y += ist.h | 16 | unifdef-y += ist.h |
diff --git a/include/asm-x86/a.out-core.h b/include/asm-x86/a.out-core.h index d2b6e11d3e97..714207a1c387 100644 --- a/include/asm-x86/a.out-core.h +++ b/include/asm-x86/a.out-core.h | |||
@@ -29,8 +29,9 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) | |||
29 | dump->magic = CMAGIC; | 29 | dump->magic = CMAGIC; |
30 | dump->start_code = 0; | 30 | dump->start_code = 0; |
31 | dump->start_stack = regs->sp & ~(PAGE_SIZE - 1); | 31 | dump->start_stack = regs->sp & ~(PAGE_SIZE - 1); |
32 | dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; | 32 | dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT; |
33 | dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT; | 33 | dump->u_dsize = ((unsigned long)(current->mm->brk + (PAGE_SIZE - 1))) |
34 | >> PAGE_SHIFT; | ||
34 | dump->u_dsize -= dump->u_tsize; | 35 | dump->u_dsize -= dump->u_tsize; |
35 | dump->u_ssize = 0; | 36 | dump->u_ssize = 0; |
36 | dump->u_debugreg[0] = current->thread.debugreg0; | 37 | dump->u_debugreg[0] = current->thread.debugreg0; |
@@ -43,7 +44,8 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) | |||
43 | dump->u_debugreg[7] = current->thread.debugreg7; | 44 | dump->u_debugreg[7] = current->thread.debugreg7; |
44 | 45 | ||
45 | if (dump->start_stack < TASK_SIZE) | 46 | if (dump->start_stack < TASK_SIZE) |
46 | dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT; | 47 | dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack)) |
48 | >> PAGE_SHIFT; | ||
47 | 49 | ||
48 | dump->regs.bx = regs->bx; | 50 | dump->regs.bx = regs->bx; |
49 | dump->regs.cx = regs->cx; | 51 | dump->regs.cx = regs->cx; |
@@ -55,7 +57,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) | |||
55 | dump->regs.ds = (u16)regs->ds; | 57 | dump->regs.ds = (u16)regs->ds; |
56 | dump->regs.es = (u16)regs->es; | 58 | dump->regs.es = (u16)regs->es; |
57 | dump->regs.fs = (u16)regs->fs; | 59 | dump->regs.fs = (u16)regs->fs; |
58 | savesegment(gs,gs); | 60 | savesegment(gs, gs); |
59 | dump->regs.orig_ax = regs->orig_ax; | 61 | dump->regs.orig_ax = regs->orig_ax; |
60 | dump->regs.ip = regs->ip; | 62 | dump->regs.ip = regs->ip; |
61 | dump->regs.cs = (u16)regs->cs; | 63 | dump->regs.cs = (u16)regs->cs; |
@@ -63,7 +65,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) | |||
63 | dump->regs.sp = regs->sp; | 65 | dump->regs.sp = regs->sp; |
64 | dump->regs.ss = (u16)regs->ss; | 66 | dump->regs.ss = (u16)regs->ss; |
65 | 67 | ||
66 | dump->u_fpvalid = dump_fpu (regs, &dump->i387); | 68 | dump->u_fpvalid = dump_fpu(regs, &dump->i387); |
67 | } | 69 | } |
68 | 70 | ||
69 | #endif /* CONFIG_X86_32 */ | 71 | #endif /* CONFIG_X86_32 */ |
diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h index 7a72d6aa50be..14411c9de46f 100644 --- a/include/asm-x86/acpi.h +++ b/include/asm-x86/acpi.h | |||
@@ -67,16 +67,16 @@ int __acpi_release_global_lock(unsigned int *lock); | |||
67 | */ | 67 | */ |
68 | #define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ | 68 | #define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ |
69 | asm("divl %2;" \ | 69 | asm("divl %2;" \ |
70 | :"=a"(q32), "=d"(r32) \ | 70 | : "=a"(q32), "=d"(r32) \ |
71 | :"r"(d32), \ | 71 | : "r"(d32), \ |
72 | "0"(n_lo), "1"(n_hi)) | 72 | "0"(n_lo), "1"(n_hi)) |
73 | 73 | ||
74 | 74 | ||
75 | #define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ | 75 | #define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ |
76 | asm("shrl $1,%2 ;" \ | 76 | asm("shrl $1,%2 ;" \ |
77 | "rcrl $1,%3;" \ | 77 | "rcrl $1,%3;" \ |
78 | :"=r"(n_hi), "=r"(n_lo) \ | 78 | : "=r"(n_hi), "=r"(n_lo) \ |
79 | :"0"(n_hi), "1"(n_lo)) | 79 | : "0"(n_hi), "1"(n_lo)) |
80 | 80 | ||
81 | #ifdef CONFIG_ACPI | 81 | #ifdef CONFIG_ACPI |
82 | extern int acpi_lapic; | 82 | extern int acpi_lapic; |
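ACPI_DIV_64_BY_32() wraps x86's divl instruction, which divides the 64-bit value n_hi:n_lo by a 32-bit divisor and leaves the quotient in %eax and the remainder in %edx (it faults if the quotient overflows 32 bits). The same arithmetic in plain C, with made-up inputs:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t n_hi = 0x1, n_lo = 0x10, d32 = 7;
        uint64_t n = ((uint64_t)n_hi << 32) | n_lo;   /* n_hi:n_lo as one value */

        uint32_t q32 = (uint32_t)(n / d32);   /* what divl leaves in %eax */
        uint32_t r32 = (uint32_t)(n % d32);   /* what divl leaves in %edx */

        printf("q=%u r=%u\n", q32, r32);      /* q=613566758 r=6 */
        return 0;
    }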
diff --git a/include/asm-x86/alternative.h b/include/asm-x86/alternative.h index d8bacf3c4b08..1f6a9ca10126 100644 --- a/include/asm-x86/alternative.h +++ b/include/asm-x86/alternative.h | |||
@@ -66,8 +66,8 @@ extern void alternatives_smp_module_del(struct module *mod); | |||
66 | extern void alternatives_smp_switch(int smp); | 66 | extern void alternatives_smp_switch(int smp); |
67 | #else | 67 | #else |
68 | static inline void alternatives_smp_module_add(struct module *mod, char *name, | 68 | static inline void alternatives_smp_module_add(struct module *mod, char *name, |
69 | void *locks, void *locks_end, | 69 | void *locks, void *locks_end, |
70 | void *text, void *text_end) {} | 70 | void *text, void *text_end) {} |
71 | static inline void alternatives_smp_module_del(struct module *mod) {} | 71 | static inline void alternatives_smp_module_del(struct module *mod) {} |
72 | static inline void alternatives_smp_switch(int smp) {} | 72 | static inline void alternatives_smp_switch(int smp) {} |
73 | #endif /* CONFIG_SMP */ | 73 | #endif /* CONFIG_SMP */ |
@@ -148,14 +148,34 @@ struct paravirt_patch_site; | |||
148 | void apply_paravirt(struct paravirt_patch_site *start, | 148 | void apply_paravirt(struct paravirt_patch_site *start, |
149 | struct paravirt_patch_site *end); | 149 | struct paravirt_patch_site *end); |
150 | #else | 150 | #else |
151 | static inline void | 151 | static inline void apply_paravirt(struct paravirt_patch_site *start, |
152 | apply_paravirt(struct paravirt_patch_site *start, | 152 | struct paravirt_patch_site *end) |
153 | struct paravirt_patch_site *end) | ||
154 | {} | 153 | {} |
155 | #define __parainstructions NULL | 154 | #define __parainstructions NULL |
156 | #define __parainstructions_end NULL | 155 | #define __parainstructions_end NULL |
157 | #endif | 156 | #endif |
158 | 157 | ||
159 | extern void text_poke(void *addr, unsigned char *opcode, int len); | 158 | extern void add_nops(void *insns, unsigned int len); |
159 | |||
160 | /* | ||
161 | * Clear and restore the kernel write-protection flag on the local CPU. | ||
162 | * Allows the kernel to edit read-only pages. | ||
163 | * Side-effect: any interrupt handler running between save and restore will have | ||
164 | * the ability to write to read-only pages. | ||
165 | * | ||
166 | * Warning: | ||
167 | * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and | ||
168 | * no thread can be preempted in the instructions being modified (no iret to an | ||
169 | * invalid instruction possible) or if the instructions are changed from a | ||
170 | * consistent state to another consistent state atomically. | ||
171 | * More care must be taken when modifying code in the SMP case because of | ||
172 | * Intel's errata. | ||
173 | * On the local CPU you need to be protected against NMI or MCE handlers seeing an | ||
174 | * inconsistent instruction while you patch. | ||
175 | * The _early version expects the memory to already be RW. | ||
176 | */ | ||
177 | |||
178 | extern void *text_poke(void *addr, const void *opcode, size_t len); | ||
179 | extern void *text_poke_early(void *addr, const void *opcode, size_t len); | ||
160 | 180 | ||
161 | #endif /* _ASM_X86_ALTERNATIVE_H */ | 181 | #endif /* _ASM_X86_ALTERNATIVE_H */ |
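The comment block above describes text_poke()'s central trick: briefly clear the CPU's write-protection flag so read-only kernel text can be edited in place. A user-space analogue of that save/edit/restore pattern, using mprotect() on an anonymous page (a model of the idea only, not the kernel implementation):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        char *page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (page == MAP_FAILED)
            return 1;

        strcpy(page, "original");
        mprotect(page, pagesz, PROT_READ);              /* page is now "text" */

        mprotect(page, pagesz, PROT_READ | PROT_WRITE); /* "clear WP" */
        memcpy(page, "patched!", 9);                    /* edit in place */
        mprotect(page, pagesz, PROT_READ);              /* "restore WP" */

        puts(page);                                     /* -> patched! */
        munmap(page, pagesz);
        return 0;
    }

As the warning notes, the window where the page is writable is exactly the window in which an interrupt handler could scribble on it, which is why the kernel variant carries so many caveats.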
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h index bcfc07fd3661..be9639a9a186 100644 --- a/include/asm-x86/apic.h +++ b/include/asm-x86/apic.h | |||
@@ -44,7 +44,6 @@ extern int apic_runs_main_timer; | |||
44 | extern int ioapic_force; | 44 | extern int ioapic_force; |
45 | extern int disable_apic; | 45 | extern int disable_apic; |
46 | extern int disable_apic_timer; | 46 | extern int disable_apic_timer; |
47 | extern unsigned boot_cpu_id; | ||
48 | 47 | ||
49 | /* | 48 | /* |
50 | * Basic functions accessing APICs. | 49 | * Basic functions accessing APICs. |
@@ -59,6 +58,8 @@ extern unsigned boot_cpu_id; | |||
59 | #define setup_secondary_clock setup_secondary_APIC_clock | 58 | #define setup_secondary_clock setup_secondary_APIC_clock |
60 | #endif | 59 | #endif |
61 | 60 | ||
61 | extern int is_vsmp_box(void); | ||
62 | |||
62 | static inline void native_apic_write(unsigned long reg, u32 v) | 63 | static inline void native_apic_write(unsigned long reg, u32 v) |
63 | { | 64 | { |
64 | *((volatile u32 *)(APIC_BASE + reg)) = v; | 65 | *((volatile u32 *)(APIC_BASE + reg)) = v; |
@@ -66,7 +67,7 @@ static inline void native_apic_write(unsigned long reg, u32 v) | |||
66 | 67 | ||
67 | static inline void native_apic_write_atomic(unsigned long reg, u32 v) | 68 | static inline void native_apic_write_atomic(unsigned long reg, u32 v) |
68 | { | 69 | { |
69 | (void) xchg((u32*)(APIC_BASE + reg), v); | 70 | (void)xchg((u32 *)(APIC_BASE + reg), v); |
70 | } | 71 | } |
71 | 72 | ||
72 | static inline u32 native_apic_read(unsigned long reg) | 73 | static inline u32 native_apic_read(unsigned long reg) |
@@ -123,7 +124,7 @@ extern void enable_NMI_through_LVT0(void); | |||
123 | * On 32bit this is mach-xxx local | 124 | * On 32bit this is mach-xxx local |
124 | */ | 125 | */ |
125 | #ifdef CONFIG_X86_64 | 126 | #ifdef CONFIG_X86_64 |
126 | extern void setup_apic_routing(void); | 127 | extern void early_init_lapic_mapping(void); |
127 | #endif | 128 | #endif |
128 | 129 | ||
129 | extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask); | 130 | extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask); |
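native_apic_write()/native_apic_read() above are plain volatile MMIO accesses into the APIC_BASE register window. A sketch of that access pattern against a fake register file (all names and the 0x400-byte window size here are stand-ins; 0xF0 mirrors the APIC_SPIV offset defined in apicdef.h below):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_apic[0x400 / 4];     /* stand-in for the APIC mapping */
    #define FAKE_APIC_BASE ((uintptr_t)fake_apic)

    static void reg_write(unsigned long reg, uint32_t v)
    {
        /* volatile forces a real store, as MMIO requires */
        *(volatile uint32_t *)(FAKE_APIC_BASE + reg) = v;
    }

    static uint32_t reg_read(unsigned long reg)
    {
        return *(volatile uint32_t *)(FAKE_APIC_BASE + reg);
    }

    int main(void)
    {
        reg_write(0xF0, 1u << 8);             /* cf. APIC_SPIV_APIC_ENABLED */
        printf("0x%x\n", reg_read(0xF0));     /* -> 0x100 */
        return 0;
    }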
diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h index 550af7a6f88e..6b9008c78731 100644 --- a/include/asm-x86/apicdef.h +++ b/include/asm-x86/apicdef.h | |||
@@ -12,17 +12,15 @@ | |||
12 | 12 | ||
13 | #define APIC_ID 0x20 | 13 | #define APIC_ID 0x20 |
14 | 14 | ||
15 | #ifdef CONFIG_X86_64 | ||
16 | # define APIC_ID_MASK (0xFFu<<24) | ||
17 | # define GET_APIC_ID(x) (((x)>>24)&0xFFu) | ||
18 | # define SET_APIC_ID(x) (((x)<<24)) | ||
19 | #endif | ||
20 | |||
21 | #define APIC_LVR 0x30 | 15 | #define APIC_LVR 0x30 |
22 | #define APIC_LVR_MASK 0xFF00FF | 16 | #define APIC_LVR_MASK 0xFF00FF |
23 | #define GET_APIC_VERSION(x) ((x)&0xFFu) | 17 | #define GET_APIC_VERSION(x) ((x) & 0xFFu) |
24 | #define GET_APIC_MAXLVT(x) (((x)>>16)&0xFFu) | 18 | #define GET_APIC_MAXLVT(x) (((x) >> 16) & 0xFFu) |
25 | #define APIC_INTEGRATED(x) ((x)&0xF0u) | 19 | #ifdef CONFIG_X86_32 |
20 | # define APIC_INTEGRATED(x) ((x) & 0xF0u) | ||
21 | #else | ||
22 | # define APIC_INTEGRATED(x) (1) | ||
23 | #endif | ||
26 | #define APIC_XAPIC(x) ((x) >= 0x14) | 24 | #define APIC_XAPIC(x) ((x) >= 0x14) |
27 | #define APIC_TASKPRI 0x80 | 25 | #define APIC_TASKPRI 0x80 |
28 | #define APIC_TPRI_MASK 0xFFu | 26 | #define APIC_TPRI_MASK 0xFFu |
@@ -33,16 +31,16 @@ | |||
33 | #define APIC_EIO_ACK 0x0 | 31 | #define APIC_EIO_ACK 0x0 |
34 | #define APIC_RRR 0xC0 | 32 | #define APIC_RRR 0xC0 |
35 | #define APIC_LDR 0xD0 | 33 | #define APIC_LDR 0xD0 |
36 | #define APIC_LDR_MASK (0xFFu<<24) | 34 | #define APIC_LDR_MASK (0xFFu << 24) |
37 | #define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFFu) | 35 | #define GET_APIC_LOGICAL_ID(x) (((x) >> 24) & 0xFFu) |
38 | #define SET_APIC_LOGICAL_ID(x) (((x)<<24)) | 36 | #define SET_APIC_LOGICAL_ID(x) (((x) << 24)) |
39 | #define APIC_ALL_CPUS 0xFFu | 37 | #define APIC_ALL_CPUS 0xFFu |
40 | #define APIC_DFR 0xE0 | 38 | #define APIC_DFR 0xE0 |
41 | #define APIC_DFR_CLUSTER 0x0FFFFFFFul | 39 | #define APIC_DFR_CLUSTER 0x0FFFFFFFul |
42 | #define APIC_DFR_FLAT 0xFFFFFFFFul | 40 | #define APIC_DFR_FLAT 0xFFFFFFFFul |
43 | #define APIC_SPIV 0xF0 | 41 | #define APIC_SPIV 0xF0 |
44 | #define APIC_SPIV_FOCUS_DISABLED (1<<9) | 42 | #define APIC_SPIV_FOCUS_DISABLED (1 << 9) |
45 | #define APIC_SPIV_APIC_ENABLED (1<<8) | 43 | #define APIC_SPIV_APIC_ENABLED (1 << 8) |
46 | #define APIC_ISR 0x100 | 44 | #define APIC_ISR 0x100 |
47 | #define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */ | 45 | #define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */ |
48 | #define APIC_TMR 0x180 | 46 | #define APIC_TMR 0x180 |
@@ -78,27 +76,27 @@ | |||
78 | #define APIC_DM_EXTINT 0x00700 | 76 | #define APIC_DM_EXTINT 0x00700 |
79 | #define APIC_VECTOR_MASK 0x000FF | 77 | #define APIC_VECTOR_MASK 0x000FF |
80 | #define APIC_ICR2 0x310 | 78 | #define APIC_ICR2 0x310 |
81 | #define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) | 79 | #define GET_APIC_DEST_FIELD(x) (((x) >> 24) & 0xFF) |
82 | #define SET_APIC_DEST_FIELD(x) ((x)<<24) | 80 | #define SET_APIC_DEST_FIELD(x) ((x) << 24) |
83 | #define APIC_LVTT 0x320 | 81 | #define APIC_LVTT 0x320 |
84 | #define APIC_LVTTHMR 0x330 | 82 | #define APIC_LVTTHMR 0x330 |
85 | #define APIC_LVTPC 0x340 | 83 | #define APIC_LVTPC 0x340 |
86 | #define APIC_LVT0 0x350 | 84 | #define APIC_LVT0 0x350 |
87 | #define APIC_LVT_TIMER_BASE_MASK (0x3<<18) | 85 | #define APIC_LVT_TIMER_BASE_MASK (0x3 << 18) |
88 | #define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3) | 86 | #define GET_APIC_TIMER_BASE(x) (((x) >> 18) & 0x3) |
89 | #define SET_APIC_TIMER_BASE(x) (((x)<<18)) | 87 | #define SET_APIC_TIMER_BASE(x) (((x) << 18)) |
90 | #define APIC_TIMER_BASE_CLKIN 0x0 | 88 | #define APIC_TIMER_BASE_CLKIN 0x0 |
91 | #define APIC_TIMER_BASE_TMBASE 0x1 | 89 | #define APIC_TIMER_BASE_TMBASE 0x1 |
92 | #define APIC_TIMER_BASE_DIV 0x2 | 90 | #define APIC_TIMER_BASE_DIV 0x2 |
93 | #define APIC_LVT_TIMER_PERIODIC (1<<17) | 91 | #define APIC_LVT_TIMER_PERIODIC (1 << 17) |
94 | #define APIC_LVT_MASKED (1<<16) | 92 | #define APIC_LVT_MASKED (1 << 16) |
95 | #define APIC_LVT_LEVEL_TRIGGER (1<<15) | 93 | #define APIC_LVT_LEVEL_TRIGGER (1 << 15) |
96 | #define APIC_LVT_REMOTE_IRR (1<<14) | 94 | #define APIC_LVT_REMOTE_IRR (1 << 14) |
97 | #define APIC_INPUT_POLARITY (1<<13) | 95 | #define APIC_INPUT_POLARITY (1 << 13) |
98 | #define APIC_SEND_PENDING (1<<12) | 96 | #define APIC_SEND_PENDING (1 << 12) |
99 | #define APIC_MODE_MASK 0x700 | 97 | #define APIC_MODE_MASK 0x700 |
100 | #define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) | 98 | #define GET_APIC_DELIVERY_MODE(x) (((x) >> 8) & 0x7) |
101 | #define SET_APIC_DELIVERY_MODE(x, y) (((x)&~0x700)|((y)<<8)) | 99 | #define SET_APIC_DELIVERY_MODE(x, y) (((x) & ~0x700) | ((y) << 8)) |
102 | #define APIC_MODE_FIXED 0x0 | 100 | #define APIC_MODE_FIXED 0x0 |
103 | #define APIC_MODE_NMI 0x4 | 101 | #define APIC_MODE_NMI 0x4 |
104 | #define APIC_MODE_EXTINT 0x7 | 102 | #define APIC_MODE_EXTINT 0x7 |
@@ -107,7 +105,7 @@ | |||
107 | #define APIC_TMICT 0x380 | 105 | #define APIC_TMICT 0x380 |
108 | #define APIC_TMCCT 0x390 | 106 | #define APIC_TMCCT 0x390 |
109 | #define APIC_TDCR 0x3E0 | 107 | #define APIC_TDCR 0x3E0 |
110 | #define APIC_TDR_DIV_TMBASE (1<<2) | 108 | #define APIC_TDR_DIV_TMBASE (1 << 2) |
111 | #define APIC_TDR_DIV_1 0xB | 109 | #define APIC_TDR_DIV_1 0xB |
112 | #define APIC_TDR_DIV_2 0x0 | 110 | #define APIC_TDR_DIV_2 0x0 |
113 | #define APIC_TDR_DIV_4 0x1 | 111 | #define APIC_TDR_DIV_4 0x1 |
@@ -117,14 +115,14 @@ | |||
117 | #define APIC_TDR_DIV_64 0x9 | 115 | #define APIC_TDR_DIV_64 0x9 |
118 | #define APIC_TDR_DIV_128 0xA | 116 | #define APIC_TDR_DIV_128 0xA |
119 | #define APIC_EILVT0 0x500 | 117 | #define APIC_EILVT0 0x500 |
120 | #define APIC_EILVT_NR_AMD_K8 1 /* Number of extended interrupts */ | 118 | #define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */ |
121 | #define APIC_EILVT_NR_AMD_10H 4 | 119 | #define APIC_EILVT_NR_AMD_10H 4 |
122 | #define APIC_EILVT_LVTOFF(x) (((x)>>4)&0xF) | 120 | #define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF) |
123 | #define APIC_EILVT_MSG_FIX 0x0 | 121 | #define APIC_EILVT_MSG_FIX 0x0 |
124 | #define APIC_EILVT_MSG_SMI 0x2 | 122 | #define APIC_EILVT_MSG_SMI 0x2 |
125 | #define APIC_EILVT_MSG_NMI 0x4 | 123 | #define APIC_EILVT_MSG_NMI 0x4 |
126 | #define APIC_EILVT_MSG_EXT 0x7 | 124 | #define APIC_EILVT_MSG_EXT 0x7 |
127 | #define APIC_EILVT_MASKED (1<<16) | 125 | #define APIC_EILVT_MASKED (1 << 16) |
128 | #define APIC_EILVT1 0x510 | 126 | #define APIC_EILVT1 0x510 |
129 | #define APIC_EILVT2 0x520 | 127 | #define APIC_EILVT2 0x520 |
130 | #define APIC_EILVT3 0x530 | 128 | #define APIC_EILVT3 0x530 |
@@ -135,7 +133,7 @@ | |||
135 | # define MAX_IO_APICS 64 | 133 | # define MAX_IO_APICS 64 |
136 | #else | 134 | #else |
137 | # define MAX_IO_APICS 128 | 135 | # define MAX_IO_APICS 128 |
138 | # define MAX_LOCAL_APIC 256 | 136 | # define MAX_LOCAL_APIC 32768 |
139 | #endif | 137 | #endif |
140 | 138 | ||
141 | /* | 139 | /* |
@@ -408,6 +406,9 @@ struct local_apic { | |||
408 | 406 | ||
409 | #undef u32 | 407 | #undef u32 |
410 | 408 | ||
411 | #define BAD_APICID 0xFFu | 409 | #ifdef CONFIG_X86_32 |
412 | 410 | #define BAD_APICID 0xFFu | |
411 | #else | ||
412 | #define BAD_APICID 0xFFFFu | ||
413 | #endif | ||
413 | #endif | 414 | #endif |
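The reindented macros above are ordinary shift-and-mask field accessors. A worked example using the macro bodies exactly as they appear in the hunk (the register values are made up):

    #include <stdio.h>

    #define GET_APIC_VERSION(x)     ((x) & 0xFFu)
    #define GET_APIC_MAXLVT(x)      (((x) >> 16) & 0xFFu)
    #define SET_APIC_DEST_FIELD(x)  ((x) << 24)
    #define GET_APIC_DEST_FIELD(x)  (((x) >> 24) & 0xFF)

    int main(void)
    {
        unsigned lvr = 0x00050014;   /* version 0x14, five extra LVT entries */

        printf("version=0x%x maxlvt=%u\n",
               GET_APIC_VERSION(lvr), GET_APIC_MAXLVT(lvr));  /* 0x14, 5 */

        unsigned icr2 = SET_APIC_DEST_FIELD(3u);
        printf("dest=%u\n", GET_APIC_DEST_FIELD(icr2));       /* 3 */
        return 0;
    }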
diff --git a/include/asm-x86/atomic_32.h b/include/asm-x86/atomic_32.h index 437aac801711..21a4825148c0 100644 --- a/include/asm-x86/atomic_32.h +++ b/include/asm-x86/atomic_32.h | |||
@@ -15,138 +15,133 @@ | |||
15 | * on us. We need to use _exactly_ the address the user gave us, | 15 | * on us. We need to use _exactly_ the address the user gave us, |
16 | * not some alias that contains the same information. | 16 | * not some alias that contains the same information. |
17 | */ | 17 | */ |
18 | typedef struct { int counter; } atomic_t; | 18 | typedef struct { |
19 | int counter; | ||
20 | } atomic_t; | ||
19 | 21 | ||
20 | #define ATOMIC_INIT(i) { (i) } | 22 | #define ATOMIC_INIT(i) { (i) } |
21 | 23 | ||
22 | /** | 24 | /** |
23 | * atomic_read - read atomic variable | 25 | * atomic_read - read atomic variable |
24 | * @v: pointer of type atomic_t | 26 | * @v: pointer of type atomic_t |
25 | * | 27 | * |
26 | * Atomically reads the value of @v. | 28 | * Atomically reads the value of @v. |
27 | */ | 29 | */ |
28 | #define atomic_read(v) ((v)->counter) | 30 | #define atomic_read(v) ((v)->counter) |
29 | 31 | ||
30 | /** | 32 | /** |
31 | * atomic_set - set atomic variable | 33 | * atomic_set - set atomic variable |
32 | * @v: pointer of type atomic_t | 34 | * @v: pointer of type atomic_t |
33 | * @i: required value | 35 | * @i: required value |
34 | * | 36 | * |
35 | * Atomically sets the value of @v to @i. | 37 | * Atomically sets the value of @v to @i. |
36 | */ | 38 | */ |
37 | #define atomic_set(v,i) (((v)->counter) = (i)) | 39 | #define atomic_set(v, i) (((v)->counter) = (i)) |
38 | 40 | ||
39 | /** | 41 | /** |
40 | * atomic_add - add integer to atomic variable | 42 | * atomic_add - add integer to atomic variable |
41 | * @i: integer value to add | 43 | * @i: integer value to add |
42 | * @v: pointer of type atomic_t | 44 | * @v: pointer of type atomic_t |
43 | * | 45 | * |
44 | * Atomically adds @i to @v. | 46 | * Atomically adds @i to @v. |
45 | */ | 47 | */ |
46 | static __inline__ void atomic_add(int i, atomic_t *v) | 48 | static inline void atomic_add(int i, atomic_t *v) |
47 | { | 49 | { |
48 | __asm__ __volatile__( | 50 | asm volatile(LOCK_PREFIX "addl %1,%0" |
49 | LOCK_PREFIX "addl %1,%0" | 51 | : "+m" (v->counter) |
50 | :"+m" (v->counter) | 52 | : "ir" (i)); |
51 | :"ir" (i)); | ||
52 | } | 53 | } |
53 | 54 | ||
54 | /** | 55 | /** |
55 | * atomic_sub - subtract integer from atomic variable | 56 | * atomic_sub - subtract integer from atomic variable |
56 | * @i: integer value to subtract | 57 | * @i: integer value to subtract |
57 | * @v: pointer of type atomic_t | 58 | * @v: pointer of type atomic_t |
58 | * | 59 | * |
59 | * Atomically subtracts @i from @v. | 60 | * Atomically subtracts @i from @v. |
60 | */ | 61 | */ |
61 | static __inline__ void atomic_sub(int i, atomic_t *v) | 62 | static inline void atomic_sub(int i, atomic_t *v) |
62 | { | 63 | { |
63 | __asm__ __volatile__( | 64 | asm volatile(LOCK_PREFIX "subl %1,%0" |
64 | LOCK_PREFIX "subl %1,%0" | 65 | : "+m" (v->counter) |
65 | :"+m" (v->counter) | 66 | : "ir" (i)); |
66 | :"ir" (i)); | ||
67 | } | 67 | } |
68 | 68 | ||
69 | /** | 69 | /** |
70 | * atomic_sub_and_test - subtract value from variable and test result | 70 | * atomic_sub_and_test - subtract value from variable and test result |
71 | * @i: integer value to subtract | 71 | * @i: integer value to subtract |
72 | * @v: pointer of type atomic_t | 72 | * @v: pointer of type atomic_t |
73 | * | 73 | * |
74 | * Atomically subtracts @i from @v and returns | 74 | * Atomically subtracts @i from @v and returns |
75 | * true if the result is zero, or false for all | 75 | * true if the result is zero, or false for all |
76 | * other cases. | 76 | * other cases. |
77 | */ | 77 | */ |
78 | static __inline__ int atomic_sub_and_test(int i, atomic_t *v) | 78 | static inline int atomic_sub_and_test(int i, atomic_t *v) |
79 | { | 79 | { |
80 | unsigned char c; | 80 | unsigned char c; |
81 | 81 | ||
82 | __asm__ __volatile__( | 82 | asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" |
83 | LOCK_PREFIX "subl %2,%0; sete %1" | 83 | : "+m" (v->counter), "=qm" (c) |
84 | :"+m" (v->counter), "=qm" (c) | 84 | : "ir" (i) : "memory"); |
85 | :"ir" (i) : "memory"); | ||
86 | return c; | 85 | return c; |
87 | } | 86 | } |
88 | 87 | ||
89 | /** | 88 | /** |
90 | * atomic_inc - increment atomic variable | 89 | * atomic_inc - increment atomic variable |
91 | * @v: pointer of type atomic_t | 90 | * @v: pointer of type atomic_t |
92 | * | 91 | * |
93 | * Atomically increments @v by 1. | 92 | * Atomically increments @v by 1. |
94 | */ | 93 | */ |
95 | static __inline__ void atomic_inc(atomic_t *v) | 94 | static inline void atomic_inc(atomic_t *v) |
96 | { | 95 | { |
97 | __asm__ __volatile__( | 96 | asm volatile(LOCK_PREFIX "incl %0" |
98 | LOCK_PREFIX "incl %0" | 97 | : "+m" (v->counter)); |
99 | :"+m" (v->counter)); | ||
100 | } | 98 | } |
101 | 99 | ||
102 | /** | 100 | /** |
103 | * atomic_dec - decrement atomic variable | 101 | * atomic_dec - decrement atomic variable |
104 | * @v: pointer of type atomic_t | 102 | * @v: pointer of type atomic_t |
105 | * | 103 | * |
106 | * Atomically decrements @v by 1. | 104 | * Atomically decrements @v by 1. |
107 | */ | 105 | */ |
108 | static __inline__ void atomic_dec(atomic_t *v) | 106 | static inline void atomic_dec(atomic_t *v) |
109 | { | 107 | { |
110 | __asm__ __volatile__( | 108 | asm volatile(LOCK_PREFIX "decl %0" |
111 | LOCK_PREFIX "decl %0" | 109 | : "+m" (v->counter)); |
112 | :"+m" (v->counter)); | ||
113 | } | 110 | } |
114 | 111 | ||
115 | /** | 112 | /** |
116 | * atomic_dec_and_test - decrement and test | 113 | * atomic_dec_and_test - decrement and test |
117 | * @v: pointer of type atomic_t | 114 | * @v: pointer of type atomic_t |
118 | * | 115 | * |
119 | * Atomically decrements @v by 1 and | 116 | * Atomically decrements @v by 1 and |
120 | * returns true if the result is 0, or false for all other | 117 | * returns true if the result is 0, or false for all other |
121 | * cases. | 118 | * cases. |
122 | */ | 119 | */ |
123 | static __inline__ int atomic_dec_and_test(atomic_t *v) | 120 | static inline int atomic_dec_and_test(atomic_t *v) |
124 | { | 121 | { |
125 | unsigned char c; | 122 | unsigned char c; |
126 | 123 | ||
127 | __asm__ __volatile__( | 124 | asm volatile(LOCK_PREFIX "decl %0; sete %1" |
128 | LOCK_PREFIX "decl %0; sete %1" | 125 | : "+m" (v->counter), "=qm" (c) |
129 | :"+m" (v->counter), "=qm" (c) | 126 | : : "memory"); |
130 | : : "memory"); | ||
131 | return c != 0; | 127 | return c != 0; |
132 | } | 128 | } |
133 | 129 | ||
134 | /** | 130 | /** |
135 | * atomic_inc_and_test - increment and test | 131 | * atomic_inc_and_test - increment and test |
136 | * @v: pointer of type atomic_t | 132 | * @v: pointer of type atomic_t |
137 | * | 133 | * |
138 | * Atomically increments @v by 1 | 134 | * Atomically increments @v by 1 |
139 | * and returns true if the result is zero, or false for all | 135 | * and returns true if the result is zero, or false for all |
140 | * other cases. | 136 | * other cases. |
141 | */ | 137 | */ |
142 | static __inline__ int atomic_inc_and_test(atomic_t *v) | 138 | static inline int atomic_inc_and_test(atomic_t *v) |
143 | { | 139 | { |
144 | unsigned char c; | 140 | unsigned char c; |
145 | 141 | ||
146 | __asm__ __volatile__( | 142 | asm volatile(LOCK_PREFIX "incl %0; sete %1" |
147 | LOCK_PREFIX "incl %0; sete %1" | 143 | : "+m" (v->counter), "=qm" (c) |
148 | :"+m" (v->counter), "=qm" (c) | 144 | : : "memory"); |
149 | : : "memory"); | ||
150 | return c != 0; | 145 | return c != 0; |
151 | } | 146 | } |
152 | 147 | ||
@@ -154,19 +149,18 @@ static __inline__ int atomic_inc_and_test(atomic_t *v) | |||
154 | * atomic_add_negative - add and test if negative | 149 | * atomic_add_negative - add and test if negative |
155 | * @v: pointer of type atomic_t | 150 | * @v: pointer of type atomic_t |
156 | * @i: integer value to add | 151 | * @i: integer value to add |
157 | * | 152 | * |
158 | * Atomically adds @i to @v and returns true | 153 | * Atomically adds @i to @v and returns true |
159 | * if the result is negative, or false when | 154 | * if the result is negative, or false when |
160 | * result is greater than or equal to zero. | 155 | * result is greater than or equal to zero. |
161 | */ | 156 | */ |
162 | static __inline__ int atomic_add_negative(int i, atomic_t *v) | 157 | static inline int atomic_add_negative(int i, atomic_t *v) |
163 | { | 158 | { |
164 | unsigned char c; | 159 | unsigned char c; |
165 | 160 | ||
166 | __asm__ __volatile__( | 161 | asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" |
167 | LOCK_PREFIX "addl %2,%0; sets %1" | 162 | : "+m" (v->counter), "=qm" (c) |
168 | :"+m" (v->counter), "=qm" (c) | 163 | : "ir" (i) : "memory"); |
169 | :"ir" (i) : "memory"); | ||
170 | return c; | 164 | return c; |
171 | } | 165 | } |
172 | 166 | ||
@@ -177,20 +171,19 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v) | |||
177 | * | 171 | * |
178 | * Atomically adds @i to @v and returns @i + @v | 172 | * Atomically adds @i to @v and returns @i + @v |
179 | */ | 173 | */ |
180 | static __inline__ int atomic_add_return(int i, atomic_t *v) | 174 | static inline int atomic_add_return(int i, atomic_t *v) |
181 | { | 175 | { |
182 | int __i; | 176 | int __i; |
183 | #ifdef CONFIG_M386 | 177 | #ifdef CONFIG_M386 |
184 | unsigned long flags; | 178 | unsigned long flags; |
185 | if(unlikely(boot_cpu_data.x86 <= 3)) | 179 | if (unlikely(boot_cpu_data.x86 <= 3)) |
186 | goto no_xadd; | 180 | goto no_xadd; |
187 | #endif | 181 | #endif |
188 | /* Modern 486+ processor */ | 182 | /* Modern 486+ processor */ |
189 | __i = i; | 183 | __i = i; |
190 | __asm__ __volatile__( | 184 | asm volatile(LOCK_PREFIX "xaddl %0, %1" |
191 | LOCK_PREFIX "xaddl %0, %1" | 185 | : "+r" (i), "+m" (v->counter) |
192 | :"+r" (i), "+m" (v->counter) | 186 | : : "memory"); |
193 | : : "memory"); | ||
194 | return i + __i; | 187 | return i + __i; |
195 | 188 | ||
196 | #ifdef CONFIG_M386 | 189 | #ifdef CONFIG_M386 |
@@ -210,9 +203,9 @@ no_xadd: /* Legacy 386 processor */ | |||
210 | * | 203 | * |
211 | * Atomically subtracts @i from @v and returns @v - @i | 204 | * Atomically subtracts @i from @v and returns @v - @i |
212 | */ | 205 | */ |
213 | static __inline__ int atomic_sub_return(int i, atomic_t *v) | 206 | static inline int atomic_sub_return(int i, atomic_t *v) |
214 | { | 207 | { |
215 | return atomic_add_return(-i,v); | 208 | return atomic_add_return(-i, v); |
216 | } | 209 | } |
217 | 210 | ||
218 | #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) | 211 | #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) |
@@ -227,7 +220,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v) | |||
227 | * Atomically adds @a to @v, so long as @v was not already @u. | 220 | * Atomically adds @a to @v, so long as @v was not already @u. |
228 | * Returns non-zero if @v was not @u, and zero otherwise. | 221 | * Returns non-zero if @v was not @u, and zero otherwise. |
229 | */ | 222 | */ |
230 | static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | 223 | static inline int atomic_add_unless(atomic_t *v, int a, int u) |
231 | { | 224 | { |
232 | int c, old; | 225 | int c, old; |
233 | c = atomic_read(v); | 226 | c = atomic_read(v); |
@@ -244,17 +237,17 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | |||
244 | 237 | ||
245 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | 238 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) |
246 | 239 | ||
247 | #define atomic_inc_return(v) (atomic_add_return(1,v)) | 240 | #define atomic_inc_return(v) (atomic_add_return(1, v)) |
248 | #define atomic_dec_return(v) (atomic_sub_return(1,v)) | 241 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) |
249 | 242 | ||
250 | /* These are x86-specific, used by some header files */ | 243 | /* These are x86-specific, used by some header files */ |
251 | #define atomic_clear_mask(mask, addr) \ | 244 | #define atomic_clear_mask(mask, addr) \ |
252 | __asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ | 245 | asm volatile(LOCK_PREFIX "andl %0,%1" \ |
253 | : : "r" (~(mask)),"m" (*addr) : "memory") | 246 | : : "r" (~(mask)), "m" (*(addr)) : "memory") |
254 | 247 | ||
255 | #define atomic_set_mask(mask, addr) \ | 248 | #define atomic_set_mask(mask, addr) \ |
256 | __asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \ | 249 | asm volatile(LOCK_PREFIX "orl %0,%1" \ |
257 | : : "r" (mask),"m" (*(addr)) : "memory") | 250 | : : "r" (mask), "m" (*(addr)) : "memory") |
258 | 251 | ||
259 | /* Atomic operations are already serializing on x86 */ | 252 | /* Atomic operations are already serializing on x86 */ |
260 | #define smp_mb__before_atomic_dec() barrier() | 253 | #define smp_mb__before_atomic_dec() barrier() |
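atomic_add_unless(), whose loop body is elided in the hunk above, is documented as adding @a only while @v is not @u and returning non-zero when the add happened. A C11 sketch of those semantics, with atomic_compare_exchange_weak() standing in for atomic_cmpxchg():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool add_unless(atomic_int *v, int a, int u)
    {
        int c = atomic_load(v);

        while (c != u) {
            /* try to swing c -> c + a; on failure, c is reloaded for us */
            if (atomic_compare_exchange_weak(v, &c, c + a))
                return true;
        }
        return false;                /* value was already @u */
    }

    int main(void)
    {
        atomic_int refs = 1;

        printf("%d\n", add_unless(&refs, 1, 0));  /* 1: took a reference */
        atomic_store(&refs, 0);
        printf("%d\n", add_unless(&refs, 1, 0));  /* 0: refcount already dead */
        return 0;
    }

This is the shape behind atomic_inc_not_zero(), the classic guard against reviving a dead refcount.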
diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h index 2d20a7a19f62..3e0cd7d38335 100644 --- a/include/asm-x86/atomic_64.h +++ b/include/asm-x86/atomic_64.h | |||
@@ -22,140 +22,135 @@ | |||
22 | * on us. We need to use _exactly_ the address the user gave us, | 22 | * on us. We need to use _exactly_ the address the user gave us, |
23 | * not some alias that contains the same information. | 23 | * not some alias that contains the same information. |
24 | */ | 24 | */ |
25 | typedef struct { int counter; } atomic_t; | 25 | typedef struct { |
26 | int counter; | ||
27 | } atomic_t; | ||
26 | 28 | ||
27 | #define ATOMIC_INIT(i) { (i) } | 29 | #define ATOMIC_INIT(i) { (i) } |
28 | 30 | ||
29 | /** | 31 | /** |
30 | * atomic_read - read atomic variable | 32 | * atomic_read - read atomic variable |
31 | * @v: pointer of type atomic_t | 33 | * @v: pointer of type atomic_t |
32 | * | 34 | * |
33 | * Atomically reads the value of @v. | 35 | * Atomically reads the value of @v. |
34 | */ | 36 | */ |
35 | #define atomic_read(v) ((v)->counter) | 37 | #define atomic_read(v) ((v)->counter) |
36 | 38 | ||
37 | /** | 39 | /** |
38 | * atomic_set - set atomic variable | 40 | * atomic_set - set atomic variable |
39 | * @v: pointer of type atomic_t | 41 | * @v: pointer of type atomic_t |
40 | * @i: required value | 42 | * @i: required value |
41 | * | 43 | * |
42 | * Atomically sets the value of @v to @i. | 44 | * Atomically sets the value of @v to @i. |
43 | */ | 45 | */ |
44 | #define atomic_set(v,i) (((v)->counter) = (i)) | 46 | #define atomic_set(v, i) (((v)->counter) = (i)) |
45 | 47 | ||
46 | /** | 48 | /** |
47 | * atomic_add - add integer to atomic variable | 49 | * atomic_add - add integer to atomic variable |
48 | * @i: integer value to add | 50 | * @i: integer value to add |
49 | * @v: pointer of type atomic_t | 51 | * @v: pointer of type atomic_t |
50 | * | 52 | * |
51 | * Atomically adds @i to @v. | 53 | * Atomically adds @i to @v. |
52 | */ | 54 | */ |
53 | static __inline__ void atomic_add(int i, atomic_t *v) | 55 | static inline void atomic_add(int i, atomic_t *v) |
54 | { | 56 | { |
55 | __asm__ __volatile__( | 57 | asm volatile(LOCK_PREFIX "addl %1,%0" |
56 | LOCK_PREFIX "addl %1,%0" | 58 | : "=m" (v->counter) |
57 | :"=m" (v->counter) | 59 | : "ir" (i), "m" (v->counter)); |
58 | :"ir" (i), "m" (v->counter)); | ||
59 | } | 60 | } |
60 | 61 | ||
61 | /** | 62 | /** |
62 | * atomic_sub - subtract the atomic variable | 63 | * atomic_sub - subtract the atomic variable |
63 | * @i: integer value to subtract | 64 | * @i: integer value to subtract |
64 | * @v: pointer of type atomic_t | 65 | * @v: pointer of type atomic_t |
65 | * | 66 | * |
66 | * Atomically subtracts @i from @v. | 67 | * Atomically subtracts @i from @v. |
67 | */ | 68 | */ |
68 | static __inline__ void atomic_sub(int i, atomic_t *v) | 69 | static inline void atomic_sub(int i, atomic_t *v) |
69 | { | 70 | { |
70 | __asm__ __volatile__( | 71 | asm volatile(LOCK_PREFIX "subl %1,%0" |
71 | LOCK_PREFIX "subl %1,%0" | 72 | : "=m" (v->counter) |
72 | :"=m" (v->counter) | 73 | : "ir" (i), "m" (v->counter)); |
73 | :"ir" (i), "m" (v->counter)); | ||
74 | } | 74 | } |
75 | 75 | ||
76 | /** | 76 | /** |
77 | * atomic_sub_and_test - subtract value from variable and test result | 77 | * atomic_sub_and_test - subtract value from variable and test result |
78 | * @i: integer value to subtract | 78 | * @i: integer value to subtract |
79 | * @v: pointer of type atomic_t | 79 | * @v: pointer of type atomic_t |
80 | * | 80 | * |
81 | * Atomically subtracts @i from @v and returns | 81 | * Atomically subtracts @i from @v and returns |
82 | * true if the result is zero, or false for all | 82 | * true if the result is zero, or false for all |
83 | * other cases. | 83 | * other cases. |
84 | */ | 84 | */ |
85 | static __inline__ int atomic_sub_and_test(int i, atomic_t *v) | 85 | static inline int atomic_sub_and_test(int i, atomic_t *v) |
86 | { | 86 | { |
87 | unsigned char c; | 87 | unsigned char c; |
88 | 88 | ||
89 | __asm__ __volatile__( | 89 | asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" |
90 | LOCK_PREFIX "subl %2,%0; sete %1" | 90 | : "=m" (v->counter), "=qm" (c) |
91 | :"=m" (v->counter), "=qm" (c) | 91 | : "ir" (i), "m" (v->counter) : "memory"); |
92 | :"ir" (i), "m" (v->counter) : "memory"); | ||
93 | return c; | 92 | return c; |
94 | } | 93 | } |
95 | 94 | ||
96 | /** | 95 | /** |
97 | * atomic_inc - increment atomic variable | 96 | * atomic_inc - increment atomic variable |
98 | * @v: pointer of type atomic_t | 97 | * @v: pointer of type atomic_t |
99 | * | 98 | * |
100 | * Atomically increments @v by 1. | 99 | * Atomically increments @v by 1. |
101 | */ | 100 | */ |
102 | static __inline__ void atomic_inc(atomic_t *v) | 101 | static inline void atomic_inc(atomic_t *v) |
103 | { | 102 | { |
104 | __asm__ __volatile__( | 103 | asm volatile(LOCK_PREFIX "incl %0" |
105 | LOCK_PREFIX "incl %0" | 104 | : "=m" (v->counter) |
106 | :"=m" (v->counter) | 105 | : "m" (v->counter)); |
107 | :"m" (v->counter)); | ||
108 | } | 106 | } |
109 | 107 | ||
110 | /** | 108 | /** |
111 | * atomic_dec - decrement atomic variable | 109 | * atomic_dec - decrement atomic variable |
112 | * @v: pointer of type atomic_t | 110 | * @v: pointer of type atomic_t |
113 | * | 111 | * |
114 | * Atomically decrements @v by 1. | 112 | * Atomically decrements @v by 1. |
115 | */ | 113 | */ |
116 | static __inline__ void atomic_dec(atomic_t *v) | 114 | static inline void atomic_dec(atomic_t *v) |
117 | { | 115 | { |
118 | __asm__ __volatile__( | 116 | asm volatile(LOCK_PREFIX "decl %0" |
119 | LOCK_PREFIX "decl %0" | 117 | : "=m" (v->counter) |
120 | :"=m" (v->counter) | 118 | : "m" (v->counter)); |
121 | :"m" (v->counter)); | ||
122 | } | 119 | } |
123 | 120 | ||
124 | /** | 121 | /** |
125 | * atomic_dec_and_test - decrement and test | 122 | * atomic_dec_and_test - decrement and test |
126 | * @v: pointer of type atomic_t | 123 | * @v: pointer of type atomic_t |
127 | * | 124 | * |
128 | * Atomically decrements @v by 1 and | 125 | * Atomically decrements @v by 1 and |
129 | * returns true if the result is 0, or false for all other | 126 | * returns true if the result is 0, or false for all other |
130 | * cases. | 127 | * cases. |
131 | */ | 128 | */ |
132 | static __inline__ int atomic_dec_and_test(atomic_t *v) | 129 | static inline int atomic_dec_and_test(atomic_t *v) |
133 | { | 130 | { |
134 | unsigned char c; | 131 | unsigned char c; |
135 | 132 | ||
136 | __asm__ __volatile__( | 133 | asm volatile(LOCK_PREFIX "decl %0; sete %1" |
137 | LOCK_PREFIX "decl %0; sete %1" | 134 | : "=m" (v->counter), "=qm" (c) |
138 | :"=m" (v->counter), "=qm" (c) | 135 | : "m" (v->counter) : "memory"); |
139 | :"m" (v->counter) : "memory"); | ||
140 | return c != 0; | 136 | return c != 0; |
141 | } | 137 | } |
142 | 138 | ||
143 | /** | 139 | /** |
144 | * atomic_inc_and_test - increment and test | 140 | * atomic_inc_and_test - increment and test |
145 | * @v: pointer of type atomic_t | 141 | * @v: pointer of type atomic_t |
146 | * | 142 | * |
147 | * Atomically increments @v by 1 | 143 | * Atomically increments @v by 1 |
148 | * and returns true if the result is zero, or false for all | 144 | * and returns true if the result is zero, or false for all |
149 | * other cases. | 145 | * other cases. |
150 | */ | 146 | */ |
151 | static __inline__ int atomic_inc_and_test(atomic_t *v) | 147 | static inline int atomic_inc_and_test(atomic_t *v) |
152 | { | 148 | { |
153 | unsigned char c; | 149 | unsigned char c; |
154 | 150 | ||
155 | __asm__ __volatile__( | 151 | asm volatile(LOCK_PREFIX "incl %0; sete %1" |
156 | LOCK_PREFIX "incl %0; sete %1" | 152 | : "=m" (v->counter), "=qm" (c) |
157 | :"=m" (v->counter), "=qm" (c) | 153 | : "m" (v->counter) : "memory"); |
158 | :"m" (v->counter) : "memory"); | ||
159 | return c != 0; | 154 | return c != 0; |
160 | } | 155 | } |
161 | 156 | ||
@@ -163,19 +158,18 @@ static __inline__ int atomic_inc_and_test(atomic_t *v) | |||
163 | * atomic_add_negative - add and test if negative | 158 | * atomic_add_negative - add and test if negative |
164 | * @i: integer value to add | 159 | * @i: integer value to add |
165 | * @v: pointer of type atomic_t | 160 | * @v: pointer of type atomic_t |
166 | * | 161 | * |
167 | * Atomically adds @i to @v and returns true | 162 | * Atomically adds @i to @v and returns true |
168 | * if the result is negative, or false when | 163 | * if the result is negative, or false when |
169 | * result is greater than or equal to zero. | 164 | * result is greater than or equal to zero. |
170 | */ | 165 | */ |
171 | static __inline__ int atomic_add_negative(int i, atomic_t *v) | 166 | static inline int atomic_add_negative(int i, atomic_t *v) |
172 | { | 167 | { |
173 | unsigned char c; | 168 | unsigned char c; |
174 | 169 | ||
175 | __asm__ __volatile__( | 170 | asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" |
176 | LOCK_PREFIX "addl %2,%0; sets %1" | 171 | : "=m" (v->counter), "=qm" (c) |
177 | :"=m" (v->counter), "=qm" (c) | 172 | : "ir" (i), "m" (v->counter) : "memory"); |
178 | :"ir" (i), "m" (v->counter) : "memory"); | ||
179 | return c; | 173 | return c; |
180 | } | 174 | } |
181 | 175 | ||
@@ -186,27 +180,28 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v) | |||
186 | * | 180 | * |
187 | * Atomically adds @i to @v and returns @i + @v | 181 | * Atomically adds @i to @v and returns @i + @v |
188 | */ | 182 | */ |
189 | static __inline__ int atomic_add_return(int i, atomic_t *v) | 183 | static inline int atomic_add_return(int i, atomic_t *v) |
190 | { | 184 | { |
191 | int __i = i; | 185 | int __i = i; |
192 | __asm__ __volatile__( | 186 | asm volatile(LOCK_PREFIX "xaddl %0, %1" |
193 | LOCK_PREFIX "xaddl %0, %1" | 187 | : "+r" (i), "+m" (v->counter) |
194 | :"+r" (i), "+m" (v->counter) | 188 | : : "memory"); |
195 | : : "memory"); | ||
196 | return i + __i; | 189 | return i + __i; |
197 | } | 190 | } |
198 | 191 | ||
199 | static __inline__ int atomic_sub_return(int i, atomic_t *v) | 192 | static inline int atomic_sub_return(int i, atomic_t *v) |
200 | { | 193 | { |
201 | return atomic_add_return(-i,v); | 194 | return atomic_add_return(-i, v); |
202 | } | 195 | } |
203 | 196 | ||
204 | #define atomic_inc_return(v) (atomic_add_return(1,v)) | 197 | #define atomic_inc_return(v) (atomic_add_return(1, v)) |
205 | #define atomic_dec_return(v) (atomic_sub_return(1,v)) | 198 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) |
206 | 199 | ||
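The xaddl in atomic_add_return() exchanges the register and memory operands before adding, so after the instruction the register named i holds the old counter value; adding the saved copy __i back yields the post-add value without a second memory access. A plain-C model of those semantics (sketch only; the atomicity of course needs the LOCK'd instruction):

        static int xadd_model(int *mem, int i)
        {
                int old = *mem;         /* xadd leaves the old value in the register */

                *mem = old + i;         /* ... and stores old + i to memory */
                return old + i;         /* matches "return i + __i" above */
        }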
207 | /* A 64bit atomic type */ | 200 | /* A 64bit atomic type */ |
208 | 201 | ||
209 | typedef struct { long counter; } atomic64_t; | 202 | typedef struct { |
203 | long counter; | ||
204 | } atomic64_t; | ||
210 | 205 | ||
211 | #define ATOMIC64_INIT(i) { (i) } | 206 | #define ATOMIC64_INIT(i) { (i) } |
212 | 207 | ||
@@ -226,7 +221,7 @@ typedef struct { long counter; } atomic64_t; | |||
226 | * | 221 | * |
227 | * Atomically sets the value of @v to @i. | 222 | * Atomically sets the value of @v to @i. |
228 | */ | 223 | */ |
229 | #define atomic64_set(v,i) (((v)->counter) = (i)) | 224 | #define atomic64_set(v, i) (((v)->counter) = (i)) |
230 | 225 | ||
231 | /** | 226 | /** |
232 | * atomic64_add - add integer to atomic64 variable | 227 | * atomic64_add - add integer to atomic64 variable |
@@ -235,12 +230,11 @@ typedef struct { long counter; } atomic64_t; | |||
235 | * | 230 | * |
236 | * Atomically adds @i to @v. | 231 | * Atomically adds @i to @v. |
237 | */ | 232 | */ |
238 | static __inline__ void atomic64_add(long i, atomic64_t *v) | 233 | static inline void atomic64_add(long i, atomic64_t *v) |
239 | { | 234 | { |
240 | __asm__ __volatile__( | 235 | asm volatile(LOCK_PREFIX "addq %1,%0" |
241 | LOCK_PREFIX "addq %1,%0" | 236 | : "=m" (v->counter) |
242 | :"=m" (v->counter) | 237 | : "ir" (i), "m" (v->counter)); |
243 | :"ir" (i), "m" (v->counter)); | ||
244 | } | 238 | } |
245 | 239 | ||
246 | /** | 240 | /** |
@@ -250,12 +244,11 @@ static __inline__ void atomic64_add(long i, atomic64_t *v) | |||
250 | * | 244 | * |
251 | * Atomically subtracts @i from @v. | 245 | * Atomically subtracts @i from @v. |
252 | */ | 246 | */ |
253 | static __inline__ void atomic64_sub(long i, atomic64_t *v) | 247 | static inline void atomic64_sub(long i, atomic64_t *v) |
254 | { | 248 | { |
255 | __asm__ __volatile__( | 249 | asm volatile(LOCK_PREFIX "subq %1,%0" |
256 | LOCK_PREFIX "subq %1,%0" | 250 | : "=m" (v->counter) |
257 | :"=m" (v->counter) | 251 | : "ir" (i), "m" (v->counter)); |
258 | :"ir" (i), "m" (v->counter)); | ||
259 | } | 252 | } |
260 | 253 | ||
261 | /** | 254 | /** |
@@ -267,14 +260,13 @@ static __inline__ void atomic64_sub(long i, atomic64_t *v) | |||
267 | * true if the result is zero, or false for all | 260 | * true if the result is zero, or false for all |
268 | * other cases. | 261 | * other cases. |
269 | */ | 262 | */ |
270 | static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v) | 263 | static inline int atomic64_sub_and_test(long i, atomic64_t *v) |
271 | { | 264 | { |
272 | unsigned char c; | 265 | unsigned char c; |
273 | 266 | ||
274 | __asm__ __volatile__( | 267 | asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" |
275 | LOCK_PREFIX "subq %2,%0; sete %1" | 268 | : "=m" (v->counter), "=qm" (c) |
276 | :"=m" (v->counter), "=qm" (c) | 269 | : "ir" (i), "m" (v->counter) : "memory"); |
277 | :"ir" (i), "m" (v->counter) : "memory"); | ||
278 | return c; | 270 | return c; |
279 | } | 271 | } |
280 | 272 | ||
@@ -284,12 +276,11 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v) | |||
284 | * | 276 | * |
285 | * Atomically increments @v by 1. | 277 | * Atomically increments @v by 1. |
286 | */ | 278 | */ |
287 | static __inline__ void atomic64_inc(atomic64_t *v) | 279 | static inline void atomic64_inc(atomic64_t *v) |
288 | { | 280 | { |
289 | __asm__ __volatile__( | 281 | asm volatile(LOCK_PREFIX "incq %0" |
290 | LOCK_PREFIX "incq %0" | 282 | : "=m" (v->counter) |
291 | :"=m" (v->counter) | 283 | : "m" (v->counter)); |
292 | :"m" (v->counter)); | ||
293 | } | 284 | } |
294 | 285 | ||
295 | /** | 286 | /** |
@@ -298,12 +289,11 @@ static __inline__ void atomic64_inc(atomic64_t *v) | |||
298 | * | 289 | * |
299 | * Atomically decrements @v by 1. | 290 | * Atomically decrements @v by 1. |
300 | */ | 291 | */ |
301 | static __inline__ void atomic64_dec(atomic64_t *v) | 292 | static inline void atomic64_dec(atomic64_t *v) |
302 | { | 293 | { |
303 | __asm__ __volatile__( | 294 | asm volatile(LOCK_PREFIX "decq %0" |
304 | LOCK_PREFIX "decq %0" | 295 | : "=m" (v->counter) |
305 | :"=m" (v->counter) | 296 | : "m" (v->counter)); |
306 | :"m" (v->counter)); | ||
307 | } | 297 | } |
308 | 298 | ||
309 | /** | 299 | /** |
@@ -314,14 +304,13 @@ static __inline__ void atomic64_dec(atomic64_t *v) | |||
314 | * returns true if the result is 0, or false for all other | 304 | * returns true if the result is 0, or false for all other |
315 | * cases. | 305 | * cases. |
316 | */ | 306 | */ |
317 | static __inline__ int atomic64_dec_and_test(atomic64_t *v) | 307 | static inline int atomic64_dec_and_test(atomic64_t *v) |
318 | { | 308 | { |
319 | unsigned char c; | 309 | unsigned char c; |
320 | 310 | ||
321 | __asm__ __volatile__( | 311 | asm volatile(LOCK_PREFIX "decq %0; sete %1" |
322 | LOCK_PREFIX "decq %0; sete %1" | 312 | : "=m" (v->counter), "=qm" (c) |
323 | :"=m" (v->counter), "=qm" (c) | 313 | : "m" (v->counter) : "memory"); |
324 | :"m" (v->counter) : "memory"); | ||
325 | return c != 0; | 314 | return c != 0; |
326 | } | 315 | } |
327 | 316 | ||
@@ -333,14 +322,13 @@ static __inline__ int atomic64_dec_and_test(atomic64_t *v) | |||
333 | * and returns true if the result is zero, or false for all | 322 | * and returns true if the result is zero, or false for all |
334 | * other cases. | 323 | * other cases. |
335 | */ | 324 | */ |
336 | static __inline__ int atomic64_inc_and_test(atomic64_t *v) | 325 | static inline int atomic64_inc_and_test(atomic64_t *v) |
337 | { | 326 | { |
338 | unsigned char c; | 327 | unsigned char c; |
339 | 328 | ||
340 | __asm__ __volatile__( | 329 | asm volatile(LOCK_PREFIX "incq %0; sete %1" |
341 | LOCK_PREFIX "incq %0; sete %1" | 330 | : "=m" (v->counter), "=qm" (c) |
342 | :"=m" (v->counter), "=qm" (c) | 331 | : "m" (v->counter) : "memory"); |
343 | :"m" (v->counter) : "memory"); | ||
344 | return c != 0; | 332 | return c != 0; |
345 | } | 333 | } |
346 | 334 | ||
@@ -353,14 +341,13 @@ static __inline__ int atomic64_inc_and_test(atomic64_t *v) | |||
353 | * if the result is negative, or false when | 341 | * if the result is negative, or false when |
354 | * result is greater than or equal to zero. | 342 | * result is greater than or equal to zero. |
355 | */ | 343 | */ |
356 | static __inline__ int atomic64_add_negative(long i, atomic64_t *v) | 344 | static inline int atomic64_add_negative(long i, atomic64_t *v) |
357 | { | 345 | { |
358 | unsigned char c; | 346 | unsigned char c; |
359 | 347 | ||
360 | __asm__ __volatile__( | 348 | asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" |
361 | LOCK_PREFIX "addq %2,%0; sets %1" | 349 | : "=m" (v->counter), "=qm" (c) |
362 | :"=m" (v->counter), "=qm" (c) | 350 | : "ir" (i), "m" (v->counter) : "memory"); |
363 | :"ir" (i), "m" (v->counter) : "memory"); | ||
364 | return c; | 351 | return c; |
365 | } | 352 | } |
366 | 353 | ||
@@ -371,29 +358,28 @@ static __inline__ int atomic64_add_negative(long i, atomic64_t *v) | |||
371 | * | 358 | * |
372 | * Atomically adds @i to @v and returns @i + @v | 359 | * Atomically adds @i to @v and returns @i + @v |
373 | */ | 360 | */ |
374 | static __inline__ long atomic64_add_return(long i, atomic64_t *v) | 361 | static inline long atomic64_add_return(long i, atomic64_t *v) |
375 | { | 362 | { |
376 | long __i = i; | 363 | long __i = i; |
377 | __asm__ __volatile__( | 364 | asm volatile(LOCK_PREFIX "xaddq %0, %1;" |
378 | LOCK_PREFIX "xaddq %0, %1;" | 365 | : "+r" (i), "+m" (v->counter) |
379 | :"+r" (i), "+m" (v->counter) | 366 | : : "memory"); |
380 | : : "memory"); | ||
381 | return i + __i; | 367 | return i + __i; |
382 | } | 368 | } |
383 | 369 | ||
384 | static __inline__ long atomic64_sub_return(long i, atomic64_t *v) | 370 | static inline long atomic64_sub_return(long i, atomic64_t *v) |
385 | { | 371 | { |
386 | return atomic64_add_return(-i,v); | 372 | return atomic64_add_return(-i, v); |
387 | } | 373 | } |
388 | 374 | ||
389 | #define atomic64_inc_return(v) (atomic64_add_return(1,v)) | 375 | #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) |
390 | #define atomic64_dec_return(v) (atomic64_sub_return(1,v)) | 376 | #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) |
391 | 377 | ||
392 | #define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) | 378 | #define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) |
393 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) | 379 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) |
394 | 380 | ||
395 | #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) | 381 | #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) |
396 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | 382 | #define atomic_xchg(v, new) (xchg(&((v)->counter), (new))) |
397 | 383 | ||
398 | /** | 384 | /** |
399 | * atomic_add_unless - add unless the number is a given value | 385 | * atomic_add_unless - add unless the number is a given value |
@@ -404,7 +390,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v) | |||
404 | * Atomically adds @a to @v, so long as it was not @u. | 390 | * Atomically adds @a to @v, so long as it was not @u. |
405 | * Returns non-zero if @v was not @u, and zero otherwise. | 391 | * Returns non-zero if @v was not @u, and zero otherwise. |
406 | */ | 392 | */ |
407 | static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | 393 | static inline int atomic_add_unless(atomic_t *v, int a, int u) |
408 | { | 394 | { |
409 | int c, old; | 395 | int c, old; |
410 | c = atomic_read(v); | 396 | c = atomic_read(v); |
@@ -430,7 +416,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | |||
430 | * Atomically adds @a to @v, so long as it was not @u. | 416 | * Atomically adds @a to @v, so long as it was not @u. |
431 | * Returns non-zero if @v was not @u, and zero otherwise. | 417 | * Returns non-zero if @v was not @u, and zero otherwise. |
432 | */ | 418 | */ |
433 | static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | 419 | static inline int atomic64_add_unless(atomic64_t *v, long a, long u) |
434 | { | 420 | { |
435 | long c, old; | 421 | long c, old; |
436 | c = atomic64_read(v); | 422 | c = atomic64_read(v); |
@@ -448,13 +434,14 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | |||
448 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | 434 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) |
449 | 435 | ||
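The bodies of atomic_add_unless() and atomic64_add_unless() fall outside the hunks above, but both follow the standard cmpxchg retry loop. A sketch of that shape (not a verbatim copy of the elided lines):

        static inline int add_unless_sketch(atomic_t *v, int a, int u)
        {
                int c, old;

                c = atomic_read(v);
                for (;;) {
                        if (unlikely(c == u))
                                break;                  /* forbidden value: do not add */
                        old = atomic_cmpxchg(v, c, c + a);
                        if (likely(old == c))
                                break;                  /* our update landed */
                        c = old;                        /* raced: retry with fresh value */
                }
                return c != u;
        }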
450 | /* These are x86-specific, used by some header files */ | 436 | /* These are x86-specific, used by some header files */ |
451 | #define atomic_clear_mask(mask, addr) \ | 437 | #define atomic_clear_mask(mask, addr) \ |
452 | __asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ | 438 | asm volatile(LOCK_PREFIX "andl %0,%1" \ |
453 | : : "r" (~(mask)),"m" (*addr) : "memory") | 439 | : : "r" (~(mask)), "m" (*(addr)) : "memory") |
454 | 440 | ||
455 | #define atomic_set_mask(mask, addr) \ | 441 | #define atomic_set_mask(mask, addr) \ |
456 | __asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \ | 442 | asm volatile(LOCK_PREFIX "orl %0,%1" \ |
457 | : : "r" ((unsigned)mask),"m" (*(addr)) : "memory") | 443 | : : "r" ((unsigned)(mask)), "m" (*(addr)) \ |
444 | : "memory") | ||
458 | 445 | ||
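atomic_clear_mask()/atomic_set_mask() take a plain word address rather than an atomic_t, which is why they stay macro-based. A hedged usage sketch (the flags word and bit are invented for illustration):

        static unsigned int my_flags;           /* hypothetical status word */
        #define MY_FLAG_BUSY 0x1

        static void mark_busy(void)
        {
                atomic_set_mask(MY_FLAG_BUSY, &my_flags);       /* LOCK orl */
        }

        static void mark_idle(void)
        {
                atomic_clear_mask(MY_FLAG_BUSY, &my_flags);     /* LOCK andl ~mask */
        }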
459 | /* Atomic operations are already serializing on x86 */ | 446 | /* Atomic operations are already serializing on x86 */ |
460 | #define smp_mb__before_atomic_dec() barrier() | 447 | #define smp_mb__before_atomic_dec() barrier() |
diff --git a/include/asm-x86/mach-default/bios_ebda.h b/include/asm-x86/bios_ebda.h index 9cbd9a668af8..9cbd9a668af8 100644 --- a/include/asm-x86/mach-default/bios_ebda.h +++ b/include/asm-x86/bios_ebda.h | |||
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h index 1a23ce1a5697..1ae7b270a1ef 100644 --- a/include/asm-x86/bitops.h +++ b/include/asm-x86/bitops.h | |||
@@ -23,10 +23,13 @@ | |||
23 | #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) | 23 | #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) |
24 | /* Technically wrong, but this avoids compilation errors on some gcc | 24 | /* Technically wrong, but this avoids compilation errors on some gcc |
25 | versions. */ | 25 | versions. */ |
26 | #define ADDR "=m" (*(volatile long *) addr) | 26 | #define ADDR "=m" (*(volatile long *)addr) |
27 | #define BIT_ADDR "=m" (((volatile int *)addr)[nr >> 5]) | ||
27 | #else | 28 | #else |
28 | #define ADDR "+m" (*(volatile long *) addr) | 29 | #define ADDR "+m" (*(volatile long *) addr) |
30 | #define BIT_ADDR "+m" (((volatile int *)addr)[nr >> 5]) | ||
29 | #endif | 31 | #endif |
32 | #define BASE_ADDR "m" (*(volatile int *)addr) | ||
30 | 33 | ||
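BIT_ADDR and BASE_ADDR describe to gcc the exact 32-bit word a bt-family instruction may modify -- ((volatile int *)addr)[nr >> 5] -- instead of claiming the whole long at addr. The word/bit split they encode is the usual one; in portable C the non-atomic clear looks roughly like this (sketch, not the header's code):

        static inline void clear_bit_model(int nr, volatile void *addr)
        {
                volatile int *word = (volatile int *)addr + (nr >> 5);  /* word index */

                *word &= ~(1 << (nr & 31));                             /* bit within word */
        }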
31 | /** | 34 | /** |
32 | * set_bit - Atomically set a bit in memory | 35 | * set_bit - Atomically set a bit in memory |
@@ -45,9 +48,7 @@ | |||
45 | */ | 48 | */ |
46 | static inline void set_bit(int nr, volatile void *addr) | 49 | static inline void set_bit(int nr, volatile void *addr) |
47 | { | 50 | { |
48 | asm volatile(LOCK_PREFIX "bts %1,%0" | 51 | asm volatile(LOCK_PREFIX "bts %1,%0" : ADDR : "Ir" (nr) : "memory"); |
49 | : ADDR | ||
50 | : "Ir" (nr) : "memory"); | ||
51 | } | 52 | } |
52 | 53 | ||
53 | /** | 54 | /** |
@@ -79,9 +80,7 @@ static inline void __set_bit(int nr, volatile void *addr) | |||
79 | */ | 80 | */ |
80 | static inline void clear_bit(int nr, volatile void *addr) | 81 | static inline void clear_bit(int nr, volatile void *addr) |
81 | { | 82 | { |
82 | asm volatile(LOCK_PREFIX "btr %1,%0" | 83 | asm volatile(LOCK_PREFIX "btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); |
83 | : ADDR | ||
84 | : "Ir" (nr)); | ||
85 | } | 84 | } |
86 | 85 | ||
87 | /* | 86 | /* |
@@ -100,7 +99,7 @@ static inline void clear_bit_unlock(unsigned nr, volatile void *addr) | |||
100 | 99 | ||
101 | static inline void __clear_bit(int nr, volatile void *addr) | 100 | static inline void __clear_bit(int nr, volatile void *addr) |
102 | { | 101 | { |
103 | asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); | 102 | asm volatile("btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); |
104 | } | 103 | } |
105 | 104 | ||
106 | /* | 105 | /* |
@@ -135,7 +134,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile void *addr) | |||
135 | */ | 134 | */ |
136 | static inline void __change_bit(int nr, volatile void *addr) | 135 | static inline void __change_bit(int nr, volatile void *addr) |
137 | { | 136 | { |
138 | asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); | 137 | asm volatile("btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); |
139 | } | 138 | } |
140 | 139 | ||
141 | /** | 140 | /** |
@@ -149,8 +148,7 @@ static inline void __change_bit(int nr, volatile void *addr) | |||
149 | */ | 148 | */ |
150 | static inline void change_bit(int nr, volatile void *addr) | 149 | static inline void change_bit(int nr, volatile void *addr) |
151 | { | 150 | { |
152 | asm volatile(LOCK_PREFIX "btc %1,%0" | 151 | asm volatile(LOCK_PREFIX "btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); |
153 | : ADDR : "Ir" (nr)); | ||
154 | } | 152 | } |
155 | 153 | ||
156 | /** | 154 | /** |
@@ -166,9 +164,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr) | |||
166 | int oldbit; | 164 | int oldbit; |
167 | 165 | ||
168 | asm volatile(LOCK_PREFIX "bts %2,%1\n\t" | 166 | asm volatile(LOCK_PREFIX "bts %2,%1\n\t" |
169 | "sbb %0,%0" | 167 | "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); |
170 | : "=r" (oldbit), ADDR | ||
171 | : "Ir" (nr) : "memory"); | ||
172 | 168 | ||
173 | return oldbit; | 169 | return oldbit; |
174 | } | 170 | } |
@@ -198,10 +194,9 @@ static inline int __test_and_set_bit(int nr, volatile void *addr) | |||
198 | { | 194 | { |
199 | int oldbit; | 195 | int oldbit; |
200 | 196 | ||
201 | asm("bts %2,%1\n\t" | 197 | asm volatile("bts %2,%3\n\t" |
202 | "sbb %0,%0" | 198 | "sbb %0,%0" |
203 | : "=r" (oldbit), ADDR | 199 | : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR); |
204 | : "Ir" (nr)); | ||
205 | return oldbit; | 200 | return oldbit; |
206 | } | 201 | } |
207 | 202 | ||
@@ -219,8 +214,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr) | |||
219 | 214 | ||
220 | asm volatile(LOCK_PREFIX "btr %2,%1\n\t" | 215 | asm volatile(LOCK_PREFIX "btr %2,%1\n\t" |
221 | "sbb %0,%0" | 216 | "sbb %0,%0" |
222 | : "=r" (oldbit), ADDR | 217 | : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); |
223 | : "Ir" (nr) : "memory"); | ||
224 | 218 | ||
225 | return oldbit; | 219 | return oldbit; |
226 | } | 220 | } |
@@ -238,10 +232,9 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr) | |||
238 | { | 232 | { |
239 | int oldbit; | 233 | int oldbit; |
240 | 234 | ||
241 | asm volatile("btr %2,%1\n\t" | 235 | asm volatile("btr %2,%3\n\t" |
242 | "sbb %0,%0" | 236 | "sbb %0,%0" |
243 | : "=r" (oldbit), ADDR | 237 | : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR); |
244 | : "Ir" (nr)); | ||
245 | return oldbit; | 238 | return oldbit; |
246 | } | 239 | } |
247 | 240 | ||
@@ -250,10 +243,9 @@ static inline int __test_and_change_bit(int nr, volatile void *addr) | |||
250 | { | 243 | { |
251 | int oldbit; | 244 | int oldbit; |
252 | 245 | ||
253 | asm volatile("btc %2,%1\n\t" | 246 | asm volatile("btc %2,%3\n\t" |
254 | "sbb %0,%0" | 247 | "sbb %0,%0" |
255 | : "=r" (oldbit), ADDR | 248 | : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR); |
256 | : "Ir" (nr) : "memory"); | ||
257 | 249 | ||
258 | return oldbit; | 250 | return oldbit; |
259 | } | 251 | } |
@@ -272,8 +264,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr) | |||
272 | 264 | ||
273 | asm volatile(LOCK_PREFIX "btc %2,%1\n\t" | 265 | asm volatile(LOCK_PREFIX "btc %2,%1\n\t" |
274 | "sbb %0,%0" | 266 | "sbb %0,%0" |
275 | : "=r" (oldbit), ADDR | 267 | : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); |
276 | : "Ir" (nr) : "memory"); | ||
277 | 268 | ||
278 | return oldbit; | 269 | return oldbit; |
279 | } | 270 | } |
@@ -288,10 +279,11 @@ static inline int variable_test_bit(int nr, volatile const void *addr) | |||
288 | { | 279 | { |
289 | int oldbit; | 280 | int oldbit; |
290 | 281 | ||
291 | asm volatile("bt %2,%1\n\t" | 282 | asm volatile("bt %2,%3\n\t" |
292 | "sbb %0,%0" | 283 | "sbb %0,%0" |
293 | : "=r" (oldbit) | 284 | : "=r" (oldbit) |
294 | : "m" (*(unsigned long *)addr), "Ir" (nr)); | 285 | : "m" (((volatile const int *)addr)[nr >> 5]), |
286 | "Ir" (nr), BASE_ADDR); | ||
295 | 287 | ||
296 | return oldbit; | 288 | return oldbit; |
297 | } | 289 | } |
@@ -310,6 +302,8 @@ static int test_bit(int nr, const volatile unsigned long *addr); | |||
310 | constant_test_bit((nr),(addr)) : \ | 302 | constant_test_bit((nr),(addr)) : \ |
311 | variable_test_bit((nr),(addr))) | 303 | variable_test_bit((nr),(addr))) |
312 | 304 | ||
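test_bit() dispatches on __builtin_constant_p(nr): constant bit numbers take the pure-C constant_test_bit() path, which the optimizer folds to a single immediate test, while runtime values use the bt-based variable_test_bit() above. constant_test_bit() itself is outside these hunks; its conventional shape is (sketch):

        static inline int constant_test_bit_sketch(int nr,
                                                   const volatile unsigned long *addr)
        {
                return ((1UL << (nr % BITS_PER_LONG)) &
                        addr[nr / BITS_PER_LONG]) != 0;
        }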
305 | #undef BASE_ADDR | ||
306 | #undef BIT_ADDR | ||
313 | #undef ADDR | 307 | #undef ADDR |
314 | 308 | ||
315 | #ifdef CONFIG_X86_32 | 309 | #ifdef CONFIG_X86_32 |
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h index e4d75fcf9c03..2513a81f82aa 100644 --- a/include/asm-x86/bitops_32.h +++ b/include/asm-x86/bitops_32.h | |||
@@ -20,20 +20,22 @@ static inline int find_first_zero_bit(const unsigned long *addr, unsigned size) | |||
20 | 20 | ||
21 | if (!size) | 21 | if (!size) |
22 | return 0; | 22 | return 0; |
23 | /* This looks at memory. Mark it volatile to tell gcc not to move it around */ | 23 | /* This looks at memory. |
24 | __asm__ __volatile__( | 24 | * Mark it volatile to tell gcc not to move it around |
25 | "movl $-1,%%eax\n\t" | 25 | */ |
26 | "xorl %%edx,%%edx\n\t" | 26 | asm volatile("movl $-1,%%eax\n\t" |
27 | "repe; scasl\n\t" | 27 | "xorl %%edx,%%edx\n\t" |
28 | "je 1f\n\t" | 28 | "repe; scasl\n\t" |
29 | "xorl -4(%%edi),%%eax\n\t" | 29 | "je 1f\n\t" |
30 | "subl $4,%%edi\n\t" | 30 | "xorl -4(%%edi),%%eax\n\t" |
31 | "bsfl %%eax,%%edx\n" | 31 | "subl $4,%%edi\n\t" |
32 | "1:\tsubl %%ebx,%%edi\n\t" | 32 | "bsfl %%eax,%%edx\n" |
33 | "shll $3,%%edi\n\t" | 33 | "1:\tsubl %%ebx,%%edi\n\t" |
34 | "addl %%edi,%%edx" | 34 | "shll $3,%%edi\n\t" |
35 | :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2) | 35 | "addl %%edi,%%edx" |
36 | :"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory"); | 36 | : "=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2) |
37 | : "1" ((size + 31) >> 5), "2" (addr), | ||
38 | "b" (addr) : "memory"); | ||
37 | return res; | 39 | return res; |
38 | } | 40 | } |
39 | 41 | ||
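The repe; scasl loop compares successive longwords against %eax = -1 to skip words with no zero bits; on a mismatch, xorl turns the zero bits into ones and bsfl finds the lowest, while the subl/shll arithmetic converts the byte distance scanned back into a bit offset. The same algorithm in portable C (sketch; the asm version exists because this path is hot):

        static inline unsigned ffzb_model(const unsigned long *addr, unsigned size)
        {
                unsigned nwords = (size + 31) >> 5;
                unsigned i;

                for (i = 0; i < nwords; i++)
                        if (addr[i] != ~0UL)    /* word with a zero bit */
                                return i * 32 + __builtin_ctzl(~addr[i]);
                return nwords * 32;             /* >= size means "not found" */
        }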
@@ -75,7 +77,7 @@ static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) | |||
75 | unsigned long val = *addr++; | 77 | unsigned long val = *addr++; |
76 | if (val) | 78 | if (val) |
77 | return __ffs(val) + x; | 79 | return __ffs(val) + x; |
78 | x += (sizeof(*addr)<<3); | 80 | x += sizeof(*addr) << 3; |
79 | } | 81 | } |
80 | return x; | 82 | return x; |
81 | } | 83 | } |
@@ -152,10 +154,10 @@ static inline int fls(int x) | |||
152 | 154 | ||
153 | #include <asm-generic/bitops/ext2-non-atomic.h> | 155 | #include <asm-generic/bitops/ext2-non-atomic.h> |
154 | 156 | ||
155 | #define ext2_set_bit_atomic(lock, nr, addr) \ | 157 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
156 | test_and_set_bit((nr), (unsigned long *)addr) | 158 | test_and_set_bit((nr), (unsigned long *)(addr)) |
157 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | 159 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
158 | test_and_clear_bit((nr), (unsigned long *)addr) | 160 | test_and_clear_bit((nr), (unsigned long *)(addr)) |
159 | 161 | ||
160 | #include <asm-generic/bitops/minix.h> | 162 | #include <asm-generic/bitops/minix.h> |
161 | 163 | ||
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h index aaf15194d536..365f8207ea59 100644 --- a/include/asm-x86/bitops_64.h +++ b/include/asm-x86/bitops_64.h | |||
@@ -17,35 +17,35 @@ static inline long __scanbit(unsigned long val, unsigned long max) | |||
17 | return val; | 17 | return val; |
18 | } | 18 | } |
19 | 19 | ||
20 | #define find_first_bit(addr,size) \ | ||
21 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ | ||
22 | (__scanbit(*(unsigned long *)addr,(size))) : \ | ||
23 | find_first_bit(addr,size))) | ||
24 | |||
25 | #define find_next_bit(addr,size,off) \ | 20 | #define find_next_bit(addr,size,off) \ |
26 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ | 21 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ |
27 | ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \ | 22 | ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \ |
28 | find_next_bit(addr,size,off))) | 23 | find_next_bit(addr,size,off))) |
29 | 24 | ||
30 | #define find_first_zero_bit(addr,size) \ | ||
31 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ | ||
32 | (__scanbit(~*(unsigned long *)addr,(size))) : \ | ||
33 | find_first_zero_bit(addr,size))) | ||
34 | |||
35 | #define find_next_zero_bit(addr,size,off) \ | 25 | #define find_next_zero_bit(addr,size,off) \ |
36 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ | 26 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ |
37 | ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \ | 27 | ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \ |
38 | find_next_zero_bit(addr,size,off))) | 28 | find_next_zero_bit(addr,size,off))) |
39 | 29 | ||
40 | static inline void set_bit_string(unsigned long *bitmap, unsigned long i, | 30 | #define find_first_bit(addr, size) \ |
41 | int len) | 31 | ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \ |
42 | { | 32 | ? (__scanbit(*(unsigned long *)(addr), (size))) \ |
43 | unsigned long end = i + len; | 33 | : find_first_bit((addr), (size)))) |
34 | |||
35 | #define find_first_zero_bit(addr, size) \ | ||
36 | ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \ | ||
37 | ? (__scanbit(~*(unsigned long *)(addr), (size))) \ | ||
38 | : find_first_zero_bit((addr), (size)))) | ||
39 | |||
40 | static inline void set_bit_string(unsigned long *bitmap, unsigned long i, | ||
41 | int len) | ||
42 | { | ||
43 | unsigned long end = i + len; | ||
44 | while (i < end) { | 44 | while (i < end) { |
45 | __set_bit(i, bitmap); | 45 | __set_bit(i, bitmap); |
46 | i++; | 46 | i++; |
47 | } | 47 | } |
48 | } | 48 | } |
49 | 49 | ||
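Moving find_first_bit()/find_first_zero_bit() below find_next_bit() is cosmetic; what matters is the __builtin_constant_p() test, which lets a small constant size collapse into __scanbit() -- essentially a single bsfq -- instead of an out-of-line call. For instance (hypothetical caller):

        DECLARE_BITMAP(map, BITS_PER_LONG);

        /* size is a constant <= BITS_PER_LONG, so this compiles down to
         * __scanbit(*map, BITS_PER_LONG) with no function call. */
        unsigned long first = find_first_bit(map, BITS_PER_LONG);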
50 | /** | 50 | /** |
51 | * ffz - find first zero in word. | 51 | * ffz - find first zero in word. |
@@ -150,10 +150,10 @@ static inline int fls(int x) | |||
150 | 150 | ||
151 | #include <asm-generic/bitops/ext2-non-atomic.h> | 151 | #include <asm-generic/bitops/ext2-non-atomic.h> |
152 | 152 | ||
153 | #define ext2_set_bit_atomic(lock,nr,addr) \ | 153 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
154 | test_and_set_bit((nr),(unsigned long*)addr) | 154 | test_and_set_bit((nr), (unsigned long *)(addr)) |
155 | #define ext2_clear_bit_atomic(lock,nr,addr) \ | 155 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
156 | test_and_clear_bit((nr),(unsigned long*)addr) | 156 | test_and_clear_bit((nr), (unsigned long *)(addr)) |
157 | 157 | ||
158 | #include <asm-generic/bitops/minix.h> | 158 | #include <asm-generic/bitops/minix.h> |
159 | 159 | ||
diff --git a/include/asm-x86/bug.h b/include/asm-x86/bug.h index 8d477a201392..b69aa64b82a4 100644 --- a/include/asm-x86/bug.h +++ b/include/asm-x86/bug.h | |||
@@ -12,25 +12,25 @@ | |||
12 | # define __BUG_C0 "2:\t.quad 1b, %c0\n" | 12 | # define __BUG_C0 "2:\t.quad 1b, %c0\n" |
13 | #endif | 13 | #endif |
14 | 14 | ||
15 | #define BUG() \ | 15 | #define BUG() \ |
16 | do { \ | 16 | do { \ |
17 | asm volatile("1:\tud2\n" \ | 17 | asm volatile("1:\tud2\n" \ |
18 | ".pushsection __bug_table,\"a\"\n" \ | 18 | ".pushsection __bug_table,\"a\"\n" \ |
19 | __BUG_C0 \ | 19 | __BUG_C0 \ |
20 | "\t.word %c1, 0\n" \ | 20 | "\t.word %c1, 0\n" \ |
21 | "\t.org 2b+%c2\n" \ | 21 | "\t.org 2b+%c2\n" \ |
22 | ".popsection" \ | 22 | ".popsection" \ |
23 | : : "i" (__FILE__), "i" (__LINE__), \ | 23 | : : "i" (__FILE__), "i" (__LINE__), \ |
24 | "i" (sizeof(struct bug_entry))); \ | 24 | "i" (sizeof(struct bug_entry))); \ |
25 | for(;;) ; \ | 25 | for (;;) ; \ |
26 | } while(0) | 26 | } while (0) |
27 | 27 | ||
28 | #else | 28 | #else |
29 | #define BUG() \ | 29 | #define BUG() \ |
30 | do { \ | 30 | do { \ |
31 | asm volatile("ud2"); \ | 31 | asm volatile("ud2"); \ |
32 | for(;;) ; \ | 32 | for (;;) ; \ |
33 | } while(0) | 33 | } while (0) |
34 | #endif | 34 | #endif |
35 | 35 | ||
36 | #endif /* !CONFIG_BUG */ | 36 | #endif /* !CONFIG_BUG */ |
diff --git a/include/asm-x86/byteorder.h b/include/asm-x86/byteorder.h index fe2f2e5d51ba..e02ae2d89acf 100644 --- a/include/asm-x86/byteorder.h +++ b/include/asm-x86/byteorder.h | |||
@@ -8,50 +8,59 @@ | |||
8 | 8 | ||
9 | #ifdef __i386__ | 9 | #ifdef __i386__ |
10 | 10 | ||
11 | static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) | 11 | static inline __attribute_const__ __u32 ___arch__swab32(__u32 x) |
12 | { | 12 | { |
13 | #ifdef CONFIG_X86_BSWAP | 13 | #ifdef CONFIG_X86_BSWAP |
14 | __asm__("bswap %0" : "=r" (x) : "0" (x)); | 14 | asm("bswap %0" : "=r" (x) : "0" (x)); |
15 | #else | 15 | #else |
16 | __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */ | 16 | asm("xchgb %b0,%h0\n\t" /* swap lower bytes */ |
17 | "rorl $16,%0\n\t" /* swap words */ | 17 | "rorl $16,%0\n\t" /* swap words */ |
18 | "xchgb %b0,%h0" /* swap higher bytes */ | 18 | "xchgb %b0,%h0" /* swap higher bytes */ |
19 | :"=q" (x) | 19 | : "=q" (x) |
20 | : "0" (x)); | 20 | : "0" (x)); |
21 | #endif | 21 | #endif |
22 | return x; | 22 | return x; |
23 | } | 23 | } |
24 | 24 | ||
25 | static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val) | 25 | static inline __attribute_const__ __u64 ___arch__swab64(__u64 val) |
26 | { | 26 | { |
27 | union { | 27 | union { |
28 | struct { __u32 a,b; } s; | 28 | struct { |
29 | __u32 a; | ||
30 | __u32 b; | ||
31 | } s; | ||
29 | __u64 u; | 32 | __u64 u; |
30 | } v; | 33 | } v; |
31 | v.u = val; | 34 | v.u = val; |
32 | #ifdef CONFIG_X86_BSWAP | 35 | #ifdef CONFIG_X86_BSWAP |
33 | __asm__("bswapl %0 ; bswapl %1 ; xchgl %0,%1" | 36 | asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" |
34 | : "=r" (v.s.a), "=r" (v.s.b) | 37 | : "=r" (v.s.a), "=r" (v.s.b) |
35 | : "0" (v.s.a), "1" (v.s.b)); | 38 | : "0" (v.s.a), "1" (v.s.b)); |
36 | #else | 39 | #else |
37 | v.s.a = ___arch__swab32(v.s.a); | 40 | v.s.a = ___arch__swab32(v.s.a); |
38 | v.s.b = ___arch__swab32(v.s.b); | 41 | v.s.b = ___arch__swab32(v.s.b); |
39 | __asm__("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); | 42 | asm("xchgl %0,%1" |
43 | : "=r" (v.s.a), "=r" (v.s.b) | ||
44 | : "0" (v.s.a), "1" (v.s.b)); | ||
40 | #endif | 45 | #endif |
41 | return v.u; | 46 | return v.u; |
42 | } | 47 | } |
43 | 48 | ||
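The xchgb/rorl sequence is the fallback for CPUs predating bswap (hence the CONFIG_X86_BSWAP guard). In plain C the same 32-bit reversal is the familiar shift-and-mask form (reference sketch):

        static inline __u32 swab32_model(__u32 x)
        {
                return ((x & 0x000000ffU) << 24) |      /* byte 0 -> byte 3 */
                       ((x & 0x0000ff00U) <<  8) |      /* byte 1 -> byte 2 */
                       ((x & 0x00ff0000U) >>  8) |      /* byte 2 -> byte 1 */
                       ((x & 0xff000000U) >> 24);       /* byte 3 -> byte 0 */
        }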
44 | #else /* __i386__ */ | 49 | #else /* __i386__ */ |
45 | 50 | ||
46 | static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) | 51 | static inline __attribute_const__ __u64 ___arch__swab64(__u64 x) |
47 | { | 52 | { |
48 | __asm__("bswapq %0" : "=r" (x) : "0" (x)); | 53 | asm("bswapq %0" |
54 | : "=r" (x) | ||
55 | : "0" (x)); | ||
49 | return x; | 56 | return x; |
50 | } | 57 | } |
51 | 58 | ||
52 | static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) | 59 | static inline __attribute_const__ __u32 ___arch__swab32(__u32 x) |
53 | { | 60 | { |
54 | __asm__("bswapl %0" : "=r" (x) : "0" (x)); | 61 | asm("bswapl %0" |
62 | : "=r" (x) | ||
63 | : "0" (x)); | ||
55 | return x; | 64 | return x; |
56 | } | 65 | } |
57 | 66 | ||
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h index 5396c212d8c0..f4c0ab50d2c2 100644 --- a/include/asm-x86/cacheflush.h +++ b/include/asm-x86/cacheflush.h | |||
@@ -14,33 +14,85 @@ | |||
14 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | 14 | #define flush_dcache_mmap_lock(mapping) do { } while (0) |
15 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | 15 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) |
16 | #define flush_icache_range(start, end) do { } while (0) | 16 | #define flush_icache_range(start, end) do { } while (0) |
17 | #define flush_icache_page(vma,pg) do { } while (0) | 17 | #define flush_icache_page(vma, pg) do { } while (0) |
18 | #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) | 18 | #define flush_icache_user_range(vma, pg, adr, len) do { } while (0) |
19 | #define flush_cache_vmap(start, end) do { } while (0) | 19 | #define flush_cache_vmap(start, end) do { } while (0) |
20 | #define flush_cache_vunmap(start, end) do { } while (0) | 20 | #define flush_cache_vunmap(start, end) do { } while (0) |
21 | 21 | ||
22 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ | 22 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ |
23 | memcpy(dst, src, len) | 23 | memcpy((dst), (src), (len)) |
24 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ | 24 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ |
25 | memcpy(dst, src, len) | 25 | memcpy((dst), (src), (len)) |
26 | 26 | ||
27 | int __deprecated_for_modules change_page_attr(struct page *page, int numpages, | ||
28 | pgprot_t prot); | ||
29 | 27 | ||
30 | int set_pages_uc(struct page *page, int numpages); | 28 | /* |
31 | int set_pages_wb(struct page *page, int numpages); | 29 | * The set_memory_* API can be used to change various attributes of a virtual |
32 | int set_pages_x(struct page *page, int numpages); | 30 | * address range. The attributes include: |
33 | int set_pages_nx(struct page *page, int numpages); | 31 | * Cachability : UnCached, WriteCombining, WriteBack |
34 | int set_pages_ro(struct page *page, int numpages); | 32 | * Executability : eXeutable, NoteXecutable |
35 | int set_pages_rw(struct page *page, int numpages); | 33 | * Read/Write : ReadOnly, ReadWrite |
34 | * Presence : NotPresent | ||
35 | * | ||
36 | * Within a catagory, the attributes are mutually exclusive. | ||
37 | * | ||
38 | * The implementation of this API will take care of various aspects that | ||
39 | * are associated with changing such attributes, such as: | ||
40 | * - Flushing TLBs | ||
41 | * - Flushing CPU caches | ||
42 | * - Making sure aliases of the memory behind the mapping don't violate | ||
43 | * coherency rules as defined by the CPU in the system. | ||
44 | * | ||
45 | * What this API does not do: | ||
46 | * - Provide exclusion between various callers - including callers that | ||
47 | * operate on other mappings of the same physical page ||
48 | * - Restore default attributes when a page is freed | ||
49 | * - Guarantee anything about mappings other than the requested one, ||
50 | * beyond that they do not violate the rules defined by the CPU ||
51 | * in your system. Do not depend on any effects on other mappings; ||
52 | * CPUs other than the one you have may have more relaxed rules. ||
53 | * The caller is required to take care of these. | ||
54 | */ | ||
36 | 55 | ||
56 | int _set_memory_uc(unsigned long addr, int numpages); | ||
57 | int _set_memory_wc(unsigned long addr, int numpages); | ||
58 | int _set_memory_wb(unsigned long addr, int numpages); | ||
37 | int set_memory_uc(unsigned long addr, int numpages); | 59 | int set_memory_uc(unsigned long addr, int numpages); |
60 | int set_memory_wc(unsigned long addr, int numpages); | ||
38 | int set_memory_wb(unsigned long addr, int numpages); | 61 | int set_memory_wb(unsigned long addr, int numpages); |
39 | int set_memory_x(unsigned long addr, int numpages); | 62 | int set_memory_x(unsigned long addr, int numpages); |
40 | int set_memory_nx(unsigned long addr, int numpages); | 63 | int set_memory_nx(unsigned long addr, int numpages); |
41 | int set_memory_ro(unsigned long addr, int numpages); | 64 | int set_memory_ro(unsigned long addr, int numpages); |
42 | int set_memory_rw(unsigned long addr, int numpages); | 65 | int set_memory_rw(unsigned long addr, int numpages); |
43 | int set_memory_np(unsigned long addr, int numpages); | 66 | int set_memory_np(unsigned long addr, int numpages); |
67 | int set_memory_4k(unsigned long addr, int numpages); | ||
68 | |||
69 | /* | ||
70 | * For legacy compatibility with the old APIs, a few functions | ||
71 | * are provided that work on a "struct page". | ||
72 | * These functions operate ONLY on the 1:1 kernel mapping of the | ||
73 | * memory that the struct page represents, and internally just | ||
74 | * call the set_memory_* function. See the description of the | ||
75 | * set_memory_* function for more details on conventions. | ||
76 | * | ||
77 | * These APIs should be considered *deprecated* and are likely going to | ||
78 | * be removed in the future. | ||
79 | * The reason is that these functions implicitly operate on the 1:1 ||
80 | * kernel mapping only, which makes them not generally useful. ||
81 | * | ||
82 | * Specifically, many users of the old APIs had a virtual address, | ||
83 | * called virt_to_page() or vmalloc_to_page() on that address to | ||
84 | * get a struct page* that the old API required. | ||
85 | * To convert these cases, use set_memory_*() on the original | ||
86 | * virtual address; do not use these functions. ||
87 | */ | ||
88 | |||
89 | int set_pages_uc(struct page *page, int numpages); | ||
90 | int set_pages_wb(struct page *page, int numpages); | ||
91 | int set_pages_x(struct page *page, int numpages); | ||
92 | int set_pages_nx(struct page *page, int numpages); | ||
93 | int set_pages_ro(struct page *page, int numpages); | ||
94 | int set_pages_rw(struct page *page, int numpages); | ||
95 | |||
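To make the migration concrete: code that previously called change_page_attr(virt_to_page(buf), ...) should now hand the virtual address straight to set_memory_*(). A hedged sketch (buf and nr_pages are illustrative):

        /* write-protect a table after init; undo before freeing it */
        static int protect_table(void *buf, int nr_pages)
        {
                return set_memory_ro((unsigned long)buf, nr_pages);
        }

        static int unprotect_table(void *buf, int nr_pages)
        {
                return set_memory_rw((unsigned long)buf, nr_pages);
        }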
44 | 96 | ||
45 | void clflush_cache_range(void *addr, unsigned int size); | 97 | void clflush_cache_range(void *addr, unsigned int size); |
46 | 98 | ||
diff --git a/include/asm-x86/checksum_32.h b/include/asm-x86/checksum_32.h index 75194abbe8ee..52bbb0d8c4c1 100644 --- a/include/asm-x86/checksum_32.h +++ b/include/asm-x86/checksum_32.h | |||
@@ -28,7 +28,8 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum); | |||
28 | */ | 28 | */ |
29 | 29 | ||
30 | asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, | 30 | asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, |
31 | int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr); | 31 | int len, __wsum sum, |
32 | int *src_err_ptr, int *dst_err_ptr); | ||
32 | 33 | ||
33 | /* | 34 | /* |
34 | * Note: when you get a NULL pointer exception here this means someone | 35 | * Note: when you get a NULL pointer exception here this means someone |
@@ -37,20 +38,20 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, | |||
37 | * If you use these functions directly please don't forget the | 38 | * If you use these functions directly please don't forget the |
38 | * access_ok(). | 39 | * access_ok(). |
39 | */ | 40 | */ |
40 | static __inline__ | 41 | static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, |
41 | __wsum csum_partial_copy_nocheck (const void *src, void *dst, | 42 | int len, __wsum sum) |
42 | int len, __wsum sum) | ||
43 | { | 43 | { |
44 | return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL); | 44 | return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); |
45 | } | 45 | } |
46 | 46 | ||
47 | static __inline__ | 47 | static inline __wsum csum_partial_copy_from_user(const void __user *src, |
48 | __wsum csum_partial_copy_from_user(const void __user *src, void *dst, | 48 | void *dst, |
49 | int len, __wsum sum, int *err_ptr) | 49 | int len, __wsum sum, |
50 | int *err_ptr) | ||
50 | { | 51 | { |
51 | might_sleep(); | 52 | might_sleep(); |
52 | return csum_partial_copy_generic((__force void *)src, dst, | 53 | return csum_partial_copy_generic((__force void *)src, dst, |
53 | len, sum, err_ptr, NULL); | 54 | len, sum, err_ptr, NULL); |
54 | } | 55 | } |
55 | 56 | ||
56 | /* | 57 | /* |
@@ -64,30 +65,29 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) | |||
64 | { | 65 | { |
65 | unsigned int sum; | 66 | unsigned int sum; |
66 | 67 | ||
67 | __asm__ __volatile__( | 68 | asm volatile("movl (%1), %0 ;\n" |
68 | "movl (%1), %0 ;\n" | 69 | "subl $4, %2 ;\n" |
69 | "subl $4, %2 ;\n" | 70 | "jbe 2f ;\n" |
70 | "jbe 2f ;\n" | 71 | "addl 4(%1), %0 ;\n" |
71 | "addl 4(%1), %0 ;\n" | 72 | "adcl 8(%1), %0 ;\n" |
72 | "adcl 8(%1), %0 ;\n" | 73 | "adcl 12(%1), %0;\n" |
73 | "adcl 12(%1), %0 ;\n" | 74 | "1: adcl 16(%1), %0 ;\n" |
74 | "1: adcl 16(%1), %0 ;\n" | 75 | "lea 4(%1), %1 ;\n" |
75 | "lea 4(%1), %1 ;\n" | 76 | "decl %2 ;\n" |
76 | "decl %2 ;\n" | 77 | "jne 1b ;\n" |
77 | "jne 1b ;\n" | 78 | "adcl $0, %0 ;\n" |
78 | "adcl $0, %0 ;\n" | 79 | "movl %0, %2 ;\n" |
79 | "movl %0, %2 ;\n" | 80 | "shrl $16, %0 ;\n" |
80 | "shrl $16, %0 ;\n" | 81 | "addw %w2, %w0 ;\n" |
81 | "addw %w2, %w0 ;\n" | 82 | "adcl $0, %0 ;\n" |
82 | "adcl $0, %0 ;\n" | 83 | "notl %0 ;\n" |
83 | "notl %0 ;\n" | 84 | "2: ;\n" |
84 | "2: ;\n" | ||
85 | /* Since the input registers which are loaded with iph and ihl | 85 | /* Since the input registers which are loaded with iph and ihl |
86 | are modified, we must also specify them as outputs, or gcc | 86 | are modified, we must also specify them as outputs, or gcc |
87 | will assume they contain their original values. */ | 87 | will assume they contain their original values. */ |
88 | : "=r" (sum), "=r" (iph), "=r" (ihl) | 88 | : "=r" (sum), "=r" (iph), "=r" (ihl) |
89 | : "1" (iph), "2" (ihl) | 89 | : "1" (iph), "2" (ihl) |
90 | : "memory"); | 90 | : "memory"); |
91 | return (__force __sum16)sum; | 91 | return (__force __sum16)sum; |
92 | } | 92 | } |
93 | 93 | ||
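ip_fast_csum() unrolls the first four header words, loops adcl over any IP options (ihl counts 32-bit words, minimum 5), then folds the 32-bit accumulator to 16 bits and complements it. A portable C reference of the same checksum (sketch; the asm keeps the carry chain in the flags register):

        static inline u16 ip_fast_csum_model(const void *iph, unsigned int ihl)
        {
                const u32 *p = iph;
                u64 sum = 0;
                unsigned int i;

                for (i = 0; i < ihl; i++)       /* ihl = header length in u32 words */
                        sum += p[i];
                sum = (sum & 0xffffffff) + (sum >> 32); /* fold 64 -> 32 */
                sum = (sum & 0xffff) + (sum >> 16);     /* fold 32 -> 16 */
                sum = (sum & 0xffff) + (sum >> 16);     /* absorb final carry */
                return (u16)~sum;
        }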
@@ -97,29 +97,27 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) | |||
97 | 97 | ||
98 | static inline __sum16 csum_fold(__wsum sum) | 98 | static inline __sum16 csum_fold(__wsum sum) |
99 | { | 99 | { |
100 | __asm__( | 100 | asm("addl %1, %0 ;\n" |
101 | "addl %1, %0 ;\n" | 101 | "adcl $0xffff, %0 ;\n" |
102 | "adcl $0xffff, %0 ;\n" | 102 | : "=r" (sum) |
103 | : "=r" (sum) | 103 | : "r" ((__force u32)sum << 16), |
104 | : "r" ((__force u32)sum << 16), | 104 | "0" ((__force u32)sum & 0xffff0000)); |
105 | "0" ((__force u32)sum & 0xffff0000) | ||
106 | ); | ||
107 | return (__force __sum16)(~(__force u32)sum >> 16); | 105 | return (__force __sum16)(~(__force u32)sum >> 16); |
108 | } | 106 | } |
109 | 107 | ||
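csum_fold() adds the low half (shifted into the high half) onto the original high half, lets adcl $0xffff absorb the carry, and returns the complemented top 16 bits; the effect is exactly the classic "fold and invert". Equivalent portable C (sketch):

        static inline u16 csum_fold_model(u32 sum)
        {
                sum = (sum & 0xffff) + (sum >> 16);     /* fold high into low */
                sum = (sum & 0xffff) + (sum >> 16);     /* absorb the carry */
                return (u16)~sum;
        }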
110 | static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, | 108 | static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, |
111 | unsigned short len, | 109 | unsigned short len, |
112 | unsigned short proto, | 110 | unsigned short proto, |
113 | __wsum sum) | 111 | __wsum sum) |
114 | { | 112 | { |
115 | __asm__( | 113 | asm("addl %1, %0 ;\n" |
116 | "addl %1, %0 ;\n" | 114 | "adcl %2, %0 ;\n" |
117 | "adcl %2, %0 ;\n" | 115 | "adcl %3, %0 ;\n" |
118 | "adcl %3, %0 ;\n" | 116 | "adcl $0, %0 ;\n" |
119 | "adcl $0, %0 ;\n" | 117 | : "=r" (sum) |
120 | : "=r" (sum) | 118 | : "g" (daddr), "g"(saddr), |
121 | : "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum)); | 119 | "g" ((len + proto) << 8), "0" (sum)); |
122 | return sum; | 120 | return sum; |
123 | } | 121 | } |
124 | 122 | ||
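The addl/adcl chain above accumulates the RFC 793 pseudo header: both addresses plus a packed length/protocol contribution, (len + proto) << 8. For reference, the fields the pseudo header covers on the wire are (sketch; this struct is illustrative, not defined in the header):

        struct pseudo_hdr {
                __be32  saddr;
                __be32  daddr;
                __u8    zero;           /* always 0 */
                __u8    proto;          /* e.g. IPPROTO_TCP */
                __be16  len;            /* TCP/UDP length */
        };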
125 | /* | 123 | /* |
@@ -127,11 +125,11 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, | |||
127 | * returns a 16-bit checksum, already complemented | 125 | * returns a 16-bit checksum, already complemented |
128 | */ | 126 | */ |
129 | static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, | 127 | static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, |
130 | unsigned short len, | 128 | unsigned short len, |
131 | unsigned short proto, | 129 | unsigned short proto, |
132 | __wsum sum) | 130 | __wsum sum) |
133 | { | 131 | { |
134 | return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); | 132 | return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); |
135 | } | 133 | } |
136 | 134 | ||
137 | /* | 135 | /* |
@@ -141,30 +139,29 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, | |||
141 | 139 | ||
142 | static inline __sum16 ip_compute_csum(const void *buff, int len) | 140 | static inline __sum16 ip_compute_csum(const void *buff, int len) |
143 | { | 141 | { |
144 | return csum_fold (csum_partial(buff, len, 0)); | 142 | return csum_fold(csum_partial(buff, len, 0)); |
145 | } | 143 | } |
146 | 144 | ||
147 | #define _HAVE_ARCH_IPV6_CSUM | 145 | #define _HAVE_ARCH_IPV6_CSUM |
148 | static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, | 146 | static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, |
149 | const struct in6_addr *daddr, | 147 | const struct in6_addr *daddr, |
150 | __u32 len, unsigned short proto, | 148 | __u32 len, unsigned short proto, |
151 | __wsum sum) | 149 | __wsum sum) |
152 | { | 150 | { |
153 | __asm__( | 151 | asm("addl 0(%1), %0 ;\n" |
154 | "addl 0(%1), %0 ;\n" | 152 | "adcl 4(%1), %0 ;\n" |
155 | "adcl 4(%1), %0 ;\n" | 153 | "adcl 8(%1), %0 ;\n" |
156 | "adcl 8(%1), %0 ;\n" | 154 | "adcl 12(%1), %0 ;\n" |
157 | "adcl 12(%1), %0 ;\n" | 155 | "adcl 0(%2), %0 ;\n" |
158 | "adcl 0(%2), %0 ;\n" | 156 | "adcl 4(%2), %0 ;\n" |
159 | "adcl 4(%2), %0 ;\n" | 157 | "adcl 8(%2), %0 ;\n" |
160 | "adcl 8(%2), %0 ;\n" | 158 | "adcl 12(%2), %0 ;\n" |
161 | "adcl 12(%2), %0 ;\n" | 159 | "adcl %3, %0 ;\n" |
162 | "adcl %3, %0 ;\n" | 160 | "adcl %4, %0 ;\n" |
163 | "adcl %4, %0 ;\n" | 161 | "adcl $0, %0 ;\n" |
164 | "adcl $0, %0 ;\n" | 162 | : "=&r" (sum) |
165 | : "=&r" (sum) | 163 | : "r" (saddr), "r" (daddr), |
166 | : "r" (saddr), "r" (daddr), | 164 | "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)); |
167 | "r"(htonl(len)), "r"(htonl(proto)), "0"(sum)); | ||
168 | 165 | ||
169 | return csum_fold(sum); | 166 | return csum_fold(sum); |
170 | } | 167 | } |
@@ -173,14 +170,15 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, | |||
173 | * Copy and checksum to user | 170 | * Copy and checksum to user |
174 | */ | 171 | */ |
175 | #define HAVE_CSUM_COPY_USER | 172 | #define HAVE_CSUM_COPY_USER |
176 | static __inline__ __wsum csum_and_copy_to_user(const void *src, | 173 | static inline __wsum csum_and_copy_to_user(const void *src, |
177 | void __user *dst, | 174 | void __user *dst, |
178 | int len, __wsum sum, | 175 | int len, __wsum sum, |
179 | int *err_ptr) | 176 | int *err_ptr) |
180 | { | 177 | { |
181 | might_sleep(); | 178 | might_sleep(); |
182 | if (access_ok(VERIFY_WRITE, dst, len)) | 179 | if (access_ok(VERIFY_WRITE, dst, len)) |
183 | return csum_partial_copy_generic(src, (__force void *)dst, len, sum, NULL, err_ptr); | 180 | return csum_partial_copy_generic(src, (__force void *)dst, |
181 | len, sum, NULL, err_ptr); | ||
184 | 182 | ||
185 | if (len) | 183 | if (len) |
186 | *err_ptr = -EFAULT; | 184 | *err_ptr = -EFAULT; |
diff --git a/include/asm-x86/checksum_64.h b/include/asm-x86/checksum_64.h index e5f79997decc..8bd861cc5267 100644 --- a/include/asm-x86/checksum_64.h +++ b/include/asm-x86/checksum_64.h | |||
@@ -1,33 +1,31 @@ | |||
1 | #ifndef _X86_64_CHECKSUM_H | 1 | #ifndef _X86_64_CHECKSUM_H |
2 | #define _X86_64_CHECKSUM_H | 2 | #define _X86_64_CHECKSUM_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Checksums for x86-64 | 5 | * Checksums for x86-64 |
6 | * Copyright 2002 by Andi Kleen, SuSE Labs | 6 | * Copyright 2002 by Andi Kleen, SuSE Labs |
7 | * with some code from asm-x86/checksum.h | 7 | * with some code from asm-x86/checksum.h |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/compiler.h> | 10 | #include <linux/compiler.h> |
11 | #include <asm/uaccess.h> | 11 | #include <asm/uaccess.h> |
12 | #include <asm/byteorder.h> | 12 | #include <asm/byteorder.h> |
13 | 13 | ||
14 | /** | 14 | /** |
15 | * csum_fold - Fold and invert a 32bit checksum. | 15 | * csum_fold - Fold and invert a 32bit checksum. |
16 | * sum: 32bit unfolded sum | 16 | * sum: 32bit unfolded sum |
17 | * | 17 | * |
18 | * Fold a 32bit running checksum to 16bit and invert it. This is usually | 18 | * Fold a 32bit running checksum to 16bit and invert it. This is usually |
19 | * the last step before putting a checksum into a packet. | 19 | * the last step before putting a checksum into a packet. |
20 | * Make sure not to mix with 64bit checksums. | 20 | * Make sure not to mix with 64bit checksums. |
21 | */ | 21 | */ |
22 | static inline __sum16 csum_fold(__wsum sum) | 22 | static inline __sum16 csum_fold(__wsum sum) |
23 | { | 23 | { |
24 | __asm__( | 24 | asm(" addl %1,%0\n" |
25 | " addl %1,%0\n" | 25 | " adcl $0xffff,%0" |
26 | " adcl $0xffff,%0" | 26 | : "=r" (sum) |
27 | : "=r" (sum) | 27 | : "r" ((__force u32)sum << 16), |
28 | : "r" ((__force u32)sum << 16), | 28 | "0" ((__force u32)sum & 0xffff0000)); |
29 | "0" ((__force u32)sum & 0xffff0000) | ||
30 | ); | ||
31 | return (__force __sum16)(~(__force u32)sum >> 16); | 29 | return (__force __sum16)(~(__force u32)sum >> 16); |
32 | } | 30 | } |
33 | 31 | ||
@@ -43,46 +41,46 @@ static inline __sum16 csum_fold(__wsum sum) | |||
43 | * ip_fast_csum - Compute the IPv4 header checksum efficiently. | 41 | * ip_fast_csum - Compute the IPv4 header checksum efficiently. |
44 | * iph: ipv4 header | 42 | * iph: ipv4 header |
45 | * ihl: length of header / 4 | 43 | * ihl: length of header / 4 |
46 | */ | 44 | */ |
47 | static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) | 45 | static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) |
48 | { | 46 | { |
49 | unsigned int sum; | 47 | unsigned int sum; |
50 | 48 | ||
51 | asm( " movl (%1), %0\n" | 49 | asm(" movl (%1), %0\n" |
52 | " subl $4, %2\n" | 50 | " subl $4, %2\n" |
53 | " jbe 2f\n" | 51 | " jbe 2f\n" |
54 | " addl 4(%1), %0\n" | 52 | " addl 4(%1), %0\n" |
55 | " adcl 8(%1), %0\n" | 53 | " adcl 8(%1), %0\n" |
56 | " adcl 12(%1), %0\n" | 54 | " adcl 12(%1), %0\n" |
57 | "1: adcl 16(%1), %0\n" | 55 | "1: adcl 16(%1), %0\n" |
58 | " lea 4(%1), %1\n" | 56 | " lea 4(%1), %1\n" |
59 | " decl %2\n" | 57 | " decl %2\n" |
60 | " jne 1b\n" | 58 | " jne 1b\n" |
61 | " adcl $0, %0\n" | 59 | " adcl $0, %0\n" |
62 | " movl %0, %2\n" | 60 | " movl %0, %2\n" |
63 | " shrl $16, %0\n" | 61 | " shrl $16, %0\n" |
64 | " addw %w2, %w0\n" | 62 | " addw %w2, %w0\n" |
65 | " adcl $0, %0\n" | 63 | " adcl $0, %0\n" |
66 | " notl %0\n" | 64 | " notl %0\n" |
67 | "2:" | 65 | "2:" |
68 | /* Since the input registers which are loaded with iph and ihl | 66 | /* Since the input registers which are loaded with iph and ihl |
69 | are modified, we must also specify them as outputs, or gcc | 67 | are modified, we must also specify them as outputs, or gcc |
70 | will assume they contain their original values. */ | 68 | will assume they contain their original values. */ |
71 | : "=r" (sum), "=r" (iph), "=r" (ihl) | 69 | : "=r" (sum), "=r" (iph), "=r" (ihl) |
72 | : "1" (iph), "2" (ihl) | 70 | : "1" (iph), "2" (ihl) |
73 | : "memory"); | 71 | : "memory"); |
74 | return (__force __sum16)sum; | 72 | return (__force __sum16)sum; |
75 | } | 73 | } |
76 | 74 | ||
77 | /** | 75 | /** |
78 | * csum_tcpup_nofold - Compute an IPv4 pseudo header checksum. | 76 | * csum_tcpup_nofold - Compute an IPv4 pseudo header checksum. |
79 | * @saddr: source address | 77 | * @saddr: source address |
80 | * @daddr: destination address | 78 | * @daddr: destination address |
81 | * @len: length of packet | 79 | * @len: length of packet |
82 | * @proto: ip protocol of packet | 80 | * @proto: ip protocol of packet |
83 | * @sum: initial sum to be added in (32bit unfolded) | 81 | * @sum: initial sum to be added in (32bit unfolded) |
84 | * | 82 | * |
85 | * Returns the pseudo header checksum of the input data. Result is | 83 | * Returns the pseudo header checksum of the input data. Result is |
86 | * 32bit unfolded. | 84 | * 32bit unfolded. |
87 | */ | 85 | */ |
88 | static inline __wsum | 86 | static inline __wsum |
@@ -93,32 +91,32 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, | |||
93 | " adcl %2, %0\n" | 91 | " adcl %2, %0\n" |
94 | " adcl %3, %0\n" | 92 | " adcl %3, %0\n" |
95 | " adcl $0, %0\n" | 93 | " adcl $0, %0\n" |
96 | : "=r" (sum) | 94 | : "=r" (sum) |
97 | : "g" (daddr), "g" (saddr), | 95 | : "g" (daddr), "g" (saddr), |
98 | "g" ((len + proto)<<8), "0" (sum)); | 96 | "g" ((len + proto)<<8), "0" (sum)); |
99 | return sum; | 97 | return sum; |
100 | } | 98 | } |
101 | 99 | ||
102 | 100 | ||
103 | /** | 101 | /** |
104 | * csum_tcpudp_magic - Compute an IPv4 pseudo header checksum. | 102 | * csum_tcpudp_magic - Compute an IPv4 pseudo header checksum. |
105 | * @saddr: source address | 103 | * @saddr: source address |
106 | * @daddr: destination address | 104 | * @daddr: destination address |
107 | * @len: length of packet | 105 | * @len: length of packet |
108 | * @proto: ip protocol of packet | 106 | * @proto: ip protocol of packet |
109 | * @sum: initial sum to be added in (32bit unfolded) | 107 | * @sum: initial sum to be added in (32bit unfolded) |
110 | * | 108 | * |
111 | * Returns the 16bit pseudo header checksum of the input data, already | 109 | * Returns the 16bit pseudo header checksum of the input data, already |
112 | * complemented and ready to be filled in. | 110 | * complemented and ready to be filled in. |
113 | */ | 111 | */ |
114 | static inline __sum16 | 112 | static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, |
115 | csum_tcpudp_magic(__be32 saddr, __be32 daddr, | 113 | unsigned short len, |
116 | unsigned short len, unsigned short proto, __wsum sum) | 114 | unsigned short proto, __wsum sum) |
117 | { | 115 | { |
118 | return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); | 116 | return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); |
119 | } | 117 | } |
120 | 118 | ||
121 | /** | 119 | /** |
122 | * csum_partial - Compute an internet checksum. | 120 | * csum_partial - Compute an internet checksum. |
123 | * @buff: buffer to be checksummed | 121 | * @buff: buffer to be checksummed |
124 | * @len: length of buffer. | 122 | * @len: length of buffer. |
@@ -127,7 +125,7 @@ csum_tcpudp_magic(__be32 saddr, __be32 daddr, | |||
127 | * Returns the 32bit unfolded internet checksum of the buffer. | 125 | * Returns the 32bit unfolded internet checksum of the buffer. |
128 | * Before filling it in it needs to be csum_fold()'ed. | 126 | * Before filling it in it needs to be csum_fold()'ed. |
129 | * buff should be aligned to a 64bit boundary if possible. | 127 | * buff should be aligned to a 64bit boundary if possible. |
130 | */ | 128 | */ |
131 | extern __wsum csum_partial(const void *buff, int len, __wsum sum); | 129 | extern __wsum csum_partial(const void *buff, int len, __wsum sum); |
132 | 130 | ||
133 | #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1 | 131 | #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1 |
@@ -136,23 +134,22 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum); | |||
136 | 134 | ||
137 | /* Do not call this directly. Use the wrappers below */ | 135 | /* Do not call this directly. Use the wrappers below */ |
138 | extern __wsum csum_partial_copy_generic(const void *src, const void *dst, | 136 | extern __wsum csum_partial_copy_generic(const void *src, const void *dst, |
139 | int len, | 137 | int len, __wsum sum, |
140 | __wsum sum, | 138 | int *src_err_ptr, int *dst_err_ptr); |
141 | int *src_err_ptr, int *dst_err_ptr); | ||
142 | 139 | ||
143 | 140 | ||
144 | extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, | 141 | extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, |
145 | int len, __wsum isum, int *errp); | 142 | int len, __wsum isum, int *errp); |
146 | extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst, | 143 | extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst, |
147 | int len, __wsum isum, int *errp); | 144 | int len, __wsum isum, int *errp); |
148 | extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, | 145 | extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, |
149 | __wsum sum); | 146 | int len, __wsum sum); |
150 | 147 | ||
151 | /* Old names. To be removed. */ | 148 | /* Old names. To be removed. */ |
152 | #define csum_and_copy_to_user csum_partial_copy_to_user | 149 | #define csum_and_copy_to_user csum_partial_copy_to_user |
153 | #define csum_and_copy_from_user csum_partial_copy_from_user | 150 | #define csum_and_copy_from_user csum_partial_copy_from_user |
154 | 151 | ||
155 | /** | 152 | /** |
156 | * ip_compute_csum - Compute a 16bit IP checksum. | 153 | * ip_compute_csum - Compute a 16bit IP checksum. |
157 | * @buff: buffer address. | 154 | * @buff: buffer address. |
158 | * @len: length of buffer. | 155 | * @len: length of buffer. |
@@ -170,7 +167,7 @@ extern __sum16 ip_compute_csum(const void *buff, int len); | |||
170 | * @proto: protocol of packet | 167 | * @proto: protocol of packet |
171 | * @sum: initial sum (32bit unfolded) to be added in | 168 | * @sum: initial sum (32bit unfolded) to be added in |
172 | * | 169 | * |
173 | * Computes an IPv6 pseudo header checksum. This sum is added to the | 170 | * Computes an IPv6 pseudo header checksum. This sum is added to the |
174 | * checksum of UDP/TCP packets and contains some link layer information. | 171 | * checksum of UDP/TCP packets and contains some link layer information. |
175 | * Returns the unfolded 32bit checksum. | 172 | * Returns the unfolded 32bit checksum. |
176 | */ | 173 | */ |
@@ -185,11 +182,10 @@ csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, | |||
185 | static inline unsigned add32_with_carry(unsigned a, unsigned b) | 182 | static inline unsigned add32_with_carry(unsigned a, unsigned b) |
186 | { | 183 | { |
187 | asm("addl %2,%0\n\t" | 184 | asm("addl %2,%0\n\t" |
188 | "adcl $0,%0" | 185 | "adcl $0,%0" |
189 | : "=r" (a) | 186 | : "=r" (a) |
190 | : "0" (a), "r" (b)); | 187 | : "0" (a), "r" (b)); |
191 | return a; | 188 | return a; |
192 | } | 189 | } |
193 | 190 | ||
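add32_with_carry() performs an end-around-carry add: adcl $0,%0 feeds the carry-out of the first add back into bit 0, which is what one's-complement checksum accumulation requires. Portable C equivalent (sketch):

        static inline unsigned add32_with_carry_model(unsigned a, unsigned b)
        {
                u64 t = (u64)a + b;                     /* 33-bit intermediate */

                return (unsigned)t + (unsigned)(t >> 32); /* wrap carry into bit 0 */
        }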
194 | #endif | 191 | #endif |
195 | |||
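The add32_with_carry() helper above folds the carry of a 32-bit add back into bit 0 with addl/adcl, which is the core step of the ones'-complement checksums this header declares. A minimal userspace sketch of the same fold in portable C — fold32 and the test value are illustrative, not from the kernel source:

#include <stdint.h>
#include <stdio.h>

/* Portable equivalent of add32_with_carry(): a 32-bit add whose
 * carry-out is folded back into the low bit (ones'-complement add). */
static uint32_t fold32(uint32_t a, uint32_t b)
{
        uint64_t sum = (uint64_t)a + b;        /* widen so the carry is visible */
        return (uint32_t)(sum + (sum >> 32));  /* the "adcl $0,%0" step */
}

int main(void)
{
        /* 0xffffffff + 1 overflows; the carry folds back in to give 1 */
        printf("%#x\n", (unsigned)fold32(0xffffffffu, 1u));
        return 0;
}

Widening to 64 bits makes the carry explicit; the asm version keeps everything in one register and uses the CPU carry flag instead.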
diff --git a/include/asm-x86/cmpxchg_32.h b/include/asm-x86/cmpxchg_32.h index 959fad00dff5..bf5a69d1329e 100644 --- a/include/asm-x86/cmpxchg_32.h +++ b/include/asm-x86/cmpxchg_32.h | |||
@@ -8,9 +8,12 @@ | |||
8 | * you need to test for the feature in boot_cpu_data. | 8 | * you need to test for the feature in boot_cpu_data. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) | 11 | #define xchg(ptr, v) \ |
12 | ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr)))) | ||
12 | 13 | ||
13 | struct __xchg_dummy { unsigned long a[100]; }; | 14 | struct __xchg_dummy { |
15 | unsigned long a[100]; | ||
16 | }; | ||
14 | #define __xg(x) ((struct __xchg_dummy *)(x)) | 17 | #define __xg(x) ((struct __xchg_dummy *)(x)) |
15 | 18 | ||
16 | /* | 19 | /* |
@@ -27,72 +30,74 @@ struct __xchg_dummy { unsigned long a[100]; }; | |||
27 | * of the instruction set reference 24319102.pdf. We need | 30 | * of the instruction set reference 24319102.pdf. We need |
28 | * the reader side to see the coherent 64bit value. | 31 | * the reader side to see the coherent 64bit value. |
29 | */ | 32 | */ |
30 | static inline void __set_64bit (unsigned long long * ptr, | 33 | static inline void __set_64bit(unsigned long long *ptr, |
31 | unsigned int low, unsigned int high) | 34 | unsigned int low, unsigned int high) |
32 | { | 35 | { |
33 | __asm__ __volatile__ ( | 36 | asm volatile("\n1:\t" |
34 | "\n1:\t" | 37 | "movl (%0), %%eax\n\t" |
35 | "movl (%0), %%eax\n\t" | 38 | "movl 4(%0), %%edx\n\t" |
36 | "movl 4(%0), %%edx\n\t" | 39 | LOCK_PREFIX "cmpxchg8b (%0)\n\t" |
37 | LOCK_PREFIX "cmpxchg8b (%0)\n\t" | 40 | "jnz 1b" |
38 | "jnz 1b" | 41 | : /* no outputs */ |
39 | : /* no outputs */ | 42 | : "D"(ptr), |
40 | : "D"(ptr), | 43 | "b"(low), |
41 | "b"(low), | 44 | "c"(high) |
42 | "c"(high) | 45 | : "ax", "dx", "memory"); |
43 | : "ax","dx","memory"); | ||
44 | } | 46 | } |
45 | 47 | ||
46 | static inline void __set_64bit_constant (unsigned long long *ptr, | 48 | static inline void __set_64bit_constant(unsigned long long *ptr, |
47 | unsigned long long value) | 49 | unsigned long long value) |
48 | { | 50 | { |
49 | __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL)); | 51 | __set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32)); |
50 | } | 52 | } |
51 | #define ll_low(x) *(((unsigned int*)&(x))+0) | ||
52 | #define ll_high(x) *(((unsigned int*)&(x))+1) | ||
53 | 53 | ||
54 | static inline void __set_64bit_var (unsigned long long *ptr, | 54 | #define ll_low(x) *(((unsigned int *)&(x)) + 0) |
55 | unsigned long long value) | 55 | #define ll_high(x) *(((unsigned int *)&(x)) + 1) |
56 | |||
57 | static inline void __set_64bit_var(unsigned long long *ptr, | ||
58 | unsigned long long value) | ||
56 | { | 59 | { |
57 | __set_64bit(ptr,ll_low(value), ll_high(value)); | 60 | __set_64bit(ptr, ll_low(value), ll_high(value)); |
58 | } | 61 | } |
59 | 62 | ||
60 | #define set_64bit(ptr,value) \ | 63 | #define set_64bit(ptr, value) \ |
61 | (__builtin_constant_p(value) ? \ | 64 | (__builtin_constant_p((value)) \ |
62 | __set_64bit_constant(ptr, value) : \ | 65 | ? __set_64bit_constant((ptr), (value)) \ |
63 | __set_64bit_var(ptr, value) ) | 66 | : __set_64bit_var((ptr), (value))) |
64 | 67 | ||
65 | #define _set_64bit(ptr,value) \ | 68 | #define _set_64bit(ptr, value) \ |
66 | (__builtin_constant_p(value) ? \ | 69 | (__builtin_constant_p(value) \ |
67 | __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \ | 70 | ? __set_64bit(ptr, (unsigned int)(value), \ |
68 | __set_64bit(ptr, ll_low(value), ll_high(value)) ) | 71 | (unsigned int)((value) >> 32)) \ |
72 | : __set_64bit(ptr, ll_low((value)), ll_high((value)))) | ||
69 | 73 | ||
70 | /* | 74 | /* |
71 | * Note: no "lock" prefix even on SMP: xchg always implies lock anyway | 75 | * Note: no "lock" prefix even on SMP: xchg always implies lock anyway |
72 | * Note 2: xchg has a side effect, so the volatile attribute is necessary, | 76 | * Note 2: xchg has a side effect, so the volatile attribute is necessary, |
73 | * but strictly the primitive is invalid as written: *ptr is an output argument. --ANK | 77 | * but strictly the primitive is invalid as written: *ptr is an output argument. --ANK |
74 | */ | 78 | */ |
75 | static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) | 79 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, |
80 | int size) | ||
76 | { | 81 | { |
77 | switch (size) { | 82 | switch (size) { |
78 | case 1: | 83 | case 1: |
79 | __asm__ __volatile__("xchgb %b0,%1" | 84 | asm volatile("xchgb %b0,%1" |
80 | :"=q" (x) | 85 | : "=q" (x) |
81 | :"m" (*__xg(ptr)), "0" (x) | 86 | : "m" (*__xg(ptr)), "0" (x) |
82 | :"memory"); | 87 | : "memory"); |
83 | break; | 88 | break; |
84 | case 2: | 89 | case 2: |
85 | __asm__ __volatile__("xchgw %w0,%1" | 90 | asm volatile("xchgw %w0,%1" |
86 | :"=r" (x) | 91 | : "=r" (x) |
87 | :"m" (*__xg(ptr)), "0" (x) | 92 | : "m" (*__xg(ptr)), "0" (x) |
88 | :"memory"); | 93 | : "memory"); |
89 | break; | 94 | break; |
90 | case 4: | 95 | case 4: |
91 | __asm__ __volatile__("xchgl %0,%1" | 96 | asm volatile("xchgl %0,%1" |
92 | :"=r" (x) | 97 | : "=r" (x) |
93 | :"m" (*__xg(ptr)), "0" (x) | 98 | : "m" (*__xg(ptr)), "0" (x) |
94 | :"memory"); | 99 | : "memory"); |
95 | break; | 100 | break; |
96 | } | 101 | } |
97 | return x; | 102 | return x; |
98 | } | 103 | } |
@@ -105,24 +110,27 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz | |||
105 | 110 | ||
106 | #ifdef CONFIG_X86_CMPXCHG | 111 | #ifdef CONFIG_X86_CMPXCHG |
107 | #define __HAVE_ARCH_CMPXCHG 1 | 112 | #define __HAVE_ARCH_CMPXCHG 1 |
108 | #define cmpxchg(ptr, o, n) \ | 113 | #define cmpxchg(ptr, o, n) \ |
109 | ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ | 114 | ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ |
110 | (unsigned long)(n), sizeof(*(ptr)))) | 115 | (unsigned long)(n), \ |
111 | #define sync_cmpxchg(ptr, o, n) \ | 116 | sizeof(*(ptr)))) |
112 | ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \ | 117 | #define sync_cmpxchg(ptr, o, n) \ |
113 | (unsigned long)(n), sizeof(*(ptr)))) | 118 | ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \ |
114 | #define cmpxchg_local(ptr, o, n) \ | 119 | (unsigned long)(n), \ |
115 | ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ | 120 | sizeof(*(ptr)))) |
116 | (unsigned long)(n), sizeof(*(ptr)))) | 121 | #define cmpxchg_local(ptr, o, n) \ |
122 | ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ | ||
123 | (unsigned long)(n), \ | ||
124 | sizeof(*(ptr)))) | ||
117 | #endif | 125 | #endif |
118 | 126 | ||
119 | #ifdef CONFIG_X86_CMPXCHG64 | 127 | #ifdef CONFIG_X86_CMPXCHG64 |
120 | #define cmpxchg64(ptr, o, n) \ | 128 | #define cmpxchg64(ptr, o, n) \ |
121 | ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ | 129 | ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ |
122 | (unsigned long long)(n))) | 130 | (unsigned long long)(n))) |
123 | #define cmpxchg64_local(ptr, o, n) \ | 131 | #define cmpxchg64_local(ptr, o, n) \ |
124 | ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o),\ | 132 | ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \ |
125 | (unsigned long long)(n))) | 133 | (unsigned long long)(n))) |
126 | #endif | 134 | #endif |
127 | 135 | ||
128 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | 136 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, |
@@ -131,22 +139,22 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
131 | unsigned long prev; | 139 | unsigned long prev; |
132 | switch (size) { | 140 | switch (size) { |
133 | case 1: | 141 | case 1: |
134 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2" | 142 | asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2" |
135 | : "=a"(prev) | 143 | : "=a"(prev) |
136 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | 144 | : "q"(new), "m"(*__xg(ptr)), "0"(old) |
137 | : "memory"); | 145 | : "memory"); |
138 | return prev; | 146 | return prev; |
139 | case 2: | 147 | case 2: |
140 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" | 148 | asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2" |
141 | : "=a"(prev) | 149 | : "=a"(prev) |
142 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 150 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
143 | : "memory"); | 151 | : "memory"); |
144 | return prev; | 152 | return prev; |
145 | case 4: | 153 | case 4: |
146 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2" | 154 | asm volatile(LOCK_PREFIX "cmpxchgl %1,%2" |
147 | : "=a"(prev) | 155 | : "=a"(prev) |
148 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 156 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
149 | : "memory"); | 157 | : "memory"); |
150 | return prev; | 158 | return prev; |
151 | } | 159 | } |
152 | return old; | 160 | return old; |
@@ -158,85 +166,88 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
158 | * isn't. | 166 | * isn't. |
159 | */ | 167 | */ |
160 | static inline unsigned long __sync_cmpxchg(volatile void *ptr, | 168 | static inline unsigned long __sync_cmpxchg(volatile void *ptr, |
161 | unsigned long old, | 169 | unsigned long old, |
162 | unsigned long new, int size) | 170 | unsigned long new, int size) |
163 | { | 171 | { |
164 | unsigned long prev; | 172 | unsigned long prev; |
165 | switch (size) { | 173 | switch (size) { |
166 | case 1: | 174 | case 1: |
167 | __asm__ __volatile__("lock; cmpxchgb %b1,%2" | 175 | asm volatile("lock; cmpxchgb %b1,%2" |
168 | : "=a"(prev) | 176 | : "=a"(prev) |
169 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | 177 | : "q"(new), "m"(*__xg(ptr)), "0"(old) |
170 | : "memory"); | 178 | : "memory"); |
171 | return prev; | 179 | return prev; |
172 | case 2: | 180 | case 2: |
173 | __asm__ __volatile__("lock; cmpxchgw %w1,%2" | 181 | asm volatile("lock; cmpxchgw %w1,%2" |
174 | : "=a"(prev) | 182 | : "=a"(prev) |
175 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 183 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
176 | : "memory"); | 184 | : "memory"); |
177 | return prev; | 185 | return prev; |
178 | case 4: | 186 | case 4: |
179 | __asm__ __volatile__("lock; cmpxchgl %1,%2" | 187 | asm volatile("lock; cmpxchgl %1,%2" |
180 | : "=a"(prev) | 188 | : "=a"(prev) |
181 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 189 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
182 | : "memory"); | 190 | : "memory"); |
183 | return prev; | 191 | return prev; |
184 | } | 192 | } |
185 | return old; | 193 | return old; |
186 | } | 194 | } |
187 | 195 | ||
188 | static inline unsigned long __cmpxchg_local(volatile void *ptr, | 196 | static inline unsigned long __cmpxchg_local(volatile void *ptr, |
189 | unsigned long old, unsigned long new, int size) | 197 | unsigned long old, |
198 | unsigned long new, int size) | ||
190 | { | 199 | { |
191 | unsigned long prev; | 200 | unsigned long prev; |
192 | switch (size) { | 201 | switch (size) { |
193 | case 1: | 202 | case 1: |
194 | __asm__ __volatile__("cmpxchgb %b1,%2" | 203 | asm volatile("cmpxchgb %b1,%2" |
195 | : "=a"(prev) | 204 | : "=a"(prev) |
196 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | 205 | : "q"(new), "m"(*__xg(ptr)), "0"(old) |
197 | : "memory"); | 206 | : "memory"); |
198 | return prev; | 207 | return prev; |
199 | case 2: | 208 | case 2: |
200 | __asm__ __volatile__("cmpxchgw %w1,%2" | 209 | asm volatile("cmpxchgw %w1,%2" |
201 | : "=a"(prev) | 210 | : "=a"(prev) |
202 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 211 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
203 | : "memory"); | 212 | : "memory"); |
204 | return prev; | 213 | return prev; |
205 | case 4: | 214 | case 4: |
206 | __asm__ __volatile__("cmpxchgl %1,%2" | 215 | asm volatile("cmpxchgl %1,%2" |
207 | : "=a"(prev) | 216 | : "=a"(prev) |
208 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 217 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
209 | : "memory"); | 218 | : "memory"); |
210 | return prev; | 219 | return prev; |
211 | } | 220 | } |
212 | return old; | 221 | return old; |
213 | } | 222 | } |
214 | 223 | ||
215 | static inline unsigned long long __cmpxchg64(volatile void *ptr, | 224 | static inline unsigned long long __cmpxchg64(volatile void *ptr, |
216 | unsigned long long old, unsigned long long new) | 225 | unsigned long long old, |
226 | unsigned long long new) | ||
217 | { | 227 | { |
218 | unsigned long long prev; | 228 | unsigned long long prev; |
219 | __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3" | 229 | asm volatile(LOCK_PREFIX "cmpxchg8b %3" |
220 | : "=A"(prev) | 230 | : "=A"(prev) |
221 | : "b"((unsigned long)new), | 231 | : "b"((unsigned long)new), |
222 | "c"((unsigned long)(new >> 32)), | 232 | "c"((unsigned long)(new >> 32)), |
223 | "m"(*__xg(ptr)), | 233 | "m"(*__xg(ptr)), |
224 | "0"(old) | 234 | "0"(old) |
225 | : "memory"); | 235 | : "memory"); |
226 | return prev; | 236 | return prev; |
227 | } | 237 | } |
228 | 238 | ||
229 | static inline unsigned long long __cmpxchg64_local(volatile void *ptr, | 239 | static inline unsigned long long __cmpxchg64_local(volatile void *ptr, |
230 | unsigned long long old, unsigned long long new) | 240 | unsigned long long old, |
241 | unsigned long long new) | ||
231 | { | 242 | { |
232 | unsigned long long prev; | 243 | unsigned long long prev; |
233 | __asm__ __volatile__("cmpxchg8b %3" | 244 | asm volatile("cmpxchg8b %3" |
234 | : "=A"(prev) | 245 | : "=A"(prev) |
235 | : "b"((unsigned long)new), | 246 | : "b"((unsigned long)new), |
236 | "c"((unsigned long)(new >> 32)), | 247 | "c"((unsigned long)(new >> 32)), |
237 | "m"(*__xg(ptr)), | 248 | "m"(*__xg(ptr)), |
238 | "0"(old) | 249 | "0"(old) |
239 | : "memory"); | 250 | : "memory"); |
240 | return prev; | 251 | return prev; |
241 | } | 252 | } |
242 | 253 | ||
@@ -252,7 +263,7 @@ extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16); | |||
252 | extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32); | 263 | extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32); |
253 | 264 | ||
254 | static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, | 265 | static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, |
255 | unsigned long new, int size) | 266 | unsigned long new, int size) |
256 | { | 267 | { |
257 | switch (size) { | 268 | switch (size) { |
258 | case 1: | 269 | case 1: |
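__set_64bit() above retries cmpxchg8b until the full 64-bit store lands atomically. A hedged sketch of the same retry loop using GCC's __sync_val_compare_and_swap in place of the raw cmpxchg8b (set64_atomic is an invented name; on 32-bit x86 this assumes a cmpxchg8b-capable target, e.g. -march=i586 or later):

#include <stdint.h>
#include <stdio.h>

/* Retry loop in the style of __set_64bit(): read the current value
 * (possibly torn), then let the compare-and-swap validate it; repeat
 * if another CPU changed the word in between. */
static void set64_atomic(volatile uint64_t *ptr, uint64_t val)
{
        uint64_t old;

        do {
                old = *ptr;     /* the movl (%0),%%eax / movl 4(%0),%%edx pair */
        } while (__sync_val_compare_and_swap(ptr, old, val) != old);
}

int main(void)
{
        static volatile uint64_t v;

        set64_atomic(&v, 0x123456789abcdef0ull);
        printf("%llx\n", (unsigned long long)v);
        return 0;
}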
diff --git a/include/asm-x86/cmpxchg_64.h b/include/asm-x86/cmpxchg_64.h index 56f5b41e071c..d9b26b9a28cf 100644 --- a/include/asm-x86/cmpxchg_64.h +++ b/include/asm-x86/cmpxchg_64.h | |||
@@ -3,7 +3,8 @@ | |||
3 | 3 | ||
4 | #include <asm/alternative.h> /* Provides LOCK_PREFIX */ | 4 | #include <asm/alternative.h> /* Provides LOCK_PREFIX */ |
5 | 5 | ||
6 | #define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) | 6 | #define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \ |
7 | (ptr), sizeof(*(ptr)))) | ||
7 | 8 | ||
8 | #define __xg(x) ((volatile long *)(x)) | 9 | #define __xg(x) ((volatile long *)(x)) |
9 | 10 | ||
@@ -19,33 +20,34 @@ static inline void set_64bit(volatile unsigned long *ptr, unsigned long val) | |||
19 | * Note 2: xchg has a side effect, so the volatile attribute is necessary, | 20 | * Note 2: xchg has a side effect, so the volatile attribute is necessary, |
20 | * but strictly the primitive is invalid as written: *ptr is an output argument. --ANK | 21 | * but strictly the primitive is invalid as written: *ptr is an output argument. --ANK |
21 | */ | 22 | */ |
22 | static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) | 23 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, |
24 | int size) | ||
23 | { | 25 | { |
24 | switch (size) { | 26 | switch (size) { |
25 | case 1: | 27 | case 1: |
26 | __asm__ __volatile__("xchgb %b0,%1" | 28 | asm volatile("xchgb %b0,%1" |
27 | :"=q" (x) | 29 | : "=q" (x) |
28 | :"m" (*__xg(ptr)), "0" (x) | 30 | : "m" (*__xg(ptr)), "0" (x) |
29 | :"memory"); | 31 | : "memory"); |
30 | break; | 32 | break; |
31 | case 2: | 33 | case 2: |
32 | __asm__ __volatile__("xchgw %w0,%1" | 34 | asm volatile("xchgw %w0,%1" |
33 | :"=r" (x) | 35 | : "=r" (x) |
34 | :"m" (*__xg(ptr)), "0" (x) | 36 | : "m" (*__xg(ptr)), "0" (x) |
35 | :"memory"); | 37 | : "memory"); |
36 | break; | 38 | break; |
37 | case 4: | 39 | case 4: |
38 | __asm__ __volatile__("xchgl %k0,%1" | 40 | asm volatile("xchgl %k0,%1" |
39 | :"=r" (x) | 41 | : "=r" (x) |
40 | :"m" (*__xg(ptr)), "0" (x) | 42 | : "m" (*__xg(ptr)), "0" (x) |
41 | :"memory"); | 43 | : "memory"); |
42 | break; | 44 | break; |
43 | case 8: | 45 | case 8: |
44 | __asm__ __volatile__("xchgq %0,%1" | 46 | asm volatile("xchgq %0,%1" |
45 | :"=r" (x) | 47 | : "=r" (x) |
46 | :"m" (*__xg(ptr)), "0" (x) | 48 | : "m" (*__xg(ptr)), "0" (x) |
47 | :"memory"); | 49 | : "memory"); |
48 | break; | 50 | break; |
49 | } | 51 | } |
50 | return x; | 52 | return x; |
51 | } | 53 | } |
@@ -64,61 +66,62 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
64 | unsigned long prev; | 66 | unsigned long prev; |
65 | switch (size) { | 67 | switch (size) { |
66 | case 1: | 68 | case 1: |
67 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2" | 69 | asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2" |
68 | : "=a"(prev) | 70 | : "=a"(prev) |
69 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | 71 | : "q"(new), "m"(*__xg(ptr)), "0"(old) |
70 | : "memory"); | 72 | : "memory"); |
71 | return prev; | 73 | return prev; |
72 | case 2: | 74 | case 2: |
73 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" | 75 | asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2" |
74 | : "=a"(prev) | 76 | : "=a"(prev) |
75 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 77 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
76 | : "memory"); | 78 | : "memory"); |
77 | return prev; | 79 | return prev; |
78 | case 4: | 80 | case 4: |
79 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2" | 81 | asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2" |
80 | : "=a"(prev) | 82 | : "=a"(prev) |
81 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 83 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
82 | : "memory"); | 84 | : "memory"); |
83 | return prev; | 85 | return prev; |
84 | case 8: | 86 | case 8: |
85 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2" | 87 | asm volatile(LOCK_PREFIX "cmpxchgq %1,%2" |
86 | : "=a"(prev) | 88 | : "=a"(prev) |
87 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 89 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
88 | : "memory"); | 90 | : "memory"); |
89 | return prev; | 91 | return prev; |
90 | } | 92 | } |
91 | return old; | 93 | return old; |
92 | } | 94 | } |
93 | 95 | ||
94 | static inline unsigned long __cmpxchg_local(volatile void *ptr, | 96 | static inline unsigned long __cmpxchg_local(volatile void *ptr, |
95 | unsigned long old, unsigned long new, int size) | 97 | unsigned long old, |
98 | unsigned long new, int size) | ||
96 | { | 99 | { |
97 | unsigned long prev; | 100 | unsigned long prev; |
98 | switch (size) { | 101 | switch (size) { |
99 | case 1: | 102 | case 1: |
100 | __asm__ __volatile__("cmpxchgb %b1,%2" | 103 | asm volatile("cmpxchgb %b1,%2" |
101 | : "=a"(prev) | 104 | : "=a"(prev) |
102 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | 105 | : "q"(new), "m"(*__xg(ptr)), "0"(old) |
103 | : "memory"); | 106 | : "memory"); |
104 | return prev; | 107 | return prev; |
105 | case 2: | 108 | case 2: |
106 | __asm__ __volatile__("cmpxchgw %w1,%2" | 109 | asm volatile("cmpxchgw %w1,%2" |
107 | : "=a"(prev) | 110 | : "=a"(prev) |
108 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 111 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
109 | : "memory"); | 112 | : "memory"); |
110 | return prev; | 113 | return prev; |
111 | case 4: | 114 | case 4: |
112 | __asm__ __volatile__("cmpxchgl %k1,%2" | 115 | asm volatile("cmpxchgl %k1,%2" |
113 | : "=a"(prev) | 116 | : "=a"(prev) |
114 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 117 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
115 | : "memory"); | 118 | : "memory"); |
116 | return prev; | 119 | return prev; |
117 | case 8: | 120 | case 8: |
118 | __asm__ __volatile__("cmpxchgq %1,%2" | 121 | asm volatile("cmpxchgq %1,%2" |
119 | : "=a"(prev) | 122 | : "=a"(prev) |
120 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 123 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
121 | : "memory"); | 124 | : "memory"); |
122 | return prev; | 125 | return prev; |
123 | } | 126 | } |
124 | return old; | 127 | return old; |
@@ -126,19 +129,20 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, | |||
126 | 129 | ||
127 | #define cmpxchg(ptr, o, n) \ | 130 | #define cmpxchg(ptr, o, n) \ |
128 | ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ | 131 | ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ |
129 | (unsigned long)(n), sizeof(*(ptr)))) | 132 | (unsigned long)(n), sizeof(*(ptr)))) |
130 | #define cmpxchg64(ptr, o, n) \ | 133 | #define cmpxchg64(ptr, o, n) \ |
131 | ({ \ | 134 | ({ \ |
132 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ | 135 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ |
133 | cmpxchg((ptr), (o), (n)); \ | 136 | cmpxchg((ptr), (o), (n)); \ |
134 | }) | 137 | }) |
135 | #define cmpxchg_local(ptr, o, n) \ | 138 | #define cmpxchg_local(ptr, o, n) \ |
136 | ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ | 139 | ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ |
137 | (unsigned long)(n), sizeof(*(ptr)))) | 140 | (unsigned long)(n), \ |
141 | sizeof(*(ptr)))) | ||
138 | #define cmpxchg64_local(ptr, o, n) \ | 142 | #define cmpxchg64_local(ptr, o, n) \ |
139 | ({ \ | 143 | ({ \ |
140 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ | 144 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ |
141 | cmpxchg_local((ptr), (o), (n)); \ | 145 | cmpxchg_local((ptr), (o), (n)); \ |
142 | }) | 146 | }) |
143 | 147 | ||
144 | #endif | 148 | #endif |
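The cmpxchg()/cmpxchg64() macros above are almost always used in an optimistic read-modify-write loop: read the old value, compute the new one, and retry if another CPU got there first. A minimal userspace sketch of that idiom, with __sync_val_compare_and_swap standing in for the kernel macro (atomic_add_sketch is an illustrative name):

#include <stdint.h>
#include <stdio.h>

/* Classic cmpxchg loop: recompute from the observed old value and
 * retry until no other thread modified the word in the meantime. */
static uint64_t atomic_add_sketch(volatile uint64_t *p, uint64_t inc)
{
        uint64_t old, new;

        do {
                old = *p;
                new = old + inc;
        } while (__sync_val_compare_and_swap(p, old, new) != old);
        return new;
}

int main(void)
{
        static volatile uint64_t counter;

        printf("%llu\n", (unsigned long long)atomic_add_sketch(&counter, 5));
        return 0;
}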
diff --git a/include/asm-x86/compat.h b/include/asm-x86/compat.h index d3e8f3e87ee8..1793ac317a30 100644 --- a/include/asm-x86/compat.h +++ b/include/asm-x86/compat.h | |||
@@ -204,7 +204,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) | |||
204 | return (u32)(unsigned long)uptr; | 204 | return (u32)(unsigned long)uptr; |
205 | } | 205 | } |
206 | 206 | ||
207 | static __inline__ void __user *compat_alloc_user_space(long len) | 207 | static inline void __user *compat_alloc_user_space(long len) |
208 | { | 208 | { |
209 | struct pt_regs *regs = task_pt_regs(current); | 209 | struct pt_regs *regs = task_pt_regs(current); |
210 | return (void __user *)regs->sp - len; | 210 | return (void __user *)regs->sp - len; |
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h index 065e92966c7c..0d609c837a41 100644 --- a/include/asm-x86/cpufeature.h +++ b/include/asm-x86/cpufeature.h | |||
@@ -120,6 +120,9 @@ | |||
120 | extern const char * const x86_cap_flags[NCAPINTS*32]; | 120 | extern const char * const x86_cap_flags[NCAPINTS*32]; |
121 | extern const char * const x86_power_flags[32]; | 121 | extern const char * const x86_power_flags[32]; |
122 | 122 | ||
123 | #define test_cpu_cap(c, bit) \ | ||
124 | test_bit(bit, (unsigned long *)((c)->x86_capability)) | ||
125 | |||
123 | #define cpu_has(c, bit) \ | 126 | #define cpu_has(c, bit) \ |
124 | (__builtin_constant_p(bit) && \ | 127 | (__builtin_constant_p(bit) && \ |
125 | ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ | 128 | ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ |
@@ -131,7 +134,8 @@ extern const char * const x86_power_flags[32]; | |||
131 | (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ | 134 | (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ |
132 | (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \ | 135 | (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \ |
133 | ? 1 : \ | 136 | ? 1 : \ |
134 | test_bit(bit, (unsigned long *)((c)->x86_capability))) | 137 | test_cpu_cap(c, bit)) |
138 | |||
135 | #define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) | 139 | #define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) |
136 | 140 | ||
137 | #define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability)) | 141 | #define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability)) |
@@ -181,6 +185,8 @@ extern const char * const x86_power_flags[32]; | |||
181 | #define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) | 185 | #define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) |
182 | #define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) | 186 | #define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) |
183 | #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) | 187 | #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) |
188 | #define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) | ||
189 | #define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) | ||
184 | 190 | ||
185 | #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) | 191 | #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) |
186 | # define cpu_has_invlpg 1 | 192 | # define cpu_has_invlpg 1 |
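cpu_has() above folds to a constant when the feature bit lives in a REQUIRED_MASK word and otherwise falls back to the new test_cpu_cap(), a plain test_bit() on the CPUID-derived capability bitmap. A hedged userspace analogue that queries such a feature bit directly via GCC's <cpuid.h> (x86-only; leaf 1 EDX bit 26 is SSE2, used here purely as an example):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 1;       /* CPUID leaf 1 not available */

        /* Same idea as test_cpu_cap(): test one bit of a capability word */
        printf("sse2: %s\n", (edx & (1u << 26)) ? "yes" : "no");
        return 0;
}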
diff --git a/include/asm-x86/current_32.h b/include/asm-x86/current_32.h index d35248539912..5af9bdb97a16 100644 --- a/include/asm-x86/current_32.h +++ b/include/asm-x86/current_32.h | |||
@@ -11,7 +11,7 @@ static __always_inline struct task_struct *get_current(void) | |||
11 | { | 11 | { |
12 | return x86_read_percpu(current_task); | 12 | return x86_read_percpu(current_task); |
13 | } | 13 | } |
14 | 14 | ||
15 | #define current get_current() | 15 | #define current get_current() |
16 | 16 | ||
17 | #endif /* !(_I386_CURRENT_H) */ | 17 | #endif /* !(_I386_CURRENT_H) */ |
diff --git a/include/asm-x86/current_64.h b/include/asm-x86/current_64.h index bc8adecee66d..2d368ede2fc1 100644 --- a/include/asm-x86/current_64.h +++ b/include/asm-x86/current_64.h | |||
@@ -1,23 +1,23 @@ | |||
1 | #ifndef _X86_64_CURRENT_H | 1 | #ifndef _X86_64_CURRENT_H |
2 | #define _X86_64_CURRENT_H | 2 | #define _X86_64_CURRENT_H |
3 | 3 | ||
4 | #if !defined(__ASSEMBLY__) | 4 | #if !defined(__ASSEMBLY__) |
5 | struct task_struct; | 5 | struct task_struct; |
6 | 6 | ||
7 | #include <asm/pda.h> | 7 | #include <asm/pda.h> |
8 | 8 | ||
9 | static inline struct task_struct *get_current(void) | 9 | static inline struct task_struct *get_current(void) |
10 | { | 10 | { |
11 | struct task_struct *t = read_pda(pcurrent); | 11 | struct task_struct *t = read_pda(pcurrent); |
12 | return t; | 12 | return t; |
13 | } | 13 | } |
14 | 14 | ||
15 | #define current get_current() | 15 | #define current get_current() |
16 | 16 | ||
17 | #else | 17 | #else |
18 | 18 | ||
19 | #ifndef ASM_OFFSET_H | 19 | #ifndef ASM_OFFSET_H |
20 | #include <asm/asm-offsets.h> | 20 | #include <asm/asm-offsets.h> |
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | #define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg | 23 | #define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg |
diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h index 5b6a05d3a771..268a012bcd79 100644 --- a/include/asm-x86/desc.h +++ b/include/asm-x86/desc.h | |||
@@ -62,8 +62,8 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) | |||
62 | } | 62 | } |
63 | 63 | ||
64 | static inline void pack_gate(gate_desc *gate, unsigned char type, | 64 | static inline void pack_gate(gate_desc *gate, unsigned char type, |
65 | unsigned long base, unsigned dpl, unsigned flags, unsigned short seg) | 65 | unsigned long base, unsigned dpl, unsigned flags, |
66 | 66 | unsigned short seg) | |
67 | { | 67 | { |
68 | gate->a = (seg << 16) | (base & 0xffff); | 68 | gate->a = (seg << 16) | (base & 0xffff); |
69 | gate->b = (base & 0xffff0000) | | 69 | gate->b = (base & 0xffff0000) | |
@@ -84,22 +84,23 @@ static inline int desc_empty(const void *ptr) | |||
84 | #define load_TR_desc() native_load_tr_desc() | 84 | #define load_TR_desc() native_load_tr_desc() |
85 | #define load_gdt(dtr) native_load_gdt(dtr) | 85 | #define load_gdt(dtr) native_load_gdt(dtr) |
86 | #define load_idt(dtr) native_load_idt(dtr) | 86 | #define load_idt(dtr) native_load_idt(dtr) |
87 | #define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr)) | 87 | #define load_tr(tr) asm volatile("ltr %0"::"m" (tr)) |
88 | #define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt)) | 88 | #define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt)) |
89 | 89 | ||
90 | #define store_gdt(dtr) native_store_gdt(dtr) | 90 | #define store_gdt(dtr) native_store_gdt(dtr) |
91 | #define store_idt(dtr) native_store_idt(dtr) | 91 | #define store_idt(dtr) native_store_idt(dtr) |
92 | #define store_tr(tr) (tr = native_store_tr()) | 92 | #define store_tr(tr) (tr = native_store_tr()) |
93 | #define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt)) | 93 | #define store_ldt(ldt) asm("sldt %0":"=m" (ldt)) |
94 | 94 | ||
95 | #define load_TLS(t, cpu) native_load_tls(t, cpu) | 95 | #define load_TLS(t, cpu) native_load_tls(t, cpu) |
96 | #define set_ldt native_set_ldt | 96 | #define set_ldt native_set_ldt |
97 | 97 | ||
98 | #define write_ldt_entry(dt, entry, desc) \ | 98 | #define write_ldt_entry(dt, entry, desc) \ |
99 | native_write_ldt_entry(dt, entry, desc) | 99 | native_write_ldt_entry(dt, entry, desc) |
100 | #define write_gdt_entry(dt, entry, desc, type) \ | 100 | #define write_gdt_entry(dt, entry, desc, type) \ |
101 | native_write_gdt_entry(dt, entry, desc, type) | 101 | native_write_gdt_entry(dt, entry, desc, type) |
102 | #define write_idt_entry(dt, entry, g) native_write_idt_entry(dt, entry, g) | 102 | #define write_idt_entry(dt, entry, g) \ |
103 | native_write_idt_entry(dt, entry, g) | ||
103 | #endif | 104 | #endif |
104 | 105 | ||
105 | static inline void native_write_idt_entry(gate_desc *idt, int entry, | 106 | static inline void native_write_idt_entry(gate_desc *idt, int entry, |
@@ -138,8 +139,8 @@ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base, | |||
138 | { | 139 | { |
139 | desc->a = ((base & 0xffff) << 16) | (limit & 0xffff); | 140 | desc->a = ((base & 0xffff) << 16) | (limit & 0xffff); |
140 | desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) | | 141 | desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) | |
141 | (limit & 0x000f0000) | ((type & 0xff) << 8) | | 142 | (limit & 0x000f0000) | ((type & 0xff) << 8) | |
142 | ((flags & 0xf) << 20); | 143 | ((flags & 0xf) << 20); |
143 | desc->p = 1; | 144 | desc->p = 1; |
144 | } | 145 | } |
145 | 146 | ||
@@ -159,7 +160,6 @@ static inline void set_tssldt_descriptor(void *d, unsigned long addr, | |||
159 | desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF; | 160 | desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF; |
160 | desc->base3 = PTR_HIGH(addr); | 161 | desc->base3 = PTR_HIGH(addr); |
161 | #else | 162 | #else |
162 | |||
163 | pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0); | 163 | pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0); |
164 | #endif | 164 | #endif |
165 | } | 165 | } |
@@ -177,7 +177,8 @@ static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr) | |||
177 | * last valid byte | 177 | * last valid byte |
178 | */ | 178 | */ |
179 | set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS, | 179 | set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS, |
180 | IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1); | 180 | IO_BITMAP_OFFSET + IO_BITMAP_BYTES + |
181 | sizeof(unsigned long) - 1); | ||
181 | write_gdt_entry(d, entry, &tss, DESC_TSS); | 182 | write_gdt_entry(d, entry, &tss, DESC_TSS); |
182 | } | 183 | } |
183 | 184 | ||
@@ -186,7 +187,7 @@ static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr) | |||
186 | static inline void native_set_ldt(const void *addr, unsigned int entries) | 187 | static inline void native_set_ldt(const void *addr, unsigned int entries) |
187 | { | 188 | { |
188 | if (likely(entries == 0)) | 189 | if (likely(entries == 0)) |
189 | __asm__ __volatile__("lldt %w0"::"q" (0)); | 190 | asm volatile("lldt %w0"::"q" (0)); |
190 | else { | 191 | else { |
191 | unsigned cpu = smp_processor_id(); | 192 | unsigned cpu = smp_processor_id(); |
192 | ldt_desc ldt; | 193 | ldt_desc ldt; |
@@ -195,7 +196,7 @@ static inline void native_set_ldt(const void *addr, unsigned int entries) | |||
195 | DESC_LDT, entries * sizeof(ldt) - 1); | 196 | DESC_LDT, entries * sizeof(ldt) - 1); |
196 | write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, | 197 | write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, |
197 | &ldt, DESC_LDT); | 198 | &ldt, DESC_LDT); |
198 | __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); | 199 | asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); |
199 | } | 200 | } |
200 | } | 201 | } |
201 | 202 | ||
@@ -240,15 +241,15 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) | |||
240 | gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; | 241 | gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; |
241 | } | 242 | } |
242 | 243 | ||
243 | #define _LDT_empty(info) (\ | 244 | #define _LDT_empty(info) \ |
244 | (info)->base_addr == 0 && \ | 245 | ((info)->base_addr == 0 && \ |
245 | (info)->limit == 0 && \ | 246 | (info)->limit == 0 && \ |
246 | (info)->contents == 0 && \ | 247 | (info)->contents == 0 && \ |
247 | (info)->read_exec_only == 1 && \ | 248 | (info)->read_exec_only == 1 && \ |
248 | (info)->seg_32bit == 0 && \ | 249 | (info)->seg_32bit == 0 && \ |
249 | (info)->limit_in_pages == 0 && \ | 250 | (info)->limit_in_pages == 0 && \ |
250 | (info)->seg_not_present == 1 && \ | 251 | (info)->seg_not_present == 1 && \ |
251 | (info)->useable == 0) | 252 | (info)->useable == 0) |
252 | 253 | ||
253 | #ifdef CONFIG_X86_64 | 254 | #ifdef CONFIG_X86_64 |
254 | #define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0)) | 255 | #define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0)) |
@@ -287,7 +288,7 @@ static inline unsigned long get_desc_limit(const struct desc_struct *desc) | |||
287 | } | 288 | } |
288 | 289 | ||
289 | static inline void _set_gate(int gate, unsigned type, void *addr, | 290 | static inline void _set_gate(int gate, unsigned type, void *addr, |
290 | unsigned dpl, unsigned ist, unsigned seg) | 291 | unsigned dpl, unsigned ist, unsigned seg) |
291 | { | 292 | { |
292 | gate_desc s; | 293 | gate_desc s; |
293 | pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg); | 294 | pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg); |
@@ -370,10 +371,10 @@ static inline void set_system_gate_ist(int n, void *addr, unsigned ist) | |||
370 | * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax. | 371 | * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax. |
371 | */ | 372 | */ |
372 | #define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \ | 373 | #define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \ |
373 | movb idx*8+4(gdt), lo_b; \ | 374 | movb idx * 8 + 4(gdt), lo_b; \ |
374 | movb idx*8+7(gdt), hi_b; \ | 375 | movb idx * 8 + 7(gdt), hi_b; \ |
375 | shll $16, base; \ | 376 | shll $16, base; \ |
376 | movw idx*8+2(gdt), lo_w; | 377 | movw idx * 8 + 2(gdt), lo_w; |
377 | 378 | ||
378 | 379 | ||
379 | #endif /* __ASSEMBLY__ */ | 380 | #endif /* __ASSEMBLY__ */ |
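pack_gate() and pack_descriptor() above scatter base, limit, type and flags across the two 32-bit halves of a descriptor. A small sketch of the same packing that can be compiled and run to sanity-check the bit layout — pack and the test values are illustrative; bit 15 of the second word is the present bit, which the kernel version forces on with desc->p = 1:

#include <stdint.h>
#include <stdio.h>

/* Mirror of pack_descriptor(): low word gets base[15:0] and limit[15:0],
 * high word gets the remaining base/limit bits plus type and flags. */
static void pack(uint32_t *a, uint32_t *b, uint32_t base, uint32_t limit,
                 uint8_t type, uint8_t flags)
{
        *a = ((base & 0xffff) << 16) | (limit & 0xffff);
        *b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
             (limit & 0x000f0000) | ((uint32_t)type << 8) |
             ((uint32_t)(flags & 0xf) << 20);
}

int main(void)
{
        uint32_t a, b;

        /* flat 4GB code segment: base 0, limit 0xfffff, type 0x9b, flags 0xc */
        pack(&a, &b, 0x0, 0xfffff, 0x9b, 0xc);
        printf("a=%08x b=%08x\n", (unsigned)a, (unsigned)b);   /* a=0000ffff b=00cf9b00 */
        return 0;
}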
diff --git a/include/asm-x86/desc_defs.h b/include/asm-x86/desc_defs.h index e33f078b3e54..eccb4ea1f918 100644 --- a/include/asm-x86/desc_defs.h +++ b/include/asm-x86/desc_defs.h | |||
@@ -18,17 +18,19 @@ | |||
18 | * incrementally. We keep the signature as a struct, rather than a union, | 18 | * incrementally. We keep the signature as a struct, rather than a union, |
19 | * so we can get rid of it transparently in the future -- glommer | 19 | * so we can get rid of it transparently in the future -- glommer |
20 | */ | 20 | */ |
21 | // 8 byte segment descriptor | 21 | /* 8 byte segment descriptor */ |
22 | struct desc_struct { | 22 | struct desc_struct { |
23 | union { | 23 | union { |
24 | struct { unsigned int a, b; }; | 24 | struct { |
25 | unsigned int a; | ||
26 | unsigned int b; | ||
27 | }; | ||
25 | struct { | 28 | struct { |
26 | u16 limit0; | 29 | u16 limit0; |
27 | u16 base0; | 30 | u16 base0; |
28 | unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; | 31 | unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; |
29 | unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; | 32 | unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; |
30 | }; | 33 | }; |
31 | |||
32 | }; | 34 | }; |
33 | } __attribute__((packed)); | 35 | } __attribute__((packed)); |
34 | 36 | ||
@@ -39,7 +41,7 @@ enum { | |||
39 | GATE_TASK = 0x5, | 41 | GATE_TASK = 0x5, |
40 | }; | 42 | }; |
41 | 43 | ||
42 | // 16byte gate | 44 | /* 16byte gate */ |
43 | struct gate_struct64 { | 45 | struct gate_struct64 { |
44 | u16 offset_low; | 46 | u16 offset_low; |
45 | u16 segment; | 47 | u16 segment; |
@@ -56,10 +58,10 @@ struct gate_struct64 { | |||
56 | enum { | 58 | enum { |
57 | DESC_TSS = 0x9, | 59 | DESC_TSS = 0x9, |
58 | DESC_LDT = 0x2, | 60 | DESC_LDT = 0x2, |
59 | DESCTYPE_S = 0x10, /* !system */ | 61 | DESCTYPE_S = 0x10, /* !system */ |
60 | }; | 62 | }; |
61 | 63 | ||
62 | // LDT or TSS descriptor in the GDT. 16 bytes. | 64 | /* LDT or TSS descriptor in the GDT. 16 bytes. */ |
63 | struct ldttss_desc64 { | 65 | struct ldttss_desc64 { |
64 | u16 limit0; | 66 | u16 limit0; |
65 | u16 base0; | 67 | u16 base0; |
@@ -84,7 +86,6 @@ struct desc_ptr { | |||
84 | unsigned long address; | 86 | unsigned long address; |
85 | } __attribute__((packed)) ; | 87 | } __attribute__((packed)) ; |
86 | 88 | ||
87 | |||
88 | #endif /* !__ASSEMBLY__ */ | 89 | #endif /* !__ASSEMBLY__ */ |
89 | 90 | ||
90 | #endif | 91 | #endif |
diff --git a/include/asm-x86/div64.h b/include/asm-x86/div64.h index e98d16e7a37a..0dbf8bf3ef0a 100644 --- a/include/asm-x86/div64.h +++ b/include/asm-x86/div64.h | |||
@@ -17,18 +17,20 @@ | |||
17 | * This ends up being the most efficient "calling | 17 | * This ends up being the most efficient "calling |
18 | * convention" on x86. | 18 | * convention" on x86. |
19 | */ | 19 | */ |
20 | #define do_div(n,base) ({ \ | 20 | #define do_div(n, base) \ |
21 | unsigned long __upper, __low, __high, __mod, __base; \ | 21 | ({ \ |
22 | __base = (base); \ | 22 | unsigned long __upper, __low, __high, __mod, __base; \ |
23 | asm("":"=a" (__low), "=d" (__high):"A" (n)); \ | 23 | __base = (base); \ |
24 | __upper = __high; \ | 24 | asm("":"=a" (__low), "=d" (__high) : "A" (n)); \ |
25 | if (__high) { \ | 25 | __upper = __high; \ |
26 | __upper = __high % (__base); \ | 26 | if (__high) { \ |
27 | __high = __high / (__base); \ | 27 | __upper = __high % (__base); \ |
28 | } \ | 28 | __high = __high / (__base); \ |
29 | asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \ | 29 | } \ |
30 | asm("":"=A" (n):"a" (__low),"d" (__high)); \ | 30 | asm("divl %2":"=a" (__low), "=d" (__mod) \ |
31 | __mod; \ | 31 | : "rm" (__base), "0" (__low), "1" (__upper)); \ |
32 | asm("":"=A" (n) : "a" (__low), "d" (__high)); \ | ||
33 | __mod; \ | ||
32 | }) | 34 | }) |
33 | 35 | ||
34 | /* | 36 | /* |
@@ -37,14 +39,13 @@ | |||
37 | * | 39 | * |
38 | * Warning: this will raise an exception if X overflows. | 40 | * Warning: this will raise an exception if X overflows. |
39 | */ | 41 | */ |
40 | #define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c) | 42 | #define div_long_long_rem(a, b, c) div_ll_X_l_rem(a, b, c) |
41 | 43 | ||
42 | static inline long | 44 | static inline long div_ll_X_l_rem(long long divs, long div, long *rem) |
43 | div_ll_X_l_rem(long long divs, long div, long *rem) | ||
44 | { | 45 | { |
45 | long dum2; | 46 | long dum2; |
46 | __asm__("divl %2":"=a"(dum2), "=d"(*rem) | 47 | asm("divl %2":"=a"(dum2), "=d"(*rem) |
47 | : "rm"(div), "A"(divs)); | 48 | : "rm"(div), "A"(divs)); |
48 | 49 | ||
49 | return dum2; | 50 | return dum2; |
50 | 51 | ||
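do_div() divides a 64-bit value by a 32-bit base in place and evaluates to the remainder, which is why it is a statement-expression macro rather than a function. A portable sketch of the same contract (do_div_sketch is an invented name; the kernel macro exists because 32-bit x86 has no native 64/64 divide, so it uses divl on the two halves as shown above):

#include <stdint.h>
#include <stdio.h>

/* Same contract as do_div(): *n becomes the quotient,
 * the remainder is the return value. */
static uint32_t do_div_sketch(uint64_t *n, uint32_t base)
{
        uint32_t rem = (uint32_t)(*n % base);

        *n /= base;
        return rem;
}

int main(void)
{
        uint64_t ns = 1000000007ull;
        uint32_t rem = do_div_sketch(&ns, 1000000000u);

        printf("q=%llu r=%u\n", (unsigned long long)ns, rem);  /* q=1 r=7 */
        return 0;
}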
diff --git a/include/asm-x86/dma.h b/include/asm-x86/dma.h index e9733ce89880..ca1098a7e580 100644 --- a/include/asm-x86/dma.h +++ b/include/asm-x86/dma.h | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <asm/io.h> /* need byte IO */ | 12 | #include <asm/io.h> /* need byte IO */ |
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | 14 | ||
15 | |||
16 | #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER | 15 | #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER |
17 | #define dma_outb outb_p | 16 | #define dma_outb outb_p |
18 | #else | 17 | #else |
@@ -74,15 +73,15 @@ | |||
74 | #ifdef CONFIG_X86_32 | 73 | #ifdef CONFIG_X86_32 |
75 | 74 | ||
76 | /* The maximum address that we can perform a DMA transfer to on this platform */ | 75 | /* The maximum address that we can perform a DMA transfer to on this platform */ |
77 | #define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000) | 76 | #define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x1000000) |
78 | 77 | ||
79 | #else | 78 | #else |
80 | 79 | ||
81 | /* 16MB ISA DMA zone */ | 80 | /* 16MB ISA DMA zone */ |
82 | #define MAX_DMA_PFN ((16*1024*1024) >> PAGE_SHIFT) | 81 | #define MAX_DMA_PFN ((16 * 1024 * 1024) >> PAGE_SHIFT) |
83 | 82 | ||
84 | /* 4GB broken PCI/AGP hardware bus master zone */ | 83 | /* 4GB broken PCI/AGP hardware bus master zone */ |
85 | #define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT) | 84 | #define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT) |
86 | 85 | ||
87 | /* Compat define for old dma zone */ | 86 | /* Compat define for old dma zone */ |
88 | #define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT)) | 87 | #define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT)) |
@@ -154,20 +153,20 @@ | |||
154 | 153 | ||
155 | extern spinlock_t dma_spin_lock; | 154 | extern spinlock_t dma_spin_lock; |
156 | 155 | ||
157 | static __inline__ unsigned long claim_dma_lock(void) | 156 | static inline unsigned long claim_dma_lock(void) |
158 | { | 157 | { |
159 | unsigned long flags; | 158 | unsigned long flags; |
160 | spin_lock_irqsave(&dma_spin_lock, flags); | 159 | spin_lock_irqsave(&dma_spin_lock, flags); |
161 | return flags; | 160 | return flags; |
162 | } | 161 | } |
163 | 162 | ||
164 | static __inline__ void release_dma_lock(unsigned long flags) | 163 | static inline void release_dma_lock(unsigned long flags) |
165 | { | 164 | { |
166 | spin_unlock_irqrestore(&dma_spin_lock, flags); | 165 | spin_unlock_irqrestore(&dma_spin_lock, flags); |
167 | } | 166 | } |
168 | 167 | ||
169 | /* enable/disable a specific DMA channel */ | 168 | /* enable/disable a specific DMA channel */ |
170 | static __inline__ void enable_dma(unsigned int dmanr) | 169 | static inline void enable_dma(unsigned int dmanr) |
171 | { | 170 | { |
172 | if (dmanr <= 3) | 171 | if (dmanr <= 3) |
173 | dma_outb(dmanr, DMA1_MASK_REG); | 172 | dma_outb(dmanr, DMA1_MASK_REG); |
@@ -175,7 +174,7 @@ static __inline__ void enable_dma(unsigned int dmanr) | |||
175 | dma_outb(dmanr & 3, DMA2_MASK_REG); | 174 | dma_outb(dmanr & 3, DMA2_MASK_REG); |
176 | } | 175 | } |
177 | 176 | ||
178 | static __inline__ void disable_dma(unsigned int dmanr) | 177 | static inline void disable_dma(unsigned int dmanr) |
179 | { | 178 | { |
180 | if (dmanr <= 3) | 179 | if (dmanr <= 3) |
181 | dma_outb(dmanr | 4, DMA1_MASK_REG); | 180 | dma_outb(dmanr | 4, DMA1_MASK_REG); |
@@ -190,7 +189,7 @@ static __inline__ void disable_dma(unsigned int dmanr) | |||
190 | * --- In order to do that, the DMA routines below should --- | 189 | * --- In order to do that, the DMA routines below should --- |
191 | * --- only be used while holding the DMA lock ! --- | 190 | * --- only be used while holding the DMA lock ! --- |
192 | */ | 191 | */ |
193 | static __inline__ void clear_dma_ff(unsigned int dmanr) | 192 | static inline void clear_dma_ff(unsigned int dmanr) |
194 | { | 193 | { |
195 | if (dmanr <= 3) | 194 | if (dmanr <= 3) |
196 | dma_outb(0, DMA1_CLEAR_FF_REG); | 195 | dma_outb(0, DMA1_CLEAR_FF_REG); |
@@ -199,7 +198,7 @@ static __inline__ void clear_dma_ff(unsigned int dmanr) | |||
199 | } | 198 | } |
200 | 199 | ||
201 | /* set mode (above) for a specific DMA channel */ | 200 | /* set mode (above) for a specific DMA channel */ |
202 | static __inline__ void set_dma_mode(unsigned int dmanr, char mode) | 201 | static inline void set_dma_mode(unsigned int dmanr, char mode) |
203 | { | 202 | { |
204 | if (dmanr <= 3) | 203 | if (dmanr <= 3) |
205 | dma_outb(mode | dmanr, DMA1_MODE_REG); | 204 | dma_outb(mode | dmanr, DMA1_MODE_REG); |
@@ -212,7 +211,7 @@ static __inline__ void set_dma_mode(unsigned int dmanr, char mode) | |||
212 | * the lower 16 bits of the DMA current address register, but a 64k boundary | 211 | * the lower 16 bits of the DMA current address register, but a 64k boundary |
213 | * may have been crossed. | 212 | * may have been crossed. |
214 | */ | 213 | */ |
215 | static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) | 214 | static inline void set_dma_page(unsigned int dmanr, char pagenr) |
216 | { | 215 | { |
217 | switch (dmanr) { | 216 | switch (dmanr) { |
218 | case 0: | 217 | case 0: |
@@ -243,15 +242,15 @@ static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) | |||
243 | /* Set transfer address & page bits for specific DMA channel. | 242 | /* Set transfer address & page bits for specific DMA channel. |
244 | * Assumes dma flipflop is clear. | 243 | * Assumes dma flipflop is clear. |
245 | */ | 244 | */ |
246 | static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) | 245 | static inline void set_dma_addr(unsigned int dmanr, unsigned int a) |
247 | { | 246 | { |
248 | set_dma_page(dmanr, a>>16); | 247 | set_dma_page(dmanr, a>>16); |
249 | if (dmanr <= 3) { | 248 | if (dmanr <= 3) { |
250 | dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); | 249 | dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); |
251 | dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); | 250 | dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); |
252 | } else { | 251 | } else { |
253 | dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); | 252 | dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); |
254 | dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); | 253 | dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); |
255 | } | 254 | } |
256 | } | 255 | } |
257 | 256 | ||
@@ -264,18 +263,18 @@ static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) | |||
264 | * Assumes dma flip-flop is clear. | 263 | * Assumes dma flip-flop is clear. |
265 | * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. | 264 | * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. |
266 | */ | 265 | */ |
267 | static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) | 266 | static inline void set_dma_count(unsigned int dmanr, unsigned int count) |
268 | { | 267 | { |
269 | count--; | 268 | count--; |
270 | if (dmanr <= 3) { | 269 | if (dmanr <= 3) { |
271 | dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); | 270 | dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); |
272 | dma_outb((count >> 8) & 0xff, | 271 | dma_outb((count >> 8) & 0xff, |
273 | ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); | 272 | ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); |
274 | } else { | 273 | } else { |
275 | dma_outb((count >> 1) & 0xff, | 274 | dma_outb((count >> 1) & 0xff, |
276 | ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); | 275 | ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); |
277 | dma_outb((count >> 9) & 0xff, | 276 | dma_outb((count >> 9) & 0xff, |
278 | ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); | 277 | ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); |
279 | } | 278 | } |
280 | } | 279 | } |
281 | 280 | ||
@@ -288,7 +287,7 @@ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) | |||
288 | * | 287 | * |
289 | * Assumes DMA flip-flop is clear. | 288 | * Assumes DMA flip-flop is clear. |
290 | */ | 289 | */ |
291 | static __inline__ int get_dma_residue(unsigned int dmanr) | 290 | static inline int get_dma_residue(unsigned int dmanr) |
292 | { | 291 | { |
293 | unsigned int io_port; | 292 | unsigned int io_port; |
294 | /* using short to get 16-bit wrap around */ | 293 | /* using short to get 16-bit wrap around */ |
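set_dma_addr() above splits a physical address into an 8-bit page register plus a 16-bit offset, with channels 5-7 counting 16-bit words so the offset is shifted right by one. A tiny sketch that prints just the split, to make the shifting concrete (the address is arbitrary):

#include <stdio.h>

int main(void)
{
        unsigned int a = 0x123456;      /* physical address below 16MB */

        /* 8-bit channels 0-3: page = bits 23-16, offset = low 16 bits */
        printf("page=%02x lo=%02x hi=%02x\n",
               (a >> 16) & 0xff, a & 0xff, (a >> 8) & 0xff);

        /* 16-bit channels 5-7: the controller counts words, so the
         * offset registers get the address shifted right by one */
        printf("page=%02x lo=%02x hi=%02x\n",
               (a >> 16) & 0xff, (a >> 1) & 0xff, (a >> 9) & 0xff);
        return 0;
}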
diff --git a/include/asm-x86/dwarf2_64.h b/include/asm-x86/dwarf2_64.h index eedc08526b0b..c950519a264d 100644 --- a/include/asm-x86/dwarf2_64.h +++ b/include/asm-x86/dwarf2_64.h | |||
@@ -1,16 +1,15 @@ | |||
1 | #ifndef _DWARF2_H | 1 | #ifndef _DWARF2_H |
2 | #define _DWARF2_H 1 | 2 | #define _DWARF2_H 1 |
3 | 3 | ||
4 | |||
5 | #ifndef __ASSEMBLY__ | 4 | #ifndef __ASSEMBLY__ |
6 | #warning "asm/dwarf2.h should be only included in pure assembly files" | 5 | #warning "asm/dwarf2.h should be only included in pure assembly files" |
7 | #endif | 6 | #endif |
8 | 7 | ||
9 | /* | 8 | /* |
10 | Macros for dwarf2 CFI unwind table entries. | 9 | Macros for dwarf2 CFI unwind table entries. |
11 | See "as.info" for details on these pseudo ops. Unfortunately | 10 | See "as.info" for details on these pseudo ops. Unfortunately |
12 | they are only supported in very new binutils, so define them | 11 | they are only supported in very new binutils, so define them |
13 | away for older version. | 12 | away for older version. |
14 | */ | 13 | */ |
15 | 14 | ||
16 | #ifdef CONFIG_AS_CFI | 15 | #ifdef CONFIG_AS_CFI |
diff --git a/include/asm-x86/e820_32.h b/include/asm-x86/e820_32.h index e7207a6de3e0..43b1a8bd4b34 100644 --- a/include/asm-x86/e820_32.h +++ b/include/asm-x86/e820_32.h | |||
@@ -34,8 +34,8 @@ extern void e820_register_memory(void); | |||
34 | extern void limit_regions(unsigned long long size); | 34 | extern void limit_regions(unsigned long long size); |
35 | extern void print_memory_map(char *who); | 35 | extern void print_memory_map(char *who); |
36 | extern void init_iomem_resources(struct resource *code_resource, | 36 | extern void init_iomem_resources(struct resource *code_resource, |
37 | struct resource *data_resource, | 37 | struct resource *data_resource, |
38 | struct resource *bss_resource); | 38 | struct resource *bss_resource); |
39 | 39 | ||
40 | #if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION) | 40 | #if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION) |
41 | extern void e820_mark_nosave_regions(void); | 41 | extern void e820_mark_nosave_regions(void); |
diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h index 22ede73ae724..f478c57eb060 100644 --- a/include/asm-x86/e820_64.h +++ b/include/asm-x86/e820_64.h | |||
@@ -14,20 +14,24 @@ | |||
14 | #include <linux/ioport.h> | 14 | #include <linux/ioport.h> |
15 | 15 | ||
16 | #ifndef __ASSEMBLY__ | 16 | #ifndef __ASSEMBLY__ |
17 | extern unsigned long find_e820_area(unsigned long start, unsigned long end, | 17 | extern unsigned long find_e820_area(unsigned long start, unsigned long end, |
18 | unsigned size, unsigned long align); | 18 | unsigned long size, unsigned long align); |
19 | extern void add_memory_region(unsigned long start, unsigned long size, | 19 | extern unsigned long find_e820_area_size(unsigned long start, |
20 | unsigned long *sizep, | ||
21 | unsigned long align); | ||
22 | extern void add_memory_region(unsigned long start, unsigned long size, | ||
20 | int type); | 23 | int type); |
21 | extern void update_memory_range(u64 start, u64 size, unsigned old_type, | 24 | extern void update_memory_range(u64 start, u64 size, unsigned old_type, |
22 | unsigned new_type); | 25 | unsigned new_type); |
23 | extern void setup_memory_region(void); | 26 | extern void setup_memory_region(void); |
24 | extern void contig_e820_setup(void); | 27 | extern void contig_e820_setup(void); |
25 | extern unsigned long e820_end_of_ram(void); | 28 | extern unsigned long e820_end_of_ram(void); |
26 | extern void e820_reserve_resources(struct resource *code_resource, | 29 | extern void e820_reserve_resources(void); |
27 | struct resource *data_resource, struct resource *bss_resource); | ||
28 | extern void e820_mark_nosave_regions(void); | 30 | extern void e820_mark_nosave_regions(void); |
29 | extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type); | 31 | extern int e820_any_mapped(unsigned long start, unsigned long end, |
30 | extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type); | 32 | unsigned type); |
33 | extern int e820_all_mapped(unsigned long start, unsigned long end, | ||
34 | unsigned type); | ||
31 | extern int e820_any_non_reserved(unsigned long start, unsigned long end); | 35 | extern int e820_any_non_reserved(unsigned long start, unsigned long end); |
32 | extern int is_memory_any_valid(unsigned long start, unsigned long end); | 36 | extern int is_memory_any_valid(unsigned long start, unsigned long end); |
33 | extern int e820_all_non_reserved(unsigned long start, unsigned long end); | 37 | extern int e820_all_non_reserved(unsigned long start, unsigned long end); |
@@ -35,8 +39,8 @@ extern int is_memory_all_valid(unsigned long start, unsigned long end); | |||
35 | extern unsigned long e820_hole_size(unsigned long start, unsigned long end); | 39 | extern unsigned long e820_hole_size(unsigned long start, unsigned long end); |
36 | 40 | ||
37 | extern void e820_setup_gap(void); | 41 | extern void e820_setup_gap(void); |
38 | extern void e820_register_active_regions(int nid, | 42 | extern void e820_register_active_regions(int nid, unsigned long start_pfn, |
39 | unsigned long start_pfn, unsigned long end_pfn); | 43 | unsigned long end_pfn); |
40 | 44 | ||
41 | extern void finish_e820_parsing(void); | 45 | extern void finish_e820_parsing(void); |
42 | 46 | ||
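e820_any_mapped() and e820_all_mapped() are overlap queries against the firmware-provided memory map. A hedged sketch of the "any" variant over a toy map — struct region, the map contents and any_mapped() are all invented for illustration; the real e820 table lives in a different structure:

#include <stdbool.h>

struct region {
        unsigned long start, end;       /* [start, end) */
        unsigned type;
};

/* True if any byte of [start, end) lies in a region of the given type,
 * in the spirit of e820_any_mapped(). */
static bool any_mapped(const struct region *map, int n,
                       unsigned long start, unsigned long end, unsigned type)
{
        int i;

        for (i = 0; i < n; i++)
                if (map[i].type == type &&
                    map[i].start < end && start < map[i].end)
                        return true;
        return false;
}

int main(void)
{
        struct region map[] = {
                { 0x00000000, 0x0009f000, 1 },  /* low RAM */
                { 0x00100000, 0x08000000, 1 },  /* 1MB..128MB RAM */
        };

        return !any_mapped(map, 2, 0x200000, 0x300000, 1);  /* exits 0 */
}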
diff --git a/include/asm-x86/edac.h b/include/asm-x86/edac.h index cf3200a745ad..a8088f63a30e 100644 --- a/include/asm-x86/edac.h +++ b/include/asm-x86/edac.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | /* ECC atomic, DMA, SMP and interrupt safe scrub function */ | 4 | /* ECC atomic, DMA, SMP and interrupt safe scrub function */ |
5 | 5 | ||
6 | static __inline__ void atomic_scrub(void *va, u32 size) | 6 | static inline void atomic_scrub(void *va, u32 size) |
7 | { | 7 | { |
8 | u32 i, *virt_addr = va; | 8 | u32 i, *virt_addr = va; |
9 | 9 | ||
@@ -12,7 +12,7 @@ static __inline__ void atomic_scrub(void *va, u32 size) | |||
12 | * are interrupt, DMA and SMP safe. | 12 | * are interrupt, DMA and SMP safe. |
13 | */ | 13 | */ |
14 | for (i = 0; i < size / 4; i++, virt_addr++) | 14 | for (i = 0; i < size / 4; i++, virt_addr++) |
15 | __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr)); | 15 | asm volatile("lock; addl $0, %0"::"m" (*virt_addr)); |
16 | } | 16 | } |
17 | 17 | ||
18 | #endif | 18 | #endif |
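atomic_scrub() works because a locked add of zero is still a full read-modify-write bus cycle, so the memory controller rewrites (and re-computes ECC for) each word it touches. The same no-op RMW can be expressed with a GCC atomic builtin; a minimal sketch (scrub_sketch is an invented name):

#include <stdint.h>

/* Touch each 32-bit word with an atomic add of zero: the value is
 * unchanged, but like "lock; addl $0, %0" every access is a full
 * locked read-modify-write, so the line is rewritten with fresh ECC. */
static void scrub_sketch(void *va, uint32_t size)
{
        uint32_t *p = va;
        uint32_t i;

        for (i = 0; i < size / 4; i++, p++)
                __sync_fetch_and_add(p, 0);
}

int main(void)
{
        static uint32_t buf[16];

        scrub_sketch(buf, sizeof(buf));
        return 0;
}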
diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h index ea9734b74aca..d53004b855cc 100644 --- a/include/asm-x86/efi.h +++ b/include/asm-x86/efi.h | |||
@@ -20,7 +20,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...); | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #define efi_call_virt(f, args...) \ | 22 | #define efi_call_virt(f, args...) \ |
23 | ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args) | 23 | ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args) |
24 | 24 | ||
25 | #define efi_call_virt0(f) efi_call_virt(f) | 25 | #define efi_call_virt0(f) efi_call_virt(f) |
26 | #define efi_call_virt1(f, a1) efi_call_virt(f, a1) | 26 | #define efi_call_virt1(f, a1) efi_call_virt(f, a1) |
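efi_call_virt() pastes the service name into both a function-pointer type (efi_##f##_t) and a member lookup on the runtime services table. A toy sketch of the same ## dispatch pattern with invented names (struct runtime, call_virt, my_get_time); like the kernel macro it relies on GCC's named-variadic args... extension:

#include <stdio.h>

/* Stand-in for efi.systab->runtime: a table of service pointers */
struct runtime {
        long (*get_time)(int tz);
};

typedef long fn_get_time_t(int);

/* Like efi_call_virt(): paste the name into both the pointer type
 * and the member lookup, forwarding the arguments */
#define call_virt(rt, f, args...) \
        ((fn_##f##_t *)(rt)->f)(args)

static long my_get_time(int tz)
{
        return 42 + tz;
}

int main(void)
{
        struct runtime rt = { .get_time = my_get_time };

        printf("%ld\n", call_virt(&rt, get_time, 0));   /* 42 */
        return 0;
}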
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h index fb62f9941e38..8f232dc5b5fe 100644 --- a/include/asm-x86/elf.h +++ b/include/asm-x86/elf.h | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | typedef unsigned long elf_greg_t; | 12 | typedef unsigned long elf_greg_t; |
13 | 13 | ||
14 | #define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) | 14 | #define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t)) |
15 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | 15 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; |
16 | 16 | ||
17 | typedef struct user_i387_struct elf_fpregset_t; | 17 | typedef struct user_i387_struct elf_fpregset_t; |
@@ -82,8 +82,9 @@ extern unsigned int vdso_enabled; | |||
82 | #define elf_check_arch_ia32(x) \ | 82 | #define elf_check_arch_ia32(x) \ |
83 | (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) | 83 | (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) |
84 | 84 | ||
85 | #ifdef CONFIG_X86_32 | ||
86 | #include <asm/processor.h> | 85 | #include <asm/processor.h> |
86 | |||
87 | #ifdef CONFIG_X86_32 | ||
87 | #include <asm/system.h> /* for savesegment */ | 88 | #include <asm/system.h> /* for savesegment */ |
88 | #include <asm/desc.h> | 89 | #include <asm/desc.h> |
89 | 90 | ||
@@ -99,10 +100,11 @@ extern unsigned int vdso_enabled; | |||
99 | We might as well make sure everything else is cleared too (except for %esp), | 100 | We might as well make sure everything else is cleared too (except for %esp), |
100 | just to make things more deterministic. | 101 | just to make things more deterministic. |
101 | */ | 102 | */ |
102 | #define ELF_PLAT_INIT(_r, load_addr) do { \ | 103 | #define ELF_PLAT_INIT(_r, load_addr) \ |
103 | _r->bx = 0; _r->cx = 0; _r->dx = 0; \ | 104 | do { \ |
104 | _r->si = 0; _r->di = 0; _r->bp = 0; \ | 105 | _r->bx = 0; _r->cx = 0; _r->dx = 0; \ |
105 | _r->ax = 0; \ | 106 | _r->si = 0; _r->di = 0; _r->bp = 0; \ |
107 | _r->ax = 0; \ | ||
106 | } while (0) | 108 | } while (0) |
107 | 109 | ||
108 | /* | 110 | /* |
@@ -110,24 +112,25 @@ extern unsigned int vdso_enabled; | |||
110 | * now struct_user_regs, they are different) | 112 | * now struct_user_regs, they are different) |
111 | */ | 113 | */ |
112 | 114 | ||
113 | #define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ | 115 | #define ELF_CORE_COPY_REGS(pr_reg, regs) \ |
114 | pr_reg[0] = regs->bx; \ | 116 | do { \ |
115 | pr_reg[1] = regs->cx; \ | 117 | pr_reg[0] = regs->bx; \ |
116 | pr_reg[2] = regs->dx; \ | 118 | pr_reg[1] = regs->cx; \ |
117 | pr_reg[3] = regs->si; \ | 119 | pr_reg[2] = regs->dx; \ |
118 | pr_reg[4] = regs->di; \ | 120 | pr_reg[3] = regs->si; \ |
119 | pr_reg[5] = regs->bp; \ | 121 | pr_reg[4] = regs->di; \ |
120 | pr_reg[6] = regs->ax; \ | 122 | pr_reg[5] = regs->bp; \ |
121 | pr_reg[7] = regs->ds & 0xffff; \ | 123 | pr_reg[6] = regs->ax; \ |
122 | pr_reg[8] = regs->es & 0xffff; \ | 124 | pr_reg[7] = regs->ds & 0xffff; \ |
123 | pr_reg[9] = regs->fs & 0xffff; \ | 125 | pr_reg[8] = regs->es & 0xffff; \ |
124 | savesegment(gs, pr_reg[10]); \ | 126 | pr_reg[9] = regs->fs & 0xffff; \ |
125 | pr_reg[11] = regs->orig_ax; \ | 127 | savesegment(gs, pr_reg[10]); \ |
126 | pr_reg[12] = regs->ip; \ | 128 | pr_reg[11] = regs->orig_ax; \ |
127 | pr_reg[13] = regs->cs & 0xffff; \ | 129 | pr_reg[12] = regs->ip; \ |
128 | pr_reg[14] = regs->flags; \ | 130 | pr_reg[13] = regs->cs & 0xffff; \ |
129 | pr_reg[15] = regs->sp; \ | 131 | pr_reg[14] = regs->flags; \ |
130 | pr_reg[16] = regs->ss & 0xffff; \ | 132 | pr_reg[15] = regs->sp; \ |
133 | pr_reg[16] = regs->ss & 0xffff; \ | ||
131 | } while (0); | 134 | } while (0); |
132 | 135 | ||
133 | #define ELF_PLATFORM (utsname()->machine) | 136 | #define ELF_PLATFORM (utsname()->machine) |
@@ -135,12 +138,10 @@ extern unsigned int vdso_enabled; | |||
135 | 138 | ||
136 | #else /* CONFIG_X86_32 */ | 139 | #else /* CONFIG_X86_32 */ |
137 | 140 | ||
138 | #include <asm/processor.h> | ||
139 | |||
140 | /* | 141 | /* |
141 | * This is used to ensure we don't load something for the wrong architecture. | 142 | * This is used to ensure we don't load something for the wrong architecture. |
142 | */ | 143 | */ |
143 | #define elf_check_arch(x) \ | 144 | #define elf_check_arch(x) \ |
144 | ((x)->e_machine == EM_X86_64) | 145 | ((x)->e_machine == EM_X86_64) |
145 | 146 | ||
146 | #define compat_elf_check_arch(x) elf_check_arch_ia32(x) | 147 | #define compat_elf_check_arch(x) elf_check_arch_ia32(x) |
@@ -169,24 +170,30 @@ static inline void elf_common_init(struct thread_struct *t, | |||
169 | t->ds = t->es = ds; | 170 | t->ds = t->es = ds; |
170 | } | 171 | } |
171 | 172 | ||
172 | #define ELF_PLAT_INIT(_r, load_addr) do { \ | 173 | #define ELF_PLAT_INIT(_r, load_addr) \ |
173 | elf_common_init(¤t->thread, _r, 0); \ | 174 | do { \ |
174 | clear_thread_flag(TIF_IA32); \ | 175 | elf_common_init(¤t->thread, _r, 0); \ |
176 | clear_thread_flag(TIF_IA32); \ | ||
175 | } while (0) | 177 | } while (0) |
176 | 178 | ||
177 | #define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ | 179 | #define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ |
178 | elf_common_init(¤t->thread, regs, __USER_DS) | 180 | elf_common_init(¤t->thread, regs, __USER_DS) |
179 | #define compat_start_thread(regs, ip, sp) do { \ | 181 | |
180 | start_ia32_thread(regs, ip, sp); \ | 182 | #define compat_start_thread(regs, ip, sp) \ |
181 | set_fs(USER_DS); \ | 183 | do { \ |
182 | } while (0) | 184 | start_ia32_thread(regs, ip, sp); \ |
183 | #define COMPAT_SET_PERSONALITY(ex, ibcs2) do { \ | 185 | set_fs(USER_DS); \ |
184 | if (test_thread_flag(TIF_IA32)) \ | 186 | } while (0) |
185 | clear_thread_flag(TIF_ABI_PENDING); \ | 187 | |
186 | else \ | 188 | #define COMPAT_SET_PERSONALITY(ex, ibcs2) \ |
187 | set_thread_flag(TIF_ABI_PENDING); \ | 189 | do { \ |
188 | current->personality |= force_personality32; \ | 190 | if (test_thread_flag(TIF_IA32)) \ |
189 | } while (0) | 191 | clear_thread_flag(TIF_ABI_PENDING); \ |
192 | else \ | ||
193 | set_thread_flag(TIF_ABI_PENDING); \ | ||
194 | current->personality |= force_personality32; \ | ||
195 | } while (0) | ||
196 | |||
190 | #define COMPAT_ELF_PLATFORM ("i686") | 197 | #define COMPAT_ELF_PLATFORM ("i686") |
191 | 198 | ||
192 | /* | 199 | /* |
@@ -195,7 +202,8 @@ static inline void elf_common_init(struct thread_struct *t, | |||
195 | * getting dumped. | 202 | * getting dumped. |
196 | */ | 203 | */ |
197 | 204 | ||
198 | #define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ | 205 | #define ELF_CORE_COPY_REGS(pr_reg, regs) \ |
206 | do { \ | ||
199 | unsigned v; \ | 207 | unsigned v; \ |
200 | (pr_reg)[0] = (regs)->r15; \ | 208 | (pr_reg)[0] = (regs)->r15; \ |
201 | (pr_reg)[1] = (regs)->r14; \ | 209 | (pr_reg)[1] = (regs)->r14; \ |
@@ -269,10 +277,12 @@ extern int force_personality32; | |||
269 | 277 | ||
270 | struct task_struct; | 278 | struct task_struct; |
271 | 279 | ||
272 | #define ARCH_DLINFO_IA32(vdso_enabled) \ | 280 | #define ARCH_DLINFO_IA32(vdso_enabled) \ |
273 | do if (vdso_enabled) { \ | 281 | do { \ |
282 | if (vdso_enabled) { \ | ||
274 | NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ | 283 | NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ |
275 | NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ | 284 | NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ |
285 | } \ | ||
276 | } while (0) | 286 | } while (0) |
277 | 287 | ||
278 | #ifdef CONFIG_X86_32 | 288 | #ifdef CONFIG_X86_32 |
@@ -290,9 +300,11 @@ do if (vdso_enabled) { \ | |||
290 | /* 1GB for 64bit, 8MB for 32bit */ | 300 | /* 1GB for 64bit, 8MB for 32bit */ |
291 | #define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff) | 301 | #define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff) |
292 | 302 | ||
293 | #define ARCH_DLINFO \ | 303 | #define ARCH_DLINFO \ |
294 | do if (vdso_enabled) { \ | 304 | do { \ |
295 | NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\ | 305 | if (vdso_enabled) \ |
306 | NEW_AUX_ENT(AT_SYSINFO_EHDR, \ | ||
307 | (unsigned long)current->mm->context.vdso); \ | ||
296 | } while (0) | 308 | } while (0) |
297 | 309 | ||
298 | #define AT_SYSINFO 32 | 310 | #define AT_SYSINFO 32 |
@@ -305,8 +317,8 @@ do if (vdso_enabled) { \ | |||
305 | 317 | ||
306 | #define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) | 318 | #define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) |
307 | 319 | ||
308 | #define VDSO_ENTRY \ | 320 | #define VDSO_ENTRY \ |
309 | ((unsigned long) VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall)) | 321 | ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall)) |
310 | 322 | ||
311 | struct linux_binprm; | 323 | struct linux_binprm; |
312 | 324 | ||
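Most of the macro churn in this file converts one-line do { ... } while (0) bodies into the multi-line form; the wrapper itself exists so a multi-statement macro behaves as exactly one statement at the call site. A small sketch of the pitfall it prevents (RESET_BAD/RESET_GOOD are hypothetical names):

#include <stdio.h>

/* Unsafe: expands to two statements, so only the first one is
 * governed by an unbraced if. */
#define RESET_BAD(x, y)	(x) = 0; (y) = 0

/* Safe: the do/while makes the whole body one statement and still
 * requires a trailing semicolon at the call site. */
#define RESET_GOOD(x, y)		\
do {					\
	(x) = 0;			\
	(y) = 0;			\
} while (0)

int main(void)
{
	int a = 1, b = 1, cond = 0;

	if (cond)
		RESET_BAD(a, b);	/* b = 0 runs unconditionally */
	printf("bad:  a=%d b=%d\n", a, b);	/* a=1 b=0 */

	a = 1; b = 1;
	if (cond)
		RESET_GOOD(a, b);	/* neither assignment runs */
	printf("good: a=%d b=%d\n", a, b);	/* a=1 b=1 */
	return 0;
}

Note that ELF_CORE_COPY_REGS above still ends in "} while (0);" on both sides of the diff: the trailing semicolon adds a null statement at every expansion, which re-breaks unbraced if/else call sites in exactly the way the wrapper is meant to avoid.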
diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h index a7404d50686b..eb1665125c44 100644 --- a/include/asm-x86/fixmap_32.h +++ b/include/asm-x86/fixmap_32.h | |||
@@ -99,8 +99,7 @@ enum fixed_addresses { | |||
99 | */ | 99 | */ |
100 | #define NR_FIX_BTMAPS 64 | 100 | #define NR_FIX_BTMAPS 64 |
101 | #define FIX_BTMAPS_NESTING 4 | 101 | #define FIX_BTMAPS_NESTING 4 |
102 | FIX_BTMAP_END = | 102 | FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 512 - |
103 | __end_of_permanent_fixed_addresses + 512 - | ||
104 | (__end_of_permanent_fixed_addresses & 511), | 103 | (__end_of_permanent_fixed_addresses & 511), |
105 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1, | 104 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1, |
106 | FIX_WP_TEST, | 105 | FIX_WP_TEST, |
@@ -110,20 +109,20 @@ enum fixed_addresses { | |||
110 | __end_of_fixed_addresses | 109 | __end_of_fixed_addresses |
111 | }; | 110 | }; |
112 | 111 | ||
113 | extern void __set_fixmap (enum fixed_addresses idx, | 112 | extern void __set_fixmap(enum fixed_addresses idx, |
114 | unsigned long phys, pgprot_t flags); | 113 | unsigned long phys, pgprot_t flags); |
115 | extern void reserve_top_address(unsigned long reserve); | 114 | extern void reserve_top_address(unsigned long reserve); |
116 | 115 | ||
117 | #define set_fixmap(idx, phys) \ | 116 | #define set_fixmap(idx, phys) \ |
118 | __set_fixmap(idx, phys, PAGE_KERNEL) | 117 | __set_fixmap(idx, phys, PAGE_KERNEL) |
119 | /* | 118 | /* |
120 | * Some hardware wants to get fixmapped without caching. | 119 | * Some hardware wants to get fixmapped without caching. |
121 | */ | 120 | */ |
122 | #define set_fixmap_nocache(idx, phys) \ | 121 | #define set_fixmap_nocache(idx, phys) \ |
123 | __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) | 122 | __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) |
124 | 123 | ||
125 | #define clear_fixmap(idx) \ | 124 | #define clear_fixmap(idx) \ |
126 | __set_fixmap(idx, 0, __pgprot(0)) | 125 | __set_fixmap(idx, 0, __pgprot(0)) |
127 | 126 | ||
128 | #define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) | 127 | #define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) |
129 | 128 | ||
@@ -156,7 +155,7 @@ static __always_inline unsigned long fix_to_virt(const unsigned int idx) | |||
156 | if (idx >= __end_of_fixed_addresses) | 155 | if (idx >= __end_of_fixed_addresses) |
157 | __this_fixmap_does_not_exist(); | 156 | __this_fixmap_does_not_exist(); |
158 | 157 | ||
159 | return __fix_to_virt(idx); | 158 | return __fix_to_virt(idx); |
160 | } | 159 | } |
161 | 160 | ||
162 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | 161 | static inline unsigned long virt_to_fix(const unsigned long vaddr) |
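fix_to_virt(), visible at the tail of this hunk, is the classic link-time assertion: it is __always_inline, so for a compile-time-constant index the bounds check folds away; if the compiler cannot prove the index valid, the call to the never-defined __this_fixmap_does_not_exist() survives and the build fails at link time. A minimal sketch of the trick, assuming optimization is enabled as in a kernel build (names here are illustrative):

#include <stdio.h>

/* Deliberately never defined: any surviving reference fails at link time. */
extern void __index_out_of_range(void);

static const unsigned long table[4] = { 10, 20, 30, 40 };

static inline __attribute__((always_inline))
unsigned long table_get(unsigned int idx)
{
	/* With optimization on, a constant in-range idx folds this branch
	 * away; a provably bad constant keeps the call and the link fails. */
	if (idx >= 4)
		__index_out_of_range();
	return table[idx];
}

int main(void)
{
	printf("%lu\n", table_get(2));		/* links and prints 30 */
	/* printf("%lu\n", table_get(9)); */	/* would fail to link */
	return 0;
}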
diff --git a/include/asm-x86/fixmap_64.h b/include/asm-x86/fixmap_64.h index 70ddb21e6458..f3d76858c0e6 100644 --- a/include/asm-x86/fixmap_64.h +++ b/include/asm-x86/fixmap_64.h | |||
@@ -34,32 +34,34 @@ | |||
34 | 34 | ||
35 | enum fixed_addresses { | 35 | enum fixed_addresses { |
36 | VSYSCALL_LAST_PAGE, | 36 | VSYSCALL_LAST_PAGE, |
37 | VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, | 37 | VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE |
38 | + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, | ||
38 | VSYSCALL_HPET, | 39 | VSYSCALL_HPET, |
39 | FIX_DBGP_BASE, | 40 | FIX_DBGP_BASE, |
40 | FIX_EARLYCON_MEM_BASE, | 41 | FIX_EARLYCON_MEM_BASE, |
41 | FIX_HPET_BASE, | 42 | FIX_HPET_BASE, |
42 | FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ | 43 | FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ |
43 | FIX_IO_APIC_BASE_0, | 44 | FIX_IO_APIC_BASE_0, |
44 | FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, | 45 | FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, |
45 | FIX_EFI_IO_MAP_LAST_PAGE, | 46 | FIX_EFI_IO_MAP_LAST_PAGE, |
46 | FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE+MAX_EFI_IO_PAGES-1, | 47 | FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE |
48 | + MAX_EFI_IO_PAGES - 1, | ||
47 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT | 49 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT |
48 | FIX_OHCI1394_BASE, | 50 | FIX_OHCI1394_BASE, |
49 | #endif | 51 | #endif |
50 | __end_of_fixed_addresses | 52 | __end_of_fixed_addresses |
51 | }; | 53 | }; |
52 | 54 | ||
53 | extern void __set_fixmap (enum fixed_addresses idx, | 55 | extern void __set_fixmap(enum fixed_addresses idx, |
54 | unsigned long phys, pgprot_t flags); | 56 | unsigned long phys, pgprot_t flags); |
55 | 57 | ||
56 | #define set_fixmap(idx, phys) \ | 58 | #define set_fixmap(idx, phys) \ |
57 | __set_fixmap(idx, phys, PAGE_KERNEL) | 59 | __set_fixmap(idx, phys, PAGE_KERNEL) |
58 | /* | 60 | /* |
59 | * Some hardware wants to get fixmapped without caching. | 61 | * Some hardware wants to get fixmapped without caching. |
60 | */ | 62 | */ |
61 | #define set_fixmap_nocache(idx, phys) \ | 63 | #define set_fixmap_nocache(idx, phys) \ |
62 | __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) | 64 | __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) |
63 | 65 | ||
64 | #define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) | 66 | #define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) |
65 | #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) | 67 | #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) |
diff --git a/include/asm-x86/floppy.h b/include/asm-x86/floppy.h index a48d7153c097..dbe82a5c5eac 100644 --- a/include/asm-x86/floppy.h +++ b/include/asm-x86/floppy.h | |||
@@ -20,20 +20,21 @@ | |||
20 | * driver otherwise. It doesn't matter much for performance anyway, as most | 20 | * driver otherwise. It doesn't matter much for performance anyway, as most |
21 | * floppy accesses go through the track buffer. | 21 | * floppy accesses go through the track buffer. |
22 | */ | 22 | */ |
23 | #define _CROSS_64KB(a,s,vdma) \ | 23 | #define _CROSS_64KB(a, s, vdma) \ |
24 | (!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) | 24 | (!(vdma) && \ |
25 | ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) | ||
25 | 26 | ||
26 | #define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1) | 27 | #define CROSS_64KB(a, s) _CROSS_64KB(a, s, use_virtual_dma & 1) |
27 | 28 | ||
28 | 29 | ||
29 | #define SW fd_routine[use_virtual_dma&1] | 30 | #define SW fd_routine[use_virtual_dma & 1] |
30 | #define CSW fd_routine[can_use_virtual_dma & 1] | 31 | #define CSW fd_routine[can_use_virtual_dma & 1] |
31 | 32 | ||
32 | 33 | ||
33 | #define fd_inb(port) inb_p(port) | 34 | #define fd_inb(port) inb_p(port) |
34 | #define fd_outb(value,port) outb_p(value,port) | 35 | #define fd_outb(value, port) outb_p(value, port) |
35 | 36 | ||
36 | #define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy") | 37 | #define fd_request_dma() CSW._request_dma(FLOPPY_DMA, "floppy") |
37 | #define fd_free_dma() CSW._free_dma(FLOPPY_DMA) | 38 | #define fd_free_dma() CSW._free_dma(FLOPPY_DMA) |
38 | #define fd_enable_irq() enable_irq(FLOPPY_IRQ) | 39 | #define fd_enable_irq() enable_irq(FLOPPY_IRQ) |
39 | #define fd_disable_irq() disable_irq(FLOPPY_IRQ) | 40 | #define fd_disable_irq() disable_irq(FLOPPY_IRQ) |
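_CROSS_64KB() above encodes a classic ISA DMA constraint: the 8237 controller's address counter wraps inside a 64KB page, so a transfer must not straddle a 64KB boundary (unless virtual DMA is in use, which the vdma argument short-circuits). Dividing the addresses of the first and last byte by 64K and comparing asks whether both land in the same page. A standalone sketch with K_64 spelled out (the addresses are illustrative):

#include <stdio.h>
#include <stdint.h>

#define K_64	0x10000UL	/* 64KB ISA DMA page */

/* Nonzero if [addr, addr + size) straddles a 64KB boundary. */
static int cross_64kb(uintptr_t addr, unsigned long size)
{
	return addr / K_64 != (addr + size - 1) / K_64;
}

int main(void)
{
	/* 512 bytes ending exactly at a 64KB boundary: OK. */
	printf("%d\n", cross_64kb(0x1FE00, 0x200));	/* 0 */
	/* One byte further and the transfer straddles it. */
	printf("%d\n", cross_64kb(0x1FE01, 0x200));	/* 1 */
	return 0;
}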
@@ -52,64 +53,64 @@ static int doing_pdma; | |||
52 | 53 | ||
53 | static irqreturn_t floppy_hardint(int irq, void *dev_id) | 54 | static irqreturn_t floppy_hardint(int irq, void *dev_id) |
54 | { | 55 | { |
55 | register unsigned char st; | 56 | unsigned char st; |
56 | 57 | ||
57 | #undef TRACE_FLPY_INT | 58 | #undef TRACE_FLPY_INT |
58 | 59 | ||
59 | #ifdef TRACE_FLPY_INT | 60 | #ifdef TRACE_FLPY_INT |
60 | static int calls=0; | 61 | static int calls; |
61 | static int bytes=0; | 62 | static int bytes; |
62 | static int dma_wait=0; | 63 | static int dma_wait; |
63 | #endif | 64 | #endif |
64 | if (!doing_pdma) | 65 | if (!doing_pdma) |
65 | return floppy_interrupt(irq, dev_id); | 66 | return floppy_interrupt(irq, dev_id); |
66 | 67 | ||
67 | #ifdef TRACE_FLPY_INT | 68 | #ifdef TRACE_FLPY_INT |
68 | if(!calls) | 69 | if (!calls) |
69 | bytes = virtual_dma_count; | 70 | bytes = virtual_dma_count; |
70 | #endif | 71 | #endif |
71 | 72 | ||
72 | { | 73 | { |
73 | register int lcount; | 74 | int lcount; |
74 | register char *lptr; | 75 | char *lptr; |
75 | 76 | ||
76 | st = 1; | 77 | st = 1; |
77 | for(lcount=virtual_dma_count, lptr=virtual_dma_addr; | 78 | for (lcount = virtual_dma_count, lptr = virtual_dma_addr; |
78 | lcount; lcount--, lptr++) { | 79 | lcount; lcount--, lptr++) { |
79 | st=inb(virtual_dma_port+4) & 0xa0 ; | 80 | st = inb(virtual_dma_port + 4) & 0xa0; |
80 | if(st != 0xa0) | 81 | if (st != 0xa0) |
81 | break; | 82 | break; |
82 | if(virtual_dma_mode) | 83 | if (virtual_dma_mode) |
83 | outb_p(*lptr, virtual_dma_port+5); | 84 | outb_p(*lptr, virtual_dma_port + 5); |
84 | else | 85 | else |
85 | *lptr = inb_p(virtual_dma_port+5); | 86 | *lptr = inb_p(virtual_dma_port + 5); |
86 | } | 87 | } |
87 | virtual_dma_count = lcount; | 88 | virtual_dma_count = lcount; |
88 | virtual_dma_addr = lptr; | 89 | virtual_dma_addr = lptr; |
89 | st = inb(virtual_dma_port+4); | 90 | st = inb(virtual_dma_port + 4); |
90 | } | 91 | } |
91 | 92 | ||
92 | #ifdef TRACE_FLPY_INT | 93 | #ifdef TRACE_FLPY_INT |
93 | calls++; | 94 | calls++; |
94 | #endif | 95 | #endif |
95 | if(st == 0x20) | 96 | if (st == 0x20) |
96 | return IRQ_HANDLED; | 97 | return IRQ_HANDLED; |
97 | if(!(st & 0x20)) { | 98 | if (!(st & 0x20)) { |
98 | virtual_dma_residue += virtual_dma_count; | 99 | virtual_dma_residue += virtual_dma_count; |
99 | virtual_dma_count=0; | 100 | virtual_dma_count = 0; |
100 | #ifdef TRACE_FLPY_INT | 101 | #ifdef TRACE_FLPY_INT |
101 | printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", | 102 | printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", |
102 | virtual_dma_count, virtual_dma_residue, calls, bytes, | 103 | virtual_dma_count, virtual_dma_residue, calls, bytes, |
103 | dma_wait); | 104 | dma_wait); |
104 | calls = 0; | 105 | calls = 0; |
105 | dma_wait=0; | 106 | dma_wait = 0; |
106 | #endif | 107 | #endif |
107 | doing_pdma = 0; | 108 | doing_pdma = 0; |
108 | floppy_interrupt(irq, dev_id); | 109 | floppy_interrupt(irq, dev_id); |
109 | return IRQ_HANDLED; | 110 | return IRQ_HANDLED; |
110 | } | 111 | } |
111 | #ifdef TRACE_FLPY_INT | 112 | #ifdef TRACE_FLPY_INT |
112 | if(!virtual_dma_count) | 113 | if (!virtual_dma_count) |
113 | dma_wait++; | 114 | dma_wait++; |
114 | #endif | 115 | #endif |
115 | return IRQ_HANDLED; | 116 | return IRQ_HANDLED; |
@@ -117,14 +118,14 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id) | |||
117 | 118 | ||
118 | static void fd_disable_dma(void) | 119 | static void fd_disable_dma(void) |
119 | { | 120 | { |
120 | if(! (can_use_virtual_dma & 1)) | 121 | if (!(can_use_virtual_dma & 1)) |
121 | disable_dma(FLOPPY_DMA); | 122 | disable_dma(FLOPPY_DMA); |
122 | doing_pdma = 0; | 123 | doing_pdma = 0; |
123 | virtual_dma_residue += virtual_dma_count; | 124 | virtual_dma_residue += virtual_dma_count; |
124 | virtual_dma_count=0; | 125 | virtual_dma_count = 0; |
125 | } | 126 | } |
126 | 127 | ||
127 | static int vdma_request_dma(unsigned int dmanr, const char * device_id) | 128 | static int vdma_request_dma(unsigned int dmanr, const char *device_id) |
128 | { | 129 | { |
129 | return 0; | 130 | return 0; |
130 | } | 131 | } |
@@ -142,7 +143,7 @@ static int vdma_get_dma_residue(unsigned int dummy) | |||
142 | 143 | ||
143 | static int fd_request_irq(void) | 144 | static int fd_request_irq(void) |
144 | { | 145 | { |
145 | if(can_use_virtual_dma) | 146 | if (can_use_virtual_dma) |
146 | return request_irq(FLOPPY_IRQ, floppy_hardint, | 147 | return request_irq(FLOPPY_IRQ, floppy_hardint, |
147 | IRQF_DISABLED, "floppy", NULL); | 148 | IRQF_DISABLED, "floppy", NULL); |
148 | else | 149 | else |
@@ -152,13 +153,13 @@ static int fd_request_irq(void) | |||
152 | 153 | ||
153 | static unsigned long dma_mem_alloc(unsigned long size) | 154 | static unsigned long dma_mem_alloc(unsigned long size) |
154 | { | 155 | { |
155 | return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY,get_order(size)); | 156 | return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY, get_order(size)); |
156 | } | 157 | } |
157 | 158 | ||
158 | 159 | ||
159 | static unsigned long vdma_mem_alloc(unsigned long size) | 160 | static unsigned long vdma_mem_alloc(unsigned long size) |
160 | { | 161 | { |
161 | return (unsigned long) vmalloc(size); | 162 | return (unsigned long)vmalloc(size); |
162 | 163 | ||
163 | } | 164 | } |
164 | 165 | ||
@@ -166,7 +167,7 @@ static unsigned long vdma_mem_alloc(unsigned long size) | |||
166 | 167 | ||
167 | static void _fd_dma_mem_free(unsigned long addr, unsigned long size) | 168 | static void _fd_dma_mem_free(unsigned long addr, unsigned long size) |
168 | { | 169 | { |
169 | if((unsigned long) addr >= (unsigned long) high_memory) | 170 | if ((unsigned long)addr >= (unsigned long)high_memory) |
170 | vfree((void *)addr); | 171 | vfree((void *)addr); |
171 | else | 172 | else |
172 | free_pages(addr, get_order(size)); | 173 | free_pages(addr, get_order(size)); |
@@ -176,10 +177,10 @@ static void _fd_dma_mem_free(unsigned long addr, unsigned long size) | |||
176 | 177 | ||
177 | static void _fd_chose_dma_mode(char *addr, unsigned long size) | 178 | static void _fd_chose_dma_mode(char *addr, unsigned long size) |
178 | { | 179 | { |
179 | if(can_use_virtual_dma == 2) { | 180 | if (can_use_virtual_dma == 2) { |
180 | if((unsigned long) addr >= (unsigned long) high_memory || | 181 | if ((unsigned long)addr >= (unsigned long)high_memory || |
181 | isa_virt_to_bus(addr) >= 0x1000000 || | 182 | isa_virt_to_bus(addr) >= 0x1000000 || |
182 | _CROSS_64KB(addr, size, 0)) | 183 | _CROSS_64KB(addr, size, 0)) |
183 | use_virtual_dma = 1; | 184 | use_virtual_dma = 1; |
184 | else | 185 | else |
185 | use_virtual_dma = 0; | 186 | use_virtual_dma = 0; |
@@ -195,7 +196,7 @@ static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) | |||
195 | { | 196 | { |
196 | doing_pdma = 1; | 197 | doing_pdma = 1; |
197 | virtual_dma_port = io; | 198 | virtual_dma_port = io; |
198 | virtual_dma_mode = (mode == DMA_MODE_WRITE); | 199 | virtual_dma_mode = (mode == DMA_MODE_WRITE); |
199 | virtual_dma_addr = addr; | 200 | virtual_dma_addr = addr; |
200 | virtual_dma_count = size; | 201 | virtual_dma_count = size; |
201 | virtual_dma_residue = 0; | 202 | virtual_dma_residue = 0; |
@@ -213,18 +214,18 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io) | |||
213 | /* actual, physical DMA */ | 214 | /* actual, physical DMA */ |
214 | doing_pdma = 0; | 215 | doing_pdma = 0; |
215 | clear_dma_ff(FLOPPY_DMA); | 216 | clear_dma_ff(FLOPPY_DMA); |
216 | set_dma_mode(FLOPPY_DMA,mode); | 217 | set_dma_mode(FLOPPY_DMA, mode); |
217 | set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr)); | 218 | set_dma_addr(FLOPPY_DMA, isa_virt_to_bus(addr)); |
218 | set_dma_count(FLOPPY_DMA,size); | 219 | set_dma_count(FLOPPY_DMA, size); |
219 | enable_dma(FLOPPY_DMA); | 220 | enable_dma(FLOPPY_DMA); |
220 | return 0; | 221 | return 0; |
221 | } | 222 | } |
222 | 223 | ||
223 | static struct fd_routine_l { | 224 | static struct fd_routine_l { |
224 | int (*_request_dma)(unsigned int dmanr, const char * device_id); | 225 | int (*_request_dma)(unsigned int dmanr, const char *device_id); |
225 | void (*_free_dma)(unsigned int dmanr); | 226 | void (*_free_dma)(unsigned int dmanr); |
226 | int (*_get_dma_residue)(unsigned int dummy); | 227 | int (*_get_dma_residue)(unsigned int dummy); |
227 | unsigned long (*_dma_mem_alloc) (unsigned long size); | 228 | unsigned long (*_dma_mem_alloc)(unsigned long size); |
228 | int (*_dma_setup)(char *addr, unsigned long size, int mode, int io); | 229 | int (*_dma_setup)(char *addr, unsigned long size, int mode, int io); |
229 | } fd_routine[] = { | 230 | } fd_routine[] = { |
230 | { | 231 | { |
@@ -252,7 +253,8 @@ static int FDC2 = -1; | |||
252 | * is needed to prevent corrupted CMOS RAM in case "insmod floppy" | 253 | * is needed to prevent corrupted CMOS RAM in case "insmod floppy" |
253 | * coincides with another rtc CMOS user. Paul G. | 254 | * coincides with another rtc CMOS user. Paul G. |
254 | */ | 255 | */ |
255 | #define FLOPPY0_TYPE ({ \ | 256 | #define FLOPPY0_TYPE \ |
257 | ({ \ | ||
256 | unsigned long flags; \ | 258 | unsigned long flags; \ |
257 | unsigned char val; \ | 259 | unsigned char val; \ |
258 | spin_lock_irqsave(&rtc_lock, flags); \ | 260 | spin_lock_irqsave(&rtc_lock, flags); \ |
@@ -261,7 +263,8 @@ static int FDC2 = -1; | |||
261 | val; \ | 263 | val; \ |
262 | }) | 264 | }) |
263 | 265 | ||
264 | #define FLOPPY1_TYPE ({ \ | 266 | #define FLOPPY1_TYPE \ |
267 | ({ \ | ||
265 | unsigned long flags; \ | 268 | unsigned long flags; \ |
266 | unsigned char val; \ | 269 | unsigned char val; \ |
267 | spin_lock_irqsave(&rtc_lock, flags); \ | 270 | spin_lock_irqsave(&rtc_lock, flags); \ |
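FLOPPY0_TYPE/FLOPPY1_TYPE are GNU statement expressions: ({ ... }) lets the macro declare locals and hold rtc_lock around the CMOS access, yet still yield a value, because the block evaluates to its last expression (val). A user-space sketch of the idiom with the lock and the CMOS register replaced by stubs:

#include <stdio.h>

static int fake_cmos[128] = { [0x10] = 0x45 };	/* drive-type nibbles */

static void lock(void)   { /* spin_lock_irqsave() stand-in */ }
static void unlock(void) { /* spin_unlock_irqrestore() stand-in */ }

/* GNU statement expression: the block evaluates to its last
 * expression (val), so the macro reads like a function call while
 * still bracketing the register access with the lock. */
#define FLOPPY0_TYPE				\
({						\
	unsigned char val;			\
	lock();					\
	val = (fake_cmos[0x10] >> 4) & 15;	\
	unlock();				\
	val;					\
})

int main(void)
{
	printf("floppy 0 type: %u\n", FLOPPY0_TYPE);	/* 4 */
	return 0;
}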
diff --git a/include/asm-x86/futex.h b/include/asm-x86/futex.h index c9952ea9f698..ac0fbf24d722 100644 --- a/include/asm-x86/futex.h +++ b/include/asm-x86/futex.h | |||
@@ -12,35 +12,32 @@ | |||
12 | #include <asm/uaccess.h> | 12 | #include <asm/uaccess.h> |
13 | 13 | ||
14 | #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ | 14 | #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ |
15 | __asm__ __volatile( \ | 15 | asm volatile("1:\t" insn "\n" \ |
16 | "1: " insn "\n" \ | 16 | "2:\t.section .fixup,\"ax\"\n" \ |
17 | "2: .section .fixup,\"ax\"\n \ | 17 | "3:\tmov\t%3, %1\n" \ |
18 | 3: mov %3, %1\n \ | 18 | "\tjmp\t2b\n" \ |
19 | jmp 2b\n \ | 19 | "\t.previous\n" \ |
20 | .previous\n" \ | 20 | _ASM_EXTABLE(1b, 3b) \ |
21 | _ASM_EXTABLE(1b,3b) \ | 21 | : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ |
22 | : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ | 22 | : "i" (-EFAULT), "0" (oparg), "1" (0)) |
23 | : "i" (-EFAULT), "0" (oparg), "1" (0)) | ||
24 | 23 | ||
25 | #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ | 24 | #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ |
26 | __asm__ __volatile( \ | 25 | asm volatile("1:\tmovl %2, %0\n" \ |
27 | "1: movl %2, %0\n \ | 26 | "\tmovl\t%0, %3\n" \ |
28 | movl %0, %3\n" \ | 27 | "\t" insn "\n" \ |
29 | insn "\n" \ | 28 | "2:\tlock; cmpxchgl %3, %2\n" \ |
30 | "2: lock; cmpxchgl %3, %2\n \ | 29 | "\tjnz\t1b\n" \ |
31 | jnz 1b\n \ | 30 | "3:\t.section .fixup,\"ax\"\n" \ |
32 | 3: .section .fixup,\"ax\"\n \ | 31 | "4:\tmov\t%5, %1\n" \ |
33 | 4: mov %5, %1\n \ | 32 | "\tjmp\t3b\n" \ |
34 | jmp 3b\n \ | 33 | "\t.previous\n" \ |
35 | .previous\n" \ | 34 | _ASM_EXTABLE(1b, 4b) \ |
36 | _ASM_EXTABLE(1b,4b) \ | 35 | _ASM_EXTABLE(2b, 4b) \ |
37 | _ASM_EXTABLE(2b,4b) \ | 36 | : "=&a" (oldval), "=&r" (ret), \ |
38 | : "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \ | 37 | "+m" (*uaddr), "=&r" (tem) \ |
39 | "=&r" (tem) \ | 38 | : "r" (oparg), "i" (-EFAULT), "1" (0)) |
40 | : "r" (oparg), "i" (-EFAULT), "1" (0)) | 39 | |
41 | 40 | static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |
42 | static inline int | ||
43 | futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | ||
44 | { | 41 | { |
45 | int op = (encoded_op >> 28) & 7; | 42 | int op = (encoded_op >> 28) & 7; |
46 | int cmp = (encoded_op >> 24) & 15; | 43 | int cmp = (encoded_op >> 24) & 15; |
@@ -87,20 +84,33 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |||
87 | 84 | ||
88 | if (!ret) { | 85 | if (!ret) { |
89 | switch (cmp) { | 86 | switch (cmp) { |
90 | case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; | 87 | case FUTEX_OP_CMP_EQ: |
91 | case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; | 88 | ret = (oldval == cmparg); |
92 | case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; | 89 | break; |
93 | case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; | 90 | case FUTEX_OP_CMP_NE: |
94 | case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; | 91 | ret = (oldval != cmparg); |
95 | case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; | 92 | break; |
96 | default: ret = -ENOSYS; | 93 | case FUTEX_OP_CMP_LT: |
94 | ret = (oldval < cmparg); | ||
95 | break; | ||
96 | case FUTEX_OP_CMP_GE: | ||
97 | ret = (oldval >= cmparg); | ||
98 | break; | ||
99 | case FUTEX_OP_CMP_LE: | ||
100 | ret = (oldval <= cmparg); | ||
101 | break; | ||
102 | case FUTEX_OP_CMP_GT: | ||
103 | ret = (oldval > cmparg); | ||
104 | break; | ||
105 | default: | ||
106 | ret = -ENOSYS; | ||
97 | } | 107 | } |
98 | } | 108 | } |
99 | return ret; | 109 | return ret; |
100 | } | 110 | } |
101 | 111 | ||
102 | static inline int | 112 | static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, |
103 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | 113 | int newval) |
104 | { | 114 | { |
105 | 115 | ||
106 | #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) | 116 | #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) |
@@ -112,16 +122,15 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | |||
112 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 122 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) |
113 | return -EFAULT; | 123 | return -EFAULT; |
114 | 124 | ||
115 | __asm__ __volatile__( | 125 | asm volatile("1:\tlock; cmpxchgl %3, %1\n" |
116 | "1: lock; cmpxchgl %3, %1 \n" | 126 | "2:\t.section .fixup, \"ax\"\n" |
117 | "2: .section .fixup, \"ax\" \n" | 127 | "3:\tmov %2, %0\n" |
118 | "3: mov %2, %0 \n" | 128 | "\tjmp 2b\n" |
119 | " jmp 2b \n" | 129 | "\t.previous\n" |
120 | " .previous \n" | 130 | _ASM_EXTABLE(1b, 3b) |
121 | _ASM_EXTABLE(1b,3b) | 131 | : "=a" (oldval), "+m" (*uaddr) |
122 | : "=a" (oldval), "+m" (*uaddr) | 132 | : "i" (-EFAULT), "r" (newval), "0" (oldval) |
123 | : "i" (-EFAULT), "r" (newval), "0" (oldval) | 133 | : "memory" |
124 | : "memory" | ||
125 | ); | 134 | ); |
126 | 135 | ||
127 | return oldval; | 136 | return oldval; |
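__futex_atomic_op2 above is the generic read/compute/cmpxchg retry loop: load the old value, apply the operation in a register, and loop on "lock cmpxchgl" until no other CPU has changed the word in between. The same shape in portable C, using GCC's __atomic builtins instead of inline asm and a plain int instead of a user pointer (so no exception-table fixups are needed):

#include <stdio.h>

/* Atomically apply "old | arg" to *addr and return the old value,
 * mirroring the FUTEX_OP_OR path of __futex_atomic_op2. */
static int atomic_fetch_or_int(int *addr, int arg)
{
	int old = __atomic_load_n(addr, __ATOMIC_RELAXED);

	/* Retry until the word is unchanged between load and exchange;
	 * a failed exchange refreshes old with the current value. */
	while (!__atomic_compare_exchange_n(addr, &old, old | arg,
					    0, __ATOMIC_SEQ_CST,
					    __ATOMIC_RELAXED))
		;
	return old;
}

int main(void)
{
	int v = 0x5;

	printf("old=%#x new=%#x\n", atomic_fetch_or_int(&v, 0x2), v);
	return 0;
}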
diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h index 33e3ffe1766c..f1b96932746b 100644 --- a/include/asm-x86/genapic_32.h +++ b/include/asm-x86/genapic_32.h | |||
@@ -14,23 +14,22 @@ | |||
14 | * Copyright 2003 Andi Kleen, SuSE Labs. | 14 | * Copyright 2003 Andi Kleen, SuSE Labs. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | struct mpc_config_translation; | ||
18 | struct mpc_config_bus; | 17 | struct mpc_config_bus; |
19 | struct mp_config_table; | 18 | struct mp_config_table; |
20 | struct mpc_config_processor; | 19 | struct mpc_config_processor; |
21 | 20 | ||
22 | struct genapic { | 21 | struct genapic { |
23 | char *name; | 22 | char *name; |
24 | int (*probe)(void); | 23 | int (*probe)(void); |
25 | 24 | ||
26 | int (*apic_id_registered)(void); | 25 | int (*apic_id_registered)(void); |
27 | cpumask_t (*target_cpus)(void); | 26 | cpumask_t (*target_cpus)(void); |
28 | int int_delivery_mode; | 27 | int int_delivery_mode; |
29 | int int_dest_mode; | 28 | int int_dest_mode; |
30 | int ESR_DISABLE; | 29 | int ESR_DISABLE; |
31 | int apic_destination_logical; | 30 | int apic_destination_logical; |
32 | unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); | 31 | unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); |
33 | unsigned long (*check_apicid_present)(int apicid); | 32 | unsigned long (*check_apicid_present)(int apicid); |
34 | int no_balance_irq; | 33 | int no_balance_irq; |
35 | int no_ioapic_check; | 34 | int no_ioapic_check; |
36 | void (*init_apic_ldr)(void); | 35 | void (*init_apic_ldr)(void); |
@@ -38,28 +37,21 @@ struct genapic { | |||
38 | 37 | ||
39 | void (*setup_apic_routing)(void); | 38 | void (*setup_apic_routing)(void); |
40 | int (*multi_timer_check)(int apic, int irq); | 39 | int (*multi_timer_check)(int apic, int irq); |
41 | int (*apicid_to_node)(int logical_apicid); | 40 | int (*apicid_to_node)(int logical_apicid); |
42 | int (*cpu_to_logical_apicid)(int cpu); | 41 | int (*cpu_to_logical_apicid)(int cpu); |
43 | int (*cpu_present_to_apicid)(int mps_cpu); | 42 | int (*cpu_present_to_apicid)(int mps_cpu); |
44 | physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); | 43 | physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); |
45 | int (*mpc_apic_id)(struct mpc_config_processor *m, | 44 | void (*setup_portio_remap)(void); |
46 | struct mpc_config_translation *t); | ||
47 | void (*setup_portio_remap)(void); | ||
48 | int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); | 45 | int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); |
49 | void (*enable_apic_mode)(void); | 46 | void (*enable_apic_mode)(void); |
50 | u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb); | 47 | u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb); |
51 | 48 | ||
52 | /* mpparse */ | 49 | /* mpparse */ |
53 | void (*mpc_oem_bus_info)(struct mpc_config_bus *, char *, | ||
54 | struct mpc_config_translation *); | ||
55 | void (*mpc_oem_pci_bus)(struct mpc_config_bus *, | ||
56 | struct mpc_config_translation *); | ||
57 | |||
58 | /* When one of the next two hooks returns 1 the genapic | 50 | /* When one of the next two hooks returns 1 the genapic |
59 | is switched to this. Essentially they are additional probe | 51 | is switched to this. Essentially they are additional probe |
60 | functions. */ | 52 | functions. */ |
61 | int (*mps_oem_check)(struct mp_config_table *mpc, char *oem, | 53 | int (*mps_oem_check)(struct mp_config_table *mpc, char *oem, |
62 | char *productid); | 54 | char *productid); |
63 | int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); | 55 | int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); |
64 | 56 | ||
65 | unsigned (*get_apic_id)(unsigned long x); | 57 | unsigned (*get_apic_id)(unsigned long x); |
@@ -72,7 +64,7 @@ struct genapic { | |||
72 | void (*send_IPI_allbutself)(int vector); | 64 | void (*send_IPI_allbutself)(int vector); |
73 | void (*send_IPI_all)(int vector); | 65 | void (*send_IPI_all)(int vector); |
74 | #endif | 66 | #endif |
75 | }; | 67 | }; |
76 | 68 | ||
77 | #define APICFUNC(x) .x = x, | 69 | #define APICFUNC(x) .x = x, |
78 | 70 | ||
@@ -85,43 +77,46 @@ struct genapic { | |||
85 | #define IPIFUNC(x) | 77 | #define IPIFUNC(x) |
86 | #endif | 78 | #endif |
87 | 79 | ||
88 | #define APIC_INIT(aname, aprobe) { \ | 80 | #define APIC_INIT(aname, aprobe) \ |
89 | .name = aname, \ | 81 | { \ |
90 | .probe = aprobe, \ | 82 | .name = aname, \ |
91 | .int_delivery_mode = INT_DELIVERY_MODE, \ | 83 | .probe = aprobe, \ |
92 | .int_dest_mode = INT_DEST_MODE, \ | 84 | .int_delivery_mode = INT_DELIVERY_MODE, \ |
93 | .no_balance_irq = NO_BALANCE_IRQ, \ | 85 | .int_dest_mode = INT_DEST_MODE, \ |
94 | .ESR_DISABLE = esr_disable, \ | 86 | .no_balance_irq = NO_BALANCE_IRQ, \ |
95 | .apic_destination_logical = APIC_DEST_LOGICAL, \ | 87 | .ESR_DISABLE = esr_disable, \ |
96 | APICFUNC(apic_id_registered) \ | 88 | .apic_destination_logical = APIC_DEST_LOGICAL, \ |
97 | APICFUNC(target_cpus) \ | 89 | APICFUNC(apic_id_registered) \ |
98 | APICFUNC(check_apicid_used) \ | 90 | APICFUNC(target_cpus) \ |
99 | APICFUNC(check_apicid_present) \ | 91 | APICFUNC(check_apicid_used) \ |
100 | APICFUNC(init_apic_ldr) \ | 92 | APICFUNC(check_apicid_present) \ |
101 | APICFUNC(ioapic_phys_id_map) \ | 93 | APICFUNC(init_apic_ldr) \ |
102 | APICFUNC(setup_apic_routing) \ | 94 | APICFUNC(ioapic_phys_id_map) \ |
103 | APICFUNC(multi_timer_check) \ | 95 | APICFUNC(setup_apic_routing) \ |
104 | APICFUNC(apicid_to_node) \ | 96 | APICFUNC(multi_timer_check) \ |
105 | APICFUNC(cpu_to_logical_apicid) \ | 97 | APICFUNC(apicid_to_node) \ |
106 | APICFUNC(cpu_present_to_apicid) \ | 98 | APICFUNC(cpu_to_logical_apicid) \ |
107 | APICFUNC(apicid_to_cpu_present) \ | 99 | APICFUNC(cpu_present_to_apicid) \ |
108 | APICFUNC(mpc_apic_id) \ | 100 | APICFUNC(apicid_to_cpu_present) \ |
109 | APICFUNC(setup_portio_remap) \ | 101 | APICFUNC(setup_portio_remap) \ |
110 | APICFUNC(check_phys_apicid_present) \ | 102 | APICFUNC(check_phys_apicid_present) \ |
111 | APICFUNC(mpc_oem_bus_info) \ | 103 | APICFUNC(mps_oem_check) \ |
112 | APICFUNC(mpc_oem_pci_bus) \ | 104 | APICFUNC(get_apic_id) \ |
113 | APICFUNC(mps_oem_check) \ | 105 | .apic_id_mask = APIC_ID_MASK, \ |
114 | APICFUNC(get_apic_id) \ | 106 | APICFUNC(cpu_mask_to_apicid) \ |
115 | .apic_id_mask = APIC_ID_MASK, \ | 107 | APICFUNC(acpi_madt_oem_check) \ |
116 | APICFUNC(cpu_mask_to_apicid) \ | 108 | IPIFUNC(send_IPI_mask) \ |
117 | APICFUNC(acpi_madt_oem_check) \ | 109 | IPIFUNC(send_IPI_allbutself) \ |
118 | IPIFUNC(send_IPI_mask) \ | 110 | IPIFUNC(send_IPI_all) \ |
119 | IPIFUNC(send_IPI_allbutself) \ | 111 | APICFUNC(enable_apic_mode) \ |
120 | IPIFUNC(send_IPI_all) \ | 112 | APICFUNC(phys_pkg_id) \ |
121 | APICFUNC(enable_apic_mode) \ | 113 | } |
122 | APICFUNC(phys_pkg_id) \ | ||
123 | } | ||
124 | 114 | ||
125 | extern struct genapic *genapic; | 115 | extern struct genapic *genapic; |
126 | 116 | ||
117 | enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; | ||
118 | #define get_uv_system_type() UV_NONE | ||
119 | #define is_uv_system() 0 | ||
120 | |||
121 | |||
127 | #endif | 122 | #endif |
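APIC_INIT() fills a struct genapic with designated initializers, and APICFUNC(x) expands to ".x = x," so each hook is wired to a same-named function supplied by the including sub-arch header. A compact sketch of the pattern (the ops struct and functions here are made up for illustration):

#include <stdio.h>

struct ops {
	const char *name;
	int (*probe)(void);
	int (*start)(void);
};

/* Wire a slot to a same-named function, as APICFUNC() does. */
#define OPSFUNC(x)	.x = x,

static int probe(void) { return 1; }
static int start(void) { return 42; }

#define OPS_INIT(aname)		\
{				\
	.name = aname,		\
	OPSFUNC(probe)		\
	OPSFUNC(start)		\
}

static struct ops my_ops = OPS_INIT("demo");

int main(void)
{
	if (my_ops.probe())
		printf("%s: %d\n", my_ops.name, my_ops.start());
	return 0;
}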
diff --git a/include/asm-x86/genapic_64.h b/include/asm-x86/genapic_64.h index d7e516ccbaa4..1de931b263ce 100644 --- a/include/asm-x86/genapic_64.h +++ b/include/asm-x86/genapic_64.h | |||
@@ -33,5 +33,15 @@ extern struct genapic *genapic; | |||
33 | 33 | ||
34 | extern struct genapic apic_flat; | 34 | extern struct genapic apic_flat; |
35 | extern struct genapic apic_physflat; | 35 | extern struct genapic apic_physflat; |
36 | extern int acpi_madt_oem_check(char *, char *); | ||
37 | |||
38 | enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; | ||
39 | extern enum uv_system_type get_uv_system_type(void); | ||
40 | extern int is_uv_system(void); | ||
41 | |||
42 | extern struct genapic apic_x2apic_uv_x; | ||
43 | DECLARE_PER_CPU(int, x2apic_extra_bits); | ||
44 | extern void uv_cpu_init(void); | ||
45 | extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip); | ||
36 | 46 | ||
37 | #endif | 47 | #endif |
diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h index 9e7280092a48..9870cc1f2f8f 100644 --- a/include/asm-x86/geode.h +++ b/include/asm-x86/geode.h | |||
@@ -167,7 +167,7 @@ static inline int is_geode(void) | |||
167 | /* MFGPTs */ | 167 | /* MFGPTs */ |
168 | 168 | ||
169 | #define MFGPT_MAX_TIMERS 8 | 169 | #define MFGPT_MAX_TIMERS 8 |
170 | #define MFGPT_TIMER_ANY -1 | 170 | #define MFGPT_TIMER_ANY (-1) |
171 | 171 | ||
172 | #define MFGPT_DOMAIN_WORKING 1 | 172 | #define MFGPT_DOMAIN_WORKING 1 |
173 | #define MFGPT_DOMAIN_STANDBY 2 | 173 | #define MFGPT_DOMAIN_STANDBY 2 |
diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h index 479767c9195f..e153f3b44774 100644 --- a/include/asm-x86/highmem.h +++ b/include/asm-x86/highmem.h | |||
@@ -8,7 +8,7 @@ | |||
8 | * Gerhard.Wichert@pdb.siemens.de | 8 | * Gerhard.Wichert@pdb.siemens.de |
9 | * | 9 | * |
10 | * | 10 | * |
11 | * Redesigned the x86 32-bit VM architecture to deal with | 11 | * Redesigned the x86 32-bit VM architecture to deal with |
12 | * up to 16 Terabyte physical memory. With current x86 CPUs | 12 | * up to 16 Terabyte physical memory. With current x86 CPUs |
13 | * we now support up to 64 Gigabytes physical RAM. | 13 | * we now support up to 64 Gigabytes physical RAM. |
14 | * | 14 | * |
diff --git a/include/asm-x86/hw_irq_64.h b/include/asm-x86/hw_irq_64.h index 312a58d6dac6..0062ef390f67 100644 --- a/include/asm-x86/hw_irq_64.h +++ b/include/asm-x86/hw_irq_64.h | |||
@@ -36,7 +36,7 @@ | |||
36 | * cleanup after irq migration. | 36 | * cleanup after irq migration. |
37 | */ | 37 | */ |
38 | #define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR | 38 | #define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR |
39 | 39 | ||
40 | /* | 40 | /* |
41 | * Vectors 0x30-0x3f are used for ISA interrupts. | 41 | * Vectors 0x30-0x3f are used for ISA interrupts. |
42 | */ | 42 | */ |
@@ -159,13 +159,12 @@ extern atomic_t irq_mis_count; | |||
159 | * SMP has a few special interrupts for IPI messages | 159 | * SMP has a few special interrupts for IPI messages |
160 | */ | 160 | */ |
161 | 161 | ||
162 | #define BUILD_IRQ(nr) \ | 162 | #define BUILD_IRQ(nr) \ |
163 | asmlinkage void IRQ_NAME(nr); \ | 163 | asmlinkage void IRQ_NAME(nr); \ |
164 | __asm__( \ | 164 | asm("\n.p2align\n" \ |
165 | "\n.p2align\n" \ | 165 | "IRQ" #nr "_interrupt:\n\t" \ |
166 | "IRQ" #nr "_interrupt:\n\t" \ | 166 | "push $~(" #nr ") ; " \ |
167 | "push $~(" #nr ") ; " \ | 167 | "jmp common_interrupt"); |
168 | "jmp common_interrupt"); | ||
169 | 168 | ||
170 | #define platform_legacy_irq(irq) ((irq) < 16) | 169 | #define platform_legacy_irq(irq) ((irq) < 16) |
171 | 170 | ||
diff --git a/include/asm-x86/hypertransport.h b/include/asm-x86/hypertransport.h index c16c6ff4bdd7..d2bbd238b3e1 100644 --- a/include/asm-x86/hypertransport.h +++ b/include/asm-x86/hypertransport.h | |||
@@ -8,12 +8,14 @@ | |||
8 | #define HT_IRQ_LOW_BASE 0xf8000000 | 8 | #define HT_IRQ_LOW_BASE 0xf8000000 |
9 | 9 | ||
10 | #define HT_IRQ_LOW_VECTOR_SHIFT 16 | 10 | #define HT_IRQ_LOW_VECTOR_SHIFT 16 |
11 | #define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000 | 11 | #define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000 |
12 | #define HT_IRQ_LOW_VECTOR(v) (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK) | 12 | #define HT_IRQ_LOW_VECTOR(v) \ |
13 | (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK) | ||
13 | 14 | ||
14 | #define HT_IRQ_LOW_DEST_ID_SHIFT 8 | 15 | #define HT_IRQ_LOW_DEST_ID_SHIFT 8 |
15 | #define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00 | 16 | #define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00 |
16 | #define HT_IRQ_LOW_DEST_ID(v) (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK) | 17 | #define HT_IRQ_LOW_DEST_ID(v) \ |
18 | (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK) | ||
17 | 19 | ||
18 | #define HT_IRQ_LOW_DM_PHYSICAL 0x0000000 | 20 | #define HT_IRQ_LOW_DM_PHYSICAL 0x0000000 |
19 | #define HT_IRQ_LOW_DM_LOGICAL 0x0000040 | 21 | #define HT_IRQ_LOW_DM_LOGICAL 0x0000040 |
@@ -36,7 +38,8 @@ | |||
36 | 38 | ||
37 | 39 | ||
38 | #define HT_IRQ_HIGH_DEST_ID_SHIFT 0 | 40 | #define HT_IRQ_HIGH_DEST_ID_SHIFT 0 |
39 | #define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff | 41 | #define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff |
40 | #define HT_IRQ_HIGH_DEST_ID(v) ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK) | 42 | #define HT_IRQ_HIGH_DEST_ID(v) \ |
43 | ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK) | ||
41 | 44 | ||
42 | #endif /* ASM_HYPERTRANSPORT_H */ | 45 | #endif /* ASM_HYPERTRANSPORT_H */ |
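The HT_IRQ_LOW_* macros are the usual shift-and-mask field packers: shift the value into position, then mask to the field width so neighbouring fields cannot be corrupted. A tiny demonstration using the vector and destination-ID fields from the hunk above (the packed values are illustrative):

#include <stdio.h>

#define HT_IRQ_LOW_VECTOR_SHIFT		16
#define HT_IRQ_LOW_VECTOR_MASK		0x00ff0000
#define HT_IRQ_LOW_VECTOR(v) \
	(((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)

#define HT_IRQ_LOW_DEST_ID_SHIFT	8
#define HT_IRQ_LOW_DEST_ID_MASK		0x0000ff00
#define HT_IRQ_LOW_DEST_ID(v) \
	(((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)

int main(void)
{
	/* Pack vector 0x31 for destination 0x05 into one register image. */
	unsigned int low = HT_IRQ_LOW_VECTOR(0x31) | HT_IRQ_LOW_DEST_ID(0x05);

	printf("low = %#010x\n", low);	/* 0x00310500 */
	return 0;
}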
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h index f377b76b2f34..54522b814f1c 100644 --- a/include/asm-x86/i387.h +++ b/include/asm-x86/i387.h | |||
@@ -41,7 +41,7 @@ static inline void tolerant_fwait(void) | |||
41 | { | 41 | { |
42 | asm volatile("1: fwait\n" | 42 | asm volatile("1: fwait\n" |
43 | "2:\n" | 43 | "2:\n" |
44 | _ASM_EXTABLE(1b,2b)); | 44 | _ASM_EXTABLE(1b, 2b)); |
45 | } | 45 | } |
46 | 46 | ||
47 | static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) | 47 | static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) |
@@ -54,7 +54,7 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) | |||
54 | "3: movl $-1,%[err]\n" | 54 | "3: movl $-1,%[err]\n" |
55 | " jmp 2b\n" | 55 | " jmp 2b\n" |
56 | ".previous\n" | 56 | ".previous\n" |
57 | _ASM_EXTABLE(1b,3b) | 57 | _ASM_EXTABLE(1b, 3b) |
58 | : [err] "=r" (err) | 58 | : [err] "=r" (err) |
59 | #if 0 /* See comment in __save_init_fpu() below. */ | 59 | #if 0 /* See comment in __save_init_fpu() below. */ |
60 | : [fx] "r" (fx), "m" (*fx), "0" (0)); | 60 | : [fx] "r" (fx), "m" (*fx), "0" (0)); |
@@ -76,11 +76,11 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) | |||
76 | static inline void clear_fpu_state(struct i387_fxsave_struct *fx) | 76 | static inline void clear_fpu_state(struct i387_fxsave_struct *fx) |
77 | { | 77 | { |
78 | if (unlikely(fx->swd & X87_FSW_ES)) | 78 | if (unlikely(fx->swd & X87_FSW_ES)) |
79 | asm volatile("fnclex"); | 79 | asm volatile("fnclex"); |
80 | alternative_input(ASM_NOP8 ASM_NOP2, | 80 | alternative_input(ASM_NOP8 ASM_NOP2, |
81 | " emms\n" /* clear stack tags */ | 81 | " emms\n" /* clear stack tags */ |
82 | " fildl %%gs:0", /* load to clear state */ | 82 | " fildl %%gs:0", /* load to clear state */ |
83 | X86_FEATURE_FXSAVE_LEAK); | 83 | X86_FEATURE_FXSAVE_LEAK); |
84 | } | 84 | } |
85 | 85 | ||
86 | static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) | 86 | static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) |
@@ -93,14 +93,15 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) | |||
93 | "3: movl $-1,%[err]\n" | 93 | "3: movl $-1,%[err]\n" |
94 | " jmp 2b\n" | 94 | " jmp 2b\n" |
95 | ".previous\n" | 95 | ".previous\n" |
96 | _ASM_EXTABLE(1b,3b) | 96 | _ASM_EXTABLE(1b, 3b) |
97 | : [err] "=r" (err), "=m" (*fx) | 97 | : [err] "=r" (err), "=m" (*fx) |
98 | #if 0 /* See comment in __fxsave_clear() below. */ | 98 | #if 0 /* See comment in __fxsave_clear() below. */ |
99 | : [fx] "r" (fx), "0" (0)); | 99 | : [fx] "r" (fx), "0" (0)); |
100 | #else | 100 | #else |
101 | : [fx] "cdaSDb" (fx), "0" (0)); | 101 | : [fx] "cdaSDb" (fx), "0" (0)); |
102 | #endif | 102 | #endif |
103 | if (unlikely(err) && __clear_user(fx, sizeof(struct i387_fxsave_struct))) | 103 | if (unlikely(err) && |
104 | __clear_user(fx, sizeof(struct i387_fxsave_struct))) | ||
104 | err = -EFAULT; | 105 | err = -EFAULT; |
105 | /* No need to clear here because the caller clears USED_MATH */ | 106 | /* No need to clear here because the caller clears USED_MATH */ |
106 | return err; | 107 | return err; |
@@ -156,8 +157,10 @@ static inline int save_i387(struct _fpstate __user *buf) | |||
156 | return 0; | 157 | return 0; |
157 | clear_used_math(); /* trigger finit */ | 158 | clear_used_math(); /* trigger finit */ |
158 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | 159 | if (task_thread_info(tsk)->status & TS_USEDFPU) { |
159 | err = save_i387_checking((struct i387_fxsave_struct __user *)buf); | 160 | err = save_i387_checking((struct i387_fxsave_struct __user *) |
160 | if (err) return err; | 161 | buf); |
162 | if (err) | ||
163 | return err; | ||
161 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | 164 | task_thread_info(tsk)->status &= ~TS_USEDFPU; |
162 | stts(); | 165 | stts(); |
163 | } else { | 166 | } else { |
diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h index 67c319e0efc7..45d4df3e51e6 100644 --- a/include/asm-x86/i8259.h +++ b/include/asm-x86/i8259.h | |||
@@ -1,9 +1,11 @@ | |||
1 | #ifndef __ASM_I8259_H__ | 1 | #ifndef __ASM_I8259_H__ |
2 | #define __ASM_I8259_H__ | 2 | #define __ASM_I8259_H__ |
3 | 3 | ||
4 | #include <linux/delay.h> | ||
5 | |||
4 | extern unsigned int cached_irq_mask; | 6 | extern unsigned int cached_irq_mask; |
5 | 7 | ||
6 | #define __byte(x,y) (((unsigned char *) &(y))[x]) | 8 | #define __byte(x, y) (((unsigned char *)&(y))[x]) |
7 | #define cached_master_mask (__byte(0, cached_irq_mask)) | 9 | #define cached_master_mask (__byte(0, cached_irq_mask)) |
8 | #define cached_slave_mask (__byte(1, cached_irq_mask)) | 10 | #define cached_slave_mask (__byte(1, cached_irq_mask)) |
9 | 11 | ||
@@ -29,7 +31,28 @@ extern void enable_8259A_irq(unsigned int irq); | |||
29 | extern void disable_8259A_irq(unsigned int irq); | 31 | extern void disable_8259A_irq(unsigned int irq); |
30 | extern unsigned int startup_8259A_irq(unsigned int irq); | 32 | extern unsigned int startup_8259A_irq(unsigned int irq); |
31 | 33 | ||
32 | #define inb_pic inb_p | 34 | /* the PIC may need a careful delay on some platforms, hence specific calls */ |
33 | #define outb_pic outb_p | 35 | static inline unsigned char inb_pic(unsigned int port) |
36 | { | ||
37 | unsigned char value = inb(port); | ||
38 | |||
39 | /* | ||
40 | * delay for some accesses to PIC on motherboard or in chipset | ||
41 | * must be at least one microsecond, so be safe here: | ||
42 | */ | ||
43 | udelay(2); | ||
44 | |||
45 | return value; | ||
46 | } | ||
47 | |||
48 | static inline void outb_pic(unsigned char value, unsigned int port) | ||
49 | { | ||
50 | outb(value, port); | ||
51 | /* | ||
52 | * delay for some accesses to PIC on motherboard or in chipset | ||
53 | * must be at least one microsecond, so be safe here: | ||
54 | */ | ||
55 | udelay(2); | ||
56 | } | ||
34 | 57 | ||
35 | #endif /* __ASM_I8259_H__ */ | 58 | #endif /* __ASM_I8259_H__ */ |
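The new inb_pic()/outb_pic() helpers fold the required settle time into every PIC access instead of relying on the port-0x80 pause of inb_p()/outb_p(). The sketch below stubs the port accessors so a typical mask-an-IRQ sequence can run in user space; the stubs and the fake port array are stand-ins, not real hardware access:

#include <stdio.h>

#define PIC_MASTER_IMR	0x21	/* interrupt mask register */

static unsigned char port_space[0x400];	/* fake I/O port space */

static unsigned char inb(unsigned int port) { return port_space[port]; }
static void outb(unsigned char v, unsigned int port) { port_space[port] = v; }
static void udelay(unsigned int usecs) { (void)usecs; /* settle time */ }

static unsigned char inb_pic(unsigned int port)
{
	unsigned char value = inb(port);

	udelay(2);	/* PIC accesses need at least 1us to settle */
	return value;
}

static void outb_pic(unsigned char value, unsigned int port)
{
	outb(value, port);
	udelay(2);
}

int main(void)
{
	/* Mask IRQ 3 on the master PIC: read, set the bit, write back. */
	unsigned char mask = inb_pic(PIC_MASTER_IMR);

	outb_pic(mask | (1 << 3), PIC_MASTER_IMR);
	printf("IMR = %#04x\n", inb_pic(PIC_MASTER_IMR));	/* 0x08 */
	return 0;
}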
diff --git a/include/asm-x86/ia32.h b/include/asm-x86/ia32.h index aa9733206e29..55d3abe5276f 100644 --- a/include/asm-x86/ia32.h +++ b/include/asm-x86/ia32.h | |||
@@ -14,19 +14,19 @@ | |||
14 | 14 | ||
15 | /* signal.h */ | 15 | /* signal.h */ |
16 | struct sigaction32 { | 16 | struct sigaction32 { |
17 | unsigned int sa_handler; /* Really a pointer, but need to deal | 17 | unsigned int sa_handler; /* Really a pointer, but need to deal |
18 | with 32 bits */ | 18 | with 32 bits */ |
19 | unsigned int sa_flags; | 19 | unsigned int sa_flags; |
20 | unsigned int sa_restorer; /* Another 32 bit pointer */ | 20 | unsigned int sa_restorer; /* Another 32 bit pointer */ |
21 | compat_sigset_t sa_mask; /* A 32 bit mask */ | 21 | compat_sigset_t sa_mask; /* A 32 bit mask */ |
22 | }; | 22 | }; |
23 | 23 | ||
24 | struct old_sigaction32 { | 24 | struct old_sigaction32 { |
25 | unsigned int sa_handler; /* Really a pointer, but need to deal | 25 | unsigned int sa_handler; /* Really a pointer, but need to deal |
26 | with 32 bits */ | 26 | with 32 bits */ |
27 | compat_old_sigset_t sa_mask; /* A 32 bit mask */ | 27 | compat_old_sigset_t sa_mask; /* A 32 bit mask */ |
28 | unsigned int sa_flags; | 28 | unsigned int sa_flags; |
29 | unsigned int sa_restorer; /* Another 32 bit pointer */ | 29 | unsigned int sa_restorer; /* Another 32 bit pointer */ |
30 | }; | 30 | }; |
31 | 31 | ||
32 | typedef struct sigaltstack_ia32 { | 32 | typedef struct sigaltstack_ia32 { |
@@ -65,7 +65,7 @@ struct stat64 { | |||
65 | long long st_size; | 65 | long long st_size; |
66 | unsigned int st_blksize; | 66 | unsigned int st_blksize; |
67 | 67 | ||
68 | long long st_blocks;/* Number 512-byte blocks allocated. */ | 68 | long long st_blocks;/* Number 512-byte blocks allocated */ |
69 | 69 | ||
70 | unsigned st_atime; | 70 | unsigned st_atime; |
71 | unsigned st_atime_nsec; | 71 | unsigned st_atime_nsec; |
@@ -77,13 +77,13 @@ struct stat64 { | |||
77 | unsigned long long st_ino; | 77 | unsigned long long st_ino; |
78 | } __attribute__((packed)); | 78 | } __attribute__((packed)); |
79 | 79 | ||
80 | typedef struct compat_siginfo{ | 80 | typedef struct compat_siginfo { |
81 | int si_signo; | 81 | int si_signo; |
82 | int si_errno; | 82 | int si_errno; |
83 | int si_code; | 83 | int si_code; |
84 | 84 | ||
85 | union { | 85 | union { |
86 | int _pad[((128/sizeof(int)) - 3)]; | 86 | int _pad[((128 / sizeof(int)) - 3)]; |
87 | 87 | ||
88 | /* kill() */ | 88 | /* kill() */ |
89 | struct { | 89 | struct { |
@@ -129,28 +129,26 @@ typedef struct compat_siginfo{ | |||
129 | } _sifields; | 129 | } _sifields; |
130 | } compat_siginfo_t; | 130 | } compat_siginfo_t; |
131 | 131 | ||
132 | struct sigframe32 | 132 | struct sigframe32 { |
133 | { | 133 | u32 pretcode; |
134 | u32 pretcode; | 134 | int sig; |
135 | int sig; | 135 | struct sigcontext_ia32 sc; |
136 | struct sigcontext_ia32 sc; | 136 | struct _fpstate_ia32 fpstate; |
137 | struct _fpstate_ia32 fpstate; | 137 | unsigned int extramask[_COMPAT_NSIG_WORDS-1]; |
138 | unsigned int extramask[_COMPAT_NSIG_WORDS-1]; | ||
139 | }; | 138 | }; |
140 | 139 | ||
141 | struct rt_sigframe32 | 140 | struct rt_sigframe32 { |
142 | { | 141 | u32 pretcode; |
143 | u32 pretcode; | 142 | int sig; |
144 | int sig; | 143 | u32 pinfo; |
145 | u32 pinfo; | 144 | u32 puc; |
146 | u32 puc; | 145 | compat_siginfo_t info; |
147 | compat_siginfo_t info; | 146 | struct ucontext_ia32 uc; |
148 | struct ucontext_ia32 uc; | 147 | struct _fpstate_ia32 fpstate; |
149 | struct _fpstate_ia32 fpstate; | ||
150 | }; | 148 | }; |
151 | 149 | ||
152 | struct ustat32 { | 150 | struct ustat32 { |
153 | __u32 f_tfree; | 151 | __u32 f_tfree; |
154 | compat_ino_t f_tinode; | 152 | compat_ino_t f_tinode; |
155 | char f_fname[6]; | 153 | char f_fname[6]; |
156 | char f_fpack[6]; | 154 | char f_fpack[6]; |
@@ -168,5 +166,5 @@ extern void ia32_pick_mmap_layout(struct mm_struct *mm); | |||
168 | #endif | 166 | #endif |
169 | 167 | ||
170 | #endif /* !CONFIG_IA32_SUPPORT */ | 168 | #endif /* !CONFIG_IA32_SUPPORT */ |
171 | 169 | ||
172 | #endif | 170 | #endif |
diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h index 5a58b176dd61..7b292d386713 100644 --- a/include/asm-x86/io.h +++ b/include/asm-x86/io.h | |||
@@ -1,5 +1,11 @@ | |||
1 | #define ARCH_HAS_IOREMAP_WC | ||
2 | |||
1 | #ifdef CONFIG_X86_32 | 3 | #ifdef CONFIG_X86_32 |
2 | # include "io_32.h" | 4 | # include "io_32.h" |
3 | #else | 5 | #else |
4 | # include "io_64.h" | 6 | # include "io_64.h" |
5 | #endif | 7 | #endif |
8 | extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, | ||
9 | unsigned long prot_val); | ||
10 | extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size); | ||
11 | |||
diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h index d4d8fbd9378c..509045f5fda2 100644 --- a/include/asm-x86/io_32.h +++ b/include/asm-x86/io_32.h | |||
@@ -65,14 +65,14 @@ | |||
65 | * | 65 | * |
66 | * The returned physical address is the physical (CPU) mapping for | 66 | * The returned physical address is the physical (CPU) mapping for |
67 | * the memory address given. It is only valid to use this function on | 67 | * the memory address given. It is only valid to use this function on |
68 | * addresses directly mapped or allocated via kmalloc. | 68 | * addresses directly mapped or allocated via kmalloc. |
69 | * | 69 | * |
70 | * This function does not give bus mappings for DMA transfers. In | 70 | * This function does not give bus mappings for DMA transfers. In |
71 | * almost all conceivable cases a device driver should not be using | 71 | * almost all conceivable cases a device driver should not be using |
72 | * this function | 72 | * this function |
73 | */ | 73 | */ |
74 | 74 | ||
75 | static inline unsigned long virt_to_phys(volatile void * address) | 75 | static inline unsigned long virt_to_phys(volatile void *address) |
76 | { | 76 | { |
77 | return __pa(address); | 77 | return __pa(address); |
78 | } | 78 | } |
@@ -90,7 +90,7 @@ static inline unsigned long virt_to_phys(volatile void * address) | |||
90 | * this function | 90 | * this function |
91 | */ | 91 | */ |
92 | 92 | ||
93 | static inline void * phys_to_virt(unsigned long address) | 93 | static inline void *phys_to_virt(unsigned long address) |
94 | { | 94 | { |
95 | return __va(address); | 95 | return __va(address); |
96 | } | 96 | } |
@@ -169,16 +169,19 @@ extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); | |||
169 | 169 | ||
170 | static inline unsigned char readb(const volatile void __iomem *addr) | 170 | static inline unsigned char readb(const volatile void __iomem *addr) |
171 | { | 171 | { |
172 | return *(volatile unsigned char __force *) addr; | 172 | return *(volatile unsigned char __force *)addr; |
173 | } | 173 | } |
174 | |||
174 | static inline unsigned short readw(const volatile void __iomem *addr) | 175 | static inline unsigned short readw(const volatile void __iomem *addr) |
175 | { | 176 | { |
176 | return *(volatile unsigned short __force *) addr; | 177 | return *(volatile unsigned short __force *)addr; |
177 | } | 178 | } |
179 | |||
178 | static inline unsigned int readl(const volatile void __iomem *addr) | 180 | static inline unsigned int readl(const volatile void __iomem *addr) |
179 | { | 181 | { |
180 | return *(volatile unsigned int __force *) addr; | 182 | return *(volatile unsigned int __force *) addr; |
181 | } | 183 | } |
184 | |||
182 | #define readb_relaxed(addr) readb(addr) | 185 | #define readb_relaxed(addr) readb(addr) |
183 | #define readw_relaxed(addr) readw(addr) | 186 | #define readw_relaxed(addr) readw(addr) |
184 | #define readl_relaxed(addr) readl(addr) | 187 | #define readl_relaxed(addr) readl(addr) |
@@ -188,15 +191,17 @@ static inline unsigned int readl(const volatile void __iomem *addr) | |||
188 | 191 | ||
189 | static inline void writeb(unsigned char b, volatile void __iomem *addr) | 192 | static inline void writeb(unsigned char b, volatile void __iomem *addr) |
190 | { | 193 | { |
191 | *(volatile unsigned char __force *) addr = b; | 194 | *(volatile unsigned char __force *)addr = b; |
192 | } | 195 | } |
196 | |||
193 | static inline void writew(unsigned short b, volatile void __iomem *addr) | 197 | static inline void writew(unsigned short b, volatile void __iomem *addr) |
194 | { | 198 | { |
195 | *(volatile unsigned short __force *) addr = b; | 199 | *(volatile unsigned short __force *)addr = b; |
196 | } | 200 | } |
201 | |||
197 | static inline void writel(unsigned int b, volatile void __iomem *addr) | 202 | static inline void writel(unsigned int b, volatile void __iomem *addr) |
198 | { | 203 | { |
199 | *(volatile unsigned int __force *) addr = b; | 204 | *(volatile unsigned int __force *)addr = b; |
200 | } | 205 | } |
201 | #define __raw_writeb writeb | 206 | #define __raw_writeb writeb |
202 | #define __raw_writew writew | 207 | #define __raw_writew writew |
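readb()/writeb() and friends are plain dereferences through pointers cast to volatile: the volatile qualifier keeps the compiler from caching, reordering, or eliding the access, which is the whole contract of an MMIO accessor. A user-space sketch of the same accessor shape over an ordinary buffer (the __iomem/__force annotations are kernel-only sparse markers and are dropped here):

#include <stdio.h>

/* Each access compiles to a real load or store: volatile forbids the
 * compiler from combining or dropping them. */
static inline unsigned int mmio_readl(const void *addr)
{
	return *(const volatile unsigned int *)addr;
}

static inline void mmio_writel(unsigned int v, void *addr)
{
	*(volatile unsigned int *)addr = v;
}

int main(void)
{
	unsigned int fake_reg = 0;

	mmio_writel(0xdeadbeef, &fake_reg);
	printf("reg = %#x\n", mmio_readl(&fake_reg));
	return 0;
}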
@@ -239,12 +244,12 @@ memcpy_toio(volatile void __iomem *dst, const void *src, int count) | |||
239 | * 1. Out of order aware processors | 244 | * 1. Out of order aware processors |
240 | * 2. Accidentally out of order processors (PPro errata #51) | 245 | * 2. Accidentally out of order processors (PPro errata #51) |
241 | */ | 246 | */ |
242 | 247 | ||
243 | #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) | 248 | #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) |
244 | 249 | ||
245 | static inline void flush_write_buffers(void) | 250 | static inline void flush_write_buffers(void) |
246 | { | 251 | { |
247 | __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory"); | 252 | asm volatile("lock; addl $0,0(%%esp)": : :"memory"); |
248 | } | 253 | } |
249 | 254 | ||
250 | #else | 255 | #else |
@@ -264,7 +269,8 @@ extern void io_delay_init(void); | |||
264 | #include <asm/paravirt.h> | 269 | #include <asm/paravirt.h> |
265 | #else | 270 | #else |
266 | 271 | ||
267 | static inline void slow_down_io(void) { | 272 | static inline void slow_down_io(void) |
273 | { | ||
268 | native_io_delay(); | 274 | native_io_delay(); |
269 | #ifdef REALLY_SLOW_IO | 275 | #ifdef REALLY_SLOW_IO |
270 | native_io_delay(); | 276 | native_io_delay(); |
@@ -275,51 +281,74 @@ static inline void slow_down_io(void) { | |||
275 | 281 | ||
276 | #endif | 282 | #endif |
277 | 283 | ||
278 | #define __BUILDIO(bwl,bw,type) \ | 284 | #define __BUILDIO(bwl, bw, type) \ |
279 | static inline void out##bwl(unsigned type value, int port) { \ | 285 | static inline void out##bwl(unsigned type value, int port) \ |
280 | out##bwl##_local(value, port); \ | 286 | { \ |
281 | } \ | 287 | out##bwl##_local(value, port); \ |
282 | static inline unsigned type in##bwl(int port) { \ | 288 | } \ |
283 | return in##bwl##_local(port); \ | 289 | \ |
290 | static inline unsigned type in##bwl(int port) \ | ||
291 | { \ | ||
292 | return in##bwl##_local(port); \ | ||
284 | } | 293 | } |
285 | 294 | ||
286 | #define BUILDIO(bwl,bw,type) \ | 295 | #define BUILDIO(bwl, bw, type) \ |
287 | static inline void out##bwl##_local(unsigned type value, int port) { \ | 296 | static inline void out##bwl##_local(unsigned type value, int port) \ |
288 | __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \ | 297 | { \ |
289 | } \ | 298 | asm volatile("out" #bwl " %" #bw "0, %w1" \ |
290 | static inline unsigned type in##bwl##_local(int port) { \ | 299 | : : "a"(value), "Nd"(port)); \ |
291 | unsigned type value; \ | 300 | } \ |
292 | __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \ | 301 | \ |
293 | return value; \ | 302 | static inline unsigned type in##bwl##_local(int port) \ |
294 | } \ | 303 | { \ |
295 | static inline void out##bwl##_local_p(unsigned type value, int port) { \ | 304 | unsigned type value; \ |
296 | out##bwl##_local(value, port); \ | 305 | asm volatile("in" #bwl " %w1, %" #bw "0" \ |
297 | slow_down_io(); \ | 306 | : "=a"(value) : "Nd"(port)); \ |
298 | } \ | 307 | return value; \ |
299 | static inline unsigned type in##bwl##_local_p(int port) { \ | 308 | } \ |
300 | unsigned type value = in##bwl##_local(port); \ | 309 | \ |
301 | slow_down_io(); \ | 310 | static inline void out##bwl##_local_p(unsigned type value, int port) \ |
302 | return value; \ | 311 | { \ |
303 | } \ | 312 | out##bwl##_local(value, port); \ |
304 | __BUILDIO(bwl,bw,type) \ | 313 | slow_down_io(); \ |
305 | static inline void out##bwl##_p(unsigned type value, int port) { \ | 314 | } \ |
306 | out##bwl(value, port); \ | 315 | \ |
307 | slow_down_io(); \ | 316 | static inline unsigned type in##bwl##_local_p(int port) \ |
308 | } \ | 317 | { \ |
309 | static inline unsigned type in##bwl##_p(int port) { \ | 318 | unsigned type value = in##bwl##_local(port); \ |
310 | unsigned type value = in##bwl(port); \ | 319 | slow_down_io(); \ |
311 | slow_down_io(); \ | 320 | return value; \ |
312 | return value; \ | 321 | } \ |
313 | } \ | 322 | \ |
314 | static inline void outs##bwl(int port, const void *addr, unsigned long count) { \ | 323 | __BUILDIO(bwl, bw, type) \ |
315 | __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \ | 324 | \ |
316 | } \ | 325 | static inline void out##bwl##_p(unsigned type value, int port) \ |
317 | static inline void ins##bwl(int port, void *addr, unsigned long count) { \ | 326 | { \ |
318 | __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \ | 327 | out##bwl(value, port); \ |
328 | slow_down_io(); \ | ||
329 | } \ | ||
330 | \ | ||
331 | static inline unsigned type in##bwl##_p(int port) \ | ||
332 | { \ | ||
333 | unsigned type value = in##bwl(port); \ | ||
334 | slow_down_io(); \ | ||
335 | return value; \ | ||
336 | } \ | ||
337 | \ | ||
338 | static inline void outs##bwl(int port, const void *addr, unsigned long count) \ | ||
339 | { \ | ||
340 | asm volatile("rep; outs" #bwl \ | ||
341 | : "+S"(addr), "+c"(count) : "d"(port)); \ | ||
342 | } \ | ||
343 | \ | ||
344 | static inline void ins##bwl(int port, void *addr, unsigned long count) \ | ||
345 | { \ | ||
346 | asm volatile("rep; ins" #bwl \ | ||
347 | : "+D"(addr), "+c"(count) : "d"(port)); \ | ||
319 | } | 348 | } |
320 | 349 | ||
321 | BUILDIO(b,b,char) | 350 | BUILDIO(b, b, char) |
322 | BUILDIO(w,w,short) | 351 | BUILDIO(w, w, short) |
323 | BUILDIO(l,,int) | 352 | BUILDIO(l, , int) |
324 | 353 | ||
325 | #endif | 354 | #endif |
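For readers auditing the macro restructuring above, this is roughly what BUILDIO(b, b, char) expands to once both macro layers are applied (a hand expansion for illustration, not verbatim preprocessor output; the _p, _local_p and string variants follow the same pattern):

	static inline void outb_local(unsigned char value, int port)
	{
		asm volatile("outb %b0, %w1" : : "a"(value), "Nd"(port));
	}

	static inline unsigned char inb_local(int port)
	{
		unsigned char value;
		asm volatile("inb %w1, %b0" : "=a"(value) : "Nd"(port));
		return value;
	}

	static inline void outb(unsigned char value, int port)
	{
		outb_local(value, port);
	}

	static inline unsigned char inb(int port)
	{
		return inb_local(port);
	}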
diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h index db0be2011a3c..c2f5eef47b88 100644 --- a/include/asm-x86/io_64.h +++ b/include/asm-x86/io_64.h | |||
@@ -58,60 +58,75 @@ static inline void slow_down_io(void) | |||
58 | /* | 58 | /* |
59 | * Talk about misusing macros.. | 59 | * Talk about misusing macros.. |
60 | */ | 60 | */ |
61 | #define __OUT1(s,x) \ | 61 | #define __OUT1(s, x) \ |
62 | static inline void out##s(unsigned x value, unsigned short port) { | 62 | static inline void out##s(unsigned x value, unsigned short port) { |
63 | 63 | ||
64 | #define __OUT2(s,s1,s2) \ | 64 | #define __OUT2(s, s1, s2) \ |
65 | __asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1" | 65 | asm volatile ("out" #s " %" s1 "0,%" s2 "1" |
66 | 66 | ||
67 | #ifndef REALLY_SLOW_IO | 67 | #ifndef REALLY_SLOW_IO |
68 | #define REALLY_SLOW_IO | 68 | #define REALLY_SLOW_IO |
69 | #define UNSET_REALLY_SLOW_IO | 69 | #define UNSET_REALLY_SLOW_IO |
70 | #endif | 70 | #endif |
71 | 71 | ||
72 | #define __OUT(s,s1,x) \ | 72 | #define __OUT(s, s1, x) \ |
73 | __OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \ | 73 | __OUT1(s, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \ |
74 | __OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \ | 74 | } \ |
75 | slow_down_io(); } | 75 | __OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \ |
76 | slow_down_io(); \ | ||
77 | } | ||
76 | 78 | ||
77 | #define __IN1(s) \ | 79 | #define __IN1(s) \ |
78 | static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v; | 80 | static inline RETURN_TYPE in##s(unsigned short port) \ |
81 | { \ | ||
82 | RETURN_TYPE _v; | ||
79 | 83 | ||
80 | #define __IN2(s,s1,s2) \ | 84 | #define __IN2(s, s1, s2) \ |
81 | __asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0" | 85 | asm volatile ("in" #s " %" s2 "1,%" s1 "0" |
82 | 86 | ||
83 | #define __IN(s,s1,i...) \ | 87 | #define __IN(s, s1, i...) \ |
84 | __IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); return _v; } \ | 88 | __IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \ |
85 | __IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \ | 89 | return _v; \ |
86 | slow_down_io(); return _v; } | 90 | } \ |
91 | __IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \ | ||
92 | slow_down_io(); \ | ||
93 | return _v; } | ||
87 | 94 | ||
88 | #ifdef UNSET_REALLY_SLOW_IO | 95 | #ifdef UNSET_REALLY_SLOW_IO |
89 | #undef REALLY_SLOW_IO | 96 | #undef REALLY_SLOW_IO |
90 | #endif | 97 | #endif |
91 | 98 | ||
92 | #define __INS(s) \ | 99 | #define __INS(s) \ |
93 | static inline void ins##s(unsigned short port, void * addr, unsigned long count) \ | 100 | static inline void ins##s(unsigned short port, void *addr, \ |
94 | { __asm__ __volatile__ ("rep ; ins" #s \ | 101 | unsigned long count) \ |
95 | : "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); } | 102 | { \ |
103 | asm volatile ("rep ; ins" #s \ | ||
104 | : "=D" (addr), "=c" (count) \ | ||
105 | : "d" (port), "0" (addr), "1" (count)); \ | ||
106 | } | ||
96 | 107 | ||
97 | #define __OUTS(s) \ | 108 | #define __OUTS(s) \ |
98 | static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \ | 109 | static inline void outs##s(unsigned short port, const void *addr, \ |
99 | { __asm__ __volatile__ ("rep ; outs" #s \ | 110 | unsigned long count) \ |
100 | : "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); } | 111 | { \ |
112 | asm volatile ("rep ; outs" #s \ | ||
113 | : "=S" (addr), "=c" (count) \ | ||
114 | : "d" (port), "0" (addr), "1" (count)); \ | ||
115 | } | ||
101 | 116 | ||
102 | #define RETURN_TYPE unsigned char | 117 | #define RETURN_TYPE unsigned char |
103 | __IN(b,"") | 118 | __IN(b, "") |
104 | #undef RETURN_TYPE | 119 | #undef RETURN_TYPE |
105 | #define RETURN_TYPE unsigned short | 120 | #define RETURN_TYPE unsigned short |
106 | __IN(w,"") | 121 | __IN(w, "") |
107 | #undef RETURN_TYPE | 122 | #undef RETURN_TYPE |
108 | #define RETURN_TYPE unsigned int | 123 | #define RETURN_TYPE unsigned int |
109 | __IN(l,"") | 124 | __IN(l, "") |
110 | #undef RETURN_TYPE | 125 | #undef RETURN_TYPE |
111 | 126 | ||
112 | __OUT(b,"b",char) | 127 | __OUT(b, "b", char) |
113 | __OUT(w,"w",short) | 128 | __OUT(w, "w", short) |
114 | __OUT(l,,int) | 129 | __OUT(l, , int) |
115 | 130 | ||
116 | __INS(b) | 131 | __INS(b) |
117 | __INS(w) | 132 | __INS(w) |
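For symmetry with the 32-bit side, a hand expansion of __IN(b, "") with RETURN_TYPE set to unsigned char (whitespace normalized; illustrative only):

	static inline unsigned char inb(unsigned short port)
	{
		unsigned char _v;
		asm volatile("inb %w1,%0" : "=a" (_v) : "Nd" (port));
		return _v;
	}

	static inline unsigned char inb_p(unsigned short port)
	{
		unsigned char _v;
		asm volatile("inb %w1,%0" : "=a" (_v) : "Nd" (port));
		slow_down_io();
		return _v;
	}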
@@ -132,12 +147,12 @@ __OUTS(l) | |||
132 | * Change virtual addresses to physical addresses and vv. | 147 | * Change virtual addresses to physical addresses and vv. |
133 | * These are pretty trivial | 148 | * These are pretty trivial |
134 | */ | 149 | */ |
135 | static inline unsigned long virt_to_phys(volatile void * address) | 150 | static inline unsigned long virt_to_phys(volatile void *address) |
136 | { | 151 | { |
137 | return __pa(address); | 152 | return __pa(address); |
138 | } | 153 | } |
139 | 154 | ||
140 | static inline void * phys_to_virt(unsigned long address) | 155 | static inline void *phys_to_virt(unsigned long address) |
141 | { | 156 | { |
142 | return __va(address); | 157 | return __va(address); |
143 | } | 158 | } |
@@ -200,18 +215,22 @@ static inline __u8 __readb(const volatile void __iomem *addr) | |||
200 | { | 215 | { |
201 | return *(__force volatile __u8 *)addr; | 216 | return *(__force volatile __u8 *)addr; |
202 | } | 217 | } |
218 | |||
203 | static inline __u16 __readw(const volatile void __iomem *addr) | 219 | static inline __u16 __readw(const volatile void __iomem *addr) |
204 | { | 220 | { |
205 | return *(__force volatile __u16 *)addr; | 221 | return *(__force volatile __u16 *)addr; |
206 | } | 222 | } |
223 | |||
207 | static __always_inline __u32 __readl(const volatile void __iomem *addr) | 224 | static __always_inline __u32 __readl(const volatile void __iomem *addr) |
208 | { | 225 | { |
209 | return *(__force volatile __u32 *)addr; | 226 | return *(__force volatile __u32 *)addr; |
210 | } | 227 | } |
228 | |||
211 | static inline __u64 __readq(const volatile void __iomem *addr) | 229 | static inline __u64 __readq(const volatile void __iomem *addr) |
212 | { | 230 | { |
213 | return *(__force volatile __u64 *)addr; | 231 | return *(__force volatile __u64 *)addr; |
214 | } | 232 | } |
233 | |||
215 | #define readb(x) __readb(x) | 234 | #define readb(x) __readb(x) |
216 | #define readw(x) __readw(x) | 235 | #define readw(x) __readw(x) |
217 | #define readl(x) __readl(x) | 236 | #define readl(x) __readl(x) |
@@ -231,37 +250,44 @@ static inline void __writel(__u32 b, volatile void __iomem *addr) | |||
231 | { | 250 | { |
232 | *(__force volatile __u32 *)addr = b; | 251 | *(__force volatile __u32 *)addr = b; |
233 | } | 252 | } |
253 | |||
234 | static inline void __writeq(__u64 b, volatile void __iomem *addr) | 254 | static inline void __writeq(__u64 b, volatile void __iomem *addr) |
235 | { | 255 | { |
236 | *(__force volatile __u64 *)addr = b; | 256 | *(__force volatile __u64 *)addr = b; |
237 | } | 257 | } |
258 | |||
238 | static inline void __writeb(__u8 b, volatile void __iomem *addr) | 259 | static inline void __writeb(__u8 b, volatile void __iomem *addr) |
239 | { | 260 | { |
240 | *(__force volatile __u8 *)addr = b; | 261 | *(__force volatile __u8 *)addr = b; |
241 | } | 262 | } |
263 | |||
242 | static inline void __writew(__u16 b, volatile void __iomem *addr) | 264 | static inline void __writew(__u16 b, volatile void __iomem *addr) |
243 | { | 265 | { |
244 | *(__force volatile __u16 *)addr = b; | 266 | *(__force volatile __u16 *)addr = b; |
245 | } | 267 | } |
246 | #define writeq(val,addr) __writeq((val),(addr)) | 268 | |
247 | #define writel(val,addr) __writel((val),(addr)) | 269 | #define writeq(val, addr) __writeq((val), (addr)) |
248 | #define writew(val,addr) __writew((val),(addr)) | 270 | #define writel(val, addr) __writel((val), (addr)) |
249 | #define writeb(val,addr) __writeb((val),(addr)) | 271 | #define writew(val, addr) __writew((val), (addr)) |
272 | #define writeb(val, addr) __writeb((val), (addr)) | ||
250 | #define __raw_writeb writeb | 273 | #define __raw_writeb writeb |
251 | #define __raw_writew writew | 274 | #define __raw_writew writew |
252 | #define __raw_writel writel | 275 | #define __raw_writel writel |
253 | #define __raw_writeq writeq | 276 | #define __raw_writeq writeq |
254 | 277 | ||
255 | void __memcpy_fromio(void*,unsigned long,unsigned); | 278 | void __memcpy_fromio(void *, unsigned long, unsigned); |
256 | void __memcpy_toio(unsigned long,const void*,unsigned); | 279 | void __memcpy_toio(unsigned long, const void *, unsigned); |
257 | 280 | ||
258 | static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len) | 281 | static inline void memcpy_fromio(void *to, const volatile void __iomem *from, |
282 | unsigned len) | ||
259 | { | 283 | { |
260 | __memcpy_fromio(to,(unsigned long)from,len); | 284 | __memcpy_fromio(to, (unsigned long)from, len); |
261 | } | 285 | } |
262 | static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len) | 286 | |
287 | static inline void memcpy_toio(volatile void __iomem *to, const void *from, | ||
288 | unsigned len) | ||
263 | { | 289 | { |
264 | __memcpy_toio((unsigned long)to,from,len); | 290 | __memcpy_toio((unsigned long)to, from, len); |
265 | } | 291 | } |
266 | 292 | ||
267 | void memset_io(volatile void __iomem *a, int b, size_t c); | 293 | void memset_io(volatile void __iomem *a, int b, size_t c); |
@@ -276,7 +302,7 @@ void memset_io(volatile void __iomem *a, int b, size_t c); | |||
276 | */ | 302 | */ |
277 | #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) | 303 | #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) |
278 | 304 | ||
279 | #define flush_write_buffers() | 305 | #define flush_write_buffers() |
280 | 306 | ||
281 | extern int iommu_bio_merge; | 307 | extern int iommu_bio_merge; |
282 | #define BIO_VMERGE_BOUNDARY iommu_bio_merge | 308 | #define BIO_VMERGE_BOUNDARY iommu_bio_merge |
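Put together, a typical consumer of these accessors looks like the following sketch (the BAR index, mapping size and register offsets are invented for illustration; pdev is an already-probed PCI device):

	u32 status;
	u8 buf[16];
	void __iomem *regs = ioremap(pci_resource_start(pdev, 0), 0x100);

	writel(1, regs + 0x04);			/* hypothetical control register */
	status = readl(regs + 0x08);		/* hypothetical status register */
	memcpy_fromio(buf, regs + 0x40, sizeof(buf));	/* bulk copy from device memory */
	iounmap(regs);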
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h index 0f5b3fef0b08..0c9e17c73e05 100644 --- a/include/asm-x86/io_apic.h +++ b/include/asm-x86/io_apic.h | |||
@@ -110,6 +110,13 @@ extern int nr_ioapic_registers[MAX_IO_APICS]; | |||
110 | * MP-BIOS irq configuration table structures: | 110 | * MP-BIOS irq configuration table structures: |
111 | */ | 111 | */ |
112 | 112 | ||
113 | struct mp_ioapic_routing { | ||
114 | int apic_id; | ||
115 | int gsi_base; | ||
116 | int gsi_end; | ||
117 | u32 pin_programmed[4]; | ||
118 | }; | ||
119 | |||
113 | /* I/O APIC entries */ | 120 | /* I/O APIC entries */ |
114 | extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; | 121 | extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; |
115 | 122 | ||
@@ -146,7 +153,6 @@ extern int io_apic_get_version(int ioapic); | |||
146 | extern int io_apic_get_redir_entries(int ioapic); | 153 | extern int io_apic_get_redir_entries(int ioapic); |
147 | extern int io_apic_set_pci_routing(int ioapic, int pin, int irq, | 154 | extern int io_apic_set_pci_routing(int ioapic, int pin, int irq, |
148 | int edge_level, int active_high_low); | 155 | int edge_level, int active_high_low); |
149 | extern int timer_uses_ioapic_pin_0; | ||
150 | #endif /* CONFIG_ACPI */ | 156 | #endif /* CONFIG_ACPI */ |
151 | 157 | ||
152 | extern int (*ioapic_renumber_irq)(int ioapic, int irq); | 158 | extern int (*ioapic_renumber_irq)(int ioapic, int irq); |
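The new mp_ioapic_routing bookkeeping pairs each I/O APIC with the GSI window it decodes; a hypothetical lookup helper shows the intended use (the mp_ioapic_routing[] array name and the nr_ioapics bound are assumptions here):

	/* hypothetical: find which I/O APIC serves a given GSI */
	static int find_ioapic_for_gsi(int gsi)
	{
		int i;

		for (i = 0; i < nr_ioapics; i++) {
			struct mp_ioapic_routing *r = &mp_ioapic_routing[i];

			if (gsi >= r->gsi_base && gsi <= r->gsi_end)
				return i;	/* pin = gsi - r->gsi_base */
		}
		return -1;
	}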
diff --git a/include/asm-x86/ioctls.h b/include/asm-x86/ioctls.h index 93c894dc5154..c0c338bd4068 100644 --- a/include/asm-x86/ioctls.h +++ b/include/asm-x86/ioctls.h | |||
@@ -47,12 +47,13 @@ | |||
47 | #define TIOCSBRK 0x5427 /* BSD compatibility */ | 47 | #define TIOCSBRK 0x5427 /* BSD compatibility */ |
48 | #define TIOCCBRK 0x5428 /* BSD compatibility */ | 48 | #define TIOCCBRK 0x5428 /* BSD compatibility */ |
49 | #define TIOCGSID 0x5429 /* Return the session ID of FD */ | 49 | #define TIOCGSID 0x5429 /* Return the session ID of FD */ |
50 | #define TCGETS2 _IOR('T',0x2A, struct termios2) | 50 | #define TCGETS2 _IOR('T', 0x2A, struct termios2) |
51 | #define TCSETS2 _IOW('T',0x2B, struct termios2) | 51 | #define TCSETS2 _IOW('T', 0x2B, struct termios2) |
52 | #define TCSETSW2 _IOW('T',0x2C, struct termios2) | 52 | #define TCSETSW2 _IOW('T', 0x2C, struct termios2) |
53 | #define TCSETSF2 _IOW('T',0x2D, struct termios2) | 53 | #define TCSETSF2 _IOW('T', 0x2D, struct termios2) |
54 | #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ | 54 | #define TIOCGPTN _IOR('T', 0x30, unsigned int) |
55 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ | 55 | /* Get Pty Number (of pty-mux device) */ |
56 | #define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ | ||
56 | 57 | ||
57 | #define FIONCLEX 0x5450 | 58 | #define FIONCLEX 0x5450 |
58 | #define FIOCLEX 0x5451 | 59 | #define FIOCLEX 0x5451 |
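From userspace, the two pty ioctls above are what unlockpt()/ptsname() boil down to; a sketch (master_fd is an already-open /dev/ptmx descriptor):

	unsigned int ptn;
	int unlock = 0;

	if (ioctl(master_fd, TIOCGPTN, &ptn) == 0)	/* pty number of our slave */
		printf("slave: /dev/pts/%u\n", ptn);
	ioctl(master_fd, TIOCSPTLCK, &unlock);		/* let the slave side be opened */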
diff --git a/include/asm-x86/ipcbuf.h b/include/asm-x86/ipcbuf.h index 2adf8b39a40b..ee678fd51594 100644 --- a/include/asm-x86/ipcbuf.h +++ b/include/asm-x86/ipcbuf.h | |||
@@ -11,8 +11,7 @@ | |||
11 | * - 2 miscellaneous 32-bit values | 11 | * - 2 miscellaneous 32-bit values |
12 | */ | 12 | */ |
13 | 13 | ||
14 | struct ipc64_perm | 14 | struct ipc64_perm { |
15 | { | ||
16 | __kernel_key_t key; | 15 | __kernel_key_t key; |
17 | __kernel_uid32_t uid; | 16 | __kernel_uid32_t uid; |
18 | __kernel_gid32_t gid; | 17 | __kernel_gid32_t gid; |
diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h index 6d011bd6067d..ecc80f341f37 100644 --- a/include/asm-x86/ipi.h +++ b/include/asm-x86/ipi.h | |||
@@ -27,7 +27,8 @@ | |||
27 | * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. | 27 | * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. |
28 | */ | 28 | */ |
29 | 29 | ||
30 | static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, unsigned int dest) | 30 | static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector, |
31 | unsigned int dest) | ||
31 | { | 32 | { |
32 | unsigned int icr = shortcut | dest; | 33 | unsigned int icr = shortcut | dest; |
33 | 34 | ||
@@ -42,12 +43,13 @@ static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, uns | |||
42 | return icr; | 43 | return icr; |
43 | } | 44 | } |
44 | 45 | ||
45 | static inline int __prepare_ICR2 (unsigned int mask) | 46 | static inline int __prepare_ICR2(unsigned int mask) |
46 | { | 47 | { |
47 | return SET_APIC_DEST_FIELD(mask); | 48 | return SET_APIC_DEST_FIELD(mask); |
48 | } | 49 | } |
49 | 50 | ||
50 | static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest) | 51 | static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, |
52 | unsigned int dest) | ||
51 | { | 53 | { |
52 | /* | 54 | /* |
53 | * Subtle. In the case of the 'never do double writes' workaround | 55 | * Subtle. In the case of the 'never do double writes' workaround |
@@ -78,7 +80,8 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsign | |||
78 | * This is used to send an IPI with no shorthand notation (the destination is | 80 | * This is used to send an IPI with no shorthand notation (the destination is |
79 | * specified in bits 56 to 63 of the ICR). | 81 | * specified in bits 56 to 63 of the ICR). |
80 | */ | 82 | */ |
81 | static inline void __send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest) | 83 | static inline void __send_IPI_dest_field(unsigned int mask, int vector, |
84 | unsigned int dest) | ||
82 | { | 85 | { |
83 | unsigned long cfg; | 86 | unsigned long cfg; |
84 | 87 | ||
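__prepare_ICR and __prepare_ICR2 build the two 32-bit halves of the APIC ICR; condensed, the send path amounts to the following (a sketch of __send_IPI_dest_field's flow; mask, vector and dest are caller-supplied):

	unsigned long flags;

	local_irq_save(flags);
	apic_wait_icr_idle();				/* wait out any previous IPI */
	apic_write(APIC_ICR2, __prepare_ICR2(mask));	/* destination half first */
	apic_write(APIC_ICR, __prepare_ICR(0, vector, dest));	/* this write fires the IPI */
	local_irq_restore(flags);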
diff --git a/include/asm-x86/irq_32.h b/include/asm-x86/irq_32.h index aca9c96e8e6b..0b79f3185243 100644 --- a/include/asm-x86/irq_32.h +++ b/include/asm-x86/irq_32.h | |||
@@ -15,7 +15,7 @@ | |||
15 | #include "irq_vectors.h" | 15 | #include "irq_vectors.h" |
16 | #include <asm/thread_info.h> | 16 | #include <asm/thread_info.h> |
17 | 17 | ||
18 | static __inline__ int irq_canonicalize(int irq) | 18 | static inline int irq_canonicalize(int irq) |
19 | { | 19 | { |
20 | return ((irq == 2) ? 9 : irq); | 20 | return ((irq == 2) ? 9 : irq); |
21 | } | 21 | } |
diff --git a/include/asm-x86/irq_64.h b/include/asm-x86/irq_64.h index 5006c6e75656..083d35a62c94 100644 --- a/include/asm-x86/irq_64.h +++ b/include/asm-x86/irq_64.h | |||
@@ -31,10 +31,10 @@ | |||
31 | 31 | ||
32 | #define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */ | 32 | #define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */ |
33 | 33 | ||
34 | #define NR_IRQS (NR_VECTORS + (32 *NR_CPUS)) | 34 | #define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) |
35 | #define NR_IRQ_VECTORS NR_IRQS | 35 | #define NR_IRQ_VECTORS NR_IRQS |
36 | 36 | ||
37 | static __inline__ int irq_canonicalize(int irq) | 37 | static inline int irq_canonicalize(int irq) |
38 | { | 38 | { |
39 | return ((irq == 2) ? 9 : irq); | 39 | return ((irq == 2) ? 9 : irq); |
40 | } | 40 | } |
diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h index 0e2292483b35..c242527f970e 100644 --- a/include/asm-x86/irqflags.h +++ b/include/asm-x86/irqflags.h | |||
@@ -12,25 +12,21 @@ static inline unsigned long native_save_fl(void) | |||
12 | { | 12 | { |
13 | unsigned long flags; | 13 | unsigned long flags; |
14 | 14 | ||
15 | __asm__ __volatile__( | 15 | asm volatile("# __raw_save_flags\n\t" |
16 | "# __raw_save_flags\n\t" | 16 | "pushf ; pop %0" |
17 | "pushf ; pop %0" | 17 | : "=g" (flags) |
18 | : "=g" (flags) | 18 | : /* no input */ |
19 | : /* no input */ | 19 | : "memory"); |
20 | : "memory" | ||
21 | ); | ||
22 | 20 | ||
23 | return flags; | 21 | return flags; |
24 | } | 22 | } |
25 | 23 | ||
26 | static inline void native_restore_fl(unsigned long flags) | 24 | static inline void native_restore_fl(unsigned long flags) |
27 | { | 25 | { |
28 | __asm__ __volatile__( | 26 | asm volatile("push %0 ; popf" |
29 | "push %0 ; popf" | 27 | : /* no output */ |
30 | : /* no output */ | 28 | :"g" (flags) |
31 | :"g" (flags) | 29 | :"memory", "cc"); |
32 | :"memory", "cc" | ||
33 | ); | ||
34 | } | 30 | } |
35 | 31 | ||
36 | static inline void native_irq_disable(void) | 32 | static inline void native_irq_disable(void) |
@@ -70,26 +66,6 @@ static inline void raw_local_irq_restore(unsigned long flags) | |||
70 | native_restore_fl(flags); | 66 | native_restore_fl(flags); |
71 | } | 67 | } |
72 | 68 | ||
73 | #ifdef CONFIG_X86_VSMP | ||
74 | |||
75 | /* | ||
76 | * Interrupt control for the VSMP architecture: | ||
77 | */ | ||
78 | |||
79 | static inline void raw_local_irq_disable(void) | ||
80 | { | ||
81 | unsigned long flags = __raw_local_save_flags(); | ||
82 | raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC); | ||
83 | } | ||
84 | |||
85 | static inline void raw_local_irq_enable(void) | ||
86 | { | ||
87 | unsigned long flags = __raw_local_save_flags(); | ||
88 | raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); | ||
89 | } | ||
90 | |||
91 | #else | ||
92 | |||
93 | static inline void raw_local_irq_disable(void) | 69 | static inline void raw_local_irq_disable(void) |
94 | { | 70 | { |
95 | native_irq_disable(); | 71 | native_irq_disable(); |
@@ -100,8 +76,6 @@ static inline void raw_local_irq_enable(void) | |||
100 | native_irq_enable(); | 76 | native_irq_enable(); |
101 | } | 77 | } |
102 | 78 | ||
103 | #endif | ||
104 | |||
105 | /* | 79 | /* |
106 | * Used in the idle loop; sti takes one instruction cycle | 80 | * Used in the idle loop; sti takes one instruction cycle |
107 | * to complete: | 81 | * to complete: |
@@ -153,23 +127,16 @@ static inline unsigned long __raw_local_irq_save(void) | |||
153 | #endif /* CONFIG_PARAVIRT */ | 127 | #endif /* CONFIG_PARAVIRT */ |
154 | 128 | ||
155 | #ifndef __ASSEMBLY__ | 129 | #ifndef __ASSEMBLY__ |
156 | #define raw_local_save_flags(flags) \ | 130 | #define raw_local_save_flags(flags) \ |
157 | do { (flags) = __raw_local_save_flags(); } while (0) | 131 | do { (flags) = __raw_local_save_flags(); } while (0) |
158 | 132 | ||
159 | #define raw_local_irq_save(flags) \ | 133 | #define raw_local_irq_save(flags) \ |
160 | do { (flags) = __raw_local_irq_save(); } while (0) | 134 | do { (flags) = __raw_local_irq_save(); } while (0) |
161 | 135 | ||
162 | #ifdef CONFIG_X86_VSMP | ||
163 | static inline int raw_irqs_disabled_flags(unsigned long flags) | ||
164 | { | ||
165 | return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC); | ||
166 | } | ||
167 | #else | ||
168 | static inline int raw_irqs_disabled_flags(unsigned long flags) | 136 | static inline int raw_irqs_disabled_flags(unsigned long flags) |
169 | { | 137 | { |
170 | return !(flags & X86_EFLAGS_IF); | 138 | return !(flags & X86_EFLAGS_IF); |
171 | } | 139 | } |
172 | #endif | ||
173 | 140 | ||
174 | static inline int raw_irqs_disabled(void) | 141 | static inline int raw_irqs_disabled(void) |
175 | { | 142 | { |
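With the VSMP special-casing gone, the raw_local_irq_* family is back to the canonical save/restore pair (usage sketch):

	unsigned long flags;

	raw_local_irq_save(flags);	/* cli, remembering the prior IF state */
	/* short critical section; must not sleep */
	raw_local_irq_restore(flags);	/* re-enables only if IF was set before */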
diff --git a/include/asm-x86/kdebug.h b/include/asm-x86/kdebug.h index 99dcbafa1511..96651bb59ba1 100644 --- a/include/asm-x86/kdebug.h +++ b/include/asm-x86/kdebug.h | |||
@@ -20,15 +20,16 @@ enum die_val { | |||
20 | DIE_CALL, | 20 | DIE_CALL, |
21 | DIE_NMI_IPI, | 21 | DIE_NMI_IPI, |
22 | DIE_PAGE_FAULT, | 22 | DIE_PAGE_FAULT, |
23 | DIE_NMIUNKNOWN, | ||
23 | }; | 24 | }; |
24 | 25 | ||
25 | extern void printk_address(unsigned long address, int reliable); | 26 | extern void printk_address(unsigned long address, int reliable); |
26 | extern void die(const char *,struct pt_regs *,long); | 27 | extern void die(const char *, struct pt_regs *,long); |
27 | extern int __must_check __die(const char *, struct pt_regs *, long); | 28 | extern int __must_check __die(const char *, struct pt_regs *, long); |
28 | extern void show_registers(struct pt_regs *regs); | 29 | extern void show_registers(struct pt_regs *regs); |
29 | extern void __show_registers(struct pt_regs *, int all); | 30 | extern void __show_registers(struct pt_regs *, int all); |
30 | extern void show_trace(struct task_struct *t, struct pt_regs *regs, | 31 | extern void show_trace(struct task_struct *t, struct pt_regs *regs, |
31 | unsigned long *sp, unsigned long bp); | 32 | unsigned long *sp, unsigned long bp); |
32 | extern void __show_regs(struct pt_regs *regs); | 33 | extern void __show_regs(struct pt_regs *regs); |
33 | extern void show_regs(struct pt_regs *regs); | 34 | extern void show_regs(struct pt_regs *regs); |
34 | extern unsigned long oops_begin(void); | 35 | extern unsigned long oops_begin(void); |
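DIE_NMIUNKNOWN gives die-chain users a hook for NMIs nobody claimed; a minimal subscriber sketch, assuming the standard notifier API (NOTIFY_DONE, register_die_notifier):

	static int my_die_cb(struct notifier_block *nb, unsigned long val, void *data)
	{
		if (val == DIE_NMIUNKNOWN)
			printk(KERN_WARNING "unclaimed NMI observed\n");
		return NOTIFY_DONE;
	}

	static struct notifier_block my_die_nb = {
		.notifier_call = my_die_cb,
	};

	/* somewhere in init code: register_die_notifier(&my_die_nb); */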
diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h index c90d3c77afc2..8f855a15f64d 100644 --- a/include/asm-x86/kexec.h +++ b/include/asm-x86/kexec.h | |||
@@ -94,10 +94,9 @@ static inline void crash_fixup_ss_esp(struct pt_regs *newregs, | |||
94 | { | 94 | { |
95 | #ifdef CONFIG_X86_32 | 95 | #ifdef CONFIG_X86_32 |
96 | newregs->sp = (unsigned long)&(oldregs->sp); | 96 | newregs->sp = (unsigned long)&(oldregs->sp); |
97 | __asm__ __volatile__( | 97 | asm volatile("xorl %%eax, %%eax\n\t" |
98 | "xorl %%eax, %%eax\n\t" | 98 | "movw %%ss, %%ax\n\t" |
99 | "movw %%ss, %%ax\n\t" | 99 | :"=a"(newregs->ss)); |
100 | :"=a"(newregs->ss)); | ||
101 | #endif | 100 | #endif |
102 | } | 101 | } |
103 | 102 | ||
@@ -114,39 +113,39 @@ static inline void crash_setup_regs(struct pt_regs *newregs, | |||
114 | crash_fixup_ss_esp(newregs, oldregs); | 113 | crash_fixup_ss_esp(newregs, oldregs); |
115 | } else { | 114 | } else { |
116 | #ifdef CONFIG_X86_32 | 115 | #ifdef CONFIG_X86_32 |
117 | __asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->bx)); | 116 | asm volatile("movl %%ebx,%0" : "=m"(newregs->bx)); |
118 | __asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->cx)); | 117 | asm volatile("movl %%ecx,%0" : "=m"(newregs->cx)); |
119 | __asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->dx)); | 118 | asm volatile("movl %%edx,%0" : "=m"(newregs->dx)); |
120 | __asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->si)); | 119 | asm volatile("movl %%esi,%0" : "=m"(newregs->si)); |
121 | __asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->di)); | 120 | asm volatile("movl %%edi,%0" : "=m"(newregs->di)); |
122 | __asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->bp)); | 121 | asm volatile("movl %%ebp,%0" : "=m"(newregs->bp)); |
123 | __asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->ax)); | 122 | asm volatile("movl %%eax,%0" : "=m"(newregs->ax)); |
124 | __asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->sp)); | 123 | asm volatile("movl %%esp,%0" : "=m"(newregs->sp)); |
125 | __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss)); | 124 | asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss)); |
126 | __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs)); | 125 | asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs)); |
127 | __asm__ __volatile__("movl %%ds, %%eax;" :"=a"(newregs->ds)); | 126 | asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds)); |
128 | __asm__ __volatile__("movl %%es, %%eax;" :"=a"(newregs->es)); | 127 | asm volatile("movl %%es, %%eax;" :"=a"(newregs->es)); |
129 | __asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->flags)); | 128 | asm volatile("pushfl; popl %0" :"=m"(newregs->flags)); |
130 | #else | 129 | #else |
131 | __asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->bx)); | 130 | asm volatile("movq %%rbx,%0" : "=m"(newregs->bx)); |
132 | __asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->cx)); | 131 | asm volatile("movq %%rcx,%0" : "=m"(newregs->cx)); |
133 | __asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->dx)); | 132 | asm volatile("movq %%rdx,%0" : "=m"(newregs->dx)); |
134 | __asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->si)); | 133 | asm volatile("movq %%rsi,%0" : "=m"(newregs->si)); |
135 | __asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->di)); | 134 | asm volatile("movq %%rdi,%0" : "=m"(newregs->di)); |
136 | __asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->bp)); | 135 | asm volatile("movq %%rbp,%0" : "=m"(newregs->bp)); |
137 | __asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->ax)); | 136 | asm volatile("movq %%rax,%0" : "=m"(newregs->ax)); |
138 | __asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->sp)); | 137 | asm volatile("movq %%rsp,%0" : "=m"(newregs->sp)); |
139 | __asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8)); | 138 | asm volatile("movq %%r8,%0" : "=m"(newregs->r8)); |
140 | __asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9)); | 139 | asm volatile("movq %%r9,%0" : "=m"(newregs->r9)); |
141 | __asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10)); | 140 | asm volatile("movq %%r10,%0" : "=m"(newregs->r10)); |
142 | __asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11)); | 141 | asm volatile("movq %%r11,%0" : "=m"(newregs->r11)); |
143 | __asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12)); | 142 | asm volatile("movq %%r12,%0" : "=m"(newregs->r12)); |
144 | __asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13)); | 143 | asm volatile("movq %%r13,%0" : "=m"(newregs->r13)); |
145 | __asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14)); | 144 | asm volatile("movq %%r14,%0" : "=m"(newregs->r14)); |
146 | __asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15)); | 145 | asm volatile("movq %%r15,%0" : "=m"(newregs->r15)); |
147 | __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss)); | 146 | asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss)); |
148 | __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs)); | 147 | asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs)); |
149 | __asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->flags)); | 148 | asm volatile("pushfq; popq %0" :"=m"(newregs->flags)); |
150 | #endif | 149 | #endif |
151 | newregs->ip = (unsigned long)current_text_addr(); | 150 | newregs->ip = (unsigned long)current_text_addr(); |
152 | } | 151 | } |
diff --git a/include/asm-x86/kgdb.h b/include/asm-x86/kgdb.h new file mode 100644 index 000000000000..484c47554f3b --- /dev/null +++ b/include/asm-x86/kgdb.h | |||
@@ -0,0 +1,81 @@ | |||
1 | #ifndef _ASM_KGDB_H_ | ||
2 | #define _ASM_KGDB_H_ | ||
3 | |||
4 | /* | ||
5 | * Copyright (C) 2001-2004 Amit S. Kale | ||
6 | * Copyright (C) 2008 Wind River Systems, Inc. | ||
7 | */ | ||
8 | |||
9 | /* | ||
10 | * BUFMAX defines the maximum number of characters in inbound/outbound | ||
11 | * buffers at least NUMREGBYTES*2 are needed for register packets | ||
12 | * Longer buffer is needed to list all threads | ||
13 | */ | ||
14 | #define BUFMAX 1024 | ||
15 | |||
16 | /* | ||
17 | * Note that this register image is in a different order than | ||
18 | * the register image that Linux produces at interrupt time. | ||
19 | * | ||
20 | * Linux's register image is defined by struct pt_regs in ptrace.h. | ||
21 | * Just why GDB uses a different order is a historical mystery. | ||
22 | */ | ||
23 | #ifdef CONFIG_X86_32 | ||
24 | enum regnames { | ||
25 | GDB_AX, /* 0 */ | ||
26 | GDB_CX, /* 1 */ | ||
27 | GDB_DX, /* 2 */ | ||
28 | GDB_BX, /* 3 */ | ||
29 | GDB_SP, /* 4 */ | ||
30 | GDB_BP, /* 5 */ | ||
31 | GDB_SI, /* 6 */ | ||
32 | GDB_DI, /* 7 */ | ||
33 | GDB_PC, /* 8 also known as eip */ | ||
34 | GDB_PS, /* 9 also known as eflags */ | ||
35 | GDB_CS, /* 10 */ | ||
36 | GDB_SS, /* 11 */ | ||
37 | GDB_DS, /* 12 */ | ||
38 | GDB_ES, /* 13 */ | ||
39 | GDB_FS, /* 14 */ | ||
40 | GDB_GS, /* 15 */ | ||
41 | }; | ||
42 | #else /* ! CONFIG_X86_32 */ | ||
43 | enum regnames { | ||
44 | GDB_AX, /* 0 */ | ||
45 | GDB_DX, /* 1 */ | ||
46 | GDB_CX, /* 2 */ | ||
47 | GDB_BX, /* 3 */ | ||
48 | GDB_SI, /* 4 */ | ||
49 | GDB_DI, /* 5 */ | ||
50 | GDB_BP, /* 6 */ | ||
51 | GDB_SP, /* 7 */ | ||
52 | GDB_R8, /* 8 */ | ||
53 | GDB_R9, /* 9 */ | ||
54 | GDB_R10, /* 10 */ | ||
55 | GDB_R11, /* 11 */ | ||
56 | GDB_R12, /* 12 */ | ||
57 | GDB_R13, /* 13 */ | ||
58 | GDB_R14, /* 14 */ | ||
59 | GDB_R15, /* 15 */ | ||
60 | GDB_PC, /* 16 */ | ||
61 | GDB_PS, /* 17 */ | ||
62 | }; | ||
63 | #endif /* CONFIG_X86_32 */ | ||
64 | |||
65 | /* | ||
66 | * Number of bytes of registers: | ||
67 | */ | ||
68 | #ifdef CONFIG_X86_32 | ||
69 | # define NUMREGBYTES 64 | ||
70 | #else | ||
71 | # define NUMREGBYTES ((GDB_PS+1)*8) | ||
72 | #endif | ||
73 | |||
74 | static inline void arch_kgdb_breakpoint(void) | ||
75 | { | ||
76 | asm(" int $3"); | ||
77 | } | ||
78 | #define BREAK_INSTR_SIZE 1 | ||
79 | #define CACHE_FLUSH_IS_SAFE 1 | ||
80 | |||
81 | #endif /* _ASM_KGDB_H_ */ | ||
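The two enums pin down GDB's wire order, which kgdb has to translate into from Linux's pt_regs layout; a 32-bit sketch of that translation (a simplification of what the arch's pt_regs_to_gdb_regs hook would do; kernel-mode SP subtleties are glossed over):

	static void sketch_pt_regs_to_gdb(struct pt_regs *regs, unsigned long *gdb_regs)
	{
		gdb_regs[GDB_AX] = regs->ax;
		gdb_regs[GDB_BX] = regs->bx;
		gdb_regs[GDB_CX] = regs->cx;
		gdb_regs[GDB_DX] = regs->dx;
		gdb_regs[GDB_SI] = regs->si;
		gdb_regs[GDB_DI] = regs->di;
		gdb_regs[GDB_BP] = regs->bp;
		gdb_regs[GDB_SP] = regs->sp;	/* see kernel-mode caveat above */
		gdb_regs[GDB_PC] = regs->ip;
		gdb_regs[GDB_PS] = regs->flags;
		gdb_regs[GDB_CS] = regs->cs;
		gdb_regs[GDB_SS] = regs->ss;
	}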
diff --git a/include/asm-x86/kprobes.h b/include/asm-x86/kprobes.h index 61ad7b5d142e..54980b0b3892 100644 --- a/include/asm-x86/kprobes.h +++ b/include/asm-x86/kprobes.h | |||
@@ -35,12 +35,12 @@ typedef u8 kprobe_opcode_t; | |||
35 | #define RELATIVEJUMP_INSTRUCTION 0xe9 | 35 | #define RELATIVEJUMP_INSTRUCTION 0xe9 |
36 | #define MAX_INSN_SIZE 16 | 36 | #define MAX_INSN_SIZE 16 |
37 | #define MAX_STACK_SIZE 64 | 37 | #define MAX_STACK_SIZE 64 |
38 | #define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ | 38 | #define MIN_STACK_SIZE(ADDR) \ |
39 | (((unsigned long)current_thread_info()) + THREAD_SIZE \ | 39 | (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \ |
40 | - (unsigned long)(ADDR))) \ | 40 | THREAD_SIZE - (unsigned long)(ADDR))) \ |
41 | ? (MAX_STACK_SIZE) \ | 41 | ? (MAX_STACK_SIZE) \ |
42 | : (((unsigned long)current_thread_info()) + THREAD_SIZE \ | 42 | : (((unsigned long)current_thread_info()) + \ |
43 | - (unsigned long)(ADDR))) | 43 | THREAD_SIZE - (unsigned long)(ADDR))) |
44 | 44 | ||
45 | #define flush_insn_slot(p) do { } while (0) | 45 | #define flush_insn_slot(p) do { } while (0) |
46 | 46 | ||
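MIN_STACK_SIZE is only reflowed above, not changed; restated as straight-line code, it clamps a copy so it can never run past the top of the current thread's stack (sketch):

	/* stack bytes between ADDR and the top of this thread's stack */
	unsigned long room = (unsigned long)current_thread_info()
			     + THREAD_SIZE - (unsigned long)addr;
	unsigned long n = min(room, (unsigned long)MAX_STACK_SIZE);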
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h index 4702b04b979a..68ee390b2844 100644 --- a/include/asm-x86/kvm_host.h +++ b/include/asm-x86/kvm_host.h | |||
@@ -22,15 +22,16 @@ | |||
22 | 22 | ||
23 | #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1) | 23 | #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1) |
24 | #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD)) | 24 | #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD)) |
25 | #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL) | 25 | #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \ |
26 | 0xFFFFFF0000000000ULL) | ||
26 | 27 | ||
27 | #define KVM_GUEST_CR0_MASK \ | 28 | #define KVM_GUEST_CR0_MASK \ |
28 | (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \ | 29 | (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \ |
29 | | X86_CR0_NW | X86_CR0_CD) | 30 | | X86_CR0_NW | X86_CR0_CD) |
30 | #define KVM_VM_CR0_ALWAYS_ON \ | 31 | #define KVM_VM_CR0_ALWAYS_ON \ |
31 | (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \ | 32 | (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \ |
32 | | X86_CR0_MP) | 33 | | X86_CR0_MP) |
33 | #define KVM_GUEST_CR4_MASK \ | 34 | #define KVM_GUEST_CR4_MASK \ |
34 | (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE) | 35 | (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE) |
35 | #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) | 36 | #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) |
36 | #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) | 37 | #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) |
@@ -133,12 +134,12 @@ struct kvm_pte_chain { | |||
133 | union kvm_mmu_page_role { | 134 | union kvm_mmu_page_role { |
134 | unsigned word; | 135 | unsigned word; |
135 | struct { | 136 | struct { |
136 | unsigned glevels : 4; | 137 | unsigned glevels:4; |
137 | unsigned level : 4; | 138 | unsigned level:4; |
138 | unsigned quadrant : 2; | 139 | unsigned quadrant:2; |
139 | unsigned pad_for_nice_hex_output : 6; | 140 | unsigned pad_for_nice_hex_output:6; |
140 | unsigned metaphysical : 1; | 141 | unsigned metaphysical:1; |
141 | unsigned access : 3; | 142 | unsigned access:3; |
142 | }; | 143 | }; |
143 | }; | 144 | }; |
144 | 145 | ||
@@ -606,6 +607,7 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code) | |||
606 | #define TSS_BASE_SIZE 0x68 | 607 | #define TSS_BASE_SIZE 0x68 |
607 | #define TSS_IOPB_SIZE (65536 / 8) | 608 | #define TSS_IOPB_SIZE (65536 / 8) |
608 | #define TSS_REDIRECTION_SIZE (256 / 8) | 609 | #define TSS_REDIRECTION_SIZE (256 / 8) |
609 | #define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) | 610 | #define RMODE_TSS_SIZE \ |
611 | (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) | ||
610 | 612 | ||
611 | #endif | 613 | #endif |
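Dropping the spaces around ':' in kvm_mmu_page_role is cosmetic; the bitfields still pack into the same 32-bit word, which is what the MMU code keys on. A brief sketch (ACC_ALL is an assumption borrowed from the mmu code's access constants):

	union kvm_mmu_page_role role = { .word = 0 };
	unsigned key;

	role.level = 2;			/* bitfields pack into 'word' */
	role.glevels = 4;
	role.access = ACC_ALL;		/* assumption: ACC_* access bits */
	key = role.word;		/* packed word used for hashing/compare */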
diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h index 7db91b9bdcd4..d6337f941c98 100644 --- a/include/asm-x86/kvm_x86_emulate.h +++ b/include/asm-x86/kvm_x86_emulate.h | |||
@@ -68,10 +68,10 @@ struct x86_emulate_ops { | |||
68 | * @val: [OUT] Value read from memory, zero-extended to 'u_long'. | 68 | * @val: [OUT] Value read from memory, zero-extended to 'u_long'. |
69 | * @bytes: [IN ] Number of bytes to read from memory. | 69 | * @bytes: [IN ] Number of bytes to read from memory. |
70 | */ | 70 | */ |
71 | int (*read_emulated) (unsigned long addr, | 71 | int (*read_emulated)(unsigned long addr, |
72 | void *val, | 72 | void *val, |
73 | unsigned int bytes, | 73 | unsigned int bytes, |
74 | struct kvm_vcpu *vcpu); | 74 | struct kvm_vcpu *vcpu); |
75 | 75 | ||
76 | /* | 76 | /* |
77 | * write_emulated: Write bytes to emulated/special memory area. | 77 | * write_emulated: Write bytes to emulated/special memory area. |
@@ -80,10 +80,10 @@ struct x86_emulate_ops { | |||
80 | * required). | 80 | * required). |
81 | * @bytes: [IN ] Number of bytes to write to memory. | 81 | * @bytes: [IN ] Number of bytes to write to memory. |
82 | */ | 82 | */ |
83 | int (*write_emulated) (unsigned long addr, | 83 | int (*write_emulated)(unsigned long addr, |
84 | const void *val, | 84 | const void *val, |
85 | unsigned int bytes, | 85 | unsigned int bytes, |
86 | struct kvm_vcpu *vcpu); | 86 | struct kvm_vcpu *vcpu); |
87 | 87 | ||
88 | /* | 88 | /* |
89 | * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an | 89 | * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an |
@@ -93,11 +93,11 @@ struct x86_emulate_ops { | |||
93 | * @new: [IN ] Value to write to @addr. | 93 | * @new: [IN ] Value to write to @addr. |
94 | * @bytes: [IN ] Number of bytes to access using CMPXCHG. | 94 | * @bytes: [IN ] Number of bytes to access using CMPXCHG. |
95 | */ | 95 | */ |
96 | int (*cmpxchg_emulated) (unsigned long addr, | 96 | int (*cmpxchg_emulated)(unsigned long addr, |
97 | const void *old, | 97 | const void *old, |
98 | const void *new, | 98 | const void *new, |
99 | unsigned int bytes, | 99 | unsigned int bytes, |
100 | struct kvm_vcpu *vcpu); | 100 | struct kvm_vcpu *vcpu); |
101 | 101 | ||
102 | }; | 102 | }; |
103 | 103 | ||
@@ -143,7 +143,7 @@ struct x86_emulate_ctxt { | |||
143 | /* Register state before/after emulation. */ | 143 | /* Register state before/after emulation. */ |
144 | struct kvm_vcpu *vcpu; | 144 | struct kvm_vcpu *vcpu; |
145 | 145 | ||
146 | /* Linear faulting address (if emulating a page-faulting instruction). */ | 146 | /* Linear faulting address (if emulating a page-faulting instruction) */ |
147 | unsigned long eflags; | 147 | unsigned long eflags; |
148 | 148 | ||
149 | /* Emulated execution mode, represented by an X86EMUL_MODE value. */ | 149 | /* Emulated execution mode, represented by an X86EMUL_MODE value. */ |
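The de-spaced function-pointer prototypes above are what a backend has to match; a trivial, obviously-fake backend for illustration (X86EMUL_CONTINUE is assumed to be this header's success code):

	static int dummy_read_emulated(unsigned long addr, void *val,
				       unsigned int bytes, struct kvm_vcpu *vcpu)
	{
		memset(val, 0, bytes);		/* pretend all reads return zeroes */
		return X86EMUL_CONTINUE;
	}

	static struct x86_emulate_ops dummy_ops = {
		.read_emulated = dummy_read_emulated,
	};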
diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h index 9b17571e9bc3..be4a7247fa2b 100644 --- a/include/asm-x86/lguest.h +++ b/include/asm-x86/lguest.h | |||
@@ -34,8 +34,7 @@ extern const char lgstart_iret[], lgend_iret[]; | |||
34 | extern void lguest_iret(void); | 34 | extern void lguest_iret(void); |
35 | extern void lguest_init(void); | 35 | extern void lguest_init(void); |
36 | 36 | ||
37 | struct lguest_regs | 37 | struct lguest_regs { |
38 | { | ||
39 | /* Manually saved part. */ | 38 | /* Manually saved part. */ |
40 | unsigned long eax, ebx, ecx, edx; | 39 | unsigned long eax, ebx, ecx, edx; |
41 | unsigned long esi, edi, ebp; | 40 | unsigned long esi, edi, ebp; |
@@ -51,8 +50,7 @@ struct lguest_regs | |||
51 | }; | 50 | }; |
52 | 51 | ||
53 | /* This is a guest-specific page (mapped ro) into the guest. */ | 52 | /* This is a guest-specific page (mapped ro) into the guest. */ |
54 | struct lguest_ro_state | 53 | struct lguest_ro_state { |
55 | { | ||
56 | /* Host information we need to restore when we switch back. */ | 54 | /* Host information we need to restore when we switch back. */ |
57 | u32 host_cr3; | 55 | u32 host_cr3; |
58 | struct desc_ptr host_idt_desc; | 56 | struct desc_ptr host_idt_desc; |
@@ -67,8 +65,7 @@ struct lguest_ro_state | |||
67 | struct desc_struct guest_gdt[GDT_ENTRIES]; | 65 | struct desc_struct guest_gdt[GDT_ENTRIES]; |
68 | }; | 66 | }; |
69 | 67 | ||
70 | struct lg_cpu_arch | 68 | struct lg_cpu_arch { |
71 | { | ||
72 | /* The GDT entries copied into lguest_ro_state when running. */ | 69 | /* The GDT entries copied into lguest_ro_state when running. */ |
73 | struct desc_struct gdt[GDT_ENTRIES]; | 70 | struct desc_struct gdt[GDT_ENTRIES]; |
74 | 71 | ||
@@ -85,7 +82,7 @@ static inline void lguest_set_ts(void) | |||
85 | 82 | ||
86 | cr0 = read_cr0(); | 83 | cr0 = read_cr0(); |
87 | if (!(cr0 & 8)) | 84 | if (!(cr0 & 8)) |
88 | write_cr0(cr0|8); | 85 | write_cr0(cr0 | 8); |
89 | } | 86 | } |
90 | 87 | ||
91 | /* Full 4G segment descriptors, suitable for CS and DS. */ | 88 | /* Full 4G segment descriptors, suitable for CS and DS. */ |
diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h index f239e7069cab..a3241f28e34a 100644 --- a/include/asm-x86/lguest_hcall.h +++ b/include/asm-x86/lguest_hcall.h | |||
@@ -46,7 +46,7 @@ hcall(unsigned long call, | |||
46 | { | 46 | { |
47 | /* "int" is the Intel instruction to trigger a trap. */ | 47 | /* "int" is the Intel instruction to trigger a trap. */ |
48 | asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY) | 48 | asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY) |
49 | /* The call in %eax (aka "a") might be overwritten */ | 49 | /* The call in %eax (aka "a") might be overwritten */ |
50 | : "=a"(call) | 50 | : "=a"(call) |
51 | /* The arguments are in %eax, %edx, %ebx & %ecx */ | 51 | /* The arguments are in %eax, %edx, %ebx & %ecx */ |
52 | : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3) | 52 | : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3) |
@@ -62,8 +62,7 @@ hcall(unsigned long call, | |||
62 | #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) | 62 | #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) |
63 | 63 | ||
64 | #define LHCALL_RING_SIZE 64 | 64 | #define LHCALL_RING_SIZE 64 |
65 | struct hcall_args | 65 | struct hcall_args { |
66 | { | ||
67 | /* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */ | 66 | /* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */ |
68 | unsigned long arg0, arg2, arg3, arg1; | 67 | unsigned long arg0, arg2, arg3, arg1; |
69 | }; | 68 | }; |
diff --git a/include/asm-x86/linkage.h b/include/asm-x86/linkage.h index c048353f4b85..64e444f8e85b 100644 --- a/include/asm-x86/linkage.h +++ b/include/asm-x86/linkage.h | |||
@@ -1,6 +1,9 @@ | |||
1 | #ifndef __ASM_LINKAGE_H | 1 | #ifndef __ASM_LINKAGE_H |
2 | #define __ASM_LINKAGE_H | 2 | #define __ASM_LINKAGE_H |
3 | 3 | ||
4 | #undef notrace | ||
5 | #define notrace __attribute__((no_instrument_function)) | ||
6 | |||
4 | #ifdef CONFIG_X86_64 | 7 | #ifdef CONFIG_X86_64 |
5 | #define __ALIGN .p2align 4,,15 | 8 | #define __ALIGN .p2align 4,,15 |
6 | #define __ALIGN_STR ".p2align 4,,15" | 9 | #define __ALIGN_STR ".p2align 4,,15" |
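notrace now lives here for both 32- and 64-bit; marking a function with it keeps mcount instrumentation out, e.g. for helpers that tracing code itself calls (hedged example):

	static notrace void trace_safe_helper(void)
	{
		/* safe to call from tracing hooks: never instrumented itself */
	}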
diff --git a/include/asm-x86/local.h b/include/asm-x86/local.h index f852c62b3319..330a72496abd 100644 --- a/include/asm-x86/local.h +++ b/include/asm-x86/local.h | |||
@@ -18,32 +18,28 @@ typedef struct { | |||
18 | 18 | ||
19 | static inline void local_inc(local_t *l) | 19 | static inline void local_inc(local_t *l) |
20 | { | 20 | { |
21 | __asm__ __volatile__( | 21 | asm volatile(_ASM_INC "%0" |
22 | _ASM_INC "%0" | 22 | : "+m" (l->a.counter)); |
23 | :"+m" (l->a.counter)); | ||
24 | } | 23 | } |
25 | 24 | ||
26 | static inline void local_dec(local_t *l) | 25 | static inline void local_dec(local_t *l) |
27 | { | 26 | { |
28 | __asm__ __volatile__( | 27 | asm volatile(_ASM_DEC "%0" |
29 | _ASM_DEC "%0" | 28 | : "+m" (l->a.counter)); |
30 | :"+m" (l->a.counter)); | ||
31 | } | 29 | } |
32 | 30 | ||
33 | static inline void local_add(long i, local_t *l) | 31 | static inline void local_add(long i, local_t *l) |
34 | { | 32 | { |
35 | __asm__ __volatile__( | 33 | asm volatile(_ASM_ADD "%1,%0" |
36 | _ASM_ADD "%1,%0" | 34 | : "+m" (l->a.counter) |
37 | :"+m" (l->a.counter) | 35 | : "ir" (i)); |
38 | :"ir" (i)); | ||
39 | } | 36 | } |
40 | 37 | ||
41 | static inline void local_sub(long i, local_t *l) | 38 | static inline void local_sub(long i, local_t *l) |
42 | { | 39 | { |
43 | __asm__ __volatile__( | 40 | asm volatile(_ASM_SUB "%1,%0" |
44 | _ASM_SUB "%1,%0" | 41 | : "+m" (l->a.counter) |
45 | :"+m" (l->a.counter) | 42 | : "ir" (i)); |
46 | :"ir" (i)); | ||
47 | } | 43 | } |
48 | 44 | ||
49 | /** | 45 | /** |
@@ -59,10 +55,9 @@ static inline int local_sub_and_test(long i, local_t *l) | |||
59 | { | 55 | { |
60 | unsigned char c; | 56 | unsigned char c; |
61 | 57 | ||
62 | __asm__ __volatile__( | 58 | asm volatile(_ASM_SUB "%2,%0; sete %1" |
63 | _ASM_SUB "%2,%0; sete %1" | 59 | : "+m" (l->a.counter), "=qm" (c) |
64 | :"+m" (l->a.counter), "=qm" (c) | 60 | : "ir" (i) : "memory"); |
65 | :"ir" (i) : "memory"); | ||
66 | return c; | 61 | return c; |
67 | } | 62 | } |
68 | 63 | ||
@@ -78,10 +73,9 @@ static inline int local_dec_and_test(local_t *l) | |||
78 | { | 73 | { |
79 | unsigned char c; | 74 | unsigned char c; |
80 | 75 | ||
81 | __asm__ __volatile__( | 76 | asm volatile(_ASM_DEC "%0; sete %1" |
82 | _ASM_DEC "%0; sete %1" | 77 | : "+m" (l->a.counter), "=qm" (c) |
83 | :"+m" (l->a.counter), "=qm" (c) | 78 | : : "memory"); |
84 | : : "memory"); | ||
85 | return c != 0; | 79 | return c != 0; |
86 | } | 80 | } |
87 | 81 | ||
@@ -97,10 +91,9 @@ static inline int local_inc_and_test(local_t *l) | |||
97 | { | 91 | { |
98 | unsigned char c; | 92 | unsigned char c; |
99 | 93 | ||
100 | __asm__ __volatile__( | 94 | asm volatile(_ASM_INC "%0; sete %1" |
101 | _ASM_INC "%0; sete %1" | 95 | : "+m" (l->a.counter), "=qm" (c) |
102 | :"+m" (l->a.counter), "=qm" (c) | 96 | : : "memory"); |
103 | : : "memory"); | ||
104 | return c != 0; | 97 | return c != 0; |
105 | } | 98 | } |
106 | 99 | ||
@@ -117,10 +110,9 @@ static inline int local_add_negative(long i, local_t *l) | |||
117 | { | 110 | { |
118 | unsigned char c; | 111 | unsigned char c; |
119 | 112 | ||
120 | __asm__ __volatile__( | 113 | asm volatile(_ASM_ADD "%2,%0; sets %1" |
121 | _ASM_ADD "%2,%0; sets %1" | 114 | : "+m" (l->a.counter), "=qm" (c) |
122 | :"+m" (l->a.counter), "=qm" (c) | 115 | : "ir" (i) : "memory"); |
123 | :"ir" (i) : "memory"); | ||
124 | return c; | 116 | return c; |
125 | } | 117 | } |
126 | 118 | ||
@@ -141,10 +133,9 @@ static inline long local_add_return(long i, local_t *l) | |||
141 | #endif | 133 | #endif |
142 | /* Modern 486+ processor */ | 134 | /* Modern 486+ processor */ |
143 | __i = i; | 135 | __i = i; |
144 | __asm__ __volatile__( | 136 | asm volatile(_ASM_XADD "%0, %1;" |
145 | _ASM_XADD "%0, %1;" | 137 | : "+r" (i), "+m" (l->a.counter) |
146 | :"+r" (i), "+m" (l->a.counter) | 138 | : : "memory"); |
147 | : : "memory"); | ||
148 | return i + __i; | 139 | return i + __i; |
149 | 140 | ||
150 | #ifdef CONFIG_M386 | 141 | #ifdef CONFIG_M386 |
@@ -182,11 +173,11 @@ static inline long local_sub_return(long i, local_t *l) | |||
182 | #define local_add_unless(l, a, u) \ | 173 | #define local_add_unless(l, a, u) \ |
183 | ({ \ | 174 | ({ \ |
184 | long c, old; \ | 175 | long c, old; \ |
185 | c = local_read(l); \ | 176 | c = local_read((l)); \ |
186 | for (;;) { \ | 177 | for (;;) { \ |
187 | if (unlikely(c == (u))) \ | 178 | if (unlikely(c == (u))) \ |
188 | break; \ | 179 | break; \ |
189 | old = local_cmpxchg((l), c, c + (a)); \ | 180 | old = local_cmpxchg((l), c, c + (a)); \ |
190 | if (likely(old == c)) \ | 181 | if (likely(old == c)) \ |
191 | break; \ | 182 | break; \ |
192 | c = old; \ | 183 | c = old; \ |
@@ -214,26 +205,30 @@ static inline long local_sub_return(long i, local_t *l) | |||
214 | 205 | ||
215 | /* Need to disable preemption for the cpu local counters otherwise we could | 206 | /* Need to disable preemption for the cpu local counters otherwise we could |
216 | still access a variable of a previous CPU in a non atomic way. */ | 207 | still access a variable of a previous CPU in a non atomic way. */ |
217 | #define cpu_local_wrap_v(l) \ | 208 | #define cpu_local_wrap_v(l) \ |
218 | ({ local_t res__; \ | 209 | ({ \ |
219 | preempt_disable(); \ | 210 | local_t res__; \ |
220 | res__ = (l); \ | 211 | preempt_disable(); \ |
221 | preempt_enable(); \ | 212 | res__ = (l); \ |
222 | res__; }) | 213 | preempt_enable(); \ |
214 | res__; \ | ||
215 | }) | ||
223 | #define cpu_local_wrap(l) \ | 216 | #define cpu_local_wrap(l) \ |
224 | ({ preempt_disable(); \ | 217 | ({ \ |
225 | l; \ | 218 | preempt_disable(); \ |
226 | preempt_enable(); }) \ | 219 | (l); \ |
227 | 220 | preempt_enable(); \ | |
228 | #define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l))) | 221 | }) \ |
229 | #define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i))) | 222 | |
230 | #define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l))) | 223 | #define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var((l)))) |
231 | #define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l))) | 224 | #define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var((l)), (i))) |
232 | #define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l))) | 225 | #define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var((l)))) |
233 | #define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l))) | 226 | #define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var((l)))) |
234 | 227 | #define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var((l)))) | |
235 | #define __cpu_local_inc(l) cpu_local_inc(l) | 228 | #define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var((l)))) |
236 | #define __cpu_local_dec(l) cpu_local_dec(l) | 229 | |
230 | #define __cpu_local_inc(l) cpu_local_inc((l)) | ||
231 | #define __cpu_local_dec(l) cpu_local_dec((l)) | ||
237 | #define __cpu_local_add(i, l) cpu_local_add((i), (l)) | 232 | #define __cpu_local_add(i, l) cpu_local_add((i), (l)) |
238 | #define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) | 233 | #define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) |
239 | 234 | ||
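For reference, a per-CPU event counter sketch using the wrappers cleaned up above (the counter name is hypothetical; DEFINE_PER_CPU and the cpu_local_* names are as in this header):

	static DEFINE_PER_CPU(local_t, evt_count);	/* hypothetical counter */

	cpu_local_inc(evt_count);	/* preempt-safe: the wrapper pins the CPU */

	/* open-coded equivalent of what the wrapper does: */
	preempt_disable();
	local_inc(&__get_cpu_var(evt_count));
	preempt_enable();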
diff --git a/include/asm-x86/mach-bigsmp/mach_apic.h b/include/asm-x86/mach-bigsmp/mach_apic.h index 6df235e8ea91..8327907c79bf 100644 --- a/include/asm-x86/mach-bigsmp/mach_apic.h +++ b/include/asm-x86/mach-bigsmp/mach_apic.h | |||
@@ -1,10 +1,7 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | 1 | #ifndef __ASM_MACH_APIC_H |
2 | #define __ASM_MACH_APIC_H | 2 | #define __ASM_MACH_APIC_H |
3 | 3 | ||
4 | 4 | #define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu)) | |
5 | extern u8 bios_cpu_apicid[]; | ||
6 | |||
7 | #define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu]) | ||
8 | #define esr_disable (1) | 5 | #define esr_disable (1) |
9 | 6 | ||
10 | static inline int apic_id_registered(void) | 7 | static inline int apic_id_registered(void) |
@@ -90,7 +87,7 @@ static inline int apicid_to_node(int logical_apicid) | |||
90 | static inline int cpu_present_to_apicid(int mps_cpu) | 87 | static inline int cpu_present_to_apicid(int mps_cpu) |
91 | { | 88 | { |
92 | if (mps_cpu < NR_CPUS) | 89 | if (mps_cpu < NR_CPUS) |
93 | return (int) bios_cpu_apicid[mps_cpu]; | 90 | return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); |
94 | 91 | ||
95 | return BAD_APICID; | 92 | return BAD_APICID; |
96 | } | 93 | } |
@@ -109,17 +106,6 @@ static inline int cpu_to_logical_apicid(int cpu) | |||
109 | return cpu_physical_id(cpu); | 106 | return cpu_physical_id(cpu); |
110 | } | 107 | } |
111 | 108 | ||
112 | static inline int mpc_apic_id(struct mpc_config_processor *m, | ||
113 | struct mpc_config_translation *translation_record) | ||
114 | { | ||
115 | printk("Processor #%d %u:%u APIC version %d\n", | ||
116 | m->mpc_apicid, | ||
117 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, | ||
118 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, | ||
119 | m->mpc_apicver); | ||
120 | return m->mpc_apicid; | ||
121 | } | ||
122 | |||
123 | static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) | 109 | static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) |
124 | { | 110 | { |
125 | /* For clustered we don't have a good way to do this yet - hack */ | 111 | /* For clustered we don't have a good way to do this yet - hack */ |
diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h index e3c2c1012c1c..0a6634f62abe 100644 --- a/include/asm-x86/mach-default/mach_apic.h +++ b/include/asm-x86/mach-default/mach_apic.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | 1 | #ifndef __ASM_MACH_APIC_H |
2 | #define __ASM_MACH_APIC_H | 2 | #define __ASM_MACH_APIC_H |
3 | 3 | ||
4 | #ifdef CONFIG_X86_LOCAL_APIC | ||
5 | |||
4 | #include <mach_apicdef.h> | 6 | #include <mach_apicdef.h> |
5 | #include <asm/smp.h> | 7 | #include <asm/smp.h> |
6 | 8 | ||
@@ -14,24 +16,25 @@ static inline cpumask_t target_cpus(void) | |||
14 | return cpumask_of_cpu(0); | 16 | return cpumask_of_cpu(0); |
15 | #endif | 17 | #endif |
16 | } | 18 | } |
17 | #define TARGET_CPUS (target_cpus()) | ||
18 | 19 | ||
19 | #define NO_BALANCE_IRQ (0) | 20 | #define NO_BALANCE_IRQ (0) |
20 | #define esr_disable (0) | 21 | #define esr_disable (0) |
21 | 22 | ||
23 | #ifdef CONFIG_X86_64 | ||
24 | #include <asm/genapic.h> | ||
25 | #define INT_DELIVERY_MODE (genapic->int_delivery_mode) | ||
26 | #define INT_DEST_MODE (genapic->int_dest_mode) | ||
27 | #define TARGET_CPUS (genapic->target_cpus()) | ||
28 | #define apic_id_registered (genapic->apic_id_registered) | ||
29 | #define init_apic_ldr (genapic->init_apic_ldr) | ||
30 | #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) | ||
31 | #define phys_pkg_id (genapic->phys_pkg_id) | ||
32 | #define vector_allocation_domain (genapic->vector_allocation_domain) | ||
33 | extern void setup_apic_routing(void); | ||
34 | #else | ||
22 | #define INT_DELIVERY_MODE dest_LowestPrio | 35 | #define INT_DELIVERY_MODE dest_LowestPrio |
23 | #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ | 36 | #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ |
24 | 37 | #define TARGET_CPUS (target_cpus()) | |
25 | static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) | ||
26 | { | ||
27 | return physid_isset(apicid, bitmap); | ||
28 | } | ||
29 | |||
30 | static inline unsigned long check_apicid_present(int bit) | ||
31 | { | ||
32 | return physid_isset(bit, phys_cpu_present_map); | ||
33 | } | ||
34 | |||
35 | /* | 38 | /* |
36 | * Set up the logical destination ID. | 39 | * Set up the logical destination ID. |
37 | * | 40 | * |
@@ -49,23 +52,51 @@ static inline void init_apic_ldr(void) | |||
49 | apic_write_around(APIC_LDR, val); | 52 | apic_write_around(APIC_LDR, val); |
50 | } | 53 | } |
51 | 54 | ||
52 | static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) | 55 | static inline int apic_id_registered(void) |
53 | { | 56 | { |
54 | return phys_map; | 57 | return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map); |
58 | } | ||
59 | |||
60 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | ||
61 | { | ||
62 | return cpus_addr(cpumask)[0]; | ||
63 | } | ||
64 | |||
65 | static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | ||
66 | { | ||
67 | return cpuid_apic >> index_msb; | ||
55 | } | 68 | } |
56 | 69 | ||
57 | static inline void setup_apic_routing(void) | 70 | static inline void setup_apic_routing(void) |
58 | { | 71 | { |
72 | #ifdef CONFIG_X86_IO_APIC | ||
59 | printk("Enabling APIC mode: %s. Using %d I/O APICs\n", | 73 | printk("Enabling APIC mode: %s. Using %d I/O APICs\n", |
60 | "Flat", nr_ioapics); | 74 | "Flat", nr_ioapics); |
75 | #endif | ||
61 | } | 76 | } |
62 | 77 | ||
63 | static inline int multi_timer_check(int apic, int irq) | 78 | static inline int apicid_to_node(int logical_apicid) |
64 | { | 79 | { |
65 | return 0; | 80 | return 0; |
66 | } | 81 | } |
82 | #endif | ||
67 | 83 | ||
68 | static inline int apicid_to_node(int logical_apicid) | 84 | static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) |
85 | { | ||
86 | return physid_isset(apicid, bitmap); | ||
87 | } | ||
88 | |||
89 | static inline unsigned long check_apicid_present(int bit) | ||
90 | { | ||
91 | return physid_isset(bit, phys_cpu_present_map); | ||
92 | } | ||
93 | |||
94 | static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) | ||
95 | { | ||
96 | return phys_map; | ||
97 | } | ||
98 | |||
99 | static inline int multi_timer_check(int apic, int irq) | ||
69 | { | 100 | { |
70 | return 0; | 101 | return 0; |
71 | } | 102 | } |
@@ -78,8 +109,13 @@ static inline int cpu_to_logical_apicid(int cpu) | |||
78 | 109 | ||
79 | static inline int cpu_present_to_apicid(int mps_cpu) | 110 | static inline int cpu_present_to_apicid(int mps_cpu) |
80 | { | 111 | { |
112 | #ifdef CONFIG_X86_64 | ||
113 | if (cpu_present(mps_cpu)) | ||
114 | return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); | ||
115 | #else | ||
81 | if (mps_cpu < get_physical_broadcast()) | 116 | if (mps_cpu < get_physical_broadcast()) |
82 | return mps_cpu; | 117 | return mps_cpu; |
118 | #endif | ||
83 | else | 119 | else |
84 | return BAD_APICID; | 120 | return BAD_APICID; |
85 | } | 121 | } |
@@ -89,17 +125,6 @@ static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) | |||
89 | return physid_mask_of_physid(phys_apicid); | 125 | return physid_mask_of_physid(phys_apicid); |
90 | } | 126 | } |
91 | 127 | ||
92 | static inline int mpc_apic_id(struct mpc_config_processor *m, | ||
93 | struct mpc_config_translation *translation_record) | ||
94 | { | ||
95 | printk("Processor #%d %u:%u APIC version %d\n", | ||
96 | m->mpc_apicid, | ||
97 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, | ||
98 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, | ||
99 | m->mpc_apicver); | ||
100 | return m->mpc_apicid; | ||
101 | } | ||
102 | |||
103 | static inline void setup_portio_remap(void) | 128 | static inline void setup_portio_remap(void) |
104 | { | 129 | { |
105 | } | 130 | } |
@@ -109,23 +134,9 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) | |||
109 | return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); | 134 | return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); |
110 | } | 135 | } |
111 | 136 | ||
112 | static inline int apic_id_registered(void) | ||
113 | { | ||
114 | return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); | ||
115 | } | ||
116 | |||
117 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | ||
118 | { | ||
119 | return cpus_addr(cpumask)[0]; | ||
120 | } | ||
121 | |||
122 | static inline void enable_apic_mode(void) | 137 | static inline void enable_apic_mode(void) |
123 | { | 138 | { |
124 | } | 139 | } |
125 | 140 | ||
126 | static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | 141 | #endif /* CONFIG_X86_LOCAL_APIC */ |
127 | { | ||
128 | return cpuid_apic >> index_msb; | ||
129 | } | ||
130 | |||
131 | #endif /* __ASM_MACH_APIC_H */ | 142 | #endif /* __ASM_MACH_APIC_H */ |
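Editor's note: the hunks above fold the former 64-bit mach_apic.h into mach-default by resolving every operation through the genapic function-pointer table under CONFIG_X86_64, while 32-bit keeps its static inlines. A compilable userspace sketch of that dispatch pattern — demo_genapic, demo_flat, and the mask-is-the-ID rule are illustrative stand-ins, not the kernel's definitions:

#include <stdio.h>

struct demo_genapic {
	const char *name;
	unsigned int (*cpu_mask_to_apicid)(unsigned long mask);
};

static unsigned int flat_cpu_mask_to_apicid(unsigned long mask)
{
	return (unsigned int)mask;	/* flat mode: the logical mask is the ID */
}

static struct demo_genapic demo_flat = {
	.name			= "flat",
	.cpu_mask_to_apicid	= flat_cpu_mask_to_apicid,
};

static struct demo_genapic *genapic = &demo_flat;	/* chosen once at boot */

/* same shape as: #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) */
#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)

int main(void)
{
	printf("%s: apicid for mask 0x5 is %u\n",
	       genapic->name, cpu_mask_to_apicid(0x5));
	return 0;
}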
diff --git a/include/asm-x86/mach-default/mach_apicdef.h b/include/asm-x86/mach-default/mach_apicdef.h index ae9841319094..e4b29ba37de6 100644 --- a/include/asm-x86/mach-default/mach_apicdef.h +++ b/include/asm-x86/mach-default/mach_apicdef.h | |||
@@ -3,10 +3,14 @@ | |||
3 | 3 | ||
4 | #include <asm/apic.h> | 4 | #include <asm/apic.h> |
5 | 5 | ||
6 | #ifdef CONFIG_X86_64 | ||
7 | #define APIC_ID_MASK (0xFFu<<24) | ||
8 | #define GET_APIC_ID(x) (((x)>>24)&0xFFu) | ||
9 | #define SET_APIC_ID(x) (((x)<<24)) | ||
10 | #else | ||
6 | #define APIC_ID_MASK (0xF<<24) | 11 | #define APIC_ID_MASK (0xF<<24) |
7 | |||
8 | static inline unsigned get_apic_id(unsigned long x) | 12 | static inline unsigned get_apic_id(unsigned long x) |
9 | { | 13 | { |
10 | unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); | 14 | unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); |
11 | if (APIC_XAPIC(ver)) | 15 | if (APIC_XAPIC(ver)) |
12 | return (((x)>>24)&0xFF); | 16 | return (((x)>>24)&0xFF); |
@@ -15,5 +19,6 @@ static inline unsigned get_apic_id(unsigned long x) | |||
15 | } | 19 | } |
16 | 20 | ||
17 | #define GET_APIC_ID(x) get_apic_id(x) | 21 | #define GET_APIC_ID(x) get_apic_id(x) |
22 | #endif | ||
18 | 23 | ||
19 | #endif | 24 | #endif |
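Editor's note: the split above reflects the two APIC ID field widths — 64-bit (and xAPIC) parts define the full 8 bits at positions 24..31, while the old 32-bit default masks to 4 bits, hence APIC_ID_MASK (0xF<<24). Plain bit arithmetic, compilable anywhere; the sample register value is made up:

#include <stdio.h>

static unsigned get_apic_id_full(unsigned long x)	/* 64-bit / xAPIC */
{
	return (x >> 24) & 0xFFu;
}

static unsigned get_apic_id_legacy(unsigned long x)	/* old 4-bit field */
{
	return (x >> 24) & 0xFu;
}

int main(void)
{
	unsigned long apic_id_reg = 0x17000000ul;	/* hypothetical APIC_ID value */

	printf("full 8-bit id: %u\n", get_apic_id_full(apic_id_reg));	  /* 23 */
	printf("legacy 4-bit id: %u\n", get_apic_id_legacy(apic_id_reg)); /* 7 */
	return 0;
}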
diff --git a/include/asm-x86/mach-default/mach_ipi.h b/include/asm-x86/mach-default/mach_ipi.h index 0dba244c86db..be323364e68f 100644 --- a/include/asm-x86/mach-default/mach_ipi.h +++ b/include/asm-x86/mach-default/mach_ipi.h | |||
@@ -9,10 +9,15 @@ void __send_IPI_shortcut(unsigned int shortcut, int vector); | |||
9 | 9 | ||
10 | extern int no_broadcast; | 10 | extern int no_broadcast; |
11 | 11 | ||
12 | #ifdef CONFIG_X86_64 | ||
13 | #include <asm/genapic.h> | ||
14 | #define send_IPI_mask (genapic->send_IPI_mask) | ||
15 | #else | ||
12 | static inline void send_IPI_mask(cpumask_t mask, int vector) | 16 | static inline void send_IPI_mask(cpumask_t mask, int vector) |
13 | { | 17 | { |
14 | send_IPI_mask_bitmask(mask, vector); | 18 | send_IPI_mask_bitmask(mask, vector); |
15 | } | 19 | } |
20 | #endif | ||
16 | 21 | ||
17 | static inline void __local_send_IPI_allbutself(int vector) | 22 | static inline void __local_send_IPI_allbutself(int vector) |
18 | { | 23 | { |
@@ -33,6 +38,10 @@ static inline void __local_send_IPI_all(int vector) | |||
33 | __send_IPI_shortcut(APIC_DEST_ALLINC, vector); | 38 | __send_IPI_shortcut(APIC_DEST_ALLINC, vector); |
34 | } | 39 | } |
35 | 40 | ||
41 | #ifdef CONFIG_X86_64 | ||
42 | #define send_IPI_allbutself (genapic->send_IPI_allbutself) | ||
43 | #define send_IPI_all (genapic->send_IPI_all) | ||
44 | #else | ||
36 | static inline void send_IPI_allbutself(int vector) | 45 | static inline void send_IPI_allbutself(int vector) |
37 | { | 46 | { |
38 | /* | 47 | /* |
@@ -50,5 +59,6 @@ static inline void send_IPI_all(int vector) | |||
50 | { | 59 | { |
51 | __local_send_IPI_all(vector); | 60 | __local_send_IPI_all(vector); |
52 | } | 61 | } |
62 | #endif | ||
53 | 63 | ||
54 | #endif /* __ASM_MACH_IPI_H */ | 64 | #endif /* __ASM_MACH_IPI_H */ |
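Editor's note: both the mask and broadcast paths above lean on __local_send_IPI_allbutself(), whose body sits outside the hunk. A hedged reconstruction of what it likely does with the no_broadcast flag declared above — the branch structure and APIC_DEST_ALLBUT are assumptions from kernel code of this era, not shown in this diff:

/* Hedged sketch, kernel context assumed throughout. */
static inline void __local_send_IPI_allbutself(int vector)
{
	if (no_broadcast) {		/* some chipsets mis-handle broadcasts */
		cpumask_t mask = cpu_online_map;

		cpu_clear(smp_processor_id(), mask);
		send_IPI_mask(mask, vector);	/* walk the mask instead */
	} else
		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}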
diff --git a/include/asm-x86/mach-default/mach_mpparse.h b/include/asm-x86/mach-default/mach_mpparse.h index 1d3832482580..d14108505bb8 100644 --- a/include/asm-x86/mach-default/mach_mpparse.h +++ b/include/asm-x86/mach-default/mach_mpparse.h | |||
@@ -1,17 +1,6 @@ | |||
1 | #ifndef __ASM_MACH_MPPARSE_H | 1 | #ifndef __ASM_MACH_MPPARSE_H |
2 | #define __ASM_MACH_MPPARSE_H | 2 | #define __ASM_MACH_MPPARSE_H |
3 | 3 | ||
4 | static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, | ||
5 | struct mpc_config_translation *translation) | ||
6 | { | ||
7 | // Dprintk("Bus #%d is %s\n", m->mpc_busid, name); | ||
8 | } | ||
9 | |||
10 | static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, | ||
11 | struct mpc_config_translation *translation) | ||
12 | { | ||
13 | } | ||
14 | |||
15 | static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, | 4 | static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, |
16 | char *productid) | 5 | char *productid) |
17 | { | 6 | { |
diff --git a/include/asm-x86/mach-default/mach_reboot.h b/include/asm-x86/mach-default/mach_reboot.h deleted file mode 100644 index 6adee6a97dec..000000000000 --- a/include/asm-x86/mach-default/mach_reboot.h +++ /dev/null | |||
@@ -1,61 +0,0 @@ | |||
1 | /* | ||
2 | * arch/i386/mach-generic/mach_reboot.h | ||
3 | * | ||
4 | * Machine specific reboot functions for generic. | ||
5 | * Split out from reboot.c by Osamu Tomita <tomita@cinet.co.jp> | ||
6 | */ | ||
7 | #ifndef _MACH_REBOOT_H | ||
8 | #define _MACH_REBOOT_H | ||
9 | |||
10 | static inline void kb_wait(void) | ||
11 | { | ||
12 | int i; | ||
13 | |||
14 | for (i = 0; i < 0x10000; i++) | ||
15 | if ((inb_p(0x64) & 0x02) == 0) | ||
16 | break; | ||
17 | } | ||
18 | |||
19 | static inline void mach_reboot(void) | ||
20 | { | ||
21 | int i; | ||
22 | |||
23 | /* old method, works on most machines */ | ||
24 | for (i = 0; i < 10; i++) { | ||
25 | kb_wait(); | ||
26 | udelay(50); | ||
27 | outb(0xfe, 0x64); /* pulse reset low */ | ||
28 | udelay(50); | ||
29 | } | ||
30 | |||
31 | /* New method: sets the "System flag" which, when set, indicates | ||
32 | * successful completion of the keyboard controller self-test (Basic | ||
33 | * Assurance Test, BAT). This is needed for some machines with no | ||
34 | * keyboard plugged in. This read-modify-write sequence sets only the | ||
35 | * system flag | ||
36 | */ | ||
37 | for (i = 0; i < 10; i++) { | ||
38 | int cmd; | ||
39 | |||
40 | outb(0x20, 0x64); /* read Controller Command Byte */ | ||
41 | udelay(50); | ||
42 | kb_wait(); | ||
43 | udelay(50); | ||
44 | cmd = inb(0x60); | ||
45 | udelay(50); | ||
46 | kb_wait(); | ||
47 | udelay(50); | ||
48 | outb(0x60, 0x64); /* write Controller Command Byte */ | ||
49 | udelay(50); | ||
50 | kb_wait(); | ||
51 | udelay(50); | ||
52 | outb(cmd | 0x14, 0x60); /* set "System flag" and "Keyboard Disabled" */ | ||
53 | udelay(50); | ||
54 | kb_wait(); | ||
55 | udelay(50); | ||
56 | outb(0xfe, 0x64); /* pulse reset low */ | ||
57 | udelay(50); | ||
58 | } | ||
59 | } | ||
60 | |||
61 | #endif /* !_MACH_REBOOT_H */ | ||
diff --git a/include/asm-x86/mach-default/smpboot_hooks.h b/include/asm-x86/mach-default/smpboot_hooks.h index 7f45f6311059..3ff2c5bff93a 100644 --- a/include/asm-x86/mach-default/smpboot_hooks.h +++ b/include/asm-x86/mach-default/smpboot_hooks.h | |||
@@ -41,4 +41,11 @@ static inline void smpboot_setup_io_apic(void) | |||
41 | */ | 41 | */ |
42 | if (!skip_ioapic_setup && nr_ioapics) | 42 | if (!skip_ioapic_setup && nr_ioapics) |
43 | setup_IO_APIC(); | 43 | setup_IO_APIC(); |
44 | else | ||
45 | nr_ioapics = 0; | ||
46 | } | ||
47 | |||
48 | static inline void smpboot_clear_io_apic(void) | ||
49 | { | ||
50 | nr_ioapics = 0; | ||
44 | } | 51 | } |
diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/mach-es7000/mach_apic.h index d23011fdf454..fbc8ad256f5a 100644 --- a/include/asm-x86/mach-es7000/mach_apic.h +++ b/include/asm-x86/mach-es7000/mach_apic.h | |||
@@ -1,9 +1,7 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | 1 | #ifndef __ASM_MACH_APIC_H |
2 | #define __ASM_MACH_APIC_H | 2 | #define __ASM_MACH_APIC_H |
3 | 3 | ||
4 | extern u8 bios_cpu_apicid[]; | 4 | #define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu) |
5 | |||
6 | #define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu]) | ||
7 | #define esr_disable (1) | 5 | #define esr_disable (1) |
8 | 6 | ||
9 | static inline int apic_id_registered(void) | 7 | static inline int apic_id_registered(void) |
@@ -80,7 +78,7 @@ extern void enable_apic_mode(void); | |||
80 | extern int apic_version [MAX_APICS]; | 78 | extern int apic_version [MAX_APICS]; |
81 | static inline void setup_apic_routing(void) | 79 | static inline void setup_apic_routing(void) |
82 | { | 80 | { |
83 | int apic = bios_cpu_apicid[smp_processor_id()]; | 81 | int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); |
84 | printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", | 82 | printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", |
85 | (apic_version[apic] == 0x14) ? | 83 | (apic_version[apic] == 0x14) ? |
86 | "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]); | 84 | "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]); |
@@ -102,7 +100,7 @@ static inline int cpu_present_to_apicid(int mps_cpu) | |||
102 | if (!mps_cpu) | 100 | if (!mps_cpu) |
103 | return boot_cpu_physical_apicid; | 101 | return boot_cpu_physical_apicid; |
104 | else if (mps_cpu < NR_CPUS) | 102 | else if (mps_cpu < NR_CPUS) |
105 | return (int) bios_cpu_apicid[mps_cpu]; | 103 | return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); |
106 | else | 104 | else |
107 | return BAD_APICID; | 105 | return BAD_APICID; |
108 | } | 106 | } |
@@ -129,16 +127,6 @@ static inline int cpu_to_logical_apicid(int cpu) | |||
129 | #endif | 127 | #endif |
130 | } | 128 | } |
131 | 129 | ||
132 | static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused) | ||
133 | { | ||
134 | printk("Processor #%d %u:%u APIC version %d\n", | ||
135 | m->mpc_apicid, | ||
136 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, | ||
137 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, | ||
138 | m->mpc_apicver); | ||
139 | return (m->mpc_apicid); | ||
140 | } | ||
141 | |||
142 | static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) | 130 | static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) |
143 | { | 131 | { |
144 | /* For clustered we don't have a good way to do this yet - hack */ | 132 | /* For clustered we don't have a good way to do this yet - hack */ |
@@ -153,7 +141,7 @@ static inline void setup_portio_remap(void) | |||
153 | extern unsigned int boot_cpu_physical_apicid; | 141 | extern unsigned int boot_cpu_physical_apicid; |
154 | static inline int check_phys_apicid_present(int cpu_physical_apicid) | 142 | static inline int check_phys_apicid_present(int cpu_physical_apicid) |
155 | { | 143 | { |
156 | boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); | 144 | boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); |
157 | return (1); | 145 | return (1); |
158 | } | 146 | } |
159 | 147 | ||
diff --git a/include/asm-x86/mach-es7000/mach_mpparse.h b/include/asm-x86/mach-es7000/mach_mpparse.h index 52ee75cd0fe1..ef26d3523625 100644 --- a/include/asm-x86/mach-es7000/mach_mpparse.h +++ b/include/asm-x86/mach-es7000/mach_mpparse.h | |||
@@ -3,17 +3,6 @@ | |||
3 | 3 | ||
4 | #include <linux/acpi.h> | 4 | #include <linux/acpi.h> |
5 | 5 | ||
6 | static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, | ||
7 | struct mpc_config_translation *translation) | ||
8 | { | ||
9 | Dprintk("Bus #%d is %s\n", m->mpc_busid, name); | ||
10 | } | ||
11 | |||
12 | static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, | ||
13 | struct mpc_config_translation *translation) | ||
14 | { | ||
15 | } | ||
16 | |||
17 | extern int parse_unisys_oem (char *oemptr); | 6 | extern int parse_unisys_oem (char *oemptr); |
18 | extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); | 7 | extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); |
19 | extern void setup_unisys(void); | 8 | extern void setup_unisys(void); |
diff --git a/include/asm-x86/mach-generic/mach_apic.h b/include/asm-x86/mach-generic/mach_apic.h index a236e7021528..6eff343e1233 100644 --- a/include/asm-x86/mach-generic/mach_apic.h +++ b/include/asm-x86/mach-generic/mach_apic.h | |||
@@ -19,7 +19,6 @@ | |||
19 | #define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid) | 19 | #define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid) |
20 | #define cpu_present_to_apicid (genapic->cpu_present_to_apicid) | 20 | #define cpu_present_to_apicid (genapic->cpu_present_to_apicid) |
21 | #define apicid_to_cpu_present (genapic->apicid_to_cpu_present) | 21 | #define apicid_to_cpu_present (genapic->apicid_to_cpu_present) |
22 | #define mpc_apic_id (genapic->mpc_apic_id) | ||
23 | #define setup_portio_remap (genapic->setup_portio_remap) | 22 | #define setup_portio_remap (genapic->setup_portio_remap) |
24 | #define check_apicid_present (genapic->check_apicid_present) | 23 | #define check_apicid_present (genapic->check_apicid_present) |
25 | #define check_phys_apicid_present (genapic->check_phys_apicid_present) | 24 | #define check_phys_apicid_present (genapic->check_phys_apicid_present) |
diff --git a/include/asm-x86/mach-generic/mach_mpparse.h b/include/asm-x86/mach-generic/mach_mpparse.h index dbd9fce54f4d..0d0b5ba2e9d1 100644 --- a/include/asm-x86/mach-generic/mach_mpparse.h +++ b/include/asm-x86/mach-generic/mach_mpparse.h | |||
@@ -1,11 +1,6 @@ | |||
1 | #ifndef _MACH_MPPARSE_H | 1 | #ifndef _MACH_MPPARSE_H |
2 | #define _MACH_MPPARSE_H 1 | 2 | #define _MACH_MPPARSE_H 1 |
3 | 3 | ||
4 | #include <asm/genapic.h> | ||
5 | |||
6 | #define mpc_oem_bus_info (genapic->mpc_oem_bus_info) | ||
7 | #define mpc_oem_pci_bus (genapic->mpc_oem_pci_bus) | ||
8 | |||
9 | int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid); | 4 | int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid); |
10 | int acpi_madt_oem_check(char *oem_id, char *oem_table_id); | 5 | int acpi_madt_oem_check(char *oem_id, char *oem_table_id); |
11 | 6 | ||
diff --git a/include/asm-x86/mach-numaq/mach_apic.h b/include/asm-x86/mach-numaq/mach_apic.h index 3b637fac890b..75a56e5afbe7 100644 --- a/include/asm-x86/mach-numaq/mach_apic.h +++ b/include/asm-x86/mach-numaq/mach_apic.h | |||
@@ -95,6 +95,16 @@ static inline physid_mask_t apicid_to_cpu_present(int logical_apicid) | |||
95 | return physid_mask_of_physid(cpu + 4*node); | 95 | return physid_mask_of_physid(cpu + 4*node); |
96 | } | 96 | } |
97 | 97 | ||
98 | struct mpc_config_translation { | ||
99 | unsigned char mpc_type; | ||
100 | unsigned char trans_len; | ||
101 | unsigned char trans_type; | ||
102 | unsigned char trans_quad; | ||
103 | unsigned char trans_global; | ||
104 | unsigned char trans_local; | ||
105 | unsigned short trans_reserved; | ||
106 | }; | ||
107 | |||
98 | static inline int mpc_apic_id(struct mpc_config_processor *m, | 108 | static inline int mpc_apic_id(struct mpc_config_processor *m, |
99 | struct mpc_config_translation *translation_record) | 109 | struct mpc_config_translation *translation_record) |
100 | { | 110 | { |
diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h index 51bbac8fc0c2..459b12401187 100644 --- a/include/asm-x86/mach-numaq/mach_mpparse.h +++ b/include/asm-x86/mach-numaq/mach_mpparse.h | |||
@@ -1,25 +1,10 @@ | |||
1 | #ifndef __ASM_MACH_MPPARSE_H | 1 | #ifndef __ASM_MACH_MPPARSE_H |
2 | #define __ASM_MACH_MPPARSE_H | 2 | #define __ASM_MACH_MPPARSE_H |
3 | 3 | ||
4 | static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, | 4 | extern void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, |
5 | struct mpc_config_translation *translation) | 5 | struct mpc_config_translation *translation); |
6 | { | 6 | extern void mpc_oem_pci_bus(struct mpc_config_bus *m, |
7 | int quad = translation->trans_quad; | 7 | struct mpc_config_translation *translation); |
8 | int local = translation->trans_local; | ||
9 | |||
10 | mp_bus_id_to_node[m->mpc_busid] = quad; | ||
11 | mp_bus_id_to_local[m->mpc_busid] = local; | ||
12 | printk("Bus #%d is %s (node %d)\n", m->mpc_busid, name, quad); | ||
13 | } | ||
14 | |||
15 | static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, | ||
16 | struct mpc_config_translation *translation) | ||
17 | { | ||
18 | int quad = translation->trans_quad; | ||
19 | int local = translation->trans_local; | ||
20 | |||
21 | quad_local_to_mp_bus_id[quad][local] = m->mpc_busid; | ||
22 | } | ||
23 | 8 | ||
24 | /* Hook from generic ACPI tables.c */ | 9 | /* Hook from generic ACPI tables.c */ |
25 | static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id) | 10 | static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id) |
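Editor's note: the deleted inline bodies above move out of line, with only the extern declarations remaining here. Since the removed lines show the full logic — record the quad/local translation for a bus, or register a PCI bus ID per quad — the non-inline definitions presumably read like this sketch, reassembled from the deleted code rather than copied from their new home:

void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
		      struct mpc_config_translation *translation)
{
	int quad = translation->trans_quad;
	int local = translation->trans_local;

	mp_bus_id_to_node[m->mpc_busid] = quad;	/* bus -> NUMA-Q quad */
	mp_bus_id_to_local[m->mpc_busid] = local;
	printk("Bus #%d is %s (node %d)\n", m->mpc_busid, name, quad);
}

void mpc_oem_pci_bus(struct mpc_config_bus *m,
		     struct mpc_config_translation *translation)
{
	quad_local_to_mp_bus_id[translation->trans_quad]
			       [translation->trans_local] = m->mpc_busid;
}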
diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/mach-summit/mach_apic.h index 062c97f6100b..1f76c2e70232 100644 --- a/include/asm-x86/mach-summit/mach_apic.h +++ b/include/asm-x86/mach-summit/mach_apic.h | |||
@@ -40,7 +40,6 @@ static inline unsigned long check_apicid_present(int bit) | |||
40 | 40 | ||
41 | #define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) | 41 | #define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) |
42 | 42 | ||
43 | extern u8 bios_cpu_apicid[]; | ||
44 | extern u8 cpu_2_logical_apicid[]; | 43 | extern u8 cpu_2_logical_apicid[]; |
45 | 44 | ||
46 | static inline void init_apic_ldr(void) | 45 | static inline void init_apic_ldr(void) |
@@ -110,7 +109,7 @@ static inline int cpu_to_logical_apicid(int cpu) | |||
110 | static inline int cpu_present_to_apicid(int mps_cpu) | 109 | static inline int cpu_present_to_apicid(int mps_cpu) |
111 | { | 110 | { |
112 | if (mps_cpu < NR_CPUS) | 111 | if (mps_cpu < NR_CPUS) |
113 | return (int)bios_cpu_apicid[mps_cpu]; | 112 | return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); |
114 | else | 113 | else |
115 | return BAD_APICID; | 114 | return BAD_APICID; |
116 | } | 115 | } |
@@ -126,17 +125,6 @@ static inline physid_mask_t apicid_to_cpu_present(int apicid) | |||
126 | return physid_mask_of_physid(0); | 125 | return physid_mask_of_physid(0); |
127 | } | 126 | } |
128 | 127 | ||
129 | static inline int mpc_apic_id(struct mpc_config_processor *m, | ||
130 | struct mpc_config_translation *translation_record) | ||
131 | { | ||
132 | printk("Processor #%d %u:%u APIC version %d\n", | ||
133 | m->mpc_apicid, | ||
134 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, | ||
135 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, | ||
136 | m->mpc_apicver); | ||
137 | return m->mpc_apicid; | ||
138 | } | ||
139 | |||
140 | static inline void setup_portio_remap(void) | 128 | static inline void setup_portio_remap(void) |
141 | { | 129 | { |
142 | } | 130 | } |
diff --git a/include/asm-x86/mach-summit/mach_mpparse.h b/include/asm-x86/mach-summit/mach_mpparse.h index c2520539d934..fdf591701339 100644 --- a/include/asm-x86/mach-summit/mach_mpparse.h +++ b/include/asm-x86/mach-summit/mach_mpparse.h | |||
@@ -12,17 +12,6 @@ extern void setup_summit(void); | |||
12 | #define setup_summit() {} | 12 | #define setup_summit() {} |
13 | #endif | 13 | #endif |
14 | 14 | ||
15 | static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, | ||
16 | struct mpc_config_translation *translation) | ||
17 | { | ||
18 | Dprintk("Bus #%d is %s\n", m->mpc_busid, name); | ||
19 | } | ||
20 | |||
21 | static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, | ||
22 | struct mpc_config_translation *translation) | ||
23 | { | ||
24 | } | ||
25 | |||
26 | static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, | 15 | static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, |
27 | char *productid) | 16 | char *productid) |
28 | { | 17 | { |
diff --git a/include/asm-x86/mach-visws/mach_apic.h b/include/asm-x86/mach-visws/mach_apic.h index efac6f0d139f..a9ef33a8a995 100644 --- a/include/asm-x86/mach-visws/mach_apic.h +++ b/include/asm-x86/mach-visws/mach_apic.h | |||
@@ -23,7 +23,7 @@ | |||
23 | 23 | ||
24 | static inline int apic_id_registered(void) | 24 | static inline int apic_id_registered(void) |
25 | { | 25 | { |
26 | return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); | 26 | return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map); |
27 | } | 27 | } |
28 | 28 | ||
29 | /* | 29 | /* |
diff --git a/include/asm-x86/mach-visws/smpboot_hooks.h b/include/asm-x86/mach-visws/smpboot_hooks.h index d926471fa359..c9b83e395a2e 100644 --- a/include/asm-x86/mach-visws/smpboot_hooks.h +++ b/include/asm-x86/mach-visws/smpboot_hooks.h | |||
@@ -22,3 +22,7 @@ static inline void smpboot_restore_warm_reset_vector(void) | |||
22 | static inline void smpboot_setup_io_apic(void) | 22 | static inline void smpboot_setup_io_apic(void) |
23 | { | 23 | { |
24 | } | 24 | } |
25 | |||
26 | static inline void smpboot_clear_io_apic(void) | ||
27 | { | ||
28 | } | ||
diff --git a/include/asm-x86/mach_apic.h b/include/asm-x86/mach_apic.h deleted file mode 100644 index 7b7115a0c1c9..000000000000 --- a/include/asm-x86/mach_apic.h +++ /dev/null | |||
@@ -1,29 +0,0 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | ||
2 | #define __ASM_MACH_APIC_H | ||
3 | |||
4 | /* | ||
5 | * Copyright 2004 James Cleverdon, IBM. | ||
6 | * Subject to the GNU Public License, v.2 | ||
7 | * | ||
8 | * Generic APIC sub-arch defines. | ||
9 | * | ||
10 | * Hacked for x86-64 by James Cleverdon from i386 architecture code by | ||
11 | * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and | ||
12 | * James Cleverdon. | ||
13 | */ | ||
14 | |||
15 | #include <asm/genapic.h> | ||
16 | |||
17 | #define INT_DELIVERY_MODE (genapic->int_delivery_mode) | ||
18 | #define INT_DEST_MODE (genapic->int_dest_mode) | ||
19 | #define TARGET_CPUS (genapic->target_cpus()) | ||
20 | #define vector_allocation_domain (genapic->vector_allocation_domain) | ||
21 | #define apic_id_registered (genapic->apic_id_registered) | ||
22 | #define init_apic_ldr (genapic->init_apic_ldr) | ||
23 | #define send_IPI_mask (genapic->send_IPI_mask) | ||
24 | #define send_IPI_allbutself (genapic->send_IPI_allbutself) | ||
25 | #define send_IPI_all (genapic->send_IPI_all) | ||
26 | #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) | ||
27 | #define phys_pkg_id (genapic->phys_pkg_id) | ||
28 | |||
29 | #endif /* __ASM_MACH_APIC_H */ | ||
diff --git a/include/asm-x86/mc146818rtc.h b/include/asm-x86/mc146818rtc.h index cdd9f965835a..daf1ccde77af 100644 --- a/include/asm-x86/mc146818rtc.h +++ b/include/asm-x86/mc146818rtc.h | |||
@@ -42,7 +42,7 @@ extern volatile unsigned long cmos_lock; | |||
42 | static inline void lock_cmos(unsigned char reg) | 42 | static inline void lock_cmos(unsigned char reg) |
43 | { | 43 | { |
44 | unsigned long new; | 44 | unsigned long new; |
45 | new = ((smp_processor_id()+1) << 8) | reg; | 45 | new = ((smp_processor_id() + 1) << 8) | reg; |
46 | for (;;) { | 46 | for (;;) { |
47 | if (cmos_lock) { | 47 | if (cmos_lock) { |
48 | cpu_relax(); | 48 | cpu_relax(); |
@@ -57,22 +57,26 @@ static inline void unlock_cmos(void) | |||
57 | { | 57 | { |
58 | cmos_lock = 0; | 58 | cmos_lock = 0; |
59 | } | 59 | } |
60 | |||
60 | static inline int do_i_have_lock_cmos(void) | 61 | static inline int do_i_have_lock_cmos(void) |
61 | { | 62 | { |
62 | return (cmos_lock >> 8) == (smp_processor_id()+1); | 63 | return (cmos_lock >> 8) == (smp_processor_id() + 1); |
63 | } | 64 | } |
65 | |||
64 | static inline unsigned char current_lock_cmos_reg(void) | 66 | static inline unsigned char current_lock_cmos_reg(void) |
65 | { | 67 | { |
66 | return cmos_lock & 0xff; | 68 | return cmos_lock & 0xff; |
67 | } | 69 | } |
68 | #define lock_cmos_prefix(reg) \ | 70 | |
71 | #define lock_cmos_prefix(reg) \ | ||
69 | do { \ | 72 | do { \ |
70 | unsigned long cmos_flags; \ | 73 | unsigned long cmos_flags; \ |
71 | local_irq_save(cmos_flags); \ | 74 | local_irq_save(cmos_flags); \ |
72 | lock_cmos(reg) | 75 | lock_cmos(reg) |
73 | #define lock_cmos_suffix(reg) \ | 76 | |
74 | unlock_cmos(); \ | 77 | #define lock_cmos_suffix(reg) \ |
75 | local_irq_restore(cmos_flags); \ | 78 | unlock_cmos(); \ |
79 | local_irq_restore(cmos_flags); \ | ||
76 | } while (0) | 80 | } while (0) |
77 | #else | 81 | #else |
78 | #define lock_cmos_prefix(reg) do {} while (0) | 82 | #define lock_cmos_prefix(reg) do {} while (0) |
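Editor's note: the cmos_lock word manipulated above packs the owner as (cpu + 1) << 8 — so zero means unlocked — with the CMOS register number in the low byte. A single-threaded userspace demo of just that encoding (no atomics, no spinning):

#include <stdio.h>

static unsigned long cmos_lock;

static void lock_cmos(int cpu, unsigned char reg)
{
	cmos_lock = ((unsigned long)(cpu + 1) << 8) | reg;
}

static int do_i_have_lock_cmos(int cpu)
{
	return (cmos_lock >> 8) == (unsigned long)(cpu + 1);
}

int main(void)
{
	lock_cmos(0, 0x32);	/* CPU 0 claims the lock for register 0x32 */
	printf("owner ok: %d, reg: 0x%02lx\n",
	       do_i_have_lock_cmos(0), cmos_lock & 0xff);
	cmos_lock = 0;		/* what unlock_cmos() does */
	return 0;
}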
diff --git a/include/asm-x86/mca_dma.h b/include/asm-x86/mca_dma.h index fbb1f3b71279..c3dca6edc6b1 100644 --- a/include/asm-x86/mca_dma.h +++ b/include/asm-x86/mca_dma.h | |||
@@ -12,18 +12,18 @@ | |||
12 | * count by 2 when using 16-bit dma; that is not handled by these functions. | 12 | * count by 2 when using 16-bit dma; that is not handled by these functions. |
13 | * | 13 | * |
14 | * Ramen Noodles are yummy. | 14 | * Ramen Noodles are yummy. |
15 | * | 15 | * |
16 | * 1998 Tymm Twillman <tymm@computer.org> | 16 | * 1998 Tymm Twillman <tymm@computer.org> |
17 | */ | 17 | */ |
18 | 18 | ||
19 | /* | 19 | /* |
20 | * Registers that are used by the DMA controller; FN is the function register | 20 | * Registers that are used by the DMA controller; FN is the function register |
21 | * (tell the controller what to do) and EXE is the execution register (how | 21 | * (tell the controller what to do) and EXE is the execution register (how |
22 | * to do it) | 22 | * to do it) |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #define MCA_DMA_REG_FN 0x18 | 25 | #define MCA_DMA_REG_FN 0x18 |
26 | #define MCA_DMA_REG_EXE 0x1A | 26 | #define MCA_DMA_REG_EXE 0x1A |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * Functions that the DMA controller can do | 29 | * Functions that the DMA controller can do |
@@ -43,9 +43,9 @@ | |||
43 | 43 | ||
44 | /* | 44 | /* |
45 | * Modes (used by setting MCA_DMA_FN_MODE in the function register) | 45 | * Modes (used by setting MCA_DMA_FN_MODE in the function register) |
46 | * | 46 | * |
47 | * Note that the MODE_READ is read from memory (write to device), and | 47 | * Note that the MODE_READ is read from memory (write to device), and |
48 | * MODE_WRITE is vice-versa. | 48 | * MODE_WRITE is vice-versa. |
49 | */ | 49 | */ |
50 | 50 | ||
51 | #define MCA_DMA_MODE_XFER 0x04 /* read by default */ | 51 | #define MCA_DMA_MODE_XFER 0x04 /* read by default */ |
@@ -63,7 +63,7 @@ | |||
63 | * IRQ context. | 63 | * IRQ context. |
64 | */ | 64 | */ |
65 | 65 | ||
66 | static __inline__ void mca_enable_dma(unsigned int dmanr) | 66 | static inline void mca_enable_dma(unsigned int dmanr) |
67 | { | 67 | { |
68 | outb(MCA_DMA_FN_RESET_MASK | dmanr, MCA_DMA_REG_FN); | 68 | outb(MCA_DMA_FN_RESET_MASK | dmanr, MCA_DMA_REG_FN); |
69 | } | 69 | } |
@@ -76,7 +76,7 @@ static __inline__ void mca_enable_dma(unsigned int dmanr) | |||
76 | * IRQ context. | 76 | * IRQ context. |
77 | */ | 77 | */ |
78 | 78 | ||
79 | static __inline__ void mca_disable_dma(unsigned int dmanr) | 79 | static inline void mca_disable_dma(unsigned int dmanr) |
80 | { | 80 | { |
81 | outb(MCA_DMA_FN_MASK | dmanr, MCA_DMA_REG_FN); | 81 | outb(MCA_DMA_FN_MASK | dmanr, MCA_DMA_REG_FN); |
82 | } | 82 | } |
@@ -87,10 +87,10 @@ static __inline__ void mca_disable_dma(unsigned int dmanr) | |||
87 | * @a: 24bit bus address | 87 | * @a: 24bit bus address |
88 | * | 88 | * |
89 | * Load the address register in the DMA controller. This has a 24bit | 89 | * Load the address register in the DMA controller. This has a 24bit |
90 | * limitation (16Mb). | 90 | * limitation (16Mb). |
91 | */ | 91 | */ |
92 | 92 | ||
93 | static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a) | 93 | static inline void mca_set_dma_addr(unsigned int dmanr, unsigned int a) |
94 | { | 94 | { |
95 | outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN); | 95 | outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN); |
96 | outb(a & 0xff, MCA_DMA_REG_EXE); | 96 | outb(a & 0xff, MCA_DMA_REG_EXE); |
@@ -106,14 +106,14 @@ static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a) | |||
106 | * limitation (16Mb). The return is a bus address. | 106 | * limitation (16Mb). The return is a bus address. |
107 | */ | 107 | */ |
108 | 108 | ||
109 | static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr) | 109 | static inline unsigned int mca_get_dma_addr(unsigned int dmanr) |
110 | { | 110 | { |
111 | unsigned int addr; | 111 | unsigned int addr; |
112 | 112 | ||
113 | outb(MCA_DMA_FN_GET_ADDR | dmanr, MCA_DMA_REG_FN); | 113 | outb(MCA_DMA_FN_GET_ADDR | dmanr, MCA_DMA_REG_FN); |
114 | addr = inb(MCA_DMA_REG_EXE); | 114 | addr = inb(MCA_DMA_REG_EXE); |
115 | addr |= inb(MCA_DMA_REG_EXE) << 8; | 115 | addr |= inb(MCA_DMA_REG_EXE) << 8; |
116 | addr |= inb(MCA_DMA_REG_EXE) << 16; | 116 | addr |= inb(MCA_DMA_REG_EXE) << 16; |
117 | 117 | ||
118 | return addr; | 118 | return addr; |
119 | } | 119 | } |
@@ -127,7 +127,7 @@ static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr) | |||
127 | * Setting a count of zero will not do what you expect. | 127 | * Setting a count of zero will not do what you expect. |
128 | */ | 128 | */ |
129 | 129 | ||
130 | static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count) | 130 | static inline void mca_set_dma_count(unsigned int dmanr, unsigned int count) |
131 | { | 131 | { |
132 | count--; /* transfers one more than count -- correct for this */ | 132 | count--; /* transfers one more than count -- correct for this */ |
133 | 133 | ||
@@ -144,7 +144,7 @@ static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count) | |||
144 | * on this DMA channel. | 144 | * on this DMA channel. |
145 | */ | 145 | */ |
146 | 146 | ||
147 | static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr) | 147 | static inline unsigned int mca_get_dma_residue(unsigned int dmanr) |
148 | { | 148 | { |
149 | unsigned short count; | 149 | unsigned short count; |
150 | 150 | ||
@@ -164,12 +164,12 @@ static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr) | |||
164 | * with an I/O port target. | 164 | * with an I/O port target. |
165 | */ | 165 | */ |
166 | 166 | ||
167 | static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr) | 167 | static inline void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr) |
168 | { | 168 | { |
169 | /* | 169 | /* |
170 | * DMA from a port address -- set the io address | 170 | * DMA from a port address -- set the io address |
171 | */ | 171 | */ |
172 | 172 | ||
173 | outb(MCA_DMA_FN_SET_IO | dmanr, MCA_DMA_REG_FN); | 173 | outb(MCA_DMA_FN_SET_IO | dmanr, MCA_DMA_REG_FN); |
174 | outb(io_addr & 0xff, MCA_DMA_REG_EXE); | 174 | outb(io_addr & 0xff, MCA_DMA_REG_EXE); |
175 | outb((io_addr >> 8) & 0xff, MCA_DMA_REG_EXE); | 175 | outb((io_addr >> 8) & 0xff, MCA_DMA_REG_EXE); |
@@ -192,7 +192,7 @@ static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr) | |||
192 | * %MCA_DMA_MODE_16 to do 16bit transfers. | 192 | * %MCA_DMA_MODE_16 to do 16bit transfers. |
193 | */ | 193 | */ |
194 | 194 | ||
195 | static __inline__ void mca_set_dma_mode(unsigned int dmanr, unsigned int mode) | 195 | static inline void mca_set_dma_mode(unsigned int dmanr, unsigned int mode) |
196 | { | 196 | { |
197 | outb(MCA_DMA_FN_SET_MODE | dmanr, MCA_DMA_REG_FN); | 197 | outb(MCA_DMA_FN_SET_MODE | dmanr, MCA_DMA_REG_FN); |
198 | outb(mode, MCA_DMA_REG_EXE); | 198 | outb(mode, MCA_DMA_REG_EXE); |
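Editor's note: a channel programmed with the helpers in this file follows a fixed order — mask, address, count, mode, unmask. A plausible caller as a sketch; the channel, buffer, and MCA_DMA_MODE_WRITE flag are assumptions for illustration (the hunk shows only MODE_XFER, though its comment names the READ/WRITE variants):

static void demo_mca_dma_setup(unsigned int dmanr,
			       unsigned int bus_addr, unsigned int len)
{
	mca_disable_dma(dmanr);			/* mask the channel first */
	mca_set_dma_addr(dmanr, bus_addr);	/* 24-bit bus address (<16MB) */
	mca_set_dma_count(dmanr, len);		/* helper corrects the off-by-one */
	mca_set_dma_mode(dmanr, MCA_DMA_MODE_XFER | MCA_DMA_MODE_WRITE);
	mca_enable_dma(dmanr);			/* unmask; device may now transfer */
}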
diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h index efa962c38897..00e88679e11f 100644 --- a/include/asm-x86/mmu.h +++ b/include/asm-x86/mmu.h | |||
@@ -10,10 +10,10 @@ | |||
10 | * | 10 | * |
11 | * cpu_vm_mask is used to optimize ldt flushing. | 11 | * cpu_vm_mask is used to optimize ldt flushing. |
12 | */ | 12 | */ |
13 | typedef struct { | 13 | typedef struct { |
14 | void *ldt; | 14 | void *ldt; |
15 | #ifdef CONFIG_X86_64 | 15 | #ifdef CONFIG_X86_64 |
16 | rwlock_t ldtlock; | 16 | rwlock_t ldtlock; |
17 | #endif | 17 | #endif |
18 | int size; | 18 | int size; |
19 | struct mutex lock; | 19 | struct mutex lock; |
diff --git a/include/asm-x86/mmu_context_32.h b/include/asm-x86/mmu_context_32.h index 8198d1cca1f3..9756ae0f1dd3 100644 --- a/include/asm-x86/mmu_context_32.h +++ b/include/asm-x86/mmu_context_32.h | |||
@@ -62,7 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, | |||
62 | BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next); | 62 | BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next); |
63 | 63 | ||
64 | if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { | 64 | if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { |
65 | /* We were in lazy tlb mode and leave_mm disabled | 65 | /* We were in lazy tlb mode and leave_mm disabled |
66 | * tlb flush IPI delivery. We must reload %cr3. | 66 | * tlb flush IPI delivery. We must reload %cr3. |
67 | */ | 67 | */ |
68 | load_cr3(next->pgd); | 68 | load_cr3(next->pgd); |
@@ -75,10 +75,10 @@ static inline void switch_mm(struct mm_struct *prev, | |||
75 | #define deactivate_mm(tsk, mm) \ | 75 | #define deactivate_mm(tsk, mm) \ |
76 | asm("movl %0,%%gs": :"r" (0)); | 76 | asm("movl %0,%%gs": :"r" (0)); |
77 | 77 | ||
78 | #define activate_mm(prev, next) \ | 78 | #define activate_mm(prev, next) \ |
79 | do { \ | 79 | do { \ |
80 | paravirt_activate_mm(prev, next); \ | 80 | paravirt_activate_mm((prev), (next)); \ |
81 | switch_mm((prev),(next),NULL); \ | 81 | switch_mm((prev), (next), NULL); \ |
82 | } while(0); | 82 | } while (0); |
83 | 83 | ||
84 | #endif | 84 | #endif |
diff --git a/include/asm-x86/mmu_context_64.h b/include/asm-x86/mmu_context_64.h index ad6dc821ef9e..ca44c71e7fb3 100644 --- a/include/asm-x86/mmu_context_64.h +++ b/include/asm-x86/mmu_context_64.h | |||
@@ -20,12 +20,12 @@ void destroy_context(struct mm_struct *mm); | |||
20 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | 20 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) |
21 | { | 21 | { |
22 | #ifdef CONFIG_SMP | 22 | #ifdef CONFIG_SMP |
23 | if (read_pda(mmu_state) == TLBSTATE_OK) | 23 | if (read_pda(mmu_state) == TLBSTATE_OK) |
24 | write_pda(mmu_state, TLBSTATE_LAZY); | 24 | write_pda(mmu_state, TLBSTATE_LAZY); |
25 | #endif | 25 | #endif |
26 | } | 26 | } |
27 | 27 | ||
28 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | 28 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
29 | struct task_struct *tsk) | 29 | struct task_struct *tsk) |
30 | { | 30 | { |
31 | unsigned cpu = smp_processor_id(); | 31 | unsigned cpu = smp_processor_id(); |
@@ -39,7 +39,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
39 | cpu_set(cpu, next->cpu_vm_mask); | 39 | cpu_set(cpu, next->cpu_vm_mask); |
40 | load_cr3(next->pgd); | 40 | load_cr3(next->pgd); |
41 | 41 | ||
42 | if (unlikely(next->context.ldt != prev->context.ldt)) | 42 | if (unlikely(next->context.ldt != prev->context.ldt)) |
43 | load_LDT_nolock(&next->context); | 43 | load_LDT_nolock(&next->context); |
44 | } | 44 | } |
45 | #ifdef CONFIG_SMP | 45 | #ifdef CONFIG_SMP |
@@ -48,7 +48,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
48 | if (read_pda(active_mm) != next) | 48 | if (read_pda(active_mm) != next) |
49 | BUG(); | 49 | BUG(); |
50 | if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { | 50 | if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { |
51 | /* We were in lazy tlb mode and leave_mm disabled | 51 | /* We were in lazy tlb mode and leave_mm disabled |
52 | * tlb flush IPI delivery. We must reload CR3 | 52 | * tlb flush IPI delivery. We must reload CR3 |
53 | * to make sure to use no freed page tables. | 53 | * to make sure to use no freed page tables. |
54 | */ | 54 | */ |
@@ -59,13 +59,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
59 | #endif | 59 | #endif |
60 | } | 60 | } |
61 | 61 | ||
62 | #define deactivate_mm(tsk,mm) do { \ | 62 | #define deactivate_mm(tsk, mm) \ |
63 | load_gs_index(0); \ | 63 | do { \ |
64 | asm volatile("movl %0,%%fs"::"r"(0)); \ | 64 | load_gs_index(0); \ |
65 | } while(0) | 65 | asm volatile("movl %0,%%fs"::"r"(0)); \ |
66 | } while (0) | ||
66 | 67 | ||
67 | #define activate_mm(prev, next) \ | 68 | #define activate_mm(prev, next) \ |
68 | switch_mm((prev),(next),NULL) | 69 | switch_mm((prev), (next), NULL) |
69 | 70 | ||
70 | 71 | ||
71 | #endif | 72 | #endif |
diff --git a/include/asm-x86/mmx.h b/include/asm-x86/mmx.h index 46b71da99869..940881218ff8 100644 --- a/include/asm-x86/mmx.h +++ b/include/asm-x86/mmx.h | |||
@@ -6,7 +6,7 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/types.h> | 8 | #include <linux/types.h> |
9 | 9 | ||
10 | extern void *_mmx_memcpy(void *to, const void *from, size_t size); | 10 | extern void *_mmx_memcpy(void *to, const void *from, size_t size); |
11 | extern void mmx_clear_page(void *page); | 11 | extern void mmx_clear_page(void *page); |
12 | extern void mmx_copy_page(void *to, void *from); | 12 | extern void mmx_copy_page(void *to, void *from); |
diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h index 274a59566c45..cb2cad0b65a7 100644 --- a/include/asm-x86/mmzone_32.h +++ b/include/asm-x86/mmzone_32.h | |||
@@ -18,7 +18,7 @@ extern struct pglist_data *node_data[]; | |||
18 | #include <asm/srat.h> | 18 | #include <asm/srat.h> |
19 | #endif | 19 | #endif |
20 | 20 | ||
21 | extern int get_memcfg_numa_flat(void ); | 21 | extern int get_memcfg_numa_flat(void); |
22 | /* | 22 | /* |
23 | * This allows any one NUMA architecture to be compiled | 23 | * This allows any one NUMA architecture to be compiled |
24 | * for, and still fall back to the flat function if it | 24 | * for, and still fall back to the flat function if it |
@@ -129,7 +129,7 @@ static inline int pfn_valid(int pfn) | |||
129 | struct pglist_data __maybe_unused \ | 129 | struct pglist_data __maybe_unused \ |
130 | *__alloc_bootmem_node__pgdat = (pgdat); \ | 130 | *__alloc_bootmem_node__pgdat = (pgdat); \ |
131 | __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \ | 131 | __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \ |
132 | __pa(MAX_DMA_ADDRESS)) \ | 132 | __pa(MAX_DMA_ADDRESS)); \ |
133 | }) | 133 | }) |
134 | #define alloc_bootmem_low_pages_node(pgdat, x) \ | 134 | #define alloc_bootmem_low_pages_node(pgdat, x) \ |
135 | ({ \ | 135 | ({ \ |
diff --git a/include/asm-x86/mmzone_64.h b/include/asm-x86/mmzone_64.h index ebaf9663aa8a..594bd0dc1d08 100644 --- a/include/asm-x86/mmzone_64.h +++ b/include/asm-x86/mmzone_64.h | |||
@@ -7,7 +7,7 @@ | |||
7 | 7 | ||
8 | #ifdef CONFIG_NUMA | 8 | #ifdef CONFIG_NUMA |
9 | 9 | ||
10 | #define VIRTUAL_BUG_ON(x) | 10 | #define VIRTUAL_BUG_ON(x) |
11 | 11 | ||
12 | #include <asm/smp.h> | 12 | #include <asm/smp.h> |
13 | 13 | ||
@@ -16,7 +16,7 @@ struct memnode { | |||
16 | int shift; | 16 | int shift; |
17 | unsigned int mapsize; | 17 | unsigned int mapsize; |
18 | s16 *map; | 18 | s16 *map; |
19 | s16 embedded_map[64-8]; | 19 | s16 embedded_map[64 - 8]; |
20 | } ____cacheline_aligned; /* total size = 128 bytes */ | 20 | } ____cacheline_aligned; /* total size = 128 bytes */ |
21 | extern struct memnode memnode; | 21 | extern struct memnode memnode; |
22 | #define memnode_shift memnode.shift | 22 | #define memnode_shift memnode.shift |
@@ -25,27 +25,27 @@ extern struct memnode memnode; | |||
25 | 25 | ||
26 | extern struct pglist_data *node_data[]; | 26 | extern struct pglist_data *node_data[]; |
27 | 27 | ||
28 | static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) | 28 | static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) |
29 | { | 29 | { |
30 | unsigned nid; | 30 | unsigned nid; |
31 | VIRTUAL_BUG_ON(!memnodemap); | 31 | VIRTUAL_BUG_ON(!memnodemap); |
32 | VIRTUAL_BUG_ON((addr >> memnode_shift) >= memnodemapsize); | 32 | VIRTUAL_BUG_ON((addr >> memnode_shift) >= memnodemapsize); |
33 | nid = memnodemap[addr >> memnode_shift]; | 33 | nid = memnodemap[addr >> memnode_shift]; |
34 | VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]); | 34 | VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]); |
35 | return nid; | 35 | return nid; |
36 | } | 36 | } |
37 | 37 | ||
38 | #define NODE_DATA(nid) (node_data[nid]) | 38 | #define NODE_DATA(nid) (node_data[nid]) |
39 | 39 | ||
40 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | 40 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) |
41 | #define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ | 41 | #define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ |
42 | NODE_DATA(nid)->node_spanned_pages) | 42 | NODE_DATA(nid)->node_spanned_pages) |
43 | 43 | ||
44 | extern int early_pfn_to_nid(unsigned long pfn); | 44 | extern int early_pfn_to_nid(unsigned long pfn); |
45 | 45 | ||
46 | #ifdef CONFIG_NUMA_EMU | 46 | #ifdef CONFIG_NUMA_EMU |
47 | #define FAKE_NODE_MIN_SIZE (64*1024*1024) | 47 | #define FAKE_NODE_MIN_SIZE (64 * 1024 * 1024) |
48 | #define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1uL)) | 48 | #define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | #endif | 51 | #endif |
diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 781ad74ab9e9..57a991b9c053 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h | |||
@@ -1,16 +1,13 @@ | |||
1 | #ifndef _AM_X86_MPSPEC_H | 1 | #ifndef _AM_X86_MPSPEC_H |
2 | #define _AM_X86_MPSPEC_H | 2 | #define _AM_X86_MPSPEC_H |
3 | 3 | ||
4 | #include <linux/init.h> | ||
5 | |||
4 | #include <asm/mpspec_def.h> | 6 | #include <asm/mpspec_def.h> |
5 | 7 | ||
6 | #ifdef CONFIG_X86_32 | 8 | #ifdef CONFIG_X86_32 |
7 | #include <mach_mpspec.h> | 9 | #include <mach_mpspec.h> |
8 | 10 | ||
9 | extern int mp_bus_id_to_type[MAX_MP_BUSSES]; | ||
10 | extern int mp_bus_id_to_node[MAX_MP_BUSSES]; | ||
11 | extern int mp_bus_id_to_local[MAX_MP_BUSSES]; | ||
12 | extern int quad_local_to_mp_bus_id[NR_CPUS/4][4]; | ||
13 | |||
14 | extern unsigned int def_to_bigsmp; | 11 | extern unsigned int def_to_bigsmp; |
15 | extern int apic_version[MAX_APICS]; | 12 | extern int apic_version[MAX_APICS]; |
16 | extern u8 apicid_2_node[]; | 13 | extern u8 apicid_2_node[]; |
@@ -24,27 +21,30 @@ extern int pic_mode; | |||
24 | /* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */ | 21 | /* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */ |
25 | #define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) | 22 | #define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) |
26 | 23 | ||
27 | extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); | 24 | extern void early_find_smp_config(void); |
25 | extern void early_get_smp_config(void); | ||
28 | 26 | ||
29 | #endif | 27 | #endif |
30 | 28 | ||
29 | #if defined(CONFIG_MCA) || defined(CONFIG_EISA) | ||
30 | extern int mp_bus_id_to_type[MAX_MP_BUSSES]; | ||
31 | #endif | ||
32 | |||
33 | extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); | ||
34 | |||
31 | extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES]; | 35 | extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES]; |
32 | 36 | ||
33 | extern unsigned int boot_cpu_physical_apicid; | 37 | extern unsigned int boot_cpu_physical_apicid; |
34 | extern int smp_found_config; | 38 | extern int smp_found_config; |
35 | extern int nr_ioapics; | ||
36 | extern int mp_irq_entries; | ||
37 | extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; | ||
38 | extern int mpc_default_type; | 39 | extern int mpc_default_type; |
39 | extern unsigned long mp_lapic_addr; | 40 | extern unsigned long mp_lapic_addr; |
40 | 41 | ||
41 | extern void find_smp_config(void); | 42 | extern void find_smp_config(void); |
42 | extern void get_smp_config(void); | 43 | extern void get_smp_config(void); |
43 | 44 | ||
45 | void __cpuinit generic_processor_info(int apicid, int version); | ||
44 | #ifdef CONFIG_ACPI | 46 | #ifdef CONFIG_ACPI |
45 | extern void mp_register_lapic(u8 id, u8 enabled); | 47 | extern void mp_register_ioapic(int id, u32 address, u32 gsi_base); |
46 | extern void mp_register_lapic_address(u64 address); | ||
47 | extern void mp_register_ioapic(u8 id, u32 address, u32 gsi_base); | ||
48 | extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, | 48 | extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, |
49 | u32 gsi); | 49 | u32 gsi); |
50 | extern void mp_config_acpi_legacy_irqs(void); | 50 | extern void mp_config_acpi_legacy_irqs(void); |
@@ -53,8 +53,7 @@ extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low); | |||
53 | 53 | ||
54 | #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) | 54 | #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) |
55 | 55 | ||
56 | struct physid_mask | 56 | struct physid_mask { |
57 | { | ||
58 | unsigned long mask[PHYSID_ARRAY_SIZE]; | 57 | unsigned long mask[PHYSID_ARRAY_SIZE]; |
59 | }; | 58 | }; |
60 | 59 | ||
@@ -63,34 +62,34 @@ typedef struct physid_mask physid_mask_t; | |||
63 | #define physid_set(physid, map) set_bit(physid, (map).mask) | 62 | #define physid_set(physid, map) set_bit(physid, (map).mask) |
64 | #define physid_clear(physid, map) clear_bit(physid, (map).mask) | 63 | #define physid_clear(physid, map) clear_bit(physid, (map).mask) |
65 | #define physid_isset(physid, map) test_bit(physid, (map).mask) | 64 | #define physid_isset(physid, map) test_bit(physid, (map).mask) |
66 | #define physid_test_and_set(physid, map) \ | 65 | #define physid_test_and_set(physid, map) \ |
67 | test_and_set_bit(physid, (map).mask) | 66 | test_and_set_bit(physid, (map).mask) |
68 | 67 | ||
69 | #define physids_and(dst, src1, src2) \ | 68 | #define physids_and(dst, src1, src2) \ |
70 | bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) | 69 | bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) |
71 | 70 | ||
72 | #define physids_or(dst, src1, src2) \ | 71 | #define physids_or(dst, src1, src2) \ |
73 | bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) | 72 | bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) |
74 | 73 | ||
75 | #define physids_clear(map) \ | 74 | #define physids_clear(map) \ |
76 | bitmap_zero((map).mask, MAX_APICS) | 75 | bitmap_zero((map).mask, MAX_APICS) |
77 | 76 | ||
78 | #define physids_complement(dst, src) \ | 77 | #define physids_complement(dst, src) \ |
79 | bitmap_complement((dst).mask, (src).mask, MAX_APICS) | 78 | bitmap_complement((dst).mask, (src).mask, MAX_APICS) |
80 | 79 | ||
81 | #define physids_empty(map) \ | 80 | #define physids_empty(map) \ |
82 | bitmap_empty((map).mask, MAX_APICS) | 81 | bitmap_empty((map).mask, MAX_APICS) |
83 | 82 | ||
84 | #define physids_equal(map1, map2) \ | 83 | #define physids_equal(map1, map2) \ |
85 | bitmap_equal((map1).mask, (map2).mask, MAX_APICS) | 84 | bitmap_equal((map1).mask, (map2).mask, MAX_APICS) |
86 | 85 | ||
87 | #define physids_weight(map) \ | 86 | #define physids_weight(map) \ |
88 | bitmap_weight((map).mask, MAX_APICS) | 87 | bitmap_weight((map).mask, MAX_APICS) |
89 | 88 | ||
90 | #define physids_shift_right(d, s, n) \ | 89 | #define physids_shift_right(d, s, n) \ |
91 | bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) | 90 | bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) |
92 | 91 | ||
93 | #define physids_shift_left(d, s, n) \ | 92 | #define physids_shift_left(d, s, n) \ |
94 | bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) | 93 | bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) |
95 | 94 | ||
96 | #define physids_coerce(map) ((map).mask[0]) | 95 | #define physids_coerce(map) ((map).mask[0]) |
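Editor's note: the physid_* macros above are thin wrappers over the kernel bitmap API, sized by BITS_TO_LONGS(MAX_APICS). Equivalent userspace arithmetic for the two most common operations, mirroring the header's long-array layout:

#include <stdio.h>

#define MAX_APICS 255
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
#define PHYSID_ARRAY_SIZE ((MAX_APICS + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct physid_mask { unsigned long mask[PHYSID_ARRAY_SIZE]; };

static void physid_set(int id, struct physid_mask *map)
{
	map->mask[id / BITS_PER_LONG] |= 1UL << (id % BITS_PER_LONG);
}

static int physid_isset(int id, const struct physid_mask *map)
{
	return (map->mask[id / BITS_PER_LONG] >> (id % BITS_PER_LONG)) & 1;
}

int main(void)
{
	struct physid_mask present = { { 0 } };

	physid_set(17, &present);
	printf("apicid 17 present: %d, apicid 3 present: %d\n",
	       physid_isset(17, &present), physid_isset(3, &present));
	return 0;
}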
diff --git a/include/asm-x86/mpspec_def.h b/include/asm-x86/mpspec_def.h index 3504617fe648..dc6ef85e3624 100644 --- a/include/asm-x86/mpspec_def.h +++ b/include/asm-x86/mpspec_def.h | |||
@@ -11,7 +11,7 @@ | |||
11 | * information is. | 11 | * information is. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') | 14 | #define SMP_MAGIC_IDENT (('_'<<24) | ('P'<<16) | ('M'<<8) | '_') |
15 | 15 | ||
16 | #ifdef CONFIG_X86_32 | 16 | #ifdef CONFIG_X86_32 |
17 | # define MAX_MPC_ENTRY 1024 | 17 | # define MAX_MPC_ENTRY 1024 |
@@ -23,8 +23,7 @@ | |||
23 | # define MAX_APICS 255 | 23 | # define MAX_APICS 255 |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | struct intel_mp_floating | 26 | struct intel_mp_floating { |
27 | { | ||
28 | char mpf_signature[4]; /* "_MP_" */ | 27 | char mpf_signature[4]; /* "_MP_" */ |
29 | unsigned int mpf_physptr; /* Configuration table address */ | 28 | unsigned int mpf_physptr; /* Configuration table address */ |
30 | unsigned char mpf_length; /* Our length (paragraphs) */ | 29 | unsigned char mpf_length; /* Our length (paragraphs) */ |
@@ -39,14 +38,13 @@ struct intel_mp_floating | |||
39 | 38 | ||
40 | #define MPC_SIGNATURE "PCMP" | 39 | #define MPC_SIGNATURE "PCMP" |
41 | 40 | ||
42 | struct mp_config_table | 41 | struct mp_config_table { |
43 | { | ||
44 | char mpc_signature[4]; | 42 | char mpc_signature[4]; |
45 | unsigned short mpc_length; /* Size of table */ | 43 | unsigned short mpc_length; /* Size of table */ |
46 | char mpc_spec; /* 0x01 */ | 44 | char mpc_spec; /* 0x01 */ |
47 | char mpc_checksum; | 45 | char mpc_checksum; |
48 | char mpc_oem[8]; | 46 | char mpc_oem[8]; |
49 | char mpc_productid[12]; | 47 | char mpc_productid[12]; |
50 | unsigned int mpc_oemptr; /* 0 if not present */ | 48 | unsigned int mpc_oemptr; /* 0 if not present */ |
51 | unsigned short mpc_oemsize; /* 0 if not present */ | 49 | unsigned short mpc_oemsize; /* 0 if not present */ |
52 | unsigned short mpc_oemcount; | 50 | unsigned short mpc_oemcount; |
@@ -71,8 +69,7 @@ struct mp_config_table | |||
71 | #define CPU_MODEL_MASK 0x00F0 | 69 | #define CPU_MODEL_MASK 0x00F0 |
72 | #define CPU_FAMILY_MASK 0x0F00 | 70 | #define CPU_FAMILY_MASK 0x0F00 |
73 | 71 | ||
74 | struct mpc_config_processor | 72 | struct mpc_config_processor { |
75 | { | ||
76 | unsigned char mpc_type; | 73 | unsigned char mpc_type; |
77 | unsigned char mpc_apicid; /* Local APIC number */ | 74 | unsigned char mpc_apicid; /* Local APIC number */ |
78 | unsigned char mpc_apicver; /* Its versions */ | 75 | unsigned char mpc_apicver; /* Its versions */ |
@@ -82,8 +79,7 @@ struct mpc_config_processor | |||
82 | unsigned int mpc_reserved[2]; | 79 | unsigned int mpc_reserved[2]; |
83 | }; | 80 | }; |
84 | 81 | ||
85 | struct mpc_config_bus | 82 | struct mpc_config_bus { |
86 | { | ||
87 | unsigned char mpc_type; | 83 | unsigned char mpc_type; |
88 | unsigned char mpc_busid; | 84 | unsigned char mpc_busid; |
89 | unsigned char mpc_bustype[6]; | 85 | unsigned char mpc_bustype[6]; |
@@ -111,8 +107,7 @@ struct mpc_config_bus | |||
111 | 107 | ||
112 | #define MPC_APIC_USABLE 0x01 | 108 | #define MPC_APIC_USABLE 0x01 |
113 | 109 | ||
114 | struct mpc_config_ioapic | 110 | struct mpc_config_ioapic { |
115 | { | ||
116 | unsigned char mpc_type; | 111 | unsigned char mpc_type; |
117 | unsigned char mpc_apicid; | 112 | unsigned char mpc_apicid; |
118 | unsigned char mpc_apicver; | 113 | unsigned char mpc_apicver; |
@@ -120,8 +115,7 @@ struct mpc_config_ioapic | |||
120 | unsigned int mpc_apicaddr; | 115 | unsigned int mpc_apicaddr; |
121 | }; | 116 | }; |
122 | 117 | ||
123 | struct mpc_config_intsrc | 118 | struct mpc_config_intsrc { |
124 | { | ||
125 | unsigned char mpc_type; | 119 | unsigned char mpc_type; |
126 | unsigned char mpc_irqtype; | 120 | unsigned char mpc_irqtype; |
127 | unsigned short mpc_irqflag; | 121 | unsigned short mpc_irqflag; |
@@ -144,8 +138,7 @@ enum mp_irq_source_types { | |||
144 | 138 | ||
145 | #define MP_APIC_ALL 0xFF | 139 | #define MP_APIC_ALL 0xFF |
146 | 140 | ||
147 | struct mpc_config_lintsrc | 141 | struct mpc_config_lintsrc { |
148 | { | ||
149 | unsigned char mpc_type; | 142 | unsigned char mpc_type; |
150 | unsigned char mpc_irqtype; | 143 | unsigned char mpc_irqtype; |
151 | unsigned short mpc_irqflag; | 144 | unsigned short mpc_irqflag; |
@@ -157,8 +150,7 @@ struct mpc_config_lintsrc | |||
157 | 150 | ||
158 | #define MPC_OEM_SIGNATURE "_OEM" | 151 | #define MPC_OEM_SIGNATURE "_OEM" |
159 | 152 | ||
160 | struct mp_config_oemtable | 153 | struct mp_config_oemtable { |
161 | { | ||
162 | char oem_signature[4]; | 154 | char oem_signature[4]; |
163 | unsigned short oem_length; /* Size of table */ | 155 | unsigned short oem_length; /* Size of table */ |
164 | char oem_rev; /* 0x01 */ | 156 | char oem_rev; /* 0x01 */ |
@@ -166,17 +158,6 @@ struct mp_config_oemtable | |||
166 | char mpc_oem[8]; | 158 | char mpc_oem[8]; |
167 | }; | 159 | }; |
168 | 160 | ||
169 | struct mpc_config_translation | ||
170 | { | ||
171 | unsigned char mpc_type; | ||
172 | unsigned char trans_len; | ||
173 | unsigned char trans_type; | ||
174 | unsigned char trans_quad; | ||
175 | unsigned char trans_global; | ||
176 | unsigned char trans_local; | ||
177 | unsigned short trans_reserved; | ||
178 | }; | ||
179 | |||
180 | /* | 161 | /* |
181 | * Default configurations | 162 | * Default configurations |
182 | * | 163 | * |
@@ -196,4 +177,3 @@ enum mp_bustype { | |||
196 | MP_BUS_MCA, | 177 | MP_BUS_MCA, |
197 | }; | 178 | }; |
198 | #endif | 179 | #endif |
199 | |||
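The mpspec_def.h hunks above are purely stylistic: opening braces move onto the struct line, and the unused struct mpc_config_translation is dropped. For orientation, a minimal sketch of how a scanner can use SMP_MAGIC_IDENT to locate the MP floating pointer structure; the function name and the 16-byte stride (the alignment the MP specification requires) are illustrative, not part of this patch:

    #include <asm/mpspec_def.h>

    /* Sketch: match the "_MP_" signature on 16-byte boundaries. */
    static struct intel_mp_floating *mpf_scan(unsigned char *base,
                                              unsigned long length)
    {
            unsigned int *bp = (unsigned int *)base;

            while (length >= sizeof(struct intel_mp_floating)) {
                    if (*bp == SMP_MAGIC_IDENT)
                            return (struct intel_mp_floating *)bp;
                    bp += 4;                /* advance 16 bytes */
                    length -= 16;
            }
            return NULL;
    }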
diff --git a/include/asm-x86/msidef.h b/include/asm-x86/msidef.h index 5b8acddb70fb..296f29ce426d 100644 --- a/include/asm-x86/msidef.h +++ b/include/asm-x86/msidef.h | |||
@@ -11,7 +11,8 @@ | |||
11 | 11 | ||
12 | #define MSI_DATA_VECTOR_SHIFT 0 | 12 | #define MSI_DATA_VECTOR_SHIFT 0 |
13 | #define MSI_DATA_VECTOR_MASK 0x000000ff | 13 | #define MSI_DATA_VECTOR_MASK 0x000000ff |
14 | #define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK) | 14 | #define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & \ |
15 | MSI_DATA_VECTOR_MASK) | ||
15 | 16 | ||
16 | #define MSI_DATA_DELIVERY_MODE_SHIFT 8 | 17 | #define MSI_DATA_DELIVERY_MODE_SHIFT 8 |
17 | #define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT) | 18 | #define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT) |
@@ -37,11 +38,14 @@ | |||
37 | #define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT) | 38 | #define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT) |
38 | 39 | ||
39 | #define MSI_ADDR_REDIRECTION_SHIFT 3 | 40 | #define MSI_ADDR_REDIRECTION_SHIFT 3 |
40 | #define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) /* dedicated cpu */ | 41 | #define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) |
41 | #define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) /* lowest priority */ | 42 | /* dedicated cpu */ |
43 | #define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) | ||
44 | /* lowest priority */ | ||
42 | 45 | ||
43 | #define MSI_ADDR_DEST_ID_SHIFT 12 | 46 | #define MSI_ADDR_DEST_ID_SHIFT 12 |
44 | #define MSI_ADDR_DEST_ID_MASK 0x00ffff0 | 47 | #define MSI_ADDR_DEST_ID_MASK 0x00ffff0 |
45 | #define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & MSI_ADDR_DEST_ID_MASK) | 48 | #define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \ |
49 | MSI_ADDR_DEST_ID_MASK) | ||
46 | 50 | ||
47 | #endif /* ASM_MSIDEF_H */ | 51 | #endif /* ASM_MSIDEF_H */ |
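The msidef.h changes are pure 80-column rewrapping. As a usage sketch, the macros compose the MSI message data and low address words; the vector and destination APIC ID below are arbitrary example values, and a real address word would also carry the fixed 0xFEE00000 base that the header defines separately:

    #include <asm/msidef.h>

    /* Sketch: fixed delivery of vector 0x31 to logical APIC ID 1. */
    unsigned int msg_data = MSI_DATA_DELIVERY_FIXED | MSI_DATA_VECTOR(0x31);
    unsigned int msg_addr = MSI_ADDR_DEST_MODE_LOGICAL |
                            MSI_ADDR_REDIRECTION_CPU |
                            MSI_ADDR_DEST_ID(1);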
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h index fae118a25278..09413ad39d3c 100644 --- a/include/asm-x86/msr-index.h +++ b/include/asm-x86/msr-index.h | |||
@@ -57,6 +57,8 @@ | |||
57 | #define MSR_MTRRfix4K_F8000 0x0000026f | 57 | #define MSR_MTRRfix4K_F8000 0x0000026f |
58 | #define MSR_MTRRdefType 0x000002ff | 58 | #define MSR_MTRRdefType 0x000002ff |
59 | 59 | ||
60 | #define MSR_IA32_CR_PAT 0x00000277 | ||
61 | |||
60 | #define MSR_IA32_DEBUGCTLMSR 0x000001d9 | 62 | #define MSR_IA32_DEBUGCTLMSR 0x000001d9 |
61 | #define MSR_IA32_LASTBRANCHFROMIP 0x000001db | 63 | #define MSR_IA32_LASTBRANCHFROMIP 0x000001db |
62 | #define MSR_IA32_LASTBRANCHTOIP 0x000001dc | 64 | #define MSR_IA32_LASTBRANCHTOIP 0x000001dc |
@@ -83,6 +85,7 @@ | |||
83 | /* AMD64 MSRs. Not complete. See the architecture manual for a more | 85 | /* AMD64 MSRs. Not complete. See the architecture manual for a more |
84 | complete list. */ | 86 | complete list. */ |
85 | 87 | ||
88 | #define MSR_AMD64_NB_CFG 0xc001001f | ||
86 | #define MSR_AMD64_IBSFETCHCTL 0xc0011030 | 89 | #define MSR_AMD64_IBSFETCHCTL 0xc0011030 |
87 | #define MSR_AMD64_IBSFETCHLINAD 0xc0011031 | 90 | #define MSR_AMD64_IBSFETCHLINAD 0xc0011031 |
88 | #define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 | 91 | #define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 |
@@ -109,6 +112,7 @@ | |||
109 | #define MSR_K8_SYSCFG 0xc0010010 | 112 | #define MSR_K8_SYSCFG 0xc0010010 |
110 | #define MSR_K8_HWCR 0xc0010015 | 113 | #define MSR_K8_HWCR 0xc0010015 |
111 | #define MSR_K8_ENABLE_C1E 0xc0010055 | 114 | #define MSR_K8_ENABLE_C1E 0xc0010055 |
115 | #define MSR_K8_TSEG_ADDR 0xc0010112 | ||
112 | #define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */ | 116 | #define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */ |
113 | #define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */ | 117 | #define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */ |
114 | #define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */ | 118 | #define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */ |
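msr-index.h gains MSR_IA32_CR_PAT (consumed by the new PAT code; see the new pat.h further down), MSR_AMD64_NB_CFG, and MSR_K8_TSEG_ADDR. A small sketch of reading the PAT MSR with the rdmsrl() helper from msr.h; the function and message are illustrative:

    #include <linux/kernel.h>
    #include <asm/msr.h>

    static void dump_pat_msr(void)
    {
            u64 pat;

            /* eight 8-bit page attribute entries, PA0..PA7 */
            rdmsrl(MSR_IA32_CR_PAT, pat);
            printk(KERN_INFO "PAT: %016llx\n", pat);
    }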
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h index 3ca29ebebbb1..3707650a169b 100644 --- a/include/asm-x86/msr.h +++ b/include/asm-x86/msr.h | |||
@@ -16,8 +16,8 @@ | |||
16 | static inline unsigned long long native_read_tscp(unsigned int *aux) | 16 | static inline unsigned long long native_read_tscp(unsigned int *aux) |
17 | { | 17 | { |
18 | unsigned long low, high; | 18 | unsigned long low, high; |
19 | asm volatile (".byte 0x0f,0x01,0xf9" | 19 | asm volatile(".byte 0x0f,0x01,0xf9" |
20 | : "=a" (low), "=d" (high), "=c" (*aux)); | 20 | : "=a" (low), "=d" (high), "=c" (*aux)); |
21 | return low | ((u64)high << 32); | 21 | return low | ((u64)high << 32); |
22 | } | 22 | } |
23 | 23 | ||
@@ -29,7 +29,7 @@ static inline unsigned long long native_read_tscp(unsigned int *aux) | |||
29 | */ | 29 | */ |
30 | #ifdef CONFIG_X86_64 | 30 | #ifdef CONFIG_X86_64 |
31 | #define DECLARE_ARGS(val, low, high) unsigned low, high | 31 | #define DECLARE_ARGS(val, low, high) unsigned low, high |
32 | #define EAX_EDX_VAL(val, low, high) (low | ((u64)(high) << 32)) | 32 | #define EAX_EDX_VAL(val, low, high) ((low) | ((u64)(high) << 32)) |
33 | #define EAX_EDX_ARGS(val, low, high) "a" (low), "d" (high) | 33 | #define EAX_EDX_ARGS(val, low, high) "a" (low), "d" (high) |
34 | #define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high) | 34 | #define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high) |
35 | #else | 35 | #else |
@@ -57,7 +57,7 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr, | |||
57 | ".section .fixup,\"ax\"\n\t" | 57 | ".section .fixup,\"ax\"\n\t" |
58 | "3: mov %3,%0 ; jmp 1b\n\t" | 58 | "3: mov %3,%0 ; jmp 1b\n\t" |
59 | ".previous\n\t" | 59 | ".previous\n\t" |
60 | _ASM_EXTABLE(2b,3b) | 60 | _ASM_EXTABLE(2b, 3b) |
61 | : "=r" (*err), EAX_EDX_RET(val, low, high) | 61 | : "=r" (*err), EAX_EDX_RET(val, low, high) |
62 | : "c" (msr), "i" (-EFAULT)); | 62 | : "c" (msr), "i" (-EFAULT)); |
63 | return EAX_EDX_VAL(val, low, high); | 63 | return EAX_EDX_VAL(val, low, high); |
@@ -78,10 +78,10 @@ static inline int native_write_msr_safe(unsigned int msr, | |||
78 | ".section .fixup,\"ax\"\n\t" | 78 | ".section .fixup,\"ax\"\n\t" |
79 | "3: mov %4,%0 ; jmp 1b\n\t" | 79 | "3: mov %4,%0 ; jmp 1b\n\t" |
80 | ".previous\n\t" | 80 | ".previous\n\t" |
81 | _ASM_EXTABLE(2b,3b) | 81 | _ASM_EXTABLE(2b, 3b) |
82 | : "=a" (err) | 82 | : "=a" (err) |
83 | : "c" (msr), "0" (low), "d" (high), | 83 | : "c" (msr), "0" (low), "d" (high), |
84 | "i" (-EFAULT)); | 84 | "i" (-EFAULT)); |
85 | return err; | 85 | return err; |
86 | } | 86 | } |
87 | 87 | ||
@@ -116,23 +116,23 @@ static inline unsigned long long native_read_pmc(int counter) | |||
116 | * pointer indirection), this allows gcc to optimize better | 116 | * pointer indirection), this allows gcc to optimize better |
117 | */ | 117 | */ |
118 | 118 | ||
119 | #define rdmsr(msr,val1,val2) \ | 119 | #define rdmsr(msr, val1, val2) \ |
120 | do { \ | 120 | do { \ |
121 | u64 __val = native_read_msr(msr); \ | 121 | u64 __val = native_read_msr((msr)); \ |
122 | (val1) = (u32)__val; \ | 122 | (val1) = (u32)__val; \ |
123 | (val2) = (u32)(__val >> 32); \ | 123 | (val2) = (u32)(__val >> 32); \ |
124 | } while(0) | 124 | } while (0) |
125 | 125 | ||
126 | static inline void wrmsr(unsigned msr, unsigned low, unsigned high) | 126 | static inline void wrmsr(unsigned msr, unsigned low, unsigned high) |
127 | { | 127 | { |
128 | native_write_msr(msr, low, high); | 128 | native_write_msr(msr, low, high); |
129 | } | 129 | } |
130 | 130 | ||
131 | #define rdmsrl(msr,val) \ | 131 | #define rdmsrl(msr, val) \ |
132 | ((val) = native_read_msr(msr)) | 132 | ((val) = native_read_msr((msr))) |
133 | 133 | ||
134 | #define wrmsrl(msr, val) \ | 134 | #define wrmsrl(msr, val) \ |
135 | native_write_msr(msr, (u32)((u64)(val)), (u32)((u64)(val) >> 32)) | 135 | native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32)) |
136 | 136 | ||
137 | /* wrmsr with exception handling */ | 137 | /* wrmsr with exception handling */ |
138 | static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) | 138 | static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) |
@@ -141,14 +141,22 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) | |||
141 | } | 141 | } |
142 | 142 | ||
143 | /* rdmsr with exception handling */ | 143 | /* rdmsr with exception handling */ |
144 | #define rdmsr_safe(msr,p1,p2) \ | 144 | #define rdmsr_safe(msr, p1, p2) \ |
145 | ({ \ | 145 | ({ \ |
146 | int __err; \ | 146 | int __err; \ |
147 | u64 __val = native_read_msr_safe(msr, &__err); \ | 147 | u64 __val = native_read_msr_safe((msr), &__err); \ |
148 | (*p1) = (u32)__val; \ | 148 | (*p1) = (u32)__val; \ |
149 | (*p2) = (u32)(__val >> 32); \ | 149 | (*p2) = (u32)(__val >> 32); \ |
150 | __err; \ | 150 | __err; \ |
151 | }) | 151 | }) |
152 | |||
153 | static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) | ||
154 | { | ||
155 | int err; | ||
156 | |||
157 | *p = native_read_msr_safe(msr, &err); | ||
158 | return err; | ||
159 | } | ||
152 | 160 | ||
153 | #define rdtscl(low) \ | 161 | #define rdtscl(low) \ |
154 | ((low) = (u32)native_read_tsc()) | 162 | ((low) = (u32)native_read_tsc()) |
@@ -156,35 +164,37 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) | |||
156 | #define rdtscll(val) \ | 164 | #define rdtscll(val) \ |
157 | ((val) = native_read_tsc()) | 165 | ((val) = native_read_tsc()) |
158 | 166 | ||
159 | #define rdpmc(counter,low,high) \ | 167 | #define rdpmc(counter, low, high) \ |
160 | do { \ | 168 | do { \ |
161 | u64 _l = native_read_pmc(counter); \ | 169 | u64 _l = native_read_pmc((counter)); \ |
162 | (low) = (u32)_l; \ | 170 | (low) = (u32)_l; \ |
163 | (high) = (u32)(_l >> 32); \ | 171 | (high) = (u32)(_l >> 32); \ |
164 | } while(0) | 172 | } while (0) |
165 | 173 | ||
166 | #define rdtscp(low, high, aux) \ | 174 | #define rdtscp(low, high, aux) \ |
167 | do { \ | 175 | do { \ |
168 | unsigned long long _val = native_read_tscp(&(aux)); \ | 176 | unsigned long long _val = native_read_tscp(&(aux)); \ |
169 | (low) = (u32)_val; \ | 177 | (low) = (u32)_val; \ |
170 | (high) = (u32)(_val >> 32); \ | 178 | (high) = (u32)(_val >> 32); \ |
171 | } while (0) | 179 | } while (0) |
172 | 180 | ||
173 | #define rdtscpll(val, aux) (val) = native_read_tscp(&(aux)) | 181 | #define rdtscpll(val, aux) (val) = native_read_tscp(&(aux)) |
174 | 182 | ||
175 | #endif /* !CONFIG_PARAVIRT */ | 183 | #endif /* !CONFIG_PARAVIRT */ |
176 | 184 | ||
177 | 185 | ||
178 | #define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32)) | 186 | #define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \ |
187 | (u32)((val) >> 32)) | ||
179 | 188 | ||
180 | #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) | 189 | #define write_tsc(val1, val2) wrmsr(0x10, (val1), (val2)) |
181 | 190 | ||
182 | #define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0) | 191 | #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0) |
183 | 192 | ||
184 | #ifdef CONFIG_SMP | 193 | #ifdef CONFIG_SMP |
185 | void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); | 194 | void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); |
186 | void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); | 195 | void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); |
187 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); | 196 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); |
197 | |||
188 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); | 198 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); |
189 | #else /* CONFIG_SMP */ | 199 | #else /* CONFIG_SMP */ |
190 | static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) | 200 | static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) |
@@ -195,7 +205,8 @@ static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | |||
195 | { | 205 | { |
196 | wrmsr(msr_no, l, h); | 206 | wrmsr(msr_no, l, h); |
197 | } | 207 | } |
198 | static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) | 208 | static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, |
209 | u32 *l, u32 *h) | ||
199 | { | 210 | { |
200 | return rdmsr_safe(msr_no, l, h); | 211 | return rdmsr_safe(msr_no, l, h); |
201 | } | 212 | } |
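Beyond the checkpatch-style cleanups, msr.h grows rdmsrl_safe(), a 64-bit MSR read that reports rather than oopses when the access faults (the paravirt header below adds the matching variant). A sketch of the intended pattern; the choice of MSR is illustrative:

    #include <linux/kernel.h>
    #include <asm/msr.h>

    static void report_tseg(void)
    {
            u64 tseg;

            /* rdmsrl_safe() returns 0 on success, non-zero if the
             * read faulted, e.g. on CPUs without this AMD MSR. */
            if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg))
                    printk(KERN_DEBUG "TSEG base: %010llx\n", tseg);
    }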
diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h index 319d065800be..a69a01a51729 100644 --- a/include/asm-x86/mtrr.h +++ b/include/asm-x86/mtrr.h | |||
@@ -28,8 +28,7 @@ | |||
28 | 28 | ||
29 | #define MTRR_IOCTL_BASE 'M' | 29 | #define MTRR_IOCTL_BASE 'M' |
30 | 30 | ||
31 | struct mtrr_sentry | 31 | struct mtrr_sentry { |
32 | { | ||
33 | unsigned long base; /* Base address */ | 32 | unsigned long base; /* Base address */ |
34 | unsigned int size; /* Size of region */ | 33 | unsigned int size; /* Size of region */ |
35 | unsigned int type; /* Type of region */ | 34 | unsigned int type; /* Type of region */ |
@@ -41,8 +40,7 @@ struct mtrr_sentry | |||
41 | will break. */ | 40 | will break. */ |
42 | 41 | ||
43 | #ifdef __i386__ | 42 | #ifdef __i386__ |
44 | struct mtrr_gentry | 43 | struct mtrr_gentry { |
45 | { | ||
46 | unsigned int regnum; /* Register number */ | 44 | unsigned int regnum; /* Register number */ |
47 | unsigned long base; /* Base address */ | 45 | unsigned long base; /* Base address */ |
48 | unsigned int size; /* Size of region */ | 46 | unsigned int size; /* Size of region */ |
@@ -51,8 +49,7 @@ struct mtrr_gentry | |||
51 | 49 | ||
52 | #else /* __i386__ */ | 50 | #else /* __i386__ */ |
53 | 51 | ||
54 | struct mtrr_gentry | 52 | struct mtrr_gentry { |
55 | { | ||
56 | unsigned long base; /* Base address */ | 53 | unsigned long base; /* Base address */ |
57 | unsigned int size; /* Size of region */ | 54 | unsigned int size; /* Size of region */ |
58 | unsigned int regnum; /* Register number */ | 55 | unsigned int regnum; /* Register number */ |
@@ -86,38 +83,45 @@ struct mtrr_gentry | |||
86 | 83 | ||
87 | /* The following functions are for use by other drivers */ | 84 | /* The following functions are for use by other drivers */ |
88 | # ifdef CONFIG_MTRR | 85 | # ifdef CONFIG_MTRR |
86 | extern u8 mtrr_type_lookup(u64 addr, u64 end); | ||
89 | extern void mtrr_save_fixed_ranges(void *); | 87 | extern void mtrr_save_fixed_ranges(void *); |
90 | extern void mtrr_save_state(void); | 88 | extern void mtrr_save_state(void); |
91 | extern int mtrr_add (unsigned long base, unsigned long size, | 89 | extern int mtrr_add(unsigned long base, unsigned long size, |
92 | unsigned int type, bool increment); | 90 | unsigned int type, bool increment); |
93 | extern int mtrr_add_page (unsigned long base, unsigned long size, | 91 | extern int mtrr_add_page(unsigned long base, unsigned long size, |
94 | unsigned int type, bool increment); | 92 | unsigned int type, bool increment); |
95 | extern int mtrr_del (int reg, unsigned long base, unsigned long size); | 93 | extern int mtrr_del(int reg, unsigned long base, unsigned long size); |
96 | extern int mtrr_del_page (int reg, unsigned long base, unsigned long size); | 94 | extern int mtrr_del_page(int reg, unsigned long base, unsigned long size); |
97 | extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); | 95 | extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); |
98 | extern void mtrr_ap_init(void); | 96 | extern void mtrr_ap_init(void); |
99 | extern void mtrr_bp_init(void); | 97 | extern void mtrr_bp_init(void); |
100 | extern int mtrr_trim_uncached_memory(unsigned long end_pfn); | 98 | extern int mtrr_trim_uncached_memory(unsigned long end_pfn); |
99 | extern int amd_special_default_mtrr(void); | ||
101 | # else | 100 | # else |
101 | static inline u8 mtrr_type_lookup(u64 addr, u64 end) | ||
102 | { | ||
103 | /* | ||
104 | * Return no-MTRRs: | ||
105 | */ | ||
106 | return 0xff; | ||
107 | } | ||
102 | #define mtrr_save_fixed_ranges(arg) do {} while (0) | 108 | #define mtrr_save_fixed_ranges(arg) do {} while (0) |
103 | #define mtrr_save_state() do {} while (0) | 109 | #define mtrr_save_state() do {} while (0) |
104 | static __inline__ int mtrr_add (unsigned long base, unsigned long size, | 110 | static inline int mtrr_add(unsigned long base, unsigned long size, |
105 | unsigned int type, bool increment) | 111 | unsigned int type, bool increment) |
106 | { | 112 | { |
107 | return -ENODEV; | 113 | return -ENODEV; |
108 | } | 114 | } |
109 | static __inline__ int mtrr_add_page (unsigned long base, unsigned long size, | 115 | static inline int mtrr_add_page(unsigned long base, unsigned long size, |
110 | unsigned int type, bool increment) | 116 | unsigned int type, bool increment) |
111 | { | 117 | { |
112 | return -ENODEV; | 118 | return -ENODEV; |
113 | } | 119 | } |
114 | static __inline__ int mtrr_del (int reg, unsigned long base, | 120 | static inline int mtrr_del(int reg, unsigned long base, unsigned long size) |
115 | unsigned long size) | ||
116 | { | 121 | { |
117 | return -ENODEV; | 122 | return -ENODEV; |
118 | } | 123 | } |
119 | static __inline__ int mtrr_del_page (int reg, unsigned long base, | 124 | static inline int mtrr_del_page(int reg, unsigned long base, unsigned long size) |
120 | unsigned long size) | ||
121 | { | 125 | { |
122 | return -ENODEV; | 126 | return -ENODEV; |
123 | } | 127 | } |
@@ -125,7 +129,9 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
125 | { | 129 | { |
126 | return 0; | 130 | return 0; |
127 | } | 131 | } |
128 | static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;} | 132 | static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) |
133 | { | ||
134 | } | ||
129 | 135 | ||
130 | #define mtrr_ap_init() do {} while (0) | 136 | #define mtrr_ap_init() do {} while (0) |
131 | #define mtrr_bp_init() do {} while (0) | 137 | #define mtrr_bp_init() do {} while (0) |
@@ -134,15 +140,13 @@ static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;} | |||
134 | #ifdef CONFIG_COMPAT | 140 | #ifdef CONFIG_COMPAT |
135 | #include <linux/compat.h> | 141 | #include <linux/compat.h> |
136 | 142 | ||
137 | struct mtrr_sentry32 | 143 | struct mtrr_sentry32 { |
138 | { | ||
139 | compat_ulong_t base; /* Base address */ | 144 | compat_ulong_t base; /* Base address */ |
140 | compat_uint_t size; /* Size of region */ | 145 | compat_uint_t size; /* Size of region */ |
141 | compat_uint_t type; /* Type of region */ | 146 | compat_uint_t type; /* Type of region */ |
142 | }; | 147 | }; |
143 | 148 | ||
144 | struct mtrr_gentry32 | 149 | struct mtrr_gentry32 { |
145 | { | ||
146 | compat_ulong_t regnum; /* Register number */ | 150 | compat_ulong_t regnum; /* Register number */ |
147 | compat_uint_t base; /* Base address */ | 151 | compat_uint_t base; /* Base address */ |
148 | compat_uint_t size; /* Size of region */ | 152 | compat_uint_t size; /* Size of region */ |
@@ -151,16 +155,17 @@ struct mtrr_gentry32 | |||
151 | 155 | ||
152 | #define MTRR_IOCTL_BASE 'M' | 156 | #define MTRR_IOCTL_BASE 'M' |
153 | 157 | ||
154 | #define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32) | 158 | #define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32) |
155 | #define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32) | 159 | #define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32) |
156 | #define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32) | 160 | #define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32) |
157 | #define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32) | 161 | #define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32) |
158 | #define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32) | 162 | #define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32) |
159 | #define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32) | 163 | #define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32) |
160 | #define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32) | 164 | #define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32) |
161 | #define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32) | 165 | #define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32) |
162 | #define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32) | 166 | #define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32) |
163 | #define MTRRIOC32_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32) | 167 | #define MTRRIOC32_KILL_PAGE_ENTRY \ |
168 | _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32) | ||
164 | #endif /* CONFIG_COMPAT */ | 169 | #endif /* CONFIG_COMPAT */ |
165 | 170 | ||
166 | #endif /* __KERNEL__ */ | 171 | #endif /* __KERNEL__ */ |
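mtrr.h exports mtrr_type_lookup() and amd_special_default_mtrr() for the PAT code, and the CONFIG_MTRR=n stub returns 0xff to mean "no MTRRs". A sketch of how a caller can treat that sentinel; MTRR_TYPE_WRBACK is one of the region type constants this same header defines:

    #include <asm/mtrr.h>

    /* Sketch: may [start, end) be mapped write-back without
     * conflicting with an MTRR? */
    static int range_is_wb(u64 start, u64 end)
    {
            u8 type = mtrr_type_lookup(start, end);

            if (type == 0xff)       /* no MTRRs: nothing to conflict */
                    return 1;
            return type == MTRR_TYPE_WRBACK;
    }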
diff --git a/include/asm-x86/mutex_32.h b/include/asm-x86/mutex_32.h index bbeefb96ddfd..73e928ef5f03 100644 --- a/include/asm-x86/mutex_32.h +++ b/include/asm-x86/mutex_32.h | |||
@@ -9,7 +9,7 @@ | |||
9 | #ifndef _ASM_MUTEX_H | 9 | #ifndef _ASM_MUTEX_H |
10 | #define _ASM_MUTEX_H | 10 | #define _ASM_MUTEX_H |
11 | 11 | ||
12 | #include "asm/alternative.h" | 12 | #include <asm/alternative.h> |
13 | 13 | ||
14 | /** | 14 | /** |
15 | * __mutex_fastpath_lock - try to take the lock by moving the count | 15 | * __mutex_fastpath_lock - try to take the lock by moving the count |
@@ -21,22 +21,20 @@ | |||
21 | * wasn't 1 originally. This function MUST leave the value lower than 1 | 21 | * wasn't 1 originally. This function MUST leave the value lower than 1 |
22 | * even when the "1" assertion wasn't true. | 22 | * even when the "1" assertion wasn't true. |
23 | */ | 23 | */ |
24 | #define __mutex_fastpath_lock(count, fail_fn) \ | 24 | #define __mutex_fastpath_lock(count, fail_fn) \ |
25 | do { \ | 25 | do { \ |
26 | unsigned int dummy; \ | 26 | unsigned int dummy; \ |
27 | \ | 27 | \ |
28 | typecheck(atomic_t *, count); \ | 28 | typecheck(atomic_t *, count); \ |
29 | typecheck_fn(void (*)(atomic_t *), fail_fn); \ | 29 | typecheck_fn(void (*)(atomic_t *), fail_fn); \ |
30 | \ | 30 | \ |
31 | __asm__ __volatile__( \ | 31 | asm volatile(LOCK_PREFIX " decl (%%eax)\n" \ |
32 | LOCK_PREFIX " decl (%%eax) \n" \ | 32 | " jns 1f \n" \ |
33 | " jns 1f \n" \ | 33 | " call " #fail_fn "\n" \ |
34 | " call "#fail_fn" \n" \ | 34 | "1:\n" \ |
35 | "1: \n" \ | 35 | : "=a" (dummy) \ |
36 | \ | 36 | : "a" (count) \ |
37 | :"=a" (dummy) \ | 37 | : "memory", "ecx", "edx"); \ |
38 | : "a" (count) \ | ||
39 | : "memory", "ecx", "edx"); \ | ||
40 | } while (0) | 38 | } while (0) |
41 | 39 | ||
42 | 40 | ||
@@ -50,8 +48,8 @@ do { \ | |||
50 | * wasn't 1 originally. This function returns 0 if the fastpath succeeds, | 48 | * wasn't 1 originally. This function returns 0 if the fastpath succeeds, |
51 | * or anything the slow path function returns | 49 | * or anything the slow path function returns |
52 | */ | 50 | */ |
53 | static inline int | 51 | static inline int __mutex_fastpath_lock_retval(atomic_t *count, |
54 | __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) | 52 | int (*fail_fn)(atomic_t *)) |
55 | { | 53 | { |
56 | if (unlikely(atomic_dec_return(count) < 0)) | 54 | if (unlikely(atomic_dec_return(count) < 0)) |
57 | return fail_fn(count); | 55 | return fail_fn(count); |
@@ -72,22 +70,20 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) | |||
72 | * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs | 70 | * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs |
73 | * to return 0 otherwise. | 71 | * to return 0 otherwise. |
74 | */ | 72 | */ |
75 | #define __mutex_fastpath_unlock(count, fail_fn) \ | 73 | #define __mutex_fastpath_unlock(count, fail_fn) \ |
76 | do { \ | 74 | do { \ |
77 | unsigned int dummy; \ | 75 | unsigned int dummy; \ |
78 | \ | 76 | \ |
79 | typecheck(atomic_t *, count); \ | 77 | typecheck(atomic_t *, count); \ |
80 | typecheck_fn(void (*)(atomic_t *), fail_fn); \ | 78 | typecheck_fn(void (*)(atomic_t *), fail_fn); \ |
81 | \ | 79 | \ |
82 | __asm__ __volatile__( \ | 80 | asm volatile(LOCK_PREFIX " incl (%%eax)\n" \ |
83 | LOCK_PREFIX " incl (%%eax) \n" \ | 81 | " jg 1f\n" \ |
84 | " jg 1f \n" \ | 82 | " call " #fail_fn "\n" \ |
85 | " call "#fail_fn" \n" \ | 83 | "1:\n" \ |
86 | "1: \n" \ | 84 | : "=a" (dummy) \ |
87 | \ | 85 | : "a" (count) \ |
88 | :"=a" (dummy) \ | 86 | : "memory", "ecx", "edx"); \ |
89 | : "a" (count) \ | ||
90 | : "memory", "ecx", "edx"); \ | ||
91 | } while (0) | 87 | } while (0) |
92 | 88 | ||
93 | #define __mutex_slowpath_needs_to_unlock() 1 | 89 | #define __mutex_slowpath_needs_to_unlock() 1 |
@@ -104,8 +100,8 @@ do { \ | |||
104 | * Additionally, if the value was < 0 originally, this function must not leave | 100 | * Additionally, if the value was < 0 originally, this function must not leave |
105 | * it to 0 on failure. | 101 | * it to 0 on failure. |
106 | */ | 102 | */ |
107 | static inline int | 103 | static inline int __mutex_fastpath_trylock(atomic_t *count, |
108 | __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) | 104 | int (*fail_fn)(atomic_t *)) |
109 | { | 105 | { |
110 | /* | 106 | /* |
111 | * We have two variants here. The cmpxchg based one is the best one | 107 | * We have two variants here. The cmpxchg based one is the best one |
diff --git a/include/asm-x86/mutex_64.h b/include/asm-x86/mutex_64.h index 6c2949a3c677..f3fae9becb38 100644 --- a/include/asm-x86/mutex_64.h +++ b/include/asm-x86/mutex_64.h | |||
@@ -16,23 +16,21 @@ | |||
16 | * | 16 | * |
17 | * Atomically decrements @v and calls <fail_fn> if the result is negative. | 17 | * Atomically decrements @v and calls <fail_fn> if the result is negative. |
18 | */ | 18 | */ |
19 | #define __mutex_fastpath_lock(v, fail_fn) \ | 19 | #define __mutex_fastpath_lock(v, fail_fn) \ |
20 | do { \ | 20 | do { \ |
21 | unsigned long dummy; \ | 21 | unsigned long dummy; \ |
22 | \ | 22 | \ |
23 | typecheck(atomic_t *, v); \ | 23 | typecheck(atomic_t *, v); \ |
24 | typecheck_fn(void (*)(atomic_t *), fail_fn); \ | 24 | typecheck_fn(void (*)(atomic_t *), fail_fn); \ |
25 | \ | 25 | \ |
26 | __asm__ __volatile__( \ | 26 | asm volatile(LOCK_PREFIX " decl (%%rdi)\n" \ |
27 | LOCK_PREFIX " decl (%%rdi) \n" \ | 27 | " jns 1f \n" \ |
28 | " jns 1f \n" \ | 28 | " call " #fail_fn "\n" \ |
29 | " call "#fail_fn" \n" \ | 29 | "1:" \ |
30 | "1:" \ | 30 | : "=D" (dummy) \ |
31 | \ | 31 | : "D" (v) \ |
32 | :"=D" (dummy) \ | 32 | : "rax", "rsi", "rdx", "rcx", \ |
33 | : "D" (v) \ | 33 | "r8", "r9", "r10", "r11", "memory"); \ |
34 | : "rax", "rsi", "rdx", "rcx", \ | ||
35 | "r8", "r9", "r10", "r11", "memory"); \ | ||
36 | } while (0) | 34 | } while (0) |
37 | 35 | ||
38 | /** | 36 | /** |
@@ -45,9 +43,8 @@ do { \ | |||
45 | * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, | 43 | * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, |
46 | * or anything the slow path function returns | 44 | * or anything the slow path function returns |
47 | */ | 45 | */ |
48 | static inline int | 46 | static inline int __mutex_fastpath_lock_retval(atomic_t *count, |
49 | __mutex_fastpath_lock_retval(atomic_t *count, | 47 | int (*fail_fn)(atomic_t *)) |
50 | int (*fail_fn)(atomic_t *)) | ||
51 | { | 48 | { |
52 | if (unlikely(atomic_dec_return(count) < 0)) | 49 | if (unlikely(atomic_dec_return(count) < 0)) |
53 | return fail_fn(count); | 50 | return fail_fn(count); |
@@ -62,23 +59,21 @@ __mutex_fastpath_lock_retval(atomic_t *count, | |||
62 | * | 59 | * |
63 | * Atomically increments @v and calls <fail_fn> if the result is nonpositive. | 60 | * Atomically increments @v and calls <fail_fn> if the result is nonpositive. |
64 | */ | 61 | */ |
65 | #define __mutex_fastpath_unlock(v, fail_fn) \ | 62 | #define __mutex_fastpath_unlock(v, fail_fn) \ |
66 | do { \ | 63 | do { \ |
67 | unsigned long dummy; \ | 64 | unsigned long dummy; \ |
68 | \ | 65 | \ |
69 | typecheck(atomic_t *, v); \ | 66 | typecheck(atomic_t *, v); \ |
70 | typecheck_fn(void (*)(atomic_t *), fail_fn); \ | 67 | typecheck_fn(void (*)(atomic_t *), fail_fn); \ |
71 | \ | 68 | \ |
72 | __asm__ __volatile__( \ | 69 | asm volatile(LOCK_PREFIX " incl (%%rdi)\n" \ |
73 | LOCK_PREFIX " incl (%%rdi) \n" \ | 70 | " jg 1f\n" \ |
74 | " jg 1f \n" \ | 71 | " call " #fail_fn "\n" \ |
75 | " call "#fail_fn" \n" \ | 72 | "1:" \ |
76 | "1: " \ | 73 | : "=D" (dummy) \ |
77 | \ | 74 | : "D" (v) \ |
78 | :"=D" (dummy) \ | 75 | : "rax", "rsi", "rdx", "rcx", \ |
79 | : "D" (v) \ | 76 | "r8", "r9", "r10", "r11", "memory"); \ |
80 | : "rax", "rsi", "rdx", "rcx", \ | ||
81 | "r8", "r9", "r10", "r11", "memory"); \ | ||
82 | } while (0) | 77 | } while (0) |
83 | 78 | ||
84 | #define __mutex_slowpath_needs_to_unlock() 1 | 79 | #define __mutex_slowpath_needs_to_unlock() 1 |
@@ -93,8 +88,8 @@ do { \ | |||
93 | * if it wasn't 1 originally. [the fallback function is never used on | 88 | * if it wasn't 1 originally. [the fallback function is never used on |
94 | * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.] | 89 | * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.] |
95 | */ | 90 | */ |
96 | static inline int | 91 | static inline int __mutex_fastpath_trylock(atomic_t *count, |
97 | __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) | 92 | int (*fail_fn)(atomic_t *)) |
98 | { | 93 | { |
99 | if (likely(atomic_cmpxchg(count, 1, 0) == 1)) | 94 | if (likely(atomic_cmpxchg(count, 1, 0) == 1)) |
100 | return 1; | 95 | return 1; |
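Both mutex fastpath headers are converted from __asm__ __volatile__ to asm volatile with the operands re-indented; no semantics change. The locked decl/incl sequences implement the same logic that __mutex_fastpath_lock_retval() already spells out in C, an atomic count with a slowpath call on contention:

    #include <asm/atomic.h>

    /* Sketch of what the asm lock fastpath does, minus the
     * special out-of-line calling convention. */
    static inline void mutex_lock_sketch(atomic_t *count,
                                         void (*slowpath)(atomic_t *))
    {
            if (atomic_dec_return(count) < 0)       /* contended */
                    slowpath(count);
    }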
diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h index 53ccac14cead..1e363021e72f 100644 --- a/include/asm-x86/nmi.h +++ b/include/asm-x86/nmi.h | |||
@@ -1,5 +1,93 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifndef _ASM_X86_NMI_H_ |
2 | # include "nmi_32.h" | 2 | #define _ASM_X86_NMI_H_ |
3 | |||
4 | #include <linux/pm.h> | ||
5 | #include <asm/irq.h> | ||
6 | #include <asm/io.h> | ||
7 | |||
8 | #ifdef ARCH_HAS_NMI_WATCHDOG | ||
9 | |||
10 | /** | ||
11 | * do_nmi_callback | ||
12 | * | ||
13 | * Check to see if a callback exists and execute it. Return 1 | ||
14 | * if the handler exists and was handled successfully. | ||
15 | */ | ||
16 | int do_nmi_callback(struct pt_regs *regs, int cpu); | ||
17 | |||
18 | #ifdef CONFIG_PM | ||
19 | |||
20 | /** Replace the PM callback routine for NMI. */ | ||
21 | struct pm_dev *set_nmi_pm_callback(pm_callback callback); | ||
22 | |||
23 | /** Unset the PM callback routine back to the default. */ | ||
24 | void unset_nmi_pm_callback(struct pm_dev *dev); | ||
25 | |||
3 | #else | 26 | #else |
4 | # include "nmi_64.h" | 27 | |
28 | static inline struct pm_dev *set_nmi_pm_callback(pm_callback callback) | ||
29 | { | ||
30 | return NULL; | ||
31 | } | ||
32 | |||
33 | static inline void unset_nmi_pm_callback(struct pm_dev *dev) | ||
34 | { | ||
35 | } | ||
36 | |||
37 | #endif /* CONFIG_PM */ | ||
38 | |||
39 | #ifdef CONFIG_X86_64 | ||
40 | extern void default_do_nmi(struct pt_regs *); | ||
41 | extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); | ||
42 | extern void nmi_watchdog_default(void); | ||
43 | #else | ||
44 | #define nmi_watchdog_default() do {} while (0) | ||
45 | #endif | ||
46 | |||
47 | extern int check_nmi_watchdog(void); | ||
48 | extern int nmi_watchdog_enabled; | ||
49 | extern int unknown_nmi_panic; | ||
50 | extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); | ||
51 | extern int avail_to_resrv_perfctr_nmi(unsigned int); | ||
52 | extern int reserve_perfctr_nmi(unsigned int); | ||
53 | extern void release_perfctr_nmi(unsigned int); | ||
54 | extern int reserve_evntsel_nmi(unsigned int); | ||
55 | extern void release_evntsel_nmi(unsigned int); | ||
56 | |||
57 | extern void setup_apic_nmi_watchdog(void *); | ||
58 | extern void stop_apic_nmi_watchdog(void *); | ||
59 | extern void disable_timer_nmi_watchdog(void); | ||
60 | extern void enable_timer_nmi_watchdog(void); | ||
61 | extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason); | ||
62 | |||
63 | extern atomic_t nmi_active; | ||
64 | extern unsigned int nmi_watchdog; | ||
65 | #define NMI_DISABLED -1 | ||
66 | #define NMI_NONE 0 | ||
67 | #define NMI_IO_APIC 1 | ||
68 | #define NMI_LOCAL_APIC 2 | ||
69 | #define NMI_INVALID 3 | ||
70 | #define NMI_DEFAULT NMI_DISABLED | ||
71 | |||
72 | struct ctl_table; | ||
73 | struct file; | ||
74 | extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, | ||
75 | void __user *, size_t *, loff_t *); | ||
76 | extern int unknown_nmi_panic; | ||
77 | |||
78 | void __trigger_all_cpu_backtrace(void); | ||
79 | #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() | ||
80 | |||
81 | #endif | ||
82 | |||
83 | void lapic_watchdog_stop(void); | ||
84 | int lapic_watchdog_init(unsigned nmi_hz); | ||
85 | int lapic_wd_event(unsigned nmi_hz); | ||
86 | unsigned lapic_adjust_nmi_hz(unsigned hz); | ||
87 | int lapic_watchdog_ok(void); | ||
88 | void disable_lapic_nmi_watchdog(void); | ||
89 | void enable_lapic_nmi_watchdog(void); | ||
90 | void stop_nmi(void); | ||
91 | void restart_nmi(void); | ||
92 | |||
5 | #endif | 93 | #endif |
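nmi_32.h and nmi_64.h (both deleted below) collapse into this single nmi.h; the 64-bit-only pieces (default_do_nmi, die_nmi, nmi_watchdog_default) stay behind #ifdef CONFIG_X86_64, and the rest is the union of the two headers. A sketch of how the nmi_watchdog mode values are typically consulted; the reporting function itself is illustrative:

    #include <linux/kernel.h>
    #include <asm/nmi.h>

    static void report_watchdog_mode(void)
    {
            if (nmi_watchdog == NMI_LOCAL_APIC)
                    printk(KERN_INFO "NMI watchdog: local APIC mode\n");
            else if (nmi_watchdog == NMI_IO_APIC)
                    printk(KERN_INFO "NMI watchdog: I/O APIC mode\n");
            else
                    printk(KERN_INFO "NMI watchdog: off\n");
    }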
diff --git a/include/asm-x86/nmi_32.h b/include/asm-x86/nmi_32.h deleted file mode 100644 index 7206c7e8a388..000000000000 --- a/include/asm-x86/nmi_32.h +++ /dev/null | |||
@@ -1,61 +0,0 @@ | |||
1 | #ifndef ASM_NMI_H | ||
2 | #define ASM_NMI_H | ||
3 | |||
4 | #include <linux/pm.h> | ||
5 | #include <asm/irq.h> | ||
6 | |||
7 | #ifdef ARCH_HAS_NMI_WATCHDOG | ||
8 | |||
9 | /** | ||
10 | * do_nmi_callback | ||
11 | * | ||
12 | * Check to see if a callback exists and execute it. Return 1 | ||
13 | * if the handler exists and was handled successfully. | ||
14 | */ | ||
15 | int do_nmi_callback(struct pt_regs *regs, int cpu); | ||
16 | |||
17 | extern int nmi_watchdog_enabled; | ||
18 | extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); | ||
19 | extern int avail_to_resrv_perfctr_nmi(unsigned int); | ||
20 | extern int reserve_perfctr_nmi(unsigned int); | ||
21 | extern void release_perfctr_nmi(unsigned int); | ||
22 | extern int reserve_evntsel_nmi(unsigned int); | ||
23 | extern void release_evntsel_nmi(unsigned int); | ||
24 | |||
25 | extern void setup_apic_nmi_watchdog (void *); | ||
26 | extern void stop_apic_nmi_watchdog (void *); | ||
27 | extern void disable_timer_nmi_watchdog(void); | ||
28 | extern void enable_timer_nmi_watchdog(void); | ||
29 | extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); | ||
30 | |||
31 | extern atomic_t nmi_active; | ||
32 | extern unsigned int nmi_watchdog; | ||
33 | #define NMI_DISABLED -1 | ||
34 | #define NMI_NONE 0 | ||
35 | #define NMI_IO_APIC 1 | ||
36 | #define NMI_LOCAL_APIC 2 | ||
37 | #define NMI_INVALID 3 | ||
38 | #define NMI_DEFAULT NMI_DISABLED | ||
39 | |||
40 | struct ctl_table; | ||
41 | struct file; | ||
42 | extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, | ||
43 | void __user *, size_t *, loff_t *); | ||
44 | extern int unknown_nmi_panic; | ||
45 | |||
46 | void __trigger_all_cpu_backtrace(void); | ||
47 | #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() | ||
48 | |||
49 | #endif | ||
50 | |||
51 | void lapic_watchdog_stop(void); | ||
52 | int lapic_watchdog_init(unsigned nmi_hz); | ||
53 | int lapic_wd_event(unsigned nmi_hz); | ||
54 | unsigned lapic_adjust_nmi_hz(unsigned hz); | ||
55 | int lapic_watchdog_ok(void); | ||
56 | void disable_lapic_nmi_watchdog(void); | ||
57 | void enable_lapic_nmi_watchdog(void); | ||
58 | void stop_nmi(void); | ||
59 | void restart_nmi(void); | ||
60 | |||
61 | #endif /* ASM_NMI_H */ | ||
diff --git a/include/asm-x86/nmi_64.h b/include/asm-x86/nmi_64.h deleted file mode 100644 index 2eeb74e5f3ff..000000000000 --- a/include/asm-x86/nmi_64.h +++ /dev/null | |||
@@ -1,90 +0,0 @@ | |||
1 | #ifndef ASM_NMI_H | ||
2 | #define ASM_NMI_H | ||
3 | |||
4 | #include <linux/pm.h> | ||
5 | #include <asm/io.h> | ||
6 | |||
7 | /** | ||
8 | * do_nmi_callback | ||
9 | * | ||
10 | * Check to see if a callback exists and execute it. Return 1 | ||
11 | * if the handler exists and was handled successfully. | ||
12 | */ | ||
13 | int do_nmi_callback(struct pt_regs *regs, int cpu); | ||
14 | |||
15 | #ifdef CONFIG_PM | ||
16 | |||
17 | /** Replace the PM callback routine for NMI. */ | ||
18 | struct pm_dev * set_nmi_pm_callback(pm_callback callback); | ||
19 | |||
20 | /** Unset the PM callback routine back to the default. */ | ||
21 | void unset_nmi_pm_callback(struct pm_dev * dev); | ||
22 | |||
23 | #else | ||
24 | |||
25 | static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback) | ||
26 | { | ||
27 | return 0; | ||
28 | } | ||
29 | |||
30 | static inline void unset_nmi_pm_callback(struct pm_dev * dev) | ||
31 | { | ||
32 | } | ||
33 | |||
34 | #endif /* CONFIG_PM */ | ||
35 | |||
36 | extern void default_do_nmi(struct pt_regs *); | ||
37 | extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); | ||
38 | |||
39 | #define get_nmi_reason() inb(0x61) | ||
40 | |||
41 | extern int unknown_nmi_panic; | ||
42 | extern int nmi_watchdog_enabled; | ||
43 | |||
44 | extern int check_nmi_watchdog(void); | ||
45 | extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); | ||
46 | extern int avail_to_resrv_perfctr_nmi(unsigned int); | ||
47 | extern int reserve_perfctr_nmi(unsigned int); | ||
48 | extern void release_perfctr_nmi(unsigned int); | ||
49 | extern int reserve_evntsel_nmi(unsigned int); | ||
50 | extern void release_evntsel_nmi(unsigned int); | ||
51 | |||
52 | extern void setup_apic_nmi_watchdog (void *); | ||
53 | extern void stop_apic_nmi_watchdog (void *); | ||
54 | extern void disable_timer_nmi_watchdog(void); | ||
55 | extern void enable_timer_nmi_watchdog(void); | ||
56 | extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); | ||
57 | |||
58 | extern void nmi_watchdog_default(void); | ||
59 | |||
60 | extern atomic_t nmi_active; | ||
61 | extern unsigned int nmi_watchdog; | ||
62 | #define NMI_DISABLED -1 | ||
63 | #define NMI_NONE 0 | ||
64 | #define NMI_IO_APIC 1 | ||
65 | #define NMI_LOCAL_APIC 2 | ||
66 | #define NMI_INVALID 3 | ||
67 | #define NMI_DEFAULT NMI_DISABLED | ||
68 | |||
69 | struct ctl_table; | ||
70 | struct file; | ||
71 | extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, | ||
72 | void __user *, size_t *, loff_t *); | ||
73 | |||
74 | extern int unknown_nmi_panic; | ||
75 | |||
76 | void __trigger_all_cpu_backtrace(void); | ||
77 | #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() | ||
78 | |||
79 | |||
80 | void lapic_watchdog_stop(void); | ||
81 | int lapic_watchdog_init(unsigned nmi_hz); | ||
82 | int lapic_wd_event(unsigned nmi_hz); | ||
83 | unsigned lapic_adjust_nmi_hz(unsigned hz); | ||
84 | int lapic_watchdog_ok(void); | ||
85 | void disable_lapic_nmi_watchdog(void); | ||
86 | void enable_lapic_nmi_watchdog(void); | ||
87 | void stop_nmi(void); | ||
88 | void restart_nmi(void); | ||
89 | |||
90 | #endif /* ASM_NMI_H */ | ||
diff --git a/include/asm-x86/nops.h b/include/asm-x86/nops.h index b3930ae539b3..ad0bedd10b89 100644 --- a/include/asm-x86/nops.h +++ b/include/asm-x86/nops.h | |||
@@ -5,6 +5,8 @@ | |||
5 | 5 | ||
6 | /* generic versions from gas | 6 | /* generic versions from gas |
7 | 1: nop | 7 | 1: nop |
8 | the following instructions are NOT nops in 64-bit mode, | ||
9 | for 64-bit mode use K8 or P6 nops instead | ||
8 | 2: movl %esi,%esi | 10 | 2: movl %esi,%esi |
9 | 3: leal 0x00(%esi),%esi | 11 | 3: leal 0x00(%esi),%esi |
10 | 4: leal 0x00(,%esi,1),%esi | 12 | 4: leal 0x00(,%esi,1),%esi |
diff --git a/include/asm-x86/numa_64.h b/include/asm-x86/numa_64.h index 15fe07cde586..32c22ae0709f 100644 --- a/include/asm-x86/numa_64.h +++ b/include/asm-x86/numa_64.h | |||
@@ -1,11 +1,12 @@ | |||
1 | #ifndef _ASM_X8664_NUMA_H | 1 | #ifndef _ASM_X8664_NUMA_H |
2 | #define _ASM_X8664_NUMA_H 1 | 2 | #define _ASM_X8664_NUMA_H 1 |
3 | 3 | ||
4 | #include <linux/nodemask.h> | 4 | #include <linux/nodemask.h> |
5 | #include <asm/apicdef.h> | 5 | #include <asm/apicdef.h> |
6 | 6 | ||
7 | struct bootnode { | 7 | struct bootnode { |
8 | u64 start,end; | 8 | u64 start; |
9 | u64 end; | ||
9 | }; | 10 | }; |
10 | 11 | ||
11 | extern int compute_hash_shift(struct bootnode *nodes, int numnodes); | 12 | extern int compute_hash_shift(struct bootnode *nodes, int numnodes); |
diff --git a/include/asm-x86/numaq.h b/include/asm-x86/numaq.h index 38f710dc37f2..94b86c31239a 100644 --- a/include/asm-x86/numaq.h +++ b/include/asm-x86/numaq.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2002, IBM Corp. | 4 | * Copyright (C) 2002, IBM Corp. |
5 | * | 5 | * |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
@@ -33,7 +33,8 @@ extern int get_memcfg_numaq(void); | |||
33 | /* | 33 | /* |
34 | * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the | 34 | * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the |
35 | */ | 35 | */ |
36 | #define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private quad space */ | 36 | #define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private |
37 | quad space */ | ||
37 | 38 | ||
38 | /* | 39 | /* |
39 | * Communication area for each processor on lynxer-processor tests. | 40 | * Communication area for each processor on lynxer-processor tests. |
@@ -139,7 +140,7 @@ struct sys_cfg_data { | |||
139 | unsigned int low_shrd_mem_base; /* 0 or 512MB or 1GB */ | 140 | unsigned int low_shrd_mem_base; /* 0 or 512MB or 1GB */ |
140 | unsigned int low_shrd_mem_quad_offset; /* 0,128M,256M,512M,1G */ | 141 | unsigned int low_shrd_mem_quad_offset; /* 0,128M,256M,512M,1G */ |
141 | /* may not be totally populated */ | 142 | /* may not be totally populated */ |
142 | unsigned int split_mem_enbl; /* 0 for no low shared memory */ | 143 | unsigned int split_mem_enbl; /* 0 for no low shared memory */ |
143 | unsigned int mmio_sz; /* Size of total system memory mapped I/O */ | 144 | unsigned int mmio_sz; /* Size of total system memory mapped I/O */ |
144 | /* (in MB). */ | 145 | /* (in MB). */ |
145 | unsigned int quad_spin_lock; /* Spare location used for quad */ | 146 | unsigned int quad_spin_lock; /* Spare location used for quad */ |
@@ -152,7 +153,7 @@ struct sys_cfg_data { | |||
152 | /* | 153 | /* |
153 | * memory configuration area for each quad | 154 | * memory configuration area for each quad |
154 | */ | 155 | */ |
155 | struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */ | 156 | struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */ |
156 | }; | 157 | }; |
157 | 158 | ||
158 | static inline unsigned long *get_zholes_size(int nid) | 159 | static inline unsigned long *get_zholes_size(int nid) |
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h index a05b2896492f..6724a4bc6b7a 100644 --- a/include/asm-x86/page.h +++ b/include/asm-x86/page.h | |||
@@ -33,10 +33,8 @@ | |||
33 | 33 | ||
34 | #ifdef CONFIG_X86_64 | 34 | #ifdef CONFIG_X86_64 |
35 | #include <asm/page_64.h> | 35 | #include <asm/page_64.h> |
36 | #define max_pfn_mapped end_pfn_map | ||
37 | #else | 36 | #else |
38 | #include <asm/page_32.h> | 37 | #include <asm/page_32.h> |
39 | #define max_pfn_mapped max_low_pfn | ||
40 | #endif /* CONFIG_X86_64 */ | 38 | #endif /* CONFIG_X86_64 */ |
41 | 39 | ||
42 | #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) | 40 | #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) |
@@ -50,6 +48,8 @@ | |||
50 | 48 | ||
51 | extern int page_is_ram(unsigned long pagenr); | 49 | extern int page_is_ram(unsigned long pagenr); |
52 | 50 | ||
51 | extern unsigned long max_pfn_mapped; | ||
52 | |||
53 | struct page; | 53 | struct page; |
54 | 54 | ||
55 | static inline void clear_user_page(void *page, unsigned long vaddr, | 55 | static inline void clear_user_page(void *page, unsigned long vaddr, |
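page.h drops the per-arch max_pfn_mapped macros (end_pfn_map on 64-bit, max_low_pfn on 32-bit) in favour of one shared variable, so common code can ask directly how far the kernel's direct mapping reaches. The typical test looks like:

    #include <asm/page.h>

    /* Sketch: pages below max_pfn_mapped are covered by the
     * kernel 1:1 mapping and reachable through __va(). */
    static int pfn_is_direct_mapped(unsigned long pfn)
    {
            return pfn < max_pfn_mapped;
    }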
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h index 5f7257fd589b..424e82f8ae27 100644 --- a/include/asm-x86/page_32.h +++ b/include/asm-x86/page_32.h | |||
@@ -47,7 +47,10 @@ typedef unsigned long pgdval_t; | |||
47 | typedef unsigned long pgprotval_t; | 47 | typedef unsigned long pgprotval_t; |
48 | typedef unsigned long phys_addr_t; | 48 | typedef unsigned long phys_addr_t; |
49 | 49 | ||
50 | typedef union { pteval_t pte, pte_low; } pte_t; | 50 | typedef union { |
51 | pteval_t pte; | ||
52 | pteval_t pte_low; | ||
53 | } pte_t; | ||
51 | 54 | ||
52 | #endif /* __ASSEMBLY__ */ | 55 | #endif /* __ASSEMBLY__ */ |
53 | #endif /* CONFIG_X86_PAE */ | 56 | #endif /* CONFIG_X86_PAE */ |
@@ -61,7 +64,7 @@ typedef struct page *pgtable_t; | |||
61 | #endif | 64 | #endif |
62 | 65 | ||
63 | #ifndef __ASSEMBLY__ | 66 | #ifndef __ASSEMBLY__ |
64 | #define __phys_addr(x) ((x)-PAGE_OFFSET) | 67 | #define __phys_addr(x) ((x) - PAGE_OFFSET) |
65 | #define __phys_reloc_hide(x) RELOC_HIDE((x), 0) | 68 | #define __phys_reloc_hide(x) RELOC_HIDE((x), 0) |
66 | 69 | ||
67 | #ifdef CONFIG_FLATMEM | 70 | #ifdef CONFIG_FLATMEM |
@@ -78,7 +81,7 @@ extern unsigned int __VMALLOC_RESERVE; | |||
78 | extern int sysctl_legacy_va_layout; | 81 | extern int sysctl_legacy_va_layout; |
79 | 82 | ||
80 | #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) | 83 | #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) |
81 | #define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE) | 84 | #define MAXMEM (-__PAGE_OFFSET - __VMALLOC_RESERVE) |
82 | 85 | ||
83 | #ifdef CONFIG_X86_USE_3DNOW | 86 | #ifdef CONFIG_X86_USE_3DNOW |
84 | #include <asm/mmx.h> | 87 | #include <asm/mmx.h> |
diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h index 143546073b95..6ea72859c491 100644 --- a/include/asm-x86/page_64.h +++ b/include/asm-x86/page_64.h | |||
@@ -5,7 +5,7 @@ | |||
5 | 5 | ||
6 | #define THREAD_ORDER 1 | 6 | #define THREAD_ORDER 1 |
7 | #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) | 7 | #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) |
8 | #define CURRENT_MASK (~(THREAD_SIZE-1)) | 8 | #define CURRENT_MASK (~(THREAD_SIZE - 1)) |
9 | 9 | ||
10 | #define EXCEPTION_STACK_ORDER 0 | 10 | #define EXCEPTION_STACK_ORDER 0 |
11 | #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) | 11 | #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) |
@@ -48,10 +48,10 @@ | |||
48 | #define __VIRTUAL_MASK_SHIFT 48 | 48 | #define __VIRTUAL_MASK_SHIFT 48 |
49 | 49 | ||
50 | /* | 50 | /* |
51 | * Kernel image size is limited to 128 MB (see level2_kernel_pgt in | 51 | * Kernel image size is limited to 512 MB (see level2_kernel_pgt in |
52 | * arch/x86/kernel/head_64.S), and it is mapped here: | 52 | * arch/x86/kernel/head_64.S), and it is mapped here: |
53 | */ | 53 | */ |
54 | #define KERNEL_IMAGE_SIZE (128*1024*1024) | 54 | #define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) |
55 | #define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL) | 55 | #define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL) |
56 | 56 | ||
57 | #ifndef __ASSEMBLY__ | 57 | #ifndef __ASSEMBLY__ |
@@ -59,7 +59,6 @@ void clear_page(void *page); | |||
59 | void copy_page(void *to, void *from); | 59 | void copy_page(void *to, void *from); |
60 | 60 | ||
61 | extern unsigned long end_pfn; | 61 | extern unsigned long end_pfn; |
62 | extern unsigned long end_pfn_map; | ||
63 | extern unsigned long phys_base; | 62 | extern unsigned long phys_base; |
64 | 63 | ||
65 | extern unsigned long __phys_addr(unsigned long); | 64 | extern unsigned long __phys_addr(unsigned long); |
@@ -81,6 +80,9 @@ typedef struct { pteval_t pte; } pte_t; | |||
81 | 80 | ||
82 | #define vmemmap ((struct page *)VMEMMAP_START) | 81 | #define vmemmap ((struct page *)VMEMMAP_START) |
83 | 82 | ||
83 | extern unsigned long init_memory_mapping(unsigned long start, | ||
84 | unsigned long end); | ||
85 | |||
84 | #endif /* !__ASSEMBLY__ */ | 86 | #endif /* !__ASSEMBLY__ */ |
85 | 87 | ||
86 | #ifdef CONFIG_FLATMEM | 88 | #ifdef CONFIG_FLATMEM |
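page_64.h raises the documented kernel image limit to 512 MB to match the enlarged level2_kernel_pgt, retires end_pfn_map in favour of the shared max_pfn_mapped above, and exports init_memory_mapping() so the direct mapping can be extended piecewise. A sketch under the assumption, consistent with how its callers use it, that the return value is the highest address actually mapped:

    #include <asm/page.h>

    static void extend_direct_map(unsigned long start, unsigned long end)
    {
            unsigned long last_map_addr = init_memory_mapping(start, end);

            if ((last_map_addr >> PAGE_SHIFT) > max_pfn_mapped)
                    max_pfn_mapped = last_map_addr >> PAGE_SHIFT;
    }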
diff --git a/include/asm-x86/param.h b/include/asm-x86/param.h index c996ec4da0c8..6f0d0422f4ca 100644 --- a/include/asm-x86/param.h +++ b/include/asm-x86/param.h | |||
@@ -3,8 +3,8 @@ | |||
3 | 3 | ||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | # define HZ CONFIG_HZ /* Internal kernel timer frequency */ | 5 | # define HZ CONFIG_HZ /* Internal kernel timer frequency */ |
6 | # define USER_HZ 100 /* .. some user interfaces are in "ticks" */ | 6 | # define USER_HZ 100 /* some user interfaces are */ |
7 | # define CLOCKS_PER_SEC (USER_HZ) /* like times() */ | 7 | # define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */ |
8 | #endif | 8 | #endif |
9 | 9 | ||
10 | #ifndef HZ | 10 | #ifndef HZ |
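The param.h hunk only rebalances the comments across the 80-column limit. The distinction it documents matters: the kernel ticks at CONFIG_HZ, while times() and similar interfaces report in fixed 100 Hz USER_HZ units. The real conversion lives in jiffies_to_clock_t(); the underlying arithmetic is:

    #include <asm/param.h>

    /* Sketch: scale internal jiffies to user-visible ticks.
     * The real helper also guards against overflow. */
    static unsigned long jiffies_to_user_ticks(unsigned long j)
    {
            return j * USER_HZ / HZ;
    }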
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h index d6236eb46466..3d419398499b 100644 --- a/include/asm-x86/paravirt.h +++ b/include/asm-x86/paravirt.h | |||
@@ -231,7 +231,8 @@ struct pv_mmu_ops { | |||
231 | void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, | 231 | void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, |
232 | pte_t *ptep, pte_t pteval); | 232 | pte_t *ptep, pte_t pteval); |
233 | void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); | 233 | void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); |
234 | void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); | 234 | void (*pte_update)(struct mm_struct *mm, unsigned long addr, |
235 | pte_t *ptep); | ||
235 | void (*pte_update_defer)(struct mm_struct *mm, | 236 | void (*pte_update_defer)(struct mm_struct *mm, |
236 | unsigned long addr, pte_t *ptep); | 237 | unsigned long addr, pte_t *ptep); |
237 | 238 | ||
@@ -246,7 +247,8 @@ struct pv_mmu_ops { | |||
246 | void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); | 247 | void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); |
247 | void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, | 248 | void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, |
248 | pte_t *ptep, pte_t pte); | 249 | pte_t *ptep, pte_t pte); |
249 | void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); | 250 | void (*pte_clear)(struct mm_struct *mm, unsigned long addr, |
251 | pte_t *ptep); | ||
250 | void (*pmd_clear)(pmd_t *pmdp); | 252 | void (*pmd_clear)(pmd_t *pmdp); |
251 | 253 | ||
252 | #endif /* CONFIG_X86_PAE */ | 254 | #endif /* CONFIG_X86_PAE */ |
@@ -274,8 +276,7 @@ struct pv_mmu_ops { | |||
274 | /* This contains all the paravirt structures: we get a convenient | 276 | /* This contains all the paravirt structures: we get a convenient |
275 | * number for each function using the offset which we use to indicate | 277 | * number for each function using the offset which we use to indicate |
276 | * what to patch. */ | 278 | * what to patch. */ |
277 | struct paravirt_patch_template | 279 | struct paravirt_patch_template { |
278 | { | ||
279 | struct pv_init_ops pv_init_ops; | 280 | struct pv_init_ops pv_init_ops; |
280 | struct pv_time_ops pv_time_ops; | 281 | struct pv_time_ops pv_time_ops; |
281 | struct pv_cpu_ops pv_cpu_ops; | 282 | struct pv_cpu_ops pv_cpu_ops; |
@@ -660,43 +661,56 @@ static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) | |||
660 | } | 661 | } |
661 | 662 | ||
662 | /* These should all do BUG_ON(_err), but our headers are too tangled. */ | 663 | /* These should all do BUG_ON(_err), but our headers are too tangled. */ |
663 | #define rdmsr(msr,val1,val2) do { \ | 664 | #define rdmsr(msr, val1, val2) \ |
665 | do { \ | ||
664 | int _err; \ | 666 | int _err; \ |
665 | u64 _l = paravirt_read_msr(msr, &_err); \ | 667 | u64 _l = paravirt_read_msr(msr, &_err); \ |
666 | val1 = (u32)_l; \ | 668 | val1 = (u32)_l; \ |
667 | val2 = _l >> 32; \ | 669 | val2 = _l >> 32; \ |
668 | } while(0) | 670 | } while (0) |
669 | 671 | ||
670 | #define wrmsr(msr,val1,val2) do { \ | 672 | #define wrmsr(msr, val1, val2) \ |
673 | do { \ | ||
671 | paravirt_write_msr(msr, val1, val2); \ | 674 | paravirt_write_msr(msr, val1, val2); \ |
672 | } while(0) | 675 | } while (0) |
673 | 676 | ||
674 | #define rdmsrl(msr,val) do { \ | 677 | #define rdmsrl(msr, val) \ |
678 | do { \ | ||
675 | int _err; \ | 679 | int _err; \ |
676 | val = paravirt_read_msr(msr, &_err); \ | 680 | val = paravirt_read_msr(msr, &_err); \ |
677 | } while(0) | 681 | } while (0) |
678 | 682 | ||
679 | #define wrmsrl(msr,val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32) | 683 | #define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32) |
680 | #define wrmsr_safe(msr,a,b) paravirt_write_msr(msr, a, b) | 684 | #define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b) |
681 | 685 | ||
682 | /* rdmsr with exception handling */ | 686 | /* rdmsr with exception handling */ |
683 | #define rdmsr_safe(msr,a,b) ({ \ | 687 | #define rdmsr_safe(msr, a, b) \ |
688 | ({ \ | ||
684 | int _err; \ | 689 | int _err; \ |
685 | u64 _l = paravirt_read_msr(msr, &_err); \ | 690 | u64 _l = paravirt_read_msr(msr, &_err); \ |
686 | (*a) = (u32)_l; \ | 691 | (*a) = (u32)_l; \ |
687 | (*b) = _l >> 32; \ | 692 | (*b) = _l >> 32; \ |
688 | _err; }) | 693 | _err; \ |
694 | }) | ||
695 | |||
696 | static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) | ||
697 | { | ||
698 | int err; | ||
689 | 699 | ||
700 | *p = paravirt_read_msr(msr, &err); | ||
701 | return err; | ||
702 | } | ||
690 | 703 | ||
691 | static inline u64 paravirt_read_tsc(void) | 704 | static inline u64 paravirt_read_tsc(void) |
692 | { | 705 | { |
693 | return PVOP_CALL0(u64, pv_cpu_ops.read_tsc); | 706 | return PVOP_CALL0(u64, pv_cpu_ops.read_tsc); |
694 | } | 707 | } |
695 | 708 | ||
696 | #define rdtscl(low) do { \ | 709 | #define rdtscl(low) \ |
710 | do { \ | ||
697 | u64 _l = paravirt_read_tsc(); \ | 711 | u64 _l = paravirt_read_tsc(); \ |
698 | low = (int)_l; \ | 712 | low = (int)_l; \ |
699 | } while(0) | 713 | } while (0) |
700 | 714 | ||
701 | #define rdtscll(val) (val = paravirt_read_tsc()) | 715 | #define rdtscll(val) (val = paravirt_read_tsc()) |
702 | 716 | ||
@@ -711,11 +725,12 @@ static inline unsigned long long paravirt_read_pmc(int counter) | |||
711 | return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter); | 725 | return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter); |
712 | } | 726 | } |
713 | 727 | ||
714 | #define rdpmc(counter,low,high) do { \ | 728 | #define rdpmc(counter, low, high) \ |
729 | do { \ | ||
715 | u64 _l = paravirt_read_pmc(counter); \ | 730 | u64 _l = paravirt_read_pmc(counter); \ |
716 | low = (u32)_l; \ | 731 | low = (u32)_l; \ |
717 | high = _l >> 32; \ | 732 | high = _l >> 32; \ |
718 | } while(0) | 733 | } while (0) |
719 | 734 | ||
720 | static inline unsigned long long paravirt_rdtscp(unsigned int *aux) | 735 | static inline unsigned long long paravirt_rdtscp(unsigned int *aux) |
721 | { | 736 | { |
@@ -794,7 +809,8 @@ static inline void set_iopl_mask(unsigned mask) | |||
794 | } | 809 | } |
795 | 810 | ||
796 | /* The paravirtualized I/O functions */ | 811 | /* The paravirtualized I/O functions */ |
797 | static inline void slow_down_io(void) { | 812 | static inline void slow_down_io(void) |
813 | { | ||
798 | pv_cpu_ops.io_delay(); | 814 | pv_cpu_ops.io_delay(); |
799 | #ifdef REALLY_SLOW_IO | 815 | #ifdef REALLY_SLOW_IO |
800 | pv_cpu_ops.io_delay(); | 816 | pv_cpu_ops.io_delay(); |
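The reflowed MSR wrappers above keep two idioms worth spelling out: do { ... } while (0) makes a multi-statement macro behave like a single statement after an if, and rdmsr_safe() uses a GNU statement expression ({ ... }) so one macro can both fill its output pointers and evaluate to the error code, which the new rdmsrl_safe() then wraps as an ordinary function. A minimal user-space sketch of both shapes, assuming nothing from the kernel; fake_read_msr() is only a stand-in for paravirt_read_msr():

#include <stdio.h>
#include <stdint.h>

static uint64_t fake_read_msr(unsigned int msr, int *err)
{
        *err = (msr == 0x1b) ? 0 : -5;  /* pretend only the APIC base MSR exists */
        return 0xfee00900ULL;
}

#define demo_rdmsr(msr, val1, val2) \
do { \
        int _err; \
        uint64_t _l = fake_read_msr(msr, &_err); \
        (void)_err;  /* the header notes these should really BUG_ON(_err) */ \
        (val1) = (uint32_t)_l; \
        (val2) = (uint32_t)(_l >> 32); \
} while (0)

#define demo_rdmsr_safe(msr, a, b) \
({ \
        int _err; \
        uint64_t _l = fake_read_msr(msr, &_err); \
        *(a) = (uint32_t)_l; \
        *(b) = (uint32_t)(_l >> 32); \
        _err;  /* the whole ({ ... }) evaluates to this */ \
})

int main(void)
{
        uint32_t lo, hi;

        demo_rdmsr(0x1b, lo, hi);
        if (demo_rdmsr_safe(0x1b, &lo, &hi) == 0)
                printf("msr 0x1b = %08x:%08x\n", hi, lo);
        return 0;
}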
diff --git a/include/asm-x86/parport.h b/include/asm-x86/parport.h index 019cbca24a38..3c4ffeb467e9 100644 --- a/include/asm-x86/parport.h +++ b/include/asm-x86/parport.h | |||
@@ -1,10 +1,10 @@ | |||
1 | #ifndef _ASM_X86_PARPORT_H | 1 | #ifndef _ASM_X86_PARPORT_H |
2 | #define _ASM_X86_PARPORT_H | 2 | #define _ASM_X86_PARPORT_H |
3 | 3 | ||
4 | static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma); | 4 | static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma); |
5 | static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) | 5 | static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma) |
6 | { | 6 | { |
7 | return parport_pc_find_isa_ports (autoirq, autodma); | 7 | return parport_pc_find_isa_ports(autoirq, autodma); |
8 | } | 8 | } |
9 | 9 | ||
10 | #endif /* _ASM_X86_PARPORT_H */ | 10 | #endif /* _ASM_X86_PARPORT_H */ |
diff --git a/include/asm-x86/pat.h b/include/asm-x86/pat.h new file mode 100644 index 000000000000..8b822b5a1786 --- /dev/null +++ b/include/asm-x86/pat.h | |||
@@ -0,0 +1,16 @@ | |||
1 | |||
2 | #ifndef _ASM_PAT_H | ||
3 | #define _ASM_PAT_H 1 | ||
4 | |||
5 | #include <linux/types.h> | ||
6 | |||
7 | extern int pat_wc_enabled; | ||
8 | |||
9 | extern void pat_init(void); | ||
10 | |||
11 | extern int reserve_memtype(u64 start, u64 end, | ||
12 | unsigned long req_type, unsigned long *ret_type); | ||
13 | extern int free_memtype(u64 start, u64 end); | ||
14 | |||
15 | #endif | ||
16 | |||
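The reserve_memtype()/free_memtype() pair declared in this new header tracks requested caching attributes per physical range so that two mappings of the same memory cannot disagree. The following is only a toy user-space model of that invariant, under simplifying assumptions (fixed-size table, exact-match free, one type per overlap); it is not the kernel's data structure or policy:

#include <stdio.h>
#include <stdint.h>

enum cache_type { CT_WB, CT_WC, CT_UC_MINUS, CT_UC };

struct memtype {
        uint64_t start, end;
        enum cache_type type;
        int used;
};

static struct memtype table[32];

static int toy_reserve_memtype(uint64_t start, uint64_t end, enum cache_type req)
{
        int i, slot = -1;

        for (i = 0; i < 32; i++) {
                if (!table[i].used) {
                        if (slot < 0)
                                slot = i;
                        continue;
                }
                /* overlapping range already reserved with another type: refuse */
                if (start < table[i].end && end > table[i].start &&
                    table[i].type != req)
                        return -1;
        }
        if (slot < 0)
                return -1;
        table[slot] = (struct memtype){ start, end, req, 1 };
        return 0;
}

static void toy_free_memtype(uint64_t start, uint64_t end)
{
        int i;

        for (i = 0; i < 32; i++)
                if (table[i].used && table[i].start == start && table[i].end == end)
                        table[i].used = 0;
}

int main(void)
{
        printf("%d\n", toy_reserve_memtype(0x1000, 0x2000, CT_WC)); /* 0: ok       */
        printf("%d\n", toy_reserve_memtype(0x1800, 0x3000, CT_UC)); /* -1: conflict */
        toy_free_memtype(0x1000, 0x2000);
        printf("%d\n", toy_reserve_memtype(0x1800, 0x3000, CT_UC)); /* 0: ok now   */
        return 0;
}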
diff --git a/include/asm-x86/pci-direct.h b/include/asm-x86/pci-direct.h index 6823fa4f1afa..5b21485be573 100644 --- a/include/asm-x86/pci-direct.h +++ b/include/asm-x86/pci-direct.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | 5 | ||
6 | /* Direct PCI access. This is used for PCI accesses in early boot before | 6 | /* Direct PCI access. This is used for PCI accesses in early boot before |
7 | the PCI subsystem works. */ | 7 | the PCI subsystem works. */ |
8 | 8 | ||
9 | extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset); | 9 | extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset); |
10 | extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset); | 10 | extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset); |
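read_pci_config() and friends implement PCI configuration mechanism #1: a 32-bit address selecting bus/device/function/register is written to port 0xCF8, and the data is then transferred through port 0xCFC. A sketch of just the address encoding follows; the actual port I/O needs ring 0 (or iopl()) and is omitted here:

#include <stdio.h>
#include <stdint.h>

static uint32_t pci_conf1_addr(uint8_t bus, uint8_t slot, uint8_t func,
                               uint8_t offset)
{
        return 0x80000000u            /* enable bit                */
             | ((uint32_t)bus  << 16) /* bus:      bits 23..16     */
             | ((uint32_t)slot << 11) /* device:   bits 15..11     */
             | ((uint32_t)func << 8)  /* function: bits 10..8      */
             | (offset & 0xfc);       /* dword-aligned register    */
}

int main(void)
{
        /* bus 0, device 2, function 0, vendor/device-ID register */
        printf("outl(0x%08x, 0xCF8); val = inl(0xCFC);\n",
               pci_conf1_addr(0, 2, 0, 0));
        return 0;
}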
diff --git a/include/asm-x86/pci.h b/include/asm-x86/pci.h index c61190cb9e12..ddd8e248fc0a 100644 --- a/include/asm-x86/pci.h +++ b/include/asm-x86/pci.h | |||
@@ -8,14 +8,13 @@ | |||
8 | #include <asm/scatterlist.h> | 8 | #include <asm/scatterlist.h> |
9 | #include <asm/io.h> | 9 | #include <asm/io.h> |
10 | 10 | ||
11 | |||
12 | #ifdef __KERNEL__ | 11 | #ifdef __KERNEL__ |
13 | 12 | ||
14 | struct pci_sysdata { | 13 | struct pci_sysdata { |
15 | int domain; /* PCI domain */ | 14 | int domain; /* PCI domain */ |
16 | int node; /* NUMA node */ | 15 | int node; /* NUMA node */ |
17 | #ifdef CONFIG_X86_64 | 16 | #ifdef CONFIG_X86_64 |
18 | void* iommu; /* IOMMU private data */ | 17 | void *iommu; /* IOMMU private data */ |
19 | #endif | 18 | #endif |
20 | }; | 19 | }; |
21 | 20 | ||
@@ -52,7 +51,7 @@ extern unsigned long pci_mem_start; | |||
52 | #define PCIBIOS_MIN_CARDBUS_IO 0x4000 | 51 | #define PCIBIOS_MIN_CARDBUS_IO 0x4000 |
53 | 52 | ||
54 | void pcibios_config_init(void); | 53 | void pcibios_config_init(void); |
55 | struct pci_bus * pcibios_scan_root(int bus); | 54 | struct pci_bus *pcibios_scan_root(int bus); |
56 | 55 | ||
57 | void pcibios_set_master(struct pci_dev *dev); | 56 | void pcibios_set_master(struct pci_dev *dev); |
58 | void pcibios_penalize_isa_irq(int irq, int active); | 57 | void pcibios_penalize_isa_irq(int irq, int active); |
@@ -62,7 +61,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); | |||
62 | 61 | ||
63 | #define HAVE_PCI_MMAP | 62 | #define HAVE_PCI_MMAP |
64 | extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | 63 | extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, |
65 | enum pci_mmap_state mmap_state, int write_combine); | 64 | enum pci_mmap_state mmap_state, |
65 | int write_combine); | ||
66 | 66 | ||
67 | 67 | ||
68 | #ifdef CONFIG_PCI | 68 | #ifdef CONFIG_PCI |
diff --git a/include/asm-x86/pci_64.h b/include/asm-x86/pci_64.h index 374690314539..df867e5d80b1 100644 --- a/include/asm-x86/pci_64.h +++ b/include/asm-x86/pci_64.h | |||
@@ -1,12 +1,10 @@ | |||
1 | #ifndef __x8664_PCI_H | 1 | #ifndef __x8664_PCI_H |
2 | #define __x8664_PCI_H | 2 | #define __x8664_PCI_H |
3 | 3 | ||
4 | |||
5 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
6 | 5 | ||
7 | |||
8 | #ifdef CONFIG_CALGARY_IOMMU | 6 | #ifdef CONFIG_CALGARY_IOMMU |
9 | static inline void* pci_iommu(struct pci_bus *bus) | 7 | static inline void *pci_iommu(struct pci_bus *bus) |
10 | { | 8 | { |
11 | struct pci_sysdata *sd = bus->sysdata; | 9 | struct pci_sysdata *sd = bus->sysdata; |
12 | return sd->iommu; | 10 | return sd->iommu; |
@@ -19,11 +17,10 @@ static inline void set_pci_iommu(struct pci_bus *bus, void *val) | |||
19 | } | 17 | } |
20 | #endif /* CONFIG_CALGARY_IOMMU */ | 18 | #endif /* CONFIG_CALGARY_IOMMU */ |
21 | 19 | ||
22 | 20 | extern int (*pci_config_read)(int seg, int bus, int dev, int fn, | |
23 | extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value); | 21 | int reg, int len, u32 *value); |
24 | extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value); | 22 | extern int (*pci_config_write)(int seg, int bus, int dev, int fn, |
25 | 23 | int reg, int len, u32 value); | |
26 | |||
27 | 24 | ||
28 | extern void pci_iommu_alloc(void); | 25 | extern void pci_iommu_alloc(void); |
29 | 26 | ||
@@ -65,5 +62,4 @@ extern void pci_iommu_alloc(void); | |||
65 | 62 | ||
66 | #endif /* __KERNEL__ */ | 63 | #endif /* __KERNEL__ */ |
67 | 64 | ||
68 | |||
69 | #endif /* __x8664_PCI_H */ | 65 | #endif /* __x8664_PCI_H */ |
diff --git a/include/asm-x86/pda.h b/include/asm-x86/pda.h index c0305bff0f19..101fb9e11954 100644 --- a/include/asm-x86/pda.h +++ b/include/asm-x86/pda.h | |||
@@ -22,7 +22,6 @@ struct x8664_pda { | |||
22 | offset 40!!! */ | 22 | offset 40!!! */ |
23 | #endif | 23 | #endif |
24 | char *irqstackptr; | 24 | char *irqstackptr; |
25 | unsigned int nodenumber; /* number of current node */ | ||
26 | unsigned int __softirq_pending; | 25 | unsigned int __softirq_pending; |
27 | unsigned int __nmi_count; /* number of NMI on this CPUs */ | 26 | unsigned int __nmi_count; /* number of NMI on this CPUs */ |
28 | short mmu_state; | 27 | short mmu_state; |
@@ -58,34 +57,36 @@ extern struct x8664_pda _proxy_pda; | |||
58 | 57 | ||
59 | #define pda_offset(field) offsetof(struct x8664_pda, field) | 58 | #define pda_offset(field) offsetof(struct x8664_pda, field) |
60 | 59 | ||
61 | #define pda_to_op(op, field, val) do { \ | 60 | #define pda_to_op(op, field, val) \ |
62 | typedef typeof(_proxy_pda.field) T__; \ | 61 | do { \ |
63 | if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \ | 62 | typedef typeof(_proxy_pda.field) T__; \ |
64 | switch (sizeof(_proxy_pda.field)) { \ | 63 | if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \ |
65 | case 2: \ | 64 | switch (sizeof(_proxy_pda.field)) { \ |
66 | asm(op "w %1,%%gs:%c2" : \ | 65 | case 2: \ |
67 | "+m" (_proxy_pda.field) : \ | 66 | asm(op "w %1,%%gs:%c2" : \ |
68 | "ri" ((T__)val), \ | 67 | "+m" (_proxy_pda.field) : \ |
69 | "i"(pda_offset(field))); \ | 68 | "ri" ((T__)val), \ |
70 | break; \ | 69 | "i"(pda_offset(field))); \ |
71 | case 4: \ | 70 | break; \ |
72 | asm(op "l %1,%%gs:%c2" : \ | 71 | case 4: \ |
73 | "+m" (_proxy_pda.field) : \ | 72 | asm(op "l %1,%%gs:%c2" : \ |
74 | "ri" ((T__)val), \ | 73 | "+m" (_proxy_pda.field) : \ |
75 | "i" (pda_offset(field))); \ | 74 | "ri" ((T__)val), \ |
76 | break; \ | 75 | "i" (pda_offset(field))); \ |
77 | case 8: \ | 76 | break; \ |
78 | asm(op "q %1,%%gs:%c2": \ | 77 | case 8: \ |
79 | "+m" (_proxy_pda.field) : \ | 78 | asm(op "q %1,%%gs:%c2": \ |
80 | "ri" ((T__)val), \ | 79 | "+m" (_proxy_pda.field) : \ |
81 | "i"(pda_offset(field))); \ | 80 | "ri" ((T__)val), \ |
82 | break; \ | 81 | "i"(pda_offset(field))); \ |
83 | default: \ | 82 | break; \ |
84 | __bad_pda_field(); \ | 83 | default: \ |
85 | } \ | 84 | __bad_pda_field(); \ |
86 | } while (0) | 85 | } \ |
86 | } while (0) | ||
87 | 87 | ||
88 | #define pda_from_op(op,field) ({ \ | 88 | #define pda_from_op(op, field) \ |
89 | ({ \ | ||
89 | typeof(_proxy_pda.field) ret__; \ | 90 | typeof(_proxy_pda.field) ret__; \ |
90 | switch (sizeof(_proxy_pda.field)) { \ | 91 | switch (sizeof(_proxy_pda.field)) { \ |
91 | case 2: \ | 92 | case 2: \ |
@@ -93,23 +94,24 @@ extern struct x8664_pda _proxy_pda; | |||
93 | "=r" (ret__) : \ | 94 | "=r" (ret__) : \ |
94 | "i" (pda_offset(field)), \ | 95 | "i" (pda_offset(field)), \ |
95 | "m" (_proxy_pda.field)); \ | 96 | "m" (_proxy_pda.field)); \ |
96 | break; \ | 97 | break; \ |
97 | case 4: \ | 98 | case 4: \ |
98 | asm(op "l %%gs:%c1,%0": \ | 99 | asm(op "l %%gs:%c1,%0": \ |
99 | "=r" (ret__): \ | 100 | "=r" (ret__): \ |
100 | "i" (pda_offset(field)), \ | 101 | "i" (pda_offset(field)), \ |
101 | "m" (_proxy_pda.field)); \ | 102 | "m" (_proxy_pda.field)); \ |
102 | break; \ | 103 | break; \ |
103 | case 8: \ | 104 | case 8: \ |
104 | asm(op "q %%gs:%c1,%0": \ | 105 | asm(op "q %%gs:%c1,%0": \ |
105 | "=r" (ret__) : \ | 106 | "=r" (ret__) : \ |
106 | "i" (pda_offset(field)), \ | 107 | "i" (pda_offset(field)), \ |
107 | "m" (_proxy_pda.field)); \ | 108 | "m" (_proxy_pda.field)); \ |
108 | break; \ | 109 | break; \ |
109 | default: \ | 110 | default: \ |
110 | __bad_pda_field(); \ | 111 | __bad_pda_field(); \ |
111 | } \ | 112 | } \ |
112 | ret__; }) | 113 | ret__; \ |
114 | }) | ||
113 | 115 | ||
114 | #define read_pda(field) pda_from_op("mov", field) | 116 | #define read_pda(field) pda_from_op("mov", field) |
115 | #define write_pda(field, val) pda_to_op("mov", field, val) | 117 | #define write_pda(field, val) pda_to_op("mov", field, val) |
@@ -118,12 +120,13 @@ extern struct x8664_pda _proxy_pda; | |||
118 | #define or_pda(field, val) pda_to_op("or", field, val) | 120 | #define or_pda(field, val) pda_to_op("or", field, val) |
119 | 121 | ||
120 | /* This is not atomic against other CPUs -- CPU preemption needs to be off */ | 122 | /* This is not atomic against other CPUs -- CPU preemption needs to be off */ |
121 | #define test_and_clear_bit_pda(bit, field) ({ \ | 123 | #define test_and_clear_bit_pda(bit, field) \ |
122 | int old__; \ | 124 | ({ \ |
123 | asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \ | 125 | int old__; \ |
124 | : "=r" (old__), "+m" (_proxy_pda.field) \ | 126 | asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \ |
125 | : "dIr" (bit), "i" (pda_offset(field)) : "memory"); \ | 127 | : "=r" (old__), "+m" (_proxy_pda.field) \ |
126 | old__; \ | 128 | : "dIr" (bit), "i" (pda_offset(field)) : "memory");\ |
129 | old__; \ | ||
127 | }) | 130 | }) |
128 | 131 | ||
129 | #endif | 132 | #endif |
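Two compile-time tricks carry through the reindented pda_to_op()/pda_from_op(): the dead branch if (0) { T__ tmp__; tmp__ = (val); } forces (val) to be assignment-compatible with the field's type at zero run-time cost, and switch (sizeof(...)) selects exactly one asm variant while the other, constant-folded branches are discarded (unsupported sizes hit the undefined __bad_pda_field() and fail at link time). A stand-alone sketch of the pattern, with the %gs asm replaced by plain stores:

#include <stdio.h>

static short counter16;
static long long counter64;

#define checked_store(var, val) \
do { \
        typedef __typeof__(var) T__; \
        if (0) { T__ tmp__; tmp__ = (val); }  /* type check only */ \
        switch (sizeof(var)) { \
        case 2: \
                printf("16-bit store\n"); \
                (var) = (T__)(val); \
                break; \
        case 8: \
                printf("64-bit store\n"); \
                (var) = (T__)(val); \
                break; \
        default: \
                printf("unsupported size\n"); \
        } \
} while (0)

int main(void)
{
        checked_store(counter16, 7);  /* resolves to the 16-bit arm */
        checked_store(counter64, 7);  /* resolves to the 64-bit arm */
        /* checked_store(counter16, "oops"); draws a compile-time diagnostic */
        return 0;
}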
diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h index 0dec00f27eb4..736fc3bb8e1e 100644 --- a/include/asm-x86/percpu.h +++ b/include/asm-x86/percpu.h | |||
@@ -85,58 +85,62 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off); | |||
85 | * don't give an lvalue though). */ | 85 | * don't give an lvalue though). */ |
86 | extern void __bad_percpu_size(void); | 86 | extern void __bad_percpu_size(void); |
87 | 87 | ||
88 | #define percpu_to_op(op,var,val) \ | 88 | #define percpu_to_op(op, var, val) \ |
89 | do { \ | 89 | do { \ |
90 | typedef typeof(var) T__; \ | 90 | typedef typeof(var) T__; \ |
91 | if (0) { T__ tmp__; tmp__ = (val); } \ | 91 | if (0) { \ |
92 | switch (sizeof(var)) { \ | 92 | T__ tmp__; \ |
93 | case 1: \ | 93 | tmp__ = (val); \ |
94 | asm(op "b %1,"__percpu_seg"%0" \ | 94 | } \ |
95 | : "+m" (var) \ | 95 | switch (sizeof(var)) { \ |
96 | :"ri" ((T__)val)); \ | 96 | case 1: \ |
97 | break; \ | 97 | asm(op "b %1,"__percpu_seg"%0" \ |
98 | case 2: \ | 98 | : "+m" (var) \ |
99 | asm(op "w %1,"__percpu_seg"%0" \ | 99 | : "ri" ((T__)val)); \ |
100 | : "+m" (var) \ | 100 | break; \ |
101 | :"ri" ((T__)val)); \ | 101 | case 2: \ |
102 | break; \ | 102 | asm(op "w %1,"__percpu_seg"%0" \ |
103 | case 4: \ | 103 | : "+m" (var) \ |
104 | asm(op "l %1,"__percpu_seg"%0" \ | 104 | : "ri" ((T__)val)); \ |
105 | : "+m" (var) \ | 105 | break; \ |
106 | :"ri" ((T__)val)); \ | 106 | case 4: \ |
107 | break; \ | 107 | asm(op "l %1,"__percpu_seg"%0" \ |
108 | default: __bad_percpu_size(); \ | 108 | : "+m" (var) \ |
109 | } \ | 109 | : "ri" ((T__)val)); \ |
110 | } while (0) | 110 | break; \ |
111 | 111 | default: __bad_percpu_size(); \ | |
112 | #define percpu_from_op(op,var) \ | 112 | } \ |
113 | ({ \ | 113 | } while (0) |
114 | typeof(var) ret__; \ | 114 | |
115 | switch (sizeof(var)) { \ | 115 | #define percpu_from_op(op, var) \ |
116 | case 1: \ | 116 | ({ \ |
117 | asm(op "b "__percpu_seg"%1,%0" \ | 117 | typeof(var) ret__; \ |
118 | : "=r" (ret__) \ | 118 | switch (sizeof(var)) { \ |
119 | : "m" (var)); \ | 119 | case 1: \ |
120 | break; \ | 120 | asm(op "b "__percpu_seg"%1,%0" \ |
121 | case 2: \ | 121 | : "=r" (ret__) \ |
122 | asm(op "w "__percpu_seg"%1,%0" \ | 122 | : "m" (var)); \ |
123 | : "=r" (ret__) \ | 123 | break; \ |
124 | : "m" (var)); \ | 124 | case 2: \ |
125 | break; \ | 125 | asm(op "w "__percpu_seg"%1,%0" \ |
126 | case 4: \ | 126 | : "=r" (ret__) \ |
127 | asm(op "l "__percpu_seg"%1,%0" \ | 127 | : "m" (var)); \ |
128 | : "=r" (ret__) \ | 128 | break; \ |
129 | : "m" (var)); \ | 129 | case 4: \ |
130 | break; \ | 130 | asm(op "l "__percpu_seg"%1,%0" \ |
131 | default: __bad_percpu_size(); \ | 131 | : "=r" (ret__) \ |
132 | } \ | 132 | : "m" (var)); \ |
133 | ret__; }) | 133 | break; \ |
134 | default: __bad_percpu_size(); \ | ||
135 | } \ | ||
136 | ret__; \ | ||
137 | }) | ||
134 | 138 | ||
135 | #define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var) | 139 | #define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var) |
136 | #define x86_write_percpu(var,val) percpu_to_op("mov", per_cpu__##var, val) | 140 | #define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val) |
137 | #define x86_add_percpu(var,val) percpu_to_op("add", per_cpu__##var, val) | 141 | #define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val) |
138 | #define x86_sub_percpu(var,val) percpu_to_op("sub", per_cpu__##var, val) | 142 | #define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val) |
139 | #define x86_or_percpu(var,val) percpu_to_op("or", per_cpu__##var, val) | 143 | #define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val) |
140 | #endif /* !__ASSEMBLY__ */ | 144 | #endif /* !__ASSEMBLY__ */ |
141 | #endif /* !CONFIG_X86_64 */ | 145 | #endif /* !CONFIG_X86_64 */ |
142 | #endif /* _ASM_X86_PERCPU_H_ */ | 146 | #endif /* _ASM_X86_PERCPU_H_ */ |
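Callers never see the asm above; they use the x86_*_percpu() wrappers at the bottom of the file, which paste the per_cpu__ prefix onto the variable name with ##. A user-space model of that calling convention, with the %gs segment override replaced by an array indexed by a fake CPU id (this_cpu is only a stand-in for the real per-CPU addressing):

#include <stdio.h>

#define NR_CPUS 4

static int this_cpu = 1;  /* stand-in for %gs-based per-CPU addressing */

static unsigned long per_cpu__irq_count[NR_CPUS];

#define x86_read_percpu(var)       (per_cpu__##var[this_cpu])
#define x86_write_percpu(var, val) (per_cpu__##var[this_cpu] = (val))
#define x86_add_percpu(var, val)   (per_cpu__##var[this_cpu] += (val))

int main(void)
{
        x86_write_percpu(irq_count, 0);
        x86_add_percpu(irq_count, 3);
        printf("cpu%d irq_count = %lu\n", this_cpu,
               x86_read_percpu(irq_count));
        return 0;
}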
diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h index 701404fab308..46bc52c0eae1 100644 --- a/include/asm-x86/pgtable-2level.h +++ b/include/asm-x86/pgtable-2level.h | |||
@@ -26,7 +26,8 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) | |||
26 | native_set_pte(ptep, pte); | 26 | native_set_pte(ptep, pte); |
27 | } | 27 | } |
28 | 28 | ||
29 | static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr, | 29 | static inline void native_set_pte_present(struct mm_struct *mm, |
30 | unsigned long addr, | ||
30 | pte_t *ptep, pte_t pte) | 31 | pte_t *ptep, pte_t pte) |
31 | { | 32 | { |
32 | native_set_pte(ptep, pte); | 33 | native_set_pte(ptep, pte); |
@@ -37,7 +38,8 @@ static inline void native_pmd_clear(pmd_t *pmdp) | |||
37 | native_set_pmd(pmdp, __pmd(0)); | 38 | native_set_pmd(pmdp, __pmd(0)); |
38 | } | 39 | } |
39 | 40 | ||
40 | static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp) | 41 | static inline void native_pte_clear(struct mm_struct *mm, |
42 | unsigned long addr, pte_t *xp) | ||
41 | { | 43 | { |
42 | *xp = native_make_pte(0); | 44 | *xp = native_make_pte(0); |
43 | } | 45 | } |
@@ -61,16 +63,18 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp) | |||
61 | */ | 63 | */ |
62 | #define PTE_FILE_MAX_BITS 29 | 64 | #define PTE_FILE_MAX_BITS 29 |
63 | 65 | ||
64 | #define pte_to_pgoff(pte) \ | 66 | #define pte_to_pgoff(pte) \ |
65 | ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 )) | 67 | ((((pte).pte_low >> 1) & 0x1f) + (((pte).pte_low >> 8) << 5)) |
66 | 68 | ||
67 | #define pgoff_to_pte(off) \ | 69 | #define pgoff_to_pte(off) \ |
68 | ((pte_t) { .pte_low = (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE }) | 70 | ((pte_t) { .pte_low = (((off) & 0x1f) << 1) + \ |
71 | (((off) >> 5) << 8) + _PAGE_FILE }) | ||
69 | 72 | ||
70 | /* Encode and de-code a swap entry */ | 73 | /* Encode and de-code a swap entry */ |
71 | #define __swp_type(x) (((x).val >> 1) & 0x1f) | 74 | #define __swp_type(x) (((x).val >> 1) & 0x1f) |
72 | #define __swp_offset(x) ((x).val >> 8) | 75 | #define __swp_offset(x) ((x).val >> 8) |
73 | #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) | 76 | #define __swp_entry(type, offset) \ |
77 | ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) | ||
74 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) | 78 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) |
75 | #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) | 79 | #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) |
76 | 80 | ||
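The pte_to_pgoff()/pgoff_to_pte() pair reflowed above packs a 29-bit file offset around the bits that must keep their meaning: bit 0 (present) stays clear, bit 6 carries _PAGE_FILE, so five offset bits go into bits 1-5 and the remainder starts at bit 8. A stand-alone round-trip check of that arithmetic:

#include <assert.h>
#include <stdio.h>

#define _PAGE_FILE 0x040  /* bit 6, shared with _PAGE_DIRTY */

static unsigned long pgoff_to_ptelow(unsigned long off)
{
        return ((off & 0x1f) << 1) + ((off >> 5) << 8) + _PAGE_FILE;
}

static unsigned long ptelow_to_pgoff(unsigned long pte_low)
{
        return ((pte_low >> 1) & 0x1f) + ((pte_low >> 8) << 5);
}

int main(void)
{
        unsigned long off;

        /* PTE_FILE_MAX_BITS == 29: every encodable offset must survive */
        for (off = 0; off < (1ul << 29); off += 12345)
                assert(ptelow_to_pgoff(pgoff_to_ptelow(off)) == off);
        printf("encode/decode round-trips for all tested offsets\n");
        return 0;
}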
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h index 1d763eec740f..8b4a9d44b7f4 100644 --- a/include/asm-x86/pgtable-3level.h +++ b/include/asm-x86/pgtable-3level.h | |||
@@ -8,22 +8,26 @@ | |||
8 | * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> | 8 | * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define pte_ERROR(e) \ | 11 | #define pte_ERROR(e) \ |
12 | printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low) | 12 | printk("%s:%d: bad pte %p(%08lx%08lx).\n", \ |
13 | #define pmd_ERROR(e) \ | 13 | __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low) |
14 | printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e)) | 14 | #define pmd_ERROR(e) \ |
15 | #define pgd_ERROR(e) \ | 15 | printk("%s:%d: bad pmd %p(%016Lx).\n", \ |
16 | printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) | 16 | __FILE__, __LINE__, &(e), pmd_val(e)) |
17 | 17 | #define pgd_ERROR(e) \ | |
18 | printk("%s:%d: bad pgd %p(%016Lx).\n", \ | ||
19 | __FILE__, __LINE__, &(e), pgd_val(e)) | ||
18 | 20 | ||
19 | static inline int pud_none(pud_t pud) | 21 | static inline int pud_none(pud_t pud) |
20 | { | 22 | { |
21 | return pud_val(pud) == 0; | 23 | return pud_val(pud) == 0; |
22 | } | 24 | } |
25 | |||
23 | static inline int pud_bad(pud_t pud) | 26 | static inline int pud_bad(pud_t pud) |
24 | { | 27 | { |
25 | return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0; | 28 | return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0; |
26 | } | 29 | } |
30 | |||
27 | static inline int pud_present(pud_t pud) | 31 | static inline int pud_present(pud_t pud) |
28 | { | 32 | { |
29 | return pud_val(pud) & _PAGE_PRESENT; | 33 | return pud_val(pud) & _PAGE_PRESENT; |
@@ -48,7 +52,8 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte) | |||
48 | * we are justified in merely clearing the PTE present bit, followed | 52 | * we are justified in merely clearing the PTE present bit, followed |
49 | * by a set. The ordering here is important. | 53 | * by a set. The ordering here is important. |
50 | */ | 54 | */ |
51 | static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr, | 55 | static inline void native_set_pte_present(struct mm_struct *mm, |
56 | unsigned long addr, | ||
52 | pte_t *ptep, pte_t pte) | 57 | pte_t *ptep, pte_t pte) |
53 | { | 58 | { |
54 | ptep->pte_low = 0; | 59 | ptep->pte_low = 0; |
@@ -60,15 +65,17 @@ static inline void native_set_pte_present(struct mm_struct *mm, unsigned long ad | |||
60 | 65 | ||
61 | static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) | 66 | static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) |
62 | { | 67 | { |
63 | set_64bit((unsigned long long *)(ptep),native_pte_val(pte)); | 68 | set_64bit((unsigned long long *)(ptep), native_pte_val(pte)); |
64 | } | 69 | } |
70 | |||
65 | static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) | 71 | static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) |
66 | { | 72 | { |
67 | set_64bit((unsigned long long *)(pmdp),native_pmd_val(pmd)); | 73 | set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd)); |
68 | } | 74 | } |
75 | |||
69 | static inline void native_set_pud(pud_t *pudp, pud_t pud) | 76 | static inline void native_set_pud(pud_t *pudp, pud_t pud) |
70 | { | 77 | { |
71 | set_64bit((unsigned long long *)(pudp),native_pud_val(pud)); | 78 | set_64bit((unsigned long long *)(pudp), native_pud_val(pud)); |
72 | } | 79 | } |
73 | 80 | ||
74 | /* | 81 | /* |
@@ -76,7 +83,8 @@ static inline void native_set_pud(pud_t *pudp, pud_t pud) | |||
76 | * entry, so clear the bottom half first and enforce ordering with a compiler | 83 | * entry, so clear the bottom half first and enforce ordering with a compiler |
77 | * barrier. | 84 | * barrier. |
78 | */ | 85 | */ |
79 | static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 86 | static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, |
87 | pte_t *ptep) | ||
80 | { | 88 | { |
81 | ptep->pte_low = 0; | 89 | ptep->pte_low = 0; |
82 | smp_wmb(); | 90 | smp_wmb(); |
@@ -107,20 +115,19 @@ static inline void pud_clear(pud_t *pudp) | |||
107 | * current pgd to avoid unnecessary TLB flushes. | 115 | * current pgd to avoid unnecessary TLB flushes. |
108 | */ | 116 | */ |
109 | pgd = read_cr3(); | 117 | pgd = read_cr3(); |
110 | if (__pa(pudp) >= pgd && __pa(pudp) < (pgd + sizeof(pgd_t)*PTRS_PER_PGD)) | 118 | if (__pa(pudp) >= pgd && __pa(pudp) < |
119 | (pgd + sizeof(pgd_t)*PTRS_PER_PGD)) | ||
111 | write_cr3(pgd); | 120 | write_cr3(pgd); |
112 | } | 121 | } |
113 | 122 | ||
114 | #define pud_page(pud) \ | 123 | #define pud_page(pud) ((struct page *) __va(pud_val(pud) & PAGE_MASK)) |
115 | ((struct page *) __va(pud_val(pud) & PAGE_MASK)) | ||
116 | 124 | ||
117 | #define pud_page_vaddr(pud) \ | 125 | #define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK)) |
118 | ((unsigned long) __va(pud_val(pud) & PAGE_MASK)) | ||
119 | 126 | ||
120 | 127 | ||
121 | /* Find an entry in the second-level page table.. */ | 128 | /* Find an entry in the second-level page table.. */ |
122 | #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \ | 129 | #define pmd_offset(pud, address) ((pmd_t *)pud_page(*(pud)) + \ |
123 | pmd_index(address)) | 130 | pmd_index(address)) |
124 | 131 | ||
125 | #ifdef CONFIG_SMP | 132 | #ifdef CONFIG_SMP |
126 | static inline pte_t native_ptep_get_and_clear(pte_t *ptep) | 133 | static inline pte_t native_ptep_get_and_clear(pte_t *ptep) |
@@ -161,7 +168,8 @@ static inline unsigned long pte_pfn(pte_t pte) | |||
161 | * put the 32 bits of offset into the high part. | 168 | * put the 32 bits of offset into the high part. |
162 | */ | 169 | */ |
163 | #define pte_to_pgoff(pte) ((pte).pte_high) | 170 | #define pte_to_pgoff(pte) ((pte).pte_high) |
164 | #define pgoff_to_pte(off) ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } }) | 171 | #define pgoff_to_pte(off) \ |
172 | ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } }) | ||
165 | #define PTE_FILE_MAX_BITS 32 | 173 | #define PTE_FILE_MAX_BITS 32 |
166 | 174 | ||
167 | /* Encode and de-code a swap entry */ | 175 | /* Encode and de-code a swap entry */ |
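native_set_pte_present() above is careful about ordering ("clear the PTE present bit, followed by a set") because a PAE pte is two 32-bit words that the hardware page walker may read at any instant: clearing pte_low first makes the entry not-present, so the walker can never combine a stale low half with a fresh high half. A sketch of the sequence, with a compiler barrier standing in for smp_wmb():

#include <stdio.h>
#include <stdint.h>

struct pae_pte {
        volatile uint32_t pte_low, pte_high;
};

#define barrier() __asm__ __volatile__("" ::: "memory")

static void set_pte_present_sketch(struct pae_pte *ptep, uint64_t pte)
{
        ptep->pte_low = 0;              /* 1: entry becomes not-present   */
        barrier();                      /*    (smp_wmb() in the kernel)   */
        ptep->pte_high = pte >> 32;     /* 2: install the new high half   */
        barrier();
        ptep->pte_low = (uint32_t)pte;  /* 3: present bit goes live last  */
}

int main(void)
{
        struct pae_pte p = { 0, 0 };

        set_pte_present_sketch(&p, 0x0000000180000067ULL);
        printf("pte = %08x%08x\n", p.pte_high, p.pte_low);
        return 0;
}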
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h index 9cf472aeb9ce..f1d9f4a03f6f 100644 --- a/include/asm-x86/pgtable.h +++ b/include/asm-x86/pgtable.h | |||
@@ -4,13 +4,13 @@ | |||
4 | #define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1) | 4 | #define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1) |
5 | #define FIRST_USER_ADDRESS 0 | 5 | #define FIRST_USER_ADDRESS 0 |
6 | 6 | ||
7 | #define _PAGE_BIT_PRESENT 0 | 7 | #define _PAGE_BIT_PRESENT 0 /* is present */ |
8 | #define _PAGE_BIT_RW 1 | 8 | #define _PAGE_BIT_RW 1 /* writeable */ |
9 | #define _PAGE_BIT_USER 2 | 9 | #define _PAGE_BIT_USER 2 /* userspace addressable */ |
10 | #define _PAGE_BIT_PWT 3 | 10 | #define _PAGE_BIT_PWT 3 /* page write through */ |
11 | #define _PAGE_BIT_PCD 4 | 11 | #define _PAGE_BIT_PCD 4 /* page cache disabled */ |
12 | #define _PAGE_BIT_ACCESSED 5 | 12 | #define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */ |
13 | #define _PAGE_BIT_DIRTY 6 | 13 | #define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */ |
14 | #define _PAGE_BIT_FILE 6 | 14 | #define _PAGE_BIT_FILE 6 |
15 | #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ | 15 | #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ |
16 | #define _PAGE_BIT_PAT 7 /* on 4KB pages */ | 16 | #define _PAGE_BIT_PAT 7 /* on 4KB pages */ |
@@ -48,24 +48,39 @@ | |||
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | /* If _PAGE_PRESENT is clear, we use these: */ | 50 | /* If _PAGE_PRESENT is clear, we use these: */ |
51 | #define _PAGE_FILE _PAGE_DIRTY /* nonlinear file mapping, saved PTE; unset:swap */ | 51 | #define _PAGE_FILE _PAGE_DIRTY /* nonlinear file mapping, |
52 | * saved PTE; unset:swap */ | ||
52 | #define _PAGE_PROTNONE _PAGE_PSE /* if the user mapped it with PROT_NONE; | 53 | #define _PAGE_PROTNONE _PAGE_PSE /* if the user mapped it with PROT_NONE; |
53 | pte_present gives true */ | 54 | pte_present gives true */ |
54 | 55 | ||
55 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) | 56 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ |
56 | #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) | 57 | _PAGE_ACCESSED | _PAGE_DIRTY) |
58 | #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \ | ||
59 | _PAGE_DIRTY) | ||
57 | 60 | ||
58 | #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) | 61 | #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) |
59 | 62 | ||
60 | #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) | 63 | #define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT) |
61 | #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) | 64 | #define _PAGE_CACHE_WB (0) |
65 | #define _PAGE_CACHE_WC (_PAGE_PWT) | ||
66 | #define _PAGE_CACHE_UC_MINUS (_PAGE_PCD) | ||
67 | #define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT) | ||
62 | 68 | ||
63 | #define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) | 69 | #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) |
64 | #define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) | 70 | #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ |
65 | #define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) | 71 | _PAGE_ACCESSED | _PAGE_NX) |
72 | |||
73 | #define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \ | ||
74 | _PAGE_USER | _PAGE_ACCESSED) | ||
75 | #define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ | ||
76 | _PAGE_ACCESSED | _PAGE_NX) | ||
77 | #define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ | ||
78 | _PAGE_ACCESSED) | ||
66 | #define PAGE_COPY PAGE_COPY_NOEXEC | 79 | #define PAGE_COPY PAGE_COPY_NOEXEC |
67 | #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) | 80 | #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \ |
68 | #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) | 81 | _PAGE_ACCESSED | _PAGE_NX) |
82 | #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ | ||
83 | _PAGE_ACCESSED) | ||
69 | 84 | ||
70 | #ifdef CONFIG_X86_32 | 85 | #ifdef CONFIG_X86_32 |
71 | #define _PAGE_KERNEL_EXEC \ | 86 | #define _PAGE_KERNEL_EXEC \ |
@@ -84,6 +99,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC; | |||
84 | #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) | 99 | #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) |
85 | #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) | 100 | #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) |
86 | #define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT) | 101 | #define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT) |
102 | #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC) | ||
87 | #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT) | 103 | #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT) |
88 | #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD) | 104 | #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD) |
89 | #define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) | 105 | #define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) |
@@ -101,6 +117,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC; | |||
101 | #define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO) | 117 | #define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO) |
102 | #define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC) | 118 | #define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC) |
103 | #define PAGE_KERNEL_RX MAKE_GLOBAL(__PAGE_KERNEL_RX) | 119 | #define PAGE_KERNEL_RX MAKE_GLOBAL(__PAGE_KERNEL_RX) |
120 | #define PAGE_KERNEL_WC MAKE_GLOBAL(__PAGE_KERNEL_WC) | ||
104 | #define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE) | 121 | #define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE) |
105 | #define PAGE_KERNEL_UC_MINUS MAKE_GLOBAL(__PAGE_KERNEL_UC_MINUS) | 122 | #define PAGE_KERNEL_UC_MINUS MAKE_GLOBAL(__PAGE_KERNEL_UC_MINUS) |
106 | #define PAGE_KERNEL_EXEC_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_EXEC_NOCACHE) | 123 | #define PAGE_KERNEL_EXEC_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_EXEC_NOCACHE) |
@@ -134,7 +151,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC; | |||
134 | * ZERO_PAGE is a global shared page that is always zero: used | 151 | * ZERO_PAGE is a global shared page that is always zero: used |
135 | * for zero-mapped memory areas etc.. | 152 | * for zero-mapped memory areas etc.. |
136 | */ | 153 | */ |
137 | extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; | 154 | extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; |
138 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | 155 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) |
139 | 156 | ||
140 | extern spinlock_t pgd_lock; | 157 | extern spinlock_t pgd_lock; |
@@ -144,30 +161,101 @@ extern struct list_head pgd_list; | |||
144 | * The following only work if pte_present() is true. | 161 | * The following only work if pte_present() is true. |
145 | * Undefined behaviour if not.. | 162 | * Undefined behaviour if not.. |
146 | */ | 163 | */ |
147 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } | 164 | static inline int pte_dirty(pte_t pte) |
148 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } | 165 | { |
149 | static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } | 166 | return pte_val(pte) & _PAGE_DIRTY; |
150 | static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } | 167 | } |
151 | static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; } | 168 | |
152 | static inline int pte_global(pte_t pte) { return pte_val(pte) & _PAGE_GLOBAL; } | 169 | static inline int pte_young(pte_t pte) |
153 | static inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_NX); } | 170 | { |
154 | 171 | return pte_val(pte) & _PAGE_ACCESSED; | |
155 | static inline int pmd_large(pmd_t pte) { | 172 | } |
156 | return (pmd_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) == | 173 | |
157 | (_PAGE_PSE|_PAGE_PRESENT); | 174 | static inline int pte_write(pte_t pte) |
175 | { | ||
176 | return pte_val(pte) & _PAGE_RW; | ||
177 | } | ||
178 | |||
179 | static inline int pte_file(pte_t pte) | ||
180 | { | ||
181 | return pte_val(pte) & _PAGE_FILE; | ||
182 | } | ||
183 | |||
184 | static inline int pte_huge(pte_t pte) | ||
185 | { | ||
186 | return pte_val(pte) & _PAGE_PSE; | ||
187 | } | ||
188 | |||
189 | static inline int pte_global(pte_t pte) | ||
190 | { | ||
191 | return pte_val(pte) & _PAGE_GLOBAL; | ||
192 | } | ||
193 | |||
194 | static inline int pte_exec(pte_t pte) | ||
195 | { | ||
196 | return !(pte_val(pte) & _PAGE_NX); | ||
197 | } | ||
198 | |||
199 | static inline int pmd_large(pmd_t pte) | ||
200 | { | ||
201 | return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == | ||
202 | (_PAGE_PSE | _PAGE_PRESENT); | ||
203 | } | ||
204 | |||
205 | static inline pte_t pte_mkclean(pte_t pte) | ||
206 | { | ||
207 | return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY); | ||
208 | } | ||
209 | |||
210 | static inline pte_t pte_mkold(pte_t pte) | ||
211 | { | ||
212 | return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED); | ||
213 | } | ||
214 | |||
215 | static inline pte_t pte_wrprotect(pte_t pte) | ||
216 | { | ||
217 | return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW); | ||
218 | } | ||
219 | |||
220 | static inline pte_t pte_mkexec(pte_t pte) | ||
221 | { | ||
222 | return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX); | ||
223 | } | ||
224 | |||
225 | static inline pte_t pte_mkdirty(pte_t pte) | ||
226 | { | ||
227 | return __pte(pte_val(pte) | _PAGE_DIRTY); | ||
228 | } | ||
229 | |||
230 | static inline pte_t pte_mkyoung(pte_t pte) | ||
231 | { | ||
232 | return __pte(pte_val(pte) | _PAGE_ACCESSED); | ||
158 | } | 233 | } |
159 | 234 | ||
160 | static inline pte_t pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY); } | 235 | static inline pte_t pte_mkwrite(pte_t pte) |
161 | static inline pte_t pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED); } | 236 | { |
162 | static inline pte_t pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW); } | 237 | return __pte(pte_val(pte) | _PAGE_RW); |
163 | static inline pte_t pte_mkexec(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX); } | 238 | } |
164 | static inline pte_t pte_mkdirty(pte_t pte) { return __pte(pte_val(pte) | _PAGE_DIRTY); } | 239 | |
165 | static inline pte_t pte_mkyoung(pte_t pte) { return __pte(pte_val(pte) | _PAGE_ACCESSED); } | 240 | static inline pte_t pte_mkhuge(pte_t pte) |
166 | static inline pte_t pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | _PAGE_RW); } | 241 | { |
167 | static inline pte_t pte_mkhuge(pte_t pte) { return __pte(pte_val(pte) | _PAGE_PSE); } | 242 | return __pte(pte_val(pte) | _PAGE_PSE); |
168 | static inline pte_t pte_clrhuge(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE); } | 243 | } |
169 | static inline pte_t pte_mkglobal(pte_t pte) { return __pte(pte_val(pte) | _PAGE_GLOBAL); } | 244 | |
170 | static inline pte_t pte_clrglobal(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL); } | 245 | static inline pte_t pte_clrhuge(pte_t pte) |
246 | { | ||
247 | return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE); | ||
248 | } | ||
249 | |||
250 | static inline pte_t pte_mkglobal(pte_t pte) | ||
251 | { | ||
252 | return __pte(pte_val(pte) | _PAGE_GLOBAL); | ||
253 | } | ||
254 | |||
255 | static inline pte_t pte_clrglobal(pte_t pte) | ||
256 | { | ||
257 | return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL); | ||
258 | } | ||
171 | 259 | ||
172 | extern pteval_t __supported_pte_mask; | 260 | extern pteval_t __supported_pte_mask; |
173 | 261 | ||
@@ -334,7 +422,8 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
334 | }) | 422 | }) |
335 | 423 | ||
336 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | 424 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
337 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 425 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, |
426 | pte_t *ptep) | ||
338 | { | 427 | { |
339 | pte_t pte = native_ptep_get_and_clear(ptep); | 428 | pte_t pte = native_ptep_get_and_clear(ptep); |
340 | pte_update(mm, addr, ptep); | 429 | pte_update(mm, addr, ptep); |
@@ -342,7 +431,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, | |||
342 | } | 431 | } |
343 | 432 | ||
344 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL | 433 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL |
345 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) | 434 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, |
435 | unsigned long addr, pte_t *ptep, | ||
436 | int full) | ||
346 | { | 437 | { |
347 | pte_t pte; | 438 | pte_t pte; |
348 | if (full) { | 439 | if (full) { |
@@ -358,7 +449,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long | |||
358 | } | 449 | } |
359 | 450 | ||
360 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | 451 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
361 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 452 | static inline void ptep_set_wrprotect(struct mm_struct *mm, |
453 | unsigned long addr, pte_t *ptep) | ||
362 | { | 454 | { |
363 | clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte); | 455 | clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte); |
364 | pte_update(mm, addr, ptep); | 456 | pte_update(mm, addr, ptep); |
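The one-line pte helpers expanded above all follow one functional pattern: a pte is passed and returned by value, never modified in place, and clearing a flag masks with the complement taken at full pte width, hence the ~(pteval_t) cast. A stand-alone sketch of two of them, assuming a 64-bit pteval_t:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pteval_t;
typedef struct { pteval_t pte; } pte_t;

#define _PAGE_RW    ((pteval_t)1 << 1)
#define _PAGE_DIRTY ((pteval_t)1 << 6)

static pte_t __pte(pteval_t val) { return (pte_t){ val }; }
static pteval_t pte_val(pte_t pte) { return pte.pte; }

static pte_t pte_mkdirty(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static pte_t pte_wrprotect(pte_t pte)
{
        /* the cast keeps the complement at pteval_t width, not int width */
        return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW);
}

int main(void)
{
        pte_t pte = __pte(_PAGE_RW);

        pte = pte_wrprotect(pte_mkdirty(pte));
        printf("pte = %#llx (dirty set, RW cleared)\n",
               (unsigned long long)pte_val(pte));
        return 0;
}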
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h index 4e6a0fca0b47..c4a643674458 100644 --- a/include/asm-x86/pgtable_32.h +++ b/include/asm-x86/pgtable_32.h | |||
@@ -40,13 +40,13 @@ void paging_init(void); | |||
40 | #ifdef CONFIG_X86_PAE | 40 | #ifdef CONFIG_X86_PAE |
41 | # include <asm/pgtable-3level-defs.h> | 41 | # include <asm/pgtable-3level-defs.h> |
42 | # define PMD_SIZE (1UL << PMD_SHIFT) | 42 | # define PMD_SIZE (1UL << PMD_SHIFT) |
43 | # define PMD_MASK (~(PMD_SIZE-1)) | 43 | # define PMD_MASK (~(PMD_SIZE - 1)) |
44 | #else | 44 | #else |
45 | # include <asm/pgtable-2level-defs.h> | 45 | # include <asm/pgtable-2level-defs.h> |
46 | #endif | 46 | #endif |
47 | 47 | ||
48 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | 48 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
49 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 49 | #define PGDIR_MASK (~(PGDIR_SIZE - 1)) |
50 | 50 | ||
51 | #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) | 51 | #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) |
52 | #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) | 52 | #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) |
@@ -58,21 +58,22 @@ void paging_init(void); | |||
58 | * The vmalloc() routines leaves a hole of 4kB between each vmalloced | 58 | * The vmalloc() routines leaves a hole of 4kB between each vmalloced |
59 | * area for the same reason. ;) | 59 | * area for the same reason. ;) |
60 | */ | 60 | */ |
61 | #define VMALLOC_OFFSET (8*1024*1024) | 61 | #define VMALLOC_OFFSET (8 * 1024 * 1024) |
62 | #define VMALLOC_START (((unsigned long) high_memory + \ | 62 | #define VMALLOC_START (((unsigned long)high_memory + 2 * VMALLOC_OFFSET - 1) \ |
63 | 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1)) | 63 | & ~(VMALLOC_OFFSET - 1)) |
64 | #ifdef CONFIG_X86_PAE | 64 | #ifdef CONFIG_X86_PAE |
65 | #define LAST_PKMAP 512 | 65 | #define LAST_PKMAP 512 |
66 | #else | 66 | #else |
67 | #define LAST_PKMAP 1024 | 67 | #define LAST_PKMAP 1024 |
68 | #endif | 68 | #endif |
69 | 69 | ||
70 | #define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK) | 70 | #define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ |
71 | & PMD_MASK) | ||
71 | 72 | ||
72 | #ifdef CONFIG_HIGHMEM | 73 | #ifdef CONFIG_HIGHMEM |
73 | # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) | 74 | # define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) |
74 | #else | 75 | #else |
75 | # define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) | 76 | # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) |
76 | #endif | 77 | #endif |
77 | 78 | ||
78 | /* | 79 | /* |
@@ -88,10 +89,16 @@ extern unsigned long pg0[]; | |||
88 | #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) | 89 | #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) |
89 | 90 | ||
90 | /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ | 91 | /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ |
91 | #define pmd_none(x) (!(unsigned long)pmd_val(x)) | 92 | #define pmd_none(x) (!(unsigned long)pmd_val((x))) |
92 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) | 93 | #define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT) |
93 | #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) | ||
94 | 94 | ||
95 | extern int pmd_bad(pmd_t pmd); | ||
96 | |||
97 | #define pmd_bad_v1(x) \ | ||
98 | (_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER))) | ||
99 | #define pmd_bad_v2(x) \ | ||
100 | (_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER | \ | ||
101 | _PAGE_PSE | _PAGE_NX))) | ||
95 | 102 | ||
96 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) | 103 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) |
97 | 104 | ||
@@ -117,17 +124,18 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) | |||
117 | } | 124 | } |
118 | 125 | ||
119 | /* | 126 | /* |
120 | * Macro to mark a page protection value as "uncacheable". On processors which do not support | 127 | * Macro to mark a page protection value as "uncacheable". |
121 | * it, this is a no-op. | 128 | * On processors which do not support it, this is a no-op. |
122 | */ | 129 | */ |
123 | #define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \ | 130 | #define pgprot_noncached(prot) \ |
124 | ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot)) | 131 | ((boot_cpu_data.x86 > 3) \ |
132 | ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) \ | ||
133 | : (prot)) | ||
125 | 134 | ||
126 | /* | 135 | /* |
127 | * Conversion functions: convert a page and protection to a page entry, | 136 | * Conversion functions: convert a page and protection to a page entry, |
128 | * and a page entry and page directory to the page they refer to. | 137 | * and a page entry and page directory to the page they refer to. |
129 | */ | 138 | */ |
130 | |||
131 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | 139 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) |
132 | 140 | ||
133 | /* | 141 | /* |
@@ -136,20 +144,20 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) | |||
136 | * this macro returns the index of the entry in the pgd page which would | 144 | * this macro returns the index of the entry in the pgd page which would |
137 | * control the given virtual address | 145 | * control the given virtual address |
138 | */ | 146 | */ |
139 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) | 147 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) |
140 | #define pgd_index_k(addr) pgd_index(addr) | 148 | #define pgd_index_k(addr) pgd_index((addr)) |
141 | 149 | ||
142 | /* | 150 | /* |
143 | * pgd_offset() returns a (pgd_t *) | 151 | * pgd_offset() returns a (pgd_t *) |
144 | * pgd_index() is used get the offset into the pgd page's array of pgd_t's; | 152 | * pgd_index() is used get the offset into the pgd page's array of pgd_t's; |
145 | */ | 153 | */ |
146 | #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) | 154 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) |
147 | 155 | ||
148 | /* | 156 | /* |
149 | * a shortcut which implies the use of the kernel's pgd, instead | 157 | * a shortcut which implies the use of the kernel's pgd, instead |
150 | * of a process's | 158 | * of a process's |
151 | */ | 159 | */ |
152 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | 160 | #define pgd_offset_k(address) pgd_offset(&init_mm, (address)) |
153 | 161 | ||
154 | static inline int pud_large(pud_t pud) { return 0; } | 162 | static inline int pud_large(pud_t pud) { return 0; } |
155 | 163 | ||
@@ -159,8 +167,8 @@ static inline int pud_large(pud_t pud) { return 0; } | |||
159 | * this macro returns the index of the entry in the pmd page which would | 167 | * this macro returns the index of the entry in the pmd page which would |
160 | * control the given virtual address | 168 | * control the given virtual address |
161 | */ | 169 | */ |
162 | #define pmd_index(address) \ | 170 | #define pmd_index(address) \ |
163 | (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) | 171 | (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) |
164 | 172 | ||
165 | /* | 173 | /* |
166 | * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE] | 174 | * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE] |
@@ -168,43 +176,45 @@ static inline int pud_large(pud_t pud) { return 0; } | |||
168 | * this macro returns the index of the entry in the pte page which would | 176 | * this macro returns the index of the entry in the pte page which would |
169 | * control the given virtual address | 177 | * control the given virtual address |
170 | */ | 178 | */ |
171 | #define pte_index(address) \ | 179 | #define pte_index(address) \ |
172 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | 180 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) |
173 | #define pte_offset_kernel(dir, address) \ | 181 | #define pte_offset_kernel(dir, address) \ |
174 | ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address)) | 182 | ((pte_t *)pmd_page_vaddr(*(dir)) + pte_index((address))) |
175 | 183 | ||
176 | #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) | 184 | #define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT)) |
177 | 185 | ||
178 | #define pmd_page_vaddr(pmd) \ | 186 | #define pmd_page_vaddr(pmd) \ |
179 | ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) | 187 | ((unsigned long)__va(pmd_val((pmd)) & PAGE_MASK)) |
180 | 188 | ||
181 | #if defined(CONFIG_HIGHPTE) | 189 | #if defined(CONFIG_HIGHPTE) |
182 | #define pte_offset_map(dir, address) \ | 190 | #define pte_offset_map(dir, address) \ |
183 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address)) | 191 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \ |
184 | #define pte_offset_map_nested(dir, address) \ | 192 | pte_index((address))) |
185 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address)) | 193 | #define pte_offset_map_nested(dir, address) \ |
186 | #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) | 194 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \ |
187 | #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) | 195 | pte_index((address))) |
196 | #define pte_unmap(pte) kunmap_atomic((pte), KM_PTE0) | ||
197 | #define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1) | ||
188 | #else | 198 | #else |
189 | #define pte_offset_map(dir, address) \ | 199 | #define pte_offset_map(dir, address) \ |
190 | ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address)) | 200 | ((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address))) |
191 | #define pte_offset_map_nested(dir, address) pte_offset_map(dir, address) | 201 | #define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address)) |
192 | #define pte_unmap(pte) do { } while (0) | 202 | #define pte_unmap(pte) do { } while (0) |
193 | #define pte_unmap_nested(pte) do { } while (0) | 203 | #define pte_unmap_nested(pte) do { } while (0) |
194 | #endif | 204 | #endif |
195 | 205 | ||
196 | /* Clear a kernel PTE and flush it from the TLB */ | 206 | /* Clear a kernel PTE and flush it from the TLB */ |
197 | #define kpte_clear_flush(ptep, vaddr) \ | 207 | #define kpte_clear_flush(ptep, vaddr) \ |
198 | do { \ | 208 | do { \ |
199 | pte_clear(&init_mm, vaddr, ptep); \ | 209 | pte_clear(&init_mm, (vaddr), (ptep)); \ |
200 | __flush_tlb_one(vaddr); \ | 210 | __flush_tlb_one((vaddr)); \ |
201 | } while (0) | 211 | } while (0) |
202 | 212 | ||
203 | /* | 213 | /* |
204 | * The i386 doesn't have any external MMU info: the kernel page | 214 | * The i386 doesn't have any external MMU info: the kernel page |
205 | * tables contain all the necessary information. | 215 | * tables contain all the necessary information. |
206 | */ | 216 | */ |
207 | #define update_mmu_cache(vma,address,pte) do { } while (0) | 217 | #define update_mmu_cache(vma, address, pte) do { } while (0) |
208 | 218 | ||
209 | void native_pagetable_setup_start(pgd_t *base); | 219 | void native_pagetable_setup_start(pgd_t *base); |
210 | void native_pagetable_setup_done(pgd_t *base); | 220 | void native_pagetable_setup_done(pgd_t *base); |
@@ -233,7 +243,7 @@ static inline void paravirt_pagetable_setup_done(pgd_t *base) | |||
233 | #define kern_addr_valid(kaddr) (0) | 243 | #define kern_addr_valid(kaddr) (0) |
234 | #endif | 244 | #endif |
235 | 245 | ||
236 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | 246 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ |
237 | remap_pfn_range(vma, vaddr, pfn, size, prot) | 247 | remap_pfn_range(vma, vaddr, pfn, size, prot) |
238 | 248 | ||
239 | #endif /* _I386_PGTABLE_H */ | 249 | #endif /* _I386_PGTABLE_H */ |
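The rewrapped VMALLOC_START above computes the first VMALLOC_OFFSET-aligned address that is also at least one full VMALLOC_OFFSET past high_memory: adding 2*VMALLOC_OFFSET - 1 before masking guarantees the 8 MB guard hole described in the comment. A quick stand-alone check; the sample high_memory value is made up:

#include <stdio.h>

#define VMALLOC_OFFSET (8 * 1024 * 1024)

static unsigned long vmalloc_start(unsigned long high_memory)
{
        return (high_memory + 2 * VMALLOC_OFFSET - 1) &
               ~(VMALLOC_OFFSET - 1UL);
}

int main(void)
{
        unsigned long hm = 0xc77fe000UL;  /* illustrative end of lowmem */
        unsigned long vs = vmalloc_start(hm);

        printf("high_memory=%#lx VMALLOC_START=%#lx gap=%luMB\n",
               hm, vs, (vs - hm) >> 20);
        return 0;
}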
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h index 0a0b77bc736a..9fd87d0b6477 100644 --- a/include/asm-x86/pgtable_64.h +++ b/include/asm-x86/pgtable_64.h | |||
@@ -52,14 +52,18 @@ extern void paging_init(void); | |||
52 | 52 | ||
53 | #ifndef __ASSEMBLY__ | 53 | #ifndef __ASSEMBLY__ |
54 | 54 | ||
55 | #define pte_ERROR(e) \ | 55 | #define pte_ERROR(e) \ |
56 | printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e)) | 56 | printk("%s:%d: bad pte %p(%016lx).\n", \ |
57 | #define pmd_ERROR(e) \ | 57 | __FILE__, __LINE__, &(e), pte_val(e)) |
58 | printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e)) | 58 | #define pmd_ERROR(e) \ |
59 | #define pud_ERROR(e) \ | 59 | printk("%s:%d: bad pmd %p(%016lx).\n", \ |
60 | printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e)) | 60 | __FILE__, __LINE__, &(e), pmd_val(e)) |
61 | #define pgd_ERROR(e) \ | 61 | #define pud_ERROR(e) \ |
62 | printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) | 62 | printk("%s:%d: bad pud %p(%016lx).\n", \ |
63 | __FILE__, __LINE__, &(e), pud_val(e)) | ||
64 | #define pgd_ERROR(e) \ | ||
65 | printk("%s:%d: bad pgd %p(%016lx).\n", \ | ||
66 | __FILE__, __LINE__, &(e), pgd_val(e)) | ||
63 | 67 | ||
64 | #define pgd_none(x) (!pgd_val(x)) | 68 | #define pgd_none(x) (!pgd_val(x)) |
65 | #define pud_none(x) (!pud_val(x)) | 69 | #define pud_none(x) (!pud_val(x)) |
@@ -87,7 +91,8 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp) | |||
87 | #ifdef CONFIG_SMP | 91 | #ifdef CONFIG_SMP |
88 | return native_make_pte(xchg(&xp->pte, 0)); | 92 | return native_make_pte(xchg(&xp->pte, 0)); |
89 | #else | 93 | #else |
90 | /* native_local_ptep_get_and_clear, but duplicated because of cyclic dependency */ | 94 | /* native_local_ptep_get_and_clear, |
95 | but duplicated because of cyclic dependency */ | ||
91 | pte_t ret = *xp; | 96 | pte_t ret = *xp; |
92 | native_pte_clear(NULL, 0, xp); | 97 | native_pte_clear(NULL, 0, xp); |
93 | return ret; | 98 | return ret; |
@@ -119,7 +124,7 @@ static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) | |||
119 | *pgdp = pgd; | 124 | *pgdp = pgd; |
120 | } | 125 | } |
121 | 126 | ||
122 | static inline void native_pgd_clear(pgd_t * pgd) | 127 | static inline void native_pgd_clear(pgd_t *pgd) |
123 | { | 128 | { |
124 | native_set_pgd(pgd, native_make_pgd(0)); | 129 | native_set_pgd(pgd, native_make_pgd(0)); |
125 | } | 130 | } |
@@ -128,19 +133,19 @@ static inline void native_pgd_clear(pgd_t * pgd) | |||
128 | 133 | ||
129 | #endif /* !__ASSEMBLY__ */ | 134 | #endif /* !__ASSEMBLY__ */ |
130 | 135 | ||
131 | #define PMD_SIZE (_AC(1,UL) << PMD_SHIFT) | 136 | #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) |
132 | #define PMD_MASK (~(PMD_SIZE-1)) | 137 | #define PMD_MASK (~(PMD_SIZE - 1)) |
133 | #define PUD_SIZE (_AC(1,UL) << PUD_SHIFT) | 138 | #define PUD_SIZE (_AC(1, UL) << PUD_SHIFT) |
134 | #define PUD_MASK (~(PUD_SIZE-1)) | 139 | #define PUD_MASK (~(PUD_SIZE - 1)) |
135 | #define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT) | 140 | #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) |
136 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 141 | #define PGDIR_MASK (~(PGDIR_SIZE - 1)) |
137 | 142 | ||
138 | 143 | ||
139 | #define MAXMEM _AC(0x3fffffffffff, UL) | 144 | #define MAXMEM _AC(0x00003fffffffffff, UL) |
140 | #define VMALLOC_START _AC(0xffffc20000000000, UL) | 145 | #define VMALLOC_START _AC(0xffffc20000000000, UL) |
141 | #define VMALLOC_END _AC(0xffffe1ffffffffff, UL) | 146 | #define VMALLOC_END _AC(0xffffe1ffffffffff, UL) |
142 | #define VMEMMAP_START _AC(0xffffe20000000000, UL) | 147 | #define VMEMMAP_START _AC(0xffffe20000000000, UL) |
143 | #define MODULES_VADDR _AC(0xffffffff88000000, UL) | 148 | #define MODULES_VADDR _AC(0xffffffffa0000000, UL) |
144 | #define MODULES_END _AC(0xfffffffffff00000, UL) | 149 | #define MODULES_END _AC(0xfffffffffff00000, UL) |
145 | #define MODULES_LEN (MODULES_END - MODULES_VADDR) | 150 | #define MODULES_LEN (MODULES_END - MODULES_VADDR) |
146 | 151 | ||
@@ -153,26 +158,28 @@ static inline unsigned long pgd_bad(pgd_t pgd) | |||
153 | 158 | ||
154 | static inline unsigned long pud_bad(pud_t pud) | 159 | static inline unsigned long pud_bad(pud_t pud) |
155 | { | 160 | { |
156 | return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER); | 161 | return pud_val(pud) & |
162 | ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX); | ||
157 | } | 163 | } |
158 | 164 | ||
159 | static inline unsigned long pmd_bad(pmd_t pmd) | 165 | static inline unsigned long pmd_bad(pmd_t pmd) |
160 | { | 166 | { |
161 | return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER); | 167 | return pmd_val(pmd) & |
168 | ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX); | ||
162 | } | 169 | } |
163 | 170 | ||
164 | #define pte_none(x) (!pte_val(x)) | 171 | #define pte_none(x) (!pte_val((x))) |
165 | #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) | 172 | #define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE)) |
166 | 173 | ||
167 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this right? */ | 174 | #define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */ |
168 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | 175 | #define pte_page(x) pfn_to_page(pte_pfn((x))) |
169 | #define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) | 176 | #define pte_pfn(x) ((pte_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT) |
170 | 177 | ||
171 | /* | 178 | /* |
172 | * Macro to mark a page protection value as "uncacheable". | 179 | * Macro to mark a page protection value as "uncacheable". |
173 | */ | 180 | */ |
174 | #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) | 181 | #define pgprot_noncached(prot) \ |
175 | 182 | (__pgprot(pgprot_val((prot)) | _PAGE_PCD | _PAGE_PWT)) | |
176 | 183 | ||
177 | /* | 184 | /* |
178 | * Conversion functions: convert a page and protection to a page entry, | 185 | * Conversion functions: convert a page and protection to a page entry, |
@@ -182,75 +189,81 @@ static inline unsigned long pmd_bad(pmd_t pmd) | |||
182 | /* | 189 | /* |
183 | * Level 4 access. | 190 | * Level 4 access. |
184 | */ | 191 | */ |
185 | #define pgd_page_vaddr(pgd) ((unsigned long) __va((unsigned long)pgd_val(pgd) & PTE_MASK)) | 192 | #define pgd_page_vaddr(pgd) \ |
186 | #define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)) | 193 | ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_MASK)) |
187 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) | 194 | #define pgd_page(pgd) (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT)) |
188 | #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) | 195 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) |
189 | #define pgd_offset_k(address) (init_level4_pgt + pgd_index(address)) | 196 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) |
197 | #define pgd_offset_k(address) (init_level4_pgt + pgd_index((address))) | ||
190 | #define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT) | 198 | #define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT) |
191 | static inline int pgd_large(pgd_t pgd) { return 0; } | 199 | static inline int pgd_large(pgd_t pgd) { return 0; } |
192 | #define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE }) | 200 | #define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE }) |
193 | 201 | ||
194 | /* PUD - Level3 access */ | 202 | /* PUD - Level3 access */ |
195 | /* to find an entry in a page-table-directory. */ | 203 | /* to find an entry in a page-table-directory. */ |
196 | #define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK)) | 204 | #define pud_page_vaddr(pud) \ |
197 | #define pud_page(pud) (pfn_to_page(pud_val(pud) >> PAGE_SHIFT)) | 205 | ((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK)) |
198 | #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) | 206 | #define pud_page(pud) (pfn_to_page(pud_val((pud)) >> PAGE_SHIFT)) |
199 | #define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address)) | 207 | #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) |
200 | #define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT) | 208 | #define pud_offset(pgd, address) \ |
209 | ((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address))) | ||
210 | #define pud_present(pud) (pud_val((pud)) & _PAGE_PRESENT) | ||
201 | 211 | ||
202 | static inline int pud_large(pud_t pte) | 212 | static inline int pud_large(pud_t pte) |
203 | { | 213 | { |
204 | return (pud_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) == | 214 | return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == |
205 | (_PAGE_PSE|_PAGE_PRESENT); | 215 | (_PAGE_PSE | _PAGE_PRESENT); |
206 | } | 216 | } |
207 | 217 | ||
208 | /* PMD - Level 2 access */ | 218 | /* PMD - Level 2 access */ |
209 | #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK)) | 219 | #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_MASK)) |
210 | #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) | 220 | #define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT)) |
211 | 221 | ||
212 | #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) | 222 | #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) |
213 | #define pmd_offset(dir, address) ((pmd_t *) pud_page_vaddr(*(dir)) + \ | 223 | #define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \ |
214 | pmd_index(address)) | 224 | pmd_index(address)) |
215 | #define pmd_none(x) (!pmd_val(x)) | 225 | #define pmd_none(x) (!pmd_val((x))) |
216 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) | 226 | #define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT) |
217 | #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot))) | 227 | #define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot)))) |
218 | #define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) | 228 | #define pmd_pfn(x) ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT) |
219 | 229 | ||
220 | #define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT) | 230 | #define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT) |
221 | #define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | _PAGE_FILE }) | 231 | #define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | \ |
232 | _PAGE_FILE }) | ||
222 | #define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT | 233 | #define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT |
223 | 234 | ||
224 | /* PTE - Level 1 access. */ | 235 | /* PTE - Level 1 access. */ |
225 | 236 | ||
226 | /* page, protection -> pte */ | 237 | /* page, protection -> pte */ |
227 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | 238 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn((page)), (pgprot)) |
228 | 239 | ||
229 | #define pte_index(address) \ | 240 | #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) |
230 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | ||
231 | #define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \ | 241 | #define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \ |
232 | pte_index(address)) | 242 | pte_index((address))) |
233 | 243 | ||
234 | /* x86-64 always has all page tables mapped. */ | 244 | /* x86-64 always has all page tables mapped. */ |
235 | #define pte_offset_map(dir,address) pte_offset_kernel(dir,address) | 245 | #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) |
236 | #define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address) | 246 | #define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address)) |
237 | #define pte_unmap(pte) /* NOP */ | 247 | #define pte_unmap(pte) /* NOP */ |
238 | #define pte_unmap_nested(pte) /* NOP */ | 248 | #define pte_unmap_nested(pte) /* NOP */ |
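Each *_index() macro above peels one 9-bit slice off the virtual address. A small userspace demonstration, assuming the standard x86-64 4-level shifts (PAGE_SHIFT 12, PMD_SHIFT 21, PUD_SHIFT 30, PGDIR_SHIFT 39) and 512 entries per table:

#include <stdio.h>

#define IDX(addr, shift) (((addr) >> (shift)) & 511)

int main(void)
{
        unsigned long addr = 0xffffc20000123456UL;      /* in the vmalloc area */

        printf("pgd=%lu pud=%lu pmd=%lu pte=%lu page-offset=0x%lx\n",
               IDX(addr, 39), IDX(addr, 30), IDX(addr, 21), IDX(addr, 12),
               addr & 0xfff);
        return 0;
}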
249 | |||
250 | #define update_mmu_cache(vma, address, pte) do { } while (0) | ||
239 | 251 | ||
240 | #define update_mmu_cache(vma,address,pte) do { } while (0) | 252 | extern int direct_gbpages; |
241 | 253 | ||
242 | /* Encode and de-code a swap entry */ | 254 | /* Encode and de-code a swap entry */ |
243 | #define __swp_type(x) (((x).val >> 1) & 0x3f) | 255 | #define __swp_type(x) (((x).val >> 1) & 0x3f) |
244 | #define __swp_offset(x) ((x).val >> 8) | 256 | #define __swp_offset(x) ((x).val >> 8) |
245 | #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) | 257 | #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | \ |
246 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) | 258 | ((offset) << 8) }) |
259 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) }) | ||
247 | #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) | 260 | #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) |
248 | 261 | ||
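A quick userspace model of the swap-entry packing encoded above: bit 0 stays clear so the entry reads as non-present, bits 1-6 carry the swap type, and bits 8 and up carry the page offset. The helper name is invented for the illustration:

#include <assert.h>
#include <stdint.h>

static uint64_t make_swp_entry(uint64_t type, uint64_t offset)
{
        return (type << 1) | (offset << 8);
}

int main(void)
{
        uint64_t e = make_swp_entry(5, 0x1234);

        assert(((e >> 1) & 0x3f) == 5);         /* __swp_type()   */
        assert((e >> 8) == 0x1234);             /* __swp_offset() */
        assert((e & 1) == 0);                   /* reads as non-present */
        return 0;
}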
249 | extern int kern_addr_valid(unsigned long addr); | 262 | extern int kern_addr_valid(unsigned long addr); |
250 | extern void cleanup_highmap(void); | 263 | extern void cleanup_highmap(void); |
251 | 264 | ||
252 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | 265 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ |
253 | remap_pfn_range(vma, vaddr, pfn, size, prot) | 266 | remap_pfn_range(vma, vaddr, pfn, size, prot) |
254 | 267 | ||
255 | #define HAVE_ARCH_UNMAPPED_AREA | 268 | #define HAVE_ARCH_UNMAPPED_AREA |
256 | #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN | 269 | #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN |
@@ -263,8 +276,10 @@ extern void cleanup_highmap(void); | |||
263 | 276 | ||
264 | /* fs/proc/kcore.c */ | 277 | /* fs/proc/kcore.c */ |
265 | #define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK) | 278 | #define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK) |
266 | #define kc_offset_to_vaddr(o) \ | 279 | #define kc_offset_to_vaddr(o) \ |
267 | (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o)) | 280 | (((o) & (1UL << (__VIRTUAL_MASK_SHIFT - 1))) \ |
281 | ? ((o) | ~__VIRTUAL_MASK) \ | ||
282 | : (o)) | ||
268 | 283 | ||
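The reflowed kc_offset_to_vaddr() is a plain sign extension: a kcore offset is a canonical address with the high bits masked off, so an offset with the top virtual bit set must be widened back into the kernel half of the address space. Modelled in userspace with __VIRTUAL_MASK_SHIFT taken as 48:

#include <assert.h>

#define VMASK ((1UL << 48) - 1)

static unsigned long kc_offset_to_vaddr(unsigned long o)
{
        return (o & (1UL << 47)) ? (o | ~VMASK) : o;
}

int main(void)
{
        assert(kc_offset_to_vaddr(0xffff880000000000UL & VMASK) ==
               0xffff880000000000UL);                   /* kernel half restored */
        assert(kc_offset_to_vaddr(0x1000UL) == 0x1000UL); /* user half untouched */
        return 0;
}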
269 | #define __HAVE_ARCH_PTE_SAME | 284 | #define __HAVE_ARCH_PTE_SAME |
270 | #endif /* !__ASSEMBLY__ */ | 285 | #endif /* !__ASSEMBLY__ */ |
diff --git a/include/asm-x86/posix_types.h b/include/asm-x86/posix_types.h index bb7133dc155d..fe312a5ba204 100644 --- a/include/asm-x86/posix_types.h +++ b/include/asm-x86/posix_types.h | |||
@@ -1,11 +1,5 @@ | |||
1 | #ifdef __KERNEL__ | 1 | #ifdef __KERNEL__ |
2 | # ifdef CONFIG_X86_32 | 2 | # if defined(CONFIG_X86_32) || defined(__i386__) |
3 | # include "posix_types_32.h" | ||
4 | # else | ||
5 | # include "posix_types_64.h" | ||
6 | # endif | ||
7 | #else | ||
8 | # ifdef __i386__ | ||
9 | # include "posix_types_32.h" | 3 | # include "posix_types_32.h" |
10 | # else | 4 | # else |
11 | # include "posix_types_64.h" | 5 | # include "posix_types_64.h" |
diff --git a/include/asm-x86/posix_types_32.h b/include/asm-x86/posix_types_32.h index 015e539cdef5..b031efda37ec 100644 --- a/include/asm-x86/posix_types_32.h +++ b/include/asm-x86/posix_types_32.h | |||
@@ -45,32 +45,39 @@ typedef struct { | |||
45 | #if defined(__KERNEL__) | 45 | #if defined(__KERNEL__) |
46 | 46 | ||
47 | #undef __FD_SET | 47 | #undef __FD_SET |
48 | #define __FD_SET(fd,fdsetp) \ | 48 | #define __FD_SET(fd,fdsetp) \ |
49 | __asm__ __volatile__("btsl %1,%0": \ | 49 | asm volatile("btsl %1,%0": \ |
50 | "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) | 50 | "+m" (*(__kernel_fd_set *)(fdsetp)) \ |
51 | : "r" ((int)(fd))) | ||
51 | 52 | ||
52 | #undef __FD_CLR | 53 | #undef __FD_CLR |
53 | #define __FD_CLR(fd,fdsetp) \ | 54 | #define __FD_CLR(fd,fdsetp) \ |
54 | __asm__ __volatile__("btrl %1,%0": \ | 55 | asm volatile("btrl %1,%0": \ |
55 | "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) | 56 | "+m" (*(__kernel_fd_set *)(fdsetp)) \ |
57 | : "r" ((int) (fd))) | ||
56 | 58 | ||
57 | #undef __FD_ISSET | 59 | #undef __FD_ISSET |
58 | #define __FD_ISSET(fd,fdsetp) (__extension__ ({ \ | 60 | #define __FD_ISSET(fd,fdsetp) \ |
59 | unsigned char __result; \ | 61 | (__extension__ \ |
60 | __asm__ __volatile__("btl %1,%2 ; setb %0" \ | 62 | ({ \ |
61 | :"=q" (__result) :"r" ((int) (fd)), \ | 63 | unsigned char __result; \ |
62 | "m" (*(__kernel_fd_set *) (fdsetp))); \ | 64 | asm volatile("btl %1,%2 ; setb %0" \ |
63 | __result; })) | 65 | : "=q" (__result) \ |
66 | : "r" ((int)(fd)), \ | ||
67 | "m" (*(__kernel_fd_set *)(fdsetp))); \ | ||
68 | __result; \ | ||
69 | })) | ||
64 | 70 | ||
65 | #undef __FD_ZERO | 71 | #undef __FD_ZERO |
66 | #define __FD_ZERO(fdsetp) \ | 72 | #define __FD_ZERO(fdsetp) \ |
67 | do { \ | 73 | do { \ |
68 | int __d0, __d1; \ | 74 | int __d0, __d1; \ |
69 | __asm__ __volatile__("cld ; rep ; stosl" \ | 75 | asm volatile("cld ; rep ; stosl" \ |
70 | :"=m" (*(__kernel_fd_set *) (fdsetp)), \ | 76 | : "=m" (*(__kernel_fd_set *)(fdsetp)), \ |
71 | "=&c" (__d0), "=&D" (__d1) \ | 77 | "=&c" (__d0), "=&D" (__d1) \ |
72 | :"a" (0), "1" (__FDSET_LONGS), \ | 78 | : "a" (0), "1" (__FDSET_LONGS), \ |
73 | "2" ((__kernel_fd_set *) (fdsetp)) : "memory"); \ | 79 | "2" ((__kernel_fd_set *)(fdsetp)) \ |
80 | : "memory"); \ | ||
74 | } while (0) | 81 | } while (0) |
75 | 82 | ||
76 | #endif /* defined(__KERNEL__) */ | 83 | #endif /* defined(__KERNEL__) */ |
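The btsl/btrl/btl asm above amounts to single-bit set, clear, and test on the fd_set bit array. The same semantics in portable C, sized for the usual 1024-bit fd_set:

#include <assert.h>
#include <string.h>

#define NFDBITS (8 * sizeof(unsigned long))

typedef struct { unsigned long fds_bits[1024 / NFDBITS]; } kfd_set;

static void fd_set_bit(int fd, kfd_set *s)
{
        s->fds_bits[fd / NFDBITS] |= 1UL << (fd % NFDBITS);     /* btsl */
}

static void fd_clr_bit(int fd, kfd_set *s)
{
        s->fds_bits[fd / NFDBITS] &= ~(1UL << (fd % NFDBITS));  /* btrl */
}

static int fd_test_bit(int fd, const kfd_set *s)
{
        return (s->fds_bits[fd / NFDBITS] >> (fd % NFDBITS)) & 1; /* btl */
}

int main(void)
{
        kfd_set s;

        memset(&s, 0, sizeof(s));               /* __FD_ZERO  */
        fd_set_bit(42, &s);                     /* __FD_SET   */
        assert(fd_test_bit(42, &s));            /* __FD_ISSET */
        fd_clr_bit(42, &s);                     /* __FD_CLR   */
        assert(!fd_test_bit(42, &s));
        return 0;
}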
diff --git a/include/asm-x86/posix_types_64.h b/include/asm-x86/posix_types_64.h index 9926aa43775b..d6624c95854a 100644 --- a/include/asm-x86/posix_types_64.h +++ b/include/asm-x86/posix_types_64.h | |||
@@ -46,7 +46,7 @@ typedef unsigned long __kernel_old_dev_t; | |||
46 | #ifdef __KERNEL__ | 46 | #ifdef __KERNEL__ |
47 | 47 | ||
48 | #undef __FD_SET | 48 | #undef __FD_SET |
49 | static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) | 49 | static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) |
50 | { | 50 | { |
51 | unsigned long _tmp = fd / __NFDBITS; | 51 | unsigned long _tmp = fd / __NFDBITS; |
52 | unsigned long _rem = fd % __NFDBITS; | 52 | unsigned long _rem = fd % __NFDBITS; |
@@ -54,7 +54,7 @@ static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) | |||
54 | } | 54 | } |
55 | 55 | ||
56 | #undef __FD_CLR | 56 | #undef __FD_CLR |
57 | static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) | 57 | static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) |
58 | { | 58 | { |
59 | unsigned long _tmp = fd / __NFDBITS; | 59 | unsigned long _tmp = fd / __NFDBITS; |
60 | unsigned long _rem = fd % __NFDBITS; | 60 | unsigned long _rem = fd % __NFDBITS; |
@@ -62,7 +62,7 @@ static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) | |||
62 | } | 62 | } |
63 | 63 | ||
64 | #undef __FD_ISSET | 64 | #undef __FD_ISSET |
65 | static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p) | 65 | static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p) |
66 | { | 66 | { |
67 | unsigned long _tmp = fd / __NFDBITS; | 67 | unsigned long _tmp = fd / __NFDBITS; |
68 | unsigned long _rem = fd % __NFDBITS; | 68 | unsigned long _rem = fd % __NFDBITS; |
@@ -74,36 +74,36 @@ static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p) | |||
74 | * for 256 and 1024-bit fd_sets respectively) | 74 | * for 256 and 1024-bit fd_sets respectively) |
75 | */ | 75 | */ |
76 | #undef __FD_ZERO | 76 | #undef __FD_ZERO |
77 | static __inline__ void __FD_ZERO(__kernel_fd_set *p) | 77 | static inline void __FD_ZERO(__kernel_fd_set *p) |
78 | { | 78 | { |
79 | unsigned long *tmp = p->fds_bits; | 79 | unsigned long *tmp = p->fds_bits; |
80 | int i; | 80 | int i; |
81 | 81 | ||
82 | if (__builtin_constant_p(__FDSET_LONGS)) { | 82 | if (__builtin_constant_p(__FDSET_LONGS)) { |
83 | switch (__FDSET_LONGS) { | 83 | switch (__FDSET_LONGS) { |
84 | case 32: | 84 | case 32: |
85 | tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; | 85 | tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; |
86 | tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; | 86 | tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; |
87 | tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0; | 87 | tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0; |
88 | tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0; | 88 | tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0; |
89 | tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0; | 89 | tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0; |
90 | tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0; | 90 | tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0; |
91 | tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0; | 91 | tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0; |
92 | tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0; | 92 | tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0; |
93 | return; | 93 | return; |
94 | case 16: | 94 | case 16: |
95 | tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; | 95 | tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; |
96 | tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; | 96 | tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; |
97 | tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0; | 97 | tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0; |
98 | tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0; | 98 | tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0; |
99 | return; | 99 | return; |
100 | case 8: | 100 | case 8: |
101 | tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; | 101 | tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; |
102 | tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; | 102 | tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; |
103 | return; | 103 | return; |
104 | case 4: | 104 | case 4: |
105 | tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; | 105 | tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; |
106 | return; | 106 | return; |
107 | } | 107 | } |
108 | } | 108 | } |
109 | i = __FDSET_LONGS; | 109 | i = __FDSET_LONGS; |
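The hand-unrolled switch only pays off when __FDSET_LONGS folds to a compile-time constant; otherwise execution falls through to the generic loop this hunk cuts off. The same pattern in miniature (using the GCC builtin, as the source does):

#include <string.h>

static void zero_words(unsigned long *tmp, int n)
{
        /* Fast path the compiler can take when n is a literal
         * (only meaningful once this function is inlined): */
        if (__builtin_constant_p(n) && n == 4) {
                tmp[0] = 0; tmp[1] = 0; tmp[2] = 0; tmp[3] = 0;
                return;
        }
        memset(tmp, 0, n * sizeof(*tmp));       /* generic fallback */
}

int main(void)
{
        unsigned long buf[4] = { 1, 2, 3, 4 };

        zero_words(buf, 4);
        return (int)buf[0];
}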
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 45a2f0ab33d0..6e26c7c717a2 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h | |||
@@ -3,8 +3,7 @@ | |||
3 | 3 | ||
4 | #include <asm/processor-flags.h> | 4 | #include <asm/processor-flags.h> |
5 | 5 | ||
6 | /* migration helpers, for KVM - will be removed in 2.6.25: */ | 6 | /* migration helper, for KVM - will be removed in 2.6.25: */ |
7 | #include <asm/vm86.h> | ||
8 | #define Xgt_desc_struct desc_ptr | 7 | #define Xgt_desc_struct desc_ptr |
9 | 8 | ||
10 | /* Forward declaration, a strange C thing */ | 9 | /* Forward declaration, a strange C thing */ |
@@ -24,6 +23,7 @@ struct mm_struct; | |||
24 | #include <asm/msr.h> | 23 | #include <asm/msr.h> |
25 | #include <asm/desc_defs.h> | 24 | #include <asm/desc_defs.h> |
26 | #include <asm/nops.h> | 25 | #include <asm/nops.h> |
26 | |||
27 | #include <linux/personality.h> | 27 | #include <linux/personality.h> |
28 | #include <linux/cpumask.h> | 28 | #include <linux/cpumask.h> |
29 | #include <linux/cache.h> | 29 | #include <linux/cache.h> |
@@ -37,16 +37,18 @@ struct mm_struct; | |||
37 | static inline void *current_text_addr(void) | 37 | static inline void *current_text_addr(void) |
38 | { | 38 | { |
39 | void *pc; | 39 | void *pc; |
40 | asm volatile("mov $1f,%0\n1:":"=r" (pc)); | 40 | |
41 | asm volatile("mov $1f, %0; 1:":"=r" (pc)); | ||
42 | |||
41 | return pc; | 43 | return pc; |
42 | } | 44 | } |
43 | 45 | ||
44 | #ifdef CONFIG_X86_VSMP | 46 | #ifdef CONFIG_X86_VSMP |
45 | #define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) | 47 | # define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) |
46 | #define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT) | 48 | # define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT) |
47 | #else | 49 | #else |
48 | #define ARCH_MIN_TASKALIGN 16 | 50 | # define ARCH_MIN_TASKALIGN 16 |
49 | #define ARCH_MIN_MMSTRUCT_ALIGN 0 | 51 | # define ARCH_MIN_MMSTRUCT_ALIGN 0 |
50 | #endif | 52 | #endif |
51 | 53 | ||
52 | /* | 54 | /* |
@@ -56,69 +58,82 @@ static inline void *current_text_addr(void) | |||
56 | */ | 58 | */ |
57 | 59 | ||
58 | struct cpuinfo_x86 { | 60 | struct cpuinfo_x86 { |
59 | __u8 x86; /* CPU family */ | 61 | __u8 x86; /* CPU family */ |
60 | __u8 x86_vendor; /* CPU vendor */ | 62 | __u8 x86_vendor; /* CPU vendor */ |
61 | __u8 x86_model; | 63 | __u8 x86_model; |
62 | __u8 x86_mask; | 64 | __u8 x86_mask; |
63 | #ifdef CONFIG_X86_32 | 65 | #ifdef CONFIG_X86_32 |
64 | char wp_works_ok; /* It doesn't on 386's */ | 66 | char wp_works_ok; /* It doesn't on 386's */ |
65 | char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */ | 67 | |
66 | char hard_math; | 68 | /* Problems on some 486Dx4's and old 386's: */ |
67 | char rfu; | 69 | char hlt_works_ok; |
68 | char fdiv_bug; | 70 | char hard_math; |
69 | char f00f_bug; | 71 | char rfu; |
70 | char coma_bug; | 72 | char fdiv_bug; |
71 | char pad0; | 73 | char f00f_bug; |
74 | char coma_bug; | ||
75 | char pad0; | ||
72 | #else | 76 | #else |
73 | /* number of 4K pages in DTLB/ITLB combined(in pages)*/ | 77 | /* Number of 4K pages in DTLB/ITLB combined: */ |
74 | int x86_tlbsize; | 78 | int x86_tlbsize; |
75 | __u8 x86_virt_bits, x86_phys_bits; | 79 | __u8 x86_virt_bits; |
76 | /* cpuid returned core id bits */ | 80 | __u8 x86_phys_bits; |
77 | __u8 x86_coreid_bits; | 81 | /* CPUID returned core id bits: */ |
78 | /* Max extended CPUID function supported */ | 82 | __u8 x86_coreid_bits; |
79 | __u32 extended_cpuid_level; | 83 | /* Max extended CPUID function supported: */ |
84 | __u32 extended_cpuid_level; | ||
80 | #endif | 85 | #endif |
81 | int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ | 86 | /* Maximum supported CPUID level, -1=no CPUID: */ |
82 | __u32 x86_capability[NCAPINTS]; | 87 | int cpuid_level; |
83 | char x86_vendor_id[16]; | 88 | __u32 x86_capability[NCAPINTS]; |
84 | char x86_model_id[64]; | 89 | char x86_vendor_id[16]; |
85 | int x86_cache_size; /* in KB - valid for CPUS which support this | 90 | char x86_model_id[64]; |
86 | call */ | 91 | /* in KB - valid for CPUS which support this call: */ |
87 | int x86_cache_alignment; /* In bytes */ | 92 | int x86_cache_size; |
88 | int x86_power; | 93 | int x86_cache_alignment; /* In bytes */ |
89 | unsigned long loops_per_jiffy; | 94 | int x86_power; |
95 | unsigned long loops_per_jiffy; | ||
90 | #ifdef CONFIG_SMP | 96 | #ifdef CONFIG_SMP |
91 | cpumask_t llc_shared_map; /* cpus sharing the last level cache */ | 97 | /* cpus sharing the last level cache: */ |
98 | cpumask_t llc_shared_map; | ||
92 | #endif | 99 | #endif |
93 | u16 x86_max_cores; /* cpuid returned max cores value */ | 100 | /* cpuid returned max cores value: */ |
94 | u16 apicid; | 101 | u16 x86_max_cores; |
95 | u16 x86_clflush_size; | 102 | u16 apicid; |
103 | u16 initial_apicid; | ||
104 | u16 x86_clflush_size; | ||
96 | #ifdef CONFIG_SMP | 105 | #ifdef CONFIG_SMP |
97 | u16 booted_cores; /* number of cores as seen by OS */ | 106 | /* number of cores as seen by the OS: */ |
98 | u16 phys_proc_id; /* Physical processor id. */ | 107 | u16 booted_cores; |
99 | u16 cpu_core_id; /* Core id */ | 108 | /* Physical processor id: */ |
100 | u16 cpu_index; /* index into per_cpu list */ | 109 | u16 phys_proc_id; |
110 | /* Core id: */ | ||
111 | u16 cpu_core_id; | ||
112 | /* Index into per_cpu list: */ | ||
113 | u16 cpu_index; | ||
101 | #endif | 114 | #endif |
102 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); | 115 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); |
103 | 116 | ||
104 | #define X86_VENDOR_INTEL 0 | 117 | #define X86_VENDOR_INTEL 0 |
105 | #define X86_VENDOR_CYRIX 1 | 118 | #define X86_VENDOR_CYRIX 1 |
106 | #define X86_VENDOR_AMD 2 | 119 | #define X86_VENDOR_AMD 2 |
107 | #define X86_VENDOR_UMC 3 | 120 | #define X86_VENDOR_UMC 3 |
108 | #define X86_VENDOR_NEXGEN 4 | 121 | #define X86_VENDOR_NEXGEN 4 |
109 | #define X86_VENDOR_CENTAUR 5 | 122 | #define X86_VENDOR_CENTAUR 5 |
110 | #define X86_VENDOR_TRANSMETA 7 | 123 | #define X86_VENDOR_TRANSMETA 7 |
111 | #define X86_VENDOR_NSC 8 | 124 | #define X86_VENDOR_NSC 8 |
112 | #define X86_VENDOR_NUM 9 | 125 | #define X86_VENDOR_NUM 9 |
113 | #define X86_VENDOR_UNKNOWN 0xff | 126 | |
127 | #define X86_VENDOR_UNKNOWN 0xff | ||
114 | 128 | ||
115 | /* | 129 | /* |
116 | * capabilities of CPUs | 130 | * capabilities of CPUs |
117 | */ | 131 | */ |
118 | extern struct cpuinfo_x86 boot_cpu_data; | 132 | extern struct cpuinfo_x86 boot_cpu_data; |
119 | extern struct cpuinfo_x86 new_cpu_data; | 133 | extern struct cpuinfo_x86 new_cpu_data; |
120 | extern struct tss_struct doublefault_tss; | 134 | |
121 | extern __u32 cleared_cpu_caps[NCAPINTS]; | 135 | extern struct tss_struct doublefault_tss; |
136 | extern __u32 cleared_cpu_caps[NCAPINTS]; | ||
122 | 137 | ||
123 | #ifdef CONFIG_SMP | 138 | #ifdef CONFIG_SMP |
124 | DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); | 139 | DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); |
@@ -129,7 +144,18 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); | |||
129 | #define current_cpu_data boot_cpu_data | 144 | #define current_cpu_data boot_cpu_data |
130 | #endif | 145 | #endif |
131 | 146 | ||
132 | void cpu_detect(struct cpuinfo_x86 *c); | 147 | static inline int hlt_works(int cpu) |
148 | { | ||
149 | #ifdef CONFIG_X86_32 | ||
150 | return cpu_data(cpu).hlt_works_ok; | ||
151 | #else | ||
152 | return 1; | ||
153 | #endif | ||
154 | } | ||
155 | |||
156 | #define cache_line_size() (boot_cpu_data.x86_cache_alignment) | ||
157 | |||
158 | extern void cpu_detect(struct cpuinfo_x86 *c); | ||
133 | 159 | ||
134 | extern void identify_cpu(struct cpuinfo_x86 *); | 160 | extern void identify_cpu(struct cpuinfo_x86 *); |
135 | extern void identify_boot_cpu(void); | 161 | extern void identify_boot_cpu(void); |
@@ -146,15 +172,15 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {} | |||
146 | #endif | 172 | #endif |
147 | 173 | ||
148 | static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, | 174 | static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, |
149 | unsigned int *ecx, unsigned int *edx) | 175 | unsigned int *ecx, unsigned int *edx) |
150 | { | 176 | { |
151 | /* ecx is often an input as well as an output. */ | 177 | /* ecx is often an input as well as an output. */ |
152 | __asm__("cpuid" | 178 | asm("cpuid" |
153 | : "=a" (*eax), | 179 | : "=a" (*eax), |
154 | "=b" (*ebx), | 180 | "=b" (*ebx), |
155 | "=c" (*ecx), | 181 | "=c" (*ecx), |
156 | "=d" (*edx) | 182 | "=d" (*edx) |
157 | : "0" (*eax), "2" (*ecx)); | 183 | : "0" (*eax), "2" (*ecx)); |
158 | } | 184 | } |
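native_cpuid() feeds eax/ecx back in as inputs because many leaves take a subleaf number in ecx. The same wrapper works verbatim in userspace; leaf 0 returns the vendor string in ebx, edx, ecx (this sketch assumes an x86-64 build, where ebx needs no PIC special-casing):

#include <stdio.h>

static void cpuid(unsigned int leaf, unsigned int *a, unsigned int *b,
                  unsigned int *c, unsigned int *d)
{
        asm("cpuid"
            : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
            : "0" (leaf), "2" (0));
}

int main(void)
{
        unsigned int a, b, c, d;

        cpuid(0, &a, &b, &c, &d);
        printf("vendor: %.4s%.4s%.4s\n",
               (char *)&b, (char *)&d, (char *)&c);     /* e.g. GenuineIntel */
        return 0;
}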
159 | 185 | ||
160 | static inline void load_cr3(pgd_t *pgdir) | 186 | static inline void load_cr3(pgd_t *pgdir) |
@@ -165,54 +191,67 @@ static inline void load_cr3(pgd_t *pgdir) | |||
165 | #ifdef CONFIG_X86_32 | 191 | #ifdef CONFIG_X86_32 |
166 | /* This is the TSS defined by the hardware. */ | 192 | /* This is the TSS defined by the hardware. */ |
167 | struct x86_hw_tss { | 193 | struct x86_hw_tss { |
168 | unsigned short back_link, __blh; | 194 | unsigned short back_link, __blh; |
169 | unsigned long sp0; | 195 | unsigned long sp0; |
170 | unsigned short ss0, __ss0h; | 196 | unsigned short ss0, __ss0h; |
171 | unsigned long sp1; | 197 | unsigned long sp1; |
172 | unsigned short ss1, __ss1h; /* ss1 caches MSR_IA32_SYSENTER_CS */ | 198 | /* ss1 caches MSR_IA32_SYSENTER_CS: */ |
173 | unsigned long sp2; | 199 | unsigned short ss1, __ss1h; |
174 | unsigned short ss2, __ss2h; | 200 | unsigned long sp2; |
175 | unsigned long __cr3; | 201 | unsigned short ss2, __ss2h; |
176 | unsigned long ip; | 202 | unsigned long __cr3; |
177 | unsigned long flags; | 203 | unsigned long ip; |
178 | unsigned long ax, cx, dx, bx; | 204 | unsigned long flags; |
179 | unsigned long sp, bp, si, di; | 205 | unsigned long ax; |
180 | unsigned short es, __esh; | 206 | unsigned long cx; |
181 | unsigned short cs, __csh; | 207 | unsigned long dx; |
182 | unsigned short ss, __ssh; | 208 | unsigned long bx; |
183 | unsigned short ds, __dsh; | 209 | unsigned long sp; |
184 | unsigned short fs, __fsh; | 210 | unsigned long bp; |
185 | unsigned short gs, __gsh; | 211 | unsigned long si; |
186 | unsigned short ldt, __ldth; | 212 | unsigned long di; |
187 | unsigned short trace, io_bitmap_base; | 213 | unsigned short es, __esh; |
214 | unsigned short cs, __csh; | ||
215 | unsigned short ss, __ssh; | ||
216 | unsigned short ds, __dsh; | ||
217 | unsigned short fs, __fsh; | ||
218 | unsigned short gs, __gsh; | ||
219 | unsigned short ldt, __ldth; | ||
220 | unsigned short trace; | ||
221 | unsigned short io_bitmap_base; | ||
222 | |||
188 | } __attribute__((packed)); | 223 | } __attribute__((packed)); |
189 | #else | 224 | #else |
190 | struct x86_hw_tss { | 225 | struct x86_hw_tss { |
191 | u32 reserved1; | 226 | u32 reserved1; |
192 | u64 sp0; | 227 | u64 sp0; |
193 | u64 sp1; | 228 | u64 sp1; |
194 | u64 sp2; | 229 | u64 sp2; |
195 | u64 reserved2; | 230 | u64 reserved2; |
196 | u64 ist[7]; | 231 | u64 ist[7]; |
197 | u32 reserved3; | 232 | u32 reserved3; |
198 | u32 reserved4; | 233 | u32 reserved4; |
199 | u16 reserved5; | 234 | u16 reserved5; |
200 | u16 io_bitmap_base; | 235 | u16 io_bitmap_base; |
236 | |||
201 | } __attribute__((packed)) ____cacheline_aligned; | 237 | } __attribute__((packed)) ____cacheline_aligned; |
202 | #endif | 238 | #endif |
203 | 239 | ||
204 | /* | 240 | /* |
205 | * Size of io_bitmap. | 241 | * IO-bitmap sizes: |
206 | */ | 242 | */ |
207 | #define IO_BITMAP_BITS 65536 | 243 | #define IO_BITMAP_BITS 65536 |
208 | #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) | 244 | #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) |
209 | #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) | 245 | #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) |
210 | #define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) | 246 | #define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) |
211 | #define INVALID_IO_BITMAP_OFFSET 0x8000 | 247 | #define INVALID_IO_BITMAP_OFFSET 0x8000 |
212 | #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 | 248 | #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 |
213 | 249 | ||
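Those constants follow from one bit per I/O port: 65536 bits is 8KB, or 1024 longs on a 64-bit kernel. A compile-and-run sanity check:

#include <assert.h>

int main(void)
{
        assert(65536 / 8 == 8192);              /* IO_BITMAP_BYTES */
        assert(8192 / sizeof(long) == 1024);    /* IO_BITMAP_LONGS on LP64 */
        return 0;
}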
214 | struct tss_struct { | 250 | struct tss_struct { |
215 | struct x86_hw_tss x86_tss; | 251 | /* |
252 | * The hardware state: | ||
253 | */ | ||
254 | struct x86_hw_tss x86_tss; | ||
216 | 255 | ||
217 | /* | 256 | /* |
218 | * The extra 1 is there because the CPU will access an | 257 | * The extra 1 is there because the CPU will access an |
@@ -220,90 +259,108 @@ struct tss_struct { | |||
220 | * bitmap. The extra byte must be all 1 bits, and must | 259 | * bitmap. The extra byte must be all 1 bits, and must |
221 | * be within the limit. | 260 | * be within the limit. |
222 | */ | 261 | */ |
223 | unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; | 262 | unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; |
224 | /* | 263 | /* |
225 | * Cache the current maximum and the last task that used the bitmap: | 264 | * Cache the current maximum and the last task that used the bitmap: |
226 | */ | 265 | */ |
227 | unsigned long io_bitmap_max; | 266 | unsigned long io_bitmap_max; |
228 | struct thread_struct *io_bitmap_owner; | 267 | struct thread_struct *io_bitmap_owner; |
268 | |||
229 | /* | 269 | /* |
230 | * pads the TSS to be cacheline-aligned (size is 0x100) | 270 | * Pad the TSS to be cacheline-aligned (size is 0x100): |
231 | */ | 271 | */ |
232 | unsigned long __cacheline_filler[35]; | 272 | unsigned long __cacheline_filler[35]; |
233 | /* | 273 | /* |
234 | * .. and then another 0x100 bytes for emergency kernel stack | 274 | * .. and then another 0x100 bytes for the emergency kernel stack: |
235 | */ | 275 | */ |
236 | unsigned long stack[64]; | 276 | unsigned long stack[64]; |
277 | |||
237 | } __attribute__((packed)); | 278 | } __attribute__((packed)); |
238 | 279 | ||
239 | DECLARE_PER_CPU(struct tss_struct, init_tss); | 280 | DECLARE_PER_CPU(struct tss_struct, init_tss); |
240 | 281 | ||
241 | /* Save the original ist values for checking stack pointers during debugging */ | 282 | /* |
283 | * Save the original ist values for checking stack pointers during debugging | ||
284 | */ | ||
242 | struct orig_ist { | 285 | struct orig_ist { |
243 | unsigned long ist[7]; | 286 | unsigned long ist[7]; |
244 | }; | 287 | }; |
245 | 288 | ||
246 | #define MXCSR_DEFAULT 0x1f80 | 289 | #define MXCSR_DEFAULT 0x1f80 |
247 | 290 | ||
248 | struct i387_fsave_struct { | 291 | struct i387_fsave_struct { |
249 | u32 cwd; | 292 | u32 cwd; /* FPU Control Word */ |
250 | u32 swd; | 293 | u32 swd; /* FPU Status Word */ |
251 | u32 twd; | 294 | u32 twd; /* FPU Tag Word */ |
252 | u32 fip; | 295 | u32 fip; /* FPU IP Offset */ |
253 | u32 fcs; | 296 | u32 fcs; /* FPU IP Selector */ |
254 | u32 foo; | 297 | u32 foo; /* FPU Operand Pointer Offset */ |
255 | u32 fos; | 298 | u32 fos; /* FPU Operand Pointer Selector */ |
256 | u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ | 299 | |
257 | u32 status; /* software status information */ | 300 | /* 8*10 bytes for each FP-reg = 80 bytes: */ |
301 | u32 st_space[20]; | ||
302 | |||
303 | /* Software status information [not touched by FSAVE ]: */ | ||
304 | u32 status; | ||
258 | }; | 305 | }; |
259 | 306 | ||
260 | struct i387_fxsave_struct { | 307 | struct i387_fxsave_struct { |
261 | u16 cwd; | 308 | u16 cwd; /* Control Word */ |
262 | u16 swd; | 309 | u16 swd; /* Status Word */ |
263 | u16 twd; | 310 | u16 twd; /* Tag Word */ |
264 | u16 fop; | 311 | u16 fop; /* Last Instruction Opcode */ |
265 | union { | 312 | union { |
266 | struct { | 313 | struct { |
267 | u64 rip; | 314 | u64 rip; /* Instruction Pointer */ |
268 | u64 rdp; | 315 | u64 rdp; /* Data Pointer */ |
269 | }; | 316 | }; |
270 | struct { | 317 | struct { |
271 | u32 fip; | 318 | u32 fip; /* FPU IP Offset */ |
272 | u32 fcs; | 319 | u32 fcs; /* FPU IP Selector */ |
273 | u32 foo; | 320 | u32 foo; /* FPU Operand Offset */ |
274 | u32 fos; | 321 | u32 fos; /* FPU Operand Selector */ |
275 | }; | 322 | }; |
276 | }; | 323 | }; |
277 | u32 mxcsr; | 324 | u32 mxcsr; /* MXCSR Register State */ |
278 | u32 mxcsr_mask; | 325 | u32 mxcsr_mask; /* MXCSR Mask */ |
279 | u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ | 326 | |
280 | u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */ | 327 | /* 8*16 bytes for each FP-reg = 128 bytes: */ |
281 | u32 padding[24]; | 328 | u32 st_space[32]; |
329 | |||
330 | /* 16*16 bytes for each XMM-reg = 256 bytes: */ | ||
331 | u32 xmm_space[64]; | ||
332 | |||
333 | u32 padding[24]; | ||
334 | |||
282 | } __attribute__((aligned(16))); | 335 | } __attribute__((aligned(16))); |
283 | 336 | ||
284 | struct i387_soft_struct { | 337 | struct i387_soft_struct { |
285 | u32 cwd; | 338 | u32 cwd; |
286 | u32 swd; | 339 | u32 swd; |
287 | u32 twd; | 340 | u32 twd; |
288 | u32 fip; | 341 | u32 fip; |
289 | u32 fcs; | 342 | u32 fcs; |
290 | u32 foo; | 343 | u32 foo; |
291 | u32 fos; | 344 | u32 fos; |
292 | u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ | 345 | /* 8*10 bytes for each FP-reg = 80 bytes: */ |
293 | u8 ftop, changed, lookahead, no_update, rm, alimit; | 346 | u32 st_space[20]; |
294 | struct info *info; | 347 | u8 ftop; |
295 | u32 entry_eip; | 348 | u8 changed; |
349 | u8 lookahead; | ||
350 | u8 no_update; | ||
351 | u8 rm; | ||
352 | u8 alimit; | ||
353 | struct info *info; | ||
354 | u32 entry_eip; | ||
296 | }; | 355 | }; |
297 | 356 | ||
298 | union i387_union { | 357 | union i387_union { |
299 | struct i387_fsave_struct fsave; | 358 | struct i387_fsave_struct fsave; |
300 | struct i387_fxsave_struct fxsave; | 359 | struct i387_fxsave_struct fxsave; |
301 | struct i387_soft_struct soft; | 360 | struct i387_soft_struct soft; |
302 | }; | 361 | }; |
303 | 362 | ||
304 | #ifdef CONFIG_X86_32 | 363 | #ifdef CONFIG_X86_64 |
305 | DECLARE_PER_CPU(u8, cpu_llc_id); | ||
306 | #else | ||
307 | DECLARE_PER_CPU(struct orig_ist, orig_ist); | 364 | DECLARE_PER_CPU(struct orig_ist, orig_ist); |
308 | #endif | 365 | #endif |
309 | 366 | ||
@@ -313,42 +370,50 @@ extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | |||
313 | extern unsigned short num_cache_leaves; | 370 | extern unsigned short num_cache_leaves; |
314 | 371 | ||
315 | struct thread_struct { | 372 | struct thread_struct { |
316 | /* cached TLS descriptors. */ | 373 | /* Cached TLS descriptors: */ |
317 | struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; | 374 | struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; |
318 | unsigned long sp0; | 375 | unsigned long sp0; |
319 | unsigned long sp; | 376 | unsigned long sp; |
320 | #ifdef CONFIG_X86_32 | 377 | #ifdef CONFIG_X86_32 |
321 | unsigned long sysenter_cs; | 378 | unsigned long sysenter_cs; |
322 | #else | 379 | #else |
323 | unsigned long usersp; /* Copy from PDA */ | 380 | unsigned long usersp; /* Copy from PDA */ |
324 | unsigned short es, ds, fsindex, gsindex; | 381 | unsigned short es; |
382 | unsigned short ds; | ||
383 | unsigned short fsindex; | ||
384 | unsigned short gsindex; | ||
325 | #endif | 385 | #endif |
326 | unsigned long ip; | 386 | unsigned long ip; |
327 | unsigned long fs; | 387 | unsigned long fs; |
328 | unsigned long gs; | 388 | unsigned long gs; |
329 | /* Hardware debugging registers */ | 389 | /* Hardware debugging registers: */ |
330 | unsigned long debugreg0; | 390 | unsigned long debugreg0; |
331 | unsigned long debugreg1; | 391 | unsigned long debugreg1; |
332 | unsigned long debugreg2; | 392 | unsigned long debugreg2; |
333 | unsigned long debugreg3; | 393 | unsigned long debugreg3; |
334 | unsigned long debugreg6; | 394 | unsigned long debugreg6; |
335 | unsigned long debugreg7; | 395 | unsigned long debugreg7; |
336 | /* fault info */ | 396 | /* Fault info: */ |
337 | unsigned long cr2, trap_no, error_code; | 397 | unsigned long cr2; |
338 | /* floating point info */ | 398 | unsigned long trap_no; |
399 | unsigned long error_code; | ||
400 | /* Floating point info: */ | ||
339 | union i387_union i387 __attribute__((aligned(16)));; | 401 | union i387_union i387 __attribute__((aligned(16))); |
340 | #ifdef CONFIG_X86_32 | 402 | #ifdef CONFIG_X86_32 |
341 | /* virtual 86 mode info */ | 403 | /* Virtual 86 mode info */ |
342 | struct vm86_struct __user *vm86_info; | 404 | struct vm86_struct __user *vm86_info; |
343 | unsigned long screen_bitmap; | 405 | unsigned long screen_bitmap; |
344 | unsigned long v86flags, v86mask, saved_sp0; | 406 | unsigned long v86flags; |
345 | unsigned int saved_fs, saved_gs; | 407 | unsigned long v86mask; |
408 | unsigned long saved_sp0; | ||
409 | unsigned int saved_fs; | ||
410 | unsigned int saved_gs; | ||
346 | #endif | 411 | #endif |
347 | /* IO permissions */ | 412 | /* IO permissions: */ |
348 | unsigned long *io_bitmap_ptr; | 413 | unsigned long *io_bitmap_ptr; |
349 | unsigned long iopl; | 414 | unsigned long iopl; |
350 | /* max allowed port in the bitmap, in bytes: */ | 415 | /* Max allowed port in the bitmap, in bytes: */ |
351 | unsigned io_bitmap_max; | 416 | unsigned io_bitmap_max; |
352 | /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */ | 417 | /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */ |
353 | unsigned long debugctlmsr; | 418 | unsigned long debugctlmsr; |
354 | /* Debug Store - if not 0 points to a DS Save Area configuration; | 419 | /* Debug Store - if not 0 points to a DS Save Area configuration; |
@@ -358,21 +423,27 @@ struct thread_struct { | |||
358 | 423 | ||
359 | static inline unsigned long native_get_debugreg(int regno) | 424 | static inline unsigned long native_get_debugreg(int regno) |
360 | { | 425 | { |
361 | unsigned long val = 0; /* Damn you, gcc! */ | 426 | unsigned long val = 0; /* Damn you, gcc! */ |
362 | 427 | ||
363 | switch (regno) { | 428 | switch (regno) { |
364 | case 0: | 429 | case 0: |
365 | asm("mov %%db0, %0" :"=r" (val)); break; | 430 | asm("mov %%db0, %0" :"=r" (val)); |
431 | break; | ||
366 | case 1: | 432 | case 1: |
367 | asm("mov %%db1, %0" :"=r" (val)); break; | 433 | asm("mov %%db1, %0" :"=r" (val)); |
434 | break; | ||
368 | case 2: | 435 | case 2: |
369 | asm("mov %%db2, %0" :"=r" (val)); break; | 436 | asm("mov %%db2, %0" :"=r" (val)); |
437 | break; | ||
370 | case 3: | 438 | case 3: |
371 | asm("mov %%db3, %0" :"=r" (val)); break; | 439 | asm("mov %%db3, %0" :"=r" (val)); |
440 | break; | ||
372 | case 6: | 441 | case 6: |
373 | asm("mov %%db6, %0" :"=r" (val)); break; | 442 | asm("mov %%db6, %0" :"=r" (val)); |
443 | break; | ||
374 | case 7: | 444 | case 7: |
375 | asm("mov %%db7, %0" :"=r" (val)); break; | 445 | asm("mov %%db7, %0" :"=r" (val)); |
446 | break; | ||
376 | default: | 447 | default: |
377 | BUG(); | 448 | BUG(); |
378 | } | 449 | } |
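A kernel-context sketch of how the accessors get used (the function name here is invented): point DR0 at a linear address, then set DR7 bit 1 (G0) to globally enable it; with the DR7 R/W and LEN fields left zero, that means a 1-byte execute breakpoint:

static void arm_dr0_breakpoint(unsigned long addr)
{
        unsigned long dr7;

        native_set_debugreg(0, addr);           /* DR0 = target address */
        dr7 = native_get_debugreg(7);
        native_set_debugreg(7, dr7 | 0x2);      /* G0: globally enable DR0 */
}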
@@ -383,22 +454,22 @@ static inline void native_set_debugreg(int regno, unsigned long value) | |||
383 | { | 454 | { |
384 | switch (regno) { | 455 | switch (regno) { |
385 | case 0: | 456 | case 0: |
386 | asm("mov %0,%%db0" : /* no output */ :"r" (value)); | 457 | asm("mov %0, %%db0" ::"r" (value)); |
387 | break; | 458 | break; |
388 | case 1: | 459 | case 1: |
389 | asm("mov %0,%%db1" : /* no output */ :"r" (value)); | 460 | asm("mov %0, %%db1" ::"r" (value)); |
390 | break; | 461 | break; |
391 | case 2: | 462 | case 2: |
392 | asm("mov %0,%%db2" : /* no output */ :"r" (value)); | 463 | asm("mov %0, %%db2" ::"r" (value)); |
393 | break; | 464 | break; |
394 | case 3: | 465 | case 3: |
395 | asm("mov %0,%%db3" : /* no output */ :"r" (value)); | 466 | asm("mov %0, %%db3" ::"r" (value)); |
396 | break; | 467 | break; |
397 | case 6: | 468 | case 6: |
398 | asm("mov %0,%%db6" : /* no output */ :"r" (value)); | 469 | asm("mov %0, %%db6" ::"r" (value)); |
399 | break; | 470 | break; |
400 | case 7: | 471 | case 7: |
401 | asm("mov %0,%%db7" : /* no output */ :"r" (value)); | 472 | asm("mov %0, %%db7" ::"r" (value)); |
402 | break; | 473 | break; |
403 | default: | 474 | default: |
404 | BUG(); | 475 | BUG(); |
@@ -412,23 +483,24 @@ static inline void native_set_iopl_mask(unsigned mask) | |||
412 | { | 483 | { |
413 | #ifdef CONFIG_X86_32 | 484 | #ifdef CONFIG_X86_32 |
414 | unsigned int reg; | 485 | unsigned int reg; |
415 | __asm__ __volatile__ ("pushfl;" | 486 | |
416 | "popl %0;" | 487 | asm volatile ("pushfl;" |
417 | "andl %1, %0;" | 488 | "popl %0;" |
418 | "orl %2, %0;" | 489 | "andl %1, %0;" |
419 | "pushl %0;" | 490 | "orl %2, %0;" |
420 | "popfl" | 491 | "pushl %0;" |
421 | : "=&r" (reg) | 492 | "popfl" |
422 | : "i" (~X86_EFLAGS_IOPL), "r" (mask)); | 493 | : "=&r" (reg) |
494 | : "i" (~X86_EFLAGS_IOPL), "r" (mask)); | ||
423 | #endif | 495 | #endif |
424 | } | 496 | } |
425 | 497 | ||
426 | static inline void native_load_sp0(struct tss_struct *tss, | 498 | static inline void |
427 | struct thread_struct *thread) | 499 | native_load_sp0(struct tss_struct *tss, struct thread_struct *thread) |
428 | { | 500 | { |
429 | tss->x86_tss.sp0 = thread->sp0; | 501 | tss->x86_tss.sp0 = thread->sp0; |
430 | #ifdef CONFIG_X86_32 | 502 | #ifdef CONFIG_X86_32 |
431 | /* Only happens when SEP is enabled, no need to test "SEP"arately */ | 503 | /* Only happens when SEP is enabled, no need to test "SEP"arately: */ |
432 | if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { | 504 | if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { |
433 | tss->x86_tss.ss1 = thread->sysenter_cs; | 505 | tss->x86_tss.ss1 = thread->sysenter_cs; |
434 | wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); | 506 | wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); |
@@ -446,8 +518,8 @@ static inline void native_swapgs(void) | |||
446 | #ifdef CONFIG_PARAVIRT | 518 | #ifdef CONFIG_PARAVIRT |
447 | #include <asm/paravirt.h> | 519 | #include <asm/paravirt.h> |
448 | #else | 520 | #else |
449 | #define __cpuid native_cpuid | 521 | #define __cpuid native_cpuid |
450 | #define paravirt_enabled() 0 | 522 | #define paravirt_enabled() 0 |
451 | 523 | ||
452 | /* | 524 | /* |
453 | * These special macros can be used to get or set a debugging register | 525 | * These special macros can be used to get or set a debugging register |
@@ -473,11 +545,12 @@ static inline void load_sp0(struct tss_struct *tss, | |||
473 | * enable), so that any CPU's that boot up | 545 | * enable), so that any CPU's that boot up |
474 | * after us can get the correct flags. | 546 | * after us can get the correct flags. |
475 | */ | 547 | */ |
476 | extern unsigned long mmu_cr4_features; | 548 | extern unsigned long mmu_cr4_features; |
477 | 549 | ||
478 | static inline void set_in_cr4(unsigned long mask) | 550 | static inline void set_in_cr4(unsigned long mask) |
479 | { | 551 | { |
480 | unsigned cr4; | 552 | unsigned cr4; |
553 | |||
481 | mmu_cr4_features |= mask; | 554 | mmu_cr4_features |= mask; |
482 | cr4 = read_cr4(); | 555 | cr4 = read_cr4(); |
483 | cr4 |= mask; | 556 | cr4 |= mask; |
@@ -487,6 +560,7 @@ static inline void set_in_cr4(unsigned long mask) | |||
487 | static inline void clear_in_cr4(unsigned long mask) | 560 | static inline void clear_in_cr4(unsigned long mask) |
488 | { | 561 | { |
489 | unsigned cr4; | 562 | unsigned cr4; |
563 | |||
490 | mmu_cr4_features &= ~mask; | 564 | mmu_cr4_features &= ~mask; |
491 | cr4 = read_cr4(); | 565 | cr4 = read_cr4(); |
492 | cr4 &= ~mask; | 566 | cr4 &= ~mask; |
@@ -494,42 +568,42 @@ static inline void clear_in_cr4(unsigned long mask) | |||
494 | } | 568 | } |
495 | 569 | ||
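A typical caller (kernel context) passes one of the X86_CR4_* flags from processor-flags.h; mmu_cr4_features then carries the bit forward to CPUs that boot later, per the comment above:

static void enable_pse(void)
{
        set_in_cr4(X86_CR4_PSE);        /* CR4 bit 4: large-page support */
}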
496 | struct microcode_header { | 570 | struct microcode_header { |
497 | unsigned int hdrver; | 571 | unsigned int hdrver; |
498 | unsigned int rev; | 572 | unsigned int rev; |
499 | unsigned int date; | 573 | unsigned int date; |
500 | unsigned int sig; | 574 | unsigned int sig; |
501 | unsigned int cksum; | 575 | unsigned int cksum; |
502 | unsigned int ldrver; | 576 | unsigned int ldrver; |
503 | unsigned int pf; | 577 | unsigned int pf; |
504 | unsigned int datasize; | 578 | unsigned int datasize; |
505 | unsigned int totalsize; | 579 | unsigned int totalsize; |
506 | unsigned int reserved[3]; | 580 | unsigned int reserved[3]; |
507 | }; | 581 | }; |
508 | 582 | ||
509 | struct microcode { | 583 | struct microcode { |
510 | struct microcode_header hdr; | 584 | struct microcode_header hdr; |
511 | unsigned int bits[0]; | 585 | unsigned int bits[0]; |
512 | }; | 586 | }; |
513 | 587 | ||
514 | typedef struct microcode microcode_t; | 588 | typedef struct microcode microcode_t; |
515 | typedef struct microcode_header microcode_header_t; | 589 | typedef struct microcode_header microcode_header_t; |
516 | 590 | ||
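When parsing these headers, the usual Intel convention (an assumption here, worth checking against the SDM) is that a zero datasize means the default 2000 data bytes and 2048 bytes total. Helpers in that spirit:

static unsigned int microcode_data_size(const struct microcode_header *hdr)
{
        return hdr->datasize ? hdr->datasize : 2000;    /* assumed default */
}

static unsigned int microcode_total_size(const struct microcode_header *hdr)
{
        return hdr->totalsize ? hdr->totalsize : 2048;  /* assumed default */
}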
517 | /* microcode format is extended from prescott processors */ | 591 | /* microcode format is extended from prescott processors */ |
518 | struct extended_signature { | 592 | struct extended_signature { |
519 | unsigned int sig; | 593 | unsigned int sig; |
520 | unsigned int pf; | 594 | unsigned int pf; |
521 | unsigned int cksum; | 595 | unsigned int cksum; |
522 | }; | 596 | }; |
523 | 597 | ||
524 | struct extended_sigtable { | 598 | struct extended_sigtable { |
525 | unsigned int count; | 599 | unsigned int count; |
526 | unsigned int cksum; | 600 | unsigned int cksum; |
527 | unsigned int reserved[3]; | 601 | unsigned int reserved[3]; |
528 | struct extended_signature sigs[0]; | 602 | struct extended_signature sigs[0]; |
529 | }; | 603 | }; |
530 | 604 | ||
531 | typedef struct { | 605 | typedef struct { |
532 | unsigned long seg; | 606 | unsigned long seg; |
533 | } mm_segment_t; | 607 | } mm_segment_t; |
534 | 608 | ||
535 | 609 | ||
@@ -541,7 +615,7 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | |||
541 | /* Free all resources held by a thread. */ | 615 | /* Free all resources held by a thread. */ |
542 | extern void release_thread(struct task_struct *); | 616 | extern void release_thread(struct task_struct *); |
543 | 617 | ||
544 | /* Prepare to copy thread state - unlazy all lazy status */ | 618 | /* Prepare to copy thread state - unlazy all lazy state */ |
545 | extern void prepare_to_copy(struct task_struct *tsk); | 619 | extern void prepare_to_copy(struct task_struct *tsk); |
546 | 620 | ||
547 | unsigned long get_wchan(struct task_struct *p); | 621 | unsigned long get_wchan(struct task_struct *p); |
@@ -578,118 +652,137 @@ static inline unsigned int cpuid_eax(unsigned int op) | |||
578 | unsigned int eax, ebx, ecx, edx; | 652 | unsigned int eax, ebx, ecx, edx; |
579 | 653 | ||
580 | cpuid(op, &eax, &ebx, &ecx, &edx); | 654 | cpuid(op, &eax, &ebx, &ecx, &edx); |
655 | |||
581 | return eax; | 656 | return eax; |
582 | } | 657 | } |
658 | |||
583 | static inline unsigned int cpuid_ebx(unsigned int op) | 659 | static inline unsigned int cpuid_ebx(unsigned int op) |
584 | { | 660 | { |
585 | unsigned int eax, ebx, ecx, edx; | 661 | unsigned int eax, ebx, ecx, edx; |
586 | 662 | ||
587 | cpuid(op, &eax, &ebx, &ecx, &edx); | 663 | cpuid(op, &eax, &ebx, &ecx, &edx); |
664 | |||
588 | return ebx; | 665 | return ebx; |
589 | } | 666 | } |
667 | |||
590 | static inline unsigned int cpuid_ecx(unsigned int op) | 668 | static inline unsigned int cpuid_ecx(unsigned int op) |
591 | { | 669 | { |
592 | unsigned int eax, ebx, ecx, edx; | 670 | unsigned int eax, ebx, ecx, edx; |
593 | 671 | ||
594 | cpuid(op, &eax, &ebx, &ecx, &edx); | 672 | cpuid(op, &eax, &ebx, &ecx, &edx); |
673 | |||
595 | return ecx; | 674 | return ecx; |
596 | } | 675 | } |
676 | |||
597 | static inline unsigned int cpuid_edx(unsigned int op) | 677 | static inline unsigned int cpuid_edx(unsigned int op) |
598 | { | 678 | { |
599 | unsigned int eax, ebx, ecx, edx; | 679 | unsigned int eax, ebx, ecx, edx; |
600 | 680 | ||
601 | cpuid(op, &eax, &ebx, &ecx, &edx); | 681 | cpuid(op, &eax, &ebx, &ecx, &edx); |
682 | |||
602 | return edx; | 683 | return edx; |
603 | } | 684 | } |
604 | 685 | ||
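A typical consumer of these helpers tests a single feature bit; leaf 1's edx bit 25, for example, advertises SSE. Recreated as a standalone userspace program (x86-64 assumed):

#include <stdio.h>

static unsigned int cpuid_edx(unsigned int op)
{
        unsigned int a, b, c, d;

        asm("cpuid"
            : "=a" (a), "=b" (b), "=c" (c), "=d" (d)
            : "0" (op), "2" (0));
        return d;
}

int main(void)
{
        printf("SSE: %s\n", (cpuid_edx(1) >> 25) & 1 ? "yes" : "no");
        return 0;
}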
605 | /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ | 686 | /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ |
606 | static inline void rep_nop(void) | 687 | static inline void rep_nop(void) |
607 | { | 688 | { |
608 | __asm__ __volatile__("rep;nop": : :"memory"); | 689 | asm volatile("rep; nop" ::: "memory"); |
609 | } | 690 | } |
610 | 691 | ||
611 | /* Stop speculative execution */ | 692 | static inline void cpu_relax(void) |
693 | { | ||
694 | rep_nop(); | ||
695 | } | ||
696 | |||
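cpu_relax() is the canonical body of a busy-wait loop: the pause hint saves power and spares the pipeline flush when the wait finally ends. A userspace sketch of the pattern:

#include <stdatomic.h>

static void spin_until_set(atomic_int *flag)
{
        while (!atomic_load_explicit(flag, memory_order_acquire))
                asm volatile("rep; nop" ::: "memory");  /* cpu_relax() */
}

int main(void)
{
        atomic_int flag = 1;    /* pre-set, so the loop exits immediately */

        spin_until_set(&flag);
        return 0;
}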
697 | /* Stop speculative execution: */ | ||
612 | static inline void sync_core(void) | 698 | static inline void sync_core(void) |
613 | { | 699 | { |
614 | int tmp; | 700 | int tmp; |
701 | |||
615 | asm volatile("cpuid" : "=a" (tmp) : "0" (1) | 702 | asm volatile("cpuid" : "=a" (tmp) : "0" (1) |
616 | : "ebx", "ecx", "edx", "memory"); | 703 | : "ebx", "ecx", "edx", "memory"); |
617 | } | 704 | } |
618 | 705 | ||
619 | #define cpu_relax() rep_nop() | ||
620 | |||
621 | static inline void __monitor(const void *eax, unsigned long ecx, | 706 | static inline void __monitor(const void *eax, unsigned long ecx, |
622 | unsigned long edx) | 707 | unsigned long edx) |
623 | { | 708 | { |
624 | /* "monitor %eax,%ecx,%edx;" */ | 709 | /* "monitor %eax, %ecx, %edx;" */ |
625 | asm volatile( | 710 | asm volatile(".byte 0x0f, 0x01, 0xc8;" |
626 | ".byte 0x0f,0x01,0xc8;" | 711 | :: "a" (eax), "c" (ecx), "d"(edx)); |
627 | : :"a" (eax), "c" (ecx), "d"(edx)); | ||
628 | } | 712 | } |
629 | 713 | ||
630 | static inline void __mwait(unsigned long eax, unsigned long ecx) | 714 | static inline void __mwait(unsigned long eax, unsigned long ecx) |
631 | { | 715 | { |
632 | /* "mwait %eax,%ecx;" */ | 716 | /* "mwait %eax, %ecx;" */ |
633 | asm volatile( | 717 | asm volatile(".byte 0x0f, 0x01, 0xc9;" |
634 | ".byte 0x0f,0x01,0xc9;" | 718 | :: "a" (eax), "c" (ecx)); |
635 | : :"a" (eax), "c" (ecx)); | ||
636 | } | 719 | } |
637 | 720 | ||
638 | static inline void __sti_mwait(unsigned long eax, unsigned long ecx) | 721 | static inline void __sti_mwait(unsigned long eax, unsigned long ecx) |
639 | { | 722 | { |
640 | /* "mwait %eax,%ecx;" */ | 723 | /* "mwait %eax, %ecx;" */ |
641 | asm volatile( | 724 | asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" |
642 | "sti; .byte 0x0f,0x01,0xc9;" | 725 | :: "a" (eax), "c" (ecx)); |
643 | : :"a" (eax), "c" (ecx)); | ||
644 | } | 726 | } |
645 | 727 | ||
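A sketch of how mwait_idle_with_hints() presumably strings these together (the idiom, not the exact kernel code): arm the monitor on the current thread's flags word, re-check for pending work, then mwait; a write to the monitored cache line or an interrupt wakes the CPU:

static void mwait_idle_sketch(unsigned long eax, unsigned long ecx)
{
        __monitor(&current_thread_info()->flags, 0, 0); /* arm the monitor */
        if (!need_resched())
                __mwait(eax, ecx);                      /* sleep until woken */
}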
646 | extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); | 728 | extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); |
647 | 729 | ||
648 | extern int force_mwait; | 730 | extern int force_mwait; |
649 | 731 | ||
650 | extern void select_idle_routine(const struct cpuinfo_x86 *c); | 732 | extern void select_idle_routine(const struct cpuinfo_x86 *c); |
651 | 733 | ||
652 | extern unsigned long boot_option_idle_override; | 734 | extern unsigned long boot_option_idle_override; |
653 | 735 | ||
654 | extern void enable_sep_cpu(void); | 736 | extern void enable_sep_cpu(void); |
655 | extern int sysenter_setup(void); | 737 | extern int sysenter_setup(void); |
656 | 738 | ||
657 | /* Defined in head.S */ | 739 | /* Defined in head.S */ |
658 | extern struct desc_ptr early_gdt_descr; | 740 | extern struct desc_ptr early_gdt_descr; |
659 | 741 | ||
660 | extern void cpu_set_gdt(int); | 742 | extern void cpu_set_gdt(int); |
661 | extern void switch_to_new_gdt(void); | 743 | extern void switch_to_new_gdt(void); |
662 | extern void cpu_init(void); | 744 | extern void cpu_init(void); |
663 | extern void init_gdt(int cpu); | 745 | extern void init_gdt(int cpu); |
664 | 746 | ||
665 | /* from system description table in BIOS. Mostly for MCA use, but | 747 | static inline void update_debugctlmsr(unsigned long debugctlmsr) |
666 | * others may find it useful. */ | 748 | { |
667 | extern unsigned int machine_id; | 749 | #ifndef CONFIG_X86_DEBUGCTLMSR |
668 | extern unsigned int machine_submodel_id; | 750 | if (boot_cpu_data.x86 < 6) |
669 | extern unsigned int BIOS_revision; | 751 | return; |
752 | #endif | ||
753 | wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); | ||
754 | } | ||
670 | 755 | ||
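An example caller of the new helper (the function name is illustrative): set BTF, bit 1 of DEBUGCTL, so the CPU single-steps on branches; on pre-686 parts without the MSR the helper quietly does nothing:

static void enable_branch_single_step(void)
{
        update_debugctlmsr(1UL << 1);   /* DEBUGCTLMSR bit 1: BTF */
}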
671 | /* Boot loader type from the setup header */ | 756 | /* |
672 | extern int bootloader_type; | 757 | * From the system description table in BIOS. Mostly for MCA use, but |
758 | * others may find it useful: | ||
759 | */ | ||
760 | extern unsigned int machine_id; | ||
761 | extern unsigned int machine_submodel_id; | ||
762 | extern unsigned int BIOS_revision; | ||
763 | |||
764 | /* Boot loader type from the setup header: */ | ||
765 | extern int bootloader_type; | ||
673 | 766 | ||
674 | extern char ignore_fpu_irq; | 767 | extern char ignore_fpu_irq; |
675 | #define cache_line_size() (boot_cpu_data.x86_cache_alignment) | ||
676 | 768 | ||
677 | #define HAVE_ARCH_PICK_MMAP_LAYOUT 1 | 769 | #define HAVE_ARCH_PICK_MMAP_LAYOUT 1 |
678 | #define ARCH_HAS_PREFETCHW | 770 | #define ARCH_HAS_PREFETCHW |
679 | #define ARCH_HAS_SPINLOCK_PREFETCH | 771 | #define ARCH_HAS_SPINLOCK_PREFETCH |
680 | 772 | ||
681 | #ifdef CONFIG_X86_32 | 773 | #ifdef CONFIG_X86_32 |
682 | #define BASE_PREFETCH ASM_NOP4 | 774 | # define BASE_PREFETCH ASM_NOP4 |
683 | #define ARCH_HAS_PREFETCH | 775 | # define ARCH_HAS_PREFETCH |
684 | #else | 776 | #else |
685 | #define BASE_PREFETCH "prefetcht0 (%1)" | 777 | # define BASE_PREFETCH "prefetcht0 (%1)" |
686 | #endif | 778 | #endif |
687 | 779 | ||
688 | /* Prefetch instructions for Pentium III and AMD Athlon */ | 780 | /* |
689 | /* It's not worth to care about 3dnow! prefetches for the K6 | 781 | * Prefetch instructions for Pentium III (+) and AMD Athlon (+) |
690 | because they are microcoded there and very slow. | 782 | * |
691 | However we don't do prefetches for pre XP Athlons currently | 783 | * It's not worth caring about 3dnow prefetches for the K6 |
692 | That should be fixed. */ | 784 | * because they are microcoded there and very slow. |
785 | */ | ||
693 | static inline void prefetch(const void *x) | 786 | static inline void prefetch(const void *x) |
694 | { | 787 | { |
695 | alternative_input(BASE_PREFETCH, | 788 | alternative_input(BASE_PREFETCH, |
@@ -698,8 +791,11 @@ static inline void prefetch(const void *x) | |||
698 | "r" (x)); | 791 | "r" (x)); |
699 | } | 792 | } |
700 | 793 | ||
701 | /* 3dnow! prefetch to get an exclusive cache line. Useful for | 794 | /* |
702 | spinlocks to avoid one state transition in the cache coherency protocol. */ | 795 | * 3dnow prefetch to get an exclusive cache line. |
796 | * Useful for spinlocks to avoid one state transition in the | ||
797 | * cache coherency protocol: | ||
798 | */ | ||
703 | static inline void prefetchw(const void *x) | 799 | static inline void prefetchw(const void *x) |
704 | { | 800 | { |
705 | alternative_input(BASE_PREFETCH, | 801 | alternative_input(BASE_PREFETCH, |
@@ -708,21 +804,25 @@ static inline void prefetchw(const void *x) | |||
708 | "r" (x)); | 804 | "r" (x)); |
709 | } | 805 | } |
710 | 806 | ||
711 | #define spin_lock_prefetch(x) prefetchw(x) | 807 | static inline void spin_lock_prefetch(const void *x) |
808 | { | ||
809 | prefetchw(x); | ||
810 | } | ||
811 | |||
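A sketch of a typical caller: warm the second lock's cache line for ownership while taking the first, so the eventual locked write doesn't add a coherency transition:

static void lock_pair(spinlock_t *a, spinlock_t *b)
{
        spin_lock_prefetch(b);  /* prefetchw: fetch b's line for write */
        spin_lock(a);
        spin_lock(b);
        /* ... critical section ... */
        spin_unlock(b);
        spin_unlock(a);
}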
712 | #ifdef CONFIG_X86_32 | 812 | #ifdef CONFIG_X86_32 |
713 | /* | 813 | /* |
714 | * User space process size: 3GB (default). | 814 | * User space process size: 3GB (default). |
715 | */ | 815 | */ |
716 | #define TASK_SIZE (PAGE_OFFSET) | 816 | #define TASK_SIZE PAGE_OFFSET |
717 | #define STACK_TOP TASK_SIZE | 817 | #define STACK_TOP TASK_SIZE |
718 | #define STACK_TOP_MAX STACK_TOP | 818 | #define STACK_TOP_MAX STACK_TOP |
719 | 819 | ||
720 | #define INIT_THREAD { \ | 820 | #define INIT_THREAD { \ |
721 | .sp0 = sizeof(init_stack) + (long)&init_stack, \ | 821 | .sp0 = sizeof(init_stack) + (long)&init_stack, \ |
722 | .vm86_info = NULL, \ | 822 | .vm86_info = NULL, \ |
723 | .sysenter_cs = __KERNEL_CS, \ | 823 | .sysenter_cs = __KERNEL_CS, \ |
724 | .io_bitmap_ptr = NULL, \ | 824 | .io_bitmap_ptr = NULL, \ |
725 | .fs = __KERNEL_PERCPU, \ | 825 | .fs = __KERNEL_PERCPU, \ |
726 | } | 826 | } |
727 | 827 | ||
728 | /* | 828 | /* |
@@ -731,28 +831,15 @@ static inline void prefetchw(const void *x) | |||
731 | * permission bitmap. The extra byte must be all 1 bits, and must | 831 | * permission bitmap. The extra byte must be all 1 bits, and must |
732 | * be within the limit. | 832 | * be within the limit. |
733 | */ | 833 | */ |
734 | #define INIT_TSS { \ | 834 | #define INIT_TSS { \ |
735 | .x86_tss = { \ | 835 | .x86_tss = { \ |
736 | .sp0 = sizeof(init_stack) + (long)&init_stack, \ | 836 | .sp0 = sizeof(init_stack) + (long)&init_stack, \ |
737 | .ss0 = __KERNEL_DS, \ | 837 | .ss0 = __KERNEL_DS, \ |
738 | .ss1 = __KERNEL_CS, \ | 838 | .ss1 = __KERNEL_CS, \ |
739 | .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ | 839 | .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ |
740 | }, \ | 840 | }, \ |
741 | .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \ | 841 | .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \ |
742 | } | 842 | } |
743 | |||
744 | #define start_thread(regs, new_eip, new_esp) do { \ | ||
745 | __asm__("movl %0,%%gs": :"r" (0)); \ | ||
746 | regs->fs = 0; \ | ||
747 | set_fs(USER_DS); \ | ||
748 | regs->ds = __USER_DS; \ | ||
749 | regs->es = __USER_DS; \ | ||
750 | regs->ss = __USER_DS; \ | ||
751 | regs->cs = __USER_CS; \ | ||
752 | regs->ip = new_eip; \ | ||
753 | regs->sp = new_esp; \ | ||
754 | } while (0) | ||
755 | |||
756 | 843 | ||
757 | extern unsigned long thread_saved_pc(struct task_struct *tsk); | 844 | extern unsigned long thread_saved_pc(struct task_struct *tsk); |
758 | 845 | ||
@@ -780,24 +867,24 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); | |||
780 | __regs__ - 1; \ | 867 | __regs__ - 1; \ |
781 | }) | 868 | }) |
782 | 869 | ||
783 | #define KSTK_ESP(task) (task_pt_regs(task)->sp) | 870 | #define KSTK_ESP(task) (task_pt_regs(task)->sp) |
784 | 871 | ||
785 | #else | 872 | #else |
786 | /* | 873 | /* |
787 | * User space process size. 47 bits minus one guard page. | 874 | * User space process size. 47 bits minus one guard page. |
788 | */ | 875 | */ |
789 | #define TASK_SIZE64 (0x800000000000UL - 4096) | 876 | #define TASK_SIZE64 ((1UL << 47) - PAGE_SIZE) |
790 | 877 | ||
791 | /* This decides where the kernel will search for a free chunk of vm | 878 | /* This decides where the kernel will search for a free chunk of vm |
792 | * space during mmap's. | 879 | * space during mmap's. |
793 | */ | 880 | */ |
794 | #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ | 881 | #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ |
795 | 0xc0000000 : 0xFFFFe000) | 882 | 0xc0000000 : 0xFFFFe000) |
796 | 883 | ||
797 | #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ | 884 | #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ |
798 | IA32_PAGE_OFFSET : TASK_SIZE64) | 885 | IA32_PAGE_OFFSET : TASK_SIZE64) |
799 | #define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \ | 886 | #define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \ |
800 | IA32_PAGE_OFFSET : TASK_SIZE64) | 887 | IA32_PAGE_OFFSET : TASK_SIZE64) |
801 | 888 | ||
802 | #define STACK_TOP TASK_SIZE | 889 | #define STACK_TOP TASK_SIZE |
803 | #define STACK_TOP_MAX TASK_SIZE64 | 890 | #define STACK_TOP_MAX TASK_SIZE64 |
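The TASK_SIZE64 rewrite is value-preserving: with 4 KiB pages, (1UL << 47) - PAGE_SIZE equals the old literal 0x800000000000UL - 4096, but now states the intent (a 47-bit user address space minus one guard page). A standalone check, assuming PAGE_SIZE is 4096:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL        /* assumption: 4 KiB x86 pages */

    int main(void)
    {
            unsigned long old_def = 0x800000000000UL - 4096;
            unsigned long new_def = (1UL << 47) - PAGE_SIZE;

            assert(old_def == new_def);               /* identical value */
            printf("TASK_SIZE64 = %#lx\n", new_def);  /* 0x7ffffffff000 */
            return 0;
    }
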
@@ -810,33 +897,25 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); | |||
810 | .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ | 897 | .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ |
811 | } | 898 | } |
812 | 899 | ||
813 | #define start_thread(regs, new_rip, new_rsp) do { \ | ||
814 | asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \ | ||
815 | load_gs_index(0); \ | ||
816 | (regs)->ip = (new_rip); \ | ||
817 | (regs)->sp = (new_rsp); \ | ||
818 | write_pda(oldrsp, (new_rsp)); \ | ||
819 | (regs)->cs = __USER_CS; \ | ||
820 | (regs)->ss = __USER_DS; \ | ||
821 | (regs)->flags = 0x200; \ | ||
822 | set_fs(USER_DS); \ | ||
823 | } while (0) | ||
824 | |||
825 | /* | 900 | /* |
826 | * Return saved PC of a blocked thread. | 901 | * Return saved PC of a blocked thread. |
827 | * What is this good for? It will always be the scheduler or ret_from_fork. | 902 | * What is this good for? It will always be the scheduler or ret_from_fork. |
828 | */ | 903 | */ |
829 | #define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8)) | 904 | #define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8)) |
830 | 905 | ||
831 | #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) | 906 | #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) |
832 | #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ | 907 | #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ |
833 | #endif /* CONFIG_X86_64 */ | 908 | #endif /* CONFIG_X86_64 */ |
834 | 909 | ||
835 | /* This decides where the kernel will search for a free chunk of vm | 910 | extern void start_thread(struct pt_regs *regs, unsigned long new_ip, |
911 | unsigned long new_sp); | ||
912 | |||
913 | /* | ||
914 | * This decides where the kernel will search for a free chunk of vm | ||
836 | * space during mmap's. | 915 | * space during mmap's. |
837 | */ | 916 | */ |
838 | #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) | 917 | #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) |
839 | 918 | ||
840 | #define KSTK_EIP(task) (task_pt_regs(task)->ip) | 919 | #define KSTK_EIP(task) (task_pt_regs(task)->ip) |
841 | 920 | ||
842 | #endif | 921 | #endif |
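Both start_thread() macros removed above collapse into the single extern declaration, with the bodies moved out of line. A sketch of the 32-bit body transcribed directly from the deleted macro text above (the kernel's actual out-of-line function may differ in detail):

    void start_thread(struct pt_regs *regs, unsigned long new_ip,
                      unsigned long new_sp)
    {
            /* straight transcription of the removed 32-bit macro */
            asm volatile("movl %0,%%gs" : : "r" (0));
            regs->fs = 0;
            set_fs(USER_DS);
            regs->ds = __USER_DS;
            regs->es = __USER_DS;
            regs->ss = __USER_DS;
            regs->cs = __USER_CS;
            regs->ip = new_ip;
            regs->sp = new_sp;
    }
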
diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h index 68563c0709ac..1e17bcce450e 100644 --- a/include/asm-x86/proto.h +++ b/include/asm-x86/proto.h | |||
@@ -7,8 +7,6 @@ | |||
7 | 7 | ||
8 | extern void early_idt_handler(void); | 8 | extern void early_idt_handler(void); |
9 | 9 | ||
10 | extern void init_memory_mapping(unsigned long start, unsigned long end); | ||
11 | |||
12 | extern void system_call(void); | 10 | extern void system_call(void); |
13 | extern void syscall_init(void); | 11 | extern void syscall_init(void); |
14 | 12 | ||
@@ -26,7 +24,7 @@ extern int reboot_force; | |||
26 | 24 | ||
27 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); | 25 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); |
28 | 26 | ||
29 | #define round_up(x,y) (((x) + (y) - 1) & ~((y)-1)) | 27 | #define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1)) |
30 | #define round_down(x,y) ((x) & ~((y)-1)) | 28 | #define round_down(x, y) ((x) & ~((y) - 1)) |
31 | 29 | ||
32 | #endif | 30 | #endif |
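The reformatted round_up()/round_down() macros rely on y being a power of two, so that ~(y - 1) is a contiguous low-bit mask. A standalone illustration, including the failure mode for non-power-of-two alignments:

    #include <stdio.h>

    #define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))
    #define round_down(x, y) ((x) & ~((y) - 1))

    int main(void)
    {
            printf("%lu\n", round_up(4097UL, 4096UL));   /* 8192 */
            printf("%lu\n", round_down(4097UL, 4096UL)); /* 4096 */
            /* caveat: for non-power-of-two y the mask is wrong;
             * round_up(10UL, 6UL) yields 10, not the expected 12 */
            return 0;
    }
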
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h index d9e04b46a440..24ec061566c5 100644 --- a/include/asm-x86/ptrace.h +++ b/include/asm-x86/ptrace.h | |||
@@ -36,23 +36,23 @@ struct pt_regs { | |||
36 | #else /* __KERNEL__ */ | 36 | #else /* __KERNEL__ */ |
37 | 37 | ||
38 | struct pt_regs { | 38 | struct pt_regs { |
39 | long bx; | 39 | unsigned long bx; |
40 | long cx; | 40 | unsigned long cx; |
41 | long dx; | 41 | unsigned long dx; |
42 | long si; | 42 | unsigned long si; |
43 | long di; | 43 | unsigned long di; |
44 | long bp; | 44 | unsigned long bp; |
45 | long ax; | 45 | unsigned long ax; |
46 | int ds; | 46 | unsigned long ds; |
47 | int es; | 47 | unsigned long es; |
48 | int fs; | 48 | unsigned long fs; |
49 | /* int gs; */ | 49 | /* int gs; */ |
50 | long orig_ax; | 50 | unsigned long orig_ax; |
51 | long ip; | 51 | unsigned long ip; |
52 | int cs; | 52 | unsigned long cs; |
53 | long flags; | 53 | unsigned long flags; |
54 | long sp; | 54 | unsigned long sp; |
55 | int ss; | 55 | unsigned long ss; |
56 | }; | 56 | }; |
57 | 57 | ||
58 | #include <asm/vm86.h> | 58 | #include <asm/vm86.h> |
@@ -140,12 +140,16 @@ extern unsigned long | |||
140 | convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); | 140 | convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); |
141 | 141 | ||
142 | #ifdef CONFIG_X86_32 | 142 | #ifdef CONFIG_X86_32 |
143 | extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); | 143 | extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, |
144 | int error_code); | ||
144 | #else | 145 | #else |
145 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where); | 146 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where); |
146 | #endif | 147 | #endif |
147 | 148 | ||
148 | #define regs_return_value(regs) ((regs)->ax) | 149 | static inline unsigned long regs_return_value(struct pt_regs *regs) |
150 | { | ||
151 | return regs->ax; | ||
152 | } | ||
149 | 153 | ||
150 | /* | 154 | /* |
151 | * user_mode_vm(regs) determines whether a register set came from user mode. | 155 | * user_mode_vm(regs) determines whether a register set came from user mode. |
@@ -166,8 +170,8 @@ static inline int user_mode(struct pt_regs *regs) | |||
166 | static inline int user_mode_vm(struct pt_regs *regs) | 170 | static inline int user_mode_vm(struct pt_regs *regs) |
167 | { | 171 | { |
168 | #ifdef CONFIG_X86_32 | 172 | #ifdef CONFIG_X86_32 |
169 | return ((regs->cs & SEGMENT_RPL_MASK) | | 173 | return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= |
170 | (regs->flags & VM_MASK)) >= USER_RPL; | 174 | USER_RPL; |
171 | #else | 175 | #else |
172 | return user_mode(regs); | 176 | return user_mode(regs); |
173 | #endif | 177 | #endif |
@@ -176,7 +180,7 @@ static inline int user_mode_vm(struct pt_regs *regs) | |||
176 | static inline int v8086_mode(struct pt_regs *regs) | 180 | static inline int v8086_mode(struct pt_regs *regs) |
177 | { | 181 | { |
178 | #ifdef CONFIG_X86_32 | 182 | #ifdef CONFIG_X86_32 |
179 | return (regs->flags & VM_MASK); | 183 | return (regs->flags & X86_VM_MASK); |
180 | #else | 184 | #else |
181 | return 0; /* No V86 mode support in long mode */ | 185 | return 0; /* No V86 mode support in long mode */ |
182 | #endif | 186 | #endif |
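Two notes on this ptrace.h hunk: the regs_return_value() macro-to-inline conversion gains argument type checking for free, and the user_mode_vm() test is subtler than it looks. VM86 tasks run at CPL 3 but with a real-mode CS whose low bits are not a meaningful RPL, so the EFLAGS VM bit is OR-ed in to push such tasks past the USER_RPL threshold. A standalone sketch with the constant values assumed here (SEGMENT_RPL_MASK = 0x3, USER_RPL = 3, X86_VM_MASK = EFLAGS.VM = bit 17):

    #define SEGMENT_RPL_MASK 0x3UL
    #define USER_RPL         0x3UL
    #define X86_VM_MASK      (1UL << 17)   /* EFLAGS VM flag, assumed value */

    static inline int demo_user_mode_vm(unsigned long cs, unsigned long flags)
    {
            /* the RPL bits alone misclassify v8086 mode; the VM flag,
             * once OR-ed in, always makes the value >= USER_RPL */
            return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
    }
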
diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h index e9e3ffc22c07..6b5233b4f84b 100644 --- a/include/asm-x86/reboot.h +++ b/include/asm-x86/reboot.h | |||
@@ -3,8 +3,7 @@ | |||
3 | 3 | ||
4 | struct pt_regs; | 4 | struct pt_regs; |
5 | 5 | ||
6 | struct machine_ops | 6 | struct machine_ops { |
7 | { | ||
8 | void (*restart)(char *cmd); | 7 | void (*restart)(char *cmd); |
9 | void (*halt)(void); | 8 | void (*halt)(void); |
10 | void (*power_off)(void); | 9 | void (*power_off)(void); |
diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h index 46f725b0bc82..2557514d7ef6 100644 --- a/include/asm-x86/resume-trace.h +++ b/include/asm-x86/resume-trace.h | |||
@@ -3,16 +3,17 @@ | |||
3 | 3 | ||
4 | #include <asm/asm.h> | 4 | #include <asm/asm.h> |
5 | 5 | ||
6 | #define TRACE_RESUME(user) do { \ | 6 | #define TRACE_RESUME(user) \ |
7 | do { \ | ||
7 | if (pm_trace_enabled) { \ | 8 | if (pm_trace_enabled) { \ |
8 | void *tracedata; \ | 9 | void *tracedata; \ |
9 | asm volatile(_ASM_MOV_UL " $1f,%0\n" \ | 10 | asm volatile(_ASM_MOV_UL " $1f,%0\n" \ |
10 | ".section .tracedata,\"a\"\n" \ | 11 | ".section .tracedata,\"a\"\n" \ |
11 | "1:\t.word %c1\n\t" \ | 12 | "1:\t.word %c1\n\t" \ |
12 | _ASM_PTR " %c2\n" \ | 13 | _ASM_PTR " %c2\n" \ |
13 | ".previous" \ | 14 | ".previous" \ |
14 | :"=r" (tracedata) \ | 15 | :"=r" (tracedata) \ |
15 | : "i" (__LINE__), "i" (__FILE__)); \ | 16 | : "i" (__LINE__), "i" (__FILE__)); \ |
16 | generate_resume_trace(tracedata, user); \ | 17 | generate_resume_trace(tracedata, user); \ |
17 | } \ | 18 | } \ |
18 | } while (0) | 19 | } while (0) |
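TRACE_RESUME() plants the current file and line into a dedicated .tracedata ELF section so the resume-trace machinery can map a stored hash back to a source location. The same placement can be expressed without inline asm via a section attribute; a simplified standalone sketch (the struct layout and the note_resume_point() consumer are invented for illustration):

    struct trace_point {
            unsigned short line;
            const char *file;
    };

    /* stub consumer, invented for this sketch */
    static void note_resume_point(const struct trace_point *tp) { (void)tp; }

    #define MY_TRACE() do {                                         \
            static const struct trace_point tp                      \
                    __attribute__((section(".tracedata"), used)) =  \
                    { __LINE__, __FILE__ };                         \
            note_resume_point(&tp);                                 \
    } while (0)
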
diff --git a/include/asm-x86/rio.h b/include/asm-x86/rio.h index 97cdcc9887ba..3451c576e6af 100644 --- a/include/asm-x86/rio.h +++ b/include/asm-x86/rio.h | |||
@@ -11,53 +11,53 @@ | |||
11 | #define RIO_TABLE_VERSION 3 | 11 | #define RIO_TABLE_VERSION 3 |
12 | 12 | ||
13 | struct rio_table_hdr { | 13 | struct rio_table_hdr { |
14 | u8 version; /* Version number of this data structure */ | 14 | u8 version; /* Version number of this data structure */ |
15 | u8 num_scal_dev; /* # of Scalability devices */ | 15 | u8 num_scal_dev; /* # of Scalability devices */ |
16 | u8 num_rio_dev; /* # of RIO I/O devices */ | 16 | u8 num_rio_dev; /* # of RIO I/O devices */ |
17 | } __attribute__((packed)); | 17 | } __attribute__((packed)); |
18 | 18 | ||
19 | struct scal_detail { | 19 | struct scal_detail { |
20 | u8 node_id; /* Scalability Node ID */ | 20 | u8 node_id; /* Scalability Node ID */ |
21 | u32 CBAR; /* Address of 1MB register space */ | 21 | u32 CBAR; /* Address of 1MB register space */ |
22 | u8 port0node; /* Node ID port connected to: 0xFF=None */ | 22 | u8 port0node; /* Node ID port connected to: 0xFF=None */ |
23 | u8 port0port; /* Port num port connected to: 0,1,2, or */ | 23 | u8 port0port; /* Port num port connected to: 0,1,2, or */ |
24 | /* 0xFF=None */ | 24 | /* 0xFF=None */ |
25 | u8 port1node; /* Node ID port connected to: 0xFF = None */ | 25 | u8 port1node; /* Node ID port connected to: 0xFF = None */ |
26 | u8 port1port; /* Port num port connected to: 0,1,2, or */ | 26 | u8 port1port; /* Port num port connected to: 0,1,2, or */ |
27 | /* 0xFF=None */ | 27 | /* 0xFF=None */ |
28 | u8 port2node; /* Node ID port connected to: 0xFF = None */ | 28 | u8 port2node; /* Node ID port connected to: 0xFF = None */ |
29 | u8 port2port; /* Port num port connected to: 0,1,2, or */ | 29 | u8 port2port; /* Port num port connected to: 0,1,2, or */ |
30 | /* 0xFF=None */ | 30 | /* 0xFF=None */ |
31 | u8 chassis_num; /* 1 based Chassis number (1 = boot node) */ | 31 | u8 chassis_num; /* 1 based Chassis number (1 = boot node) */ |
32 | } __attribute__((packed)); | 32 | } __attribute__((packed)); |
33 | 33 | ||
34 | struct rio_detail { | 34 | struct rio_detail { |
35 | u8 node_id; /* RIO Node ID */ | 35 | u8 node_id; /* RIO Node ID */ |
36 | u32 BBAR; /* Address of 1MB register space */ | 36 | u32 BBAR; /* Address of 1MB register space */ |
37 | u8 type; /* Type of device */ | 37 | u8 type; /* Type of device */ |
38 | u8 owner_id; /* Node ID of Hurricane that owns this */ | 38 | u8 owner_id; /* Node ID of Hurricane that owns this */ |
39 | /* node */ | 39 | /* node */ |
40 | u8 port0node; /* Node ID port connected to: 0xFF=None */ | 40 | u8 port0node; /* Node ID port connected to: 0xFF=None */ |
41 | u8 port0port; /* Port num port connected to: 0,1,2, or */ | 41 | u8 port0port; /* Port num port connected to: 0,1,2, or */ |
42 | /* 0xFF=None */ | 42 | /* 0xFF=None */ |
43 | u8 port1node; /* Node ID port connected to: 0xFF=None */ | 43 | u8 port1node; /* Node ID port connected to: 0xFF=None */ |
44 | u8 port1port; /* Port num port connected to: 0,1,2, or */ | 44 | u8 port1port; /* Port num port connected to: 0,1,2, or */ |
45 | /* 0xFF=None */ | 45 | /* 0xFF=None */ |
46 | u8 first_slot; /* Lowest slot number below this Calgary */ | 46 | u8 first_slot; /* Lowest slot number below this Calgary */ |
47 | u8 status; /* Bit 0 = 1 : the XAPIC is used */ | 47 | u8 status; /* Bit 0 = 1 : the XAPIC is used */ |
48 | /* = 0 : the XAPIC is not used, ie: */ | 48 | /* = 0 : the XAPIC is not used, ie: */ |
49 | /* ints fwded to another XAPIC */ | 49 | /* ints fwded to another XAPIC */ |
50 | /* Bits1:7 Reserved */ | 50 | /* Bits1:7 Reserved */ |
51 | u8 WP_index; /* instance index - lower ones have */ | 51 | u8 WP_index; /* instance index - lower ones have */ |
52 | /* lower slot numbers/PCI bus numbers */ | 52 | /* lower slot numbers/PCI bus numbers */ |
53 | u8 chassis_num; /* 1 based Chassis number */ | 53 | u8 chassis_num; /* 1 based Chassis number */ |
54 | } __attribute__((packed)); | 54 | } __attribute__((packed)); |
55 | 55 | ||
56 | enum { | 56 | enum { |
57 | HURR_SCALABILTY = 0, /* Hurricane Scalability info */ | 57 | HURR_SCALABILTY = 0, /* Hurricane Scalability info */ |
58 | HURR_RIOIB = 2, /* Hurricane RIOIB info */ | 58 | HURR_RIOIB = 2, /* Hurricane RIOIB info */ |
59 | COMPAT_CALGARY = 4, /* Compatibility Calgary */ | 59 | COMPAT_CALGARY = 4, /* Compatibility Calgary */ |
60 | ALT_CALGARY = 5, /* Second Planar Calgary */ | 60 | ALT_CALGARY = 5, /* Second Planar Calgary */ |
61 | }; | 61 | }; |
62 | 62 | ||
63 | /* | 63 | /* |
diff --git a/include/asm-x86/rwsem.h b/include/asm-x86/rwsem.h index 520a379f4b80..750f2a3542b3 100644 --- a/include/asm-x86/rwsem.h +++ b/include/asm-x86/rwsem.h | |||
@@ -56,14 +56,16 @@ extern asmregparm struct rw_semaphore * | |||
56 | /* | 56 | /* |
57 | * the semaphore definition | 57 | * the semaphore definition |
58 | */ | 58 | */ |
59 | struct rw_semaphore { | 59 | |
60 | signed long count; | ||
61 | #define RWSEM_UNLOCKED_VALUE 0x00000000 | 60 | #define RWSEM_UNLOCKED_VALUE 0x00000000 |
62 | #define RWSEM_ACTIVE_BIAS 0x00000001 | 61 | #define RWSEM_ACTIVE_BIAS 0x00000001 |
63 | #define RWSEM_ACTIVE_MASK 0x0000ffff | 62 | #define RWSEM_ACTIVE_MASK 0x0000ffff |
64 | #define RWSEM_WAITING_BIAS (-0x00010000) | 63 | #define RWSEM_WAITING_BIAS (-0x00010000) |
65 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | 64 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
66 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 65 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
66 | |||
67 | struct rw_semaphore { | ||
68 | signed long count; | ||
67 | spinlock_t wait_lock; | 69 | spinlock_t wait_lock; |
68 | struct list_head wait_list; | 70 | struct list_head wait_list; |
69 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 71 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
@@ -78,11 +80,13 @@ struct rw_semaphore { | |||
78 | #endif | 80 | #endif |
79 | 81 | ||
80 | 82 | ||
81 | #define __RWSEM_INITIALIZER(name) \ | 83 | #define __RWSEM_INITIALIZER(name) \ |
82 | { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ | 84 | { \ |
83 | LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } | 85 | RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ |
86 | LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \ | ||
87 | } | ||
84 | 88 | ||
85 | #define DECLARE_RWSEM(name) \ | 89 | #define DECLARE_RWSEM(name) \ |
86 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | 90 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) |
87 | 91 | ||
88 | extern void __init_rwsem(struct rw_semaphore *sem, const char *name, | 92 | extern void __init_rwsem(struct rw_semaphore *sem, const char *name, |
@@ -100,16 +104,16 @@ do { \ | |||
100 | */ | 104 | */ |
101 | static inline void __down_read(struct rw_semaphore *sem) | 105 | static inline void __down_read(struct rw_semaphore *sem) |
102 | { | 106 | { |
103 | __asm__ __volatile__( | 107 | asm volatile("# beginning down_read\n\t" |
104 | "# beginning down_read\n\t" | 108 | LOCK_PREFIX " incl (%%eax)\n\t" |
105 | LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */ | 109 | /* adds 0x00000001, returns the old value */ |
106 | " jns 1f\n" | 110 | " jns 1f\n" |
107 | " call call_rwsem_down_read_failed\n" | 111 | " call call_rwsem_down_read_failed\n" |
108 | "1:\n\t" | 112 | "1:\n\t" |
109 | "# ending down_read\n\t" | 113 | "# ending down_read\n\t" |
110 | : "+m" (sem->count) | 114 | : "+m" (sem->count) |
111 | : "a" (sem) | 115 | : "a" (sem) |
112 | : "memory", "cc"); | 116 | : "memory", "cc"); |
113 | } | 117 | } |
114 | 118 | ||
115 | /* | 119 | /* |
@@ -118,21 +122,20 @@ LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value | |||
118 | static inline int __down_read_trylock(struct rw_semaphore *sem) | 122 | static inline int __down_read_trylock(struct rw_semaphore *sem) |
119 | { | 123 | { |
120 | __s32 result, tmp; | 124 | __s32 result, tmp; |
121 | __asm__ __volatile__( | 125 | asm volatile("# beginning __down_read_trylock\n\t" |
122 | "# beginning __down_read_trylock\n\t" | 126 | " movl %0,%1\n\t" |
123 | " movl %0,%1\n\t" | 127 | "1:\n\t" |
124 | "1:\n\t" | 128 | " movl %1,%2\n\t" |
125 | " movl %1,%2\n\t" | 129 | " addl %3,%2\n\t" |
126 | " addl %3,%2\n\t" | 130 | " jle 2f\n\t" |
127 | " jle 2f\n\t" | 131 | LOCK_PREFIX " cmpxchgl %2,%0\n\t" |
128 | LOCK_PREFIX " cmpxchgl %2,%0\n\t" | 132 | " jnz 1b\n\t" |
129 | " jnz 1b\n\t" | 133 | "2:\n\t" |
130 | "2:\n\t" | 134 | "# ending __down_read_trylock\n\t" |
131 | "# ending __down_read_trylock\n\t" | 135 | : "+m" (sem->count), "=&a" (result), "=&r" (tmp) |
132 | : "+m" (sem->count), "=&a" (result), "=&r" (tmp) | 136 | : "i" (RWSEM_ACTIVE_READ_BIAS) |
133 | : "i" (RWSEM_ACTIVE_READ_BIAS) | 137 | : "memory", "cc"); |
134 | : "memory", "cc"); | 138 | return result >= 0 ? 1 : 0; |
135 | return result>=0 ? 1 : 0; | ||
136 | } | 139 | } |
137 | 140 | ||
138 | /* | 141 | /* |
@@ -143,17 +146,18 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) | |||
143 | int tmp; | 146 | int tmp; |
144 | 147 | ||
145 | tmp = RWSEM_ACTIVE_WRITE_BIAS; | 148 | tmp = RWSEM_ACTIVE_WRITE_BIAS; |
146 | __asm__ __volatile__( | 149 | asm volatile("# beginning down_write\n\t" |
147 | "# beginning down_write\n\t" | 150 | LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" |
148 | LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */ | 151 | /* subtract 0x0000ffff, returns the old value */ |
149 | " testl %%edx,%%edx\n\t" /* was the count 0 before? */ | 152 | " testl %%edx,%%edx\n\t" |
150 | " jz 1f\n" | 153 | /* was the count 0 before? */ |
151 | " call call_rwsem_down_write_failed\n" | 154 | " jz 1f\n" |
152 | "1:\n" | 155 | " call call_rwsem_down_write_failed\n" |
153 | "# ending down_write" | 156 | "1:\n" |
154 | : "+m" (sem->count), "=d" (tmp) | 157 | "# ending down_write" |
155 | : "a" (sem), "1" (tmp) | 158 | : "+m" (sem->count), "=d" (tmp) |
156 | : "memory", "cc"); | 159 | : "a" (sem), "1" (tmp) |
160 | : "memory", "cc"); | ||
157 | } | 161 | } |
158 | 162 | ||
159 | static inline void __down_write(struct rw_semaphore *sem) | 163 | static inline void __down_write(struct rw_semaphore *sem) |
@@ -167,7 +171,7 @@ static inline void __down_write(struct rw_semaphore *sem) | |||
167 | static inline int __down_write_trylock(struct rw_semaphore *sem) | 171 | static inline int __down_write_trylock(struct rw_semaphore *sem) |
168 | { | 172 | { |
169 | signed long ret = cmpxchg(&sem->count, | 173 | signed long ret = cmpxchg(&sem->count, |
170 | RWSEM_UNLOCKED_VALUE, | 174 | RWSEM_UNLOCKED_VALUE, |
171 | RWSEM_ACTIVE_WRITE_BIAS); | 175 | RWSEM_ACTIVE_WRITE_BIAS); |
172 | if (ret == RWSEM_UNLOCKED_VALUE) | 176 | if (ret == RWSEM_UNLOCKED_VALUE) |
173 | return 1; | 177 | return 1; |
@@ -180,16 +184,16 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) | |||
180 | static inline void __up_read(struct rw_semaphore *sem) | 184 | static inline void __up_read(struct rw_semaphore *sem) |
181 | { | 185 | { |
182 | __s32 tmp = -RWSEM_ACTIVE_READ_BIAS; | 186 | __s32 tmp = -RWSEM_ACTIVE_READ_BIAS; |
183 | __asm__ __volatile__( | 187 | asm volatile("# beginning __up_read\n\t" |
184 | "# beginning __up_read\n\t" | 188 | LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" |
185 | LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */ | 189 | /* subtracts 1, returns the old value */ |
186 | " jns 1f\n\t" | 190 | " jns 1f\n\t" |
187 | " call call_rwsem_wake\n" | 191 | " call call_rwsem_wake\n" |
188 | "1:\n" | 192 | "1:\n" |
189 | "# ending __up_read\n" | 193 | "# ending __up_read\n" |
190 | : "+m" (sem->count), "=d" (tmp) | 194 | : "+m" (sem->count), "=d" (tmp) |
191 | : "a" (sem), "1" (tmp) | 195 | : "a" (sem), "1" (tmp) |
192 | : "memory", "cc"); | 196 | : "memory", "cc"); |
193 | } | 197 | } |
194 | 198 | ||
195 | /* | 199 | /* |
@@ -197,17 +201,18 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old valu | |||
197 | */ | 201 | */ |
198 | static inline void __up_write(struct rw_semaphore *sem) | 202 | static inline void __up_write(struct rw_semaphore *sem) |
199 | { | 203 | { |
200 | __asm__ __volatile__( | 204 | asm volatile("# beginning __up_write\n\t" |
201 | "# beginning __up_write\n\t" | 205 | " movl %2,%%edx\n\t" |
202 | " movl %2,%%edx\n\t" | 206 | LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" |
203 | LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */ | 207 | /* tries to transition |
204 | " jz 1f\n" | 208 | 0xffff0001 -> 0x00000000 */ |
205 | " call call_rwsem_wake\n" | 209 | " jz 1f\n" |
206 | "1:\n\t" | 210 | " call call_rwsem_wake\n" |
207 | "# ending __up_write\n" | 211 | "1:\n\t" |
208 | : "+m" (sem->count) | 212 | "# ending __up_write\n" |
209 | : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS) | 213 | : "+m" (sem->count) |
210 | : "memory", "cc", "edx"); | 214 | : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS) |
215 | : "memory", "cc", "edx"); | ||
211 | } | 216 | } |
212 | 217 | ||
213 | /* | 218 | /* |
@@ -215,16 +220,16 @@ LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> | |||
215 | */ | 220 | */ |
216 | static inline void __downgrade_write(struct rw_semaphore *sem) | 221 | static inline void __downgrade_write(struct rw_semaphore *sem) |
217 | { | 222 | { |
218 | __asm__ __volatile__( | 223 | asm volatile("# beginning __downgrade_write\n\t" |
219 | "# beginning __downgrade_write\n\t" | 224 | LOCK_PREFIX " addl %2,(%%eax)\n\t" |
220 | LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ | 225 | /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ |
221 | " jns 1f\n\t" | 226 | " jns 1f\n\t" |
222 | " call call_rwsem_downgrade_wake\n" | 227 | " call call_rwsem_downgrade_wake\n" |
223 | "1:\n\t" | 228 | "1:\n\t" |
224 | "# ending __downgrade_write\n" | 229 | "# ending __downgrade_write\n" |
225 | : "+m" (sem->count) | 230 | : "+m" (sem->count) |
226 | : "a" (sem), "i" (-RWSEM_WAITING_BIAS) | 231 | : "a" (sem), "i" (-RWSEM_WAITING_BIAS) |
227 | : "memory", "cc"); | 232 | : "memory", "cc"); |
228 | } | 233 | } |
229 | 234 | ||
230 | /* | 235 | /* |
@@ -232,10 +237,9 @@ LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 | |||
232 | */ | 237 | */ |
233 | static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) | 238 | static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) |
234 | { | 239 | { |
235 | __asm__ __volatile__( | 240 | asm volatile(LOCK_PREFIX "addl %1,%0" |
236 | LOCK_PREFIX "addl %1,%0" | 241 | : "+m" (sem->count) |
237 | : "+m" (sem->count) | 242 | : "ir" (delta)); |
238 | : "ir" (delta)); | ||
239 | } | 243 | } |
240 | 244 | ||
241 | /* | 245 | /* |
@@ -245,12 +249,11 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) | |||
245 | { | 249 | { |
246 | int tmp = delta; | 250 | int tmp = delta; |
247 | 251 | ||
248 | __asm__ __volatile__( | 252 | asm volatile(LOCK_PREFIX "xadd %0,%1" |
249 | LOCK_PREFIX "xadd %0,%1" | 253 | : "+r" (tmp), "+m" (sem->count) |
250 | : "+r" (tmp), "+m" (sem->count) | 254 | : : "memory"); |
251 | : : "memory"); | ||
252 | 255 | ||
253 | return tmp+delta; | 256 | return tmp + delta; |
254 | } | 257 | } |
255 | 258 | ||
256 | static inline int rwsem_is_locked(struct rw_semaphore *sem) | 259 | static inline int rwsem_is_locked(struct rw_semaphore *sem) |
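For reference, the bias constants regrouped above encode the whole rwsem state in one signed word: each reader adds 0x00000001, a writer adds RWSEM_ACTIVE_WRITE_BIAS (0xffff0001 as a 32-bit pattern), and the word going negative is exactly what makes the "jns" fast paths fall through to the contention calls. A standalone demonstration of the arithmetic, using the constants from this header:

    #include <stdio.h>

    #define RWSEM_UNLOCKED_VALUE    0x00000000
    #define RWSEM_ACTIVE_BIAS       0x00000001
    #define RWSEM_ACTIVE_MASK       0x0000ffff
    #define RWSEM_WAITING_BIAS      (-0x00010000)
    #define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
    #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

    int main(void)
    {
            long count = RWSEM_UNLOCKED_VALUE + 2 * RWSEM_ACTIVE_READ_BIAS;
            /* two readers: count stays non-negative, fast path taken */
            printf("readers: count=%ld active=%ld\n",
                   count, count & RWSEM_ACTIVE_MASK);

            count = RWSEM_UNLOCKED_VALUE + RWSEM_ACTIVE_WRITE_BIAS;
            /* one writer: count is negative, so a concurrent down_read()'s
             * "jns" test fails and it calls call_rwsem_down_read_failed */
            printf("writer:  count=%ld (negative => slow path)\n", count);
            return 0;
    }
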
diff --git a/include/asm-x86/segment.h b/include/asm-x86/segment.h index 23f0535fec61..ed5131dd7d92 100644 --- a/include/asm-x86/segment.h +++ b/include/asm-x86/segment.h | |||
@@ -191,13 +191,14 @@ | |||
191 | #define SEGMENT_TI_MASK 0x4 | 191 | #define SEGMENT_TI_MASK 0x4 |
192 | 192 | ||
193 | #define IDT_ENTRIES 256 | 193 | #define IDT_ENTRIES 256 |
194 | #define NUM_EXCEPTION_VECTORS 32 | ||
194 | #define GDT_SIZE (GDT_ENTRIES * 8) | 195 | #define GDT_SIZE (GDT_ENTRIES * 8) |
195 | #define GDT_ENTRY_TLS_ENTRIES 3 | 196 | #define GDT_ENTRY_TLS_ENTRIES 3 |
196 | #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) | 197 | #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) |
197 | 198 | ||
198 | #ifdef __KERNEL__ | 199 | #ifdef __KERNEL__ |
199 | #ifndef __ASSEMBLY__ | 200 | #ifndef __ASSEMBLY__ |
200 | extern const char early_idt_handlers[IDT_ENTRIES][10]; | 201 | extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10]; |
201 | #endif | 202 | #endif |
202 | #endif | 203 | #endif |
203 | 204 | ||
diff --git a/include/asm-x86/semaphore.h b/include/asm-x86/semaphore.h index 572c0b67a6b0..d9b2034ed1d2 100644 --- a/include/asm-x86/semaphore.h +++ b/include/asm-x86/semaphore.h | |||
@@ -1,5 +1 @@ | |||
1 | #ifdef CONFIG_X86_32 | #include <linux/semaphore.h> | |
2 | # include "semaphore_32.h" | ||
3 | #else | ||
4 | # include "semaphore_64.h" | ||
5 | #endif | ||
diff --git a/include/asm-x86/semaphore_32.h b/include/asm-x86/semaphore_32.h deleted file mode 100644 index ac96d3804d0c..000000000000 --- a/include/asm-x86/semaphore_32.h +++ /dev/null | |||
@@ -1,175 +0,0 @@ | |||
1 | #ifndef _I386_SEMAPHORE_H | ||
2 | #define _I386_SEMAPHORE_H | ||
3 | |||
4 | #include <linux/linkage.h> | ||
5 | |||
6 | #ifdef __KERNEL__ | ||
7 | |||
8 | /* | ||
9 | * SMP- and interrupt-safe semaphores.. | ||
10 | * | ||
11 | * (C) Copyright 1996 Linus Torvalds | ||
12 | * | ||
13 | * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in | ||
14 | * the original code and to make semaphore waits | ||
15 | * interruptible so that processes waiting on | ||
16 | * semaphores can be killed. | ||
17 | * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper | ||
18 | * functions in asm/semaphore-helper.h while fixing a | ||
19 | * potential and subtle race discovered by Ulrich Schmid | ||
20 | * in down_interruptible(). Since I started to play here I | ||
21 | * also implemented the `trylock' semaphore operation. | ||
22 | * 1999-07-02 Artur Skawina <skawina@geocities.com> | ||
23 | * Optimized "0(ecx)" -> "(ecx)" (the assembler does not | ||
24 | * do this). Changed calling sequences from push/jmp to | ||
25 | * traditional call/ret. | ||
26 | * Modified 2001-01-01 Andreas Franck <afranck@gmx.de> | ||
27 | * Some hacks to ensure compatibility with recent | ||
28 | * GCC snapshots, to avoid stack corruption when compiling | ||
29 | * with -fomit-frame-pointer. It's not clear whether this will | ||
30 | * be fixed in GCC, as our previous implementation was a | ||
31 | * bit dubious. | ||
32 | * | ||
33 | * If you would like to see an analysis of this implementation, please | ||
34 | * ftp to gcom.com and download the file | ||
35 | * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz. | ||
36 | * | ||
37 | */ | ||
38 | |||
39 | #include <asm/system.h> | ||
40 | #include <asm/atomic.h> | ||
41 | #include <linux/wait.h> | ||
42 | #include <linux/rwsem.h> | ||
43 | |||
44 | struct semaphore { | ||
45 | atomic_t count; | ||
46 | int sleepers; | ||
47 | wait_queue_head_t wait; | ||
48 | }; | ||
49 | |||
50 | |||
51 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
52 | { \ | ||
53 | .count = ATOMIC_INIT(n), \ | ||
54 | .sleepers = 0, \ | ||
55 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
56 | } | ||
57 | |||
58 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
59 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
60 | |||
61 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
62 | |||
63 | static inline void sema_init (struct semaphore *sem, int val) | ||
64 | { | ||
65 | /* | ||
66 | * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); | ||
67 | * | ||
68 | * I'd rather use the more flexible initialization above, but sadly | ||
69 | * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well. | ||
70 | */ | ||
71 | atomic_set(&sem->count, val); | ||
72 | sem->sleepers = 0; | ||
73 | init_waitqueue_head(&sem->wait); | ||
74 | } | ||
75 | |||
76 | static inline void init_MUTEX (struct semaphore *sem) | ||
77 | { | ||
78 | sema_init(sem, 1); | ||
79 | } | ||
80 | |||
81 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
82 | { | ||
83 | sema_init(sem, 0); | ||
84 | } | ||
85 | |||
86 | extern asmregparm void __down_failed(atomic_t *count_ptr); | ||
87 | extern asmregparm int __down_failed_interruptible(atomic_t *count_ptr); | ||
88 | extern asmregparm int __down_failed_trylock(atomic_t *count_ptr); | ||
89 | extern asmregparm void __up_wakeup(atomic_t *count_ptr); | ||
90 | |||
91 | /* | ||
92 | * This is ugly, but we want the default case to fall through. | ||
93 | * "__down_failed" is a special asm handler that calls the C | ||
94 | * routine that actually waits. See arch/i386/kernel/semaphore.c | ||
95 | */ | ||
96 | static inline void down(struct semaphore * sem) | ||
97 | { | ||
98 | might_sleep(); | ||
99 | __asm__ __volatile__( | ||
100 | "# atomic down operation\n\t" | ||
101 | LOCK_PREFIX "decl %0\n\t" /* --sem->count */ | ||
102 | "jns 2f\n" | ||
103 | "\tlea %0,%%eax\n\t" | ||
104 | "call __down_failed\n" | ||
105 | "2:" | ||
106 | :"+m" (sem->count) | ||
107 | : | ||
108 | :"memory","ax"); | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * Interruptibly try to acquire a semaphore. If we obtain | ||
113 | * it, return zero. If we were interrupted, return -EINTR | ||
114 | */ | ||
115 | static inline int down_interruptible(struct semaphore * sem) | ||
116 | { | ||
117 | int result; | ||
118 | |||
119 | might_sleep(); | ||
120 | __asm__ __volatile__( | ||
121 | "# atomic interruptible down operation\n\t" | ||
122 | "xorl %0,%0\n\t" | ||
123 | LOCK_PREFIX "decl %1\n\t" /* --sem->count */ | ||
124 | "jns 2f\n\t" | ||
125 | "lea %1,%%eax\n\t" | ||
126 | "call __down_failed_interruptible\n" | ||
127 | "2:" | ||
128 | :"=&a" (result), "+m" (sem->count) | ||
129 | : | ||
130 | :"memory"); | ||
131 | return result; | ||
132 | } | ||
133 | |||
134 | /* | ||
135 | * Non-blockingly attempt to down() a semaphore. | ||
136 | * Returns zero if we acquired it | ||
137 | */ | ||
138 | static inline int down_trylock(struct semaphore * sem) | ||
139 | { | ||
140 | int result; | ||
141 | |||
142 | __asm__ __volatile__( | ||
143 | "# atomic interruptible down operation\n\t" | ||
144 | "xorl %0,%0\n\t" | ||
145 | LOCK_PREFIX "decl %1\n\t" /* --sem->count */ | ||
146 | "jns 2f\n\t" | ||
147 | "lea %1,%%eax\n\t" | ||
148 | "call __down_failed_trylock\n\t" | ||
149 | "2:\n" | ||
150 | :"=&a" (result), "+m" (sem->count) | ||
151 | : | ||
152 | :"memory"); | ||
153 | return result; | ||
154 | } | ||
155 | |||
156 | /* | ||
157 | * Note! This is subtle. We jump to wake people up only if | ||
158 | * the semaphore was negative (== somebody was waiting on it). | ||
159 | */ | ||
160 | static inline void up(struct semaphore * sem) | ||
161 | { | ||
162 | __asm__ __volatile__( | ||
163 | "# atomic up operation\n\t" | ||
164 | LOCK_PREFIX "incl %0\n\t" /* ++sem->count */ | ||
165 | "jg 1f\n\t" | ||
166 | "lea %0,%%eax\n\t" | ||
167 | "call __up_wakeup\n" | ||
168 | "1:" | ||
169 | :"+m" (sem->count) | ||
170 | : | ||
171 | :"memory","ax"); | ||
172 | } | ||
173 | |||
174 | #endif | ||
175 | #endif | ||
diff --git a/include/asm-x86/semaphore_64.h b/include/asm-x86/semaphore_64.h deleted file mode 100644 index 79694306bf7d..000000000000 --- a/include/asm-x86/semaphore_64.h +++ /dev/null | |||
@@ -1,180 +0,0 @@ | |||
1 | #ifndef _X86_64_SEMAPHORE_H | ||
2 | #define _X86_64_SEMAPHORE_H | ||
3 | |||
4 | #include <linux/linkage.h> | ||
5 | |||
6 | #ifdef __KERNEL__ | ||
7 | |||
8 | /* | ||
9 | * SMP- and interrupt-safe semaphores.. | ||
10 | * | ||
11 | * (C) Copyright 1996 Linus Torvalds | ||
12 | * | ||
13 | * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in | ||
14 | * the original code and to make semaphore waits | ||
15 | * interruptible so that processes waiting on | ||
16 | * semaphores can be killed. | ||
17 | * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper | ||
18 | * functions in asm/semaphore-helper.h while fixing a | ||
19 | * potential and subtle race discovered by Ulrich Schmid | ||
20 | * in down_interruptible(). Since I started to play here I | ||
21 | * also implemented the `trylock' semaphore operation. | ||
22 | * 1999-07-02 Artur Skawina <skawina@geocities.com> | ||
23 | * Optimized "0(ecx)" -> "(ecx)" (the assembler does not | ||
24 | * do this). Changed calling sequences from push/jmp to | ||
25 | * traditional call/ret. | ||
26 | * Modified 2001-01-01 Andreas Franck <afranck@gmx.de> | ||
27 | * Some hacks to ensure compatibility with recent | ||
28 | * GCC snapshots, to avoid stack corruption when compiling | ||
29 | * with -fomit-frame-pointer. It's not clear whether this will | ||
30 | * be fixed in GCC, as our previous implementation was a | ||
31 | * bit dubious. | ||
32 | * | ||
33 | * If you would like to see an analysis of this implementation, please | ||
34 | * ftp to gcom.com and download the file | ||
35 | * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz. | ||
36 | * | ||
37 | */ | ||
38 | |||
39 | #include <asm/system.h> | ||
40 | #include <asm/atomic.h> | ||
41 | #include <asm/rwlock.h> | ||
42 | #include <linux/wait.h> | ||
43 | #include <linux/rwsem.h> | ||
44 | #include <linux/stringify.h> | ||
45 | |||
46 | struct semaphore { | ||
47 | atomic_t count; | ||
48 | int sleepers; | ||
49 | wait_queue_head_t wait; | ||
50 | }; | ||
51 | |||
52 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
53 | { \ | ||
54 | .count = ATOMIC_INIT(n), \ | ||
55 | .sleepers = 0, \ | ||
56 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
57 | } | ||
58 | |||
59 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
60 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
61 | |||
62 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
63 | |||
64 | static inline void sema_init (struct semaphore *sem, int val) | ||
65 | { | ||
66 | /* | ||
67 | * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); | ||
68 | * | ||
69 | * I'd rather use the more flexible initialization above, but sadly | ||
70 | * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well. | ||
71 | */ | ||
72 | atomic_set(&sem->count, val); | ||
73 | sem->sleepers = 0; | ||
74 | init_waitqueue_head(&sem->wait); | ||
75 | } | ||
76 | |||
77 | static inline void init_MUTEX (struct semaphore *sem) | ||
78 | { | ||
79 | sema_init(sem, 1); | ||
80 | } | ||
81 | |||
82 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
83 | { | ||
84 | sema_init(sem, 0); | ||
85 | } | ||
86 | |||
87 | asmlinkage void __down_failed(void /* special register calling convention */); | ||
88 | asmlinkage int __down_failed_interruptible(void /* params in registers */); | ||
89 | asmlinkage int __down_failed_trylock(void /* params in registers */); | ||
90 | asmlinkage void __up_wakeup(void /* special register calling convention */); | ||
91 | |||
92 | asmlinkage void __down(struct semaphore * sem); | ||
93 | asmlinkage int __down_interruptible(struct semaphore * sem); | ||
94 | asmlinkage int __down_trylock(struct semaphore * sem); | ||
95 | asmlinkage void __up(struct semaphore * sem); | ||
96 | |||
97 | /* | ||
98 | * This is ugly, but we want the default case to fall through. | ||
99 | * "__down_failed" is a special asm handler that calls the C | ||
100 | * routine that actually waits. See arch/x86_64/kernel/semaphore.c | ||
101 | */ | ||
102 | static inline void down(struct semaphore * sem) | ||
103 | { | ||
104 | might_sleep(); | ||
105 | |||
106 | __asm__ __volatile__( | ||
107 | "# atomic down operation\n\t" | ||
108 | LOCK_PREFIX "decl %0\n\t" /* --sem->count */ | ||
109 | "jns 1f\n\t" | ||
110 | "call __down_failed\n" | ||
111 | "1:" | ||
112 | :"=m" (sem->count) | ||
113 | :"D" (sem) | ||
114 | :"memory"); | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * Interruptibly try to acquire a semaphore. If we obtain | ||
119 | * it, return zero. If we were interrupted, return -EINTR | ||
120 | */ | ||
121 | static inline int down_interruptible(struct semaphore * sem) | ||
122 | { | ||
123 | int result; | ||
124 | |||
125 | might_sleep(); | ||
126 | |||
127 | __asm__ __volatile__( | ||
128 | "# atomic interruptible down operation\n\t" | ||
129 | "xorl %0,%0\n\t" | ||
130 | LOCK_PREFIX "decl %1\n\t" /* --sem->count */ | ||
131 | "jns 2f\n\t" | ||
132 | "call __down_failed_interruptible\n" | ||
133 | "2:\n" | ||
134 | :"=&a" (result), "=m" (sem->count) | ||
135 | :"D" (sem) | ||
136 | :"memory"); | ||
137 | return result; | ||
138 | } | ||
139 | |||
140 | /* | ||
141 | * Non-blockingly attempt to down() a semaphore. | ||
142 | * Returns zero if we acquired it | ||
143 | */ | ||
144 | static inline int down_trylock(struct semaphore * sem) | ||
145 | { | ||
146 | int result; | ||
147 | |||
148 | __asm__ __volatile__( | ||
149 | "# atomic interruptible down operation\n\t" | ||
150 | "xorl %0,%0\n\t" | ||
151 | LOCK_PREFIX "decl %1\n\t" /* --sem->count */ | ||
152 | "jns 2f\n\t" | ||
153 | "call __down_failed_trylock\n\t" | ||
154 | "2:\n" | ||
155 | :"=&a" (result), "=m" (sem->count) | ||
156 | :"D" (sem) | ||
157 | :"memory","cc"); | ||
158 | return result; | ||
159 | } | ||
160 | |||
161 | /* | ||
162 | * Note! This is subtle. We jump to wake people up only if | ||
163 | * the semaphore was negative (== somebody was waiting on it). | ||
164 | * The default case (no contention) will result in NO | ||
165 | * jumps for both down() and up(). | ||
166 | */ | ||
167 | static inline void up(struct semaphore * sem) | ||
168 | { | ||
169 | __asm__ __volatile__( | ||
170 | "# atomic up operation\n\t" | ||
171 | LOCK_PREFIX "incl %0\n\t" /* ++sem->count */ | ||
172 | "jg 1f\n\t" | ||
173 | "call __up_wakeup\n" | ||
174 | "1:" | ||
175 | :"=m" (sem->count) | ||
176 | :"D" (sem) | ||
177 | :"memory"); | ||
178 | } | ||
179 | #endif /* __KERNEL__ */ | ||
180 | #endif | ||
diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h index 071e054abd82..fa6763af8d26 100644 --- a/include/asm-x86/setup.h +++ b/include/asm-x86/setup.h | |||
@@ -4,6 +4,10 @@ | |||
4 | #define COMMAND_LINE_SIZE 2048 | 4 | #define COMMAND_LINE_SIZE 2048 |
5 | 5 | ||
6 | #ifndef __ASSEMBLY__ | 6 | #ifndef __ASSEMBLY__ |
7 | |||
8 | /* Interrupt control for vSMPowered x86_64 systems */ | ||
9 | void vsmp_init(void); | ||
10 | |||
7 | char *machine_specific_memory_setup(void); | 11 | char *machine_specific_memory_setup(void); |
8 | #ifndef CONFIG_PARAVIRT | 12 | #ifndef CONFIG_PARAVIRT |
9 | #define paravirt_post_allocator_init() do {} while (0) | 13 | #define paravirt_post_allocator_init() do {} while (0) |
@@ -51,8 +55,8 @@ struct e820entry; | |||
51 | char * __init machine_specific_memory_setup(void); | 55 | char * __init machine_specific_memory_setup(void); |
52 | char *memory_setup(void); | 56 | char *memory_setup(void); |
53 | 57 | ||
54 | int __init copy_e820_map(struct e820entry * biosmap, int nr_map); | 58 | int __init copy_e820_map(struct e820entry *biosmap, int nr_map); |
55 | int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map); | 59 | int __init sanitize_e820_map(struct e820entry *biosmap, char *pnr_map); |
56 | void __init add_memory_region(unsigned long long start, | 60 | void __init add_memory_region(unsigned long long start, |
57 | unsigned long long size, int type); | 61 | unsigned long long size, int type); |
58 | 62 | ||
diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h index d743947f4c77..2f9c884d2c0f 100644 --- a/include/asm-x86/sigcontext.h +++ b/include/asm-x86/sigcontext.h | |||
@@ -79,7 +79,7 @@ struct sigcontext { | |||
79 | unsigned long flags; | 79 | unsigned long flags; |
80 | unsigned long sp_at_signal; | 80 | unsigned long sp_at_signal; |
81 | unsigned short ss, __ssh; | 81 | unsigned short ss, __ssh; |
82 | struct _fpstate __user * fpstate; | 82 | struct _fpstate __user *fpstate; |
83 | unsigned long oldmask; | 83 | unsigned long oldmask; |
84 | unsigned long cr2; | 84 | unsigned long cr2; |
85 | }; | 85 | }; |
@@ -107,7 +107,7 @@ struct sigcontext { | |||
107 | unsigned long eflags; | 107 | unsigned long eflags; |
108 | unsigned long esp_at_signal; | 108 | unsigned long esp_at_signal; |
109 | unsigned short ss, __ssh; | 109 | unsigned short ss, __ssh; |
110 | struct _fpstate __user * fpstate; | 110 | struct _fpstate __user *fpstate; |
111 | unsigned long oldmask; | 111 | unsigned long oldmask; |
112 | unsigned long cr2; | 112 | unsigned long cr2; |
113 | }; | 113 | }; |
@@ -121,7 +121,8 @@ struct sigcontext { | |||
121 | struct _fpstate { | 121 | struct _fpstate { |
122 | __u16 cwd; | 122 | __u16 cwd; |
123 | __u16 swd; | 123 | __u16 swd; |
124 | __u16 twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */ | 124 | __u16 twd; /* Note this is not the same as the |
125 | 32bit/x87/FSAVE twd */ | ||
125 | __u16 fop; | 126 | __u16 fop; |
126 | __u64 rip; | 127 | __u64 rip; |
127 | __u64 rdp; | 128 | __u64 rdp; |
diff --git a/include/asm-x86/sigcontext32.h b/include/asm-x86/sigcontext32.h index 6ffab4fd593a..57a9686fb491 100644 --- a/include/asm-x86/sigcontext32.h +++ b/include/asm-x86/sigcontext32.h | |||
@@ -26,7 +26,7 @@ struct _fpstate_ia32 { | |||
26 | __u32 cw; | 26 | __u32 cw; |
27 | __u32 sw; | 27 | __u32 sw; |
28 | __u32 tag; /* not compatible to 64bit twd */ | 28 | __u32 tag; /* not compatible to 64bit twd */ |
29 | __u32 ipoff; | 29 | __u32 ipoff; |
30 | __u32 cssel; | 30 | __u32 cssel; |
31 | __u32 dataoff; | 31 | __u32 dataoff; |
32 | __u32 datasel; | 32 | __u32 datasel; |
@@ -39,7 +39,7 @@ struct _fpstate_ia32 { | |||
39 | __u32 mxcsr; | 39 | __u32 mxcsr; |
40 | __u32 reserved; | 40 | __u32 reserved; |
41 | struct _fpxreg _fxsr_st[8]; | 41 | struct _fpxreg _fxsr_st[8]; |
42 | struct _xmmreg _xmm[8]; /* It's actually 16 */ | 42 | struct _xmmreg _xmm[8]; /* It's actually 16 */ |
43 | __u32 padding[56]; | 43 | __u32 padding[56]; |
44 | }; | 44 | }; |
45 | 45 | ||
diff --git a/include/asm-x86/signal.h b/include/asm-x86/signal.h index aee7eca585ab..f15186d39c69 100644 --- a/include/asm-x86/signal.h +++ b/include/asm-x86/signal.h | |||
@@ -185,61 +185,61 @@ typedef struct sigaltstack { | |||
185 | 185 | ||
186 | #define __HAVE_ARCH_SIG_BITOPS | 186 | #define __HAVE_ARCH_SIG_BITOPS |
187 | 187 | ||
188 | #define sigaddset(set,sig) \ | 188 | #define sigaddset(set, sig) \ |
189 | (__builtin_constantp(sig) ? \ | 189 | (__builtin_constant_p(sig) \ |
190 | __const_sigaddset((set),(sig)) : \ | 190 | ? __const_sigaddset((set), (sig)) \ |
191 | __gen_sigaddset((set),(sig))) | 191 | : __gen_sigaddset((set), (sig))) |
192 | 192 | ||
193 | static __inline__ void __gen_sigaddset(sigset_t *set, int _sig) | 193 | static inline void __gen_sigaddset(sigset_t *set, int _sig) |
194 | { | 194 | { |
195 | __asm__("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); | 195 | asm("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); |
196 | } | 196 | } |
197 | 197 | ||
198 | static __inline__ void __const_sigaddset(sigset_t *set, int _sig) | 198 | static inline void __const_sigaddset(sigset_t *set, int _sig) |
199 | { | 199 | { |
200 | unsigned long sig = _sig - 1; | 200 | unsigned long sig = _sig - 1; |
201 | set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW); | 201 | set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW); |
202 | } | 202 | } |
203 | 203 | ||
204 | #define sigdelset(set,sig) \ | 204 | #define sigdelset(set, sig) \ |
205 | (__builtin_constant_p(sig) ? \ | 205 | (__builtin_constant_p(sig) \ |
206 | __const_sigdelset((set),(sig)) : \ | 206 | ? __const_sigdelset((set), (sig)) \ |
207 | __gen_sigdelset((set),(sig))) | 207 | : __gen_sigdelset((set), (sig))) |
208 | 208 | ||
209 | 209 | ||
210 | static __inline__ void __gen_sigdelset(sigset_t *set, int _sig) | 210 | static inline void __gen_sigdelset(sigset_t *set, int _sig) |
211 | { | 211 | { |
212 | __asm__("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); | 212 | asm("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); |
213 | } | 213 | } |
214 | 214 | ||
215 | static __inline__ void __const_sigdelset(sigset_t *set, int _sig) | 215 | static inline void __const_sigdelset(sigset_t *set, int _sig) |
216 | { | 216 | { |
217 | unsigned long sig = _sig - 1; | 217 | unsigned long sig = _sig - 1; |
218 | set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW)); | 218 | set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW)); |
219 | } | 219 | } |
220 | 220 | ||
221 | static __inline__ int __const_sigismember(sigset_t *set, int _sig) | 221 | static inline int __const_sigismember(sigset_t *set, int _sig) |
222 | { | 222 | { |
223 | unsigned long sig = _sig - 1; | 223 | unsigned long sig = _sig - 1; |
224 | return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); | 224 | return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); |
225 | } | 225 | } |
226 | 226 | ||
227 | static __inline__ int __gen_sigismember(sigset_t *set, int _sig) | 227 | static inline int __gen_sigismember(sigset_t *set, int _sig) |
228 | { | 228 | { |
229 | int ret; | 229 | int ret; |
230 | __asm__("btl %2,%1\n\tsbbl %0,%0" | 230 | asm("btl %2,%1\n\tsbbl %0,%0" |
231 | : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); | 231 | : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); |
232 | return ret; | 232 | return ret; |
233 | } | 233 | } |
234 | 234 | ||
235 | #define sigismember(set,sig) \ | 235 | #define sigismember(set, sig) \ |
236 | (__builtin_constant_p(sig) ? \ | 236 | (__builtin_constant_p(sig) \ |
237 | __const_sigismember((set),(sig)) : \ | 237 | ? __const_sigismember((set), (sig)) \ |
238 | __gen_sigismember((set),(sig))) | 238 | : __gen_sigismember((set), (sig))) |
239 | 239 | ||
240 | static __inline__ int sigfindinword(unsigned long word) | 240 | static inline int sigfindinword(unsigned long word) |
241 | { | 241 | { |
242 | __asm__("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc"); | 242 | asm("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc"); |
243 | return word; | 243 | return word; |
244 | } | 244 | } |
245 | 245 | ||
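The asm helpers above have straightforward generic C equivalents; the constant-folded variants in this hunk already show the word/bit split. A portable standalone sketch using the same formula (the my_* names are chosen here to avoid clashing with the real API):

    #include <stdio.h>

    #define MY_NSIG     64
    #define MY_NSIG_BPW (8 * sizeof(unsigned long))

    typedef struct {
            unsigned long sig[(MY_NSIG + MY_NSIG_BPW - 1) / MY_NSIG_BPW];
    } my_sigset_t;

    static void my_sigaddset(my_sigset_t *set, int _sig)
    {
            unsigned long sig = _sig - 1;   /* signal numbers are 1-based */
            set->sig[sig / MY_NSIG_BPW] |= 1UL << (sig % MY_NSIG_BPW);
    }

    static int my_sigismember(const my_sigset_t *set, int _sig)
    {
            unsigned long sig = _sig - 1;
            return 1 & (set->sig[sig / MY_NSIG_BPW] >> (sig % MY_NSIG_BPW));
    }

    int main(void)
    {
            my_sigset_t set = { {0} };
            my_sigaddset(&set, 9);          /* SIGKILL on Linux */
            printf("%d %d\n", my_sigismember(&set, 9),
                   my_sigismember(&set, 10));   /* prints "1 0" */
            return 0;
    }
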
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index f2e8319a6b0b..62ebdec394b9 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h | |||
@@ -1,5 +1,209 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifndef _ASM_X86_SMP_H_ |
2 | # include "smp_32.h" | 2 | #define _ASM_X86_SMP_H_ |
3 | #ifndef __ASSEMBLY__ | ||
4 | #include <linux/cpumask.h> | ||
5 | #include <linux/init.h> | ||
6 | #include <asm/percpu.h> | ||
7 | |||
8 | /* | ||
9 | * We need the APIC definitions automatically as part of 'smp.h' | ||
10 | */ | ||
11 | #ifdef CONFIG_X86_LOCAL_APIC | ||
12 | # include <asm/mpspec.h> | ||
13 | # include <asm/apic.h> | ||
14 | # ifdef CONFIG_X86_IO_APIC | ||
15 | # include <asm/io_apic.h> | ||
16 | # endif | ||
17 | #endif | ||
18 | #include <asm/pda.h> | ||
19 | #include <asm/thread_info.h> | ||
20 | |||
21 | extern cpumask_t cpu_callout_map; | ||
22 | extern cpumask_t cpu_initialized; | ||
23 | extern cpumask_t cpu_callin_map; | ||
24 | |||
25 | extern void (*mtrr_hook)(void); | ||
26 | extern void zap_low_mappings(void); | ||
27 | |||
28 | extern int smp_num_siblings; | ||
29 | extern unsigned int num_processors; | ||
30 | extern cpumask_t cpu_initialized; | ||
31 | |||
32 | #ifdef CONFIG_SMP | ||
33 | extern u16 x86_cpu_to_apicid_init[]; | ||
34 | extern u16 x86_bios_cpu_apicid_init[]; | ||
35 | extern void *x86_cpu_to_apicid_early_ptr; | ||
36 | extern void *x86_bios_cpu_apicid_early_ptr; | ||
3 | #else | 37 | #else |
4 | # include "smp_64.h" | 38 | #define x86_cpu_to_apicid_early_ptr NULL |
39 | #define x86_bios_cpu_apicid_early_ptr NULL | ||
40 | #endif | ||
41 | |||
42 | DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); | ||
43 | DECLARE_PER_CPU(cpumask_t, cpu_core_map); | ||
44 | DECLARE_PER_CPU(u16, cpu_llc_id); | ||
45 | DECLARE_PER_CPU(u16, x86_cpu_to_apicid); | ||
46 | DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); | ||
47 | |||
48 | /* Static state in head.S used to set up a CPU */ | ||
49 | extern struct { | ||
50 | void *sp; | ||
51 | unsigned short ss; | ||
52 | } stack_start; | ||
53 | |||
54 | struct smp_ops { | ||
55 | void (*smp_prepare_boot_cpu)(void); | ||
56 | void (*smp_prepare_cpus)(unsigned max_cpus); | ||
57 | int (*cpu_up)(unsigned cpu); | ||
58 | void (*smp_cpus_done)(unsigned max_cpus); | ||
59 | |||
60 | void (*smp_send_stop)(void); | ||
61 | void (*smp_send_reschedule)(int cpu); | ||
62 | int (*smp_call_function_mask)(cpumask_t mask, | ||
63 | void (*func)(void *info), void *info, | ||
64 | int wait); | ||
65 | }; | ||
66 | |||
67 | /* Globals due to paravirt */ | ||
68 | extern void set_cpu_sibling_map(int cpu); | ||
69 | |||
70 | #ifdef CONFIG_SMP | ||
71 | #ifndef CONFIG_PARAVIRT | ||
72 | #define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) | ||
73 | #endif | ||
74 | extern struct smp_ops smp_ops; | ||
75 | |||
76 | static inline void smp_send_stop(void) | ||
77 | { | ||
78 | smp_ops.smp_send_stop(); | ||
79 | } | ||
80 | |||
81 | static inline void smp_prepare_boot_cpu(void) | ||
82 | { | ||
83 | smp_ops.smp_prepare_boot_cpu(); | ||
84 | } | ||
85 | |||
86 | static inline void smp_prepare_cpus(unsigned int max_cpus) | ||
87 | { | ||
88 | smp_ops.smp_prepare_cpus(max_cpus); | ||
89 | } | ||
90 | |||
91 | static inline void smp_cpus_done(unsigned int max_cpus) | ||
92 | { | ||
93 | smp_ops.smp_cpus_done(max_cpus); | ||
94 | } | ||
95 | |||
96 | static inline int __cpu_up(unsigned int cpu) | ||
97 | { | ||
98 | return smp_ops.cpu_up(cpu); | ||
99 | } | ||
100 | |||
101 | static inline void smp_send_reschedule(int cpu) | ||
102 | { | ||
103 | smp_ops.smp_send_reschedule(cpu); | ||
104 | } | ||
105 | |||
106 | static inline int smp_call_function_mask(cpumask_t mask, | ||
107 | void (*func) (void *info), void *info, | ||
108 | int wait) | ||
109 | { | ||
110 | return smp_ops.smp_call_function_mask(mask, func, info, wait); | ||
111 | } | ||
112 | |||
113 | void native_smp_prepare_boot_cpu(void); | ||
114 | void native_smp_prepare_cpus(unsigned int max_cpus); | ||
115 | void native_smp_cpus_done(unsigned int max_cpus); | ||
116 | int native_cpu_up(unsigned int cpunum); | ||
117 | |||
118 | extern int __cpu_disable(void); | ||
119 | extern void __cpu_die(unsigned int cpu); | ||
120 | |||
121 | extern void prefill_possible_map(void); | ||
122 | |||
123 | void smp_store_cpu_info(int id); | ||
124 | #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) | ||
125 | |||
126 | /* We don't mark CPUs online until __cpu_up(), so we need another measure */ | ||
127 | static inline int num_booting_cpus(void) | ||
128 | { | ||
129 | return cpus_weight(cpu_callout_map); | ||
130 | } | ||
131 | #endif /* CONFIG_SMP */ | ||
132 | |||
133 | extern unsigned disabled_cpus __cpuinitdata; | ||
134 | |||
135 | #ifdef CONFIG_X86_32_SMP | ||
136 | /* | ||
137 | * This function is needed by all SMP systems. It must _always_ be valid | ||
138 | * from the initial startup. We map APIC_BASE very early in page_setup(), | ||
139 | * so this is correct in the x86 case. | ||
140 | */ | ||
141 | DECLARE_PER_CPU(int, cpu_number); | ||
142 | #define raw_smp_processor_id() (x86_read_percpu(cpu_number)) | ||
143 | extern int safe_smp_processor_id(void); | ||
144 | |||
145 | #elif defined(CONFIG_X86_64_SMP) | ||
146 | #define raw_smp_processor_id() read_pda(cpunumber) | ||
147 | |||
148 | #define stack_smp_processor_id() \ | ||
149 | ({ \ | ||
150 | struct thread_info *ti; \ | ||
151 | __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ | ||
152 | ti->cpu; \ | ||
153 | }) | ||
154 | #define safe_smp_processor_id() smp_processor_id() | ||
155 | |||
156 | #else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */ | ||
157 | #define cpu_physical_id(cpu) boot_cpu_physical_apicid | ||
158 | #define safe_smp_processor_id() 0 | ||
159 | #define stack_smp_processor_id() 0 | ||
160 | #endif | ||
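stack_smp_processor_id() above works because each kernel stack is THREAD_SIZE-aligned with struct thread_info at its base, so AND-ing the stack pointer with CURRENT_MASK rounds down to that base. A standalone sketch with assumed values (THREAD_SIZE = 8 KiB and CURRENT_MASK = ~(THREAD_SIZE - 1) are assumptions here, not quotes from the header):

    #define THREAD_SIZE  8192UL                /* assumed stack size */
    #define CURRENT_MASK (~(THREAD_SIZE - 1))  /* assumed definition */

    struct demo_thread_info {
            int cpu;                           /* read as ti->cpu above */
    };

    static inline int demo_stack_cpu(unsigned long sp)
    {
            /* round sp down to the stack base, where thread_info lives */
            struct demo_thread_info *ti =
                    (struct demo_thread_info *)(sp & CURRENT_MASK);
            return ti->cpu;
    }
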
161 | |||
162 | #ifdef CONFIG_X86_LOCAL_APIC | ||
163 | |||
164 | static inline int logical_smp_processor_id(void) | ||
165 | { | ||
166 | /* we don't want to mark this access volatile - bad code generation */ | ||
167 | return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); | ||
168 | } | ||
169 | |||
170 | #ifndef CONFIG_X86_64 | ||
171 | static inline unsigned int read_apic_id(void) | ||
172 | { | ||
173 | return *(u32 *)(APIC_BASE + APIC_ID); | ||
174 | } | ||
175 | #else | ||
176 | extern unsigned int read_apic_id(void); | ||
177 | #endif | ||
178 | |||
179 | |||
180 | # ifdef APIC_DEFINITION | ||
181 | extern int hard_smp_processor_id(void); | ||
182 | # else | ||
183 | # include <mach_apicdef.h> | ||
184 | static inline int hard_smp_processor_id(void) | ||
185 | { | ||
186 | /* we don't want to mark this access volatile - bad code generation */ | ||
187 | return GET_APIC_ID(read_apic_id()); | ||
188 | } | ||
189 | # endif /* APIC_DEFINITION */ | ||
190 | |||
191 | #else /* CONFIG_X86_LOCAL_APIC */ | ||
192 | |||
193 | # ifndef CONFIG_SMP | ||
194 | # define hard_smp_processor_id() 0 | ||
195 | # endif | ||
196 | |||
197 | #endif /* CONFIG_X86_LOCAL_APIC */ | ||
198 | |||
199 | #ifdef CONFIG_HOTPLUG_CPU | ||
200 | extern void cpu_exit_clear(void); | ||
201 | extern void cpu_uninit(void); | ||
202 | extern void remove_siblinginfo(int cpu); | ||
203 | #endif | ||
204 | |||
205 | extern void smp_alloc_memory(void); | ||
206 | extern void lock_ipi_call_lock(void); | ||
207 | extern void unlock_ipi_call_lock(void); | ||
208 | #endif /* __ASSEMBLY__ */ | ||
5 | #endif | 209 | #endif |
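The unified header's smp_ops table turns every SMP entry point into an indirect call, which is what lets a paravirt backend substitute its own bring-up and IPI paths without touching callers. A hypothetical backend sketch written against the declarations in this header (the demo_* names and the stop-handler body are invented; the native_* helpers are the ones declared above):

    static void demo_smp_send_stop(void)
    {
            /* a hypervisor-specific "stop all CPUs" notification
             * would go here */
    }

    static struct smp_ops demo_smp_ops = {
            .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
            .smp_prepare_cpus     = native_smp_prepare_cpus,
            .smp_cpus_done        = native_smp_cpus_done,
            .cpu_up               = native_cpu_up,
            .smp_send_stop        = demo_smp_send_stop,
            /* .smp_send_reschedule and .smp_call_function_mask would
             * carry the backend's own IPI implementations */
    };

    /* installation is a plain assignment to the global table,
     *         smp_ops = demo_smp_ops;
     * after which the inline wrappers (smp_send_stop() etc.)
     * dispatch into the backend */
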
diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h deleted file mode 100644 index 56152e312287..000000000000 --- a/include/asm-x86/smp_32.h +++ /dev/null | |||
@@ -1,165 +0,0 @@ | |||
1 | #ifndef __ASM_SMP_H | ||
2 | #define __ASM_SMP_H | ||
3 | |||
4 | #ifndef __ASSEMBLY__ | ||
5 | #include <linux/cpumask.h> | ||
6 | #include <linux/init.h> | ||
7 | |||
8 | /* | ||
9 | * We need the APIC definitions automatically as part of 'smp.h' | ||
10 | */ | ||
11 | #ifdef CONFIG_X86_LOCAL_APIC | ||
12 | # include <asm/mpspec.h> | ||
13 | # include <asm/apic.h> | ||
14 | # ifdef CONFIG_X86_IO_APIC | ||
15 | # include <asm/io_apic.h> | ||
16 | # endif | ||
17 | #endif | ||
18 | |||
19 | extern cpumask_t cpu_callout_map; | ||
20 | extern cpumask_t cpu_callin_map; | ||
21 | |||
22 | extern int smp_num_siblings; | ||
23 | extern unsigned int num_processors; | ||
24 | |||
25 | extern void smp_alloc_memory(void); | ||
26 | extern void lock_ipi_call_lock(void); | ||
27 | extern void unlock_ipi_call_lock(void); | ||
28 | |||
29 | extern void (*mtrr_hook) (void); | ||
30 | extern void zap_low_mappings (void); | ||
31 | |||
32 | extern u8 __initdata x86_cpu_to_apicid_init[]; | ||
33 | extern void *x86_cpu_to_apicid_early_ptr; | ||
34 | |||
35 | DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); | ||
36 | DECLARE_PER_CPU(cpumask_t, cpu_core_map); | ||
37 | DECLARE_PER_CPU(u8, cpu_llc_id); | ||
38 | DECLARE_PER_CPU(u8, x86_cpu_to_apicid); | ||
39 | |||
40 | #ifdef CONFIG_HOTPLUG_CPU | ||
41 | extern void cpu_exit_clear(void); | ||
42 | extern void cpu_uninit(void); | ||
43 | extern void remove_siblinginfo(int cpu); | ||
44 | #endif | ||
45 | |||
46 | /* Globals due to paravirt */ | ||
47 | extern void set_cpu_sibling_map(int cpu); | ||
48 | |||
49 | struct smp_ops | ||
50 | { | ||
51 | void (*smp_prepare_boot_cpu)(void); | ||
52 | void (*smp_prepare_cpus)(unsigned max_cpus); | ||
53 | int (*cpu_up)(unsigned cpu); | ||
54 | void (*smp_cpus_done)(unsigned max_cpus); | ||
55 | |||
56 | void (*smp_send_stop)(void); | ||
57 | void (*smp_send_reschedule)(int cpu); | ||
58 | int (*smp_call_function_mask)(cpumask_t mask, | ||
59 | void (*func)(void *info), void *info, | ||
60 | int wait); | ||
61 | }; | ||
62 | |||
63 | #ifdef CONFIG_SMP | ||
64 | extern struct smp_ops smp_ops; | ||
65 | |||
66 | static inline void smp_prepare_boot_cpu(void) | ||
67 | { | ||
68 | smp_ops.smp_prepare_boot_cpu(); | ||
69 | } | ||
70 | static inline void smp_prepare_cpus(unsigned int max_cpus) | ||
71 | { | ||
72 | smp_ops.smp_prepare_cpus(max_cpus); | ||
73 | } | ||
74 | static inline int __cpu_up(unsigned int cpu) | ||
75 | { | ||
76 | return smp_ops.cpu_up(cpu); | ||
77 | } | ||
78 | static inline void smp_cpus_done(unsigned int max_cpus) | ||
79 | { | ||
80 | smp_ops.smp_cpus_done(max_cpus); | ||
81 | } | ||
82 | |||
83 | static inline void smp_send_stop(void) | ||
84 | { | ||
85 | smp_ops.smp_send_stop(); | ||
86 | } | ||
87 | static inline void smp_send_reschedule(int cpu) | ||
88 | { | ||
89 | smp_ops.smp_send_reschedule(cpu); | ||
90 | } | ||
91 | static inline int smp_call_function_mask(cpumask_t mask, | ||
92 | void (*func) (void *info), void *info, | ||
93 | int wait) | ||
94 | { | ||
95 | return smp_ops.smp_call_function_mask(mask, func, info, wait); | ||
96 | } | ||
97 | |||
98 | void native_smp_prepare_boot_cpu(void); | ||
99 | void native_smp_prepare_cpus(unsigned int max_cpus); | ||
100 | int native_cpu_up(unsigned int cpunum); | ||
101 | void native_smp_cpus_done(unsigned int max_cpus); | ||
102 | |||
103 | #ifndef CONFIG_PARAVIRT | ||
104 | #define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) | ||
105 | #endif | ||
106 | |||
107 | extern int __cpu_disable(void); | ||
108 | extern void __cpu_die(unsigned int cpu); | ||
109 | |||
110 | /* | ||
111 | * This function is needed by all SMP systems. It must _always_ be valid | ||
112 | * from the initial startup. We map APIC_BASE very early in page_setup(), | ||
113 | * so this is correct in the x86 case. | ||
114 | */ | ||
115 | DECLARE_PER_CPU(int, cpu_number); | ||
116 | #define raw_smp_processor_id() (x86_read_percpu(cpu_number)) | ||
117 | |||
118 | #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) | ||
119 | |||
120 | extern int safe_smp_processor_id(void); | ||
121 | |||
122 | void __cpuinit smp_store_cpu_info(int id); | ||
123 | |||
124 | /* We don't mark CPUs online until __cpu_up(), so we need another measure */ | ||
125 | static inline int num_booting_cpus(void) | ||
126 | { | ||
127 | return cpus_weight(cpu_callout_map); | ||
128 | } | ||
129 | |||
130 | #else /* CONFIG_SMP */ | ||
131 | |||
132 | #define safe_smp_processor_id() 0 | ||
133 | #define cpu_physical_id(cpu) boot_cpu_physical_apicid | ||
134 | |||
135 | #endif /* !CONFIG_SMP */ | ||
136 | |||
137 | #ifdef CONFIG_X86_LOCAL_APIC | ||
138 | |||
139 | static __inline int logical_smp_processor_id(void) | ||
140 | { | ||
141 | /* we don't want to mark this access volatile - bad code generation */ | ||
142 | return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); | ||
143 | } | ||
144 | |||
145 | # ifdef APIC_DEFINITION | ||
146 | extern int hard_smp_processor_id(void); | ||
147 | # else | ||
148 | # include <mach_apicdef.h> | ||
149 | static inline int hard_smp_processor_id(void) | ||
150 | { | ||
151 | /* we don't want to mark this access volatile - bad code generation */ | ||
152 | return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID)); | ||
153 | } | ||
154 | # endif /* APIC_DEFINITION */ | ||
155 | |||
156 | #else /* CONFIG_X86_LOCAL_APIC */ | ||
157 | |||
158 | # ifndef CONFIG_SMP | ||
159 | # define hard_smp_processor_id() 0 | ||
160 | # endif | ||
161 | |||
162 | #endif /* CONFIG_X86_LOCAL_APIC */ | ||
163 | |||
164 | #endif /* !ASSEMBLY */ | ||
165 | #endif | ||
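
The smp_ops table this deleted header carried (it lives on in the unified smp.h) is a straightforward paravirt dispatch table: the generic smp_* entry points are thin inline wrappers around function pointers, native boot fills the pointers with the native_smp_* implementations, and a hypervisor port can swap them out before the secondary CPUs come up. A stripped-down sketch of the pattern (the hv_* name is a hypothetical stand-in):

    struct smp_ops {
            void (*smp_send_reschedule)(int cpu);
            int (*cpu_up)(unsigned cpu);
    };

    static void native_send_reschedule(int cpu) { /* send the real IPI */ }
    static int native_do_cpu_up(unsigned cpu) { return 0; /* trampoline boot */ }

    struct smp_ops smp_ops = {
            .smp_send_reschedule    = native_send_reschedule,
            .cpu_up                 = native_do_cpu_up,
    };

    static inline void smp_send_reschedule(int cpu)
    {
            smp_ops.smp_send_reschedule(cpu);       /* one indirect call */
    }

    /* A paravirt backend overrides selected pointers early in boot: */
    /*      smp_ops.cpu_up = hv_cpu_up;             */

Note that raw_smp_processor_id() above bypasses the table entirely with a direct per-CPU/PDA read; only the bring-up and IPI paths pay the indirect call.
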
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h deleted file mode 100644 index e0a75519ad21..000000000000 --- a/include/asm-x86/smp_64.h +++ /dev/null | |||
@@ -1,101 +0,0 @@ | |||
1 | #ifndef __ASM_SMP_H | ||
2 | #define __ASM_SMP_H | ||
3 | |||
4 | #include <linux/cpumask.h> | ||
5 | #include <linux/init.h> | ||
6 | |||
7 | /* | ||
8 | * We need the APIC definitions automatically as part of 'smp.h' | ||
9 | */ | ||
10 | #include <asm/apic.h> | ||
11 | #include <asm/io_apic.h> | ||
12 | #include <asm/mpspec.h> | ||
13 | #include <asm/pda.h> | ||
14 | #include <asm/thread_info.h> | ||
15 | |||
16 | extern cpumask_t cpu_callout_map; | ||
17 | extern cpumask_t cpu_initialized; | ||
18 | |||
19 | extern int smp_num_siblings; | ||
20 | extern unsigned int num_processors; | ||
21 | |||
22 | extern void smp_alloc_memory(void); | ||
23 | extern void lock_ipi_call_lock(void); | ||
24 | extern void unlock_ipi_call_lock(void); | ||
25 | |||
26 | extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), | ||
27 | void *info, int wait); | ||
28 | |||
29 | extern u16 __initdata x86_cpu_to_apicid_init[]; | ||
30 | extern u16 __initdata x86_bios_cpu_apicid_init[]; | ||
31 | extern void *x86_cpu_to_apicid_early_ptr; | ||
32 | extern void *x86_bios_cpu_apicid_early_ptr; | ||
33 | |||
34 | DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); | ||
35 | DECLARE_PER_CPU(cpumask_t, cpu_core_map); | ||
36 | DECLARE_PER_CPU(u16, cpu_llc_id); | ||
37 | DECLARE_PER_CPU(u16, x86_cpu_to_apicid); | ||
38 | DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); | ||
39 | |||
40 | static inline int cpu_present_to_apicid(int mps_cpu) | ||
41 | { | ||
42 | if (cpu_present(mps_cpu)) | ||
43 | return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); | ||
44 | else | ||
45 | return BAD_APICID; | ||
46 | } | ||
47 | |||
48 | #ifdef CONFIG_SMP | ||
49 | |||
50 | #define SMP_TRAMPOLINE_BASE 0x6000 | ||
51 | |||
52 | extern int __cpu_disable(void); | ||
53 | extern void __cpu_die(unsigned int cpu); | ||
54 | extern void prefill_possible_map(void); | ||
55 | extern unsigned __cpuinitdata disabled_cpus; | ||
56 | |||
57 | #define raw_smp_processor_id() read_pda(cpunumber) | ||
58 | #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) | ||
59 | |||
60 | #define stack_smp_processor_id() \ | ||
61 | ({ \ | ||
62 | struct thread_info *ti; \ | ||
63 | __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ | ||
64 | ti->cpu; \ | ||
65 | }) | ||
66 | |||
67 | /* | ||
68 | * On x86 all CPUs are mapped 1:1 to the APIC space. This simplifies | ||
69 | * scheduling and IPI sending and compresses data structures. | ||
70 | */ | ||
71 | static inline int num_booting_cpus(void) | ||
72 | { | ||
73 | return cpus_weight(cpu_callout_map); | ||
74 | } | ||
75 | |||
76 | extern void smp_send_reschedule(int cpu); | ||
77 | |||
78 | #else /* CONFIG_SMP */ | ||
79 | |||
80 | extern unsigned int boot_cpu_id; | ||
81 | #define cpu_physical_id(cpu) boot_cpu_id | ||
82 | #define stack_smp_processor_id() 0 | ||
83 | |||
84 | #endif /* !CONFIG_SMP */ | ||
85 | |||
86 | #define safe_smp_processor_id() smp_processor_id() | ||
87 | |||
88 | static __inline int logical_smp_processor_id(void) | ||
89 | { | ||
90 | /* we don't want to mark this access volatile - bad code generation */ | ||
91 | return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); | ||
92 | } | ||
93 | |||
94 | static inline int hard_smp_processor_id(void) | ||
95 | { | ||
96 | /* we don't want to mark this access volatile - bad code generation */ | ||
97 | return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID)); | ||
98 | } | ||
99 | |||
100 | #endif | ||
101 | |||
diff --git a/include/asm-x86/sparsemem.h b/include/asm-x86/sparsemem.h index fa58cd55411a..9bd48b0a534b 100644 --- a/include/asm-x86/sparsemem.h +++ b/include/asm-x86/sparsemem.h | |||
@@ -16,7 +16,7 @@ | |||
16 | 16 | ||
17 | #ifdef CONFIG_X86_32 | 17 | #ifdef CONFIG_X86_32 |
18 | # ifdef CONFIG_X86_PAE | 18 | # ifdef CONFIG_X86_PAE |
19 | # define SECTION_SIZE_BITS 30 | 19 | # define SECTION_SIZE_BITS 29 |
20 | # define MAX_PHYSADDR_BITS 36 | 20 | # define MAX_PHYSADDR_BITS 36 |
21 | # define MAX_PHYSMEM_BITS 36 | 21 | # define MAX_PHYSMEM_BITS 36 |
22 | # else | 22 | # else |
@@ -26,8 +26,8 @@ | |||
26 | # endif | 26 | # endif |
27 | #else /* CONFIG_X86_32 */ | 27 | #else /* CONFIG_X86_32 */ |
28 | # define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */ | 28 | # define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */ |
29 | # define MAX_PHYSADDR_BITS 40 | 29 | # define MAX_PHYSADDR_BITS 44 |
30 | # define MAX_PHYSMEM_BITS 40 | 30 | # define MAX_PHYSMEM_BITS 44 |
31 | #endif | 31 | #endif |
32 | 32 | ||
33 | #endif /* CONFIG_SPARSEMEM */ | 33 | #endif /* CONFIG_SPARSEMEM */ |
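
For scale, the new constants work out as follows: a sparsemem section covers 2^SECTION_SIZE_BITS bytes, and the section table needs 2^(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS) entries. On 32-bit PAE, dropping SECTION_SIZE_BITS from 30 to 29 halves sections from 1 GiB to 512 MiB, i.e. 2^(36-29) = 128 sections for the 64 GiB PAE limit. On 64-bit, raising MAX_PHYSMEM_BITS from 40 to 44 extends coverage from 1 TiB to 16 TiB of physical address space, in 2^(44-27) = 131072 sections of 128 MiB each.
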
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h index 23804c1890ff..bc6376f1bc5a 100644 --- a/include/asm-x86/spinlock.h +++ b/include/asm-x86/spinlock.h | |||
@@ -78,11 +78,11 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock) | |||
78 | return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1; | 78 | return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1; |
79 | } | 79 | } |
80 | 80 | ||
81 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 81 | static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) |
82 | { | 82 | { |
83 | short inc = 0x0100; | 83 | short inc = 0x0100; |
84 | 84 | ||
85 | __asm__ __volatile__ ( | 85 | asm volatile ( |
86 | LOCK_PREFIX "xaddw %w0, %1\n" | 86 | LOCK_PREFIX "xaddw %w0, %1\n" |
87 | "1:\t" | 87 | "1:\t" |
88 | "cmpb %h0, %b0\n\t" | 88 | "cmpb %h0, %b0\n\t" |
@@ -92,42 +92,40 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
92 | /* don't need lfence here, because loads are in-order */ | 92 | /* don't need lfence here, because loads are in-order */ |
93 | "jmp 1b\n" | 93 | "jmp 1b\n" |
94 | "2:" | 94 | "2:" |
95 | :"+Q" (inc), "+m" (lock->slock) | 95 | : "+Q" (inc), "+m" (lock->slock) |
96 | : | 96 | : |
97 | :"memory", "cc"); | 97 | : "memory", "cc"); |
98 | } | 98 | } |
99 | 99 | ||
100 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 100 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
101 | 101 | ||
102 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 102 | static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) |
103 | { | 103 | { |
104 | int tmp; | 104 | int tmp; |
105 | short new; | 105 | short new; |
106 | 106 | ||
107 | asm volatile( | 107 | asm volatile("movw %2,%w0\n\t" |
108 | "movw %2,%w0\n\t" | 108 | "cmpb %h0,%b0\n\t" |
109 | "cmpb %h0,%b0\n\t" | 109 | "jne 1f\n\t" |
110 | "jne 1f\n\t" | 110 | "movw %w0,%w1\n\t" |
111 | "movw %w0,%w1\n\t" | 111 | "incb %h1\n\t" |
112 | "incb %h1\n\t" | 112 | "lock ; cmpxchgw %w1,%2\n\t" |
113 | "lock ; cmpxchgw %w1,%2\n\t" | 113 | "1:" |
114 | "1:" | 114 | "sete %b1\n\t" |
115 | "sete %b1\n\t" | 115 | "movzbl %b1,%0\n\t" |
116 | "movzbl %b1,%0\n\t" | 116 | : "=&a" (tmp), "=Q" (new), "+m" (lock->slock) |
117 | :"=&a" (tmp), "=Q" (new), "+m" (lock->slock) | 117 | : |
118 | : | 118 | : "memory", "cc"); |
119 | : "memory", "cc"); | ||
120 | 119 | ||
121 | return tmp; | 120 | return tmp; |
122 | } | 121 | } |
123 | 122 | ||
124 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 123 | static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) |
125 | { | 124 | { |
126 | __asm__ __volatile__( | 125 | asm volatile(UNLOCK_LOCK_PREFIX "incb %0" |
127 | UNLOCK_LOCK_PREFIX "incb %0" | 126 | : "+m" (lock->slock) |
128 | :"+m" (lock->slock) | 127 | : |
129 | : | 128 | : "memory", "cc"); |
130 | :"memory", "cc"); | ||
131 | } | 129 | } |
132 | #else | 130 | #else |
133 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) | 131 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) |
@@ -144,60 +142,57 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock) | |||
144 | return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1; | 142 | return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1; |
145 | } | 143 | } |
146 | 144 | ||
147 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 145 | static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) |
148 | { | 146 | { |
149 | int inc = 0x00010000; | 147 | int inc = 0x00010000; |
150 | int tmp; | 148 | int tmp; |
151 | 149 | ||
152 | __asm__ __volatile__ ( | 150 | asm volatile("lock ; xaddl %0, %1\n" |
153 | "lock ; xaddl %0, %1\n" | 151 | "movzwl %w0, %2\n\t" |
154 | "movzwl %w0, %2\n\t" | 152 | "shrl $16, %0\n\t" |
155 | "shrl $16, %0\n\t" | 153 | "1:\t" |
156 | "1:\t" | 154 | "cmpl %0, %2\n\t" |
157 | "cmpl %0, %2\n\t" | 155 | "je 2f\n\t" |
158 | "je 2f\n\t" | 156 | "rep ; nop\n\t" |
159 | "rep ; nop\n\t" | 157 | "movzwl %1, %2\n\t" |
160 | "movzwl %1, %2\n\t" | 158 | /* don't need lfence here, because loads are in-order */ |
161 | /* don't need lfence here, because loads are in-order */ | 159 | "jmp 1b\n" |
162 | "jmp 1b\n" | 160 | "2:" |
163 | "2:" | 161 | : "+Q" (inc), "+m" (lock->slock), "=r" (tmp) |
164 | :"+Q" (inc), "+m" (lock->slock), "=r" (tmp) | 162 | : |
165 | : | 163 | : "memory", "cc"); |
166 | :"memory", "cc"); | ||
167 | } | 164 | } |
168 | 165 | ||
169 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 166 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
170 | 167 | ||
171 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 168 | static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) |
172 | { | 169 | { |
173 | int tmp; | 170 | int tmp; |
174 | int new; | 171 | int new; |
175 | 172 | ||
176 | asm volatile( | 173 | asm volatile("movl %2,%0\n\t" |
177 | "movl %2,%0\n\t" | 174 | "movl %0,%1\n\t" |
178 | "movl %0,%1\n\t" | 175 | "roll $16, %0\n\t" |
179 | "roll $16, %0\n\t" | 176 | "cmpl %0,%1\n\t" |
180 | "cmpl %0,%1\n\t" | 177 | "jne 1f\n\t" |
181 | "jne 1f\n\t" | 178 | "addl $0x00010000, %1\n\t" |
182 | "addl $0x00010000, %1\n\t" | 179 | "lock ; cmpxchgl %1,%2\n\t" |
183 | "lock ; cmpxchgl %1,%2\n\t" | 180 | "1:" |
184 | "1:" | 181 | "sete %b1\n\t" |
185 | "sete %b1\n\t" | 182 | "movzbl %b1,%0\n\t" |
186 | "movzbl %b1,%0\n\t" | 183 | : "=&a" (tmp), "=r" (new), "+m" (lock->slock) |
187 | :"=&a" (tmp), "=r" (new), "+m" (lock->slock) | 184 | : |
188 | : | 185 | : "memory", "cc"); |
189 | : "memory", "cc"); | ||
190 | 186 | ||
191 | return tmp; | 187 | return tmp; |
192 | } | 188 | } |
193 | 189 | ||
194 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 190 | static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) |
195 | { | 191 | { |
196 | __asm__ __volatile__( | 192 | asm volatile(UNLOCK_LOCK_PREFIX "incw %0" |
197 | UNLOCK_LOCK_PREFIX "incw %0" | 193 | : "+m" (lock->slock) |
198 | :"+m" (lock->slock) | 194 | : |
199 | : | 195 | : "memory", "cc"); |
200 | :"memory", "cc"); | ||
201 | } | 196 | } |
202 | #endif | 197 | #endif |
203 | 198 | ||
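
For readers decoding the asm above: both variants implement a ticket spinlock. The xadd atomically takes a ticket from the "next" half of ->slock while fetching the "now serving" half; the taker spins ("rep ; nop" is the pause hint) until the two halves match, and unlock simply increments the serving half — which is also what __raw_spin_is_contended computes with its (next - owner) > 1 test. The first variant uses one byte per half, so it only works for fewer than 256 CPUs; the word-sized #else variant lifts that limit. A C11 sketch of the same algorithm, with the two halves split into separate fields for clarity (illustrative only; the kernel packs them into one word as shown):

    #include <stdatomic.h>

    struct ticket_lock {
            _Atomic unsigned short next;    /* ticket handed to the next taker */
            _Atomic unsigned short owner;   /* ticket currently being served */
    };

    static void ticket_lock(struct ticket_lock *l)
    {
            unsigned short me = atomic_fetch_add(&l->next, 1);      /* the xadd */

            while (atomic_load_explicit(&l->owner, memory_order_acquire) != me)
                    ;       /* spin; the asm adds a pause ("rep ; nop") here */
    }

    static void ticket_unlock(struct ticket_lock *l)
    {
            /* the asm's "incb"/"incw"; releases the critical section */
            atomic_fetch_add_explicit(&l->owner, 1, memory_order_release);
    }

Fairness falls out of the construction: waiters acquire in strict ticket order rather than racing on every release.
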
diff --git a/include/asm-x86/srat.h b/include/asm-x86/srat.h index 165ab4bdc02b..f4bba131d068 100644 --- a/include/asm-x86/srat.h +++ b/include/asm-x86/srat.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Some of the code in this file has been gleaned from the 64 bit | 2 | * Some of the code in this file has been gleaned from the 64 bit |
3 | * discontigmem support code base. | 3 | * discontigmem support code base. |
4 | * | 4 | * |
5 | * Copyright (C) 2002, IBM Corp. | 5 | * Copyright (C) 2002, IBM Corp. |
diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h index c5d13a86dea7..b49369ad9a61 100644 --- a/include/asm-x86/string_32.h +++ b/include/asm-x86/string_32.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | 5 | ||
6 | /* Let gcc decide wether to inline or use the out of line functions */ | 6 | /* Let gcc decide whether to inline or use the out of line functions */ |
7 | 7 | ||
8 | #define __HAVE_ARCH_STRCPY | 8 | #define __HAVE_ARCH_STRCPY |
9 | extern char *strcpy(char *dest, const char *src); | 9 | extern char *strcpy(char *dest, const char *src); |
diff --git a/include/asm-x86/string_64.h b/include/asm-x86/string_64.h index e583da7918fb..52b5ab383395 100644 --- a/include/asm-x86/string_64.h +++ b/include/asm-x86/string_64.h | |||
@@ -3,26 +3,24 @@ | |||
3 | 3 | ||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | 5 | ||
6 | /* Written 2002 by Andi Kleen */ | 6 | /* Written 2002 by Andi Kleen */ |
7 | 7 | ||
8 | /* Only used for special circumstances. Stolen from i386/string.h */ | 8 | /* Only used for special circumstances. Stolen from i386/string.h */ |
9 | static __always_inline void * | 9 | static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n) |
10 | __inline_memcpy(void * to, const void * from, size_t n) | ||
11 | { | 10 | { |
12 | unsigned long d0, d1, d2; | 11 | unsigned long d0, d1, d2; |
13 | __asm__ __volatile__( | 12 | asm volatile("rep ; movsl\n\t" |
14 | "rep ; movsl\n\t" | 13 | "testb $2,%b4\n\t" |
15 | "testb $2,%b4\n\t" | 14 | "je 1f\n\t" |
16 | "je 1f\n\t" | 15 | "movsw\n" |
17 | "movsw\n" | 16 | "1:\ttestb $1,%b4\n\t" |
18 | "1:\ttestb $1,%b4\n\t" | 17 | "je 2f\n\t" |
19 | "je 2f\n\t" | 18 | "movsb\n" |
20 | "movsb\n" | 19 | "2:" |
21 | "2:" | 20 | : "=&c" (d0), "=&D" (d1), "=&S" (d2) |
22 | : "=&c" (d0), "=&D" (d1), "=&S" (d2) | 21 | : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from) |
23 | :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from) | 22 | : "memory"); |
24 | : "memory"); | 23 | return to; |
25 | return (to); | ||
26 | } | 24 | } |
27 | 25 | ||
28 | /* Even with __builtin_ the compiler may decide to use the out of line | 26 | /* Even with __builtin_ the compiler may decide to use the out of line |
@@ -32,28 +30,30 @@ return (to); | |||
32 | #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4 | 30 | #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4 |
33 | extern void *memcpy(void *to, const void *from, size_t len); | 31 | extern void *memcpy(void *to, const void *from, size_t len); |
34 | #else | 32 | #else |
35 | extern void *__memcpy(void *to, const void *from, size_t len); | 33 | extern void *__memcpy(void *to, const void *from, size_t len); |
36 | #define memcpy(dst,src,len) \ | 34 | #define memcpy(dst, src, len) \ |
37 | ({ size_t __len = (len); \ | 35 | ({ \ |
38 | void *__ret; \ | 36 | size_t __len = (len); \ |
39 | if (__builtin_constant_p(len) && __len >= 64) \ | 37 | void *__ret; \ |
40 | __ret = __memcpy((dst),(src),__len); \ | 38 | if (__builtin_constant_p(len) && __len >= 64) \ |
41 | else \ | 39 | __ret = __memcpy((dst), (src), __len); \ |
42 | __ret = __builtin_memcpy((dst),(src),__len); \ | 40 | else \ |
43 | __ret; }) | 41 | __ret = __builtin_memcpy((dst), (src), __len); \ |
42 | __ret; \ | ||
43 | }) | ||
44 | #endif | 44 | #endif |
45 | 45 | ||
46 | #define __HAVE_ARCH_MEMSET | 46 | #define __HAVE_ARCH_MEMSET |
47 | void *memset(void *s, int c, size_t n); | 47 | void *memset(void *s, int c, size_t n); |
48 | 48 | ||
49 | #define __HAVE_ARCH_MEMMOVE | 49 | #define __HAVE_ARCH_MEMMOVE |
50 | void * memmove(void * dest,const void *src,size_t count); | 50 | void *memmove(void *dest, const void *src, size_t count); |
51 | 51 | ||
52 | int memcmp(const void * cs,const void * ct,size_t count); | 52 | int memcmp(const void *cs, const void *ct, size_t count); |
53 | size_t strlen(const char * s); | 53 | size_t strlen(const char *s); |
54 | char *strcpy(char * dest,const char *src); | 54 | char *strcpy(char *dest, const char *src); |
55 | char *strcat(char * dest, const char * src); | 55 | char *strcat(char *dest, const char *src); |
56 | int strcmp(const char * cs,const char * ct); | 56 | int strcmp(const char *cs, const char *ct); |
57 | 57 | ||
58 | #endif /* __KERNEL__ */ | 58 | #endif /* __KERNEL__ */ |
59 | 59 | ||
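
The memcpy() macro above (the pre-gcc-4.3 branch) is a size dispatcher: if the length is a compile-time constant of at least 64 bytes it calls the out-of-line __memcpy, otherwise it lets __builtin_memcpy pick an inline expansion. Illustrative call sites under that rule (runtime_length() is a hypothetical stand-in for a value the compiler cannot constant-fold):

    char dst[128], src[128];
    size_t n = runtime_length();

    memcpy(dst, src, 16);   /* constant < 64: inline __builtin_memcpy */
    memcpy(dst, src, 128);  /* constant >= 64: out-of-line __memcpy call */
    memcpy(dst, src, n);    /* not constant: __builtin_memcpy again */

On gcc 4.3 and later the whole macro is dropped in favour of a plain extern declaration, trusting the compiler's own inlining heuristics.
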
diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h index 1bbda3ad7796..24e1c080aa8a 100644 --- a/include/asm-x86/suspend_32.h +++ b/include/asm-x86/suspend_32.h | |||
@@ -10,7 +10,7 @@ static inline int arch_prepare_suspend(void) { return 0; } | |||
10 | 10 | ||
11 | /* image of the saved processor state */ | 11 | /* image of the saved processor state */ |
12 | struct saved_context { | 12 | struct saved_context { |
13 | u16 es, fs, gs, ss; | 13 | u16 es, fs, gs, ss; |
14 | unsigned long cr0, cr2, cr3, cr4; | 14 | unsigned long cr0, cr2, cr3, cr4; |
15 | struct desc_ptr gdt; | 15 | struct desc_ptr gdt; |
16 | struct desc_ptr idt; | 16 | struct desc_ptr idt; |
@@ -32,11 +32,11 @@ extern unsigned long saved_edi; | |||
32 | static inline void acpi_save_register_state(unsigned long return_point) | 32 | static inline void acpi_save_register_state(unsigned long return_point) |
33 | { | 33 | { |
34 | saved_eip = return_point; | 34 | saved_eip = return_point; |
35 | asm volatile ("movl %%esp,%0" : "=m" (saved_esp)); | 35 | asm volatile("movl %%esp,%0" : "=m" (saved_esp)); |
36 | asm volatile ("movl %%ebp,%0" : "=m" (saved_ebp)); | 36 | asm volatile("movl %%ebp,%0" : "=m" (saved_ebp)); |
37 | asm volatile ("movl %%ebx,%0" : "=m" (saved_ebx)); | 37 | asm volatile("movl %%ebx,%0" : "=m" (saved_ebx)); |
38 | asm volatile ("movl %%edi,%0" : "=m" (saved_edi)); | 38 | asm volatile("movl %%edi,%0" : "=m" (saved_edi)); |
39 | asm volatile ("movl %%esi,%0" : "=m" (saved_esi)); | 39 | asm volatile("movl %%esi,%0" : "=m" (saved_esi)); |
40 | } | 40 | } |
41 | 41 | ||
42 | #define acpi_restore_register_state() do {} while (0) | 42 | #define acpi_restore_register_state() do {} while (0) |
diff --git a/include/asm-x86/suspend_64.h b/include/asm-x86/suspend_64.h index 2eb92cb81a0d..dc3262b43072 100644 --- a/include/asm-x86/suspend_64.h +++ b/include/asm-x86/suspend_64.h | |||
@@ -9,8 +9,7 @@ | |||
9 | #include <asm/desc.h> | 9 | #include <asm/desc.h> |
10 | #include <asm/i387.h> | 10 | #include <asm/i387.h> |
11 | 11 | ||
12 | static inline int | 12 | static inline int arch_prepare_suspend(void) |
13 | arch_prepare_suspend(void) | ||
14 | { | 13 | { |
15 | return 0; | 14 | return 0; |
16 | } | 15 | } |
@@ -25,7 +24,7 @@ arch_prepare_suspend(void) | |||
25 | */ | 24 | */ |
26 | struct saved_context { | 25 | struct saved_context { |
27 | struct pt_regs regs; | 26 | struct pt_regs regs; |
28 | u16 ds, es, fs, gs, ss; | 27 | u16 ds, es, fs, gs, ss; |
29 | unsigned long gs_base, gs_kernel_base, fs_base; | 28 | unsigned long gs_base, gs_kernel_base, fs_base; |
30 | unsigned long cr0, cr2, cr3, cr4, cr8; | 29 | unsigned long cr0, cr2, cr3, cr4, cr8; |
31 | unsigned long efer; | 30 | unsigned long efer; |
diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h index f9c589539a82..f5d9e74b1e4a 100644 --- a/include/asm-x86/swiotlb.h +++ b/include/asm-x86/swiotlb.h | |||
@@ -8,15 +8,15 @@ | |||
8 | extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, | 8 | extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, |
9 | size_t size, int dir); | 9 | size_t size, int dir); |
10 | extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 10 | extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
11 | dma_addr_t *dma_handle, gfp_t flags); | 11 | dma_addr_t *dma_handle, gfp_t flags); |
12 | extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, | 12 | extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, |
13 | size_t size, int dir); | 13 | size_t size, int dir); |
14 | extern void swiotlb_sync_single_for_cpu(struct device *hwdev, | 14 | extern void swiotlb_sync_single_for_cpu(struct device *hwdev, |
15 | dma_addr_t dev_addr, | 15 | dma_addr_t dev_addr, |
16 | size_t size, int dir); | 16 | size_t size, int dir); |
17 | extern void swiotlb_sync_single_for_device(struct device *hwdev, | 17 | extern void swiotlb_sync_single_for_device(struct device *hwdev, |
18 | dma_addr_t dev_addr, | 18 | dma_addr_t dev_addr, |
19 | size_t size, int dir); | 19 | size_t size, int dir); |
20 | extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev, | 20 | extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev, |
21 | dma_addr_t dev_addr, | 21 | dma_addr_t dev_addr, |
22 | unsigned long offset, | 22 | unsigned long offset, |
@@ -26,18 +26,18 @@ extern void swiotlb_sync_single_range_for_device(struct device *hwdev, | |||
26 | unsigned long offset, | 26 | unsigned long offset, |
27 | size_t size, int dir); | 27 | size_t size, int dir); |
28 | extern void swiotlb_sync_sg_for_cpu(struct device *hwdev, | 28 | extern void swiotlb_sync_sg_for_cpu(struct device *hwdev, |
29 | struct scatterlist *sg, int nelems, | 29 | struct scatterlist *sg, int nelems, |
30 | int dir); | 30 | int dir); |
31 | extern void swiotlb_sync_sg_for_device(struct device *hwdev, | 31 | extern void swiotlb_sync_sg_for_device(struct device *hwdev, |
32 | struct scatterlist *sg, int nelems, | 32 | struct scatterlist *sg, int nelems, |
33 | int dir); | 33 | int dir); |
34 | extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, | 34 | extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, |
35 | int nents, int direction); | 35 | int nents, int direction); |
36 | extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, | 36 | extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, |
37 | int nents, int direction); | 37 | int nents, int direction); |
38 | extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); | 38 | extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); |
39 | extern void swiotlb_free_coherent (struct device *hwdev, size_t size, | 39 | extern void swiotlb_free_coherent(struct device *hwdev, size_t size, |
40 | void *vaddr, dma_addr_t dma_handle); | 40 | void *vaddr, dma_addr_t dma_handle); |
41 | extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); | 41 | extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); |
42 | extern void swiotlb_init(void); | 42 | extern void swiotlb_init(void); |
43 | 43 | ||
diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h index 6b775c905666..b47a1d0b8a83 100644 --- a/include/asm-x86/sync_bitops.h +++ b/include/asm-x86/sync_bitops.h | |||
@@ -13,7 +13,7 @@ | |||
13 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | 13 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #define ADDR (*(volatile long *) addr) | 16 | #define ADDR (*(volatile long *)addr) |
17 | 17 | ||
18 | /** | 18 | /** |
19 | * sync_set_bit - Atomically set a bit in memory | 19 | * sync_set_bit - Atomically set a bit in memory |
@@ -26,12 +26,12 @@ | |||
26 | * Note that @nr may be almost arbitrarily large; this function is not | 26 | * Note that @nr may be almost arbitrarily large; this function is not |
27 | * restricted to acting on a single-word quantity. | 27 | * restricted to acting on a single-word quantity. |
28 | */ | 28 | */ |
29 | static inline void sync_set_bit(int nr, volatile unsigned long * addr) | 29 | static inline void sync_set_bit(int nr, volatile unsigned long *addr) |
30 | { | 30 | { |
31 | __asm__ __volatile__("lock; btsl %1,%0" | 31 | asm volatile("lock; btsl %1,%0" |
32 | :"+m" (ADDR) | 32 | : "+m" (ADDR) |
33 | :"Ir" (nr) | 33 | : "Ir" (nr) |
34 | : "memory"); | 34 | : "memory"); |
35 | } | 35 | } |
36 | 36 | ||
37 | /** | 37 | /** |
@@ -44,12 +44,12 @@ static inline void sync_set_bit(int nr, volatile unsigned long * addr) | |||
44 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | 44 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
45 | * in order to ensure changes are visible on other processors. | 45 | * in order to ensure changes are visible on other processors. |
46 | */ | 46 | */ |
47 | static inline void sync_clear_bit(int nr, volatile unsigned long * addr) | 47 | static inline void sync_clear_bit(int nr, volatile unsigned long *addr) |
48 | { | 48 | { |
49 | __asm__ __volatile__("lock; btrl %1,%0" | 49 | asm volatile("lock; btrl %1,%0" |
50 | :"+m" (ADDR) | 50 | : "+m" (ADDR) |
51 | :"Ir" (nr) | 51 | : "Ir" (nr) |
52 | : "memory"); | 52 | : "memory"); |
53 | } | 53 | } |
54 | 54 | ||
55 | /** | 55 | /** |
@@ -61,12 +61,12 @@ static inline void sync_clear_bit(int nr, volatile unsigned long * addr) | |||
61 | * Note that @nr may be almost arbitrarily large; this function is not | 61 | * Note that @nr may be almost arbitrarily large; this function is not |
62 | * restricted to acting on a single-word quantity. | 62 | * restricted to acting on a single-word quantity. |
63 | */ | 63 | */ |
64 | static inline void sync_change_bit(int nr, volatile unsigned long * addr) | 64 | static inline void sync_change_bit(int nr, volatile unsigned long *addr) |
65 | { | 65 | { |
66 | __asm__ __volatile__("lock; btcl %1,%0" | 66 | asm volatile("lock; btcl %1,%0" |
67 | :"+m" (ADDR) | 67 | : "+m" (ADDR) |
68 | :"Ir" (nr) | 68 | : "Ir" (nr) |
69 | : "memory"); | 69 | : "memory"); |
70 | } | 70 | } |
71 | 71 | ||
72 | /** | 72 | /** |
@@ -77,13 +77,13 @@ static inline void sync_change_bit(int nr, volatile unsigned long * addr) | |||
77 | * This operation is atomic and cannot be reordered. | 77 | * This operation is atomic and cannot be reordered. |
78 | * It also implies a memory barrier. | 78 | * It also implies a memory barrier. |
79 | */ | 79 | */ |
80 | static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr) | 80 | static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr) |
81 | { | 81 | { |
82 | int oldbit; | 82 | int oldbit; |
83 | 83 | ||
84 | __asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0" | 84 | asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0" |
85 | :"=r" (oldbit),"+m" (ADDR) | 85 | : "=r" (oldbit), "+m" (ADDR) |
86 | :"Ir" (nr) : "memory"); | 86 | : "Ir" (nr) : "memory"); |
87 | return oldbit; | 87 | return oldbit; |
88 | } | 88 | } |
89 | 89 | ||
@@ -95,13 +95,13 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr) | |||
95 | * This operation is atomic and cannot be reordered. | 95 | * This operation is atomic and cannot be reordered. |
96 | * It also implies a memory barrier. | 96 | * It also implies a memory barrier. |
97 | */ | 97 | */ |
98 | static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr) | 98 | static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr) |
99 | { | 99 | { |
100 | int oldbit; | 100 | int oldbit; |
101 | 101 | ||
102 | __asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0" | 102 | asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0" |
103 | :"=r" (oldbit),"+m" (ADDR) | 103 | : "=r" (oldbit), "+m" (ADDR) |
104 | :"Ir" (nr) : "memory"); | 104 | : "Ir" (nr) : "memory"); |
105 | return oldbit; | 105 | return oldbit; |
106 | } | 106 | } |
107 | 107 | ||
@@ -113,36 +113,17 @@ static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr) | |||
113 | * This operation is atomic and cannot be reordered. | 113 | * This operation is atomic and cannot be reordered. |
114 | * It also implies a memory barrier. | 114 | * It also implies a memory barrier. |
115 | */ | 115 | */ |
116 | static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr) | 116 | static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr) |
117 | { | 117 | { |
118 | int oldbit; | 118 | int oldbit; |
119 | 119 | ||
120 | __asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0" | 120 | asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0" |
121 | :"=r" (oldbit),"+m" (ADDR) | 121 | : "=r" (oldbit), "+m" (ADDR) |
122 | :"Ir" (nr) : "memory"); | 122 | : "Ir" (nr) : "memory"); |
123 | return oldbit; | 123 | return oldbit; |
124 | } | 124 | } |
125 | 125 | ||
126 | static __always_inline int sync_constant_test_bit(int nr, const volatile unsigned long *addr) | 126 | #define sync_test_bit(nr, addr) test_bit(nr, addr) |
127 | { | ||
128 | return ((1UL << (nr & 31)) & | ||
129 | (((const volatile unsigned int *)addr)[nr >> 5])) != 0; | ||
130 | } | ||
131 | |||
132 | static inline int sync_var_test_bit(int nr, const volatile unsigned long * addr) | ||
133 | { | ||
134 | int oldbit; | ||
135 | |||
136 | __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0" | ||
137 | :"=r" (oldbit) | ||
138 | :"m" (ADDR),"Ir" (nr)); | ||
139 | return oldbit; | ||
140 | } | ||
141 | |||
142 | #define sync_test_bit(nr,addr) \ | ||
143 | (__builtin_constant_p(nr) ? \ | ||
144 | sync_constant_test_bit((nr),(addr)) : \ | ||
145 | sync_var_test_bit((nr),(addr))) | ||
146 | 127 | ||
147 | #undef ADDR | 128 | #undef ADDR |
148 | 129 | ||
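
A note on why the sync_ variants exist at all: they hardcode the lock prefix, whereas the regular bitops use LOCK_PREFIX, which the alternatives machinery may patch out on uniprocessor kernels. The sync_ forms therefore stay atomic even when the word is shared with an agent outside this kernel image, a hypervisor being the typical case. A hypothetical usage sketch for such a shared flag word:

    /* a word of event bits, mapped shared with another domain (assumed) */
    static volatile unsigned long shared_pending[1];

    static void post_event(int nr)
    {
            sync_set_bit(nr, shared_pending);       /* always "lock; btsl" */
    }

    static int take_event(int nr)
    {
            /* "lock; btrl" + "sbbl": atomically clear and return old bit */
            return sync_test_and_clear_bit(nr, shared_pending);
    }
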
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h index 9cff02ffe6c2..a2f04cd79b29 100644 --- a/include/asm-x86/system.h +++ b/include/asm-x86/system.h | |||
@@ -27,22 +27,44 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
27 | * Saving eflags is important. It switches not only IOPL between tasks, | 27 | * Saving eflags is important. It switches not only IOPL between tasks, |
28 | * it also protects other tasks from NT leaking through sysenter etc. | 28 | * it also protects other tasks from NT leaking through sysenter etc. |
29 | */ | 29 | */ |
30 | #define switch_to(prev, next, last) do { \ | 30 | #define switch_to(prev, next, last) \ |
31 | unsigned long esi, edi; \ | 31 | do { \ |
32 | asm volatile("pushfl\n\t" /* Save flags */ \ | 32 | /* \ |
33 | "pushl %%ebp\n\t" \ | 33 | * Context-switching clobbers all registers, so we clobber \ |
34 | "movl %%esp,%0\n\t" /* save ESP */ \ | 34 | * them explicitly, via unused output variables. \ |
35 | "movl %5,%%esp\n\t" /* restore ESP */ \ | 35 | * (EAX and EBP is not listed because EBP is saved/restored \ |
36 | "movl $1f,%1\n\t" /* save EIP */ \ | 36 | * explicitly for wchan access and EAX is the return value of \ |
37 | "pushl %6\n\t" /* restore EIP */ \ | 37 | * __switch_to()) \ |
38 | "jmp __switch_to\n" \ | 38 | */ \ |
39 | unsigned long ebx, ecx, edx, esi, edi; \ | ||
40 | \ | ||
41 | asm volatile("pushfl\n\t" /* save flags */ \ | ||
42 | "pushl %%ebp\n\t" /* save EBP */ \ | ||
43 | "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \ | ||
44 | "movl %[next_sp],%%esp\n\t" /* restore ESP */ \ | ||
45 | "movl $1f,%[prev_ip]\n\t" /* save EIP */ \ | ||
46 | "pushl %[next_ip]\n\t" /* restore EIP */ \ | ||
47 | "jmp __switch_to\n" /* regparm call */ \ | ||
39 | "1:\t" \ | 48 | "1:\t" \ |
40 | "popl %%ebp\n\t" \ | 49 | "popl %%ebp\n\t" /* restore EBP */ \ |
41 | "popfl" \ | 50 | "popfl\n" /* restore flags */ \ |
42 | :"=m" (prev->thread.sp), "=m" (prev->thread.ip), \ | 51 | \ |
43 | "=a" (last), "=S" (esi), "=D" (edi) \ | 52 | /* output parameters */ \ |
44 | :"m" (next->thread.sp), "m" (next->thread.ip), \ | 53 | : [prev_sp] "=m" (prev->thread.sp), \ |
45 | "2" (prev), "d" (next)); \ | 54 | [prev_ip] "=m" (prev->thread.ip), \ |
55 | "=a" (last), \ | ||
56 | \ | ||
57 | /* clobbered output registers: */ \ | ||
58 | "=b" (ebx), "=c" (ecx), "=d" (edx), \ | ||
59 | "=S" (esi), "=D" (edi) \ | ||
60 | \ | ||
61 | /* input parameters: */ \ | ||
62 | : [next_sp] "m" (next->thread.sp), \ | ||
63 | [next_ip] "m" (next->thread.ip), \ | ||
64 | \ | ||
65 | /* regparm parameters for __switch_to(): */ \ | ||
66 | [prev] "a" (prev), \ | ||
67 | [next] "d" (next)); \ | ||
46 | } while (0) | 68 | } while (0) |
47 | 69 | ||
48 | /* | 70 | /* |
@@ -122,35 +144,34 @@ extern void load_gs_index(unsigned); | |||
122 | */ | 144 | */ |
123 | #define loadsegment(seg, value) \ | 145 | #define loadsegment(seg, value) \ |
124 | asm volatile("\n" \ | 146 | asm volatile("\n" \ |
125 | "1:\t" \ | 147 | "1:\t" \ |
126 | "movl %k0,%%" #seg "\n" \ | 148 | "movl %k0,%%" #seg "\n" \ |
127 | "2:\n" \ | 149 | "2:\n" \ |
128 | ".section .fixup,\"ax\"\n" \ | 150 | ".section .fixup,\"ax\"\n" \ |
129 | "3:\t" \ | 151 | "3:\t" \ |
130 | "movl %k1, %%" #seg "\n\t" \ | 152 | "movl %k1, %%" #seg "\n\t" \ |
131 | "jmp 2b\n" \ | 153 | "jmp 2b\n" \ |
132 | ".previous\n" \ | 154 | ".previous\n" \ |
133 | _ASM_EXTABLE(1b,3b) \ | 155 | _ASM_EXTABLE(1b,3b) \ |
134 | : :"r" (value), "r" (0)) | 156 | : :"r" (value), "r" (0)) |
135 | 157 | ||
136 | 158 | ||
137 | /* | 159 | /* |
138 | * Save a segment register away | 160 | * Save a segment register away |
139 | */ | 161 | */ |
140 | #define savesegment(seg, value) \ | 162 | #define savesegment(seg, value) \ |
141 | asm volatile("mov %%" #seg ",%0":"=rm" (value)) | 163 | asm volatile("mov %%" #seg ",%0":"=rm" (value)) |
142 | 164 | ||
143 | static inline unsigned long get_limit(unsigned long segment) | 165 | static inline unsigned long get_limit(unsigned long segment) |
144 | { | 166 | { |
145 | unsigned long __limit; | 167 | unsigned long __limit; |
146 | __asm__("lsll %1,%0" | 168 | asm("lsll %1,%0" : "=r" (__limit) : "r" (segment)); |
147 | :"=r" (__limit):"r" (segment)); | 169 | return __limit + 1; |
148 | return __limit+1; | ||
149 | } | 170 | } |
150 | 171 | ||
151 | static inline void native_clts(void) | 172 | static inline void native_clts(void) |
152 | { | 173 | { |
153 | asm volatile ("clts"); | 174 | asm volatile("clts"); |
154 | } | 175 | } |
155 | 176 | ||
156 | /* | 177 | /* |
@@ -165,43 +186,43 @@ static unsigned long __force_order; | |||
165 | static inline unsigned long native_read_cr0(void) | 186 | static inline unsigned long native_read_cr0(void) |
166 | { | 187 | { |
167 | unsigned long val; | 188 | unsigned long val; |
168 | asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order)); | 189 | asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order)); |
169 | return val; | 190 | return val; |
170 | } | 191 | } |
171 | 192 | ||
172 | static inline void native_write_cr0(unsigned long val) | 193 | static inline void native_write_cr0(unsigned long val) |
173 | { | 194 | { |
174 | asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order)); | 195 | asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order)); |
175 | } | 196 | } |
176 | 197 | ||
177 | static inline unsigned long native_read_cr2(void) | 198 | static inline unsigned long native_read_cr2(void) |
178 | { | 199 | { |
179 | unsigned long val; | 200 | unsigned long val; |
180 | asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order)); | 201 | asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order)); |
181 | return val; | 202 | return val; |
182 | } | 203 | } |
183 | 204 | ||
184 | static inline void native_write_cr2(unsigned long val) | 205 | static inline void native_write_cr2(unsigned long val) |
185 | { | 206 | { |
186 | asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order)); | 207 | asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order)); |
187 | } | 208 | } |
188 | 209 | ||
189 | static inline unsigned long native_read_cr3(void) | 210 | static inline unsigned long native_read_cr3(void) |
190 | { | 211 | { |
191 | unsigned long val; | 212 | unsigned long val; |
192 | asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order)); | 213 | asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order)); |
193 | return val; | 214 | return val; |
194 | } | 215 | } |
195 | 216 | ||
196 | static inline void native_write_cr3(unsigned long val) | 217 | static inline void native_write_cr3(unsigned long val) |
197 | { | 218 | { |
198 | asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order)); | 219 | asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order)); |
199 | } | 220 | } |
200 | 221 | ||
201 | static inline unsigned long native_read_cr4(void) | 222 | static inline unsigned long native_read_cr4(void) |
202 | { | 223 | { |
203 | unsigned long val; | 224 | unsigned long val; |
204 | asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order)); | 225 | asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order)); |
205 | return val; | 226 | return val; |
206 | } | 227 | } |
207 | 228 | ||
@@ -213,7 +234,7 @@ static inline unsigned long native_read_cr4_safe(void) | |||
213 | #ifdef CONFIG_X86_32 | 234 | #ifdef CONFIG_X86_32 |
214 | asm volatile("1: mov %%cr4, %0\n" | 235 | asm volatile("1: mov %%cr4, %0\n" |
215 | "2:\n" | 236 | "2:\n" |
216 | _ASM_EXTABLE(1b,2b) | 237 | _ASM_EXTABLE(1b, 2b) |
217 | : "=r" (val), "=m" (__force_order) : "0" (0)); | 238 | : "=r" (val), "=m" (__force_order) : "0" (0)); |
218 | #else | 239 | #else |
219 | val = native_read_cr4(); | 240 | val = native_read_cr4(); |
@@ -223,7 +244,7 @@ static inline unsigned long native_read_cr4_safe(void) | |||
223 | 244 | ||
224 | static inline void native_write_cr4(unsigned long val) | 245 | static inline void native_write_cr4(unsigned long val) |
225 | { | 246 | { |
226 | asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order)); | 247 | asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order)); |
227 | } | 248 | } |
228 | 249 | ||
229 | #ifdef CONFIG_X86_64 | 250 | #ifdef CONFIG_X86_64 |
@@ -244,6 +265,7 @@ static inline void native_wbinvd(void) | |||
244 | { | 265 | { |
245 | asm volatile("wbinvd": : :"memory"); | 266 | asm volatile("wbinvd": : :"memory"); |
246 | } | 267 | } |
268 | |||
247 | #ifdef CONFIG_PARAVIRT | 269 | #ifdef CONFIG_PARAVIRT |
248 | #include <asm/paravirt.h> | 270 | #include <asm/paravirt.h> |
249 | #else | 271 | #else |
@@ -276,7 +298,7 @@ static inline void clflush(volatile void *__p) | |||
276 | asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p)); | 298 | asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p)); |
277 | } | 299 | } |
278 | 300 | ||
279 | #define nop() __asm__ __volatile__ ("nop") | 301 | #define nop() asm volatile ("nop") |
280 | 302 | ||
281 | void disable_hlt(void); | 303 | void disable_hlt(void); |
282 | void enable_hlt(void); | 304 | void enable_hlt(void); |
@@ -296,16 +318,7 @@ void default_idle(void); | |||
296 | */ | 318 | */ |
297 | #ifdef CONFIG_X86_32 | 319 | #ifdef CONFIG_X86_32 |
298 | /* | 320 | /* |
299 | * For now, "wmb()" doesn't actually do anything, as all | 321 | * Some non-Intel clones support out of order store. wmb() ceases to be a |
300 | * Intel CPU's follow what Intel calls a *Processor Order*, | ||
301 | * in which all writes are seen in the program order even | ||
302 | * outside the CPU. | ||
303 | * | ||
304 | * I expect future Intel CPU's to have a weaker ordering, | ||
305 | * but I'd also expect them to finally get their act together | ||
306 | * and add some real memory barriers if so. | ||
307 | * | ||
308 | * Some non intel clones support out of order store. wmb() ceases to be a | ||
309 | * nop for these. | 322 | * nop for these. |
310 | */ | 323 | */ |
311 | #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) | 324 | #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) |
@@ -384,7 +397,7 @@ void default_idle(void); | |||
384 | # define smp_wmb() barrier() | 397 | # define smp_wmb() barrier() |
385 | #endif | 398 | #endif |
386 | #define smp_read_barrier_depends() read_barrier_depends() | 399 | #define smp_read_barrier_depends() read_barrier_depends() |
387 | #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) | 400 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) |
388 | #else | 401 | #else |
389 | #define smp_mb() barrier() | 402 | #define smp_mb() barrier() |
390 | #define smp_rmb() barrier() | 403 | #define smp_rmb() barrier() |
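
On the barrier definitions at the end of this file: set_mb() stores through xchg(), whose implicit lock semantics make the store a full memory barrier, so it is the idiomatic way to publish a flag after writing the data it guards. A hedged sketch of the pairing (shared_data, shared_flag and the helpers are illustrative, not from this patch):

    /* producer */
    shared_data = compute();        /* plain store of the payload */
    set_mb(shared_flag, 1);         /* xchg: store + full barrier */

    /* consumer */
    if (shared_flag) {
            smp_rmb();              /* order the flag read before the data read */
            consume(shared_data);   /* sees the payload written above */
    }
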
diff --git a/include/asm-x86/tce.h b/include/asm-x86/tce.h index cd955d3d112f..b1a4ea00df78 100644 --- a/include/asm-x86/tce.h +++ b/include/asm-x86/tce.h | |||
@@ -39,7 +39,7 @@ struct iommu_table; | |||
39 | #define TCE_RPN_MASK 0x0000fffffffff000ULL | 39 | #define TCE_RPN_MASK 0x0000fffffffff000ULL |
40 | 40 | ||
41 | extern void tce_build(struct iommu_table *tbl, unsigned long index, | 41 | extern void tce_build(struct iommu_table *tbl, unsigned long index, |
42 | unsigned int npages, unsigned long uaddr, int direction); | 42 | unsigned int npages, unsigned long uaddr, int direction); |
43 | extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages); | 43 | extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages); |
44 | extern void * __init alloc_tce_table(void); | 44 | extern void * __init alloc_tce_table(void); |
45 | extern void __init free_tce_table(void *tbl); | 45 | extern void __init free_tce_table(void *tbl); |
diff --git a/include/asm-x86/thread_info_32.h b/include/asm-x86/thread_info_32.h index 5bd508260ffb..4e053fa561a9 100644 --- a/include/asm-x86/thread_info_32.h +++ b/include/asm-x86/thread_info_32.h | |||
@@ -20,7 +20,8 @@ | |||
20 | * low level task data that entry.S needs immediate access to | 20 | * low level task data that entry.S needs immediate access to |
21 | * - this struct should fit entirely inside of one cache line | 21 | * - this struct should fit entirely inside of one cache line |
22 | * - this struct shares the supervisor stack pages | 22 | * - this struct shares the supervisor stack pages |
23 | * - if the contents of this structure are changed, the assembly constants must also be changed | 23 | * - if the contents of this structure are changed, |
24 | * the assembly constants must also be changed | ||
24 | */ | 25 | */ |
25 | #ifndef __ASSEMBLY__ | 26 | #ifndef __ASSEMBLY__ |
26 | 27 | ||
@@ -30,18 +31,16 @@ struct thread_info { | |||
30 | unsigned long flags; /* low level flags */ | 31 | unsigned long flags; /* low level flags */ |
31 | unsigned long status; /* thread-synchronous flags */ | 32 | unsigned long status; /* thread-synchronous flags */ |
32 | __u32 cpu; /* current CPU */ | 33 | __u32 cpu; /* current CPU */ |
33 | int preempt_count; /* 0 => preemptable, <0 => BUG */ | 34 | int preempt_count; /* 0 => preemptable, |
34 | 35 | <0 => BUG */ | |
35 | |||
36 | mm_segment_t addr_limit; /* thread address space: | 36 | mm_segment_t addr_limit; /* thread address space: |
37 | 0-0xBFFFFFFF for user-thead | 37 | 0-0xBFFFFFFF user-thread |
38 | 0-0xFFFFFFFF for kernel-thread | 38 | 0-0xFFFFFFFF kernel-thread |
39 | */ | 39 | */ |
40 | void *sysenter_return; | 40 | void *sysenter_return; |
41 | struct restart_block restart_block; | 41 | struct restart_block restart_block; |
42 | 42 | unsigned long previous_esp; /* ESP of the previous stack in | |
43 | unsigned long previous_esp; /* ESP of the previous stack in case | 43 | case of nested (IRQ) stacks |
44 | of nested (IRQ) stacks | ||
45 | */ | 44 | */ |
46 | __u8 supervisor_stack[0]; | 45 | __u8 supervisor_stack[0]; |
47 | }; | 46 | }; |
@@ -90,15 +89,16 @@ register unsigned long current_stack_pointer asm("esp") __used; | |||
90 | /* how to get the thread information struct from C */ | 89 | /* how to get the thread information struct from C */ |
91 | static inline struct thread_info *current_thread_info(void) | 90 | static inline struct thread_info *current_thread_info(void) |
92 | { | 91 | { |
93 | return (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1)); | 92 | return (struct thread_info *) |
93 | (current_stack_pointer & ~(THREAD_SIZE - 1)); | ||
94 | } | 94 | } |
95 | 95 | ||
96 | /* thread information allocation */ | 96 | /* thread information allocation */ |
97 | #ifdef CONFIG_DEBUG_STACK_USAGE | 97 | #ifdef CONFIG_DEBUG_STACK_USAGE |
98 | #define alloc_thread_info(tsk) ((struct thread_info *) \ | 98 | #define alloc_thread_info(tsk) ((struct thread_info *) \ |
99 | __get_free_pages(GFP_KERNEL| __GFP_ZERO, get_order(THREAD_SIZE))) | 99 | __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(THREAD_SIZE))) |
100 | #else | 100 | #else |
101 | #define alloc_thread_info(tsk) ((struct thread_info *) \ | 101 | #define alloc_thread_info(tsk) ((struct thread_info *) \ |
102 | __get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE))) | 102 | __get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE))) |
103 | #endif | 103 | #endif |
104 | 104 | ||
@@ -107,7 +107,7 @@ static inline struct thread_info *current_thread_info(void) | |||
107 | #else /* !__ASSEMBLY__ */ | 107 | #else /* !__ASSEMBLY__ */ |
108 | 108 | ||
109 | /* how to get the thread information struct from ASM */ | 109 | /* how to get the thread information struct from ASM */ |
110 | #define GET_THREAD_INFO(reg) \ | 110 | #define GET_THREAD_INFO(reg) \ |
111 | movl $-THREAD_SIZE, reg; \ | 111 | movl $-THREAD_SIZE, reg; \ |
112 | andl %esp, reg | 112 | andl %esp, reg |
113 | 113 | ||
@@ -119,14 +119,16 @@ static inline struct thread_info *current_thread_info(void) | |||
119 | 119 | ||
120 | /* | 120 | /* |
121 | * thread information flags | 121 | * thread information flags |
122 | * - these are process state flags that various assembly files may need to access | 122 | * - these are process state flags that various |
123 | * assembly files may need to access | ||
123 | * - pending work-to-be-done flags are in LSW | 124 | * - pending work-to-be-done flags are in LSW |
124 | * - other flags in MSW | 125 | * - other flags in MSW |
125 | */ | 126 | */ |
126 | #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ | 127 | #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ |
127 | #define TIF_SIGPENDING 1 /* signal pending */ | 128 | #define TIF_SIGPENDING 1 /* signal pending */ |
128 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ | 129 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ |
129 | #define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */ | 130 | #define TIF_SINGLESTEP 3 /* restore singlestep on return to |
131 | user mode */ | ||
130 | #define TIF_IRET 4 /* return with iret */ | 132 | #define TIF_IRET 4 /* return with iret */ |
131 | #define TIF_SYSCALL_EMU 5 /* syscall emulation active */ | 133 | #define TIF_SYSCALL_EMU 5 /* syscall emulation active */ |
132 | #define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ | 134 | #define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ |
@@ -143,36 +145,36 @@ static inline struct thread_info *current_thread_info(void) | |||
143 | #define TIF_DS_AREA_MSR 23 /* uses thread_struct.ds_area_msr */ | 145 | #define TIF_DS_AREA_MSR 23 /* uses thread_struct.ds_area_msr */ |
144 | #define TIF_BTS_TRACE_TS 24 /* record scheduling event timestamps */ | 146 | #define TIF_BTS_TRACE_TS 24 /* record scheduling event timestamps */ |
145 | 147 | ||
146 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | 148 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
147 | #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) | 149 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
148 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) | 150 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) |
149 | #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) | 151 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) |
150 | #define _TIF_IRET (1<<TIF_IRET) | 152 | #define _TIF_IRET (1 << TIF_IRET) |
151 | #define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU) | 153 | #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) |
152 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) | 154 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
153 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | 155 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) |
154 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) | 156 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) |
155 | #define _TIF_HRTICK_RESCHED (1<<TIF_HRTICK_RESCHED) | 157 | #define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED) |
156 | #define _TIF_DEBUG (1<<TIF_DEBUG) | 158 | #define _TIF_DEBUG (1 << TIF_DEBUG) |
157 | #define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) | 159 | #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) |
158 | #define _TIF_FREEZE (1<<TIF_FREEZE) | 160 | #define _TIF_FREEZE (1 << TIF_FREEZE) |
159 | #define _TIF_NOTSC (1<<TIF_NOTSC) | 161 | #define _TIF_NOTSC (1 << TIF_NOTSC) |
160 | #define _TIF_FORCED_TF (1<<TIF_FORCED_TF) | 162 | #define _TIF_FORCED_TF (1 << TIF_FORCED_TF) |
161 | #define _TIF_DEBUGCTLMSR (1<<TIF_DEBUGCTLMSR) | 163 | #define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) |
162 | #define _TIF_DS_AREA_MSR (1<<TIF_DS_AREA_MSR) | 164 | #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) |
163 | #define _TIF_BTS_TRACE_TS (1<<TIF_BTS_TRACE_TS) | 165 | #define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS) |
164 | 166 | ||
165 | /* work to do on interrupt/exception return */ | 167 | /* work to do on interrupt/exception return */ |
166 | #define _TIF_WORK_MASK \ | 168 | #define _TIF_WORK_MASK \ |
167 | (0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ | 169 | (0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ |
168 | _TIF_SECCOMP | _TIF_SYSCALL_EMU)) | 170 | _TIF_SECCOMP | _TIF_SYSCALL_EMU)) |
169 | /* work to do on any return to u-space */ | 171 | /* work to do on any return to u-space */ |
170 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) | 172 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) |
171 | 173 | ||
172 | /* flags to check in __switch_to() */ | 174 | /* flags to check in __switch_to() */ |
173 | #define _TIF_WORK_CTXSW \ | 175 | #define _TIF_WORK_CTXSW \ |
174 | (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR | \ | 176 | (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR | \ |
175 | _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS) | 177 | _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS) |
176 | #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW | 178 | #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW |
177 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW | _TIF_DEBUG) | 179 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW | _TIF_DEBUG) |
178 | 180 | ||
@@ -184,8 +186,10 @@ static inline struct thread_info *current_thread_info(void) | |||
184 | * ever touches our thread-synchronous status, so we don't | 186 | * ever touches our thread-synchronous status, so we don't |
185 | * have to worry about atomic accesses. | 187 | * have to worry about atomic accesses. |
186 | */ | 188 | */ |
187 | #define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ | 189 | #define TS_USEDFPU 0x0001 /* FPU was used by this task |
188 | #define TS_POLLING 0x0002 /* True if in idle loop and not sleeping */ | 190 | this quantum (SMP) */ |
191 | #define TS_POLLING 0x0002 /* True if in idle loop | ||
192 | and not sleeping */ | ||
189 | 193 | ||
190 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) | 194 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) |
191 | 195 | ||
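
A worked expansion of _TIF_WORK_MASK above: with TIF_SYSCALL_TRACE = 0, TIF_SYSCALL_EMU = 5 and TIF_SYSCALL_AUDIT = 6 as defined in this hunk, and TIF_SECCOMP at bit 7 (defined just outside the quoted context; assumed here), the excluded set is (1 << 0) | (1 << 5) | (1 << 6) | (1 << 7) = 0x00E1, so the mask evaluates to 0x0000FFFF & ~0x00E1 = 0xFF1E: every pending-work bit in the low word except the four that the syscall entry/exit slow path handles explicitly.
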
diff --git a/include/asm-x86/thread_info_64.h b/include/asm-x86/thread_info_64.h index 6c9b214b8fc3..1e5c6f6152cd 100644 --- a/include/asm-x86/thread_info_64.h +++ b/include/asm-x86/thread_info_64.h | |||
@@ -29,9 +29,9 @@ struct thread_info { | |||
29 | __u32 flags; /* low level flags */ | 29 | __u32 flags; /* low level flags */ |
30 | __u32 status; /* thread synchronous flags */ | 30 | __u32 status; /* thread synchronous flags */ |
31 | __u32 cpu; /* current CPU */ | 31 | __u32 cpu; /* current CPU */ |
32 | int preempt_count; /* 0 => preemptable, <0 => BUG */ | 32 | int preempt_count; /* 0 => preemptable, |
33 | 33 | <0 => BUG */ | |
34 | mm_segment_t addr_limit; | 34 | mm_segment_t addr_limit; |
35 | struct restart_block restart_block; | 35 | struct restart_block restart_block; |
36 | #ifdef CONFIG_IA32_EMULATION | 36 | #ifdef CONFIG_IA32_EMULATION |
37 | void __user *sysenter_return; | 37 | void __user *sysenter_return; |
@@ -61,17 +61,17 @@ struct thread_info { | |||
61 | #define init_stack (init_thread_union.stack) | 61 | #define init_stack (init_thread_union.stack) |
62 | 62 | ||
63 | static inline struct thread_info *current_thread_info(void) | 63 | static inline struct thread_info *current_thread_info(void) |
64 | { | 64 | { |
65 | struct thread_info *ti; | 65 | struct thread_info *ti; |
66 | ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE); | 66 | ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE); |
67 | return ti; | 67 | return ti; |
68 | } | 68 | } |
69 | 69 | ||
70 | /* do not use in interrupt context */ | 70 | /* do not use in interrupt context */ |
71 | static inline struct thread_info *stack_thread_info(void) | 71 | static inline struct thread_info *stack_thread_info(void) |
72 | { | 72 | { |
73 | struct thread_info *ti; | 73 | struct thread_info *ti; |
74 | __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (~(THREAD_SIZE - 1))); | 74 | asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1))); |
75 | return ti; | 75 | return ti; |
76 | } | 76 | } |
77 | 77 | ||
@@ -82,8 +82,8 @@ static inline struct thread_info *stack_thread_info(void) | |||
82 | #define THREAD_FLAGS GFP_KERNEL | 82 | #define THREAD_FLAGS GFP_KERNEL |
83 | #endif | 83 | #endif |
84 | 84 | ||
85 | #define alloc_thread_info(tsk) \ | 85 | #define alloc_thread_info(tsk) \ |
86 | ((struct thread_info *) __get_free_pages(THREAD_FLAGS, THREAD_ORDER)) | 86 | ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER)) |
87 | 87 | ||
88 | #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER) | 88 | #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER) |
89 | 89 | ||
@@ -98,7 +98,8 @@ static inline struct thread_info *stack_thread_info(void) | |||
98 | 98 | ||
99 | /* | 99 | /* |
100 | * thread information flags | 100 | * thread information flags |
101 | * - these are process state flags that various assembly files may need to access | 101 | * - these are process state flags that various assembly files |
102 | * may need to access | ||
102 | * - pending work-to-be-done flags are in LSW | 103 | * - pending work-to-be-done flags are in LSW |
103 | * - other flags in MSW | 104 | * - other flags in MSW |
104 | * Warning: layout of LSW is hardcoded in entry.S | 105 | * Warning: layout of LSW is hardcoded in entry.S |
@@ -114,7 +115,7 @@ static inline struct thread_info *stack_thread_info(void) | |||
114 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ | 115 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ |
115 | #define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */ | 116 | #define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */ |
116 | /* 16 free */ | 117 | /* 16 free */ |
117 | #define TIF_IA32 17 /* 32bit process */ | 118 | #define TIF_IA32 17 /* 32bit process */ |
118 | #define TIF_FORK 18 /* ret_from_fork */ | 119 | #define TIF_FORK 18 /* ret_from_fork */ |
119 | #define TIF_ABI_PENDING 19 | 120 | #define TIF_ABI_PENDING 19 |
120 | #define TIF_MEMDIE 20 | 121 | #define TIF_MEMDIE 20 |
@@ -126,39 +127,40 @@ static inline struct thread_info *stack_thread_info(void) | |||
126 | #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ | 127 | #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ |
127 | #define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */ | 128 | #define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */ |
128 | 129 | ||
129 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | 130 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
130 | #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) | 131 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
131 | #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) | 132 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) |
132 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) | 133 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) |
133 | #define _TIF_IRET (1<<TIF_IRET) | 134 | #define _TIF_IRET (1 << TIF_IRET) |
134 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) | 135 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
135 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | 136 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) |
136 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) | 137 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) |
137 | #define _TIF_MCE_NOTIFY (1<<TIF_MCE_NOTIFY) | 138 | #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) |
138 | #define _TIF_HRTICK_RESCHED (1<<TIF_HRTICK_RESCHED) | 139 | #define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED) |
139 | #define _TIF_IA32 (1<<TIF_IA32) | 140 | #define _TIF_IA32 (1 << TIF_IA32) |
140 | #define _TIF_FORK (1<<TIF_FORK) | 141 | #define _TIF_FORK (1 << TIF_FORK) |
141 | #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) | 142 | #define _TIF_ABI_PENDING (1 << TIF_ABI_PENDING) |
142 | #define _TIF_DEBUG (1<<TIF_DEBUG) | 143 | #define _TIF_DEBUG (1 << TIF_DEBUG) |
143 | #define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) | 144 | #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) |
144 | #define _TIF_FREEZE (1<<TIF_FREEZE) | 145 | #define _TIF_FREEZE (1 << TIF_FREEZE) |
145 | #define _TIF_FORCED_TF (1<<TIF_FORCED_TF) | 146 | #define _TIF_FORCED_TF (1 << TIF_FORCED_TF) |
146 | #define _TIF_DEBUGCTLMSR (1<<TIF_DEBUGCTLMSR) | 147 | #define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) |
147 | #define _TIF_DS_AREA_MSR (1<<TIF_DS_AREA_MSR) | 148 | #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) |
148 | #define _TIF_BTS_TRACE_TS (1<<TIF_BTS_TRACE_TS) | 149 | #define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS) |
149 | 150 | ||
150 | /* work to do on interrupt/exception return */ | 151 | /* work to do on interrupt/exception return */ |
151 | #define _TIF_WORK_MASK \ | 152 | #define _TIF_WORK_MASK \ |
152 | (0x0000FFFF & ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|_TIF_SECCOMP)) | 153 | (0x0000FFFF & \ |
154 | ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|_TIF_SECCOMP)) | ||
153 | /* work to do on any return to user space */ | 155 | /* work to do on any return to user space */ |
154 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) | 156 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) |
155 | 157 | ||
156 | #define _TIF_DO_NOTIFY_MASK \ | 158 | #define _TIF_DO_NOTIFY_MASK \ |
157 | (_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED) | 159 | (_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED) |
158 | 160 | ||
159 | /* flags to check in __switch_to() */ | 161 | /* flags to check in __switch_to() */ |
160 | #define _TIF_WORK_CTXSW \ | 162 | #define _TIF_WORK_CTXSW \ |
161 | (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS) | 163 | (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS) |
162 | #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW | 164 | #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW |
163 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) | 165 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) |
164 | 166 | ||
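The _TIF_* pairs above all follow one pattern: a bit number, then a mask made by shifting 1 by that number, with composite masks OR-ing the single-bit masks together. A minimal, compilable user-space sketch of the same pattern; the flag names and bit numbers here are illustrative, not the kernel's.

#include <stdio.h>

/* Hypothetical flags in the TIF_* style: bit numbers first, then
 * one-bit masks, then a composite work mask. */
#define TIF_SIGPENDING		2
#define TIF_NEED_RESCHED	3
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_WORK_MASK		(_TIF_SIGPENDING | _TIF_NEED_RESCHED)

int main(void)
{
	unsigned int flags = _TIF_SIGPENDING;

	if (flags & _TIF_WORK_MASK)
		printf("work pending: 0x%x\n", flags & _TIF_WORK_MASK);
	return 0;
}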
@@ -171,9 +173,11 @@ static inline struct thread_info *stack_thread_info(void) | |||
171 | * ever touches our thread-synchronous status, so we don't | 173 | * ever touches our thread-synchronous status, so we don't |
172 | * have to worry about atomic accesses. | 174 | * have to worry about atomic accesses. |
173 | */ | 175 | */ |
174 | #define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ | 176 | #define TS_USEDFPU 0x0001 /* FPU was used by this task |
177 | this quantum (SMP) */ | ||
175 | #define TS_COMPAT 0x0002 /* 32bit syscall active */ | 178 | #define TS_COMPAT 0x0002 /* 32bit syscall active */ |
176 | #define TS_POLLING 0x0004 /* true if in idle loop and not sleeping */ | 179 | #define TS_POLLING 0x0004 /* true if in idle loop |
180 | and not sleeping */ | ||
177 | 181 | ||
178 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) | 182 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) |
179 | 183 | ||
diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h index 3998709ed637..0c0674d94255 100644 --- a/include/asm-x86/tlbflush.h +++ b/include/asm-x86/tlbflush.h | |||
@@ -32,7 +32,7 @@ static inline void __native_flush_tlb_global(void) | |||
32 | 32 | ||
33 | static inline void __native_flush_tlb_single(unsigned long addr) | 33 | static inline void __native_flush_tlb_single(unsigned long addr) |
34 | { | 34 | { |
35 | __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory"); | 35 | asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); |
36 | } | 36 | } |
37 | 37 | ||
38 | static inline void __flush_tlb_all(void) | 38 | static inline void __flush_tlb_all(void) |
@@ -134,8 +134,7 @@ void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm, | |||
134 | #define TLBSTATE_LAZY 2 | 134 | #define TLBSTATE_LAZY 2 |
135 | 135 | ||
136 | #ifdef CONFIG_X86_32 | 136 | #ifdef CONFIG_X86_32 |
137 | struct tlb_state | 137 | struct tlb_state { |
138 | { | ||
139 | struct mm_struct *active_mm; | 138 | struct mm_struct *active_mm; |
140 | int state; | 139 | int state; |
141 | char __cacheline_padding[L1_CACHE_BYTES-8]; | 140 | char __cacheline_padding[L1_CACHE_BYTES-8]; |
diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h index 8af05a93f097..81a29eb08ac4 100644 --- a/include/asm-x86/topology.h +++ b/include/asm-x86/topology.h | |||
@@ -32,13 +32,18 @@ | |||
32 | /* Mappings between logical cpu number and node number */ | 32 | /* Mappings between logical cpu number and node number */ |
33 | #ifdef CONFIG_X86_32 | 33 | #ifdef CONFIG_X86_32 |
34 | extern int cpu_to_node_map[]; | 34 | extern int cpu_to_node_map[]; |
35 | |||
36 | #else | 35 | #else |
36 | /* Returns the number of the current Node. */ | ||
37 | #define numa_node_id() (early_cpu_to_node(raw_smp_processor_id())) | ||
38 | #endif | ||
39 | |||
37 | DECLARE_PER_CPU(int, x86_cpu_to_node_map); | 40 | DECLARE_PER_CPU(int, x86_cpu_to_node_map); |
41 | |||
42 | #ifdef CONFIG_SMP | ||
38 | extern int x86_cpu_to_node_map_init[]; | 43 | extern int x86_cpu_to_node_map_init[]; |
39 | extern void *x86_cpu_to_node_map_early_ptr; | 44 | extern void *x86_cpu_to_node_map_early_ptr; |
40 | /* Returns the number of the current Node. */ | 45 | #else |
41 | #define numa_node_id() (early_cpu_to_node(raw_smp_processor_id())) | 46 | #define x86_cpu_to_node_map_early_ptr NULL |
42 | #endif | 47 | #endif |
43 | 48 | ||
44 | extern cpumask_t node_to_cpumask_map[]; | 49 | extern cpumask_t node_to_cpumask_map[]; |
@@ -54,6 +59,8 @@ static inline int cpu_to_node(int cpu) | |||
54 | } | 59 | } |
55 | 60 | ||
56 | #else /* CONFIG_X86_64 */ | 61 | #else /* CONFIG_X86_64 */ |
62 | |||
63 | #ifdef CONFIG_SMP | ||
57 | static inline int early_cpu_to_node(int cpu) | 64 | static inline int early_cpu_to_node(int cpu) |
58 | { | 65 | { |
59 | int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr; | 66 | int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr; |
@@ -65,21 +72,21 @@ static inline int early_cpu_to_node(int cpu) | |||
65 | else | 72 | else |
66 | return NUMA_NO_NODE; | 73 | return NUMA_NO_NODE; |
67 | } | 74 | } |
75 | #else | ||
76 | #define early_cpu_to_node(cpu) cpu_to_node(cpu) | ||
77 | #endif | ||
68 | 78 | ||
69 | static inline int cpu_to_node(int cpu) | 79 | static inline int cpu_to_node(int cpu) |
70 | { | 80 | { |
71 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS | 81 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS |
72 | if (x86_cpu_to_node_map_early_ptr) { | 82 | if (x86_cpu_to_node_map_early_ptr) { |
73 | printk(KERN_NOTICE "cpu_to_node(%d): usage too early!\n", | 83 | printk(KERN_NOTICE "cpu_to_node(%d): usage too early!\n", |
74 | (int)cpu); | 84 | (int)cpu); |
75 | dump_stack(); | 85 | dump_stack(); |
76 | return ((int *)x86_cpu_to_node_map_early_ptr)[cpu]; | 86 | return ((int *)x86_cpu_to_node_map_early_ptr)[cpu]; |
77 | } | 87 | } |
78 | #endif | 88 | #endif |
79 | if (per_cpu_offset(cpu)) | 89 | return per_cpu(x86_cpu_to_node_map, cpu); |
80 | return per_cpu(x86_cpu_to_node_map, cpu); | ||
81 | else | ||
82 | return NUMA_NO_NODE; | ||
83 | } | 90 | } |
84 | #endif /* CONFIG_X86_64 */ | 91 | #endif /* CONFIG_X86_64 */ |
85 | 92 | ||
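With early_cpu_to_node() and cpu_to_node() in place, callers can make NUMA-aware placement decisions. A hedged sketch of a typical caller follows; the helper name is hypothetical, but kmalloc_node() is the stock node-aware allocator.

#include <linux/slab.h>
#include <linux/topology.h>

/* Hypothetical helper: place a buffer on the NUMA node that owns @cpu. */
static void *alloc_on_cpus_node(int cpu, size_t size)
{
	return kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
}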
diff --git a/include/asm-x86/trampoline.h b/include/asm-x86/trampoline.h new file mode 100644 index 000000000000..b156b08d0131 --- /dev/null +++ b/include/asm-x86/trampoline.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #ifndef __TRAMPOLINE_HEADER | ||
2 | #define __TRAMPOLINE_HEADER | ||
3 | |||
4 | #ifndef __ASSEMBLY__ | ||
5 | |||
6 | /* | ||
7 | * Trampoline 80x86 program as an array. | ||
8 | */ | ||
9 | extern const unsigned char trampoline_data []; | ||
10 | extern const unsigned char trampoline_end []; | ||
11 | extern unsigned char *trampoline_base; | ||
12 | |||
13 | extern unsigned long init_rsp; | ||
14 | extern unsigned long initial_code; | ||
15 | |||
16 | #define TRAMPOLINE_BASE 0x6000 | ||
17 | extern unsigned long setup_trampoline(void); | ||
18 | |||
19 | #endif /* __ASSEMBLY__ */ | ||
20 | |||
21 | #endif /* __TRAMPOLINE_HEADER */ | ||
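The header only declares setup_trampoline(). A plausible shape for its implementation, inferred from these symbols and not copied from arch/x86, is to copy the real-mode blob into its reserved low-memory slot and hand back a physical start address for the secondary CPUs.

#include <linux/string.h>
#include <asm/io.h>
#include <asm/trampoline.h>

/* Hedged sketch, not the actual kernel body: stage the trampoline
 * program at trampoline_base and return where the APs should start. */
unsigned long setup_trampoline(void)
{
	memcpy(trampoline_base, trampoline_data,
	       trampoline_end - trampoline_data);
	return virt_to_phys(trampoline_base);
}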
diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h index 7d3e27f7d484..d2d8eb5b55f5 100644 --- a/include/asm-x86/tsc.h +++ b/include/asm-x86/tsc.h | |||
@@ -42,7 +42,7 @@ static inline cycles_t vget_cycles(void) | |||
42 | if (!cpu_has_tsc) | 42 | if (!cpu_has_tsc) |
43 | return 0; | 43 | return 0; |
44 | #endif | 44 | #endif |
45 | return (cycles_t) __native_read_tsc(); | 45 | return (cycles_t)__native_read_tsc(); |
46 | } | 46 | } |
47 | 47 | ||
48 | extern void tsc_init(void); | 48 | extern void tsc_init(void); |
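vget_cycles() ultimately executes RDTSC. For illustration, here is a self-contained user-space reader of the same counter; x86 only, and RDTSC is not a serializing instruction, so back-to-back reads may be reordered by the CPU.

#include <stdint.h>
#include <stdio.h>

/* User-space analogue of __native_read_tsc(): RDTSC leaves the 64-bit
 * counter split across EDX:EAX. */
static inline uint64_t read_tsc(void)
{
	uint32_t lo, hi;

	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint64_t t0 = read_tsc(), t1 = read_tsc();

	printf("delta: %llu cycles\n", (unsigned long long)(t1 - t0));
	return 0;
}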
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h index fcc570ec4fee..8e7595c1f34e 100644 --- a/include/asm-x86/uaccess_32.h +++ b/include/asm-x86/uaccess_32.h | |||
@@ -32,7 +32,7 @@ | |||
32 | #define get_fs() (current_thread_info()->addr_limit) | 32 | #define get_fs() (current_thread_info()->addr_limit) |
33 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) | 33 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) |
34 | 34 | ||
35 | #define segment_eq(a,b) ((a).seg == (b).seg) | 35 | #define segment_eq(a, b) ((a).seg == (b).seg) |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * movsl can be slow when source and dest are not both 8-byte aligned | 38 | * movsl can be slow when source and dest are not both 8-byte aligned |
@@ -43,7 +43,9 @@ extern struct movsl_mask { | |||
43 | } ____cacheline_aligned_in_smp movsl_mask; | 43 | } ____cacheline_aligned_in_smp movsl_mask; |
44 | #endif | 44 | #endif |
45 | 45 | ||
46 | #define __addr_ok(addr) ((unsigned long __force)(addr) < (current_thread_info()->addr_limit.seg)) | 46 | #define __addr_ok(addr) \ |
47 | ((unsigned long __force)(addr) < \ | ||
48 | (current_thread_info()->addr_limit.seg)) | ||
47 | 49 | ||
48 | /* | 50 | /* |
49 | * Test whether a block of memory is a valid user space address. | 51 | * Test whether a block of memory is a valid user space address. |
@@ -54,13 +56,16 @@ extern struct movsl_mask { | |||
54 | * | 56 | * |
55 | * This needs 33-bit arithmetic. We have a carry... | 57 | * This needs 33-bit arithmetic. We have a carry... |
56 | */ | 58 | */ |
57 | #define __range_ok(addr,size) ({ \ | 59 | #define __range_ok(addr, size) \ |
58 | unsigned long flag,roksum; \ | 60 | ({ \ |
59 | __chk_user_ptr(addr); \ | 61 | unsigned long flag, roksum; \ |
60 | asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \ | 62 | __chk_user_ptr(addr); \ |
61 | :"=&r" (flag), "=r" (roksum) \ | 63 | asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \ |
62 | :"1" (addr),"g" ((int)(size)),"rm" (current_thread_info()->addr_limit.seg)); \ | 64 | :"=&r" (flag), "=r" (roksum) \ |
63 | flag; }) | 65 | :"1" (addr), "g" ((int)(size)), \ |
66 | "rm" (current_thread_info()->addr_limit.seg)); \ | ||
67 | flag; \ | ||
68 | }) | ||
64 | 69 | ||
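As the comment says, the check needs 33-bit arithmetic: addr + size can wrap a 32-bit register, so the asm folds the carry into the result. A portable sketch of the same predicate follows; the function name is hypothetical, and like the macro it yields 0 when the range is acceptable.

#include <stdio.h>

/* Nonzero on wraparound (the "33rd bit") or on overrunning the limit. */
static unsigned long range_not_ok(unsigned long addr, unsigned long size,
				  unsigned long limit)
{
	unsigned long end = addr + size;

	return (end < addr) || (end > limit);
}

int main(void)
{
	/* Just below a 3 GB limit: ok, then crossing it: rejected. */
	printf("%lu\n", range_not_ok(0xbfff0000UL, 0x10000UL, 0xc0000000UL));
	printf("%lu\n", range_not_ok(0xbfff0000UL, 0x20000UL, 0xc0000000UL));
	return 0;
}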
65 | /** | 70 | /** |
66 | * access_ok: - Checks if a user space pointer is valid | 71 | * access_ok: - Checks if a user space pointer is valid |
@@ -81,7 +86,7 @@ extern struct movsl_mask { | |||
81 | * checks that the pointer is in the user space range - after calling | 86 | * checks that the pointer is in the user space range - after calling |
82 | * this function, memory access functions may still return -EFAULT. | 87 | * this function, memory access functions may still return -EFAULT. |
83 | */ | 88 | */ |
84 | #define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0)) | 89 | #define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0)) |
85 | 90 | ||
86 | /* | 91 | /* |
87 | * The exception table consists of pairs of addresses: the first is the | 92 | * The exception table consists of pairs of addresses: the first is the |
@@ -96,8 +101,7 @@ extern struct movsl_mask { | |||
96 | * on our cache or tlb entries. | 101 | * on our cache or tlb entries. |
97 | */ | 102 | */ |
98 | 103 | ||
99 | struct exception_table_entry | 104 | struct exception_table_entry { |
100 | { | ||
101 | unsigned long insn, fixup; | 105 | unsigned long insn, fixup; |
102 | }; | 106 | }; |
103 | 107 | ||
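A sketch of how a fault handler consumes such a table: look up the faulting instruction address and, if an entry exists, resume at its fixup instead of oopsing. The scan is linear for clarity, whereas the kernel keeps the table sorted and binary-searches it; the struct name is suffixed to avoid clashing with the real one.

struct extable_entry_stub {
	unsigned long insn, fixup;
};

static unsigned long find_fixup(const struct extable_entry_stub *tab,
				unsigned int n, unsigned long faulting_ip)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (tab[i].insn == faulting_ip)
			return tab[i].fixup;	/* resume here instead */
	return 0;			/* no entry: the fault is fatal */
}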
@@ -122,13 +126,15 @@ extern void __get_user_1(void); | |||
122 | extern void __get_user_2(void); | 126 | extern void __get_user_2(void); |
123 | extern void __get_user_4(void); | 127 | extern void __get_user_4(void); |
124 | 128 | ||
125 | #define __get_user_x(size,ret,x,ptr) \ | 129 | #define __get_user_x(size, ret, x, ptr) \ |
126 | __asm__ __volatile__("call __get_user_" #size \ | 130 | asm volatile("call __get_user_" #size \ |
127 | :"=a" (ret),"=d" (x) \ | 131 | :"=a" (ret),"=d" (x) \ |
128 | :"0" (ptr)) | 132 | :"0" (ptr)) |
133 | |||
129 | 134 | ||
135 | /* Careful: we have to cast the result to the type of the pointer | ||
136 | * for sign reasons */ | ||
130 | 137 | ||
131 | /* Careful: we have to cast the result to the type of the pointer for sign reasons */ | ||
132 | /** | 138 | /** |
133 | * get_user: - Get a simple variable from user space. | 139 | * get_user: - Get a simple variable from user space. |
134 | * @x: Variable to store result. | 140 | * @x: Variable to store result. |
@@ -146,15 +152,24 @@ extern void __get_user_4(void); | |||
146 | * Returns zero on success, or -EFAULT on error. | 152 | * Returns zero on success, or -EFAULT on error. |
147 | * On error, the variable @x is set to zero. | 153 | * On error, the variable @x is set to zero. |
148 | */ | 154 | */ |
149 | #define get_user(x,ptr) \ | 155 | #define get_user(x, ptr) \ |
150 | ({ int __ret_gu; \ | 156 | ({ \ |
157 | int __ret_gu; \ | ||
151 | unsigned long __val_gu; \ | 158 | unsigned long __val_gu; \ |
152 | __chk_user_ptr(ptr); \ | 159 | __chk_user_ptr(ptr); \ |
153 | switch(sizeof (*(ptr))) { \ | 160 | switch (sizeof(*(ptr))) { \ |
154 | case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \ | 161 | case 1: \ |
155 | case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \ | 162 | __get_user_x(1, __ret_gu, __val_gu, ptr); \ |
156 | case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \ | 163 | break; \ |
157 | default: __get_user_x(X,__ret_gu,__val_gu,ptr); break; \ | 164 | case 2: \ |
165 | __get_user_x(2, __ret_gu, __val_gu, ptr); \ | ||
166 | break; \ | ||
167 | case 4: \ | ||
168 | __get_user_x(4, __ret_gu, __val_gu, ptr); \ | ||
169 | break; \ | ||
170 | default: \ | ||
171 | __get_user_x(X, __ret_gu, __val_gu, ptr); \ | ||
172 | break; \ | ||
158 | } \ | 173 | } \ |
159 | (x) = (__typeof__(*(ptr)))__val_gu; \ | 174 | (x) = (__typeof__(*(ptr)))__val_gu; \ |
160 | __ret_gu; \ | 175 | __ret_gu; \ |
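A hedged usage sketch: get_user() expands to the size switch above and evaluates to 0 or -EFAULT, so callers just test the return value. The ioctl handler here is hypothetical.

#include <linux/fs.h>
#include <linux/uaccess.h>

/* Hypothetical handler: fetch an int argument from user space. */
static long example_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	int val;

	if (get_user(val, (int __user *)arg))
		return -EFAULT;		/* fault while reading user memory */
	return val >= 0 ? 0 : -EINVAL;
}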
@@ -171,11 +186,25 @@ extern void __put_user_2(void); | |||
171 | extern void __put_user_4(void); | 186 | extern void __put_user_4(void); |
172 | extern void __put_user_8(void); | 187 | extern void __put_user_8(void); |
173 | 188 | ||
174 | #define __put_user_1(x, ptr) __asm__ __volatile__("call __put_user_1":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) | 189 | #define __put_user_1(x, ptr) \ |
175 | #define __put_user_2(x, ptr) __asm__ __volatile__("call __put_user_2":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) | 190 | asm volatile("call __put_user_1" : "=a" (__ret_pu) \ |
176 | #define __put_user_4(x, ptr) __asm__ __volatile__("call __put_user_4":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) | 191 | : "0" ((typeof(*(ptr)))(x)), "c" (ptr)) |
177 | #define __put_user_8(x, ptr) __asm__ __volatile__("call __put_user_8":"=a" (__ret_pu):"A" ((typeof(*(ptr)))(x)), "c" (ptr)) | 192 | |
178 | #define __put_user_X(x, ptr) __asm__ __volatile__("call __put_user_X":"=a" (__ret_pu):"c" (ptr)) | 193 | #define __put_user_2(x, ptr) \ |
194 | asm volatile("call __put_user_2" : "=a" (__ret_pu) \ | ||
195 | : "0" ((typeof(*(ptr)))(x)), "c" (ptr)) | ||
196 | |||
197 | #define __put_user_4(x, ptr) \ | ||
198 | asm volatile("call __put_user_4" : "=a" (__ret_pu) \ | ||
199 | : "0" ((typeof(*(ptr)))(x)), "c" (ptr)) | ||
200 | |||
201 | #define __put_user_8(x, ptr) \ | ||
202 | asm volatile("call __put_user_8" : "=a" (__ret_pu) \ | ||
203 | : "A" ((typeof(*(ptr)))(x)), "c" (ptr)) | ||
204 | |||
205 | #define __put_user_X(x, ptr) \ | ||
206 | asm volatile("call __put_user_X" : "=a" (__ret_pu) \ | ||
207 | : "c" (ptr)) | ||
179 | 208 | ||
180 | /** | 209 | /** |
181 | * put_user: - Write a simple value into user space. | 210 | * put_user: - Write a simple value into user space. |
@@ -195,32 +224,43 @@ extern void __put_user_8(void); | |||
195 | */ | 224 | */ |
196 | #ifdef CONFIG_X86_WP_WORKS_OK | 225 | #ifdef CONFIG_X86_WP_WORKS_OK |
197 | 226 | ||
198 | #define put_user(x,ptr) \ | 227 | #define put_user(x, ptr) \ |
199 | ({ int __ret_pu; \ | 228 | ({ \ |
229 | int __ret_pu; \ | ||
200 | __typeof__(*(ptr)) __pu_val; \ | 230 | __typeof__(*(ptr)) __pu_val; \ |
201 | __chk_user_ptr(ptr); \ | 231 | __chk_user_ptr(ptr); \ |
202 | __pu_val = x; \ | 232 | __pu_val = x; \ |
203 | switch(sizeof(*(ptr))) { \ | 233 | switch (sizeof(*(ptr))) { \ |
204 | case 1: __put_user_1(__pu_val, ptr); break; \ | 234 | case 1: \ |
205 | case 2: __put_user_2(__pu_val, ptr); break; \ | 235 | __put_user_1(__pu_val, ptr); \ |
206 | case 4: __put_user_4(__pu_val, ptr); break; \ | 236 | break; \ |
207 | case 8: __put_user_8(__pu_val, ptr); break; \ | 237 | case 2: \ |
208 | default:__put_user_X(__pu_val, ptr); break; \ | 238 | __put_user_2(__pu_val, ptr); \ |
239 | break; \ | ||
240 | case 4: \ | ||
241 | __put_user_4(__pu_val, ptr); \ | ||
242 | break; \ | ||
243 | case 8: \ | ||
244 | __put_user_8(__pu_val, ptr); \ | ||
245 | break; \ | ||
246 | default: \ | ||
247 | __put_user_X(__pu_val, ptr); \ | ||
248 | break; \ | ||
209 | } \ | 249 | } \ |
210 | __ret_pu; \ | 250 | __ret_pu; \ |
211 | }) | 251 | }) |
212 | 252 | ||
213 | #else | 253 | #else |
214 | #define put_user(x,ptr) \ | 254 | #define put_user(x, ptr) \ |
215 | ({ \ | 255 | ({ \ |
216 | int __ret_pu; \ | 256 | int __ret_pu; \ |
217 | __typeof__(*(ptr)) __pus_tmp = x; \ | 257 | __typeof__(*(ptr)) __pus_tmp = x; \ |
218 | __ret_pu=0; \ | 258 | __ret_pu = 0; \ |
219 | if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \ | 259 | if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \ |
220 | sizeof(*(ptr))) != 0)) \ | 260 | sizeof(*(ptr))) != 0)) \ |
221 | __ret_pu=-EFAULT; \ | 261 | __ret_pu = -EFAULT; \ |
222 | __ret_pu; \ | 262 | __ret_pu; \ |
223 | }) | 263 | }) |
224 | 264 | ||
225 | 265 | ||
226 | #endif | 266 | #endif |
@@ -245,8 +285,8 @@ extern void __put_user_8(void); | |||
245 | * Returns zero on success, or -EFAULT on error. | 285 | * Returns zero on success, or -EFAULT on error. |
246 | * On error, the variable @x is set to zero. | 286 | * On error, the variable @x is set to zero. |
247 | */ | 287 | */ |
248 | #define __get_user(x,ptr) \ | 288 | #define __get_user(x, ptr) \ |
249 | __get_user_nocheck((x),(ptr),sizeof(*(ptr))) | 289 | __get_user_nocheck((x), (ptr), sizeof(*(ptr))) |
250 | 290 | ||
251 | 291 | ||
252 | /** | 292 | /** |
@@ -268,54 +308,62 @@ extern void __put_user_8(void); | |||
268 | * | 308 | * |
269 | * Returns zero on success, or -EFAULT on error. | 309 | * Returns zero on success, or -EFAULT on error. |
270 | */ | 310 | */ |
271 | #define __put_user(x,ptr) \ | 311 | #define __put_user(x, ptr) \ |
272 | __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | 312 | __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) |
273 | 313 | ||
274 | #define __put_user_nocheck(x,ptr,size) \ | 314 | #define __put_user_nocheck(x, ptr, size) \ |
275 | ({ \ | 315 | ({ \ |
276 | long __pu_err; \ | 316 | long __pu_err; \ |
277 | __put_user_size((x),(ptr),(size),__pu_err,-EFAULT); \ | 317 | __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \ |
278 | __pu_err; \ | 318 | __pu_err; \ |
279 | }) | 319 | }) |
280 | 320 | ||
281 | 321 | ||
282 | #define __put_user_u64(x, addr, err) \ | 322 | #define __put_user_u64(x, addr, err) \ |
283 | __asm__ __volatile__( \ | 323 | asm volatile("1: movl %%eax,0(%2)\n" \ |
284 | "1: movl %%eax,0(%2)\n" \ | 324 | "2: movl %%edx,4(%2)\n" \ |
285 | "2: movl %%edx,4(%2)\n" \ | 325 | "3:\n" \ |
286 | "3:\n" \ | 326 | ".section .fixup,\"ax\"\n" \ |
287 | ".section .fixup,\"ax\"\n" \ | 327 | "4: movl %3,%0\n" \ |
288 | "4: movl %3,%0\n" \ | 328 | " jmp 3b\n" \ |
289 | " jmp 3b\n" \ | 329 | ".previous\n" \ |
290 | ".previous\n" \ | 330 | _ASM_EXTABLE(1b, 4b) \ |
291 | _ASM_EXTABLE(1b,4b) \ | 331 | _ASM_EXTABLE(2b, 4b) \ |
292 | _ASM_EXTABLE(2b,4b) \ | 332 | : "=r" (err) \ |
293 | : "=r"(err) \ | 333 | : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err)) |
294 | : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err)) | ||
295 | 334 | ||
296 | #ifdef CONFIG_X86_WP_WORKS_OK | 335 | #ifdef CONFIG_X86_WP_WORKS_OK |
297 | 336 | ||
298 | #define __put_user_size(x,ptr,size,retval,errret) \ | 337 | #define __put_user_size(x, ptr, size, retval, errret) \ |
299 | do { \ | 338 | do { \ |
300 | retval = 0; \ | 339 | retval = 0; \ |
301 | __chk_user_ptr(ptr); \ | 340 | __chk_user_ptr(ptr); \ |
302 | switch (size) { \ | 341 | switch (size) { \ |
303 | case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret);break; \ | 342 | case 1: \ |
304 | case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret);break; \ | 343 | __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \ |
305 | case 4: __put_user_asm(x,ptr,retval,"l","","ir",errret); break; \ | 344 | break; \ |
306 | case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\ | 345 | case 2: \ |
307 | default: __put_user_bad(); \ | 346 | __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \ |
347 | break; \ | ||
348 | case 4: \ | ||
349 | __put_user_asm(x, ptr, retval, "l", "", "ir", errret); \ | ||
350 | break; \ | ||
351 | case 8: \ | ||
352 | __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \ | ||
353 | break; \ | ||
354 | default: \ | ||
355 | __put_user_bad(); \ | ||
308 | } \ | 356 | } \ |
309 | } while (0) | 357 | } while (0) |
310 | 358 | ||
311 | #else | 359 | #else |
312 | 360 | ||
313 | #define __put_user_size(x,ptr,size,retval,errret) \ | 361 | #define __put_user_size(x, ptr, size, retval, errret) \ |
314 | do { \ | 362 | do { \ |
315 | __typeof__(*(ptr)) __pus_tmp = x; \ | 363 | __typeof__(*(ptr)) __pus_tmp = x; \ |
316 | retval = 0; \ | 364 | retval = 0; \ |
317 | \ | 365 | \ |
318 | if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \ | 366 | if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \ |
319 | retval = errret; \ | 367 | retval = errret; \ |
320 | } while (0) | 368 | } while (0) |
321 | 369 | ||
@@ -329,65 +377,70 @@ struct __large_struct { unsigned long buf[100]; }; | |||
329 | * aliasing issues. | 377 | * aliasing issues. |
330 | */ | 378 | */ |
331 | #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ | 379 | #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ |
332 | __asm__ __volatile__( \ | 380 | asm volatile("1: mov"itype" %"rtype"1,%2\n" \ |
333 | "1: mov"itype" %"rtype"1,%2\n" \ | 381 | "2:\n" \ |
334 | "2:\n" \ | 382 | ".section .fixup,\"ax\"\n" \ |
335 | ".section .fixup,\"ax\"\n" \ | 383 | "3: movl %3,%0\n" \ |
336 | "3: movl %3,%0\n" \ | 384 | " jmp 2b\n" \ |
337 | " jmp 2b\n" \ | 385 | ".previous\n" \ |
338 | ".previous\n" \ | 386 | _ASM_EXTABLE(1b, 3b) \ |
339 | _ASM_EXTABLE(1b,3b) \ | 387 | : "=r"(err) \ |
340 | : "=r"(err) \ | 388 | : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err)) |
341 | : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err)) | 389 | |
342 | 390 | ||
343 | 391 | #define __get_user_nocheck(x, ptr, size) \ | |
344 | #define __get_user_nocheck(x,ptr,size) \ | 392 | ({ \ |
345 | ({ \ | 393 | long __gu_err; \ |
346 | long __gu_err; \ | 394 | unsigned long __gu_val; \ |
347 | unsigned long __gu_val; \ | 395 | __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ |
348 | __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\ | 396 | (x) = (__typeof__(*(ptr)))__gu_val; \ |
349 | (x) = (__typeof__(*(ptr)))__gu_val; \ | 397 | __gu_err; \ |
350 | __gu_err; \ | ||
351 | }) | 398 | }) |
352 | 399 | ||
353 | extern long __get_user_bad(void); | 400 | extern long __get_user_bad(void); |
354 | 401 | ||
355 | #define __get_user_size(x,ptr,size,retval,errret) \ | 402 | #define __get_user_size(x, ptr, size, retval, errret) \ |
356 | do { \ | 403 | do { \ |
357 | retval = 0; \ | 404 | retval = 0; \ |
358 | __chk_user_ptr(ptr); \ | 405 | __chk_user_ptr(ptr); \ |
359 | switch (size) { \ | 406 | switch (size) { \ |
360 | case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret);break; \ | 407 | case 1: \ |
361 | case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break; \ | 408 | __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \ |
362 | case 4: __get_user_asm(x,ptr,retval,"l","","=r",errret);break; \ | 409 | break; \ |
363 | default: (x) = __get_user_bad(); \ | 410 | case 2: \ |
411 | __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \ | ||
412 | break; \ | ||
413 | case 4: \ | ||
414 | __get_user_asm(x, ptr, retval, "l", "", "=r", errret); \ | ||
415 | break; \ | ||
416 | default: \ | ||
417 | (x) = __get_user_bad(); \ | ||
364 | } \ | 418 | } \ |
365 | } while (0) | 419 | } while (0) |
366 | 420 | ||
367 | #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ | 421 | #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ |
368 | __asm__ __volatile__( \ | 422 | asm volatile("1: mov"itype" %2,%"rtype"1\n" \ |
369 | "1: mov"itype" %2,%"rtype"1\n" \ | 423 | "2:\n" \ |
370 | "2:\n" \ | 424 | ".section .fixup,\"ax\"\n" \ |
371 | ".section .fixup,\"ax\"\n" \ | 425 | "3: movl %3,%0\n" \ |
372 | "3: movl %3,%0\n" \ | 426 | " xor"itype" %"rtype"1,%"rtype"1\n" \ |
373 | " xor"itype" %"rtype"1,%"rtype"1\n" \ | 427 | " jmp 2b\n" \ |
374 | " jmp 2b\n" \ | 428 | ".previous\n" \ |
375 | ".previous\n" \ | 429 | _ASM_EXTABLE(1b, 3b) \ |
376 | _ASM_EXTABLE(1b,3b) \ | 430 | : "=r" (err), ltype (x) \ |
377 | : "=r"(err), ltype (x) \ | 431 | : "m" (__m(addr)), "i" (errret), "0" (err)) |
378 | : "m"(__m(addr)), "i"(errret), "0"(err)) | 432 | |
379 | 433 | ||
380 | 434 | unsigned long __must_check __copy_to_user_ll | |
381 | unsigned long __must_check __copy_to_user_ll(void __user *to, | 435 | (void __user *to, const void *from, unsigned long n); |
382 | const void *from, unsigned long n); | 436 | unsigned long __must_check __copy_from_user_ll |
383 | unsigned long __must_check __copy_from_user_ll(void *to, | 437 | (void *to, const void __user *from, unsigned long n); |
384 | const void __user *from, unsigned long n); | 438 | unsigned long __must_check __copy_from_user_ll_nozero |
385 | unsigned long __must_check __copy_from_user_ll_nozero(void *to, | 439 | (void *to, const void __user *from, unsigned long n); |
386 | const void __user *from, unsigned long n); | 440 | unsigned long __must_check __copy_from_user_ll_nocache |
387 | unsigned long __must_check __copy_from_user_ll_nocache(void *to, | 441 | (void *to, const void __user *from, unsigned long n); |
388 | const void __user *from, unsigned long n); | 442 | unsigned long __must_check __copy_from_user_ll_nocache_nozero |
389 | unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to, | 443 | (void *to, const void __user *from, unsigned long n); |
390 | const void __user *from, unsigned long n); | ||
391 | 444 | ||
392 | /** | 445 | /** |
393 | * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking. | 446 | * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking. |
@@ -416,13 +469,16 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) | |||
416 | 469 | ||
417 | switch (n) { | 470 | switch (n) { |
418 | case 1: | 471 | case 1: |
419 | __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1); | 472 | __put_user_size(*(u8 *)from, (u8 __user *)to, |
473 | 1, ret, 1); | ||
420 | return ret; | 474 | return ret; |
421 | case 2: | 475 | case 2: |
422 | __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2); | 476 | __put_user_size(*(u16 *)from, (u16 __user *)to, |
477 | 2, ret, 2); | ||
423 | return ret; | 478 | return ret; |
424 | case 4: | 479 | case 4: |
425 | __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4); | 480 | __put_user_size(*(u32 *)from, (u32 __user *)to, |
481 | 4, ret, 4); | ||
426 | return ret; | 482 | return ret; |
427 | } | 483 | } |
428 | } | 484 | } |
@@ -545,19 +601,21 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to, | |||
545 | } | 601 | } |
546 | 602 | ||
547 | static __always_inline unsigned long | 603 | static __always_inline unsigned long |
548 | __copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n) | 604 | __copy_from_user_inatomic_nocache(void *to, const void __user *from, |
605 | unsigned long n) | ||
549 | { | 606 | { |
550 | return __copy_from_user_ll_nocache_nozero(to, from, n); | 607 | return __copy_from_user_ll_nocache_nozero(to, from, n); |
551 | } | 608 | } |
552 | 609 | ||
553 | unsigned long __must_check copy_to_user(void __user *to, | 610 | unsigned long __must_check copy_to_user(void __user *to, |
554 | const void *from, unsigned long n); | 611 | const void *from, unsigned long n); |
555 | unsigned long __must_check copy_from_user(void *to, | 612 | unsigned long __must_check copy_from_user(void *to, |
556 | const void __user *from, unsigned long n); | 613 | const void __user *from, |
614 | unsigned long n); | ||
557 | long __must_check strncpy_from_user(char *dst, const char __user *src, | 615 | long __must_check strncpy_from_user(char *dst, const char __user *src, |
558 | long count); | 616 | long count); |
559 | long __must_check __strncpy_from_user(char *dst, | 617 | long __must_check __strncpy_from_user(char *dst, |
560 | const char __user *src, long count); | 618 | const char __user *src, long count); |
561 | 619 | ||
562 | /** | 620 | /** |
563 | * strlen_user: - Get the size of a string in user space. | 621 | * strlen_user: - Get the size of a string in user space. |
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h index b87eb4ba8f9d..b8a2f4339903 100644 --- a/include/asm-x86/uaccess_64.h +++ b/include/asm-x86/uaccess_64.h | |||
@@ -29,23 +29,27 @@ | |||
29 | #define get_fs() (current_thread_info()->addr_limit) | 29 | #define get_fs() (current_thread_info()->addr_limit) |
30 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) | 30 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) |
31 | 31 | ||
32 | #define segment_eq(a,b) ((a).seg == (b).seg) | 32 | #define segment_eq(a, b) ((a).seg == (b).seg) |
33 | 33 | ||
34 | #define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg))) | 34 | #define __addr_ok(addr) (!((unsigned long)(addr) & \ |
35 | (current_thread_info()->addr_limit.seg))) | ||
35 | 36 | ||
36 | /* | 37 | /* |
37 | * Uhhuh, this needs 65-bit arithmetic. We have a carry.. | 38 | * Uhhuh, this needs 65-bit arithmetic. We have a carry.. |
38 | */ | 39 | */ |
39 | #define __range_not_ok(addr,size) ({ \ | 40 | #define __range_not_ok(addr, size) \ |
40 | unsigned long flag,roksum; \ | 41 | ({ \ |
41 | __chk_user_ptr(addr); \ | 42 | unsigned long flag, roksum; \ |
42 | asm("# range_ok\n\r" \ | 43 | __chk_user_ptr(addr); \ |
43 | "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \ | 44 | asm("# range_ok\n\r" \ |
44 | :"=&r" (flag), "=r" (roksum) \ | 45 | "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \ |
45 | :"1" (addr),"g" ((long)(size)),"g" (current_thread_info()->addr_limit.seg)); \ | 46 | : "=&r" (flag), "=r" (roksum) \ |
46 | flag; }) | 47 | : "1" (addr), "g" ((long)(size)), \ |
48 | "g" (current_thread_info()->addr_limit.seg)); \ | ||
49 | flag; \ | ||
50 | }) | ||
47 | 51 | ||
48 | #define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0) | 52 | #define access_ok(type, addr, size) (__range_not_ok(addr, size) == 0) |
49 | 53 | ||
50 | /* | 54 | /* |
51 | * The exception table consists of pairs of addresses: the first is the | 55 | * The exception table consists of pairs of addresses: the first is the |
@@ -60,8 +64,7 @@ | |||
60 | * on our cache or tlb entries. | 64 | * on our cache or tlb entries. |
61 | */ | 65 | */ |
62 | 66 | ||
63 | struct exception_table_entry | 67 | struct exception_table_entry { |
64 | { | ||
65 | unsigned long insn, fixup; | 68 | unsigned long insn, fixup; |
66 | }; | 69 | }; |
67 | 70 | ||
@@ -84,23 +87,36 @@ extern int fixup_exception(struct pt_regs *regs); | |||
84 | * accesses to the same area of user memory). | 87 | * accesses to the same area of user memory). |
85 | */ | 88 | */ |
86 | 89 | ||
87 | #define __get_user_x(size,ret,x,ptr) \ | 90 | #define __get_user_x(size, ret, x, ptr) \ |
88 | asm volatile("call __get_user_" #size \ | 91 | asm volatile("call __get_user_" #size \ |
89 | :"=a" (ret),"=d" (x) \ | 92 | : "=a" (ret),"=d" (x) \ |
90 | :"c" (ptr) \ | 93 | : "c" (ptr) \ |
91 | :"r8") | 94 | : "r8") |
95 | |||
96 | /* Careful: we have to cast the result to the type of the pointer | ||
97 | * for sign reasons */ | ||
92 | 98 | ||
93 | /* Careful: we have to cast the result to the type of the pointer for sign reasons */ | 99 | #define get_user(x, ptr) \ |
94 | #define get_user(x,ptr) \ | 100 | ({ \ |
95 | ({ unsigned long __val_gu; \ | 101 | unsigned long __val_gu; \ |
96 | int __ret_gu; \ | 102 | int __ret_gu; \ |
97 | __chk_user_ptr(ptr); \ | 103 | __chk_user_ptr(ptr); \ |
98 | switch(sizeof (*(ptr))) { \ | 104 | switch (sizeof(*(ptr))) { \ |
99 | case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \ | 105 | case 1: \ |
100 | case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \ | 106 | __get_user_x(1, __ret_gu, __val_gu, ptr); \ |
101 | case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \ | 107 | break; \ |
102 | case 8: __get_user_x(8,__ret_gu,__val_gu,ptr); break; \ | 108 | case 2: \ |
103 | default: __get_user_bad(); break; \ | 109 | __get_user_x(2, __ret_gu, __val_gu, ptr); \ |
110 | break; \ | ||
111 | case 4: \ | ||
112 | __get_user_x(4, __ret_gu, __val_gu, ptr); \ | ||
113 | break; \ | ||
114 | case 8: \ | ||
115 | __get_user_x(8, __ret_gu, __val_gu, ptr); \ | ||
116 | break; \ | ||
117 | default: \ | ||
118 | __get_user_bad(); \ | ||
119 | break; \ | ||
104 | } \ | 120 | } \ |
105 | (x) = (__force typeof(*(ptr)))__val_gu; \ | 121 | (x) = (__force typeof(*(ptr)))__val_gu; \ |
106 | __ret_gu; \ | 122 | __ret_gu; \ |
@@ -112,55 +128,73 @@ extern void __put_user_4(void); | |||
112 | extern void __put_user_8(void); | 128 | extern void __put_user_8(void); |
113 | extern void __put_user_bad(void); | 129 | extern void __put_user_bad(void); |
114 | 130 | ||
115 | #define __put_user_x(size,ret,x,ptr) \ | 131 | #define __put_user_x(size, ret, x, ptr) \ |
116 | asm volatile("call __put_user_" #size \ | 132 | asm volatile("call __put_user_" #size \ |
117 | :"=a" (ret) \ | 133 | :"=a" (ret) \ |
118 | :"c" (ptr),"d" (x) \ | 134 | :"c" (ptr),"d" (x) \ |
119 | :"r8") | 135 | :"r8") |
120 | 136 | ||
121 | #define put_user(x,ptr) \ | 137 | #define put_user(x, ptr) \ |
122 | __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | 138 | __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) |
123 | 139 | ||
124 | #define __get_user(x,ptr) \ | 140 | #define __get_user(x, ptr) \ |
125 | __get_user_nocheck((x),(ptr),sizeof(*(ptr))) | 141 | __get_user_nocheck((x), (ptr), sizeof(*(ptr))) |
126 | #define __put_user(x,ptr) \ | 142 | #define __put_user(x, ptr) \ |
127 | __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | 143 | __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) |
128 | 144 | ||
129 | #define __get_user_unaligned __get_user | 145 | #define __get_user_unaligned __get_user |
130 | #define __put_user_unaligned __put_user | 146 | #define __put_user_unaligned __put_user |
131 | 147 | ||
132 | #define __put_user_nocheck(x,ptr,size) \ | 148 | #define __put_user_nocheck(x, ptr, size) \ |
133 | ({ \ | 149 | ({ \ |
134 | int __pu_err; \ | 150 | int __pu_err; \ |
135 | __put_user_size((x),(ptr),(size),__pu_err); \ | 151 | __put_user_size((x), (ptr), (size), __pu_err); \ |
136 | __pu_err; \ | 152 | __pu_err; \ |
137 | }) | 153 | }) |
138 | 154 | ||
139 | 155 | ||
140 | #define __put_user_check(x,ptr,size) \ | 156 | #define __put_user_check(x, ptr, size) \ |
141 | ({ \ | 157 | ({ \ |
142 | int __pu_err; \ | 158 | int __pu_err; \ |
143 | typeof(*(ptr)) __user *__pu_addr = (ptr); \ | 159 | typeof(*(ptr)) __user *__pu_addr = (ptr); \ |
144 | switch (size) { \ | 160 | switch (size) { \ |
145 | case 1: __put_user_x(1,__pu_err,x,__pu_addr); break; \ | 161 | case 1: \ |
146 | case 2: __put_user_x(2,__pu_err,x,__pu_addr); break; \ | 162 | __put_user_x(1, __pu_err, x, __pu_addr); \ |
147 | case 4: __put_user_x(4,__pu_err,x,__pu_addr); break; \ | 163 | break; \ |
148 | case 8: __put_user_x(8,__pu_err,x,__pu_addr); break; \ | 164 | case 2: \ |
149 | default: __put_user_bad(); \ | 165 | __put_user_x(2, __pu_err, x, __pu_addr); \ |
150 | } \ | 166 | break; \ |
151 | __pu_err; \ | 167 | case 4: \ |
168 | __put_user_x(4, __pu_err, x, __pu_addr); \ | ||
169 | break; \ | ||
170 | case 8: \ | ||
171 | __put_user_x(8, __pu_err, x, __pu_addr); \ | ||
172 | break; \ | ||
173 | default: \ | ||
174 | __put_user_bad(); \ | ||
175 | } \ | ||
176 | __pu_err; \ | ||
152 | }) | 177 | }) |
153 | 178 | ||
154 | #define __put_user_size(x,ptr,size,retval) \ | 179 | #define __put_user_size(x, ptr, size, retval) \ |
155 | do { \ | 180 | do { \ |
156 | retval = 0; \ | 181 | retval = 0; \ |
157 | __chk_user_ptr(ptr); \ | 182 | __chk_user_ptr(ptr); \ |
158 | switch (size) { \ | 183 | switch (size) { \ |
159 | case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\ | 184 | case 1: \ |
160 | case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\ | 185 | __put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\ |
161 | case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\ | 186 | break; \ |
162 | case 8: __put_user_asm(x,ptr,retval,"q","","Zr",-EFAULT); break;\ | 187 | case 2: \ |
163 | default: __put_user_bad(); \ | 188 | __put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\ |
189 | break; \ | ||
190 | case 4: \ | ||
191 | __put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\ | ||
192 | break; \ | ||
193 | case 8: \ | ||
194 | __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT); \ | ||
195 | break; \ | ||
196 | default: \ | ||
197 | __put_user_bad(); \ | ||
164 | } \ | 198 | } \ |
165 | } while (0) | 199 | } while (0) |
166 | 200 | ||
@@ -174,23 +208,22 @@ struct __large_struct { unsigned long buf[100]; }; | |||
174 | * aliasing issues. | 208 | * aliasing issues. |
175 | */ | 209 | */ |
176 | #define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \ | 210 | #define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \ |
177 | asm volatile( \ | 211 | asm volatile("1: mov"itype" %"rtype"1,%2\n" \ |
178 | "1: mov"itype" %"rtype"1,%2\n" \ | 212 | "2:\n" \ |
179 | "2:\n" \ | 213 | ".section .fixup, \"ax\"\n" \ |
180 | ".section .fixup,\"ax\"\n" \ | 214 | "3: mov %3,%0\n" \ |
181 | "3: mov %3,%0\n" \ | 215 | " jmp 2b\n" \ |
182 | " jmp 2b\n" \ | 216 | ".previous\n" \ |
183 | ".previous\n" \ | 217 | _ASM_EXTABLE(1b, 3b) \ |
184 | _ASM_EXTABLE(1b,3b) \ | 218 | : "=r"(err) \ |
185 | : "=r"(err) \ | 219 | : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err)) |
186 | : ltype (x), "m"(__m(addr)), "i"(errno), "0"(err)) | 220 | |
187 | 221 | ||
188 | 222 | #define __get_user_nocheck(x, ptr, size) \ | |
189 | #define __get_user_nocheck(x,ptr,size) \ | ||
190 | ({ \ | 223 | ({ \ |
191 | int __gu_err; \ | 224 | int __gu_err; \ |
192 | unsigned long __gu_val; \ | 225 | unsigned long __gu_val; \ |
193 | __get_user_size(__gu_val,(ptr),(size),__gu_err); \ | 226 | __get_user_size(__gu_val, (ptr), (size), __gu_err); \ |
194 | (x) = (__force typeof(*(ptr)))__gu_val; \ | 227 | (x) = (__force typeof(*(ptr)))__gu_val; \ |
195 | __gu_err; \ | 228 | __gu_err; \ |
196 | }) | 229 | }) |
@@ -201,31 +234,39 @@ extern int __get_user_4(void); | |||
201 | extern int __get_user_8(void); | 234 | extern int __get_user_8(void); |
202 | extern int __get_user_bad(void); | 235 | extern int __get_user_bad(void); |
203 | 236 | ||
204 | #define __get_user_size(x,ptr,size,retval) \ | 237 | #define __get_user_size(x, ptr, size, retval) \ |
205 | do { \ | 238 | do { \ |
206 | retval = 0; \ | 239 | retval = 0; \ |
207 | __chk_user_ptr(ptr); \ | 240 | __chk_user_ptr(ptr); \ |
208 | switch (size) { \ | 241 | switch (size) { \ |
209 | case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\ | 242 | case 1: \ |
210 | case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\ | 243 | __get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\ |
211 | case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\ | 244 | break; \ |
212 | case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\ | 245 | case 2: \ |
213 | default: (x) = __get_user_bad(); \ | 246 | __get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\ |
247 | break; \ | ||
248 | case 4: \ | ||
249 | __get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\ | ||
250 | break; \ | ||
251 | case 8: \ | ||
252 | __get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT); \ | ||
253 | break; \ | ||
254 | default: \ | ||
255 | (x) = __get_user_bad(); \ | ||
214 | } \ | 256 | } \ |
215 | } while (0) | 257 | } while (0) |
216 | 258 | ||
217 | #define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \ | 259 | #define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \ |
218 | asm volatile( \ | 260 | asm volatile("1: mov"itype" %2,%"rtype"1\n" \ |
219 | "1: mov"itype" %2,%"rtype"1\n" \ | 261 | "2:\n" \ |
220 | "2:\n" \ | 262 | ".section .fixup, \"ax\"\n" \ |
221 | ".section .fixup,\"ax\"\n" \ | 263 | "3: mov %3,%0\n" \ |
222 | "3: mov %3,%0\n" \ | 264 | " xor"itype" %"rtype"1,%"rtype"1\n" \ |
223 | " xor"itype" %"rtype"1,%"rtype"1\n" \ | 265 | " jmp 2b\n" \ |
224 | " jmp 2b\n" \ | 266 | ".previous\n" \ |
225 | ".previous\n" \ | 267 | _ASM_EXTABLE(1b, 3b) \ |
226 | _ASM_EXTABLE(1b,3b) \ | 268 | : "=r" (err), ltype (x) \ |
227 | : "=r"(err), ltype (x) \ | 269 | : "m" (__m(addr)), "i"(errno), "0"(err)) |
228 | : "m"(__m(addr)), "i"(errno), "0"(err)) | ||
229 | 270 | ||
230 | /* | 271 | /* |
231 | * Copy To/From Userspace | 272 | * Copy To/From Userspace |
@@ -244,110 +285,142 @@ copy_in_user(void __user *to, const void __user *from, unsigned len); | |||
244 | 285 | ||
245 | static __always_inline __must_check | 286 | static __always_inline __must_check |
246 | int __copy_from_user(void *dst, const void __user *src, unsigned size) | 287 | int __copy_from_user(void *dst, const void __user *src, unsigned size) |
247 | { | 288 | { |
248 | int ret = 0; | 289 | int ret = 0; |
249 | if (!__builtin_constant_p(size)) | 290 | if (!__builtin_constant_p(size)) |
250 | return copy_user_generic(dst,(__force void *)src,size); | 291 | return copy_user_generic(dst, (__force void *)src, size); |
251 | switch (size) { | 292 | switch (size) { |
252 | case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1); | 293 | case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src, |
294 | ret, "b", "b", "=q", 1); | ||
253 | return ret; | 295 | return ret; |
254 | case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2); | 296 | case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src, |
297 | ret, "w", "w", "=r", 2); | ||
255 | return ret; | 298 | return ret; |
256 | case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4); | 299 | case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src, |
300 | ret, "l", "k", "=r", 4); | ||
301 | return ret; | ||
302 | case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src, | ||
303 | ret, "q", "", "=r", 8); | ||
257 | return ret; | 304 | return ret; |
258 | case 8:__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",8); | ||
259 | return ret; | ||
260 | case 10: | 305 | case 10: |
261 | __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16); | 306 | __get_user_asm(*(u64 *)dst, (u64 __user *)src, |
262 | if (unlikely(ret)) return ret; | 307 | ret, "q", "", "=r", 16); |
263 | __get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2); | 308 | if (unlikely(ret)) |
264 | return ret; | 309 | return ret; |
310 | __get_user_asm(*(u16 *)(8 + (char *)dst), | ||
311 | (u16 __user *)(8 + (char __user *)src), | ||
312 | ret, "w", "w", "=r", 2); | ||
313 | return ret; | ||
265 | case 16: | 314 | case 16: |
266 | __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16); | 315 | __get_user_asm(*(u64 *)dst, (u64 __user *)src, |
267 | if (unlikely(ret)) return ret; | 316 | ret, "q", "", "=r", 16); |
268 | __get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8); | 317 | if (unlikely(ret)) |
269 | return ret; | 318 | return ret; |
319 | __get_user_asm(*(u64 *)(8 + (char *)dst), | ||
320 | (u64 __user *)(8 + (char __user *)src), | ||
321 | ret, "q", "", "=r", 8); | ||
322 | return ret; | ||
270 | default: | 323 | default: |
271 | return copy_user_generic(dst,(__force void *)src,size); | 324 | return copy_user_generic(dst, (__force void *)src, size); |
272 | } | 325 | } |
273 | } | 326 | } |
274 | 327 | ||
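Note the dispatch strategy above: __builtin_constant_p(size) lets a compile-time-known size select a fixed-width inline access, and everything else falls through to copy_user_generic(). A compilable user-space sketch of the same shape; the macro name is hypothetical and only the 4-byte case is specialized.

#include <stdio.h>
#include <string.h>

/* If the compiler can prove size == 4, use one fixed-width store;
 * otherwise fall back to the generic copy. */
#define copy_small(dst, src, size)					\
	((__builtin_constant_p(size) && (size) == 4)			\
		? (void)(*(int *)(dst) = *(const int *)(src))		\
		: (void)memcpy((dst), (src), (size)))

int main(void)
{
	int a = 42, b = 0;

	copy_small(&b, &a, sizeof(b));	/* constant size: inline store */
	printf("%d\n", b);
	return 0;
}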
275 | static __always_inline __must_check | 328 | static __always_inline __must_check |
276 | int __copy_to_user(void __user *dst, const void *src, unsigned size) | 329 | int __copy_to_user(void __user *dst, const void *src, unsigned size) |
277 | { | 330 | { |
278 | int ret = 0; | 331 | int ret = 0; |
279 | if (!__builtin_constant_p(size)) | 332 | if (!__builtin_constant_p(size)) |
280 | return copy_user_generic((__force void *)dst,src,size); | 333 | return copy_user_generic((__force void *)dst, src, size); |
281 | switch (size) { | 334 | switch (size) { |
282 | case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1); | 335 | case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst, |
336 | ret, "b", "b", "iq", 1); | ||
283 | return ret; | 337 | return ret; |
284 | case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2); | 338 | case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst, |
339 | ret, "w", "w", "ir", 2); | ||
285 | return ret; | 340 | return ret; |
286 | case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4); | 341 | case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst, |
342 | ret, "l", "k", "ir", 4); | ||
343 | return ret; | ||
344 | case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst, | ||
345 | ret, "q", "", "ir", 8); | ||
287 | return ret; | 346 | return ret; |
288 | case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8); | ||
289 | return ret; | ||
290 | case 10: | 347 | case 10: |
291 | __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10); | 348 | __put_user_asm(*(u64 *)src, (u64 __user *)dst, |
292 | if (unlikely(ret)) return ret; | 349 | ret, "q", "", "ir", 10); |
350 | if (unlikely(ret)) | ||
351 | return ret; | ||
293 | asm("":::"memory"); | 352 | asm("":::"memory"); |
294 | __put_user_asm(4[(u16*)src],4+(u16 __user *)dst,ret,"w","w","ir",2); | 353 | __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst, |
295 | return ret; | 354 | ret, "w", "w", "ir", 2); |
355 | return ret; | ||
296 | case 16: | 356 | case 16: |
297 | __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16); | 357 | __put_user_asm(*(u64 *)src, (u64 __user *)dst, |
298 | if (unlikely(ret)) return ret; | 358 | ret, "q", "", "ir", 16); |
359 | if (unlikely(ret)) | ||
360 | return ret; | ||
299 | asm("":::"memory"); | 361 | asm("":::"memory"); |
300 | __put_user_asm(1[(u64*)src],1+(u64 __user *)dst,ret,"q","","ir",8); | 362 | __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst, |
301 | return ret; | 363 | ret, "q", "", "ir", 8); |
364 | return ret; | ||
302 | default: | 365 | default: |
303 | return copy_user_generic((__force void *)dst,src,size); | 366 | return copy_user_generic((__force void *)dst, src, size); |
304 | } | 367 | } |
305 | } | 368 | } |
306 | 369 | ||
307 | static __always_inline __must_check | 370 | static __always_inline __must_check |
308 | int __copy_in_user(void __user *dst, const void __user *src, unsigned size) | 371 | int __copy_in_user(void __user *dst, const void __user *src, unsigned size) |
309 | { | 372 | { |
310 | int ret = 0; | 373 | int ret = 0; |
311 | if (!__builtin_constant_p(size)) | 374 | if (!__builtin_constant_p(size)) |
312 | return copy_user_generic((__force void *)dst,(__force void *)src,size); | 375 | return copy_user_generic((__force void *)dst, |
313 | switch (size) { | 376 | (__force void *)src, size); |
314 | case 1: { | 377 | switch (size) { |
378 | case 1: { | ||
315 | u8 tmp; | 379 | u8 tmp; |
316 | __get_user_asm(tmp,(u8 __user *)src,ret,"b","b","=q",1); | 380 | __get_user_asm(tmp, (u8 __user *)src, |
381 | ret, "b", "b", "=q", 1); | ||
317 | if (likely(!ret)) | 382 | if (likely(!ret)) |
318 | __put_user_asm(tmp,(u8 __user *)dst,ret,"b","b","iq",1); | 383 | __put_user_asm(tmp, (u8 __user *)dst, |
384 | ret, "b", "b", "iq", 1); | ||
319 | return ret; | 385 | return ret; |
320 | } | 386 | } |
321 | case 2: { | 387 | case 2: { |
322 | u16 tmp; | 388 | u16 tmp; |
323 | __get_user_asm(tmp,(u16 __user *)src,ret,"w","w","=r",2); | 389 | __get_user_asm(tmp, (u16 __user *)src, |
390 | ret, "w", "w", "=r", 2); | ||
324 | if (likely(!ret)) | 391 | if (likely(!ret)) |
325 | __put_user_asm(tmp,(u16 __user *)dst,ret,"w","w","ir",2); | 392 | __put_user_asm(tmp, (u16 __user *)dst, |
393 | ret, "w", "w", "ir", 2); | ||
326 | return ret; | 394 | return ret; |
327 | } | 395 | } |
328 | 396 | ||
329 | case 4: { | 397 | case 4: { |
330 | u32 tmp; | 398 | u32 tmp; |
331 | __get_user_asm(tmp,(u32 __user *)src,ret,"l","k","=r",4); | 399 | __get_user_asm(tmp, (u32 __user *)src, |
400 | ret, "l", "k", "=r", 4); | ||
332 | if (likely(!ret)) | 401 | if (likely(!ret)) |
333 | __put_user_asm(tmp,(u32 __user *)dst,ret,"l","k","ir",4); | 402 | __put_user_asm(tmp, (u32 __user *)dst, |
403 | ret, "l", "k", "ir", 4); | ||
334 | return ret; | 404 | return ret; |
335 | } | 405 | } |
336 | case 8: { | 406 | case 8: { |
337 | u64 tmp; | 407 | u64 tmp; |
338 | __get_user_asm(tmp,(u64 __user *)src,ret,"q","","=r",8); | 408 | __get_user_asm(tmp, (u64 __user *)src, |
409 | ret, "q", "", "=r", 8); | ||
339 | if (likely(!ret)) | 410 | if (likely(!ret)) |
340 | __put_user_asm(tmp,(u64 __user *)dst,ret,"q","","ir",8); | 411 | __put_user_asm(tmp, (u64 __user *)dst, |
412 | ret, "q", "", "ir", 8); | ||
341 | return ret; | 413 | return ret; |
342 | } | 414 | } |
343 | default: | 415 | default: |
344 | return copy_user_generic((__force void *)dst,(__force void *)src,size); | 416 | return copy_user_generic((__force void *)dst, |
417 | (__force void *)src, size); | ||
345 | } | 418 | } |
346 | } | 419 | } |
347 | 420 | ||
348 | __must_check long | 421 | __must_check long |
349 | strncpy_from_user(char *dst, const char __user *src, long count); | 422 | strncpy_from_user(char *dst, const char __user *src, long count); |
350 | __must_check long | 423 | __must_check long |
351 | __strncpy_from_user(char *dst, const char __user *src, long count); | 424 | __strncpy_from_user(char *dst, const char __user *src, long count); |
352 | __must_check long strnlen_user(const char __user *str, long n); | 425 | __must_check long strnlen_user(const char __user *str, long n); |
353 | __must_check long __strnlen_user(const char __user *str, long n); | 426 | __must_check long __strnlen_user(const char __user *str, long n); |
@@ -355,7 +428,8 @@ __must_check long strlen_user(const char __user *str); | |||
355 | __must_check unsigned long clear_user(void __user *mem, unsigned long len); | 428 | __must_check unsigned long clear_user(void __user *mem, unsigned long len); |
356 | __must_check unsigned long __clear_user(void __user *mem, unsigned long len); | 429 | __must_check unsigned long __clear_user(void __user *mem, unsigned long len); |
357 | 430 | ||
358 | __must_check long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size); | 431 | __must_check long __copy_from_user_inatomic(void *dst, const void __user *src, |
432 | unsigned size); | ||
359 | 433 | ||
360 | static __must_check __always_inline int | 434 | static __must_check __always_inline int |
361 | __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) | 435 | __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) |
@@ -364,15 +438,19 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) | |||
364 | } | 438 | } |
365 | 439 | ||
366 | #define ARCH_HAS_NOCACHE_UACCESS 1 | 440 | #define ARCH_HAS_NOCACHE_UACCESS 1 |
367 | extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest); | 441 | extern long __copy_user_nocache(void *dst, const void __user *src, |
442 | unsigned size, int zerorest); | ||
368 | 443 | ||
369 | static inline int __copy_from_user_nocache(void *dst, const void __user *src, unsigned size) | 444 | static inline int __copy_from_user_nocache(void *dst, const void __user *src, |
445 | unsigned size) | ||
370 | { | 446 | { |
371 | might_sleep(); | 447 | might_sleep(); |
372 | return __copy_user_nocache(dst, src, size, 1); | 448 | return __copy_user_nocache(dst, src, size, 1); |
373 | } | 449 | } |
374 | 450 | ||
375 | static inline int __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size) | 451 | static inline int __copy_from_user_inatomic_nocache(void *dst, |
452 | const void __user *src, | ||
453 | unsigned size) | ||
376 | { | 454 | { |
377 | return __copy_user_nocache(dst, src, size, 0); | 455 | return __copy_user_nocache(dst, src, size, 0); |
378 | } | 456 | } |
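A minimal usage sketch (illustrative, not from the patch) of the two nocache helpers reindented above. The only semantic difference is the zerorest argument: the sleeping variant zeroes the uncopied tail on a partial fault, the inatomic variant leaves it alone. The caller below is hypothetical; in_atomic() comes from linux/hardirq.h.

	static int fill_kernel_buf(void *dst, const void __user *src, unsigned size)
	{
		if (in_atomic())
			/* zerorest == 0: tail left untouched on fault */
			return __copy_from_user_inatomic_nocache(dst, src, size);
		/* zerorest == 1: may sleep, tail zeroed on fault */
		return __copy_from_user_nocache(dst, src, size);
	}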
diff --git a/include/asm-x86/unaligned.h b/include/asm-x86/unaligned.h index 913598d4f761..d270ffe72759 100644 --- a/include/asm-x86/unaligned.h +++ b/include/asm-x86/unaligned.h | |||
@@ -32,6 +32,6 @@ | |||
32 | * | 32 | * |
33 | * Note that unaligned accesses can be very expensive on some architectures. | 33 | * Note that unaligned accesses can be very expensive on some architectures. |
34 | */ | 34 | */ |
35 | #define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) | 35 | #define put_unaligned(val, ptr) ((void)(*(ptr) = (val))) |
36 | 36 | ||
37 | #endif /* _ASM_X86_UNALIGNED_H */ | 37 | #endif /* _ASM_X86_UNALIGNED_H */ |
diff --git a/include/asm-x86/unistd.h b/include/asm-x86/unistd.h index 2a58ed3e51d8..effc7ad8e12f 100644 --- a/include/asm-x86/unistd.h +++ b/include/asm-x86/unistd.h | |||
@@ -1,11 +1,5 @@ | |||
1 | #ifdef __KERNEL__ | 1 | #ifdef __KERNEL__ |
2 | # ifdef CONFIG_X86_32 | 2 | # if defined(CONFIG_X86_32) || defined(__i386__) |
3 | # include "unistd_32.h" | ||
4 | # else | ||
5 | # include "unistd_64.h" | ||
6 | # endif | ||
7 | #else | ||
8 | # ifdef __i386__ | ||
9 | # include "unistd_32.h" | 3 | # include "unistd_32.h" |
10 | # else | 4 | # else |
11 | # include "unistd_64.h" | 5 | # include "unistd_64.h" |
diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h index 984123a68f7c..8317d94771d3 100644 --- a/include/asm-x86/unistd_32.h +++ b/include/asm-x86/unistd_32.h | |||
@@ -81,7 +81,7 @@ | |||
81 | #define __NR_sigpending 73 | 81 | #define __NR_sigpending 73 |
82 | #define __NR_sethostname 74 | 82 | #define __NR_sethostname 74 |
83 | #define __NR_setrlimit 75 | 83 | #define __NR_setrlimit 75 |
84 | #define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ | 84 | #define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ |
85 | #define __NR_getrusage 77 | 85 | #define __NR_getrusage 77 |
86 | #define __NR_gettimeofday 78 | 86 | #define __NR_gettimeofday 78 |
87 | #define __NR_settimeofday 79 | 87 | #define __NR_settimeofday 79 |
diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h index 3883ceb54ef5..fe26e36d0f51 100644 --- a/include/asm-x86/unistd_64.h +++ b/include/asm-x86/unistd_64.h | |||
@@ -2,7 +2,7 @@ | |||
2 | #define _ASM_X86_64_UNISTD_H_ | 2 | #define _ASM_X86_64_UNISTD_H_ |
3 | 3 | ||
4 | #ifndef __SYSCALL | 4 | #ifndef __SYSCALL |
5 | #define __SYSCALL(a,b) | 5 | #define __SYSCALL(a, b) |
6 | #endif | 6 | #endif |
7 | 7 | ||
8 | /* | 8 | /* |
diff --git a/include/asm-x86/user32.h b/include/asm-x86/user32.h index f769872debea..a3d910047879 100644 --- a/include/asm-x86/user32.h +++ b/include/asm-x86/user32.h | |||
@@ -1,7 +1,8 @@ | |||
1 | #ifndef USER32_H | 1 | #ifndef USER32_H |
2 | #define USER32_H 1 | 2 | #define USER32_H 1 |
3 | 3 | ||
4 | /* IA32 compatible user structures for ptrace. These should be used for 32bit coredumps too. */ | 4 | /* IA32 compatible user structures for ptrace. |
5 | * These should be used for 32bit coredumps too. */ | ||
5 | 6 | ||
6 | struct user_i387_ia32_struct { | 7 | struct user_i387_ia32_struct { |
7 | u32 cwd; | 8 | u32 cwd; |
@@ -42,9 +43,9 @@ struct user_regs_struct32 { | |||
42 | }; | 43 | }; |
43 | 44 | ||
44 | struct user32 { | 45 | struct user32 { |
45 | struct user_regs_struct32 regs; /* Where the registers are actually stored */ | 46 | struct user_regs_struct32 regs; /* Where the registers are actually stored */ |
46 | int u_fpvalid; /* True if math co-processor being used. */ | 47 | int u_fpvalid; /* True if math co-processor being used. */ |
47 | /* for this mess. Not yet used. */ | 48 | /* for this mess. Not yet used. */ |
48 | struct user_i387_ia32_struct i387; /* Math Co-processor registers. */ | 49 | struct user_i387_ia32_struct i387; /* Math Co-processor registers. */ |
49 | /* The rest of this junk is to help gdb figure out what goes where */ | 50 | /* The rest of this junk is to help gdb figure out what goes where */ |
50 | __u32 u_tsize; /* Text segment size (pages). */ | 51 | __u32 u_tsize; /* Text segment size (pages). */ |
diff --git a/include/asm-x86/user_32.h b/include/asm-x86/user_32.h index 6157da6f882c..d6e51edc259d 100644 --- a/include/asm-x86/user_32.h +++ b/include/asm-x86/user_32.h | |||
@@ -100,10 +100,10 @@ struct user_regs_struct { | |||
100 | struct user{ | 100 | struct user{ |
101 | /* We start with the registers, to mimic the way that "memory" is returned | 101 | /* We start with the registers, to mimic the way that "memory" is returned |
102 | from the ptrace(3,...) function. */ | 102 | from the ptrace(3,...) function. */ |
103 | struct user_regs_struct regs; /* Where the registers are actually stored */ | 103 | struct user_regs_struct regs; /* Where the registers are actually stored */ |
104 | /* ptrace does not yet supply these. Someday.... */ | 104 | /* ptrace does not yet supply these. Someday.... */ |
105 | int u_fpvalid; /* True if math co-processor being used. */ | 105 | int u_fpvalid; /* True if math co-processor being used. */ |
106 | /* for this mess. Not yet used. */ | 106 | /* for this mess. Not yet used. */ |
107 | struct user_i387_struct i387; /* Math Co-processor registers. */ | 107 | struct user_i387_struct i387; /* Math Co-processor registers. */ |
108 | /* The rest of this junk is to help gdb figure out what goes where */ | 108 | /* The rest of this junk is to help gdb figure out what goes where */ |
109 | unsigned long int u_tsize; /* Text segment size (pages). */ | 109 | unsigned long int u_tsize; /* Text segment size (pages). */ |
@@ -118,7 +118,7 @@ struct user{ | |||
118 | int reserved; /* No longer used */ | 118 | int reserved; /* No longer used */ |
119 | unsigned long u_ar0; /* Used by gdb to help find the values for */ | 119 | unsigned long u_ar0; /* Used by gdb to help find the values for */ |
120 | /* the registers. */ | 120 | /* the registers. */ |
121 | struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */ | 121 | struct user_i387_struct *u_fpstate; /* Math Co-processor pointer. */ |
122 | unsigned long magic; /* To uniquely identify a core file */ | 122 | unsigned long magic; /* To uniquely identify a core file */ |
123 | char u_comm[32]; /* User command that was responsible */ | 123 | char u_comm[32]; /* User command that was responsible */ |
124 | int u_debugreg[8]; | 124 | int u_debugreg[8]; |
diff --git a/include/asm-x86/user_64.h b/include/asm-x86/user_64.h index 963616455609..6037b634c77f 100644 --- a/include/asm-x86/user_64.h +++ b/include/asm-x86/user_64.h | |||
@@ -45,12 +45,13 @@ | |||
45 | */ | 45 | */ |
46 | 46 | ||
47 | /* This matches the 64bit FXSAVE format as defined by AMD. It is the same | 47 | /* This matches the 64bit FXSAVE format as defined by AMD. It is the same |
48 | as the 32bit format defined by Intel, except that the selector:offset pairs for | 48 | as the 32bit format defined by Intel, except that the selector:offset pairs |
49 | data and eip are replaced with flat 64bit pointers. */ | 49 | for data and eip are replaced with flat 64bit pointers. */ |
50 | struct user_i387_struct { | 50 | struct user_i387_struct { |
51 | unsigned short cwd; | 51 | unsigned short cwd; |
52 | unsigned short swd; | 52 | unsigned short swd; |
53 | unsigned short twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */ | 53 | unsigned short twd; /* Note this is not the same as |
54 | the 32bit/x87/FSAVE twd */ | ||
54 | unsigned short fop; | 55 | unsigned short fop; |
55 | __u64 rip; | 56 | __u64 rip; |
56 | __u64 rdp; | 57 | __u64 rdp; |
@@ -97,13 +98,14 @@ struct user_regs_struct { | |||
97 | /* When the kernel dumps core, it starts by dumping the user struct - | 98 | /* When the kernel dumps core, it starts by dumping the user struct - |
98 | this will be used by gdb to figure out where the data and stack segments | 99 | this will be used by gdb to figure out where the data and stack segments |
99 | are within the file, and what virtual addresses to use. */ | 100 | are within the file, and what virtual addresses to use. */ |
100 | struct user{ | 101 | |
102 | struct user { | ||
101 | /* We start with the registers, to mimic the way that "memory" is returned | 103 | /* We start with the registers, to mimic the way that "memory" is returned |
102 | from the ptrace(3,...) function. */ | 104 | from the ptrace(3,...) function. */ |
103 | struct user_regs_struct regs; /* Where the registers are actually stored */ | 105 | struct user_regs_struct regs; /* Where the registers are actually stored */ |
104 | /* ptrace does not yet supply these. Someday.... */ | 106 | /* ptrace does not yet supply these. Someday.... */ |
105 | int u_fpvalid; /* True if math co-processor being used. */ | 107 | int u_fpvalid; /* True if math co-processor being used. */ |
106 | /* for this mess. Not yet used. */ | 108 | /* for this mess. Not yet used. */ |
107 | int pad0; | 109 | int pad0; |
108 | struct user_i387_struct i387; /* Math Co-processor registers. */ | 110 | struct user_i387_struct i387; /* Math Co-processor registers. */ |
109 | /* The rest of this junk is to help gdb figure out what goes where */ | 111 | /* The rest of this junk is to help gdb figure out what goes where */ |
@@ -120,7 +122,7 @@ struct user{ | |||
120 | int pad1; | 122 | int pad1; |
121 | unsigned long u_ar0; /* Used by gdb to help find the values for */ | 123 | unsigned long u_ar0; /* Used by gdb to help find the values for */ |
122 | /* the registers. */ | 124 | /* the registers. */ |
123 | struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */ | 125 | struct user_i387_struct *u_fpstate; /* Math Co-processor pointer. */ |
124 | unsigned long magic; /* To uniquely identify a core file */ | 126 | unsigned long magic; /* To uniquely identify a core file */ |
125 | char u_comm[32]; /* User command that was responsible */ | 127 | char u_comm[32]; /* User command that was responsible */ |
126 | unsigned long u_debugreg[8]; | 128 | unsigned long u_debugreg[8]; |
diff --git a/include/asm-x86/uv/uv_hub.h b/include/asm-x86/uv/uv_hub.h new file mode 100644 index 000000000000..26b9240d1e23 --- /dev/null +++ b/include/asm-x86/uv/uv_hub.h | |||
@@ -0,0 +1,284 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * SGI UV architectural definitions | ||
7 | * | ||
8 | * Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved. | ||
9 | */ | ||
10 | |||
11 | #ifndef __ASM_X86_UV_HUB_H__ | ||
12 | #define __ASM_X86_UV_HUB_H__ | ||
13 | |||
14 | #include <linux/numa.h> | ||
15 | #include <linux/percpu.h> | ||
16 | #include <asm/types.h> | ||
17 | #include <asm/percpu.h> | ||
18 | |||
19 | |||
20 | /* | ||
21 | * Addressing Terminology | ||
22 | * | ||
23 | * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of | ||
24 | * routers always have low bit of 1, C/MBricks have low bit | ||
25 | * equal to 0. Most addressing macros that target UV hub chips | ||
26 | * right shift the NASID by 1 to exclude the always-zero bit. | ||
27 | * | ||
28 | * SNASID - NASID right shifted by 1 bit. | ||
29 | * | ||
30 | * | ||
31 | * Memory/UV-HUB Processor Socket Address Format: | ||
32 | * +--------+---------------+---------------------+ | ||
33 | * |00..0000| SNASID | NodeOffset | | ||
34 | * +--------+---------------+---------------------+ | ||
35 | * <--- N bits --->|<--------M bits -----> | ||
36 | * | ||
37 | * M number of node offset bits (35 .. 40) | ||
38 | * N number of SNASID bits (0 .. 10) | ||
39 | * | ||
40 | * Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64). | ||
41 | * The actual values are configuration dependent and are set at | ||
42 | * boot time | ||
43 | * | ||
44 | * APICID format | ||
45 | * NOTE!!!!!! This is the current format of the APICID. However, code | ||
46 | * should assume that this will change in the future. Use functions | ||
47 | * in this file for all APICID bit manipulations and conversion. | ||
48 | * | ||
49 | * 1111110000000000 | ||
50 | * 5432109876543210 | ||
51 | * nnnnnnnnnnlc0cch | ||
52 | * sssssssssss | ||
53 | * | ||
54 | * n = snasid bits | ||
55 | * l = socket number on board | ||
56 | * c = core | ||
57 | * h = hyperthread | ||
58 | * s = bits that are in the socket CSR | ||
59 | * | ||
60 | * Note: Processor only supports 12 bits in the APICID register. The ACPI | ||
61 | * tables hold all 16 bits. Software needs to be aware of this. | ||
62 | * | ||
63 | * Unless otherwise specified, all references to APICID refer to | ||
64 | * the FULL value contained in ACPI tables, not the subset in the | ||
65 | * processor APICID register. | ||
66 | */ | ||
67 | |||
68 | |||
69 | /* | ||
70 | * Maximum number of bricks in all partitions and in all coherency domains. | ||
71 | * This is the total number of bricks accessible in the numalink fabric. It | ||
72 | * includes all C & M bricks. Routers are NOT included. | ||
73 | * | ||
74 | * This value is also the value of the maximum number of non-router NASIDs | ||
75 | * in the numalink fabric. | ||
76 | * | ||
77 | * NOTE: a brick may be 1 or 2 OS nodes. Don't get these confused. | ||
78 | */ | ||
79 | #define UV_MAX_NUMALINK_BLADES 16384 | ||
80 | |||
81 | /* | ||
82 | * Maximum number of C/Mbricks within a software SSI (hardware may support | ||
83 | * more). | ||
84 | */ | ||
85 | #define UV_MAX_SSI_BLADES 256 | ||
86 | |||
87 | /* | ||
88 | * The largest possible NASID of a C or M brick (+ 2) | ||
89 | */ | ||
90 | #define UV_MAX_NASID_VALUE (UV_MAX_NUMALINK_BLADES * 2) | ||
91 | |||
92 | /* | ||
93 | * The following defines attributes of the HUB chip. These attributes are | ||
94 | * frequently referenced and are kept in the per-cpu data areas of each cpu. | ||
95 | * They are kept together in a struct to minimize cache misses. | ||
96 | */ | ||
97 | struct uv_hub_info_s { | ||
98 | unsigned long global_mmr_base; | ||
99 | unsigned short local_nasid; | ||
100 | unsigned short gnode_upper; | ||
101 | unsigned short coherency_domain_number; | ||
102 | unsigned short numa_blade_id; | ||
103 | unsigned char blade_processor_id; | ||
104 | unsigned char m_val; | ||
105 | unsigned char n_val; | ||
106 | }; | ||
107 | DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); | ||
108 | #define uv_hub_info (&__get_cpu_var(__uv_hub_info)) | ||
109 | #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu)) | ||
110 | |||
111 | /* | ||
112 | * Local & Global MMR space macros. | ||
113 | * Note: macros are intended to be used ONLY by inline functions | ||
114 | * in this file - not by other kernel code. | ||
115 | */ | ||
116 | #define UV_SNASID(n) ((n) >> 1) | ||
117 | #define UV_NASID(n) ((n) << 1) | ||
118 | |||
119 | #define UV_LOCAL_MMR_BASE 0xf4000000UL | ||
120 | #define UV_GLOBAL_MMR32_BASE 0xf8000000UL | ||
121 | #define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base) | ||
122 | |||
123 | #define UV_GLOBAL_MMR32_SNASID_MASK 0x3ff | ||
124 | #define UV_GLOBAL_MMR32_SNASID_SHIFT 15 | ||
125 | #define UV_GLOBAL_MMR64_SNASID_SHIFT 26 | ||
126 | |||
127 | #define UV_GLOBAL_MMR32_NASID_BITS(n) \ | ||
128 | (((UV_SNASID(n) & UV_GLOBAL_MMR32_SNASID_MASK)) << \ | ||
129 | (UV_GLOBAL_MMR32_SNASID_SHIFT)) | ||
130 | |||
131 | #define UV_GLOBAL_MMR64_NASID_BITS(n) \ | ||
132 | ((unsigned long)UV_SNASID(n) << UV_GLOBAL_MMR64_SNASID_SHIFT) | ||
133 | |||
134 | #define UV_APIC_NASID_SHIFT 6 | ||
135 | |||
136 | /* | ||
137 | * Extract a NASID from an APICID (full apicid, not processor subset) | ||
138 | */ | ||
139 | static inline int uv_apicid_to_nasid(int apicid) | ||
140 | { | ||
141 | return UV_NASID(apicid >> UV_APIC_NASID_SHIFT); | ||
142 | } | ||
143 | |||
144 | /* | ||
145 | * Access global MMRs using the low memory MMR32 space. This region supports | ||
146 | * faster MMR access but not all MMRs are accessible in this space. | ||
147 | */ | ||
148 | static inline unsigned long *uv_global_mmr32_address(int nasid, | ||
149 | unsigned long offset) | ||
150 | { | ||
151 | return __va(UV_GLOBAL_MMR32_BASE | | ||
152 | UV_GLOBAL_MMR32_NASID_BITS(nasid) | offset); | ||
153 | } | ||
154 | |||
155 | static inline void uv_write_global_mmr32(int nasid, unsigned long offset, | ||
156 | unsigned long val) | ||
157 | { | ||
158 | *uv_global_mmr32_address(nasid, offset) = val; | ||
159 | } | ||
160 | |||
161 | static inline unsigned long uv_read_global_mmr32(int nasid, | ||
162 | unsigned long offset) | ||
163 | { | ||
164 | return *uv_global_mmr32_address(nasid, offset); | ||
165 | } | ||
166 | |||
167 | /* | ||
168 | * Access Global MMR space using the MMR space located at the top of physical | ||
169 | * memory. | ||
170 | */ | ||
171 | static inline unsigned long *uv_global_mmr64_address(int nasid, | ||
172 | unsigned long offset) | ||
173 | { | ||
174 | return __va(UV_GLOBAL_MMR64_BASE | | ||
175 | UV_GLOBAL_MMR64_NASID_BITS(nasid) | offset); | ||
176 | } | ||
177 | |||
178 | static inline void uv_write_global_mmr64(int nasid, unsigned long offset, | ||
179 | unsigned long val) | ||
180 | { | ||
181 | *uv_global_mmr64_address(nasid, offset) = val; | ||
182 | } | ||
183 | |||
184 | static inline unsigned long uv_read_global_mmr64(int nasid, | ||
185 | unsigned long offset) | ||
186 | { | ||
187 | return *uv_global_mmr64_address(nasid, offset); | ||
188 | } | ||
189 | |||
190 | /* | ||
191 | * Access node local MMRs. Faster than using global space but only local MMRs | ||
192 | * are accessible. | ||
193 | */ | ||
194 | static inline unsigned long *uv_local_mmr_address(unsigned long offset) | ||
195 | { | ||
196 | return __va(UV_LOCAL_MMR_BASE | offset); | ||
197 | } | ||
198 | |||
199 | static inline unsigned long uv_read_local_mmr(unsigned long offset) | ||
200 | { | ||
201 | return *uv_local_mmr_address(offset); | ||
202 | } | ||
203 | |||
204 | static inline void uv_write_local_mmr(unsigned long offset, unsigned long val) | ||
205 | { | ||
206 | *uv_local_mmr_address(offset) = val; | ||
207 | } | ||
208 | |||
209 | /* | ||
210 | * Structures and definitions for converting between cpu, node, and blade | ||
211 | * numbers. | ||
212 | */ | ||
213 | struct uv_blade_info { | ||
214 | unsigned short nr_possible_cpus; | ||
215 | unsigned short nr_online_cpus; | ||
216 | unsigned short nasid; | ||
217 | }; | ||
218 | extern struct uv_blade_info *uv_blade_info; | ||
219 | extern short *uv_node_to_blade; | ||
220 | extern short *uv_cpu_to_blade; | ||
221 | extern short uv_possible_blades; | ||
222 | |||
223 | /* Blade-local cpu number of current cpu. Numbered 0 .. <# cpus on the blade> */ | ||
224 | static inline int uv_blade_processor_id(void) | ||
225 | { | ||
226 | return uv_hub_info->blade_processor_id; | ||
227 | } | ||
228 | |||
229 | /* Blade number of current cpu. Numbered 0 .. <#blades - 1> */ | ||
230 | static inline int uv_numa_blade_id(void) | ||
231 | { | ||
232 | return uv_hub_info->numa_blade_id; | ||
233 | } | ||
234 | |||
235 | /* Convert a cpu number to the UV blade number */ | ||
236 | static inline int uv_cpu_to_blade_id(int cpu) | ||
237 | { | ||
238 | return uv_cpu_to_blade[cpu]; | ||
239 | } | ||
240 | |||
241 | /* Convert linux node number to the UV blade number */ | ||
242 | static inline int uv_node_to_blade_id(int nid) | ||
243 | { | ||
244 | return uv_node_to_blade[nid]; | ||
245 | } | ||
246 | |||
247 | /* Convert a blade id to the NASID of the blade */ | ||
248 | static inline int uv_blade_to_nasid(int bid) | ||
249 | { | ||
250 | return uv_blade_info[bid].nasid; | ||
251 | } | ||
252 | |||
253 | /* Determine the number of possible cpus on a blade */ | ||
254 | static inline int uv_blade_nr_possible_cpus(int bid) | ||
255 | { | ||
256 | return uv_blade_info[bid].nr_possible_cpus; | ||
257 | } | ||
258 | |||
259 | /* Determine the number of online cpus on a blade */ | ||
260 | static inline int uv_blade_nr_online_cpus(int bid) | ||
261 | { | ||
262 | return uv_blade_info[bid].nr_online_cpus; | ||
263 | } | ||
264 | |||
265 | /* Convert a cpu id to the NASID of the blade containing the cpu */ | ||
266 | static inline int uv_cpu_to_nasid(int cpu) | ||
267 | { | ||
268 | return uv_blade_info[uv_cpu_to_blade_id(cpu)].nasid; | ||
269 | } | ||
270 | |||
271 | /* Convert a node number to the NASID of the blade */ | ||
272 | static inline int uv_node_to_nasid(int nid) | ||
273 | { | ||
274 | return uv_blade_info[uv_node_to_blade_id(nid)].nasid; | ||
275 | } | ||
276 | |||
277 | /* Maximum possible number of blades */ | ||
278 | static inline int uv_num_possible_blades(void) | ||
279 | { | ||
280 | return uv_possible_blades; | ||
281 | } | ||
282 | |||
283 | #endif /* __ASM_X86_UV_HUB_H__ */ | ||
284 | |||
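A hedged sketch of how the conversion helpers and MMR accessors above compose (illustrative only; UVH_RTC and its mask come from uv_mmrs.h, added next):

	/* Read the real-time clock on the hub that owns a given cpu. */
	static unsigned long example_read_cpu_rtc(int cpu)
	{
		int nasid = uv_cpu_to_nasid(cpu);	/* cpu -> blade -> NASID */

		return uv_read_global_mmr64(nasid, UVH_RTC) &
			UVH_RTC_REAL_TIME_CLOCK_MASK;
	}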
diff --git a/include/asm-x86/uv/uv_mmrs.h b/include/asm-x86/uv/uv_mmrs.h new file mode 100644 index 000000000000..3b69fe6b6376 --- /dev/null +++ b/include/asm-x86/uv/uv_mmrs.h | |||
@@ -0,0 +1,373 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * SGI UV MMR definitions | ||
7 | * | ||
8 | * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. | ||
9 | */ | ||
10 | |||
11 | #ifndef __ASM_X86_UV_MMRS__ | ||
12 | #define __ASM_X86_UV_MMRS__ | ||
13 | |||
14 | /* | ||
15 | * AUTO GENERATED - Do not edit | ||
16 | */ | ||
17 | |||
18 | #define UV_MMR_ENABLE (1UL << 63) | ||
19 | |||
20 | /* ========================================================================= */ | ||
21 | /* UVH_IPI_INT */ | ||
22 | /* ========================================================================= */ | ||
23 | #define UVH_IPI_INT 0x60500UL | ||
24 | #define UVH_IPI_INT_32 0x0360 | ||
25 | |||
26 | #define UVH_IPI_INT_VECTOR_SHFT 0 | ||
27 | #define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL | ||
28 | #define UVH_IPI_INT_DELIVERY_MODE_SHFT 8 | ||
29 | #define UVH_IPI_INT_DELIVERY_MODE_MASK 0x0000000000000700UL | ||
30 | #define UVH_IPI_INT_DESTMODE_SHFT 11 | ||
31 | #define UVH_IPI_INT_DESTMODE_MASK 0x0000000000000800UL | ||
32 | #define UVH_IPI_INT_APIC_ID_SHFT 16 | ||
33 | #define UVH_IPI_INT_APIC_ID_MASK 0x0000ffffffff0000UL | ||
34 | #define UVH_IPI_INT_SEND_SHFT 63 | ||
35 | #define UVH_IPI_INT_SEND_MASK 0x8000000000000000UL | ||
36 | |||
37 | union uvh_ipi_int_u { | ||
38 | unsigned long v; | ||
39 | struct uvh_ipi_int_s { | ||
40 | unsigned long vector_ : 8; /* RW */ | ||
41 | unsigned long delivery_mode : 3; /* RW */ | ||
42 | unsigned long destmode : 1; /* RW */ | ||
43 | unsigned long rsvd_12_15 : 4; /* */ | ||
44 | unsigned long apic_id : 32; /* RW */ | ||
45 | unsigned long rsvd_48_62 : 15; /* */ | ||
46 | unsigned long send : 1; /* WP */ | ||
47 | } s; | ||
48 | }; | ||
49 | |||
50 | /* ========================================================================= */ | ||
51 | /* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */ | ||
52 | /* ========================================================================= */ | ||
53 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL | ||
54 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x009f0 | ||
55 | |||
56 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4 | ||
57 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL | ||
58 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49 | ||
59 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL | ||
60 | |||
61 | union uvh_lb_bau_intd_payload_queue_first_u { | ||
62 | unsigned long v; | ||
63 | struct uvh_lb_bau_intd_payload_queue_first_s { | ||
64 | unsigned long rsvd_0_3: 4; /* */ | ||
65 | unsigned long address : 39; /* RW */ | ||
66 | unsigned long rsvd_43_48: 6; /* */ | ||
67 | unsigned long node_id : 14; /* RW */ | ||
68 | unsigned long rsvd_63 : 1; /* */ | ||
69 | } s; | ||
70 | }; | ||
71 | |||
72 | /* ========================================================================= */ | ||
73 | /* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */ | ||
74 | /* ========================================================================= */ | ||
75 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL | ||
76 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x009f8 | ||
77 | |||
78 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4 | ||
79 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL | ||
80 | |||
81 | union uvh_lb_bau_intd_payload_queue_last_u { | ||
82 | unsigned long v; | ||
83 | struct uvh_lb_bau_intd_payload_queue_last_s { | ||
84 | unsigned long rsvd_0_3: 4; /* */ | ||
85 | unsigned long address : 39; /* RW */ | ||
86 | unsigned long rsvd_43_63: 21; /* */ | ||
87 | } s; | ||
88 | }; | ||
89 | |||
90 | /* ========================================================================= */ | ||
91 | /* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */ | ||
92 | /* ========================================================================= */ | ||
93 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL | ||
94 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x00a00 | ||
95 | |||
96 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4 | ||
97 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL | ||
98 | |||
99 | union uvh_lb_bau_intd_payload_queue_tail_u { | ||
100 | unsigned long v; | ||
101 | struct uvh_lb_bau_intd_payload_queue_tail_s { | ||
102 | unsigned long rsvd_0_3: 4; /* */ | ||
103 | unsigned long address : 39; /* RW */ | ||
104 | unsigned long rsvd_43_63: 21; /* */ | ||
105 | } s; | ||
106 | }; | ||
107 | |||
108 | /* ========================================================================= */ | ||
109 | /* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */ | ||
110 | /* ========================================================================= */ | ||
111 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL | ||
112 | |||
113 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0 | ||
114 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL | ||
115 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1 | ||
116 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL | ||
117 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2 | ||
118 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL | ||
119 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3 | ||
120 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL | ||
121 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4 | ||
122 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL | ||
123 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5 | ||
124 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL | ||
125 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6 | ||
126 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL | ||
127 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7 | ||
128 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL | ||
129 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8 | ||
130 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL | ||
131 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9 | ||
132 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL | ||
133 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10 | ||
134 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL | ||
135 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11 | ||
136 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL | ||
137 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12 | ||
138 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL | ||
139 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13 | ||
140 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL | ||
141 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14 | ||
142 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL | ||
143 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15 | ||
144 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL | ||
145 | union uvh_lb_bau_intd_software_acknowledge_u { | ||
146 | unsigned long v; | ||
147 | struct uvh_lb_bau_intd_software_acknowledge_s { | ||
148 | unsigned long pending_0 : 1; /* RW, W1C */ | ||
149 | unsigned long pending_1 : 1; /* RW, W1C */ | ||
150 | unsigned long pending_2 : 1; /* RW, W1C */ | ||
151 | unsigned long pending_3 : 1; /* RW, W1C */ | ||
152 | unsigned long pending_4 : 1; /* RW, W1C */ | ||
153 | unsigned long pending_5 : 1; /* RW, W1C */ | ||
154 | unsigned long pending_6 : 1; /* RW, W1C */ | ||
155 | unsigned long pending_7 : 1; /* RW, W1C */ | ||
156 | unsigned long timeout_0 : 1; /* RW, W1C */ | ||
157 | unsigned long timeout_1 : 1; /* RW, W1C */ | ||
158 | unsigned long timeout_2 : 1; /* RW, W1C */ | ||
159 | unsigned long timeout_3 : 1; /* RW, W1C */ | ||
160 | unsigned long timeout_4 : 1; /* RW, W1C */ | ||
161 | unsigned long timeout_5 : 1; /* RW, W1C */ | ||
162 | unsigned long timeout_6 : 1; /* RW, W1C */ | ||
163 | unsigned long timeout_7 : 1; /* RW, W1C */ | ||
164 | unsigned long rsvd_16_63: 48; /* */ | ||
165 | } s; | ||
166 | }; | ||
167 | |||
168 | /* ========================================================================= */ | ||
169 | /* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */ | ||
170 | /* ========================================================================= */ | ||
171 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL | ||
172 | |||
173 | /* ========================================================================= */ | ||
174 | /* UVH_LB_BAU_SB_ACTIVATION_CONTROL */ | ||
175 | /* ========================================================================= */ | ||
176 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL | ||
177 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x009d8 | ||
178 | |||
179 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0 | ||
180 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL | ||
181 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62 | ||
182 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_MASK 0x4000000000000000UL | ||
183 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_SHFT 63 | ||
184 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_MASK 0x8000000000000000UL | ||
185 | |||
186 | union uvh_lb_bau_sb_activation_control_u { | ||
187 | unsigned long v; | ||
188 | struct uvh_lb_bau_sb_activation_control_s { | ||
189 | unsigned long index : 6; /* RW */ | ||
190 | unsigned long rsvd_6_61: 56; /* */ | ||
191 | unsigned long push : 1; /* WP */ | ||
192 | unsigned long init : 1; /* WP */ | ||
193 | } s; | ||
194 | }; | ||
195 | |||
196 | /* ========================================================================= */ | ||
197 | /* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */ | ||
198 | /* ========================================================================= */ | ||
199 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL | ||
200 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x009e0 | ||
201 | |||
202 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0 | ||
203 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL | ||
204 | |||
205 | union uvh_lb_bau_sb_activation_status_0_u { | ||
206 | unsigned long v; | ||
207 | struct uvh_lb_bau_sb_activation_status_0_s { | ||
208 | unsigned long status : 64; /* RW */ | ||
209 | } s; | ||
210 | }; | ||
211 | |||
212 | /* ========================================================================= */ | ||
213 | /* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */ | ||
214 | /* ========================================================================= */ | ||
215 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL | ||
216 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x009e8 | ||
217 | |||
218 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0 | ||
219 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL | ||
220 | |||
221 | union uvh_lb_bau_sb_activation_status_1_u { | ||
222 | unsigned long v; | ||
223 | struct uvh_lb_bau_sb_activation_status_1_s { | ||
224 | unsigned long status : 64; /* RW */ | ||
225 | } s; | ||
226 | }; | ||
227 | |||
228 | /* ========================================================================= */ | ||
229 | /* UVH_LB_BAU_SB_DESCRIPTOR_BASE */ | ||
230 | /* ========================================================================= */ | ||
231 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL | ||
232 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x009d0 | ||
233 | |||
234 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12 | ||
235 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL | ||
236 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49 | ||
237 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL | ||
238 | |||
239 | union uvh_lb_bau_sb_descriptor_base_u { | ||
240 | unsigned long v; | ||
241 | struct uvh_lb_bau_sb_descriptor_base_s { | ||
242 | unsigned long rsvd_0_11 : 12; /* */ | ||
243 | unsigned long page_address : 31; /* RW */ | ||
244 | unsigned long rsvd_43_48 : 6; /* */ | ||
245 | unsigned long node_id : 14; /* RW */ | ||
246 | unsigned long rsvd_63 : 1; /* */ | ||
247 | } s; | ||
248 | }; | ||
249 | |||
250 | /* ========================================================================= */ | ||
251 | /* UVH_NODE_ID */ | ||
252 | /* ========================================================================= */ | ||
253 | #define UVH_NODE_ID 0x0UL | ||
254 | |||
255 | #define UVH_NODE_ID_FORCE1_SHFT 0 | ||
256 | #define UVH_NODE_ID_FORCE1_MASK 0x0000000000000001UL | ||
257 | #define UVH_NODE_ID_MANUFACTURER_SHFT 1 | ||
258 | #define UVH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL | ||
259 | #define UVH_NODE_ID_PART_NUMBER_SHFT 12 | ||
260 | #define UVH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL | ||
261 | #define UVH_NODE_ID_REVISION_SHFT 28 | ||
262 | #define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL | ||
263 | #define UVH_NODE_ID_NODE_ID_SHFT 32 | ||
264 | #define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL | ||
265 | #define UVH_NODE_ID_NODES_PER_BIT_SHFT 48 | ||
266 | #define UVH_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL | ||
267 | #define UVH_NODE_ID_NI_PORT_SHFT 56 | ||
268 | #define UVH_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL | ||
269 | |||
270 | union uvh_node_id_u { | ||
271 | unsigned long v; | ||
272 | struct uvh_node_id_s { | ||
273 | unsigned long force1 : 1; /* RO */ | ||
274 | unsigned long manufacturer : 11; /* RO */ | ||
275 | unsigned long part_number : 16; /* RO */ | ||
276 | unsigned long revision : 4; /* RO */ | ||
277 | unsigned long node_id : 15; /* RW */ | ||
278 | unsigned long rsvd_47 : 1; /* */ | ||
279 | unsigned long nodes_per_bit : 7; /* RW */ | ||
280 | unsigned long rsvd_55 : 1; /* */ | ||
281 | unsigned long ni_port : 4; /* RO */ | ||
282 | unsigned long rsvd_60_63 : 4; /* */ | ||
283 | } s; | ||
284 | }; | ||
285 | |||
286 | /* ========================================================================= */ | ||
287 | /* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ | ||
288 | /* ========================================================================= */ | ||
289 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL | ||
290 | |||
291 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 | ||
292 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL | ||
293 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 46 | ||
294 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0000400000000000UL | ||
295 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 | ||
296 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL | ||
297 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 | ||
298 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL | ||
299 | |||
300 | union uvh_rh_gam_gru_overlay_config_mmr_u { | ||
301 | unsigned long v; | ||
302 | struct uvh_rh_gam_gru_overlay_config_mmr_s { | ||
303 | unsigned long rsvd_0_27: 28; /* */ | ||
304 | unsigned long base : 18; /* RW */ | ||
305 | unsigned long gr4 : 1; /* RW */ | ||
306 | unsigned long rsvd_47_51: 5; /* */ | ||
307 | unsigned long n_gru : 4; /* RW */ | ||
308 | unsigned long rsvd_56_62: 7; /* */ | ||
309 | unsigned long enable : 1; /* RW */ | ||
310 | } s; | ||
311 | }; | ||
312 | |||
313 | /* ========================================================================= */ | ||
314 | /* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */ | ||
315 | /* ========================================================================= */ | ||
316 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL | ||
317 | |||
318 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 | ||
319 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL | ||
320 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46 | ||
321 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL | ||
322 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 | ||
323 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL | ||
324 | |||
325 | union uvh_rh_gam_mmr_overlay_config_mmr_u { | ||
326 | unsigned long v; | ||
327 | struct uvh_rh_gam_mmr_overlay_config_mmr_s { | ||
328 | unsigned long rsvd_0_25: 26; /* */ | ||
329 | unsigned long base : 20; /* RW */ | ||
330 | unsigned long dual_hub : 1; /* RW */ | ||
331 | unsigned long rsvd_47_62: 16; /* */ | ||
332 | unsigned long enable : 1; /* RW */ | ||
333 | } s; | ||
334 | }; | ||
335 | |||
336 | /* ========================================================================= */ | ||
337 | /* UVH_RTC */ | ||
338 | /* ========================================================================= */ | ||
339 | #define UVH_RTC 0x28000UL | ||
340 | |||
341 | #define UVH_RTC_REAL_TIME_CLOCK_SHFT 0 | ||
342 | #define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL | ||
343 | |||
344 | union uvh_rtc_u { | ||
345 | unsigned long v; | ||
346 | struct uvh_rtc_s { | ||
347 | unsigned long real_time_clock : 56; /* RW */ | ||
348 | unsigned long rsvd_56_63 : 8; /* */ | ||
349 | } s; | ||
350 | }; | ||
351 | |||
352 | /* ========================================================================= */ | ||
353 | /* UVH_SI_ADDR_MAP_CONFIG */ | ||
354 | /* ========================================================================= */ | ||
355 | #define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL | ||
356 | |||
357 | #define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0 | ||
358 | #define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL | ||
359 | #define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8 | ||
360 | #define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL | ||
361 | |||
362 | union uvh_si_addr_map_config_u { | ||
363 | unsigned long v; | ||
364 | struct uvh_si_addr_map_config_s { | ||
365 | unsigned long m_skt : 6; /* RW */ | ||
366 | unsigned long rsvd_6_7: 2; /* */ | ||
367 | unsigned long n_skt : 4; /* RW */ | ||
368 | unsigned long rsvd_12_63: 52; /* */ | ||
369 | } s; | ||
370 | }; | ||
371 | |||
372 | |||
373 | #endif /* __ASM_X86_UV_MMRS__ */ | ||
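Each union above pairs a raw 64-bit view (v) with named bitfields (s), so a register value can be composed field by field and written in a single store. A hedged sketch, loosely modeled on an IPI send; uv_write_global_mmr64() is from uv_hub.h, and the fields left at zero are a deliberate simplification:

	static void example_uv_send_ipi(int nasid, int apicid, int vector)
	{
		union uvh_ipi_int_u ipi;

		ipi.v = 0;
		ipi.s.vector_ = vector;
		ipi.s.apic_id = apicid;
		ipi.s.send = 1;		/* write-pulse bit: fires the IPI */
		uv_write_global_mmr64(nasid, UVH_IPI_INT, ipi.v);
	}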
diff --git a/include/asm-x86/vdso.h b/include/asm-x86/vdso.h index 629bcb6e8e45..86e085e003d2 100644 --- a/include/asm-x86/vdso.h +++ b/include/asm-x86/vdso.h | |||
@@ -8,9 +8,11 @@ extern const char VDSO64_PRELINK[]; | |||
8 | * Given a pointer to the vDSO image, find the pointer to VDSO64_name | 8 | * Given a pointer to the vDSO image, find the pointer to VDSO64_name |
9 | * as that symbol is defined in the vDSO sources or linker script. | 9 | * as that symbol is defined in the vDSO sources or linker script. |
10 | */ | 10 | */ |
11 | #define VDSO64_SYMBOL(base, name) ({ \ | 11 | #define VDSO64_SYMBOL(base, name) \ |
12 | extern const char VDSO64_##name[]; \ | 12 | ({ \ |
13 | (void *) (VDSO64_##name - VDSO64_PRELINK + (unsigned long) (base)); }) | 13 | extern const char VDSO64_##name[]; \ |
14 | (void *)(VDSO64_##name - VDSO64_PRELINK + (unsigned long)(base)); \ | ||
15 | }) | ||
14 | #endif | 16 | #endif |
15 | 17 | ||
16 | #if defined CONFIG_X86_32 || defined CONFIG_COMPAT | 18 | #if defined CONFIG_X86_32 || defined CONFIG_COMPAT |
@@ -20,9 +22,18 @@ extern const char VDSO32_PRELINK[]; | |||
20 | * Given a pointer to the vDSO image, find the pointer to VDSO32_name | 22 | * Given a pointer to the vDSO image, find the pointer to VDSO32_name |
21 | * as that symbol is defined in the vDSO sources or linker script. | 23 | * as that symbol is defined in the vDSO sources or linker script. |
22 | */ | 24 | */ |
23 | #define VDSO32_SYMBOL(base, name) ({ \ | 25 | #define VDSO32_SYMBOL(base, name) \ |
24 | extern const char VDSO32_##name[]; \ | 26 | ({ \ |
25 | (void *) (VDSO32_##name - VDSO32_PRELINK + (unsigned long) (base)); }) | 27 | extern const char VDSO32_##name[]; \ |
28 | (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \ | ||
29 | }) | ||
26 | #endif | 30 | #endif |
27 | 31 | ||
32 | /* | ||
33 | * These symbols are defined with the addresses in the vsyscall page. | ||
34 | * See vsyscall-sigreturn.S. | ||
35 | */ | ||
36 | extern void __user __kernel_sigreturn; | ||
37 | extern void __user __kernel_rt_sigreturn; | ||
38 | |||
28 | #endif /* asm-x86/vdso.h */ | 39 | #endif /* asm-x86/vdso.h */ |
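A hedged example of the reflowed lookup macro in use; "vdso64_base" and the "rt_sigreturn" symbol name are assumptions for illustration. The macro declares the extern VDSO64_##name array and rebases its prelinked address onto the mapped image:

	/* hypothetical: resolve a trampoline inside a mapped 64-bit vDSO */
	void *trampoline = VDSO64_SYMBOL(vdso64_base, rt_sigreturn);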
diff --git a/include/asm-x86/vga.h b/include/asm-x86/vga.h index 0ecf68ac03aa..0ccf804377e6 100644 --- a/include/asm-x86/vga.h +++ b/include/asm-x86/vga.h | |||
@@ -12,9 +12,9 @@ | |||
12 | * access the videoram directly without any black magic. | 12 | * access the videoram directly without any black magic. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #define VGA_MAP_MEM(x,s) (unsigned long)phys_to_virt(x) | 15 | #define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x) |
16 | 16 | ||
17 | #define vga_readb(x) (*(x)) | 17 | #define vga_readb(x) (*(x)) |
18 | #define vga_writeb(x,y) (*(y) = (x)) | 18 | #define vga_writeb(x, y) (*(y) = (x)) |
19 | 19 | ||
20 | #endif | 20 | #endif |
diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h index c92fe4af52e8..074b357146df 100644 --- a/include/asm-x86/vm86.h +++ b/include/asm-x86/vm86.h | |||
@@ -12,19 +12,13 @@ | |||
12 | * Linus | 12 | * Linus |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #define TF_MASK 0x00000100 | 15 | #include <asm/processor-flags.h> |
16 | #define IF_MASK 0x00000200 | 16 | |
17 | #define IOPL_MASK 0x00003000 | ||
18 | #define NT_MASK 0x00004000 | ||
19 | #ifdef CONFIG_VM86 | 17 | #ifdef CONFIG_VM86 |
20 | #define VM_MASK 0x00020000 | 18 | #define X86_VM_MASK X86_EFLAGS_VM |
21 | #else | 19 | #else |
22 | #define VM_MASK 0 /* ignored */ | 20 | #define X86_VM_MASK 0 /* No VM86 support */ |
23 | #endif | 21 | #endif |
24 | #define AC_MASK 0x00040000 | ||
25 | #define VIF_MASK 0x00080000 /* virtual interrupt flag */ | ||
26 | #define VIP_MASK 0x00100000 /* virtual interrupt pending */ | ||
27 | #define ID_MASK 0x00200000 | ||
28 | 22 | ||
29 | #define BIOSSEG 0x0f000 | 23 | #define BIOSSEG 0x0f000 |
30 | 24 | ||
@@ -42,9 +36,11 @@ | |||
42 | #define VM86_ARG(retval) ((retval) >> 8) | 36 | #define VM86_ARG(retval) ((retval) >> 8) |
43 | 37 | ||
44 | #define VM86_SIGNAL 0 /* return due to signal */ | 38 | #define VM86_SIGNAL 0 /* return due to signal */ |
45 | #define VM86_UNKNOWN 1 /* unhandled GP fault - IO-instruction or similar */ | 39 | #define VM86_UNKNOWN 1 /* unhandled GP fault |
40 | - IO-instruction or similar */ | ||
46 | #define VM86_INTx 2 /* int3/int x instruction (ARG = x) */ | 41 | #define VM86_INTx 2 /* int3/int x instruction (ARG = x) */ |
47 | #define VM86_STI 3 /* sti/popf/iret instruction enabled virtual interrupts */ | 42 | #define VM86_STI 3 /* sti/popf/iret instruction enabled |
43 | virtual interrupts */ | ||
48 | 44 | ||
49 | /* | 45 | /* |
50 | * Additional return values when invoking new vm86() | 46 | * Additional return values when invoking new vm86() |
@@ -205,7 +201,8 @@ void release_vm86_irqs(struct task_struct *); | |||
205 | #define handle_vm86_fault(a, b) | 201 | #define handle_vm86_fault(a, b) |
206 | #define release_vm86_irqs(a) | 202 | #define release_vm86_irqs(a) |
207 | 203 | ||
208 | static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) { | 204 | static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) |
205 | { | ||
209 | return 0; | 206 | return 0; |
210 | } | 207 | } |
211 | 208 | ||
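A hedged sketch of how the packed return value documented above is decoded; VM86_TYPE is the companion macro defined alongside VM86_ARG in this header, and do_int() is a hypothetical handler:

	static void example_handle_vm86_return(int retval)
	{
		switch (VM86_TYPE(retval)) {
		case VM86_SIGNAL:		/* interrupted by a signal */
			break;
		case VM86_INTx:			/* int x executed: ARG is x */
			do_int(VM86_ARG(retval));
			break;
		case VM86_STI:			/* virtual interrupts re-enabled */
			break;
		}
	}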
diff --git a/include/asm-x86/vmi.h b/include/asm-x86/vmi.h index eb8bd892c01e..b7c0dea119fe 100644 --- a/include/asm-x86/vmi.h +++ b/include/asm-x86/vmi.h | |||
@@ -155,9 +155,9 @@ | |||
155 | 155 | ||
156 | #ifndef __ASSEMBLY__ | 156 | #ifndef __ASSEMBLY__ |
157 | struct vmi_relocation_info { | 157 | struct vmi_relocation_info { |
158 | unsigned char *eip; | 158 | unsigned char *eip; |
159 | unsigned char type; | 159 | unsigned char type; |
160 | unsigned char reserved[3]; | 160 | unsigned char reserved[3]; |
161 | }; | 161 | }; |
162 | #endif | 162 | #endif |
163 | 163 | ||
@@ -173,53 +173,53 @@ struct vmi_relocation_info { | |||
173 | #ifndef __ASSEMBLY__ | 173 | #ifndef __ASSEMBLY__ |
174 | 174 | ||
175 | struct vrom_header { | 175 | struct vrom_header { |
176 | u16 rom_signature; // option ROM signature | 176 | u16 rom_signature; /* option ROM signature */ |
177 | u8 rom_length; // ROM length in 512 byte chunks | 177 | u8 rom_length; /* ROM length in 512 byte chunks */ |
178 | u8 rom_entry[4]; // 16-bit code entry point | 178 | u8 rom_entry[4]; /* 16-bit code entry point */ |
179 | u8 rom_pad0; // 4-byte align pad | 179 | u8 rom_pad0; /* 4-byte align pad */ |
180 | u32 vrom_signature; // VROM identification signature | 180 | u32 vrom_signature; /* VROM identification signature */ |
181 | u8 api_version_min;// Minor version of API | 181 | u8 api_version_min; /* Minor version of API */
182 | u8 api_version_maj;// Major version of API | 182 | u8 api_version_maj; /* Major version of API */
183 | u8 jump_slots; // Number of jump slots | 183 | u8 jump_slots; /* Number of jump slots */ |
184 | u8 reserved1; // Reserved for expansion | 184 | u8 reserved1; /* Reserved for expansion */ |
185 | u32 virtual_top; // Hypervisor virtual address start | 185 | u32 virtual_top; /* Hypervisor virtual address start */ |
186 | u16 reserved2; // Reserved for expansion | 186 | u16 reserved2; /* Reserved for expansion */ |
187 | u16 license_offs; // Offset to License string | 187 | u16 license_offs; /* Offset to License string */ |
188 | u16 pci_header_offs;// Offset to PCI OPROM header | 188 | u16 pci_header_offs; /* Offset to PCI OPROM header */
189 | u16 pnp_header_offs;// Offset to PnP OPROM header | 189 | u16 pnp_header_offs; /* Offset to PnP OPROM header */
190 | u32 rom_pad3; // PnP reserved / VMI reserved | 190 | u32 rom_pad3; /* PnP reserved / VMI reserved */
191 | u8 reserved[96]; // Reserved for headers | 191 | u8 reserved[96]; /* Reserved for headers */ |
192 | char vmi_init[8]; // VMI_Init jump point | 192 | char vmi_init[8]; /* VMI_Init jump point */ |
193 | char get_reloc[8]; // VMI_GetRelocationInfo jump point | 193 | char get_reloc[8]; /* VMI_GetRelocationInfo jump point */ |
194 | } __attribute__((packed)); | 194 | } __attribute__((packed)); |
195 | 195 | ||
196 | struct pnp_header { | 196 | struct pnp_header { |
197 | char sig[4]; | 197 | char sig[4]; |
198 | char rev; | 198 | char rev; |
199 | char size; | 199 | char size; |
200 | short next; | 200 | short next; |
201 | short res; | 201 | short res; |
202 | long devID; | 202 | long devID; |
203 | unsigned short manufacturer_offset; | 203 | unsigned short manufacturer_offset; |
204 | unsigned short product_offset; | 204 | unsigned short product_offset; |
205 | } __attribute__((packed)); | 205 | } __attribute__((packed)); |
206 | 206 | ||
207 | struct pci_header { | 207 | struct pci_header { |
208 | char sig[4]; | 208 | char sig[4]; |
209 | short vendorID; | 209 | short vendorID; |
210 | short deviceID; | 210 | short deviceID; |
211 | short vpdData; | 211 | short vpdData; |
212 | short size; | 212 | short size; |
213 | char rev; | 213 | char rev; |
214 | char class; | 214 | char class; |
215 | char subclass; | 215 | char subclass; |
216 | char interface; | 216 | char interface; |
217 | short chunks; | 217 | short chunks; |
218 | char rom_version_min; | 218 | char rom_version_min; |
219 | char rom_version_maj; | 219 | char rom_version_maj; |
220 | char codetype; | 220 | char codetype; |
221 | char lastRom; | 221 | char lastRom; |
222 | short reserved; | 222 | short reserved; |
223 | } __attribute__((packed)); | 223 | } __attribute__((packed)); |
224 | 224 | ||
225 | /* Function prototypes for bootstrapping */ | 225 | /* Function prototypes for bootstrapping */ |
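A hedged sketch of how the packed vrom_header above might be validated when scanning option-ROM space. 0xaa55 is the standard PC option-ROM signature; VMI_SIGNATURE is assumed to be defined elsewhere in this header:

	static int example_probe_vrom(const struct vrom_header *vh)
	{
		return vh->rom_signature == 0xaa55 &&
		       vh->vrom_signature == VMI_SIGNATURE;
	}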
diff --git a/include/asm-x86/voyager.h b/include/asm-x86/voyager.h index 91a9932937ab..9c811d2e6f91 100644 --- a/include/asm-x86/voyager.h +++ b/include/asm-x86/voyager.h | |||
@@ -91,8 +91,7 @@ | |||
91 | #define VOYAGER_WRITE_CONFIG 0x2 | 91 | #define VOYAGER_WRITE_CONFIG 0x2 |
92 | #define VOYAGER_BYPASS 0xff | 92 | #define VOYAGER_BYPASS 0xff |
93 | 93 | ||
94 | typedef struct voyager_asic | 94 | typedef struct voyager_asic { |
95 | { | ||
96 | __u8 asic_addr; /* ASIC address; Level 4 */ | 95 | __u8 asic_addr; /* ASIC address; Level 4 */ |
97 | __u8 asic_type; /* ASIC type */ | 96 | __u8 asic_type; /* ASIC type */ |
98 | __u8 asic_id; /* ASIC id */ | 97 | __u8 asic_id; /* ASIC id */ |
@@ -113,7 +112,7 @@ typedef struct voyager_module { | |||
113 | __u16 largest_reg; /* Largest register in the scan path */ | 112 | __u16 largest_reg; /* Largest register in the scan path */ |
114 | __u16 smallest_reg; /* Smallest register in the scan path */ | 113 | __u16 smallest_reg; /* Smallest register in the scan path */ |
115 | voyager_asic_t *asic; /* First ASIC in scan path (CAT_I) */ | 114 | voyager_asic_t *asic; /* First ASIC in scan path (CAT_I) */ |
116 | struct voyager_module *submodule; /* Submodule pointer */ | 115 | struct voyager_module *submodule; /* Submodule pointer */ |
117 | struct voyager_module *next; /* Next module in linked list */ | 116 | struct voyager_module *next; /* Next module in linked list */ |
118 | } voyager_module_t; | 117 | } voyager_module_t; |
119 | 118 | ||
@@ -135,7 +134,7 @@ typedef struct voyager_eeprom_hdr { | |||
135 | __u16 cct_offset; | 134 | __u16 cct_offset; |
136 | __u16 log_length; /* length of err log */ | 135 | __u16 log_length; /* length of err log */ |
137 | __u16 xsum_end; /* offset to end of | 136 | __u16 xsum_end; /* offset to end of |
138 | checksum */ | 137 | checksum */ |
139 | __u8 reserved[4]; | 138 | __u8 reserved[4]; |
140 | __u8 sflag; /* starting sentinel */ | 139 | __u8 sflag; /* starting sentinel */
141 | __u8 part_number[13]; /* prom part number */ | 140 | __u8 part_number[13]; /* prom part number */ |
@@ -148,7 +147,8 @@ typedef struct voyager_eeprom_hdr { | |||
148 | 147 | ||
149 | 148 | ||
150 | 149 | ||
151 | #define VOYAGER_EPROM_SIZE_OFFSET ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size))) | 150 | #define VOYAGER_EPROM_SIZE_OFFSET \ |
151 | ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size))) | ||
152 | #define VOYAGER_XSUM_END_OFFSET 0x2a | 152 | #define VOYAGER_XSUM_END_OFFSET 0x2a |
153 | 153 | ||
154 | /* the following three definitions are for internal table layouts | 154 | /* the following three definitions are for internal table layouts |
@@ -199,7 +199,7 @@ typedef struct voyager_asic_data_table { | |||
199 | #define VOYAGER_WCBIC_TOM_L 0x4 | 199 | #define VOYAGER_WCBIC_TOM_L 0x4 |
200 | #define VOYAGER_WCBIC_TOM_H 0x5 | 200 | #define VOYAGER_WCBIC_TOM_H 0x5 |
201 | 201 | ||
202 | /* register defines for Voyager Memory Control (VMC) | 202 | /* register defines for Voyager Memory Control (VMC)
203 | * these are present on L4 machines only */ | 203 | * these are present on L4 machines only */ |
204 | #define VOYAGER_VMC1 0x81 | 204 | #define VOYAGER_VMC1 0x81 |
205 | #define VOYAGER_VMC2 0x91 | 205 | #define VOYAGER_VMC2 0x91 |
@@ -334,7 +334,7 @@ typedef struct { | |||
334 | 334 | ||
335 | struct QuadDescription { | 335 | struct QuadDescription { |
336 | __u8 Type; /* for type 0 (DYADIC or MONADIC) all fields | 336 | __u8 Type; /* for type 0 (DYADIC or MONADIC) all fields |
337 | * will be zero except for slot */ | 337 | * will be zero except for slot */ |
338 | __u8 StructureVersion; | 338 | __u8 StructureVersion; |
339 | __u32 CPI_BaseAddress; | 339 | __u32 CPI_BaseAddress; |
340 | __u32 LARC_BankSize; | 340 | __u32 LARC_BankSize; |
@@ -342,7 +342,7 @@ struct QuadDescription { | |||
342 | __u8 Slot; /* Processor slots 1 - 4 */ | 342 | __u8 Slot; /* Processor slots 1 - 4 */ |
343 | } __attribute__((packed)); | 343 | } __attribute__((packed)); |
344 | 344 | ||
345 | struct ProcBoardInfo { | 345 | struct ProcBoardInfo { |
346 | __u8 Type; | 346 | __u8 Type; |
347 | __u8 StructureVersion; | 347 | __u8 StructureVersion; |
348 | __u8 NumberOfBoards; | 348 | __u8 NumberOfBoards; |
@@ -382,19 +382,30 @@ struct CPU_Info { | |||
382 | * packed in it by our friend the compiler. | 382 | * packed in it by our friend the compiler. |
383 | */ | 383 | */ |
384 | typedef struct { | 384 | typedef struct { |
385 | __u8 Mailbox_SUS; /* Written to by SUS to give commands/response to the OS */ | 385 | __u8 Mailbox_SUS; /* Written to by SUS to give |
386 | __u8 Mailbox_OS; /* Written to by the OS to give commands/response to SUS */ | 386 | commands/response to the OS */ |
387 | __u8 SUS_MailboxVersion; /* Tells the OS which iteration of the interface SUS supports */ | 387 | __u8 Mailbox_OS; /* Written to by the OS to give |
388 | __u8 OS_MailboxVersion; /* Tells SUS which iteration of the interface the OS supports */ | 388 | commands/response to SUS */ |
389 | __u32 OS_Flags; /* Flags set by the OS as info for SUS */ | 389 | __u8 SUS_MailboxVersion; /* Tells the OS which iteration of the |
390 | __u32 SUS_Flags; /* Flags set by SUS as info for the OS */ | 390 | interface SUS supports */ |
391 | __u32 WatchDogPeriod; /* Watchdog period (in seconds) which the DP uses to see if the OS is dead */ | 391 | __u8 OS_MailboxVersion; /* Tells SUS which iteration of the |
392 | interface the OS supports */ | ||
393 | __u32 OS_Flags; /* Flags set by the OS as info for | ||
394 | SUS */ | ||
395 | __u32 SUS_Flags; /* Flags set by SUS as info | ||
396 | for the OS */ | ||
397 | __u32 WatchDogPeriod; /* Watchdog period (in seconds) which | ||
398 | the DP uses to see if the OS | ||
399 | is dead */ | ||
392 | __u32 WatchDogCount; /* Updated by the OS on every tic. */ | 400 | __u32 WatchDogCount; /* Updated by the OS on every tic. */ |
393 | __u32 MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS where to stuff the SUS error log on a dump */ | 401 | __u32 MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS |
394 | MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS]; /* Storage for MCA POS data */ | 402 | where to stuff the SUS error log |
403 | on a dump */ | ||
404 | MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS]; | ||
405 | /* Storage for MCA POS data */ | ||
395 | /* All new SECOND_PASS_INTERFACE fields added from this point */ | 406 | /* All new SECOND_PASS_INTERFACE fields added from this point */ |
396 | struct ProcBoardInfo *BoardData; | 407 | struct ProcBoardInfo *BoardData; |
397 | struct CPU_Info *CPU_Data; | 408 | struct CPU_Info *CPU_Data; |
398 | /* All new fields must be added from this point */ | 409 | /* All new fields must be added from this point */ |
399 | } Voyager_KernelSUS_Mbox_t; | 410 | } Voyager_KernelSUS_Mbox_t; |
400 | 411 | ||
@@ -478,7 +489,7 @@ struct voyager_SUS { | |||
478 | __u32 SUS_errorlog; | 489 | __u32 SUS_errorlog; |
479 | /* lots of system configuration stuff under here */ | 490 | /* lots of system configuration stuff under here */ |
480 | }; | 491 | }; |
481 | 492 | ||
482 | /* Variables exported by voyager_smp */ | 493 | /* Variables exported by voyager_smp */ |
483 | extern __u32 voyager_extended_vic_processors; | 494 | extern __u32 voyager_extended_vic_processors; |
484 | extern __u32 voyager_allowed_boot_processors; | 495 | extern __u32 voyager_allowed_boot_processors; |
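A hedged sketch of the watchdog handshake the mailbox comments above describe; "mbox" is a hypothetical pointer to the shared Voyager_KernelSUS_Mbox_t:

	/* Once per timer tick, prove to the DP that the OS is alive. */
	static void example_kick_sus_watchdog(Voyager_KernelSUS_Mbox_t *mbox)
	{
		mbox->WatchDogCount++;
	}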
diff --git a/include/asm-x86/xor_32.h b/include/asm-x86/xor_32.h index a41ef1bdd424..067b5c1835a3 100644 --- a/include/asm-x86/xor_32.h +++ b/include/asm-x86/xor_32.h | |||
@@ -16,12 +16,12 @@ | |||
16 | * Copyright (C) 1998 Ingo Molnar. | 16 | * Copyright (C) 1998 Ingo Molnar. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #define LD(x,y) " movq 8*("#x")(%1), %%mm"#y" ;\n" | 19 | #define LD(x, y) " movq 8*("#x")(%1), %%mm"#y" ;\n" |
20 | #define ST(x,y) " movq %%mm"#y", 8*("#x")(%1) ;\n" | 20 | #define ST(x, y) " movq %%mm"#y", 8*("#x")(%1) ;\n" |
21 | #define XO1(x,y) " pxor 8*("#x")(%2), %%mm"#y" ;\n" | 21 | #define XO1(x, y) " pxor 8*("#x")(%2), %%mm"#y" ;\n" |
22 | #define XO2(x,y) " pxor 8*("#x")(%3), %%mm"#y" ;\n" | 22 | #define XO2(x, y) " pxor 8*("#x")(%3), %%mm"#y" ;\n" |
23 | #define XO3(x,y) " pxor 8*("#x")(%4), %%mm"#y" ;\n" | 23 | #define XO3(x, y) " pxor 8*("#x")(%4), %%mm"#y" ;\n" |
24 | #define XO4(x,y) " pxor 8*("#x")(%5), %%mm"#y" ;\n" | 24 | #define XO4(x, y) " pxor 8*("#x")(%5), %%mm"#y" ;\n" |
25 | 25 | ||
26 | #include <asm/i387.h> | 26 | #include <asm/i387.h> |
27 | 27 | ||
@@ -32,24 +32,24 @@ xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | |||
32 | 32 | ||
33 | kernel_fpu_begin(); | 33 | kernel_fpu_begin(); |
34 | 34 | ||
35 | __asm__ __volatile__ ( | 35 | asm volatile( |
36 | #undef BLOCK | 36 | #undef BLOCK |
37 | #define BLOCK(i) \ | 37 | #define BLOCK(i) \ |
38 | LD(i,0) \ | 38 | LD(i, 0) \ |
39 | LD(i+1,1) \ | 39 | LD(i + 1, 1) \ |
40 | LD(i+2,2) \ | 40 | LD(i + 2, 2) \ |
41 | LD(i+3,3) \ | 41 | LD(i + 3, 3) \ |
42 | XO1(i,0) \ | 42 | XO1(i, 0) \ |
43 | ST(i,0) \ | 43 | ST(i, 0) \ |
44 | XO1(i+1,1) \ | 44 | XO1(i + 1, 1) \ |
45 | ST(i+1,1) \ | 45 | ST(i + 1, 1) \ |
46 | XO1(i+2,2) \ | 46 | XO1(i + 2, 2) \ |
47 | ST(i+2,2) \ | 47 | ST(i + 2, 2) \ |
48 | XO1(i+3,3) \ | 48 | XO1(i + 3, 3) \ |
49 | ST(i+3,3) | 49 | ST(i + 3, 3) |
50 | 50 | ||
51 | " .align 32 ;\n" | 51 | " .align 32 ;\n" |
52 | " 1: ;\n" | 52 | " 1: ;\n" |
53 | 53 | ||
54 | BLOCK(0) | 54 | BLOCK(0) |
55 | BLOCK(4) | 55 | BLOCK(4) |
@@ -76,25 +76,25 @@ xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
76 | 76 | ||
77 | kernel_fpu_begin(); | 77 | kernel_fpu_begin(); |
78 | 78 | ||
79 | __asm__ __volatile__ ( | 79 | asm volatile( |
80 | #undef BLOCK | 80 | #undef BLOCK |
81 | #define BLOCK(i) \ | 81 | #define BLOCK(i) \ |
82 | LD(i,0) \ | 82 | LD(i, 0) \ |
83 | LD(i+1,1) \ | 83 | LD(i + 1, 1) \ |
84 | LD(i+2,2) \ | 84 | LD(i + 2, 2) \ |
85 | LD(i+3,3) \ | 85 | LD(i + 3, 3) \ |
86 | XO1(i,0) \ | 86 | XO1(i, 0) \ |
87 | XO1(i+1,1) \ | 87 | XO1(i + 1, 1) \ |
88 | XO1(i+2,2) \ | 88 | XO1(i + 2, 2) \ |
89 | XO1(i+3,3) \ | 89 | XO1(i + 3, 3) \ |
90 | XO2(i,0) \ | 90 | XO2(i, 0) \ |
91 | ST(i,0) \ | 91 | ST(i, 0) \ |
92 | XO2(i+1,1) \ | 92 | XO2(i + 1, 1) \ |
93 | ST(i+1,1) \ | 93 | ST(i + 1, 1) \ |
94 | XO2(i+2,2) \ | 94 | XO2(i + 2, 2) \ |
95 | ST(i+2,2) \ | 95 | ST(i + 2, 2) \ |
96 | XO2(i+3,3) \ | 96 | XO2(i + 3, 3) \ |
97 | ST(i+3,3) | 97 | ST(i + 3, 3) |
98 | 98 | ||
99 | " .align 32 ;\n" | 99 | " .align 32 ;\n" |
100 | " 1: ;\n" | 100 | " 1: ;\n" |
@@ -125,29 +125,29 @@ xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
125 | 125 | ||
126 | kernel_fpu_begin(); | 126 | kernel_fpu_begin(); |
127 | 127 | ||
128 | __asm__ __volatile__ ( | 128 | asm volatile( |
129 | #undef BLOCK | 129 | #undef BLOCK |
130 | #define BLOCK(i) \ | 130 | #define BLOCK(i) \ |
131 | LD(i,0) \ | 131 | LD(i, 0) \ |
132 | LD(i+1,1) \ | 132 | LD(i + 1, 1) \ |
133 | LD(i+2,2) \ | 133 | LD(i + 2, 2) \ |
134 | LD(i+3,3) \ | 134 | LD(i + 3, 3) \ |
135 | XO1(i,0) \ | 135 | XO1(i, 0) \ |
136 | XO1(i+1,1) \ | 136 | XO1(i + 1, 1) \ |
137 | XO1(i+2,2) \ | 137 | XO1(i + 2, 2) \ |
138 | XO1(i+3,3) \ | 138 | XO1(i + 3, 3) \ |
139 | XO2(i,0) \ | 139 | XO2(i, 0) \ |
140 | XO2(i+1,1) \ | 140 | XO2(i + 1, 1) \ |
141 | XO2(i+2,2) \ | 141 | XO2(i + 2, 2) \ |
142 | XO2(i+3,3) \ | 142 | XO2(i + 3, 3) \ |
143 | XO3(i,0) \ | 143 | XO3(i, 0) \ |
144 | ST(i,0) \ | 144 | ST(i, 0) \ |
145 | XO3(i+1,1) \ | 145 | XO3(i + 1, 1) \ |
146 | ST(i+1,1) \ | 146 | ST(i + 1, 1) \ |
147 | XO3(i+2,2) \ | 147 | XO3(i + 2, 2) \ |
148 | ST(i+2,2) \ | 148 | ST(i + 2, 2) \ |
149 | XO3(i+3,3) \ | 149 | XO3(i + 3, 3) \ |
150 | ST(i+3,3) | 150 | ST(i + 3, 3) |
151 | 151 | ||
152 | " .align 32 ;\n" | 152 | " .align 32 ;\n" |
153 | " 1: ;\n" | 153 | " 1: ;\n" |
@@ -186,35 +186,35 @@ xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
186 | because we modify p4 and p5 there, but we can't mark them | 186 | because we modify p4 and p5 there, but we can't mark them |
187 | as read/write, otherwise we'd overflow the 10-asm-operands | 187 | as read/write, otherwise we'd overflow the 10-asm-operands |
188 | limit of GCC < 3.1. */ | 188 | limit of GCC < 3.1. */ |
189 | __asm__ ("" : "+r" (p4), "+r" (p5)); | 189 | asm("" : "+r" (p4), "+r" (p5)); |
190 | 190 | ||
191 | __asm__ __volatile__ ( | 191 | asm volatile( |
192 | #undef BLOCK | 192 | #undef BLOCK |
193 | #define BLOCK(i) \ | 193 | #define BLOCK(i) \ |
194 | LD(i,0) \ | 194 | LD(i, 0) \ |
195 | LD(i+1,1) \ | 195 | LD(i + 1, 1) \ |
196 | LD(i+2,2) \ | 196 | LD(i + 2, 2) \ |
197 | LD(i+3,3) \ | 197 | LD(i + 3, 3) \ |
198 | XO1(i,0) \ | 198 | XO1(i, 0) \ |
199 | XO1(i+1,1) \ | 199 | XO1(i + 1, 1) \ |
200 | XO1(i+2,2) \ | 200 | XO1(i + 2, 2) \ |
201 | XO1(i+3,3) \ | 201 | XO1(i + 3, 3) \ |
202 | XO2(i,0) \ | 202 | XO2(i, 0) \ |
203 | XO2(i+1,1) \ | 203 | XO2(i + 1, 1) \ |
204 | XO2(i+2,2) \ | 204 | XO2(i + 2, 2) \ |
205 | XO2(i+3,3) \ | 205 | XO2(i + 3, 3) \ |
206 | XO3(i,0) \ | 206 | XO3(i, 0) \ |
207 | XO3(i+1,1) \ | 207 | XO3(i + 1, 1) \ |
208 | XO3(i+2,2) \ | 208 | XO3(i + 2, 2) \ |
209 | XO3(i+3,3) \ | 209 | XO3(i + 3, 3) \ |
210 | XO4(i,0) \ | 210 | XO4(i, 0) \ |
211 | ST(i,0) \ | 211 | ST(i, 0) \ |
212 | XO4(i+1,1) \ | 212 | XO4(i + 1, 1) \ |
213 | ST(i+1,1) \ | 213 | ST(i + 1, 1) \ |
214 | XO4(i+2,2) \ | 214 | XO4(i + 2, 2) \ |
215 | ST(i+2,2) \ | 215 | ST(i + 2, 2) \ |
216 | XO4(i+3,3) \ | 216 | XO4(i + 3, 3) \ |
217 | ST(i+3,3) | 217 | ST(i + 3, 3) |
218 | 218 | ||
219 | " .align 32 ;\n" | 219 | " .align 32 ;\n" |
220 | " 1: ;\n" | 220 | " 1: ;\n" |
@@ -233,13 +233,13 @@ xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
233 | " jnz 1b ;\n" | 233 | " jnz 1b ;\n" |
234 | : "+r" (lines), | 234 | : "+r" (lines), |
235 | "+r" (p1), "+r" (p2), "+r" (p3) | 235 | "+r" (p1), "+r" (p2), "+r" (p3) |
236 | : "r" (p4), "r" (p5) | 236 | : "r" (p4), "r" (p5) |
237 | : "memory"); | 237 | : "memory"); |
238 | 238 | ||
239 | /* p4 and p5 were modified, and now the variables are dead. | 239 | /* p4 and p5 were modified, and now the variables are dead. |
240 | Clobber them just to be sure nobody does something stupid | 240 | Clobber them just to be sure nobody does something stupid |
241 | like assuming they have some legal value. */ | 241 | like assuming they have some legal value. */ |
242 | __asm__ ("" : "=r" (p4), "=r" (p5)); | 242 | asm("" : "=r" (p4), "=r" (p5)); |
243 | 243 | ||
244 | kernel_fpu_end(); | 244 | kernel_fpu_end(); |
245 | } | 245 | } |
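The two bare asm statements bracketing the function are the whole trick the comment describes: "+r" pins p4 and p5 to registers on the way in, and "=r" afterwards tells GCC their values are dead, without spending two of the ten operand slots on the main asm. The idiom in isolation (a sketch, not kernel code):

        static void operand_limit_idiom(unsigned long *p4, unsigned long *p5)
        {
                asm("" : "+r" (p4), "+r" (p5)); /* live-in: force into registers */
                /* ... the main asm lists p4/p5 as plain inputs but advances
                 * them behind the compiler's back ... */
                asm("" : "=r" (p4), "=r" (p5)); /* live-out: values now dead */
        }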
@@ -259,7 +259,7 @@ xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | |||
259 | 259 | ||
260 | kernel_fpu_begin(); | 260 | kernel_fpu_begin(); |
261 | 261 | ||
262 | __asm__ __volatile__ ( | 262 | asm volatile( |
263 | " .align 32 ;\n" | 263 | " .align 32 ;\n" |
264 | " 1: ;\n" | 264 | " 1: ;\n" |
265 | " movq (%1), %%mm0 ;\n" | 265 | " movq (%1), %%mm0 ;\n" |
@@ -286,7 +286,7 @@ xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | |||
286 | " pxor 56(%2), %%mm7 ;\n" | 286 | " pxor 56(%2), %%mm7 ;\n" |
287 | " movq %%mm6, 48(%1) ;\n" | 287 | " movq %%mm6, 48(%1) ;\n" |
288 | " movq %%mm7, 56(%1) ;\n" | 288 | " movq %%mm7, 56(%1) ;\n" |
289 | 289 | ||
290 | " addl $64, %1 ;\n" | 290 | " addl $64, %1 ;\n" |
291 | " addl $64, %2 ;\n" | 291 | " addl $64, %2 ;\n" |
292 | " decl %0 ;\n" | 292 | " decl %0 ;\n" |
@@ -307,7 +307,7 @@ xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
307 | 307 | ||
308 | kernel_fpu_begin(); | 308 | kernel_fpu_begin(); |
309 | 309 | ||
310 | __asm__ __volatile__ ( | 310 | asm volatile( |
311 | " .align 32,0x90 ;\n" | 311 | " .align 32,0x90 ;\n" |
312 | " 1: ;\n" | 312 | " 1: ;\n" |
313 | " movq (%1), %%mm0 ;\n" | 313 | " movq (%1), %%mm0 ;\n" |
@@ -342,7 +342,7 @@ xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
342 | " pxor 56(%3), %%mm7 ;\n" | 342 | " pxor 56(%3), %%mm7 ;\n" |
343 | " movq %%mm6, 48(%1) ;\n" | 343 | " movq %%mm6, 48(%1) ;\n" |
344 | " movq %%mm7, 56(%1) ;\n" | 344 | " movq %%mm7, 56(%1) ;\n" |
345 | 345 | ||
346 | " addl $64, %1 ;\n" | 346 | " addl $64, %1 ;\n" |
347 | " addl $64, %2 ;\n" | 347 | " addl $64, %2 ;\n" |
348 | " addl $64, %3 ;\n" | 348 | " addl $64, %3 ;\n" |
@@ -364,7 +364,7 @@ xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
364 | 364 | ||
365 | kernel_fpu_begin(); | 365 | kernel_fpu_begin(); |
366 | 366 | ||
367 | __asm__ __volatile__ ( | 367 | asm volatile( |
368 | " .align 32,0x90 ;\n" | 368 | " .align 32,0x90 ;\n" |
369 | " 1: ;\n" | 369 | " 1: ;\n" |
370 | " movq (%1), %%mm0 ;\n" | 370 | " movq (%1), %%mm0 ;\n" |
@@ -407,7 +407,7 @@ xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
407 | " pxor 56(%4), %%mm7 ;\n" | 407 | " pxor 56(%4), %%mm7 ;\n" |
408 | " movq %%mm6, 48(%1) ;\n" | 408 | " movq %%mm6, 48(%1) ;\n" |
409 | " movq %%mm7, 56(%1) ;\n" | 409 | " movq %%mm7, 56(%1) ;\n" |
410 | 410 | ||
411 | " addl $64, %1 ;\n" | 411 | " addl $64, %1 ;\n" |
412 | " addl $64, %2 ;\n" | 412 | " addl $64, %2 ;\n" |
413 | " addl $64, %3 ;\n" | 413 | " addl $64, %3 ;\n" |
@@ -436,9 +436,9 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
436 | because we modify p4 and p5 there, but we can't mark them | 436 | because we modify p4 and p5 there, but we can't mark them |
437 | as read/write, otherwise we'd overflow the 10-asm-operands | 437 | as read/write, otherwise we'd overflow the 10-asm-operands |
438 | limit of GCC < 3.1. */ | 438 | limit of GCC < 3.1. */ |
439 | __asm__ ("" : "+r" (p4), "+r" (p5)); | 439 | asm("" : "+r" (p4), "+r" (p5)); |
440 | 440 | ||
441 | __asm__ __volatile__ ( | 441 | asm volatile( |
442 | " .align 32,0x90 ;\n" | 442 | " .align 32,0x90 ;\n" |
443 | " 1: ;\n" | 443 | " 1: ;\n" |
444 | " movq (%1), %%mm0 ;\n" | 444 | " movq (%1), %%mm0 ;\n" |
@@ -489,7 +489,7 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
489 | " pxor 56(%5), %%mm7 ;\n" | 489 | " pxor 56(%5), %%mm7 ;\n" |
490 | " movq %%mm6, 48(%1) ;\n" | 490 | " movq %%mm6, 48(%1) ;\n" |
491 | " movq %%mm7, 56(%1) ;\n" | 491 | " movq %%mm7, 56(%1) ;\n" |
492 | 492 | ||
493 | " addl $64, %1 ;\n" | 493 | " addl $64, %1 ;\n" |
494 | " addl $64, %2 ;\n" | 494 | " addl $64, %2 ;\n" |
495 | " addl $64, %3 ;\n" | 495 | " addl $64, %3 ;\n" |
@@ -505,7 +505,7 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
505 | /* p4 and p5 were modified, and now the variables are dead. | 505 | /* p4 and p5 were modified, and now the variables are dead. |
506 | Clobber them just to be sure nobody does something stupid | 506 | Clobber them just to be sure nobody does something stupid |
507 | like assuming they have some legal value. */ | 507 | like assuming they have some legal value. */ |
508 | __asm__ ("" : "=r" (p4), "=r" (p5)); | 508 | asm("" : "=r" (p4), "=r" (p5)); |
509 | 509 | ||
510 | kernel_fpu_end(); | 510 | kernel_fpu_end(); |
511 | } | 511 | } |
@@ -531,11 +531,12 @@ static struct xor_block_template xor_block_p5_mmx = { | |||
531 | * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) | 531 | * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) |
532 | */ | 532 | */ |
533 | 533 | ||
534 | #define XMMS_SAVE do { \ | 534 | #define XMMS_SAVE \ |
535 | do { \ | ||
535 | preempt_disable(); \ | 536 | preempt_disable(); \ |
536 | cr0 = read_cr0(); \ | 537 | cr0 = read_cr0(); \ |
537 | clts(); \ | 538 | clts(); \ |
538 | __asm__ __volatile__ ( \ | 539 | asm volatile( \ |
539 | "movups %%xmm0,(%0) ;\n\t" \ | 540 | "movups %%xmm0,(%0) ;\n\t" \ |
540 | "movups %%xmm1,0x10(%0) ;\n\t" \ | 541 | "movups %%xmm1,0x10(%0) ;\n\t" \ |
541 | "movups %%xmm2,0x20(%0) ;\n\t" \ | 542 | "movups %%xmm2,0x20(%0) ;\n\t" \ |
@@ -543,10 +544,11 @@ static struct xor_block_template xor_block_p5_mmx = { | |||
543 | : \ | 544 | : \ |
544 | : "r" (xmm_save) \ | 545 | : "r" (xmm_save) \ |
545 | : "memory"); \ | 546 | : "memory"); \ |
546 | } while(0) | 547 | } while (0) |
547 | 548 | ||
548 | #define XMMS_RESTORE do { \ | 549 | #define XMMS_RESTORE \ |
549 | __asm__ __volatile__ ( \ | 550 | do { \ |
551 | asm volatile( \ | ||
550 | "sfence ;\n\t" \ | 552 | "sfence ;\n\t" \ |
551 | "movups (%0),%%xmm0 ;\n\t" \ | 553 | "movups (%0),%%xmm0 ;\n\t" \ |
552 | "movups 0x10(%0),%%xmm1 ;\n\t" \ | 554 | "movups 0x10(%0),%%xmm1 ;\n\t" \ |
@@ -557,76 +559,76 @@ static struct xor_block_template xor_block_p5_mmx = { | |||
557 | : "memory"); \ | 559 | : "memory"); \ |
558 | write_cr0(cr0); \ | 560 | write_cr0(cr0); \ |
559 | preempt_enable(); \ | 561 | preempt_enable(); \ |
560 | } while(0) | 562 | } while (0) |
561 | 563 | ||
562 | #define ALIGN16 __attribute__((aligned(16))) | 564 | #define ALIGN16 __attribute__((aligned(16))) |
563 | 565 | ||
564 | #define OFFS(x) "16*("#x")" | 566 | #define OFFS(x) "16*("#x")" |
565 | #define PF_OFFS(x) "256+16*("#x")" | 567 | #define PF_OFFS(x) "256+16*("#x")" |
566 | #define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" | 568 | #define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" |
567 | #define LD(x,y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n" | 569 | #define LD(x, y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n" |
568 | #define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n" | 570 | #define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n" |
569 | #define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n" | 571 | #define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n" |
570 | #define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n" | 572 | #define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n" |
571 | #define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n" | 573 | #define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n" |
572 | #define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n" | 574 | #define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n" |
573 | #define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n" | 575 | #define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n" |
574 | #define XO1(x,y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n" | 576 | #define XO1(x, y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n" |
575 | #define XO2(x,y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n" | 577 | #define XO2(x, y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n" |
576 | #define XO3(x,y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n" | 578 | #define XO3(x, y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n" |
577 | #define XO4(x,y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n" | 579 | #define XO4(x, y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n" |
578 | #define XO5(x,y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n" | 580 | #define XO5(x, y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n" |
579 | 581 | ||
580 | 582 | ||
581 | static void | 583 | static void |
582 | xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | 584 | xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) |
583 | { | 585 | { |
584 | unsigned long lines = bytes >> 8; | 586 | unsigned long lines = bytes >> 8; |
585 | char xmm_save[16*4] ALIGN16; | 587 | char xmm_save[16*4] ALIGN16; |
586 | int cr0; | 588 | int cr0; |
587 | 589 | ||
588 | XMMS_SAVE; | 590 | XMMS_SAVE; |
589 | 591 | ||
590 | __asm__ __volatile__ ( | 592 | asm volatile( |
591 | #undef BLOCK | 593 | #undef BLOCK |
592 | #define BLOCK(i) \ | 594 | #define BLOCK(i) \ |
593 | LD(i,0) \ | 595 | LD(i, 0) \ |
594 | LD(i+1,1) \ | 596 | LD(i + 1, 1) \ |
595 | PF1(i) \ | 597 | PF1(i) \ |
596 | PF1(i+2) \ | 598 | PF1(i + 2) \ |
597 | LD(i+2,2) \ | 599 | LD(i + 2, 2) \ |
598 | LD(i+3,3) \ | 600 | LD(i + 3, 3) \ |
599 | PF0(i+4) \ | 601 | PF0(i + 4) \ |
600 | PF0(i+6) \ | 602 | PF0(i + 6) \ |
601 | XO1(i,0) \ | 603 | XO1(i, 0) \ |
602 | XO1(i+1,1) \ | 604 | XO1(i + 1, 1) \ |
603 | XO1(i+2,2) \ | 605 | XO1(i + 2, 2) \ |
604 | XO1(i+3,3) \ | 606 | XO1(i + 3, 3) \ |
605 | ST(i,0) \ | 607 | ST(i, 0) \ |
606 | ST(i+1,1) \ | 608 | ST(i + 1, 1) \ |
607 | ST(i+2,2) \ | 609 | ST(i + 2, 2) \ |
608 | ST(i+3,3) \ | 610 | ST(i + 3, 3) \ |
609 | 611 | ||
610 | 612 | ||
611 | PF0(0) | 613 | PF0(0) |
612 | PF0(2) | 614 | PF0(2) |
613 | 615 | ||
614 | " .align 32 ;\n" | 616 | " .align 32 ;\n" |
615 | " 1: ;\n" | 617 | " 1: ;\n" |
616 | 618 | ||
617 | BLOCK(0) | 619 | BLOCK(0) |
618 | BLOCK(4) | 620 | BLOCK(4) |
619 | BLOCK(8) | 621 | BLOCK(8) |
620 | BLOCK(12) | 622 | BLOCK(12) |
621 | 623 | ||
622 | " addl $256, %1 ;\n" | 624 | " addl $256, %1 ;\n" |
623 | " addl $256, %2 ;\n" | 625 | " addl $256, %2 ;\n" |
624 | " decl %0 ;\n" | 626 | " decl %0 ;\n" |
625 | " jnz 1b ;\n" | 627 | " jnz 1b ;\n" |
626 | : "+r" (lines), | 628 | : "+r" (lines), |
627 | "+r" (p1), "+r" (p2) | 629 | "+r" (p1), "+r" (p2) |
628 | : | 630 | : |
629 | : "memory"); | 631 | : "memory"); |
630 | 632 | ||
631 | XMMS_RESTORE; | 633 | XMMS_RESTORE; |
632 | } | 634 | } |
@@ -635,59 +637,59 @@ static void | |||
635 | xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | 637 | xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, |
636 | unsigned long *p3) | 638 | unsigned long *p3) |
637 | { | 639 | { |
638 | unsigned long lines = bytes >> 8; | 640 | unsigned long lines = bytes >> 8; |
639 | char xmm_save[16*4] ALIGN16; | 641 | char xmm_save[16*4] ALIGN16; |
640 | int cr0; | 642 | int cr0; |
641 | 643 | ||
642 | XMMS_SAVE; | 644 | XMMS_SAVE; |
643 | 645 | ||
644 | __asm__ __volatile__ ( | 646 | asm volatile( |
645 | #undef BLOCK | 647 | #undef BLOCK |
646 | #define BLOCK(i) \ | 648 | #define BLOCK(i) \ |
647 | PF1(i) \ | 649 | PF1(i) \ |
648 | PF1(i+2) \ | 650 | PF1(i + 2) \ |
649 | LD(i,0) \ | 651 | LD(i,0) \ |
650 | LD(i+1,1) \ | 652 | LD(i + 1, 1) \ |
651 | LD(i+2,2) \ | 653 | LD(i + 2, 2) \ |
652 | LD(i+3,3) \ | 654 | LD(i + 3, 3) \ |
653 | PF2(i) \ | 655 | PF2(i) \ |
654 | PF2(i+2) \ | 656 | PF2(i + 2) \ |
655 | PF0(i+4) \ | 657 | PF0(i + 4) \ |
656 | PF0(i+6) \ | 658 | PF0(i + 6) \ |
657 | XO1(i,0) \ | 659 | XO1(i,0) \ |
658 | XO1(i+1,1) \ | 660 | XO1(i + 1, 1) \ |
659 | XO1(i+2,2) \ | 661 | XO1(i + 2, 2) \ |
660 | XO1(i+3,3) \ | 662 | XO1(i + 3, 3) \ |
661 | XO2(i,0) \ | 663 | XO2(i,0) \ |
662 | XO2(i+1,1) \ | 664 | XO2(i + 1, 1) \ |
663 | XO2(i+2,2) \ | 665 | XO2(i + 2, 2) \ |
664 | XO2(i+3,3) \ | 666 | XO2(i + 3, 3) \ |
665 | ST(i,0) \ | 667 | ST(i,0) \ |
666 | ST(i+1,1) \ | 668 | ST(i + 1, 1) \ |
667 | ST(i+2,2) \ | 669 | ST(i + 2, 2) \ |
668 | ST(i+3,3) \ | 670 | ST(i + 3, 3) \ |
669 | 671 | ||
670 | 672 | ||
671 | PF0(0) | 673 | PF0(0) |
672 | PF0(2) | 674 | PF0(2) |
673 | 675 | ||
674 | " .align 32 ;\n" | 676 | " .align 32 ;\n" |
675 | " 1: ;\n" | 677 | " 1: ;\n" |
676 | 678 | ||
677 | BLOCK(0) | 679 | BLOCK(0) |
678 | BLOCK(4) | 680 | BLOCK(4) |
679 | BLOCK(8) | 681 | BLOCK(8) |
680 | BLOCK(12) | 682 | BLOCK(12) |
681 | 683 | ||
682 | " addl $256, %1 ;\n" | 684 | " addl $256, %1 ;\n" |
683 | " addl $256, %2 ;\n" | 685 | " addl $256, %2 ;\n" |
684 | " addl $256, %3 ;\n" | 686 | " addl $256, %3 ;\n" |
685 | " decl %0 ;\n" | 687 | " decl %0 ;\n" |
686 | " jnz 1b ;\n" | 688 | " jnz 1b ;\n" |
687 | : "+r" (lines), | 689 | : "+r" (lines), |
688 | "+r" (p1), "+r"(p2), "+r"(p3) | 690 | "+r" (p1), "+r"(p2), "+r"(p3) |
689 | : | 691 | : |
690 | : "memory" ); | 692 | : "memory" ); |
691 | 693 | ||
692 | XMMS_RESTORE; | 694 | XMMS_RESTORE; |
693 | } | 695 | } |
@@ -696,66 +698,66 @@ static void | |||
696 | xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | 698 | xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, |
697 | unsigned long *p3, unsigned long *p4) | 699 | unsigned long *p3, unsigned long *p4) |
698 | { | 700 | { |
699 | unsigned long lines = bytes >> 8; | 701 | unsigned long lines = bytes >> 8; |
700 | char xmm_save[16*4] ALIGN16; | 702 | char xmm_save[16*4] ALIGN16; |
701 | int cr0; | 703 | int cr0; |
702 | 704 | ||
703 | XMMS_SAVE; | 705 | XMMS_SAVE; |
704 | 706 | ||
705 | __asm__ __volatile__ ( | 707 | asm volatile( |
706 | #undef BLOCK | 708 | #undef BLOCK |
707 | #define BLOCK(i) \ | 709 | #define BLOCK(i) \ |
708 | PF1(i) \ | 710 | PF1(i) \ |
709 | PF1(i+2) \ | 711 | PF1(i + 2) \ |
710 | LD(i,0) \ | 712 | LD(i,0) \ |
711 | LD(i+1,1) \ | 713 | LD(i + 1, 1) \ |
712 | LD(i+2,2) \ | 714 | LD(i + 2, 2) \ |
713 | LD(i+3,3) \ | 715 | LD(i + 3, 3) \ |
714 | PF2(i) \ | 716 | PF2(i) \ |
715 | PF2(i+2) \ | 717 | PF2(i + 2) \ |
716 | XO1(i,0) \ | 718 | XO1(i,0) \ |
717 | XO1(i+1,1) \ | 719 | XO1(i + 1, 1) \ |
718 | XO1(i+2,2) \ | 720 | XO1(i + 2, 2) \ |
719 | XO1(i+3,3) \ | 721 | XO1(i + 3, 3) \ |
720 | PF3(i) \ | 722 | PF3(i) \ |
721 | PF3(i+2) \ | 723 | PF3(i + 2) \ |
722 | PF0(i+4) \ | 724 | PF0(i + 4) \ |
723 | PF0(i+6) \ | 725 | PF0(i + 6) \ |
724 | XO2(i,0) \ | 726 | XO2(i,0) \ |
725 | XO2(i+1,1) \ | 727 | XO2(i + 1, 1) \ |
726 | XO2(i+2,2) \ | 728 | XO2(i + 2, 2) \ |
727 | XO2(i+3,3) \ | 729 | XO2(i + 3, 3) \ |
728 | XO3(i,0) \ | 730 | XO3(i,0) \ |
729 | XO3(i+1,1) \ | 731 | XO3(i + 1, 1) \ |
730 | XO3(i+2,2) \ | 732 | XO3(i + 2, 2) \ |
731 | XO3(i+3,3) \ | 733 | XO3(i + 3, 3) \ |
732 | ST(i,0) \ | 734 | ST(i,0) \ |
733 | ST(i+1,1) \ | 735 | ST(i + 1, 1) \ |
734 | ST(i+2,2) \ | 736 | ST(i + 2, 2) \ |
735 | ST(i+3,3) \ | 737 | ST(i + 3, 3) \ |
736 | 738 | ||
737 | 739 | ||
738 | PF0(0) | 740 | PF0(0) |
739 | PF0(2) | 741 | PF0(2) |
740 | 742 | ||
741 | " .align 32 ;\n" | 743 | " .align 32 ;\n" |
742 | " 1: ;\n" | 744 | " 1: ;\n" |
743 | 745 | ||
744 | BLOCK(0) | 746 | BLOCK(0) |
745 | BLOCK(4) | 747 | BLOCK(4) |
746 | BLOCK(8) | 748 | BLOCK(8) |
747 | BLOCK(12) | 749 | BLOCK(12) |
748 | 750 | ||
749 | " addl $256, %1 ;\n" | 751 | " addl $256, %1 ;\n" |
750 | " addl $256, %2 ;\n" | 752 | " addl $256, %2 ;\n" |
751 | " addl $256, %3 ;\n" | 753 | " addl $256, %3 ;\n" |
752 | " addl $256, %4 ;\n" | 754 | " addl $256, %4 ;\n" |
753 | " decl %0 ;\n" | 755 | " decl %0 ;\n" |
754 | " jnz 1b ;\n" | 756 | " jnz 1b ;\n" |
755 | : "+r" (lines), | 757 | : "+r" (lines), |
756 | "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) | 758 | "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) |
757 | : | 759 | : |
758 | : "memory" ); | 760 | : "memory" ); |
759 | 761 | ||
760 | XMMS_RESTORE; | 762 | XMMS_RESTORE; |
761 | } | 763 | } |
@@ -764,7 +766,7 @@ static void | |||
764 | xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | 766 | xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, |
765 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | 767 | unsigned long *p3, unsigned long *p4, unsigned long *p5) |
766 | { | 768 | { |
767 | unsigned long lines = bytes >> 8; | 769 | unsigned long lines = bytes >> 8; |
768 | char xmm_save[16*4] ALIGN16; | 770 | char xmm_save[16*4] ALIGN16; |
769 | int cr0; | 771 | int cr0; |
770 | 772 | ||
@@ -776,65 +778,65 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
776 | because we modify p4 and p5 there, but we can't mark them | 778 | because we modify p4 and p5 there, but we can't mark them |
777 | as read/write, otherwise we'd overflow the 10-asm-operands | 779 | as read/write, otherwise we'd overflow the 10-asm-operands |
778 | limit of GCC < 3.1. */ | 780 | limit of GCC < 3.1. */ |
779 | __asm__ ("" : "+r" (p4), "+r" (p5)); | 781 | asm("" : "+r" (p4), "+r" (p5)); |
780 | 782 | ||
781 | __asm__ __volatile__ ( | 783 | asm volatile( |
782 | #undef BLOCK | 784 | #undef BLOCK |
783 | #define BLOCK(i) \ | 785 | #define BLOCK(i) \ |
784 | PF1(i) \ | 786 | PF1(i) \ |
785 | PF1(i+2) \ | 787 | PF1(i + 2) \ |
786 | LD(i,0) \ | 788 | LD(i,0) \ |
787 | LD(i+1,1) \ | 789 | LD(i + 1, 1) \ |
788 | LD(i+2,2) \ | 790 | LD(i + 2, 2) \ |
789 | LD(i+3,3) \ | 791 | LD(i + 3, 3) \ |
790 | PF2(i) \ | 792 | PF2(i) \ |
791 | PF2(i+2) \ | 793 | PF2(i + 2) \ |
792 | XO1(i,0) \ | 794 | XO1(i,0) \ |
793 | XO1(i+1,1) \ | 795 | XO1(i + 1, 1) \ |
794 | XO1(i+2,2) \ | 796 | XO1(i + 2, 2) \ |
795 | XO1(i+3,3) \ | 797 | XO1(i + 3, 3) \ |
796 | PF3(i) \ | 798 | PF3(i) \ |
797 | PF3(i+2) \ | 799 | PF3(i + 2) \ |
798 | XO2(i,0) \ | 800 | XO2(i,0) \ |
799 | XO2(i+1,1) \ | 801 | XO2(i + 1, 1) \ |
800 | XO2(i+2,2) \ | 802 | XO2(i + 2, 2) \ |
801 | XO2(i+3,3) \ | 803 | XO2(i + 3, 3) \ |
802 | PF4(i) \ | 804 | PF4(i) \ |
803 | PF4(i+2) \ | 805 | PF4(i + 2) \ |
804 | PF0(i+4) \ | 806 | PF0(i + 4) \ |
805 | PF0(i+6) \ | 807 | PF0(i + 6) \ |
806 | XO3(i,0) \ | 808 | XO3(i,0) \ |
807 | XO3(i+1,1) \ | 809 | XO3(i + 1, 1) \ |
808 | XO3(i+2,2) \ | 810 | XO3(i + 2, 2) \ |
809 | XO3(i+3,3) \ | 811 | XO3(i + 3, 3) \ |
810 | XO4(i,0) \ | 812 | XO4(i,0) \ |
811 | XO4(i+1,1) \ | 813 | XO4(i + 1, 1) \ |
812 | XO4(i+2,2) \ | 814 | XO4(i + 2, 2) \ |
813 | XO4(i+3,3) \ | 815 | XO4(i + 3, 3) \ |
814 | ST(i,0) \ | 816 | ST(i,0) \ |
815 | ST(i+1,1) \ | 817 | ST(i + 1, 1) \ |
816 | ST(i+2,2) \ | 818 | ST(i + 2, 2) \ |
817 | ST(i+3,3) \ | 819 | ST(i + 3, 3) \ |
818 | 820 | ||
819 | 821 | ||
820 | PF0(0) | 822 | PF0(0) |
821 | PF0(2) | 823 | PF0(2) |
822 | 824 | ||
823 | " .align 32 ;\n" | 825 | " .align 32 ;\n" |
824 | " 1: ;\n" | 826 | " 1: ;\n" |
825 | 827 | ||
826 | BLOCK(0) | 828 | BLOCK(0) |
827 | BLOCK(4) | 829 | BLOCK(4) |
828 | BLOCK(8) | 830 | BLOCK(8) |
829 | BLOCK(12) | 831 | BLOCK(12) |
830 | 832 | ||
831 | " addl $256, %1 ;\n" | 833 | " addl $256, %1 ;\n" |
832 | " addl $256, %2 ;\n" | 834 | " addl $256, %2 ;\n" |
833 | " addl $256, %3 ;\n" | 835 | " addl $256, %3 ;\n" |
834 | " addl $256, %4 ;\n" | 836 | " addl $256, %4 ;\n" |
835 | " addl $256, %5 ;\n" | 837 | " addl $256, %5 ;\n" |
836 | " decl %0 ;\n" | 838 | " decl %0 ;\n" |
837 | " jnz 1b ;\n" | 839 | " jnz 1b ;\n" |
838 | : "+r" (lines), | 840 | : "+r" (lines), |
839 | "+r" (p1), "+r" (p2), "+r" (p3) | 841 | "+r" (p1), "+r" (p2), "+r" (p3) |
840 | : "r" (p4), "r" (p5) | 842 | : "r" (p4), "r" (p5) |
@@ -843,17 +845,17 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
843 | /* p4 and p5 were modified, and now the variables are dead. | 845 | /* p4 and p5 were modified, and now the variables are dead. |
844 | Clobber them just to be sure nobody does something stupid | 846 | Clobber them just to be sure nobody does something stupid |
845 | like assuming they have some legal value. */ | 847 | like assuming they have some legal value. */ |
846 | __asm__ ("" : "=r" (p4), "=r" (p5)); | 848 | asm("" : "=r" (p4), "=r" (p5)); |
847 | 849 | ||
848 | XMMS_RESTORE; | 850 | XMMS_RESTORE; |
849 | } | 851 | } |
850 | 852 | ||
851 | static struct xor_block_template xor_block_pIII_sse = { | 853 | static struct xor_block_template xor_block_pIII_sse = { |
852 | .name = "pIII_sse", | 854 | .name = "pIII_sse", |
853 | .do_2 = xor_sse_2, | 855 | .do_2 = xor_sse_2, |
854 | .do_3 = xor_sse_3, | 856 | .do_3 = xor_sse_3, |
855 | .do_4 = xor_sse_4, | 857 | .do_4 = xor_sse_4, |
856 | .do_5 = xor_sse_5, | 858 | .do_5 = xor_sse_5, |
857 | }; | 859 | }; |
858 | 860 | ||
859 | /* Also try the generic routines. */ | 861 | /* Also try the generic routines. */ |
@@ -861,21 +863,21 @@ static struct xor_block_template xor_block_pIII_sse = { | |||
861 | 863 | ||
862 | #undef XOR_TRY_TEMPLATES | 864 | #undef XOR_TRY_TEMPLATES |
863 | #define XOR_TRY_TEMPLATES \ | 865 | #define XOR_TRY_TEMPLATES \ |
864 | do { \ | 866 | do { \ |
865 | xor_speed(&xor_block_8regs); \ | 867 | xor_speed(&xor_block_8regs); \ |
866 | xor_speed(&xor_block_8regs_p); \ | 868 | xor_speed(&xor_block_8regs_p); \ |
867 | xor_speed(&xor_block_32regs); \ | 869 | xor_speed(&xor_block_32regs); \ |
868 | xor_speed(&xor_block_32regs_p); \ | 870 | xor_speed(&xor_block_32regs_p); \ |
869 | if (cpu_has_xmm) \ | 871 | if (cpu_has_xmm) \ |
870 | xor_speed(&xor_block_pIII_sse); \ | 872 | xor_speed(&xor_block_pIII_sse); \ |
871 | if (cpu_has_mmx) { \ | 873 | if (cpu_has_mmx) { \ |
872 | xor_speed(&xor_block_pII_mmx); \ | 874 | xor_speed(&xor_block_pII_mmx); \ |
873 | xor_speed(&xor_block_p5_mmx); \ | 875 | xor_speed(&xor_block_p5_mmx); \ |
874 | } \ | 876 | } \ |
875 | } while (0) | 877 | } while (0) |
876 | 878 | ||
877 | /* We force the use of the SSE xor block because it can write around L2. | 879 | /* We force the use of the SSE xor block because it can write around L2. |
878 | We may also be able to load into the L1 only depending on how the cpu | 880 | We may also be able to load into the L1 only depending on how the cpu |
879 | deals with a load to a line that is being prefetched. */ | 881 | deals with a load to a line that is being prefetched. */ |
880 | #define XOR_SELECT_TEMPLATE(FASTEST) \ | 882 | #define XOR_SELECT_TEMPLATE(FASTEST) \ |
881 | (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST) | 883 | (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST) |
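XOR_TRY_TEMPLATES benchmarks each candidate via xor_speed(), and XOR_SELECT_TEMPLATE can then override the winner. A hedged sketch of how generic code might dispatch through whichever template was selected (the active_template name is an assumption here, not something this header defines):

        static struct xor_block_template *active_template;

        /* XOR a 4 KB source block into a destination block through the
         * selected implementation. */
        static void xor_two_blocks(unsigned long *dst, unsigned long *src)
        {
                active_template->do_2(4096, dst, src);
        }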
diff --git a/include/asm-x86/xor_64.h b/include/asm-x86/xor_64.h index 1eee7fcb2420..24957e39ac8a 100644 --- a/include/asm-x86/xor_64.h +++ b/include/asm-x86/xor_64.h | |||
@@ -24,20 +24,23 @@ | |||
24 | */ | 24 | */ |
25 | 25 | ||
26 | /* | 26 | /* |
27 | * x86-64 changes / gcc fixes from Andi Kleen. | 27 | * x86-64 changes / gcc fixes from Andi Kleen. |
28 | * Copyright 2002 Andi Kleen, SuSE Labs. | 28 | * Copyright 2002 Andi Kleen, SuSE Labs. |
29 | * | 29 | * |
30 | * This hasn't been optimized for the hammer yet, but there are likely | 30 | * This hasn't been optimized for the hammer yet, but there are likely |
31 | * no advantages to be gotten from x86-64 here anyways. | 31 | * no advantages to be gotten from x86-64 here anyways. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t; | 34 | typedef struct { |
35 | unsigned long a, b; | ||
36 | } __attribute__((aligned(16))) xmm_store_t; | ||
35 | 37 | ||
36 | /* Doesn't use gcc to save the XMM registers, because there is no easy way to | 38 | /* Doesn't use gcc to save the XMM registers, because there is no easy way to |
37 | tell it to do a clts before the register saving. */ | 39 | tell it to do a clts before the register saving. */ |
38 | #define XMMS_SAVE do { \ | 40 | #define XMMS_SAVE \ |
41 | do { \ | ||
39 | preempt_disable(); \ | 42 | preempt_disable(); \ |
40 | asm volatile ( \ | 43 | asm volatile( \ |
41 | "movq %%cr0,%0 ;\n\t" \ | 44 | "movq %%cr0,%0 ;\n\t" \ |
42 | "clts ;\n\t" \ | 45 | "clts ;\n\t" \ |
43 | "movups %%xmm0,(%1) ;\n\t" \ | 46 | "movups %%xmm0,(%1) ;\n\t" \ |
@@ -47,10 +50,11 @@ typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t; | |||
47 | : "=&r" (cr0) \ | 50 | : "=&r" (cr0) \ |
48 | : "r" (xmm_save) \ | 51 | : "r" (xmm_save) \ |
49 | : "memory"); \ | 52 | : "memory"); \ |
50 | } while(0) | 53 | } while (0) |
51 | 54 | ||
52 | #define XMMS_RESTORE do { \ | 55 | #define XMMS_RESTORE \ |
53 | asm volatile ( \ | 56 | do { \ |
57 | asm volatile( \ | ||
54 | "sfence ;\n\t" \ | 58 | "sfence ;\n\t" \ |
55 | "movups (%1),%%xmm0 ;\n\t" \ | 59 | "movups (%1),%%xmm0 ;\n\t" \ |
56 | "movups 0x10(%1),%%xmm1 ;\n\t" \ | 60 | "movups 0x10(%1),%%xmm1 ;\n\t" \ |
@@ -61,72 +65,72 @@ typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t; | |||
61 | : "r" (cr0), "r" (xmm_save) \ | 65 | : "r" (cr0), "r" (xmm_save) \ |
62 | : "memory"); \ | 66 | : "memory"); \ |
63 | preempt_enable(); \ | 67 | preempt_enable(); \ |
64 | } while(0) | 68 | } while (0) |
65 | 69 | ||
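As the comment above the 64-bit XMMS_SAVE notes, the XMM registers are saved by hand because clts must run before the save; the macros also keep preemption off for the whole region so a context switch cannot clobber the parked state. The intended bracketing, in outline (a sketch; the local names xmm_save and cr0 are exactly what the macro expansions reference):

        static void xmm_bracket_example(void)
        {
                xmm_store_t xmm_save[4];        /* referenced by the macros */
                unsigned long cr0;              /* likewise */

                XMMS_SAVE;      /* preempt_disable(), clts, park %xmm0-%xmm3 */
                /* ... SSE work using %xmm0-%xmm3 ... */
                XMMS_RESTORE;   /* sfence, restore, write %cr0, preempt_enable() */
        }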
66 | #define OFFS(x) "16*("#x")" | 70 | #define OFFS(x) "16*("#x")" |
67 | #define PF_OFFS(x) "256+16*("#x")" | 71 | #define PF_OFFS(x) "256+16*("#x")" |
68 | #define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n" | 72 | #define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n" |
69 | #define LD(x,y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n" | 73 | #define LD(x, y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n" |
70 | #define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n" | 74 | #define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n" |
71 | #define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n" | 75 | #define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n" |
72 | #define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n" | 76 | #define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n" |
73 | #define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n" | 77 | #define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n" |
74 | #define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n" | 78 | #define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n" |
75 | #define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n" | 79 | #define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n" |
76 | #define XO1(x,y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n" | 80 | #define XO1(x, y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n" |
77 | #define XO2(x,y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n" | 81 | #define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n" |
78 | #define XO3(x,y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n" | 82 | #define XO3(x, y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n" |
79 | #define XO4(x,y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n" | 83 | #define XO4(x, y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n" |
80 | #define XO5(x,y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n" | 84 | #define XO5(x, y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n" |
81 | 85 | ||
82 | 86 | ||
83 | static void | 87 | static void |
84 | xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | 88 | xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) |
85 | { | 89 | { |
86 | unsigned int lines = bytes >> 8; | 90 | unsigned int lines = bytes >> 8; |
87 | unsigned long cr0; | 91 | unsigned long cr0; |
88 | xmm_store_t xmm_save[4]; | 92 | xmm_store_t xmm_save[4]; |
89 | 93 | ||
90 | XMMS_SAVE; | 94 | XMMS_SAVE; |
91 | 95 | ||
92 | asm volatile ( | 96 | asm volatile( |
93 | #undef BLOCK | 97 | #undef BLOCK |
94 | #define BLOCK(i) \ | 98 | #define BLOCK(i) \ |
95 | LD(i,0) \ | 99 | LD(i, 0) \ |
96 | LD(i+1,1) \ | 100 | LD(i + 1, 1) \ |
97 | PF1(i) \ | 101 | PF1(i) \ |
98 | PF1(i+2) \ | 102 | PF1(i + 2) \ |
99 | LD(i+2,2) \ | 103 | LD(i + 2, 2) \ |
100 | LD(i+3,3) \ | 104 | LD(i + 3, 3) \ |
101 | PF0(i+4) \ | 105 | PF0(i + 4) \ |
102 | PF0(i+6) \ | 106 | PF0(i + 6) \ |
103 | XO1(i,0) \ | 107 | XO1(i, 0) \ |
104 | XO1(i+1,1) \ | 108 | XO1(i + 1, 1) \ |
105 | XO1(i+2,2) \ | 109 | XO1(i + 2, 2) \ |
106 | XO1(i+3,3) \ | 110 | XO1(i + 3, 3) \ |
107 | ST(i,0) \ | 111 | ST(i, 0) \ |
108 | ST(i+1,1) \ | 112 | ST(i + 1, 1) \ |
109 | ST(i+2,2) \ | 113 | ST(i + 2, 2) \ |
110 | ST(i+3,3) \ | 114 | ST(i + 3, 3) \ |
111 | 115 | ||
112 | 116 | ||
113 | PF0(0) | 117 | PF0(0) |
114 | PF0(2) | 118 | PF0(2) |
115 | 119 | ||
116 | " .align 32 ;\n" | 120 | " .align 32 ;\n" |
117 | " 1: ;\n" | 121 | " 1: ;\n" |
118 | 122 | ||
119 | BLOCK(0) | 123 | BLOCK(0) |
120 | BLOCK(4) | 124 | BLOCK(4) |
121 | BLOCK(8) | 125 | BLOCK(8) |
122 | BLOCK(12) | 126 | BLOCK(12) |
123 | 127 | ||
124 | " addq %[inc], %[p1] ;\n" | 128 | " addq %[inc], %[p1] ;\n" |
125 | " addq %[inc], %[p2] ;\n" | 129 | " addq %[inc], %[p2] ;\n" |
126 | " decl %[cnt] ; jnz 1b" | 130 | " decl %[cnt] ; jnz 1b" |
127 | : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines) | 131 | : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines) |
128 | : [inc] "r" (256UL) | 132 | : [inc] "r" (256UL) |
129 | : "memory"); | 133 | : "memory"); |
130 | 134 | ||
131 | XMMS_RESTORE; | 135 | XMMS_RESTORE; |
132 | } | 136 | } |
@@ -141,52 +145,52 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
141 | 145 | ||
142 | XMMS_SAVE; | 146 | XMMS_SAVE; |
143 | 147 | ||
144 | __asm__ __volatile__ ( | 148 | asm volatile( |
145 | #undef BLOCK | 149 | #undef BLOCK |
146 | #define BLOCK(i) \ | 150 | #define BLOCK(i) \ |
147 | PF1(i) \ | 151 | PF1(i) \ |
148 | PF1(i+2) \ | 152 | PF1(i + 2) \ |
149 | LD(i,0) \ | 153 | LD(i, 0) \ |
150 | LD(i+1,1) \ | 154 | LD(i + 1, 1) \ |
151 | LD(i+2,2) \ | 155 | LD(i + 2, 2) \ |
152 | LD(i+3,3) \ | 156 | LD(i + 3, 3) \ |
153 | PF2(i) \ | 157 | PF2(i) \ |
154 | PF2(i+2) \ | 158 | PF2(i + 2) \ |
155 | PF0(i+4) \ | 159 | PF0(i + 4) \ |
156 | PF0(i+6) \ | 160 | PF0(i + 6) \ |
157 | XO1(i,0) \ | 161 | XO1(i, 0) \ |
158 | XO1(i+1,1) \ | 162 | XO1(i + 1, 1) \ |
159 | XO1(i+2,2) \ | 163 | XO1(i + 2, 2) \ |
160 | XO1(i+3,3) \ | 164 | XO1(i + 3, 3) \ |
161 | XO2(i,0) \ | 165 | XO2(i, 0) \ |
162 | XO2(i+1,1) \ | 166 | XO2(i + 1, 1) \ |
163 | XO2(i+2,2) \ | 167 | XO2(i + 2, 2) \ |
164 | XO2(i+3,3) \ | 168 | XO2(i + 3, 3) \ |
165 | ST(i,0) \ | 169 | ST(i, 0) \ |
166 | ST(i+1,1) \ | 170 | ST(i + 1, 1) \ |
167 | ST(i+2,2) \ | 171 | ST(i + 2, 2) \ |
168 | ST(i+3,3) \ | 172 | ST(i + 3, 3) \ |
169 | 173 | ||
170 | 174 | ||
171 | PF0(0) | 175 | PF0(0) |
172 | PF0(2) | 176 | PF0(2) |
173 | 177 | ||
174 | " .align 32 ;\n" | 178 | " .align 32 ;\n" |
175 | " 1: ;\n" | 179 | " 1: ;\n" |
176 | 180 | ||
177 | BLOCK(0) | 181 | BLOCK(0) |
178 | BLOCK(4) | 182 | BLOCK(4) |
179 | BLOCK(8) | 183 | BLOCK(8) |
180 | BLOCK(12) | 184 | BLOCK(12) |
181 | 185 | ||
182 | " addq %[inc], %[p1] ;\n" | 186 | " addq %[inc], %[p1] ;\n" |
183 | " addq %[inc], %[p2] ;\n" | 187 | " addq %[inc], %[p2] ;\n" |
184 | " addq %[inc], %[p3] ;\n" | 188 | " addq %[inc], %[p3] ;\n" |
185 | " decl %[cnt] ; jnz 1b" | 189 | " decl %[cnt] ; jnz 1b" |
186 | : [cnt] "+r" (lines), | 190 | : [cnt] "+r" (lines), |
187 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) | 191 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) |
188 | : [inc] "r" (256UL) | 192 | : [inc] "r" (256UL) |
189 | : "memory"); | 193 | : "memory"); |
190 | XMMS_RESTORE; | 194 | XMMS_RESTORE; |
191 | } | 195 | } |
192 | 196 | ||
@@ -195,64 +199,64 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
195 | unsigned long *p3, unsigned long *p4) | 199 | unsigned long *p3, unsigned long *p4) |
196 | { | 200 | { |
197 | unsigned int lines = bytes >> 8; | 201 | unsigned int lines = bytes >> 8; |
198 | xmm_store_t xmm_save[4]; | 202 | xmm_store_t xmm_save[4]; |
199 | unsigned long cr0; | 203 | unsigned long cr0; |
200 | 204 | ||
201 | XMMS_SAVE; | 205 | XMMS_SAVE; |
202 | 206 | ||
203 | __asm__ __volatile__ ( | 207 | asm volatile( |
204 | #undef BLOCK | 208 | #undef BLOCK |
205 | #define BLOCK(i) \ | 209 | #define BLOCK(i) \ |
206 | PF1(i) \ | 210 | PF1(i) \ |
207 | PF1(i+2) \ | 211 | PF1(i + 2) \ |
208 | LD(i,0) \ | 212 | LD(i, 0) \ |
209 | LD(i+1,1) \ | 213 | LD(i + 1, 1) \ |
210 | LD(i+2,2) \ | 214 | LD(i + 2, 2) \ |
211 | LD(i+3,3) \ | 215 | LD(i + 3, 3) \ |
212 | PF2(i) \ | 216 | PF2(i) \ |
213 | PF2(i+2) \ | 217 | PF2(i + 2) \ |
214 | XO1(i,0) \ | 218 | XO1(i, 0) \ |
215 | XO1(i+1,1) \ | 219 | XO1(i + 1, 1) \ |
216 | XO1(i+2,2) \ | 220 | XO1(i + 2, 2) \ |
217 | XO1(i+3,3) \ | 221 | XO1(i + 3, 3) \ |
218 | PF3(i) \ | 222 | PF3(i) \ |
219 | PF3(i+2) \ | 223 | PF3(i + 2) \ |
220 | PF0(i+4) \ | 224 | PF0(i + 4) \ |
221 | PF0(i+6) \ | 225 | PF0(i + 6) \ |
222 | XO2(i,0) \ | 226 | XO2(i, 0) \ |
223 | XO2(i+1,1) \ | 227 | XO2(i + 1, 1) \ |
224 | XO2(i+2,2) \ | 228 | XO2(i + 2, 2) \ |
225 | XO2(i+3,3) \ | 229 | XO2(i + 3, 3) \ |
226 | XO3(i,0) \ | 230 | XO3(i, 0) \ |
227 | XO3(i+1,1) \ | 231 | XO3(i + 1, 1) \ |
228 | XO3(i+2,2) \ | 232 | XO3(i + 2, 2) \ |
229 | XO3(i+3,3) \ | 233 | XO3(i + 3, 3) \ |
230 | ST(i,0) \ | 234 | ST(i, 0) \ |
231 | ST(i+1,1) \ | 235 | ST(i + 1, 1) \ |
232 | ST(i+2,2) \ | 236 | ST(i + 2, 2) \ |
233 | ST(i+3,3) \ | 237 | ST(i + 3, 3) \ |
234 | 238 | ||
235 | 239 | ||
236 | PF0(0) | 240 | PF0(0) |
237 | PF0(2) | 241 | PF0(2) |
238 | 242 | ||
239 | " .align 32 ;\n" | 243 | " .align 32 ;\n" |
240 | " 1: ;\n" | 244 | " 1: ;\n" |
241 | 245 | ||
242 | BLOCK(0) | 246 | BLOCK(0) |
243 | BLOCK(4) | 247 | BLOCK(4) |
244 | BLOCK(8) | 248 | BLOCK(8) |
245 | BLOCK(12) | 249 | BLOCK(12) |
246 | 250 | ||
247 | " addq %[inc], %[p1] ;\n" | 251 | " addq %[inc], %[p1] ;\n" |
248 | " addq %[inc], %[p2] ;\n" | 252 | " addq %[inc], %[p2] ;\n" |
249 | " addq %[inc], %[p3] ;\n" | 253 | " addq %[inc], %[p3] ;\n" |
250 | " addq %[inc], %[p4] ;\n" | 254 | " addq %[inc], %[p4] ;\n" |
251 | " decl %[cnt] ; jnz 1b" | 255 | " decl %[cnt] ; jnz 1b" |
252 | : [cnt] "+c" (lines), | 256 | : [cnt] "+c" (lines), |
253 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4) | 257 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4) |
254 | : [inc] "r" (256UL) | 258 | : [inc] "r" (256UL) |
255 | : "memory" ); | 259 | : "memory" ); |
256 | 260 | ||
257 | XMMS_RESTORE; | 261 | XMMS_RESTORE; |
258 | } | 262 | } |
@@ -261,70 +265,70 @@ static void | |||
261 | xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | 265 | xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, |
262 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | 266 | unsigned long *p3, unsigned long *p4, unsigned long *p5) |
263 | { | 267 | { |
264 | unsigned int lines = bytes >> 8; | 268 | unsigned int lines = bytes >> 8; |
265 | xmm_store_t xmm_save[4]; | 269 | xmm_store_t xmm_save[4]; |
266 | unsigned long cr0; | 270 | unsigned long cr0; |
267 | 271 | ||
268 | XMMS_SAVE; | 272 | XMMS_SAVE; |
269 | 273 | ||
270 | __asm__ __volatile__ ( | 274 | asm volatile( |
271 | #undef BLOCK | 275 | #undef BLOCK |
272 | #define BLOCK(i) \ | 276 | #define BLOCK(i) \ |
273 | PF1(i) \ | 277 | PF1(i) \ |
274 | PF1(i+2) \ | 278 | PF1(i + 2) \ |
275 | LD(i,0) \ | 279 | LD(i, 0) \ |
276 | LD(i+1,1) \ | 280 | LD(i + 1, 1) \ |
277 | LD(i+2,2) \ | 281 | LD(i + 2, 2) \ |
278 | LD(i+3,3) \ | 282 | LD(i + 3, 3) \ |
279 | PF2(i) \ | 283 | PF2(i) \ |
280 | PF2(i+2) \ | 284 | PF2(i + 2) \ |
281 | XO1(i,0) \ | 285 | XO1(i, 0) \ |
282 | XO1(i+1,1) \ | 286 | XO1(i + 1, 1) \ |
283 | XO1(i+2,2) \ | 287 | XO1(i + 2, 2) \ |
284 | XO1(i+3,3) \ | 288 | XO1(i + 3, 3) \ |
285 | PF3(i) \ | 289 | PF3(i) \ |
286 | PF3(i+2) \ | 290 | PF3(i + 2) \ |
287 | XO2(i,0) \ | 291 | XO2(i, 0) \ |
288 | XO2(i+1,1) \ | 292 | XO2(i + 1, 1) \ |
289 | XO2(i+2,2) \ | 293 | XO2(i + 2, 2) \ |
290 | XO2(i+3,3) \ | 294 | XO2(i + 3, 3) \ |
291 | PF4(i) \ | 295 | PF4(i) \ |
292 | PF4(i+2) \ | 296 | PF4(i + 2) \ |
293 | PF0(i+4) \ | 297 | PF0(i + 4) \ |
294 | PF0(i+6) \ | 298 | PF0(i + 6) \ |
295 | XO3(i,0) \ | 299 | XO3(i, 0) \ |
296 | XO3(i+1,1) \ | 300 | XO3(i + 1, 1) \ |
297 | XO3(i+2,2) \ | 301 | XO3(i + 2, 2) \ |
298 | XO3(i+3,3) \ | 302 | XO3(i + 3, 3) \ |
299 | XO4(i,0) \ | 303 | XO4(i, 0) \ |
300 | XO4(i+1,1) \ | 304 | XO4(i + 1, 1) \ |
301 | XO4(i+2,2) \ | 305 | XO4(i + 2, 2) \ |
302 | XO4(i+3,3) \ | 306 | XO4(i + 3, 3) \ |
303 | ST(i,0) \ | 307 | ST(i, 0) \ |
304 | ST(i+1,1) \ | 308 | ST(i + 1, 1) \ |
305 | ST(i+2,2) \ | 309 | ST(i + 2, 2) \ |
306 | ST(i+3,3) \ | 310 | ST(i + 3, 3) \ |
307 | 311 | ||
308 | 312 | ||
309 | PF0(0) | 313 | PF0(0) |
310 | PF0(2) | 314 | PF0(2) |
311 | 315 | ||
312 | " .align 32 ;\n" | 316 | " .align 32 ;\n" |
313 | " 1: ;\n" | 317 | " 1: ;\n" |
314 | 318 | ||
315 | BLOCK(0) | 319 | BLOCK(0) |
316 | BLOCK(4) | 320 | BLOCK(4) |
317 | BLOCK(8) | 321 | BLOCK(8) |
318 | BLOCK(12) | 322 | BLOCK(12) |
319 | 323 | ||
320 | " addq %[inc], %[p1] ;\n" | 324 | " addq %[inc], %[p1] ;\n" |
321 | " addq %[inc], %[p2] ;\n" | 325 | " addq %[inc], %[p2] ;\n" |
322 | " addq %[inc], %[p3] ;\n" | 326 | " addq %[inc], %[p3] ;\n" |
323 | " addq %[inc], %[p4] ;\n" | 327 | " addq %[inc], %[p4] ;\n" |
324 | " addq %[inc], %[p5] ;\n" | 328 | " addq %[inc], %[p5] ;\n" |
325 | " decl %[cnt] ; jnz 1b" | 329 | " decl %[cnt] ; jnz 1b" |
326 | : [cnt] "+c" (lines), | 330 | : [cnt] "+c" (lines), |
327 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4), | 331 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4), |
328 | [p5] "+r" (p5) | 332 | [p5] "+r" (p5) |
329 | : [inc] "r" (256UL) | 333 | : [inc] "r" (256UL) |
330 | : "memory"); | 334 | : "memory"); |
@@ -333,18 +337,18 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
333 | } | 337 | } |
334 | 338 | ||
335 | static struct xor_block_template xor_block_sse = { | 339 | static struct xor_block_template xor_block_sse = { |
336 | .name = "generic_sse", | 340 | .name = "generic_sse", |
337 | .do_2 = xor_sse_2, | 341 | .do_2 = xor_sse_2, |
338 | .do_3 = xor_sse_3, | 342 | .do_3 = xor_sse_3, |
339 | .do_4 = xor_sse_4, | 343 | .do_4 = xor_sse_4, |
340 | .do_5 = xor_sse_5, | 344 | .do_5 = xor_sse_5, |
341 | }; | 345 | }; |
342 | 346 | ||
343 | #undef XOR_TRY_TEMPLATES | 347 | #undef XOR_TRY_TEMPLATES |
344 | #define XOR_TRY_TEMPLATES \ | 348 | #define XOR_TRY_TEMPLATES \ |
345 | do { \ | 349 | do { \ |
346 | xor_speed(&xor_block_sse); \ | 350 | xor_speed(&xor_block_sse); \ |
347 | } while (0) | 351 | } while (0) |
348 | 352 | ||
349 | /* We force the use of the SSE xor block because it can write around L2. | 353 | /* We force the use of the SSE xor block because it can write around L2. |
350 | We may also be able to load into the L1 only depending on how the cpu | 354 | We may also be able to load into the L1 only depending on how the cpu |
diff --git a/include/asm-xtensa/semaphore.h b/include/asm-xtensa/semaphore.h index 3e04167cd9dc..d9b2034ed1d2 100644 --- a/include/asm-xtensa/semaphore.h +++ b/include/asm-xtensa/semaphore.h | |||
@@ -1,99 +1 @@ | |||
1 | /* | #include <linux/semaphore.h> | |
2 | * linux/include/asm-xtensa/semaphore.h | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 2001 - 2005 Tensilica Inc. | ||
9 | */ | ||
10 | |||
11 | #ifndef _XTENSA_SEMAPHORE_H | ||
12 | #define _XTENSA_SEMAPHORE_H | ||
13 | |||
14 | #include <asm/atomic.h> | ||
15 | #include <asm/system.h> | ||
16 | #include <linux/wait.h> | ||
17 | #include <linux/rwsem.h> | ||
18 | |||
19 | struct semaphore { | ||
20 | atomic_t count; | ||
21 | int sleepers; | ||
22 | wait_queue_head_t wait; | ||
23 | }; | ||
24 | |||
25 | #define __SEMAPHORE_INITIALIZER(name,n) \ | ||
26 | { \ | ||
27 | .count = ATOMIC_INIT(n), \ | ||
28 | .sleepers = 0, \ | ||
29 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
30 | } | ||
31 | |||
32 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
33 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
34 | |||
35 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
36 | |||
37 | static inline void sema_init (struct semaphore *sem, int val) | ||
38 | { | ||
39 | atomic_set(&sem->count, val); | ||
40 | sem->sleepers = 0; | ||
41 | init_waitqueue_head(&sem->wait); | ||
42 | } | ||
43 | |||
44 | static inline void init_MUTEX (struct semaphore *sem) | ||
45 | { | ||
46 | sema_init(sem, 1); | ||
47 | } | ||
48 | |||
49 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
50 | { | ||
51 | sema_init(sem, 0); | ||
52 | } | ||
53 | |||
54 | asmlinkage void __down(struct semaphore * sem); | ||
55 | asmlinkage int __down_interruptible(struct semaphore * sem); | ||
56 | asmlinkage int __down_trylock(struct semaphore * sem); | ||
57 | asmlinkage void __up(struct semaphore * sem); | ||
58 | |||
59 | extern spinlock_t semaphore_wake_lock; | ||
60 | |||
61 | static inline void down(struct semaphore * sem) | ||
62 | { | ||
63 | might_sleep(); | ||
64 | |||
65 | if (atomic_sub_return(1, &sem->count) < 0) | ||
66 | __down(sem); | ||
67 | } | ||
68 | |||
69 | static inline int down_interruptible(struct semaphore * sem) | ||
70 | { | ||
71 | int ret = 0; | ||
72 | |||
73 | might_sleep(); | ||
74 | |||
75 | if (atomic_sub_return(1, &sem->count) < 0) | ||
76 | ret = __down_interruptible(sem); | ||
77 | return ret; | ||
78 | } | ||
79 | |||
80 | static inline int down_trylock(struct semaphore * sem) | ||
81 | { | ||
82 | int ret = 0; | ||
83 | |||
84 | if (atomic_sub_return(1, &sem->count) < 0) | ||
85 | ret = __down_trylock(sem); | ||
86 | return ret; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * Note! This is subtle. We jump to wake people up only if | ||
91 | * the semaphore was negative (== somebody was waiting on it). | ||
92 | */ | ||
93 | static inline void up(struct semaphore * sem) | ||
94 | { | ||
95 | if (atomic_add_return(1, &sem->count) <= 0) | ||
96 | __up(sem); | ||
97 | } | ||
98 | |||
99 | #endif /* _XTENSA_SEMAPHORE_H */ | ||
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 85778a4b1209..35094479ca55 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h | |||
@@ -216,6 +216,7 @@ static inline void clocksource_calculate_interval(struct clocksource *c, | |||
216 | /* used to install a new clocksource */ | 216 | /* used to install a new clocksource */ |
217 | extern int clocksource_register(struct clocksource*); | 217 | extern int clocksource_register(struct clocksource*); |
218 | extern void clocksource_unregister(struct clocksource*); | 218 | extern void clocksource_unregister(struct clocksource*); |
219 | extern void clocksource_touch_watchdog(void); | ||
219 | extern struct clocksource* clocksource_get_next(void); | 220 | extern struct clocksource* clocksource_get_next(void); |
220 | extern void clocksource_change_rating(struct clocksource *cs, int rating); | 221 | extern void clocksource_change_rating(struct clocksource *cs, int rating); |
221 | extern void clocksource_resume(void); | 222 | extern void clocksource_resume(void); |
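The new clocksource_touch_watchdog() export gives code that legitimately stops the machine for a long stretch, a kernel debugger for instance, a way to reset the clocksource watchdog so an otherwise healthy clocksource is not flagged unstable. A hedged usage sketch (the caller below is illustrative, not part of this patch):

        static void debugger_resume_cpus(void)
        {
                /* We may have sat at a breakpoint for seconds; pacify the
                 * watchdog before normal timekeeping resumes. */
                clocksource_touch_watchdog();
        }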
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 1ad56a7b2f74..56f3236da829 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
@@ -173,7 +173,6 @@ struct hrtimer_clock_base { | |||
173 | * struct hrtimer_cpu_base - the per cpu clock bases | 173 | * struct hrtimer_cpu_base - the per cpu clock bases |
174 | * @lock: lock protecting the base and associated clock bases | 174 | * @lock: lock protecting the base and associated clock bases |
175 | * and timers | 175 | * and timers |
176 | * @lock_key: the lock_class_key for use with lockdep | ||
177 | * @clock_base: array of clock bases for this cpu | 176 | * @clock_base: array of clock bases for this cpu |
178 | * @curr_timer: the timer which is executing a callback right now | 177 | * @curr_timer: the timer which is executing a callback right now |
179 | * @expires_next: absolute time of the next event which was scheduled | 178 | * @expires_next: absolute time of the next event which was scheduled |
@@ -189,7 +188,6 @@ struct hrtimer_clock_base { | |||
189 | */ | 188 | */ |
190 | struct hrtimer_cpu_base { | 189 | struct hrtimer_cpu_base { |
191 | spinlock_t lock; | 190 | spinlock_t lock; |
192 | struct lock_class_key lock_key; | ||
193 | struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; | 191 | struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; |
194 | struct list_head cb_pending; | 192 | struct list_head cb_pending; |
195 | #ifdef CONFIG_HIGH_RES_TIMERS | 193 | #ifdef CONFIG_HIGH_RES_TIMERS |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index f8ab4ce70564..b5fef13148bd 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -102,6 +102,25 @@ extern void disable_irq_nosync(unsigned int irq); | |||
102 | extern void disable_irq(unsigned int irq); | 102 | extern void disable_irq(unsigned int irq); |
103 | extern void enable_irq(unsigned int irq); | 103 | extern void enable_irq(unsigned int irq); |
104 | 104 | ||
105 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) | ||
106 | |||
107 | extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask); | ||
108 | extern int irq_can_set_affinity(unsigned int irq); | ||
109 | |||
110 | #else /* CONFIG_SMP */ | ||
111 | |||
112 | static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask) | ||
113 | { | ||
114 | return -EINVAL; | ||
115 | } | ||
116 | |||
117 | static inline int irq_can_set_affinity(unsigned int irq) | ||
118 | { | ||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | #endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */ | ||
123 | |||
105 | #ifdef CONFIG_GENERIC_HARDIRQS | 124 | #ifdef CONFIG_GENERIC_HARDIRQS |
106 | /* | 125 | /* |
107 | * Special lockdep variants of irq disabling/enabling. | 126 | * Special lockdep variants of irq disabling/enabling. |
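Moving the affinity interface from linux/irq.h to linux/interrupt.h lets ordinary drivers reach it, and the !SMP stubs keep callers unconditional. A hedged usage sketch (my_irq and the choice of CPU 0 are illustrative; cpumask_of_cpu() is this era's helper for a single-CPU mask):

        static void pin_irq_to_cpu0(unsigned int my_irq)
        {
                /* Pins the IRQ on SMP; falls through harmlessly where
                 * the stubs are in effect. */
                if (irq_can_set_affinity(my_irq))
                        irq_set_affinity(my_irq, cpumask_of_cpu(0));
        }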
diff --git a/include/linux/irq.h b/include/linux/irq.h index 176e5e790a44..1883a85625dd 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -228,21 +228,11 @@ static inline void set_pending_irq(unsigned int irq, cpumask_t mask) | |||
228 | 228 | ||
229 | #endif /* CONFIG_GENERIC_PENDING_IRQ */ | 229 | #endif /* CONFIG_GENERIC_PENDING_IRQ */ |
230 | 230 | ||
231 | extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask); | ||
232 | extern int irq_can_set_affinity(unsigned int irq); | ||
233 | |||
234 | #else /* CONFIG_SMP */ | 231 | #else /* CONFIG_SMP */ |
235 | 232 | ||
236 | #define move_native_irq(x) | 233 | #define move_native_irq(x) |
237 | #define move_masked_irq(x) | 234 | #define move_masked_irq(x) |
238 | 235 | ||
239 | static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask) | ||
240 | { | ||
241 | return -EINVAL; | ||
242 | } | ||
243 | |||
244 | static inline int irq_can_set_affinity(unsigned int irq) { return 0; } | ||
245 | |||
246 | #endif /* CONFIG_SMP */ | 236 | #endif /* CONFIG_SMP */ |
247 | 237 | ||
248 | #ifdef CONFIG_IRQBALANCE | 238 | #ifdef CONFIG_IRQBALANCE |
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h new file mode 100644 index 000000000000..9757b1a6d9dc --- /dev/null +++ b/include/linux/kgdb.h | |||
@@ -0,0 +1,281 @@ | |||
1 | /* | ||
2 | * This provides the callbacks and functions that KGDB needs to share between | ||
3 | * the core, I/O and arch-specific portions. | ||
4 | * | ||
5 | * Author: Amit Kale <amitkale@linsyssoft.com> and | ||
6 | * Tom Rini <trini@kernel.crashing.org> | ||
7 | * | ||
8 | * 2001-2004 (c) Amit S. Kale and 2003-2005 (c) MontaVista Software, Inc. | ||
9 | * This file is licensed under the terms of the GNU General Public License | ||
10 | * version 2. This program is licensed "as is" without any warranty of any | ||
11 | * kind, whether express or implied. | ||
12 | */ | ||
13 | #ifndef _KGDB_H_ | ||
14 | #define _KGDB_H_ | ||
15 | |||
16 | #include <linux/serial_8250.h> | ||
17 | #include <linux/linkage.h> | ||
18 | #include <linux/init.h> | ||
19 | |||
20 | #include <asm/atomic.h> | ||
21 | #include <asm/kgdb.h> | ||
22 | |||
23 | struct pt_regs; | ||
24 | |||
25 | /** | ||
26 | * kgdb_skipexception - (optional) exit kgdb_handle_exception early | ||
27 | * @exception: Exception vector number | ||
28 | * @regs: Current &struct pt_regs. | ||
29 | * | ||
30 | * On some architectures it is required to skip a breakpoint | ||
31 | * exception when it occurs after a breakpoint has been removed. | ||
32 | * This can be implemented in the architecture specific portion | ||
33 | * of kgdb. | ||
34 | */ | ||
35 | extern int kgdb_skipexception(int exception, struct pt_regs *regs); | ||
36 | |||
37 | /** | ||
38 | * kgdb_post_primary_code - (optional) Save error vector/code numbers. | ||
39 | * @regs: Original pt_regs. | ||
40 | * @e_vector: Original error vector. | ||
41 | * @err_code: Original error code. | ||
42 | * | ||
43 | * This is usually needed on architectures which support SMP and | ||
44 | * KGDB. This function is called after all the secondary cpus have | ||
45 | * been put into a known spin state and the primary CPU has control over | ||
46 | * KGDB. | ||
47 | */ | ||
48 | extern void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, | ||
49 | int err_code); | ||
50 | |||
51 | /** | ||
52 | * kgdb_disable_hw_debug - (optional) Disable hardware debugging hook | ||
53 | * @regs: Current &struct pt_regs. | ||
54 | * | ||
55 | * This function will be called if the particular architecture must | ||
56 | * disable hardware debugging while it is processing gdb packets or | ||
57 | * handling an exception. | ||
58 | */ | ||
59 | extern void kgdb_disable_hw_debug(struct pt_regs *regs); | ||
60 | |||
61 | struct tasklet_struct; | ||
62 | struct task_struct; | ||
63 | struct uart_port; | ||
64 | |||
65 | /** | ||
66 | * kgdb_breakpoint - compiled in breakpoint | ||
67 | * | ||
68 | * This will be implemented as a static inline per architecture. This | ||
69 | * function is called by the kgdb core to execute an architecture | ||
70 | * specific trap to cause kgdb to enter the exception processing. | ||
71 | * | ||
72 | */ | ||
73 | void kgdb_breakpoint(void); | ||
74 | |||
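kgdb_breakpoint() is the compiled-in entry into the debugger; each architecture supplies the actual trap. A hedged sketch of what an x86 implementation might look like (the instruction choice is the illustrative part; the real definition lives in the arch header):

        static inline void kgdb_breakpoint(void)
        {
                asm volatile ("int $3");        /* software breakpoint trap */
        }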
75 | extern int kgdb_connected; | ||
76 | |||
77 | extern atomic_t kgdb_setting_breakpoint; | ||
78 | extern atomic_t kgdb_cpu_doing_single_step; | ||
79 | |||
80 | extern struct task_struct *kgdb_usethread; | ||
81 | extern struct task_struct *kgdb_contthread; | ||
82 | |||
83 | enum kgdb_bptype { | ||
84 | BP_BREAKPOINT = 0, | ||
85 | BP_HARDWARE_BREAKPOINT, | ||
86 | BP_WRITE_WATCHPOINT, | ||
87 | BP_READ_WATCHPOINT, | ||
88 | BP_ACCESS_WATCHPOINT | ||
89 | }; | ||
90 | |||
91 | enum kgdb_bpstate { | ||
92 | BP_UNDEFINED = 0, | ||
93 | BP_REMOVED, | ||
94 | BP_SET, | ||
95 | BP_ACTIVE | ||
96 | }; | ||
97 | |||
98 | struct kgdb_bkpt { | ||
99 | unsigned long bpt_addr; | ||
100 | unsigned char saved_instr[BREAK_INSTR_SIZE]; | ||
101 | enum kgdb_bptype type; | ||
102 | enum kgdb_bpstate state; | ||
103 | }; | ||
104 | |||
105 | #ifndef KGDB_MAX_BREAKPOINTS | ||
106 | # define KGDB_MAX_BREAKPOINTS 1000 | ||
107 | #endif | ||
108 | |||
109 | #define KGDB_HW_BREAKPOINT 1 | ||
110 | |||
111 | /* | ||
112 | * Functions each KGDB-supporting architecture must provide: | ||
113 | */ | ||
114 | |||
115 | /** | ||
116 | * kgdb_arch_init - Perform any architecture specific initialization. | ||
117 | * | ||
118 | * This function will handle the initialization of any architecture | ||
119 | * specific callbacks. | ||
120 | */ | ||
121 | extern int kgdb_arch_init(void); | ||
122 | |||
123 | /** | ||
124 | * kgdb_arch_exit - Perform any architecture specific uninitialization. | ||
125 | * | ||
126 | * This function will handle the uninitialization of any architecture | ||
127 | * specific callbacks, for dynamic registration and unregistration. | ||
128 | */ | ||
129 | extern void kgdb_arch_exit(void); | ||
130 | |||
131 | /** | ||
132 | * pt_regs_to_gdb_regs - Convert ptrace regs to GDB regs | ||
133 | * @gdb_regs: A pointer to hold the registers in the order GDB wants. | ||
134 | * @regs: The &struct pt_regs of the current process. | ||
135 | * | ||
136 | * Convert the pt_regs in @regs into the format for registers that | ||
137 | * GDB expects, stored in @gdb_regs. | ||
138 | */ | ||
139 | extern void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs); | ||
140 | |||
141 | /** | ||
142 | * sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs | ||
143 | * @gdb_regs: A pointer to hold the registers in the order GDB wants. | ||
144 | * @p: The &struct task_struct of the desired process. | ||
145 | * | ||
146 | * Convert the register values of the sleeping process in @p to | ||
147 | * the format that GDB expects. | ||
148 | * This function is called when kgdb does not have access to the | ||
149 | * &struct pt_regs and therefore it should fill the gdb registers | ||
150 | * @gdb_regs with what was saved in the thread field of | ||
151 | * &struct thread_struct during switch_to. | ||
152 | */ | ||
153 | extern void | ||
154 | sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p); | ||
155 | |||
156 | /** | ||
157 | * gdb_regs_to_pt_regs - Convert GDB regs to ptrace regs. | ||
158 | * @gdb_regs: A pointer to hold the registers we've received from GDB. | ||
159 | * @regs: A pointer to a &struct pt_regs to hold these values in. | ||
160 | * | ||
161 | * Convert the GDB regs in @gdb_regs into the pt_regs, and store them | ||
162 | * in @regs. | ||
163 | */ | ||
164 | extern void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs); | ||
165 | |||
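As a worked illustration of the two conversion directions, consider an imaginary architecture whose pt_regs carries sixteen general-purpose registers and a program counter. Every field name and the register ordering below are assumptions made for the sketch, not any real port:

/* Illustrative only: imaginary arch, 16 GPRs then the PC in GDB order. */
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
        int i;

        for (i = 0; i < 16; i++)
                gdb_regs[i] = regs->gpr[i];     /* GPRs first */
        gdb_regs[16] = regs->pc;                /* then the PC */
}

void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
        int i;

        for (i = 0; i < 16; i++)
                regs->gpr[i] = gdb_regs[i];
        regs->pc = gdb_regs[16];
}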
166 | /** | ||
167 | * kgdb_arch_handle_exception - Handle architecture specific GDB packets. | ||
168 | * @vector: The error vector of the exception that happened. | ||
169 | * @signo: The signal number of the exception that happened. | ||
170 | * @err_code: The error code of the exception that happened. | ||
171 | * @remcom_in_buffer: The buffer of the packet we have read. | ||
172 | * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into. | ||
173 | * @regs: The &struct pt_regs of the current process. | ||
174 | * | ||
175 | * This function MUST handle the 'c' and 's' command packets, | ||
176 | * as well as packets to set / remove a hardware breakpoint, if used. | ||
177 | * If there are additional packets which the hardware needs to handle, | ||
178 | * they are handled here. The code should return -1 if it wants to | ||
179 | * process more packets, and %0 or %1 if it wants to exit from the | ||
180 | * kgdb callback. | ||
181 | */ | ||
182 | extern int | ||
183 | kgdb_arch_handle_exception(int vector, int signo, int err_code, | ||
184 | char *remcom_in_buffer, | ||
185 | char *remcom_out_buffer, | ||
186 | struct pt_regs *regs); | ||
187 | |||
188 | /** | ||
189 | * kgdb_roundup_cpus - Get other CPUs into a holding pattern | ||
190 | * @flags: Current IRQ state | ||
191 | * | ||
192 | * On SMP systems, we need to get the attention of the other CPUs | ||
193 | * and get them into a known state. This should do what is needed | ||
194 | * to get the other CPUs to call kgdb_wait(). Note that on some arches, | ||
195 | * the NMI approach is not used for rounding up all the CPUs. For example, | ||
196 | * in the case of MIPS, smp_call_function() is used to round up CPUs. In | ||
197 | * this case, we have to make sure that interrupts are enabled before | ||
198 | * calling smp_call_function(). The argument to this function is | ||
199 | * the flags that will be used when restoring the interrupts. There is | ||
200 | * a local_irq_save() call before kgdb_roundup_cpus(). | ||
201 | * | ||
202 | * On non-SMP systems, this is not called. | ||
203 | */ | ||
204 | extern void kgdb_roundup_cpus(unsigned long flags); | ||
205 | |||
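For the smp_call_function() style of roundup that the comment describes for MIPS, an implementation might look roughly like the sketch below. It assumes the three-argument smp_call_function(func, info, wait) form (some trees take an extra nonatomic argument) and that get_irq_regs() is usable from the IPI handler; treat it as a shape, not a drop-in.

/* Sketch of an IPI-based roundup; see the caveats above. */
static void kgdb_call_nmi_hook(void *ignored)
{
        kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
}

void kgdb_roundup_cpus(unsigned long flags)
{
        local_irq_enable();     /* smp_call_function() needs IRQs on */
        smp_call_function(kgdb_call_nmi_hook, NULL, 0);
        local_irq_disable();
}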
206 | /* Optional functions. */ | ||
207 | extern int kgdb_validate_break_address(unsigned long addr); | ||
208 | extern int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr); | ||
209 | extern int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle); | ||
210 | |||
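A generic software-breakpoint implementation needs only fault-tolerant memory accessors plus the arch's breakpoint instruction from arch_kgdb_ops (declared further down). The sketch below assumes probe_kernel_read()/probe_kernel_write() from <linux/uaccess.h> are available; it is a plausible shape for these hooks, not authoritative code.

/* Save the original bytes, then write the break instruction over them. */
int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
{
        int err;

        err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE);
        if (err)
                return err;

        return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
                                  BREAK_INSTR_SIZE);
}

/* Restore the saved bytes to remove the breakpoint. */
int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
{
        return probe_kernel_write((char *)addr, bundle, BREAK_INSTR_SIZE);
}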
211 | /** | ||
212 | * struct kgdb_arch - Describe architecture specific values. | ||
213 | * @gdb_bpt_instr: The instruction to trigger a breakpoint. | ||
214 | * @flags: Flags for the breakpoint, currently just %KGDB_HW_BREAKPOINT. | ||
215 | * @set_breakpoint: Allow an architecture to specify how to set a software | ||
216 | * breakpoint. | ||
217 | * @remove_breakpoint: Allow an architecture to specify how to remove a | ||
218 | * software breakpoint. | ||
219 | * @set_hw_breakpoint: Allow an architecture to specify how to set a hardware | ||
220 | * breakpoint. | ||
221 | * @remove_hw_breakpoint: Allow an architecture to specify how to remove a | ||
222 | * hardware breakpoint. | ||
223 | * @remove_all_hw_break: Allow an architecture to specify how to remove all | ||
224 | * hardware breakpoints. | ||
225 | * @correct_hw_break: Allow an architecture to specify how to correct the | ||
226 | * hardware debug registers. | ||
227 | */ | ||
228 | struct kgdb_arch { | ||
229 | unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE]; | ||
230 | unsigned long flags; | ||
231 | |||
232 | int (*set_breakpoint)(unsigned long, char *); | ||
233 | int (*remove_breakpoint)(unsigned long, char *); | ||
234 | int (*set_hw_breakpoint)(unsigned long, int, enum kgdb_bptype); | ||
235 | int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype); | ||
236 | void (*remove_all_hw_break)(void); | ||
237 | void (*correct_hw_break)(void); | ||
238 | }; | ||
239 | |||
240 | /** | ||
241 | * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB. | ||
242 | * @name: Name of the I/O driver. | ||
243 | * @read_char: Pointer to a function that will return one char. | ||
244 | * @write_char: Pointer to a function that will write one char. | ||
245 | * @flush: Pointer to a function that will flush any pending writes. | ||
246 | * @init: Pointer to a function that will initialize the device. | ||
247 | * @pre_exception: Pointer to a function that will do any prep work for | ||
248 | * the I/O driver. | ||
249 | * @post_exception: Pointer to a function that will do any cleanup work | ||
250 | * for the I/O driver. | ||
251 | */ | ||
252 | struct kgdb_io { | ||
253 | const char *name; | ||
254 | int (*read_char) (void); | ||
255 | void (*write_char) (u8); | ||
256 | void (*flush) (void); | ||
257 | int (*init) (void); | ||
258 | void (*pre_exception) (void); | ||
259 | void (*post_exception) (void); | ||
260 | }; | ||
261 | |||
262 | extern struct kgdb_arch arch_kgdb_ops; | ||
263 | |||
264 | extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops); | ||
265 | extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops); | ||
266 | |||
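Tying struct kgdb_io to the registration calls above: a minimal polled driver fills in name, read_char and write_char and registers itself; the remaining hooks are optional. In this sketch, my_uart_poll_get_char()/my_uart_poll_put_char() are invented stand-ins for real hardware accessors.

/* Hypothetical polled I/O driver for kgdb. */
static int my_kgdb_read_char(void)
{
        return my_uart_poll_get_char();  /* blocks until a char arrives */
}

static void my_kgdb_write_char(u8 c)
{
        my_uart_poll_put_char(c);
}

static struct kgdb_io my_kgdb_io_ops = {
        .name           = "my_kgdb_io",
        .read_char      = my_kgdb_read_char,
        .write_char     = my_kgdb_write_char,
};

static int __init my_kgdb_io_init(void)
{
        return kgdb_register_io_module(&my_kgdb_io_ops);
}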
267 | extern int kgdb_hex2long(char **ptr, long *long_val); | ||
268 | extern int kgdb_mem2hex(char *mem, char *buf, int count); | ||
269 | extern int kgdb_hex2mem(char *buf, char *mem, int count); | ||
270 | |||
271 | extern int kgdb_isremovedbreak(unsigned long addr); | ||
272 | |||
273 | extern int | ||
274 | kgdb_handle_exception(int ex_vector, int signo, int err_code, | ||
275 | struct pt_regs *regs); | ||
276 | extern int kgdb_nmicallback(int cpu, void *regs); | ||
277 | |||
278 | extern int kgdb_single_step; | ||
279 | extern atomic_t kgdb_active; | ||
280 | |||
281 | #endif /* _KGDB_H_ */ | ||
diff --git a/include/linux/libata.h b/include/linux/libata.h index 37ee881c42ac..165734a2dd47 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -122,6 +122,8 @@ enum { | |||
122 | 122 | ||
123 | ATAPI_MAX_DRAIN = 16 << 10, | 123 | ATAPI_MAX_DRAIN = 16 << 10, |
124 | 124 | ||
125 | ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1, | ||
126 | |||
125 | ATA_SHT_EMULATED = 1, | 127 | ATA_SHT_EMULATED = 1, |
126 | ATA_SHT_CMD_PER_LUN = 1, | 128 | ATA_SHT_CMD_PER_LUN = 1, |
127 | ATA_SHT_THIS_ID = -1, | 129 | ATA_SHT_THIS_ID = -1, |
@@ -163,9 +165,6 @@ enum { | |||
163 | ATA_DEV_NONE = 9, /* no device */ | 165 | ATA_DEV_NONE = 9, /* no device */ |
164 | 166 | ||
165 | /* struct ata_link flags */ | 167 | /* struct ata_link flags */ |
166 | ATA_LFLAG_HRST_TO_RESUME = (1 << 0), /* hardreset to resume link */ | ||
167 | ATA_LFLAG_SKIP_D2H_BSY = (1 << 1), /* can't wait for the first D2H | ||
168 | * Register FIS clearing BSY */ | ||
169 | ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */ | 168 | ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */ |
170 | ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */ | 169 | ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */ |
171 | ATA_LFLAG_ASSUME_SEMB = (1 << 4), /* assume SEMB class */ | 170 | ATA_LFLAG_ASSUME_SEMB = (1 << 4), /* assume SEMB class */ |
@@ -225,6 +224,7 @@ enum { | |||
225 | ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */ | 224 | ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */ |
226 | ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */ | 225 | ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */ |
227 | ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */ | 226 | ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */ |
227 | ATA_QCFLAG_RETRY = (1 << 7), /* retry after failure */ | ||
228 | 228 | ||
229 | ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */ | 229 | ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */ |
230 | ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */ | 230 | ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */ |
@@ -249,6 +249,25 @@ enum { | |||
249 | */ | 249 | */ |
250 | ATA_TMOUT_FF_WAIT = 4 * HZ / 5, | 250 | ATA_TMOUT_FF_WAIT = 4 * HZ / 5, |
251 | 251 | ||
252 | /* The spec mandates waiting ">= 2ms" before checking status | ||
253 | * after reset. We wait 150ms, because that was the magic | ||
254 | * delay used for ATAPI devices in Hale Landis's ATADRVR, for | ||
255 | * the period of time between when the ATA command register is | ||
256 | * written and when status is checked. Because waiting for | ||
257 | * "a while" before checking status is fine, post SRST, we | ||
258 | * perform this magic delay here as well. | ||
259 | * | ||
260 | * The old drivers/ide code uses the 2 ms rule and then waits for ready. | ||
261 | */ | ||
262 | ATA_WAIT_AFTER_RESET_MSECS = 150, | ||
263 | |||
264 | /* If PMP is supported, we have to do follow-up SRST. As some | ||
265 | * PMPs don't send D2H Reg FIS after hardreset, LLDs are | ||
266 | * advised to wait only for the following duration before | ||
267 | * doing SRST. | ||
268 | */ | ||
269 | ATA_TMOUT_PMP_SRST_WAIT = 1 * HZ, | ||
270 | |||
252 | /* ATA bus states */ | 271 | /* ATA bus states */ |
253 | BUS_UNKNOWN = 0, | 272 | BUS_UNKNOWN = 0, |
254 | BUS_DMA = 1, | 273 | BUS_DMA = 1, |
@@ -292,17 +311,16 @@ enum { | |||
292 | 311 | ||
293 | /* reset / recovery action types */ | 312 | /* reset / recovery action types */ |
294 | ATA_EH_REVALIDATE = (1 << 0), | 313 | ATA_EH_REVALIDATE = (1 << 0), |
295 | ATA_EH_SOFTRESET = (1 << 1), | 314 | ATA_EH_SOFTRESET = (1 << 1), /* meaningful only in ->prereset */ |
296 | ATA_EH_HARDRESET = (1 << 2), | 315 | ATA_EH_HARDRESET = (1 << 2), /* meaningful only in ->prereset */ |
316 | ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, | ||
297 | ATA_EH_ENABLE_LINK = (1 << 3), | 317 | ATA_EH_ENABLE_LINK = (1 << 3), |
298 | ATA_EH_LPM = (1 << 4), /* link power management action */ | 318 | ATA_EH_LPM = (1 << 4), /* link power management action */ |
299 | 319 | ||
300 | ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, | ||
301 | ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE, | 320 | ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE, |
302 | 321 | ||
303 | /* ata_eh_info->flags */ | 322 | /* ata_eh_info->flags */ |
304 | ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ | 323 | ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ |
305 | ATA_EHI_RESUME_LINK = (1 << 1), /* resume link (reset modifier) */ | ||
306 | ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ | 324 | ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ |
307 | ATA_EHI_QUIET = (1 << 3), /* be quiet */ | 325 | ATA_EHI_QUIET = (1 << 3), /* be quiet */ |
308 | 326 | ||
@@ -313,7 +331,6 @@ enum { | |||
313 | ATA_EHI_POST_SETMODE = (1 << 20), /* revalidating after setmode */ | 331 ||
314 | 332 | ||
315 | ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET, | 333 | ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET, |
316 | ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK, | ||
317 | 334 | ||
318 | /* max tries if error condition is still set after ->error_handler */ | 335 | /* max tries if error condition is still set after ->error_handler */ |
319 | ATA_EH_MAX_TRIES = 5, | 336 | ATA_EH_MAX_TRIES = 5, |
@@ -352,6 +369,22 @@ enum { | |||
352 | ATAPI_READ_CD = 2, /* READ CD [MSF] */ | 369 | ATAPI_READ_CD = 2, /* READ CD [MSF] */ |
353 | ATAPI_PASS_THRU = 3, /* SAT pass-thru */ | 370 | ATAPI_PASS_THRU = 3, /* SAT pass-thru */ |
354 | ATAPI_MISC = 4, /* the rest */ | 371 | ATAPI_MISC = 4, /* the rest */ |
372 | |||
373 | /* Timing constants */ | ||
374 | ATA_TIMING_SETUP = (1 << 0), | ||
375 | ATA_TIMING_ACT8B = (1 << 1), | ||
376 | ATA_TIMING_REC8B = (1 << 2), | ||
377 | ATA_TIMING_CYC8B = (1 << 3), | ||
378 | ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B | | ||
379 | ATA_TIMING_CYC8B, | ||
380 | ATA_TIMING_ACTIVE = (1 << 4), | ||
381 | ATA_TIMING_RECOVER = (1 << 5), | ||
382 | ATA_TIMING_CYCLE = (1 << 6), | ||
383 | ATA_TIMING_UDMA = (1 << 7), | ||
384 | ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B | | ||
385 | ATA_TIMING_REC8B | ATA_TIMING_CYC8B | | ||
386 | ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER | | ||
387 | ATA_TIMING_CYCLE | ATA_TIMING_UDMA, | ||
355 | }; | 388 | }; |
356 | 389 | ||
357 | enum ata_xfer_mask { | 390 | enum ata_xfer_mask { |
@@ -412,6 +445,7 @@ enum link_pm { | |||
412 | }; | 445 | }; |
413 | extern struct class_device_attribute class_device_attr_link_power_management_policy; | 446 | extern struct class_device_attribute class_device_attr_link_power_management_policy; |
414 | 447 | ||
448 | #ifdef CONFIG_ATA_SFF | ||
415 | struct ata_ioports { | 449 | struct ata_ioports { |
416 | void __iomem *cmd_addr; | 450 | void __iomem *cmd_addr; |
417 | void __iomem *data_addr; | 451 | void __iomem *data_addr; |
@@ -429,6 +463,7 @@ struct ata_ioports { | |||
429 | void __iomem *bmdma_addr; | 463 | void __iomem *bmdma_addr; |
430 | void __iomem *scr_addr; | 464 | void __iomem *scr_addr; |
431 | }; | 465 | }; |
466 | #endif /* CONFIG_ATA_SFF */ | ||
432 | 467 | ||
433 | struct ata_host { | 468 | struct ata_host { |
434 | spinlock_t lock; | 469 | spinlock_t lock; |
@@ -436,7 +471,7 @@ struct ata_host { | |||
436 | void __iomem * const *iomap; | 471 | void __iomem * const *iomap; |
437 | unsigned int n_ports; | 472 | unsigned int n_ports; |
438 | void *private_data; | 473 | void *private_data; |
439 | const struct ata_port_operations *ops; | 474 | struct ata_port_operations *ops; |
440 | unsigned long flags; | 475 | unsigned long flags; |
441 | #ifdef CONFIG_ATA_ACPI | 476 | #ifdef CONFIG_ATA_ACPI |
442 | acpi_handle acpi_handle; | 477 | acpi_handle acpi_handle; |
@@ -605,7 +640,7 @@ struct ata_link { | |||
605 | 640 | ||
606 | struct ata_port { | 641 | struct ata_port { |
607 | struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ | 642 | struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ |
608 | const struct ata_port_operations *ops; | 643 | struct ata_port_operations *ops; |
609 | spinlock_t *lock; | 644 | spinlock_t *lock; |
610 | unsigned long flags; /* ATA_FLAG_xxx */ | 645 | unsigned long flags; /* ATA_FLAG_xxx */ |
611 | unsigned int pflags; /* ATA_PFLAG_xxx */ | 646 | unsigned int pflags; /* ATA_PFLAG_xxx */ |
@@ -615,7 +650,9 @@ struct ata_port { | |||
615 | struct ata_prd *prd; /* our SG list */ | 650 | struct ata_prd *prd; /* our SG list */ |
616 | dma_addr_t prd_dma; /* and its DMA mapping */ | 651 | dma_addr_t prd_dma; /* and its DMA mapping */ |
617 | 652 | ||
653 | #ifdef CONFIG_ATA_SFF | ||
618 | struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ | 654 | struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ |
655 | #endif /* CONFIG_ATA_SFF */ | ||
619 | 656 | ||
620 | u8 ctl; /* cache of ATA control register */ | 657 | u8 ctl; /* cache of ATA control register */ |
621 | u8 last_ctl; /* Cache last written value */ | 658 | u8 last_ctl; /* Cache last written value */ |
@@ -667,81 +704,108 @@ struct ata_port { | |||
667 | u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */ | 704 | u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */ |
668 | }; | 705 | }; |
669 | 706 | ||
670 | struct ata_port_operations { | 707 | /* The following initializer overrides a method to NULL whether or not |
671 | void (*dev_config) (struct ata_device *); | 708 | * one of its parents has the method defined. This is equivalent to |
672 | 709 | * ERR_PTR(-ENOENT). Unfortunately, ERR_PTR doesn't render a constant | |
673 | void (*set_piomode) (struct ata_port *, struct ata_device *); | 710 | * expression and thus can't be used as an initializer. |
674 | void (*set_dmamode) (struct ata_port *, struct ata_device *); | 711 | */ |
675 | unsigned long (*mode_filter) (struct ata_device *, unsigned long); | 712 | #define ATA_OP_NULL (void *)(unsigned long)(-ENOENT) |
676 | |||
677 | void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf); | ||
678 | void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf); | ||
679 | |||
680 | void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf); | ||
681 | u8 (*check_status)(struct ata_port *ap); | ||
682 | u8 (*check_altstatus)(struct ata_port *ap); | ||
683 | void (*dev_select)(struct ata_port *ap, unsigned int device); | ||
684 | |||
685 | void (*phy_reset) (struct ata_port *ap); /* obsolete */ | ||
686 | int (*set_mode) (struct ata_link *link, struct ata_device **r_failed_dev); | ||
687 | |||
688 | int (*cable_detect) (struct ata_port *ap); | ||
689 | |||
690 | int (*check_atapi_dma) (struct ata_queued_cmd *qc); | ||
691 | |||
692 | void (*bmdma_setup) (struct ata_queued_cmd *qc); | ||
693 | void (*bmdma_start) (struct ata_queued_cmd *qc); | ||
694 | |||
695 | unsigned int (*data_xfer) (struct ata_device *dev, unsigned char *buf, | ||
696 | unsigned int buflen, int rw); | ||
697 | |||
698 | int (*qc_defer) (struct ata_queued_cmd *qc); | ||
699 | void (*qc_prep) (struct ata_queued_cmd *qc); | ||
700 | unsigned int (*qc_issue) (struct ata_queued_cmd *qc); | ||
701 | |||
702 | /* port multiplier */ | ||
703 | void (*pmp_attach) (struct ata_port *ap); | ||
704 | void (*pmp_detach) (struct ata_port *ap); | ||
705 | 713 | ||
706 | /* Error handlers. ->error_handler overrides ->eng_timeout and | 714 | struct ata_port_operations { |
707 | * indicates that new-style EH is in place. | 715 | /* |
716 | * Command execution | ||
708 | */ | 717 | */ |
709 | void (*eng_timeout) (struct ata_port *ap); /* obsolete */ | 718 | int (*qc_defer)(struct ata_queued_cmd *qc); |
710 | 719 | int (*check_atapi_dma)(struct ata_queued_cmd *qc); | |
711 | void (*freeze) (struct ata_port *ap); | 720 | void (*qc_prep)(struct ata_queued_cmd *qc); |
712 | void (*thaw) (struct ata_port *ap); | 721 | unsigned int (*qc_issue)(struct ata_queued_cmd *qc); |
713 | void (*error_handler) (struct ata_port *ap); | 722 | bool (*qc_fill_rtf)(struct ata_queued_cmd *qc); |
714 | void (*post_internal_cmd) (struct ata_queued_cmd *qc); | 723 | |
715 | 724 | /* | |
716 | irq_handler_t irq_handler; | 725 | * Configuration and exception handling |
717 | void (*irq_clear) (struct ata_port *); | 726 | */ |
718 | u8 (*irq_on) (struct ata_port *); | 727 | int (*cable_detect)(struct ata_port *ap); |
719 | 728 | unsigned long (*mode_filter)(struct ata_device *dev, unsigned long xfer_mask); | |
720 | int (*scr_read) (struct ata_port *ap, unsigned int sc_reg, u32 *val); | 729 | void (*set_piomode)(struct ata_port *ap, struct ata_device *dev); |
721 | int (*scr_write) (struct ata_port *ap, unsigned int sc_reg, u32 val); | 730 | void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev); |
731 | int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev); | ||
732 | |||
733 | void (*dev_config)(struct ata_device *dev); | ||
734 | |||
735 | void (*freeze)(struct ata_port *ap); | ||
736 | void (*thaw)(struct ata_port *ap); | ||
737 | ata_prereset_fn_t prereset; | ||
738 | ata_reset_fn_t softreset; | ||
739 | ata_reset_fn_t hardreset; | ||
740 | ata_postreset_fn_t postreset; | ||
741 | ata_prereset_fn_t pmp_prereset; | ||
742 | ata_reset_fn_t pmp_softreset; | ||
743 | ata_reset_fn_t pmp_hardreset; | ||
744 | ata_postreset_fn_t pmp_postreset; | ||
745 | void (*error_handler)(struct ata_port *ap); | ||
746 | void (*post_internal_cmd)(struct ata_queued_cmd *qc); | ||
747 | |||
748 | /* | ||
749 | * Optional features | ||
750 | */ | ||
751 | int (*scr_read)(struct ata_port *ap, unsigned int sc_reg, u32 *val); | ||
752 | int (*scr_write)(struct ata_port *ap, unsigned int sc_reg, u32 val); | ||
753 | void (*pmp_attach)(struct ata_port *ap); | ||
754 | void (*pmp_detach)(struct ata_port *ap); | ||
755 | int (*enable_pm)(struct ata_port *ap, enum link_pm policy); | ||
756 | void (*disable_pm)(struct ata_port *ap); | ||
757 | |||
758 | /* | ||
759 | * Start, stop, suspend and resume | ||
760 | */ | ||
761 | int (*port_suspend)(struct ata_port *ap, pm_message_t mesg); | ||
762 | int (*port_resume)(struct ata_port *ap); | ||
763 | int (*port_start)(struct ata_port *ap); | ||
764 | void (*port_stop)(struct ata_port *ap); | ||
765 | void (*host_stop)(struct ata_host *host); | ||
766 | |||
767 | #ifdef CONFIG_ATA_SFF | ||
768 | /* | ||
769 | * SFF / taskfile oriented ops | ||
770 | */ | ||
771 | void (*sff_dev_select)(struct ata_port *ap, unsigned int device); | ||
772 | u8 (*sff_check_status)(struct ata_port *ap); | ||
773 | u8 (*sff_check_altstatus)(struct ata_port *ap); | ||
774 | void (*sff_tf_load)(struct ata_port *ap, const struct ata_taskfile *tf); | ||
775 | void (*sff_tf_read)(struct ata_port *ap, struct ata_taskfile *tf); | ||
776 | void (*sff_exec_command)(struct ata_port *ap, | ||
777 | const struct ata_taskfile *tf); | ||
778 | unsigned int (*sff_data_xfer)(struct ata_device *dev, | ||
779 | unsigned char *buf, unsigned int buflen, int rw); | ||
780 | u8 (*sff_irq_on)(struct ata_port *); | ||
781 | void (*sff_irq_clear)(struct ata_port *); | ||
722 | 782 | ||
723 | int (*port_suspend) (struct ata_port *ap, pm_message_t mesg); | 783 | void (*bmdma_setup)(struct ata_queued_cmd *qc); |
724 | int (*port_resume) (struct ata_port *ap); | 784 | void (*bmdma_start)(struct ata_queued_cmd *qc); |
725 | int (*enable_pm) (struct ata_port *ap, enum link_pm policy); | 785 | void (*bmdma_stop)(struct ata_queued_cmd *qc); |
726 | void (*disable_pm) (struct ata_port *ap); | 786 | u8 (*bmdma_status)(struct ata_port *ap); |
727 | int (*port_start) (struct ata_port *ap); | 787 | #endif /* CONFIG_ATA_SFF */ |
728 | void (*port_stop) (struct ata_port *ap); | ||
729 | 788 | ||
730 | void (*host_stop) (struct ata_host *host); | 789 | /* |
790 | * Obsolete | ||
791 | */ | ||
792 | void (*phy_reset)(struct ata_port *ap); | ||
793 | void (*eng_timeout)(struct ata_port *ap); | ||
731 | 794 | ||
732 | void (*bmdma_stop) (struct ata_queued_cmd *qc); | 795 | /* |
733 | u8 (*bmdma_status) (struct ata_port *ap); | 796 | * ->inherits must be the last field and all the preceding |
797 | * fields must be pointers. | ||
798 | */ | ||
799 | const struct ata_port_operations *inherits; | ||
734 | }; | 800 | }; |
735 | 801 | ||
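The reworked ops table is built around ->inherits: a low-level driver fills in only what differs from a library-provided base (ata_sff_port_ops, ata_bmdma_port_ops, sata_port_ops, ...) and the chain is resolved when the host is registered. A hedged sketch of how a driver might use it; my_set_piomode is an invented helper:

/* Hypothetical driver ops: inherit BMDMA defaults, override two
 * methods, and explicitly stub out a third with ATA_OP_NULL. */
static void my_set_piomode(struct ata_port *ap, struct ata_device *dev);

static struct ata_port_operations my_port_ops = {
        .inherits       = &ata_bmdma_port_ops,
        .cable_detect   = ata_cable_40wire,
        .set_piomode    = my_set_piomode,  /* invented */
        .mode_filter    = ATA_OP_NULL,     /* no filter, even if a parent has one */
};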
736 | struct ata_port_info { | 802 | struct ata_port_info { |
737 | struct scsi_host_template *sht; | ||
738 | unsigned long flags; | 803 | unsigned long flags; |
739 | unsigned long link_flags; | 804 | unsigned long link_flags; |
740 | unsigned long pio_mask; | 805 | unsigned long pio_mask; |
741 | unsigned long mwdma_mask; | 806 | unsigned long mwdma_mask; |
742 | unsigned long udma_mask; | 807 | unsigned long udma_mask; |
743 | const struct ata_port_operations *port_ops; | 808 | struct ata_port_operations *port_ops; |
744 | irq_handler_t irq_handler; | ||
745 | void *private_data; | 809 | void *private_data; |
746 | }; | 810 | }; |
747 | 811 | ||
@@ -759,11 +823,14 @@ struct ata_timing { | |||
759 | 823 | ||
760 | #define FIT(v, vmin, vmax) max_t(short, min_t(short, v, vmax), vmin) | 824 | #define FIT(v, vmin, vmax) max_t(short, min_t(short, v, vmax), vmin) |
761 | 825 | ||
826 | /* | ||
827 | * Core layer - drivers/ata/libata-core.c | ||
828 | */ | ||
762 | extern const unsigned long sata_deb_timing_normal[]; | 829 | extern const unsigned long sata_deb_timing_normal[]; |
763 | extern const unsigned long sata_deb_timing_hotplug[]; | 830 | extern const unsigned long sata_deb_timing_hotplug[]; |
764 | extern const unsigned long sata_deb_timing_long[]; | 831 | extern const unsigned long sata_deb_timing_long[]; |
765 | 832 | ||
766 | extern const struct ata_port_operations ata_dummy_port_ops; | 833 | extern struct ata_port_operations ata_dummy_port_ops; |
767 | extern const struct ata_port_info ata_dummy_port_info; | 834 | extern const struct ata_port_info ata_dummy_port_info; |
768 | 835 | ||
769 | static inline const unsigned long * | 836 | static inline const unsigned long * |
@@ -782,22 +849,21 @@ static inline int ata_port_is_dummy(struct ata_port *ap) | |||
782 | 849 | ||
783 | extern void sata_print_link_status(struct ata_link *link); | 850 | extern void sata_print_link_status(struct ata_link *link); |
784 | extern void ata_port_probe(struct ata_port *); | 851 | extern void ata_port_probe(struct ata_port *); |
785 | extern void ata_bus_reset(struct ata_port *ap); | ||
786 | extern int sata_set_spd(struct ata_link *link); | 852 | extern int sata_set_spd(struct ata_link *link); |
853 | extern int ata_std_prereset(struct ata_link *link, unsigned long deadline); | ||
854 | extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, | ||
855 | int (*check_ready)(struct ata_link *link)); | ||
787 | extern int sata_link_debounce(struct ata_link *link, | 856 | extern int sata_link_debounce(struct ata_link *link, |
788 | const unsigned long *params, unsigned long deadline); | 857 | const unsigned long *params, unsigned long deadline); |
789 | extern int sata_link_resume(struct ata_link *link, const unsigned long *params, | 858 | extern int sata_link_resume(struct ata_link *link, const unsigned long *params, |
790 | unsigned long deadline); | 859 | unsigned long deadline); |
791 | extern int ata_std_prereset(struct ata_link *link, unsigned long deadline); | ||
792 | extern int ata_std_softreset(struct ata_link *link, unsigned int *classes, | ||
793 | unsigned long deadline); | ||
794 | extern int sata_link_hardreset(struct ata_link *link, | 860 | extern int sata_link_hardreset(struct ata_link *link, |
795 | const unsigned long *timing, unsigned long deadline); | 861 | const unsigned long *timing, unsigned long deadline, |
862 | bool *online, int (*check_ready)(struct ata_link *)); | ||
796 | extern int sata_std_hardreset(struct ata_link *link, unsigned int *class, | 863 | extern int sata_std_hardreset(struct ata_link *link, unsigned int *class, |
797 | unsigned long deadline); | 864 | unsigned long deadline); |
798 | extern void ata_std_postreset(struct ata_link *link, unsigned int *classes); | 865 | extern void ata_std_postreset(struct ata_link *link, unsigned int *classes); |
799 | extern void ata_port_disable(struct ata_port *); | 866 | extern void ata_port_disable(struct ata_port *); |
800 | extern void ata_std_ports(struct ata_ioports *ioaddr); | ||
801 | 867 | ||
802 | extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports); | 868 | extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports); |
803 | extern struct ata_host *ata_host_alloc_pinfo(struct device *dev, | 869 | extern struct ata_host *ata_host_alloc_pinfo(struct device *dev, |
@@ -810,7 +876,7 @@ extern int ata_host_activate(struct ata_host *host, int irq, | |||
810 | struct scsi_host_template *sht); | 876 | struct scsi_host_template *sht); |
811 | extern void ata_host_detach(struct ata_host *host); | 877 | extern void ata_host_detach(struct ata_host *host); |
812 | extern void ata_host_init(struct ata_host *, struct device *, | 878 | extern void ata_host_init(struct ata_host *, struct device *, |
813 | unsigned long, const struct ata_port_operations *); | 879 | unsigned long, struct ata_port_operations *); |
814 | extern int ata_scsi_detect(struct scsi_host_template *sht); | 880 | extern int ata_scsi_detect(struct scsi_host_template *sht); |
815 | extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); | 881 | extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); |
816 | extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); | 882 | extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); |
@@ -823,7 +889,6 @@ extern void ata_sas_port_stop(struct ata_port *ap); | |||
823 | extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); | 889 | extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); |
824 | extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), | 890 | extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), |
825 | struct ata_port *ap); | 891 | struct ata_port *ap); |
826 | extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); | ||
827 | extern int sata_scr_valid(struct ata_link *link); | 892 | extern int sata_scr_valid(struct ata_link *link); |
828 | extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); | 893 | extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); |
829 | extern int sata_scr_write(struct ata_link *link, int reg, u32 val); | 894 | extern int sata_scr_write(struct ata_link *link, int reg, u32 val); |
@@ -835,21 +900,9 @@ extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg); | |||
835 | extern void ata_host_resume(struct ata_host *host); | 900 | extern void ata_host_resume(struct ata_host *host); |
836 | #endif | 901 | #endif |
837 | extern int ata_ratelimit(void); | 902 | extern int ata_ratelimit(void); |
838 | extern int ata_busy_sleep(struct ata_port *ap, | ||
839 | unsigned long timeout_pat, unsigned long timeout); | ||
840 | extern void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline); | ||
841 | extern int ata_wait_ready(struct ata_port *ap, unsigned long deadline); | ||
842 | extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, | 903 | extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, |
843 | unsigned long interval_msec, | 904 | unsigned long interval_msec, |
844 | unsigned long timeout_msec); | 905 | unsigned long timeout_msec); |
845 | extern unsigned int ata_dev_try_classify(struct ata_device *dev, int present, | ||
846 | u8 *r_err); | ||
847 | |||
848 | /* | ||
849 | * Default driver ops implementations | ||
850 | */ | ||
851 | extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); | ||
852 | extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf); | ||
853 | extern int atapi_cmd_type(u8 opcode); | 906 | extern int atapi_cmd_type(u8 opcode); |
854 | extern void ata_tf_to_fis(const struct ata_taskfile *tf, | 907 | extern void ata_tf_to_fis(const struct ata_taskfile *tf, |
855 | u8 pmp, int is_cmd, u8 *fis); | 908 | u8 pmp, int is_cmd, u8 *fis); |
@@ -864,23 +917,9 @@ extern unsigned long ata_xfer_mode2mask(u8 xfer_mode); | |||
864 | extern int ata_xfer_mode2shift(unsigned long xfer_mode); | 917 | extern int ata_xfer_mode2shift(unsigned long xfer_mode); |
865 | extern const char *ata_mode_string(unsigned long xfer_mask); | 918 | extern const char *ata_mode_string(unsigned long xfer_mask); |
866 | extern unsigned long ata_id_xfermask(const u16 *id); | 919 | extern unsigned long ata_id_xfermask(const u16 *id); |
867 | extern void ata_noop_dev_select(struct ata_port *ap, unsigned int device); | ||
868 | extern void ata_std_dev_select(struct ata_port *ap, unsigned int device); | ||
869 | extern u8 ata_check_status(struct ata_port *ap); | ||
870 | extern u8 ata_altstatus(struct ata_port *ap); | ||
871 | extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); | ||
872 | extern int ata_port_start(struct ata_port *ap); | 920 | extern int ata_port_start(struct ata_port *ap); |
873 | extern int ata_sff_port_start(struct ata_port *ap); | ||
874 | extern irqreturn_t ata_interrupt(int irq, void *dev_instance); | ||
875 | extern unsigned int ata_data_xfer(struct ata_device *dev, | ||
876 | unsigned char *buf, unsigned int buflen, int rw); | ||
877 | extern unsigned int ata_data_xfer_noirq(struct ata_device *dev, | ||
878 | unsigned char *buf, unsigned int buflen, int rw); | ||
879 | extern int ata_std_qc_defer(struct ata_queued_cmd *qc); | 921 | extern int ata_std_qc_defer(struct ata_queued_cmd *qc); |
880 | extern void ata_dumb_qc_prep(struct ata_queued_cmd *qc); | ||
881 | extern void ata_qc_prep(struct ata_queued_cmd *qc); | ||
882 | extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); | 922 | extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); |
883 | extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc); | ||
884 | extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, | 923 | extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, |
885 | unsigned int n_elem); | 924 | unsigned int n_elem); |
886 | extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); | 925 | extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); |
@@ -889,24 +928,8 @@ extern void ata_id_string(const u16 *id, unsigned char *s, | |||
889 | unsigned int ofs, unsigned int len); | 928 | unsigned int ofs, unsigned int len); |
890 | extern void ata_id_c_string(const u16 *id, unsigned char *s, | 929 | extern void ata_id_c_string(const u16 *id, unsigned char *s, |
891 | unsigned int ofs, unsigned int len); | 930 | unsigned int ofs, unsigned int len); |
892 | extern void ata_bmdma_setup(struct ata_queued_cmd *qc); | ||
893 | extern void ata_bmdma_start(struct ata_queued_cmd *qc); | ||
894 | extern void ata_bmdma_stop(struct ata_queued_cmd *qc); | ||
895 | extern u8 ata_bmdma_status(struct ata_port *ap); | ||
896 | extern void ata_bmdma_irq_clear(struct ata_port *ap); | ||
897 | extern void ata_bmdma_freeze(struct ata_port *ap); | ||
898 | extern void ata_bmdma_thaw(struct ata_port *ap); | ||
899 | extern void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | ||
900 | ata_reset_fn_t softreset, | ||
901 | ata_reset_fn_t hardreset, | ||
902 | ata_postreset_fn_t postreset); | ||
903 | extern void ata_bmdma_error_handler(struct ata_port *ap); | ||
904 | extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc); | ||
905 | extern int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, | ||
906 | u8 status, int in_wq); | ||
907 | extern void ata_qc_complete(struct ata_queued_cmd *qc); | 931 | extern void ata_qc_complete(struct ata_queued_cmd *qc); |
908 | extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active, | 932 | extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active); |
909 | void (*finish_qc)(struct ata_queued_cmd *)); | ||
910 | extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, | 933 | extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, |
911 | void (*done)(struct scsi_cmnd *)); | 934 | void (*done)(struct scsi_cmnd *)); |
912 | extern int ata_std_bios_param(struct scsi_device *sdev, | 935 | extern int ata_std_bios_param(struct scsi_device *sdev, |
@@ -918,7 +941,6 @@ extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, | |||
918 | int queue_depth); | 941 | int queue_depth); |
919 | extern struct ata_device *ata_dev_pair(struct ata_device *adev); | 942 | extern struct ata_device *ata_dev_pair(struct ata_device *adev); |
920 | extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); | 943 | extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); |
921 | extern u8 ata_irq_on(struct ata_port *ap); | ||
922 | 944 | ||
923 | extern int ata_cable_40wire(struct ata_port *ap); | 945 | extern int ata_cable_40wire(struct ata_port *ap); |
924 | extern int ata_cable_80wire(struct ata_port *ap); | 946 | extern int ata_cable_80wire(struct ata_port *ap); |
@@ -926,10 +948,7 @@ extern int ata_cable_sata(struct ata_port *ap); | |||
926 | extern int ata_cable_ignore(struct ata_port *ap); | 948 | extern int ata_cable_ignore(struct ata_port *ap); |
927 | extern int ata_cable_unknown(struct ata_port *ap); | 949 | extern int ata_cable_unknown(struct ata_port *ap); |
928 | 950 | ||
929 | /* | 951 | /* Timing helpers */ |
930 | * Timing helpers | ||
931 | */ | ||
932 | |||
933 | extern unsigned int ata_pio_need_iordy(const struct ata_device *); | 952 | extern unsigned int ata_pio_need_iordy(const struct ata_device *); |
934 | extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode); | 953 | extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode); |
935 | extern int ata_timing_compute(struct ata_device *, unsigned short, | 954 | extern int ata_timing_compute(struct ata_device *, unsigned short, |
@@ -939,24 +958,31 @@ extern void ata_timing_merge(const struct ata_timing *, | |||
939 | unsigned int); | 958 | unsigned int); |
940 | extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle); | 959 | extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle); |
941 | 960 | ||
942 | enum { | 961 | /* PCI */ |
943 | ATA_TIMING_SETUP = (1 << 0), | 962 | #ifdef CONFIG_PCI |
944 | ATA_TIMING_ACT8B = (1 << 1), | 963 | struct pci_dev; |
945 | ATA_TIMING_REC8B = (1 << 2), | 964 | |
946 | ATA_TIMING_CYC8B = (1 << 3), | 965 | struct pci_bits { |
947 | ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B | | 966 | unsigned int reg; /* PCI config register to read */ |
948 | ATA_TIMING_CYC8B, | 967 | unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */ |
949 | ATA_TIMING_ACTIVE = (1 << 4), | 968 | unsigned long mask; |
950 | ATA_TIMING_RECOVER = (1 << 5), | 969 | unsigned long val; |
951 | ATA_TIMING_CYCLE = (1 << 6), | ||
952 | ATA_TIMING_UDMA = (1 << 7), | ||
953 | ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B | | ||
954 | ATA_TIMING_REC8B | ATA_TIMING_CYC8B | | ||
955 | ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER | | ||
956 | ATA_TIMING_CYCLE | ATA_TIMING_UDMA, | ||
957 | }; | 970 | }; |
958 | 971 | ||
959 | /* libata-acpi.c */ | 972 | extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits); |
973 | extern void ata_pci_remove_one(struct pci_dev *pdev); | ||
974 | |||
975 | #ifdef CONFIG_PM | ||
976 | extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg); | ||
977 | extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev); | ||
978 | extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); | ||
979 | extern int ata_pci_device_resume(struct pci_dev *pdev); | ||
980 | #endif /* CONFIG_PM */ | ||
981 | #endif /* CONFIG_PCI */ | ||
982 | |||
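struct pci_bits names one field of PCI config space: read @reg at @width bytes, AND with @mask, and compare against @val; pci_test_config_bits() returns nonzero on a match. The classic use is testing a port-enable bit before poking legacy IDE registers. The offset and bit below are invented for illustration.

/* Illustrative check of a (hypothetical) port-enable bit. */
static int my_port_enabled(struct pci_dev *pdev)
{
        static const struct pci_bits my_enable_bit = {
                .reg    = 0x41,         /* invented config offset */
                .width  = 1,            /* 8-bit read */
                .mask   = 0x80,
                .val    = 0x80,         /* the bit must be set */
        };

        return pci_test_config_bits(pdev, &my_enable_bit);
}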
983 | /* | ||
984 | * ACPI - drivers/ata/libata-acpi.c | ||
985 | */ | ||
960 | #ifdef CONFIG_ATA_ACPI | 986 | #ifdef CONFIG_ATA_ACPI |
961 | static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) | 987 | static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) |
962 | { | 988 | { |
@@ -1000,56 +1026,8 @@ static inline int ata_acpi_cbl_80wire(struct ata_port *ap, | |||
1000 | } | 1026 | } |
1001 | #endif | 1027 | #endif |
1002 | 1028 | ||
1003 | #ifdef CONFIG_PCI | ||
1004 | struct pci_dev; | ||
1005 | |||
1006 | extern int ata_pci_init_one(struct pci_dev *pdev, | ||
1007 | const struct ata_port_info * const * ppi); | ||
1008 | extern void ata_pci_remove_one(struct pci_dev *pdev); | ||
1009 | #ifdef CONFIG_PM | ||
1010 | extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg); | ||
1011 | extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev); | ||
1012 | extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); | ||
1013 | extern int ata_pci_device_resume(struct pci_dev *pdev); | ||
1014 | #endif | ||
1015 | extern int ata_pci_clear_simplex(struct pci_dev *pdev); | ||
1016 | |||
1017 | struct pci_bits { | ||
1018 | unsigned int reg; /* PCI config register to read */ | ||
1019 | unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */ | ||
1020 | unsigned long mask; | ||
1021 | unsigned long val; | ||
1022 | }; | ||
1023 | |||
1024 | extern int ata_pci_init_sff_host(struct ata_host *host); | ||
1025 | extern int ata_pci_init_bmdma(struct ata_host *host); | ||
1026 | extern int ata_pci_prepare_sff_host(struct pci_dev *pdev, | ||
1027 | const struct ata_port_info * const * ppi, | ||
1028 | struct ata_host **r_host); | ||
1029 | extern int ata_pci_activate_sff_host(struct ata_host *host, | ||
1030 | irq_handler_t irq_handler, | ||
1031 | struct scsi_host_template *sht); | ||
1032 | extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits); | ||
1033 | extern unsigned long ata_pci_default_filter(struct ata_device *dev, | ||
1034 | unsigned long xfer_mask); | ||
1035 | #endif /* CONFIG_PCI */ | ||
1036 | |||
1037 | /* | ||
1038 | * PMP | ||
1039 | */ | ||
1040 | extern int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc); | ||
1041 | extern int sata_pmp_std_prereset(struct ata_link *link, unsigned long deadline); | ||
1042 | extern int sata_pmp_std_hardreset(struct ata_link *link, unsigned int *class, | ||
1043 | unsigned long deadline); | ||
1044 | extern void sata_pmp_std_postreset(struct ata_link *link, unsigned int *class); | ||
1045 | extern void sata_pmp_do_eh(struct ata_port *ap, | ||
1046 | ata_prereset_fn_t prereset, ata_reset_fn_t softreset, | ||
1047 | ata_reset_fn_t hardreset, ata_postreset_fn_t postreset, | ||
1048 | ata_prereset_fn_t pmp_prereset, ata_reset_fn_t pmp_softreset, | ||
1049 | ata_reset_fn_t pmp_hardreset, ata_postreset_fn_t pmp_postreset); | ||
1050 | |||
1051 | /* | 1029 | /* |
1052 | * EH | 1030 | * EH - drivers/ata/libata-eh.c |
1053 | */ | 1031 | */ |
1054 | extern void ata_port_schedule_eh(struct ata_port *ap); | 1032 | extern void ata_port_schedule_eh(struct ata_port *ap); |
1055 | extern int ata_link_abort(struct ata_link *link); | 1033 | extern int ata_link_abort(struct ata_link *link); |
@@ -1066,6 +1044,92 @@ extern void ata_eh_qc_retry(struct ata_queued_cmd *qc); | |||
1066 | extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | 1044 | extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, |
1067 | ata_reset_fn_t softreset, ata_reset_fn_t hardreset, | 1045 | ata_reset_fn_t softreset, ata_reset_fn_t hardreset, |
1068 | ata_postreset_fn_t postreset); | 1046 | ata_postreset_fn_t postreset); |
1047 | extern void ata_std_error_handler(struct ata_port *ap); | ||
1048 | |||
1049 | /* | ||
1050 | * Base operations to inherit from and initializers for sht | ||
1051 | * | ||
1052 | * Operations | ||
1053 | * | ||
1054 | * base : Common to all libata drivers. | ||
1055 | * sata : SATA controllers w/ native interface. | ||
1056 | * pmp : SATA controllers w/ PMP support. | ||
1057 | * sff : SFF ATA controllers w/o BMDMA support. | ||
1058 | * bmdma : SFF ATA controllers w/ BMDMA support. | ||
1059 | * | ||
1060 | * sht initializers | ||
1061 | * | ||
1062 | * BASE : Common to all libata drivers. The user must set | ||
1063 | * sg_tablesize and dma_boundary. | ||
1064 | * PIO : SFF ATA controllers w/ only PIO support. | ||
1065 | * BMDMA : SFF ATA controllers w/ BMDMA support. sg_tablesize and | ||
1066 | * dma_boundary are set to BMDMA limits. | ||
1067 | * NCQ : SATA controllers supporting NCQ. The user must set | ||
1068 | * sg_tablesize, dma_boundary and can_queue. | ||
1069 | */ | ||
1070 | extern const struct ata_port_operations ata_base_port_ops; | ||
1071 | extern const struct ata_port_operations sata_port_ops; | ||
1072 | |||
1073 | #define ATA_BASE_SHT(drv_name) \ | ||
1074 | .module = THIS_MODULE, \ | ||
1075 | .name = drv_name, \ | ||
1076 | .ioctl = ata_scsi_ioctl, \ | ||
1077 | .queuecommand = ata_scsi_queuecmd, \ | ||
1078 | .can_queue = ATA_DEF_QUEUE, \ | ||
1079 | .this_id = ATA_SHT_THIS_ID, \ | ||
1080 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, \ | ||
1081 | .emulated = ATA_SHT_EMULATED, \ | ||
1082 | .use_clustering = ATA_SHT_USE_CLUSTERING, \ | ||
1083 | .proc_name = drv_name, \ | ||
1084 | .slave_configure = ata_scsi_slave_config, \ | ||
1085 | .slave_destroy = ata_scsi_slave_destroy, \ | ||
1086 | .bios_param = ata_std_bios_param | ||
1087 | |||
1088 | #define ATA_NCQ_SHT(drv_name) \ | ||
1089 | ATA_BASE_SHT(drv_name), \ | ||
1090 | .change_queue_depth = ata_scsi_change_queue_depth | ||
1091 | |||
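As the block comment above notes, ATA_NCQ_SHT() still leaves can_queue, sg_tablesize and dma_boundary to the driver. A sketch of the resulting scsi_host_template, with the driver name invented:

/* Hypothetical NCQ-capable driver's SCSI host template. */
static struct scsi_host_template my_sata_sht = {
        ATA_NCQ_SHT("my_sata"),
        .can_queue      = ATA_MAX_QUEUE - 1,
        .sg_tablesize   = LIBATA_MAX_PRD,
        .dma_boundary   = ATA_DMA_BOUNDARY,
};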
1092 | /* | ||
1093 | * PMP helpers | ||
1094 | */ | ||
1095 | #ifdef CONFIG_SATA_PMP | ||
1096 | static inline bool sata_pmp_supported(struct ata_port *ap) | ||
1097 | { | ||
1098 | return ap->flags & ATA_FLAG_PMP; | ||
1099 | } | ||
1100 | |||
1101 | static inline bool sata_pmp_attached(struct ata_port *ap) | ||
1102 | { | ||
1103 | return ap->nr_pmp_links != 0; | ||
1104 | } | ||
1105 | |||
1106 | static inline int ata_is_host_link(const struct ata_link *link) | ||
1107 | { | ||
1108 | return link == &link->ap->link; | ||
1109 | } | ||
1110 | #else /* CONFIG_SATA_PMP */ | ||
1111 | static inline bool sata_pmp_supported(struct ata_port *ap) | ||
1112 | { | ||
1113 | return false; | ||
1114 | } | ||
1115 | |||
1116 | static inline bool sata_pmp_attached(struct ata_port *ap) | ||
1117 | { | ||
1118 | return false; | ||
1119 | } | ||
1120 | |||
1121 | static inline int ata_is_host_link(const struct ata_link *link) | ||
1122 | { | ||
1123 | return 1; | ||
1124 | } | ||
1125 | #endif /* CONFIG_SATA_PMP */ | ||
1126 | |||
1127 | static inline int sata_srst_pmp(struct ata_link *link) | ||
1128 | { | ||
1129 | if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) | ||
1130 | return SATA_PMP_CTRL_PORT; | ||
1131 | return link->pmp; | ||
1132 | } | ||
1069 | 1133 | ||
1070 | /* | 1134 | /* |
1071 | * printk helpers | 1135 | * printk helpers |
@@ -1074,7 +1138,7 @@ extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
1074 | printk("%sata%u: "fmt, lv, (ap)->print_id , ##args) | 1138 | printk("%sata%u: "fmt, lv, (ap)->print_id , ##args) |
1075 | 1139 | ||
1076 | #define ata_link_printk(link, lv, fmt, args...) do { \ | 1140 | #define ata_link_printk(link, lv, fmt, args...) do { \ |
1077 | if ((link)->ap->nr_pmp_links) \ | 1141 | if (sata_pmp_attached((link)->ap)) \ |
1078 | printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id, \ | 1142 | printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id, \ |
1079 | (link)->pmp , ##args); \ | 1143 | (link)->pmp , ##args); \ |
1080 | else \ | 1144 | else \ |
@@ -1094,18 +1158,11 @@ extern void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) | |||
1094 | __attribute__ ((format (printf, 2, 3))); | 1158 | __attribute__ ((format (printf, 2, 3))); |
1095 | extern void ata_ehi_clear_desc(struct ata_eh_info *ehi); | 1159 | extern void ata_ehi_clear_desc(struct ata_eh_info *ehi); |
1096 | 1160 | ||
1097 | static inline void ata_ehi_schedule_probe(struct ata_eh_info *ehi) | ||
1098 | { | ||
1099 | ehi->flags |= ATA_EHI_RESUME_LINK; | ||
1100 | ehi->action |= ATA_EH_SOFTRESET; | ||
1101 | ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1; | ||
1102 | } | ||
1103 | |||
1104 | static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi) | 1161 | static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi) |
1105 | { | 1162 | { |
1106 | ata_ehi_schedule_probe(ehi); | 1163 | ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1; |
1107 | ehi->flags |= ATA_EHI_HOTPLUGGED; | 1164 | ehi->flags |= ATA_EHI_HOTPLUGGED; |
1108 | ehi->action |= ATA_EH_ENABLE_LINK; | 1165 | ehi->action |= ATA_EH_RESET | ATA_EH_ENABLE_LINK; |
1109 | ehi->err_mask |= AC_ERR_ATA_BUS; | 1166 | ehi->err_mask |= AC_ERR_ATA_BUS; |
1110 | } | 1167 | } |
1111 | 1168 | ||
@@ -1126,7 +1183,7 @@ static inline unsigned int ata_tag_valid(unsigned int tag) | |||
1126 | 1183 | ||
1127 | static inline unsigned int ata_tag_internal(unsigned int tag) | 1184 | static inline unsigned int ata_tag_internal(unsigned int tag) |
1128 | { | 1185 | { |
1129 | return tag == ATA_MAX_QUEUE - 1; | 1186 | return tag == ATA_TAG_INTERNAL; |
1130 | } | 1187 | } |
1131 | 1188 | ||
1132 | /* | 1189 | /* |
@@ -1167,11 +1224,6 @@ static inline unsigned int ata_dev_absent(const struct ata_device *dev) | |||
1167 | /* | 1224 | /* |
1168 | * link helpers | 1225 | * link helpers |
1169 | */ | 1226 | */ |
1170 | static inline int ata_is_host_link(const struct ata_link *link) | ||
1171 | { | ||
1172 | return link == &link->ap->link; | ||
1173 | } | ||
1174 | |||
1175 | static inline int ata_link_max_devices(const struct ata_link *link) | 1227 | static inline int ata_link_max_devices(const struct ata_link *link) |
1176 | { | 1228 | { |
1177 | if (ata_is_host_link(link) && link->ap->flags & ATA_FLAG_SLAVE_POSS) | 1229 | if (ata_is_host_link(link) && link->ap->flags & ATA_FLAG_SLAVE_POSS) |
@@ -1186,7 +1238,7 @@ static inline int ata_link_active(struct ata_link *link) | |||
1186 | 1238 | ||
1187 | static inline struct ata_link *ata_port_first_link(struct ata_port *ap) | 1239 | static inline struct ata_link *ata_port_first_link(struct ata_port *ap) |
1188 | { | 1240 | { |
1189 | if (ap->nr_pmp_links) | 1241 | if (sata_pmp_attached(ap)) |
1190 | return ap->pmp_link; | 1242 | return ap->pmp_link; |
1191 | return &ap->link; | 1243 | return &ap->link; |
1192 | } | 1244 | } |
@@ -1195,8 +1247,8 @@ static inline struct ata_link *ata_port_next_link(struct ata_link *link) | |||
1195 | { | 1247 | { |
1196 | struct ata_port *ap = link->ap; | 1248 | struct ata_port *ap = link->ap; |
1197 | 1249 | ||
1198 | if (link == &ap->link) { | 1250 | if (ata_is_host_link(link)) { |
1199 | if (!ap->nr_pmp_links) | 1251 | if (!sata_pmp_attached(ap)) |
1200 | return NULL; | 1252 | return NULL; |
1201 | return ap->pmp_link; | 1253 | return ap->pmp_link; |
1202 | } | 1254 | } |
@@ -1222,11 +1274,6 @@ static inline struct ata_link *ata_port_next_link(struct ata_link *link) | |||
1222 | for ((dev) = (link)->device + ata_link_max_devices(link) - 1; \ | 1274 | for ((dev) = (link)->device + ata_link_max_devices(link) - 1; \ |
1223 | (dev) >= (link)->device || ((dev) = NULL); (dev)--) | 1275 | (dev) >= (link)->device || ((dev) = NULL); (dev)--) |
1224 | 1276 | ||
1225 | static inline u8 ata_chk_status(struct ata_port *ap) | ||
1226 | { | ||
1227 | return ap->ops->check_status(ap); | ||
1228 | } | ||
1229 | |||
1230 | /** | 1277 | /** |
1231 | * ata_ncq_enabled - Test whether NCQ is enabled | 1278 | * ata_ncq_enabled - Test whether NCQ is enabled |
1232 | * @dev: ATA device to test for | 1279 | * @dev: ATA device to test for |
@@ -1243,74 +1290,6 @@ static inline int ata_ncq_enabled(struct ata_device *dev) | |||
1243 | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ; | 1290 | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ; |
1244 | } | 1291 | } |
1245 | 1292 | ||
1246 | /** | ||
1247 | * ata_pause - Flush writes and pause 400 nanoseconds. | ||
1248 | * @ap: Port to wait for. | ||
1249 | * | ||
1250 | * LOCKING: | ||
1251 | * Inherited from caller. | ||
1252 | */ | ||
1253 | |||
1254 | static inline void ata_pause(struct ata_port *ap) | ||
1255 | { | ||
1256 | ata_altstatus(ap); | ||
1257 | ndelay(400); | ||
1258 | } | ||
1259 | |||
1260 | |||
1261 | /** | ||
1262 | * ata_busy_wait - Wait for a port status register | ||
1263 | * @ap: Port to wait for. | ||
1264 | * @bits: bits that must be clear | ||
1265 | * @max: number of 10uS waits to perform | ||
1266 | * | ||
1267 | * Waits up to max*10 microseconds for the selected bits in the port's | ||
1268 | * status register to be cleared. | ||
1269 | * Returns final value of status register. | ||
1270 | * | ||
1271 | * LOCKING: | ||
1272 | * Inherited from caller. | ||
1273 | */ | ||
1274 | |||
1275 | static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits, | ||
1276 | unsigned int max) | ||
1277 | { | ||
1278 | u8 status; | ||
1279 | |||
1280 | do { | ||
1281 | udelay(10); | ||
1282 | status = ata_chk_status(ap); | ||
1283 | max--; | ||
1284 | } while (status != 0xff && (status & bits) && (max > 0)); | ||
1285 | |||
1286 | return status; | ||
1287 | } | ||
1288 | |||
1289 | |||
1290 | /** | ||
1291 | * ata_wait_idle - Wait for a port to be idle. | ||
1292 | * @ap: Port to wait for. | ||
1293 | * | ||
1294 | * Waits up to 10ms for port's BUSY and DRQ signals to clear. | ||
1295 | * Returns final value of status register. | ||
1296 | * | ||
1297 | * LOCKING: | ||
1298 | * Inherited from caller. | ||
1299 | */ | ||
1300 | |||
1301 | static inline u8 ata_wait_idle(struct ata_port *ap) | ||
1302 | { | ||
1303 | u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); | ||
1304 | |||
1305 | #ifdef ATA_DEBUG | ||
1306 | if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ))) | ||
1307 | ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n", | ||
1308 | status); | ||
1309 | #endif | ||
1310 | |||
1311 | return status; | ||
1312 | } | ||
1313 | |||
1314 | static inline void ata_qc_set_polling(struct ata_queued_cmd *qc) | 1293 | static inline void ata_qc_set_polling(struct ata_queued_cmd *qc) |
1315 | { | 1294 | { |
1316 | qc->tf.ctl |= ATA_NIEN; | 1295 | qc->tf.ctl |= ATA_NIEN; |
@@ -1403,4 +1382,171 @@ static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host) | |||
1403 | return *(struct ata_port **)&host->hostdata[0]; | 1382 | return *(struct ata_port **)&host->hostdata[0]; |
1404 | } | 1383 | } |
1405 | 1384 | ||
1385 | |||
1386 | /************************************************************************** | ||
1387 | * PMP - drivers/ata/libata-pmp.c | ||
1388 | */ | ||
1389 | #ifdef CONFIG_SATA_PMP | ||
1390 | |||
1391 | extern const struct ata_port_operations sata_pmp_port_ops; | ||
1392 | |||
1393 | extern int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc); | ||
1394 | extern void sata_pmp_error_handler(struct ata_port *ap); | ||
1395 | |||
1396 | #else /* CONFIG_SATA_PMP */ | ||
1397 | |||
1398 | #define sata_pmp_port_ops sata_port_ops | ||
1399 | #define sata_pmp_qc_defer_cmd_switch ata_std_qc_defer | ||
1400 | #define sata_pmp_error_handler ata_std_error_handler | ||
1401 | |||
1402 | #endif /* CONFIG_SATA_PMP */ | ||
1403 | |||
1404 | |||
1405 | /************************************************************************** | ||
1406 | * SFF - drivers/ata/libata-sff.c | ||
1407 | */ | ||
1408 | #ifdef CONFIG_ATA_SFF | ||
1409 | |||
1410 | extern const struct ata_port_operations ata_sff_port_ops; | ||
1411 | extern const struct ata_port_operations ata_bmdma_port_ops; | ||
1412 | |||
1413 | /* PIO only, sg_tablesize and dma_boundary limits can be removed */ | ||
1414 | #define ATA_PIO_SHT(drv_name) \ | ||
1415 | ATA_BASE_SHT(drv_name), \ | ||
1416 | .sg_tablesize = LIBATA_MAX_PRD, \ | ||
1417 | .dma_boundary = ATA_DMA_BOUNDARY | ||
1418 | |||
1419 | #define ATA_BMDMA_SHT(drv_name) \ | ||
1420 | ATA_BASE_SHT(drv_name), \ | ||
1421 | .sg_tablesize = LIBATA_MAX_PRD, \ | ||
1422 | .dma_boundary = ATA_DMA_BOUNDARY | ||
1423 | |||
1424 | extern void ata_sff_qc_prep(struct ata_queued_cmd *qc); | ||
1425 | extern void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc); | ||
1426 | extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device); | ||
1427 | extern u8 ata_sff_check_status(struct ata_port *ap); | ||
1428 | extern u8 ata_sff_altstatus(struct ata_port *ap); | ||
1429 | extern int ata_sff_busy_sleep(struct ata_port *ap, | ||
1430 | unsigned long timeout_pat, unsigned long timeout); | ||
1431 | extern int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline); | ||
1432 | extern void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); | ||
1433 | extern void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf); | ||
1434 | extern void ata_sff_exec_command(struct ata_port *ap, | ||
1435 | const struct ata_taskfile *tf); | ||
1436 | extern unsigned int ata_sff_data_xfer(struct ata_device *dev, | ||
1437 | unsigned char *buf, unsigned int buflen, int rw); | ||
1438 | extern unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, | ||
1439 | unsigned char *buf, unsigned int buflen, int rw); | ||
1440 | extern u8 ata_sff_irq_on(struct ata_port *ap); | ||
1441 | extern void ata_sff_irq_clear(struct ata_port *ap); | ||
1442 | extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, | ||
1443 | u8 status, int in_wq); | ||
1444 | extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc); | ||
1445 | extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc); | ||
1446 | extern unsigned int ata_sff_host_intr(struct ata_port *ap, | ||
1447 | struct ata_queued_cmd *qc); | ||
1448 | extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance); | ||
1449 | extern void ata_sff_freeze(struct ata_port *ap); | ||
1450 | extern void ata_sff_thaw(struct ata_port *ap); | ||
1451 | extern int ata_sff_prereset(struct ata_link *link, unsigned long deadline); | ||
1452 | extern unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, | ||
1453 | u8 *r_err); | ||
1454 | extern int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, | ||
1455 | unsigned long deadline); | ||
1456 | extern int ata_sff_softreset(struct ata_link *link, unsigned int *classes, | ||
1457 | unsigned long deadline); | ||
1458 | extern int sata_sff_hardreset(struct ata_link *link, unsigned int *class, | ||
1459 | unsigned long deadline); | ||
1460 | extern void ata_sff_postreset(struct ata_link *link, unsigned int *classes); | ||
1461 | extern void ata_sff_error_handler(struct ata_port *ap); | ||
1462 | extern void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc); | ||
1463 | extern int ata_sff_port_start(struct ata_port *ap); | ||
1464 | extern void ata_sff_std_ports(struct ata_ioports *ioaddr); | ||
1465 | extern unsigned long ata_bmdma_mode_filter(struct ata_device *dev, | ||
1466 | unsigned long xfer_mask); | ||
1467 | extern void ata_bmdma_setup(struct ata_queued_cmd *qc); | ||
1468 | extern void ata_bmdma_start(struct ata_queued_cmd *qc); | ||
1469 | extern void ata_bmdma_stop(struct ata_queued_cmd *qc); | ||
1470 | extern u8 ata_bmdma_status(struct ata_port *ap); | ||
1471 | extern void ata_bus_reset(struct ata_port *ap); | ||
1472 | |||
1473 | #ifdef CONFIG_PCI | ||
1474 | extern int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev); | ||
1475 | extern int ata_pci_bmdma_init(struct ata_host *host); | ||
1476 | extern int ata_pci_sff_init_host(struct ata_host *host); | ||
1477 | extern int ata_pci_sff_prepare_host(struct pci_dev *pdev, | ||
1478 | const struct ata_port_info * const * ppi, | ||
1479 | struct ata_host **r_host); | ||
1480 | extern int ata_pci_sff_activate_host(struct ata_host *host, | ||
1481 | irq_handler_t irq_handler, | ||
1482 | struct scsi_host_template *sht); | ||
1483 | extern int ata_pci_sff_init_one(struct pci_dev *pdev, | ||
1484 | const struct ata_port_info * const * ppi, | ||
1485 | struct scsi_host_template *sht, void *host_priv); | ||
1486 | #endif /* CONFIG_PCI */ | ||
1487 | |||
1488 | /** | ||
1489 | * ata_sff_pause - Flush writes and pause 400 nanoseconds. | ||
1490 | * @ap: Port to wait for. | ||
1491 | * | ||
1492 | * LOCKING: | ||
1493 | * Inherited from caller. | ||
1494 | */ | ||
1495 | static inline void ata_sff_pause(struct ata_port *ap) | ||
1496 | { | ||
1497 | ata_sff_altstatus(ap); | ||
1498 | ndelay(400); | ||
1499 | } | ||
1500 | |||
1501 | /** | ||
1502 | * ata_sff_busy_wait - Wait for a port status register | ||
1503 | * @ap: Port to wait for. | ||
1504 | * @bits: bits that must be clear | ||
1505 | * @max: number of 10us waits to perform | ||
1506 | * | ||
1507 | * Waits up to max*10 microseconds for the selected bits in the port's | ||
1508 | * status register to be cleared. | ||
1509 | * Returns final value of status register. | ||
1510 | * | ||
1511 | * LOCKING: | ||
1512 | * Inherited from caller. | ||
1513 | */ | ||
1514 | static inline u8 ata_sff_busy_wait(struct ata_port *ap, unsigned int bits, | ||
1515 | unsigned int max) | ||
1516 | { | ||
1517 | u8 status; | ||
1518 | |||
1519 | do { | ||
1520 | udelay(10); | ||
1521 | status = ap->ops->sff_check_status(ap); | ||
1522 | max--; | ||
1523 | } while (status != 0xff && (status & bits) && (max > 0)); | ||
1524 | |||
1525 | return status; | ||
1526 | } | ||
1527 | |||
1528 | /** | ||
1529 | * ata_wait_idle - Wait for a port to be idle. | ||
1530 | * @ap: Port to wait for. | ||
1531 | * | ||
1532 | * Waits up to 10ms for port's BUSY and DRQ signals to clear. | ||
1533 | * Returns final value of status register. | ||
1534 | * | ||
1535 | * LOCKING: | ||
1536 | * Inherited from caller. | ||
1537 | */ | ||
1538 | static inline u8 ata_wait_idle(struct ata_port *ap) | ||
1539 | { | ||
1540 | u8 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); | ||
1541 | |||
1542 | #ifdef ATA_DEBUG | ||
1543 | if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ))) | ||
1544 | ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n", | ||
1545 | status); | ||
1546 | #endif | ||
1547 | |||
1548 | return status; | ||
1549 | } | ||
1550 | #endif /* CONFIG_ATA_SFF */ | ||
1551 | |||
1406 | #endif /* __LINUX_LIBATA_H__ */ | 1552 | #endif /* __LINUX_LIBATA_H__ */ |
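The SFF busy-wait helpers above are the polling primitives PIO drivers build on. As a minimal sketch of the idiom — the function name and errno mapping are illustrative, not taken from any in-tree driver — a driver built with CONFIG_ATA_SFF might wait for the device to go idle like this:

#include <linux/libata.h>
#include <linux/errno.h>

/* Hypothetical helper: poll BSY and DRQ for up to 10 ms (1000 * 10 us). */
static int my_drv_wait_for_idle(struct ata_port *ap)
{
	u8 status = ata_wait_idle(ap);

	if (status == 0xff)			/* 0xff: no device responding */
		return -ENODEV;
	if (status & (ATA_BUSY | ATA_DRQ))	/* still busy after the timeout */
		return -EBUSY;
	return 0;
}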
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 7d1eaa97de13..77323a72dd3c 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h | |||
@@ -81,7 +81,7 @@ enum { | |||
81 | MLX4_CMD_SW2HW_CQ = 0x16, | 81 | MLX4_CMD_SW2HW_CQ = 0x16, |
82 | MLX4_CMD_HW2SW_CQ = 0x17, | 82 | MLX4_CMD_HW2SW_CQ = 0x17, |
83 | MLX4_CMD_QUERY_CQ = 0x18, | 83 | MLX4_CMD_QUERY_CQ = 0x18, |
84 | MLX4_CMD_RESIZE_CQ = 0x2c, | 84 | MLX4_CMD_MODIFY_CQ = 0x2c, |
85 | 85 | ||
86 | /* SRQ commands */ | 86 | /* SRQ commands */ |
87 | MLX4_CMD_SW2HW_SRQ = 0x35, | 87 | MLX4_CMD_SW2HW_SRQ = 0x35, |
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h index 0181e0a57cbf..071cf96cf01f 100644 --- a/include/linux/mlx4/cq.h +++ b/include/linux/mlx4/cq.h | |||
@@ -45,11 +45,11 @@ struct mlx4_cqe { | |||
45 | u8 sl; | 45 | u8 sl; |
46 | u8 reserved1; | 46 | u8 reserved1; |
47 | __be16 rlid; | 47 | __be16 rlid; |
48 | u32 reserved2; | 48 | __be32 ipoib_status; |
49 | __be32 byte_cnt; | 49 | __be32 byte_cnt; |
50 | __be16 wqe_index; | 50 | __be16 wqe_index; |
51 | __be16 checksum; | 51 | __be16 checksum; |
52 | u8 reserved3[3]; | 52 | u8 reserved2[3]; |
53 | u8 owner_sr_opcode; | 53 | u8 owner_sr_opcode; |
54 | }; | 54 | }; |
55 | 55 | ||
@@ -85,6 +85,16 @@ enum { | |||
85 | MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22, | 85 | MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22, |
86 | }; | 86 | }; |
87 | 87 | ||
88 | enum { | ||
89 | MLX4_CQE_IPOIB_STATUS_IPV4 = 1 << 22, | ||
90 | MLX4_CQE_IPOIB_STATUS_IPV4F = 1 << 23, | ||
91 | MLX4_CQE_IPOIB_STATUS_IPV6 = 1 << 24, | ||
92 | MLX4_CQE_IPOIB_STATUS_IPV4OPT = 1 << 25, | ||
93 | MLX4_CQE_IPOIB_STATUS_TCP = 1 << 26, | ||
94 | MLX4_CQE_IPOIB_STATUS_UDP = 1 << 27, | ||
95 | MLX4_CQE_IPOIB_STATUS_IPOK = 1 << 28, | ||
96 | }; | ||
97 | |||
88 | static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd, | 98 | static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd, |
89 | void __iomem *uar_page, | 99 | void __iomem *uar_page, |
90 | spinlock_t *doorbell_lock) | 100 | spinlock_t *doorbell_lock) |
@@ -120,4 +130,9 @@ enum { | |||
120 | MLX4_CQ_DB_REQ_NOT = 2 << 24 | 130 | MLX4_CQ_DB_REQ_NOT = 2 << 24 |
121 | }; | 131 | }; |
122 | 132 | ||
133 | int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq, | ||
134 | u16 count, u16 period); | ||
135 | int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq, | ||
136 | int entries, struct mlx4_mtt *mtt); | ||
137 | |||
123 | #endif /* MLX4_CQ_H */ | 138 | #endif /* MLX4_CQ_H */ |
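The new ipoib_status word and its flag bits let a CQ consumer see whether the HCA validated a received packet's checksums. A minimal sketch, assuming the usual big-endian CQE convention (my_cqe_csum_ok is a hypothetical name):

#include <linux/types.h>
#include <linux/mlx4/cq.h>
#include <asm/byteorder.h>

/* True if the HCA marked the IP header good and recognized TCP or UDP. */
static bool my_cqe_csum_ok(const struct mlx4_cqe *cqe)
{
	u32 status = be32_to_cpu(cqe->ipoib_status);

	return (status & MLX4_CQE_IPOIB_STATUS_IPOK) &&
	       (status & (MLX4_CQE_IPOIB_STATUS_TCP |
			  MLX4_CQE_IPOIB_STATUS_UDP));
}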
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 6cdf813cd478..ff7df1a2222f 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -186,6 +186,7 @@ struct mlx4_caps { | |||
186 | u32 flags; | 186 | u32 flags; |
187 | u16 stat_rate_support; | 187 | u16 stat_rate_support; |
188 | u8 port_width_cap[MLX4_MAX_PORTS + 1]; | 188 | u8 port_width_cap[MLX4_MAX_PORTS + 1]; |
189 | int max_gso_sz; | ||
189 | }; | 190 | }; |
190 | 191 | ||
191 | struct mlx4_buf_list { | 192 | struct mlx4_buf_list { |
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index 1b835ca49df1..53c5fdb6eac4 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h | |||
@@ -48,8 +48,7 @@ struct mlx4_interface { | |||
48 | void * (*add) (struct mlx4_dev *dev); | 48 | void * (*add) (struct mlx4_dev *dev); |
49 | void (*remove)(struct mlx4_dev *dev, void *context); | 49 | void (*remove)(struct mlx4_dev *dev, void *context); |
50 | void (*event) (struct mlx4_dev *dev, void *context, | 50 | void (*event) (struct mlx4_dev *dev, void *context, |
51 | enum mlx4_dev_event event, int subtype, | 51 | enum mlx4_dev_event event, int port); |
52 | int port); | ||
53 | struct list_head list; | 52 | struct list_head list; |
54 | }; | 53 | }; |
55 | 54 | ||
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 09a2230923f2..a5e43febee4f 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h | |||
@@ -158,10 +158,12 @@ struct mlx4_qp_context { | |||
158 | #define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232) | 158 | #define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232) |
159 | 159 | ||
160 | enum { | 160 | enum { |
161 | MLX4_WQE_CTRL_NEC = 1 << 29, | 161 | MLX4_WQE_CTRL_NEC = 1 << 29, |
162 | MLX4_WQE_CTRL_FENCE = 1 << 6, | 162 | MLX4_WQE_CTRL_FENCE = 1 << 6, |
163 | MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2, | 163 | MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2, |
164 | MLX4_WQE_CTRL_SOLICITED = 1 << 1, | 164 | MLX4_WQE_CTRL_SOLICITED = 1 << 1, |
165 | MLX4_WQE_CTRL_IP_CSUM = 1 << 4, | ||
166 | MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5, | ||
165 | }; | 167 | }; |
166 | 168 | ||
167 | struct mlx4_wqe_ctrl_seg { | 169 | struct mlx4_wqe_ctrl_seg { |
@@ -217,6 +219,11 @@ struct mlx4_wqe_datagram_seg { | |||
217 | __be32 reservd[2]; | 219 | __be32 reservd[2]; |
218 | }; | 220 | }; |
219 | 221 | ||
222 | struct mlx4_lso_seg { | ||
223 | __be32 mss_hdr_size; | ||
224 | __be32 header[0]; | ||
225 | }; | ||
226 | |||
220 | struct mlx4_wqe_bind_seg { | 227 | struct mlx4_wqe_bind_seg { |
221 | __be32 flags1; | 228 | __be32 flags1; |
222 | __be32 flags2; | 229 | __be32 flags2; |
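The two new MLX4_WQE_CTRL_*_CSUM bits request transmit checksum offload per WQE. A hedged sketch, assuming they are set in the control segment's srcrb_flags word the way the mlx4 drivers use the other flag bits (that field is not shown in the hunk above):

#include <linux/mlx4/qp.h>
#include <asm/byteorder.h>

/* Ask the HCA to generate IP and TCP/UDP checksums for this send WQE. */
static void my_wqe_enable_csum(struct mlx4_wqe_ctrl_seg *ctrl)
{
	ctrl->srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
					 MLX4_WQE_CTRL_TCP_UDP_CSUM);
}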
diff --git a/include/linux/quota.h b/include/linux/quota.h index 6e0393a5b2ea..eb560d031acd 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h | |||
@@ -160,14 +160,18 @@ enum { | |||
160 | 160 | ||
161 | 161 | ||
162 | #ifdef __KERNEL__ | 162 | #ifdef __KERNEL__ |
163 | #include <linux/spinlock.h> | 163 | #include <linux/list.h> |
164 | #include <linux/rwsem.h> | ||
165 | #include <linux/mutex.h> | 164 | #include <linux/mutex.h> |
165 | #include <linux/rwsem.h> | ||
166 | #include <linux/spinlock.h> | ||
167 | #include <linux/wait.h> | ||
166 | 168 | ||
167 | #include <linux/dqblk_xfs.h> | 169 | #include <linux/dqblk_xfs.h> |
168 | #include <linux/dqblk_v1.h> | 170 | #include <linux/dqblk_v1.h> |
169 | #include <linux/dqblk_v2.h> | 171 | #include <linux/dqblk_v2.h> |
170 | 172 | ||
173 | #include <asm/atomic.h> | ||
174 | |||
171 | extern spinlock_t dq_data_lock; | 175 | extern spinlock_t dq_data_lock; |
172 | 176 | ||
173 | /* Maximal numbers of writes for quota operation (insert/delete/update) | 177 | /* Maximal numbers of writes for quota operation (insert/delete/update) |
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h new file mode 100644 index 000000000000..9cae64b00d6b --- /dev/null +++ b/include/linux/semaphore.h | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008 Intel Corporation | ||
3 | * Author: Matthew Wilcox <willy@linux.intel.com> | ||
4 | * | ||
5 | * Distributed under the terms of the GNU GPL, version 2 | ||
6 | * | ||
7 | * Please see kernel/semaphore.c for documentation of these functions | ||
8 | */ | ||
9 | #ifndef __LINUX_SEMAPHORE_H | ||
10 | #define __LINUX_SEMAPHORE_H | ||
11 | |||
12 | #include <linux/list.h> | ||
13 | #include <linux/spinlock.h> | ||
14 | |||
15 | /* Please don't access any members of this structure directly */ | ||
16 | struct semaphore { | ||
17 | spinlock_t lock; | ||
18 | unsigned int count; | ||
19 | struct list_head wait_list; | ||
20 | }; | ||
21 | |||
22 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
23 | { \ | ||
24 | .lock = __SPIN_LOCK_UNLOCKED((name).lock), \ | ||
25 | .count = n, \ | ||
26 | .wait_list = LIST_HEAD_INIT((name).wait_list), \ | ||
27 | } | ||
28 | |||
29 | #define __DECLARE_SEMAPHORE_GENERIC(name, count) \ | ||
30 | struct semaphore name = __SEMAPHORE_INITIALIZER(name, count) | ||
31 | |||
32 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) | ||
33 | |||
34 | static inline void sema_init(struct semaphore *sem, int val) | ||
35 | { | ||
36 | static struct lock_class_key __key; | ||
37 | *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val); | ||
38 | lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0); | ||
39 | } | ||
40 | |||
41 | #define init_MUTEX(sem) sema_init(sem, 1) | ||
42 | #define init_MUTEX_LOCKED(sem) sema_init(sem, 0) | ||
43 | |||
44 | extern void down(struct semaphore *sem); | ||
45 | extern int __must_check down_interruptible(struct semaphore *sem); | ||
46 | extern int __must_check down_killable(struct semaphore *sem); | ||
47 | extern int __must_check down_trylock(struct semaphore *sem); | ||
48 | extern int __must_check down_timeout(struct semaphore *sem, long jiffies); | ||
49 | extern void up(struct semaphore *sem); | ||
50 | |||
51 | #endif /* __LINUX_SEMAPHORE_H */ | ||
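Usage is unchanged from the old per-arch headers; only the implementation moves to common code. A minimal sketch of the classic pattern (my_sem and my_critical_section are hypothetical names):

#include <linux/semaphore.h>

static DECLARE_MUTEX(my_sem);		/* counting semaphore, initialized to 1 */

static int my_critical_section(void)
{
	int ret = down_interruptible(&my_sem);	/* sleeps; -EINTR on signal */

	if (ret)
		return ret;

	/* ... exclusive work ... */

	up(&my_sem);
	return 0;
}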
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 289942fc6655..7cb094a82456 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
@@ -213,6 +213,10 @@ struct uart_ops { | |||
213 | void (*config_port)(struct uart_port *, int); | 213 | void (*config_port)(struct uart_port *, int); |
214 | int (*verify_port)(struct uart_port *, struct serial_struct *); | 214 | int (*verify_port)(struct uart_port *, struct serial_struct *); |
215 | int (*ioctl)(struct uart_port *, unsigned int, unsigned long); | 215 | int (*ioctl)(struct uart_port *, unsigned int, unsigned long); |
216 | #ifdef CONFIG_CONSOLE_POLL | ||
217 | void (*poll_put_char)(struct uart_port *, unsigned char); | ||
218 | int (*poll_get_char)(struct uart_port *); | ||
219 | #endif | ||
216 | }; | 220 | }; |
217 | 221 | ||
218 | #define UART_CONFIG_TYPE (1 << 0) | 222 | #define UART_CONFIG_TYPE (1 << 0) |
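The two poll hooks give a debugger a way to do character I/O with interrupts off. A hedged sketch of what a 16550-style UART driver might wire up — the register offsets, the my_uart_* names, and the assumption of byte-wide registers at port->membase are all illustrative:

#ifdef CONFIG_CONSOLE_POLL
#include <linux/serial_core.h>
#include <linux/io.h>

static int my_uart_poll_get_char(struct uart_port *port)
{
	while (!(readb(port->membase + 5) & 0x01))	/* LSR: data ready? */
		cpu_relax();
	return readb(port->membase + 0);		/* RBR */
}

static void my_uart_poll_put_char(struct uart_port *port, unsigned char c)
{
	while (!(readb(port->membase + 5) & 0x20))	/* LSR: THR empty? */
		cpu_relax();
	writeb(c, port->membase + 0);			/* THR */
}
#endif /* CONFIG_CONSOLE_POLL */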
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index b00c1c73eb0a..79d59c937fac 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -45,9 +45,9 @@ struct kmem_cache_cpu { | |||
45 | struct kmem_cache_node { | 45 | struct kmem_cache_node { |
46 | spinlock_t list_lock; /* Protect partial list and nr_partial */ | 46 | spinlock_t list_lock; /* Protect partial list and nr_partial */ |
47 | unsigned long nr_partial; | 47 | unsigned long nr_partial; |
48 | atomic_long_t nr_slabs; | ||
49 | struct list_head partial; | 48 | struct list_head partial; |
50 | #ifdef CONFIG_SLUB_DEBUG | 49 | #ifdef CONFIG_SLUB_DEBUG |
50 | atomic_long_t nr_slabs; | ||
51 | struct list_head full; | 51 | struct list_head full; |
52 | #endif | 52 | #endif |
53 | }; | 53 | }; |
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 1129ee0a7180..d311a090fae7 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -296,43 +296,6 @@ do { \ | |||
296 | }) | 296 | }) |
297 | 297 | ||
298 | /* | 298 | /* |
299 | * Locks two spinlocks l1 and l2. | ||
300 | * l1_first indicates if spinlock l1 should be taken first. | ||
301 | */ | ||
302 | static inline void double_spin_lock(spinlock_t *l1, spinlock_t *l2, | ||
303 | bool l1_first) | ||
304 | __acquires(l1) | ||
305 | __acquires(l2) | ||
306 | { | ||
307 | if (l1_first) { | ||
308 | spin_lock(l1); | ||
309 | spin_lock(l2); | ||
310 | } else { | ||
311 | spin_lock(l2); | ||
312 | spin_lock(l1); | ||
313 | } | ||
314 | } | ||
315 | |||
316 | /* | ||
317 | * Unlocks two spinlocks l1 and l2. | ||
318 | * l1_taken_first indicates if spinlock l1 was taken first and therefore | ||
319 | * should be released after spinlock l2. | ||
320 | */ | ||
321 | static inline void double_spin_unlock(spinlock_t *l1, spinlock_t *l2, | ||
322 | bool l1_taken_first) | ||
323 | __releases(l1) | ||
324 | __releases(l2) | ||
325 | { | ||
326 | if (l1_taken_first) { | ||
327 | spin_unlock(l2); | ||
328 | spin_unlock(l1); | ||
329 | } else { | ||
330 | spin_unlock(l1); | ||
331 | spin_unlock(l2); | ||
332 | } | ||
333 | } | ||
334 | |||
335 | /* | ||
336 | * Pull the atomic_t declaration: | 299 | * Pull the atomic_t declaration: |
337 | * (asm-mips/atomic.h needs above definitions) | 300 | * (asm-mips/atomic.h needs above definitions) |
338 | */ | 301 | */ |
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 421323e5a2d6..accd7bad35b0 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h | |||
@@ -9,6 +9,9 @@ | |||
9 | 9 | ||
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | 11 | ||
12 | struct timespec; | ||
13 | struct compat_timespec; | ||
14 | |||
12 | /* | 15 | /* |
13 | * System call restart block. | 16 | * System call restart block. |
14 | */ | 17 | */ |
@@ -26,6 +29,15 @@ struct restart_block { | |||
26 | u32 bitset; | 29 | u32 bitset; |
27 | u64 time; | 30 | u64 time; |
28 | } futex; | 31 | } futex; |
32 | /* For nanosleep */ | ||
33 | struct { | ||
34 | clockid_t index; | ||
35 | struct timespec __user *rmtp; | ||
36 | #ifdef CONFIG_COMPAT | ||
37 | struct compat_timespec __user *compat_rmtp; | ||
38 | #endif | ||
39 | u64 expires; | ||
40 | } nanosleep; | ||
29 | }; | 41 | }; |
30 | }; | 42 | }; |
31 | 43 | ||
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 85c95cd39bc3..21f69aca4505 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h | |||
@@ -125,6 +125,7 @@ | |||
125 | #include <linux/cdev.h> | 125 | #include <linux/cdev.h> |
126 | 126 | ||
127 | struct tty_struct; | 127 | struct tty_struct; |
128 | struct tty_driver; | ||
128 | 129 | ||
129 | struct tty_operations { | 130 | struct tty_operations { |
130 | int (*open)(struct tty_struct * tty, struct file * filp); | 131 | int (*open)(struct tty_struct * tty, struct file * filp); |
@@ -157,6 +158,11 @@ struct tty_operations { | |||
157 | int (*tiocmget)(struct tty_struct *tty, struct file *file); | 158 | int (*tiocmget)(struct tty_struct *tty, struct file *file); |
158 | int (*tiocmset)(struct tty_struct *tty, struct file *file, | 159 | int (*tiocmset)(struct tty_struct *tty, struct file *file, |
159 | unsigned int set, unsigned int clear); | 160 | unsigned int set, unsigned int clear); |
161 | #ifdef CONFIG_CONSOLE_POLL | ||
162 | int (*poll_init)(struct tty_driver *driver, int line, char *options); | ||
163 | int (*poll_get_char)(struct tty_driver *driver, int line); | ||
164 | void (*poll_put_char)(struct tty_driver *driver, int line, char ch); | ||
165 | #endif | ||
160 | }; | 166 | }; |
161 | 167 | ||
162 | struct tty_driver { | 168 | struct tty_driver { |
@@ -220,6 +226,11 @@ struct tty_driver { | |||
220 | int (*tiocmget)(struct tty_struct *tty, struct file *file); | 226 | int (*tiocmget)(struct tty_struct *tty, struct file *file); |
221 | int (*tiocmset)(struct tty_struct *tty, struct file *file, | 227 | int (*tiocmset)(struct tty_struct *tty, struct file *file, |
222 | unsigned int set, unsigned int clear); | 228 | unsigned int set, unsigned int clear); |
229 | #ifdef CONFIG_CONSOLE_POLL | ||
230 | int (*poll_init)(struct tty_driver *driver, int line, char *options); | ||
231 | int (*poll_get_char)(struct tty_driver *driver, int line); | ||
232 | void (*poll_put_char)(struct tty_driver *driver, int line, char ch); | ||
233 | #endif | ||
223 | 234 | ||
224 | struct list_head tty_drivers; | 235 | struct list_head tty_drivers; |
225 | }; | 236 | }; |
@@ -230,6 +241,7 @@ struct tty_driver *alloc_tty_driver(int lines); | |||
230 | void put_tty_driver(struct tty_driver *driver); | 241 | void put_tty_driver(struct tty_driver *driver); |
231 | void tty_set_operations(struct tty_driver *driver, | 242 | void tty_set_operations(struct tty_driver *driver, |
232 | const struct tty_operations *op); | 243 | const struct tty_operations *op); |
244 | extern struct tty_driver *tty_find_polling_driver(char *name, int *line); | ||
233 | 245 | ||
234 | /* tty driver magic number */ | 246 | /* tty driver magic number */ |
235 | #define TTY_DRIVER_MAGIC 0x5402 | 247 | #define TTY_DRIVER_MAGIC 0x5402 |
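tty_find_polling_driver() is the lookup a debugger front end would use to resolve a name like "ttyS0" to a driver whose poll hooks are populated. A minimal sketch, assuming CONFIG_CONSOLE_POLL=y (my_attach_poll_console is hypothetical):

#include <linux/tty_driver.h>
#include <linux/errno.h>

static int my_attach_poll_console(char *name)
{
	int line;
	struct tty_driver *drv = tty_find_polling_driver(name, &line);

	if (!drv)
		return -ENODEV;
	if (drv->poll_init && drv->poll_init(drv, line, NULL))
		return -EIO;

	drv->poll_put_char(drv, line, '$');	/* one character, no IRQs needed */
	return 0;
}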
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 975c963e5789..fec6decfb983 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h | |||
@@ -84,4 +84,26 @@ static inline unsigned long __copy_from_user_nocache(void *to, | |||
84 | ret; \ | 84 | ret; \ |
85 | }) | 85 | }) |
86 | 86 | ||
87 | /* | ||
88 | * probe_kernel_read(): safely attempt to read from a location | ||
89 | * @dst: pointer to the buffer that shall take the data | ||
90 | * @src: address to read from | ||
91 | * @size: size of the data chunk | ||
92 | * | ||
93 | * Safely read from address @src to the buffer at @dst. If a kernel fault | ||
94 | * happens, handle that and return -EFAULT. | ||
95 | */ | ||
96 | extern long probe_kernel_read(void *dst, void *src, size_t size); | ||
97 | |||
98 | /* | ||
99 | * probe_kernel_write(): safely attempt to write to a location | ||
100 | * @dst: address to write to | ||
101 | * @src: pointer to the data that shall be written | ||
102 | * @size: size of the data chunk | ||
103 | * | ||
104 | * Safely write to address @dst from the buffer at @src. If a kernel fault | ||
105 | * happens, handle that and return -EFAULT. | ||
106 | */ | ||
107 | extern long probe_kernel_write(void *dst, void *src, size_t size); | ||
108 | |||
87 | #endif /* __LINUX_UACCESS_H__ */ | 109 | #endif /* __LINUX_UACCESS_H__ */ |
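probe_kernel_read()/probe_kernel_write() let callers touch arbitrary kernel addresses without oopsing; a fault is caught and turned into -EFAULT. A minimal sketch (my_peek_word is a hypothetical name):

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Read one word from an unvalidated kernel address, e.g. for a debugger. */
static int my_peek_word(void *addr, unsigned long *val)
{
	if (probe_kernel_read(val, addr, sizeof(*val)))
		return -EFAULT;		/* the fault was handled, not fatal */
	return 0;
}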
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h index 64a721fcbc1c..8d65bf0a625b 100644 --- a/include/rdma/ib_user_verbs.h +++ b/include/rdma/ib_user_verbs.h | |||
@@ -533,7 +533,10 @@ struct ib_uverbs_send_wr { | |||
533 | __u32 num_sge; | 533 | __u32 num_sge; |
534 | __u32 opcode; | 534 | __u32 opcode; |
535 | __u32 send_flags; | 535 | __u32 send_flags; |
536 | __u32 imm_data; | 536 | union { |
537 | __u32 imm_data; | ||
538 | __u32 invalidate_rkey; | ||
539 | } ex; | ||
537 | union { | 540 | union { |
538 | struct { | 541 | struct { |
539 | __u64 remote_addr; | 542 | __u64 remote_addr; |
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 701e7b40560a..95bf4bac44cb 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -94,7 +94,7 @@ enum ib_device_cap_flags { | |||
94 | IB_DEVICE_SRQ_RESIZE = (1<<13), | 94 | IB_DEVICE_SRQ_RESIZE = (1<<13), |
95 | IB_DEVICE_N_NOTIFY_CQ = (1<<14), | 95 | IB_DEVICE_N_NOTIFY_CQ = (1<<14), |
96 | IB_DEVICE_ZERO_STAG = (1<<15), | 96 | IB_DEVICE_ZERO_STAG = (1<<15), |
97 | IB_DEVICE_SEND_W_INV = (1<<16), | 97 | IB_DEVICE_RESERVED = (1<<16), /* old SEND_W_INV */ |
98 | IB_DEVICE_MEM_WINDOW = (1<<17), | 98 | IB_DEVICE_MEM_WINDOW = (1<<17), |
99 | /* | 99 | /* |
100 | * Devices should set IB_DEVICE_UD_IP_SUM if they support | 100 | * Devices should set IB_DEVICE_UD_IP_SUM if they support |
@@ -104,6 +104,8 @@ enum ib_device_cap_flags { | |||
104 | * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. | 104 | * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. |
105 | */ | 105 | */ |
106 | IB_DEVICE_UD_IP_CSUM = (1<<18), | 106 | IB_DEVICE_UD_IP_CSUM = (1<<18), |
107 | IB_DEVICE_UD_TSO = (1<<19), | ||
108 | IB_DEVICE_SEND_W_INV = (1<<21), | ||
107 | }; | 109 | }; |
108 | 110 | ||
109 | enum ib_atomic_cap { | 111 | enum ib_atomic_cap { |
@@ -411,6 +413,7 @@ enum ib_wc_opcode { | |||
411 | IB_WC_COMP_SWAP, | 413 | IB_WC_COMP_SWAP, |
412 | IB_WC_FETCH_ADD, | 414 | IB_WC_FETCH_ADD, |
413 | IB_WC_BIND_MW, | 415 | IB_WC_BIND_MW, |
416 | IB_WC_LSO, | ||
414 | /* | 417 | /* |
415 | * Set value of IB_WC_RECV so consumers can test if a completion is a | 418 | * Set value of IB_WC_RECV so consumers can test if a completion is a |
416 | * receive by testing (opcode & IB_WC_RECV). | 419 | * receive by testing (opcode & IB_WC_RECV). |
@@ -495,6 +498,10 @@ enum ib_qp_type { | |||
495 | IB_QPT_RAW_ETY | 498 | IB_QPT_RAW_ETY |
496 | }; | 499 | }; |
497 | 500 | ||
501 | enum ib_qp_create_flags { | ||
502 | IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0, | ||
503 | }; | ||
504 | |||
498 | struct ib_qp_init_attr { | 505 | struct ib_qp_init_attr { |
499 | void (*event_handler)(struct ib_event *, void *); | 506 | void (*event_handler)(struct ib_event *, void *); |
500 | void *qp_context; | 507 | void *qp_context; |
@@ -504,6 +511,7 @@ struct ib_qp_init_attr { | |||
504 | struct ib_qp_cap cap; | 511 | struct ib_qp_cap cap; |
505 | enum ib_sig_type sq_sig_type; | 512 | enum ib_sig_type sq_sig_type; |
506 | enum ib_qp_type qp_type; | 513 | enum ib_qp_type qp_type; |
514 | enum ib_qp_create_flags create_flags; | ||
507 | u8 port_num; /* special QP types only */ | 515 | u8 port_num; /* special QP types only */ |
508 | }; | 516 | }; |
509 | 517 | ||
@@ -617,7 +625,9 @@ enum ib_wr_opcode { | |||
617 | IB_WR_SEND_WITH_IMM, | 625 | IB_WR_SEND_WITH_IMM, |
618 | IB_WR_RDMA_READ, | 626 | IB_WR_RDMA_READ, |
619 | IB_WR_ATOMIC_CMP_AND_SWP, | 627 | IB_WR_ATOMIC_CMP_AND_SWP, |
620 | IB_WR_ATOMIC_FETCH_AND_ADD | 628 | IB_WR_ATOMIC_FETCH_AND_ADD, |
629 | IB_WR_LSO, | ||
630 | IB_WR_SEND_WITH_INV, | ||
621 | }; | 631 | }; |
622 | 632 | ||
623 | enum ib_send_flags { | 633 | enum ib_send_flags { |
@@ -641,7 +651,10 @@ struct ib_send_wr { | |||
641 | int num_sge; | 651 | int num_sge; |
642 | enum ib_wr_opcode opcode; | 652 | enum ib_wr_opcode opcode; |
643 | int send_flags; | 653 | int send_flags; |
644 | __be32 imm_data; | 654 | union { |
655 | __be32 imm_data; | ||
656 | u32 invalidate_rkey; | ||
657 | } ex; | ||
645 | union { | 658 | union { |
646 | struct { | 659 | struct { |
647 | u64 remote_addr; | 660 | u64 remote_addr; |
@@ -655,6 +668,9 @@ struct ib_send_wr { | |||
655 | } atomic; | 668 | } atomic; |
656 | struct { | 669 | struct { |
657 | struct ib_ah *ah; | 670 | struct ib_ah *ah; |
671 | void *header; | ||
672 | int hlen; | ||
673 | int mss; | ||
658 | u32 remote_qpn; | 674 | u32 remote_qpn; |
659 | u32 remote_qkey; | 675 | u32 remote_qkey; |
660 | u16 pkey_index; /* valid for GSI only */ | 676 | u16 pkey_index; /* valid for GSI only */ |
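Taken together, the ex union and the new ud fields change how a UD LSO send is described: the payload sits in the sg list and the HCA replicates the pre-built header for each segment. A hedged sketch using only the fields shown above; the caller-supplied argument names and the MSS value are illustrative:

#include <linux/string.h>
#include <rdma/ib_verbs.h>

static void my_build_lso_wr(struct ib_send_wr *wr, struct ib_sge *sge,
			    struct ib_ah *ah, u32 qpn, u32 qkey,
			    void *hdr, int hlen)
{
	memset(wr, 0, sizeof(*wr));
	wr->opcode            = IB_WR_LSO;
	wr->sg_list           = sge;	/* payload the HCA segments */
	wr->num_sge           = 1;
	wr->send_flags        = IB_SEND_SIGNALED;
	wr->wr.ud.ah          = ah;
	wr->wr.ud.remote_qpn  = qpn;
	wr->wr.ud.remote_qkey = qkey;
	wr->wr.ud.header      = hdr;	/* copied into each segment */
	wr->wr.ud.hlen        = hlen;
	wr->wr.ud.mss         = 2048;	/* illustrative MSS */
}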
@@ -730,7 +746,7 @@ struct ib_uobject { | |||
730 | struct ib_ucontext *context; /* associated user context */ | 746 | struct ib_ucontext *context; /* associated user context */ |
731 | void *object; /* containing object */ | 747 | void *object; /* containing object */ |
732 | struct list_head list; /* link to context's list */ | 748 | struct list_head list; /* link to context's list */ |
733 | u32 id; /* index into kernel idr */ | 749 | int id; /* index into kernel idr */ |
734 | struct kref ref; | 750 | struct kref ref; |
735 | struct rw_semaphore mutex; /* protects .live */ | 751 | struct rw_semaphore mutex; /* protects .live */ |
736 | int live; | 752 | int live; |
@@ -971,6 +987,8 @@ struct ib_device { | |||
971 | int comp_vector, | 987 | int comp_vector, |
972 | struct ib_ucontext *context, | 988 | struct ib_ucontext *context, |
973 | struct ib_udata *udata); | 989 | struct ib_udata *udata); |
990 | int (*modify_cq)(struct ib_cq *cq, u16 cq_count, | ||
991 | u16 cq_period); | ||
974 | int (*destroy_cq)(struct ib_cq *cq); | 992 | int (*destroy_cq)(struct ib_cq *cq); |
975 | int (*resize_cq)(struct ib_cq *cq, int cqe, | 993 | int (*resize_cq)(struct ib_cq *cq, int cqe, |
976 | struct ib_udata *udata); | 994 | struct ib_udata *udata); |
@@ -1376,6 +1394,15 @@ struct ib_cq *ib_create_cq(struct ib_device *device, | |||
1376 | int ib_resize_cq(struct ib_cq *cq, int cqe); | 1394 | int ib_resize_cq(struct ib_cq *cq, int cqe); |
1377 | 1395 | ||
1378 | /** | 1396 | /** |
1397 | * ib_modify_cq - Modifies moderation params of the CQ | ||
1398 | * @cq: The CQ to modify. | ||
1399 | * @cq_count: number of CQEs that will trigger an event | ||
1400 | * @cq_period: max period of time in usec before triggering an event | ||
1401 | * | ||
1402 | */ | ||
1403 | int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); | ||
1404 | |||
1405 | /** | ||
1379 | * ib_destroy_cq - Destroys the specified CQ. | 1406 | * ib_destroy_cq - Destroys the specified CQ. |
1380 | * @cq: The CQ to destroy. | 1407 | * @cq: The CQ to destroy. |
1381 | */ | 1408 | */ |
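A one-line consumer of the new entry point, assuming the provider implements modify_cq (otherwise the call fails; the exact errno is provider-dependent):

#include <rdma/ib_verbs.h>

/* Event after 32 completions or 100 us, whichever comes first. */
static int my_set_cq_moderation(struct ib_cq *cq)
{
	return ib_modify_cq(cq, 32, 100);
}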