Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/acpi.h                      |  20
-rw-r--r--  include/asm-i386/apic.h                      |   2
-rw-r--r--  include/asm-i386/div64.h                     |   2
-rw-r--r--  include/asm-i386/fixmap.h                    |   2
-rw-r--r--  include/asm-i386/io_apic.h                   |   4
-rw-r--r--  include/asm-i386/mach-default/mach_reboot.h  |  10
-rw-r--r--  include/asm-i386/mmzone.h                    |   2
-rw-r--r--  include/asm-i386/mpspec.h                    |   4
-rw-r--r--  include/asm-i386/numa.h                      |   3
-rw-r--r--  include/asm-i386/processor.h                 |   4
-rw-r--r--  include/asm-i386/spinlock.h                  | 200
-rw-r--r--  include/asm-i386/spinlock_types.h            |  20
-rw-r--r--  include/asm-i386/thread_info.h               |   2
-rw-r--r--  include/asm-i386/topology.h                  |   2
-rw-r--r--  include/asm-i386/unistd.h                    |  12
15 files changed, 130 insertions(+), 159 deletions(-)
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
index cf828ace13f9..df4ed323aa4d 100644
--- a/include/asm-i386/acpi.h
+++ b/include/asm-i386/acpi.h
@@ -103,7 +103,7 @@ __acpi_release_global_lock (unsigned int *lock)
103 :"=r"(n_hi), "=r"(n_lo) \ 103 :"=r"(n_hi), "=r"(n_lo) \
104 :"0"(n_hi), "1"(n_lo)) 104 :"0"(n_hi), "1"(n_lo))
105 105
106#ifdef CONFIG_ACPI_BOOT 106#ifdef CONFIG_ACPI
107extern int acpi_lapic; 107extern int acpi_lapic;
108extern int acpi_ioapic; 108extern int acpi_ioapic;
109extern int acpi_noirq; 109extern int acpi_noirq;
@@ -146,13 +146,6 @@ static inline void check_acpi_pci(void) { }
 
 #endif
 
-#else	/* CONFIG_ACPI_BOOT */
-# define acpi_lapic 0
-# define acpi_ioapic 0
-
-#endif
-
-#ifdef CONFIG_ACPI_PCI
 static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
 static inline void acpi_disable_pci(void)
 {
@@ -160,11 +153,16 @@ static inline void acpi_disable_pci(void)
 	acpi_noirq_set();
 }
 extern int acpi_irq_balance_set(char *str);
-#else
+
+#else	/* !CONFIG_ACPI */
+
+#define acpi_lapic 0
+#define acpi_ioapic 0
 static inline void acpi_noirq_set(void) { }
 static inline void acpi_disable_pci(void) { }
-static inline int acpi_irq_balance_set(char *str) { return 0; }
-#endif
+
+#endif	/* !CONFIG_ACPI */
+
 
 #ifdef CONFIG_ACPI_SLEEP
 
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 6a1b1882285c..8c454aa58ac6 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -130,6 +130,8 @@ extern unsigned int nmi_watchdog;
 #define NMI_LOCAL_APIC	2
 #define NMI_INVALID	3
 
+extern int disable_timer_pin_1;
+
 #else /* !CONFIG_X86_LOCAL_APIC */
 static inline void lapic_shutdown(void) { }
 
diff --git a/include/asm-i386/div64.h b/include/asm-i386/div64.h
index 28ed8b296afc..75c67c785bb8 100644
--- a/include/asm-i386/div64.h
+++ b/include/asm-i386/div64.h
@@ -35,7 +35,7 @@
  */
 #define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c)
 
-extern inline long
+static inline long
 div_ll_X_l_rem(long long divs, long div, long *rem)
 {
 	long dum2;
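
The extern inline to static inline conversions in this patch (here and in processor.h below) fix a real linker hazard: under gcc's traditional gnu89 semantics, "extern inline" never emits an out-of-line body, so any call the compiler declines to inline (for instance at -O0 or with -fno-inline) becomes an unresolved symbol at link time, while "static inline" always keeps a per-translation-unit fallback. A minimal illustration, my own example rather than kernel code:

/* gnu89-inline demo (gcc -std=gnu89 -O0 demo.c):
 * twice_ext() gets no out-of-line body, so an uninlined call to it
 * fails to link; twice_st() always has a local fallback copy. */
extern inline int twice_ext(int x) { return 2 * x; }
static inline int twice_st(int x)  { return 2 * x; }

int use_them(int v)
{
	return twice_ext(v) + twice_st(v);	/* link error at -O0: twice_ext */
}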
diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h
index c94cac958389..cfb1c61d3b9c 100644
--- a/include/asm-i386/fixmap.h
+++ b/include/asm-i386/fixmap.h
@@ -76,7 +76,7 @@ enum fixed_addresses {
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
 #endif
-#ifdef CONFIG_ACPI_BOOT
+#ifdef CONFIG_ACPI
 	FIX_ACPI_BEGIN,
 	FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
 #endif
diff --git a/include/asm-i386/io_apic.h b/include/asm-i386/io_apic.h
index 002c203ccd6a..51c4e5fe6062 100644
--- a/include/asm-i386/io_apic.h
+++ b/include/asm-i386/io_apic.h
@@ -195,12 +195,12 @@ extern int skip_ioapic_setup;
  */
 #define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
 
-#ifdef CONFIG_ACPI_BOOT
+#ifdef CONFIG_ACPI
 extern int io_apic_get_unique_id (int ioapic, int apic_id);
 extern int io_apic_get_version (int ioapic);
 extern int io_apic_get_redir_entries (int ioapic);
 extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low);
-#endif /*CONFIG_ACPI_BOOT*/
+#endif /* CONFIG_ACPI */
 
 extern int (*ioapic_renumber_irq)(int ioapic, int irq);
 
diff --git a/include/asm-i386/mach-default/mach_reboot.h b/include/asm-i386/mach-default/mach_reboot.h
index 521e227db679..06ae4d81ba6a 100644
--- a/include/asm-i386/mach-default/mach_reboot.h
+++ b/include/asm-i386/mach-default/mach_reboot.h
@@ -22,7 +22,15 @@ static inline void mach_reboot(void)
 	for (i = 0; i < 100; i++) {
 		kb_wait();
 		udelay(50);
-		outb(0xfe, 0x64);	/* pulse reset low */
+		outb(0x60, 0x64);	/* write Controller Command Byte */
+		udelay(50);
+		kb_wait();
+		udelay(50);
+		outb(0x14, 0x60);	/* set "System flag" */
+		udelay(50);
+		kb_wait();
+		udelay(50);
+		outb(0xfe, 0x64);	/* pulse reset low */
 		udelay(50);
 	}
 }
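
The reboot change talks to the i8042 keyboard controller: port 0x64 is its command/status register and port 0x60 its data register. The new sequence first issues command 0x60 ("write command byte") and then writes 0x14 as the new command byte (setting the "System flag" bit) before pulsing the CPU reset line with 0xfe; the extra handshaking evidently matters on controllers that ignore a bare reset pulse. The kb_wait() helper used in the loop is defined earlier in this same header and spins until the controller's input buffer drains; roughly (paraphrased, not part of this diff):

static inline void kb_wait(void)
{
	int i;

	for (i = 0; i < 0x10000; i++)
		if ((inb_p(0x64) & 0x02) == 0)	/* bit 1: input buffer full */
			break;
}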
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index 516421300ea2..348fe3a4879d 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -29,7 +29,7 @@ static inline void get_memcfg_numa(void)
 #ifdef CONFIG_X86_NUMAQ
 	if (get_memcfg_numaq())
 		return;
-#elif CONFIG_ACPI_SRAT
+#elif defined(CONFIG_ACPI_SRAT)
 	if (get_memcfg_from_srat())
 		return;
 #endif
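
The #elif fix is more than style: inside #if/#elif an undefined identifier silently evaluates to 0, and a macro that is defined but expands to nothing is a hard preprocessor error, so testing defined(CONFIG_ACPI_SRAT) is the one form that behaves correctly however the config symbol is set. A toy demonstration (my own, not from the patch):

#define EMPTY_OPTION		/* defined, but expands to nothing */

#if defined(EMPTY_OPTION)	/* fine: tests existence, not value */
int option_present = 1;
#endif

/* "#if EMPTY_OPTION" would expand to a bare "#if" and fail to
 * preprocess; an entirely undefined name would quietly become 0. */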
diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h
index d84a9c326c22..64a0b8e6afeb 100644
--- a/include/asm-i386/mpspec.h
+++ b/include/asm-i386/mpspec.h
@@ -27,14 +27,14 @@ extern unsigned long mp_lapic_addr;
 extern int pic_mode;
 extern int using_apic_timer;
 
-#ifdef CONFIG_ACPI_BOOT
+#ifdef CONFIG_ACPI
 extern void mp_register_lapic (u8 id, u8 enabled);
 extern void mp_register_lapic_address (u64 address);
 extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base);
 extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi);
 extern void mp_config_acpi_legacy_irqs (void);
 extern int mp_register_gsi (u32 gsi, int edge_level, int active_high_low);
-#endif /*CONFIG_ACPI_BOOT*/
+#endif /* CONFIG_ACPI */
 
 #define PHYSID_ARRAY_SIZE	BITS_TO_LONGS(MAX_APICS)
 
diff --git a/include/asm-i386/numa.h b/include/asm-i386/numa.h
new file mode 100644
index 000000000000..96fcb157db1d
--- /dev/null
+++ b/include/asm-i386/numa.h
@@ -0,0 +1,3 @@
+
+int pxm_to_nid(int pxm);
+
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 37bef8ed7bed..0a4ec764377c 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -679,7 +679,7 @@ static inline void rep_nop(void)
    However we don't do prefetches for pre XP Athlons currently
    That should be fixed. */
 #define ARCH_HAS_PREFETCH
-extern inline void prefetch(const void *x)
+static inline void prefetch(const void *x)
 {
 	alternative_input(ASM_NOP4,
 			  "prefetchnta (%1)",
@@ -693,7 +693,7 @@ extern inline void prefetch(const void *x)
 
 /* 3dnow! prefetch to get an exclusive cache line. Useful for
    spinlocks to avoid one state transition in the cache coherency protocol. */
-extern inline void prefetchw(const void *x)
+static inline void prefetchw(const void *x)
 {
 	alternative_input(ASM_NOP4,
 			  "prefetchw (%1)",
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index f9ff31f40036..23604350cdf4 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -7,46 +7,21 @@
 #include <linux/config.h>
 #include <linux/compiler.h>
 
-asmlinkage int printk(const char * fmt, ...)
-	__attribute__ ((format (printf, 1, 2)));
-
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
-	volatile unsigned int slock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-	unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPINLOCK_MAGIC	0xdead4ead
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
-#else
-#define SPINLOCK_MAGIC_INIT	/* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
-
-#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-/*
+ *
  * Simple spin lock operations. There are two variants, one clears IRQ's
  * on the local processor, one does not.
  *
  * We make no fairness assumptions. They have a cost.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) <= 0)
-#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
+#define __raw_spin_is_locked(x) \
+		(*(volatile signed char *)(&(x)->slock) <= 0)
 
-#define spin_lock_string \
+#define __raw_spin_lock_string \
 	"\n1:\t" \
 	"lock ; decb %0\n\t" \
 	"jns 3f\n" \
@@ -57,7 +32,7 @@ typedef struct {
57 "jmp 1b\n" \ 32 "jmp 1b\n" \
58 "3:\n\t" 33 "3:\n\t"
59 34
60#define spin_lock_string_flags \ 35#define __raw_spin_lock_string_flags \
61 "\n1:\t" \ 36 "\n1:\t" \
62 "lock ; decb %0\n\t" \ 37 "lock ; decb %0\n\t" \
63 "jns 4f\n\t" \ 38 "jns 4f\n\t" \
@@ -73,86 +48,71 @@ typedef struct {
73 "jmp 1b\n" \ 48 "jmp 1b\n" \
74 "4:\n\t" 49 "4:\n\t"
75 50
51static inline void __raw_spin_lock(raw_spinlock_t *lock)
52{
53 __asm__ __volatile__(
54 __raw_spin_lock_string
55 :"=m" (lock->slock) : : "memory");
56}
57
58static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
59{
60 __asm__ __volatile__(
61 __raw_spin_lock_string_flags
62 :"=m" (lock->slock) : "r" (flags) : "memory");
63}
64
65static inline int __raw_spin_trylock(raw_spinlock_t *lock)
66{
67 char oldval;
68 __asm__ __volatile__(
69 "xchgb %b0,%1"
70 :"=q" (oldval), "=m" (lock->slock)
71 :"0" (0) : "memory");
72 return oldval > 0;
73}
74
76/* 75/*
77 * This works. Despite all the confusion. 76 * __raw_spin_unlock based on writing $1 to the low byte.
78 * (except on PPro SMP or if we are using OOSTORE) 77 * This method works. Despite all the confusion.
78 * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
79 * (PPro errata 66, 92) 79 * (PPro errata 66, 92)
80 */ 80 */
81 81
82#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) 82#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
83 83
84#define spin_unlock_string \ 84#define __raw_spin_unlock_string \
85 "movb $1,%0" \ 85 "movb $1,%0" \
86 :"=m" (lock->slock) : : "memory" 86 :"=m" (lock->slock) : : "memory"
87 87
88 88
89static inline void _raw_spin_unlock(spinlock_t *lock) 89static inline void __raw_spin_unlock(raw_spinlock_t *lock)
90{ 90{
91#ifdef CONFIG_DEBUG_SPINLOCK
92 BUG_ON(lock->magic != SPINLOCK_MAGIC);
93 BUG_ON(!spin_is_locked(lock));
94#endif
95 __asm__ __volatile__( 91 __asm__ __volatile__(
96 spin_unlock_string 92 __raw_spin_unlock_string
97 ); 93 );
98} 94}
99 95
100#else 96#else
101 97
102#define spin_unlock_string \ 98#define __raw_spin_unlock_string \
103 "xchgb %b0, %1" \ 99 "xchgb %b0, %1" \
104 :"=q" (oldval), "=m" (lock->slock) \ 100 :"=q" (oldval), "=m" (lock->slock) \
105 :"0" (oldval) : "memory" 101 :"0" (oldval) : "memory"
106 102
107static inline void _raw_spin_unlock(spinlock_t *lock) 103static inline void __raw_spin_unlock(raw_spinlock_t *lock)
108{ 104{
109 char oldval = 1; 105 char oldval = 1;
110#ifdef CONFIG_DEBUG_SPINLOCK
111 BUG_ON(lock->magic != SPINLOCK_MAGIC);
112 BUG_ON(!spin_is_locked(lock));
113#endif
114 __asm__ __volatile__(
115 spin_unlock_string
116 );
117}
118 106
119#endif
120
121static inline int _raw_spin_trylock(spinlock_t *lock)
122{
123 char oldval;
124 __asm__ __volatile__( 107 __asm__ __volatile__(
125 "xchgb %b0,%1" 108 __raw_spin_unlock_string
126 :"=q" (oldval), "=m" (lock->slock) 109 );
127 :"0" (0) : "memory");
128 return oldval > 0;
129} 110}
130 111
131static inline void _raw_spin_lock(spinlock_t *lock)
132{
133#ifdef CONFIG_DEBUG_SPINLOCK
134 if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
135 printk("eip: %p\n", __builtin_return_address(0));
136 BUG();
137 }
138#endif 112#endif
139 __asm__ __volatile__(
140 spin_lock_string
141 :"=m" (lock->slock) : : "memory");
142}
143 113
144static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) 114#define __raw_spin_unlock_wait(lock) \
145{ 115 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
146#ifdef CONFIG_DEBUG_SPINLOCK
147 if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
148 printk("eip: %p\n", __builtin_return_address(0));
149 BUG();
150 }
151#endif
152 __asm__ __volatile__(
153 spin_lock_string_flags
154 :"=m" (lock->slock) : "r" (flags) : "memory");
155}
156 116
157/* 117/*
158 * Read-write spinlocks, allowing multiple readers 118 * Read-write spinlocks, allowing multiple readers
@@ -163,72 +123,41 @@ static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
  * can "mix" irq-safe locks - any writer needs to get a
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
+ *
+ * On x86, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ *
+ * The inline assembly is non-obvious. Think about it.
+ *
+ * Changed to use the same technique as rw semaphores. See
+ * semaphore.h for details. -ben
+ *
+ * the helpers are in arch/i386/kernel/semaphore.c
  */
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-	unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC	0xdeaf1eed
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
-#else
-#define RWLOCK_MAGIC_INIT	/* */
-#endif
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
-
-#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
 
 /**
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define read_can_lock(x)	((int)(x)->lock > 0)
+#define __raw_read_can_lock(x)	((int)(x)->lock > 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
+#define __raw_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
 
-/*
- * On x86, we implement read-write locks as a 32-bit counter
- * with the high bit (sign) being the "contended" bit.
- *
- * The inline assembly is non-obvious. Think about it.
- *
- * Changed to use the same technique as rw semaphores. See
- * semaphore.h for details. -ben
- */
-/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
-
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-	BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
 	__build_read_lock(rw, "__read_lock_failed");
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-	BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
 	__build_write_lock(rw, "__write_lock_failed");
 }
 
-#define _raw_read_unlock(rw)	asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define _raw_write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
-
-static inline int _raw_read_trylock(rwlock_t *lock)
+static inline int __raw_read_trylock(raw_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 	atomic_dec(count);
@@ -238,7 +167,7 @@ static inline int _raw_read_trylock(rwlock_t *lock)
 	return 0;
 }
 
-static inline int _raw_write_trylock(rwlock_t *lock)
+static inline int __raw_write_trylock(raw_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -247,4 +176,15 @@ static inline int _raw_write_trylock(rwlock_t *lock)
 	return 0;
 }
 
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+	asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0"
+				 : "=m" (rw->lock) : : "memory");
+}
+
 #endif /* __ASM_SPINLOCK_H */
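
Two idioms above are worth unpacking. __raw_spin_trylock is an atomic exchange: store 0 into the lock byte and look at what was there; a positive old value (the unlocked state is 1) means the lock was taken, anything else means someone already held it. The rwlock side keeps a single counter starting at RW_LOCK_BIAS (0x01000000 on i386): each reader subtracts 1 and a writer subtracts the whole bias, so the counter is exactly 0 only when one writer and no readers are in, and a failed attempt just adds its delta back. A compilable userspace model of both, using GCC __sync builtins in place of the kernel's inline assembly (all names are mine; illustrative only):

#include <stdio.h>

#define RW_LOCK_BIAS 0x01000000

static volatile signed char slock = 1;		/* 1 == unlocked, as above */
static volatile int rwlock = RW_LOCK_BIAS;

static int my_spin_trylock(void)
{
	/* atomically store 0 and fetch the old byte - what xchgb does */
	return __sync_lock_test_and_set(&slock, 0) > 0;
}

static void my_spin_unlock(void)
{
	slock = 1;	/* the movb $1 path; relies on x86's ordered stores */
}

static int my_read_trylock(void)
{
	if (__sync_sub_and_fetch(&rwlock, 1) >= 0)
		return 1;			/* no writer present */
	__sync_add_and_fetch(&rwlock, 1);	/* undo, like the atomic_inc */
	return 0;
}

static int my_write_trylock(void)
{
	if (__sync_sub_and_fetch(&rwlock, RW_LOCK_BIAS) == 0)
		return 1;			/* counter was exactly BIAS */
	__sync_add_and_fetch(&rwlock, RW_LOCK_BIAS);
	return 0;
}

int main(void)
{
	printf("first trylock:  %d\n", my_spin_trylock());	/* 1: taken */
	printf("second trylock: %d\n", my_spin_trylock());	/* 0: held */
	my_spin_unlock();
	printf("read lock:      %d\n", my_read_trylock());	/* 1 */
	printf("write lock:     %d\n", my_write_trylock());	/* 0: reader in */
	return 0;
}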
diff --git a/include/asm-i386/spinlock_types.h b/include/asm-i386/spinlock_types.h
new file mode 100644
index 000000000000..59efe849f351
--- /dev/null
+++ b/include/asm-i386/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+	volatile unsigned int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED	{ 1 }
+
+typedef struct {
+	volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
+
+#endif
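
This new header is the point of the spinlock.h rewrite above: the architecture now exports only these two raw types and their initializers (slock starts at 1 because the lock path is "lock ; decb" with a sign test; the rwlock counter starts at RW_LOCK_BIAS), while the generic linux/spinlock_types.h wraps them and re-adds the debug and break_lock fields that the old per-arch spinlock_t carried. A sketch of that wrapping, inferred from the direction of this patch rather than quoted from the generic header:

/* Inferred shape of the generic wrapper, not a quotation: */
typedef struct {
	raw_spinlock_t raw_lock;	/* the arch-specific part above */
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;	/* was in the old arch spinlock_t */
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned int magic;		/* debug state now lives here */
#endif
} spinlock_t;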
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index e2cb9fa6f563..8fbf791651bf 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -48,7 +48,7 @@ struct thread_info {
 
 #else /* !__ASSEMBLY__ */
 
-#include <asm/asm_offsets.h>
+#include <asm/asm-offsets.h>
 
 #endif
 
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index 2461b731781e..0ec27c9e8e45 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -60,7 +60,7 @@ static inline int node_to_first_cpu(int node)
 	return first_cpu(mask);
 }
 
-#define pcibus_to_node(bus) mp_bus_id_to_node[(bus)->number]
+#define pcibus_to_node(bus) ((long) (bus)->sysdata)
 #define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus))
 
 /* sched_domains SD_NODE_INIT for NUMAQ machines */
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index a7cb377745bf..fbaf90a3968c 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -332,7 +332,7 @@ type name(type1 arg1) \
 long __res; \
 __asm__ volatile ("int $0x80" \
 	: "=a" (__res) \
-	: "0" (__NR_##name),"b" ((long)(arg1))); \
+	: "0" (__NR_##name),"b" ((long)(arg1)) : "memory"); \
 __syscall_return(type,__res); \
 }
 
@@ -342,7 +342,7 @@ type name(type1 arg1,type2 arg2) \
 long __res; \
 __asm__ volatile ("int $0x80" \
 	: "=a" (__res) \
-	: "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2))); \
+	: "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)) : "memory"); \
 __syscall_return(type,__res); \
 }
 
@@ -353,7 +353,7 @@ long __res; \
 __asm__ volatile ("int $0x80" \
 	: "=a" (__res) \
 	: "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
-	  "d" ((long)(arg3))); \
+	  "d" ((long)(arg3)) : "memory"); \
 __syscall_return(type,__res); \
 }
 
@@ -364,7 +364,7 @@ long __res; \
 __asm__ volatile ("int $0x80" \
 	: "=a" (__res) \
 	: "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
-	  "d" ((long)(arg3)),"S" ((long)(arg4))); \
+	  "d" ((long)(arg3)),"S" ((long)(arg4)) : "memory"); \
 __syscall_return(type,__res); \
 }
 
@@ -376,7 +376,7 @@ long __res; \
 __asm__ volatile ("int $0x80" \
 	: "=a" (__res) \
 	: "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
-	  "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5))); \
+	  "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) : "memory"); \
 __syscall_return(type,__res); \
 }
 
@@ -389,7 +389,7 @@ __asm__ volatile ("push %%ebp ; movl %%eax,%%ebp ; movl %1,%%eax ; int $0x80 ; p
 	: "=a" (__res) \
 	: "i" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
 	  "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)), \
-	  "0" ((long)(arg6))); \
+	  "0" ((long)(arg6)) : "memory"); \
 __syscall_return(type,__res); \
 }
 
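
All six unistd.h hunks make the same fix: the int $0x80 invocation can read and write user memory through its pointer arguments, so without a "memory" clobber GCC was free to cache values in registers across the syscall or reorder stores around it. Written out by hand for one instantiation, the patched _syscall1() now expands along these lines (hypothetical expansion of _syscall1(int, close, int, fd); __syscall_return and __NR_close come from earlier in this header):

int close(int fd)
{
	long __res;
	__asm__ volatile ("int $0x80"
		: "=a" (__res)
		: "0" (__NR_close), "b" ((long)(fd))
		: "memory");	/* new: the syscall may touch any memory */
	__syscall_return(int, __res);
}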