author    | Ingo Molnar <mingo@elte.hu> | 2009-01-10 06:04:41 -0500
committer | Ingo Molnar <mingo@elte.hu> | 2009-01-10 06:04:41 -0500
commit    | b17304245f0db0ac69b795c411407808f3f2796d (patch)
tree      | 63ed3915d9295bd08f640bf25c322064ba787fad /arch/x86
parent    | 889c92d21db40be0b7d22a59395060237895bb85 (diff)
parent    | 9a100a4464917b5ffff3a8ce1c2758088fd9bb32 (diff)
Merge branch 'linus' into x86/setup-lzma
Conflicts:
init/do_mounts_rd.c
Diffstat (limited to 'arch/x86')
34 files changed, 283 insertions(+), 362 deletions(-)
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 85a78575956c..8078955845ae 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu | |||
@@ -408,7 +408,7 @@ config X86_MINIMUM_CPU_FAMILY | |||
408 | 408 | ||
409 | config X86_DEBUGCTLMSR | 409 | config X86_DEBUGCTLMSR |
410 | def_bool y | 410 | def_bool y |
411 | depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386) | 411 | depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386) && !UML |
412 | 412 | ||
413 | menuconfig PROCESSOR_SELECT | 413 | menuconfig PROCESSOR_SELECT |
414 | bool "Supported processor vendors" if EMBEDDED | 414 | bool "Supported processor vendors" if EMBEDDED |
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild index 4a8e80cdcfa5..a9f8a814a1f7 100644 --- a/arch/x86/include/asm/Kbuild +++ b/arch/x86/include/asm/Kbuild | |||
@@ -22,3 +22,4 @@ unifdef-y += unistd_32.h | |||
22 | unifdef-y += unistd_64.h | 22 | unifdef-y += unistd_64.h |
23 | unifdef-y += vm86.h | 23 | unifdef-y += vm86.h |
24 | unifdef-y += vsyscall.h | 24 | unifdef-y += vsyscall.h |
25 | unifdef-y += swab.h | ||
diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h index ad5b9f6ecddf..85b46fba4229 100644 --- a/arch/x86/include/asm/atomic_32.h +++ b/arch/x86/include/asm/atomic_32.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _ASM_X86_ATOMIC_32_H | 2 | #define _ASM_X86_ATOMIC_32_H |
3 | 3 | ||
4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
5 | #include <linux/types.h> | ||
5 | #include <asm/processor.h> | 6 | #include <asm/processor.h> |
6 | #include <asm/cmpxchg.h> | 7 | #include <asm/cmpxchg.h> |
7 | 8 | ||
@@ -10,15 +11,6 @@ | |||
10 | * resource counting etc.. | 11 | * resource counting etc.. |
11 | */ | 12 | */ |
12 | 13 | ||
13 | /* | ||
14 | * Make sure gcc doesn't try to be clever and move things around | ||
15 | * on us. We need to use _exactly_ the address the user gave us, | ||
16 | * not some alias that contains the same information. | ||
17 | */ | ||
18 | typedef struct { | ||
19 | int counter; | ||
20 | } atomic_t; | ||
21 | |||
22 | #define ATOMIC_INIT(i) { (i) } | 14 | #define ATOMIC_INIT(i) { (i) } |
23 | 15 | ||
24 | /** | 16 | /** |
diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h index 279d2a731f3f..8c21731984da 100644 --- a/arch/x86/include/asm/atomic_64.h +++ b/arch/x86/include/asm/atomic_64.h | |||
@@ -1,25 +1,15 @@ | |||
1 | #ifndef _ASM_X86_ATOMIC_64_H | 1 | #ifndef _ASM_X86_ATOMIC_64_H |
2 | #define _ASM_X86_ATOMIC_64_H | 2 | #define _ASM_X86_ATOMIC_64_H |
3 | 3 | ||
4 | #include <linux/types.h> | ||
4 | #include <asm/alternative.h> | 5 | #include <asm/alternative.h> |
5 | #include <asm/cmpxchg.h> | 6 | #include <asm/cmpxchg.h> |
6 | 7 | ||
7 | /* atomic_t should be 32 bit signed type */ | ||
8 | |||
9 | /* | 8 | /* |
10 | * Atomic operations that C can't guarantee us. Useful for | 9 | * Atomic operations that C can't guarantee us. Useful for |
11 | * resource counting etc.. | 10 | * resource counting etc.. |
12 | */ | 11 | */ |
13 | 12 | ||
14 | /* | ||
15 | * Make sure gcc doesn't try to be clever and move things around | ||
16 | * on us. We need to use _exactly_ the address the user gave us, | ||
17 | * not some alias that contains the same information. | ||
18 | */ | ||
19 | typedef struct { | ||
20 | int counter; | ||
21 | } atomic_t; | ||
22 | |||
23 | #define ATOMIC_INIT(i) { (i) } | 13 | #define ATOMIC_INIT(i) { (i) } |
24 | 14 | ||
25 | /** | 15 | /** |
@@ -191,11 +181,7 @@ static inline int atomic_sub_return(int i, atomic_t *v) | |||
191 | #define atomic_inc_return(v) (atomic_add_return(1, v)) | 181 | #define atomic_inc_return(v) (atomic_add_return(1, v)) |
192 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) | 182 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) |
193 | 183 | ||
194 | /* An 64bit atomic type */ | 184 | /* The 64-bit atomic type */ |
195 | |||
196 | typedef struct { | ||
197 | long counter; | ||
198 | } atomic64_t; | ||
199 | 185 | ||
200 | #define ATOMIC64_INIT(i) { (i) } | 186 | #define ATOMIC64_INIT(i) { (i) } |
201 | 187 | ||
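For context, a minimal sketch (not part of this commit) of what the atomic_32.h/atomic_64.h cleanup relies on: the atomic_t/atomic64_t struct definitions appear to move into <linux/types.h> (that side of the merge lies outside arch/x86 and is not shown here), while ATOMIC_INIT() and the operations stay in <asm/atomic.h>. The helper names below are hypothetical.

#include <linux/types.h>	/* atomic_t definition now comes from here */
#include <asm/atomic.h>		/* ATOMIC_INIT() and the atomic_*() ops */

static atomic_t example_refcount = ATOMIC_INIT(1);

static inline void example_get(void)
{
	atomic_inc(&example_refcount);
}

static inline int example_put(void)
{
	/* returns nonzero when the count drops to zero */
	return atomic_dec_and_test(&example_refcount);
}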
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 9fa9dcdf344b..e02a359d2aa5 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h | |||
@@ -300,7 +300,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | |||
300 | return oldbit; | 300 | return oldbit; |
301 | } | 301 | } |
302 | 302 | ||
303 | static inline int constant_test_bit(int nr, const volatile unsigned long *addr) | 303 | static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) |
304 | { | 304 | { |
305 | return ((1UL << (nr % BITS_PER_LONG)) & | 305 | return ((1UL << (nr % BITS_PER_LONG)) & |
306 | (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; | 306 | (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; |
diff --git a/arch/x86/include/asm/byteorder.h b/arch/x86/include/asm/byteorder.h index f110ad417df3..7c49917e3d9d 100644 --- a/arch/x86/include/asm/byteorder.h +++ b/arch/x86/include/asm/byteorder.h | |||
@@ -1,65 +1,7 @@ | |||
1 | #ifndef _ASM_X86_BYTEORDER_H | 1 | #ifndef _ASM_X86_BYTEORDER_H |
2 | #define _ASM_X86_BYTEORDER_H | 2 | #define _ASM_X86_BYTEORDER_H |
3 | 3 | ||
4 | #include <asm/types.h> | 4 | #include <asm/swab.h> |
5 | #include <linux/compiler.h> | 5 | #include <linux/byteorder/little_endian.h> |
6 | |||
7 | #define __LITTLE_ENDIAN | ||
8 | |||
9 | static inline __attribute_const__ __u32 __arch_swab32(__u32 val) | ||
10 | { | ||
11 | #ifdef __i386__ | ||
12 | # ifdef CONFIG_X86_BSWAP | ||
13 | asm("bswap %0" : "=r" (val) : "0" (val)); | ||
14 | # else | ||
15 | asm("xchgb %b0,%h0\n\t" /* swap lower bytes */ | ||
16 | "rorl $16,%0\n\t" /* swap words */ | ||
17 | "xchgb %b0,%h0" /* swap higher bytes */ | ||
18 | : "=q" (val) | ||
19 | : "0" (val)); | ||
20 | # endif | ||
21 | |||
22 | #else /* __i386__ */ | ||
23 | asm("bswapl %0" | ||
24 | : "=r" (val) | ||
25 | : "0" (val)); | ||
26 | #endif | ||
27 | return val; | ||
28 | } | ||
29 | #define __arch_swab32 __arch_swab32 | ||
30 | |||
31 | static inline __attribute_const__ __u64 __arch_swab64(__u64 val) | ||
32 | { | ||
33 | #ifdef __i386__ | ||
34 | union { | ||
35 | struct { | ||
36 | __u32 a; | ||
37 | __u32 b; | ||
38 | } s; | ||
39 | __u64 u; | ||
40 | } v; | ||
41 | v.u = val; | ||
42 | # ifdef CONFIG_X86_BSWAP | ||
43 | asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" | ||
44 | : "=r" (v.s.a), "=r" (v.s.b) | ||
45 | : "0" (v.s.a), "1" (v.s.b)); | ||
46 | # else | ||
47 | v.s.a = __arch_swab32(v.s.a); | ||
48 | v.s.b = __arch_swab32(v.s.b); | ||
49 | asm("xchgl %0,%1" | ||
50 | : "=r" (v.s.a), "=r" (v.s.b) | ||
51 | : "0" (v.s.a), "1" (v.s.b)); | ||
52 | # endif | ||
53 | return v.u; | ||
54 | #else /* __i386__ */ | ||
55 | asm("bswapq %0" | ||
56 | : "=r" (val) | ||
57 | : "0" (val)); | ||
58 | return val; | ||
59 | #endif | ||
60 | } | ||
61 | #define __arch_swab64 __arch_swab64 | ||
62 | |||
63 | #include <linux/byteorder.h> | ||
64 | 6 | ||
65 | #endif /* _ASM_X86_BYTEORDER_H */ | 7 | #endif /* _ASM_X86_BYTEORDER_H */ |
diff --git a/arch/x86/include/asm/swab.h b/arch/x86/include/asm/swab.h new file mode 100644 index 000000000000..306d4178ffc9 --- /dev/null +++ b/arch/x86/include/asm/swab.h | |||
@@ -0,0 +1,61 @@ | |||
1 | #ifndef _ASM_X86_SWAB_H | ||
2 | #define _ASM_X86_SWAB_H | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | #include <linux/compiler.h> | ||
6 | |||
7 | static inline __attribute_const__ __u32 __arch_swab32(__u32 val) | ||
8 | { | ||
9 | #ifdef __i386__ | ||
10 | # ifdef CONFIG_X86_BSWAP | ||
11 | asm("bswap %0" : "=r" (val) : "0" (val)); | ||
12 | # else | ||
13 | asm("xchgb %b0,%h0\n\t" /* swap lower bytes */ | ||
14 | "rorl $16,%0\n\t" /* swap words */ | ||
15 | "xchgb %b0,%h0" /* swap higher bytes */ | ||
16 | : "=q" (val) | ||
17 | : "0" (val)); | ||
18 | # endif | ||
19 | |||
20 | #else /* __i386__ */ | ||
21 | asm("bswapl %0" | ||
22 | : "=r" (val) | ||
23 | : "0" (val)); | ||
24 | #endif | ||
25 | return val; | ||
26 | } | ||
27 | #define __arch_swab32 __arch_swab32 | ||
28 | |||
29 | static inline __attribute_const__ __u64 __arch_swab64(__u64 val) | ||
30 | { | ||
31 | #ifdef __i386__ | ||
32 | union { | ||
33 | struct { | ||
34 | __u32 a; | ||
35 | __u32 b; | ||
36 | } s; | ||
37 | __u64 u; | ||
38 | } v; | ||
39 | v.u = val; | ||
40 | # ifdef CONFIG_X86_BSWAP | ||
41 | asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" | ||
42 | : "=r" (v.s.a), "=r" (v.s.b) | ||
43 | : "0" (v.s.a), "1" (v.s.b)); | ||
44 | # else | ||
45 | v.s.a = __arch_swab32(v.s.a); | ||
46 | v.s.b = __arch_swab32(v.s.b); | ||
47 | asm("xchgl %0,%1" | ||
48 | : "=r" (v.s.a), "=r" (v.s.b) | ||
49 | : "0" (v.s.a), "1" (v.s.b)); | ||
50 | # endif | ||
51 | return v.u; | ||
52 | #else /* __i386__ */ | ||
53 | asm("bswapq %0" | ||
54 | : "=r" (val) | ||
55 | : "0" (val)); | ||
56 | return val; | ||
57 | #endif | ||
58 | } | ||
59 | #define __arch_swab64 __arch_swab64 | ||
60 | |||
61 | #endif /* _ASM_X86_SWAB_H */ | ||
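For context, a minimal usage sketch (not part of this commit): with the swap routines split out into <asm/swab.h> and <asm/byteorder.h> reduced to including it plus <linux/byteorder/little_endian.h>, callers keep using the generic byte-order helpers, which on x86 bottom out in the __arch_swab32()/__arch_swab64() overrides above. The function name below is hypothetical.

#include <linux/types.h>
#include <asm/byteorder.h>

static __be32 example_to_wire(u32 host_val)
{
	/* on little-endian x86 this ends up in the bswap-based __arch_swab32() */
	return cpu_to_be32(host_val);
}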
diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h index 51fb2c76ad74..b9e4e20174fb 100644 --- a/arch/x86/include/asm/swiotlb.h +++ b/arch/x86/include/asm/swiotlb.h | |||
@@ -1,46 +1,10 @@ | |||
1 | #ifndef _ASM_X86_SWIOTLB_H | 1 | #ifndef _ASM_X86_SWIOTLB_H |
2 | #define _ASM_X86_SWIOTLB_H | 2 | #define _ASM_X86_SWIOTLB_H |
3 | 3 | ||
4 | #include <asm/dma-mapping.h> | 4 | #include <linux/swiotlb.h> |
5 | 5 | ||
6 | /* SWIOTLB interface */ | 6 | /* SWIOTLB interface */ |
7 | 7 | ||
8 | extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, | ||
9 | size_t size, int dir); | ||
10 | extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, | ||
11 | dma_addr_t *dma_handle, gfp_t flags); | ||
12 | extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, | ||
13 | size_t size, int dir); | ||
14 | extern void swiotlb_sync_single_for_cpu(struct device *hwdev, | ||
15 | dma_addr_t dev_addr, | ||
16 | size_t size, int dir); | ||
17 | extern void swiotlb_sync_single_for_device(struct device *hwdev, | ||
18 | dma_addr_t dev_addr, | ||
19 | size_t size, int dir); | ||
20 | extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev, | ||
21 | dma_addr_t dev_addr, | ||
22 | unsigned long offset, | ||
23 | size_t size, int dir); | ||
24 | extern void swiotlb_sync_single_range_for_device(struct device *hwdev, | ||
25 | dma_addr_t dev_addr, | ||
26 | unsigned long offset, | ||
27 | size_t size, int dir); | ||
28 | extern void swiotlb_sync_sg_for_cpu(struct device *hwdev, | ||
29 | struct scatterlist *sg, int nelems, | ||
30 | int dir); | ||
31 | extern void swiotlb_sync_sg_for_device(struct device *hwdev, | ||
32 | struct scatterlist *sg, int nelems, | ||
33 | int dir); | ||
34 | extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, | ||
35 | int nents, int direction); | ||
36 | extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, | ||
37 | int nents, int direction); | ||
38 | extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); | ||
39 | extern void swiotlb_free_coherent(struct device *hwdev, size_t size, | ||
40 | void *vaddr, dma_addr_t dma_handle); | ||
41 | extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); | ||
42 | extern void swiotlb_init(void); | ||
43 | |||
44 | extern int swiotlb_force; | 8 | extern int swiotlb_force; |
45 | 9 | ||
46 | #ifdef CONFIG_SWIOTLB | 10 | #ifdef CONFIG_SWIOTLB |
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h deleted file mode 100644 index 8b064bd9c553..000000000000 --- a/arch/x86/include/asm/unwind.h +++ /dev/null | |||
@@ -1,13 +0,0 @@ | |||
1 | #ifndef _ASM_X86_UNWIND_H | ||
2 | #define _ASM_X86_UNWIND_H | ||
3 | |||
4 | #define UNW_PC(frame) ((void)(frame), 0UL) | ||
5 | #define UNW_SP(frame) ((void)(frame), 0UL) | ||
6 | #define UNW_FP(frame) ((void)(frame), 0UL) | ||
7 | |||
8 | static inline int arch_unw_user_mode(const void *info) | ||
9 | { | ||
10 | return 0; | ||
11 | } | ||
12 | |||
13 | #endif /* _ASM_X86_UNWIND_H */ | ||
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 29dc0c89d4af..d37593c2f438 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -47,7 +47,7 @@ | |||
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | static int __initdata acpi_force = 0; | 49 | static int __initdata acpi_force = 0; |
50 | 50 | u32 acpi_rsdt_forced; | |
51 | #ifdef CONFIG_ACPI | 51 | #ifdef CONFIG_ACPI |
52 | int acpi_disabled = 0; | 52 | int acpi_disabled = 0; |
53 | #else | 53 | #else |
@@ -1374,6 +1374,17 @@ static void __init acpi_process_madt(void) | |||
1374 | "Invalid BIOS MADT, disabling ACPI\n"); | 1374 | "Invalid BIOS MADT, disabling ACPI\n"); |
1375 | disable_acpi(); | 1375 | disable_acpi(); |
1376 | } | 1376 | } |
1377 | } else { | ||
1378 | /* | ||
1379 | * ACPI found no MADT, and so ACPI wants UP PIC mode. | ||
1380 | * In the event an MPS table was found, forget it. | ||
1381 | * Boot with "acpi=off" to use MPS on such a system. | ||
1382 | */ | ||
1383 | if (smp_found_config) { | ||
1384 | printk(KERN_WARNING PREFIX | ||
1385 | "No APIC-table, disabling MPS\n"); | ||
1386 | smp_found_config = 0; | ||
1387 | } | ||
1377 | } | 1388 | } |
1378 | 1389 | ||
1379 | /* | 1390 | /* |
@@ -1809,6 +1820,10 @@ static int __init parse_acpi(char *arg) | |||
1809 | disable_acpi(); | 1820 | disable_acpi(); |
1810 | acpi_ht = 1; | 1821 | acpi_ht = 1; |
1811 | } | 1822 | } |
1823 | /* acpi=rsdt use RSDT instead of XSDT */ | ||
1824 | else if (strcmp(arg, "rsdt") == 0) { | ||
1825 | acpi_rsdt_forced = 1; | ||
1826 | } | ||
1812 | /* "acpi=noirq" disables ACPI interrupt routing */ | 1827 | /* "acpi=noirq" disables ACPI interrupt routing */ |
1813 | else if (strcmp(arg, "noirq") == 0) { | 1828 | else if (strcmp(arg, "noirq") == 0) { |
1814 | acpi_noirq_set(); | 1829 | acpi_noirq_set(); |
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index c2502eb9aa83..a4805b3b4095 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c | |||
@@ -56,6 +56,7 @@ static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */ | |||
56 | static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; | 56 | static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; |
57 | 57 | ||
58 | #define MWAIT_SUBSTATE_MASK (0xf) | 58 | #define MWAIT_SUBSTATE_MASK (0xf) |
59 | #define MWAIT_CSTATE_MASK (0xf) | ||
59 | #define MWAIT_SUBSTATE_SIZE (4) | 60 | #define MWAIT_SUBSTATE_SIZE (4) |
60 | 61 | ||
61 | #define CPUID_MWAIT_LEAF (5) | 62 | #define CPUID_MWAIT_LEAF (5) |
@@ -98,7 +99,8 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, | |||
98 | cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); | 99 | cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); |
99 | 100 | ||
100 | /* Check whether this particular cx_type (in CST) is supported or not */ | 101 | /* Check whether this particular cx_type (in CST) is supported or not */ |
101 | cstate_type = (cx->address >> MWAIT_SUBSTATE_SIZE) + 1; | 102 | cstate_type = ((cx->address >> MWAIT_SUBSTATE_SIZE) & |
103 | MWAIT_CSTATE_MASK) + 1; | ||
102 | edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE); | 104 | edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE); |
103 | num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK; | 105 | num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK; |
104 | 106 | ||
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 806b4e9051b4..707c1f6f95fa 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c | |||
@@ -159,6 +159,8 @@ static int __init acpi_sleep_setup(char *str) | |||
159 | #endif | 159 | #endif |
160 | if (strncmp(str, "old_ordering", 12) == 0) | 160 | if (strncmp(str, "old_ordering", 12) == 0) |
161 | acpi_old_suspend_ordering(); | 161 | acpi_old_suspend_ordering(); |
162 | if (strncmp(str, "s4_nonvs", 8) == 0) | ||
163 | acpi_s4_no_nvs(); | ||
162 | str = strchr(str, ','); | 164 | str = strchr(str, ','); |
163 | if (str != NULL) | 165 | if (str != NULL) |
164 | str += strspn(str, ", \t"); | 166 | str += strspn(str, ", \t"); |
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c index b0461856acfb..a4cff5d6e380 100644 --- a/arch/x86/kernel/cpu/cpufreq/longhaul.c +++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c | |||
@@ -982,7 +982,7 @@ static int __init longhaul_init(void) | |||
982 | case 10: | 982 | case 10: |
983 | printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n"); | 983 | printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n"); |
984 | default: | 984 | default: |
985 | ;; | 985 | ; |
986 | } | 986 | } |
987 | 987 | ||
988 | return -ENODEV; | 988 | return -ENODEV; |
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index b8e05ee4f736..beea4466b063 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | |||
@@ -160,6 +160,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) | |||
160 | switch (c->x86_model) { | 160 | switch (c->x86_model) { |
161 | case 0x0E: /* Core */ | 161 | case 0x0E: /* Core */ |
162 | case 0x0F: /* Core Duo */ | 162 | case 0x0F: /* Core Duo */ |
163 | case 0x16: /* Celeron Core */ | ||
163 | p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; | 164 | p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; |
164 | return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PCORE); | 165 | return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PCORE); |
165 | case 0x0D: /* Pentium M (Dothan) */ | 166 | case 0x0D: /* Pentium M (Dothan) */ |
@@ -171,7 +172,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) | |||
171 | } | 172 | } |
172 | 173 | ||
173 | if (c->x86 != 0xF) { | 174 | if (c->x86 != 0xF) { |
174 | printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <cpufreq@vger.kernel.org>\n"); | 175 | if (!cpu_has(c, X86_FEATURE_EST)) |
176 | printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. " | ||
177 | "Please send an e-mail to <cpufreq@vger.kernel.org>\n"); | ||
175 | return 0; | 178 | return 0; |
176 | } | 179 | } |
177 | 180 | ||
@@ -274,6 +277,7 @@ static struct cpufreq_driver p4clockmod_driver = { | |||
274 | .name = "p4-clockmod", | 277 | .name = "p4-clockmod", |
275 | .owner = THIS_MODULE, | 278 | .owner = THIS_MODULE, |
276 | .attr = p4clockmod_attr, | 279 | .attr = p4clockmod_attr, |
280 | .hide_interface = 1, | ||
277 | }; | 281 | }; |
278 | 282 | ||
279 | 283 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index 3b5f06423e77..f0ea6fa2f53c 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | |||
@@ -459,9 +459,7 @@ static int centrino_verify (struct cpufreq_policy *policy) | |||
459 | * Sets a new CPUFreq policy. | 459 | * Sets a new CPUFreq policy. |
460 | */ | 460 | */ |
461 | struct allmasks { | 461 | struct allmasks { |
462 | cpumask_t online_policy_cpus; | ||
463 | cpumask_t saved_mask; | 462 | cpumask_t saved_mask; |
464 | cpumask_t set_mask; | ||
465 | cpumask_t covered_cpus; | 463 | cpumask_t covered_cpus; |
466 | }; | 464 | }; |
467 | 465 | ||
@@ -475,9 +473,7 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
475 | int retval = 0; | 473 | int retval = 0; |
476 | unsigned int j, k, first_cpu, tmp; | 474 | unsigned int j, k, first_cpu, tmp; |
477 | CPUMASK_ALLOC(allmasks); | 475 | CPUMASK_ALLOC(allmasks); |
478 | CPUMASK_PTR(online_policy_cpus, allmasks); | ||
479 | CPUMASK_PTR(saved_mask, allmasks); | 476 | CPUMASK_PTR(saved_mask, allmasks); |
480 | CPUMASK_PTR(set_mask, allmasks); | ||
481 | CPUMASK_PTR(covered_cpus, allmasks); | 477 | CPUMASK_PTR(covered_cpus, allmasks); |
482 | 478 | ||
483 | if (unlikely(allmasks == NULL)) | 479 | if (unlikely(allmasks == NULL)) |
@@ -497,30 +493,28 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
497 | goto out; | 493 | goto out; |
498 | } | 494 | } |
499 | 495 | ||
500 | #ifdef CONFIG_HOTPLUG_CPU | ||
501 | /* cpufreq holds the hotplug lock, so we are safe from here on */ | ||
502 | cpus_and(*online_policy_cpus, cpu_online_map, policy->cpus); | ||
503 | #else | ||
504 | *online_policy_cpus = policy->cpus; | ||
505 | #endif | ||
506 | |||
507 | *saved_mask = current->cpus_allowed; | 496 | *saved_mask = current->cpus_allowed; |
508 | first_cpu = 1; | 497 | first_cpu = 1; |
509 | cpus_clear(*covered_cpus); | 498 | cpus_clear(*covered_cpus); |
510 | for_each_cpu_mask_nr(j, *online_policy_cpus) { | 499 | for_each_cpu_mask_nr(j, policy->cpus) { |
500 | const cpumask_t *mask; | ||
501 | |||
502 | /* cpufreq holds the hotplug lock, so we are safe here */ | ||
503 | if (!cpu_online(j)) | ||
504 | continue; | ||
505 | |||
511 | /* | 506 | /* |
512 | * Support for SMP systems. | 507 | * Support for SMP systems. |
513 | * Make sure we are running on CPU that wants to change freq | 508 | * Make sure we are running on CPU that wants to change freq |
514 | */ | 509 | */ |
515 | cpus_clear(*set_mask); | ||
516 | if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) | 510 | if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) |
517 | cpus_or(*set_mask, *set_mask, *online_policy_cpus); | 511 | mask = &policy->cpus; |
518 | else | 512 | else |
519 | cpu_set(j, *set_mask); | 513 | mask = &cpumask_of_cpu(j); |
520 | 514 | ||
521 | set_cpus_allowed_ptr(current, set_mask); | 515 | set_cpus_allowed_ptr(current, mask); |
522 | preempt_disable(); | 516 | preempt_disable(); |
523 | if (unlikely(!cpu_isset(smp_processor_id(), *set_mask))) { | 517 | if (unlikely(!cpu_isset(smp_processor_id(), *mask))) { |
524 | dprintk("couldn't limit to CPUs in this domain\n"); | 518 | dprintk("couldn't limit to CPUs in this domain\n"); |
525 | retval = -EAGAIN; | 519 | retval = -EAGAIN; |
526 | if (first_cpu) { | 520 | if (first_cpu) { |
@@ -548,7 +542,9 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
548 | dprintk("target=%dkHz old=%d new=%d msr=%04x\n", | 542 | dprintk("target=%dkHz old=%d new=%d msr=%04x\n", |
549 | target_freq, freqs.old, freqs.new, msr); | 543 | target_freq, freqs.old, freqs.new, msr); |
550 | 544 | ||
551 | for_each_cpu_mask_nr(k, *online_policy_cpus) { | 545 | for_each_cpu_mask_nr(k, policy->cpus) { |
546 | if (!cpu_online(k)) | ||
547 | continue; | ||
552 | freqs.cpu = k; | 548 | freqs.cpu = k; |
553 | cpufreq_notify_transition(&freqs, | 549 | cpufreq_notify_transition(&freqs, |
554 | CPUFREQ_PRECHANGE); | 550 | CPUFREQ_PRECHANGE); |
@@ -571,7 +567,9 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
571 | preempt_enable(); | 567 | preempt_enable(); |
572 | } | 568 | } |
573 | 569 | ||
574 | for_each_cpu_mask_nr(k, *online_policy_cpus) { | 570 | for_each_cpu_mask_nr(k, policy->cpus) { |
571 | if (!cpu_online(k)) | ||
572 | continue; | ||
575 | freqs.cpu = k; | 573 | freqs.cpu = k; |
576 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 574 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
577 | } | 575 | } |
@@ -584,18 +582,17 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
584 | * Best effort undo.. | 582 | * Best effort undo.. |
585 | */ | 583 | */ |
586 | 584 | ||
587 | if (!cpus_empty(*covered_cpus)) | 585 | for_each_cpu_mask_nr(j, *covered_cpus) { |
588 | for_each_cpu_mask_nr(j, *covered_cpus) { | 586 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(j)); |
589 | set_cpus_allowed_ptr(current, | 587 | wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); |
590 | &cpumask_of_cpu(j)); | 588 | } |
591 | wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); | ||
592 | } | ||
593 | 589 | ||
594 | tmp = freqs.new; | 590 | tmp = freqs.new; |
595 | freqs.new = freqs.old; | 591 | freqs.new = freqs.old; |
596 | freqs.old = tmp; | 592 | freqs.old = tmp; |
597 | for_each_cpu_mask_nr(j, *online_policy_cpus) { | 593 | for_each_cpu_mask_nr(j, policy->cpus) { |
598 | freqs.cpu = j; | 594 | if (!cpu_online(j)) |
595 | continue; | ||
599 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 596 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
600 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 597 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
601 | } | 598 | } |
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c index 98d4fdb7dc04..cdac7d62369b 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c | |||
@@ -139,6 +139,15 @@ static unsigned int pentium_core_get_frequency(void) | |||
139 | case 3: | 139 | case 3: |
140 | fsb = 166667; | 140 | fsb = 166667; |
141 | break; | 141 | break; |
142 | case 2: | ||
143 | fsb = 200000; | ||
144 | break; | ||
145 | case 0: | ||
146 | fsb = 266667; | ||
147 | break; | ||
148 | case 4: | ||
149 | fsb = 333333; | ||
150 | break; | ||
142 | default: | 151 | default: |
143 | printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value"); | 152 | printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value"); |
144 | } | 153 | } |
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 65a13943e098..e85826829cf2 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
@@ -665,6 +665,27 @@ void __init e820_mark_nosave_regions(unsigned long limit_pfn) | |||
665 | } | 665 | } |
666 | #endif | 666 | #endif |
667 | 667 | ||
668 | #ifdef CONFIG_HIBERNATION | ||
669 | /** | ||
670 | * Mark ACPI NVS memory region, so that we can save/restore it during | ||
671 | * hibernation and the subsequent resume. | ||
672 | */ | ||
673 | static int __init e820_mark_nvs_memory(void) | ||
674 | { | ||
675 | int i; | ||
676 | |||
677 | for (i = 0; i < e820.nr_map; i++) { | ||
678 | struct e820entry *ei = &e820.map[i]; | ||
679 | |||
680 | if (ei->type == E820_NVS) | ||
681 | hibernate_nvs_register(ei->addr, ei->size); | ||
682 | } | ||
683 | |||
684 | return 0; | ||
685 | } | ||
686 | core_initcall(e820_mark_nvs_memory); | ||
687 | #endif | ||
688 | |||
668 | /* | 689 | /* |
669 | * Early reserved memory areas. | 690 | * Early reserved memory areas. |
670 | */ | 691 | */ |
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 744aa7fc49d5..76b8cd953dee 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c | |||
@@ -201,6 +201,12 @@ struct chipset { | |||
201 | void (*f)(int num, int slot, int func); | 201 | void (*f)(int num, int slot, int func); |
202 | }; | 202 | }; |
203 | 203 | ||
204 | /* | ||
205 | * Only works for devices on the root bus. If you add any devices | ||
206 | * not on bus 0 readd another loop level in early_quirks(). But | ||
207 | * be careful because at least the Nvidia quirk here relies on | ||
208 | * only matching on bus 0. | ||
209 | */ | ||
204 | static struct chipset early_qrk[] __initdata = { | 210 | static struct chipset early_qrk[] __initdata = { |
205 | { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, | 211 | { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, |
206 | PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs }, | 212 | PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs }, |
@@ -267,17 +273,17 @@ static int __init check_dev_quirk(int num, int slot, int func) | |||
267 | 273 | ||
268 | void __init early_quirks(void) | 274 | void __init early_quirks(void) |
269 | { | 275 | { |
270 | int num, slot, func; | 276 | int slot, func; |
271 | 277 | ||
272 | if (!early_pci_allowed()) | 278 | if (!early_pci_allowed()) |
273 | return; | 279 | return; |
274 | 280 | ||
275 | /* Poor man's PCI discovery */ | 281 | /* Poor man's PCI discovery */ |
276 | for (num = 0; num < 32; num++) | 282 | /* Only scan the root bus */ |
277 | for (slot = 0; slot < 32; slot++) | 283 | for (slot = 0; slot < 32; slot++) |
278 | for (func = 0; func < 8; func++) { | 284 | for (func = 0; func < 8; func++) { |
279 | /* Only probe function 0 on single fn devices */ | 285 | /* Only probe function 0 on single fn devices */ |
280 | if (check_dev_quirk(num, slot, func)) | 286 | if (check_dev_quirk(0, slot, func)) |
281 | break; | 287 | break; |
282 | } | 288 | } |
283 | } | 289 | } |
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 6c27679ec6aa..884d985b8b82 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c | |||
@@ -376,9 +376,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) | |||
376 | 376 | ||
377 | void __kprobes arch_remove_kprobe(struct kprobe *p) | 377 | void __kprobes arch_remove_kprobe(struct kprobe *p) |
378 | { | 378 | { |
379 | mutex_lock(&kprobe_mutex); | 379 | if (p->ainsn.insn) { |
380 | free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1)); | 380 | free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1)); |
381 | mutex_unlock(&kprobe_mutex); | 381 | p->ainsn.insn = NULL; |
382 | } | ||
382 | } | 383 | } |
383 | 384 | ||
384 | static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) | 385 | static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) |
@@ -694,7 +695,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) | |||
694 | /* | 695 | /* |
695 | * It is possible to have multiple instances associated with a given | 696 | * It is possible to have multiple instances associated with a given |
696 | * task either because multiple functions in the call path have | 697 | * task either because multiple functions in the call path have |
697 | * return probes installed on them, and/or more then one | 698 | * return probes installed on them, and/or more than one |
698 | * return probe was registered for a target function. | 699 | * return probe was registered for a target function. |
699 | * | 700 | * |
700 | * We can handle this because: | 701 | * We can handle this because: |
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c index c12314c9e86f..8815f3c7fec7 100644 --- a/arch/x86/kernel/mfgpt_32.c +++ b/arch/x86/kernel/mfgpt_32.c | |||
@@ -252,7 +252,7 @@ EXPORT_SYMBOL_GPL(geode_mfgpt_alloc_timer); | |||
252 | /* | 252 | /* |
253 | * The MFPGT timers on the CS5536 provide us with suitable timers to use | 253 | * The MFPGT timers on the CS5536 provide us with suitable timers to use |
254 | * as clock event sources - not as good as a HPET or APIC, but certainly | 254 | * as clock event sources - not as good as a HPET or APIC, but certainly |
255 | * better then the PIT. This isn't a general purpose MFGPT driver, but | 255 | * better than the PIT. This isn't a general purpose MFGPT driver, but |
256 | * a simplified one designed specifically to act as a clock event source. | 256 | * a simplified one designed specifically to act as a clock event source. |
257 | * For full details about the MFGPT, please consult the CS5536 data sheet. | 257 | * For full details about the MFGPT, please consult the CS5536 data sheet. |
258 | */ | 258 | */ |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 19a1044a0cd9..b25428533141 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -38,7 +38,7 @@ EXPORT_SYMBOL(bad_dma_address); | |||
38 | be probably a smaller DMA mask, but this is bug-to-bug compatible | 38 | be probably a smaller DMA mask, but this is bug-to-bug compatible |
39 | to older i386. */ | 39 | to older i386. */ |
40 | struct device x86_dma_fallback_dev = { | 40 | struct device x86_dma_fallback_dev = { |
41 | .bus_id = "fallback device", | 41 | .init_name = "fallback device", |
42 | .coherent_dma_mask = DMA_32BIT_MASK, | 42 | .coherent_dma_mask = DMA_32BIT_MASK, |
43 | .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask, | 43 | .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask, |
44 | }; | 44 | }; |
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c index 242c3440687f..d59c91747665 100644 --- a/arch/x86/kernel/pci-swiotlb_64.c +++ b/arch/x86/kernel/pci-swiotlb_64.c | |||
@@ -13,7 +13,7 @@ | |||
13 | 13 | ||
14 | int swiotlb __read_mostly; | 14 | int swiotlb __read_mostly; |
15 | 15 | ||
16 | void *swiotlb_alloc_boot(size_t size, unsigned long nslabs) | 16 | void * __init swiotlb_alloc_boot(size_t size, unsigned long nslabs) |
17 | { | 17 | { |
18 | return alloc_bootmem_low_pages(size); | 18 | return alloc_bootmem_low_pages(size); |
19 | } | 19 | } |
@@ -23,7 +23,7 @@ void *swiotlb_alloc(unsigned order, unsigned long nslabs) | |||
23 | return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order); | 23 | return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order); |
24 | } | 24 | } |
25 | 25 | ||
26 | dma_addr_t swiotlb_phys_to_bus(phys_addr_t paddr) | 26 | dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr) |
27 | { | 27 | { |
28 | return paddr; | 28 | return paddr; |
29 | } | 29 | } |
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index ce6650eb64e9..c9a666cdd3db 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/ptrace.h> | 21 | #include <linux/ptrace.h> |
22 | #include <linux/string.h> | 22 | #include <linux/string.h> |
23 | #include <linux/unwind.h> | ||
24 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
25 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
26 | #include <linux/kexec.h> | 25 | #include <linux/kexec.h> |
@@ -51,7 +50,6 @@ | |||
51 | #include <asm/debugreg.h> | 50 | #include <asm/debugreg.h> |
52 | #include <asm/atomic.h> | 51 | #include <asm/atomic.h> |
53 | #include <asm/system.h> | 52 | #include <asm/system.h> |
54 | #include <asm/unwind.h> | ||
55 | #include <asm/traps.h> | 53 | #include <asm/traps.h> |
56 | #include <asm/desc.h> | 54 | #include <asm/desc.h> |
57 | #include <asm/i387.h> | 55 | #include <asm/i387.h> |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 57ec8c86a877..9e268b6b204e 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -667,7 +667,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
667 | if (unlikely(in_atomic() || !mm)) | 667 | if (unlikely(in_atomic() || !mm)) |
668 | goto bad_area_nosemaphore; | 668 | goto bad_area_nosemaphore; |
669 | 669 | ||
670 | again: | ||
671 | /* | 670 | /* |
672 | * When running in the kernel we expect faults to occur only to | 671 | * When running in the kernel we expect faults to occur only to |
673 | * addresses in user space. All other faults represent errors in the | 672 | * addresses in user space. All other faults represent errors in the |
@@ -859,25 +858,14 @@ no_context: | |||
859 | oops_end(flags, regs, sig); | 858 | oops_end(flags, regs, sig); |
860 | #endif | 859 | #endif |
861 | 860 | ||
862 | /* | ||
863 | * We ran out of memory, or some other thing happened to us that made | ||
864 | * us unable to handle the page fault gracefully. | ||
865 | */ | ||
866 | out_of_memory: | 861 | out_of_memory: |
862 | /* | ||
863 | * We ran out of memory, call the OOM killer, and return the userspace | ||
864 | * (which will retry the fault, or kill us if we got oom-killed). | ||
865 | */ | ||
867 | up_read(&mm->mmap_sem); | 866 | up_read(&mm->mmap_sem); |
868 | if (is_global_init(tsk)) { | 867 | pagefault_out_of_memory(); |
869 | yield(); | 868 | return; |
870 | /* | ||
871 | * Re-lookup the vma - in theory the vma tree might | ||
872 | * have changed: | ||
873 | */ | ||
874 | goto again; | ||
875 | } | ||
876 | |||
877 | printk("VM: killing process %s\n", tsk->comm); | ||
878 | if (error_code & PF_USER) | ||
879 | do_group_exit(SIGKILL); | ||
880 | goto no_context; | ||
881 | 869 | ||
882 | do_sigbus: | 870 | do_sigbus: |
883 | up_read(&mm->mmap_sem); | 871 | up_read(&mm->mmap_sem); |
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index f99a6c6c432e..88f1b10de3be 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -328,6 +328,8 @@ int devmem_is_allowed(unsigned long pagenr) | |||
328 | { | 328 | { |
329 | if (pagenr <= 256) | 329 | if (pagenr <= 256) |
330 | return 1; | 330 | return 1; |
331 | if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) | ||
332 | return 0; | ||
331 | if (!page_is_ram(pagenr)) | 333 | if (!page_is_ram(pagenr)) |
332 | return 1; | 334 | return 1; |
333 | return 0; | 335 | return 0; |
@@ -1079,7 +1081,7 @@ int arch_add_memory(int nid, u64 start, u64 size) | |||
1079 | unsigned long start_pfn = start >> PAGE_SHIFT; | 1081 | unsigned long start_pfn = start >> PAGE_SHIFT; |
1080 | unsigned long nr_pages = size >> PAGE_SHIFT; | 1082 | unsigned long nr_pages = size >> PAGE_SHIFT; |
1081 | 1083 | ||
1082 | return __add_pages(zone, start_pfn, nr_pages); | 1084 | return __add_pages(nid, zone, start_pfn, nr_pages); |
1083 | } | 1085 | } |
1084 | #endif | 1086 | #endif |
1085 | 1087 | ||
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 9f7a0d24d42a..23f68e77ad1f 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -857,7 +857,7 @@ int arch_add_memory(int nid, u64 start, u64 size) | |||
857 | if (last_mapped_pfn > max_pfn_mapped) | 857 | if (last_mapped_pfn > max_pfn_mapped) |
858 | max_pfn_mapped = last_mapped_pfn; | 858 | max_pfn_mapped = last_mapped_pfn; |
859 | 859 | ||
860 | ret = __add_pages(zone, start_pfn, nr_pages); | 860 | ret = __add_pages(nid, zone, start_pfn, nr_pages); |
861 | WARN_ON_ONCE(ret); | 861 | WARN_ON_ONCE(ret); |
862 | 862 | ||
863 | return ret; | 863 | return ret; |
@@ -888,6 +888,8 @@ int devmem_is_allowed(unsigned long pagenr) | |||
888 | { | 888 | { |
889 | if (pagenr <= 256) | 889 | if (pagenr <= 256) |
890 | return 1; | 890 | return 1; |
891 | if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) | ||
892 | return 0; | ||
891 | if (!page_is_ram(pagenr)) | 893 | if (!page_is_ram(pagenr)) |
892 | return 1; | 894 | return 1; |
893 | return 0; | 895 | return 0; |
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 8518c678d83f..d1f7439d173c 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c | |||
@@ -239,7 +239,7 @@ void resume_map_numa_kva(pgd_t *pgd_base) | |||
239 | start_pfn = node_remap_start_pfn[node]; | 239 | start_pfn = node_remap_start_pfn[node]; |
240 | size = node_remap_size[node]; | 240 | size = node_remap_size[node]; |
241 | 241 | ||
242 | printk(KERN_DEBUG "%s: node %d\n", __FUNCTION__, node); | 242 | printk(KERN_DEBUG "%s: node %d\n", __func__, node); |
243 | 243 | ||
244 | for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) { | 244 | for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) { |
245 | unsigned long vaddr = start_va + (pfn << PAGE_SHIFT); | 245 | unsigned long vaddr = start_va + (pfn << PAGE_SHIFT); |
@@ -251,7 +251,7 @@ void resume_map_numa_kva(pgd_t *pgd_base) | |||
251 | PAGE_KERNEL_LARGE_EXEC)); | 251 | PAGE_KERNEL_LARGE_EXEC)); |
252 | 252 | ||
253 | printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n", | 253 | printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n", |
254 | __FUNCTION__, vaddr, start_pfn + pfn); | 254 | __func__, vaddr, start_pfn + pfn); |
255 | } | 255 | } |
256 | } | 256 | } |
257 | } | 257 | } |
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 98658f25f542..8fdf06e4edf9 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * @file op_model_amd.c | 2 | * @file op_model_amd.c |
3 | * athlon / K7 / K8 / Family 10h model-specific MSR operations | 3 | * athlon / K7 / K8 / Family 10h model-specific MSR operations |
4 | * | 4 | * |
5 | * @remark Copyright 2002-2008 OProfile authors | 5 | * @remark Copyright 2002-2009 OProfile authors |
6 | * @remark Read the file COPYING | 6 | * @remark Read the file COPYING |
7 | * | 7 | * |
8 | * @author John Levon | 8 | * @author John Levon |
@@ -10,7 +10,7 @@ | |||
10 | * @author Graydon Hoare | 10 | * @author Graydon Hoare |
11 | * @author Robert Richter <robert.richter@amd.com> | 11 | * @author Robert Richter <robert.richter@amd.com> |
12 | * @author Barry Kasindorf | 12 | * @author Barry Kasindorf |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/oprofile.h> | 15 | #include <linux/oprofile.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
@@ -60,53 +60,10 @@ static unsigned long reset_value[NUM_COUNTERS]; | |||
60 | #define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */ | 60 | #define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */ |
61 | #define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */ | 61 | #define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */ |
62 | 62 | ||
63 | /* Codes used in cpu_buffer.c */ | 63 | #define IBS_FETCH_SIZE 6 |
64 | /* This produces duplicate code, need to be fixed */ | 64 | #define IBS_OP_SIZE 12 |
65 | #define IBS_FETCH_BEGIN 3 | ||
66 | #define IBS_OP_BEGIN 4 | ||
67 | |||
68 | /* | ||
69 | * The function interface needs to be fixed, something like add | ||
70 | * data. Should then be added to linux/oprofile.h. | ||
71 | */ | ||
72 | extern void | ||
73 | oprofile_add_ibs_sample(struct pt_regs * const regs, | ||
74 | unsigned int * const ibs_sample, int ibs_code); | ||
75 | |||
76 | struct ibs_fetch_sample { | ||
77 | /* MSRC001_1031 IBS Fetch Linear Address Register */ | ||
78 | unsigned int ibs_fetch_lin_addr_low; | ||
79 | unsigned int ibs_fetch_lin_addr_high; | ||
80 | /* MSRC001_1030 IBS Fetch Control Register */ | ||
81 | unsigned int ibs_fetch_ctl_low; | ||
82 | unsigned int ibs_fetch_ctl_high; | ||
83 | /* MSRC001_1032 IBS Fetch Physical Address Register */ | ||
84 | unsigned int ibs_fetch_phys_addr_low; | ||
85 | unsigned int ibs_fetch_phys_addr_high; | ||
86 | }; | ||
87 | |||
88 | struct ibs_op_sample { | ||
89 | /* MSRC001_1034 IBS Op Logical Address Register (IbsRIP) */ | ||
90 | unsigned int ibs_op_rip_low; | ||
91 | unsigned int ibs_op_rip_high; | ||
92 | /* MSRC001_1035 IBS Op Data Register */ | ||
93 | unsigned int ibs_op_data1_low; | ||
94 | unsigned int ibs_op_data1_high; | ||
95 | /* MSRC001_1036 IBS Op Data 2 Register */ | ||
96 | unsigned int ibs_op_data2_low; | ||
97 | unsigned int ibs_op_data2_high; | ||
98 | /* MSRC001_1037 IBS Op Data 3 Register */ | ||
99 | unsigned int ibs_op_data3_low; | ||
100 | unsigned int ibs_op_data3_high; | ||
101 | /* MSRC001_1038 IBS DC Linear Address Register (IbsDcLinAd) */ | ||
102 | unsigned int ibs_dc_linear_low; | ||
103 | unsigned int ibs_dc_linear_high; | ||
104 | /* MSRC001_1039 IBS DC Physical Address Register (IbsDcPhysAd) */ | ||
105 | unsigned int ibs_dc_phys_low; | ||
106 | unsigned int ibs_dc_phys_high; | ||
107 | }; | ||
108 | 65 | ||
109 | static int ibs_allowed; /* AMD Family10h and later */ | 66 | static int has_ibs; /* AMD Family10h and later */ |
110 | 67 | ||
111 | struct op_ibs_config { | 68 | struct op_ibs_config { |
112 | unsigned long op_enabled; | 69 | unsigned long op_enabled; |
@@ -197,31 +154,29 @@ static inline int | |||
197 | op_amd_handle_ibs(struct pt_regs * const regs, | 154 | op_amd_handle_ibs(struct pt_regs * const regs, |
198 | struct op_msrs const * const msrs) | 155 | struct op_msrs const * const msrs) |
199 | { | 156 | { |
200 | unsigned int low, high; | 157 | u32 low, high; |
201 | struct ibs_fetch_sample ibs_fetch; | 158 | u64 msr; |
202 | struct ibs_op_sample ibs_op; | 159 | struct op_entry entry; |
203 | 160 | ||
204 | if (!ibs_allowed) | 161 | if (!has_ibs) |
205 | return 1; | 162 | return 1; |
206 | 163 | ||
207 | if (ibs_config.fetch_enabled) { | 164 | if (ibs_config.fetch_enabled) { |
208 | rdmsr(MSR_AMD64_IBSFETCHCTL, low, high); | 165 | rdmsr(MSR_AMD64_IBSFETCHCTL, low, high); |
209 | if (high & IBS_FETCH_HIGH_VALID_BIT) { | 166 | if (high & IBS_FETCH_HIGH_VALID_BIT) { |
210 | ibs_fetch.ibs_fetch_ctl_high = high; | 167 | rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr); |
211 | ibs_fetch.ibs_fetch_ctl_low = low; | 168 | oprofile_write_reserve(&entry, regs, msr, |
212 | rdmsr(MSR_AMD64_IBSFETCHLINAD, low, high); | 169 | IBS_FETCH_CODE, IBS_FETCH_SIZE); |
213 | ibs_fetch.ibs_fetch_lin_addr_high = high; | 170 | oprofile_add_data(&entry, (u32)msr); |
214 | ibs_fetch.ibs_fetch_lin_addr_low = low; | 171 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
215 | rdmsr(MSR_AMD64_IBSFETCHPHYSAD, low, high); | 172 | oprofile_add_data(&entry, low); |
216 | ibs_fetch.ibs_fetch_phys_addr_high = high; | 173 | oprofile_add_data(&entry, high); |
217 | ibs_fetch.ibs_fetch_phys_addr_low = low; | 174 | rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, msr); |
218 | 175 | oprofile_add_data(&entry, (u32)msr); | |
219 | oprofile_add_ibs_sample(regs, | 176 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
220 | (unsigned int *)&ibs_fetch, | 177 | oprofile_write_commit(&entry); |
221 | IBS_FETCH_BEGIN); | ||
222 | 178 | ||
223 | /* reenable the IRQ */ | 179 | /* reenable the IRQ */ |
224 | rdmsr(MSR_AMD64_IBSFETCHCTL, low, high); | ||
225 | high &= ~IBS_FETCH_HIGH_VALID_BIT; | 180 | high &= ~IBS_FETCH_HIGH_VALID_BIT; |
226 | high |= IBS_FETCH_HIGH_ENABLE; | 181 | high |= IBS_FETCH_HIGH_ENABLE; |
227 | low &= IBS_FETCH_LOW_MAX_CNT_MASK; | 182 | low &= IBS_FETCH_LOW_MAX_CNT_MASK; |
@@ -232,30 +187,29 @@ op_amd_handle_ibs(struct pt_regs * const regs, | |||
232 | if (ibs_config.op_enabled) { | 187 | if (ibs_config.op_enabled) { |
233 | rdmsr(MSR_AMD64_IBSOPCTL, low, high); | 188 | rdmsr(MSR_AMD64_IBSOPCTL, low, high); |
234 | if (low & IBS_OP_LOW_VALID_BIT) { | 189 | if (low & IBS_OP_LOW_VALID_BIT) { |
235 | rdmsr(MSR_AMD64_IBSOPRIP, low, high); | 190 | rdmsrl(MSR_AMD64_IBSOPRIP, msr); |
236 | ibs_op.ibs_op_rip_low = low; | 191 | oprofile_write_reserve(&entry, regs, msr, |
237 | ibs_op.ibs_op_rip_high = high; | 192 | IBS_OP_CODE, IBS_OP_SIZE); |
238 | rdmsr(MSR_AMD64_IBSOPDATA, low, high); | 193 | oprofile_add_data(&entry, (u32)msr); |
239 | ibs_op.ibs_op_data1_low = low; | 194 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
240 | ibs_op.ibs_op_data1_high = high; | 195 | rdmsrl(MSR_AMD64_IBSOPDATA, msr); |
241 | rdmsr(MSR_AMD64_IBSOPDATA2, low, high); | 196 | oprofile_add_data(&entry, (u32)msr); |
242 | ibs_op.ibs_op_data2_low = low; | 197 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
243 | ibs_op.ibs_op_data2_high = high; | 198 | rdmsrl(MSR_AMD64_IBSOPDATA2, msr); |
244 | rdmsr(MSR_AMD64_IBSOPDATA3, low, high); | 199 | oprofile_add_data(&entry, (u32)msr); |
245 | ibs_op.ibs_op_data3_low = low; | 200 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
246 | ibs_op.ibs_op_data3_high = high; | 201 | rdmsrl(MSR_AMD64_IBSOPDATA3, msr); |
247 | rdmsr(MSR_AMD64_IBSDCLINAD, low, high); | 202 | oprofile_add_data(&entry, (u32)msr); |
248 | ibs_op.ibs_dc_linear_low = low; | 203 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
249 | ibs_op.ibs_dc_linear_high = high; | 204 | rdmsrl(MSR_AMD64_IBSDCLINAD, msr); |
250 | rdmsr(MSR_AMD64_IBSDCPHYSAD, low, high); | 205 | oprofile_add_data(&entry, (u32)msr); |
251 | ibs_op.ibs_dc_phys_low = low; | 206 | oprofile_add_data(&entry, (u32)(msr >> 32)); |
252 | ibs_op.ibs_dc_phys_high = high; | 207 | rdmsrl(MSR_AMD64_IBSDCPHYSAD, msr); |
208 | oprofile_add_data(&entry, (u32)msr); | ||
209 | oprofile_add_data(&entry, (u32)(msr >> 32)); | ||
210 | oprofile_write_commit(&entry); | ||
253 | 211 | ||
254 | /* reenable the IRQ */ | 212 | /* reenable the IRQ */ |
255 | oprofile_add_ibs_sample(regs, | ||
256 | (unsigned int *)&ibs_op, | ||
257 | IBS_OP_BEGIN); | ||
258 | rdmsr(MSR_AMD64_IBSOPCTL, low, high); | ||
259 | high = 0; | 213 | high = 0; |
260 | low &= ~IBS_OP_LOW_VALID_BIT; | 214 | low &= ~IBS_OP_LOW_VALID_BIT; |
261 | low |= IBS_OP_LOW_ENABLE; | 215 | low |= IBS_OP_LOW_ENABLE; |
@@ -305,14 +259,14 @@ static void op_amd_start(struct op_msrs const * const msrs) | |||
305 | } | 259 | } |
306 | 260 | ||
307 | #ifdef CONFIG_OPROFILE_IBS | 261 | #ifdef CONFIG_OPROFILE_IBS |
308 | if (ibs_allowed && ibs_config.fetch_enabled) { | 262 | if (has_ibs && ibs_config.fetch_enabled) { |
309 | low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; | 263 | low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; |
310 | high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */ | 264 | high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */ |
311 | + IBS_FETCH_HIGH_ENABLE; | 265 | + IBS_FETCH_HIGH_ENABLE; |
312 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); | 266 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); |
313 | } | 267 | } |
314 | 268 | ||
315 | if (ibs_allowed && ibs_config.op_enabled) { | 269 | if (has_ibs && ibs_config.op_enabled) { |
316 | low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) | 270 | low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) |
317 | + ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */ | 271 | + ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */ |
318 | + IBS_OP_LOW_ENABLE; | 272 | + IBS_OP_LOW_ENABLE; |
@@ -341,14 +295,14 @@ static void op_amd_stop(struct op_msrs const * const msrs) | |||
341 | } | 295 | } |
342 | 296 | ||
343 | #ifdef CONFIG_OPROFILE_IBS | 297 | #ifdef CONFIG_OPROFILE_IBS |
344 | if (ibs_allowed && ibs_config.fetch_enabled) { | 298 | if (has_ibs && ibs_config.fetch_enabled) { |
345 | /* clear max count and enable */ | 299 | /* clear max count and enable */ |
346 | low = 0; | 300 | low = 0; |
347 | high = 0; | 301 | high = 0; |
348 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); | 302 | wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); |
349 | } | 303 | } |
350 | 304 | ||
351 | if (ibs_allowed && ibs_config.op_enabled) { | 305 | if (has_ibs && ibs_config.op_enabled) { |
352 | /* clear max count and enable */ | 306 | /* clear max count and enable */ |
353 | low = 0; | 307 | low = 0; |
354 | high = 0; | 308 | high = 0; |
@@ -409,6 +363,7 @@ static int init_ibs_nmi(void) | |||
409 | | IBSCTL_LVTOFFSETVAL); | 363 | | IBSCTL_LVTOFFSETVAL); |
410 | pci_read_config_dword(cpu_cfg, IBSCTL, &value); | 364 | pci_read_config_dword(cpu_cfg, IBSCTL, &value); |
411 | if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) { | 365 | if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) { |
366 | pci_dev_put(cpu_cfg); | ||
412 | printk(KERN_DEBUG "Failed to setup IBS LVT offset, " | 367 | printk(KERN_DEBUG "Failed to setup IBS LVT offset, " |
413 | "IBSCTL = 0x%08x", value); | 368 | "IBSCTL = 0x%08x", value); |
414 | return 1; | 369 | return 1; |
@@ -436,20 +391,20 @@ static int init_ibs_nmi(void) | |||
436 | /* uninitialize the APIC for the IBS interrupts if needed */ | 391 | /* uninitialize the APIC for the IBS interrupts if needed */ |
437 | static void clear_ibs_nmi(void) | 392 | static void clear_ibs_nmi(void) |
438 | { | 393 | { |
439 | if (ibs_allowed) | 394 | if (has_ibs) |
440 | on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1); | 395 | on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1); |
441 | } | 396 | } |
442 | 397 | ||
443 | /* initialize the APIC for the IBS interrupts if available */ | 398 | /* initialize the APIC for the IBS interrupts if available */ |
444 | static void ibs_init(void) | 399 | static void ibs_init(void) |
445 | { | 400 | { |
446 | ibs_allowed = boot_cpu_has(X86_FEATURE_IBS); | 401 | has_ibs = boot_cpu_has(X86_FEATURE_IBS); |
447 | 402 | ||
448 | if (!ibs_allowed) | 403 | if (!has_ibs) |
449 | return; | 404 | return; |
450 | 405 | ||
451 | if (init_ibs_nmi()) { | 406 | if (init_ibs_nmi()) { |
452 | ibs_allowed = 0; | 407 | has_ibs = 0; |
453 | return; | 408 | return; |
454 | } | 409 | } |
455 | 410 | ||
@@ -458,7 +413,7 @@ static void ibs_init(void) | |||
458 | 413 | ||
459 | static void ibs_exit(void) | 414 | static void ibs_exit(void) |
460 | { | 415 | { |
461 | if (!ibs_allowed) | 416 | if (!has_ibs) |
462 | return; | 417 | return; |
463 | 418 | ||
464 | clear_ibs_nmi(); | 419 | clear_ibs_nmi(); |
@@ -478,7 +433,7 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root) | |||
478 | if (ret) | 433 | if (ret) |
479 | return ret; | 434 | return ret; |
480 | 435 | ||
481 | if (!ibs_allowed) | 436 | if (!has_ibs) |
482 | return ret; | 437 | return ret; |
483 | 438 | ||
484 | /* model specific files */ | 439 | /* model specific files */ |
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 9e5752fe4d15..c0ecf250fe51 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
@@ -210,11 +210,10 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do | |||
210 | if (bus && node != -1) { | 210 | if (bus && node != -1) { |
211 | #ifdef CONFIG_ACPI_NUMA | 211 | #ifdef CONFIG_ACPI_NUMA |
212 | if (pxm >= 0) | 212 | if (pxm >= 0) |
213 | printk(KERN_DEBUG "bus %02x -> pxm %d -> node %d\n", | 213 | dev_printk(KERN_DEBUG, &bus->dev, |
214 | busnum, pxm, node); | 214 | "on NUMA node %d (pxm %d)\n", node, pxm); |
215 | #else | 215 | #else |
216 | printk(KERN_DEBUG "bus %02x -> node %d\n", | 216 | dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node); |
217 | busnum, node); | ||
218 | #endif | 217 | #endif |
219 | } | 218 | } |
220 | 219 | ||
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 62ddb73e09ed..82d22fc601ae 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c | |||
@@ -551,17 +551,25 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) | |||
551 | if ((err = pci_enable_resources(dev, mask)) < 0) | 551 | if ((err = pci_enable_resources(dev, mask)) < 0) |
552 | return err; | 552 | return err; |
553 | 553 | ||
554 | if (!dev->msi_enabled) | 554 | if (!pci_dev_msi_enabled(dev)) |
555 | return pcibios_enable_irq(dev); | 555 | return pcibios_enable_irq(dev); |
556 | return 0; | 556 | return 0; |
557 | } | 557 | } |
558 | 558 | ||
559 | void pcibios_disable_device (struct pci_dev *dev) | 559 | void pcibios_disable_device (struct pci_dev *dev) |
560 | { | 560 | { |
561 | if (!dev->msi_enabled && pcibios_disable_irq) | 561 | if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq) |
562 | pcibios_disable_irq(dev); | 562 | pcibios_disable_irq(dev); |
563 | } | 563 | } |
564 | 564 | ||
565 | int pci_ext_cfg_avail(struct pci_dev *dev) | ||
566 | { | ||
567 | if (raw_pci_ext_ops) | ||
568 | return 1; | ||
569 | else | ||
570 | return 0; | ||
571 | } | ||
572 | |||
565 | struct pci_bus * __devinit pci_scan_bus_on_node(int busno, struct pci_ops *ops, int node) | 573 | struct pci_bus * __devinit pci_scan_bus_on_node(int busno, struct pci_ops *ops, int node) |
566 | { | 574 | { |
567 | struct pci_bus *bus = NULL; | 575 | struct pci_bus *bus = NULL; |
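For context, a minimal caller sketch (not part of this commit) for the pci_ext_cfg_avail() helper added above: a check before touching extended (offset >= 0x100) configuration space. example_read_ext_reg() is a hypothetical wrapper, not a kernel API.

#include <linux/pci.h>
#include <linux/errno.h>

static int example_read_ext_reg(struct pci_dev *dev, int pos, u32 *val)
{
	if (!pci_ext_cfg_avail(dev))
		return -ENODEV;

	return pci_read_config_dword(dev, pos, val);
}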
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index e51bf2cda4b0..f884740da318 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
@@ -129,7 +129,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list) | |||
129 | pr = pci_find_parent_resource(dev, r); | 129 | pr = pci_find_parent_resource(dev, r); |
130 | if (!r->start || !pr || | 130 | if (!r->start || !pr || |
131 | request_resource(pr, r) < 0) { | 131 | request_resource(pr, r) < 0) { |
132 | dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx); | 132 | dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx); |
133 | /* | 133 | /* |
134 | * Something is wrong with the region. | 134 | * Something is wrong with the region. |
135 | * Invalidate the resource to prevent | 135 | * Invalidate the resource to prevent |
@@ -170,7 +170,7 @@ static void __init pcibios_allocate_resources(int pass) | |||
170 | r->flags, disabled, pass); | 170 | r->flags, disabled, pass); |
171 | pr = pci_find_parent_resource(dev, r); | 171 | pr = pci_find_parent_resource(dev, r); |
172 | if (!pr || request_resource(pr, r) < 0) { | 172 | if (!pr || request_resource(pr, r) < 0) { |
173 | dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx); | 173 | dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx); |
174 | /* We'll assign a new address later */ | 174 | /* We'll assign a new address later */ |
175 | r->end -= r->start; | 175 | r->end -= r->start; |
176 | r->start = 0; | 176 | r->start = 0; |
diff --git a/arch/x86/pci/init.c b/arch/x86/pci/init.c index bec3b048e72b..25a1f8efed4a 100644 --- a/arch/x86/pci/init.c +++ b/arch/x86/pci/init.c | |||
@@ -12,7 +12,8 @@ static __init int pci_arch_init(void) | |||
12 | type = pci_direct_probe(); | 12 | type = pci_direct_probe(); |
13 | #endif | 13 | #endif |
14 | 14 | ||
15 | pci_mmcfg_early_init(); | 15 | if (!(pci_probe & PCI_PROBE_NOEARLY)) |
16 | pci_mmcfg_early_init(); | ||
16 | 17 | ||
17 | #ifdef CONFIG_PCI_OLPC | 18 | #ifdef CONFIG_PCI_OLPC |
18 | if (!pci_olpc_init()) | 19 | if (!pci_olpc_init()) |
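The init.c change makes the early MMCONFIG probe conditional: pci_mmcfg_early_init() is skipped when the PCI_PROBE_NOEARLY bit is set in the pci_probe flag word (typically set with the pci=noearly boot parameter), so the command line can now keep the kernel from touching MMCONFIG this early in boot. A standalone sketch of the gating pattern, with an invented bit value purely for illustration:

    /* Standalone sketch of the flag gate added above.  The real PCI_PROBE_*
     * bit assignments live in the x86 PCI headers; the value below is made up. */
    #include <stdio.h>

    #define PROBE_NOEARLY (1u << 4)         /* hypothetical bit value */

    static unsigned int probe_flags;        /* stands in for pci_probe */

    static void mmcfg_early_init(void)
    {
            puts("probing mmconfig early");
    }

    int main(void)
    {
            probe_flags |= PROBE_NOEARLY;           /* e.g. set while parsing pci= */
            if (!(probe_flags & PROBE_NOEARLY))     /* same test as in the hunk */
                    mmcfg_early_init();             /* skipped in this run */
            return 0;
    }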
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 373b9afe6d44..4064345cf144 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c | |||
@@ -533,7 +533,7 @@ static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, | |||
533 | { | 533 | { |
534 | struct pci_dev *bridge; | 534 | struct pci_dev *bridge; |
535 | int pin = pci_get_interrupt_pin(dev, &bridge); | 535 | int pin = pci_get_interrupt_pin(dev, &bridge); |
536 | return pcibios_set_irq_routing(bridge, pin, irq); | 536 | return pcibios_set_irq_routing(bridge, pin - 1, irq); |
537 | } | 537 | } |
538 | 538 | ||
539 | #endif | 539 | #endif |
@@ -887,7 +887,6 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) | |||
887 | dev_dbg(&dev->dev, "no interrupt pin\n"); | 887 | dev_dbg(&dev->dev, "no interrupt pin\n"); |
888 | return 0; | 888 | return 0; |
889 | } | 889 | } |
890 | pin = pin - 1; | ||
891 | 890 | ||
892 | /* Find IRQ routing entry */ | 891 | /* Find IRQ routing entry */ |
893 | 892 | ||
@@ -897,17 +896,17 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) | |||
897 | info = pirq_get_info(dev); | 896 | info = pirq_get_info(dev); |
898 | if (!info) { | 897 | if (!info) { |
899 | dev_dbg(&dev->dev, "PCI INT %c not found in routing table\n", | 898 | dev_dbg(&dev->dev, "PCI INT %c not found in routing table\n", |
900 | 'A' + pin); | 899 | 'A' + pin - 1); |
901 | return 0; | 900 | return 0; |
902 | } | 901 | } |
903 | pirq = info->irq[pin].link; | 902 | pirq = info->irq[pin - 1].link; |
904 | mask = info->irq[pin].bitmap; | 903 | mask = info->irq[pin - 1].bitmap; |
905 | if (!pirq) { | 904 | if (!pirq) { |
906 | dev_dbg(&dev->dev, "PCI INT %c not routed\n", 'A' + pin); | 905 | dev_dbg(&dev->dev, "PCI INT %c not routed\n", 'A' + pin - 1); |
907 | return 0; | 906 | return 0; |
908 | } | 907 | } |
909 | dev_dbg(&dev->dev, "PCI INT %c -> PIRQ %02x, mask %04x, excl %04x", | 908 | dev_dbg(&dev->dev, "PCI INT %c -> PIRQ %02x, mask %04x, excl %04x", |
910 | 'A' + pin, pirq, mask, pirq_table->exclusive_irqs); | 909 | 'A' + pin - 1, pirq, mask, pirq_table->exclusive_irqs); |
911 | mask &= pcibios_irq_mask; | 910 | mask &= pcibios_irq_mask; |
912 | 911 | ||
913 | /* Work around broken HP Pavilion Notebooks which assign USB to | 912 | /* Work around broken HP Pavilion Notebooks which assign USB to |
@@ -949,7 +948,7 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) | |||
949 | newirq = i; | 948 | newirq = i; |
950 | } | 949 | } |
951 | } | 950 | } |
952 | dev_dbg(&dev->dev, "PCI INT %c -> newirq %d", 'A' + pin, newirq); | 951 | dev_dbg(&dev->dev, "PCI INT %c -> newirq %d", 'A' + pin - 1, newirq); |
953 | 952 | ||
954 | /* Check if it is hardcoded */ | 953 | /* Check if it is hardcoded */ |
955 | if ((pirq & 0xf0) == 0xf0) { | 954 | if ((pirq & 0xf0) == 0xf0) { |
@@ -977,18 +976,18 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) | |||
977 | return 0; | 976 | return 0; |
978 | } | 977 | } |
979 | } | 978 | } |
980 | dev_info(&dev->dev, "%s PCI INT %c -> IRQ %d\n", msg, 'A' + pin, irq); | 979 | dev_info(&dev->dev, "%s PCI INT %c -> IRQ %d\n", msg, 'A' + pin - 1, irq); |
981 | 980 | ||
982 | /* Update IRQ for all devices with the same pirq value */ | 981 | /* Update IRQ for all devices with the same pirq value */ |
983 | while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) { | 982 | while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) { |
984 | pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin); | 983 | pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin); |
985 | if (!pin) | 984 | if (!pin) |
986 | continue; | 985 | continue; |
987 | pin--; | 986 | |
988 | info = pirq_get_info(dev2); | 987 | info = pirq_get_info(dev2); |
989 | if (!info) | 988 | if (!info) |
990 | continue; | 989 | continue; |
991 | if (info->irq[pin].link == pirq) { | 990 | if (info->irq[pin - 1].link == pirq) { |
992 | /* | 991 | /* |
993 | * We refuse to override the dev->irq | 992 | * We refuse to override the dev->irq |
994 | * information. Give a warning! | 993 | * information. Give a warning! |
@@ -1042,6 +1041,9 @@ static void __init pcibios_fixup_irqs(void) | |||
1042 | dev = NULL; | 1041 | dev = NULL; |
1043 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | 1042 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { |
1044 | pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); | 1043 | pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); |
1044 | if (!pin) | ||
1045 | continue; | ||
1046 | |||
1045 | #ifdef CONFIG_X86_IO_APIC | 1047 | #ifdef CONFIG_X86_IO_APIC |
1046 | /* | 1048 | /* |
1047 | * Recalculate IRQ numbers if we use the I/O APIC. | 1049 | * Recalculate IRQ numbers if we use the I/O APIC. |
@@ -1049,15 +1051,11 @@ static void __init pcibios_fixup_irqs(void) | |||
1049 | if (io_apic_assign_pci_irqs) { | 1051 | if (io_apic_assign_pci_irqs) { |
1050 | int irq; | 1052 | int irq; |
1051 | 1053 | ||
1052 | if (!pin) | ||
1053 | continue; | ||
1054 | |||
1055 | /* | 1054 | /* |
1056 | * interrupt pins are numbered starting from 1 | 1055 | * interrupt pins are numbered starting from 1 |
1057 | */ | 1056 | */ |
1058 | pin--; | ||
1059 | irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, | 1057 | irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, |
1060 | PCI_SLOT(dev->devfn), pin); | 1058 | PCI_SLOT(dev->devfn), pin - 1); |
1061 | /* | 1059 | /* |
1062 | * Busses behind bridges are typically not listed in the | 1060 | * Busses behind bridges are typically not listed in the |
1063 | * MP-table. In this case we have to look up the IRQ | 1061 | * MP-table. In this case we have to look up the IRQ |
@@ -1070,22 +1068,22 @@ static void __init pcibios_fixup_irqs(void) | |||
1070 | struct pci_dev *bridge = dev->bus->self; | 1068 | struct pci_dev *bridge = dev->bus->self; |
1071 | int bus; | 1069 | int bus; |
1072 | 1070 | ||
1073 | pin = (pin + PCI_SLOT(dev->devfn)) % 4; | 1071 | pin = pci_swizzle_interrupt_pin(dev, pin); |
1074 | bus = bridge->bus->number; | 1072 | bus = bridge->bus->number; |
1075 | irq = IO_APIC_get_PCI_irq_vector(bus, | 1073 | irq = IO_APIC_get_PCI_irq_vector(bus, |
1076 | PCI_SLOT(bridge->devfn), pin); | 1074 | PCI_SLOT(bridge->devfn), pin - 1); |
1077 | if (irq >= 0) | 1075 | if (irq >= 0) |
1078 | dev_warn(&dev->dev, | 1076 | dev_warn(&dev->dev, |
1079 | "using bridge %s INT %c to " | 1077 | "using bridge %s INT %c to " |
1080 | "get IRQ %d\n", | 1078 | "get IRQ %d\n", |
1081 | pci_name(bridge), | 1079 | pci_name(bridge), |
1082 | 'A' + pin, irq); | 1080 | 'A' + pin - 1, irq); |
1083 | } | 1081 | } |
1084 | if (irq >= 0) { | 1082 | if (irq >= 0) { |
1085 | dev_info(&dev->dev, | 1083 | dev_info(&dev->dev, |
1086 | "PCI->APIC IRQ transform: INT %c " | 1084 | "PCI->APIC IRQ transform: INT %c " |
1087 | "-> IRQ %d\n", | 1085 | "-> IRQ %d\n", |
1088 | 'A' + pin, irq); | 1086 | 'A' + pin - 1, irq); |
1089 | dev->irq = irq; | 1087 | dev->irq = irq; |
1090 | } | 1088 | } |
1091 | } | 1089 | } |
@@ -1093,7 +1091,7 @@ static void __init pcibios_fixup_irqs(void) | |||
1093 | /* | 1091 | /* |
1094 | * Still no IRQ? Try to lookup one... | 1092 | * Still no IRQ? Try to lookup one... |
1095 | */ | 1093 | */ |
1096 | if (pin && !dev->irq) | 1094 | if (!dev->irq) |
1097 | pcibios_lookup_irq(dev, 0); | 1095 | pcibios_lookup_irq(dev, 0); |
1098 | } | 1096 | } |
1099 | } | 1097 | } |
@@ -1220,12 +1218,10 @@ static int pirq_enable_irq(struct pci_dev *dev) | |||
1220 | if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) { | 1218 | if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) { |
1221 | char *msg = ""; | 1219 | char *msg = ""; |
1222 | 1220 | ||
1223 | pin--; /* interrupt pins are numbered starting from 1 */ | ||
1224 | |||
1225 | if (io_apic_assign_pci_irqs) { | 1221 | if (io_apic_assign_pci_irqs) { |
1226 | int irq; | 1222 | int irq; |
1227 | 1223 | ||
1228 | irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin); | 1224 | irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin - 1); |
1229 | /* | 1225 | /* |
1230 | * Busses behind bridges are typically not listed in the MP-table. | 1226 | * Busses behind bridges are typically not listed in the MP-table. |
1231 | * In this case we have to look up the IRQ based on the parent bus, | 1227 | * In this case we have to look up the IRQ based on the parent bus, |
@@ -1236,20 +1232,20 @@ static int pirq_enable_irq(struct pci_dev *dev) | |||
1236 | while (irq < 0 && dev->bus->parent) { /* go back to the bridge */ | 1232 | while (irq < 0 && dev->bus->parent) { /* go back to the bridge */ |
1237 | struct pci_dev *bridge = dev->bus->self; | 1233 | struct pci_dev *bridge = dev->bus->self; |
1238 | 1234 | ||
1239 | pin = (pin + PCI_SLOT(dev->devfn)) % 4; | 1235 | pin = pci_swizzle_interrupt_pin(dev, pin); |
1240 | irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, | 1236 | irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, |
1241 | PCI_SLOT(bridge->devfn), pin); | 1237 | PCI_SLOT(bridge->devfn), pin - 1); |
1242 | if (irq >= 0) | 1238 | if (irq >= 0) |
1243 | dev_warn(&dev->dev, "using bridge %s " | 1239 | dev_warn(&dev->dev, "using bridge %s " |
1244 | "INT %c to get IRQ %d\n", | 1240 | "INT %c to get IRQ %d\n", |
1245 | pci_name(bridge), 'A' + pin, | 1241 | pci_name(bridge), 'A' + pin - 1, |
1246 | irq); | 1242 | irq); |
1247 | dev = bridge; | 1243 | dev = bridge; |
1248 | } | 1244 | } |
1249 | dev = temp_dev; | 1245 | dev = temp_dev; |
1250 | if (irq >= 0) { | 1246 | if (irq >= 0) { |
1251 | dev_info(&dev->dev, "PCI->APIC IRQ transform: " | 1247 | dev_info(&dev->dev, "PCI->APIC IRQ transform: " |
1252 | "INT %c -> IRQ %d\n", 'A' + pin, irq); | 1248 | "INT %c -> IRQ %d\n", 'A' + pin - 1, irq); |
1253 | dev->irq = irq; | 1249 | dev->irq = irq; |
1254 | return 0; | 1250 | return 0; |
1255 | } else | 1251 | } else |
@@ -1268,7 +1264,7 @@ static int pirq_enable_irq(struct pci_dev *dev) | |||
1268 | return 0; | 1264 | return 0; |
1269 | 1265 | ||
1270 | dev_warn(&dev->dev, "can't find IRQ for PCI INT %c%s\n", | 1266 | dev_warn(&dev->dev, "can't find IRQ for PCI INT %c%s\n", |
1271 | 'A' + pin, msg); | 1267 | 'A' + pin - 1, msg); |
1272 | } | 1268 | } |
1273 | return 0; | 1269 | return 0; |
1274 | } | 1270 | } |
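The irq.c hunks change how the interrupt pin value is carried around. PCI_INTERRUPT_PIN reads 0 for "no pin" and 1..4 for INTA..INTD; previously the code decremented the value early (the removed pin-- and pin = pin - 1 lines) and worked with a 0-based index from then on. Now the raw 1-based value is kept throughout pcibios_lookup_irq(), pcibios_fixup_irqs() and pirq_enable_irq(): devices with pin == 0 are skipped up front, and the conversion to an index happens only where it is actually needed, in the routing-table lookups (info->irq[pin - 1]), the IO_APIC_get_PCI_irq_vector() calls and the printed name ('A' + pin - 1). The open-coded 0-based bridge swizzle (pin + PCI_SLOT(dev->devfn)) % 4 is replaced by pci_swizzle_interrupt_pin(dev, pin), which, judging by the pin - 1 uses that follow it, takes and returns the 1-based form. A standalone sketch of the convention, with a made-up routing table:

    /* Standalone sketch of the 1-based pin convention kept after this patch:
     * 0 means "no pin", 1..4 mean INTA..INTD, and the value only becomes an
     * array index at the point of use.  The link values are made up. */
    #include <stdio.h>

    static char pin_name(unsigned char pin)
    {
            return 'A' + pin - 1;   /* same expression as the messages above */
    }

    int main(void)
    {
            unsigned char link[4] = { 0x60, 0x61, 0x62, 0x63 };
            unsigned char pin = 2;  /* INTB, as read from PCI_INTERRUPT_PIN */

            if (!pin)               /* 0 still means "no interrupt pin" */
                    return 0;
            printf("PCI INT %c -> PIRQ %02x\n",
                   pin_name(pin), (unsigned int)link[pin - 1]);
            return 0;
    }

Keeping the register value unmodified also means the same variable can be handed straight to the generic swizzle helper without re-adding 1 first.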
diff --git a/arch/x86/pci/visws.c b/arch/x86/pci/visws.c index 16d0c0eb0d19..bcead7a46871 100644 --- a/arch/x86/pci/visws.c +++ b/arch/x86/pci/visws.c | |||
@@ -24,24 +24,6 @@ static void pci_visws_disable_irq(struct pci_dev *dev) { } | |||
24 | 24 | ||
25 | unsigned int pci_bus0, pci_bus1; | 25 | unsigned int pci_bus0, pci_bus1; |
26 | 26 | ||
27 | static inline u8 bridge_swizzle(u8 pin, u8 slot) | ||
28 | { | ||
29 | return (((pin - 1) + slot) % 4) + 1; | ||
30 | } | ||
31 | |||
32 | static u8 __init visws_swizzle(struct pci_dev *dev, u8 *pinp) | ||
33 | { | ||
34 | u8 pin = *pinp; | ||
35 | |||
36 | while (dev->bus->self) { /* Move up the chain of bridges. */ | ||
37 | pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); | ||
38 | dev = dev->bus->self; | ||
39 | } | ||
40 | *pinp = pin; | ||
41 | |||
42 | return PCI_SLOT(dev->devfn); | ||
43 | } | ||
44 | |||
45 | static int __init visws_map_irq(struct pci_dev *dev, u8 slot, u8 pin) | 27 | static int __init visws_map_irq(struct pci_dev *dev, u8 slot, u8 pin) |
46 | { | 28 | { |
47 | int irq, bus = dev->bus->number; | 29 | int irq, bus = dev->bus->number; |
@@ -106,7 +88,7 @@ int __init pci_visws_init(void) | |||
106 | raw_pci_ops = &pci_direct_conf1; | 88 | raw_pci_ops = &pci_direct_conf1; |
107 | pci_scan_bus_with_sysdata(pci_bus0); | 89 | pci_scan_bus_with_sysdata(pci_bus0); |
108 | pci_scan_bus_with_sysdata(pci_bus1); | 90 | pci_scan_bus_with_sysdata(pci_bus1); |
109 | pci_fixup_irqs(visws_swizzle, visws_map_irq); | 91 | pci_fixup_irqs(pci_common_swizzle, visws_map_irq); |
110 | pcibios_resource_survey(); | 92 | pcibios_resource_survey(); |
111 | return 0; | 93 | return 0; |
112 | } | 94 | } |
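visws.c drops its private bridge_swizzle()/visws_swizzle() pair and passes the generic pci_common_swizzle() to pci_fixup_irqs() instead. The removed code spells out what such a swizzle has to do: walk up the chain of PCI-to-PCI bridges, swizzling the 1-based pin by the slot number at each hop, and finally report the slot of the device that sits on the root bus. A standalone model of that walk, reconstructed only from the removed lines (the generic helper is relied on to behave the same way; its own implementation is not part of this diff):

    /* Standalone model of the walk performed by the removed visws_swizzle();
     * struct fake_dev stands in for struct pci_dev and its bus topology. */
    #include <stdio.h>

    struct fake_dev {
            struct fake_dev *bridge;        /* upstream bridge, NULL on the root bus */
            unsigned char slot;             /* PCI_SLOT(devfn) stand-in */
    };

    static unsigned char swizzle_to_root(struct fake_dev *dev, unsigned char *pinp)
    {
            unsigned char pin = *pinp;

            while (dev->bridge) {                           /* climb towards the root */
                    pin = ((pin - 1 + dev->slot) % 4) + 1;  /* bridge_swizzle() formula */
                    dev = dev->bridge;
            }
            *pinp = pin;            /* swizzled, still 1-based */
            return dev->slot;       /* slot of the device on the root bus */
    }

    int main(void)
    {
            struct fake_dev root = { NULL, 5 };
            struct fake_dev leaf = { &root, 2 };
            unsigned char pin = 3;  /* INTC on the leaf device */
            unsigned char root_slot = swizzle_to_root(&leaf, &pin);

            printf("root slot %d, pin INT%c after swizzle\n",
                   root_slot, 'A' + pin - 1);
            return 0;
    }

With the open-coded copy gone, this platform uses the same swizzle logic as the rest of the tree, which is also what the irq.c changes above were converging on.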