author     Linus Torvalds <torvalds@linux-foundation.org>   2013-04-30 11:34:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-04-30 11:34:07 -0400
commit     874f6d1be7699b5d1873283b4737712cbabd7754 (patch)
tree       55f8a0d08d32f3a4beed6c76f9e48db896b8d3bb
parent     e486b4c4ba601410772136ba0f8337bf545dccad (diff)
parent     d50ba3687b99213501463a1947e3dd5b98bc2d99 (diff)
Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cleanups from Ingo Molnar:
 "Misc smaller cleanups"

* 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/lib: Fix spelling, put space between a numeral and its units
  x86/lib: Fix spelling in the comments
  x86, quirks: Shut-up a long-standing gcc warning
  x86, msr: Unify variable names
  x86-64, docs, mm: Add vsyscall range to virtual address space layout
  x86: Drop KERNEL_IMAGE_START
  x86_64: Use __BOOT_DS instead of __KERNEL_DS for safety
 Documentation/x86/x86_64/mm.txt      |  4 +++-
 arch/x86/boot/compressed/head_64.S   |  2 +-
 arch/x86/include/asm/msr.h           | 14 +++++++-------
 arch/x86/include/asm/page_64_types.h |  1 -
 arch/x86/kernel/head64.c             |  6 +++---
 arch/x86/kernel/quirks.c             | 18 +++++++++++-------
 arch/x86/lib/checksum_32.S           |  2 +-
 arch/x86/lib/memcpy_32.c             |  6 +++---
 arch/x86/lib/memcpy_64.S             |  2 +-
 arch/x86/lib/memmove_64.S            |  6 +++---
 10 files changed, 33 insertions(+), 28 deletions(-)
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index d6498e3cd713..881582f75c9c 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -13,7 +13,9 @@ ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole
 ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
 ... unused hole ...
 ffffffff80000000 - ffffffffa0000000 (=512 MB) kernel text mapping, from phys 0
-ffffffffa0000000 - fffffffffff00000 (=1536 MB) module mapping space
+ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space
+ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
+ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
 
 The direct mapping covers all memory in the system up to the highest
 memory address (this means in some cases it can also include PCI memory
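
The vsyscall window documented above is a fixed 8 MB slice at the top of the address space. As a quick illustration (a stand-alone user-space sketch using only the bounds from the table, not kernel macros), an address can be classified against that range like this:

#include <stdint.h>
#include <stdio.h>

/* Bounds copied from the documentation table above; illustrative only. */
#define VSYSCALL_RANGE_START 0xffffffffff600000ULL
#define VSYSCALL_RANGE_END   0xffffffffffdfffffULL

static int in_vsyscall_range(uint64_t addr)
{
	return addr >= VSYSCALL_RANGE_START && addr <= VSYSCALL_RANGE_END;
}

int main(void)
{
	uint64_t probe = 0xffffffffff600000ULL;	/* start of the documented window */

	printf("%#llx is %sin the vsyscall range\n",
	       (unsigned long long)probe,
	       in_vsyscall_range(probe) ? "" : "not ");
	return 0;
}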
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index c1d383d1fb7e..16f24e6dad79 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -52,7 +52,7 @@ ENTRY(startup_32)
 	jnz	1f
 
 	cli
-	movl	$(__KERNEL_DS), %eax
+	movl	$(__BOOT_DS), %eax
 	movl	%eax, %ds
 	movl	%eax, %es
 	movl	%eax, %ss
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 9264802e2824..cb7502852acb 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -137,11 +137,11 @@ static inline unsigned long long native_read_pmc(int counter)
  * pointer indirection), this allows gcc to optimize better
  */
 
-#define rdmsr(msr, val1, val2)					\
+#define rdmsr(msr, low, high)					\
 do {								\
 	u64 __val = native_read_msr((msr));			\
-	(void)((val1) = (u32)__val);				\
-	(void)((val2) = (u32)(__val >> 32));			\
+	(void)((low) = (u32)__val);				\
+	(void)((high) = (u32)(__val >> 32));			\
 } while (0)
 
 static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
@@ -162,12 +162,12 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
 }
 
 /* rdmsr with exception handling */
-#define rdmsr_safe(msr, p1, p2)					\
+#define rdmsr_safe(msr, low, high)				\
 ({								\
 	int __err;						\
 	u64 __val = native_read_msr_safe((msr), &__err);	\
-	(*p1) = (u32)__val;					\
-	(*p2) = (u32)(__val >> 32);				\
+	(*low) = (u32)__val;					\
+	(*high) = (u32)(__val >> 32);				\
 	__err;							\
 })
 
@@ -208,7 +208,7 @@ do { \
 #define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val),	\
 					 (u32)((val) >> 32))
 
-#define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))
+#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))
 
 #define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)
 
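
The rename above only unifies naming: an MSR is read as one 64-bit value and handed back as the 32-bit low/high halves that RDMSR delivers in EAX and EDX. A minimal user-space sketch of that split (stand-alone C, not the kernel macro itself):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val  = 0x1122334455667788ULL;	/* stand-in for a value read from an MSR */
	uint32_t low  = (uint32_t)val;		/* bits 31:0,  what RDMSR places in EAX */
	uint32_t high = (uint32_t)(val >> 32);	/* bits 63:32, what RDMSR places in EDX */

	printf("low=%#x high=%#x\n", low, high);
	return 0;
}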
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 8b491e66eaa8..6c896fbe21db 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -48,6 +48,5 @@
  * arch/x86/kernel/head_64.S), and it is mapped here:
  */
 #define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)
-#define KERNEL_IMAGE_START	_AC(0xffffffff80000000, UL)
 
 #endif /* _ASM_X86_PAGE_64_DEFS_H */
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index c5e403f6d869..101ac1a9263e 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -144,10 +144,10 @@ void __init x86_64_start_kernel(char * real_mode_data)
 	 * Build-time sanity checks on the kernel image and module
 	 * area mappings. (these are purely build-time and produce no code)
 	 */
-	BUILD_BUG_ON(MODULES_VADDR < KERNEL_IMAGE_START);
-	BUILD_BUG_ON(MODULES_VADDR-KERNEL_IMAGE_START < KERNEL_IMAGE_SIZE);
+	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
+	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
 	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
-	BUILD_BUG_ON((KERNEL_IMAGE_START & ~PMD_MASK) != 0);
+	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
 	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
 	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
 	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
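
BUILD_BUG_ON() turns these layout assumptions into compile-time failures. A minimal sketch of the same idea using C11 _Static_assert with made-up stand-in constants (illustrative only; not the kernel's BUILD_BUG_ON implementation):

/* Hypothetical layout constants standing in for __START_KERNEL_map and friends. */
#define MAP_BASE    0xffffffff80000000ULL
#define IMAGE_SIZE  (512ULL * 1024 * 1024)
#define MODULES_VA  (MAP_BASE + IMAGE_SIZE)

/* The build fails if the module area would start inside the kernel image window. */
_Static_assert(MODULES_VA - MAP_BASE >= IMAGE_SIZE,
	       "module area overlaps kernel image mapping");

int main(void)
{
	return 0;	/* nothing happens at run time; the check is at compile time */
}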
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 26ee48a33dc4..04ee1e2e4c02 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -354,18 +354,22 @@ static void ati_force_hpet_resume(void)
 
 static u32 ati_ixp4x0_rev(struct pci_dev *dev)
 {
-	u32 d;
-	u8 b;
+	int err = 0;
+	u32 d = 0;
+	u8 b = 0;
 
-	pci_read_config_byte(dev, 0xac, &b);
+	err = pci_read_config_byte(dev, 0xac, &b);
 	b &= ~(1<<5);
-	pci_write_config_byte(dev, 0xac, b);
-	pci_read_config_dword(dev, 0x70, &d);
+	err |= pci_write_config_byte(dev, 0xac, b);
+	err |= pci_read_config_dword(dev, 0x70, &d);
 	d |= 1<<8;
-	pci_write_config_dword(dev, 0x70, d);
-	pci_read_config_dword(dev, 0x8, &d);
+	err |= pci_write_config_dword(dev, 0x70, d);
+	err |= pci_read_config_dword(dev, 0x8, &d);
 	d &= 0xff;
 	dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);
+
+	WARN_ON_ONCE(err);
+
 	return d;
 }
 
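
The change above quiets gcc's long-standing unused-result warning by folding every accessor's return code into one err variable and warning once at the end. A stand-alone C sketch of the same idiom, using hypothetical read_reg()/write_reg() helpers rather than the real PCI accessors:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for accessors whose return value must be checked. */
static int read_reg(int where, uint32_t *val)  { (void)where; *val = 0x42; return 0; }
static int write_reg(int where, uint32_t val)  { (void)where; (void)val;   return 0; }

int main(void)
{
	int err = 0;
	uint32_t d = 0;

	err  = read_reg(0x70, &d);	/* every return code is folded into err ... */
	d |= 1 << 8;
	err |= write_reg(0x70, d);	/* ... so no result is silently discarded */
	err |= read_reg(0x8, &d);

	if (err)			/* the kernel code uses WARN_ON_ONCE(err) here */
		fprintf(stderr, "register access failed\n");
	return err;
}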
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 2af5df3ade7c..e78b8eee6615 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -61,7 +61,7 @@ ENTRY(csum_partial)
 	testl $3, %esi		# Check alignment.
 	jz 2f			# Jump if alignment is ok.
 	testl $1, %esi		# Check alignment.
-	jz 10f			# Jump if alignment is boundary of 2bytes.
+	jz 10f			# Jump if alignment is boundary of 2 bytes.
 
 	# buf is odd
 	dec %ecx
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
index b908a59eccf5..e78761d6b7f8 100644
--- a/arch/x86/lib/memcpy_32.c
+++ b/arch/x86/lib/memcpy_32.c
@@ -26,7 +26,7 @@ void *memmove(void *dest, const void *src, size_t n)
 	char *ret = dest;
 
 	__asm__ __volatile__(
-		/* Handle more 16bytes in loop */
+		/* Handle more 16 bytes in loop */
 		"cmp $0x10, %0\n\t"
 		"jb 1f\n\t"
 
@@ -51,7 +51,7 @@ void *memmove(void *dest, const void *src, size_t n)
51 "sub $0x10, %0\n\t" 51 "sub $0x10, %0\n\t"
52 52
53 /* 53 /*
54 * We gobble 16byts forward in each loop. 54 * We gobble 16 bytes forward in each loop.
55 */ 55 */
56 "3:\n\t" 56 "3:\n\t"
57 "sub $0x10, %0\n\t" 57 "sub $0x10, %0\n\t"
@@ -117,7 +117,7 @@ void *memmove(void *dest, const void *src, size_t n)
117 "sub $0x10, %0\n\t" 117 "sub $0x10, %0\n\t"
118 118
119 /* 119 /*
120 * We gobble 16byts backward in each loop. 120 * We gobble 16 bytes backward in each loop.
121 */ 121 */
122 "7:\n\t" 122 "7:\n\t"
123 "sub $0x10, %0\n\t" 123 "sub $0x10, %0\n\t"
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 1c273be7c97e..56313a326188 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -98,7 +98,7 @@ ENTRY(memcpy)
 	subq $0x20, %rdx
 	/*
 	 * At most 3 ALU operations in one cycle,
-	 * so append NOPS in the same 16bytes trunk.
+	 * so append NOPS in the same 16 bytes trunk.
 	 */
 	.p2align 4
 .Lcopy_backward_loop:
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index ee164610ec46..65268a6104f4 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -27,7 +27,7 @@
 ENTRY(memmove)
 	CFI_STARTPROC
 
-	/* Handle more 32bytes in loop */
+	/* Handle more 32 bytes in loop */
 	mov %rdi, %rax
 	cmp $0x20, %rdx
 	jb 1f
@@ -56,7 +56,7 @@ ENTRY(memmove)
 3:
 	sub $0x20, %rdx
 	/*
-	 * We gobble 32byts forward in each loop.
+	 * We gobble 32 bytes forward in each loop.
 	 */
 5:
 	sub $0x20, %rdx
@@ -122,7 +122,7 @@ ENTRY(memmove)
 	addq %rdx, %rdi
 	subq $0x20, %rdx
 	/*
-	 * We gobble 32byts backward in each loop.
+	 * We gobble 32 bytes backward in each loop.
 	 */
 8:
 	subq $0x20, %rdx