aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/lib
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-04-30 11:34:07 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-04-30 11:34:07 -0400
commit874f6d1be7699b5d1873283b4737712cbabd7754 (patch)
tree55f8a0d08d32f3a4beed6c76f9e48db896b8d3bb /arch/x86/lib
parente486b4c4ba601410772136ba0f8337bf545dccad (diff)
parentd50ba3687b99213501463a1947e3dd5b98bc2d99 (diff)
Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cleanups from Ingo Molnar: "Misc smaller cleanups" * 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86/lib: Fix spelling, put space between a numeral and its units x86/lib: Fix spelling in the comments x86, quirks: Shut-up a long-standing gcc warning x86, msr: Unify variable names x86-64, docs, mm: Add vsyscall range to virtual address space layout x86: Drop KERNEL_IMAGE_START x86_64: Use __BOOT_DS instead of __KERNEL_DS for safety
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--arch/x86/lib/checksum_32.S2
-rw-r--r--arch/x86/lib/memcpy_32.c6
-rw-r--r--arch/x86/lib/memcpy_64.S2
-rw-r--r--arch/x86/lib/memmove_64.S6
4 files changed, 8 insertions, 8 deletions
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 2af5df3ade7c..e78b8eee6615 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -61,7 +61,7 @@ ENTRY(csum_partial)
61 testl $3, %esi # Check alignment. 61 testl $3, %esi # Check alignment.
62 jz 2f # Jump if alignment is ok. 62 jz 2f # Jump if alignment is ok.
63 testl $1, %esi # Check alignment. 63 testl $1, %esi # Check alignment.
64 jz 10f # Jump if alignment is boundary of 2bytes. 64 jz 10f # Jump if alignment is boundary of 2 bytes.
65 65
66 # buf is odd 66 # buf is odd
67 dec %ecx 67 dec %ecx
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
index b908a59eccf5..e78761d6b7f8 100644
--- a/arch/x86/lib/memcpy_32.c
+++ b/arch/x86/lib/memcpy_32.c
@@ -26,7 +26,7 @@ void *memmove(void *dest, const void *src, size_t n)
26 char *ret = dest; 26 char *ret = dest;
27 27
28 __asm__ __volatile__( 28 __asm__ __volatile__(
29 /* Handle more 16bytes in loop */ 29 /* Handle more 16 bytes in loop */
30 "cmp $0x10, %0\n\t" 30 "cmp $0x10, %0\n\t"
31 "jb 1f\n\t" 31 "jb 1f\n\t"
32 32
@@ -51,7 +51,7 @@ void *memmove(void *dest, const void *src, size_t n)
51 "sub $0x10, %0\n\t" 51 "sub $0x10, %0\n\t"
52 52
53 /* 53 /*
54 * We gobble 16byts forward in each loop. 54 * We gobble 16 bytes forward in each loop.
55 */ 55 */
56 "3:\n\t" 56 "3:\n\t"
57 "sub $0x10, %0\n\t" 57 "sub $0x10, %0\n\t"
@@ -117,7 +117,7 @@ void *memmove(void *dest, const void *src, size_t n)
117 "sub $0x10, %0\n\t" 117 "sub $0x10, %0\n\t"
118 118
119 /* 119 /*
120 * We gobble 16byts backward in each loop. 120 * We gobble 16 bytes backward in each loop.
121 */ 121 */
122 "7:\n\t" 122 "7:\n\t"
123 "sub $0x10, %0\n\t" 123 "sub $0x10, %0\n\t"
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 1c273be7c97e..56313a326188 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -98,7 +98,7 @@ ENTRY(memcpy)
98 subq $0x20, %rdx 98 subq $0x20, %rdx
99 /* 99 /*
100 * At most 3 ALU operations in one cycle, 100 * At most 3 ALU operations in one cycle,
101 * so append NOPS in the same 16bytes trunk. 101 * so append NOPS in the same 16 bytes trunk.
102 */ 102 */
103 .p2align 4 103 .p2align 4
104.Lcopy_backward_loop: 104.Lcopy_backward_loop:
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index ee164610ec46..65268a6104f4 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -27,7 +27,7 @@
27ENTRY(memmove) 27ENTRY(memmove)
28 CFI_STARTPROC 28 CFI_STARTPROC
29 29
30 /* Handle more 32bytes in loop */ 30 /* Handle more 32 bytes in loop */
31 mov %rdi, %rax 31 mov %rdi, %rax
32 cmp $0x20, %rdx 32 cmp $0x20, %rdx
33 jb 1f 33 jb 1f
@@ -56,7 +56,7 @@ ENTRY(memmove)
563: 563:
57 sub $0x20, %rdx 57 sub $0x20, %rdx
58 /* 58 /*
59 * We gobble 32byts forward in each loop. 59 * We gobble 32 bytes forward in each loop.
60 */ 60 */
615: 615:
62 sub $0x20, %rdx 62 sub $0x20, %rdx
@@ -122,7 +122,7 @@ ENTRY(memmove)
122 addq %rdx, %rdi 122 addq %rdx, %rdi
123 subq $0x20, %rdx 123 subq $0x20, %rdx
124 /* 124 /*
125 * We gobble 32byts backward in each loop. 125 * We gobble 32 bytes backward in each loop.
126 */ 126 */
1278: 1278:
128 subq $0x20, %rdx 128 subq $0x20, %rdx