Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/kernel/vmlinux.lds.S     |  5
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S       |  2
-rw-r--r--  arch/blackfin/kernel/vmlinux.lds.S  |  2
-rw-r--r--  arch/cris/kernel/vmlinux.lds.S      |  2
-rw-r--r--  arch/frv/kernel/vmlinux.lds.S       |  2
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S      |  2
-rw-r--r--  arch/m32r/kernel/vmlinux.lds.S      |  2
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S      |  2
-rw-r--r--  arch/mn10300/kernel/vmlinux.lds.S   |  2
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S    |  2
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S   |  2
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S      |  2
-rw-r--r--  arch/sh/kernel/vmlinux.lds.S        |  2
-rw-r--r--  arch/sparc/kernel/vmlinux.lds.S     |  2
-rw-r--r--  arch/tile/kernel/vmlinux.lds.S      |  2
-rw-r--r--  arch/um/include/asm/common.lds.S    |  2
-rw-r--r--  arch/x86/include/asm/percpu.h       | 48
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S       |  4
-rw-r--r--  arch/x86/lib/Makefile               |  1
-rw-r--r--  arch/x86/lib/cmpxchg16b_emu.S       | 59
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S    |  2
21 files changed, 129 insertions(+), 20 deletions(-)
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index 003ef4c02585..433be2a24f31 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -1,5 +1,6 @@
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/thread_info.h>
+#include <asm/cache.h>
 #include <asm/page.h>
 
 OUTPUT_FORMAT("elf64-alpha")
@@ -38,7 +39,7 @@ SECTIONS
 	__init_begin = ALIGN(PAGE_SIZE);
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	INIT_DATA_SECTION(16)
-	PERCPU(PAGE_SIZE)
+	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
 	/* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page
 	   needed for the THREAD_SIZE aligned init_task gets freed after init */
 	. = ALIGN(THREAD_SIZE);
@@ -46,7 +47,7 @@ SECTIONS
 	/* Freed after init ends here */
 
 	_data = .;
-	RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 
 	.got : {
 		*(.got)
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 61462790757f..28fea9b2d129 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -78,7 +78,7 @@ SECTIONS
 #endif
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(32, PAGE_SIZE)
 
 #ifndef CONFIG_XIP_KERNEL
 	. = ALIGN(PAGE_SIZE);
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 4122678529c0..c40d07f708e8 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -136,7 +136,7 @@ SECTIONS
 
 	. = ALIGN(16);
 	INIT_DATA_SECTION(16)
-	PERCPU(4)
+	PERCPU(32, 4)
 
 	.exit.data :
 	{
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index c49be845f96a..728bbd9e7d4c 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -102,7 +102,7 @@ SECTIONS
 #endif
 	__vmlinux_end = .;	/* Last address of the physical file. */
 #ifdef CONFIG_ETRAX_ARCH_V32
-	PERCPU(PAGE_SIZE)
+	PERCPU(32, PAGE_SIZE)
 
 	.init.ramfs : {
 		INIT_RAM_FS
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S
index 8b973f3cc90e..0daae8af5787 100644
--- a/arch/frv/kernel/vmlinux.lds.S
+++ b/arch/frv/kernel/vmlinux.lds.S
@@ -37,7 +37,7 @@ SECTIONS
 	_einittext = .;
 
 	INIT_DATA_SECTION(8)
-	PERCPU(4096)
+	PERCPU(L1_CACHE_BYTES, 4096)
 
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 5a4d044dcb1c..787de4a77d82 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -198,7 +198,7 @@ SECTIONS {
 
 	/* Per-cpu data: */
 	. = ALIGN(PERCPU_PAGE_SIZE);
-	PERCPU_VADDR(PERCPU_ADDR, :percpu)
+	PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
 	__phys_per_cpu_start = __per_cpu_load;
 	/*
 	 * ensure percpu data fits
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index 7da94eaa082b..c194d64cdbb9 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -53,7 +53,7 @@ SECTIONS
 	__init_begin = .;
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	INIT_DATA_SECTION(16)
-	PERCPU(PAGE_SIZE)
+	PERCPU(32, PAGE_SIZE)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 570607b376b5..832afbb87588 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -115,7 +115,7 @@ SECTIONS
 		EXIT_DATA
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(1 << CONFIG_MIPS_L1_CACHE_SHIFT, PAGE_SIZE)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S
index febbeee7f2f5..968bcd2cb022 100644
--- a/arch/mn10300/kernel/vmlinux.lds.S
+++ b/arch/mn10300/kernel/vmlinux.lds.S
@@ -70,7 +70,7 @@ SECTIONS
 	.exit.text : { EXIT_TEXT; }
 	.exit.data : { EXIT_DATA; }
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(32, PAGE_SIZE)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index d64a6bbec2aa..8f1e4efd143e 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -145,7 +145,7 @@ SECTIONS
 		EXIT_DATA
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 8a0deefac08d..b9150f07d266 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -160,7 +160,7 @@ SECTIONS
 		INIT_RAM_FS
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
 
 	. = ALIGN(8);
 	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index a68ac10213b2..1bc18cdb525b 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -77,7 +77,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	INIT_DATA_SECTION(0x100)
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(0x100, PAGE_SIZE)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;		/* freed after init ends here */
 
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 7f8a709c3ada..af4d46187a79 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -66,7 +66,7 @@ SECTIONS
 		__machvec_end = .;
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
 
 	/*
 	 * .exit.text is discarded at runtime, not link time, to deal with
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 0c1e6783657f..92b557afe535 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -108,7 +108,7 @@ SECTIONS
 		__sun4v_2insn_patch_end = .;
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(SMP_CACHE_BYTES, PAGE_SIZE)
 
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index 25fdc0c1839a..c6ce378e0678 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -63,7 +63,7 @@ SECTIONS
     *(.init.page)
   } :data =0
   INIT_DATA_SECTION(16)
-  PERCPU(PAGE_SIZE)
+  PERCPU(L2_CACHE_BYTES, PAGE_SIZE)
   . = ALIGN(PAGE_SIZE);
   VMLINUX_SYMBOL(_einitdata) = .;
 
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
index ac55b9efa1ce..34bede8aad4a 100644
--- a/arch/um/include/asm/common.lds.S
+++ b/arch/um/include/asm/common.lds.S
@@ -42,7 +42,7 @@
 	INIT_SETUP(0)
   }
 
-  PERCPU(32)
+  PERCPU(32, 32)
 
   .initcall.init : {
 	INIT_CALLS
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 7e172955ee57..a09e1f052d84 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -451,6 +451,26 @@ do { \
 #define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 #endif /* !CONFIG_M386 */
 
+#ifdef CONFIG_X86_CMPXCHG64
+#define percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)			\
+({									\
+	char __ret;							\
+	typeof(o1) __o1 = o1;						\
+	typeof(o1) __n1 = n1;						\
+	typeof(o2) __o2 = o2;						\
+	typeof(o2) __n2 = n2;						\
+	typeof(o2) __dummy = n2;					\
+	asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t"	\
+		    : "=a"(__ret), "=m" (pcp1), "=d"(__dummy)		\
+		    :  "b"(__n1), "c"(__n2), "a"(__o1), "d"(__o2));	\
+	__ret;								\
+})
+
+#define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)		percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#endif /* CONFIG_X86_CMPXCHG64 */
+
 /*
  * Per cpu atomic 64 bit operations are only available under 64 bit.
  * 32 bit must fall back to generic operations.
@@ -480,6 +500,34 @@ do { \
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+
+/*
+ * Pretty complex macro to generate cmpxchg16 instruction. The instruction
+ * is not supported on early AMD64 processors so we must be able to emulate
+ * it in software. The address used in the cmpxchg16 instruction must be
+ * aligned to a 16 byte boundary.
+ */
+#define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)			\
+({									\
+	char __ret;							\
+	typeof(o1) __o1 = o1;						\
+	typeof(o1) __n1 = n1;						\
+	typeof(o2) __o2 = o2;						\
+	typeof(o2) __n2 = n2;						\
+	typeof(o2) __dummy;						\
+	alternative_io("call this_cpu_cmpxchg16b_emu\n\t" P6_NOP4,	\
+		       "cmpxchg16b %%gs:(%%rsi)\n\tsetz %0\n\t",	\
+		       X86_FEATURE_CX16,				\
+		       ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)),		\
+		       "S" (&pcp1), "b"(__n1), "c"(__n2),		\
+		       "a"(__o1), "d"(__o2));				\
+	__ret;								\
+})
+
+#define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+#define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
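
For illustration only (not part of this diff): a minimal sketch of how a caller might use the new double-word per-cpu cmpxchg, assuming the companion generic this_cpu_cmpxchg_double() wrapper that dispatches to the _4/_8 operations above. The struct, variable, and function names here are hypothetical; the two fields must be adjacent and the pair aligned to twice the word size so cmpxchg8b/cmpxchg16b can operate on it.

	#include <linux/percpu.h>

	/* Hypothetical example pair; 16-byte aligned on x86-64. */
	struct pcp_pair {
		void *obj;
		unsigned long tid;
	} __attribute__((__aligned__(2 * sizeof(long))));

	static DEFINE_PER_CPU(struct pcp_pair, pair);

	static int publish(void *old_obj, unsigned long old_tid,
			   void *new_obj, unsigned long new_tid)
	{
		/* Succeeds (returns 1) only if both words still hold the old values. */
		return this_cpu_cmpxchg_double(pair.obj, pair.tid,
					       old_obj, old_tid,
					       new_obj, new_tid);
	}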
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index bf4700755184..cef446f8ac78 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -230,7 +230,7 @@ SECTIONS
 	 * output PHDR, so the next output section - .init.text - should
 	 * start another segment - init.
 	 */
-	PERCPU_VADDR(0, :percpu)
+	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 #endif
 
 	INIT_TEXT_SECTION(PAGE_SIZE)
@@ -305,7 +305,7 @@ SECTIONS
 	}
 
 #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
-	PERCPU(THREAD_SIZE)
+	PERCPU(INTERNODE_CACHE_BYTES, THREAD_SIZE)
 #endif
 
 	. = ALIGN(PAGE_SIZE);
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index e10cf070ede0..f2479f19ddde 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -42,4 +42,5 @@ else
         lib-y += memmove_64.o memset_64.o
         lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
         lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o
+        lib-y += cmpxchg16b_emu.o
 endif
diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
new file mode 100644
index 000000000000..3e8b08a6de2b
--- /dev/null
+++ b/arch/x86/lib/cmpxchg16b_emu.S
@@ -0,0 +1,59 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ */
+#include <linux/linkage.h>
+#include <asm/alternative-asm.h>
+#include <asm/frame.h>
+#include <asm/dwarf2.h>
+
+.text
+
+/*
+ * Inputs:
+ * %rsi : memory location to compare
+ * %rax : low 64 bits of old value
+ * %rdx : high 64 bits of old value
+ * %rbx : low 64 bits of new value
+ * %rcx : high 64 bits of new value
+ * %al  : Operation successful
+ */
+ENTRY(this_cpu_cmpxchg16b_emu)
+CFI_STARTPROC
+
+#
+# Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not
+# via the ZF. Caller will access %al to get result.
+#
+# Note that this is only useful for a cpuops operation. Meaning that we
+# do *not* have a fully atomic operation but just an operation that is
+# *atomic* on a single cpu (as provided by the this_cpu_xx class of
+# macros).
+#
+this_cpu_cmpxchg16b_emu:
+	pushf
+	cli
+
+	cmpq %gs:(%rsi), %rax
+	jne not_same
+	cmpq %gs:8(%rsi), %rdx
+	jne not_same
+
+	movq %rbx, %gs:(%rsi)
+	movq %rcx, %gs:8(%rsi)
+
+	popf
+	mov $1, %al
+	ret
+
+not_same:
+	popf
+	xor %al,%al
+	ret
+
+CFI_ENDPROC
+
+ENDPROC(this_cpu_cmpxchg16b_emu)
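
In C terms, the emulation path above does roughly the following (a semantic sketch only; the function name is hypothetical and not kernel code): interrupts are disabled around the compare-and-store pair, so the sequence cannot be interrupted on the local CPU, but it is not atomic against other CPUs.

	#include <linux/irqflags.h>
	#include <linux/types.h>

	/* Hypothetical C rendering of this_cpu_cmpxchg16b_emu. */
	static int cmpxchg16b_local_emu(u64 *mem, u64 old_lo, u64 old_hi,
					u64 new_lo, u64 new_hi)
	{
		unsigned long flags;
		int ret = 0;

		local_irq_save(flags);		/* pushf; cli */
		if (mem[0] == old_lo && mem[1] == old_hi) {
			mem[0] = new_lo;	/* movq %rbx, %gs:(%rsi) */
			mem[1] = new_hi;	/* movq %rcx, %gs:8(%rsi) */
			ret = 1;		/* mov $1, %al */
		}
		local_irq_restore(flags);	/* popf */
		return ret;
	}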
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 9b526154c9ba..a2820065927e 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -155,7 +155,7 @@ SECTIONS
 		INIT_RAM_FS
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(XCHAL_ICACHE_LINESIZE, PAGE_SIZE)
 
 	/* We need this dummy segment here */
 