Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/apic/ipi.c                |   3
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c        |   4
-rw-r--r--  arch/x86/kernel/cpu/Makefile              |   4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c          |  19
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c  |   7
-rw-r--r--  arch/x86/kernel/head_32.S                 |   8
-rw-r--r--  arch/x86/kernel/process.c                 |   6
-rw-r--r--  arch/x86/kernel/setup_percpu.c            |  14
-rw-r--r--  arch/x86/kernel/tlb_uv.c                  |   1
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S             | 126
10 files changed, 87 insertions(+), 105 deletions(-)
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index dbf5445727a..6ef00ba4c88 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -106,6 +106,9 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
 	unsigned long mask = cpumask_bits(cpumask)[0];
 	unsigned long flags;
 
+	if (WARN_ONCE(!mask, "empty IPI mask"))
+		return;
+
 	local_irq_save(flags);
 	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
 	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
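
WARN_ONCE() evaluates to the condition it tests, which is what lets the new guard both log the first offending caller (with a backtrace) and gate the early return in a single expression. A minimal sketch of the pattern, with do_send() as an illustrative stand-in for the real send path:

    extern void do_send(unsigned long mask, int vector);

    static void send_one_ipi(unsigned long mask, int vector)
    {
        /* warns (once) and returns true when the mask is empty */
        if (WARN_ONCE(!mask, "empty IPI mask"))
            return;

        do_send(mask, vector);
    }
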
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 832e908adcb..601159374e8 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -46,7 +46,7 @@ static int early_get_nodeid(void)
 	return node_id.s.node_id;
 }
 
-static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	if (!strcmp(oem_id, "SGI")) {
 		if (!strcmp(oem_table_id, "UVL"))
@@ -253,7 +253,7 @@ static void uv_send_IPI_self(int vector)
 	apic_write(APIC_SELF_IPI, vector);
 }
 
-struct apic apic_x2apic_uv_x = {
+struct apic __refdata apic_x2apic_uv_x = {
 
 	.name				= "UV large system",
 	.probe				= NULL,
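
Two annotations cooperate here: __init places uv_acpi_madt_oem_check() in .init.text so it can be freed after boot, and __refdata on apic_x2apic_uv_x tells modpost that this long-lived object may legitimately reference init code (the OEM check is only ever called during boot), suppressing the section-mismatch warning the new __init would otherwise trigger. A hedged sketch, with an illustrative struct standing in for struct apic:

    #include <linux/init.h>

    struct example_ops {
        const char *name;
        int (*oem_check)(char *oem_id, char *oem_table_id);
    };

    /* lives in .init.text, discarded after boot */
    static int __init example_oem_check(char *oem_id, char *oem_table_id)
    {
        return 0;    /* probe logic elided */
    }

    /* __refdata: intentionally keeps a pointer into .init.text */
    static struct example_ops __refdata example_driver = {
        .name      = "example",
        .oem_check = example_oem_check,
    };
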
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 3efcb2b96a1..c1f253dac15 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -7,6 +7,10 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_common.o = -pg
 endif
 
+# Make sure load_percpu_segment has no stackprotector
+nostackp := $(call cc-option, -fno-stack-protector)
+CFLAGS_common.o		:= $(nostackp)
+
 obj-y			:= intel_cacheinfo.o addon_cpuid_features.o
 obj-y			+= proc.o capflags.o powerflags.o common.o
 obj-y			+= vmware.o hypervisor.o
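
CFLAGS_common.o overrides the compiler flags for that single object file, and cc-option keeps the override safe on compilers that lack the flag. The reason is in the comment: with stack protection enabled, the compiler reads the canary through a segment-relative per-CPU variable on function exit, and load_percpu_segment() (defined in common.c) is the function that reloads that very segment register. A simplified C sketch of the hazard, with an illustrative function name:

    /*
     * Built with -fstack-protector, the compiler would emit a
     * segment-relative canary load/compare on exit. This function
     * changes the segment base itself, so the exit-time check could
     * read garbage and falsely trip - hence common.o is built with
     * -fno-stack-protector.
     */
    void load_percpu_segment_like(int cpu)
    {
        /* reload the per-CPU segment register (elided) */
    }
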
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 1cfb623ce11..01213048f62 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1226,8 +1226,13 @@ static void mce_init(void)
 }
 
 /* Add per CPU specific workarounds here */
-static void mce_cpu_quirks(struct cpuinfo_x86 *c)
+static int mce_cpu_quirks(struct cpuinfo_x86 *c)
 {
+	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
+		pr_info("MCE: unknown CPU type - not enabling MCE support.\n");
+		return -EOPNOTSUPP;
+	}
+
 	/* This should be disabled by the BIOS, but isn't always */
 	if (c->x86_vendor == X86_VENDOR_AMD) {
 		if (c->x86 == 15 && banks > 4) {
@@ -1273,11 +1278,20 @@ static void mce_cpu_quirks(struct cpuinfo_x86 *c)
 		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
 			monarch_timeout < 0)
 			monarch_timeout = USEC_PER_SEC;
+
+		/*
+		 * There are also broken BIOSes on some Pentium M and
+		 * earlier systems:
+		 */
+		if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
+			mce_bootlog = 0;
 	}
 	if (monarch_timeout < 0)
 		monarch_timeout = 0;
 	if (mce_bootlog != 0)
 		mce_panic_timeout = 30;
+
+	return 0;
 }
 
 static void __cpuinit mce_ancient_init(struct cpuinfo_x86 *c)
@@ -1338,11 +1352,10 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 	if (!mce_available(c))
 		return;
 
-	if (mce_cap_init() < 0) {
+	if (mce_cap_init() < 0 || mce_cpu_quirks(c) < 0) {
 		mce_disabled = 1;
 		return;
 	}
-	mce_cpu_quirks(c);
 
 	machine_check_vector = do_machine_check;
 
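
mce_cpu_quirks() now reports failure instead of being fire-and-forget, and the caller folds it into the existing error path: either a failed capability probe or an unsupported CPU disables MCE as a whole. The shape of the pattern, sketched with illustrative names:

    static int check_caps(void)    { return 0; /* or -errno */ }
    static int apply_quirks(void)  { return 0; /* or -EOPNOTSUPP */ }

    static int feature_disabled;

    void feature_init(void)
    {
        /* || short-circuits: quirks only run when caps succeed,
         * and a single branch handles both failure modes */
        if (check_caps() < 0 || apply_quirks() < 0) {
            feature_disabled = 1;
            return;
        }
        /* full setup continues here */
    }
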
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 8bc64cfbe93..5957a93e517 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -116,11 +116,14 @@ static int therm_throt_process(int curr)
 				cpu, __get_cpu_var(thermal_throttle_count));
 
 		add_taint(TAINT_MACHINE_CHECK);
-	} else if (was_throttled) {
+		return 1;
+	}
+	if (was_throttled) {
 		printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
+		return 1;
 	}
 
-	return 1;
+	return 0;
 }
 
 #ifdef CONFIG_SYSFS
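
After this change, therm_throt_process() returns 1 only when it logged a transition (throttling began, or ended with the "normal" message) and 0 when nothing changed, so callers can key follow-up work off the return value. A hedged, simplified reduction of the new control flow (names illustrative):

    static int report_transition(int throttled_now, int was_throttled, int cpu)
    {
        if (throttled_now) {
            printk(KERN_CRIT "CPU%d: throttled\n", cpu);
            return 1;
        }
        if (was_throttled) {
            printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
            return 1;
        }
        return 0;    /* no transition, nothing logged */
    }
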
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 0d98a01cbdb..cc827ac9e8d 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -261,9 +261,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
  * which will be freed later
  */
 
-#ifndef CONFIG_HOTPLUG_CPU
-.section .init.text,"ax",@progbits
-#endif
+__CPUINIT
 
 #ifdef CONFIG_SMP
 ENTRY(startup_32_smp)
@@ -602,11 +600,7 @@ ignore_int:
 #endif
 	iret
 
-#ifndef CONFIG_HOTPLUG_CPU
-	__CPUINITDATA
-#else
 	__REFDATA
-#endif
 .align 4
 ENTRY(initial_code)
 	.long i386_start_kernel
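
__CPUINIT and __REFDATA are the assembler-side counterparts of the C markers: .cpuinit sections are discarded after boot on !CONFIG_HOTPLUG_CPU kernels and kept otherwise, with the section placement decided centrally rather than by hand-rolled #ifndef CONFIG_HOTPLUG_CPU blocks like the ones removed above. The equivalent C-side usage, as a sketch:

    #include <linux/init.h>

    /* Placed in .cpuinit.text; freed after boot unless
     * CONFIG_HOTPLUG_CPU needs it for late CPU bring-up. */
    static void __cpuinit bring_up_cpu_state(int cpu)
    {
        /* per-CPU bring-up work (elided) */
    }
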
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 994dd6a4a2a..071166a4ba8 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -519,16 +519,12 @@ static void c1e_idle(void)
 		if (!cpumask_test_cpu(cpu, c1e_mask)) {
 			cpumask_set_cpu(cpu, c1e_mask);
 			/*
-			 * Force broadcast so ACPI can not interfere. Needs
-			 * to run with interrupts enabled as it uses
-			 * smp_function_call.
+			 * Force broadcast so ACPI can not interfere.
 			 */
-			local_irq_enable();
 			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
 					   &cpu);
 			printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
 			       cpu);
-			local_irq_disable();
 		}
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 29a3eef7cf4..07d81916f21 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -165,7 +165,7 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 
 	if (!chosen) {
 		size_t vm_size = VMALLOC_END - VMALLOC_START;
-		size_t tot_size = num_possible_cpus() * PMD_SIZE;
+		size_t tot_size = nr_cpu_ids * PMD_SIZE;
 
 		/* on non-NUMA, embedding is better */
 		if (!pcpu_need_numa())
@@ -199,7 +199,7 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 	dyn_size = pcpul_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
 
 	/* allocate pointer array and alloc large pages */
-	map_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpul_map[0]));
+	map_size = PFN_ALIGN(nr_cpu_ids * sizeof(pcpul_map[0]));
 	pcpul_map = alloc_bootmem(map_size);
 
 	for_each_possible_cpu(cpu) {
@@ -228,7 +228,7 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 
 	/* allocate address and map */
 	pcpul_vm.flags = VM_ALLOC;
-	pcpul_vm.size = num_possible_cpus() * PMD_SIZE;
+	pcpul_vm.size = nr_cpu_ids * PMD_SIZE;
 	vm_area_register_early(&pcpul_vm, PMD_SIZE);
 
 	for_each_possible_cpu(cpu) {
@@ -250,8 +250,8 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 			PMD_SIZE, pcpul_vm.addr, NULL);
 
 	/* sort pcpul_map array for pcpu_lpage_remapped() */
-	for (i = 0; i < num_possible_cpus() - 1; i++)
-		for (j = i + 1; j < num_possible_cpus(); j++)
+	for (i = 0; i < nr_cpu_ids - 1; i++)
+		for (j = i + 1; j < nr_cpu_ids; j++)
 			if (pcpul_map[i].ptr > pcpul_map[j].ptr) {
 				struct pcpul_ent tmp = pcpul_map[i];
 				pcpul_map[i] = pcpul_map[j];
@@ -288,7 +288,7 @@ void *pcpu_lpage_remapped(void *kaddr)
 {
 	void *pmd_addr = (void *)((unsigned long)kaddr & PMD_MASK);
 	unsigned long offset = (unsigned long)kaddr & ~PMD_MASK;
-	int left = 0, right = num_possible_cpus() - 1;
+	int left = 0, right = nr_cpu_ids - 1;
 	int pos;
 
 	/* pcpul in use at all? */
@@ -377,7 +377,7 @@ static ssize_t __init setup_pcpu_4k(size_t static_size)
 	pcpu4k_nr_static_pages = PFN_UP(static_size);
 
 	/* unaligned allocations can't be freed, round up to page size */
-	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
+	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * nr_cpu_ids
 			       * sizeof(pcpu4k_pages[0]));
 	pcpu4k_pages = alloc_bootmem(pages_size);
 
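
The switch from num_possible_cpus() to nr_cpu_ids matters when the possible-CPU mask is sparse: num_possible_cpus() is the popcount of the mask, while nr_cpu_ids is the highest possible CPU number plus one, and only the latter safely bounds arrays indexed by CPU number. A sketch of the failure mode, with alloc_for() as a hypothetical helper:

    extern void *alloc_for(int cpu);

    /* Assume possible mask = {0, 2}:
     *   num_possible_cpus() == 2,  nr_cpu_ids == 3 */
    void *ptrs_bad[2];    /* sized by num_possible_cpus() */
    void *ptrs_ok[3];     /* sized by nr_cpu_ids */

    void fill(void)
    {
        int cpu;
        for_each_possible_cpu(cpu) {    /* visits 0, then 2 */
            /* ptrs_bad[2] would be out of bounds; ptrs_ok[2] is fine */
            ptrs_ok[cpu] = alloc_for(cpu);
        }
    }
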
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 8ccabb8a2f6..77b9689f8ed 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -744,6 +744,7 @@ uv_activation_descriptor_init(int node, int pnode)
 	 * note that base_dest_nodeid is actually a nasid.
 	 */
 	ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
+	ad2->header.dest_subnodeid = 0x10; /* the LB */
 	ad2->header.command = UV_NET_ENDPOINT_INTD;
 	ad2->header.int_both = 1;
 	/*
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 78d185d797d..9fc178255c0 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -46,11 +46,10 @@ PHDRS {
 	data PT_LOAD FLAGS(7);		/* RWE */
 #ifdef CONFIG_X86_64
 	user PT_LOAD FLAGS(7);		/* RWE */
-	data.init PT_LOAD FLAGS(7);	/* RWE */
 #ifdef CONFIG_SMP
 	percpu PT_LOAD FLAGS(7);	/* RWE */
 #endif
-	data.init2 PT_LOAD FLAGS(7);	/* RWE */
+	init PT_LOAD FLAGS(7);		/* RWE */
 #endif
 	note PT_NOTE FLAGS(0);		/* ___ */
 }
@@ -103,65 +102,43 @@ SECTIONS
 	__stop___ex_table = .;
 	} :text = 0x9090
 
-	RODATA
+	RO_DATA(PAGE_SIZE)
 
 	/* Data */
-	. = ALIGN(PAGE_SIZE);
 	.data : AT(ADDR(.data) - LOAD_OFFSET) {
 		/* Start of data section */
 		_sdata = .;
-		DATA_DATA
-		CONSTRUCTORS
-	} :data
+
+		/* init_task */
+		INIT_TASK_DATA(THREAD_SIZE)
 
 #ifdef CONFIG_X86_32
-	/* 32 bit has nosave before _edata */
-	. = ALIGN(PAGE_SIZE);
-	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
-		__nosave_begin = .;
-		*(.data.nosave)
-		. = ALIGN(PAGE_SIZE);
-		__nosave_end = .;
-	}
+		/* 32 bit has nosave before _edata */
+		NOSAVE_DATA
 #endif
 
-	. = ALIGN(PAGE_SIZE);
-	.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
-		*(.data.page_aligned)
+		PAGE_ALIGNED_DATA(PAGE_SIZE)
 		*(.data.idt)
-	}
 
-#ifdef CONFIG_X86_32
-	. = ALIGN(32);
-#else
-	. = ALIGN(PAGE_SIZE);
-	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
-#endif
-	.data.cacheline_aligned :
-		AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
-		*(.data.cacheline_aligned)
-	}
+		CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES)
 
-	/* rarely changed data like cpu maps */
-#ifdef CONFIG_X86_32
-	. = ALIGN(32);
-#else
-	. = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
-#endif
-	.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
-		*(.data.read_mostly)
+		DATA_DATA
+		CONSTRUCTORS
+
+		/* rarely changed data like cpu maps */
+		READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
 
 		/* End of data section */
 		_edata = .;
-	}
+	} :data
 
 #ifdef CONFIG_X86_64
 
 #define VSYSCALL_ADDR (-10*1024*1024)
-#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + \
-			SIZEOF(.data.read_mostly) + 4095) & ~(4095))
-#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + \
-			SIZEOF(.data.read_mostly) + 4095) & ~(4095))
+#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data) + SIZEOF(.data) + \
+			PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
+#define VSYSCALL_VIRT_ADDR ((ADDR(.data) + SIZEOF(.data) + \
+			PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
 
 #define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
 #define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)
@@ -227,35 +204,29 @@ SECTIONS
 
 #endif /* CONFIG_X86_64 */
 
-	/* init_task */
-	. = ALIGN(THREAD_SIZE);
-	.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
-		*(.data.init_task)
+	/* Init code and data - will be freed after init */
+	. = ALIGN(PAGE_SIZE);
+	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
+		__init_begin = .; /* paired with __init_end */
 	}
-#ifdef CONFIG_X86_64
-	:data.init
-#endif
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
 	/*
-	 * smp_locks might be freed after init
-	 * start/end must be page aligned
+	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
+	 * output PHDR, so the next output section - .init.text - should
+	 * start another segment - init.
 	 */
-	. = ALIGN(PAGE_SIZE);
-	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
-		__smp_locks = .;
-		*(.smp_locks)
-		__smp_locks_end = .;
-		. = ALIGN(PAGE_SIZE);
-	}
+	PERCPU_VADDR(0, :percpu)
+#endif
 
-	/* Init code and data - will be freed after init */
-	. = ALIGN(PAGE_SIZE);
 	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
-		__init_begin = .; /* paired with __init_end */
 		_sinittext = .;
 		INIT_TEXT
 		_einittext = .;
 	}
+#ifdef CONFIG_X86_64
+	:init
+#endif
 
 	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
 		INIT_DATA
@@ -326,17 +297,7 @@ SECTIONS
 	}
 #endif
 
-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
-	/*
-	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
-	 * output PHDR, so the next output section - __data_nosave - should
-	 * start another section data.init2.  Also, pda should be at the head of
-	 * percpu area.  Preallocate it and define the percpu offset symbol
-	 * so that it can be accessed as a percpu variable.
-	 */
-	. = ALIGN(PAGE_SIZE);
-	PERCPU_VADDR(0, :percpu)
-#else
+#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
 	PERCPU(PAGE_SIZE)
 #endif
 
@@ -347,15 +308,22 @@ SECTIONS
 		__init_end = .;
 	}
 
+	/*
+	 * smp_locks might be freed after init
+	 * start/end must be page aligned
+	 */
+	. = ALIGN(PAGE_SIZE);
+	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+		__smp_locks = .;
+		*(.smp_locks)
+		__smp_locks_end = .;
+		. = ALIGN(PAGE_SIZE);
+	}
+
 #ifdef CONFIG_X86_64
 	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
-		. = ALIGN(PAGE_SIZE);
-		__nosave_begin = .;
-		*(.data.nosave)
-		. = ALIGN(PAGE_SIZE);
-		__nosave_end = .;
-	} :data.init2
-	/* use another section data.init2, see PERCPU_VADDR() above */
+		NOSAVE_DATA
+	}
 #endif
 
 	/* BSS */