Diffstat (limited to 'arch/powerpc/kernel')
 arch/powerpc/kernel/cputable.c            |  86
 arch/powerpc/kernel/head_64.S             |  21
 arch/powerpc/kernel/misc_32.S             |  74
 arch/powerpc/kernel/misc_64.S             | 124
 arch/powerpc/kernel/module_32.c           |  39
 arch/powerpc/kernel/module_64.c           |  49
 arch/powerpc/kernel/prom.c                |   8
 arch/powerpc/kernel/setup_32.c            |   8
 arch/powerpc/kernel/setup_64.c            |  11
 arch/powerpc/kernel/time.c                |  63
 arch/powerpc/kernel/vdso.c                |  43
 arch/powerpc/kernel/vdso32/vdso32.lds.S   |  12
 arch/powerpc/kernel/vdso64/gettimeofday.S |   6
 arch/powerpc/kernel/vdso64/vdso64.lds.S   |  10
 14 files changed, 263 insertions(+), 291 deletions(-)
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 95382f994404..bfd499ee3753 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -18,6 +18,7 @@
 
 #include <asm/oprofile_impl.h>
 #include <asm/cputable.h>
+#include <asm/prom.h>		/* for PTRRELOC on ARCH=ppc */
 
 struct cpu_spec* cur_cpu_spec = NULL;
 EXPORT_SYMBOL(cur_cpu_spec);
@@ -73,7 +74,7 @@ extern void __restore_cpu_ppc970(void);
 #define PPC_FEATURE_SPE_COMP	0
 #endif
 
-struct cpu_spec	cpu_specs[] = {
+static struct cpu_spec cpu_specs[] = {
 #ifdef CONFIG_PPC64
 	{	/* Power3 */
 		.pvr_mask		= 0xffff0000,
@@ -227,6 +228,21 @@ struct cpu_spec cpu_specs[] = {
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.platform		= "ppc970",
 	},
+	{	/* PPC970GX */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00450000,
+		.cpu_name		= "PPC970GX",
+		.cpu_features		= CPU_FTRS_PPC970,
+		.cpu_user_features	= COMMON_USER_POWER4 |
+			PPC_FEATURE_HAS_ALTIVEC_COMP,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 8,
+		.cpu_setup		= __setup_cpu_ppc970,
+		.oprofile_cpu_type	= "ppc64/970",
+		.oprofile_type		= PPC_OPROFILE_POWER4,
+		.platform		= "ppc970",
+	},
 	{	/* Power5 GR */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x003a0000,
@@ -1152,3 +1168,71 @@ struct cpu_spec cpu_specs[] = {
 #endif /* !CLASSIC_PPC */
 #endif /* CONFIG_PPC32 */
 };
+
+struct cpu_spec *identify_cpu(unsigned long offset)
+{
+	struct cpu_spec *s = cpu_specs;
+	struct cpu_spec **cur = &cur_cpu_spec;
+	unsigned int pvr = mfspr(SPRN_PVR);
+	int i;
+
+	s = PTRRELOC(s);
+	cur = PTRRELOC(cur);
+
+	if (*cur != NULL)
+		return PTRRELOC(*cur);
+
+	for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++)
+		if ((pvr & s->pvr_mask) == s->pvr_value) {
+			*cur = cpu_specs + i;
+#ifdef CONFIG_PPC64
+			/* ppc64 expects identify_cpu to also call setup_cpu
+			 * for that processor. I will consolidate that at a
+			 * later time, for now, just use our friend #ifdef.
+			 * we also don't need to PTRRELOC the function pointer
+			 * on ppc64 as we are running at 0 in real mode.
+			 */
+			if (s->cpu_setup) {
+				s->cpu_setup(offset, s);
+			}
+#endif /* CONFIG_PPC64 */
+			return s;
+		}
+	BUG();
+	return NULL;
+}
+
+void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+{
+	struct fixup_entry {
+		unsigned long	mask;
+		unsigned long	value;
+		long		start_off;
+		long		end_off;
+	} *fcur, *fend;
+
+	fcur = fixup_start;
+	fend = fixup_end;
+
+	for (; fcur < fend; fcur++) {
+		unsigned int *pstart, *pend, *p;
+
+		if ((value & fcur->mask) == fcur->value)
+			continue;
+
+		/* These PTRRELOCs will disappear once the new scheme for
+		 * modules and vdso is implemented
+		 */
+		pstart = ((unsigned int *)fcur) + (fcur->start_off / 4);
+		pend = ((unsigned int *)fcur) + (fcur->end_off / 4);
+
+		for (p = pstart; p < pend; p++) {
+			*p = 0x60000000u;
+			asm volatile ("dcbst 0, %0" : : "r" (p));
+		}
+		asm volatile ("sync" : : : "memory");
+		for (p = pstart; p < pend; p++)
+			asm volatile ("icbi 0,%0" : : "r" (p));
+		asm volatile ("sync; isync" : : : "memory");
+	}
+}
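
The new do_feature_fixups() above is the C replacement for the assembly do_cpu_ftr_fixups/do_fw_ftr_fixups routines removed later in this patch: it walks an array of {mask, value, start_off, end_off} records collected in the __ftr_fixup (and __fw_ftr_fixup) sections and overwrites the guarded instruction ranges with nops when the feature test fails. Below is a minimal user-space sketch of that walk; the table, buffer and function names are invented for illustration, the offsets index a toy array rather than being self-relative, and the dcbst/icbi cache maintenance is left out.

    /* Minimal user-space sketch of the fixup walk in do_feature_fixups(). */
    #include <stdio.h>
    #include <stdint.h>

    struct toy_fixup_entry {
        unsigned long mask;     /* feature bits this range depends on */
        unsigned long value;    /* required value for the code to stay */
        long start_off;         /* first instruction index to patch */
        long end_off;           /* one past the last instruction index */
    };

    #define PPC_NOP 0x60000000u /* "ori r0,r0,0" */

    static void toy_apply_fixups(unsigned long features,
                                 const struct toy_fixup_entry *fcur,
                                 const struct toy_fixup_entry *fend,
                                 uint32_t *text)
    {
        for (; fcur < fend; fcur++) {
            long i;

            if ((features & fcur->mask) == fcur->value)
                continue;               /* feature present, keep the code */
            for (i = fcur->start_off; i < fcur->end_off; i++)
                text[i] = PPC_NOP;      /* nop out the guarded range */
        }
    }

    int main(void)
    {
        uint32_t text[4] = { 0x7c0004ac, 0x7c0004ac, 0x38600001, 0x4e800020 };
        struct toy_fixup_entry table[] = {
            /* range [0,2) is only wanted when feature bit 0 is set */
            { .mask = 0x1, .value = 0x1, .start_off = 0, .end_off = 2 },
        };

        toy_apply_fixups(0, table, table + 1, text);    /* bit 0 not set */
        for (int i = 0; i < 4; i++)
            printf("%08x\n", text[i]);
        return 0;
    }

In the kernel, the fixup entries are emitted by the feature-section assembler macros and the patched ranges are flushed from the data cache and invalidated in the instruction cache, as the real function shows.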
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 645c7f10fb28..291e3629b504 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -1580,11 +1580,6 @@ _STATIC(__start_initialization_iSeries)
 	li	r0,0
 	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
 
-	LOAD_REG_IMMEDIATE(r3,cpu_specs)
-	LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
-	li	r5,0
-	bl	.identify_cpu
-
 	LOAD_REG_IMMEDIATE(r2,__toc_start)
 	addi	r2,r2,0x4000
 	addi	r2,r2,0x4000
@@ -1646,6 +1641,8 @@ _GLOBAL(__start_initialization_multiplatform)
 	cmpwi	r0,0x3c			/* 970FX */
 	beq	1f
 	cmpwi	r0,0x44			/* 970MP */
+	beq	1f
+	cmpwi	r0,0x45			/* 970GX */
 	bne	2f
 1:	bl	.__cpu_preinit_ppc970
 2:
@@ -1964,13 +1961,6 @@ _STATIC(start_here_multiplatform)
 	addi	r2,r2,0x4000
 	add	r2,r2,r26
 
-	LOAD_REG_IMMEDIATE(r3, cpu_specs)
-	add	r3,r3,r26
-	LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
-	add	r4,r4,r26
-	mr	r5,r26
-	bl	.identify_cpu
-
 	/* Do very early kernel initializations, including initial hash table,
 	 * stab and slb setup before we turn on relocation. */
 
@@ -2000,13 +1990,6 @@ _STATIC(start_here_common)
 	li	r0,0
 	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
 
-	/* Apply the CPUs-specific fixups (nop out sections not relevant
-	 * to this CPU
-	 */
-	li	r3,0
-	bl	.do_cpu_ftr_fixups
-	bl	.do_fw_ftr_fixups
-
 	/* ptr to current */
 	LOAD_REG_IMMEDIATE(r4, init_task)
 	std	r4,PACACURRENT(r13)
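
The extra cmpwi added above checks the top halfword of the processor version register for the 970GX (0x0045) alongside the existing 970FX (0x003c) and 970MP (0x0044) checks; identify_cpu() performs the general form of the same test against cpu_specs[] using each entry's pvr_mask/pvr_value. A small sketch of that matching, with an invented table:

    /* Sketch of the PVR match identify_cpu() performs over cpu_specs[].
     * The table is invented for the example; the real one lives in
     * arch/powerpc/kernel/cputable.c and mfspr(SPRN_PVR) supplies the value.
     */
    #include <stdio.h>

    struct toy_cpu_spec {
        unsigned int pvr_mask;
        unsigned int pvr_value;
        const char *cpu_name;
    };

    static const struct toy_cpu_spec specs[] = {
        { 0xffff0000, 0x003c0000, "PPC970FX" },
        { 0xffff0000, 0x00440000, "PPC970MP" },
        { 0xffff0000, 0x00450000, "PPC970GX" },
        { 0x00000000, 0x00000000, "unknown"  },  /* catch-all default */
    };

    static const struct toy_cpu_spec *toy_identify_cpu(unsigned int pvr)
    {
        const struct toy_cpu_spec *s;

        for (s = specs; ; s++)
            if ((pvr & s->pvr_mask) == s->pvr_value)
                return s;       /* the catch-all guarantees a match */
    }

    int main(void)
    {
        /* 0x0045xxxx is the new 970GX check added in head_64.S above. */
        printf("%s\n", toy_identify_cpu(0x00450201)->cpu_name);
        return 0;
    }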
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 88fd73fdf048..412bea3cf813 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -102,80 +102,6 @@ _GLOBAL(reloc_got2)
 	blr
 
 /*
- * identify_cpu,
- * called with r3 = data offset and r4 = CPU number
- * doesn't change r3
- */
-_GLOBAL(identify_cpu)
-	addis	r8,r3,cpu_specs@ha
-	addi	r8,r8,cpu_specs@l
-	mfpvr	r7
-1:
-	lwz	r5,CPU_SPEC_PVR_MASK(r8)
-	and	r5,r5,r7
-	lwz	r6,CPU_SPEC_PVR_VALUE(r8)
-	cmplw	0,r6,r5
-	beq	1f
-	addi	r8,r8,CPU_SPEC_ENTRY_SIZE
-	b	1b
-1:
-	addis	r6,r3,cur_cpu_spec@ha
-	addi	r6,r6,cur_cpu_spec@l
-	sub	r8,r8,r3
-	stw	r8,0(r6)
-	blr
-
-/*
- * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
- * and writes nop's over sections of code that don't apply for this cpu.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_cpu_ftr_fixups)
-	/* Get CPU 0 features */
-	addis	r6,r3,cur_cpu_spec@ha
-	addi	r6,r6,cur_cpu_spec@l
-	lwz	r4,0(r6)
-	add	r4,r4,r3
-	lwz	r4,CPU_SPEC_FEATURES(r4)
-
-	/* Get the fixup table */
-	addis	r6,r3,__start___ftr_fixup@ha
-	addi	r6,r6,__start___ftr_fixup@l
-	addis	r7,r3,__stop___ftr_fixup@ha
-	addi	r7,r7,__stop___ftr_fixup@l
-
-	/* Do the fixup */
-1:	cmplw	0,r6,r7
-	bgelr
-	addi	r6,r6,16
-	lwz	r8,-16(r6)	/* mask */
-	and	r8,r8,r4
-	lwz	r9,-12(r6)	/* value */
-	cmplw	0,r8,r9
-	beq	1b
-	lwz	r8,-8(r6)	/* section begin */
-	lwz	r9,-4(r6)	/* section end */
-	subf.	r9,r8,r9
-	beq	1b
-	/* write nops over the section of code */
-	/* todo: if large section, add a branch at the start of it */
-	srwi	r9,r9,2
-	mtctr	r9
-	add	r8,r8,r3
-	lis	r0,0x60000000@h	/* nop */
-3:	stw	r0,0(r8)
-	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
-	beq	2f
-	dcbst	0,r8		/* suboptimal, but simpler */
-	sync
-	icbi	0,r8
-2:	addi	r8,r8,4
-	bdnz	3b
-	sync			/* additional sync needed on g4 */
-	isync
-	b	1b
-
-/*
  * call_setup_cpu - call the setup_cpu function for this cpu
  * r3 = data offset, r24 = cpu number
  *
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index c70e20708a1f..21fd2c662a99 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -246,130 +246,6 @@ _GLOBAL(__flush_dcache_icache)
 	isync
 	blr
 
-/*
- * identify_cpu and calls setup_cpu
- * In:	r3 = base of the cpu_specs array
- *	r4 = address of cur_cpu_spec
- *	r5 = relocation offset
- */
-_GLOBAL(identify_cpu)
-	mfpvr	r7
-1:
-	lwz	r8,CPU_SPEC_PVR_MASK(r3)
-	and	r8,r8,r7
-	lwz	r9,CPU_SPEC_PVR_VALUE(r3)
-	cmplw	0,r9,r8
-	beq	1f
-	addi	r3,r3,CPU_SPEC_ENTRY_SIZE
-	b	1b
-1:
-	sub	r0,r3,r5
-	std	r0,0(r4)
-	ld	r4,CPU_SPEC_SETUP(r3)
-	cmpdi	0,r4,0
-	add	r4,r4,r5
-	beqlr
-	ld	r4,0(r4)
-	add	r4,r4,r5
-	mtctr	r4
-	/* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
-	mr	r4,r3
-	mr	r3,r5
-	bctr
-
-/*
- * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
- * and writes nop's over sections of code that don't apply for this cpu.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_cpu_ftr_fixups)
-	/* Get CPU 0 features */
-	LOAD_REG_IMMEDIATE(r6,cur_cpu_spec)
-	sub	r6,r6,r3
-	ld	r4,0(r6)
-	sub	r4,r4,r3
-	ld	r4,CPU_SPEC_FEATURES(r4)
-	/* Get the fixup table */
-	LOAD_REG_IMMEDIATE(r6,__start___ftr_fixup)
-	sub	r6,r6,r3
-	LOAD_REG_IMMEDIATE(r7,__stop___ftr_fixup)
-	sub	r7,r7,r3
-	/* Do the fixup */
-1:	cmpld	r6,r7
-	bgelr
-	addi	r6,r6,32
-	ld	r8,-32(r6)	/* mask */
-	and	r8,r8,r4
-	ld	r9,-24(r6)	/* value */
-	cmpld	r8,r9
-	beq	1b
-	ld	r8,-16(r6)	/* section begin */
-	ld	r9,-8(r6)	/* section end */
-	subf.	r9,r8,r9
-	beq	1b
-	/* write nops over the section of code */
-	/* todo: if large section, add a branch at the start of it */
-	srwi	r9,r9,2
-	mtctr	r9
-	sub	r8,r8,r3
-	lis	r0,0x60000000@h	/* nop */
-3:	stw	r0,0(r8)
-	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
-	beq	2f
-	dcbst	0,r8		/* suboptimal, but simpler */
-	sync
-	icbi	0,r8
-2:	addi	r8,r8,4
-	bdnz	3b
-	sync			/* additional sync needed on g4 */
-	isync
-	b	1b
-
-/*
- * do_fw_ftr_fixups - goes through the list of firmware feature fixups
- * and writes nop's over sections of code that don't apply for this firmware.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_fw_ftr_fixups)
-	/* Get firmware features */
-	LOAD_REG_IMMEDIATE(r6,powerpc_firmware_features)
-	sub	r6,r6,r3
-	ld	r4,0(r6)
-	/* Get the fixup table */
-	LOAD_REG_IMMEDIATE(r6,__start___fw_ftr_fixup)
-	sub	r6,r6,r3
-	LOAD_REG_IMMEDIATE(r7,__stop___fw_ftr_fixup)
-	sub	r7,r7,r3
-	/* Do the fixup */
-1:	cmpld	r6,r7
-	bgelr
-	addi	r6,r6,32
-	ld	r8,-32(r6)	/* mask */
-	and	r8,r8,r4
-	ld	r9,-24(r6)	/* value */
-	cmpld	r8,r9
-	beq	1b
-	ld	r8,-16(r6)	/* section begin */
-	ld	r9,-8(r6)	/* section end */
-	subf.	r9,r8,r9
-	beq	1b
-	/* write nops over the section of code */
-	/* todo: if large section, add a branch at the start of it */
-	srwi	r9,r9,2
-	mtctr	r9
-	sub	r8,r8,r3
-	lis	r0,0x60000000@h	/* nop */
-3:	stw	r0,0(r8)
-BEGIN_FTR_SECTION
-	dcbst	0,r8		/* suboptimal, but simpler */
-	sync
-	icbi	0,r8
-END_FTR_SECTION_IFSET(CPU_FTR_SPLIT_ID_CACHE)
-	addi	r8,r8,4
-	bdnz	3b
-	sync			/* additional sync needed on g4 */
-	isync
-	b	1b
-
 
 #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
 /*
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 92f4e5f64f02..e2c3c6a85f33 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -24,6 +24,8 @@
 #include <linux/kernel.h>
 #include <linux/cache.h>
 
+#include "setup.h"
+
 #if 0
 #define DEBUGP printk
 #else
@@ -269,33 +271,50 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 	return 0;
 }
 
+static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+				    const Elf_Shdr *sechdrs,
+				    const char *name)
+{
+	char *secstrings;
+	unsigned int i;
+
+	secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+	for (i = 1; i < hdr->e_shnum; i++)
+		if (strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
+			return &sechdrs[i];
+	return NULL;
+}
+
 int module_finalize(const Elf_Ehdr *hdr,
 		    const Elf_Shdr *sechdrs,
 		    struct module *me)
 {
-	char *secstrings;
-	unsigned int i;
+	const Elf_Shdr *sect;
 
 	me->arch.bug_table = NULL;
 	me->arch.num_bugs = 0;
 
 	/* Find the __bug_table section, if present */
-	secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-	for (i = 1; i < hdr->e_shnum; i++) {
-		if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
-			continue;
-		me->arch.bug_table = (void *) sechdrs[i].sh_addr;
-		me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
-		break;
+	sect = find_section(hdr, sechdrs, "__bug_table");
+	if (sect != NULL) {
+		me->arch.bug_table = (void *) sect->sh_addr;
+		me->arch.num_bugs = sect->sh_size / sizeof(struct bug_entry);
 	}
 
 	/*
	 * Strictly speaking this should have a spinlock to protect against
	 * traversals, but since we only traverse on BUG()s, a spinlock
	 * could potentially lead to deadlock and thus be counter-productive.
	 */
 	list_add(&me->arch.bug_list, &module_bug_list);
 
+	/* Apply feature fixups */
+	sect = find_section(hdr, sechdrs, "__ftr_fixup");
+	if (sect != NULL)
+		do_feature_fixups(cur_cpu_spec->cpu_features,
+				  (void *)sect->sh_addr,
+				  (void *)sect->sh_addr + sect->sh_size);
+
 	return 0;
 }
 
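
The find_section() helper added above is a plain linear scan of the ELF section header table, resolving names through the string table indexed by e_shstrndx; this works because the module loader has the whole .ko image in memory, so sh_offset can be used directly. A user-space sketch of the same lookup, written against the 64-bit <elf.h> types and a tiny fabricated image so it actually runs (all names and values below are invented):

    #include <elf.h>
    #include <stdio.h>
    #include <string.h>

    static const Elf64_Shdr *find_section(const Elf64_Ehdr *hdr,
                                          const Elf64_Shdr *sechdrs,
                                          const char *name)
    {
        /* Section names live in the section pointed to by e_shstrndx. */
        const char *secstrings = (const char *)hdr +
                                 sechdrs[hdr->e_shstrndx].sh_offset;
        unsigned int i;

        for (i = 1; i < hdr->e_shnum; i++)      /* index 0 is SHN_UNDEF */
            if (strcmp(secstrings + sechdrs[i].sh_name, name) == 0)
                return &sechdrs[i];
        return NULL;
    }

    int main(void)
    {
        /* Fake image: header, three section headers, then a string table,
         * all in one buffer so sh_offset resolves as in the module loader.
         */
        static union { unsigned char raw[512]; Elf64_Ehdr ehdr; } img;
        Elf64_Shdr *sh = (Elf64_Shdr *)(img.raw + sizeof(Elf64_Ehdr));
        char *strtab = (char *)(sh + 3);

        strcpy(strtab + 1, "__ftr_fixup");      /* name at string offset 1 */
        img.ehdr.e_shnum = 3;
        img.ehdr.e_shstrndx = 2;
        sh[1].sh_name = 1;                      /* points at "__ftr_fixup" */
        sh[1].sh_addr = 0x1000;
        sh[2].sh_offset = (Elf64_Off)((unsigned char *)strtab - img.raw);

        const Elf64_Shdr *s = find_section(&img.ehdr, sh, "__ftr_fixup");
        printf("found: %d, sh_addr=%#llx\n", s != NULL,
               s ? (unsigned long long)s->sh_addr : 0ULL);
        return 0;
    }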
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index ba34001fca8e..8dd1f0aae5d6 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -22,6 +22,9 @@
 #include <linux/vmalloc.h>
 #include <asm/module.h>
 #include <asm/uaccess.h>
+#include <asm/firmware.h>
+
+#include "setup.h"
 
 /* FIXME: We don't do .init separately.  To do this, we'd need to have
    a separate r2 value in the init and core section, and stub between
@@ -400,6 +403,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 				| (value & 0x03fffffc);
 			break;
 
+		case R_PPC64_REL64:
+			/* 64 bits relative (used by features fixups) */
+			*location = value - (unsigned long)location;
+			break;
+
 		default:
 			printk("%s: Unknown ADD relocation: %lu\n",
 			       me->name,
@@ -413,23 +421,33 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 
 LIST_HEAD(module_bug_list);
 
-int module_finalize(const Elf_Ehdr *hdr,
-		    const Elf_Shdr *sechdrs, struct module *me)
+static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+				    const Elf_Shdr *sechdrs,
+				    const char *name)
 {
 	char *secstrings;
 	unsigned int i;
 
+	secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+	for (i = 1; i < hdr->e_shnum; i++)
+		if (strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
+			return &sechdrs[i];
+	return NULL;
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+		    const Elf_Shdr *sechdrs, struct module *me)
+{
+	const Elf_Shdr *sect;
+
 	me->arch.bug_table = NULL;
 	me->arch.num_bugs = 0;
 
 	/* Find the __bug_table section, if present */
-	secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-	for (i = 1; i < hdr->e_shnum; i++) {
-		if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
-			continue;
-		me->arch.bug_table = (void *) sechdrs[i].sh_addr;
-		me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
-		break;
+	sect = find_section(hdr, sechdrs, "__bug_table");
+	if (sect != NULL) {
+		me->arch.bug_table = (void *) sect->sh_addr;
+		me->arch.num_bugs = sect->sh_size / sizeof(struct bug_entry);
 	}
 
 	/*
@@ -439,6 +457,19 @@ int module_finalize(const Elf_Ehdr *hdr,
 	 */
 	list_add(&me->arch.bug_list, &module_bug_list);
 
+	/* Apply feature fixups */
+	sect = find_section(hdr, sechdrs, "__ftr_fixup");
+	if (sect != NULL)
+		do_feature_fixups(cur_cpu_spec->cpu_features,
+				  (void *)sect->sh_addr,
+				  (void *)sect->sh_addr + sect->sh_size);
+
+	sect = find_section(hdr, sechdrs, "__fw_ftr_fixup");
+	if (sect != NULL)
+		do_feature_fixups(powerpc_firmware_features,
+				  (void *)sect->sh_addr,
+				  (void *)sect->sh_addr + sect->sh_size);
+
 	return 0;
 }
 
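
The new R_PPC64_REL64 case stores a 64-bit self-relative value (the resolved symbol plus addend, minus the address of the slot being relocated); a consumer recovers the absolute address by adding the slot's own address back. A trivial sketch of both directions, with made-up addresses:

    /* Sketch of what the R_PPC64_REL64 case computes and how such a
     * self-relative word is later consumed. Addresses are invented;
     * in the loader, "value" is the resolved target and "location" is
     * where the relocation is applied.
     */
    #include <stdio.h>

    int main(void)
    {
        unsigned long value    = 0xc000000000a00000UL;  /* target address */
        unsigned long location = 0xd000000000123450UL;  /* slot's address */

        /* What apply_relocate_add() stores for R_PPC64_REL64: */
        long rel = (long)(value - location);

        /* What a consumer does to get the absolute address back: */
        unsigned long recovered = location + (unsigned long)rel;

        printf("rel = %ld, recovered ok = %d\n", rel, recovered == value);
        return 0;
    }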
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 865b9648d0d5..bdb412d4b748 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -1014,7 +1014,7 @@ EXPORT_SYMBOL(find_all_nodes);
 /** Checks if the given "compat" string matches one of the strings in
  * the device's "compatible" property
  */
-int device_is_compatible(struct device_node *device, const char *compat)
+int device_is_compatible(const struct device_node *device, const char *compat)
 {
 	const char* cp;
 	int cplen, l;
@@ -1491,7 +1491,8 @@ static int __init prom_reconfig_setup(void)
 __initcall(prom_reconfig_setup);
 #endif
 
-struct property *of_find_property(struct device_node *np, const char *name,
+struct property *of_find_property(const struct device_node *np,
+				  const char *name,
 				  int *lenp)
 {
 	struct property *pp;
@@ -1512,7 +1513,8 @@ struct property *of_find_property(struct device_node *np, const char *name,
  * Find a property with a given name for a given node
  * and return the value.
  */
-const void *get_property(struct device_node *np, const char *name, int *lenp)
+const void *get_property(const struct device_node *np, const char *name,
+			 int *lenp)
 {
 	struct property *pp = of_find_property(np,name,lenp);
 	return pp ? pp->value : NULL;
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 191d0ab09222..a4c2964a3ca6 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -91,6 +91,7 @@ int ucache_bsize;
 unsigned long __init early_init(unsigned long dt_ptr)
 {
 	unsigned long offset = reloc_offset();
+	struct cpu_spec *spec;
 
 	/* First zero the BSS -- use memset_io, some platforms don't have
 	 * caches on yet */
@@ -100,8 +101,11 @@ unsigned long __init early_init(unsigned long dt_ptr)
 	 * Identify the CPU type and fix up code sections
 	 * that depend on which cpu we have.
 	 */
-	identify_cpu(offset, 0);
-	do_cpu_ftr_fixups(offset);
+	spec = identify_cpu(offset);
+
+	do_feature_fixups(spec->cpu_features,
+			  PTRRELOC(&__start___ftr_fixup),
+			  PTRRELOC(&__stop___ftr_fixup));
 
 	return KERNELBASE + offset;
 }
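
On 32-bit, early_init() still runs before the kernel has been relocated to its link address, so the fixup-table bounds handed to do_feature_fixups() are wrapped in PTRRELOC() to displace the link-time addresses by the load offset. A toy illustration of that idea follows; the macro is an invented stand-in, not the kernel's actual PTRRELOC definition.

    #include <stdio.h>

    /* Invented stand-in: displace a link-time address by the load offset. */
    #define TOY_PTRRELOC(ptr, off) ((void *)((unsigned long)(ptr) + (off)))

    int main(void)
    {
        char image[16] = "XXXXhello";
        void *link_time_addr = image;   /* address the "linker" knew about */
        unsigned long offset = 4;       /* how far the data actually moved */

        char *p = TOY_PTRRELOC(link_time_addr, offset);
        printf("%s\n", p);              /* prints "hello" */
        return 0;
    }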
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 4b2e32eab9dc..16278968dab6 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -170,6 +170,9 @@ void __init setup_paca(int cpu)
 
 void __init early_setup(unsigned long dt_ptr)
 {
+	/* Identify CPU type */
+	identify_cpu(0);
+
 	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
 	setup_paca(0);
 
@@ -348,6 +351,14 @@ void __init setup_system(void)
 {
 	DBG(" -> setup_system()\n");
 
+	/* Apply the CPUs-specific and firmware specific fixups to kernel
+	 * text (nop out sections not relevant to this CPU or this firmware)
+	 */
+	do_feature_fixups(cur_cpu_spec->cpu_features,
+			  &__start___ftr_fixup, &__stop___ftr_fixup);
+	do_feature_fixups(powerpc_firmware_features,
+			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
+
 	/*
 	 * Unflatten the device-tree passed by prom_init or kexec
 	 */
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 5b59bc18dfe7..a1b5e4b16151 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -220,11 +220,8 @@ static void account_process_time(struct pt_regs *regs)
  */
 struct cpu_purr_data {
 	int	initialized;			/* thread is running */
-	u64	tb0;			/* timebase at origin time */
-	u64	purr0;			/* PURR at origin time */
 	u64	tb;			/* last TB value read */
 	u64	purr;			/* last PURR value read */
-	u64	stolen;			/* stolen time so far */
 	spinlock_t lock;
 };
 
@@ -234,10 +231,8 @@ static void snapshot_tb_and_purr(void *data)
 {
 	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
 
-	p->tb0 = mftb();
-	p->purr0 = mfspr(SPRN_PURR);
-	p->tb = p->tb0;
-	p->purr = 0;
+	p->tb = mftb();
+	p->purr = mfspr(SPRN_PURR);
 	wmb();
 	p->initialized = 1;
 }
@@ -258,37 +253,24 @@ void snapshot_timebases(void)
 
 void calculate_steal_time(void)
 {
-	u64 tb, purr, t0;
+	u64 tb, purr;
 	s64 stolen;
-	struct cpu_purr_data *p0, *pme, *phim;
-	int cpu;
+	struct cpu_purr_data *pme;
 
 	if (!cpu_has_feature(CPU_FTR_PURR))
 		return;
-	cpu = smp_processor_id();
-	pme = &per_cpu(cpu_purr_data, cpu);
+	pme = &per_cpu(cpu_purr_data, smp_processor_id());
 	if (!pme->initialized)
 		return;	/* this can happen in early boot */
-	p0 = &per_cpu(cpu_purr_data, cpu & ~1);
-	phim = &per_cpu(cpu_purr_data, cpu ^ 1);
-	spin_lock(&p0->lock);
+	spin_lock(&pme->lock);
 	tb = mftb();
-	purr = mfspr(SPRN_PURR) - pme->purr0;
-	if (!phim->initialized || !cpu_online(cpu ^ 1)) {
-		stolen = (tb - pme->tb) - (purr - pme->purr);
-	} else {
-		t0 = pme->tb0;
-		if (phim->tb0 < t0)
-			t0 = phim->tb0;
-		stolen = phim->tb - t0 - phim->purr - purr - p0->stolen;
-	}
-	if (stolen > 0) {
+	purr = mfspr(SPRN_PURR);
+	stolen = (tb - pme->tb) - (purr - pme->purr);
+	if (stolen > 0)
 		account_steal_time(current, stolen);
-		p0->stolen += stolen;
-	}
 	pme->tb = tb;
 	pme->purr = purr;
-	spin_unlock(&p0->lock);
+	spin_unlock(&pme->lock);
 }
 
 /*
@@ -297,30 +279,17 @@ void calculate_steal_time(void)
  */
 static void snapshot_purr(void)
 {
-	int cpu;
-	u64 purr;
-	struct cpu_purr_data *p0, *pme, *phim;
+	struct cpu_purr_data *pme;
 	unsigned long flags;
 
 	if (!cpu_has_feature(CPU_FTR_PURR))
 		return;
-	cpu = smp_processor_id();
-	pme = &per_cpu(cpu_purr_data, cpu);
-	p0 = &per_cpu(cpu_purr_data, cpu & ~1);
-	phim = &per_cpu(cpu_purr_data, cpu ^ 1);
-	spin_lock_irqsave(&p0->lock, flags);
-	pme->tb = pme->tb0 = mftb();
-	purr = mfspr(SPRN_PURR);
-	if (!phim->initialized) {
-		pme->purr = 0;
-		pme->purr0 = purr;
-	} else {
-		/* set p->purr and p->purr0 for no change in p0->stolen */
-		pme->purr = phim->tb - phim->tb0 - phim->purr - p0->stolen;
-		pme->purr0 = purr - pme->purr;
-	}
+	pme = &per_cpu(cpu_purr_data, smp_processor_id());
+	spin_lock_irqsave(&pme->lock, flags);
+	pme->tb = mftb();
+	pme->purr = mfspr(SPRN_PURR);
 	pme->initialized = 1;
-	spin_unlock_irqrestore(&p0->lock, flags);
+	spin_unlock_irqrestore(&pme->lock, flags);
 }
 
 #endif /* CONFIG_PPC_SPLPAR */
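
With the per-thread bookkeeping above, the steal-time estimate reduces to a single difference: the timebase ticks that elapsed since the last snapshot, minus the PURR ticks this thread actually received over the same interval. A toy calculation with invented numbers:

    /* Toy illustration of the simplified steal-time calculation above. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t tb_prev = 1000, purr_prev = 600;    /* last snapshot  */
        uint64_t tb_now  = 2000, purr_now  = 1100;   /* current reads  */

        int64_t stolen = (int64_t)(tb_now - tb_prev) -
                         (int64_t)(purr_now - purr_prev);

        /* 1000 timebase ticks elapsed, 500 went to this thread, so 500
         * were consumed elsewhere (sibling thread or hypervisor).
         */
        printf("stolen ticks: %lld\n", (long long)stolen);
        return 0;
    }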
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 1a7e19cdab39..c913ad5cad29 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -36,6 +36,8 @@
 #include <asm/vdso.h>
 #include <asm/vdso_datapage.h>
 
+#include "setup.h"
+
 #undef DEBUG
 
 #ifdef DEBUG
@@ -586,6 +588,43 @@ static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
 	return 0;
 }
 
+
+static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
+				      struct lib64_elfinfo *v64)
+{
+	void *start32;
+	unsigned long size32;
+
+#ifdef CONFIG_PPC64
+	void *start64;
+	unsigned long size64;
+
+	start64 = find_section64(v64->hdr, "__ftr_fixup", &size64);
+	if (start64)
+		do_feature_fixups(cur_cpu_spec->cpu_features,
+				  start64, start64 + size64);
+
+	start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64);
+	if (start64)
+		do_feature_fixups(powerpc_firmware_features,
+				  start64, start64 + size64);
+#endif /* CONFIG_PPC64 */
+
+	start32 = find_section32(v32->hdr, "__ftr_fixup", &size32);
+	if (start32)
+		do_feature_fixups(cur_cpu_spec->cpu_features,
+				  start32, start32 + size32);
+
+#ifdef CONFIG_PPC64
+	start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32);
+	if (start32)
+		do_feature_fixups(powerpc_firmware_features,
+				  start32, start32 + size32);
+#endif /* CONFIG_PPC64 */
+
+	return 0;
+}
+
 static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32,
 				       struct lib64_elfinfo *v64)
 {
@@ -634,6 +673,9 @@ static __init int vdso_setup(void)
 	if (vdso_fixup_datapage(&v32, &v64))
 		return -1;
 
+	if (vdso_fixup_features(&v32, &v64))
+		return -1;
+
 	if (vdso_fixup_alt_funcs(&v32, &v64))
 		return -1;
 
@@ -714,6 +756,7 @@ void __init vdso_init(void)
 	 * Setup the syscall map in the vDSO
 	 */
 	vdso_setup_syscall_map();
+
 	/*
 	 * Initialize the vDSO images in memory, that is do necessary
 	 * fixups of vDSO symbols, locate trampolines, etc...
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 6187af2d54c3..26e138c4ce17 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -32,6 +32,18 @@ SECTIONS
   PROVIDE (_etext = .);
   PROVIDE (etext = .);
 
+  . = ALIGN(8);
+  __ftr_fixup	: {
+	*(__ftr_fixup)
+  }
+
+#ifdef CONFIG_PPC64
+  . = ALIGN(8);
+  __fw_ftr_fixup	: {
+	*(__fw_ftr_fixup)
+  }
+#endif
+
   /* Other stuff is appended to the text segment: */
   .rodata		: { *(.rodata .rodata.* .gnu.linkonce.r.*) }
   .rodata1		: { *(.rodata1) }
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index 56e76ff5498f..40ffd9b6cef7 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -229,8 +229,10 @@ V_FUNCTION_BEGIN(__do_get_xsec)
 	xor	r0,r8,r8		/* create dependency */
 	add	r3,r3,r0
 
-	/* Get TB & offset it */
-	mftb	r7
+	/* Get TB & offset it. We use the MFTB macro which will generate
+	 * workaround code for Cell.
+	 */
+	MFTB(r7)
 	ld	r9,CFG_TB_ORIG_STAMP(r3)
 	subf	r7,r9,r7
 
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 4a2b6dc0960c..2d70f35d50b5 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -31,6 +31,16 @@ SECTIONS
   PROVIDE (_etext = .);
   PROVIDE (etext = .);
 
+  . = ALIGN(8);
+  __ftr_fixup	: {
+	*(__ftr_fixup)
+  }
+
+  . = ALIGN(8);
+  __fw_ftr_fixup	: {
+	*(__fw_ftr_fixup)
+  }
+
   /* Other stuff is appended to the text segment: */
   .rodata		: { *(.rodata .rodata.* .gnu.linkonce.r.*) }
   .rodata1		: { *(.rodata1) }
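
Both vDSO linker scripts now collect the fixup records into dedicated __ftr_fixup/__fw_ftr_fixup output sections, mirroring what the kernel image does with the linker-provided __start___ftr_fixup/__stop___ftr_fixup bounds used in setup_32.c and setup_64.c above. The general pattern (accumulate records into a named section, then walk it between start/stop symbols) can be sketched in ordinary user space with GNU ld's automatic __start_<name>/__stop_<name> symbols; all names below are invented for the example.

    #include <stdio.h>

    struct toy_fixup {
        unsigned long mask;
        unsigned long value;
    };

    /* Drop one record into the "toy_fixups" section. */
    #define TOY_FIXUP(m, v) \
        static const struct toy_fixup \
        __attribute__((used, section("toy_fixups"))) fixup_##v = { (m), (v) }

    TOY_FIXUP(0x1, 0x1);
    TOY_FIXUP(0x2, 0x0);

    /* GNU ld creates these automatically for identifier-named sections. */
    extern const struct toy_fixup __start_toy_fixups[];
    extern const struct toy_fixup __stop_toy_fixups[];

    int main(void)
    {
        const struct toy_fixup *f;

        for (f = __start_toy_fixups; f < __stop_toy_fixups; f++)
            printf("mask=%#lx value=%#lx\n", f->mask, f->value);
        return 0;
    }

In the kernel the equivalent bounds for the image's own sections come from the vmlinux linker script, while modules and the vDSOs locate their sections by name at load time, as the earlier hunks in this patch show.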