Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/entry_32.S      |  17
-rw-r--r--  arch/powerpc/kernel/entry_64.S      |  10
-rw-r--r--  arch/powerpc/kernel/legacy_serial.c |  44
-rw-r--r--  arch/powerpc/kernel/process.c       |   8
-rw-r--r--  arch/powerpc/kernel/prom_init.c     |  39
-rw-r--r--  arch/powerpc/kernel/ptrace.c        |  54
-rw-r--r--  arch/powerpc/kernel/setup-common.c  |  24
-rw-r--r--  arch/powerpc/kernel/setup_64.c      |   3
-rw-r--r--  arch/powerpc/kernel/signal.c        |  23
-rw-r--r--  arch/powerpc/kernel/smp.c           | 119
-rw-r--r--  arch/powerpc/kernel/stacktrace.c    |   1
-rw-r--r--  arch/powerpc/kernel/sysfs.c         | 311
-rw-r--r--  arch/powerpc/kernel/vio.c           |   6
13 files changed, 517 insertions(+), 142 deletions(-)
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 81c8324a4a3c..1cbbf7033641 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -148,7 +148,7 @@ transfer_to_handler:
 	/* Check to see if the dbcr0 register is set up to debug. Use the
 	   internal debug mode bit to do this. */
 	lwz	r12,THREAD_DBCR0(r12)
-	andis.	r12,r12,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
+	andis.	r12,r12,DBCR0_IDM@h
 	beq+	3f
 	/* From user and task is ptraced - load up global dbcr0 */
 	li	r12,-1		/* clear all pending debug events */
@@ -292,7 +292,7 @@ syscall_exit_cont:
 	/* If the process has its own DBCR0 value, load it up. The internal
 	   debug mode bit tells us that dbcr0 should be loaded. */
 	lwz	r0,THREAD+THREAD_DBCR0(r2)
-	andis.	r10,r0,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
+	andis.	r10,r0,DBCR0_IDM@h
 	bnel-	load_dbcr0
 #endif
 #ifdef CONFIG_44x
@@ -343,7 +343,12 @@ syscall_dotrace:
 	stw	r0,_TRAP(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	do_syscall_trace_enter
-	lwz	r0,GPR0(r1)	/* Restore original registers */
+	/*
+	 * Restore argument registers possibly just changed.
+	 * We use the return value of do_syscall_trace_enter
+	 * for call number to look up in the table (r0).
+	 */
+	mr	r0,r3
 	lwz	r3,GPR3(r1)
 	lwz	r4,GPR4(r1)
 	lwz	r5,GPR5(r1)
@@ -720,7 +725,7 @@ restore_user:
 	/* Check whether this process has its own DBCR0 value. The internal
 	   debug mode bit tells us that dbcr0 should be loaded. */
 	lwz	r0,THREAD+THREAD_DBCR0(r2)
-	andis.	r10,r0,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
+	andis.	r10,r0,DBCR0_IDM@h
 	bnel-	load_dbcr0
 #endif
 
@@ -1055,8 +1060,8 @@ do_user_signal: /* r10 contains MSR_KERNEL here */
 	SAVE_NVGPRS(r1)
 	rlwinm	r3,r3,0,0,30
 	stw	r3,_TRAP(r1)
-2:	li	r3,0
-	addi	r4,r1,STACK_FRAME_OVERHEAD
+2:	addi	r3,r1,STACK_FRAME_OVERHEAD
+	mr	r4,r9
 	bl	do_signal
 	REST_NVGPRS(r1)
 	b	recheck
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index d7369243ae44..2d802e97097c 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -214,7 +214,12 @@ syscall_dotrace:
 	bl	.save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.do_syscall_trace_enter
-	ld	r0,GPR0(r1)	/* Restore original registers */
+	/*
+	 * Restore argument registers possibly just changed.
+	 * We use the return value of do_syscall_trace_enter
+	 * for the call number to look up in the table (r0).
+	 */
+	mr	r0,r3
 	ld	r3,GPR3(r1)
 	ld	r4,GPR4(r1)
 	ld	r5,GPR5(r1)
@@ -638,8 +643,7 @@ user_work:
 	b	.ret_from_except_lite
 
 1:	bl	.save_nvgprs
-	li	r3,0
-	addi	r4,r1,STACK_FRAME_OVERHEAD
+	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.do_signal
 	b	.ret_from_except
 
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 4d96e1db55ee..9ddfaef1a184 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -493,18 +493,18 @@ static int __init serial_dev_init(void)
 device_initcall(serial_dev_init);
 
 
+#ifdef CONFIG_SERIAL_8250_CONSOLE
 /*
  * This is called very early, as part of console_init() (typically just after
  * time_init()). This function is respondible for trying to find a good
  * default console on serial ports. It tries to match the open firmware
- * default output with one of the available serial console drivers, either
- * one of the platform serial ports that have been probed earlier by
- * find_legacy_serial_ports() or some more platform specific ones.
+ * default output with one of the available serial console drivers that have
+ * been probed earlier by find_legacy_serial_ports()
  */
 static int __init check_legacy_serial_console(void)
 {
 	struct device_node *prom_stdout = NULL;
-	int speed = 0, offset = 0;
+	int i, speed = 0, offset = 0;
 	const char *name;
 	const u32 *spd;
 
@@ -548,31 +548,20 @@ static int __init check_legacy_serial_console(void)
 	if (spd)
 		speed = *spd;
 
-	if (0)
-		;
-#ifdef CONFIG_SERIAL_8250_CONSOLE
-	else if (strcmp(name, "serial") == 0) {
-		int i;
-		/* Look for it in probed array */
-		for (i = 0; i < legacy_serial_count; i++) {
-			if (prom_stdout != legacy_serial_infos[i].np)
-				continue;
-			offset = i;
-			speed = legacy_serial_infos[i].speed;
-			break;
-		}
-		if (i >= legacy_serial_count)
-			goto not_found;
-	}
-#endif /* CONFIG_SERIAL_8250_CONSOLE */
-#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
-	else if (strcmp(name, "ch-a") == 0)
-		offset = 0;
-	else if (strcmp(name, "ch-b") == 0)
-		offset = 1;
-#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
-	else
+	if (strcmp(name, "serial") != 0)
+		goto not_found;
+
+	/* Look for it in probed array */
+	for (i = 0; i < legacy_serial_count; i++) {
+		if (prom_stdout != legacy_serial_infos[i].np)
+			continue;
+		offset = i;
+		speed = legacy_serial_infos[i].speed;
+		break;
+	}
+	if (i >= legacy_serial_count)
 		goto not_found;
+
 	of_node_put(prom_stdout);
 
 	DBG("Found serial console at ttyS%d\n", offset);
@@ -591,3 +580,4 @@ static int __init check_legacy_serial_console(void)
 }
 console_initcall(check_legacy_serial_console);
 
+#endif /* CONFIG_SERIAL_8250_CONSOLE */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index db2497ccc111..e030f3bd5024 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -254,7 +254,7 @@ void do_dabr(struct pt_regs *regs, unsigned long address,
 		return;
 
 	/* Clear the DAC and struct entries. One shot trigger */
-#if (defined(CONFIG_44x) || defined(CONFIG_BOOKE))
+#if defined(CONFIG_BOOKE)
 	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W
 							| DBCR0_IDM));
 #endif
@@ -286,7 +286,7 @@ int set_dabr(unsigned long dabr)
 	mtspr(SPRN_DABR, dabr);
 #endif
 
-#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
+#if defined(CONFIG_BOOKE)
 	mtspr(SPRN_DAC1, dabr);
 #endif
 
@@ -373,7 +373,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
 		set_dabr(new->thread.dabr);
 
-#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
+#if defined(CONFIG_BOOKE)
 	/* If new thread DAC (HW breakpoint) is the same then leave it */
 	if (new->thread.dabr)
 		set_dabr(new->thread.dabr);
@@ -568,7 +568,7 @@ void flush_thread(void)
 		current->thread.dabr = 0;
 		set_dabr(0);
 
-#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
+#if defined(CONFIG_BOOKE)
 		current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W);
 #endif
 	}
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index c4ab2195b9cb..b72849ac7db3 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -205,8 +205,6 @@ static int __initdata mem_reserve_cnt;
 static cell_t __initdata regbuf[1024];
 
 
-#define MAX_CPU_THREADS 2
-
 /*
  * Error results ... some OF calls will return "-1" on error, some
  * will return 0, some will return either. To simplify, here are
@@ -1339,10 +1337,6 @@ static void __init prom_hold_cpus(void)
 	unsigned int reg;
 	phandle node;
 	char type[64];
-	int cpuid = 0;
-	unsigned int interrupt_server[MAX_CPU_THREADS];
-	unsigned int cpu_threads, hw_cpu_num;
-	int propsize;
 	struct prom_t *_prom = &RELOC(prom);
 	unsigned long *spinloop
 		= (void *) LOW_ADDR(__secondary_hold_spinloop);
@@ -1386,7 +1380,6 @@ static void __init prom_hold_cpus(void)
 		reg = -1;
 		prom_getprop(node, "reg", &reg, sizeof(reg));
 
-		prom_debug("\ncpuid = 0x%x\n", cpuid);
 		prom_debug("cpu hw idx = 0x%x\n", reg);
 
 		/* Init the acknowledge var which will be reset by
@@ -1395,28 +1388,9 @@ static void __init prom_hold_cpus(void)
 		 */
 		*acknowledge = (unsigned long)-1;
 
-		propsize = prom_getprop(node, "ibm,ppc-interrupt-server#s",
-					&interrupt_server,
-					sizeof(interrupt_server));
-		if (propsize < 0) {
-			/* no property. old hardware has no SMT */
-			cpu_threads = 1;
-			interrupt_server[0] = reg; /* fake it with phys id */
-		} else {
-			/* We have a threaded processor */
-			cpu_threads = propsize / sizeof(u32);
-			if (cpu_threads > MAX_CPU_THREADS) {
-				prom_printf("SMT: too many threads!\n"
-					    "SMT: found %x, max is %x\n",
-					    cpu_threads, MAX_CPU_THREADS);
-				cpu_threads = 1; /* ToDo: panic? */
-			}
-		}
-
-		hw_cpu_num = interrupt_server[0];
-		if (hw_cpu_num != _prom->cpu) {
+		if (reg != _prom->cpu) {
 			/* Primary Thread of non-boot cpu */
-			prom_printf("%x : starting cpu hw idx %x... ", cpuid, reg);
+			prom_printf("starting cpu hw idx %x... ", reg);
 			call_prom("start-cpu", 3, 0, node,
 				  secondary_hold, reg);
 
@@ -1431,17 +1405,10 @@ static void __init prom_hold_cpus(void)
 		}
 #ifdef CONFIG_SMP
 		else
-			prom_printf("%x : boot cpu %x\n", cpuid, reg);
+			prom_printf("boot cpu hw idx %x\n", reg);
 #endif /* CONFIG_SMP */
-
-		/* Reserve cpu #s for secondary threads. They start later. */
-		cpuid += cpu_threads;
 	}
 
-	if (cpuid > NR_CPUS)
-		prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS)
-			    ") exceeded: ignoring extras\n");
-
 	prom_debug("prom_hold_cpus: end...\n");
 }
 
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index a5d0e78779c8..6b66cd85b433 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -22,6 +22,7 @@
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <linux/regset.h>
+#include <linux/tracehook.h>
 #include <linux/elf.h>
 #include <linux/user.h>
 #include <linux/security.h>
@@ -717,7 +718,7 @@ void user_disable_single_step(struct task_struct *task)
 	struct pt_regs *regs = task->thread.regs;
 
 
-#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
+#if defined(CONFIG_BOOKE)
 	/* If DAC then do not single step, skip */
 	if (task->thread.dabr)
 		return;
@@ -744,10 +745,11 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
 	if (addr > 0)
 		return -EINVAL;
 
+	/* The bottom 3 bits in dabr are flags */
 	if ((data & ~0x7UL) >= TASK_SIZE)
 		return -EIO;
 
-#ifdef CONFIG_PPC64
+#ifndef CONFIG_BOOKE
 
 	/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
 	 *  It was assumed, on previous implementations, that 3 bits were
@@ -769,7 +771,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
 	task->thread.dabr = data;
 
 #endif
-#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
+#if defined(CONFIG_BOOKE)
 
 	/* As described above, it was assumed 3 bits were passed with the data
 	 *  address, but we will assume only the mode bits will be passed
@@ -1013,31 +1015,24 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	return ret;
 }
 
-static void do_syscall_trace(void)
+/*
+ * We must return the syscall number to actually look up in the table.
+ * This can be -1L to skip running any syscall at all.
+ */
+long do_syscall_trace_enter(struct pt_regs *regs)
 {
-	/* the 0x80 provides a way for the tracing parent to distinguish
-	   between a syscall stop and SIGTRAP delivery */
-	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
-				 ? 0x80 : 0));
-
-	/*
-	 * this isn't the same as continuing with a signal, but it will do
-	 * for normal use. strace only continues with a signal if the
-	 * stopping signal is not SIGTRAP. -brl
-	 */
-	if (current->exit_code) {
-		send_sig(current->exit_code, current, 1);
-		current->exit_code = 0;
-	}
-}
+	long ret = 0;
 
-void do_syscall_trace_enter(struct pt_regs *regs)
-{
 	secure_computing(regs->gpr[0]);
 
-	if (test_thread_flag(TIF_SYSCALL_TRACE)
-	    && (current->ptrace & PT_PTRACED))
-		do_syscall_trace();
+	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+	    tracehook_report_syscall_entry(regs))
+		/*
+		 * Tracing decided this syscall should not happen.
+		 * We'll return a bogus call number to get an ENOSYS
+		 * error, but leave the original number in regs->gpr[0].
+		 */
+		ret = -1L;
 
 	if (unlikely(current->audit_context)) {
 #ifdef CONFIG_PPC64
@@ -1055,16 +1050,19 @@ void do_syscall_trace_enter(struct pt_regs *regs)
 				    regs->gpr[5] & 0xffffffff,
 				    regs->gpr[6] & 0xffffffff);
 	}
+
+	return ret ?: regs->gpr[0];
 }
 
 void do_syscall_trace_leave(struct pt_regs *regs)
 {
+	int step;
+
 	if (unlikely(current->audit_context))
 		audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
 				   regs->result);
 
-	if ((test_thread_flag(TIF_SYSCALL_TRACE)
-	     || test_thread_flag(TIF_SINGLESTEP))
-	    && (current->ptrace & PT_PTRACED))
-		do_syscall_trace();
+	step = test_thread_flag(TIF_SINGLESTEP);
+	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall_exit(regs, step);
 }
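
A minimal sketch, not from this patch, of the contract the callers in entry_32.S/entry_64.S now follow ("mr r0,r3" then the table lookup). The C dispatch shape, the NR_syscalls bound and the sys_call_table call are illustrative assumptions only:

/* Illustrative sketch only -- the real dispatch is the assembly above.
 * NR_syscalls, the sys_call_table type and this C entry shape are
 * assumptions for the sake of the example.
 */
long syscall_dispatch(struct pt_regs *regs)
{
	long nr = regs->gpr[0];

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		/* May return -1L when tracing vetoes the call. */
		nr = do_syscall_trace_enter(regs);

	if (nr < 0 || nr >= NR_syscalls)
		return -ENOSYS;	/* regs->gpr[0] still holds the original number */

	return sys_call_table[nr](regs->gpr[3], regs->gpr[4], regs->gpr[5],
				  regs->gpr[6], regs->gpr[7], regs->gpr[8]);
}
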
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 61a3f4132087..9cc5a52711e5 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -367,7 +367,6 @@ static void __init cpu_init_thread_core_maps(int tpc)
  * setup_cpu_maps - initialize the following cpu maps:
  *          cpu_possible_map
  *          cpu_present_map
- *          cpu_sibling_map
  *
  * Having the possible map set up early allows us to restrict allocations
  * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
@@ -475,29 +474,6 @@ void __init smp_setup_cpu_maps(void)
 	 */
 	cpu_init_thread_core_maps(nthreads);
 }
-
-/*
- * Being that cpu_sibling_map is now a per_cpu array, then it cannot
- * be initialized until the per_cpu areas have been created.  This
- * function is now called from setup_per_cpu_areas().
- */
-void __init smp_setup_cpu_sibling_map(void)
-{
-#ifdef CONFIG_PPC64
-	int i, cpu, base;
-
-	for_each_possible_cpu(cpu) {
-		DBG("Sibling map for CPU %d:", cpu);
-		base = cpu_first_thread_in_core(cpu);
-		for (i = 0; i < threads_per_core; i++) {
-			cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
-			DBG(" %d", base + i);
-		}
-		DBG("\n");
-	}
-
-#endif /* CONFIG_PPC64 */
-}
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_PCSPKR_PLATFORM
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 04d8de9f0fc6..8b25f51f03bf 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -611,9 +611,6 @@ void __init setup_per_cpu_areas(void)
 		paca[i].data_offset = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
 	}
-
-	/* Now that per_cpu is setup, initialize cpu_sibling_map */
-	smp_setup_cpu_sibling_map();
 }
 #endif
 
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 7aada783ec6a..a54405ebd7b0 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -9,7 +9,7 @@
  * this archive for more details.
  */
 
-#include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/signal.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -112,7 +112,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
 	}
 }
 
-int do_signal(sigset_t *oldset, struct pt_regs *regs)
+static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
 {
 	siginfo_t info;
 	int signr;
@@ -147,7 +147,7 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
 	 */
 	if (current->thread.dabr) {
 		set_dabr(current->thread.dabr);
-#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
+#if defined(CONFIG_BOOKE)
 		mtspr(SPRN_DBCR0, current->thread.dbcr0);
 #endif
 	}
@@ -177,11 +177,28 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
 		 * its frame, and we can clear the TLF_RESTORE_SIGMASK flag.
 		 */
 		current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;
+
+		/*
+		 * Let tracing know that we've done the handler setup.
+		 */
+		tracehook_signal_handler(signr, &info, &ka, regs,
+					 test_thread_flag(TIF_SINGLESTEP));
 	}
 
 	return ret;
 }
 
+void do_signal(struct pt_regs *regs, unsigned long thread_info_flags)
+{
+	if (thread_info_flags & _TIF_SIGPENDING)
+		do_signal_pending(NULL, regs);
+
+	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+	}
+}
+
 long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
 		unsigned long r5, unsigned long r6, unsigned long r7,
 		unsigned long r8, struct pt_regs *regs)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index f5ae9fa222ea..5337ca7bb649 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -41,6 +41,7 @@
 #include <asm/smp.h>
 #include <asm/time.h>
 #include <asm/machdep.h>
+#include <asm/cputhreads.h>
 #include <asm/cputable.h>
 #include <asm/system.h>
 #include <asm/mpic.h>
@@ -62,10 +63,12 @@ struct thread_info *secondary_ti;
 cpumask_t cpu_possible_map = CPU_MASK_NONE;
 cpumask_t cpu_online_map = CPU_MASK_NONE;
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
+DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
 
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(cpu_possible_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /* SMP operations for this machine */
 struct smp_ops_t *smp_ops;
@@ -228,6 +231,8 @@ void __devinit smp_prepare_boot_cpu(void)
 	BUG_ON(smp_processor_id() != boot_cpuid);
 
 	cpu_set(boot_cpuid, cpu_online_map);
+	cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
+	cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
 #ifdef CONFIG_PPC64
 	paca[boot_cpuid].__current = current;
 #endif
@@ -375,11 +380,60 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	return 0;
 }
 
+/* Return the value of the reg property corresponding to the given
+ * logical cpu.
+ */
+int cpu_to_core_id(int cpu)
+{
+	struct device_node *np;
+	const int *reg;
+	int id = -1;
+
+	np = of_get_cpu_node(cpu, NULL);
+	if (!np)
+		goto out;
+
+	reg = of_get_property(np, "reg", NULL);
+	if (!reg)
+		goto out;
+
+	id = *reg;
+out:
+	of_node_put(np);
+	return id;
+}
+
+/* Must be called when no change can occur to cpu_present_map,
+ * i.e. during cpu online or offline.
+ */
+static struct device_node *cpu_to_l2cache(int cpu)
+{
+	struct device_node *np;
+	const phandle *php;
+	phandle ph;
+
+	if (!cpu_present(cpu))
+		return NULL;
+
+	np = of_get_cpu_node(cpu, NULL);
+	if (np == NULL)
+		return NULL;
+
+	php = of_get_property(np, "l2-cache", NULL);
+	if (php == NULL)
+		return NULL;
+	ph = *php;
+	of_node_put(np);
+
+	return of_find_node_by_phandle(ph);
+}
 
 /* Activate a secondary processor. */
 int __devinit start_secondary(void *unused)
 {
 	unsigned int cpu = smp_processor_id();
+	struct device_node *l2_cache;
+	int i, base;
 
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
@@ -400,6 +454,33 @@ int __devinit start_secondary(void *unused)
 
 	ipi_call_lock();
 	cpu_set(cpu, cpu_online_map);
+	/* Update sibling maps */
+	base = cpu_first_thread_in_core(cpu);
+	for (i = 0; i < threads_per_core; i++) {
+		if (cpu_is_offline(base + i))
+			continue;
+		cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
+		cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
+
+		/* cpu_core_map should be a superset of
+		 * cpu_sibling_map even if we don't have cache
+		 * information, so update the former here, too.
+		 */
+		cpu_set(cpu, per_cpu(cpu_core_map, base +i));
+		cpu_set(base + i, per_cpu(cpu_core_map, cpu));
+	}
+	l2_cache = cpu_to_l2cache(cpu);
+	for_each_online_cpu(i) {
+		struct device_node *np = cpu_to_l2cache(i);
+		if (!np)
+			continue;
+		if (np == l2_cache) {
+			cpu_set(cpu, per_cpu(cpu_core_map, i));
+			cpu_set(i, per_cpu(cpu_core_map, cpu));
+		}
+		of_node_put(np);
+	}
+	of_node_put(l2_cache);
 	ipi_call_unlock();
 
 	local_irq_enable();
@@ -437,10 +518,42 @@ void __init smp_cpus_done(unsigned int max_cpus)
 #ifdef CONFIG_HOTPLUG_CPU
 int __cpu_disable(void)
 {
-	if (smp_ops->cpu_disable)
-		return smp_ops->cpu_disable();
+	struct device_node *l2_cache;
+	int cpu = smp_processor_id();
+	int base, i;
+	int err;
 
-	return -ENOSYS;
+	if (!smp_ops->cpu_disable)
+		return -ENOSYS;
+
+	err = smp_ops->cpu_disable();
+	if (err)
+		return err;
+
+	/* Update sibling maps */
+	base = cpu_first_thread_in_core(cpu);
+	for (i = 0; i < threads_per_core; i++) {
+		cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
+		cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
+		cpu_clear(cpu, per_cpu(cpu_core_map, base +i));
+		cpu_clear(base + i, per_cpu(cpu_core_map, cpu));
+	}
+
+	l2_cache = cpu_to_l2cache(cpu);
+	for_each_present_cpu(i) {
+		struct device_node *np = cpu_to_l2cache(i);
+		if (!np)
+			continue;
+		if (np == l2_cache) {
+			cpu_clear(cpu, per_cpu(cpu_core_map, i));
+			cpu_clear(i, per_cpu(cpu_core_map, cpu));
+		}
+		of_node_put(np);
+	}
+	of_node_put(l2_cache);
+
+
+	return 0;
 }
 
 void __cpu_die(unsigned int cpu)
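
As a reading aid, a hedged sketch of the intended semantics of the two per-cpu masks this file now maintains; the helper names below are invented for this sketch and are not in the tree:

/* Illustration only: after the updates in start_secondary() and
 * __cpu_disable() above, cpu_sibling_map links hardware threads of one
 * core, while cpu_core_map additionally links cpus that share an L2
 * and is kept a superset of cpu_sibling_map.
 */
static inline int same_core(int a, int b)
{
	return cpu_isset(b, per_cpu(cpu_sibling_map, a));
}

static inline int shares_l2(int a, int b)
{
	return cpu_isset(b, per_cpu(cpu_core_map, a));
}
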
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index f2589645870a..b0dbb1daa4df 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -13,7 +13,6 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/stacktrace.h>
-#include <linux/module.h>
 #include <asm/ptrace.h>
 #include <asm/processor.h>
 
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 800e5e9a087b..56d172d16e56 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -22,6 +22,8 @@
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
+static DEFINE_PER_CPU(struct kobject *, cache_toplevel);
+
 /* SMT stuff */
 
 #ifdef CONFIG_PPC_MULTIPLATFORM
@@ -297,8 +299,289 @@ static struct sysdev_attribute pa6t_attrs[] = {
 #endif /* CONFIG_DEBUG_KERNEL */
 };
 
+struct cache_desc {
+	struct kobject kobj;
+	struct cache_desc *next;
+	const char *type;	/* Instruction, Data, or Unified */
+	u32 size;		/* total cache size in KB */
+	u32 line_size;		/* in bytes */
+	u32 nr_sets;		/* number of sets */
+	u32 level;		/* e.g. 1, 2, 3... */
+	u32 associativity;	/* e.g. 8-way... 0 is fully associative */
+};
+
+DEFINE_PER_CPU(struct cache_desc *, cache_desc);
+
+static struct cache_desc *kobj_to_cache_desc(struct kobject *k)
+{
+	return container_of(k, struct cache_desc, kobj);
+}
+
+static void cache_desc_release(struct kobject *k)
+{
+	struct cache_desc *desc = kobj_to_cache_desc(k);
+
+	pr_debug("%s: releasing %s\n", __func__, kobject_name(k));
+
+	if (desc->next)
+		kobject_put(&desc->next->kobj);
+
+	kfree(kobj_to_cache_desc(k));
+}
+
+static ssize_t cache_desc_show(struct kobject *k, struct attribute *attr, char *buf)
+{
+	struct kobj_attribute *kobj_attr;
+
+	kobj_attr = container_of(attr, struct kobj_attribute, attr);
+
+	return kobj_attr->show(k, kobj_attr, buf);
+}
+
+static struct sysfs_ops cache_desc_sysfs_ops = {
+	.show = cache_desc_show,
+};
+
+static struct kobj_type cache_desc_type = {
+	.release = cache_desc_release,
+	.sysfs_ops = &cache_desc_sysfs_ops,
+};
+
+static ssize_t cache_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	struct cache_desc *cache = kobj_to_cache_desc(k);
+
+	return sprintf(buf, "%uK\n", cache->size);
+}
+
+static struct kobj_attribute cache_size_attr =
+	__ATTR(size, 0444, cache_size_show, NULL);
+
+static ssize_t cache_line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	struct cache_desc *cache = kobj_to_cache_desc(k);
+
+	return sprintf(buf, "%u\n", cache->line_size);
+}
+
+static struct kobj_attribute cache_line_size_attr =
+	__ATTR(coherency_line_size, 0444, cache_line_size_show, NULL);
+
+static ssize_t cache_nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	struct cache_desc *cache = kobj_to_cache_desc(k);
+
+	return sprintf(buf, "%u\n", cache->nr_sets);
+}
+
+static struct kobj_attribute cache_nr_sets_attr =
+	__ATTR(number_of_sets, 0444, cache_nr_sets_show, NULL);
+
+static ssize_t cache_type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	struct cache_desc *cache = kobj_to_cache_desc(k);
+
+	return sprintf(buf, "%s\n", cache->type);
+}
+
+static struct kobj_attribute cache_type_attr =
+	__ATTR(type, 0444, cache_type_show, NULL);
+
+static ssize_t cache_level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	struct cache_desc *cache = kobj_to_cache_desc(k);
+
+	return sprintf(buf, "%u\n", cache->level);
+}
+
+static struct kobj_attribute cache_level_attr =
+	__ATTR(level, 0444, cache_level_show, NULL);
+
+static ssize_t cache_assoc_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	struct cache_desc *cache = kobj_to_cache_desc(k);
+
+	return sprintf(buf, "%u\n", cache->associativity);
+}
+
+static struct kobj_attribute cache_assoc_attr =
+	__ATTR(ways_of_associativity, 0444, cache_assoc_show, NULL);
+
+struct cache_desc_info {
+	const char *type;
+	const char *size_prop;
+	const char *line_size_prop;
+	const char *nr_sets_prop;
+};
+
+/* PowerPC Processor binding says the [di]-cache-* must be equal on
+ * unified caches, so just use d-cache properties. */
+static struct cache_desc_info ucache_info = {
+	.type = "Unified",
+	.size_prop = "d-cache-size",
+	.line_size_prop = "d-cache-line-size",
+	.nr_sets_prop = "d-cache-sets",
+};
 
-static void register_cpu_online(unsigned int cpu)
+static struct cache_desc_info dcache_info = {
+	.type = "Data",
+	.size_prop = "d-cache-size",
+	.line_size_prop = "d-cache-line-size",
+	.nr_sets_prop = "d-cache-sets",
+};
+
+static struct cache_desc_info icache_info = {
+	.type = "Instruction",
+	.size_prop = "i-cache-size",
+	.line_size_prop = "i-cache-line-size",
+	.nr_sets_prop = "i-cache-sets",
+};
+
+static struct cache_desc * __cpuinit create_cache_desc(struct device_node *np, struct kobject *parent, int index, int level, struct cache_desc_info *info)
+{
+	const u32 *cache_line_size;
+	struct cache_desc *new;
+	const u32 *cache_size;
+	const u32 *nr_sets;
+	int rc;
+
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return NULL;
+
+	rc = kobject_init_and_add(&new->kobj, &cache_desc_type, parent,
+				  "index%d", index);
+	if (rc)
+		goto err;
+
+	/* type */
+	new->type = info->type;
+	rc = sysfs_create_file(&new->kobj, &cache_type_attr.attr);
+	WARN_ON(rc);
+
+	/* level */
+	new->level = level;
+	rc = sysfs_create_file(&new->kobj, &cache_level_attr.attr);
+	WARN_ON(rc);
+
+	/* size */
+	cache_size = of_get_property(np, info->size_prop, NULL);
+	if (cache_size) {
+		new->size = *cache_size / 1024;
+		rc = sysfs_create_file(&new->kobj,
+				       &cache_size_attr.attr);
+		WARN_ON(rc);
+	}
+
+	/* coherency_line_size */
+	cache_line_size = of_get_property(np, info->line_size_prop, NULL);
+	if (cache_line_size) {
+		new->line_size = *cache_line_size;
+		rc = sysfs_create_file(&new->kobj,
+				       &cache_line_size_attr.attr);
+		WARN_ON(rc);
+	}
+
+	/* number_of_sets */
+	nr_sets = of_get_property(np, info->nr_sets_prop, NULL);
+	if (nr_sets) {
+		new->nr_sets = *nr_sets;
+		rc = sysfs_create_file(&new->kobj,
+				       &cache_nr_sets_attr.attr);
+		WARN_ON(rc);
+	}
+
+	/* ways_of_associativity */
+	if (new->nr_sets == 1) {
+		/* fully associative */
+		new->associativity = 0;
+		goto create_assoc;
+	}
+
+	if (new->nr_sets && new->size && new->line_size) {
+		/* If we have values for all of these we can derive
+		 * the associativity. */
+		new->associativity =
+			((new->size * 1024) / new->nr_sets) / new->line_size;
+create_assoc:
+		rc = sysfs_create_file(&new->kobj,
+				       &cache_assoc_attr.attr);
+		WARN_ON(rc);
+	}
+
+	return new;
+err:
+	kfree(new);
+	return NULL;
+}
+
+static bool cache_is_unified(struct device_node *np)
+{
+	return of_get_property(np, "cache-unified", NULL);
+}
+
+static struct cache_desc * __cpuinit create_cache_index_info(struct device_node *np, struct kobject *parent, int index, int level)
+{
+	const phandle *next_cache_phandle;
+	struct device_node *next_cache;
+	struct cache_desc *new, **end;
+
+	pr_debug("%s(node = %s, index = %d)\n", __func__, np->full_name, index);
+
+	if (cache_is_unified(np)) {
+		new = create_cache_desc(np, parent, index, level,
+					&ucache_info);
+	} else {
+		new = create_cache_desc(np, parent, index, level,
+					&dcache_info);
+		if (new) {
+			index++;
+			new->next = create_cache_desc(np, parent, index, level,
+						      &icache_info);
+		}
+	}
+	if (!new)
+		return NULL;
+
+	end = &new->next;
+	while (*end)
+		end = &(*end)->next;
+
+	next_cache_phandle = of_get_property(np, "l2-cache", NULL);
+	if (!next_cache_phandle)
+		goto out;
+
+	next_cache = of_find_node_by_phandle(*next_cache_phandle);
+	if (!next_cache)
+		goto out;
+
+	*end = create_cache_index_info(next_cache, parent, ++index, ++level);
+
+	of_node_put(next_cache);
+out:
+	return new;
+}
+
+static void __cpuinit create_cache_info(struct sys_device *sysdev)
+{
+	struct kobject *cache_toplevel;
+	struct device_node *np = NULL;
+	int cpu = sysdev->id;
+
+	cache_toplevel = kobject_create_and_add("cache", &sysdev->kobj);
+	if (!cache_toplevel)
+		return;
+	per_cpu(cache_toplevel, cpu) = cache_toplevel;
+	np = of_get_cpu_node(cpu, NULL);
+	if (np != NULL) {
+		per_cpu(cache_desc, cpu) =
+			create_cache_index_info(np, cache_toplevel, 0, 1);
+		of_node_put(np);
+	}
+	return;
+}
+
+static void __cpuinit register_cpu_online(unsigned int cpu)
 {
 	struct cpu *c = &per_cpu(cpu_devices, cpu);
 	struct sys_device *s = &c->sysdev;
@@ -346,9 +629,33 @@ static void register_cpu_online(unsigned int cpu)
 
 	if (cpu_has_feature(CPU_FTR_DSCR))
 		sysdev_create_file(s, &attr_dscr);
+
+	create_cache_info(s);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+static void remove_cache_info(struct sys_device *sysdev)
+{
+	struct kobject *cache_toplevel;
+	struct cache_desc *cache_desc;
+	int cpu = sysdev->id;
+
+	cache_desc = per_cpu(cache_desc, cpu);
+	if (cache_desc != NULL) {
+		sysfs_remove_file(&cache_desc->kobj, &cache_size_attr.attr);
+		sysfs_remove_file(&cache_desc->kobj, &cache_line_size_attr.attr);
+		sysfs_remove_file(&cache_desc->kobj, &cache_type_attr.attr);
+		sysfs_remove_file(&cache_desc->kobj, &cache_level_attr.attr);
+		sysfs_remove_file(&cache_desc->kobj, &cache_nr_sets_attr.attr);
+		sysfs_remove_file(&cache_desc->kobj, &cache_assoc_attr.attr);
+
+		kobject_put(&cache_desc->kobj);
+	}
+	cache_toplevel = per_cpu(cache_toplevel, cpu);
+	if (cache_toplevel != NULL)
+		kobject_put(cache_toplevel);
+}
+
 static void unregister_cpu_online(unsigned int cpu)
 {
 	struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -399,6 +706,8 @@ static void unregister_cpu_online(unsigned int cpu)
 
 	if (cpu_has_feature(CPU_FTR_DSCR))
 		sysdev_remove_file(s, &attr_dscr);
+
+	remove_cache_info(s);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
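
A worked example of the ways_of_associativity derivation done in create_cache_desc() above, using device-tree property values that are purely hypothetical:

/* Hypothetical numbers: a 32KB cache with 128-byte lines and 64 sets,
 * as d-cache-size/d-cache-line-size/d-cache-sets might report them.
 */
u32 size = 32;		/* in KB, as cache_desc->size stores it */
u32 line_size = 128;	/* bytes */
u32 nr_sets = 64;

/* Same formula as create_cache_desc():
 * ((32 * 1024) / 64) / 128 = 512 / 128 = 4, i.e. a 4-way cache.
 * nr_sets == 1 would instead mean fully associative, reported as 0.
 */
u32 associativity = ((size * 1024) / nr_sets) / line_size;
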
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index ade8aeaa2e70..22a3c33fd751 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -530,7 +530,7 @@ static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr,
 	}
 
 	ret = dma_iommu_ops.map_single(dev, vaddr, size, direction, attrs);
-	if (unlikely(dma_mapping_error(ret))) {
+	if (unlikely(dma_mapping_error(dev, ret))) {
 		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
 		atomic_inc(&viodev->cmo.allocs_failed);
 	}
@@ -1031,8 +1031,8 @@ void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
 static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
 static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
 static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
-static void vio_cmo_bus_init() {}
-static void vio_cmo_sysfs_init() { }
+static void vio_cmo_bus_init(void) {}
+static void vio_cmo_sysfs_init(void) { }
 #endif /* CONFIG_PPC_SMLPAR */
 EXPORT_SYMBOL(vio_cmo_entitlement_update);
 EXPORT_SYMBOL(vio_cmo_set_dev_desired);