-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/powerpc/kernel/entry_32.S | 17
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 10
-rw-r--r--  arch/powerpc/kernel/legacy_serial.c | 44
-rw-r--r--  arch/powerpc/kernel/process.c | 8
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 39
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 54
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 24
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 3
-rw-r--r--  arch/powerpc/kernel/signal.c | 23
-rw-r--r--  arch/powerpc/kernel/smp.c | 119
-rw-r--r--  arch/powerpc/kernel/stacktrace.c | 1
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 311
-rw-r--r--  arch/powerpc/kernel/vio.c | 6
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 9
-rw-r--r--  arch/powerpc/platforms/powermac/setup.c | 72
-rw-r--r--  arch/powerpc/platforms/powermac/udbg_scc.c | 12
-rw-r--r--  arch/powerpc/platforms/pseries/cmm.c | 8
-rw-r--r--  drivers/net/ibmveth.c | 8
-rw-r--r--  drivers/of/Kconfig | 6
-rw-r--r--  drivers/of/Makefile | 1
-rw-r--r--  drivers/of/base.c | 88
-rw-r--r--  drivers/of/of_i2c.c | 64
-rw-r--r--  drivers/of/of_spi.c | 93
-rw-r--r--  drivers/spi/spi.c | 139
-rw-r--r--  include/asm-powerpc/pgtable-4k.h | 2
-rw-r--r--  include/asm-powerpc/pgtable-64k.h | 2
-rw-r--r--  include/asm-powerpc/pgtable-ppc32.h | 3
-rw-r--r--  include/asm-powerpc/pgtable-ppc64.h | 4
-rw-r--r--  include/asm-powerpc/ptrace.h | 1
-rw-r--r--  include/asm-powerpc/signal.h | 3
-rw-r--r--  include/asm-powerpc/smp.h | 2
-rw-r--r--  include/asm-powerpc/syscall.h | 84
-rw-r--r--  include/asm-powerpc/thread_info.h | 5
-rw-r--r--  include/asm-powerpc/topology.h | 2
-rw-r--r--  include/linux/of.h | 1
-rw-r--r--  include/linux/of_spi.h | 18
-rw-r--r--  include/linux/spi/spi.h | 12
38 files changed, 1039 insertions(+), 260 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index fe88418167c5..587da5e0990f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -117,6 +117,7 @@ config PPC
117 select HAVE_KPROBES 117 select HAVE_KPROBES
118 select HAVE_ARCH_KGDB 118 select HAVE_ARCH_KGDB
119 select HAVE_KRETPROBES 119 select HAVE_KRETPROBES
120 select HAVE_ARCH_TRACEHOOK
120 select HAVE_LMB 121 select HAVE_LMB
121 select HAVE_DMA_ATTRS if PPC64 122 select HAVE_DMA_ATTRS if PPC64
122 select USE_GENERIC_SMP_HELPERS if SMP 123 select USE_GENERIC_SMP_HELPERS if SMP
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 81c8324a4a3c..1cbbf7033641 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -148,7 +148,7 @@ transfer_to_handler:
148 /* Check to see if the dbcr0 register is set up to debug. Use the 148 /* Check to see if the dbcr0 register is set up to debug. Use the
149 internal debug mode bit to do this. */ 149 internal debug mode bit to do this. */
150 lwz r12,THREAD_DBCR0(r12) 150 lwz r12,THREAD_DBCR0(r12)
151 andis. r12,r12,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h 151 andis. r12,r12,DBCR0_IDM@h
152 beq+ 3f 152 beq+ 3f
153 /* From user and task is ptraced - load up global dbcr0 */ 153 /* From user and task is ptraced - load up global dbcr0 */
154 li r12,-1 /* clear all pending debug events */ 154 li r12,-1 /* clear all pending debug events */
@@ -292,7 +292,7 @@ syscall_exit_cont:
292 /* If the process has its own DBCR0 value, load it up. The internal 292 /* If the process has its own DBCR0 value, load it up. The internal
293 debug mode bit tells us that dbcr0 should be loaded. */ 293 debug mode bit tells us that dbcr0 should be loaded. */
294 lwz r0,THREAD+THREAD_DBCR0(r2) 294 lwz r0,THREAD+THREAD_DBCR0(r2)
295 andis. r10,r0,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h 295 andis. r10,r0,DBCR0_IDM@h
296 bnel- load_dbcr0 296 bnel- load_dbcr0
297#endif 297#endif
298#ifdef CONFIG_44x 298#ifdef CONFIG_44x
@@ -343,7 +343,12 @@ syscall_dotrace:
343 stw r0,_TRAP(r1) 343 stw r0,_TRAP(r1)
344 addi r3,r1,STACK_FRAME_OVERHEAD 344 addi r3,r1,STACK_FRAME_OVERHEAD
345 bl do_syscall_trace_enter 345 bl do_syscall_trace_enter
346 lwz r0,GPR0(r1) /* Restore original registers */ 346 /*
347 * Restore argument registers possibly just changed.
348 * We use the return value of do_syscall_trace_enter
349 * for call number to look up in the table (r0).
350 */
351 mr r0,r3
347 lwz r3,GPR3(r1) 352 lwz r3,GPR3(r1)
348 lwz r4,GPR4(r1) 353 lwz r4,GPR4(r1)
349 lwz r5,GPR5(r1) 354 lwz r5,GPR5(r1)
@@ -720,7 +725,7 @@ restore_user:
720 /* Check whether this process has its own DBCR0 value. The internal 725 /* Check whether this process has its own DBCR0 value. The internal
721 debug mode bit tells us that dbcr0 should be loaded. */ 726 debug mode bit tells us that dbcr0 should be loaded. */
722 lwz r0,THREAD+THREAD_DBCR0(r2) 727 lwz r0,THREAD+THREAD_DBCR0(r2)
723 andis. r10,r0,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h 728 andis. r10,r0,DBCR0_IDM@h
724 bnel- load_dbcr0 729 bnel- load_dbcr0
725#endif 730#endif
726 731
@@ -1055,8 +1060,8 @@ do_user_signal: /* r10 contains MSR_KERNEL here */
1055 SAVE_NVGPRS(r1) 1060 SAVE_NVGPRS(r1)
1056 rlwinm r3,r3,0,0,30 1061 rlwinm r3,r3,0,0,30
1057 stw r3,_TRAP(r1) 1062 stw r3,_TRAP(r1)
10582: li r3,0 10632: addi r3,r1,STACK_FRAME_OVERHEAD
1059 addi r4,r1,STACK_FRAME_OVERHEAD 1064 mr r4,r9
1060 bl do_signal 1065 bl do_signal
1061 REST_NVGPRS(r1) 1066 REST_NVGPRS(r1)
1062 b recheck 1067 b recheck
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index d7369243ae44..2d802e97097c 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -214,7 +214,12 @@ syscall_dotrace:
214 bl .save_nvgprs 214 bl .save_nvgprs
215 addi r3,r1,STACK_FRAME_OVERHEAD 215 addi r3,r1,STACK_FRAME_OVERHEAD
216 bl .do_syscall_trace_enter 216 bl .do_syscall_trace_enter
217 ld r0,GPR0(r1) /* Restore original registers */ 217 /*
218 * Restore argument registers possibly just changed.
219 * We use the return value of do_syscall_trace_enter
220 * for the call number to look up in the table (r0).
221 */
222 mr r0,r3
218 ld r3,GPR3(r1) 223 ld r3,GPR3(r1)
219 ld r4,GPR4(r1) 224 ld r4,GPR4(r1)
220 ld r5,GPR5(r1) 225 ld r5,GPR5(r1)
@@ -638,8 +643,7 @@ user_work:
638 b .ret_from_except_lite 643 b .ret_from_except_lite
639 644
6401: bl .save_nvgprs 6451: bl .save_nvgprs
641 li r3,0 646 addi r3,r1,STACK_FRAME_OVERHEAD
642 addi r4,r1,STACK_FRAME_OVERHEAD
643 bl .do_signal 647 bl .do_signal
644 b .ret_from_except 648 b .ret_from_except
645 649
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 4d96e1db55ee..9ddfaef1a184 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -493,18 +493,18 @@ static int __init serial_dev_init(void)
493device_initcall(serial_dev_init); 493device_initcall(serial_dev_init);
494 494
495 495
496#ifdef CONFIG_SERIAL_8250_CONSOLE
496/* 497/*
497 * This is called very early, as part of console_init() (typically just after 498 * This is called very early, as part of console_init() (typically just after
498 * time_init()). This function is respondible for trying to find a good 499 * time_init()). This function is respondible for trying to find a good
499 * default console on serial ports. It tries to match the open firmware 500 * default console on serial ports. It tries to match the open firmware
500 * default output with one of the available serial console drivers, either 501 * default output with one of the available serial console drivers that have
501 * one of the platform serial ports that have been probed earlier by 502 * been probed earlier by find_legacy_serial_ports()
502 * find_legacy_serial_ports() or some more platform specific ones.
503 */ 503 */
504static int __init check_legacy_serial_console(void) 504static int __init check_legacy_serial_console(void)
505{ 505{
506 struct device_node *prom_stdout = NULL; 506 struct device_node *prom_stdout = NULL;
507 int speed = 0, offset = 0; 507 int i, speed = 0, offset = 0;
508 const char *name; 508 const char *name;
509 const u32 *spd; 509 const u32 *spd;
510 510
@@ -548,31 +548,20 @@ static int __init check_legacy_serial_console(void)
548 if (spd) 548 if (spd)
549 speed = *spd; 549 speed = *spd;
550 550
551 if (0) 551 if (strcmp(name, "serial") != 0)
552 ; 552 goto not_found;
553#ifdef CONFIG_SERIAL_8250_CONSOLE 553
554 else if (strcmp(name, "serial") == 0) { 554 /* Look for it in probed array */
555 int i; 555 for (i = 0; i < legacy_serial_count; i++) {
556 /* Look for it in probed array */ 556 if (prom_stdout != legacy_serial_infos[i].np)
557 for (i = 0; i < legacy_serial_count; i++) { 557 continue;
558 if (prom_stdout != legacy_serial_infos[i].np) 558 offset = i;
559 continue; 559 speed = legacy_serial_infos[i].speed;
560 offset = i; 560 break;
561 speed = legacy_serial_infos[i].speed;
562 break;
563 }
564 if (i >= legacy_serial_count)
565 goto not_found;
566 } 561 }
567#endif /* CONFIG_SERIAL_8250_CONSOLE */ 562 if (i >= legacy_serial_count)
568#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
569 else if (strcmp(name, "ch-a") == 0)
570 offset = 0;
571 else if (strcmp(name, "ch-b") == 0)
572 offset = 1;
573#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
574 else
575 goto not_found; 563 goto not_found;
564
576 of_node_put(prom_stdout); 565 of_node_put(prom_stdout);
577 566
578 DBG("Found serial console at ttyS%d\n", offset); 567 DBG("Found serial console at ttyS%d\n", offset);
@@ -591,3 +580,4 @@ static int __init check_legacy_serial_console(void)
591} 580}
592console_initcall(check_legacy_serial_console); 581console_initcall(check_legacy_serial_console);
593 582
583#endif /* CONFIG_SERIAL_8250_CONSOLE */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index db2497ccc111..e030f3bd5024 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -254,7 +254,7 @@ void do_dabr(struct pt_regs *regs, unsigned long address,
254 return; 254 return;
255 255
256 /* Clear the DAC and struct entries. One shot trigger */ 256 /* Clear the DAC and struct entries. One shot trigger */
257#if (defined(CONFIG_44x) || defined(CONFIG_BOOKE)) 257#if defined(CONFIG_BOOKE)
258 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W 258 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W
259 | DBCR0_IDM)); 259 | DBCR0_IDM));
260#endif 260#endif
@@ -286,7 +286,7 @@ int set_dabr(unsigned long dabr)
286 mtspr(SPRN_DABR, dabr); 286 mtspr(SPRN_DABR, dabr);
287#endif 287#endif
288 288
289#if defined(CONFIG_44x) || defined(CONFIG_BOOKE) 289#if defined(CONFIG_BOOKE)
290 mtspr(SPRN_DAC1, dabr); 290 mtspr(SPRN_DAC1, dabr);
291#endif 291#endif
292 292
@@ -373,7 +373,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
373 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) 373 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
374 set_dabr(new->thread.dabr); 374 set_dabr(new->thread.dabr);
375 375
376#if defined(CONFIG_44x) || defined(CONFIG_BOOKE) 376#if defined(CONFIG_BOOKE)
377 /* If new thread DAC (HW breakpoint) is the same then leave it */ 377 /* If new thread DAC (HW breakpoint) is the same then leave it */
378 if (new->thread.dabr) 378 if (new->thread.dabr)
379 set_dabr(new->thread.dabr); 379 set_dabr(new->thread.dabr);
@@ -568,7 +568,7 @@ void flush_thread(void)
568 current->thread.dabr = 0; 568 current->thread.dabr = 0;
569 set_dabr(0); 569 set_dabr(0);
570 570
571#if defined(CONFIG_44x) || defined(CONFIG_BOOKE) 571#if defined(CONFIG_BOOKE)
572 current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W); 572 current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W);
573#endif 573#endif
574 } 574 }
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index c4ab2195b9cb..b72849ac7db3 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -205,8 +205,6 @@ static int __initdata mem_reserve_cnt;
205static cell_t __initdata regbuf[1024]; 205static cell_t __initdata regbuf[1024];
206 206
207 207
208#define MAX_CPU_THREADS 2
209
210/* 208/*
211 * Error results ... some OF calls will return "-1" on error, some 209 * Error results ... some OF calls will return "-1" on error, some
212 * will return 0, some will return either. To simplify, here are 210 * will return 0, some will return either. To simplify, here are
@@ -1339,10 +1337,6 @@ static void __init prom_hold_cpus(void)
1339 unsigned int reg; 1337 unsigned int reg;
1340 phandle node; 1338 phandle node;
1341 char type[64]; 1339 char type[64];
1342 int cpuid = 0;
1343 unsigned int interrupt_server[MAX_CPU_THREADS];
1344 unsigned int cpu_threads, hw_cpu_num;
1345 int propsize;
1346 struct prom_t *_prom = &RELOC(prom); 1340 struct prom_t *_prom = &RELOC(prom);
1347 unsigned long *spinloop 1341 unsigned long *spinloop
1348 = (void *) LOW_ADDR(__secondary_hold_spinloop); 1342 = (void *) LOW_ADDR(__secondary_hold_spinloop);
@@ -1386,7 +1380,6 @@ static void __init prom_hold_cpus(void)
1386 reg = -1; 1380 reg = -1;
1387 prom_getprop(node, "reg", &reg, sizeof(reg)); 1381 prom_getprop(node, "reg", &reg, sizeof(reg));
1388 1382
1389 prom_debug("\ncpuid = 0x%x\n", cpuid);
1390 prom_debug("cpu hw idx = 0x%x\n", reg); 1383 prom_debug("cpu hw idx = 0x%x\n", reg);
1391 1384
1392 /* Init the acknowledge var which will be reset by 1385 /* Init the acknowledge var which will be reset by
@@ -1395,28 +1388,9 @@ static void __init prom_hold_cpus(void)
1395 */ 1388 */
1396 *acknowledge = (unsigned long)-1; 1389 *acknowledge = (unsigned long)-1;
1397 1390
1398 propsize = prom_getprop(node, "ibm,ppc-interrupt-server#s", 1391 if (reg != _prom->cpu) {
1399 &interrupt_server,
1400 sizeof(interrupt_server));
1401 if (propsize < 0) {
1402 /* no property. old hardware has no SMT */
1403 cpu_threads = 1;
1404 interrupt_server[0] = reg; /* fake it with phys id */
1405 } else {
1406 /* We have a threaded processor */
1407 cpu_threads = propsize / sizeof(u32);
1408 if (cpu_threads > MAX_CPU_THREADS) {
1409 prom_printf("SMT: too many threads!\n"
1410 "SMT: found %x, max is %x\n",
1411 cpu_threads, MAX_CPU_THREADS);
1412 cpu_threads = 1; /* ToDo: panic? */
1413 }
1414 }
1415
1416 hw_cpu_num = interrupt_server[0];
1417 if (hw_cpu_num != _prom->cpu) {
1418 /* Primary Thread of non-boot cpu */ 1392 /* Primary Thread of non-boot cpu */
1419 prom_printf("%x : starting cpu hw idx %x... ", cpuid, reg); 1393 prom_printf("starting cpu hw idx %x... ", reg);
1420 call_prom("start-cpu", 3, 0, node, 1394 call_prom("start-cpu", 3, 0, node,
1421 secondary_hold, reg); 1395 secondary_hold, reg);
1422 1396
@@ -1431,17 +1405,10 @@ static void __init prom_hold_cpus(void)
1431 } 1405 }
1432#ifdef CONFIG_SMP 1406#ifdef CONFIG_SMP
1433 else 1407 else
1434 prom_printf("%x : boot cpu %x\n", cpuid, reg); 1408 prom_printf("boot cpu hw idx %x\n", reg);
1435#endif /* CONFIG_SMP */ 1409#endif /* CONFIG_SMP */
1436
1437 /* Reserve cpu #s for secondary threads. They start later. */
1438 cpuid += cpu_threads;
1439 } 1410 }
1440 1411
1441 if (cpuid > NR_CPUS)
1442 prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS)
1443 ") exceeded: ignoring extras\n");
1444
1445 prom_debug("prom_hold_cpus: end...\n"); 1412 prom_debug("prom_hold_cpus: end...\n");
1446} 1413}
1447 1414
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index a5d0e78779c8..6b66cd85b433 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -22,6 +22,7 @@
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/ptrace.h> 23#include <linux/ptrace.h>
24#include <linux/regset.h> 24#include <linux/regset.h>
25#include <linux/tracehook.h>
25#include <linux/elf.h> 26#include <linux/elf.h>
26#include <linux/user.h> 27#include <linux/user.h>
27#include <linux/security.h> 28#include <linux/security.h>
@@ -717,7 +718,7 @@ void user_disable_single_step(struct task_struct *task)
717 struct pt_regs *regs = task->thread.regs; 718 struct pt_regs *regs = task->thread.regs;
718 719
719 720
720#if defined(CONFIG_44x) || defined(CONFIG_BOOKE) 721#if defined(CONFIG_BOOKE)
721 /* If DAC then do not single step, skip */ 722 /* If DAC then do not single step, skip */
722 if (task->thread.dabr) 723 if (task->thread.dabr)
723 return; 724 return;
@@ -744,10 +745,11 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
744 if (addr > 0) 745 if (addr > 0)
745 return -EINVAL; 746 return -EINVAL;
746 747
748 /* The bottom 3 bits in dabr are flags */
747 if ((data & ~0x7UL) >= TASK_SIZE) 749 if ((data & ~0x7UL) >= TASK_SIZE)
748 return -EIO; 750 return -EIO;
749 751
750#ifdef CONFIG_PPC64 752#ifndef CONFIG_BOOKE
751 753
752 /* For processors using DABR (i.e. 970), the bottom 3 bits are flags. 754 /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
753 * It was assumed, on previous implementations, that 3 bits were 755 * It was assumed, on previous implementations, that 3 bits were
@@ -769,7 +771,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
769 task->thread.dabr = data; 771 task->thread.dabr = data;
770 772
771#endif 773#endif
772#if defined(CONFIG_44x) || defined(CONFIG_BOOKE) 774#if defined(CONFIG_BOOKE)
773 775
774 /* As described above, it was assumed 3 bits were passed with the data 776 /* As described above, it was assumed 3 bits were passed with the data
775 * address, but we will assume only the mode bits will be passed 777 * address, but we will assume only the mode bits will be passed
@@ -1013,31 +1015,24 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
1013 return ret; 1015 return ret;
1014} 1016}
1015 1017
1016static void do_syscall_trace(void) 1018/*
1019 * We must return the syscall number to actually look up in the table.
1020 * This can be -1L to skip running any syscall at all.
1021 */
1022long do_syscall_trace_enter(struct pt_regs *regs)
1017{ 1023{
1018 /* the 0x80 provides a way for the tracing parent to distinguish 1024 long ret = 0;
1019 between a syscall stop and SIGTRAP delivery */
1020 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
1021 ? 0x80 : 0));
1022
1023 /*
1024 * this isn't the same as continuing with a signal, but it will do
1025 * for normal use. strace only continues with a signal if the
1026 * stopping signal is not SIGTRAP. -brl
1027 */
1028 if (current->exit_code) {
1029 send_sig(current->exit_code, current, 1);
1030 current->exit_code = 0;
1031 }
1032}
1033 1025
1034void do_syscall_trace_enter(struct pt_regs *regs)
1035{
1036 secure_computing(regs->gpr[0]); 1026 secure_computing(regs->gpr[0]);
1037 1027
1038 if (test_thread_flag(TIF_SYSCALL_TRACE) 1028 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
1039 && (current->ptrace & PT_PTRACED)) 1029 tracehook_report_syscall_entry(regs))
1040 do_syscall_trace(); 1030 /*
1031 * Tracing decided this syscall should not happen.
1032 * We'll return a bogus call number to get an ENOSYS
1033 * error, but leave the original number in regs->gpr[0].
1034 */
1035 ret = -1L;
1041 1036
1042 if (unlikely(current->audit_context)) { 1037 if (unlikely(current->audit_context)) {
1043#ifdef CONFIG_PPC64 1038#ifdef CONFIG_PPC64
@@ -1055,16 +1050,19 @@ void do_syscall_trace_enter(struct pt_regs *regs)
1055 regs->gpr[5] & 0xffffffff, 1050 regs->gpr[5] & 0xffffffff,
1056 regs->gpr[6] & 0xffffffff); 1051 regs->gpr[6] & 0xffffffff);
1057 } 1052 }
1053
1054 return ret ?: regs->gpr[0];
1058} 1055}
1059 1056
1060void do_syscall_trace_leave(struct pt_regs *regs) 1057void do_syscall_trace_leave(struct pt_regs *regs)
1061{ 1058{
1059 int step;
1060
1062 if (unlikely(current->audit_context)) 1061 if (unlikely(current->audit_context))
1063 audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, 1062 audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
1064 regs->result); 1063 regs->result);
1065 1064
1066 if ((test_thread_flag(TIF_SYSCALL_TRACE) 1065 step = test_thread_flag(TIF_SINGLESTEP);
1067 || test_thread_flag(TIF_SINGLESTEP)) 1066 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1068 && (current->ptrace & PT_PTRACED)) 1067 tracehook_report_syscall_exit(regs, step);
1069 do_syscall_trace();
1070} 1068}
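
The ptrace.c hunk above, together with the entry_32.S and entry_64.S changes, switches syscall tracing to the tracehook convention: do_syscall_trace_enter() now returns the syscall number to dispatch (or -1L to run no syscall at all), and the assembly glue moves that return value into r0 before indexing the syscall table, so -1L falls out as -ENOSYS while the original number stays in regs->gpr[0]. The following is a rough C equivalent of that flow, as a sketch only; syscall_dispatch() and syscall_fn_t are names made up for this illustration, not symbols added by the patch.

/*
 * Sketch only: approximate C equivalent of what the entry_32.S/entry_64.S
 * glue now does around a traced syscall.  syscall_dispatch() and
 * syscall_fn_t are illustrative names; do_syscall_trace_enter() and
 * do_syscall_trace_leave() are the functions reworked in the hunk above.
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>

typedef long (*syscall_fn_t)(unsigned long, unsigned long, unsigned long,
			     unsigned long, unsigned long, unsigned long);
extern void *sys_call_table[];
extern long do_syscall_trace_enter(struct pt_regs *regs);
extern void do_syscall_trace_leave(struct pt_regs *regs);

static long syscall_dispatch(struct pt_regs *regs)
{
	long nr = regs->gpr[0];
	long ret;

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		/* May rewrite the arguments in *regs; returns the number
		 * to look up, or -1L to suppress the call entirely. */
		nr = do_syscall_trace_enter(regs);

	if (nr < 0 || nr >= NR_syscalls)
		ret = -ENOSYS;		/* a -1L from the tracer lands here */
	else
		ret = ((syscall_fn_t)sys_call_table[nr])(regs->gpr[3],
				regs->gpr[4], regs->gpr[5], regs->gpr[6],
				regs->gpr[7], regs->gpr[8]);

	regs->result = ret;
	if (test_thread_flag(TIF_SYSCALL_TRACE) ||
	    test_thread_flag(TIF_SINGLESTEP))
		do_syscall_trace_leave(regs);
	return ret;
}
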
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 61a3f4132087..9cc5a52711e5 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -367,7 +367,6 @@ static void __init cpu_init_thread_core_maps(int tpc)
367 * setup_cpu_maps - initialize the following cpu maps: 367 * setup_cpu_maps - initialize the following cpu maps:
368 * cpu_possible_map 368 * cpu_possible_map
369 * cpu_present_map 369 * cpu_present_map
370 * cpu_sibling_map
371 * 370 *
372 * Having the possible map set up early allows us to restrict allocations 371 * Having the possible map set up early allows us to restrict allocations
373 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS. 372 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
@@ -475,29 +474,6 @@ void __init smp_setup_cpu_maps(void)
475 */ 474 */
476 cpu_init_thread_core_maps(nthreads); 475 cpu_init_thread_core_maps(nthreads);
477} 476}
478
479/*
480 * Being that cpu_sibling_map is now a per_cpu array, then it cannot
481 * be initialized until the per_cpu areas have been created. This
482 * function is now called from setup_per_cpu_areas().
483 */
484void __init smp_setup_cpu_sibling_map(void)
485{
486#ifdef CONFIG_PPC64
487 int i, cpu, base;
488
489 for_each_possible_cpu(cpu) {
490 DBG("Sibling map for CPU %d:", cpu);
491 base = cpu_first_thread_in_core(cpu);
492 for (i = 0; i < threads_per_core; i++) {
493 cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
494 DBG(" %d", base + i);
495 }
496 DBG("\n");
497 }
498
499#endif /* CONFIG_PPC64 */
500}
501#endif /* CONFIG_SMP */ 477#endif /* CONFIG_SMP */
502 478
503#ifdef CONFIG_PCSPKR_PLATFORM 479#ifdef CONFIG_PCSPKR_PLATFORM
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 04d8de9f0fc6..8b25f51f03bf 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -611,9 +611,6 @@ void __init setup_per_cpu_areas(void)
611 paca[i].data_offset = ptr - __per_cpu_start; 611 paca[i].data_offset = ptr - __per_cpu_start;
612 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 612 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
613 } 613 }
614
615 /* Now that per_cpu is setup, initialize cpu_sibling_map */
616 smp_setup_cpu_sibling_map();
617} 614}
618#endif 615#endif
619 616
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 7aada783ec6a..a54405ebd7b0 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -9,7 +9,7 @@
9 * this archive for more details. 9 * this archive for more details.
10 */ 10 */
11 11
12#include <linux/ptrace.h> 12#include <linux/tracehook.h>
13#include <linux/signal.h> 13#include <linux/signal.h>
14#include <asm/uaccess.h> 14#include <asm/uaccess.h>
15#include <asm/unistd.h> 15#include <asm/unistd.h>
@@ -112,7 +112,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
112 } 112 }
113} 113}
114 114
115int do_signal(sigset_t *oldset, struct pt_regs *regs) 115static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
116{ 116{
117 siginfo_t info; 117 siginfo_t info;
118 int signr; 118 int signr;
@@ -147,7 +147,7 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
147 */ 147 */
148 if (current->thread.dabr) { 148 if (current->thread.dabr) {
149 set_dabr(current->thread.dabr); 149 set_dabr(current->thread.dabr);
150#if defined(CONFIG_44x) || defined(CONFIG_BOOKE) 150#if defined(CONFIG_BOOKE)
151 mtspr(SPRN_DBCR0, current->thread.dbcr0); 151 mtspr(SPRN_DBCR0, current->thread.dbcr0);
152#endif 152#endif
153 } 153 }
@@ -177,11 +177,28 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
177 * its frame, and we can clear the TLF_RESTORE_SIGMASK flag. 177 * its frame, and we can clear the TLF_RESTORE_SIGMASK flag.
178 */ 178 */
179 current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK; 179 current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;
180
181 /*
182 * Let tracing know that we've done the handler setup.
183 */
184 tracehook_signal_handler(signr, &info, &ka, regs,
185 test_thread_flag(TIF_SINGLESTEP));
180 } 186 }
181 187
182 return ret; 188 return ret;
183} 189}
184 190
191void do_signal(struct pt_regs *regs, unsigned long thread_info_flags)
192{
193 if (thread_info_flags & _TIF_SIGPENDING)
194 do_signal_pending(NULL, regs);
195
196 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
197 clear_thread_flag(TIF_NOTIFY_RESUME);
198 tracehook_notify_resume(regs);
199 }
200}
201
185long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, 202long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
186 unsigned long r5, unsigned long r6, unsigned long r7, 203 unsigned long r5, unsigned long r6, unsigned long r7,
187 unsigned long r8, struct pt_regs *regs) 204 unsigned long r8, struct pt_regs *regs)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index f5ae9fa222ea..5337ca7bb649 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -41,6 +41,7 @@
41#include <asm/smp.h> 41#include <asm/smp.h>
42#include <asm/time.h> 42#include <asm/time.h>
43#include <asm/machdep.h> 43#include <asm/machdep.h>
44#include <asm/cputhreads.h>
44#include <asm/cputable.h> 45#include <asm/cputable.h>
45#include <asm/system.h> 46#include <asm/system.h>
46#include <asm/mpic.h> 47#include <asm/mpic.h>
@@ -62,10 +63,12 @@ struct thread_info *secondary_ti;
62cpumask_t cpu_possible_map = CPU_MASK_NONE; 63cpumask_t cpu_possible_map = CPU_MASK_NONE;
63cpumask_t cpu_online_map = CPU_MASK_NONE; 64cpumask_t cpu_online_map = CPU_MASK_NONE;
64DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; 65DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
66DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
65 67
66EXPORT_SYMBOL(cpu_online_map); 68EXPORT_SYMBOL(cpu_online_map);
67EXPORT_SYMBOL(cpu_possible_map); 69EXPORT_SYMBOL(cpu_possible_map);
68EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 70EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
71EXPORT_PER_CPU_SYMBOL(cpu_core_map);
69 72
70/* SMP operations for this machine */ 73/* SMP operations for this machine */
71struct smp_ops_t *smp_ops; 74struct smp_ops_t *smp_ops;
@@ -228,6 +231,8 @@ void __devinit smp_prepare_boot_cpu(void)
228 BUG_ON(smp_processor_id() != boot_cpuid); 231 BUG_ON(smp_processor_id() != boot_cpuid);
229 232
230 cpu_set(boot_cpuid, cpu_online_map); 233 cpu_set(boot_cpuid, cpu_online_map);
234 cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
235 cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
231#ifdef CONFIG_PPC64 236#ifdef CONFIG_PPC64
232 paca[boot_cpuid].__current = current; 237 paca[boot_cpuid].__current = current;
233#endif 238#endif
@@ -375,11 +380,60 @@ int __cpuinit __cpu_up(unsigned int cpu)
375 return 0; 380 return 0;
376} 381}
377 382
383/* Return the value of the reg property corresponding to the given
384 * logical cpu.
385 */
386int cpu_to_core_id(int cpu)
387{
388 struct device_node *np;
389 const int *reg;
390 int id = -1;
391
392 np = of_get_cpu_node(cpu, NULL);
393 if (!np)
394 goto out;
395
396 reg = of_get_property(np, "reg", NULL);
397 if (!reg)
398 goto out;
399
400 id = *reg;
401out:
402 of_node_put(np);
403 return id;
404}
405
406/* Must be called when no change can occur to cpu_present_map,
407 * i.e. during cpu online or offline.
408 */
409static struct device_node *cpu_to_l2cache(int cpu)
410{
411 struct device_node *np;
412 const phandle *php;
413 phandle ph;
414
415 if (!cpu_present(cpu))
416 return NULL;
417
418 np = of_get_cpu_node(cpu, NULL);
419 if (np == NULL)
420 return NULL;
421
422 php = of_get_property(np, "l2-cache", NULL);
423 if (php == NULL)
424 return NULL;
425 ph = *php;
426 of_node_put(np);
427
428 return of_find_node_by_phandle(ph);
429}
378 430
379/* Activate a secondary processor. */ 431/* Activate a secondary processor. */
380int __devinit start_secondary(void *unused) 432int __devinit start_secondary(void *unused)
381{ 433{
382 unsigned int cpu = smp_processor_id(); 434 unsigned int cpu = smp_processor_id();
435 struct device_node *l2_cache;
436 int i, base;
383 437
384 atomic_inc(&init_mm.mm_count); 438 atomic_inc(&init_mm.mm_count);
385 current->active_mm = &init_mm; 439 current->active_mm = &init_mm;
@@ -400,6 +454,33 @@ int __devinit start_secondary(void *unused)
400 454
401 ipi_call_lock(); 455 ipi_call_lock();
402 cpu_set(cpu, cpu_online_map); 456 cpu_set(cpu, cpu_online_map);
457 /* Update sibling maps */
458 base = cpu_first_thread_in_core(cpu);
459 for (i = 0; i < threads_per_core; i++) {
460 if (cpu_is_offline(base + i))
461 continue;
462 cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
463 cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
464
465 /* cpu_core_map should be a superset of
466 * cpu_sibling_map even if we don't have cache
467 * information, so update the former here, too.
468 */
469 cpu_set(cpu, per_cpu(cpu_core_map, base +i));
470 cpu_set(base + i, per_cpu(cpu_core_map, cpu));
471 }
472 l2_cache = cpu_to_l2cache(cpu);
473 for_each_online_cpu(i) {
474 struct device_node *np = cpu_to_l2cache(i);
475 if (!np)
476 continue;
477 if (np == l2_cache) {
478 cpu_set(cpu, per_cpu(cpu_core_map, i));
479 cpu_set(i, per_cpu(cpu_core_map, cpu));
480 }
481 of_node_put(np);
482 }
483 of_node_put(l2_cache);
403 ipi_call_unlock(); 484 ipi_call_unlock();
404 485
405 local_irq_enable(); 486 local_irq_enable();
@@ -437,10 +518,42 @@ void __init smp_cpus_done(unsigned int max_cpus)
437#ifdef CONFIG_HOTPLUG_CPU 518#ifdef CONFIG_HOTPLUG_CPU
438int __cpu_disable(void) 519int __cpu_disable(void)
439{ 520{
440 if (smp_ops->cpu_disable) 521 struct device_node *l2_cache;
441 return smp_ops->cpu_disable(); 522 int cpu = smp_processor_id();
523 int base, i;
524 int err;
442 525
443 return -ENOSYS; 526 if (!smp_ops->cpu_disable)
527 return -ENOSYS;
528
529 err = smp_ops->cpu_disable();
530 if (err)
531 return err;
532
533 /* Update sibling maps */
534 base = cpu_first_thread_in_core(cpu);
535 for (i = 0; i < threads_per_core; i++) {
536 cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
537 cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
538 cpu_clear(cpu, per_cpu(cpu_core_map, base +i));
539 cpu_clear(base + i, per_cpu(cpu_core_map, cpu));
540 }
541
542 l2_cache = cpu_to_l2cache(cpu);
543 for_each_present_cpu(i) {
544 struct device_node *np = cpu_to_l2cache(i);
545 if (!np)
546 continue;
547 if (np == l2_cache) {
548 cpu_clear(cpu, per_cpu(cpu_core_map, i));
549 cpu_clear(i, per_cpu(cpu_core_map, cpu));
550 }
551 of_node_put(np);
552 }
553 of_node_put(l2_cache);
554
555
556 return 0;
444} 557}
445 558
446void __cpu_die(unsigned int cpu) 559void __cpu_die(unsigned int cpu)
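
The smp.c changes above fill in two per-CPU topology masks as CPUs come and go: cpu_sibling_map collects the hardware threads of one core (via cpu_first_thread_in_core() and threads_per_core), while cpu_core_map additionally groups CPUs whose device-tree nodes reference the same "l2-cache" phandle. Below is a hedged sketch of how a consumer might walk those masks; print_cpu_topology() is a made-up helper, while the two per-CPU cpumasks are the ones exported by this patch.

/*
 * Sketch only: reading the per-CPU topology masks maintained above.
 * print_cpu_topology() is an illustrative helper, not part of the patch.
 */
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static void print_cpu_topology(int cpu)
{
	int i;

	/* Hardware threads sharing a core with @cpu */
	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
		printk(KERN_DEBUG "cpu%d: sibling thread cpu%d\n", cpu, i);

	/* CPUs sharing a core or an L2 cache with @cpu; always a superset
	 * of the sibling map, even when no "l2-cache" property exists. */
	for_each_cpu_mask(i, per_cpu(cpu_core_map, cpu))
		printk(KERN_DEBUG "cpu%d: core/L2 peer cpu%d\n", cpu, i);
}
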
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index f2589645870a..b0dbb1daa4df 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -13,7 +13,6 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/stacktrace.h> 15#include <linux/stacktrace.h>
16#include <linux/module.h>
17#include <asm/ptrace.h> 16#include <asm/ptrace.h>
18#include <asm/processor.h> 17#include <asm/processor.h>
19 18
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 800e5e9a087b..56d172d16e56 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -22,6 +22,8 @@
22 22
23static DEFINE_PER_CPU(struct cpu, cpu_devices); 23static DEFINE_PER_CPU(struct cpu, cpu_devices);
24 24
25static DEFINE_PER_CPU(struct kobject *, cache_toplevel);
26
25/* SMT stuff */ 27/* SMT stuff */
26 28
27#ifdef CONFIG_PPC_MULTIPLATFORM 29#ifdef CONFIG_PPC_MULTIPLATFORM
@@ -297,8 +299,289 @@ static struct sysdev_attribute pa6t_attrs[] = {
297#endif /* CONFIG_DEBUG_KERNEL */ 299#endif /* CONFIG_DEBUG_KERNEL */
298}; 300};
299 301
302struct cache_desc {
303 struct kobject kobj;
304 struct cache_desc *next;
305 const char *type; /* Instruction, Data, or Unified */
306 u32 size; /* total cache size in KB */
307 u32 line_size; /* in bytes */
308 u32 nr_sets; /* number of sets */
309 u32 level; /* e.g. 1, 2, 3... */
310 u32 associativity; /* e.g. 8-way... 0 is fully associative */
311};
312
313DEFINE_PER_CPU(struct cache_desc *, cache_desc);
314
315static struct cache_desc *kobj_to_cache_desc(struct kobject *k)
316{
317 return container_of(k, struct cache_desc, kobj);
318}
319
320static void cache_desc_release(struct kobject *k)
321{
322 struct cache_desc *desc = kobj_to_cache_desc(k);
323
324 pr_debug("%s: releasing %s\n", __func__, kobject_name(k));
325
326 if (desc->next)
327 kobject_put(&desc->next->kobj);
328
329 kfree(kobj_to_cache_desc(k));
330}
331
332static ssize_t cache_desc_show(struct kobject *k, struct attribute *attr, char *buf)
333{
334 struct kobj_attribute *kobj_attr;
335
336 kobj_attr = container_of(attr, struct kobj_attribute, attr);
337
338 return kobj_attr->show(k, kobj_attr, buf);
339}
340
341static struct sysfs_ops cache_desc_sysfs_ops = {
342 .show = cache_desc_show,
343};
344
345static struct kobj_type cache_desc_type = {
346 .release = cache_desc_release,
347 .sysfs_ops = &cache_desc_sysfs_ops,
348};
349
350static ssize_t cache_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
351{
352 struct cache_desc *cache = kobj_to_cache_desc(k);
353
354 return sprintf(buf, "%uK\n", cache->size);
355}
356
357static struct kobj_attribute cache_size_attr =
358 __ATTR(size, 0444, cache_size_show, NULL);
359
360static ssize_t cache_line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
361{
362 struct cache_desc *cache = kobj_to_cache_desc(k);
363
364 return sprintf(buf, "%u\n", cache->line_size);
365}
366
367static struct kobj_attribute cache_line_size_attr =
368 __ATTR(coherency_line_size, 0444, cache_line_size_show, NULL);
369
370static ssize_t cache_nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
371{
372 struct cache_desc *cache = kobj_to_cache_desc(k);
373
374 return sprintf(buf, "%u\n", cache->nr_sets);
375}
376
377static struct kobj_attribute cache_nr_sets_attr =
378 __ATTR(number_of_sets, 0444, cache_nr_sets_show, NULL);
379
380static ssize_t cache_type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
381{
382 struct cache_desc *cache = kobj_to_cache_desc(k);
383
384 return sprintf(buf, "%s\n", cache->type);
385}
386
387static struct kobj_attribute cache_type_attr =
388 __ATTR(type, 0444, cache_type_show, NULL);
389
390static ssize_t cache_level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
391{
392 struct cache_desc *cache = kobj_to_cache_desc(k);
393
394 return sprintf(buf, "%u\n", cache->level);
395}
396
397static struct kobj_attribute cache_level_attr =
398 __ATTR(level, 0444, cache_level_show, NULL);
399
400static ssize_t cache_assoc_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
401{
402 struct cache_desc *cache = kobj_to_cache_desc(k);
403
404 return sprintf(buf, "%u\n", cache->associativity);
405}
406
407static struct kobj_attribute cache_assoc_attr =
408 __ATTR(ways_of_associativity, 0444, cache_assoc_show, NULL);
409
410struct cache_desc_info {
411 const char *type;
412 const char *size_prop;
413 const char *line_size_prop;
414 const char *nr_sets_prop;
415};
416
417/* PowerPC Processor binding says the [di]-cache-* must be equal on
418 * unified caches, so just use d-cache properties. */
419static struct cache_desc_info ucache_info = {
420 .type = "Unified",
421 .size_prop = "d-cache-size",
422 .line_size_prop = "d-cache-line-size",
423 .nr_sets_prop = "d-cache-sets",
424};
300 425
301static void register_cpu_online(unsigned int cpu) 426static struct cache_desc_info dcache_info = {
427 .type = "Data",
428 .size_prop = "d-cache-size",
429 .line_size_prop = "d-cache-line-size",
430 .nr_sets_prop = "d-cache-sets",
431};
432
433static struct cache_desc_info icache_info = {
434 .type = "Instruction",
435 .size_prop = "i-cache-size",
436 .line_size_prop = "i-cache-line-size",
437 .nr_sets_prop = "i-cache-sets",
438};
439
440static struct cache_desc * __cpuinit create_cache_desc(struct device_node *np, struct kobject *parent, int index, int level, struct cache_desc_info *info)
441{
442 const u32 *cache_line_size;
443 struct cache_desc *new;
444 const u32 *cache_size;
445 const u32 *nr_sets;
446 int rc;
447
448 new = kzalloc(sizeof(*new), GFP_KERNEL);
449 if (!new)
450 return NULL;
451
452 rc = kobject_init_and_add(&new->kobj, &cache_desc_type, parent,
453 "index%d", index);
454 if (rc)
455 goto err;
456
457 /* type */
458 new->type = info->type;
459 rc = sysfs_create_file(&new->kobj, &cache_type_attr.attr);
460 WARN_ON(rc);
461
462 /* level */
463 new->level = level;
464 rc = sysfs_create_file(&new->kobj, &cache_level_attr.attr);
465 WARN_ON(rc);
466
467 /* size */
468 cache_size = of_get_property(np, info->size_prop, NULL);
469 if (cache_size) {
470 new->size = *cache_size / 1024;
471 rc = sysfs_create_file(&new->kobj,
472 &cache_size_attr.attr);
473 WARN_ON(rc);
474 }
475
476 /* coherency_line_size */
477 cache_line_size = of_get_property(np, info->line_size_prop, NULL);
478 if (cache_line_size) {
479 new->line_size = *cache_line_size;
480 rc = sysfs_create_file(&new->kobj,
481 &cache_line_size_attr.attr);
482 WARN_ON(rc);
483 }
484
485 /* number_of_sets */
486 nr_sets = of_get_property(np, info->nr_sets_prop, NULL);
487 if (nr_sets) {
488 new->nr_sets = *nr_sets;
489 rc = sysfs_create_file(&new->kobj,
490 &cache_nr_sets_attr.attr);
491 WARN_ON(rc);
492 }
493
494 /* ways_of_associativity */
495 if (new->nr_sets == 1) {
496 /* fully associative */
497 new->associativity = 0;
498 goto create_assoc;
499 }
500
501 if (new->nr_sets && new->size && new->line_size) {
502 /* If we have values for all of these we can derive
503 * the associativity. */
504 new->associativity =
505 ((new->size * 1024) / new->nr_sets) / new->line_size;
506create_assoc:
507 rc = sysfs_create_file(&new->kobj,
508 &cache_assoc_attr.attr);
509 WARN_ON(rc);
510 }
511
512 return new;
513err:
514 kfree(new);
515 return NULL;
516}
517
518static bool cache_is_unified(struct device_node *np)
519{
520 return of_get_property(np, "cache-unified", NULL);
521}
522
523static struct cache_desc * __cpuinit create_cache_index_info(struct device_node *np, struct kobject *parent, int index, int level)
524{
525 const phandle *next_cache_phandle;
526 struct device_node *next_cache;
527 struct cache_desc *new, **end;
528
529 pr_debug("%s(node = %s, index = %d)\n", __func__, np->full_name, index);
530
531 if (cache_is_unified(np)) {
532 new = create_cache_desc(np, parent, index, level,
533 &ucache_info);
534 } else {
535 new = create_cache_desc(np, parent, index, level,
536 &dcache_info);
537 if (new) {
538 index++;
539 new->next = create_cache_desc(np, parent, index, level,
540 &icache_info);
541 }
542 }
543 if (!new)
544 return NULL;
545
546 end = &new->next;
547 while (*end)
548 end = &(*end)->next;
549
550 next_cache_phandle = of_get_property(np, "l2-cache", NULL);
551 if (!next_cache_phandle)
552 goto out;
553
554 next_cache = of_find_node_by_phandle(*next_cache_phandle);
555 if (!next_cache)
556 goto out;
557
558 *end = create_cache_index_info(next_cache, parent, ++index, ++level);
559
560 of_node_put(next_cache);
561out:
562 return new;
563}
564
565static void __cpuinit create_cache_info(struct sys_device *sysdev)
566{
567 struct kobject *cache_toplevel;
568 struct device_node *np = NULL;
569 int cpu = sysdev->id;
570
571 cache_toplevel = kobject_create_and_add("cache", &sysdev->kobj);
572 if (!cache_toplevel)
573 return;
574 per_cpu(cache_toplevel, cpu) = cache_toplevel;
575 np = of_get_cpu_node(cpu, NULL);
576 if (np != NULL) {
577 per_cpu(cache_desc, cpu) =
578 create_cache_index_info(np, cache_toplevel, 0, 1);
579 of_node_put(np);
580 }
581 return;
582}
583
584static void __cpuinit register_cpu_online(unsigned int cpu)
302{ 585{
303 struct cpu *c = &per_cpu(cpu_devices, cpu); 586 struct cpu *c = &per_cpu(cpu_devices, cpu);
304 struct sys_device *s = &c->sysdev; 587 struct sys_device *s = &c->sysdev;
@@ -346,9 +629,33 @@ static void register_cpu_online(unsigned int cpu)
346 629
347 if (cpu_has_feature(CPU_FTR_DSCR)) 630 if (cpu_has_feature(CPU_FTR_DSCR))
348 sysdev_create_file(s, &attr_dscr); 631 sysdev_create_file(s, &attr_dscr);
632
633 create_cache_info(s);
349} 634}
350 635
351#ifdef CONFIG_HOTPLUG_CPU 636#ifdef CONFIG_HOTPLUG_CPU
637static void remove_cache_info(struct sys_device *sysdev)
638{
639 struct kobject *cache_toplevel;
640 struct cache_desc *cache_desc;
641 int cpu = sysdev->id;
642
643 cache_desc = per_cpu(cache_desc, cpu);
644 if (cache_desc != NULL) {
645 sysfs_remove_file(&cache_desc->kobj, &cache_size_attr.attr);
646 sysfs_remove_file(&cache_desc->kobj, &cache_line_size_attr.attr);
647 sysfs_remove_file(&cache_desc->kobj, &cache_type_attr.attr);
648 sysfs_remove_file(&cache_desc->kobj, &cache_level_attr.attr);
649 sysfs_remove_file(&cache_desc->kobj, &cache_nr_sets_attr.attr);
650 sysfs_remove_file(&cache_desc->kobj, &cache_assoc_attr.attr);
651
652 kobject_put(&cache_desc->kobj);
653 }
654 cache_toplevel = per_cpu(cache_toplevel, cpu);
655 if (cache_toplevel != NULL)
656 kobject_put(cache_toplevel);
657}
658
352static void unregister_cpu_online(unsigned int cpu) 659static void unregister_cpu_online(unsigned int cpu)
353{ 660{
354 struct cpu *c = &per_cpu(cpu_devices, cpu); 661 struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -399,6 +706,8 @@ static void unregister_cpu_online(unsigned int cpu)
399 706
400 if (cpu_has_feature(CPU_FTR_DSCR)) 707 if (cpu_has_feature(CPU_FTR_DSCR))
401 sysdev_remove_file(s, &attr_dscr); 708 sysdev_remove_file(s, &attr_dscr);
709
710 remove_cache_info(s);
402} 711}
403#endif /* CONFIG_HOTPLUG_CPU */ 712#endif /* CONFIG_HOTPLUG_CPU */
404 713
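
In the sysfs.c additions above, create_cache_desc() exposes one kobject per cache level under the new per-CPU "cache" directory and derives ways_of_associativity from the device-tree properties when all three are present: ways = (size_in_bytes / number_of_sets) / line_size, with a single set reported as 0, meaning fully associative. Here is a small worked example of that arithmetic as plain user-space C; the 32K/128-set/128-byte geometry is illustrative, not taken from any particular CPU.

/* Worked example of the associativity derivation used above (user-space C).
 * The cache geometry below is made up for illustration. */
#include <stdio.h>

int main(void)
{
	unsigned int size = 32768;	/* d-cache-size, in bytes */
	unsigned int nr_sets = 128;	/* d-cache-sets           */
	unsigned int line_size = 128;	/* d-cache-line-size      */
	unsigned int ways;

	if (nr_sets == 1)
		ways = 0;		/* sysfs convention: fully associative */
	else
		ways = (size / nr_sets) / line_size;

	printf("ways_of_associativity = %u\n", ways);	/* prints 2 */
	return 0;
}
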
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index ade8aeaa2e70..22a3c33fd751 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -530,7 +530,7 @@ static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr,
530 } 530 }
531 531
532 ret = dma_iommu_ops.map_single(dev, vaddr, size, direction, attrs); 532 ret = dma_iommu_ops.map_single(dev, vaddr, size, direction, attrs);
533 if (unlikely(dma_mapping_error(ret))) { 533 if (unlikely(dma_mapping_error(dev, ret))) {
534 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); 534 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
535 atomic_inc(&viodev->cmo.allocs_failed); 535 atomic_inc(&viodev->cmo.allocs_failed);
536 } 536 }
@@ -1031,8 +1031,8 @@ void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
1031static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; } 1031static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
1032static void vio_cmo_bus_remove(struct vio_dev *viodev) {} 1032static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
1033static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {} 1033static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
1034static void vio_cmo_bus_init() {} 1034static void vio_cmo_bus_init(void) {}
1035static void vio_cmo_sysfs_init() { } 1035static void vio_cmo_sysfs_init(void) { }
1036#endif /* CONFIG_PPC_SMLPAR */ 1036#endif /* CONFIG_PPC_SMLPAR */
1037EXPORT_SYMBOL(vio_cmo_entitlement_update); 1037EXPORT_SYMBOL(vio_cmo_entitlement_update);
1038EXPORT_SYMBOL(vio_cmo_set_dev_desired); 1038EXPORT_SYMBOL(vio_cmo_set_dev_desired);
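
The vio.c hunk above, and the ibmveth.c hunks further down, follow the DMA API change in which dma_mapping_error() takes the struct device * that performed the mapping, so the check can use that device's dma_ops. A minimal sketch of the resulting map/check/unmap pattern follows; the function and parameter names are illustrative, not from the patch.

/*
 * Sketch only: the map / error-check / unmap pattern with the two-argument
 * dma_mapping_error(dev, addr) used in the hunks above and below.
 * map_one_buffer()/unmap_one_buffer() are illustrative helpers.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int map_one_buffer(struct device *dev, void *buf, size_t len,
			  dma_addr_t *handle)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* The mapping device is now passed in so the right dma_ops are used */
	if (dma_mapping_error(dev, addr))
		return -EIO;

	*handle = addr;
	return 0;
}

static void unmap_one_buffer(struct device *dev, dma_addr_t handle, size_t len)
{
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}
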
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index ed0aab0208a6..f1c2d55b4377 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -736,14 +736,21 @@ static int __init hugetlbpage_init(void)
736 736
737 if (!cpu_has_feature(CPU_FTR_16M_PAGE)) 737 if (!cpu_has_feature(CPU_FTR_16M_PAGE))
738 return -ENODEV; 738 return -ENODEV;
739
739 /* Add supported huge page sizes. Need to change HUGE_MAX_HSTATE 740 /* Add supported huge page sizes. Need to change HUGE_MAX_HSTATE
740 * and adjust PTE_NONCACHE_NUM if the number of supported huge page 741 * and adjust PTE_NONCACHE_NUM if the number of supported huge page
741 * sizes changes. 742 * sizes changes.
742 */ 743 */
743 set_huge_psize(MMU_PAGE_16M); 744 set_huge_psize(MMU_PAGE_16M);
744 set_huge_psize(MMU_PAGE_64K);
745 set_huge_psize(MMU_PAGE_16G); 745 set_huge_psize(MMU_PAGE_16G);
746 746
747 /* Temporarily disable support for 64K huge pages when 64K SPU local
748 * store support is enabled as the current implementation conflicts.
749 */
750#ifndef CONFIG_SPU_FS_64K_LS
751 set_huge_psize(MMU_PAGE_64K);
752#endif
753
747 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { 754 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
748 if (mmu_huge_psizes[psize]) { 755 if (mmu_huge_psizes[psize]) {
749 huge_pgtable_cache(psize) = kmem_cache_create( 756 huge_pgtable_cache(psize) = kmem_cache_create(
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 31635446901a..88ccf3a08a9c 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -541,6 +541,78 @@ static int __init pmac_declare_of_platform_devices(void)
541} 541}
542machine_device_initcall(powermac, pmac_declare_of_platform_devices); 542machine_device_initcall(powermac, pmac_declare_of_platform_devices);
543 543
544#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
545/*
546 * This is called very early, as part of console_init() (typically just after
547 * time_init()). This function is respondible for trying to find a good
548 * default console on serial ports. It tries to match the open firmware
549 * default output with one of the available serial console drivers.
550 */
551static int __init check_pmac_serial_console(void)
552{
553 struct device_node *prom_stdout = NULL;
554 int offset = 0;
555 const char *name;
556#ifdef CONFIG_SERIAL_PMACZILOG_TTYS
557 char *devname = "ttyS";
558#else
559 char *devname = "ttyPZ";
560#endif
561
562 pr_debug(" -> check_pmac_serial_console()\n");
563
564 /* The user has requested a console so this is already set up. */
565 if (strstr(boot_command_line, "console=")) {
566 pr_debug(" console was specified !\n");
567 return -EBUSY;
568 }
569
570 if (!of_chosen) {
571 pr_debug(" of_chosen is NULL !\n");
572 return -ENODEV;
573 }
574
575 /* We are getting a weird phandle from OF ... */
576 /* ... So use the full path instead */
577 name = of_get_property(of_chosen, "linux,stdout-path", NULL);
578 if (name == NULL) {
579 pr_debug(" no linux,stdout-path !\n");
580 return -ENODEV;
581 }
582 prom_stdout = of_find_node_by_path(name);
583 if (!prom_stdout) {
584 pr_debug(" can't find stdout package %s !\n", name);
585 return -ENODEV;
586 }
587 pr_debug("stdout is %s\n", prom_stdout->full_name);
588
589 name = of_get_property(prom_stdout, "name", NULL);
590 if (!name) {
591 pr_debug(" stdout package has no name !\n");
592 goto not_found;
593 }
594
595 if (strcmp(name, "ch-a") == 0)
596 offset = 0;
597 else if (strcmp(name, "ch-b") == 0)
598 offset = 1;
599 else
600 goto not_found;
601 of_node_put(prom_stdout);
602
603 pr_debug("Found serial console at %s%d\n", devname, offset);
604
605 return add_preferred_console(devname, offset, NULL);
606
607 not_found:
608 pr_debug("No preferred console found !\n");
609 of_node_put(prom_stdout);
610 return -ENODEV;
611}
612console_initcall(check_pmac_serial_console);
613
614#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
615
544/* 616/*
545 * Called very early, MMU is off, device-tree isn't unflattened 617 * Called very early, MMU is off, device-tree isn't unflattened
546 */ 618 */
diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c
index 47de4d3fc167..572771fd8463 100644
--- a/arch/powerpc/platforms/powermac/udbg_scc.c
+++ b/arch/powerpc/platforms/powermac/udbg_scc.c
@@ -125,13 +125,23 @@ void udbg_scc_init(int force_scc)
125 out_8(sccc, 0xc0); 125 out_8(sccc, 0xc0);
126 126
127 /* If SCC was the OF output port, read the BRG value, else 127 /* If SCC was the OF output port, read the BRG value, else
128 * Setup for 57600 8N1 128 * Setup for 38400 or 57600 8N1 depending on the machine
129 */ 129 */
130 if (ch_def != NULL) { 130 if (ch_def != NULL) {
131 out_8(sccc, 13); 131 out_8(sccc, 13);
132 scc_inittab[1] = in_8(sccc); 132 scc_inittab[1] = in_8(sccc);
133 out_8(sccc, 12); 133 out_8(sccc, 12);
134 scc_inittab[3] = in_8(sccc); 134 scc_inittab[3] = in_8(sccc);
135 } else if (machine_is_compatible("RackMac1,1")
136 || machine_is_compatible("RackMac1,2")
137 || machine_is_compatible("MacRISC4")) {
138 /* Xserves and G5s default to 57600 */
139 scc_inittab[1] = 0;
140 scc_inittab[3] = 0;
141 } else {
142 /* Others default to 38400 */
143 scc_inittab[1] = 0;
144 scc_inittab[3] = 1;
135 } 145 }
136 146
137 for (i = 0; i < sizeof(scc_inittab); ++i) 147 for (i = 0; i < sizeof(scc_inittab); ++i)
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index c6b3be03168b..38fe32a7cc70 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -289,7 +289,9 @@ static int cmm_thread(void *dummy)
289} 289}
290 290
291#define CMM_SHOW(name, format, args...) \ 291#define CMM_SHOW(name, format, args...) \
292 static ssize_t show_##name(struct sys_device *dev, char *buf) \ 292 static ssize_t show_##name(struct sys_device *dev, \
293 struct sysdev_attribute *attr, \
294 char *buf) \
293 { \ 295 { \
294 return sprintf(buf, format, ##args); \ 296 return sprintf(buf, format, ##args); \
295 } \ 297 } \
@@ -298,12 +300,14 @@ static int cmm_thread(void *dummy)
298CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(loaned_pages)); 300CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(loaned_pages));
299CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target)); 301CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target));
300 302
301static ssize_t show_oom_pages(struct sys_device *dev, char *buf) 303static ssize_t show_oom_pages(struct sys_device *dev,
304 struct sysdev_attribute *attr, char *buf)
302{ 305{
303 return sprintf(buf, "%lu\n", PAGES2KB(oom_freed_pages)); 306 return sprintf(buf, "%lu\n", PAGES2KB(oom_freed_pages));
304} 307}
305 308
306static ssize_t store_oom_pages(struct sys_device *dev, 309static ssize_t store_oom_pages(struct sys_device *dev,
310 struct sysdev_attribute *attr,
307 const char *buf, size_t count) 311 const char *buf, size_t count)
308{ 312{
309 unsigned long val = simple_strtoul (buf, NULL, 10); 313 unsigned long val = simple_strtoul (buf, NULL, 10);
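
The cmm.c hunk above tracks the sysdev API change that passes the struct sysdev_attribute * into show/store callbacks, which lets one callback serve several attributes. A minimal sketch of an attribute written against the updated prototypes follows; the demo_* names are invented for the example.

/*
 * Sketch only: a sysdev attribute with the show/store prototypes the
 * cmm.c hunk above converts to.  The "demo" attribute is made up.
 */
#include <linux/kernel.h>
#include <linux/sysdev.h>

static unsigned long demo_value;

static ssize_t demo_show(struct sys_device *dev,
			 struct sysdev_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", demo_value);
}

static ssize_t demo_store(struct sys_device *dev,
			  struct sysdev_attribute *attr,
			  const char *buf, size_t count)
{
	demo_value = simple_strtoul(buf, NULL, 10);
	return count;
}

static SYSDEV_ATTR(demo, 0644, demo_show, demo_store);
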
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 91ec9fdc7184..a03fe1fb61ca 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -260,7 +260,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
260 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, 260 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
261 pool->buff_size, DMA_FROM_DEVICE); 261 pool->buff_size, DMA_FROM_DEVICE);
262 262
263 if (dma_mapping_error((&adapter->vdev->dev, dma_addr)) 263 if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
264 goto failure; 264 goto failure;
265 265
266 pool->free_map[free_index] = IBM_VETH_INVALID_MAP; 266 pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
@@ -294,7 +294,7 @@ failure:
294 pool->consumer_index = pool->size - 1; 294 pool->consumer_index = pool->size - 1;
295 else 295 else
296 pool->consumer_index--; 296 pool->consumer_index--;
297 if (!dma_mapping_error((&adapter->vdev->dev, dma_addr)) 297 if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
298 dma_unmap_single(&adapter->vdev->dev, 298 dma_unmap_single(&adapter->vdev->dev,
299 pool->dma_addr[index], pool->buff_size, 299 pool->dma_addr[index], pool->buff_size,
300 DMA_FROM_DEVICE); 300 DMA_FROM_DEVICE);
@@ -488,7 +488,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
488 &adapter->rx_buff_pool[i]); 488 &adapter->rx_buff_pool[i]);
489 489
490 if (adapter->bounce_buffer != NULL) { 490 if (adapter->bounce_buffer != NULL) {
491 if (!dma_mapping_error(adapter->bounce_buffer_dma)) { 491 if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
492 dma_unmap_single(&adapter->vdev->dev, 492 dma_unmap_single(&adapter->vdev->dev,
493 adapter->bounce_buffer_dma, 493 adapter->bounce_buffer_dma,
494 adapter->netdev->mtu + IBMVETH_BUFF_OH, 494 adapter->netdev->mtu + IBMVETH_BUFF_OH,
@@ -924,7 +924,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
924 buf[1] = 0; 924 buf[1] = 0;
925 } 925 }
926 926
927 if (dma_mapping_error((&adapter->vdev->dev, data_dma_addr)) { 927 if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
928 if (!firmware_has_feature(FW_FEATURE_CMO)) 928 if (!firmware_has_feature(FW_FEATURE_CMO))
929 ibmveth_error_printk("tx: unable to map xmit buffer\n"); 929 ibmveth_error_printk("tx: unable to map xmit buffer\n");
930 skb_copy_from_linear_data(skb, adapter->bounce_buffer, 930 skb_copy_from_linear_data(skb, adapter->bounce_buffer,
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 1d7ec3129349..f821dbc952a4 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -13,3 +13,9 @@ config OF_I2C
13 depends on PPC_OF && I2C 13 depends on PPC_OF && I2C
14 help 14 help
15 OpenFirmware I2C accessors 15 OpenFirmware I2C accessors
16
17config OF_SPI
18 def_tristate SPI
19 depends on OF && PPC_OF && SPI
20 help
21 OpenFirmware SPI accessors
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 548772e871fd..4c3c6f8e36f5 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -2,3 +2,4 @@ obj-y = base.o
2obj-$(CONFIG_OF_DEVICE) += device.o platform.o 2obj-$(CONFIG_OF_DEVICE) += device.o platform.o
3obj-$(CONFIG_OF_GPIO) += gpio.o 3obj-$(CONFIG_OF_GPIO) += gpio.o
4obj-$(CONFIG_OF_I2C) += of_i2c.o 4obj-$(CONFIG_OF_I2C) += of_i2c.o
5obj-$(CONFIG_OF_SPI) += of_spi.o
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 23ffb7c0caf2..ad8ac1a8af28 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -385,3 +385,91 @@ struct device_node *of_find_matching_node(struct device_node *from,
385 return np; 385 return np;
386} 386}
387EXPORT_SYMBOL(of_find_matching_node); 387EXPORT_SYMBOL(of_find_matching_node);
388
389/**
390 * of_modalias_table: Table of explicit compatible ==> modalias mappings
391 *
392 * This table allows particulare compatible property values to be mapped
393 * to modalias strings. This is useful for busses which do not directly
394 * understand the OF device tree but are populated based on data contained
395 * within the device tree. SPI and I2C are the two current users of this
396 * table.
397 *
398 * In most cases, devices do not need to be listed in this table because
399 * the modalias value can be derived directly from the compatible table.
400 * However, if for any reason a value cannot be derived, then this table
401 * provides a method to override the implicit derivation.
402 *
403 * At the moment, a single table is used for all bus types because it is
404 * assumed that the data size is small and that the compatible values
405 * should already be distinct enough to differentiate between SPI, I2C
406 * and other devices.
407 */
408struct of_modalias_table {
409 char *of_device;
410 char *modalias;
411};
412static struct of_modalias_table of_modalias_table[] = {
413 /* Empty for now; add entries as needed */
414};
415
416/**
417 * of_modalias_node - Lookup appropriate modalias for a device node
418 * @node: pointer to a device tree node
419 * @modalias: Pointer to buffer that modalias value will be copied into
420 * @len: Length of modalias value
421 *
422 * Based on the value of the compatible property, this routine will determine
423 * an appropriate modalias value for a particular device tree node. Three
424 * separate methods are used to derive a modalias value.
425 *
426 * First method is to lookup the compatible value in of_modalias_table.
427 * Second is to look for a "linux,<modalias>" entry in the compatible list
428 * and used that for modalias. Third is to strip off the manufacturer
429 * prefix from the first compatible entry and use the remainder as modalias
430 *
431 * This routine returns 0 on success
432 */
433int of_modalias_node(struct device_node *node, char *modalias, int len)
434{
435 int i, cplen;
436 const char *compatible;
437 const char *p;
438
439 /* 1. search for exception list entry */
440 for (i = 0; i < ARRAY_SIZE(of_modalias_table); i++) {
441 compatible = of_modalias_table[i].of_device;
442 if (!of_device_is_compatible(node, compatible))
443 continue;
444 strlcpy(modalias, of_modalias_table[i].modalias, len);
445 return 0;
446 }
447
448 compatible = of_get_property(node, "compatible", &cplen);
449 if (!compatible)
450 return -ENODEV;
451
452 /* 2. search for linux,<modalias> entry */
453 p = compatible;
454 while (cplen > 0) {
455 if (!strncmp(p, "linux,", 6)) {
456 p += 6;
457 strlcpy(modalias, p, len);
458 return 0;
459 }
460
461 i = strlen(p) + 1;
462 p += i;
463 cplen -= i;
464 }
465
466 /* 3. take first compatible entry and strip manufacturer */
467 p = strchr(compatible, ',');
468 if (!p)
469 return -ENODEV;
470 p++;
471 strlcpy(modalias, p, len);
472 return 0;
473}
474EXPORT_SYMBOL_GPL(of_modalias_node);
475
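As a minimal sketch (not part of this patch), bus code that enumerates the children of a controller node could use of_modalias_node() roughly as follows; my_register_children(), the 32-byte buffer and the pr_debug() reporting are illustrative assumptions, not code from this series:

#include <linux/kernel.h>
#include <linux/of.h>

/* Derive a modalias for every child of 'parent' using the three-step
 * lookup documented above, skipping nodes with no usable compatible value.
 */
static void my_register_children(struct device_node *parent)
{
	struct device_node *child;
	char modalias[32];		/* assumed buffer size */

	for_each_child_of_node(parent, child) {
		if (of_modalias_node(child, modalias, sizeof(modalias)) < 0)
			continue;	/* no exception entry, no compatible */

		pr_debug("%s -> modalias '%s'\n", child->full_name, modalias);
		/* bus-specific registration would happen here */
	}
}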
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index 344e1b03dd8b..6a98dc8aa30b 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -16,62 +16,6 @@
16#include <linux/of_i2c.h> 16#include <linux/of_i2c.h>
17#include <linux/module.h> 17#include <linux/module.h>
18 18
19struct i2c_driver_device {
20 char *of_device;
21 char *i2c_type;
22};
23
24static struct i2c_driver_device i2c_devices[] = {
25};
26
27static int of_find_i2c_driver(struct device_node *node,
28 struct i2c_board_info *info)
29{
30 int i, cplen;
31 const char *compatible;
32 const char *p;
33
34 /* 1. search for exception list entry */
35 for (i = 0; i < ARRAY_SIZE(i2c_devices); i++) {
36 if (!of_device_is_compatible(node, i2c_devices[i].of_device))
37 continue;
38 if (strlcpy(info->type, i2c_devices[i].i2c_type,
39 I2C_NAME_SIZE) >= I2C_NAME_SIZE)
40 return -ENOMEM;
41
42 return 0;
43 }
44
45 compatible = of_get_property(node, "compatible", &cplen);
46 if (!compatible)
47 return -ENODEV;
48
49 /* 2. search for linux,<i2c-type> entry */
50 p = compatible;
51 while (cplen > 0) {
52 if (!strncmp(p, "linux,", 6)) {
53 p += 6;
54 if (strlcpy(info->type, p,
55 I2C_NAME_SIZE) >= I2C_NAME_SIZE)
56 return -ENOMEM;
57 return 0;
58 }
59
60 i = strlen(p) + 1;
61 p += i;
62 cplen -= i;
63 }
64
65 /* 3. take fist compatible entry and strip manufacturer */
66 p = strchr(compatible, ',');
67 if (!p)
68 return -ENODEV;
69 p++;
70 if (strlcpy(info->type, p, I2C_NAME_SIZE) >= I2C_NAME_SIZE)
71 return -ENOMEM;
72 return 0;
73}
74
75void of_register_i2c_devices(struct i2c_adapter *adap, 19void of_register_i2c_devices(struct i2c_adapter *adap,
76 struct device_node *adap_node) 20 struct device_node *adap_node)
77{ 21{
@@ -83,6 +27,9 @@ void of_register_i2c_devices(struct i2c_adapter *adap,
83 const u32 *addr; 27 const u32 *addr;
84 int len; 28 int len;
85 29
30 if (of_modalias_node(node, info.type, sizeof(info.type)) < 0)
31 continue;
32
86 addr = of_get_property(node, "reg", &len); 33 addr = of_get_property(node, "reg", &len);
87 if (!addr || len < sizeof(int) || *addr > (1 << 10) - 1) { 34 if (!addr || len < sizeof(int) || *addr > (1 << 10) - 1) {
88 printk(KERN_ERR 35 printk(KERN_ERR
@@ -92,11 +39,6 @@ void of_register_i2c_devices(struct i2c_adapter *adap,
92 39
93 info.irq = irq_of_parse_and_map(node, 0); 40 info.irq = irq_of_parse_and_map(node, 0);
94 41
95 if (of_find_i2c_driver(node, &info) < 0) {
96 irq_dispose_mapping(info.irq);
97 continue;
98 }
99
100 info.addr = *addr; 42 info.addr = *addr;
101 43
102 request_module(info.type); 44 request_module(info.type);
diff --git a/drivers/of/of_spi.c b/drivers/of/of_spi.c
new file mode 100644
index 000000000000..b01eec026f68
--- /dev/null
+++ b/drivers/of/of_spi.c
@@ -0,0 +1,93 @@
1/*
2 * SPI OF support routines
3 * Copyright (C) 2008 Secret Lab Technologies Ltd.
4 *
5 * Support routines for deriving SPI device attachments from the device
6 * tree.
7 */
8
9#include <linux/of.h>
10#include <linux/device.h>
11#include <linux/spi/spi.h>
12#include <linux/of_spi.h>
13
14/**
15 * of_register_spi_devices - Register child devices onto the SPI bus
16 * @master: Pointer to spi_master device
17 * @np: parent node of SPI device nodes
18 *
19 * Registers an spi_device for each child node of 'np' which has a 'reg'
20 * property.
21 */
22void of_register_spi_devices(struct spi_master *master, struct device_node *np)
23{
24 struct spi_device *spi;
25 struct device_node *nc;
26 const u32 *prop;
27 int rc;
28 int len;
29
30 for_each_child_of_node(np, nc) {
31 /* Alloc an spi_device */
32 spi = spi_alloc_device(master);
33 if (!spi) {
34 dev_err(&master->dev, "spi_device alloc error for %s\n",
35 nc->full_name);
36 spi_dev_put(spi);
37 continue;
38 }
39
40 /* Select device driver */
41 if (of_modalias_node(nc, spi->modalias,
42 sizeof(spi->modalias)) < 0) {
43 dev_err(&master->dev, "cannot find modalias for %s\n",
44 nc->full_name);
45 spi_dev_put(spi);
46 continue;
47 }
48
49 /* Device address */
50 prop = of_get_property(nc, "reg", &len);
51 if (!prop || len < sizeof(*prop)) {
52 dev_err(&master->dev, "%s has no 'reg' property\n",
53 nc->full_name);
54 spi_dev_put(spi);
55 continue;
56 }
57 spi->chip_select = *prop;
58
59 /* Mode (clock phase/polarity/etc.) */
60 if (of_find_property(nc, "spi-cpha", NULL))
61 spi->mode |= SPI_CPHA;
62 if (of_find_property(nc, "spi-cpol", NULL))
63 spi->mode |= SPI_CPOL;
64
65 /* Device speed */
66 prop = of_get_property(nc, "spi-max-frequency", &len);
67 if (!prop || len < sizeof(*prop)) {
68 dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n",
69 nc->full_name);
70 spi_dev_put(spi);
71 continue;
72 }
73 spi->max_speed_hz = *prop;
74
75 /* IRQ */
76 spi->irq = irq_of_parse_and_map(nc, 0);
77
78 /* Store a pointer to the node in the device structure */
79 of_node_get(nc);
80 spi->dev.archdata.of_node = nc;
81
82 /* Register the new device */
83 request_module(spi->modalias);
84 rc = spi_add_device(spi);
85 if (rc) {
86 dev_err(&master->dev, "spi_device register error %s\n",
87 nc->full_name);
88 spi_dev_put(spi);
89 }
90
91 }
92}
93EXPORT_SYMBOL(of_register_spi_devices);
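A minimal sketch (not part of this patch) of how an OF-aware SPI controller driver might hand its child nodes to this helper once the master is registered; example_attach_of_children() and the chip-select count are assumptions, and the driver is presumed to have already filled in its setup/transfer callbacks and bus numbering:

#include <linux/of.h>
#include <linux/of_spi.h>
#include <linux/spi/spi.h>

static int example_attach_of_children(struct spi_master *master,
				      struct device_node *np)
{
	int ret;

	master->num_chipselect = 4;	/* assumed board wiring */

	ret = spi_register_master(master);
	if (ret)
		return ret;

	/* Creates one spi_device per child node, parsing the reg, spi-cpha,
	 * spi-cpol and spi-max-frequency properties handled above.
	 */
	of_register_spi_devices(master, np);
	return 0;
}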
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ecca4a6a6f94..964124b60db2 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -178,6 +178,96 @@ struct boardinfo {
178static LIST_HEAD(board_list); 178static LIST_HEAD(board_list);
179static DEFINE_MUTEX(board_lock); 179static DEFINE_MUTEX(board_lock);
180 180
181/**
182 * spi_alloc_device - Allocate a new SPI device
183 * @master: Controller to which device is connected
184 * Context: can sleep
185 *
186 * Allows a driver to allocate and initialize a spi_device without
187 * registering it immediately. This allows a driver to directly
188 * fill the spi_device with device parameters before calling
189 * spi_add_device() on it.
190 *
191 * The caller is responsible for calling spi_add_device() on the returned
192 * spi_device structure to add it to the SPI master. If the caller
193 * needs to discard the spi_device without adding it, then it should
194 * call spi_dev_put() on it.
195 *
196 * Returns a pointer to the new device, or NULL.
197 */
198struct spi_device *spi_alloc_device(struct spi_master *master)
199{
200 struct spi_device *spi;
201 struct device *dev = master->dev.parent;
202
203 if (!spi_master_get(master))
204 return NULL;
205
206 spi = kzalloc(sizeof *spi, GFP_KERNEL);
207 if (!spi) {
208 dev_err(dev, "cannot alloc spi_device\n");
209 spi_master_put(master);
210 return NULL;
211 }
212
213 spi->master = master;
214 spi->dev.parent = dev;
215 spi->dev.bus = &spi_bus_type;
216 spi->dev.release = spidev_release;
217 device_initialize(&spi->dev);
218 return spi;
219}
220EXPORT_SYMBOL_GPL(spi_alloc_device);
221
222/**
223 * spi_add_device - Add spi_device allocated with spi_alloc_device
224 * @spi: spi_device to register
225 *
226 * Companion function to spi_alloc_device. Devices allocated with
227 * spi_alloc_device can be added onto the spi bus with this function.
228 *
229 * Returns 0 on success; non-zero on failure
230 */
231int spi_add_device(struct spi_device *spi)
232{
233 struct device *dev = spi->master->dev.parent;
234 int status;
235
236 /* Chipselects are numbered 0..max; validate. */
237 if (spi->chip_select >= spi->master->num_chipselect) {
238 dev_err(dev, "cs%d >= max %d\n",
239 spi->chip_select,
240 spi->master->num_chipselect);
241 return -EINVAL;
242 }
243
244 /* Set the bus ID string */
245 snprintf(spi->dev.bus_id, sizeof spi->dev.bus_id,
246 "%s.%u", spi->master->dev.bus_id,
247 spi->chip_select);
248
249 /* drivers may modify this initial i/o setup */
250 status = spi->master->setup(spi);
251 if (status < 0) {
252 dev_err(dev, "can't %s %s, status %d\n",
253 "setup", spi->dev.bus_id, status);
254 return status;
255 }
256
257 /* driver core catches callers that misbehave by defining
258 * devices that already exist.
259 */
260 status = device_add(&spi->dev);
261 if (status < 0) {
262 dev_err(dev, "can't %s %s, status %d\n",
263 "add", spi->dev.bus_id, status);
264 return status;
265 }
266
267 dev_dbg(dev, "registered child %s\n", spi->dev.bus_id);
268 return 0;
269}
270EXPORT_SYMBOL_GPL(spi_add_device);
181 271
182/** 272/**
183 * spi_new_device - instantiate one new SPI device 273 * spi_new_device - instantiate one new SPI device
@@ -197,7 +287,6 @@ struct spi_device *spi_new_device(struct spi_master *master,
197 struct spi_board_info *chip) 287 struct spi_board_info *chip)
198{ 288{
199 struct spi_device *proxy; 289 struct spi_device *proxy;
200 struct device *dev = master->dev.parent;
201 int status; 290 int status;
202 291
203 /* NOTE: caller did any chip->bus_num checks necessary. 292 /* NOTE: caller did any chip->bus_num checks necessary.
@@ -207,66 +296,28 @@ struct spi_device *spi_new_device(struct spi_master *master,
207 * suggests syslogged diagnostics are best here (ugh). 296 * suggests syslogged diagnostics are best here (ugh).
208 */ 297 */
209 298
210 /* Chipselects are numbered 0..max; validate. */ 299 proxy = spi_alloc_device(master);
211 if (chip->chip_select >= master->num_chipselect) { 300 if (!proxy)
212 dev_err(dev, "cs%d > max %d\n",
213 chip->chip_select,
214 master->num_chipselect);
215 return NULL;
216 }
217
218 if (!spi_master_get(master))
219 return NULL; 301 return NULL;
220 302
221 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); 303 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
222 304
223 proxy = kzalloc(sizeof *proxy, GFP_KERNEL);
224 if (!proxy) {
225 dev_err(dev, "can't alloc dev for cs%d\n",
226 chip->chip_select);
227 goto fail;
228 }
229 proxy->master = master;
230 proxy->chip_select = chip->chip_select; 305 proxy->chip_select = chip->chip_select;
231 proxy->max_speed_hz = chip->max_speed_hz; 306 proxy->max_speed_hz = chip->max_speed_hz;
232 proxy->mode = chip->mode; 307 proxy->mode = chip->mode;
233 proxy->irq = chip->irq; 308 proxy->irq = chip->irq;
234 strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); 309 strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
235
236 snprintf(proxy->dev.bus_id, sizeof proxy->dev.bus_id,
237 "%s.%u", master->dev.bus_id,
238 chip->chip_select);
239 proxy->dev.parent = dev;
240 proxy->dev.bus = &spi_bus_type;
241 proxy->dev.platform_data = (void *) chip->platform_data; 310 proxy->dev.platform_data = (void *) chip->platform_data;
242 proxy->controller_data = chip->controller_data; 311 proxy->controller_data = chip->controller_data;
243 proxy->controller_state = NULL; 312 proxy->controller_state = NULL;
244 proxy->dev.release = spidev_release;
245 313
246 /* drivers may modify this initial i/o setup */ 314 status = spi_add_device(proxy);
247 status = master->setup(proxy);
248 if (status < 0) { 315 if (status < 0) {
249 dev_err(dev, "can't %s %s, status %d\n", 316 spi_dev_put(proxy);
250 "setup", proxy->dev.bus_id, status); 317 return NULL;
251 goto fail;
252 } 318 }
253 319
254 /* driver core catches callers that misbehave by defining
255 * devices that already exist.
256 */
257 status = device_register(&proxy->dev);
258 if (status < 0) {
259 dev_err(dev, "can't %s %s, status %d\n",
260 "add", proxy->dev.bus_id, status);
261 goto fail;
262 }
263 dev_dbg(dev, "registered child %s\n", proxy->dev.bus_id);
264 return proxy; 320 return proxy;
265
266fail:
267 spi_master_put(master);
268 kfree(proxy);
269 return NULL;
270} 321}
271EXPORT_SYMBOL_GPL(spi_new_device); 322EXPORT_SYMBOL_GPL(spi_new_device);
272 323
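A minimal sketch (not part of this patch) of the two-stage registration path that of_register_spi_devices() relies on; the modalias string, chip select, speed and mode below are made-up values for illustration:

#include <linux/spi/spi.h>
#include <linux/string.h>

static int example_add_chip(struct spi_master *master)
{
	struct spi_device *spi;
	int ret;

	spi = spi_alloc_device(master);		/* stage 1: allocate */
	if (!spi)
		return -ENOMEM;

	/* Fill in what a spi_board_info entry would normally provide. */
	strlcpy(spi->modalias, "example-chip", sizeof(spi->modalias));
	spi->chip_select = 1;			/* assumed wiring */
	spi->max_speed_hz = 1000000;
	spi->mode = SPI_MODE_3;

	ret = spi_add_device(spi);		/* stage 2: register */
	if (ret)
		spi_dev_put(spi);		/* discard on failure */
	return ret;
}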
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index c9601dfb4a1e..6b18ba9d2d85 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -46,6 +46,8 @@
46#define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */ 46#define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */
47#define _PAGE_F_SECOND _PAGE_SECONDARY 47#define _PAGE_F_SECOND _PAGE_SECONDARY
48#define _PAGE_F_GIX _PAGE_GROUP_IX 48#define _PAGE_F_GIX _PAGE_GROUP_IX
49#define _PAGE_SPECIAL 0x10000 /* software: special page */
50#define __HAVE_ARCH_PTE_SPECIAL
49 51
50/* PTE flags to conserve for HPTE identification */ 52/* PTE flags to conserve for HPTE identification */
51#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \ 53#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index 7e54adb35596..07b0d8f09cb6 100644
--- a/include/asm-powerpc/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
@@ -70,6 +70,8 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
70#define PGDIR_MASK (~(PGDIR_SIZE-1)) 70#define PGDIR_MASK (~(PGDIR_SIZE-1))
71 71
72/* Additional PTE bits (don't change without checking asm in hash_low.S) */ 72/* Additional PTE bits (don't change without checking asm in hash_low.S) */
73#define __HAVE_ARCH_PTE_SPECIAL
74#define _PAGE_SPECIAL 0x00000400 /* software: special page */
73#define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */ 75#define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */
74#define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */ 76#define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */
75#define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */ 77#define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index bdbab72f3ebc..6fe39e327047 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -401,6 +401,9 @@ extern int icache_44x_need_flush;
401#ifndef _PAGE_COHERENT 401#ifndef _PAGE_COHERENT
402#define _PAGE_COHERENT 0 402#define _PAGE_COHERENT 0
403#endif 403#endif
404#ifndef _PAGE_WRITETHRU
405#define _PAGE_WRITETHRU 0
406#endif
404#ifndef _PMD_PRESENT_MASK 407#ifndef _PMD_PRESENT_MASK
405#define _PMD_PRESENT_MASK _PMD_PRESENT 408#define _PMD_PRESENT_MASK _PMD_PRESENT
406#endif 409#endif
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index ba8000352b9a..5fc78c0be302 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -245,7 +245,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
245static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;} 245static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
246static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;} 246static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
247static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;} 247static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
248static inline int pte_special(pte_t pte) { return 0; } 248static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
249 249
250static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } 250static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
251static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } 251static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -265,7 +265,7 @@ static inline pte_t pte_mkyoung(pte_t pte) {
265static inline pte_t pte_mkhuge(pte_t pte) { 265static inline pte_t pte_mkhuge(pte_t pte) {
266 return pte; } 266 return pte; }
267static inline pte_t pte_mkspecial(pte_t pte) { 267static inline pte_t pte_mkspecial(pte_t pte) {
268 return pte; } 268 pte_val(pte) |= _PAGE_SPECIAL; return pte; }
269static inline unsigned long pte_pgprot(pte_t pte) 269static inline unsigned long pte_pgprot(pte_t pte)
270{ 270{
271 return __pgprot(pte_val(pte)) & PAGE_PROT_BITS; 271 return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
diff --git a/include/asm-powerpc/ptrace.h b/include/asm-powerpc/ptrace.h
index 3d6e31024e56..734e0754fb9b 100644
--- a/include/asm-powerpc/ptrace.h
+++ b/include/asm-powerpc/ptrace.h
@@ -84,6 +84,7 @@ struct pt_regs {
84#ifndef __ASSEMBLY__ 84#ifndef __ASSEMBLY__
85 85
86#define instruction_pointer(regs) ((regs)->nip) 86#define instruction_pointer(regs) ((regs)->nip)
87#define user_stack_pointer(regs) ((regs)->gpr[1])
87#define regs_return_value(regs) ((regs)->gpr[3]) 88#define regs_return_value(regs) ((regs)->gpr[3])
88 89
89#ifdef CONFIG_SMP 90#ifdef CONFIG_SMP
diff --git a/include/asm-powerpc/signal.h b/include/asm-powerpc/signal.h
index a8c7babf4950..a7360cdd99eb 100644
--- a/include/asm-powerpc/signal.h
+++ b/include/asm-powerpc/signal.h
@@ -122,8 +122,7 @@ typedef struct sigaltstack {
122 122
123#ifdef __KERNEL__ 123#ifdef __KERNEL__
124struct pt_regs; 124struct pt_regs;
125extern int do_signal(sigset_t *oldset, struct pt_regs *regs); 125extern void do_signal(struct pt_regs *regs, unsigned long thread_info_flags);
126extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
127#define ptrace_signal_deliver(regs, cookie) do { } while (0) 126#define ptrace_signal_deliver(regs, cookie) do { } while (0)
128#endif /* __KERNEL__ */ 127#endif /* __KERNEL__ */
129 128
diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h
index 416d4c288cea..4d28e1e4521b 100644
--- a/include/asm-powerpc/smp.h
+++ b/include/asm-powerpc/smp.h
@@ -62,6 +62,8 @@ extern int smp_hw_index[];
62#endif 62#endif
63 63
64DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); 64DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
65DECLARE_PER_CPU(cpumask_t, cpu_core_map);
66extern int cpu_to_core_id(int cpu);
65 67
66/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers. 68/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
67 * 69 *
diff --git a/include/asm-powerpc/syscall.h b/include/asm-powerpc/syscall.h
new file mode 100644
index 000000000000..efa7f0b879f3
--- /dev/null
+++ b/include/asm-powerpc/syscall.h
@@ -0,0 +1,84 @@
1/*
2 * Access to user system call parameters and results
3 *
4 * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU General Public License v.2.
9 *
10 * See asm-generic/syscall.h for descriptions of what we must do here.
11 */
12
13#ifndef _ASM_SYSCALL_H
14#define _ASM_SYSCALL_H 1
15
16#include <linux/sched.h>
17
18static inline long syscall_get_nr(struct task_struct *task,
19 struct pt_regs *regs)
20{
21 return TRAP(regs) == 0xc00 ? regs->gpr[0] : -1L;
22}
23
24static inline void syscall_rollback(struct task_struct *task,
25 struct pt_regs *regs)
26{
27 regs->gpr[3] = regs->orig_gpr3;
28}
29
30static inline long syscall_get_error(struct task_struct *task,
31 struct pt_regs *regs)
32{
33 return (regs->ccr & 0x1000) ? -regs->gpr[3] : 0;
34}
35
36static inline long syscall_get_return_value(struct task_struct *task,
37 struct pt_regs *regs)
38{
39 return regs->gpr[3];
40}
41
42static inline void syscall_set_return_value(struct task_struct *task,
43 struct pt_regs *regs,
44 int error, long val)
45{
46 if (error) {
47 regs->ccr |= 0x1000L;
48 regs->gpr[3] = -error;
49 } else {
50 regs->ccr &= ~0x1000L;
51 regs->gpr[3] = val;
52 }
53}
54
55static inline void syscall_get_arguments(struct task_struct *task,
56 struct pt_regs *regs,
57 unsigned int i, unsigned int n,
58 unsigned long *args)
59{
60 BUG_ON(i + n > 6);
61#ifdef CONFIG_PPC64
62 if (test_tsk_thread_flag(task, TIF_32BIT)) {
63 /*
64 * Zero-extend 32-bit argument values. The high bits are
65 * garbage ignored by the actual syscall dispatch.
66 */
67 while (n-- > 0)
68 args[n] = (u32) regs->gpr[3 + i + n];
69 return;
70 }
71#endif
72 memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0]));
73}
74
75static inline void syscall_set_arguments(struct task_struct *task,
76 struct pt_regs *regs,
77 unsigned int i, unsigned int n,
78 const unsigned long *args)
79{
80 BUG_ON(i + n > 6);
81 memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
82}
83
84#endif /* _ASM_SYSCALL_H */
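A minimal sketch (not part of this patch) showing how generic tracing code might consume these accessors at a syscall stop; example_report_syscall() and the printk reporting are assumptions, only the asm/syscall.h calls come from this header:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/syscall.h>

static void example_report_syscall(struct task_struct *task,
				   struct pt_regs *regs)
{
	unsigned long args[6];
	long nr = syscall_get_nr(task, regs);

	if (nr == -1L)
		return;			/* task is not stopped in a syscall */

	/* Fetch all six possible argument slots (gpr[3]..gpr[8]). */
	syscall_get_arguments(task, regs, 0, 6, args);

	printk(KERN_DEBUG "syscall %ld(%#lx, %#lx, ...)\n",
	       nr, args[0], args[1]);
}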
diff --git a/include/asm-powerpc/thread_info.h b/include/asm-powerpc/thread_info.h
index a9db562df69a..9665a26a253a 100644
--- a/include/asm-powerpc/thread_info.h
+++ b/include/asm-powerpc/thread_info.h
@@ -108,6 +108,7 @@ static inline struct thread_info *current_thread_info(void)
108#define TIF_SECCOMP 10 /* secure computing */ 108#define TIF_SECCOMP 10 /* secure computing */
109#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ 109#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
110#define TIF_NOERROR 12 /* Force successful syscall return */ 110#define TIF_NOERROR 12 /* Force successful syscall return */
111#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
111#define TIF_FREEZE 14 /* Freezing for suspend */ 112#define TIF_FREEZE 14 /* Freezing for suspend */
112#define TIF_RUNLATCH 15 /* Is the runlatch enabled? */ 113#define TIF_RUNLATCH 15 /* Is the runlatch enabled? */
113#define TIF_ABI_PENDING 16 /* 32/64 bit switch needed */ 114#define TIF_ABI_PENDING 16 /* 32/64 bit switch needed */
@@ -125,12 +126,14 @@ static inline struct thread_info *current_thread_info(void)
125#define _TIF_SECCOMP (1<<TIF_SECCOMP) 126#define _TIF_SECCOMP (1<<TIF_SECCOMP)
126#define _TIF_RESTOREALL (1<<TIF_RESTOREALL) 127#define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
127#define _TIF_NOERROR (1<<TIF_NOERROR) 128#define _TIF_NOERROR (1<<TIF_NOERROR)
129#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
128#define _TIF_FREEZE (1<<TIF_FREEZE) 130#define _TIF_FREEZE (1<<TIF_FREEZE)
129#define _TIF_RUNLATCH (1<<TIF_RUNLATCH) 131#define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
130#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) 132#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
131#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP) 133#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
132 134
133#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED) 135#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
136 _TIF_NOTIFY_RESUME)
134#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) 137#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
135 138
136/* Bits in local_flags */ 139/* Bits in local_flags */
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h
index 100c6fbfc587..c32da6f97999 100644
--- a/include/asm-powerpc/topology.h
+++ b/include/asm-powerpc/topology.h
@@ -108,6 +108,8 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
108#include <asm/smp.h> 108#include <asm/smp.h>
109 109
110#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 110#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
111#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
112#define topology_core_id(cpu) (cpu_to_core_id(cpu))
111#endif 113#endif
112#endif 114#endif
113 115
diff --git a/include/linux/of.h b/include/linux/of.h
index 59a61bdc98b6..79886ade070f 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -70,5 +70,6 @@ extern int of_n_addr_cells(struct device_node *np);
70extern int of_n_size_cells(struct device_node *np); 70extern int of_n_size_cells(struct device_node *np);
71extern const struct of_device_id *of_match_node( 71extern const struct of_device_id *of_match_node(
72 const struct of_device_id *matches, const struct device_node *node); 72 const struct of_device_id *matches, const struct device_node *node);
73extern int of_modalias_node(struct device_node *node, char *modalias, int len);
73 74
74#endif /* _LINUX_OF_H */ 75#endif /* _LINUX_OF_H */
diff --git a/include/linux/of_spi.h b/include/linux/of_spi.h
new file mode 100644
index 000000000000..5f71ee8c0868
--- /dev/null
+++ b/include/linux/of_spi.h
@@ -0,0 +1,18 @@
1/*
2 * OpenFirmware SPI support routines
3 * Copyright (C) 2008 Secret Lab Technologies Ltd.
4 *
5 * Support routines for deriving SPI device attachments from the device
6 * tree.
7 */
8
9#ifndef __LINUX_OF_SPI_H
10#define __LINUX_OF_SPI_H
11
12#include <linux/of.h>
13#include <linux/spi/spi.h>
14
15extern void of_register_spi_devices(struct spi_master *master,
16 struct device_node *np);
17
18#endif /* __LINUX_OF_SPI */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index a9cc29d46653..4be01bb44377 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -778,8 +778,20 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
778 * use spi_new_device() to describe each device. You can also call 778 * use spi_new_device() to describe each device. You can also call
779 * spi_unregister_device() to start making that device vanish, but 779 * spi_unregister_device() to start making that device vanish, but
780 * normally that would be handled by spi_unregister_master(). 780 * normally that would be handled by spi_unregister_master().
781 *
782 * You can also use spi_alloc_device() and spi_add_device() to perform a
783 * two-stage registration sequence for each spi_device. This gives the caller
784 * more control over the spi_device structure before it is registered,
785 * but requires the caller to initialize fields that would otherwise
786 * be defined using the board info.
781 */ 787 */
782extern struct spi_device * 788extern struct spi_device *
789spi_alloc_device(struct spi_master *master);
790
791extern int
792spi_add_device(struct spi_device *spi);
793
794extern struct spi_device *
783spi_new_device(struct spi_master *, struct spi_board_info *); 795spi_new_device(struct spi_master *, struct spi_board_info *);
784 796
785static inline void 797static inline void