author    Ingo Molnar <mingo@elte.hu>  2008-08-15 12:15:17 -0400
committer Ingo Molnar <mingo@elte.hu>  2008-08-15 12:15:17 -0400
commit    f3efbe582b5396d134024c03a5fa253f2a85d9a6 (patch)
tree      e4e15b7567b82d24cb1e7327398286a2b88df04c
parent    05d3ed0a1fe3ea05ab9f3b8d32576a0bc2e19660 (diff)
parent    b635acec48bcaa9183fcbf4e3955616b0d4119b5 (diff)
Merge branch 'linus' into x86/gart
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile          |   15
-rw-r--r--  arch/powerpc/kernel/cpu_setup_44x.S   |    6
-rw-r--r--  arch/powerpc/kernel/cputable.c        |   11
-rw-r--r--  arch/powerpc/kernel/entry_32.S        |   11
-rw-r--r--  arch/powerpc/kernel/entry_64.S        |   10
-rw-r--r--  arch/powerpc/kernel/idle.c            |    2
-rw-r--r--  arch/powerpc/kernel/iommu.c           |   28
-rw-r--r--  arch/powerpc/kernel/irq.c             |   25
-rw-r--r--  arch/powerpc/kernel/kgdb.c            |  410
-rw-r--r--  arch/powerpc/kernel/kprobes.c         |    6
-rw-r--r--  arch/powerpc/kernel/legacy_serial.c   |   44
-rw-r--r--  arch/powerpc/kernel/lparcfg.c         |  390
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c   |    2
-rw-r--r--  arch/powerpc/kernel/pci-common.c      |   17
-rw-r--r--  arch/powerpc/kernel/process.c         |   48
-rw-r--r--  arch/powerpc/kernel/prom_init.c       |   48
-rw-r--r--  arch/powerpc/kernel/ptrace.c          |  130
-rw-r--r--  arch/powerpc/kernel/ptrace32.c        |    2
-rw-r--r--  arch/powerpc/kernel/rtas.c            |    5
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c      |    2
-rw-r--r--  arch/powerpc/kernel/setup-common.c    |   24
-rw-r--r--  arch/powerpc/kernel/setup_32.c        |   16
-rw-r--r--  arch/powerpc/kernel/setup_64.c        |    3
-rw-r--r--  arch/powerpc/kernel/signal.c          |   27
-rw-r--r--  arch/powerpc/kernel/smp.c             |  119
-rw-r--r--  arch/powerpc/kernel/stacktrace.c      |    1
-rw-r--r--  arch/powerpc/kernel/suspend.c         |    1
-rw-r--r--  arch/powerpc/kernel/sysfs.c           |  314
-rw-r--r--  arch/powerpc/kernel/traps.c           |   16
-rw-r--r--  arch/powerpc/kernel/vdso.c            |    2
-rw-r--r--  arch/powerpc/kernel/vio.c             | 1033
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S     |    2
32 files changed, 2408 insertions, 362 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index bf0b1fd0ec34..64f5948ebc9d 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -59,8 +59,6 @@ obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o
 obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_44x) += cpu_setup_44x.o
 
-ifeq ($(CONFIG_PPC_MERGE),y)
-
 extra-$(CONFIG_PPC_STD_MMU) := head_32.o
 extra-$(CONFIG_PPC64) := head_64.o
 extra-$(CONFIG_40x) := head_40x.o
@@ -74,6 +72,7 @@ obj-y += time.o prom.o traps.o setup-common.o \
 	misc_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
 obj-$(CONFIG_PPC64) += dma_64.o iommu.o
+obj-$(CONFIG_KGDB) += kgdb.o
 obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
 obj-$(CONFIG_MODULES) += ppc_ksyms.o
 obj-$(CONFIG_BOOTX_TEXT) += btext.o
@@ -99,12 +98,6 @@ ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
 obj-y += iomap.o
 endif
 
-else
-# stuff used from here for ARCH=ppc
-smpobj-$(CONFIG_SMP) += smp.o
-
-endif
-
 obj-$(CONFIG_PPC64) += $(obj64-y)
 
 extra-$(CONFIG_PPC_FPU) += fpu.o
@@ -120,9 +113,6 @@ PHONY += systbl_chk
 systbl_chk: $(src)/systbl_chk.sh $(obj)/systbl_chk.i
 	$(call cmd,systbl_chk)
 
-
-ifeq ($(CONFIG_PPC_MERGE),y)
-
 $(obj)/built-in.o: prom_init_check
 
 quiet_cmd_prom_init_check = CALL $<
@@ -132,7 +122,4 @@ PHONY += prom_init_check
 prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o
 	$(call cmd,prom_init_check)
 
-endif
-
-
 clean-files := vmlinux.lds
diff --git a/arch/powerpc/kernel/cpu_setup_44x.S b/arch/powerpc/kernel/cpu_setup_44x.S
index 5465e8de0e61..80cac984d85d 100644
--- a/arch/powerpc/kernel/cpu_setup_44x.S
+++ b/arch/powerpc/kernel/cpu_setup_44x.S
@@ -39,12 +39,6 @@ _GLOBAL(__setup_cpu_440gx)
 _GLOBAL(__setup_cpu_440spe)
 	b	__fixup_440A_mcheck
 
-	/* Temporary fixup for arch/ppc until we kill the whole thing */
-#ifndef CONFIG_PPC_MERGE
-_GLOBAL(__fixup_440A_mcheck)
-	blr
-#endif
-
 /* enable APU between CPU and FPU */
 _GLOBAL(__init_fpu_44x)
 	mfspr	r3,SPRN_CCR0
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index b936a1dd0a50..25c273c761d1 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -23,6 +23,9 @@
 struct cpu_spec* cur_cpu_spec = NULL;
 EXPORT_SYMBOL(cur_cpu_spec);
 
+/* The platform string corresponding to the real PVR */
+const char *powerpc_base_platform;
+
 /* NOTE:
  * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's
  * the responsibility of the appropriate CPU save/restore functions to
@@ -1652,6 +1655,14 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
 	} else
 		*t = *s;
 	*PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;
+
+	/*
+	 * Set the base platform string once; assumes
+	 * we're called with real pvr first.
+	 */
+	if (*PTRRELOC(&powerpc_base_platform) == NULL)
+		*PTRRELOC(&powerpc_base_platform) = t->platform;
+
 #if defined(CONFIG_PPC64) || defined(CONFIG_BOOKE)
 	/* ppc64 and booke expect identify_cpu to also call
 	 * setup_cpu for that processor. I will consolidate
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index da52269aec1e..1cbbf7033641 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -343,7 +343,12 @@ syscall_dotrace:
 	stw	r0,_TRAP(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	do_syscall_trace_enter
-	lwz	r0,GPR0(r1)	/* Restore original registers */
+	/*
+	 * Restore argument registers possibly just changed.
+	 * We use the return value of do_syscall_trace_enter
+	 * for call number to look up in the table (r0).
+	 */
+	mr	r0,r3
 	lwz	r3,GPR3(r1)
 	lwz	r4,GPR4(r1)
 	lwz	r5,GPR5(r1)
@@ -1055,8 +1060,8 @@ do_user_signal: /* r10 contains MSR_KERNEL here */
 	SAVE_NVGPRS(r1)
 	rlwinm	r3,r3,0,0,30
 	stw	r3,_TRAP(r1)
-2:	li	r3,0
-	addi	r4,r1,STACK_FRAME_OVERHEAD
+2:	addi	r3,r1,STACK_FRAME_OVERHEAD
+	mr	r4,r9
 	bl	do_signal
 	REST_NVGPRS(r1)
 	b	recheck
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index d7369243ae44..2d802e97097c 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -214,7 +214,12 @@ syscall_dotrace:
 	bl	.save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.do_syscall_trace_enter
-	ld	r0,GPR0(r1)	/* Restore original registers */
+	/*
+	 * Restore argument registers possibly just changed.
+	 * We use the return value of do_syscall_trace_enter
+	 * for the call number to look up in the table (r0).
+	 */
+	mr	r0,r3
 	ld	r3,GPR3(r1)
 	ld	r4,GPR4(r1)
 	ld	r5,GPR5(r1)
@@ -638,8 +643,7 @@ user_work:
 	b	.ret_from_except_lite
 
 1:	bl	.save_nvgprs
-	li	r3,0
-	addi	r4,r1,STACK_FRAME_OVERHEAD
+	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.do_signal
 	b	.ret_from_except
 
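Both syscall-trace hunks above stop reloading r0 from the saved GPR0 and instead take the return value of do_syscall_trace_enter, so a tracer that rewrites the syscall number while the task is stopped actually takes effect. A hedged C sketch of the contract the assembly now assumes (simplified; the real helper also runs audit and seccomp hooks):

    /* Sketch: the entry code moves this return value into r0 and uses
     * it as the syscall-table index, instead of re-reading GPR0(r1). */
    long do_syscall_trace_enter(struct pt_regs *regs)
    {
    	if (test_thread_flag(TIF_SYSCALL_TRACE))
    		tracehook_report_syscall_entry(regs); /* tracer may edit regs */
    
    	return regs->gpr[0];	/* possibly rewritten syscall number */
    }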
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index c3cf0e8f3ac1..d308a9f70f1b 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -60,7 +60,7 @@ void cpu_idle(void)
 
 	set_thread_flag(TIF_POLLING_NRFLAG);
 	while (1) {
-		tick_nohz_stop_sched_tick();
+		tick_nohz_stop_sched_tick(1);
 		while (!need_resched() && !cpu_should_die()) {
 			ppc64_runlatch_off();
 
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 2385f68c1751..550a19399bfa 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -49,6 +49,8 @@ static int novmerge = 1;
 
 static int protect4gb = 1;
 
+static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
+
 static inline unsigned long iommu_num_pages(unsigned long vaddr,
 					    unsigned long slen)
 {
@@ -191,6 +193,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 {
 	unsigned long entry, flags;
 	dma_addr_t ret = DMA_ERROR_CODE;
+	int build_fail;
 
 	spin_lock_irqsave(&(tbl->it_lock), flags);
 
@@ -205,9 +208,21 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */
 
 	/* Put the TCEs in the HW table */
-	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
-			 direction, attrs);
+	build_fail = ppc_md.tce_build(tbl, entry, npages,
+				      (unsigned long)page & IOMMU_PAGE_MASK,
+				      direction, attrs);
+
+	/* ppc_md.tce_build() only returns non-zero for transient errors.
+	 * Clean up the table bitmap in this case and return
+	 * DMA_ERROR_CODE. For all other errors the functionality is
+	 * not altered.
+	 */
+	if (unlikely(build_fail)) {
+		__iommu_free(tbl, ret, npages);
 
+		spin_unlock_irqrestore(&(tbl->it_lock), flags);
+		return DMA_ERROR_CODE;
+	}
 
 	/* Flush/invalidate TLB caches if necessary */
 	if (ppc_md.tce_flush)
@@ -276,7 +291,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 	dma_addr_t dma_next = 0, dma_addr;
 	unsigned long flags;
 	struct scatterlist *s, *outs, *segstart;
-	int outcount, incount, i;
+	int outcount, incount, i, build_fail = 0;
 	unsigned int align;
 	unsigned long handle;
 	unsigned int max_seg_size;
@@ -337,8 +352,11 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			  npages, entry, dma_addr);
 
 		/* Insert into HW table */
-		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK,
-				 direction, attrs);
+		build_fail = ppc_md.tce_build(tbl, entry, npages,
+					      vaddr & IOMMU_PAGE_MASK,
+					      direction, attrs);
+		if (unlikely(build_fail))
+			goto failure;
 
 		/* If we are in an open segment, try merging */
 		if (segstart != s) {
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 6ac8612da3c3..d972decf0324 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -77,22 +77,12 @@ static int ppc_spurious_interrupts;
 EXPORT_SYMBOL(__irq_offset_value);
 atomic_t ppc_n_lost_interrupts;
 
-#ifndef CONFIG_PPC_MERGE
-#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
-unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
-#endif
-
 #ifdef CONFIG_TAU_INT
 extern int tau_initialized;
 extern int tau_interrupts(int);
 #endif
 #endif /* CONFIG_PPC32 */
 
-#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
-extern atomic_t ipi_recv;
-extern atomic_t ipi_sent;
-#endif
-
 #ifdef CONFIG_PPC64
 EXPORT_SYMBOL(irq_desc);
 
@@ -216,21 +206,14 @@ int show_interrupts(struct seq_file *p, void *v)
 skip:
 			spin_unlock_irqrestore(&desc->lock, flags);
 	} else if (i == NR_IRQS) {
-#ifdef CONFIG_PPC32
-#ifdef CONFIG_TAU_INT
+#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
 		if (tau_initialized){
 			seq_puts(p, "TAU: ");
 			for_each_online_cpu(j)
 				seq_printf(p, "%10u ", tau_interrupts(j));
 			seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
 		}
-#endif
-#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
-		/* should this be per processor send/receive? */
-		seq_printf(p, "IPI (recv/sent): %10u/%u\n",
-			   atomic_read(&ipi_recv), atomic_read(&ipi_sent));
-#endif
-#endif /* CONFIG_PPC32 */
+#endif /* CONFIG_PPC32 && CONFIG_TAU_INT*/
 		seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
 	}
 	return 0;
@@ -454,8 +437,6 @@ void do_softirq(void)
  * IRQ controller and virtual interrupts
  */
 
-#ifdef CONFIG_PPC_MERGE
-
 static LIST_HEAD(irq_hosts);
 static DEFINE_SPINLOCK(irq_big_lock);
 static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
@@ -1114,8 +1095,6 @@ static int __init irq_debugfs_init(void)
 __initcall(irq_debugfs_init);
 #endif /* CONFIG_VIRQ_DEBUG */
 
-#endif /* CONFIG_PPC_MERGE */
-
 #ifdef CONFIG_PPC64
 static int __init setup_noirqdistrib(char *str)
 {
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
new file mode 100644
index 000000000000..b4fdf2f2743c
--- /dev/null
+++ b/arch/powerpc/kernel/kgdb.c
@@ -0,0 +1,410 @@
+/*
+ * PowerPC backend to the KGDB stub.
+ *
+ * 1998 (c) Michael AK Tesch (tesch@cs.wisc.edu)
+ * Copyright (C) 2003 Timesys Corporation.
+ * Copyright (C) 2004-2006 MontaVista Software, Inc.
+ * PPC64 Mods (C) 2005 Frank Rowand (frowand@mvista.com)
+ * PPC32 support restored by Vitaly Wool <vwool@ru.mvista.com> and
+ * Sergei Shtylyov <sshtylyov@ru.mvista.com>
+ * Copyright (C) 2007-2008 Wind River Systems, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/kgdb.h>
+#include <linux/smp.h>
+#include <linux/signal.h>
+#include <linux/ptrace.h>
+#include <asm/current.h>
+#include <asm/processor.h>
+#include <asm/machdep.h>
+
+/*
+ * This table contains the mapping between PowerPC hardware trap types, and
+ * signals, which are primarily what GDB understands. GDB and the kernel
+ * don't always agree on values, so we use constants taken from gdb-6.2.
+ */
+static struct hard_trap_info
+{
+	unsigned int tt;		/* Trap type code for powerpc */
+	unsigned char signo;		/* Signal that we map this trap into */
+} hard_trap_info[] = {
+	{ 0x0100, 0x02 /* SIGINT */  },		/* system reset */
+	{ 0x0200, 0x0b /* SIGSEGV */ },		/* machine check */
+	{ 0x0300, 0x0b /* SIGSEGV */ },		/* data access */
+	{ 0x0400, 0x0b /* SIGSEGV */ },		/* instruction access */
+	{ 0x0500, 0x02 /* SIGINT */  },		/* external interrupt */
+	{ 0x0600, 0x0a /* SIGBUS */  },		/* alignment */
+	{ 0x0700, 0x05 /* SIGTRAP */ },		/* program check */
+	{ 0x0800, 0x08 /* SIGFPE */  },		/* fp unavailable */
+	{ 0x0900, 0x0e /* SIGALRM */ },		/* decrementer */
+	{ 0x0c00, 0x14 /* SIGCHLD */ },		/* system call */
+#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+	{ 0x2002, 0x05 /* SIGTRAP */ },		/* debug */
+#if defined(CONFIG_FSL_BOOKE)
+	{ 0x2010, 0x08 /* SIGFPE */  },		/* spe unavailable */
+	{ 0x2020, 0x08 /* SIGFPE */  },		/* spe unavailable */
+	{ 0x2030, 0x08 /* SIGFPE */  },		/* spe fp data */
+	{ 0x2040, 0x08 /* SIGFPE */  },		/* spe fp data */
+	{ 0x2050, 0x08 /* SIGFPE */  },		/* spe fp round */
+	{ 0x2060, 0x0e /* SIGILL */  },		/* performance monitor */
+	{ 0x2900, 0x08 /* SIGFPE */  },		/* apu unavailable */
+	{ 0x3100, 0x0e /* SIGALRM */ },		/* fixed interval timer */
+	{ 0x3200, 0x02 /* SIGINT */  },		/* watchdog */
+#else /* ! CONFIG_FSL_BOOKE */
+	{ 0x1000, 0x0e /* SIGALRM */ },		/* prog interval timer */
+	{ 0x1010, 0x0e /* SIGALRM */ },		/* fixed interval timer */
+	{ 0x1020, 0x02 /* SIGINT */  },		/* watchdog */
+	{ 0x2010, 0x08 /* SIGFPE */  },		/* fp unavailable */
+	{ 0x2020, 0x08 /* SIGFPE */  },		/* ap unavailable */
+#endif
+#else /* ! (defined(CONFIG_40x) || defined(CONFIG_BOOKE)) */
+	{ 0x0d00, 0x05 /* SIGTRAP */ },		/* single-step */
+#if defined(CONFIG_8xx)
+	{ 0x1000, 0x04 /* SIGILL */  },		/* software emulation */
+#else /* ! CONFIG_8xx */
+	{ 0x0f00, 0x04 /* SIGILL */  },		/* performance monitor */
+	{ 0x0f20, 0x08 /* SIGFPE */  },		/* altivec unavailable */
+	{ 0x1300, 0x05 /* SIGTRAP */ },		/* instruction address break */
+#if defined(CONFIG_PPC64)
+	{ 0x1200, 0x05 /* SIGILL */  },		/* system error */
+	{ 0x1500, 0x04 /* SIGILL */  },		/* soft patch */
+	{ 0x1600, 0x04 /* SIGILL */  },		/* maintenance */
+	{ 0x1700, 0x08 /* SIGFPE */  },		/* altivec assist */
+	{ 0x1800, 0x04 /* SIGILL */  },		/* thermal */
+#else /* ! CONFIG_PPC64 */
+	{ 0x1400, 0x02 /* SIGINT */  },		/* SMI */
+	{ 0x1600, 0x08 /* SIGFPE */  },		/* altivec assist */
+	{ 0x1700, 0x04 /* SIGILL */  },		/* TAU */
+	{ 0x2000, 0x05 /* SIGTRAP */ },		/* run mode */
+#endif
+#endif
+#endif
+	{ 0x0000, 0x00 }			/* Must be last */
+};
+
+static int computeSignal(unsigned int tt)
+{
+	struct hard_trap_info *ht;
+
+	for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
+		if (ht->tt == tt)
+			return ht->signo;
+
+	return SIGHUP;		/* default for things we don't know about */
+}
+
+static int kgdb_call_nmi_hook(struct pt_regs *regs)
+{
+	kgdb_nmicallback(raw_smp_processor_id(), regs);
+	return 0;
+}
+
+#ifdef CONFIG_SMP
+void kgdb_roundup_cpus(unsigned long flags)
+{
+	smp_send_debugger_break(MSG_ALL_BUT_SELF);
+}
+#endif
+
+/* KGDB functions to use existing PowerPC64 hooks. */
+static int kgdb_debugger(struct pt_regs *regs)
+{
+	return kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs);
+}
+
+static int kgdb_handle_breakpoint(struct pt_regs *regs)
+{
+	if (user_mode(regs))
+		return 0;
+
+	if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
+		return 0;
+
+	if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
+		regs->nip += 4;
+
+	return 1;
+}
+
+static int kgdb_singlestep(struct pt_regs *regs)
+{
+	struct thread_info *thread_info, *exception_thread_info;
+
+	if (user_mode(regs))
+		return 0;
+
+	/*
+	 * On Book E and perhaps other processors, singlestep is handled on
+	 * the critical exception stack. This causes current_thread_info()
+	 * to fail, since it locates the thread_info by masking off
+	 * the low bits of the current stack pointer. We work around
+	 * this issue by copying the thread_info from the kernel stack
+	 * before calling kgdb_handle_exception, and copying it back
+	 * afterwards. On most processors the copy is avoided since
+	 * exception_thread_info == thread_info.
+	 */
+	thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
+	exception_thread_info = current_thread_info();
+
+	if (thread_info != exception_thread_info)
+		memcpy(exception_thread_info, thread_info, sizeof *thread_info);
+
+	kgdb_handle_exception(0, SIGTRAP, 0, regs);
+
+	if (thread_info != exception_thread_info)
+		memcpy(thread_info, exception_thread_info, sizeof *thread_info);
+
+	return 1;
+}
+
+static int kgdb_iabr_match(struct pt_regs *regs)
+{
+	if (user_mode(regs))
+		return 0;
+
+	if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0)
+		return 0;
+	return 1;
+}
+
+static int kgdb_dabr_match(struct pt_regs *regs)
+{
+	if (user_mode(regs))
+		return 0;
+
+	if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0)
+		return 0;
+	return 1;
+}
+
+#define PACK64(ptr, src) do { *(ptr++) = (src); } while (0)
+
+#define PACK32(ptr, src) do {          \
+	u32 *ptr32;                    \
+	ptr32 = (u32 *)ptr;            \
+	*(ptr32++) = (src);            \
+	ptr = (unsigned long *)ptr32;  \
+	} while (0)
+
+
+void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+	unsigned long *ptr = gdb_regs;
+	int reg;
+
+	memset(gdb_regs, 0, NUMREGBYTES);
+
+	for (reg = 0; reg < 32; reg++)
+		PACK64(ptr, regs->gpr[reg]);
+
+#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_SPE
+	for (reg = 0; reg < 32; reg++)
+		PACK64(ptr, current->thread.evr[reg]);
+#else
+	ptr += 32;
+#endif
+#else
+	/* fp registers not used by kernel, leave zero */
+	ptr += 32 * 8 / sizeof(long);
+#endif
+
+	PACK64(ptr, regs->nip);
+	PACK64(ptr, regs->msr);
+	PACK32(ptr, regs->ccr);
+	PACK64(ptr, regs->link);
+	PACK64(ptr, regs->ctr);
+	PACK32(ptr, regs->xer);
+
+	BUG_ON((unsigned long)ptr >
+	       (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
+}
+
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+	struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp +
+						  STACK_FRAME_OVERHEAD);
+	unsigned long *ptr = gdb_regs;
+	int reg;
+
+	memset(gdb_regs, 0, NUMREGBYTES);
+
+	/* Regs GPR0-2 */
+	for (reg = 0; reg < 3; reg++)
+		PACK64(ptr, regs->gpr[reg]);
+
+	/* Regs GPR3-13 are caller saved, not in regs->gpr[] */
+	ptr += 11;
+
+	/* Regs GPR14-31 */
+	for (reg = 14; reg < 32; reg++)
+		PACK64(ptr, regs->gpr[reg]);
+
+#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_SPE
+	for (reg = 0; reg < 32; reg++)
+		PACK64(ptr, p->thread.evr[reg]);
+#else
+	ptr += 32;
+#endif
+#else
+	/* fp registers not used by kernel, leave zero */
+	ptr += 32 * 8 / sizeof(long);
+#endif
+
+	PACK64(ptr, regs->nip);
+	PACK64(ptr, regs->msr);
+	PACK32(ptr, regs->ccr);
+	PACK64(ptr, regs->link);
+	PACK64(ptr, regs->ctr);
+	PACK32(ptr, regs->xer);
+
+	BUG_ON((unsigned long)ptr >
+	       (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
+}
+
+#define UNPACK64(dest, ptr) do { dest = *(ptr++); } while (0)
+
+#define UNPACK32(dest, ptr) do {       \
+	u32 *ptr32;                    \
+	ptr32 = (u32 *)ptr;            \
+	dest = *(ptr32++);             \
+	ptr = (unsigned long *)ptr32;  \
+	} while (0)
+
+void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+	unsigned long *ptr = gdb_regs;
+	int reg;
+#ifdef CONFIG_SPE
+	union {
+		u32 v32[2];
+		u64 v64;
+	} acc;
+#endif
+
+	for (reg = 0; reg < 32; reg++)
+		UNPACK64(regs->gpr[reg], ptr);
+
+#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_SPE
+	for (reg = 0; reg < 32; reg++)
+		UNPACK64(current->thread.evr[reg], ptr);
+#else
+	ptr += 32;
+#endif
+#else
+	/* fp registers not used by kernel, leave zero */
+	ptr += 32 * 8 / sizeof(int);
+#endif
+
+	UNPACK64(regs->nip, ptr);
+	UNPACK64(regs->msr, ptr);
+	UNPACK32(regs->ccr, ptr);
+	UNPACK64(regs->link, ptr);
+	UNPACK64(regs->ctr, ptr);
+	UNPACK32(regs->xer, ptr);
+
+	BUG_ON((unsigned long)ptr >
+	       (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
+}
+
+/*
+ * This function does PowerPC specific processing for interfacing to gdb.
+ */
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+			       char *remcom_in_buffer, char *remcom_out_buffer,
+			       struct pt_regs *linux_regs)
+{
+	char *ptr = &remcom_in_buffer[1];
+	unsigned long addr;
+
+	switch (remcom_in_buffer[0]) {
+		/*
+		 * sAA..AA   Step one instruction from AA..AA
+		 * This will return an error to gdb ..
+		 */
+	case 's':
+	case 'c':
+		/* handle the optional parameter */
+		if (kgdb_hex2long(&ptr, &addr))
+			linux_regs->nip = addr;
+
+		atomic_set(&kgdb_cpu_doing_single_step, -1);
+		/* set the trace bit if we're stepping */
+		if (remcom_in_buffer[0] == 's') {
+#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+			mtspr(SPRN_DBCR0,
+			      mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
+			linux_regs->msr |= MSR_DE;
+#else
+			linux_regs->msr |= MSR_SE;
+#endif
+			kgdb_single_step = 1;
+			if (kgdb_contthread)
+				atomic_set(&kgdb_cpu_doing_single_step,
+					   raw_smp_processor_id());
+		}
+		return 0;
+	}
+
+	return -1;
+}
+
+/*
+ * Global data
+ */
+struct kgdb_arch arch_kgdb_ops = {
+	.gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
+};
+
+static int kgdb_not_implemented(struct pt_regs *regs)
+{
+	return 0;
+}
+
+static void *old__debugger_ipi;
+static void *old__debugger;
+static void *old__debugger_bpt;
+static void *old__debugger_sstep;
+static void *old__debugger_iabr_match;
+static void *old__debugger_dabr_match;
+static void *old__debugger_fault_handler;
+
+int kgdb_arch_init(void)
+{
+	old__debugger_ipi = __debugger_ipi;
+	old__debugger = __debugger;
+	old__debugger_bpt = __debugger_bpt;
+	old__debugger_sstep = __debugger_sstep;
+	old__debugger_iabr_match = __debugger_iabr_match;
+	old__debugger_dabr_match = __debugger_dabr_match;
+	old__debugger_fault_handler = __debugger_fault_handler;
+
+	__debugger_ipi = kgdb_call_nmi_hook;
+	__debugger = kgdb_debugger;
+	__debugger_bpt = kgdb_handle_breakpoint;
+	__debugger_sstep = kgdb_singlestep;
+	__debugger_iabr_match = kgdb_iabr_match;
+	__debugger_dabr_match = kgdb_dabr_match;
+	__debugger_fault_handler = kgdb_not_implemented;
+
+	return 0;
+}
+
+void kgdb_arch_exit(void)
+{
+	__debugger_ipi = old__debugger_ipi;
+	__debugger = old__debugger;
+	__debugger_bpt = old__debugger_bpt;
+	__debugger_sstep = old__debugger_sstep;
+	__debugger_iabr_match = old__debugger_iabr_match;
+	__debugger_dabr_match = old__debugger_dabr_match;
+	__debugger_fault_handler = old__debugger_fault_handler;
+}
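The comment in kgdb_singlestep() above hinges on how current_thread_info() is derived on this architecture: thread_info sits at the base of the THREAD_SIZE-aligned kernel stack and is recovered by masking the stack pointer, which goes wrong on a separate critical-exception stack. A minimal sketch of that derivation, matching the regs->gpr[1] masking used in the file:

    /* Sketch: masking any stack address down to the stack base finds the
     * thread_info on a normal kernel stack; on a critical-exception stack
     * it finds that stack's base instead, hence the copy-in/copy-out
     * dance in kgdb_singlestep(). */
    static inline struct thread_info *ti_from_sp(unsigned long sp)
    {
    	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
    }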
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 4ba2af125450..de79915452c8 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -144,7 +144,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 	kcb->kprobe_saved_msr = regs->msr;
 }
 
-/* Called with kretprobe_lock held */
 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 				      struct pt_regs *regs)
 {
@@ -312,8 +311,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
 	INIT_HLIST_HEAD(&empty_rp);
-	spin_lock_irqsave(&kretprobe_lock, flags);
-	head = kretprobe_inst_table_head(current);
+	kretprobe_hash_lock(current, &head, &flags);
 
 	/*
 	 * It is possible to have multiple instances associated with a given
@@ -352,7 +350,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	regs->nip = orig_ret_address;
 
 	reset_current_kprobe();
-	spin_unlock_irqrestore(&kretprobe_lock, flags);
+	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
 	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 4d96e1db55ee..9ddfaef1a184 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -493,18 +493,18 @@ static int __init serial_dev_init(void)
 device_initcall(serial_dev_init);
 
 
+#ifdef CONFIG_SERIAL_8250_CONSOLE
 /*
  * This is called very early, as part of console_init() (typically just after
  * time_init()). This function is responsible for trying to find a good
  * default console on serial ports. It tries to match the open firmware
- * default output with one of the available serial console drivers, either
- * one of the platform serial ports that have been probed earlier by
- * find_legacy_serial_ports() or some more platform specific ones.
+ * default output with one of the available serial console drivers that have
+ * been probed earlier by find_legacy_serial_ports()
  */
 static int __init check_legacy_serial_console(void)
 {
 	struct device_node *prom_stdout = NULL;
-	int speed = 0, offset = 0;
+	int i, speed = 0, offset = 0;
 	const char *name;
 	const u32 *spd;
 
@@ -548,31 +548,20 @@ static int __init check_legacy_serial_console(void)
 	if (spd)
 		speed = *spd;
 
-	if (0)
-		;
-#ifdef CONFIG_SERIAL_8250_CONSOLE
-	else if (strcmp(name, "serial") == 0) {
-		int i;
-		/* Look for it in probed array */
-		for (i = 0; i < legacy_serial_count; i++) {
-			if (prom_stdout != legacy_serial_infos[i].np)
-				continue;
-			offset = i;
-			speed = legacy_serial_infos[i].speed;
-			break;
-		}
-		if (i >= legacy_serial_count)
-			goto not_found;
-	}
-#endif /* CONFIG_SERIAL_8250_CONSOLE */
-#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
-	else if (strcmp(name, "ch-a") == 0)
-		offset = 0;
-	else if (strcmp(name, "ch-b") == 0)
-		offset = 1;
-#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
-	else
+	if (strcmp(name, "serial") != 0)
+		goto not_found;
+
+	/* Look for it in probed array */
+	for (i = 0; i < legacy_serial_count; i++) {
+		if (prom_stdout != legacy_serial_infos[i].np)
+			continue;
+		offset = i;
+		speed = legacy_serial_infos[i].speed;
+		break;
+	}
+	if (i >= legacy_serial_count)
 		goto not_found;
+
 	of_node_put(prom_stdout);
 
 	DBG("Found serial console at ttyS%d\n", offset);
@@ -591,3 +580,4 @@ static int __init check_legacy_serial_console(void)
 }
 console_initcall(check_legacy_serial_console);
 
+#endif /* CONFIG_SERIAL_8250_CONSOLE */
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 827a5726a035..1a09719c7628 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -34,8 +34,9 @@
 #include <asm/time.h>
 #include <asm/prom.h>
 #include <asm/vdso_datapage.h>
+#include <asm/vio.h>
 
-#define MODULE_VERS "1.7"
+#define MODULE_VERS "1.8"
 #define MODULE_NAME "lparcfg"
 
 /* #define LPARCFG_DEBUG */
@@ -129,32 +130,46 @@ static int iseries_lparcfg_data(struct seq_file *m, void *v)
 /*
  * Methods used to fetch LPAR data when running on a pSeries platform.
  */
-static void log_plpar_hcall_return(unsigned long rc, char *tag)
+/**
+ * h_get_mpp
+ * H_GET_MPP hcall returns info in 7 parms
+ */
+int h_get_mpp(struct hvcall_mpp_data *mpp_data)
 {
-	switch(rc) {
-	case 0:
-		return;
-	case H_HARDWARE:
-		printk(KERN_INFO "plpar-hcall (%s) "
-		       "Hardware fault\n", tag);
-		return;
-	case H_FUNCTION:
-		printk(KERN_INFO "plpar-hcall (%s) "
-		       "Function not allowed\n", tag);
-		return;
-	case H_AUTHORITY:
-		printk(KERN_INFO "plpar-hcall (%s) "
-		       "Not authorized to this function\n", tag);
-		return;
-	case H_PARAMETER:
-		printk(KERN_INFO "plpar-hcall (%s) "
-		       "Bad parameter(s)\n", tag);
-		return;
-	default:
-		printk(KERN_INFO "plpar-hcall (%s) "
-		       "Unexpected rc(0x%lx)\n", tag, rc);
-	}
+	int rc;
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+	rc = plpar_hcall9(H_GET_MPP, retbuf);
+
+	mpp_data->entitled_mem = retbuf[0];
+	mpp_data->mapped_mem = retbuf[1];
+
+	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
+	mpp_data->pool_num = retbuf[2] & 0xffff;
+
+	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
+	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
+	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff;
+
+	mpp_data->pool_size = retbuf[4];
+	mpp_data->loan_request = retbuf[5];
+	mpp_data->backing_mem = retbuf[6];
+
+	return rc;
 }
+EXPORT_SYMBOL(h_get_mpp);
+
+struct hvcall_ppp_data {
+	u64	entitlement;
+	u64	unallocated_entitlement;
+	u16	group_num;
+	u16	pool_num;
+	u8	capped;
+	u8	weight;
+	u8	unallocated_weight;
+	u16	active_procs_in_pool;
+	u16	active_system_procs;
+};
 
 /*
  * H_GET_PPP hcall returns info in 4 parms.
@@ -176,27 +191,30 @@ static void log_plpar_hcall_return(unsigned long rc, char *tag)
  * XXXX - Active processors in Physical Processor Pool.
  * XXXX - Processors active on platform.
  */
-static unsigned int h_get_ppp(unsigned long *entitled,
-			      unsigned long *unallocated,
-			      unsigned long *aggregation,
-			      unsigned long *resource)
+static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
 {
 	unsigned long rc;
 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 
 	rc = plpar_hcall(H_GET_PPP, retbuf);
 
-	*entitled = retbuf[0];
-	*unallocated = retbuf[1];
-	*aggregation = retbuf[2];
-	*resource = retbuf[3];
+	ppp_data->entitlement = retbuf[0];
+	ppp_data->unallocated_entitlement = retbuf[1];
 
-	log_plpar_hcall_return(rc, "H_GET_PPP");
+	ppp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
+	ppp_data->pool_num = retbuf[2] & 0xffff;
+
+	ppp_data->capped = (retbuf[3] >> 6 * 8) & 0x01;
+	ppp_data->weight = (retbuf[3] >> 5 * 8) & 0xff;
+	ppp_data->unallocated_weight = (retbuf[3] >> 4 * 8) & 0xff;
+	ppp_data->active_procs_in_pool = (retbuf[3] >> 2 * 8) & 0xffff;
+	ppp_data->active_system_procs = retbuf[3] & 0xffff;
 
 	return rc;
 }
 
-static void h_pic(unsigned long *pool_idle_time, unsigned long *num_procs)
+static unsigned h_pic(unsigned long *pool_idle_time,
+		      unsigned long *num_procs)
 {
 	unsigned long rc;
 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
@@ -206,8 +224,87 @@ static void h_pic(unsigned long *pool_idle_time, unsigned long *num_procs)
 	*pool_idle_time = retbuf[0];
 	*num_procs = retbuf[1];
 
-	if (rc != H_AUTHORITY)
-		log_plpar_hcall_return(rc, "H_PIC");
+	return rc;
+}
+
+/*
+ * parse_ppp_data
+ * Parse out the data returned from h_get_ppp and h_pic
+ */
+static void parse_ppp_data(struct seq_file *m)
+{
+	struct hvcall_ppp_data ppp_data;
+	int rc;
+
+	rc = h_get_ppp(&ppp_data);
+	if (rc)
+		return;
+
+	seq_printf(m, "partition_entitled_capacity=%ld\n",
+		   ppp_data.entitlement);
+	seq_printf(m, "group=%d\n", ppp_data.group_num);
+	seq_printf(m, "system_active_processors=%d\n",
+		   ppp_data.active_system_procs);
+
+	/* pool related entries are appropriate for shared configs */
+	if (lppaca[0].shared_proc) {
+		unsigned long pool_idle_time, pool_procs;
+
+		seq_printf(m, "pool=%d\n", ppp_data.pool_num);
+
+		/* report pool_capacity in percentage */
+		seq_printf(m, "pool_capacity=%d\n",
+			   ppp_data.active_procs_in_pool * 100);
+
+		h_pic(&pool_idle_time, &pool_procs);
+		seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time);
+		seq_printf(m, "pool_num_procs=%ld\n", pool_procs);
+	}
+
+	seq_printf(m, "unallocated_capacity_weight=%d\n",
+		   ppp_data.unallocated_weight);
+	seq_printf(m, "capacity_weight=%d\n", ppp_data.weight);
+	seq_printf(m, "capped=%d\n", ppp_data.capped);
+	seq_printf(m, "unallocated_capacity=%ld\n",
+		   ppp_data.unallocated_entitlement);
+}
+
+/**
+ * parse_mpp_data
+ * Parse out data returned from h_get_mpp
+ */
+static void parse_mpp_data(struct seq_file *m)
+{
+	struct hvcall_mpp_data mpp_data;
+	int rc;
+
+	rc = h_get_mpp(&mpp_data);
+	if (rc)
+		return;
+
+	seq_printf(m, "entitled_memory=%ld\n", mpp_data.entitled_mem);
+
+	if (mpp_data.mapped_mem != -1)
+		seq_printf(m, "mapped_entitled_memory=%ld\n",
+			   mpp_data.mapped_mem);
+
+	seq_printf(m, "entitled_memory_group_number=%d\n", mpp_data.group_num);
+	seq_printf(m, "entitled_memory_pool_number=%d\n", mpp_data.pool_num);
+
+	seq_printf(m, "entitled_memory_weight=%d\n", mpp_data.mem_weight);
+	seq_printf(m, "unallocated_entitled_memory_weight=%d\n",
+		   mpp_data.unallocated_mem_weight);
+	seq_printf(m, "unallocated_io_mapping_entitlement=%ld\n",
+		   mpp_data.unallocated_entitlement);
+
+	if (mpp_data.pool_size != -1)
+		seq_printf(m, "entitled_memory_pool_size=%ld bytes\n",
+			   mpp_data.pool_size);
+
+	seq_printf(m, "entitled_memory_loan_request=%ld\n",
+		   mpp_data.loan_request);
+
+	seq_printf(m, "backing_memory=%ld bytes\n", mpp_data.backing_mem);
 }
 
 #define SPLPAR_CHARACTERISTICS_TOKEN 20
@@ -313,6 +410,25 @@ static int lparcfg_count_active_processors(void)
 	return count;
 }
 
+static void pseries_cmo_data(struct seq_file *m)
+{
+	int cpu;
+	unsigned long cmo_faults = 0;
+	unsigned long cmo_fault_time = 0;
+
+	if (!firmware_has_feature(FW_FEATURE_CMO))
+		return;
+
+	for_each_possible_cpu(cpu) {
+		cmo_faults += lppaca[cpu].cmo_faults;
+		cmo_fault_time += lppaca[cpu].cmo_fault_time;
+	}
+
+	seq_printf(m, "cmo_faults=%lu\n", cmo_faults);
+	seq_printf(m, "cmo_fault_time_usec=%lu\n",
+		   cmo_fault_time / tb_ticks_per_usec);
+}
+
 static int pseries_lparcfg_data(struct seq_file *m, void *v)
 {
 	int partition_potential_processors;
@@ -334,60 +450,13 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 	partition_active_processors = lparcfg_count_active_processors();
 
 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-		unsigned long h_entitled, h_unallocated;
-		unsigned long h_aggregation, h_resource;
-		unsigned long pool_idle_time, pool_procs;
-		unsigned long purr;
-
-		h_get_ppp(&h_entitled, &h_unallocated, &h_aggregation,
-			  &h_resource);
-
-		seq_printf(m, "R4=0x%lx\n", h_entitled);
-		seq_printf(m, "R5=0x%lx\n", h_unallocated);
-		seq_printf(m, "R6=0x%lx\n", h_aggregation);
-		seq_printf(m, "R7=0x%lx\n", h_resource);
-
-		purr = get_purr();
-
 		/* this call handles the ibm,get-system-parameter contents */
 		parse_system_parameter_string(m);
+		parse_ppp_data(m);
+		parse_mpp_data(m);
+		pseries_cmo_data(m);
 
-		seq_printf(m, "partition_entitled_capacity=%ld\n", h_entitled);
-
-		seq_printf(m, "group=%ld\n", (h_aggregation >> 2 * 8) & 0xffff);
-
-		seq_printf(m, "system_active_processors=%ld\n",
-			   (h_resource >> 0 * 8) & 0xffff);
-
-		/* pool related entries are apropriate for shared configs */
-		if (lppaca[0].shared_proc) {
-
-			h_pic(&pool_idle_time, &pool_procs);
-
-			seq_printf(m, "pool=%ld\n",
-				   (h_aggregation >> 0 * 8) & 0xffff);
-
-			/* report pool_capacity in percentage */
-			seq_printf(m, "pool_capacity=%ld\n",
-				   ((h_resource >> 2 * 8) & 0xffff) * 100);
-
-			seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time);
-
-			seq_printf(m, "pool_num_procs=%ld\n", pool_procs);
-		}
-
-		seq_printf(m, "unallocated_capacity_weight=%ld\n",
-			   (h_resource >> 4 * 8) & 0xFF);
-
-		seq_printf(m, "capacity_weight=%ld\n",
-			   (h_resource >> 5 * 8) & 0xFF);
-
-		seq_printf(m, "capped=%ld\n", (h_resource >> 6 * 8) & 0x01);
-
-		seq_printf(m, "unallocated_capacity=%ld\n", h_unallocated);
-
-		seq_printf(m, "purr=%ld\n", purr);
-
+		seq_printf(m, "purr=%ld\n", get_purr());
 	} else {		/* non SPLPAR case */
 
 		seq_printf(m, "system_active_processors=%d\n",
@@ -414,6 +483,83 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 	return 0;
 }
 
+static ssize_t update_ppp(u64 *entitlement, u8 *weight)
+{
+	struct hvcall_ppp_data ppp_data;
+	u8 new_weight;
+	u64 new_entitled;
+	ssize_t retval;
+
+	/* Get our current parameters */
+	retval = h_get_ppp(&ppp_data);
+	if (retval)
+		return retval;
+
+	if (entitlement) {
+		new_weight = ppp_data.weight;
+		new_entitled = *entitlement;
+	} else if (weight) {
+		new_weight = *weight;
+		new_entitled = ppp_data.entitlement;
+	} else
+		return -EINVAL;
+
+	pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
+		 __FUNCTION__, ppp_data.entitlement, ppp_data.weight);
+
+	pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
+		 __FUNCTION__, new_entitled, new_weight);
+
+	retval = plpar_hcall_norets(H_SET_PPP, new_entitled, new_weight);
+	return retval;
+}
+
+/**
+ * update_mpp
+ *
+ * Update the memory entitlement and weight for the partition. Caller must
+ * specify either a new entitlement or weight, not both, to be updated
+ * since the h_set_mpp call takes both entitlement and weight as parameters.
+ */
+static ssize_t update_mpp(u64 *entitlement, u8 *weight)
+{
+	struct hvcall_mpp_data mpp_data;
+	u64 new_entitled;
+	u8 new_weight;
+	ssize_t rc;
+
+	if (entitlement) {
+		/* Check with vio to ensure the new memory entitlement
+		 * can be handled.
+		 */
+		rc = vio_cmo_entitlement_update(*entitlement);
+		if (rc)
+			return rc;
+	}
+
+	rc = h_get_mpp(&mpp_data);
+	if (rc)
+		return rc;
+
+	if (entitlement) {
+		new_weight = mpp_data.mem_weight;
+		new_entitled = *entitlement;
+	} else if (weight) {
+		new_weight = *weight;
+		new_entitled = mpp_data.entitled_mem;
+	} else
+		return -EINVAL;
+
+	pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
+		 __FUNCTION__, mpp_data.entitled_mem, mpp_data.mem_weight);
+
+	pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
+		 __FUNCTION__, new_entitled, new_weight);
+
+	rc = plpar_hcall_norets(H_SET_MPP, new_entitled, new_weight);
+	return rc;
+}
+
 /*
  * Interface for changing system parameters (variable capacity weight
  * and entitled capacity). Format of input is "param_name=value";
@@ -427,35 +573,27 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 static ssize_t lparcfg_write(struct file *file, const char __user * buf,
 			     size_t count, loff_t * off)
 {
-	char *kbuf;
+	int kbuf_sz = 64;
+	char kbuf[kbuf_sz];
 	char *tmp;
 	u64 new_entitled, *new_entitled_ptr = &new_entitled;
 	u8 new_weight, *new_weight_ptr = &new_weight;
-
-	unsigned long current_entitled;	/* parameters for h_get_ppp */
-	unsigned long dummy;
-	unsigned long resource;
-	u8 current_weight;
-
-	ssize_t retval = -ENOMEM;
+	ssize_t retval;
 
 	if (!firmware_has_feature(FW_FEATURE_SPLPAR) ||
 			firmware_has_feature(FW_FEATURE_ISERIES))
 		return -EINVAL;
 
-	kbuf = kmalloc(count, GFP_KERNEL);
-	if (!kbuf)
-		goto out;
+	if (count > kbuf_sz)
+		return -EINVAL;
 
-	retval = -EFAULT;
 	if (copy_from_user(kbuf, buf, count))
-		goto out;
+		return -EFAULT;
 
-	retval = -EINVAL;
 	kbuf[count - 1] = '\0';
 	tmp = strchr(kbuf, '=');
 	if (!tmp)
-		goto out;
+		return -EINVAL;
 
 	*tmp++ = '\0';
 
@@ -463,34 +601,32 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
 		char *endp;
 		*new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
 		if (endp == tmp)
-			goto out;
-		new_weight_ptr = &current_weight;
+			return -EINVAL;
+
+		retval = update_ppp(new_entitled_ptr, NULL);
 	} else if (!strcmp(kbuf, "capacity_weight")) {
 		char *endp;
 		*new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
 		if (endp == tmp)
-			goto out;
-		new_entitled_ptr = &current_entitled;
-	} else
-		goto out;
-
-	/* Get our current parameters */
-	retval = h_get_ppp(&current_entitled, &dummy, &dummy, &resource);
-	if (retval) {
-		retval = -EIO;
-		goto out;
-	}
-
-	current_weight = (resource >> 5 * 8) & 0xFF;
+			return -EINVAL;
 
-	pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
-		 __func__, current_entitled, current_weight);
+		retval = update_ppp(NULL, new_weight_ptr);
+	} else if (!strcmp(kbuf, "entitled_memory")) {
+		char *endp;
+		*new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
+		if (endp == tmp)
+			return -EINVAL;
 
-	pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
-		 __func__, *new_entitled_ptr, *new_weight_ptr);
+		retval = update_mpp(new_entitled_ptr, NULL);
+	} else if (!strcmp(kbuf, "entitled_memory_weight")) {
+		char *endp;
+		*new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
+		if (endp == tmp)
+			return -EINVAL;
 
-	retval = plpar_hcall_norets(H_SET_PPP, *new_entitled_ptr,
-				    *new_weight_ptr);
+		retval = update_mpp(NULL, new_weight_ptr);
+	} else
+		return -EINVAL;
 
 	if (retval == H_SUCCESS || retval == H_CONSTRAINED) {
@@ -500,14 +636,8 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
 		retval = -EIO;
 	} else if (retval == H_PARAMETER) {
 		retval = -EINVAL;
-	} else {
-		printk(KERN_WARNING "%s: received unknown hv return code %ld",
-		       __func__, retval);
-		retval = -EIO;
 	}
 
-out:
-	kfree(kbuf);
 	return retval;
 }
 
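The hcall wrappers above unpack several fields from single 64-bit return words with shift-by-bytes arithmetic: "n * 8" shifts by n bytes. A worked example of the retbuf[2] layout shared by h_get_ppp() and h_get_mpp(), with the group number in bytes 2-3 and the pool number in bytes 0-1:

    /* Worked example: w = 0x0000000012345678 unpacks to
     * group_num = 0x1234 and pool_num = 0x5678. */
    static void unpack_group_pool(u64 w, u16 *group_num, u16 *pool_num)
    {
    	*group_num = (w >> 2 * 8) & 0xffff;	/* bytes 2-3 */
    	*pool_num  =  w & 0xffff;		/* bytes 0-1 */
    }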
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 29a0e039d436..aab76887a842 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -48,7 +48,7 @@ void machine_kexec_cleanup(struct kimage *image)
  * Do not allocate memory (or fail in any way) in machine_kexec().
  * We are past the point of no return, committed to rebooting now.
  */
-NORET_TYPE void machine_kexec(struct kimage *image)
+void machine_kexec(struct kimage *image)
 {
 	if (ppc_md.machine_kexec)
 		ppc_md.machine_kexec(image);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 224e9a11765c..ea0c61e09b76 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -650,11 +650,18 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
 		}
 	}
 
-	/* Out of paranoia, let's put the ISA hole last if any */
-	if (isa_hole >= 0 && memno > 0 && isa_hole != (memno-1)) {
-		struct resource tmp = hose->mem_resources[isa_hole];
-		hose->mem_resources[isa_hole] = hose->mem_resources[memno-1];
-		hose->mem_resources[memno-1] = tmp;
+	/* If there's an ISA hole and the pci_mem_offset is -not- matching
+	 * the ISA hole offset, then we need to remove the ISA hole from
+	 * the resource list for that bridge
+	 */
+	if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
+		unsigned int next = isa_hole + 1;
+		printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb);
+		if (next < memno)
+			memmove(&hose->mem_resources[isa_hole],
+				&hose->mem_resources[next],
+				sizeof(struct resource) * (memno - next));
+		hose->mem_resources[--memno].flags = 0;
 	}
 }
 
660 667
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 219f3634115e..957bded0020d 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -47,6 +47,8 @@
 #ifdef CONFIG_PPC64
 #include <asm/firmware.h>
 #endif
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
 
 extern unsigned long _get_SP(void);
 
@@ -239,21 +241,53 @@ void discard_lazy_cpu_state(void)
 }
 #endif /* CONFIG_SMP */
 
+void do_dabr(struct pt_regs *regs, unsigned long address,
+	     unsigned long error_code)
+{
+	siginfo_t info;
+
+	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
+			11, SIGSEGV) == NOTIFY_STOP)
+		return;
+
+	if (debugger_dabr_match(regs))
+		return;
+
+	/* Clear the DAC and struct entries. One shot trigger */
+#if defined(CONFIG_BOOKE)
+	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W
+							| DBCR0_IDM));
+#endif
+
+	/* Clear the DABR */
+	set_dabr(0);
+
+	/* Deliver the signal to userspace */
+	info.si_signo = SIGTRAP;
+	info.si_errno = 0;
+	info.si_code = TRAP_HWBKPT;
+	info.si_addr = (void __user *)address;
+	force_sig_info(SIGTRAP, &info, current);
+}
+
 static DEFINE_PER_CPU(unsigned long, current_dabr);
 
 int set_dabr(unsigned long dabr)
 {
 	__get_cpu_var(current_dabr) = dabr;
 
-#ifdef CONFIG_PPC_MERGE		/* XXX for now */
 	if (ppc_md.set_dabr)
 		return ppc_md.set_dabr(dabr);
-#endif
 
 	/* XXX should we have a CPU_FTR_HAS_DABR ? */
 #if defined(CONFIG_PPC64) || defined(CONFIG_6xx)
 	mtspr(SPRN_DABR, dabr);
 #endif
+
+#if defined(CONFIG_BOOKE)
+	mtspr(SPRN_DAC1, dabr);
+#endif
+
 	return 0;
 }
 
@@ -337,6 +371,12 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
 		set_dabr(new->thread.dabr);
 
+#if defined(CONFIG_BOOKE)
+	/* If new thread DAC (HW breakpoint) is the same then leave it */
+	if (new->thread.dabr)
+		set_dabr(new->thread.dabr);
+#endif
+
 	new_thread = &new->thread;
 	old_thread = &current->thread;
 
@@ -525,6 +565,10 @@ void flush_thread(void)
 	if (current->thread.dabr) {
 		current->thread.dabr = 0;
 		set_dabr(0);
+
+#if defined(CONFIG_BOOKE)
+		current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W);
+#endif
 	}
 }
 
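do_dabr() above makes the data breakpoint one-shot: the DABR (or BookE DAC) is cleared before SIGTRAP is queued, so a debugger must re-arm the breakpoint after every hit. A hedged userspace sketch of that re-arm loop using powerpc's PTRACE_SET_DEBUGREG request (the 0x7 enable bits and the overall flow are illustrative assumptions, not checked constants):

    #include <sys/ptrace.h>
    #include <sys/wait.h>
    
    /* Assumed flow: arm the watchpoint, resume, and re-arm after each
     * TRAP_HWBKPT stop, since the kernel cleared it (one-shot).
     * Simplified: does not distinguish stops from child exit. */
    static void watch_loop(pid_t child, unsigned long addr)
    {
    	for (;;) {
    		ptrace(PTRACE_SET_DEBUGREG, child, 0, addr | 0x7);
    		ptrace(PTRACE_CONT, child, 0, 0);
    		if (waitpid(child, NULL, 0) < 0)
    			break;
    	}
    }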
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 1ea8c8d3ce89..b72849ac7db3 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -205,8 +205,6 @@ static int __initdata mem_reserve_cnt;
 static cell_t __initdata regbuf[1024];
 
 
-#define MAX_CPU_THREADS 2
-
 /*
  * Error results ... some OF calls will return "-1" on error, some
  * will return 0, some will return either. To simplify, here are
@@ -643,6 +641,11 @@ static void __init early_cmdline_parse(void)
 #else
 #define OV5_MSI			0x00
 #endif /* CONFIG_PCI_MSI */
+#ifdef CONFIG_PPC_SMLPAR
+#define OV5_CMO			0x80	/* Cooperative Memory Overcommitment */
+#else
+#define OV5_CMO			0x00
+#endif
 
 /*
  * The architecture vector has an array of PVR mask/value pairs,
@@ -687,10 +690,12 @@ static unsigned char ibm_architecture_vec[] = {
687 0, /* don't halt */ 690 0, /* don't halt */
688 691
689 /* option vector 5: PAPR/OF options */ 692 /* option vector 5: PAPR/OF options */
690 3 - 2, /* length */ 693 5 - 2, /* length */
691 0, /* don't ignore, don't halt */ 694 0, /* don't ignore, don't halt */
692 OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY | 695 OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY |
693 OV5_DONATE_DEDICATE_CPU | OV5_MSI, 696 OV5_DONATE_DEDICATE_CPU | OV5_MSI,
697 0,
698 OV5_CMO,
694}; 699};
695 700
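For orientation, a sketch of how the option vector 5 bytes line up after this change. This is an illustrative mirror of the array above, not the authoritative PAPR layout; the length encoding ("total size minus 2") is inferred from the existing "3 - 2" convention:

/* Illustrative only: annotated mirror of option vector 5 above. */
static const unsigned char ov5_sketch[] = {
	5 - 2,	/* length byte: total vector size (5), encoded minus 2 */
	0,	/* don't ignore, don't halt */
	0,	/* OV5_LPAR | OV5_SPLPAR | ... | OV5_MSI goes here */
	0,	/* reserved */
	0x80,	/* OV5_CMO when CONFIG_PPC_SMLPAR, else 0x00 */
};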
696/* Old method - ELF header with PT_NOTE sections */ 701/* Old method - ELF header with PT_NOTE sections */
@@ -1332,10 +1337,6 @@ static void __init prom_hold_cpus(void)
1332 unsigned int reg; 1337 unsigned int reg;
1333 phandle node; 1338 phandle node;
1334 char type[64]; 1339 char type[64];
1335 int cpuid = 0;
1336 unsigned int interrupt_server[MAX_CPU_THREADS];
1337 unsigned int cpu_threads, hw_cpu_num;
1338 int propsize;
1339 struct prom_t *_prom = &RELOC(prom); 1340 struct prom_t *_prom = &RELOC(prom);
1340 unsigned long *spinloop 1341 unsigned long *spinloop
1341 = (void *) LOW_ADDR(__secondary_hold_spinloop); 1342 = (void *) LOW_ADDR(__secondary_hold_spinloop);
@@ -1379,7 +1380,6 @@ static void __init prom_hold_cpus(void)
1379 reg = -1; 1380 reg = -1;
1380 prom_getprop(node, "reg", &reg, sizeof(reg)); 1381 prom_getprop(node, "reg", &reg, sizeof(reg));
1381 1382
1382 prom_debug("\ncpuid = 0x%x\n", cpuid);
1383 prom_debug("cpu hw idx = 0x%x\n", reg); 1383 prom_debug("cpu hw idx = 0x%x\n", reg);
1384 1384
1385 /* Init the acknowledge var which will be reset by 1385 /* Init the acknowledge var which will be reset by
@@ -1388,28 +1388,9 @@ static void __init prom_hold_cpus(void)
1388 */ 1388 */
1389 *acknowledge = (unsigned long)-1; 1389 *acknowledge = (unsigned long)-1;
1390 1390
1391 propsize = prom_getprop(node, "ibm,ppc-interrupt-server#s", 1391 if (reg != _prom->cpu) {
1392 &interrupt_server,
1393 sizeof(interrupt_server));
1394 if (propsize < 0) {
1395 /* no property. old hardware has no SMT */
1396 cpu_threads = 1;
1397 interrupt_server[0] = reg; /* fake it with phys id */
1398 } else {
1399 /* We have a threaded processor */
1400 cpu_threads = propsize / sizeof(u32);
1401 if (cpu_threads > MAX_CPU_THREADS) {
1402 prom_printf("SMT: too many threads!\n"
1403 "SMT: found %x, max is %x\n",
1404 cpu_threads, MAX_CPU_THREADS);
1405 cpu_threads = 1; /* ToDo: panic? */
1406 }
1407 }
1408
1409 hw_cpu_num = interrupt_server[0];
1410 if (hw_cpu_num != _prom->cpu) {
1411 /* Primary Thread of non-boot cpu */ 1392 /* Primary Thread of non-boot cpu */
1412 prom_printf("%x : starting cpu hw idx %x... ", cpuid, reg); 1393 prom_printf("starting cpu hw idx %x... ", reg);
1413 call_prom("start-cpu", 3, 0, node, 1394 call_prom("start-cpu", 3, 0, node,
1414 secondary_hold, reg); 1395 secondary_hold, reg);
1415 1396
@@ -1424,17 +1405,10 @@ static void __init prom_hold_cpus(void)
1424 } 1405 }
1425#ifdef CONFIG_SMP 1406#ifdef CONFIG_SMP
1426 else 1407 else
1427 prom_printf("%x : boot cpu %x\n", cpuid, reg); 1408 prom_printf("boot cpu hw idx %x\n", reg);
1428#endif /* CONFIG_SMP */ 1409#endif /* CONFIG_SMP */
1429
1430 /* Reserve cpu #s for secondary threads. They start later. */
1431 cpuid += cpu_threads;
1432 } 1410 }
1433 1411
1434 if (cpuid > NR_CPUS)
1435 prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS)
1436 ") exceeded: ignoring extras\n");
1437
1438 prom_debug("prom_hold_cpus: end...\n"); 1412 prom_debug("prom_hold_cpus: end...\n");
1439} 1413}
1440 1414
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 8feb93e7890c..3635be61f899 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -22,6 +22,7 @@
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/ptrace.h> 23#include <linux/ptrace.h>
24#include <linux/regset.h> 24#include <linux/regset.h>
25#include <linux/tracehook.h>
25#include <linux/elf.h> 26#include <linux/elf.h>
26#include <linux/user.h> 27#include <linux/user.h>
27#include <linux/security.h> 28#include <linux/security.h>
@@ -374,7 +375,7 @@ static int vsr_get(struct task_struct *target, const struct user_regset *regset,
374 flush_vsx_to_thread(target); 375 flush_vsx_to_thread(target);
375 376
376 for (i = 0; i < 32 ; i++) 377 for (i = 0; i < 32 ; i++)
377 buf[i] = current->thread.fpr[i][TS_VSRLOWOFFSET]; 378 buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET];
378 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 379 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
379 buf, 0, 32 * sizeof(double)); 380 buf, 0, 32 * sizeof(double));
380 381
@@ -393,7 +394,7 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
393 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 394 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
394 buf, 0, 32 * sizeof(double)); 395 buf, 0, 32 * sizeof(double));
395 for (i = 0; i < 32 ; i++) 396 for (i = 0; i < 32 ; i++)
396 current->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i]; 397 target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
397 398
398 399
399 return ret; 400 return ret;
@@ -703,7 +704,7 @@ void user_enable_single_step(struct task_struct *task)
703 704
704 if (regs != NULL) { 705 if (regs != NULL) {
705#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) 706#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
706 task->thread.dbcr0 = DBCR0_IDM | DBCR0_IC; 707 task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
707 regs->msr |= MSR_DE; 708 regs->msr |= MSR_DE;
708#else 709#else
709 regs->msr |= MSR_SE; 710 regs->msr |= MSR_SE;
@@ -716,9 +717,16 @@ void user_disable_single_step(struct task_struct *task)
716{ 717{
717 struct pt_regs *regs = task->thread.regs; 718 struct pt_regs *regs = task->thread.regs;
718 719
720
721#if defined(CONFIG_BOOKE)
 722 /* A DAC (HW watchpoint) still needs the debug state; skip */
723 if (task->thread.dabr)
724 return;
725#endif
726
719 if (regs != NULL) { 727 if (regs != NULL) {
720#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) 728#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
721 task->thread.dbcr0 = 0; 729 task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_IDM);
722 regs->msr &= ~MSR_DE; 730 regs->msr &= ~MSR_DE;
723#else 731#else
724 regs->msr &= ~MSR_SE; 732 regs->msr &= ~MSR_SE;
@@ -727,22 +735,76 @@ void user_disable_single_step(struct task_struct *task)
727 clear_tsk_thread_flag(task, TIF_SINGLESTEP); 735 clear_tsk_thread_flag(task, TIF_SINGLESTEP);
728} 736}
729 737
730static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, 738int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
731 unsigned long data) 739 unsigned long data)
732{ 740{
733 /* We only support one DABR and no IABRS at the moment */ 741 /* For ppc64 we support one DABR and no IABRs at the moment.
 742 * For embedded processors we support one DAC and no IACs at the
 743 * moment.
744 */
734 if (addr > 0) 745 if (addr > 0)
735 return -EINVAL; 746 return -EINVAL;
736 747
737 /* The bottom 3 bits are flags */ 748 /* The bottom 3 bits in dabr are flags */
738 if ((data & ~0x7UL) >= TASK_SIZE) 749 if ((data & ~0x7UL) >= TASK_SIZE)
739 return -EIO; 750 return -EIO;
740 751
741 /* Ensure translation is on */ 752#ifndef CONFIG_BOOKE
753
754 /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
755 * It was assumed, on previous implementations, that 3 bits were
756 * passed together with the data address, fitting the design of the
757 * DABR register, as follows:
758 *
759 * bit 0: Read flag
760 * bit 1: Write flag
761 * bit 2: Breakpoint translation
762 *
763 * Thus, we use them here as so.
764 */
765
766 /* Ensure breakpoint translation bit is set */
742 if (data && !(data & DABR_TRANSLATION)) 767 if (data && !(data & DABR_TRANSLATION))
743 return -EIO; 768 return -EIO;
744 769
770 /* Move contents to the DABR register */
745 task->thread.dabr = data; 771 task->thread.dabr = data;
772
773#endif
774#if defined(CONFIG_BOOKE)
775
776 /* As described above, it was assumed 3 bits were passed with the data
 777 * address, but we will assume only the mode bits will be passed,
 778 * so as not to cause alignment restrictions for DAC-based processors.
779 */
780
781 /* DAC's hold the whole address without any mode flags */
782 task->thread.dabr = data & ~0x3UL;
783
784 if (task->thread.dabr == 0) {
785 task->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W | DBCR0_IDM);
786 task->thread.regs->msr &= ~MSR_DE;
787 return 0;
788 }
789
790 /* Read or Write bits must be set */
791
792 if (!(data & 0x3UL))
793 return -EINVAL;
794
795 /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
 796 * register */
797 task->thread.dbcr0 = DBCR0_IDM;
798
799 /* Check for write and read flags and set DBCR0
 800 * accordingly */
801 if (data & 0x1UL)
802 task->thread.dbcr0 |= DBSR_DAC1R;
803 if (data & 0x2UL)
804 task->thread.dbcr0 |= DBSR_DAC1W;
805
806 task->thread.regs->msr |= MSR_DE;
807#endif
746 return 0; 808 return 0;
747} 809}
748 810
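For context, a minimal hypothetical userspace sketch of driving this interface through PTRACE_SET_DEBUGREG. The flag values mirror the bit assignments documented above (the DABR layout in <asm/reg.h>), and the request number is the powerpc value from <asm/ptrace.h>; the #ifndef guard is naive and only there to keep the sketch self-contained:

#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_SET_DEBUGREG
#define PTRACE_SET_DEBUGREG	26	/* powerpc-specific request */
#endif

/* Bit assignments as documented above (DABR layout) */
#define DABR_DATA_READ		0x1UL
#define DABR_DATA_WRITE		0x2UL
#define DABR_TRANSLATION	0x4UL

/* Arm a hardware write watchpoint on 'addr' in traced child 'pid'.
 * On BookE the kernel keeps only the read/write mode bits. */
static long set_hw_watchpoint(pid_t pid, unsigned long addr)
{
	unsigned long data = addr | DABR_DATA_WRITE | DABR_TRANSLATION;

	/* addr argument must be 0: only one debug register is supported */
	return ptrace(PTRACE_SET_DEBUGREG, pid, 0, data);
}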
@@ -913,15 +975,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
913 case PTRACE_GETVSRREGS: 975 case PTRACE_GETVSRREGS:
914 return copy_regset_to_user(child, &user_ppc_native_view, 976 return copy_regset_to_user(child, &user_ppc_native_view,
915 REGSET_VSX, 977 REGSET_VSX,
916 0, (32 * sizeof(vector128) + 978 0, 32 * sizeof(double),
917 sizeof(u32)),
918 (void __user *) data); 979 (void __user *) data);
919 980
920 case PTRACE_SETVSRREGS: 981 case PTRACE_SETVSRREGS:
921 return copy_regset_from_user(child, &user_ppc_native_view, 982 return copy_regset_from_user(child, &user_ppc_native_view,
922 REGSET_VSX, 983 REGSET_VSX,
923 0, (32 * sizeof(vector128) + 984 0, 32 * sizeof(double),
924 sizeof(u32)),
925 (const void __user *) data); 985 (const void __user *) data);
926#endif 986#endif
927#ifdef CONFIG_SPE 987#ifdef CONFIG_SPE
@@ -953,31 +1013,24 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
953 return ret; 1013 return ret;
954} 1014}
955 1015
956static void do_syscall_trace(void) 1016/*
1017 * We must return the syscall number to actually look up in the table.
1018 * This can be -1L to skip running any syscall at all.
1019 */
1020long do_syscall_trace_enter(struct pt_regs *regs)
957{ 1021{
958 /* the 0x80 provides a way for the tracing parent to distinguish 1022 long ret = 0;
959 between a syscall stop and SIGTRAP delivery */
960 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
961 ? 0x80 : 0));
962
963 /*
964 * this isn't the same as continuing with a signal, but it will do
965 * for normal use. strace only continues with a signal if the
966 * stopping signal is not SIGTRAP. -brl
967 */
968 if (current->exit_code) {
969 send_sig(current->exit_code, current, 1);
970 current->exit_code = 0;
971 }
972}
973 1023
974void do_syscall_trace_enter(struct pt_regs *regs)
975{
976 secure_computing(regs->gpr[0]); 1024 secure_computing(regs->gpr[0]);
977 1025
978 if (test_thread_flag(TIF_SYSCALL_TRACE) 1026 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
979 && (current->ptrace & PT_PTRACED)) 1027 tracehook_report_syscall_entry(regs))
980 do_syscall_trace(); 1028 /*
1029 * Tracing decided this syscall should not happen.
1030 * We'll return a bogus call number to get an ENOSYS
1031 * error, but leave the original number in regs->gpr[0].
1032 */
1033 ret = -1L;
981 1034
982 if (unlikely(current->audit_context)) { 1035 if (unlikely(current->audit_context)) {
983#ifdef CONFIG_PPC64 1036#ifdef CONFIG_PPC64
@@ -995,16 +1048,19 @@ void do_syscall_trace_enter(struct pt_regs *regs)
995 regs->gpr[5] & 0xffffffff, 1048 regs->gpr[5] & 0xffffffff,
996 regs->gpr[6] & 0xffffffff); 1049 regs->gpr[6] & 0xffffffff);
997 } 1050 }
1051
1052 return ret ?: regs->gpr[0];
998} 1053}
999 1054
1000void do_syscall_trace_leave(struct pt_regs *regs) 1055void do_syscall_trace_leave(struct pt_regs *regs)
1001{ 1056{
1057 int step;
1058
1002 if (unlikely(current->audit_context)) 1059 if (unlikely(current->audit_context))
1003 audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, 1060 audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
1004 regs->result); 1061 regs->result);
1005 1062
1006 if ((test_thread_flag(TIF_SYSCALL_TRACE) 1063 step = test_thread_flag(TIF_SINGLESTEP);
1007 || test_thread_flag(TIF_SINGLESTEP)) 1064 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1008 && (current->ptrace & PT_PTRACED)) 1065 tracehook_report_syscall_exit(regs, step);
1009 do_syscall_trace();
1010} 1066}
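The new enter hook's contract (return the syscall number to run, or -1L to run none) is consumed by the assembly syscall entry path in entry_*.S; rendered as hypothetical C for clarity, the caller's side looks roughly like this sketch (not kernel code, kernel-internal names assumed):

/* Hypothetical C rendering of the entry_*.S side of the contract. */
static void syscall_entry_sketch(struct pt_regs *regs)
{
	long nr = regs->gpr[0];

	if (test_thread_flag(TIF_SYSCALL_TRACE) ||
	    test_thread_flag(TIF_SINGLESTEP))
		nr = do_syscall_trace_enter(regs);

	if (nr < 0 || nr >= NR_syscalls) {
		regs->result = -ENOSYS;	/* bogus number: run no syscall */
	} else {
		/* ... dispatch through sys_call_table[nr] ... */
	}

	do_syscall_trace_leave(regs);
}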
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 67bf1a1e7e14..197d49c790ad 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -294,6 +294,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
294 case PTRACE_SETFPREGS: 294 case PTRACE_SETFPREGS:
295 case PTRACE_GETVRREGS: 295 case PTRACE_GETVRREGS:
296 case PTRACE_SETVRREGS: 296 case PTRACE_SETVRREGS:
297 case PTRACE_GETVSRREGS:
298 case PTRACE_SETVSRREGS:
297 case PTRACE_GETREGS64: 299 case PTRACE_GETREGS64:
298 case PTRACE_SETREGS64: 300 case PTRACE_SETREGS64:
299 case PPC_PTRACE_GETFPREGS: 301 case PPC_PTRACE_GETFPREGS:
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index c680f1bbd387..1f8505c23548 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -792,6 +792,9 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
792 if (args.token == RTAS_UNKNOWN_SERVICE) 792 if (args.token == RTAS_UNKNOWN_SERVICE)
793 return -EINVAL; 793 return -EINVAL;
794 794
795 args.rets = &args.args[nargs];
796 memset(args.rets, 0, args.nret * sizeof(rtas_arg_t));
797
795 /* Need to handle ibm,suspend_me call specially */ 798 /* Need to handle ibm,suspend_me call specially */
796 if (args.token == ibm_suspend_me_token) { 799 if (args.token == ibm_suspend_me_token) {
797 rc = rtas_ibm_suspend_me(&args); 800 rc = rtas_ibm_suspend_me(&args);
@@ -808,8 +811,6 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
808 enter_rtas(__pa(&rtas.args)); 811 enter_rtas(__pa(&rtas.args));
809 args = rtas.args; 812 args = rtas.args;
810 813
811 args.rets = &args.args[nargs];
812
813 /* A -1 return code indicates that the last command couldn't 814 /* A -1 return code indicates that the last command couldn't
814 be completed due to a hardware error. */ 815 be completed due to a hardware error. */
815 if (args.rets[0] == -1) 816 if (args.rets[0] == -1)
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 09ded5c424a9..149cb112cd1a 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -286,7 +286,7 @@ static ssize_t rtas_flash_read(struct file *file, char __user *buf,
286} 286}
287 287
288/* constructor for flash_block_cache */ 288/* constructor for flash_block_cache */
289void rtas_block_ctor(struct kmem_cache *cache, void *ptr) 289void rtas_block_ctor(void *ptr)
290{ 290{
291 memset(ptr, 0, RTAS_BLK_SIZE); 291 memset(ptr, 0, RTAS_BLK_SIZE);
292} 292}
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 61a3f4132087..9cc5a52711e5 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -367,7 +367,6 @@ static void __init cpu_init_thread_core_maps(int tpc)
367 * setup_cpu_maps - initialize the following cpu maps: 367 * setup_cpu_maps - initialize the following cpu maps:
368 * cpu_possible_map 368 * cpu_possible_map
369 * cpu_present_map 369 * cpu_present_map
370 * cpu_sibling_map
371 * 370 *
372 * Having the possible map set up early allows us to restrict allocations 371 * Having the possible map set up early allows us to restrict allocations
373 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS. 372 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
@@ -475,29 +474,6 @@ void __init smp_setup_cpu_maps(void)
475 */ 474 */
476 cpu_init_thread_core_maps(nthreads); 475 cpu_init_thread_core_maps(nthreads);
477} 476}
478
479/*
480 * Being that cpu_sibling_map is now a per_cpu array, then it cannot
481 * be initialized until the per_cpu areas have been created. This
482 * function is now called from setup_per_cpu_areas().
483 */
484void __init smp_setup_cpu_sibling_map(void)
485{
486#ifdef CONFIG_PPC64
487 int i, cpu, base;
488
489 for_each_possible_cpu(cpu) {
490 DBG("Sibling map for CPU %d:", cpu);
491 base = cpu_first_thread_in_core(cpu);
492 for (i = 0; i < threads_per_core; i++) {
493 cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
494 DBG(" %d", base + i);
495 }
496 DBG("\n");
497 }
498
499#endif /* CONFIG_PPC64 */
500}
501#endif /* CONFIG_SMP */ 477#endif /* CONFIG_SMP */
502 478
503#ifdef CONFIG_PCSPKR_PLATFORM 479#ifdef CONFIG_PCSPKR_PLATFORM
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 4efebe88e64a..066e65c59b58 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -43,10 +43,6 @@
43 43
44#define DBG(fmt...) 44#define DBG(fmt...)
45 45
46#if defined CONFIG_KGDB
47#include <asm/kgdb.h>
48#endif
49
50extern void bootx_init(unsigned long r4, unsigned long phys); 46extern void bootx_init(unsigned long r4, unsigned long phys);
51 47
52int boot_cpuid; 48int boot_cpuid;
@@ -302,18 +298,6 @@ void __init setup_arch(char **cmdline_p)
302 298
303 xmon_setup(); 299 xmon_setup();
304 300
305#if defined(CONFIG_KGDB)
306 if (ppc_md.kgdb_map_scc)
307 ppc_md.kgdb_map_scc();
308 set_debug_traps();
309 if (strstr(cmd_line, "gdb")) {
310 if (ppc_md.progress)
311 ppc_md.progress("setup_arch: kgdb breakpoint", 0x4000);
312 printk("kgdb breakpoint activated\n");
313 breakpoint();
314 }
315#endif
316
317 /* 301 /*
318 * Set cache line size based on type of cpu as a default. 302 * Set cache line size based on type of cpu as a default.
319 * Systems with OF can look in the properties on the cpu node(s) 303 * Systems with OF can look in the properties on the cpu node(s)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 04d8de9f0fc6..8b25f51f03bf 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -611,9 +611,6 @@ void __init setup_per_cpu_areas(void)
611 paca[i].data_offset = ptr - __per_cpu_start; 611 paca[i].data_offset = ptr - __per_cpu_start;
612 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 612 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
613 } 613 }
614
615 /* Now that per_cpu is setup, initialize cpu_sibling_map */
616 smp_setup_cpu_sibling_map();
617} 614}
618#endif 615#endif
619 616
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index ad55488939c3..a54405ebd7b0 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -9,7 +9,7 @@
9 * this archive for more details. 9 * this archive for more details.
10 */ 10 */
11 11
12#include <linux/ptrace.h> 12#include <linux/tracehook.h>
13#include <linux/signal.h> 13#include <linux/signal.h>
14#include <asm/uaccess.h> 14#include <asm/uaccess.h>
15#include <asm/unistd.h> 15#include <asm/unistd.h>
@@ -112,7 +112,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
112 } 112 }
113} 113}
114 114
115int do_signal(sigset_t *oldset, struct pt_regs *regs) 115static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
116{ 116{
117 siginfo_t info; 117 siginfo_t info;
118 int signr; 118 int signr;
@@ -145,8 +145,12 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
145 * user space. The DABR will have been cleared if it 145 * user space. The DABR will have been cleared if it
146 * triggered inside the kernel. 146 * triggered inside the kernel.
147 */ 147 */
148 if (current->thread.dabr) 148 if (current->thread.dabr) {
149 set_dabr(current->thread.dabr); 149 set_dabr(current->thread.dabr);
150#if defined(CONFIG_BOOKE)
151 mtspr(SPRN_DBCR0, current->thread.dbcr0);
152#endif
153 }
150 154
151 if (is32) { 155 if (is32) {
152 if (ka.sa.sa_flags & SA_SIGINFO) 156 if (ka.sa.sa_flags & SA_SIGINFO)
@@ -173,11 +177,28 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
173 * its frame, and we can clear the TLF_RESTORE_SIGMASK flag. 177 * its frame, and we can clear the TLF_RESTORE_SIGMASK flag.
174 */ 178 */
175 current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK; 179 current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;
180
181 /*
182 * Let tracing know that we've done the handler setup.
183 */
184 tracehook_signal_handler(signr, &info, &ka, regs,
185 test_thread_flag(TIF_SINGLESTEP));
176 } 186 }
177 187
178 return ret; 188 return ret;
179} 189}
180 190
191void do_signal(struct pt_regs *regs, unsigned long thread_info_flags)
192{
193 if (thread_info_flags & _TIF_SIGPENDING)
194 do_signal_pending(NULL, regs);
195
196 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
197 clear_thread_flag(TIF_NOTIFY_RESUME);
198 tracehook_notify_resume(regs);
199 }
200}
201
181long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, 202long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
182 unsigned long r5, unsigned long r6, unsigned long r7, 203 unsigned long r5, unsigned long r6, unsigned long r7,
183 unsigned long r8, struct pt_regs *regs) 204 unsigned long r8, struct pt_regs *regs)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index f5ae9fa222ea..5337ca7bb649 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -41,6 +41,7 @@
41#include <asm/smp.h> 41#include <asm/smp.h>
42#include <asm/time.h> 42#include <asm/time.h>
43#include <asm/machdep.h> 43#include <asm/machdep.h>
44#include <asm/cputhreads.h>
44#include <asm/cputable.h> 45#include <asm/cputable.h>
45#include <asm/system.h> 46#include <asm/system.h>
46#include <asm/mpic.h> 47#include <asm/mpic.h>
@@ -62,10 +63,12 @@ struct thread_info *secondary_ti;
62cpumask_t cpu_possible_map = CPU_MASK_NONE; 63cpumask_t cpu_possible_map = CPU_MASK_NONE;
63cpumask_t cpu_online_map = CPU_MASK_NONE; 64cpumask_t cpu_online_map = CPU_MASK_NONE;
64DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; 65DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
66DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
65 67
66EXPORT_SYMBOL(cpu_online_map); 68EXPORT_SYMBOL(cpu_online_map);
67EXPORT_SYMBOL(cpu_possible_map); 69EXPORT_SYMBOL(cpu_possible_map);
68EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 70EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
71EXPORT_PER_CPU_SYMBOL(cpu_core_map);
69 72
70/* SMP operations for this machine */ 73/* SMP operations for this machine */
71struct smp_ops_t *smp_ops; 74struct smp_ops_t *smp_ops;
@@ -228,6 +231,8 @@ void __devinit smp_prepare_boot_cpu(void)
228 BUG_ON(smp_processor_id() != boot_cpuid); 231 BUG_ON(smp_processor_id() != boot_cpuid);
229 232
230 cpu_set(boot_cpuid, cpu_online_map); 233 cpu_set(boot_cpuid, cpu_online_map);
234 cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
235 cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
231#ifdef CONFIG_PPC64 236#ifdef CONFIG_PPC64
232 paca[boot_cpuid].__current = current; 237 paca[boot_cpuid].__current = current;
233#endif 238#endif
@@ -375,11 +380,60 @@ int __cpuinit __cpu_up(unsigned int cpu)
375 return 0; 380 return 0;
376} 381}
377 382
383/* Return the value of the reg property corresponding to the given
384 * logical cpu.
385 */
386int cpu_to_core_id(int cpu)
387{
388 struct device_node *np;
389 const int *reg;
390 int id = -1;
391
392 np = of_get_cpu_node(cpu, NULL);
393 if (!np)
394 goto out;
395
396 reg = of_get_property(np, "reg", NULL);
397 if (!reg)
398 goto out;
399
400 id = *reg;
401out:
402 of_node_put(np);
403 return id;
404}
405
406/* Must be called when no change can occur to cpu_present_map,
407 * i.e. during cpu online or offline.
408 */
409static struct device_node *cpu_to_l2cache(int cpu)
410{
411 struct device_node *np;
412 const phandle *php;
413 phandle ph;
414
415 if (!cpu_present(cpu))
416 return NULL;
417
418 np = of_get_cpu_node(cpu, NULL);
419 if (np == NULL)
420 return NULL;
421
422 php = of_get_property(np, "l2-cache", NULL);
423 if (php == NULL)
424 return NULL;
425 ph = *php;
426 of_node_put(np);
427
428 return of_find_node_by_phandle(ph);
429}
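Both helpers assume the usual device-tree shape: each cpu node carries a "reg" property and, optionally, an "l2-cache" phandle, and two CPUs whose phandles resolve to the same node share that cache. A small sketch of how that test composes (illustrative only; start_secondary() and __cpu_disable() below open-code the same check):

/* Illustrative: do two logical CPUs share an L2 cache node? */
static bool cpus_share_l2(int a, int b)
{
	struct device_node *na = cpu_to_l2cache(a);
	struct device_node *nb = cpu_to_l2cache(b);
	bool shared = (na != NULL) && (na == nb);

	of_node_put(na);
	of_node_put(nb);
	return shared;
}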
378 430
379/* Activate a secondary processor. */ 431/* Activate a secondary processor. */
380int __devinit start_secondary(void *unused) 432int __devinit start_secondary(void *unused)
381{ 433{
382 unsigned int cpu = smp_processor_id(); 434 unsigned int cpu = smp_processor_id();
435 struct device_node *l2_cache;
436 int i, base;
383 437
384 atomic_inc(&init_mm.mm_count); 438 atomic_inc(&init_mm.mm_count);
385 current->active_mm = &init_mm; 439 current->active_mm = &init_mm;
@@ -400,6 +454,33 @@ int __devinit start_secondary(void *unused)
400 454
401 ipi_call_lock(); 455 ipi_call_lock();
402 cpu_set(cpu, cpu_online_map); 456 cpu_set(cpu, cpu_online_map);
457 /* Update sibling maps */
458 base = cpu_first_thread_in_core(cpu);
459 for (i = 0; i < threads_per_core; i++) {
460 if (cpu_is_offline(base + i))
461 continue;
462 cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
463 cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
464
465 /* cpu_core_map should be a superset of
466 * cpu_sibling_map even if we don't have cache
467 * information, so update the former here, too.
468 */
 469 cpu_set(cpu, per_cpu(cpu_core_map, base + i));
470 cpu_set(base + i, per_cpu(cpu_core_map, cpu));
471 }
472 l2_cache = cpu_to_l2cache(cpu);
473 for_each_online_cpu(i) {
474 struct device_node *np = cpu_to_l2cache(i);
475 if (!np)
476 continue;
477 if (np == l2_cache) {
478 cpu_set(cpu, per_cpu(cpu_core_map, i));
479 cpu_set(i, per_cpu(cpu_core_map, cpu));
480 }
481 of_node_put(np);
482 }
483 of_node_put(l2_cache);
403 ipi_call_unlock(); 484 ipi_call_unlock();
404 485
405 local_irq_enable(); 486 local_irq_enable();
@@ -437,10 +518,42 @@ void __init smp_cpus_done(unsigned int max_cpus)
437#ifdef CONFIG_HOTPLUG_CPU 518#ifdef CONFIG_HOTPLUG_CPU
438int __cpu_disable(void) 519int __cpu_disable(void)
439{ 520{
440 if (smp_ops->cpu_disable) 521 struct device_node *l2_cache;
441 return smp_ops->cpu_disable(); 522 int cpu = smp_processor_id();
523 int base, i;
524 int err;
442 525
443 return -ENOSYS; 526 if (!smp_ops->cpu_disable)
527 return -ENOSYS;
528
529 err = smp_ops->cpu_disable();
530 if (err)
531 return err;
532
533 /* Update sibling maps */
534 base = cpu_first_thread_in_core(cpu);
535 for (i = 0; i < threads_per_core; i++) {
536 cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
537 cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
 538 cpu_clear(cpu, per_cpu(cpu_core_map, base + i));
539 cpu_clear(base + i, per_cpu(cpu_core_map, cpu));
540 }
541
542 l2_cache = cpu_to_l2cache(cpu);
543 for_each_present_cpu(i) {
544 struct device_node *np = cpu_to_l2cache(i);
545 if (!np)
546 continue;
547 if (np == l2_cache) {
548 cpu_clear(cpu, per_cpu(cpu_core_map, i));
549 cpu_clear(i, per_cpu(cpu_core_map, cpu));
550 }
551 of_node_put(np);
552 }
553 of_node_put(l2_cache);
554
555
556 return 0;
444} 557}
445 558
446void __cpu_die(unsigned int cpu) 559void __cpu_die(unsigned int cpu)
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index f2589645870a..b0dbb1daa4df 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -13,7 +13,6 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/stacktrace.h> 15#include <linux/stacktrace.h>
16#include <linux/module.h>
17#include <asm/ptrace.h> 16#include <asm/ptrace.h>
18#include <asm/processor.h> 17#include <asm/processor.h>
19 18
diff --git a/arch/powerpc/kernel/suspend.c b/arch/powerpc/kernel/suspend.c
index 8cee57107541..6fc6328dc626 100644
--- a/arch/powerpc/kernel/suspend.c
+++ b/arch/powerpc/kernel/suspend.c
@@ -7,6 +7,7 @@
7 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> 7 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
8 */ 8 */
9 9
10#include <linux/mm.h>
10#include <asm/page.h> 11#include <asm/page.h>
11 12
12/* References to section boundaries */ 13/* References to section boundaries */
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index aba0ba95f062..56d172d16e56 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -22,6 +22,8 @@
22 22
23static DEFINE_PER_CPU(struct cpu, cpu_devices); 23static DEFINE_PER_CPU(struct cpu, cpu_devices);
24 24
25static DEFINE_PER_CPU(struct kobject *, cache_toplevel);
26
25/* SMT stuff */ 27/* SMT stuff */
26 28
27#ifdef CONFIG_PPC_MULTIPLATFORM 29#ifdef CONFIG_PPC_MULTIPLATFORM
@@ -297,8 +299,289 @@ static struct sysdev_attribute pa6t_attrs[] = {
297#endif /* CONFIG_DEBUG_KERNEL */ 299#endif /* CONFIG_DEBUG_KERNEL */
298}; 300};
299 301
302struct cache_desc {
303 struct kobject kobj;
304 struct cache_desc *next;
305 const char *type; /* Instruction, Data, or Unified */
306 u32 size; /* total cache size in KB */
307 u32 line_size; /* in bytes */
308 u32 nr_sets; /* number of sets */
309 u32 level; /* e.g. 1, 2, 3... */
310 u32 associativity; /* e.g. 8-way... 0 is fully associative */
311};
312
313DEFINE_PER_CPU(struct cache_desc *, cache_desc);
314
315static struct cache_desc *kobj_to_cache_desc(struct kobject *k)
316{
317 return container_of(k, struct cache_desc, kobj);
318}
319
320static void cache_desc_release(struct kobject *k)
321{
322 struct cache_desc *desc = kobj_to_cache_desc(k);
323
324 pr_debug("%s: releasing %s\n", __func__, kobject_name(k));
325
326 if (desc->next)
327 kobject_put(&desc->next->kobj);
328
 329 kfree(desc);
330}
331
332static ssize_t cache_desc_show(struct kobject *k, struct attribute *attr, char *buf)
333{
334 struct kobj_attribute *kobj_attr;
335
336 kobj_attr = container_of(attr, struct kobj_attribute, attr);
337
338 return kobj_attr->show(k, kobj_attr, buf);
339}
340
341static struct sysfs_ops cache_desc_sysfs_ops = {
342 .show = cache_desc_show,
343};
344
345static struct kobj_type cache_desc_type = {
346 .release = cache_desc_release,
347 .sysfs_ops = &cache_desc_sysfs_ops,
348};
349
350static ssize_t cache_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
351{
352 struct cache_desc *cache = kobj_to_cache_desc(k);
353
354 return sprintf(buf, "%uK\n", cache->size);
355}
356
357static struct kobj_attribute cache_size_attr =
358 __ATTR(size, 0444, cache_size_show, NULL);
359
360static ssize_t cache_line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
361{
362 struct cache_desc *cache = kobj_to_cache_desc(k);
363
364 return sprintf(buf, "%u\n", cache->line_size);
365}
366
367static struct kobj_attribute cache_line_size_attr =
368 __ATTR(coherency_line_size, 0444, cache_line_size_show, NULL);
369
370static ssize_t cache_nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
371{
372 struct cache_desc *cache = kobj_to_cache_desc(k);
373
374 return sprintf(buf, "%u\n", cache->nr_sets);
375}
376
377static struct kobj_attribute cache_nr_sets_attr =
378 __ATTR(number_of_sets, 0444, cache_nr_sets_show, NULL);
379
380static ssize_t cache_type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
381{
382 struct cache_desc *cache = kobj_to_cache_desc(k);
383
384 return sprintf(buf, "%s\n", cache->type);
385}
386
387static struct kobj_attribute cache_type_attr =
388 __ATTR(type, 0444, cache_type_show, NULL);
389
390static ssize_t cache_level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
391{
392 struct cache_desc *cache = kobj_to_cache_desc(k);
393
394 return sprintf(buf, "%u\n", cache->level);
395}
396
397static struct kobj_attribute cache_level_attr =
398 __ATTR(level, 0444, cache_level_show, NULL);
399
400static ssize_t cache_assoc_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
401{
402 struct cache_desc *cache = kobj_to_cache_desc(k);
403
404 return sprintf(buf, "%u\n", cache->associativity);
405}
406
407static struct kobj_attribute cache_assoc_attr =
408 __ATTR(ways_of_associativity, 0444, cache_assoc_show, NULL);
409
410struct cache_desc_info {
411 const char *type;
412 const char *size_prop;
413 const char *line_size_prop;
414 const char *nr_sets_prop;
415};
416
417/* PowerPC Processor binding says the [di]-cache-* must be equal on
418 * unified caches, so just use d-cache properties. */
419static struct cache_desc_info ucache_info = {
420 .type = "Unified",
421 .size_prop = "d-cache-size",
422 .line_size_prop = "d-cache-line-size",
423 .nr_sets_prop = "d-cache-sets",
424};
300 425
301static void register_cpu_online(unsigned int cpu) 426static struct cache_desc_info dcache_info = {
427 .type = "Data",
428 .size_prop = "d-cache-size",
429 .line_size_prop = "d-cache-line-size",
430 .nr_sets_prop = "d-cache-sets",
431};
432
433static struct cache_desc_info icache_info = {
434 .type = "Instruction",
435 .size_prop = "i-cache-size",
436 .line_size_prop = "i-cache-line-size",
437 .nr_sets_prop = "i-cache-sets",
438};
439
440static struct cache_desc * __cpuinit create_cache_desc(struct device_node *np, struct kobject *parent, int index, int level, struct cache_desc_info *info)
441{
442 const u32 *cache_line_size;
443 struct cache_desc *new;
444 const u32 *cache_size;
445 const u32 *nr_sets;
446 int rc;
447
448 new = kzalloc(sizeof(*new), GFP_KERNEL);
449 if (!new)
450 return NULL;
451
452 rc = kobject_init_and_add(&new->kobj, &cache_desc_type, parent,
453 "index%d", index);
454 if (rc)
455 goto err;
456
457 /* type */
458 new->type = info->type;
459 rc = sysfs_create_file(&new->kobj, &cache_type_attr.attr);
460 WARN_ON(rc);
461
462 /* level */
463 new->level = level;
464 rc = sysfs_create_file(&new->kobj, &cache_level_attr.attr);
465 WARN_ON(rc);
466
467 /* size */
468 cache_size = of_get_property(np, info->size_prop, NULL);
469 if (cache_size) {
470 new->size = *cache_size / 1024;
471 rc = sysfs_create_file(&new->kobj,
472 &cache_size_attr.attr);
473 WARN_ON(rc);
474 }
475
476 /* coherency_line_size */
477 cache_line_size = of_get_property(np, info->line_size_prop, NULL);
478 if (cache_line_size) {
479 new->line_size = *cache_line_size;
480 rc = sysfs_create_file(&new->kobj,
481 &cache_line_size_attr.attr);
482 WARN_ON(rc);
483 }
484
485 /* number_of_sets */
486 nr_sets = of_get_property(np, info->nr_sets_prop, NULL);
487 if (nr_sets) {
488 new->nr_sets = *nr_sets;
489 rc = sysfs_create_file(&new->kobj,
490 &cache_nr_sets_attr.attr);
491 WARN_ON(rc);
492 }
493
494 /* ways_of_associativity */
495 if (new->nr_sets == 1) {
496 /* fully associative */
497 new->associativity = 0;
498 goto create_assoc;
499 }
500
501 if (new->nr_sets && new->size && new->line_size) {
502 /* If we have values for all of these we can derive
503 * the associativity. */
504 new->associativity =
505 ((new->size * 1024) / new->nr_sets) / new->line_size;
506create_assoc:
507 rc = sysfs_create_file(&new->kobj,
508 &cache_assoc_attr.attr);
509 WARN_ON(rc);
510 }
511
512 return new;
513err:
514 kfree(new);
515 return NULL;
516}
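A quick check of the derivation above with assumed figures: a 32KB cache (size = 32) with 128 sets and 64-byte lines gives ((32 * 1024) / 128) / 64 = 4, i.e. 4-way set associative; nr_sets == 1 is the fully-associative special case, reported as 0.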
517
518static bool cache_is_unified(struct device_node *np)
519{
520 return of_get_property(np, "cache-unified", NULL);
521}
522
523static struct cache_desc * __cpuinit create_cache_index_info(struct device_node *np, struct kobject *parent, int index, int level)
524{
525 const phandle *next_cache_phandle;
526 struct device_node *next_cache;
527 struct cache_desc *new, **end;
528
529 pr_debug("%s(node = %s, index = %d)\n", __func__, np->full_name, index);
530
531 if (cache_is_unified(np)) {
532 new = create_cache_desc(np, parent, index, level,
533 &ucache_info);
534 } else {
535 new = create_cache_desc(np, parent, index, level,
536 &dcache_info);
537 if (new) {
538 index++;
539 new->next = create_cache_desc(np, parent, index, level,
540 &icache_info);
541 }
542 }
543 if (!new)
544 return NULL;
545
546 end = &new->next;
547 while (*end)
548 end = &(*end)->next;
549
550 next_cache_phandle = of_get_property(np, "l2-cache", NULL);
551 if (!next_cache_phandle)
552 goto out;
553
554 next_cache = of_find_node_by_phandle(*next_cache_phandle);
555 if (!next_cache)
556 goto out;
557
558 *end = create_cache_index_info(next_cache, parent, ++index, ++level);
559
560 of_node_put(next_cache);
561out:
562 return new;
563}
564
565static void __cpuinit create_cache_info(struct sys_device *sysdev)
566{
567 struct kobject *cache_toplevel;
568 struct device_node *np = NULL;
569 int cpu = sysdev->id;
570
571 cache_toplevel = kobject_create_and_add("cache", &sysdev->kobj);
572 if (!cache_toplevel)
573 return;
574 per_cpu(cache_toplevel, cpu) = cache_toplevel;
575 np = of_get_cpu_node(cpu, NULL);
576 if (np != NULL) {
577 per_cpu(cache_desc, cpu) =
578 create_cache_index_info(np, cache_toplevel, 0, 1);
579 of_node_put(np);
580 }
581 return;
582}
583
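The net effect is a /sys/devices/system/cpu/cpuN/cache/indexM/ hierarchy. A minimal userspace sketch that dumps the level-1 entry for cpu0 (attribute names are the __ATTR() names above; missing attributes are simply skipped):

#include <stdio.h>

int main(void)
{
	const char *attrs[] = { "type", "level", "size",
				"coherency_line_size", "number_of_sets",
				"ways_of_associativity" };
	char path[128], buf[64];
	unsigned int i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu0/cache/index0/%s",
			 attrs[i]);
		f = fopen(path, "r");
		if (!f)
			continue;	/* optional attribute absent */
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", attrs[i], buf);
		fclose(f);
	}
	return 0;
}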
584static void __cpuinit register_cpu_online(unsigned int cpu)
302{ 585{
303 struct cpu *c = &per_cpu(cpu_devices, cpu); 586 struct cpu *c = &per_cpu(cpu_devices, cpu);
304 struct sys_device *s = &c->sysdev; 587 struct sys_device *s = &c->sysdev;
@@ -346,9 +629,33 @@ static void register_cpu_online(unsigned int cpu)
346 629
347 if (cpu_has_feature(CPU_FTR_DSCR)) 630 if (cpu_has_feature(CPU_FTR_DSCR))
348 sysdev_create_file(s, &attr_dscr); 631 sysdev_create_file(s, &attr_dscr);
632
633 create_cache_info(s);
349} 634}
350 635
351#ifdef CONFIG_HOTPLUG_CPU 636#ifdef CONFIG_HOTPLUG_CPU
637static void remove_cache_info(struct sys_device *sysdev)
638{
639 struct kobject *cache_toplevel;
640 struct cache_desc *cache_desc;
641 int cpu = sysdev->id;
642
643 cache_desc = per_cpu(cache_desc, cpu);
644 if (cache_desc != NULL) {
645 sysfs_remove_file(&cache_desc->kobj, &cache_size_attr.attr);
646 sysfs_remove_file(&cache_desc->kobj, &cache_line_size_attr.attr);
647 sysfs_remove_file(&cache_desc->kobj, &cache_type_attr.attr);
648 sysfs_remove_file(&cache_desc->kobj, &cache_level_attr.attr);
649 sysfs_remove_file(&cache_desc->kobj, &cache_nr_sets_attr.attr);
650 sysfs_remove_file(&cache_desc->kobj, &cache_assoc_attr.attr);
651
652 kobject_put(&cache_desc->kobj);
653 }
654 cache_toplevel = per_cpu(cache_toplevel, cpu);
655 if (cache_toplevel != NULL)
656 kobject_put(cache_toplevel);
657}
658
352static void unregister_cpu_online(unsigned int cpu) 659static void unregister_cpu_online(unsigned int cpu)
353{ 660{
354 struct cpu *c = &per_cpu(cpu_devices, cpu); 661 struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -399,6 +706,8 @@ static void unregister_cpu_online(unsigned int cpu)
399 706
400 if (cpu_has_feature(CPU_FTR_DSCR)) 707 if (cpu_has_feature(CPU_FTR_DSCR))
401 sysdev_remove_file(s, &attr_dscr); 708 sysdev_remove_file(s, &attr_dscr);
709
710 remove_cache_info(s);
402} 711}
403#endif /* CONFIG_HOTPLUG_CPU */ 712#endif /* CONFIG_HOTPLUG_CPU */
404 713
@@ -529,7 +838,8 @@ static void register_nodes(void)
529#endif 838#endif
530 839
531/* Only valid if CPU is present. */ 840/* Only valid if CPU is present. */
532static ssize_t show_physical_id(struct sys_device *dev, char *buf) 841static ssize_t show_physical_id(struct sys_device *dev,
842 struct sysdev_attribute *attr, char *buf)
533{ 843{
534 struct cpu *cpu = container_of(dev, struct cpu, sysdev); 844 struct cpu *cpu = container_of(dev, struct cpu, sysdev);
535 845
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 878fbddb6ae1..81ccb8dd1a54 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1067,6 +1067,22 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
1067 } 1067 }
1068 1068
1069 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 1069 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
1070 } else if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
1071 regs->msr &= ~MSR_DE;
1072
1073 if (user_mode(regs)) {
1074 current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W |
1075 DBCR0_IDM);
1076 } else {
 1077 /* Disable DAC interrupts */
1078 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R |
1079 DBSR_DAC1W | DBCR0_IDM));
1080
1081 /* Clear the DAC event */
1082 mtspr(SPRN_DBSR, (DBSR_DAC1R | DBSR_DAC1W));
1083 }
1084 /* Setup and send the trap to the handler */
 1085 /* Set up and send the trap to the handler */
1070 } 1086 }
1071} 1087}
1072#endif /* CONFIG_4xx || CONFIG_BOOKE */ 1088#endif /* CONFIG_4xx || CONFIG_BOOKE */
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index f177c60ea766..65639a43e644 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -788,9 +788,7 @@ static int __init vdso_init(void)
788 788
789 return 0; 789 return 0;
790} 790}
791#ifdef CONFIG_PPC_MERGE
792arch_initcall(vdso_init); 791arch_initcall(vdso_init);
793#endif
794 792
795int in_gate_area_no_task(unsigned long addr) 793int in_gate_area_no_task(unsigned long addr)
796{ 794{
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index b77f8af7ddde..22a3c33fd751 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1,11 +1,12 @@
1/* 1/*
2 * IBM PowerPC Virtual I/O Infrastructure Support. 2 * IBM PowerPC Virtual I/O Infrastructure Support.
3 * 3 *
4 * Copyright (c) 2003-2005 IBM Corp. 4 * Copyright (c) 2003,2008 IBM Corp.
5 * Dave Engebretsen engebret@us.ibm.com 5 * Dave Engebretsen engebret@us.ibm.com
6 * Santiago Leon santil@us.ibm.com 6 * Santiago Leon santil@us.ibm.com
7 * Hollis Blanchard <hollisb@us.ibm.com> 7 * Hollis Blanchard <hollisb@us.ibm.com>
8 * Stephen Rothwell 8 * Stephen Rothwell
9 * Robert Jennings <rcjenn@us.ibm.com>
9 * 10 *
10 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 12 * modify it under the terms of the GNU General Public License
@@ -46,6 +47,996 @@ static struct vio_dev vio_bus_device = { /* fake "parent" device */
46 .dev.bus = &vio_bus_type, 47 .dev.bus = &vio_bus_type,
47}; 48};
48 49
50#ifdef CONFIG_PPC_SMLPAR
51/**
52 * vio_cmo_pool - A pool of IO memory for CMO use
53 *
54 * @size: The size of the pool in bytes
55 * @free: The amount of free memory in the pool
56 */
57struct vio_cmo_pool {
58 size_t size;
59 size_t free;
60};
61
62/* How many ms to delay queued balance work */
63#define VIO_CMO_BALANCE_DELAY 100
64
65/* Portion out IO memory to CMO devices by this chunk size */
66#define VIO_CMO_BALANCE_CHUNK 131072
67
68/**
69 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
70 *
71 * @vio_dev: struct vio_dev pointer
72 * @list: pointer to other devices on bus that are being tracked
73 */
74struct vio_cmo_dev_entry {
75 struct vio_dev *viodev;
76 struct list_head list;
77};
78
79/**
80 * vio_cmo - VIO bus accounting structure for CMO entitlement
81 *
82 * @lock: spinlock for entire structure
83 * @balance_q: work queue for balancing system entitlement
84 * @device_list: list of CMO-enabled devices requiring entitlement
85 * @entitled: total system entitlement in bytes
86 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
87 * @excess: pool of excess entitlement not needed for device reserves or spare
88 * @spare: IO memory for device hotplug functionality
89 * @min: minimum necessary for system operation
90 * @desired: desired memory for system operation
91 * @curr: bytes currently allocated
92 * @high: high water mark for IO data usage
93 */
94struct vio_cmo {
95 spinlock_t lock;
96 struct delayed_work balance_q;
97 struct list_head device_list;
98 size_t entitled;
99 struct vio_cmo_pool reserve;
100 struct vio_cmo_pool excess;
101 size_t spare;
102 size_t min;
103 size_t desired;
104 size_t curr;
105 size_t high;
106} vio_cmo;
107
108/**
 109 * vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
110 */
111static int vio_cmo_num_OF_devs(void)
112{
113 struct device_node *node_vroot;
114 int count = 0;
115
116 /*
117 * Count the number of vdevice entries with an
118 * ibm,my-dma-window OF property
119 */
120 node_vroot = of_find_node_by_name(NULL, "vdevice");
121 if (node_vroot) {
122 struct device_node *of_node;
123 struct property *prop;
124
125 for_each_child_of_node(node_vroot, of_node) {
126 prop = of_find_property(of_node, "ibm,my-dma-window",
127 NULL);
128 if (prop)
129 count++;
130 }
131 }
132 of_node_put(node_vroot);
133 return count;
134}
135
136/**
 137 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
138 *
139 * @viodev: VIO device requesting IO memory
140 * @size: size of allocation requested
141 *
142 * Allocations come from memory reserved for the devices and any excess
143 * IO memory available to all devices. The spare pool used to service
144 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
145 * made available.
146 *
147 * Return codes:
148 * 0 for successful allocation and -ENOMEM for a failure
149 */
150static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
151{
152 unsigned long flags;
153 size_t reserve_free = 0;
154 size_t excess_free = 0;
155 int ret = -ENOMEM;
156
157 spin_lock_irqsave(&vio_cmo.lock, flags);
158
159 /* Determine the amount of free entitlement available in reserve */
160 if (viodev->cmo.entitled > viodev->cmo.allocated)
161 reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;
162
 163 /* If spare is not fulfilled, the excess pool cannot be used. */
164 if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
165 excess_free = vio_cmo.excess.free;
166
167 /* The request can be satisfied */
168 if ((reserve_free + excess_free) >= size) {
169 vio_cmo.curr += size;
170 if (vio_cmo.curr > vio_cmo.high)
171 vio_cmo.high = vio_cmo.curr;
172 viodev->cmo.allocated += size;
173 size -= min(reserve_free, size);
174 vio_cmo.excess.free -= size;
175 ret = 0;
176 }
177
178 spin_unlock_irqrestore(&vio_cmo.lock, flags);
179 return ret;
180}
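A worked example with assumed figures: a device with entitled = 64KB and allocated = 48KB has reserve_free = 16KB; if the spare is fulfilled and excess.free = 8KB, a 20KB request succeeds, and since size -= min(reserve_free, size) removes the 16KB covered by the reserve, only the remaining 4KB is debited from excess.free.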
181
182/**
 183 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
184 * @viodev: VIO device freeing IO memory
185 * @size: size of deallocation
186 *
187 * IO memory is freed by the device back to the correct memory pools.
188 * The spare pool is replenished first from either memory pool, then
189 * the reserve pool is used to reduce device entitlement, the excess
190 * pool is used to increase the reserve pool toward the desired entitlement
191 * target, and then the remaining memory is returned to the pools.
192 *
193 */
194static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
195{
196 unsigned long flags;
197 size_t spare_needed = 0;
198 size_t excess_freed = 0;
199 size_t reserve_freed = size;
200 size_t tmp;
201 int balance = 0;
202
203 spin_lock_irqsave(&vio_cmo.lock, flags);
204 vio_cmo.curr -= size;
205
206 /* Amount of memory freed from the excess pool */
207 if (viodev->cmo.allocated > viodev->cmo.entitled) {
208 excess_freed = min(reserve_freed, (viodev->cmo.allocated -
209 viodev->cmo.entitled));
210 reserve_freed -= excess_freed;
211 }
212
213 /* Remove allocation from device */
214 viodev->cmo.allocated -= (reserve_freed + excess_freed);
215
216 /* Spare is a subset of the reserve pool, replenish it first. */
217 spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;
218
219 /*
220 * Replenish the spare in the reserve pool from the excess pool.
221 * This moves entitlement into the reserve pool.
222 */
223 if (spare_needed && excess_freed) {
224 tmp = min(excess_freed, spare_needed);
225 vio_cmo.excess.size -= tmp;
226 vio_cmo.reserve.size += tmp;
227 vio_cmo.spare += tmp;
228 excess_freed -= tmp;
229 spare_needed -= tmp;
230 balance = 1;
231 }
232
233 /*
234 * Replenish the spare in the reserve pool from the reserve pool.
235 * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
236 * if needed, and gives it to the spare pool. The amount of used
237 * memory in this pool does not change.
238 */
239 if (spare_needed && reserve_freed) {
240 tmp = min(spare_needed, min(reserve_freed,
241 (viodev->cmo.entitled -
242 VIO_CMO_MIN_ENT)));
243
244 vio_cmo.spare += tmp;
245 viodev->cmo.entitled -= tmp;
246 reserve_freed -= tmp;
247 spare_needed -= tmp;
248 balance = 1;
249 }
250
251 /*
252 * Increase the reserve pool until the desired allocation is met.
253 * Move an allocation freed from the excess pool into the reserve
254 * pool and schedule a balance operation.
255 */
256 if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
257 tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));
258
259 vio_cmo.excess.size -= tmp;
260 vio_cmo.reserve.size += tmp;
261 excess_freed -= tmp;
262 balance = 1;
263 }
264
265 /* Return memory from the excess pool to that pool */
266 if (excess_freed)
267 vio_cmo.excess.free += excess_freed;
268
269 if (balance)
270 schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
271 spin_unlock_irqrestore(&vio_cmo.lock, flags);
272}
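Continuing with assumed figures: freeing 20KB from a device with allocated = 68KB and entitled = 64KB classifies 4KB as excess_freed and 16KB as reserve_freed. If the spare is 2KB short of VIO_CMO_MIN_ENT, that 2KB is topped up from excess_freed first (moving entitlement into the reserve pool); reserve_freed would only be tapped for a remaining shortfall. Leftover excess_freed then grows the reserve pool toward vio_cmo.desired, and whatever is still left lands back in excess.free before a balance is scheduled.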
273
274/**
275 * vio_cmo_entitlement_update - Manage system entitlement changes
276 *
277 * @new_entitlement: new system entitlement to attempt to accommodate
278 *
279 * Increases in entitlement will be used to fulfill the spare entitlement
280 * and the rest is given to the excess pool. Decreases, if they are
 281 * possible, come from the excess pool and from unused device entitlement.
 282 *
 283 * Returns: 0 on success, -ENOMEM when the change cannot be made
284 */
285int vio_cmo_entitlement_update(size_t new_entitlement)
286{
287 struct vio_dev *viodev;
288 struct vio_cmo_dev_entry *dev_ent;
289 unsigned long flags;
290 size_t avail, delta, tmp;
291
292 spin_lock_irqsave(&vio_cmo.lock, flags);
293
294 /* Entitlement increases */
295 if (new_entitlement > vio_cmo.entitled) {
296 delta = new_entitlement - vio_cmo.entitled;
297
298 /* Fulfill spare allocation */
299 if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
300 tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
301 vio_cmo.spare += tmp;
302 vio_cmo.reserve.size += tmp;
303 delta -= tmp;
304 }
305
306 /* Remaining new allocation goes to the excess pool */
307 vio_cmo.entitled += delta;
308 vio_cmo.excess.size += delta;
309 vio_cmo.excess.free += delta;
310
311 goto out;
312 }
313
314 /* Entitlement decreases */
315 delta = vio_cmo.entitled - new_entitlement;
316 avail = vio_cmo.excess.free;
317
318 /*
319 * Need to check how much unused entitlement each device can
320 * sacrifice to fulfill entitlement change.
321 */
322 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
323 if (avail >= delta)
324 break;
325
326 viodev = dev_ent->viodev;
327 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
328 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
329 avail += viodev->cmo.entitled -
330 max_t(size_t, viodev->cmo.allocated,
331 VIO_CMO_MIN_ENT);
332 }
333
334 if (delta <= avail) {
335 vio_cmo.entitled -= delta;
336
337 /* Take entitlement from the excess pool first */
338 tmp = min(vio_cmo.excess.free, delta);
339 vio_cmo.excess.size -= tmp;
340 vio_cmo.excess.free -= tmp;
341 delta -= tmp;
342
343 /*
344 * Remove all but VIO_CMO_MIN_ENT bytes from devices
345 * until entitlement change is served
346 */
347 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
348 if (!delta)
349 break;
350
351 viodev = dev_ent->viodev;
352 tmp = 0;
353 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
354 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
355 tmp = viodev->cmo.entitled -
356 max_t(size_t, viodev->cmo.allocated,
357 VIO_CMO_MIN_ENT);
358 viodev->cmo.entitled -= min(tmp, delta);
359 delta -= min(tmp, delta);
360 }
361 } else {
362 spin_unlock_irqrestore(&vio_cmo.lock, flags);
363 return -ENOMEM;
364 }
365
366out:
367 schedule_delayed_work(&vio_cmo.balance_q, 0);
368 spin_unlock_irqrestore(&vio_cmo.lock, flags);
369 return 0;
370}
371
372/**
373 * vio_cmo_balance - Balance entitlement among devices
374 *
375 * @work: work queue structure for this operation
376 *
377 * Any system entitlement above the minimum needed for devices, or
378 * already allocated to devices, can be distributed to the devices.
379 * The list of devices is iterated through to recalculate the desired
380 * entitlement level and to determine how much entitlement above the
381 * minimum entitlement is allocated to devices.
382 *
383 * Small chunks of the available entitlement are given to devices until
384 * their requirements are fulfilled or there is no entitlement left to give.
 385 * Upon completion, the sizes of the reserve and excess pools are calculated.
386 *
387 * The system minimum entitlement level is also recalculated here.
388 * Entitlement will be reserved for devices even after vio_bus_remove to
389 * accommodate reloading the driver. The OF tree is walked to count the
390 * number of devices present and this will remove entitlement for devices
 391 * that have actually left the system after vio_bus_remove was called.
392 */
393static void vio_cmo_balance(struct work_struct *work)
394{
395 struct vio_cmo *cmo;
396 struct vio_dev *viodev;
397 struct vio_cmo_dev_entry *dev_ent;
398 unsigned long flags;
399 size_t avail = 0, level, chunk, need;
400 int devcount = 0, fulfilled;
401
402 cmo = container_of(work, struct vio_cmo, balance_q.work);
403
404 spin_lock_irqsave(&vio_cmo.lock, flags);
405
406 /* Calculate minimum entitlement and fulfill spare */
407 cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
408 BUG_ON(cmo->min > cmo->entitled);
409 cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
410 cmo->min += cmo->spare;
411 cmo->desired = cmo->min;
412
413 /*
414 * Determine how much entitlement is available and reset device
415 * entitlements
416 */
417 avail = cmo->entitled - cmo->spare;
418 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
419 viodev = dev_ent->viodev;
420 devcount++;
421 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
422 cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
423 avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
424 }
425
426 /*
427 * Having provided each device with the minimum entitlement, loop
428 * over the devices portioning out the remaining entitlement
429 * until there is nothing left.
430 */
431 level = VIO_CMO_MIN_ENT;
432 while (avail) {
433 fulfilled = 0;
434 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
435 viodev = dev_ent->viodev;
436
437 if (viodev->cmo.desired <= level) {
438 fulfilled++;
439 continue;
440 }
441
442 /*
443 * Give the device up to VIO_CMO_BALANCE_CHUNK
444 * bytes of entitlement, but do not exceed the
445 * desired level of entitlement for the device.
446 */
447 chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
448 chunk = min(chunk, (viodev->cmo.desired -
449 viodev->cmo.entitled));
450 viodev->cmo.entitled += chunk;
451
452 /*
453 * If the memory for this entitlement increase was
454 * already allocated to the device it does not come
455 * from the available pool being portioned out.
456 */
 457 need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
458 max(viodev->cmo.allocated, level);
459 avail -= need;
460
461 }
462 if (fulfilled == devcount)
463 break;
464 level += VIO_CMO_BALANCE_CHUNK;
465 }
466
467 /* Calculate new reserve and excess pool sizes */
468 cmo->reserve.size = cmo->min;
469 cmo->excess.free = 0;
470 cmo->excess.size = 0;
471 need = 0;
472 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
473 viodev = dev_ent->viodev;
474 /* Calculated reserve size above the minimum entitlement */
475 if (viodev->cmo.entitled)
476 cmo->reserve.size += (viodev->cmo.entitled -
477 VIO_CMO_MIN_ENT);
478 /* Calculated used excess entitlement */
479 if (viodev->cmo.allocated > viodev->cmo.entitled)
480 need += viodev->cmo.allocated - viodev->cmo.entitled;
481 }
482 cmo->excess.size = cmo->entitled - cmo->reserve.size;
483 cmo->excess.free = cmo->excess.size - need;
484
485 cancel_delayed_work(container_of(work, struct delayed_work, work));
486 spin_unlock_irqrestore(&vio_cmo.lock, flags);
487}
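The distribution loop is a chunked water-filling pass. Stripped of locking and list handling, its shape is roughly the stand-alone sketch below (constants are stand-ins, entitled[] is assumed pre-reset to the minimum as the kernel does, and the pool subtraction is defensively clamped where the kernel relies on its own invariants):

#include <stddef.h>

#define MIN_ENT	4096	/* stand-in for VIO_CMO_MIN_ENT */
#define CHUNK	131072	/* stand-in for VIO_CMO_BALANCE_CHUNK */

static size_t max_sz(size_t a, size_t b) { return a > b ? a : b; }

/* Raise each device from MIN_ENT toward desired[i] in CHUNK steps
 * until 'avail' is exhausted -- the same shape as vio_cmo_balance(). */
static void balance_sketch(size_t *entitled, const size_t *desired,
			   const size_t *allocated, int n, size_t avail)
{
	size_t level = MIN_ENT;

	while (avail) {
		int i, fulfilled = 0;

		for (i = 0; i < n; i++) {
			size_t chunk, need;

			if (desired[i] <= level) {
				fulfilled++;
				continue;
			}
			chunk = avail < CHUNK ? avail : CHUNK;
			if (chunk > desired[i] - entitled[i])
				chunk = desired[i] - entitled[i];
			entitled[i] += chunk;

			/* Entitlement already backed by the device's own
			 * allocation does not consume the shared pool. */
			need = max_sz(allocated[i], entitled[i]) -
			       max_sz(allocated[i], level);
			avail -= (need < avail) ? need : avail;
		}
		if (fulfilled == n)
			break;
		level += CHUNK;
	}
}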
488
489static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
490 dma_addr_t *dma_handle, gfp_t flag)
491{
492 struct vio_dev *viodev = to_vio_dev(dev);
493 void *ret;
494
495 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
496 atomic_inc(&viodev->cmo.allocs_failed);
497 return NULL;
498 }
499
500 ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag);
501 if (unlikely(ret == NULL)) {
502 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
503 atomic_inc(&viodev->cmo.allocs_failed);
504 }
505
506 return ret;
507}
508
509static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
510 void *vaddr, dma_addr_t dma_handle)
511{
512 struct vio_dev *viodev = to_vio_dev(dev);
513
514 dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle);
515
516 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
517}
518
519static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr,
520 size_t size,
521 enum dma_data_direction direction,
522 struct dma_attrs *attrs)
523{
524 struct vio_dev *viodev = to_vio_dev(dev);
525 dma_addr_t ret = DMA_ERROR_CODE;
526
527 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
528 atomic_inc(&viodev->cmo.allocs_failed);
529 return ret;
530 }
531
532 ret = dma_iommu_ops.map_single(dev, vaddr, size, direction, attrs);
533 if (unlikely(dma_mapping_error(dev, ret))) {
534 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
535 atomic_inc(&viodev->cmo.allocs_failed);
536 }
537
538 return ret;
539}
540
541static void vio_dma_iommu_unmap_single(struct device *dev,
542 dma_addr_t dma_handle, size_t size,
543 enum dma_data_direction direction,
544 struct dma_attrs *attrs)
545{
546 struct vio_dev *viodev = to_vio_dev(dev);
547
548 dma_iommu_ops.unmap_single(dev, dma_handle, size, direction, attrs);
549
550 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
551}
552
553static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
554 int nelems, enum dma_data_direction direction,
555 struct dma_attrs *attrs)
556{
557 struct vio_dev *viodev = to_vio_dev(dev);
558 struct scatterlist *sgl;
559 int ret, count = 0;
560 size_t alloc_size = 0;
561
562 for (sgl = sglist; count < nelems; count++, sgl++)
563 alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);
564
565 if (vio_cmo_alloc(viodev, alloc_size)) {
566 atomic_inc(&viodev->cmo.allocs_failed);
567 return 0;
568 }
569
570 ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);
571
572 if (unlikely(!ret)) {
573 vio_cmo_dealloc(viodev, alloc_size);
574 atomic_inc(&viodev->cmo.allocs_failed);
	return ret;
575 }
576
577 for (sgl = sglist, count = 0; count < ret; count++, sgl++)
578 alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
579 if (alloc_size)
580 vio_cmo_dealloc(viodev, alloc_size);
581
582 return ret;
583}
584
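The up-front charge in vio_dma_iommu_map_sg() is intentionally pessimistic: the IOMMU may coalesce scatterlist entries, so after a successful mapping the refund loop walks the mapped entries and returns whatever was not consumed. A worked example with hypothetical sizes:

/*
 * Three 4 KiB entries are charged 12 KiB before mapping.  If
 * dma_iommu_ops.map_sg() coalesces them into one 8 KiB mapping
 * (ret == 1, dma_length == 8 KiB), the loop subtracts 8 KiB and
 * the remaining alloc_size of 4 KiB is refunded via vio_cmo_dealloc().
 */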
585static void vio_dma_iommu_unmap_sg(struct device *dev,
586 struct scatterlist *sglist, int nelems,
587 enum dma_data_direction direction,
588 struct dma_attrs *attrs)
589{
590 struct vio_dev *viodev = to_vio_dev(dev);
591 struct scatterlist *sgl;
592 size_t alloc_size = 0;
593 int count = 0;
594
595 for (sgl = sglist; count < nelems; count++, sgl++)
596 alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
597
598 dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
599
600 vio_cmo_dealloc(viodev, alloc_size);
601}
602
603struct dma_mapping_ops vio_dma_mapping_ops = {
604 .alloc_coherent = vio_dma_iommu_alloc_coherent,
605 .free_coherent = vio_dma_iommu_free_coherent,
606 .map_single = vio_dma_iommu_map_single,
607 .unmap_single = vio_dma_iommu_unmap_single,
608 .map_sg = vio_dma_iommu_map_sg,
609 .unmap_sg = vio_dma_iommu_unmap_sg,
610};
611
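Drivers never call these ops directly; once vio_cmo_set_dma_ops() below points a device's archdata at this table, an ordinary mapping call from the driver is transparently routed through the CMO-accounting wrappers. A hypothetical fragment of driver code (buf and len are the driver's own):

/* Dispatched to vio_dma_iommu_map_single(), which charges the
 * IOMMU-page-rounded length to this device before mapping. */
dma_addr_t handle = dma_map_single(&viodev->dev, buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(&viodev->dev, handle)) {
	/* the wrapper already refunded the charge and bumped allocs_failed */
	return -EIO;
}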
612/**
613 * vio_cmo_set_dev_desired - Set desired entitlement for a device
614 *
615 * @viodev: struct vio_dev for device to alter
616 * @desired: new desired entitlement level in bytes
617 *
618 * For use by devices to request a change to their entitlement at runtime or
619 * through sysfs. The desired entitlement level is changed and a balancing
620 * of system resources is scheduled to run in the future.
621 */
622void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
623{
624 unsigned long flags;
625 struct vio_cmo_dev_entry *dev_ent;
626 int found = 0;
627
628 if (!firmware_has_feature(FW_FEATURE_CMO))
629 return;
630
631 spin_lock_irqsave(&vio_cmo.lock, flags);
632 if (desired < VIO_CMO_MIN_ENT)
633 desired = VIO_CMO_MIN_ENT;
634
635 /*
636 * Changes are only made for devices in the device list; a
637 * device not in the list has no driver loaded and cannot
638 * receive entitlement.
639 */
640 list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
641 if (viodev == dev_ent->viodev) {
642 found = 1;
643 break;
644 }
645 if (!found) {
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return;
646 }
647
648 /* Increase/decrease in desired device entitlement */
649 if (desired >= viodev->cmo.desired) {
650 /* Just bump the bus and device values prior to a balance */
651 vio_cmo.desired += desired - viodev->cmo.desired;
652 viodev->cmo.desired = desired;
653 } else {
654 /* Decrease bus and device values for desired entitlement */
655 vio_cmo.desired -= viodev->cmo.desired - desired;
656 viodev->cmo.desired = desired;
657 /*
658 * If less entitlement is desired than current entitlement, move
659 * any reserve memory in the change region to the excess pool.
660 */
661 if (viodev->cmo.entitled > desired) {
662 vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
663 vio_cmo.excess.size += viodev->cmo.entitled - desired;
664 /*
665 * If entitlement moving from the reserve pool to the
666 * excess pool is currently unused, add to the excess
667 * free counter.
668 */
669 if (viodev->cmo.allocated < viodev->cmo.entitled)
670 vio_cmo.excess.free += viodev->cmo.entitled -
671 max(viodev->cmo.allocated, desired);
672 viodev->cmo.entitled = desired;
673 }
674 }
675 schedule_delayed_work(&vio_cmo.balance_q, 0);
676 spin_unlock_irqrestore(&vio_cmo.lock, flags);
677}
678
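A driver may call this directly when its workload changes; the new level only takes effect once the scheduled balance has run. A hypothetical example (the added amount is illustrative):

/* Ask for room for roughly 16 more pages of mappings after
 * enabling a second queue; a later balance grants what it can. */
vio_cmo_set_dev_desired(viodev, viodev->cmo.desired + 16 * PAGE_SIZE);

User space can make the same request through the cmo_desired sysfs attribute defined later in this file.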
679/**
680 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
681 *
682 * @viodev: Pointer to struct vio_dev for device
683 *
684 * Determine the device's IO memory entitlement needs, attempting
685 * to satisfy the system minimum entitlement at first and scheduling
686 * a balance operation to take care of the rest at a later time.
687 *
688 * Returns: 0 on success, -EINVAL when the device does not support CMO,
689 * and -ENOMEM when entitlement is not available for the device or
690 * the device entry.
691 */
693static int vio_cmo_bus_probe(struct vio_dev *viodev)
694{
695 struct vio_cmo_dev_entry *dev_ent;
696 struct device *dev = &viodev->dev;
697 struct vio_driver *viodrv = to_vio_driver(dev->driver);
698 unsigned long flags;
699 size_t size;
700
701 /*
702 * Check to see that device has a DMA window and configure
703 * entitlement for the device.
704 */
705 if (of_get_property(viodev->dev.archdata.of_node,
706 "ibm,my-dma-window", NULL)) {
707 /* Check that the driver is CMO enabled and get desired DMA */
708 if (!viodrv->get_desired_dma) {
709 dev_err(dev, "%s: device driver does not support CMO\n",
710 __func__);
711 return -EINVAL;
712 }
713
714 viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
715 if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
716 viodev->cmo.desired = VIO_CMO_MIN_ENT;
717 size = VIO_CMO_MIN_ENT;
718
719 dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
720 GFP_KERNEL);
721 if (!dev_ent)
722 return -ENOMEM;
723
724 dev_ent->viodev = viodev;
725 spin_lock_irqsave(&vio_cmo.lock, flags);
726 list_add(&dev_ent->list, &vio_cmo.device_list);
727 } else {
728 viodev->cmo.desired = 0;
729 size = 0;
730 spin_lock_irqsave(&vio_cmo.lock, flags);
731 }
732
733 /*
734 * If vio_cmo.min has not changed since it was last set,
735 * the number of devices in the OF tree is unchanged and
736 * the IO memory for this device is already in the
737 * reserve pool.
738 */
739 if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
740 VIO_CMO_MIN_ENT)) {
741 /* Update desired entitlement if the device requires it */
742 if (size)
743 vio_cmo.desired += (viodev->cmo.desired -
744 VIO_CMO_MIN_ENT);
745 } else {
746 size_t tmp;
747
748 tmp = vio_cmo.spare + vio_cmo.excess.free;
749 if (tmp < size) {
750 dev_err(dev, "%s: insufficient free "
751 "entitlement to add device. "
752 "Need %lu, have %lu\n", __func__,
753 size, tmp);
754 spin_unlock_irqrestore(&vio_cmo.lock, flags);
755 return -ENOMEM;
756 }
757
758 /* Use excess pool first to fulfill request */
759 tmp = min(size, vio_cmo.excess.free);
760 vio_cmo.excess.free -= tmp;
761 vio_cmo.excess.size -= tmp;
762 vio_cmo.reserve.size += tmp;
763
764 /* Use spare if excess pool was insufficient */
765 vio_cmo.spare -= size - tmp;
766
767 /* Update bus accounting */
768 vio_cmo.min += size;
769 vio_cmo.desired += viodev->cmo.desired;
770 }
771 spin_unlock_irqrestore(&vio_cmo.lock, flags);
772 return 0;
773}
774
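The get_desired_dma() hook checked above is the one piece a CMO-aware driver must provide: it reports, in bytes, how much IO memory the device would like entitled. A hypothetical driver skeleton (all names illustrative; the field layout of struct vio_driver is assumed from the checks in this file):

static unsigned long example_get_desired_dma(struct vio_dev *vdev)
{
	/* e.g. enough for the ring buffers this driver posts */
	return 4 * 1024 * 1024;
}

static struct vio_driver example_driver = {
	.id_table        = example_device_table,
	.probe           = example_probe,
	.remove          = example_remove,
	.get_desired_dma = example_get_desired_dma,
	.driver = {
		.name = "example",
	},
};

vio_cmo_bus_probe() aligns the returned value to IOMMU_PAGE_SIZE and clamps it up to VIO_CMO_MIN_ENT, so the driver's estimate need not be rounded.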
775/**
776 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
777 *
778 * @viodev: Pointer to struct vio_dev for device
779 *
780 * Remove the device from the cmo device list. The minimum entitlement
781 * will be reserved for the device as long as it is in the system. The
782 * rest of the entitlement the device had been allocated will be returned
783 * to the system.
784 */
785static void vio_cmo_bus_remove(struct vio_dev *viodev)
786{
787 struct vio_cmo_dev_entry *dev_ent;
788 unsigned long flags;
789 size_t tmp;
790
791 spin_lock_irqsave(&vio_cmo.lock, flags);
792 if (viodev->cmo.allocated) {
793 dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
794 "allocated after remove operation.\n",
795 __func__, viodev->cmo.allocated);
796 BUG();
797 }
798
799 /*
800 * Remove the device from the device list being maintained for
801 * CMO enabled devices.
802 */
803 list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
804 if (viodev == dev_ent->viodev) {
805 list_del(&dev_ent->list);
806 kfree(dev_ent);
807 break;
808 }
809
810 /*
811 * A device that holds no entitlement needs no processing.
812 * Otherwise, return the device's entitlement back to the
813 * pools.
814 */
815 if (viodev->cmo.entitled) {
816 /*
817 * This device has not yet left the OF tree; its
818 * minimum entitlement remains in vio_cmo.min and
819 * vio_cmo.desired.
820 */
821 vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);
822
823 /*
824 * Keep the device's minimum allocation in the reserve
825 * pool for as long as it exists in the OF tree, as
826 * determined by a later balance operation.
827 */
828 viodev->cmo.entitled -= VIO_CMO_MIN_ENT;
829
830 /* Replenish spare from freed reserve pool */
831 if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
832 tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
833 vio_cmo.spare));
834 vio_cmo.spare += tmp;
835 viodev->cmo.entitled -= tmp;
836 }
837
838 /* Remaining reserve goes to excess pool */
839 vio_cmo.excess.size += viodev->cmo.entitled;
840 vio_cmo.excess.free += viodev->cmo.entitled;
841 vio_cmo.reserve.size -= viodev->cmo.entitled;
842
843 /*
844 * Until the device is removed it keeps its minimum
845 * entitlement; this guarantees that a module
846 * unload/load cycle will succeed.
847 */
848 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
849 viodev->cmo.desired = VIO_CMO_MIN_ENT;
850 atomic_set(&viodev->cmo.allocs_failed, 0);
851 }
852
853 spin_unlock_irqrestore(&vio_cmo.lock, flags);
854}
855
856static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
857{
858 vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
859 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
860}
861
862/**
863 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
864 *
865 * Set up the reserve and excess entitlement pools based on available
866 * system entitlement and the number of devices in the OF tree that
867 * require entitlement in the reserve pool.
868 */
869static void vio_cmo_bus_init(void)
870{
871 struct hvcall_mpp_data mpp_data;
872 int err;
873
874 memset(&vio_cmo, 0, sizeof(struct vio_cmo));
875 spin_lock_init(&vio_cmo.lock);
876 INIT_LIST_HEAD(&vio_cmo.device_list);
877 INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);
878
879 /* Get current system entitlement */
880 err = h_get_mpp(&mpp_data);
881
882 /*
883 * On failure, continue with entitlement set to 0; the system
884 * will panic() later, when the spare is reserved.
885 */
886 if (err != H_SUCCESS) {
887 printk(KERN_ERR "%s: unable to determine system IO "
888 "entitlement. (%d)\n", __func__, err);
889 vio_cmo.entitled = 0;
890 } else {
891 vio_cmo.entitled = mpp_data.entitled_mem;
892 }
893
894 /* Set reservation and check against entitlement */
895 vio_cmo.spare = VIO_CMO_MIN_ENT;
896 vio_cmo.reserve.size = vio_cmo.spare;
897 vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
898 VIO_CMO_MIN_ENT);
899 if (vio_cmo.reserve.size > vio_cmo.entitled) {
900 printk(KERN_ERR "%s: insufficient system entitlement\n",
901 __func__);
902 panic("%s: Insufficient system entitlement", __func__);
903 }
904
905 /* Set the remaining accounting variables */
906 vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
907 vio_cmo.excess.free = vio_cmo.excess.size;
908 vio_cmo.min = vio_cmo.reserve.size;
909 vio_cmo.desired = vio_cmo.reserve.size;
910}
911
912/* sysfs device functions and data structures for CMO */
913
914#define viodev_cmo_rd_attr(name) \
915static ssize_t viodev_cmo_##name##_show(struct device *dev, \
916 struct device_attribute *attr, \
917 char *buf) \
918{ \
919 return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name); \
920}
921
922static ssize_t viodev_cmo_allocs_failed_show(struct device *dev,
923 struct device_attribute *attr, char *buf)
924{
925 struct vio_dev *viodev = to_vio_dev(dev);
926 return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
927}
928
929static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
930 struct device_attribute *attr, const char *buf, size_t count)
931{
932 struct vio_dev *viodev = to_vio_dev(dev);
933 atomic_set(&viodev->cmo.allocs_failed, 0);
934 return count;
935}
936
937static ssize_t viodev_cmo_desired_set(struct device *dev,
938 struct device_attribute *attr, const char *buf, size_t count)
939{
940 struct vio_dev *viodev = to_vio_dev(dev);
941 size_t new_desired;
942 int ret;
943
944 ret = strict_strtoul(buf, 10, &new_desired);
945 if (ret)
946 return ret;
947
948 vio_cmo_set_dev_desired(viodev, new_desired);
949 return count;
950}
951
952viodev_cmo_rd_attr(desired);
953viodev_cmo_rd_attr(entitled);
954viodev_cmo_rd_attr(allocated);
955
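For reference, the first of these invocations expands to a plain show routine (the other two differ only in the cmo field read):

static ssize_t viodev_cmo_desired_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.desired);
}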
956static ssize_t name_show(struct device *, struct device_attribute *, char *);
957static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
958static struct device_attribute vio_cmo_dev_attrs[] = {
959 __ATTR_RO(name),
960 __ATTR_RO(devspec),
961 __ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
962 viodev_cmo_desired_show, viodev_cmo_desired_set),
963 __ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL),
964 __ATTR(cmo_allocated, S_IRUGO, viodev_cmo_allocated_show, NULL),
965 __ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
966 viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset),
967 __ATTR_NULL
968};
969
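Once registered, these attributes appear in each VIO device's sysfs directory. A small user-space sketch that reads one back (the unit address in the path is hypothetical; real ones are listed under /sys/bus/vio/devices):

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/bus/vio/devices/30000002/cmo_entitled", "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("cmo_entitled: %s", buf);
	if (f)
		fclose(f);
	return 0;
}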
970/* sysfs bus functions and data structures for CMO */
971
972#define viobus_cmo_rd_attr(name) \
973static ssize_t \
974viobus_cmo_##name##_show(struct bus_type *bt, char *buf) \
975{ \
976 return sprintf(buf, "%lu\n", vio_cmo.name); \
977}
978
979#define viobus_cmo_pool_rd_attr(name, var) \
980static ssize_t \
981viobus_cmo_##name##_pool_show_##var(struct bus_type *bt, char *buf) \
982{ \
983 return sprintf(buf, "%lu\n", vio_cmo.name.var); \
984}
985
986static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf,
987 size_t count)
988{
989 unsigned long flags;
990
991 spin_lock_irqsave(&vio_cmo.lock, flags);
992 vio_cmo.high = vio_cmo.curr;
993 spin_unlock_irqrestore(&vio_cmo.lock, flags);
994
995 return count;
996}
997
998viobus_cmo_rd_attr(entitled);
999viobus_cmo_pool_rd_attr(reserve, size);
1000viobus_cmo_pool_rd_attr(excess, size);
1001viobus_cmo_pool_rd_attr(excess, free);
1002viobus_cmo_rd_attr(spare);
1003viobus_cmo_rd_attr(min);
1004viobus_cmo_rd_attr(desired);
1005viobus_cmo_rd_attr(curr);
1006viobus_cmo_rd_attr(high);
1007
1008static struct bus_attribute vio_cmo_bus_attrs[] = {
1009 __ATTR(cmo_entitled, S_IRUGO, viobus_cmo_entitled_show, NULL),
1010 __ATTR(cmo_reserve_size, S_IRUGO, viobus_cmo_reserve_pool_show_size, NULL),
1011 __ATTR(cmo_excess_size, S_IRUGO, viobus_cmo_excess_pool_show_size, NULL),
1012 __ATTR(cmo_excess_free, S_IRUGO, viobus_cmo_excess_pool_show_free, NULL),
1013 __ATTR(cmo_spare, S_IRUGO, viobus_cmo_spare_show, NULL),
1014 __ATTR(cmo_min, S_IRUGO, viobus_cmo_min_show, NULL),
1015 __ATTR(cmo_desired, S_IRUGO, viobus_cmo_desired_show, NULL),
1016 __ATTR(cmo_curr, S_IRUGO, viobus_cmo_curr_show, NULL),
1017 __ATTR(cmo_high, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
1018 viobus_cmo_high_show, viobus_cmo_high_reset),
1019 __ATTR_NULL
1020};
1021
1022static void vio_cmo_sysfs_init(void)
1023{
1024 vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
1025 vio_bus_type.bus_attrs = vio_cmo_bus_attrs;
1026}
1027#else /* CONFIG_PPC_SMLPAR */
1028/* Dummy functions for platforms without CMO support (e.g. iSeries) */
1029int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
1030void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
1031static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
1032static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
1033static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
1034static void vio_cmo_bus_init(void) {}
1035static void vio_cmo_sysfs_init(void) { }
1036#endif /* CONFIG_PPC_SMLPAR */
1037EXPORT_SYMBOL(vio_cmo_entitlement_update);
1038EXPORT_SYMBOL(vio_cmo_set_dev_desired);
1039
49static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) 1040static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
50{ 1041{
51 const unsigned char *dma_window; 1042 const unsigned char *dma_window;
@@ -114,8 +1105,17 @@ static int vio_bus_probe(struct device *dev)
114 return error; 1105 return error;
115 1106
116 id = vio_match_device(viodrv->id_table, viodev); 1107 id = vio_match_device(viodrv->id_table, viodev);
117 if (id) 1108 if (id) {
1109 memset(&viodev->cmo, 0, sizeof(viodev->cmo));
1110 if (firmware_has_feature(FW_FEATURE_CMO)) {
1111 error = vio_cmo_bus_probe(viodev);
1112 if (error)
1113 return error;
1114 }
118 error = viodrv->probe(viodev, id); 1115 error = viodrv->probe(viodev, id);
1116 if (error)
1117 vio_cmo_bus_remove(viodev);
1118 }
119 1119
120 return error; 1120 return error;
121} 1121}
@@ -125,12 +1125,23 @@ static int vio_bus_remove(struct device *dev)
125{ 1125{
126 struct vio_dev *viodev = to_vio_dev(dev); 1126 struct vio_dev *viodev = to_vio_dev(dev);
127 struct vio_driver *viodrv = to_vio_driver(dev->driver); 1127 struct vio_driver *viodrv = to_vio_driver(dev->driver);
1128 struct device *devptr;
1129 int ret = 1;
1130
1131 /*
1132 * Hold a reference to the device after the remove function is called
1133 * to allow for CMO accounting cleanup for the device.
1134 */
1135 devptr = get_device(dev);
128 1136
129 if (viodrv->remove) 1137 if (viodrv->remove)
130 return viodrv->remove(viodev); 1138 ret = viodrv->remove(viodev);
1139
1140 if (!ret && firmware_has_feature(FW_FEATURE_CMO))
1141 vio_cmo_bus_remove(viodev);
131 1142
132 /* driver can't remove */ 1143 put_device(devptr);
133 return 1; 1144 return ret;
134} 1145}
135 1146
136/** 1147/**
@@ -215,7 +1226,11 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
215 viodev->unit_address = *unit_address; 1226 viodev->unit_address = *unit_address;
216 } 1227 }
217 viodev->dev.archdata.of_node = of_node_get(of_node); 1228 viodev->dev.archdata.of_node = of_node_get(of_node);
218 viodev->dev.archdata.dma_ops = &dma_iommu_ops; 1229
1230 if (firmware_has_feature(FW_FEATURE_CMO))
1231 vio_cmo_set_dma_ops(viodev);
1232 else
1233 viodev->dev.archdata.dma_ops = &dma_iommu_ops;
219 viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev); 1234 viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev);
220 viodev->dev.archdata.numa_node = of_node_to_nid(of_node); 1235 viodev->dev.archdata.numa_node = of_node_to_nid(of_node);
221 1236
@@ -245,6 +1260,9 @@ static int __init vio_bus_init(void)
245 int err; 1260 int err;
246 struct device_node *node_vroot; 1261 struct device_node *node_vroot;
247 1262
1263 if (firmware_has_feature(FW_FEATURE_CMO))
1264 vio_cmo_sysfs_init();
1265
248 err = bus_register(&vio_bus_type); 1266 err = bus_register(&vio_bus_type);
249 if (err) { 1267 if (err) {
250 printk(KERN_ERR "failed to register VIO bus\n"); 1268 printk(KERN_ERR "failed to register VIO bus\n");
@@ -262,6 +1280,9 @@ static int __init vio_bus_init(void)
262 return err; 1280 return err;
263 } 1281 }
264 1282
1283 if (firmware_has_feature(FW_FEATURE_CMO))
1284 vio_cmo_bus_init();
1285
265 node_vroot = of_find_node_by_name(NULL, "vdevice"); 1286 node_vroot = of_find_node_by_name(NULL, "vdevice");
266 if (node_vroot) { 1287 if (node_vroot) {
267 struct device_node *of_node; 1288 struct device_node *of_node;
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index a914411bced5..4a8ce62fe112 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -85,7 +85,7 @@ SECTIONS
85 85
86 /* The dummy segment contents for the bug workaround mentioned above 86 /* The dummy segment contents for the bug workaround mentioned above
87 near PHDRS. */ 87 near PHDRS. */
88 .dummy : { 88 .dummy : AT(ADDR(.dummy) - LOAD_OFFSET) {
89 LONG(0xf177) 89 LONG(0xf177)
90 } :kernel :dummy 90 } :kernel :dummy
91 91