Diffstat (limited to 'arch/sparc')

-rw-r--r--  arch/sparc/Kconfig                    |   3
-rw-r--r--  arch/sparc/include/asm/compat.h       |   2
-rw-r--r--  arch/sparc/include/asm/irqflags_32.h  |  35
-rw-r--r--  arch/sparc/include/asm/irqflags_64.h  |  29
-rw-r--r--  arch/sparc/include/asm/jump_label.h   |  32
-rw-r--r--  arch/sparc/include/asm/memblock.h     |   2
-rw-r--r--  arch/sparc/include/asm/perf_event.h   |   4
-rw-r--r--  arch/sparc/kernel/Makefile            |   2
-rw-r--r--  arch/sparc/kernel/irq_32.c            |  13
-rw-r--r--  arch/sparc/kernel/jump_label.c        |  47
-rw-r--r--  arch/sparc/kernel/module.c            |   6
-rw-r--r--  arch/sparc/kernel/pci_msi.c           |   8
-rw-r--r--  arch/sparc/kernel/pcr.c               |   8
-rw-r--r--  arch/sparc/kernel/perf_event.c        | 248
-rw-r--r--  arch/sparc/kernel/signal32.c          | 161
-rw-r--r--  arch/sparc/kernel/signal_32.c         |  55
-rw-r--r--  arch/sparc/kernel/signal_64.c         |  45
-rw-r--r--  arch/sparc/mm/init_64.c               |  46
-rw-r--r--  arch/sparc/prom/p1275.c               |   2
19 files changed, 445 insertions(+), 303 deletions(-)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 491e9d6de191..3e9d31401fb2 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -26,10 +26,12 @@ config SPARC
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select RTC_CLASS
 	select RTC_DRV_M48T59
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_API_DEBUG
+	select HAVE_ARCH_JUMP_LABEL
 
 config SPARC32
 	def_bool !64BIT
@@ -53,6 +55,7 @@ config SPARC64
 	select RTC_DRV_BQ4802
 	select RTC_DRV_SUN4V
 	select RTC_DRV_STARFIRE
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index 5016f76ea98a..6f57325bb883 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -167,7 +167,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
 	struct pt_regs *regs = current_thread_info()->kregs;
 	unsigned long usp = regs->u_regs[UREG_I6];
diff --git a/arch/sparc/include/asm/irqflags_32.h b/arch/sparc/include/asm/irqflags_32.h
index 0fca9d97d44f..d4d0711de0f9 100644
--- a/arch/sparc/include/asm/irqflags_32.h
+++ b/arch/sparc/include/asm/irqflags_32.h
@@ -5,33 +5,40 @@
  *
  * This file gets included from lowlevel asm headers too, to provide
  * wrapped versions of the local_irq_*() APIs, based on the
- * raw_local_irq_*() functions from the lowlevel headers.
+ * arch_local_irq_*() functions from the lowlevel headers.
  */
 #ifndef _ASM_IRQFLAGS_H
 #define _ASM_IRQFLAGS_H
 
 #ifndef __ASSEMBLY__
 
-extern void raw_local_irq_restore(unsigned long);
-extern unsigned long __raw_local_irq_save(void);
-extern void raw_local_irq_enable(void);
+#include <linux/types.h>
 
-static inline unsigned long getipl(void)
-{
-	unsigned long retval;
+extern void arch_local_irq_restore(unsigned long);
+extern unsigned long arch_local_irq_save(void);
+extern void arch_local_irq_enable(void);
 
-	__asm__ __volatile__("rd %%psr, %0" : "=r" (retval));
-	return retval;
+static inline unsigned long arch_local_save_flags(void)
+{
+	unsigned long flags;
+
+	asm volatile("rd %%psr, %0" : "=r" (flags));
+	return flags;
 }
 
-#define raw_local_save_flags(flags) ((flags) = getipl())
-#define raw_local_irq_save(flags)   ((flags) = __raw_local_irq_save())
-#define raw_local_irq_disable()     ((void) __raw_local_irq_save())
-#define raw_irqs_disabled()         ((getipl() & PSR_PIL) != 0)
+static inline void arch_local_irq_disable(void)
+{
+	arch_local_irq_save();
+}
 
-static inline int raw_irqs_disabled_flags(unsigned long flags)
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
 {
-	return ((flags & PSR_PIL) != 0);
+	return (flags & PSR_PIL) != 0;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
 }
 
 #endif /* (__ASSEMBLY__) */
diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h
index bfa1ea45b4cd..aab969c82c2b 100644
--- a/arch/sparc/include/asm/irqflags_64.h
+++ b/arch/sparc/include/asm/irqflags_64.h
@@ -5,7 +5,7 @@
  *
  * This file gets included from lowlevel asm headers too, to provide
  * wrapped versions of the local_irq_*() APIs, based on the
- * raw_local_irq_*() functions from the lowlevel headers.
+ * arch_local_irq_*() functions from the lowlevel headers.
  */
 #ifndef _ASM_IRQFLAGS_H
 #define _ASM_IRQFLAGS_H
@@ -14,7 +14,7 @@
 
 #ifndef __ASSEMBLY__
 
-static inline unsigned long __raw_local_save_flags(void)
+static inline unsigned long arch_local_save_flags(void)
 {
 	unsigned long flags;
 
@@ -26,10 +26,7 @@ static inline unsigned long __raw_local_save_flags(void)
 	return flags;
 }
 
-#define raw_local_save_flags(flags) \
-	do { (flags) = __raw_local_save_flags(); } while (0)
-
-static inline void raw_local_irq_restore(unsigned long flags)
+static inline void arch_local_irq_restore(unsigned long flags)
 {
 	__asm__ __volatile__(
 		"wrpr	%0, %%pil"
@@ -39,7 +36,7 @@ static inline void raw_local_irq_restore(unsigned long flags)
 	);
 }
 
-static inline void raw_local_irq_disable(void)
+static inline void arch_local_irq_disable(void)
 {
 	__asm__ __volatile__(
 		"wrpr	%0, %%pil"
@@ -49,7 +46,7 @@ static inline void raw_local_irq_disable(void)
 	);
 }
 
-static inline void raw_local_irq_enable(void)
+static inline void arch_local_irq_enable(void)
 {
 	__asm__ __volatile__(
 		"wrpr	0, %%pil"
@@ -59,22 +56,17 @@ static inline void raw_local_irq_enable(void)
 	);
 }
 
-static inline int raw_irqs_disabled_flags(unsigned long flags)
+static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
 	return (flags > 0);
 }
 
-static inline int raw_irqs_disabled(void)
+static inline int arch_irqs_disabled(void)
 {
-	unsigned long flags = __raw_local_save_flags();
-
-	return raw_irqs_disabled_flags(flags);
+	return arch_irqs_disabled_flags(arch_local_save_flags());
 }
 
-/*
- * For spinlocks, etc:
- */
-static inline unsigned long __raw_local_irq_save(void)
+static inline unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags, tmp;
 
@@ -100,9 +92,6 @@ static inline unsigned long __raw_local_irq_save(void)
 	return flags;
 }
 
-#define raw_local_irq_save(flags) \
-	do { (flags) = __raw_local_irq_save(); } while (0)
-
 #endif /* (__ASSEMBLY__) */
 
 #endif /* !(_ASM_IRQFLAGS_H) */
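For context (a simplified sketch, not part of this patch): after this conversion the generic <linux/irqflags.h> layer supplies the raw_local_irq_*() macros on top of the arch_local_irq_*() primitives above, which is why the per-arch macro definitions could simply be deleted. The real macros also add type checking and, with CONFIG_TRACE_IRQFLAGS, lockdep tracing hooks:

/* Rough shape of the generic wrappers in <linux/irqflags.h>. */
#define raw_local_irq_save(flags)               \
        do {                                    \
                flags = arch_local_irq_save();  \
        } while (0)

#define raw_local_irq_restore(flags)            \
        do {                                    \
                arch_local_irq_restore(flags);  \
        } while (0)

#define raw_irqs_disabled()     (arch_irqs_disabled())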
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
new file mode 100644
index 000000000000..62e66d7b2fb6
--- /dev/null
+++ b/arch/sparc/include/asm/jump_label.h
@@ -0,0 +1,32 @@
+#ifndef _ASM_SPARC_JUMP_LABEL_H
+#define _ASM_SPARC_JUMP_LABEL_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm/system.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+#define JUMP_LABEL(key, label)					\
+	do {							\
+		asm goto("1:\n\t"				\
+			 "nop\n\t"				\
+			 "nop\n\t"				\
+			 ".pushsection __jump_table,  \"a\"\n\t"\
+			 ".word 1b, %l[" #label "], %c0\n\t"	\
+			 ".popsection \n\t"			\
+			 : :  "i" (key) : : label);		\
+	} while (0)
+
+#endif /* __KERNEL__ */
+
+typedef u32 jump_label_t;
+
+struct jump_entry {
+	jump_label_t code;
+	jump_label_t target;
+	jump_label_t key;
+};
+
+#endif
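To make the asm goto block concrete, a hypothetical call site would read roughly as follows (handle_trace() and the key variable are illustrative, not from the patch). While the key is disabled the two nops fall straight through; enabling it patches the first nop into a branch to the label, with the second nop serving as the branch delay slot:

/* Hypothetical JUMP_LABEL() call site, 2.6.37-era API. */
static int tracing_enabled;             /* key: sites are found via its address */

static inline void trace_hook(void)
{
        JUMP_LABEL(&tracing_enabled, do_trace);
        return;                         /* fast path: nops fall through */
do_trace:
        handle_trace();                 /* slow path, reached only once patched */
}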
diff --git a/arch/sparc/include/asm/memblock.h b/arch/sparc/include/asm/memblock.h
index f12af880649b..c67b047ef85e 100644
--- a/arch/sparc/include/asm/memblock.h
+++ b/arch/sparc/include/asm/memblock.h
@@ -5,6 +5,4 @@
 
 #define MEMBLOCK_DBG(fmt...) prom_printf(fmt)
 
-#define MEMBLOCK_REAL_LIMIT	0
-
 #endif /* !(_SPARC64_MEMBLOCK_H) */
diff --git a/arch/sparc/include/asm/perf_event.h b/arch/sparc/include/asm/perf_event.h
index 727af70646cb..6e8bfa1786da 100644
--- a/arch/sparc/include/asm/perf_event.h
+++ b/arch/sparc/include/asm/perf_event.h
@@ -1,10 +1,6 @@
 #ifndef __ASM_SPARC_PERF_EVENT_H
 #define __ASM_SPARC_PERF_EVENT_H
 
-extern void set_perf_event_pending(void);
-
-#define PERF_EVENT_INDEX_OFFSET	0
-
 #ifdef CONFIG_PERF_EVENTS
 #include <asm/ptrace.h>
 
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 0c2dc1f24a9a..599398fbbc7c 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -119,3 +119,5 @@ obj-$(CONFIG_COMPAT) += $(audit--y)
 
 pc--$(CONFIG_PERF_EVENTS) := perf_event.o
 obj-$(CONFIG_SPARC64) += $(pc--y)
+
+obj-$(CONFIG_SPARC64) += jump_label.o
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index e1af43728329..0116d8d10def 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -57,7 +57,7 @@
 #define SMP_NOP2
 #define SMP_NOP3
 #endif /* SMP */
-unsigned long __raw_local_irq_save(void)
+unsigned long arch_local_irq_save(void)
 {
 	unsigned long retval;
 	unsigned long tmp;
@@ -74,8 +74,9 @@ unsigned long __raw_local_irq_save(void)
 
 	return retval;
 }
+EXPORT_SYMBOL(arch_local_irq_save);
 
-void raw_local_irq_enable(void)
+void arch_local_irq_enable(void)
 {
 	unsigned long tmp;
 
@@ -89,8 +90,9 @@ void raw_local_irq_enable(void)
 		: "i" (PSR_PIL)
 		: "memory");
 }
+EXPORT_SYMBOL(arch_local_irq_enable);
 
-void raw_local_irq_restore(unsigned long old_psr)
+void arch_local_irq_restore(unsigned long old_psr)
 {
 	unsigned long tmp;
 
@@ -105,10 +107,7 @@ void raw_local_irq_restore(unsigned long old_psr)
 		: "i" (PSR_PIL), "r" (old_psr)
 		: "memory");
 }
-
-EXPORT_SYMBOL(__raw_local_irq_save);
-EXPORT_SYMBOL(raw_local_irq_enable);
-EXPORT_SYMBOL(raw_local_irq_restore);
+EXPORT_SYMBOL(arch_local_irq_restore);
 
 /*
  * Dave Redman (djhr@tadpole.co.uk)
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
new file mode 100644
index 000000000000..ea2dafc93d78
--- /dev/null
+++ b/arch/sparc/kernel/jump_label.c
@@ -0,0 +1,47 @@
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+
+#include <linux/jump_label.h>
+#include <linux/memory.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+void arch_jump_label_transform(struct jump_entry *entry,
+			       enum jump_label_type type)
+{
+	u32 val;
+	u32 *insn = (u32 *) (unsigned long) entry->code;
+
+	if (type == JUMP_LABEL_ENABLE) {
+		s32 off = (s32)entry->target - (s32)entry->code;
+
+#ifdef CONFIG_SPARC64
+		/* ba,pt %xcc, . + (off << 2) */
+		val = 0x10680000 | ((u32) off >> 2);
+#else
+		/* ba . + (off << 2) */
+		val = 0x10800000 | ((u32) off >> 2);
+#endif
+	} else {
+		val = 0x01000000;
+	}
+
+	get_online_cpus();
+	mutex_lock(&text_mutex);
+	*insn = val;
+	flushi(insn);
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
+}
+
+void arch_jump_label_text_poke_early(jump_label_t addr)
+{
+	u32 *insn_p = (u32 *) (unsigned long) addr;
+
+	*insn_p = 0x01000000;
+	flushi(insn_p);
+}
+
+#endif
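Two encoding details are worth spelling out. 0x01000000 is the canonical SPARC nop (sethi 0, %g0), and branch displacements are counted in 32-bit words, hence the off >> 2. The kernel code above relies on jump-label targets being close enough that the shifted offset fits the branch's displacement field; the user-space sketch below (my illustration, not kernel code) masks to the 19-bit disp19 field of ba,pt explicitly:

#include <stdint.h>
#include <stdio.h>

/* Encode "ba,pt %xcc, . + byte_off" the way the patch does, with an
 * explicit 19-bit displacement mask added for illustration.
 */
static uint32_t sparc_ba_pt(int32_t byte_off)
{
        return 0x10680000 | (((uint32_t)byte_off >> 2) & 0x7ffff);
}

int main(void)
{
        printf("nop        = 0x%08x\n", 0x01000000u);   /* sethi 0, %g0 */
        printf("ba,pt . +8 = 0x%08x\n", sparc_ba_pt(8));
        return 0;
}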
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index f848aadf54dc..ee3c7dde8d9f 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -18,6 +18,9 @@
 #include <asm/spitfire.h>
 
 #ifdef CONFIG_SPARC64
+
+#include <linux/jump_label.h>
+
 static void *module_map(unsigned long size)
 {
 	struct vm_struct *area;
@@ -227,6 +230,9 @@ int module_finalize(const Elf_Ehdr *hdr,
 		    const Elf_Shdr *sechdrs,
 		    struct module *me)
 {
+	/* make jump label nops */
+	jump_label_apply_nops(me);
+
 	/* Cheetah's I-cache is fully coherent.  */
 	if (tlb_type == spitfire) {
 		unsigned long va;
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
index 548b8ca9c210..b210416ace7b 100644
--- a/arch/sparc/kernel/pci_msi.c
+++ b/arch/sparc/kernel/pci_msi.c
@@ -114,10 +114,10 @@ static void free_msi(struct pci_pbm_info *pbm, int msi_num)
 
 static struct irq_chip msi_irq = {
 	.name		= "PCI-MSI",
-	.mask		= mask_msi_irq,
-	.unmask		= unmask_msi_irq,
-	.enable		= unmask_msi_irq,
-	.disable	= mask_msi_irq,
+	.irq_mask	= mask_msi_irq,
+	.irq_unmask	= unmask_msi_irq,
+	.irq_enable	= unmask_msi_irq,
+	.irq_disable	= mask_msi_irq,
 	/* XXX affinity XXX */
 };
 
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index c4a6a50b4849..b87873c0e8ea 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -7,7 +7,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
 #include <linux/ftrace.h>
 
 #include <asm/pil.h>
@@ -43,14 +43,14 @@ void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
 
 	old_regs = set_irq_regs(regs);
 	irq_enter();
-#ifdef CONFIG_PERF_EVENTS
-	perf_event_do_pending();
+#ifdef CONFIG_IRQ_WORK
+	irq_work_run();
 #endif
 	irq_exit();
 	set_irq_regs(old_regs);
 }
 
-void set_perf_event_pending(void)
+void arch_irq_work_raise(void)
 {
 	set_softint(1 << PIL_DEFERRED_PCR_WORK);
 }
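The conversion above is worth a gloss: the PCR interrupt handler no longer knows about perf specifically, it just drains the generic irq_work queue. A minimal sketch of the contract (the work item and handler names here are illustrative):

#include <linux/irq_work.h>

static void deferred_fn(struct irq_work *work)
{
        /* Runs later, from deferred_pcr_work_irq() via irq_work_run(). */
}

static struct irq_work deferred_work;

static void __init deferred_init(void)
{
        init_irq_work(&deferred_work, deferred_fn);
}

static void from_nmi_context(void)
{
        /* Safe at NMI priority; ends up in arch_irq_work_raise(),
         * which sets the PIL_DEFERRED_PCR_WORK softint above.
         */
        irq_work_queue(&deferred_work);
}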
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 357ced3c33ff..0d6deb55a2ae 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -658,13 +658,16 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
 
 		enc = perf_event_get_enc(cpuc->events[i]);
 		pcr &= ~mask_for_index(idx);
-		pcr |= event_encoding(enc, idx);
+		if (hwc->state & PERF_HES_STOPPED)
+			pcr |= nop_for_index(idx);
+		else
+			pcr |= event_encoding(enc, idx);
 	}
 out:
 	return pcr;
 }
 
-void hw_perf_enable(void)
+static void sparc_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 pcr;
@@ -691,7 +694,7 @@ void hw_perf_enable(void)
 	pcr_ops->write(cpuc->pcr);
 }
 
-void hw_perf_disable(void)
+static void sparc_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 val;
@@ -710,19 +713,65 @@ void hw_perf_disable(void)
 	pcr_ops->write(cpuc->pcr);
 }
 
-static void sparc_pmu_disable(struct perf_event *event)
+static int active_event_index(struct cpu_hw_events *cpuc,
+			      struct perf_event *event)
+{
+	int i;
+
+	for (i = 0; i < cpuc->n_events; i++) {
+		if (cpuc->event[i] == event)
+			break;
+	}
+	BUG_ON(i == cpuc->n_events);
+	return cpuc->current_idx[i];
+}
+
+static void sparc_pmu_start(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int idx = active_event_index(cpuc, event);
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+		sparc_perf_event_set_period(event, &event->hw, idx);
+	}
+
+	event->hw.state = 0;
+
+	sparc_pmu_enable_event(cpuc, &event->hw, idx);
+}
+
+static void sparc_pmu_stop(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int idx = active_event_index(cpuc, event);
+
+	if (!(event->hw.state & PERF_HES_STOPPED)) {
+		sparc_pmu_disable_event(cpuc, &event->hw, idx);
+		event->hw.state |= PERF_HES_STOPPED;
+	}
+
+	if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
+		sparc_perf_event_update(event, &event->hw, idx);
+		event->hw.state |= PERF_HES_UPTODATE;
+	}
+}
+
+static void sparc_pmu_del(struct perf_event *event, int _flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
 	int i;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	for (i = 0; i < cpuc->n_events; i++) {
 		if (event == cpuc->event[i]) {
-			int idx = cpuc->current_idx[i];
+			/* Absorb the final count and turn off the
+			 * event.
+			 */
+			sparc_pmu_stop(event, PERF_EF_UPDATE);
 
 			/* Shift remaining entries down into
 			 * the existing slot.
@@ -734,13 +783,6 @@ static void sparc_pmu_disable(struct perf_event *event)
 					cpuc->current_idx[i];
 			}
 
-			/* Absorb the final count and turn off the
-			 * event.
-			 */
-			sparc_pmu_disable_event(cpuc, hwc, idx);
-			barrier();
-			sparc_perf_event_update(event, hwc, idx);
-
 			perf_event_update_userpage(event);
 
 			cpuc->n_events--;
@@ -748,23 +790,10 @@ static void sparc_pmu_disable(struct perf_event *event)
 		}
 	}
 
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
-static int active_event_index(struct cpu_hw_events *cpuc,
-			      struct perf_event *event)
-{
-	int i;
-
-	for (i = 0; i < cpuc->n_events; i++) {
-		if (cpuc->event[i] == event)
-			break;
-	}
-	BUG_ON(i == cpuc->n_events);
-	return cpuc->current_idx[i];
-}
-
 static void sparc_pmu_read(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -774,15 +803,6 @@ static void sparc_pmu_read(struct perf_event *event)
 	sparc_perf_event_update(event, hwc, idx);
 }
 
-static void sparc_pmu_unthrottle(struct perf_event *event)
-{
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	int idx = active_event_index(cpuc, event);
-	struct hw_perf_event *hwc = &event->hw;
-
-	sparc_pmu_enable_event(cpuc, hwc, idx);
-}
-
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmc_grab_mutex);
 
@@ -877,7 +897,7 @@ static int sparc_check_constraints(struct perf_event **evts,
 	if (!n_ev)
 		return 0;
 
-	if (n_ev > perf_max_events)
+	if (n_ev > MAX_HWEVENTS)
 		return -1;
 
 	msk0 = perf_event_get_msk(events[0]);
@@ -984,23 +1004,27 @@ static int collect_events(struct perf_event *group, int max_count,
 	return n;
 }
 
-static int sparc_pmu_enable(struct perf_event *event)
+static int sparc_pmu_add(struct perf_event *event, int ef_flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int n0, ret = -EAGAIN;
 	unsigned long flags;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	n0 = cpuc->n_events;
-	if (n0 >= perf_max_events)
+	if (n0 >= MAX_HWEVENTS)
 		goto out;
 
 	cpuc->event[n0] = event;
 	cpuc->events[n0] = event->hw.event_base;
 	cpuc->current_idx[n0] = PIC_NO_INDEX;
 
+	event->hw.state = PERF_HES_UPTODATE;
+	if (!(ef_flags & PERF_EF_START))
+		event->hw.state |= PERF_HES_STOPPED;
+
 	/*
 	 * If group events scheduling transaction was started,
 	 * skip the schedulability test here, it will be peformed
@@ -1020,12 +1044,12 @@ nocheck:
 
 	ret = 0;
 out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 	return ret;
 }
 
-static int __hw_perf_event_init(struct perf_event *event)
+static int sparc_pmu_event_init(struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
 	struct perf_event *evts[MAX_HWEVENTS];
@@ -1038,16 +1062,37 @@ static int __hw_perf_event_init(struct perf_event *event)
 	if (atomic_read(&nmi_active) < 0)
 		return -ENODEV;
 
-	if (attr->type == PERF_TYPE_HARDWARE) {
+	switch (attr->type) {
+	case PERF_TYPE_HARDWARE:
 		if (attr->config >= sparc_pmu->max_events)
 			return -EINVAL;
 		pmap = sparc_pmu->event_map(attr->config);
-	} else if (attr->type == PERF_TYPE_HW_CACHE) {
+		break;
+
+	case PERF_TYPE_HW_CACHE:
 		pmap = sparc_map_cache_event(attr->config);
 		if (IS_ERR(pmap))
 			return PTR_ERR(pmap);
-	} else
-		return -EOPNOTSUPP;
+		break;
+
+	case PERF_TYPE_RAW:
+		pmap = NULL;
+		break;
+
+	default:
+		return -ENOENT;
+
+	}
+
+	if (pmap) {
+		hwc->event_base = perf_event_encode(pmap);
+	} else {
+		/*
+		 * User gives us "(encoding << 16) | pic_mask" for
+		 * PERF_TYPE_RAW events.
+		 */
+		hwc->event_base = attr->config;
+	}
 
 	/* We save the enable bits in the config_base.  */
 	hwc->config_base = sparc_pmu->irq_bit;
@@ -1058,12 +1103,10 @@ static int __hw_perf_event_init(struct perf_event *event)
 	if (!attr->exclude_hv)
 		hwc->config_base |= sparc_pmu->hv_bit;
 
-	hwc->event_base = perf_event_encode(pmap);
-
 	n = 0;
 	if (event->group_leader != event) {
 		n = collect_events(event->group_leader,
-				   perf_max_events - 1,
+				   MAX_HWEVENTS - 1,
 				   evts, events, current_idx_dmy);
 		if (n < 0)
 			return -EINVAL;
@@ -1099,10 +1142,11 @@ static int __hw_perf_event_init(struct perf_event *event)
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
  */
-static void sparc_pmu_start_txn(const struct pmu *pmu)
+static void sparc_pmu_start_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
+	perf_pmu_disable(pmu);
 	cpuhw->group_flag |= PERF_EVENT_TXN;
 }
1108 1152
@@ -1111,11 +1155,12 @@ static void sparc_pmu_start_txn(const struct pmu *pmu)
1111 * Clear the flag and pmu::enable() will perform the 1155 * Clear the flag and pmu::enable() will perform the
1112 * schedulability test. 1156 * schedulability test.
1113 */ 1157 */
1114static void sparc_pmu_cancel_txn(const struct pmu *pmu) 1158static void sparc_pmu_cancel_txn(struct pmu *pmu)
1115{ 1159{
1116 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 1160 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1117 1161
1118 cpuhw->group_flag &= ~PERF_EVENT_TXN; 1162 cpuhw->group_flag &= ~PERF_EVENT_TXN;
1163 perf_pmu_enable(pmu);
1119} 1164}
1120 1165
1121/* 1166/*
@@ -1123,7 +1168,7 @@ static void sparc_pmu_cancel_txn(const struct pmu *pmu)
  * Perform the group schedulability test as a whole
  * Return 0 if success
  */
-static int sparc_pmu_commit_txn(const struct pmu *pmu)
+static int sparc_pmu_commit_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int n;
@@ -1139,28 +1184,24 @@ static int sparc_pmu_commit_txn(const struct pmu *pmu)
 		return -EAGAIN;
 
 	cpuc->group_flag &= ~PERF_EVENT_TXN;
+	perf_pmu_enable(pmu);
 	return 0;
 }
 
-static const struct pmu pmu = {
-	.enable		= sparc_pmu_enable,
-	.disable	= sparc_pmu_disable,
+static struct pmu pmu = {
+	.pmu_enable	= sparc_pmu_enable,
+	.pmu_disable	= sparc_pmu_disable,
+	.event_init	= sparc_pmu_event_init,
+	.add		= sparc_pmu_add,
+	.del		= sparc_pmu_del,
+	.start		= sparc_pmu_start,
+	.stop		= sparc_pmu_stop,
 	.read		= sparc_pmu_read,
-	.unthrottle	= sparc_pmu_unthrottle,
 	.start_txn	= sparc_pmu_start_txn,
 	.cancel_txn	= sparc_pmu_cancel_txn,
 	.commit_txn	= sparc_pmu_commit_txn,
 };
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
-	int err = __hw_perf_event_init(event);
-
-	if (err)
-		return ERR_PTR(err);
-	return &pmu;
-}
-
 void perf_event_print_debug(void)
 {
 	unsigned long flags;
@@ -1236,7 +1277,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 			continue;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			sparc_pmu_disable_event(cpuc, hwc, idx);
+			sparc_pmu_stop(event, 0);
 	}
 
 	return NOTIFY_STOP;
@@ -1277,28 +1318,21 @@ void __init init_hw_perf_events(void)
 
 	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
 
-	/* All sparc64 PMUs currently have 2 events.  */
-	perf_max_events = 2;
-
+	perf_pmu_register(&pmu);
 	register_die_notifier(&perf_event_nmi_notifier);
 }
 
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-	if (entry->nr < PERF_MAX_STACK_DEPTH)
-		entry->ip[entry->nr++] = ip;
-}
-
-static void perf_callchain_kernel(struct pt_regs *regs,
-				  struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+			   struct pt_regs *regs)
 {
 	unsigned long ksp, fp;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	int graph = 0;
 #endif
 
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
-	callchain_store(entry, regs->tpc);
+	stack_trace_flush();
+
+	perf_callchain_store(entry, regs->tpc);
 
 	ksp = regs->u_regs[UREG_I6];
 	fp = ksp + STACK_BIAS;
@@ -1322,13 +1356,13 @@ static void perf_callchain_kernel(struct pt_regs *regs,
 			pc = sf->callers_pc;
 			fp = (unsigned long)sf->fp + STACK_BIAS;
 		}
-		callchain_store(entry, pc);
+		perf_callchain_store(entry, pc);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
 			int index = current->curr_ret_stack;
 			if (current->ret_stack && index >= graph) {
 				pc = current->ret_stack[index - graph].ret;
-				callchain_store(entry, pc);
+				perf_callchain_store(entry, pc);
 				graph++;
 			}
 		}
@@ -1336,13 +1370,12 @@ static void perf_callchain_kernel(struct pt_regs *regs,
 	} while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-static void perf_callchain_user_64(struct pt_regs *regs,
-				   struct perf_callchain_entry *entry)
+static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+				   struct pt_regs *regs)
 {
 	unsigned long ufp;
 
-	callchain_store(entry, PERF_CONTEXT_USER);
-	callchain_store(entry, regs->tpc);
+	perf_callchain_store(entry, regs->tpc);
 
 	ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
 	do {
@@ -1355,17 +1388,16 @@ static void perf_callchain_user_64(struct pt_regs *regs,
 
 		pc = sf.callers_pc;
 		ufp = (unsigned long)sf.fp + STACK_BIAS;
-		callchain_store(entry, pc);
+		perf_callchain_store(entry, pc);
 	} while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-static void perf_callchain_user_32(struct pt_regs *regs,
-				   struct perf_callchain_entry *entry)
+static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+				   struct pt_regs *regs)
 {
 	unsigned long ufp;
 
-	callchain_store(entry, PERF_CONTEXT_USER);
-	callchain_store(entry, regs->tpc);
+	perf_callchain_store(entry, regs->tpc);
 
 	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
 	do {
@@ -1378,34 +1410,16 @@ static void perf_callchain_user_32(struct pt_regs *regs,
 
 		pc = sf.callers_pc;
 		ufp = (unsigned long)sf.fp;
-		callchain_store(entry, pc);
+		perf_callchain_store(entry, pc);
 	} while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-/* Like powerpc we can't get PMU interrupts within the PMU handler,
- * so no need for separate NMI and IRQ chains as on x86.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
-
-	entry->nr = 0;
-	if (!user_mode(regs)) {
-		stack_trace_flush();
-		perf_callchain_kernel(regs, entry);
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
-	}
-	if (regs) {
-		flushw_user();
-		if (test_thread_flag(TIF_32BIT))
-			perf_callchain_user_32(regs, entry);
-		else
-			perf_callchain_user_64(regs, entry);
-	}
-	return entry;
+	flushw_user();
+	if (test_thread_flag(TIF_32BIT))
+		perf_callchain_user_32(entry, regs);
+	else
+		perf_callchain_user_64(entry, regs);
 }
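The bulk of the perf_event.c churn is the move from the old enable/disable/unthrottle callbacks to the 2.6.37 struct pmu add/del/start/stop protocol. A toy sketch of the state flags involved (illustrative, not from the patch): add() schedules an event but may leave it PERF_HES_STOPPED, start()/stop() flip it between counting and stopped while it stays scheduled, and del() absorbs the final count before unscheduling it.

static int toy_pmu_add(struct perf_event *event, int flags)
{
        /* Newly scheduled: count in sync, counter not yet running. */
        event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (flags & PERF_EF_START)
                event->hw.state = 0;            /* counting immediately */
        return 0;
}

static void toy_pmu_stop(struct perf_event *event, int flags)
{
        event->hw.state |= PERF_HES_STOPPED;
        if (flags & PERF_EF_UPDATE)
                event->hw.state |= PERF_HES_UPTODATE;   /* count synced */
}

static void toy_pmu_del(struct perf_event *event, int flags)
{
        toy_pmu_stop(event, PERF_EF_UPDATE);    /* absorb the final count */
}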
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index ea22cd373c64..75fad425e249 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -453,8 +453,66 @@ static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
 	return err;
 }
 
-static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
-			  int signo, sigset_t *oldset)
+/* The I-cache flush instruction only works in the primary ASI, which
+ * right now is the nucleus, aka. kernel space.
+ *
+ * Therefore we have to kick the instructions out using the kernel
+ * side linear mapping of the physical address backing the user
+ * instructions.
+ */
+static void flush_signal_insns(unsigned long address)
+{
+	unsigned long pstate, paddr;
+	pte_t *ptep, pte;
+	pgd_t *pgdp;
+	pud_t *pudp;
+	pmd_t *pmdp;
+
+	/* Commit all stores of the instructions we are about to flush.  */
+	wmb();
+
+	/* Disable cross-call reception.  In this way even a very wide
+	 * munmap() on another cpu can't tear down the page table
+	 * hierarchy from underneath us, since that can't complete
+	 * until the IPI tlb flush returns.
+	 */
+
+	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
+	__asm__ __volatile__("wrpr %0, %1, %%pstate"
+			     : : "r" (pstate), "i" (PSTATE_IE));
+
+	pgdp = pgd_offset(current->mm, address);
+	if (pgd_none(*pgdp))
+		goto out_irqs_on;
+	pudp = pud_offset(pgdp, address);
+	if (pud_none(*pudp))
+		goto out_irqs_on;
+	pmdp = pmd_offset(pudp, address);
+	if (pmd_none(*pmdp))
+		goto out_irqs_on;
+
+	ptep = pte_offset_map(pmdp, address);
+	pte = *ptep;
+	if (!pte_present(pte))
+		goto out_unmap;
+
+	paddr = (unsigned long) page_address(pte_page(pte));
+
+	__asm__ __volatile__("flush	%0 + %1"
+			     : /* no outputs */
+			     : "r" (paddr),
+			       "r" (address & (PAGE_SIZE - 1))
+			     : "memory");
+
+out_unmap:
+	pte_unmap(ptep);
+out_irqs_on:
+	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
+
+}
+
+static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+			 int signo, sigset_t *oldset)
 {
 	struct signal_frame32 __user *sf;
 	int sigframe_size;
@@ -547,13 +605,7 @@ static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
 	if (ka->ka_restorer) {
 		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
 	} else {
-		/* Flush instruction space. */
 		unsigned long address = ((unsigned long)&(sf->insns[0]));
-		pgd_t *pgdp = pgd_offset(current->mm, address);
-		pud_t *pudp = pud_offset(pgdp, address);
-		pmd_t *pmdp = pmd_offset(pudp, address);
-		pte_t *ptep;
-		pte_t pte;
 
 		regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
 
@@ -562,34 +614,22 @@ static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
 		if (err)
 			goto sigsegv;
 
-		preempt_disable();
-		ptep = pte_offset_map(pmdp, address);
-		pte = *ptep;
-		if (pte_present(pte)) {
-			unsigned long page = (unsigned long)
-				page_address(pte_page(pte));
-
-			wmb();
-			__asm__ __volatile__("flush	%0 + %1"
-					     : /* no outputs */
-					     : "r" (page),
-					       "r" (address & (PAGE_SIZE - 1))
-					     : "memory");
-		}
-		pte_unmap(ptep);
-		preempt_enable();
+		flush_signal_insns(address);
 	}
-	return;
+	return 0;
 
 sigill:
 	do_exit(SIGILL);
+	return -EINVAL;
+
 sigsegv:
 	force_sigsegv(signo, current);
+	return -EFAULT;
 }
 
-static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
 			     unsigned long signr, sigset_t *oldset,
 			     siginfo_t *info)
 {
 	struct rt_signal_frame32 __user *sf;
 	int sigframe_size;
@@ -687,12 +727,7 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
 	if (ka->ka_restorer)
 		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
 	else {
-		/* Flush instruction space. */
 		unsigned long address = ((unsigned long)&(sf->insns[0]));
-		pgd_t *pgdp = pgd_offset(current->mm, address);
-		pud_t *pudp = pud_offset(pgdp, address);
-		pmd_t *pmdp = pmd_offset(pudp, address);
-		pte_t *ptep;
 
 		regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
 
@@ -704,38 +739,32 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
 		if (err)
 			goto sigsegv;
 
-		preempt_disable();
-		ptep = pte_offset_map(pmdp, address);
-		if (pte_present(*ptep)) {
-			unsigned long page = (unsigned long)
-				page_address(pte_page(*ptep));
-
-			wmb();
-			__asm__ __volatile__("flush	%0 + %1"
-					     : /* no outputs */
-					     : "r" (page),
-					       "r" (address & (PAGE_SIZE - 1))
-					     : "memory");
-		}
-		pte_unmap(ptep);
-		preempt_enable();
+		flush_signal_insns(address);
 	}
-	return;
+	return 0;
 
 sigill:
 	do_exit(SIGILL);
+	return -EINVAL;
+
 sigsegv:
 	force_sigsegv(signr, current);
+	return -EFAULT;
 }
 
-static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
+static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
 				   siginfo_t *info,
 				   sigset_t *oldset, struct pt_regs *regs)
 {
+	int err;
+
 	if (ka->sa.sa_flags & SA_SIGINFO)
-		setup_rt_frame32(ka, regs, signr, oldset, info);
+		err = setup_rt_frame32(ka, regs, signr, oldset, info);
 	else
-		setup_frame32(ka, regs, signr, oldset);
+		err = setup_frame32(ka, regs, signr, oldset);
+
+	if (err)
+		return err;
 
 	spin_lock_irq(&current->sighand->siglock);
 	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -743,6 +772,10 @@ static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
 		sigaddset(&current->blocked,signr);
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
+
+	tracehook_signal_handler(signr, info, ka, regs, 0);
+
+	return 0;
 }
 
 static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
@@ -789,16 +822,14 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
 	if (signr > 0) {
 		if (restart_syscall)
 			syscall_restart32(orig_i0, regs, &ka.sa);
-		handle_signal32(signr, &ka, &info, oldset, regs);
-
-		/* A signal was successfully delivered; the saved
-		 * sigmask will have been stored in the signal frame,
-		 * and will be restored by sigreturn, so we can simply
-		 * clear the TS_RESTORE_SIGMASK flag.
-		 */
-		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-
-		tracehook_signal_handler(signr, &info, &ka, regs, 0);
+		if (handle_signal32(signr, &ka, &info, oldset, regs) == 0) {
+			/* A signal was successfully delivered; the saved
+			 * sigmask will have been stored in the signal frame,
+			 * and will be restored by sigreturn, so we can simply
+			 * clear the TS_RESTORE_SIGMASK flag.
+			 */
+			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+		}
 		return;
 	}
 	if (restart_syscall &&
@@ -809,12 +840,14 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
 		regs->u_regs[UREG_I0] = orig_i0;
 		regs->tpc -= 4;
 		regs->tnpc -= 4;
+		pt_regs_clear_syscall(regs);
 	}
 	if (restart_syscall &&
 	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
 		regs->u_regs[UREG_G1] = __NR_restart_syscall;
 		regs->tpc -= 4;
 		regs->tnpc -= 4;
+		pt_regs_clear_syscall(regs);
 	}
 
 	/* If there's no signal to deliver, we just put the saved sigmask
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 9882df92ba0a..5e5c5fd03783 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -315,8 +315,8 @@ save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
 	return err;
 }
 
-static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
+static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
 			int signo, sigset_t *oldset)
 {
 	struct signal_frame __user *sf;
 	int sigframe_size, err;
@@ -384,16 +384,19 @@ static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
 		/* Flush instruction space. */
 		flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
 	}
-	return;
+	return 0;
 
 sigill_and_return:
 	do_exit(SIGILL);
+	return -EINVAL;
+
 sigsegv:
 	force_sigsegv(signo, current);
+	return -EFAULT;
 }
 
-static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
+static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
 			   int signo, sigset_t *oldset, siginfo_t *info)
 {
 	struct rt_signal_frame __user *sf;
 	int sigframe_size;
@@ -466,22 +469,30 @@ static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
 		/* Flush instruction space. */
 		flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
 	}
-	return;
+	return 0;
 
 sigill:
 	do_exit(SIGILL);
+	return -EINVAL;
+
 sigsegv:
 	force_sigsegv(signo, current);
+	return -EFAULT;
 }
 
-static inline void
+static inline int
 handle_signal(unsigned long signr, struct k_sigaction *ka,
 	      siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
 {
+	int err;
+
 	if (ka->sa.sa_flags & SA_SIGINFO)
-		setup_rt_frame(ka, regs, signr, oldset, info);
+		err = setup_rt_frame(ka, regs, signr, oldset, info);
 	else
-		setup_frame(ka, regs, signr, oldset);
+		err = setup_frame(ka, regs, signr, oldset);
+
+	if (err)
+		return err;
 
 	spin_lock_irq(&current->sighand->siglock);
 	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -489,6 +500,10 @@ handle_signal(unsigned long signr, struct k_sigaction *ka,
 		sigaddset(&current->blocked, signr);
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
+
+	tracehook_signal_handler(signr, info, ka, regs, 0);
+
+	return 0;
 }
 
 static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -546,17 +561,15 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
 	if (signr > 0) {
 		if (restart_syscall)
 			syscall_restart(orig_i0, regs, &ka.sa);
-		handle_signal(signr, &ka, &info, oldset, regs);
-
-		/* a signal was successfully delivered; the saved
-		 * sigmask will have been stored in the signal frame,
-		 * and will be restored by sigreturn, so we can simply
-		 * clear the TIF_RESTORE_SIGMASK flag.
-		 */
-		if (test_thread_flag(TIF_RESTORE_SIGMASK))
-			clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-		tracehook_signal_handler(signr, &info, &ka, regs, 0);
+		if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
+			/* a signal was successfully delivered; the saved
+			 * sigmask will have been stored in the signal frame,
+			 * and will be restored by sigreturn, so we can simply
+			 * clear the TIF_RESTORE_SIGMASK flag.
+			 */
+			if (test_thread_flag(TIF_RESTORE_SIGMASK))
+				clear_thread_flag(TIF_RESTORE_SIGMASK);
+		}
 		return;
 	}
 	if (restart_syscall &&
562 if (restart_syscall && 575 if (restart_syscall &&
@@ -567,12 +580,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
 		regs->u_regs[UREG_I0] = orig_i0;
 		regs->pc -= 4;
 		regs->npc -= 4;
+		pt_regs_clear_syscall(regs);
 	}
 	if (restart_syscall &&
 	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
 		regs->u_regs[UREG_G1] = __NR_restart_syscall;
 		regs->pc -= 4;
 		regs->npc -= 4;
+		pt_regs_clear_syscall(regs);
 	}
 
 	/* if there's no signal to deliver, we just put the saved sigmask
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 9fa48c30037e..006fe4515886 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -409,7 +409,7 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *
 	return (void __user *) sp;
 }
 
-static inline void
+static inline int
 setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
 	       int signo, sigset_t *oldset, siginfo_t *info)
 {
@@ -483,26 +483,37 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
 	}
 	/* 4. return to kernel instructions */
 	regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
-	return;
+	return 0;
 
 sigill:
 	do_exit(SIGILL);
+	return -EINVAL;
+
 sigsegv:
 	force_sigsegv(signo, current);
+	return -EFAULT;
 }
 
-static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
+static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
 				 siginfo_t *info,
 				 sigset_t *oldset, struct pt_regs *regs)
 {
-	setup_rt_frame(ka, regs, signr, oldset,
-		       (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
+	int err;
+
+	err = setup_rt_frame(ka, regs, signr, oldset,
+			     (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
+	if (err)
+		return err;
 	spin_lock_irq(&current->sighand->siglock);
 	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
 	if (!(ka->sa.sa_flags & SA_NOMASK))
 		sigaddset(&current->blocked,signr);
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
+
+	tracehook_signal_handler(signr, info, ka, regs, 0);
+
+	return 0;
 }
 
 static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -571,16 +582,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
 	if (signr > 0) {
 		if (restart_syscall)
 			syscall_restart(orig_i0, regs, &ka.sa);
-		handle_signal(signr, &ka, &info, oldset, regs);
-
-		/* A signal was successfully delivered; the saved
-		 * sigmask will have been stored in the signal frame,
-		 * and will be restored by sigreturn, so we can simply
-		 * clear the TS_RESTORE_SIGMASK flag.
-		 */
-		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-
-		tracehook_signal_handler(signr, &info, &ka, regs, 0);
+		if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
+			/* A signal was successfully delivered; the saved
+			 * sigmask will have been stored in the signal frame,
+			 * and will be restored by sigreturn, so we can simply
+			 * clear the TS_RESTORE_SIGMASK flag.
+			 */
+			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+		}
 		return;
 	}
 	if (restart_syscall &&
@@ -591,12 +600,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
 		regs->u_regs[UREG_I0] = orig_i0;
 		regs->tpc -= 4;
 		regs->tnpc -= 4;
+		pt_regs_clear_syscall(regs);
 	}
 	if (restart_syscall &&
 	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
 		regs->u_regs[UREG_G1] = __NR_restart_syscall;
 		regs->tpc -= 4;
 		regs->tnpc -= 4;
+		pt_regs_clear_syscall(regs);
 	}
 
 	/* If there's no signal to deliver, we just put the saved sigmask
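Two recurring changes run through all three signal files, and they are easier to see side by side than hunk by hunk (my gloss, not from the commit message). First, setup_*frame() now reports failure, so handle_signal() only commits the blocked-mask bookkeeping and the tracehook notification when a frame was actually written. Second, the restart paths call pt_regs_clear_syscall() after rewinding the PC; otherwise a second signal delivered before userspace resumes would see stale syscall state and rewind again:

/* Hazard sketch (illustrative, not kernel code): */
regs->tpc -= 4;                 /* first signal: re-execute the trap */
regs->tnpc -= 4;
/* ...second signal arrives before the task returns to userspace... */
regs->tpc -= 4;                 /* without pt_regs_clear_syscall(),   */
regs->tnpc -= 4;                /* the PC is rewound one insn too far */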
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index f0434513df15..4c2572773b55 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -785,8 +785,7 @@ static int find_node(unsigned long addr)
 	return -1;
 }
 
-static unsigned long long nid_range(unsigned long long start,
-				    unsigned long long end, int *nid)
+u64 memblock_nid_range(u64 start, u64 end, int *nid)
 {
 	*nid = find_node(start);
 	start += PAGE_SIZE;
@@ -804,8 +803,7 @@ static unsigned long long nid_range(unsigned long long start,
 	return start;
 }
 #else
-static unsigned long long nid_range(unsigned long long start,
-				    unsigned long long end, int *nid)
+u64 memblock_nid_range(u64 start, u64 end, int *nid)
 {
 	*nid = 0;
 	return end;
@@ -822,8 +820,7 @@ static void __init allocate_node_data(int nid)
 	struct pglist_data *p;
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-	paddr = memblock_alloc_nid(sizeof(struct pglist_data),
-				   SMP_CACHE_BYTES, nid, nid_range);
+	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
 	if (!paddr) {
 		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
 		prom_halt();
@@ -843,8 +840,7 @@ static void __init allocate_node_data(int nid)
 	if (p->node_spanned_pages) {
 		num_pages = bootmem_bootmap_pages(p->node_spanned_pages);
 
-		paddr = memblock_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid,
-					   nid_range);
+		paddr = memblock_alloc_try_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid);
 		if (!paddr) {
 			prom_printf("Cannot allocate bootmap for nid[%d]\n",
 				    nid);
@@ -972,19 +968,19 @@ int of_node_to_nid(struct device_node *dp)
 
 static void __init add_node_ranges(void)
 {
-	int i;
+	struct memblock_region *reg;
 
-	for (i = 0; i < memblock.memory.cnt; i++) {
-		unsigned long size = memblock_size_bytes(&memblock.memory, i);
+	for_each_memblock(memory, reg) {
+		unsigned long size = reg->size;
 		unsigned long start, end;
 
-		start = memblock.memory.region[i].base;
+		start = reg->base;
 		end = start + size;
 		while (start < end) {
 			unsigned long this_end;
 			int nid;
 
-			this_end = nid_range(start, end, &nid);
+			this_end = memblock_nid_range(start, end, &nid);
 
 			numadbg("Adding active range nid[%d] "
 				"start[%lx] end[%lx]\n",
@@ -1281,7 +1277,7 @@ static void __init bootmem_init_nonnuma(void)
 {
 	unsigned long top_of_ram = memblock_end_of_DRAM();
 	unsigned long total_ram = memblock_phys_mem_size();
-	unsigned int i;
+	struct memblock_region *reg;
 
 	numadbg("bootmem_init_nonnuma()\n");
 
@@ -1292,15 +1288,14 @@ static void __init bootmem_init_nonnuma(void)
 
 	init_node_masks_nonnuma();
 
-	for (i = 0; i < memblock.memory.cnt; i++) {
-		unsigned long size = memblock_size_bytes(&memblock.memory, i);
+	for_each_memblock(memory, reg) {
 		unsigned long start_pfn, end_pfn;
 
-		if (!size)
+		if (!reg->size)
 			continue;
 
-		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
+		start_pfn = memblock_region_memory_base_pfn(reg);
+		end_pfn = memblock_region_memory_end_pfn(reg);
 		add_active_range(0, start_pfn, end_pfn);
 	}
 
@@ -1318,7 +1313,7 @@ static void __init reserve_range_in_node(int nid, unsigned long start,
 		unsigned long this_end;
 		int n;
 
-		this_end = nid_range(start, end, &n);
+		this_end = memblock_nid_range(start, end, &n);
 		if (n == nid) {
 			numadbg("  MATCH reserving range [%lx:%lx]\n",
 				start, this_end);
@@ -1334,17 +1329,12 @@ static void __init reserve_range_in_node(int nid, unsigned long start,
 
 static void __init trim_reserved_in_node(int nid)
 {
-	int i;
+	struct memblock_region *reg;
 
 	numadbg("  trim_reserved_in_node(%d)\n", nid);
 
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		unsigned long start = memblock.reserved.region[i].base;
-		unsigned long size = memblock_size_bytes(&memblock.reserved, i);
-		unsigned long end = start + size;
-
-		reserve_range_in_node(nid, start, end);
-	}
+	for_each_memblock(reserved, reg)
+		reserve_range_in_node(nid, reg->base, reg->base + reg->size);
 }
 
 static void __init bootmem_init_one_node(int nid)
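The init_64.c changes track a memblock API cleanup of the same era: callers stop indexing memblock.memory.region[] by hand and use the region iterator plus pfn helpers instead. In sketch form (do_something() is a placeholder):

struct memblock_region *reg;

for_each_memblock(memory, reg) {
        unsigned long start_pfn = memblock_region_memory_base_pfn(reg);
        unsigned long end_pfn   = memblock_region_memory_end_pfn(reg);

        /* reg->base / reg->size remain valid across the walk */
        do_something(start_pfn, end_pfn);
}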
diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
index fa6e4e219b9c..d9850c2b9bf2 100644
--- a/arch/sparc/prom/p1275.c
+++ b/arch/sparc/prom/p1275.c
@@ -39,7 +39,7 @@ void p1275_cmd_direct(unsigned long *args)
 	unsigned long flags;
 
 	raw_local_save_flags(flags);
-	raw_local_irq_restore(PIL_NMI);
+	raw_local_irq_restore((unsigned long)PIL_NMI);
 	raw_spin_lock(&prom_entry_lock);
 
 	prom_world(1);