Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r--  arch/sparc/kernel/central.c       |   4
-rw-r--r--  arch/sparc/kernel/irq_64.c        |  41
-rw-r--r--  arch/sparc/kernel/kstack.h        |   4
-rw-r--r--  arch/sparc/kernel/of_device_32.c  |   2
-rw-r--r--  arch/sparc/kernel/pci.c           |   7
-rw-r--r--  arch/sparc/kernel/pcic.c          | 103
-rw-r--r--  arch/sparc/kernel/perf_event.c    | 627
-rw-r--r--  arch/sparc/kernel/process_32.c    |   2
-rw-r--r--  arch/sparc/kernel/process_64.c    |  16
-rw-r--r--  arch/sparc/kernel/signal32.c      |  10
-rw-r--r--  arch/sparc/kernel/signal_32.c     |   6
-rw-r--r--  arch/sparc/kernel/signal_64.c     |   8
-rw-r--r--  arch/sparc/kernel/sys_sparc_64.c  |   6
-rw-r--r--  arch/sparc/kernel/time_32.c       | 116
-rw-r--r--  arch/sparc/kernel/tsb.S           |   6
15 files changed, 583 insertions, 375 deletions
diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c
index f3b5466c389c..4589ca33220f 100644
--- a/arch/sparc/kernel/central.c
+++ b/arch/sparc/kernel/central.c
@@ -99,7 +99,7 @@ static int __devinit clock_board_probe(struct of_device *op,
99 99
100 p->leds_resource.start = (unsigned long) 100 p->leds_resource.start = (unsigned long)
101 (p->clock_regs + CLOCK_CTRL); 101 (p->clock_regs + CLOCK_CTRL);
102 p->leds_resource.end = p->leds_resource.end; 102 p->leds_resource.end = p->leds_resource.start;
103 p->leds_resource.name = "leds"; 103 p->leds_resource.name = "leds";
104 104
105 p->leds_pdev.name = "sunfire-clockboard-leds"; 105 p->leds_pdev.name = "sunfire-clockboard-leds";
@@ -194,7 +194,7 @@ static int __devinit fhc_probe(struct of_device *op,
194 if (!p->central) { 194 if (!p->central) {
195 p->leds_resource.start = (unsigned long) 195 p->leds_resource.start = (unsigned long)
196 (p->pregs + FHC_PREGS_CTRL); 196 (p->pregs + FHC_PREGS_CTRL);
197 p->leds_resource.end = p->leds_resource.end; 197 p->leds_resource.end = p->leds_resource.start;
198 p->leds_resource.name = "leds"; 198 p->leds_resource.name = "leds";
199 199
200 p->leds_pdev.name = "sunfire-fhc-leds"; 200 p->leds_pdev.name = "sunfire-fhc-leds";
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 8d6882bb480a..e1cbdb94d97b 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -250,12 +250,12 @@ struct irq_handler_data {
250}; 250};
251 251
252#ifdef CONFIG_SMP 252#ifdef CONFIG_SMP
253static int irq_choose_cpu(unsigned int virt_irq) 253static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity)
254{ 254{
255 cpumask_t mask; 255 cpumask_t mask;
256 int cpuid; 256 int cpuid;
257 257
258 cpumask_copy(&mask, irq_desc[virt_irq].affinity); 258 cpumask_copy(&mask, affinity);
259 if (cpus_equal(mask, cpu_online_map)) { 259 if (cpus_equal(mask, cpu_online_map)) {
260 cpuid = map_to_cpu(virt_irq); 260 cpuid = map_to_cpu(virt_irq);
261 } else { 261 } else {
@@ -268,10 +268,8 @@ static int irq_choose_cpu(unsigned int virt_irq)
268 return cpuid; 268 return cpuid;
269} 269}
270#else 270#else
271static int irq_choose_cpu(unsigned int virt_irq) 271#define irq_choose_cpu(virt_irq, affinity) \
272{ 272 real_hard_smp_processor_id()
273 return real_hard_smp_processor_id();
274}
275#endif 273#endif
276 274
277static void sun4u_irq_enable(unsigned int virt_irq) 275static void sun4u_irq_enable(unsigned int virt_irq)
@@ -282,7 +280,8 @@ static void sun4u_irq_enable(unsigned int virt_irq)
282 unsigned long cpuid, imap, val; 280 unsigned long cpuid, imap, val;
283 unsigned int tid; 281 unsigned int tid;
284 282
285 cpuid = irq_choose_cpu(virt_irq); 283 cpuid = irq_choose_cpu(virt_irq,
284 irq_desc[virt_irq].affinity);
286 imap = data->imap; 285 imap = data->imap;
287 286
288 tid = sun4u_compute_tid(imap, cpuid); 287 tid = sun4u_compute_tid(imap, cpuid);
@@ -299,7 +298,24 @@ static void sun4u_irq_enable(unsigned int virt_irq)
299static int sun4u_set_affinity(unsigned int virt_irq, 298static int sun4u_set_affinity(unsigned int virt_irq,
300 const struct cpumask *mask) 299 const struct cpumask *mask)
301{ 300{
302 sun4u_irq_enable(virt_irq); 301 struct irq_handler_data *data = get_irq_chip_data(virt_irq);
302
303 if (likely(data)) {
304 unsigned long cpuid, imap, val;
305 unsigned int tid;
306
307 cpuid = irq_choose_cpu(virt_irq, mask);
308 imap = data->imap;
309
310 tid = sun4u_compute_tid(imap, cpuid);
311
312 val = upa_readq(imap);
313 val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
314 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
315 val |= tid | IMAP_VALID;
316 upa_writeq(val, imap);
317 upa_writeq(ICLR_IDLE, data->iclr);
318 }
303 319
304 return 0; 320 return 0;
305} 321}
@@ -340,7 +356,8 @@ static void sun4u_irq_eoi(unsigned int virt_irq)
340static void sun4v_irq_enable(unsigned int virt_irq) 356static void sun4v_irq_enable(unsigned int virt_irq)
341{ 357{
342 unsigned int ino = virt_irq_table[virt_irq].dev_ino; 358 unsigned int ino = virt_irq_table[virt_irq].dev_ino;
343 unsigned long cpuid = irq_choose_cpu(virt_irq); 359 unsigned long cpuid = irq_choose_cpu(virt_irq,
360 irq_desc[virt_irq].affinity);
344 int err; 361 int err;
345 362
346 err = sun4v_intr_settarget(ino, cpuid); 363 err = sun4v_intr_settarget(ino, cpuid);
@@ -361,7 +378,7 @@ static int sun4v_set_affinity(unsigned int virt_irq,
361 const struct cpumask *mask) 378 const struct cpumask *mask)
362{ 379{
363 unsigned int ino = virt_irq_table[virt_irq].dev_ino; 380 unsigned int ino = virt_irq_table[virt_irq].dev_ino;
364 unsigned long cpuid = irq_choose_cpu(virt_irq); 381 unsigned long cpuid = irq_choose_cpu(virt_irq, mask);
365 int err; 382 int err;
366 383
367 err = sun4v_intr_settarget(ino, cpuid); 384 err = sun4v_intr_settarget(ino, cpuid);
@@ -403,7 +420,7 @@ static void sun4v_virq_enable(unsigned int virt_irq)
403 unsigned long cpuid, dev_handle, dev_ino; 420 unsigned long cpuid, dev_handle, dev_ino;
404 int err; 421 int err;
405 422
406 cpuid = irq_choose_cpu(virt_irq); 423 cpuid = irq_choose_cpu(virt_irq, irq_desc[virt_irq].affinity);
407 424
408 dev_handle = virt_irq_table[virt_irq].dev_handle; 425 dev_handle = virt_irq_table[virt_irq].dev_handle;
409 dev_ino = virt_irq_table[virt_irq].dev_ino; 426 dev_ino = virt_irq_table[virt_irq].dev_ino;
@@ -433,7 +450,7 @@ static int sun4v_virt_set_affinity(unsigned int virt_irq,
433 unsigned long cpuid, dev_handle, dev_ino; 450 unsigned long cpuid, dev_handle, dev_ino;
434 int err; 451 int err;
435 452
436 cpuid = irq_choose_cpu(virt_irq); 453 cpuid = irq_choose_cpu(virt_irq, mask);
437 454
438 dev_handle = virt_irq_table[virt_irq].dev_handle; 455 dev_handle = virt_irq_table[virt_irq].dev_handle;
439 dev_ino = virt_irq_table[virt_irq].dev_ino; 456 dev_ino = virt_irq_table[virt_irq].dev_ino;
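
Note: the change above threads the requested cpumask through irq_choose_cpu() explicitly, so the set_affinity handlers can target the mask being installed rather than re-reading irq_desc[].affinity, which can still hold the previous mask at that point. A minimal userspace sketch of the selection policy, with a toy 32-bit cpumask and an assumed round-robin map_to_cpu() standing in for the kernel helpers:

/* Pick a target cpu for an interrupt: use the default per-IRQ mapping when
 * the caller asks for "any online cpu", otherwise pick a cpu out of the
 * requested mask.  Illustrative only, not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t cpu_online_map = 0x0f;        /* assume cpus 0-3 are online */

static int map_to_cpu(unsigned int virt_irq)  /* stand-in for the kernel helper */
{
    return virt_irq % 4;
}

static int irq_choose_cpu(unsigned int virt_irq, uint32_t affinity)
{
    uint32_t mask = affinity;

    if (mask == cpu_online_map)               /* no restriction requested */
        return map_to_cpu(virt_irq);

    mask &= cpu_online_map;                   /* honour the requested mask */
    return mask ? __builtin_ctz(mask) : map_to_cpu(virt_irq);
}

int main(void)
{
    printf("irq 5, default affinity -> cpu %d\n",
           irq_choose_cpu(5, cpu_online_map));
    printf("irq 5, bound to cpu 2   -> cpu %d\n",
           irq_choose_cpu(5, 1u << 2));
    return 0;
}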
diff --git a/arch/sparc/kernel/kstack.h b/arch/sparc/kernel/kstack.h
index 4248d969272f..5247283d1c03 100644
--- a/arch/sparc/kernel/kstack.h
+++ b/arch/sparc/kernel/kstack.h
@@ -11,6 +11,10 @@ static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
11{ 11{
12 unsigned long base = (unsigned long) tp; 12 unsigned long base = (unsigned long) tp;
13 13
14 /* Stack pointer must be 16-byte aligned. */
15 if (sp & (16UL - 1))
16 return false;
17
14 if (sp >= (base + sizeof(struct thread_info)) && 18 if (sp >= (base + sizeof(struct thread_info)) &&
15 sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf))) 19 sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
16 return true; 20 return true;
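
Note: the new test above rejects stack pointers that are not 16-byte aligned before the existing range check runs. A standalone sketch of the predicate, with assumed sizes standing in for THREAD_SIZE, struct thread_info and struct sparc_stackf:

/* Validity check for a kernel stack pointer: must be 16-byte aligned and
 * must lie inside the thread's stack region.  Sizes here are examples only.
 */
#include <stdbool.h>
#include <stdio.h>

#define THREAD_SIZE        (16 * 1024)   /* assumed for the example */
#define THREAD_INFO_SIZE   192           /* assumed */
#define STACKF_SIZE        192           /* assumed sizeof(struct sparc_stackf) */

static bool kstack_valid(unsigned long base, unsigned long sp)
{
    if (sp & (16UL - 1))                 /* new: reject unaligned %sp */
        return false;

    return sp >= base + THREAD_INFO_SIZE &&
           sp <= base + THREAD_SIZE - STACKF_SIZE;
}

int main(void)
{
    unsigned long base = 0x100000;

    printf("%d\n", kstack_valid(base, base + 0x1000));  /* 1: aligned, in range */
    printf("%d\n", kstack_valid(base, base + 0x1008));  /* 0: only 8-byte aligned */
    return 0;
}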
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index 09138d403c7f..da527b33ebc7 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -105,7 +105,7 @@ static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
105 105
106static int of_bus_ambapp_match(struct device_node *np) 106static int of_bus_ambapp_match(struct device_node *np)
107{ 107{
108 return !strcmp(np->name, "ambapp"); 108 return !strcmp(np->type, "ambapp");
109} 109}
110 110
111static void of_bus_ambapp_count_cells(struct device_node *child, 111static void of_bus_ambapp_count_cells(struct device_node *child,
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 539e83f8e087..592b03d85167 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -247,6 +247,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
247 struct pci_bus *bus, int devfn) 247 struct pci_bus *bus, int devfn)
248{ 248{
249 struct dev_archdata *sd; 249 struct dev_archdata *sd;
250 struct pci_slot *slot;
250 struct of_device *op; 251 struct of_device *op;
251 struct pci_dev *dev; 252 struct pci_dev *dev;
252 const char *type; 253 const char *type;
@@ -286,6 +287,11 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
286 dev->dev.bus = &pci_bus_type; 287 dev->dev.bus = &pci_bus_type;
287 dev->devfn = devfn; 288 dev->devfn = devfn;
288 dev->multifunction = 0; /* maybe a lie? */ 289 dev->multifunction = 0; /* maybe a lie? */
290 set_pcie_port_type(dev);
291
292 list_for_each_entry(slot, &dev->bus->slots, list)
293 if (PCI_SLOT(dev->devfn) == slot->number)
294 dev->slot = slot;
289 295
290 dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff); 296 dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
291 dev->device = of_getintprop_default(node, "device-id", 0xffff); 297 dev->device = of_getintprop_default(node, "device-id", 0xffff);
@@ -322,6 +328,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
322 328
323 dev->current_state = 4; /* unknown power state */ 329 dev->current_state = 4; /* unknown power state */
324 dev->error_state = pci_channel_io_normal; 330 dev->error_state = pci_channel_io_normal;
331 dev->dma_mask = 0xffffffff;
325 332
326 if (!strcmp(node->name, "pci")) { 333 if (!strcmp(node->name, "pci")) {
327 /* a PCI-PCI bridge */ 334 /* a PCI-PCI bridge */
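
Note: the new loop above attaches each probed device to the pci_slot whose number matches the slot part of its devfn. A self-contained illustration of that match, using the standard PCI_SLOT()/PCI_FUNC() encoding and a made-up slot list:

/* devfn packs slot (upper 5 bits) and function (lower 3 bits); the device
 * is attached to the bus slot whose number equals PCI_SLOT(devfn).
 */
#include <stdio.h>

#define PCI_SLOT(devfn)  (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)  ((devfn) & 0x07)

struct pci_slot { int number; };

int main(void)
{
    struct pci_slot slots[] = { { 1 }, { 4 }, { 7 } };   /* made-up slot list */
    unsigned int devfn = (4 << 3) | 2;                   /* slot 4, function 2 */
    struct pci_slot *match = NULL;

    for (unsigned int i = 0; i < sizeof(slots) / sizeof(slots[0]); i++)
        if (PCI_SLOT(devfn) == slots[i].number)
            match = &slots[i];

    printf("devfn 0x%02x -> slot %d, function %d (matched slot %d)\n",
           devfn, PCI_SLOT(devfn), PCI_FUNC(devfn),
           match ? match->number : -1);
    return 0;
}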
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 85e7037429b9..4e2724ec2bb6 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -30,6 +30,7 @@
30#include <asm/oplib.h> 30#include <asm/oplib.h>
31#include <asm/prom.h> 31#include <asm/prom.h>
32#include <asm/pcic.h> 32#include <asm/pcic.h>
33#include <asm/timex.h>
33#include <asm/timer.h> 34#include <asm/timer.h>
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
35#include <asm/irq_regs.h> 36#include <asm/irq_regs.h>
@@ -163,8 +164,6 @@ void __iomem *pcic_regs;
163volatile int pcic_speculative; 164volatile int pcic_speculative;
164volatile int pcic_trapped; 165volatile int pcic_trapped;
165 166
166static void pci_do_gettimeofday(struct timeval *tv);
167static int pci_do_settimeofday(struct timespec *tv);
168 167
169#define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3)) 168#define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3))
170 169
@@ -716,19 +715,27 @@ static irqreturn_t pcic_timer_handler (int irq, void *h)
716#define USECS_PER_JIFFY 10000 /* We have 100HZ "standard" timer for sparc */ 715#define USECS_PER_JIFFY 10000 /* We have 100HZ "standard" timer for sparc */
717#define TICK_TIMER_LIMIT ((100*1000000/4)/100) 716#define TICK_TIMER_LIMIT ((100*1000000/4)/100)
718 717
718u32 pci_gettimeoffset(void)
719{
720 /*
721 * We divide all by 100
722 * to have microsecond resolution and to avoid overflow
723 */
724 unsigned long count =
725 readl(pcic0.pcic_regs+PCI_SYS_COUNTER) & ~PCI_SYS_COUNTER_OVERFLOW;
726 count = ((count/100)*USECS_PER_JIFFY) / (TICK_TIMER_LIMIT/100);
727 return count * 1000;
728}
729
730
719void __init pci_time_init(void) 731void __init pci_time_init(void)
720{ 732{
721 struct linux_pcic *pcic = &pcic0; 733 struct linux_pcic *pcic = &pcic0;
722 unsigned long v; 734 unsigned long v;
723 int timer_irq, irq; 735 int timer_irq, irq;
724 736
725 /* A hack until do_gettimeofday prototype is moved to arch specific headers 737 do_arch_gettimeoffset = pci_gettimeoffset;
726 and btfixupped. Patch do_gettimeofday with ba pci_do_gettimeofday; nop */ 738
727 ((unsigned int *)do_gettimeofday)[0] =
728 0x10800000 | ((((unsigned long)pci_do_gettimeofday -
729 (unsigned long)do_gettimeofday) >> 2) & 0x003fffff);
730 ((unsigned int *)do_gettimeofday)[1] = 0x01000000;
731 BTFIXUPSET_CALL(bus_do_settimeofday, pci_do_settimeofday, BTFIXUPCALL_NORM);
732 btfixup(); 739 btfixup();
733 740
734 writel (TICK_TIMER_LIMIT, pcic->pcic_regs+PCI_SYS_LIMIT); 741 writel (TICK_TIMER_LIMIT, pcic->pcic_regs+PCI_SYS_LIMIT);
@@ -746,84 +753,6 @@ void __init pci_time_init(void)
746 local_irq_enable(); 753 local_irq_enable();
747} 754}
748 755
749static inline unsigned long do_gettimeoffset(void)
750{
751 /*
752 * We divide all by 100
753 * to have microsecond resolution and to avoid overflow
754 */
755 unsigned long count =
756 readl(pcic0.pcic_regs+PCI_SYS_COUNTER) & ~PCI_SYS_COUNTER_OVERFLOW;
757 count = ((count/100)*USECS_PER_JIFFY) / (TICK_TIMER_LIMIT/100);
758 return count;
759}
760
761static void pci_do_gettimeofday(struct timeval *tv)
762{
763 unsigned long flags;
764 unsigned long seq;
765 unsigned long usec, sec;
766 unsigned long max_ntp_tick = tick_usec - tickadj;
767
768 do {
769 seq = read_seqbegin_irqsave(&xtime_lock, flags);
770 usec = do_gettimeoffset();
771
772 /*
773 * If time_adjust is negative then NTP is slowing the clock
774 * so make sure not to go into next possible interval.
775 * Better to lose some accuracy than have time go backwards..
776 */
777 if (unlikely(time_adjust < 0))
778 usec = min(usec, max_ntp_tick);
779
780 sec = xtime.tv_sec;
781 usec += (xtime.tv_nsec / 1000);
782 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
783
784 while (usec >= 1000000) {
785 usec -= 1000000;
786 sec++;
787 }
788
789 tv->tv_sec = sec;
790 tv->tv_usec = usec;
791}
792
793static int pci_do_settimeofday(struct timespec *tv)
794{
795 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
796 return -EINVAL;
797
798 /*
799 * This is revolting. We need to set "xtime" correctly. However, the
800 * value in this location is the value at the most recent update of
801 * wall time. Discover what correction gettimeofday() would have
802 * made, and then undo it!
803 */
804 tv->tv_nsec -= 1000 * do_gettimeoffset();
805 while (tv->tv_nsec < 0) {
806 tv->tv_nsec += NSEC_PER_SEC;
807 tv->tv_sec--;
808 }
809
810 wall_to_monotonic.tv_sec += xtime.tv_sec - tv->tv_sec;
811 wall_to_monotonic.tv_nsec += xtime.tv_nsec - tv->tv_nsec;
812
813 if (wall_to_monotonic.tv_nsec > NSEC_PER_SEC) {
814 wall_to_monotonic.tv_nsec -= NSEC_PER_SEC;
815 wall_to_monotonic.tv_sec++;
816 }
817 if (wall_to_monotonic.tv_nsec < 0) {
818 wall_to_monotonic.tv_nsec += NSEC_PER_SEC;
819 wall_to_monotonic.tv_sec--;
820 }
821
822 xtime.tv_sec = tv->tv_sec;
823 xtime.tv_nsec = tv->tv_nsec;
824 ntp_clear();
825 return 0;
826}
827 756
828#if 0 757#if 0
829static void watchdog_reset() { 758static void watchdog_reset() {
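
Note: pci_do_gettimeofday()/pci_do_settimeofday() and the runtime patching of do_gettimeofday() are gone; the driver now only exports pci_gettimeoffset() through the do_arch_gettimeoffset hook and lets the generic timekeeping code do the rest. A sketch of the scaling it performs, with the PCI_SYS_COUNTER register read replaced by a plain argument:

/* Scale the free-running PCI system counter to nanoseconds within the
 * current jiffy, as the new pci_gettimeoffset() does.
 */
#include <stdio.h>
#include <stdint.h>

#define USECS_PER_JIFFY   10000                      /* 100HZ timer */
#define TICK_TIMER_LIMIT  ((100 * 1000000 / 4) / 100)

static uint32_t pci_gettimeoffset(uint32_t counter)
{
    /* Divide by 100 first, as the kernel code does, so the intermediate
     * product stays within 32 bits. */
    uint32_t usec = ((counter / 100) * USECS_PER_JIFFY) / (TICK_TIMER_LIMIT / 100);

    return usec * 1000;                              /* hook returns nanoseconds */
}

int main(void)
{
    printf("half-way through a tick: %u ns\n",
           pci_gettimeoffset(TICK_TIMER_LIMIT / 2)); /* 5000000 */
    printf("end of a tick:           %u ns\n",
           pci_gettimeoffset(TICK_TIMER_LIMIT));     /* 10000000 */
    return 0;
}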
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 198fb4e79ba2..e856456ec02f 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1,6 +1,6 @@
1/* Performance event support for sparc64. 1/* Performance event support for sparc64.
2 * 2 *
3 * Copyright (C) 2009 David S. Miller <davem@davemloft.net> 3 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
4 * 4 *
5 * This code is based almost entirely upon the x86 perf event 5 * This code is based almost entirely upon the x86 perf event
6 * code, which is: 6 * code, which is:
@@ -18,11 +18,15 @@
18#include <linux/kdebug.h> 18#include <linux/kdebug.h>
19#include <linux/mutex.h> 19#include <linux/mutex.h>
20 20
21#include <asm/stacktrace.h>
21#include <asm/cpudata.h> 22#include <asm/cpudata.h>
23#include <asm/uaccess.h>
22#include <asm/atomic.h> 24#include <asm/atomic.h>
23#include <asm/nmi.h> 25#include <asm/nmi.h>
24#include <asm/pcr.h> 26#include <asm/pcr.h>
25 27
28#include "kstack.h"
29
26/* Sparc64 chips have two performance counters, 32-bits each, with 30/* Sparc64 chips have two performance counters, 32-bits each, with
27 * overflow interrupts generated on transition from 0xffffffff to 0. 31 * overflow interrupts generated on transition from 0xffffffff to 0.
28 * The counters are accessed in one go using a 64-bit register. 32 * The counters are accessed in one go using a 64-bit register.
@@ -51,16 +55,49 @@
51 55
52#define PIC_UPPER_INDEX 0 56#define PIC_UPPER_INDEX 0
53#define PIC_LOWER_INDEX 1 57#define PIC_LOWER_INDEX 1
58#define PIC_NO_INDEX -1
54 59
55struct cpu_hw_events { 60struct cpu_hw_events {
56 struct perf_event *events[MAX_HWEVENTS]; 61 /* Number of events currently scheduled onto this cpu.
57 unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; 62 * This tells how many entries in the arrays below
58 unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; 63 * are valid.
64 */
65 int n_events;
66
67 /* Number of new events added since the last hw_perf_disable().
68 * This works because the perf event layer always adds new
69 * events inside of a perf_{disable,enable}() sequence.
70 */
71 int n_added;
72
 73 /* Array of events currently scheduled on this cpu. */
74 struct perf_event *event[MAX_HWEVENTS];
75
76 /* Array of encoded longs, specifying the %pcr register
77 * encoding and the mask of PIC counters this even can
78 * be scheduled on. See perf_event_encode() et al.
79 */
80 unsigned long events[MAX_HWEVENTS];
81
82 /* The current counter index assigned to an event. When the
83 * event hasn't been programmed into the cpu yet, this will
84 * hold PIC_NO_INDEX. The event->hw.idx value tells us where
85 * we ought to schedule the event.
86 */
87 int current_idx[MAX_HWEVENTS];
88
89 /* Software copy of %pcr register on this cpu. */
59 u64 pcr; 90 u64 pcr;
91
92 /* Enabled/disable state. */
60 int enabled; 93 int enabled;
61}; 94};
62DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; 95DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
63 96
97/* An event map describes the characteristics of a performance
98 * counter event. In particular it gives the encoding as well as
99 * a mask telling which counters the event can be measured on.
100 */
64struct perf_event_map { 101struct perf_event_map {
65 u16 encoding; 102 u16 encoding;
66 u8 pic_mask; 103 u8 pic_mask;
@@ -69,15 +106,20 @@ struct perf_event_map {
69#define PIC_LOWER 0x02 106#define PIC_LOWER 0x02
70}; 107};
71 108
109/* Encode a perf_event_map entry into a long. */
72static unsigned long perf_event_encode(const struct perf_event_map *pmap) 110static unsigned long perf_event_encode(const struct perf_event_map *pmap)
73{ 111{
74 return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask; 112 return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
75} 113}
76 114
77static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk) 115static u8 perf_event_get_msk(unsigned long val)
78{ 116{
79 *msk = val & 0xff; 117 return val & 0xff;
80 *enc = val >> 16; 118}
119
120static u64 perf_event_get_enc(unsigned long val)
121{
122 return val >> 16;
81} 123}
82 124
83#define C(x) PERF_COUNT_HW_CACHE_##x 125#define C(x) PERF_COUNT_HW_CACHE_##x
@@ -491,53 +533,6 @@ static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw
491 pcr_ops->write(cpuc->pcr); 533 pcr_ops->write(cpuc->pcr);
492} 534}
493 535
494void hw_perf_enable(void)
495{
496 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
497 u64 val;
498 int i;
499
500 if (cpuc->enabled)
501 return;
502
503 cpuc->enabled = 1;
504 barrier();
505
506 val = cpuc->pcr;
507
508 for (i = 0; i < MAX_HWEVENTS; i++) {
509 struct perf_event *cp = cpuc->events[i];
510 struct hw_perf_event *hwc;
511
512 if (!cp)
513 continue;
514 hwc = &cp->hw;
515 val |= hwc->config_base;
516 }
517
518 cpuc->pcr = val;
519
520 pcr_ops->write(cpuc->pcr);
521}
522
523void hw_perf_disable(void)
524{
525 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
526 u64 val;
527
528 if (!cpuc->enabled)
529 return;
530
531 cpuc->enabled = 0;
532
533 val = cpuc->pcr;
534 val &= ~(PCR_UTRACE | PCR_STRACE |
535 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
536 cpuc->pcr = val;
537
538 pcr_ops->write(cpuc->pcr);
539}
540
541static u32 read_pmc(int idx) 536static u32 read_pmc(int idx)
542{ 537{
543 u64 val; 538 u64 val;
@@ -566,6 +561,30 @@ static void write_pmc(int idx, u64 val)
566 write_pic(pic); 561 write_pic(pic);
567} 562}
568 563
564static u64 sparc_perf_event_update(struct perf_event *event,
565 struct hw_perf_event *hwc, int idx)
566{
567 int shift = 64 - 32;
568 u64 prev_raw_count, new_raw_count;
569 s64 delta;
570
571again:
572 prev_raw_count = atomic64_read(&hwc->prev_count);
573 new_raw_count = read_pmc(idx);
574
575 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
576 new_raw_count) != prev_raw_count)
577 goto again;
578
579 delta = (new_raw_count << shift) - (prev_raw_count << shift);
580 delta >>= shift;
581
582 atomic64_add(delta, &event->count);
583 atomic64_sub(delta, &hwc->period_left);
584
585 return new_raw_count;
586}
587
569static int sparc_perf_event_set_period(struct perf_event *event, 588static int sparc_perf_event_set_period(struct perf_event *event,
570 struct hw_perf_event *hwc, int idx) 589 struct hw_perf_event *hwc, int idx)
571{ 590{
@@ -598,81 +617,166 @@ static int sparc_perf_event_set_period(struct perf_event *event,
598 return ret; 617 return ret;
599} 618}
600 619
601static int sparc_pmu_enable(struct perf_event *event) 620/* If performance event entries have been added, move existing
621 * events around (if necessary) and then assign new entries to
622 * counters.
623 */
624static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
602{ 625{
603 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 626 int i;
604 struct hw_perf_event *hwc = &event->hw;
605 int idx = hwc->idx;
606 627
607 if (test_and_set_bit(idx, cpuc->used_mask)) 628 if (!cpuc->n_added)
608 return -EAGAIN; 629 goto out;
609 630
610 sparc_pmu_disable_event(cpuc, hwc, idx); 631 /* Read in the counters which are moving. */
632 for (i = 0; i < cpuc->n_events; i++) {
633 struct perf_event *cp = cpuc->event[i];
611 634
612 cpuc->events[idx] = event; 635 if (cpuc->current_idx[i] != PIC_NO_INDEX &&
613 set_bit(idx, cpuc->active_mask); 636 cpuc->current_idx[i] != cp->hw.idx) {
637 sparc_perf_event_update(cp, &cp->hw,
638 cpuc->current_idx[i]);
639 cpuc->current_idx[i] = PIC_NO_INDEX;
640 }
641 }
614 642
615 sparc_perf_event_set_period(event, hwc, idx); 643 /* Assign to counters all unassigned events. */
616 sparc_pmu_enable_event(cpuc, hwc, idx); 644 for (i = 0; i < cpuc->n_events; i++) {
617 perf_event_update_userpage(event); 645 struct perf_event *cp = cpuc->event[i];
618 return 0; 646 struct hw_perf_event *hwc = &cp->hw;
647 int idx = hwc->idx;
648 u64 enc;
649
650 if (cpuc->current_idx[i] != PIC_NO_INDEX)
651 continue;
652
653 sparc_perf_event_set_period(cp, hwc, idx);
654 cpuc->current_idx[i] = idx;
655
656 enc = perf_event_get_enc(cpuc->events[i]);
657 pcr |= event_encoding(enc, idx);
658 }
659out:
660 return pcr;
619} 661}
620 662
621static u64 sparc_perf_event_update(struct perf_event *event, 663void hw_perf_enable(void)
622 struct hw_perf_event *hwc, int idx)
623{ 664{
624 int shift = 64 - 32; 665 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
625 u64 prev_raw_count, new_raw_count; 666 u64 pcr;
626 s64 delta;
627 667
628again: 668 if (cpuc->enabled)
629 prev_raw_count = atomic64_read(&hwc->prev_count); 669 return;
630 new_raw_count = read_pmc(idx);
631 670
632 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, 671 cpuc->enabled = 1;
633 new_raw_count) != prev_raw_count) 672 barrier();
634 goto again;
635 673
636 delta = (new_raw_count << shift) - (prev_raw_count << shift); 674 pcr = cpuc->pcr;
637 delta >>= shift; 675 if (!cpuc->n_events) {
676 pcr = 0;
677 } else {
678 pcr = maybe_change_configuration(cpuc, pcr);
638 679
639 atomic64_add(delta, &event->count); 680 /* We require that all of the events have the same
640 atomic64_sub(delta, &hwc->period_left); 681 * configuration, so just fetch the settings from the
682 * first entry.
683 */
684 cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
685 }
641 686
642 return new_raw_count; 687 pcr_ops->write(cpuc->pcr);
688}
689
690void hw_perf_disable(void)
691{
692 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
693 u64 val;
694
695 if (!cpuc->enabled)
696 return;
697
698 cpuc->enabled = 0;
699 cpuc->n_added = 0;
700
701 val = cpuc->pcr;
702 val &= ~(PCR_UTRACE | PCR_STRACE |
703 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
704 cpuc->pcr = val;
705
706 pcr_ops->write(cpuc->pcr);
643} 707}
644 708
645static void sparc_pmu_disable(struct perf_event *event) 709static void sparc_pmu_disable(struct perf_event *event)
646{ 710{
647 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 711 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
648 struct hw_perf_event *hwc = &event->hw; 712 struct hw_perf_event *hwc = &event->hw;
649 int idx = hwc->idx; 713 unsigned long flags;
714 int i;
650 715
651 clear_bit(idx, cpuc->active_mask); 716 local_irq_save(flags);
652 sparc_pmu_disable_event(cpuc, hwc, idx); 717 perf_disable();
718
719 for (i = 0; i < cpuc->n_events; i++) {
720 if (event == cpuc->event[i]) {
721 int idx = cpuc->current_idx[i];
722
723 /* Shift remaining entries down into
724 * the existing slot.
725 */
726 while (++i < cpuc->n_events) {
727 cpuc->event[i - 1] = cpuc->event[i];
728 cpuc->events[i - 1] = cpuc->events[i];
729 cpuc->current_idx[i - 1] =
730 cpuc->current_idx[i];
731 }
732
733 /* Absorb the final count and turn off the
734 * event.
735 */
736 sparc_pmu_disable_event(cpuc, hwc, idx);
737 barrier();
738 sparc_perf_event_update(event, hwc, idx);
653 739
654 barrier(); 740 perf_event_update_userpage(event);
655 741
656 sparc_perf_event_update(event, hwc, idx); 742 cpuc->n_events--;
657 cpuc->events[idx] = NULL; 743 break;
658 clear_bit(idx, cpuc->used_mask); 744 }
745 }
659 746
660 perf_event_update_userpage(event); 747 perf_enable();
748 local_irq_restore(flags);
749}
750
751static int active_event_index(struct cpu_hw_events *cpuc,
752 struct perf_event *event)
753{
754 int i;
755
756 for (i = 0; i < cpuc->n_events; i++) {
757 if (cpuc->event[i] == event)
758 break;
759 }
760 BUG_ON(i == cpuc->n_events);
761 return cpuc->current_idx[i];
661} 762}
662 763
663static void sparc_pmu_read(struct perf_event *event) 764static void sparc_pmu_read(struct perf_event *event)
664{ 765{
766 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
767 int idx = active_event_index(cpuc, event);
665 struct hw_perf_event *hwc = &event->hw; 768 struct hw_perf_event *hwc = &event->hw;
666 769
667 sparc_perf_event_update(event, hwc, hwc->idx); 770 sparc_perf_event_update(event, hwc, idx);
668} 771}
669 772
670static void sparc_pmu_unthrottle(struct perf_event *event) 773static void sparc_pmu_unthrottle(struct perf_event *event)
671{ 774{
672 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 775 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
776 int idx = active_event_index(cpuc, event);
673 struct hw_perf_event *hwc = &event->hw; 777 struct hw_perf_event *hwc = &event->hw;
674 778
675 sparc_pmu_enable_event(cpuc, hwc, hwc->idx); 779 sparc_pmu_enable_event(cpuc, hwc, idx);
676} 780}
677 781
678static atomic_t active_events = ATOMIC_INIT(0); 782static atomic_t active_events = ATOMIC_INIT(0);
@@ -750,43 +854,75 @@ static void hw_perf_event_destroy(struct perf_event *event)
750/* Make sure all events can be scheduled into the hardware at 854/* Make sure all events can be scheduled into the hardware at
751 * the same time. This is simplified by the fact that we only 855 * the same time. This is simplified by the fact that we only
752 * need to support 2 simultaneous HW events. 856 * need to support 2 simultaneous HW events.
857 *
858 * As a side effect, the evts[]->hw.idx values will be assigned
859 * on success. These are pending indexes. When the events are
860 * actually programmed into the chip, these values will propagate
861 * to the per-cpu cpuc->current_idx[] slots, see the code in
862 * maybe_change_configuration() for details.
753 */ 863 */
754static int sparc_check_constraints(unsigned long *events, int n_ev) 864static int sparc_check_constraints(struct perf_event **evts,
865 unsigned long *events, int n_ev)
755{ 866{
756 if (n_ev <= perf_max_events) { 867 u8 msk0 = 0, msk1 = 0;
757 u8 msk1, msk2; 868 int idx0 = 0;
758 u16 dummy; 869
759 870 /* This case is possible when we are invoked from
760 if (n_ev == 1) 871 * hw_perf_group_sched_in().
761 return 0; 872 */
762 BUG_ON(n_ev != 2); 873 if (!n_ev)
763 perf_event_decode(events[0], &dummy, &msk1); 874 return 0;
764 perf_event_decode(events[1], &dummy, &msk2); 875
765 876 if (n_ev > perf_max_events)
766 /* If both events can go on any counter, OK. */ 877 return -1;
767 if (msk1 == (PIC_UPPER | PIC_LOWER) && 878
768 msk2 == (PIC_UPPER | PIC_LOWER)) 879 msk0 = perf_event_get_msk(events[0]);
769 return 0; 880 if (n_ev == 1) {
770 881 if (msk0 & PIC_LOWER)
771 /* If one event is limited to a specific counter, 882 idx0 = 1;
772 * and the other can go on both, OK. 883 goto success;
773 */
774 if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
775 msk2 == (PIC_UPPER | PIC_LOWER))
776 return 0;
777 if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) &&
778 msk1 == (PIC_UPPER | PIC_LOWER))
779 return 0;
780
781 /* If the events are fixed to different counters, OK. */
782 if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) ||
783 (msk1 == PIC_LOWER && msk2 == PIC_UPPER))
784 return 0;
785
786 /* Otherwise, there is a conflict. */
787 } 884 }
885 BUG_ON(n_ev != 2);
886 msk1 = perf_event_get_msk(events[1]);
887
888 /* If both events can go on any counter, OK. */
889 if (msk0 == (PIC_UPPER | PIC_LOWER) &&
890 msk1 == (PIC_UPPER | PIC_LOWER))
891 goto success;
788 892
893 /* If one event is limited to a specific counter,
894 * and the other can go on both, OK.
895 */
896 if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
897 msk1 == (PIC_UPPER | PIC_LOWER)) {
898 if (msk0 & PIC_LOWER)
899 idx0 = 1;
900 goto success;
901 }
902
903 if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
904 msk0 == (PIC_UPPER | PIC_LOWER)) {
905 if (msk1 & PIC_UPPER)
906 idx0 = 1;
907 goto success;
908 }
909
910 /* If the events are fixed to different counters, OK. */
911 if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
912 (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
913 if (msk0 & PIC_LOWER)
914 idx0 = 1;
915 goto success;
916 }
917
918 /* Otherwise, there is a conflict. */
789 return -1; 919 return -1;
920
921success:
922 evts[0]->hw.idx = idx0;
923 if (n_ev == 2)
924 evts[1]->hw.idx = idx0 ^ 1;
925 return 0;
790} 926}
791 927
792static int check_excludes(struct perf_event **evts, int n_prev, int n_new) 928static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
@@ -818,7 +954,8 @@ static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
818} 954}
819 955
820static int collect_events(struct perf_event *group, int max_count, 956static int collect_events(struct perf_event *group, int max_count,
821 struct perf_event *evts[], unsigned long *events) 957 struct perf_event *evts[], unsigned long *events,
958 int *current_idx)
822{ 959{
823 struct perf_event *event; 960 struct perf_event *event;
824 int n = 0; 961 int n = 0;
@@ -827,7 +964,8 @@ static int collect_events(struct perf_event *group, int max_count,
827 if (n >= max_count) 964 if (n >= max_count)
828 return -1; 965 return -1;
829 evts[n] = group; 966 evts[n] = group;
830 events[n++] = group->hw.event_base; 967 events[n] = group->hw.event_base;
968 current_idx[n++] = PIC_NO_INDEX;
831 } 969 }
832 list_for_each_entry(event, &group->sibling_list, group_entry) { 970 list_for_each_entry(event, &group->sibling_list, group_entry) {
833 if (!is_software_event(event) && 971 if (!is_software_event(event) &&
@@ -835,20 +973,100 @@ static int collect_events(struct perf_event *group, int max_count,
835 if (n >= max_count) 973 if (n >= max_count)
836 return -1; 974 return -1;
837 evts[n] = event; 975 evts[n] = event;
838 events[n++] = event->hw.event_base; 976 events[n] = event->hw.event_base;
977 current_idx[n++] = PIC_NO_INDEX;
839 } 978 }
840 } 979 }
841 return n; 980 return n;
842} 981}
843 982
983static void event_sched_in(struct perf_event *event, int cpu)
984{
985 event->state = PERF_EVENT_STATE_ACTIVE;
986 event->oncpu = cpu;
987 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
988 if (is_software_event(event))
989 event->pmu->enable(event);
990}
991
992int hw_perf_group_sched_in(struct perf_event *group_leader,
993 struct perf_cpu_context *cpuctx,
994 struct perf_event_context *ctx, int cpu)
995{
996 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
997 struct perf_event *sub;
998 int n0, n;
999
1000 if (!sparc_pmu)
1001 return 0;
1002
1003 n0 = cpuc->n_events;
1004 n = collect_events(group_leader, perf_max_events - n0,
1005 &cpuc->event[n0], &cpuc->events[n0],
1006 &cpuc->current_idx[n0]);
1007 if (n < 0)
1008 return -EAGAIN;
1009 if (check_excludes(cpuc->event, n0, n))
1010 return -EINVAL;
1011 if (sparc_check_constraints(cpuc->event, cpuc->events, n + n0))
1012 return -EAGAIN;
1013 cpuc->n_events = n0 + n;
1014 cpuc->n_added += n;
1015
1016 cpuctx->active_oncpu += n;
1017 n = 1;
1018 event_sched_in(group_leader, cpu);
1019 list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
1020 if (sub->state != PERF_EVENT_STATE_OFF) {
1021 event_sched_in(sub, cpu);
1022 n++;
1023 }
1024 }
1025 ctx->nr_active += n;
1026
1027 return 1;
1028}
1029
1030static int sparc_pmu_enable(struct perf_event *event)
1031{
1032 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1033 int n0, ret = -EAGAIN;
1034 unsigned long flags;
1035
1036 local_irq_save(flags);
1037 perf_disable();
1038
1039 n0 = cpuc->n_events;
1040 if (n0 >= perf_max_events)
1041 goto out;
1042
1043 cpuc->event[n0] = event;
1044 cpuc->events[n0] = event->hw.event_base;
1045 cpuc->current_idx[n0] = PIC_NO_INDEX;
1046
1047 if (check_excludes(cpuc->event, n0, 1))
1048 goto out;
1049 if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
1050 goto out;
1051
1052 cpuc->n_events++;
1053 cpuc->n_added++;
1054
1055 ret = 0;
1056out:
1057 perf_enable();
1058 local_irq_restore(flags);
1059 return ret;
1060}
1061
844static int __hw_perf_event_init(struct perf_event *event) 1062static int __hw_perf_event_init(struct perf_event *event)
845{ 1063{
846 struct perf_event_attr *attr = &event->attr; 1064 struct perf_event_attr *attr = &event->attr;
847 struct perf_event *evts[MAX_HWEVENTS]; 1065 struct perf_event *evts[MAX_HWEVENTS];
848 struct hw_perf_event *hwc = &event->hw; 1066 struct hw_perf_event *hwc = &event->hw;
849 unsigned long events[MAX_HWEVENTS]; 1067 unsigned long events[MAX_HWEVENTS];
1068 int current_idx_dmy[MAX_HWEVENTS];
850 const struct perf_event_map *pmap; 1069 const struct perf_event_map *pmap;
851 u64 enc;
852 int n; 1070 int n;
853 1071
854 if (atomic_read(&nmi_active) < 0) 1072 if (atomic_read(&nmi_active) < 0)
@@ -865,10 +1083,7 @@ static int __hw_perf_event_init(struct perf_event *event)
865 } else 1083 } else
866 return -EOPNOTSUPP; 1084 return -EOPNOTSUPP;
867 1085
868 /* We save the enable bits in the config_base. So to 1086 /* We save the enable bits in the config_base. */
869 * turn off sampling just write 'config', and to enable
870 * things write 'config | config_base'.
871 */
872 hwc->config_base = sparc_pmu->irq_bit; 1087 hwc->config_base = sparc_pmu->irq_bit;
873 if (!attr->exclude_user) 1088 if (!attr->exclude_user)
874 hwc->config_base |= PCR_UTRACE; 1089 hwc->config_base |= PCR_UTRACE;
@@ -879,13 +1094,11 @@ static int __hw_perf_event_init(struct perf_event *event)
879 1094
880 hwc->event_base = perf_event_encode(pmap); 1095 hwc->event_base = perf_event_encode(pmap);
881 1096
882 enc = pmap->encoding;
883
884 n = 0; 1097 n = 0;
885 if (event->group_leader != event) { 1098 if (event->group_leader != event) {
886 n = collect_events(event->group_leader, 1099 n = collect_events(event->group_leader,
887 perf_max_events - 1, 1100 perf_max_events - 1,
888 evts, events); 1101 evts, events, current_idx_dmy);
889 if (n < 0) 1102 if (n < 0)
890 return -EINVAL; 1103 return -EINVAL;
891 } 1104 }
@@ -895,9 +1108,11 @@ static int __hw_perf_event_init(struct perf_event *event)
895 if (check_excludes(evts, n, 1)) 1108 if (check_excludes(evts, n, 1))
896 return -EINVAL; 1109 return -EINVAL;
897 1110
898 if (sparc_check_constraints(events, n + 1)) 1111 if (sparc_check_constraints(evts, events, n + 1))
899 return -EINVAL; 1112 return -EINVAL;
900 1113
1114 hwc->idx = PIC_NO_INDEX;
1115
901 /* Try to do all error checking before this point, as unwinding 1116 /* Try to do all error checking before this point, as unwinding
902 * state after grabbing the PMC is difficult. 1117 * state after grabbing the PMC is difficult.
903 */ 1118 */
@@ -910,15 +1125,6 @@ static int __hw_perf_event_init(struct perf_event *event)
910 atomic64_set(&hwc->period_left, hwc->sample_period); 1125 atomic64_set(&hwc->period_left, hwc->sample_period);
911 } 1126 }
912 1127
913 if (pmap->pic_mask & PIC_UPPER) {
914 hwc->idx = PIC_UPPER_INDEX;
915 enc <<= sparc_pmu->upper_shift;
916 } else {
917 hwc->idx = PIC_LOWER_INDEX;
918 enc <<= sparc_pmu->lower_shift;
919 }
920
921 hwc->config |= enc;
922 return 0; 1128 return 0;
923} 1129}
924 1130
@@ -968,7 +1174,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
968 struct perf_sample_data data; 1174 struct perf_sample_data data;
969 struct cpu_hw_events *cpuc; 1175 struct cpu_hw_events *cpuc;
970 struct pt_regs *regs; 1176 struct pt_regs *regs;
971 int idx; 1177 int i;
972 1178
973 if (!atomic_read(&active_events)) 1179 if (!atomic_read(&active_events))
974 return NOTIFY_DONE; 1180 return NOTIFY_DONE;
@@ -997,13 +1203,12 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
997 if (sparc_pmu->irq_bit) 1203 if (sparc_pmu->irq_bit)
998 pcr_ops->write(cpuc->pcr); 1204 pcr_ops->write(cpuc->pcr);
999 1205
1000 for (idx = 0; idx < MAX_HWEVENTS; idx++) { 1206 for (i = 0; i < cpuc->n_events; i++) {
1001 struct perf_event *event = cpuc->events[idx]; 1207 struct perf_event *event = cpuc->event[i];
1208 int idx = cpuc->current_idx[i];
1002 struct hw_perf_event *hwc; 1209 struct hw_perf_event *hwc;
1003 u64 val; 1210 u64 val;
1004 1211
1005 if (!test_bit(idx, cpuc->active_mask))
1006 continue;
1007 hwc = &event->hw; 1212 hwc = &event->hw;
1008 val = sparc_perf_event_update(event, hwc, idx); 1213 val = sparc_perf_event_update(event, hwc, idx);
1009 if (val & (1ULL << 31)) 1214 if (val & (1ULL << 31))
@@ -1055,10 +1260,122 @@ void __init init_hw_perf_events(void)
1055 1260
1056 pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); 1261 pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
1057 1262
1058 /* All sparc64 PMUs currently have 2 events. But this simple 1263 /* All sparc64 PMUs currently have 2 events. */
1059 * driver only supports one active event at a time. 1264 perf_max_events = 2;
1060 */
1061 perf_max_events = 1;
1062 1265
1063 register_die_notifier(&perf_event_nmi_notifier); 1266 register_die_notifier(&perf_event_nmi_notifier);
1064} 1267}
1268
1269static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
1270{
1271 if (entry->nr < PERF_MAX_STACK_DEPTH)
1272 entry->ip[entry->nr++] = ip;
1273}
1274
1275static void perf_callchain_kernel(struct pt_regs *regs,
1276 struct perf_callchain_entry *entry)
1277{
1278 unsigned long ksp, fp;
1279
1280 callchain_store(entry, PERF_CONTEXT_KERNEL);
1281 callchain_store(entry, regs->tpc);
1282
1283 ksp = regs->u_regs[UREG_I6];
1284 fp = ksp + STACK_BIAS;
1285 do {
1286 struct sparc_stackf *sf;
1287 struct pt_regs *regs;
1288 unsigned long pc;
1289
1290 if (!kstack_valid(current_thread_info(), fp))
1291 break;
1292
1293 sf = (struct sparc_stackf *) fp;
1294 regs = (struct pt_regs *) (sf + 1);
1295
1296 if (kstack_is_trap_frame(current_thread_info(), regs)) {
1297 if (user_mode(regs))
1298 break;
1299 pc = regs->tpc;
1300 fp = regs->u_regs[UREG_I6] + STACK_BIAS;
1301 } else {
1302 pc = sf->callers_pc;
1303 fp = (unsigned long)sf->fp + STACK_BIAS;
1304 }
1305 callchain_store(entry, pc);
1306 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1307}
1308
1309static void perf_callchain_user_64(struct pt_regs *regs,
1310 struct perf_callchain_entry *entry)
1311{
1312 unsigned long ufp;
1313
1314 callchain_store(entry, PERF_CONTEXT_USER);
1315 callchain_store(entry, regs->tpc);
1316
1317 ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
1318 do {
1319 struct sparc_stackf *usf, sf;
1320 unsigned long pc;
1321
1322 usf = (struct sparc_stackf *) ufp;
1323 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
1324 break;
1325
1326 pc = sf.callers_pc;
1327 ufp = (unsigned long)sf.fp + STACK_BIAS;
1328 callchain_store(entry, pc);
1329 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1330}
1331
1332static void perf_callchain_user_32(struct pt_regs *regs,
1333 struct perf_callchain_entry *entry)
1334{
1335 unsigned long ufp;
1336
1337 callchain_store(entry, PERF_CONTEXT_USER);
1338 callchain_store(entry, regs->tpc);
1339
1340 ufp = regs->u_regs[UREG_I6];
1341 do {
1342 struct sparc_stackf32 *usf, sf;
1343 unsigned long pc;
1344
1345 usf = (struct sparc_stackf32 *) ufp;
1346 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
1347 break;
1348
1349 pc = sf.callers_pc;
1350 ufp = (unsigned long)sf.fp;
1351 callchain_store(entry, pc);
1352 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1353}
1354
1355/* Like powerpc we can't get PMU interrupts within the PMU handler,
1356 * so no need for separate NMI and IRQ chains as on x86.
1357 */
1358static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
1359
1360struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1361{
1362 struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
1363
1364 entry->nr = 0;
1365 if (!user_mode(regs)) {
1366 stack_trace_flush();
1367 perf_callchain_kernel(regs, entry);
1368 if (current->mm)
1369 regs = task_pt_regs(current);
1370 else
1371 regs = NULL;
1372 }
1373 if (regs) {
1374 flushw_user();
1375 if (test_thread_flag(TIF_32BIT))
1376 perf_callchain_user_32(regs, entry);
1377 else
1378 perf_callchain_user_64(regs, entry);
1379 }
1380 return entry;
1381}
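
Note: the rewritten sparc_check_constraints() above decides whether a group of (at most two) events can be scheduled, based on each event's PIC counter mask, and records a pending counter index in hw.idx. A userspace sketch of the placement rule for the two-event case (the empty and single-event cases are omitted; structures are simplified stand-ins):

/* Each event may run on the upper counter, the lower counter, or either.
 * Return the counter index for the first event (0 = upper, 1 = lower),
 * or -1 if the pair conflicts; the second event gets idx0 ^ 1.
 */
#include <stdio.h>

#define PIC_UPPER 0x01
#define PIC_LOWER 0x02

static int place_two_events(unsigned char msk0, unsigned char msk1)
{
    if (msk0 == (PIC_UPPER | PIC_LOWER) && msk1 == (PIC_UPPER | PIC_LOWER))
        return 0;                                  /* both flexible */
    if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
        msk1 == (PIC_UPPER | PIC_LOWER))
        return (msk0 & PIC_LOWER) ? 1 : 0;         /* first is fixed */
    if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
        msk0 == (PIC_UPPER | PIC_LOWER))
        return (msk1 & PIC_UPPER) ? 1 : 0;         /* second is fixed */
    if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
        (msk0 == PIC_LOWER && msk1 == PIC_UPPER))
        return (msk0 & PIC_LOWER) ? 1 : 0;         /* fixed to different counters */
    return -1;                                     /* conflict */
}

int main(void)
{
    printf("%d\n", place_two_events(PIC_UPPER | PIC_LOWER, PIC_UPPER)); /* 1 */
    printf("%d\n", place_two_events(PIC_UPPER, PIC_UPPER));             /* -1 */
    return 0;
}

The kernel then propagates these pending indexes into cpuc->current_idx[] from maybe_change_configuration() when the counters are actually programmed.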
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 2830b415e214..c49865b30719 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -526,7 +526,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
526 * Set some valid stack frames to give to the child. 526 * Set some valid stack frames to give to the child.
527 */ 527 */
528 childstack = (struct sparc_stackf __user *) 528 childstack = (struct sparc_stackf __user *)
529 (sp & ~0x7UL); 529 (sp & ~0xfUL);
530 parentstack = (struct sparc_stackf __user *) 530 parentstack = (struct sparc_stackf __user *)
531 regs->u_regs[UREG_FP]; 531 regs->u_regs[UREG_FP];
532 532
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 18d67854a1b8..cb70476bd8f5 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -365,14 +365,6 @@ void flush_thread(void)
365 struct thread_info *t = current_thread_info(); 365 struct thread_info *t = current_thread_info();
366 struct mm_struct *mm; 366 struct mm_struct *mm;
367 367
368 if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
369 clear_ti_thread_flag(t, TIF_ABI_PENDING);
370 if (test_ti_thread_flag(t, TIF_32BIT))
371 clear_ti_thread_flag(t, TIF_32BIT);
372 else
373 set_ti_thread_flag(t, TIF_32BIT);
374 }
375
376 mm = t->task->mm; 368 mm = t->task->mm;
377 if (mm) 369 if (mm)
378 tsb_context_switch(mm); 370 tsb_context_switch(mm);
@@ -406,11 +398,11 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
406 } else 398 } else
407 __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6])); 399 __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
408 400
409 /* Now 8-byte align the stack as this is mandatory in the 401 /* Now align the stack as this is mandatory in the Sparc ABI
410 * Sparc ABI due to how register windows work. This hides 402 * due to how register windows work. This hides the
411 * the restriction from thread libraries etc. -DaveM 403 * restriction from thread libraries etc.
412 */ 404 */
413 csp &= ~7UL; 405 csp &= ~15UL;
414 406
415 distance = fp - psp; 407 distance = fp - psp;
416 rval = (csp - distance); 408 rval = (csp - distance);
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index ba5b09ad6666..ea22cd373c64 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -120,8 +120,8 @@ struct rt_signal_frame32 {
120}; 120};
121 121
122/* Align macros */ 122/* Align macros */
123#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 7) & (~7))) 123#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 15) & (~15)))
124#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 7) & (~7))) 124#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 15) & (~15)))
125 125
126int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) 126int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
127{ 127{
@@ -420,15 +420,17 @@ static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, uns
420 sp = current->sas_ss_sp + current->sas_ss_size; 420 sp = current->sas_ss_sp + current->sas_ss_size;
421 } 421 }
422 422
423 sp -= framesize;
424
423 /* Always align the stack frame. This handles two cases. First, 425 /* Always align the stack frame. This handles two cases. First,
424 * sigaltstack need not be mindful of platform specific stack 426 * sigaltstack need not be mindful of platform specific stack
425 * alignment. Second, if we took this signal because the stack 427 * alignment. Second, if we took this signal because the stack
426 * is not aligned properly, we'd like to take the signal cleanly 428 * is not aligned properly, we'd like to take the signal cleanly
427 * and report that. 429 * and report that.
428 */ 430 */
429 sp &= ~7UL; 431 sp &= ~15UL;
430 432
431 return (void __user *)(sp - framesize); 433 return (void __user *) sp;
432} 434}
433 435
434static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) 436static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 7ce1a1005b1d..9882df92ba0a 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -267,15 +267,17 @@ static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *re
267 sp = current->sas_ss_sp + current->sas_ss_size; 267 sp = current->sas_ss_sp + current->sas_ss_size;
268 } 268 }
269 269
270 sp -= framesize;
271
270 /* Always align the stack frame. This handles two cases. First, 272 /* Always align the stack frame. This handles two cases. First,
271 * sigaltstack need not be mindful of platform specific stack 273 * sigaltstack need not be mindful of platform specific stack
272 * alignment. Second, if we took this signal because the stack 274 * alignment. Second, if we took this signal because the stack
273 * is not aligned properly, we'd like to take the signal cleanly 275 * is not aligned properly, we'd like to take the signal cleanly
274 * and report that. 276 * and report that.
275 */ 277 */
276 sp &= ~7UL; 278 sp &= ~15UL;
277 279
278 return (void __user *)(sp - framesize); 280 return (void __user *) sp;
279} 281}
280 282
281static inline int 283static inline int
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 647afbda7ae1..9fa48c30037e 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -353,7 +353,7 @@ segv:
353/* Checks if the fp is valid */ 353/* Checks if the fp is valid */
354static int invalid_frame_pointer(void __user *fp, int fplen) 354static int invalid_frame_pointer(void __user *fp, int fplen)
355{ 355{
356 if (((unsigned long) fp) & 7) 356 if (((unsigned long) fp) & 15)
357 return 1; 357 return 1;
358 return 0; 358 return 0;
359} 359}
@@ -396,15 +396,17 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *
396 sp = current->sas_ss_sp + current->sas_ss_size; 396 sp = current->sas_ss_sp + current->sas_ss_size;
397 } 397 }
398 398
399 sp -= framesize;
400
399 /* Always align the stack frame. This handles two cases. First, 401 /* Always align the stack frame. This handles two cases. First,
400 * sigaltstack need not be mindful of platform specific stack 402 * sigaltstack need not be mindful of platform specific stack
401 * alignment. Second, if we took this signal because the stack 403 * alignment. Second, if we took this signal because the stack
402 * is not aligned properly, we'd like to take the signal cleanly 404 * is not aligned properly, we'd like to take the signal cleanly
403 * and report that. 405 * and report that.
404 */ 406 */
405 sp &= ~7UL; 407 sp &= ~15UL;
406 408
407 return (void __user *)(sp - framesize); 409 return (void __user *) sp;
408} 410}
409 411
410static inline void 412static inline void
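
Note: all three get_sigframe() variants now subtract the frame size before masking, and mask with ~15 instead of ~7, so the frame base itself ends up 16-byte aligned (matching the stricter invalid_frame_pointer() test above). A small arithmetic demo of why the ordering matters, with arbitrary example values:

/* Compare the old align-then-subtract with the new subtract-then-align. */
#include <stdio.h>

int main(void)
{
    unsigned long sp = 0xffff0a34;   /* arbitrary, misaligned user %sp */
    unsigned long framesize = 0x228; /* arbitrary sigframe size */

    unsigned long old_style = (sp & ~7UL) - framesize;   /* before the patch */
    unsigned long new_style = (sp - framesize) & ~15UL;  /* after the patch */

    printf("old: %#lx (mod 16 = %lu)\n", old_style, old_style % 16);  /* mod 16 = 8 */
    printf("new: %#lx (mod 16 = %lu)\n", new_style, new_style % 16);  /* mod 16 = 0 */
    return 0;
}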
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index cfa0e19abe3b..d77f54316948 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -365,6 +365,7 @@ EXPORT_SYMBOL(get_fb_unmapped_area);
365void arch_pick_mmap_layout(struct mm_struct *mm) 365void arch_pick_mmap_layout(struct mm_struct *mm)
366{ 366{
367 unsigned long random_factor = 0UL; 367 unsigned long random_factor = 0UL;
368 unsigned long gap;
368 369
369 if (current->flags & PF_RANDOMIZE) { 370 if (current->flags & PF_RANDOMIZE) {
370 random_factor = get_random_int(); 371 random_factor = get_random_int();
@@ -379,9 +380,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
379 * Fall back to the standard layout if the personality 380 * Fall back to the standard layout if the personality
380 * bit is set, or if the expected stack growth is unlimited: 381 * bit is set, or if the expected stack growth is unlimited:
381 */ 382 */
383 gap = rlimit(RLIMIT_STACK);
382 if (!test_thread_flag(TIF_32BIT) || 384 if (!test_thread_flag(TIF_32BIT) ||
383 (current->personality & ADDR_COMPAT_LAYOUT) || 385 (current->personality & ADDR_COMPAT_LAYOUT) ||
384 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY || 386 gap == RLIM_INFINITY ||
385 sysctl_legacy_va_layout) { 387 sysctl_legacy_va_layout) {
386 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; 388 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
387 mm->get_unmapped_area = arch_get_unmapped_area; 389 mm->get_unmapped_area = arch_get_unmapped_area;
@@ -389,9 +391,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
389 } else { 391 } else {
390 /* We know it's 32-bit */ 392 /* We know it's 32-bit */
391 unsigned long task_size = STACK_TOP32; 393 unsigned long task_size = STACK_TOP32;
392 unsigned long gap;
393 394
394 gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
395 if (gap < 128 * 1024 * 1024) 395 if (gap < 128 * 1024 * 1024)
396 gap = 128 * 1024 * 1024; 396 gap = 128 * 1024 * 1024;
397 if (gap > (task_size / 6 * 5)) 397 if (gap > (task_size / 6 * 5))
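
Note: the mmap layout decision above now reads the stack limit once through rlimit(RLIMIT_STACK) and reuses it both for the "unlimited stack" check and for sizing the top-down gap. A userspace analogue of that decision using getrlimit(); the kernel reads the limit from the task's signal struct instead:

/* Inspect the stack rlimit the way arch_pick_mmap_layout() now does. */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
    struct rlimit rl;

    if (getrlimit(RLIMIT_STACK, &rl))
        return 1;

    if (rl.rlim_cur == RLIM_INFINITY)
        printf("unlimited stack -> legacy (bottom-up) mmap layout\n");
    else
        printf("stack gap %lu bytes -> top-down layout possible\n",
               (unsigned long)rl.rlim_cur);
    return 0;
}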
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 5b2f595fe65b..0d4c09b15efc 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -35,6 +35,7 @@
35#include <linux/platform_device.h> 35#include <linux/platform_device.h>
36 36
37#include <asm/oplib.h> 37#include <asm/oplib.h>
38#include <asm/timex.h>
38#include <asm/timer.h> 39#include <asm/timer.h>
39#include <asm/system.h> 40#include <asm/system.h>
40#include <asm/irq.h> 41#include <asm/irq.h>
@@ -51,7 +52,6 @@ DEFINE_SPINLOCK(rtc_lock);
51EXPORT_SYMBOL(rtc_lock); 52EXPORT_SYMBOL(rtc_lock);
52 53
53static int set_rtc_mmss(unsigned long); 54static int set_rtc_mmss(unsigned long);
54static int sbus_do_settimeofday(struct timespec *tv);
55 55
56unsigned long profile_pc(struct pt_regs *regs) 56unsigned long profile_pc(struct pt_regs *regs)
57{ 57{
@@ -76,6 +76,8 @@ EXPORT_SYMBOL(profile_pc);
76 76
77__volatile__ unsigned int *master_l10_counter; 77__volatile__ unsigned int *master_l10_counter;
78 78
79u32 (*do_arch_gettimeoffset)(void);
80
79/* 81/*
80 * timer_interrupt() needs to keep up the real-time clock, 82 * timer_interrupt() needs to keep up the real-time clock,
81 * as well as call the "do_timer()" routine every clocktick 83 * as well as call the "do_timer()" routine every clocktick
@@ -196,35 +198,14 @@ static int __init clock_init(void)
196{ 198{
197 return of_register_driver(&clock_driver, &of_platform_bus_type); 199 return of_register_driver(&clock_driver, &of_platform_bus_type);
198} 200}
199
200/* Must be after subsys_initcall() so that busses are probed. Must 201/* Must be after subsys_initcall() so that busses are probed. Must
201 * be before device_initcall() because things like the RTC driver 202 * be before device_initcall() because things like the RTC driver
202 * need to see the clock registers. 203 * need to see the clock registers.
203 */ 204 */
204fs_initcall(clock_init); 205fs_initcall(clock_init);
205 206
206static void __init sbus_time_init(void)
207{
208
209 BTFIXUPSET_CALL(bus_do_settimeofday, sbus_do_settimeofday, BTFIXUPCALL_NORM);
210 btfixup();
211
212 sparc_init_timers(timer_interrupt);
213}
214
215void __init time_init(void)
216{
217#ifdef CONFIG_PCI
218 extern void pci_time_init(void);
219 if (pcic_present()) {
220 pci_time_init();
221 return;
222 }
223#endif
224 sbus_time_init();
225}
226 207
227static inline unsigned long do_gettimeoffset(void) 208u32 sbus_do_gettimeoffset(void)
228{ 209{
229 unsigned long val = *master_l10_counter; 210 unsigned long val = *master_l10_counter;
230 unsigned long usec = (val >> 10) & 0x1fffff; 211 unsigned long usec = (val >> 10) & 0x1fffff;
@@ -233,86 +214,39 @@ static inline unsigned long do_gettimeoffset(void)
233 if (val & 0x80000000) 214 if (val & 0x80000000)
234 usec += 1000000 / HZ; 215 usec += 1000000 / HZ;
235 216
236 return usec; 217 return usec * 1000;
237} 218}
238 219
239/* Ok, my cute asm atomicity trick doesn't work anymore.
240 * There are just too many variables that need to be protected
241 * now (both members of xtime, et al.)
242 */
243void do_gettimeofday(struct timeval *tv)
244{
245 unsigned long flags;
246 unsigned long seq;
247 unsigned long usec, sec;
248 unsigned long max_ntp_tick = tick_usec - tickadj;
249
250 do {
251 seq = read_seqbegin_irqsave(&xtime_lock, flags);
252 usec = do_gettimeoffset();
253
254 /*
255 * If time_adjust is negative then NTP is slowing the clock
256 * so make sure not to go into next possible interval.
257 * Better to lose some accuracy than have time go backwards..
258 */
259 if (unlikely(time_adjust < 0))
260 usec = min(usec, max_ntp_tick);
261
262 sec = xtime.tv_sec;
263 usec += (xtime.tv_nsec / 1000);
264 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
265
266 while (usec >= 1000000) {
267 usec -= 1000000;
268 sec++;
269 }
270 220
271 tv->tv_sec = sec; 221u32 arch_gettimeoffset(void)
272 tv->tv_usec = usec;
273}
274
275EXPORT_SYMBOL(do_gettimeofday);
276
277int do_settimeofday(struct timespec *tv)
278{ 222{
279 int ret; 223 if (unlikely(!do_arch_gettimeoffset))
280 224 return 0;
281 write_seqlock_irq(&xtime_lock); 225 return do_arch_gettimeoffset();
282 ret = bus_do_settimeofday(tv);
283 write_sequnlock_irq(&xtime_lock);
284 clock_was_set();
285 return ret;
286} 226}
287 227
288EXPORT_SYMBOL(do_settimeofday); 228static void __init sbus_time_init(void)
289
290static int sbus_do_settimeofday(struct timespec *tv)
291{ 229{
292 time_t wtm_sec, sec = tv->tv_sec; 230 do_arch_gettimeoffset = sbus_do_gettimeoffset;
293 long wtm_nsec, nsec = tv->tv_nsec;
294 231
295 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) 232 btfixup();
296 return -EINVAL;
297
298 /*
299 * This is revolting. We need to set "xtime" correctly. However, the
300 * value in this location is the value at the most recent update of
301 * wall time. Discover what correction gettimeofday() would have
302 * made, and then undo it!
303 */
304 nsec -= 1000 * do_gettimeoffset();
305
306 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
307 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
308 233
309 set_normalized_timespec(&xtime, sec, nsec); 234 sparc_init_timers(timer_interrupt);
310 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 235}
311 236
312 ntp_clear(); 237void __init time_init(void)
313 return 0; 238{
239#ifdef CONFIG_PCI
240 extern void pci_time_init(void);
241 if (pcic_present()) {
242 pci_time_init();
243 return;
244 }
245#endif
246 sbus_time_init();
314} 247}
315 248
249
316static int set_rtc_mmss(unsigned long secs) 250static int set_rtc_mmss(unsigned long secs)
317{ 251{
318 struct rtc_device *rtc = rtc_class_open("rtc0"); 252 struct rtc_device *rtc = rtc_class_open("rtc0");
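
Note: time_32.c now mirrors the pcic.c change: the private do_gettimeofday()/do_settimeofday() implementations and the BTFIXUP settimeofday hook are deleted, and each bus type simply installs its offset routine in the do_arch_gettimeoffset function pointer, with arch_gettimeoffset() returning 0 until something has registered. A standalone sketch of that hook pattern:

/* Function-pointer hook: the bus probe fills it in, the generic wrapper
 * falls back to 0 ns when nothing is registered yet.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t (*do_arch_gettimeoffset)(void);

static uint32_t sbus_do_gettimeoffset(void)
{
    return 123456;                      /* pretend: ns since the last tick */
}

static uint32_t arch_gettimeoffset(void)
{
    if (!do_arch_gettimeoffset)         /* nothing registered yet */
        return 0;
    return do_arch_gettimeoffset();
}

int main(void)
{
    printf("before init: %u ns\n", arch_gettimeoffset());
    do_arch_gettimeoffset = sbus_do_gettimeoffset;   /* what sbus_time_init() does */
    printf("after init:  %u ns\n", arch_gettimeoffset());
    return 0;
}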
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index 8c91d9b29a2f..db15d123f054 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -191,10 +191,12 @@ tsb_dtlb_load:
191 191
192tsb_itlb_load: 192tsb_itlb_load:
193 /* Executable bit must be set. */ 193 /* Executable bit must be set. */
194661: andcc %g5, _PAGE_EXEC_4U, %g0 194661: sethi %hi(_PAGE_EXEC_4U), %g4
195 .section .sun4v_1insn_patch, "ax" 195 andcc %g5, %g4, %g0
196 .section .sun4v_2insn_patch, "ax"
196 .word 661b 197 .word 661b
197 andcc %g5, _PAGE_EXEC_4V, %g0 198 andcc %g5, _PAGE_EXEC_4V, %g0
199 nop
198 .previous 200 .previous
199 201
200 be,pn %xcc, tsb_do_fault 202 be,pn %xcc, tsb_do_fault