author     Tejun Heo <tj@kernel.org>   2012-11-06 15:26:23 -0500
committer  Tejun Heo <tj@kernel.org>   2012-11-06 15:26:23 -0500
commit     5b805f2a7675634fbdf9ac1c9b2256905ab2ea68 (patch)
tree       ee00d1e3d757458d66209b926d274491c6c3f61c /arch/sparc
parent     1db1e31b1ee3ae126ef98f39083b5f213c7b41bf (diff)
parent     201e72acb2d3821e2de9ce6091e98859c316b29a (diff)
Merge branch 'cgroup/for-3.7-fixes' into cgroup/for-3.8
This is to receive device_cgroup fixes so that further device_cgroup
changes can be made in cgroup/for-3.8.

Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/include/asm/ptrace.h           |  13
-rw-r--r--  arch/sparc/include/asm/smp_64.h           |   2
-rw-r--r--  arch/sparc/include/uapi/asm/sigcontext.h  |   4
-rw-r--r--  arch/sparc/kernel/perf_event.c            |  15
-rw-r--r--  arch/sparc/kernel/process_64.c            | 120
-rw-r--r--  arch/sparc/kernel/smp_64.c                |  11
-rw-r--r--  arch/sparc/mm/ultra.S                     |  64
7 files changed, 200 insertions(+), 29 deletions(-)
diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h
index 0c6f6b068289..da43bdc62294 100644
--- a/arch/sparc/include/asm/ptrace.h
+++ b/arch/sparc/include/asm/ptrace.h
@@ -42,7 +42,18 @@ struct global_reg_snapshot {
         struct thread_info      *thread;
         unsigned long           pad1;
 };
-extern struct global_reg_snapshot global_reg_snapshot[NR_CPUS];
+
+struct global_pmu_snapshot {
+        unsigned long           pcr[4];
+        unsigned long           pic[4];
+};
+
+union global_cpu_snapshot {
+        struct global_reg_snapshot      reg;
+        struct global_pmu_snapshot      pmu;
+};
+
+extern union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
 
 #define force_successful_syscall_return() \
 do {    current_thread_info()->syscall_noerror = 1; \
diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
index 29862a9e9065..dd3bef4b9896 100644
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -48,6 +48,7 @@ extern void smp_fill_in_sib_core_maps(void);
 extern void cpu_play_dead(void);
 
 extern void smp_fetch_global_regs(void);
+extern void smp_fetch_global_pmu(void);
 
 struct seq_file;
 void smp_bogo(struct seq_file *);
@@ -65,6 +66,7 @@ extern void __cpu_die(unsigned int cpu);
 #define hard_smp_processor_id()                 0
 #define smp_fill_in_sib_core_maps()             do { } while (0)
 #define smp_fetch_global_regs()                 do { } while (0)
+#define smp_fetch_global_pmu()                  do { } while (0)
 
 #endif /* !(CONFIG_SMP) */
 
diff --git a/arch/sparc/include/uapi/asm/sigcontext.h b/arch/sparc/include/uapi/asm/sigcontext.h
index e69de29bb2d1..ae5704fa77ad 100644
--- a/arch/sparc/include/uapi/asm/sigcontext.h
+++ b/arch/sparc/include/uapi/asm/sigcontext.h
@@ -0,0 +1,4 @@
+/*
+ * There isn't anything here anymore, but the file must not be empty or patch
+ * will delete it.
+ */
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index e48651dace1b..885a8af74064 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -817,15 +817,17 @@ static u64 nop_for_index(int idx)
 
 static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
 {
-        u64 val, mask = mask_for_index(idx);
+        u64 enc, val, mask = mask_for_index(idx);
         int pcr_index = 0;
 
         if (sparc_pmu->num_pcrs > 1)
                 pcr_index = idx;
 
+        enc = perf_event_get_enc(cpuc->events[idx]);
+
         val = cpuc->pcr[pcr_index];
         val &= ~mask;
-        val |= hwc->config;
+        val |= event_encoding(enc, idx);
         cpuc->pcr[pcr_index] = val;
 
         pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
@@ -1738,8 +1740,6 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 {
         unsigned long ufp;
 
-        perf_callchain_store(entry, regs->tpc);
-
         ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
         do {
                 struct sparc_stackf *usf, sf;
@@ -1760,8 +1760,6 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 {
         unsigned long ufp;
 
-        perf_callchain_store(entry, regs->tpc);
-
         ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
         do {
                 struct sparc_stackf32 *usf, sf;
@@ -1780,6 +1778,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 void
 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
+        perf_callchain_store(entry, regs->tpc);
+
+        if (!current->mm)
+                return;
+
         flushw_user();
         if (test_thread_flag(TIF_32BIT))
                 perf_callchain_user_32(entry, regs);
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index fcaa59421126..d778248ef3f8 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -27,6 +27,7 @@
 #include <linux/tick.h>
 #include <linux/init.h>
 #include <linux/cpu.h>
+#include <linux/perf_event.h>
 #include <linux/elfcore.h>
 #include <linux/sysrq.h>
 #include <linux/nmi.h>
@@ -47,6 +48,7 @@
 #include <asm/syscalls.h>
 #include <asm/irq_regs.h>
 #include <asm/smp.h>
+#include <asm/pcr.h>
 
 #include "kstack.h"
 
@@ -204,18 +206,22 @@ void show_regs(struct pt_regs *regs)
         show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
 }
 
-struct global_reg_snapshot global_reg_snapshot[NR_CPUS];
-static DEFINE_SPINLOCK(global_reg_snapshot_lock);
+union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
+static DEFINE_SPINLOCK(global_cpu_snapshot_lock);
 
 static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
                               int this_cpu)
 {
+        struct global_reg_snapshot *rp;
+
         flushw_all();
 
-        global_reg_snapshot[this_cpu].tstate = regs->tstate;
-        global_reg_snapshot[this_cpu].tpc = regs->tpc;
-        global_reg_snapshot[this_cpu].tnpc = regs->tnpc;
-        global_reg_snapshot[this_cpu].o7 = regs->u_regs[UREG_I7];
+        rp = &global_cpu_snapshot[this_cpu].reg;
+
+        rp->tstate = regs->tstate;
+        rp->tpc = regs->tpc;
+        rp->tnpc = regs->tnpc;
+        rp->o7 = regs->u_regs[UREG_I7];
 
         if (regs->tstate & TSTATE_PRIV) {
                 struct reg_window *rw;
@@ -223,17 +229,17 @@ static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
                 rw = (struct reg_window *)
                         (regs->u_regs[UREG_FP] + STACK_BIAS);
                 if (kstack_valid(tp, (unsigned long) rw)) {
-                        global_reg_snapshot[this_cpu].i7 = rw->ins[7];
+                        rp->i7 = rw->ins[7];
                         rw = (struct reg_window *)
                                 (rw->ins[6] + STACK_BIAS);
                         if (kstack_valid(tp, (unsigned long) rw))
-                                global_reg_snapshot[this_cpu].rpc = rw->ins[7];
+                                rp->rpc = rw->ins[7];
                 }
         } else {
-                global_reg_snapshot[this_cpu].i7 = 0;
-                global_reg_snapshot[this_cpu].rpc = 0;
+                rp->i7 = 0;
+                rp->rpc = 0;
         }
-        global_reg_snapshot[this_cpu].thread = tp;
+        rp->thread = tp;
 }
 
 /* In order to avoid hangs we do not try to synchronize with the
@@ -261,9 +267,9 @@ void arch_trigger_all_cpu_backtrace(void)
         if (!regs)
                 regs = tp->kregs;
 
-        spin_lock_irqsave(&global_reg_snapshot_lock, flags);
+        spin_lock_irqsave(&global_cpu_snapshot_lock, flags);
 
-        memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot));
+        memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
 
         this_cpu = raw_smp_processor_id();
 
@@ -272,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(void)
         smp_fetch_global_regs();
 
         for_each_online_cpu(cpu) {
-                struct global_reg_snapshot *gp = &global_reg_snapshot[cpu];
+                struct global_reg_snapshot *gp = &global_cpu_snapshot[cpu].reg;
 
                 __global_reg_poll(gp);
 
@@ -295,9 +301,9 @@ void arch_trigger_all_cpu_backtrace(void)
                 }
         }
 
-        memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot));
+        memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
 
-        spin_unlock_irqrestore(&global_reg_snapshot_lock, flags);
+        spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
 }
 
 #ifdef CONFIG_MAGIC_SYSRQ
@@ -309,16 +315,90 @@ static void sysrq_handle_globreg(int key)
 
 static struct sysrq_key_op sparc_globalreg_op = {
         .handler        = sysrq_handle_globreg,
-        .help_msg       = "Globalregs",
+        .help_msg       = "global-regs(Y)",
         .action_msg     = "Show Global CPU Regs",
 };
 
-static int __init sparc_globreg_init(void)
+static void __global_pmu_self(int this_cpu)
+{
+        struct global_pmu_snapshot *pp;
+        int i, num;
+
+        pp = &global_cpu_snapshot[this_cpu].pmu;
+
+        num = 1;
+        if (tlb_type == hypervisor &&
+            sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
+                num = 4;
+
+        for (i = 0; i < num; i++) {
+                pp->pcr[i] = pcr_ops->read_pcr(i);
+                pp->pic[i] = pcr_ops->read_pic(i);
+        }
+}
+
+static void __global_pmu_poll(struct global_pmu_snapshot *pp)
+{
+        int limit = 0;
+
+        while (!pp->pcr[0] && ++limit < 100) {
+                barrier();
+                udelay(1);
+        }
+}
+
+static void pmu_snapshot_all_cpus(void)
 {
-        return register_sysrq_key('y', &sparc_globalreg_op);
+        unsigned long flags;
+        int this_cpu, cpu;
+
+        spin_lock_irqsave(&global_cpu_snapshot_lock, flags);
+
+        memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
+
+        this_cpu = raw_smp_processor_id();
+
+        __global_pmu_self(this_cpu);
+
+        smp_fetch_global_pmu();
+
+        for_each_online_cpu(cpu) {
+                struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu;
+
+                __global_pmu_poll(pp);
+
+                printk("%c CPU[%3d]: PCR[%08lx:%08lx:%08lx:%08lx] PIC[%08lx:%08lx:%08lx:%08lx]\n",
+                       (cpu == this_cpu ? '*' : ' '), cpu,
+                       pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
+                       pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
+        }
+
+        memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
+
+        spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
+}
+
+static void sysrq_handle_globpmu(int key)
+{
+        pmu_snapshot_all_cpus();
+}
+
+static struct sysrq_key_op sparc_globalpmu_op = {
+        .handler        = sysrq_handle_globpmu,
+        .help_msg       = "global-pmu(X)",
+        .action_msg     = "Show Global PMU Regs",
+};
+
+static int __init sparc_sysrq_init(void)
+{
+        int ret = register_sysrq_key('y', &sparc_globalreg_op);
+
+        if (!ret)
+                ret = register_sysrq_key('x', &sparc_globalpmu_op);
+        return ret;
 }
 
-core_initcall(sparc_globreg_init);
+core_initcall(sparc_sysrq_init);
 
 #endif
 
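For context only (not part of the patch above): once this lands, the new 'x' sysrq key registered by sparc_sysrq_init() can be exercised from userspace by writing the key character to /proc/sysrq-trigger; the PCR/PIC lines printed by pmu_snapshot_all_cpus() then show up in the kernel log (dmesg). A minimal sketch, assuming a kernel built with CONFIG_MAGIC_SYSRQ and root privileges:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Writing a key character here invokes the matching sysrq handler. */
        int fd = open("/proc/sysrq-trigger", O_WRONLY);

        if (fd < 0) {
                perror("open /proc/sysrq-trigger");
                return 1;
        }
        /* 'x' is the key sparc_sysrq_init() registers for the PMU dump. */
        if (write(fd, "x", 1) != 1) {
                perror("write");
                close(fd);
                return 1;
        }
        close(fd);
        return 0;   /* Read the resulting PCR/PIC dump with dmesg. */
}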
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 781bcb10b8bd..d94b878577b7 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -852,6 +852,8 @@ extern unsigned long xcall_flush_tlb_mm;
 extern unsigned long xcall_flush_tlb_pending;
 extern unsigned long xcall_flush_tlb_kernel_range;
 extern unsigned long xcall_fetch_glob_regs;
+extern unsigned long xcall_fetch_glob_pmu;
+extern unsigned long xcall_fetch_glob_pmu_n4;
 extern unsigned long xcall_receive_signal;
 extern unsigned long xcall_new_mmu_context_version;
 #ifdef CONFIG_KGDB
@@ -1000,6 +1002,15 @@ void smp_fetch_global_regs(void)
         smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
 }
 
+void smp_fetch_global_pmu(void)
+{
+        if (tlb_type == hypervisor &&
+            sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
+                smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
+        else
+                smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
+}
+
 /* We know that the window frames of the user have been flushed
  * to the stack before we get here because all callers of us
  * are flush_tlb_*() routines, and these run after flush_cache_*()
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index 874162a11ceb..f8e13d421fcb 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -481,8 +481,8 @@ xcall_sync_tick:
 
         .globl          xcall_fetch_glob_regs
 xcall_fetch_glob_regs:
-        sethi           %hi(global_reg_snapshot), %g1
-        or              %g1, %lo(global_reg_snapshot), %g1
+        sethi           %hi(global_cpu_snapshot), %g1
+        or              %g1, %lo(global_cpu_snapshot), %g1
         __GET_CPUID(%g2)
         sllx            %g2, 6, %g3
         add             %g1, %g3, %g1
@@ -509,6 +509,66 @@ xcall_fetch_glob_regs:
         stx             %g3, [%g1 + GR_SNAP_THREAD]
         retry
 
+        .globl          xcall_fetch_glob_pmu
+xcall_fetch_glob_pmu:
+        sethi           %hi(global_cpu_snapshot), %g1
+        or              %g1, %lo(global_cpu_snapshot), %g1
+        __GET_CPUID(%g2)
+        sllx            %g2, 6, %g3
+        add             %g1, %g3, %g1
+        rd              %pic, %g7
+        stx             %g7, [%g1 + (4 * 8)]
+        rd              %pcr, %g7
+        stx             %g7, [%g1 + (0 * 8)]
+        retry
+
+        .globl          xcall_fetch_glob_pmu_n4
+xcall_fetch_glob_pmu_n4:
+        sethi           %hi(global_cpu_snapshot), %g1
+        or              %g1, %lo(global_cpu_snapshot), %g1
+        __GET_CPUID(%g2)
+        sllx            %g2, 6, %g3
+        add             %g1, %g3, %g1
+
+        ldxa            [%g0] ASI_PIC, %g7
+        stx             %g7, [%g1 + (4 * 8)]
+        mov             0x08, %g3
+        ldxa            [%g3] ASI_PIC, %g7
+        stx             %g7, [%g1 + (5 * 8)]
+        mov             0x10, %g3
+        ldxa            [%g3] ASI_PIC, %g7
+        stx             %g7, [%g1 + (6 * 8)]
+        mov             0x18, %g3
+        ldxa            [%g3] ASI_PIC, %g7
+        stx             %g7, [%g1 + (7 * 8)]
+
+        mov             %o0, %g2
+        mov             %o1, %g3
+        mov             %o5, %g7
+
+        mov             HV_FAST_VT_GET_PERFREG, %o5
+        mov             3, %o0
+        ta              HV_FAST_TRAP
+        stx             %o1, [%g1 + (3 * 8)]
+        mov             HV_FAST_VT_GET_PERFREG, %o5
+        mov             2, %o0
+        ta              HV_FAST_TRAP
+        stx             %o1, [%g1 + (2 * 8)]
+        mov             HV_FAST_VT_GET_PERFREG, %o5
+        mov             1, %o0
+        ta              HV_FAST_TRAP
+        stx             %o1, [%g1 + (1 * 8)]
+        mov             HV_FAST_VT_GET_PERFREG, %o5
+        mov             0, %o0
+        ta              HV_FAST_TRAP
+        stx             %o1, [%g1 + (0 * 8)]
+
+        mov             %g2, %o0
+        mov             %g3, %o1
+        mov             %g7, %o5
+
+        retry
+
 #ifdef DCACHE_ALIASING_POSSIBLE
         .align          32
         .globl          xcall_flush_dcache_page_cheetah