author    David S. Miller <davem@davemloft.net>  2010-05-19 02:01:55 -0400
committer David S. Miller <davem@davemloft.net>  2010-05-19 02:01:55 -0400
commit    2ec8c6bb5d8f3a62a79f463525054bae1e3d4487
tree      fa7f8400ac685fb52e96f64997c7c682fc2aa021  /arch/powerpc
parent    7b39f90fabcf9e2af0cd79d0a60440d821e22b56
parent    537b60d17894b7c19a6060feae40299d7109d6e7
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Conflicts:
        include/linux/mod_devicetable.h
        scripts/mod/file2alias.c
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h  |  38
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c  |   1
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c  |   5
-rw-r--r--  arch/powerpc/kernel/entry_64.S     |   9
-rw-r--r--  arch/powerpc/kernel/irq.c          |   6
-rw-r--r--  arch/powerpc/kernel/perf_event.c   | 129
-rw-r--r--  arch/powerpc/kernel/time.c         |  60
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c         |   2

8 files changed, 120 insertions(+), 130 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 9f4c9d4f5803..bd100fcf40d0 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -130,43 +130,5 @@ static inline int irqs_disabled_flags(unsigned long flags)
  */
 struct irq_chip;
 
-#ifdef CONFIG_PERF_EVENTS
-
-#ifdef CONFIG_PPC64
-static inline unsigned long test_perf_event_pending(void)
-{
-        unsigned long x;
-
-        asm volatile("lbz %0,%1(13)"
-                : "=r" (x)
-                : "i" (offsetof(struct paca_struct, perf_event_pending)));
-        return x;
-}
-
-static inline void set_perf_event_pending(void)
-{
-        asm volatile("stb %0,%1(13)" : :
-                "r" (1),
-                "i" (offsetof(struct paca_struct, perf_event_pending)));
-}
-
-static inline void clear_perf_event_pending(void)
-{
-        asm volatile("stb %0,%1(13)" : :
-                "r" (0),
-                "i" (offsetof(struct paca_struct, perf_event_pending)));
-}
-#endif /* CONFIG_PPC64 */
-
-#else  /* CONFIG_PERF_EVENTS */
-
-static inline unsigned long test_perf_event_pending(void)
-{
-        return 0;
-}
-
-static inline void clear_perf_event_pending(void) {}
-#endif /* CONFIG_PERF_EVENTS */
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_HW_IRQ_H */
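
Note: the helpers removed above sit behind register r13, which on 64-bit PowerPC always holds the current CPU's paca pointer, so the lbz/stb instructions are simply byte accesses to paca->perf_event_pending at a compile-time offset. A rough C equivalent is sketched below for orientation only; the *_sketch names are invented and this is not how the kernel spells it.

/* Rough C equivalent of the removed inline asm (illustration only).
 * On ppc64, r13 points at the current CPU's struct paca_struct, so
 * "lbz %0,offset(13)" / "stb %0,offset(13)" read and write the
 * perf_event_pending byte directly.  Assumes <asm/paca.h> for get_paca().
 */
static inline unsigned long test_perf_event_pending_sketch(void)
{
        return get_paca()->perf_event_pending;  /* lbz rX,offset(r13) */
}

static inline void set_perf_event_pending_sketch(void)
{
        get_paca()->perf_event_pending = 1;     /* stb rX,offset(r13) */
}
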
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 957ceb7059c5..c09138d150d4 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -133,7 +133,6 @@ int main(void)
         DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
         DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
         DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
-        DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending));
         DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
 #ifdef CONFIG_PPC_MM_SLICES
         DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
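
Note: PACAPERFPEND existed only so the assembly in entry_64.S could test the flag; with that test removed (see the entry_64.S hunk below) the constant can go too. For context, asm-offsets.c turns each DEFINE() into an assembler-visible constant roughly as sketched below; the macro is reproduced from memory (the real one lives in include/linux/kbuild.h) and the offset value 16 is an arbitrary example.

/* Sketch of the asm-offsets mechanism.  The compiler emits each value as a
 * marker line in the generated assembly, which the build then converts into
 * #define lines in asm-offsets.h for .S files such as entry_64.S.
 */
#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

/* The removed line would have produced a marker like
 *   ->PACAPERFPEND 16 offsetof(struct paca_struct, perf_event_pending)
 * (16 being an arbitrary example offset), which becomes
 *   #define PACAPERFPEND 16
 * and is what the deleted "lbz r3,PACAPERFPEND(r13)" in entry_64.S used.
 */
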
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 59c928564a03..4ff4da2c238b 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -1,7 +1,8 @@
 /*
  * Contains routines needed to support swiotlb for ppc.
  *
- * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor
+ * Copyright (C) 2009-2010 Freescale Semiconductor, Inc.
+ * Author: Becky Bruce
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -70,7 +71,7 @@ static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
         sd->max_direct_dma_addr = 0;
 
         /* May need to bounce if the device can't address all of DRAM */
-        if (dma_get_mask(dev) < lmb_end_of_DRAM())
+        if ((dma_get_mask(dev) + 1) < lmb_end_of_DRAM())
                 set_dma_ops(dev, &swiotlb_dma_ops);
 
         return NOTIFY_DONE;
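
Note: this hunk fixes a boundary comparison. dma_get_mask() returns the highest byte address the device can reach (e.g. 0xffffffff for a 32-bit device), while lmb_end_of_DRAM() returns the first address past the end of memory, so comparing them directly forced swiotlb bouncing even when the device could address every byte of DRAM. A standalone illustration with made-up values (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t dma_mask    = 0xffffffffULL;   /* 32-bit capable device      */
        uint64_t end_of_dram = 0x100000000ULL;  /* DRAM ends exactly at 4 GiB */

        /* The mask covers bytes 0..0xffffffff, i.e. all of DRAM here. */
        int old_check = dma_mask < end_of_dram;        /* 1: bounces needlessly */
        int new_check = (dma_mask + 1) < end_of_dram;  /* 0: no bounce needed   */

        printf("old: %d  new: %d\n", old_check, new_check);
        return 0;
}
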
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 07109d843787..42e9d908914a 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -556,15 +556,6 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
 2:
         TRACE_AND_RESTORE_IRQ(r5);
 
-#ifdef CONFIG_PERF_EVENTS
-        /* check paca->perf_event_pending if we're enabling ints */
-        lbz     r3,PACAPERFPEND(r13)
-        and.    r3,r3,r5
-        beq     27f
-        bl      .perf_event_do_pending
-27:
-#endif /* CONFIG_PERF_EVENTS */
-
         /* extract EE bit and use it to restore paca->hard_enabled */
         ld      r3,_MSR(r1)
         rldicl  r4,r3,49,63             /* r0 = (r3 >> 15) & 1 */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 64f6f2031c22..066bd31551d5 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -53,7 +53,6 @@
 #include <linux/bootmem.h>
 #include <linux/pci.h>
 #include <linux/debugfs.h>
-#include <linux/perf_event.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -145,11 +144,6 @@ notrace void raw_local_irq_restore(unsigned long en)
         }
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
-        if (test_perf_event_pending()) {
-                clear_perf_event_pending();
-                perf_event_do_pending();
-        }
-
         /*
          * if (get_paca()->hard_enabled) return;
          * But again we need to take care that gcc gets hard_enabled directly
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 08460a2e9f41..43b83c35cf54 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -35,6 +35,9 @@ struct cpu_hw_events {
         u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
         unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
         unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
+
+        unsigned int group_flag;
+        int n_txn_start;
 };
 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 
@@ -718,66 +721,6 @@ static int collect_events(struct perf_event *group, int max_count,
         return n;
 }
 
-static void event_sched_in(struct perf_event *event)
-{
-        event->state = PERF_EVENT_STATE_ACTIVE;
-        event->oncpu = smp_processor_id();
-        event->tstamp_running += event->ctx->time - event->tstamp_stopped;
-        if (is_software_event(event))
-                event->pmu->enable(event);
-}
-
-/*
- * Called to enable a whole group of events.
- * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
- * Assumes the caller has disabled interrupts and has
- * frozen the PMU with hw_perf_save_disable.
- */
-int hw_perf_group_sched_in(struct perf_event *group_leader,
-               struct perf_cpu_context *cpuctx,
-               struct perf_event_context *ctx)
-{
-        struct cpu_hw_events *cpuhw;
-        long i, n, n0;
-        struct perf_event *sub;
-
-        if (!ppmu)
-                return 0;
-        cpuhw = &__get_cpu_var(cpu_hw_events);
-        n0 = cpuhw->n_events;
-        n = collect_events(group_leader, ppmu->n_counter - n0,
-                           &cpuhw->event[n0], &cpuhw->events[n0],
-                           &cpuhw->flags[n0]);
-        if (n < 0)
-                return -EAGAIN;
-        if (check_excludes(cpuhw->event, cpuhw->flags, n0, n))
-                return -EAGAIN;
-        i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0);
-        if (i < 0)
-                return -EAGAIN;
-        cpuhw->n_events = n0 + n;
-        cpuhw->n_added += n;
-
-        /*
-         * OK, this group can go on; update event states etc.,
-         * and enable any software events
-         */
-        for (i = n0; i < n0 + n; ++i)
-                cpuhw->event[i]->hw.config = cpuhw->events[i];
-        cpuctx->active_oncpu += n;
-        n = 1;
-        event_sched_in(group_leader);
-        list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
-                if (sub->state != PERF_EVENT_STATE_OFF) {
-                        event_sched_in(sub);
-                        ++n;
-                }
-        }
-        ctx->nr_active += n;
-
-        return 1;
-}
-
 /*
  * Add a event to the PMU.
  * If all events are not already frozen, then we disable and
@@ -805,12 +748,22 @@ static int power_pmu_enable(struct perf_event *event)
         cpuhw->event[n0] = event;
         cpuhw->events[n0] = event->hw.config;
         cpuhw->flags[n0] = event->hw.event_base;
+
+        /*
+         * If a group events scheduling transaction was started,
+         * skip the schedulability test here; it will be performed
+         * at commit time (->commit_txn) as a whole.
+         */
+        if (cpuhw->group_flag & PERF_EVENT_TXN_STARTED)
+                goto nocheck;
+
         if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
                 goto out;
         if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
                 goto out;
-
         event->hw.config = cpuhw->events[n0];
+
+nocheck:
         ++cpuhw->n_events;
         ++cpuhw->n_added;
 
@@ -896,11 +849,65 @@ static void power_pmu_unthrottle(struct perf_event *event)
         local_irq_restore(flags);
 }
 
+/*
+ * Start group events scheduling transaction
+ * Set the flag to make pmu::enable() not perform the
+ * schedulability test, it will be performed at commit time
+ */
+void power_pmu_start_txn(const struct pmu *pmu)
+{
+        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+        cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+        cpuhw->n_txn_start = cpuhw->n_events;
+}
+
+/*
+ * Stop group events scheduling transaction
+ * Clear the flag and pmu::enable() will perform the
+ * schedulability test.
+ */
+void power_pmu_cancel_txn(const struct pmu *pmu)
+{
+        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+        cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Commit group events scheduling transaction
+ * Perform the group schedulability test as a whole
+ * Return 0 if success
+ */
+int power_pmu_commit_txn(const struct pmu *pmu)
+{
+        struct cpu_hw_events *cpuhw;
+        long i, n;
+
+        if (!ppmu)
+                return -EAGAIN;
+        cpuhw = &__get_cpu_var(cpu_hw_events);
+        n = cpuhw->n_events;
+        if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
+                return -EAGAIN;
+        i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
+        if (i < 0)
+                return -EAGAIN;
+
+        for (i = cpuhw->n_txn_start; i < n; ++i)
+                cpuhw->event[i]->hw.config = cpuhw->events[i];
+
+        return 0;
+}
+
 struct pmu power_pmu = {
         .enable         = power_pmu_enable,
         .disable        = power_pmu_disable,
         .read           = power_pmu_read,
         .unthrottle     = power_pmu_unthrottle,
+        .start_txn      = power_pmu_start_txn,
+        .cancel_txn     = power_pmu_cancel_txn,
+        .commit_txn     = power_pmu_commit_txn,
 };
 
 /*
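
Note: the three new callbacks let the perf core add a whole event group in one transaction: after start_txn(), each ->enable() call skips the per-event constraint check (the nocheck path added above), and commit_txn() then runs check_excludes()/power_check_constraints() once over the whole set, replacing the removed hw_perf_group_sched_in(). The caller-side sketch below shows the intended sequence only; group_add_sketch() and its error handling are invented for illustration and are not the perf core's actual code.

/* Hypothetical caller-side sketch (not real kernel code) of the sequence
 * the txn callbacks are designed for: per-event ->enable() skips the
 * schedulability test while the transaction is open, and commit_txn()
 * performs it once for the whole group.
 */
static int group_add_sketch(struct pmu *pmu, struct perf_event *leader)
{
        struct perf_event *sub;

        pmu->start_txn(pmu);                    /* sets PERF_EVENT_TXN_STARTED  */

        if (pmu->enable(leader))                /* no schedulability test yet   */
                goto fail;
        list_for_each_entry(sub, &leader->sibling_list, group_entry)
                if (pmu->enable(sub))
                        goto fail;

        if (!pmu->commit_txn(pmu))              /* one test for the whole group */
                return 0;
fail:
        pmu->cancel_txn(pmu);                   /* roll the flag back           */
        return -EAGAIN;
}
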
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 1b16b9a3e49a..0441bbdadbd1 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -532,25 +532,60 @@ void __init iSeries_time_init_early(void)
 }
 #endif /* CONFIG_PPC_ISERIES */
 
-#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32)
-DEFINE_PER_CPU(u8, perf_event_pending);
+#ifdef CONFIG_PERF_EVENTS
 
-void set_perf_event_pending(void)
+/*
+ * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
+ */
+#ifdef CONFIG_PPC64
+static inline unsigned long test_perf_event_pending(void)
 {
-        get_cpu_var(perf_event_pending) = 1;
-        set_dec(1);
-        put_cpu_var(perf_event_pending);
+        unsigned long x;
+
+        asm volatile("lbz %0,%1(13)"
+                : "=r" (x)
+                : "i" (offsetof(struct paca_struct, perf_event_pending)));
+        return x;
 }
 
+static inline void set_perf_event_pending_flag(void)
+{
+        asm volatile("stb %0,%1(13)" : :
+                "r" (1),
+                "i" (offsetof(struct paca_struct, perf_event_pending)));
+}
+
+static inline void clear_perf_event_pending(void)
+{
+        asm volatile("stb %0,%1(13)" : :
+                "r" (0),
+                "i" (offsetof(struct paca_struct, perf_event_pending)));
+}
+
+#else /* 32-bit */
+
+DEFINE_PER_CPU(u8, perf_event_pending);
+
+#define set_perf_event_pending_flag()   __get_cpu_var(perf_event_pending) = 1
 #define test_perf_event_pending()       __get_cpu_var(perf_event_pending)
 #define clear_perf_event_pending()      __get_cpu_var(perf_event_pending) = 0
 
-#else  /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
+#endif /* 32 vs 64 bit */
+
+void set_perf_event_pending(void)
+{
+        preempt_disable();
+        set_perf_event_pending_flag();
+        set_dec(1);
+        preempt_enable();
+}
+
+#else  /* CONFIG_PERF_EVENTS */
 
 #define test_perf_event_pending()       0
 #define clear_perf_event_pending()
 
-#endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
+#endif /* CONFIG_PERF_EVENTS */
 
 /*
  * For iSeries shared processors, we have to let the hypervisor
@@ -582,10 +617,6 @@ void timer_interrupt(struct pt_regs * regs)
         set_dec(DECREMENTER_MAX);
 
 #ifdef CONFIG_PPC32
-        if (test_perf_event_pending()) {
-                clear_perf_event_pending();
-                perf_event_do_pending();
-        }
         if (atomic_read(&ppc_n_lost_interrupts) != 0)
                 do_IRQ(regs);
 #endif
@@ -604,6 +635,11 @@ void timer_interrupt(struct pt_regs * regs)
 
         calculate_steal_time();
 
+        if (test_perf_event_pending()) {
+                clear_perf_event_pending();
+                perf_event_do_pending();
+        }
+
 #ifdef CONFIG_PPC_ISERIES
         if (firmware_has_feature(FW_FEATURE_ISERIES))
                 get_lppaca()->int_dword.fields.decr_int = 0;
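
Note: taken together, the time.c hunks give this flow: a PMU overflow handler calls set_perf_event_pending(), which records the flag for the current CPU and arms the decrementer with a 1-tick value (preemption is disabled so the flag and the decrementer poke land on the same CPU); the resulting timer_interrupt() then tests and clears the flag and calls perf_event_do_pending(), on both 32- and 64-bit, instead of running that work from raw_local_irq_restore() as the removed irq.c code did. Sketch of the sequence, using only names from the patch:

  PMU overflow handler                    next decrementer tick
  --------------------                    ---------------------
  set_perf_event_pending()                timer_interrupt()
      preempt_disable()                       ...
      set_perf_event_pending_flag()           if (test_perf_event_pending()) {
      set_dec(1)          ------------>           clear_perf_event_pending();
      preempt_enable()                            perf_event_do_pending();
                                              }
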
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 2570fcc7665d..812312542e50 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -440,7 +440,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
         unsigned int gtlb_index;
 
         gtlb_index = kvmppc_get_gpr(vcpu, ra);
-        if (gtlb_index > KVM44x_GUEST_TLB_SIZE) {
+        if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
                 printk("%s: index %d\n", __func__, gtlb_index);
                 kvmppc_dump_vcpu(vcpu);
                 return EMULATE_FAIL;
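
Note: the guest TLB index check is a classic off-by-one. With an array of KVM44x_GUEST_TLB_SIZE entries, valid indices are 0 .. SIZE-1, so index == SIZE must also be rejected, which only >= does. Standalone illustration (the size value 4 is an arbitrary stand-in, not the real constant):

#include <stdio.h>

#define GUEST_TLB_SIZE 4        /* valid indices are 0..3 */

static int ok_old(unsigned int i) { return !(i >  GUEST_TLB_SIZE); }  /* buggy */
static int ok_new(unsigned int i) { return !(i >= GUEST_TLB_SIZE); }  /* fixed */

int main(void)
{
        unsigned int i = GUEST_TLB_SIZE;        /* first out-of-range index */

        /* old check accepts i (=1) and would write one entry past the array;
         * new check rejects it (=0). */
        printf("index %u: old accepts=%d, new accepts=%d\n",
               i, ok_old(i), ok_new(i));
        return 0;
}
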