Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h                                                        22
-rw-r--r--  arch/powerpc/include/asm/paca.h                                                           2
-rw-r--r--  arch/powerpc/include/asm/perf_event.h (renamed from arch/powerpc/include/asm/perf_counter.h)  26
-rw-r--r--  arch/powerpc/include/asm/systbl.h                                                         2
-rw-r--r--  arch/powerpc/include/asm/unistd.h                                                         2
5 files changed, 27 insertions, 27 deletions
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index e73d554538dd..abbc2aaaced5 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -135,43 +135,43 @@ static inline int irqs_disabled_flags(unsigned long flags)
  */
 struct irq_chip;
 
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
 
 #ifdef CONFIG_PPC64
-static inline unsigned long test_perf_counter_pending(void)
+static inline unsigned long test_perf_event_pending(void)
 {
 	unsigned long x;
 
 	asm volatile("lbz %0,%1(13)"
 		: "=r" (x)
-		: "i" (offsetof(struct paca_struct, perf_counter_pending)));
+		: "i" (offsetof(struct paca_struct, perf_event_pending)));
 	return x;
 }
 
-static inline void set_perf_counter_pending(void)
+static inline void set_perf_event_pending(void)
 {
 	asm volatile("stb %0,%1(13)" : :
 		"r" (1),
-		"i" (offsetof(struct paca_struct, perf_counter_pending)));
+		"i" (offsetof(struct paca_struct, perf_event_pending)));
 }
 
-static inline void clear_perf_counter_pending(void)
+static inline void clear_perf_event_pending(void)
 {
 	asm volatile("stb %0,%1(13)" : :
 		"r" (0),
-		"i" (offsetof(struct paca_struct, perf_counter_pending)));
+		"i" (offsetof(struct paca_struct, perf_event_pending)));
 }
 #endif /* CONFIG_PPC64 */
 
-#else /* CONFIG_PERF_COUNTERS */
+#else /* CONFIG_PERF_EVENTS */
 
-static inline unsigned long test_perf_counter_pending(void)
+static inline unsigned long test_perf_event_pending(void)
 {
 	return 0;
 }
 
-static inline void clear_perf_counter_pending(void) {}
-#endif /* CONFIG_PERF_COUNTERS */
+static inline void clear_perf_event_pending(void) {}
+#endif /* CONFIG_PERF_EVENTS */
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_HW_IRQ_H */
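
As context for the rename above, here is a minimal sketch of how these paca-based helpers are typically consumed on the interrupt path. It is not part of this patch: the example_* wrapper name is invented, and perf_event_do_pending() is assumed to be the generic entry point that replaces perf_counter_do_pending() in the same rename.

/* Hypothetical caller, assuming <asm/hw_irq.h> and the generic perf code. */
static inline void example_run_pending_perf_work(void)
{
	if (test_perf_event_pending()) {	/* lbz of the paca byte via r13 */
		clear_perf_event_pending();	/* stb 0 back into the paca */
		perf_event_do_pending();	/* assumed generic helper */
	}
}
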
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index b634456ea893..154f405b642f 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -122,7 +122,7 @@ struct paca_struct {
 	u8 soft_enabled;		/* irq soft-enable flag */
 	u8 hard_enabled;		/* set if irqs are enabled in MSR */
 	u8 io_sync;			/* writel() needs spin_unlock sync */
-	u8 perf_counter_pending;	/* PM interrupt while soft-disabled */
+	u8 perf_event_pending;		/* PM interrupt while soft-disabled */
 
 	/* Stuff for accurate time accounting */
 	u64 user_time;			/* accumulated usermode TB ticks */
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_event.h
index 0ea0639fcf75..2499aaadaeb9 100644
--- a/arch/powerpc/include/asm/perf_counter.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -1,5 +1,5 @@
 /*
- * Performance counter support - PowerPC-specific definitions.
+ * Performance event support - PowerPC-specific definitions.
  *
  * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
  *
@@ -12,9 +12,9 @@
 
 #include <asm/hw_irq.h>
 
-#define MAX_HWCOUNTERS		8
+#define MAX_HWEVENTS		8
 #define MAX_EVENT_ALTERNATIVES	8
-#define MAX_LIMITED_HWCOUNTERS	2
+#define MAX_LIMITED_HWEVENTS	2
 
 /*
  * This struct provides the constants and functions needed to
@@ -22,18 +22,18 @@
  */
 struct power_pmu {
 	const char	*name;
-	int		n_counter;
+	int		n_event;
 	int		max_alternatives;
 	unsigned long	add_fields;
 	unsigned long	test_adder;
 	int		(*compute_mmcr)(u64 events[], int n_ev,
 				unsigned int hwc[], unsigned long mmcr[]);
-	int		(*get_constraint)(u64 event, unsigned long *mskp,
+	int		(*get_constraint)(u64 event_id, unsigned long *mskp,
 				unsigned long *valp);
-	int		(*get_alternatives)(u64 event, unsigned int flags,
+	int		(*get_alternatives)(u64 event_id, unsigned int flags,
 				u64 alt[]);
 	void		(*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
-	int		(*limited_pmc_event)(u64 event);
+	int		(*limited_pmc_event)(u64 event_id);
 	u32		flags;
 	int		n_generic;
 	int		*generic_events;
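
For readers unfamiliar with this interface, a hedged sketch of how a CPU backend might fill in the renamed struct. The callback bodies, the example_* names, and the field values are illustrative assumptions, not taken from this patch; only the field names and callback signatures come from the hunk above.

/* Assumes <linux/types.h> and <asm/perf_event.h>. */
static int example_compute_mmcr(u64 event_id[], int n_ev,
				unsigned int hwc[], unsigned long mmcr[])
{
	return 0;			/* a real backend programs MMCR0/MMCR1 here */
}

static int example_get_constraint(u64 event_id, unsigned long *mskp,
				  unsigned long *valp)
{
	*mskp = 0;			/* no scheduling constraints */
	*valp = 0;
	return 0;
}

static int example_get_alternatives(u64 event_id, unsigned int flags, u64 alt[])
{
	alt[0] = event_id;		/* no alternative encodings */
	return 1;
}

static void example_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
}

static struct power_pmu example_pmu = {
	.name			= "EXAMPLE",
	.n_event		= 6,	/* renamed from n_counter by this patch */
	.max_alternatives	= MAX_EVENT_ALTERNATIVES,
	.compute_mmcr		= example_compute_mmcr,
	.get_constraint		= example_get_constraint,
	.get_alternatives	= example_get_alternatives,
	.disable_pmc		= example_disable_pmc,
};
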
@@ -61,10 +61,10 @@ struct pt_regs;
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
-#define PERF_COUNTER_INDEX_OFFSET	1
+#define PERF_EVENT_INDEX_OFFSET		1
 
 /*
- * Only override the default definitions in include/linux/perf_counter.h
+ * Only override the default definitions in include/linux/perf_event.h
  * if we have hardware PMU support.
  */
 #ifdef CONFIG_PPC_PERF_CTRS
@@ -73,14 +73,14 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
 /*
  * The power_pmu.get_constraint function returns a 32/64-bit value and
- * a 32/64-bit mask that express the constraints between this event and
+ * a 32/64-bit mask that express the constraints between this event_id and
  * other events.
  *
  * The value and mask are divided up into (non-overlapping) bitfields
  * of three different types:
  *
  * Select field: this expresses the constraint that some set of bits
- * in MMCR* needs to be set to a specific value for this event. For a
+ * in MMCR* needs to be set to a specific value for this event_id. For a
  * select field, the mask contains 1s in every bit of the field, and
  * the value contains a unique value for each possible setting of the
  * MMCR* bits. The constraint checking code will ensure that two events
@@ -102,9 +102,9 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
  * possible.) For N classes, the field is N+1 bits wide, and each class
  * is assigned one bit from the least-significant N bits. The mask has
  * only the most-significant bit set, and the value has only the bit
- * for the event's class set. The test_adder has the least significant
+ * for the event_id's class set. The test_adder has the least significant
  * bit set in the field.
  *
- * If an event is not subject to the constraint expressed by a particular
+ * If an event_id is not subject to the constraint expressed by a particular
  * field, then it will have 0 in both the mask and value for that field.
  */
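
To make the select-field encoding described in that comment concrete, here is a hedged worked example. The 4-bit field, its placement at bits 8-11, and the callback name are invented for illustration; only the convention (mask has 1s across the whole field, value carries the required MMCR* setting) follows the text above.

static int example_select_field_constraint(u64 event_id, unsigned long *mskp,
					   unsigned long *valp)
{
	unsigned long sel = event_id & 0xf;	/* pretend PMCxSEL is the low nibble */

	*mskp = 0xful << 8;	/* select field: every bit of the field set in the mask */
	*valp = sel << 8;	/* unique value for the required MMCR* setting */
	return 0;
}

Two events that both use this field can then only be scheduled together when they require the same MMCR* setting, which is what the select-field rule is meant to enforce.
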
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index ed24bd92fe49..c7d671a7d9a1 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -322,7 +322,7 @@ SYSCALL_SPU(epoll_create1)
 SYSCALL_SPU(dup3)
 SYSCALL_SPU(pipe2)
 SYSCALL(inotify_init1)
-SYSCALL_SPU(perf_counter_open)
+SYSCALL_SPU(perf_event_open)
 COMPAT_SYS_SPU(preadv)
 COMPAT_SYS_SPU(pwritev)
 COMPAT_SYS(rt_tgsigqueueinfo)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index cef080bfc607..f6ca76176766 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -341,7 +341,7 @@
 #define __NR_dup3		316
 #define __NR_pipe2		317
 #define __NR_inotify_init1	318
-#define __NR_perf_counter_open	319
+#define __NR_perf_event_open	319
 #define __NR_preadv		320
 #define __NR_pwritev		321
 #define __NR_rt_tgsigqueueinfo	322
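
The syscall is renamed here rather than renumbered, keeping number 319 on powerpc. A hedged userspace sketch of calling it by number follows; the event choice and error handling are illustrative, and since no libc wrapper existed at the time, syscall(2) is used directly.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_kernel = 1;		/* count user cycles only */

	/* pid = 0 (self), cpu = -1 (any), group_fd = -1, flags = 0 */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... run the code to be measured ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", count);
	close(fd);
	return 0;
}
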