Diffstat (limited to 'arch')
74 files changed, 1372 insertions, 972 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 8a027f9e339e..825066e29c38 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -5,8 +5,9 @@ config ARM
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAVE_CUSTOM_GPIO_H
 	select ARCH_WANT_IPC_PARSE_VERSION
+	select BUILDTIME_EXTABLE_SORT if MMU
 	select CPU_PM if (SUSPEND || CPU_IDLE)
-	select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN
+	select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN && MMU
 	select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI)
 	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
 	select GENERIC_IRQ_PROBE
@@ -21,6 +22,7 @@ config ARM
 	select HAVE_AOUT
 	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
 	select HAVE_ARCH_KGDB
+	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_BPF_JIT
 	select HAVE_C_RECORDMCOUNT
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c35baf102f6f..45096a1ee0a0 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -32,6 +32,7 @@ KBUILD_DEFCONFIG := versatile_defconfig
 # defines filename extension depending memory management type.
 ifeq ($(CONFIG_MMU),)
 MMUEXT		:= -nommu
+KBUILD_CFLAGS	+= $(call cc-option,-mno-unaligned-access)
 endif
 
 ifeq ($(CONFIG_FRAME_POINTER),y)
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index aa5269984187..36ae03a3f5d1 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -70,6 +70,14 @@ struct gic_chip_data {
 static DEFINE_RAW_SPINLOCK(irq_controller_lock);
 
 /*
+ * The GIC mapping of CPU interfaces does not necessarily match
+ * the logical CPU numbering.  Let's use a mapping as returned
+ * by the GIC itself.
+ */
+#define NR_GIC_CPU_IF 8
+static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
+
+/*
  * Supported arch specific GIC irq extension.
  * Default make them NULL.
  */
@@ -238,11 +246,11 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
 	u32 val, mask, bit;
 
-	if (cpu >= 8 || cpu >= nr_cpu_ids)
+	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
 		return -EINVAL;
 
 	mask = 0xff << shift;
-	bit = 1 << (cpu_logical_map(cpu) + shift);
+	bit = gic_cpu_map[cpu] << shift;
 
 	raw_spin_lock(&irq_controller_lock);
 	val = readl_relaxed(reg) & ~mask;
@@ -349,11 +357,6 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
 	u32 cpumask;
 	unsigned int gic_irqs = gic->gic_irqs;
 	void __iomem *base = gic_data_dist_base(gic);
-	u32 cpu = cpu_logical_map(smp_processor_id());
-
-	cpumask = 1 << cpu;
-	cpumask |= cpumask << 8;
-	cpumask |= cpumask << 16;
 
 	writel_relaxed(0, base + GIC_DIST_CTRL);
 
@@ -366,6 +369,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
 	/*
 	 * Set all global interrupts to this CPU only.
 	 */
+	cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0);
 	for (i = 32; i < gic_irqs; i += 4)
 		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
@@ -389,9 +393,25 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
 {
 	void __iomem *dist_base = gic_data_dist_base(gic);
 	void __iomem *base = gic_data_cpu_base(gic);
+	unsigned int cpu_mask, cpu = smp_processor_id();
 	int i;
 
 	/*
+	 * Get what the GIC says our CPU mask is.
+	 */
+	BUG_ON(cpu >= NR_GIC_CPU_IF);
+	cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0);
+	gic_cpu_map[cpu] = cpu_mask;
+
+	/*
+	 * Clear our mask from the other map entries in case they're
+	 * still undefined.
+	 */
+	for (i = 0; i < NR_GIC_CPU_IF; i++)
+		if (i != cpu)
+			gic_cpu_map[i] &= ~cpu_mask;
+
+	/*
 	 * Deal with the banked PPI and SGI interrupts - disable all
 	 * PPI interrupts, ensure all SGI interrupts are enabled.
 	 */
@@ -646,7 +666,7 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
 {
 	irq_hw_number_t hwirq_base;
 	struct gic_chip_data *gic;
-	int gic_irqs, irq_base;
+	int gic_irqs, irq_base, i;
 
 	BUG_ON(gic_nr >= MAX_GIC_NR);
 
@@ -683,6 +703,13 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
 	}
 
 	/*
+	 * Initialize the CPU interface map to all CPUs.
+	 * It will be refined as each CPU probes its ID.
+	 */
+	for (i = 0; i < NR_GIC_CPU_IF; i++)
+		gic_cpu_map[i] = 0xff;
+
+	/*
 	 * For primary GICs, skip over SGIs.
 	 * For secondary GICs, skip over PPIs, too.
 	 */
@@ -737,7 +764,7 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 
 	/* Convert our logical CPU mask into a physical one. */
 	for_each_cpu(cpu, mask)
-		map |= 1 << cpu_logical_map(cpu);
+		map |= gic_cpu_map[cpu];
 
 	/*
 	 * Ensure that stores to Normal memory are visible to the
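The thrust of the gic.c change above: the target byte programmed into GIC_DIST_TARGET and used for SGIs is no longer derived from cpu_logical_map(), but from whatever mask the distributor itself reports for the booting CPU. A minimal stand-alone sketch of that idea (plain C, not kernel code; helper names are invented, the 8-bit mask width and gic_cpu_map name come from the hunks above):

	#include <stdint.h>

	#define NR_GIC_CPU_IF 8

	static uint8_t gic_cpu_map[NR_GIC_CPU_IF];	/* logical cpu -> GIC target mask */

	/* Per-CPU init: record the mask GIC_DIST_TARGET reports for this CPU. */
	static void record_gic_mask(unsigned int cpu, uint8_t dist_target_byte)
	{
		gic_cpu_map[cpu] = dist_target_byte;

		/* Clear our bit from the other, still provisional, entries. */
		for (unsigned int i = 0; i < NR_GIC_CPU_IF; i++)
			if (i != cpu)
				gic_cpu_map[i] &= ~dist_target_byte;
	}

	/* SGI and affinity masks are then built from the table ... */
	static uint8_t target_mask(const unsigned int *cpus, unsigned int ncpus)
	{
		uint8_t map = 0;

		for (unsigned int i = 0; i < ncpus; i++)
			map |= gic_cpu_map[cpus[i]];	/* ... not from 1 << cpu_logical_map(cpu) */
		return map;
	}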
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index e0d538803cc3..e4df17ca90c7 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -218,7 +218,7 @@ static void __init vic_register(void __iomem *base, unsigned int irq,
 	v->resume_sources = resume_sources;
 	v->irq = irq;
 	vic_id++;
-	v->domain = irq_domain_add_legacy(node, fls(valid_sources), irq, 0,
+	v->domain = irq_domain_add_simple(node, fls(valid_sources), irq,
 					  &vic_irqdomain_ops, v);
 }
 
@@ -350,7 +350,7 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
 	vic_register(base, irq_start, vic_sources, 0, node);
 }
 
-void __init __vic_init(void __iomem *base, unsigned int irq_start,
+void __init __vic_init(void __iomem *base, int irq_start,
 		       u32 vic_sources, u32 resume_sources,
 		       struct device_node *node)
 {
@@ -407,7 +407,6 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
 int __init vic_of_init(struct device_node *node, struct device_node *parent)
 {
 	void __iomem *regs;
-	int irq_base;
 
 	if (WARN(parent, "non-root VICs are not supported"))
 		return -EINVAL;
@@ -416,18 +415,12 @@ int __init vic_of_init(struct device_node *node, struct device_node *parent)
 	if (WARN_ON(!regs))
 		return -EIO;
 
-	irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
-	if (WARN_ON(irq_base < 0))
-		goto out_unmap;
-
-	__vic_init(regs, irq_base, ~0, ~0, node);
+	/*
+	 * Passing -1 as first IRQ makes the simple domain allocate descriptors
+	 */
+	__vic_init(regs, -1, ~0, ~0, node);
 
 	return 0;
-
-out_unmap:
-	iounmap(regs);
-
-	return -EIO;
 }
 #endif /* CONFIG OF */
 
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 514e398f1a07..d3db39860b9c 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -16,7 +16,6 @@ generic-y += local64.h
 generic-y += msgbuf.h
 generic-y += param.h
 generic-y += parport.h
-generic-y += percpu.h
 generic-y += poll.h
 generic-y += resource.h
 generic-y += sections.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 2ef95813fce0..eb87200aa4b5 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -250,6 +250,7 @@
  * Beware, it also clobers LR.
  */
 .macro safe_svcmode_maskall reg:req
+#if __LINUX_ARM_ARCH__ >= 6
 	mrs	\reg , cpsr
 	mov	lr , \reg
 	and	lr , lr , #MODE_MASK
@@ -266,6 +267,13 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 	__ERET
 1:	msr	cpsr_c, \reg
 2:
+#else
+/*
+ * workaround for possibly broken pre-v6 hardware
+ * (akita, Sharp Zaurus C-1000, PXA270-based)
+ */
+	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
+#endif
 .endm
 
 /*
diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h
index d797223b39d5..2744f0602550 100644
--- a/arch/arm/include/asm/cpu.h
+++ b/arch/arm/include/asm/cpu.h
@@ -15,6 +15,7 @@
 
 struct cpuinfo_arm {
 	struct cpu	cpu;
+	u32		cpuid;
 #ifdef CONFIG_SMP
 	unsigned int	loops_per_jiffy;
 #endif
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index cb47d28cbe1f..a59dcb5ab5fc 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -25,6 +25,19 @@
 #define CPUID_EXT_ISAR4	"c2, 4"
 #define CPUID_EXT_ISAR5	"c2, 5"
 
+#define MPIDR_SMP_BITMASK (0x3 << 30)
+#define MPIDR_SMP_VALUE (0x2 << 30)
+
+#define MPIDR_MT_BITMASK (0x1 << 24)
+
+#define MPIDR_HWID_BITMASK 0xFFFFFF
+
+#define MPIDR_LEVEL_BITS 8
+#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+
+#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
+	((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
+
 extern unsigned int processor_id;
 
 #ifdef CONFIG_CPU_CP15
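The MPIDR_AFFINITY_LEVEL() helper added above simply slices an MPIDR value into its 8-bit affinity fields. A small stand-alone example (the macro is copied from the hunk; the sample MPIDR value is made up):

	#include <stdio.h>
	#include <stdint.h>

	#define MPIDR_LEVEL_BITS	8
	#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)
	#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
		((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)

	int main(void)
	{
		uint32_t mpidr = 0x000102;	/* Aff2 = 0x00, Aff1 = 0x01, Aff0 = 0x02 */

		printf("Aff0=%u Aff1=%u Aff2=%u\n",
		       MPIDR_AFFINITY_LEVEL(mpidr, 0),	/* 2: CPU within its cluster */
		       MPIDR_AFFINITY_LEVEL(mpidr, 1),	/* 1: cluster id */
		       MPIDR_AFFINITY_LEVEL(mpidr, 2));	/* 0 */
		return 0;
	}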
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
index a0ada3ea4358..f2e5cad3f306 100644
--- a/arch/arm/include/asm/cti.h
+++ b/arch/arm/include/asm/cti.h
@@ -146,15 +146,7 @@ static inline void cti_irq_ack(struct cti *cti)
  */
 static inline void cti_unlock(struct cti *cti)
 {
-	void __iomem *base = cti->base;
-	unsigned long val;
-
-	val = __raw_readl(base + LOCKSTATUS);
-
-	if (val & 1) {
-		val = LOCKCODE;
-		__raw_writel(val, base + LOCKACCESS);
-	}
+	__raw_writel(LOCKCODE, cti->base + LOCKACCESS);
 }
 
 /**
@@ -166,14 +158,6 @@ static inline void cti_unlock(struct cti *cti)
  */
 static inline void cti_lock(struct cti *cti)
 {
-	void __iomem *base = cti->base;
-	unsigned long val;
-
-	val = __raw_readl(base + LOCKSTATUS);
-
-	if (!(val & 1)) {
-		val = ~LOCKCODE;
-		__raw_writel(val, base + LOCKACCESS);
-	}
+	__raw_writel(~LOCKCODE, cti->base + LOCKACCESS);
 }
 #endif
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index c4c87bc12231..3b2c40b5bfa2 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -102,6 +102,10 @@
 
 #define L2X0_ADDR_FILTER_EN		1
 
+#define L2X0_CTRL_EN			1
+
+#define L2X0_WAY_SIZE_SHIFT		3
+
 #ifndef __ASSEMBLY__
 extern void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask);
 #if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF)
@@ -126,6 +130,7 @@ struct l2x0_regs {
 	unsigned long filter_end;
 	unsigned long prefetch_ctrl;
 	unsigned long pwr_ctrl;
+	unsigned long ctrl;
 };
 
 extern struct l2x0_regs l2x0_saved_regs;
diff --git a/arch/arm/include/asm/hardware/vic.h b/arch/arm/include/asm/hardware/vic.h
index e14af1a1a320..2bebad36fc83 100644
--- a/arch/arm/include/asm/hardware/vic.h
+++ b/arch/arm/include/asm/hardware/vic.h
@@ -47,7 +47,7 @@
 struct device_node;
 struct pt_regs;
 
-void __vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources,
+void __vic_init(void __iomem *base, int irq_start, u32 vic_sources,
 		u32 resume_sources, struct device_node *node);
 void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
 int vic_of_init(struct device_node *node, struct device_node *parent);
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index c190bc992f0e..01169dd723f1 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -98,12 +98,12 @@ static inline void decode_ctrl_reg(u32 reg,
 #define ARM_BASE_WCR		112
 
 /* Accessor macros for the debug registers. */
-#define ARM_DBG_READ(M, OP2, VAL) do {\
-	asm volatile("mrc p14, 0, %0, c0," #M ", " #OP2 : "=r" (VAL));\
+#define ARM_DBG_READ(N, M, OP2, VAL) do {\
+	asm volatile("mrc p14, 0, %0, " #N "," #M ", " #OP2 : "=r" (VAL));\
 } while (0)
 
-#define ARM_DBG_WRITE(M, OP2, VAL) do {\
-	asm volatile("mcr p14, 0, %0, c0," #M ", " #OP2 : : "r" (VAL));\
+#define ARM_DBG_WRITE(N, M, OP2, VAL) do {\
+	asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\
 } while (0)
 
 struct notifier_block;
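The only change here is the extra leading argument selecting CRn, so the same two macros can now name any CP14 debug register instead of being hard-wired to CRn == c0. As an illustration (expansion written out by hand, not taken from a build):

	ARM_DBG_READ(c0, c1, 0, dscr);
	/* expands to: asm volatile("mrc p14, 0, %0, c0,c1, 0" : "=r" (dscr)); */

which matches the DSCR reads in the hw_breakpoint.c hunks near the end of this patch.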
diff --git a/arch/arm/include/asm/mach/serial_at91.h b/arch/arm/include/asm/mach/serial_at91.h
deleted file mode 100644
index ea6d063923b8..000000000000
--- a/arch/arm/include/asm/mach/serial_at91.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * arch/arm/include/asm/mach/serial_at91.h
- *
- * Based on serial_sa1100.h by Nicolas Pitre
- *
- * Copyright (C) 2002 ATMEL Rousset
- *
- * Low level machine dependent UART functions.
- */
-
-struct uart_port;
-
-/*
- * This is a temporary structure for registering these
- * functions; it is intended to be discarded after boot.
- */
-struct atmel_port_fns {
-	void	(*set_mctrl)(struct uart_port *, u_int);
-	u_int	(*get_mctrl)(struct uart_port *);
-	void	(*enable_ms)(struct uart_port *);
-	void	(*pm)(struct uart_port *, u_int, u_int);
-	int	(*set_wake)(struct uart_port *, u_int);
-	int	(*open)(struct uart_port *);
-	void	(*close)(struct uart_port *);
-};
-
-#if defined(CONFIG_SERIAL_ATMEL)
-void atmel_register_uart_fns(struct atmel_port_fns *fns);
-#else
-#define atmel_register_uart_fns(fns) do { } while (0)
-#endif
-
-
diff --git a/arch/arm/include/asm/mach/serial_sa1100.h b/arch/arm/include/asm/mach/serial_sa1100.h
deleted file mode 100644
index d09064bf95a0..000000000000
--- a/arch/arm/include/asm/mach/serial_sa1100.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * arch/arm/include/asm/mach/serial_sa1100.h
- *
- * Author: Nicolas Pitre
- *
- * Moved and changed lots, Russell King
- *
- * Low level machine dependent UART functions.
- */
-
-struct uart_port;
-struct uart_info;
-
-/*
- * This is a temporary structure for registering these
- * functions; it is intended to be discarded after boot.
- */
-struct sa1100_port_fns {
-	void	(*set_mctrl)(struct uart_port *, u_int);
-	u_int	(*get_mctrl)(struct uart_port *);
-	void	(*pm)(struct uart_port *, u_int, u_int);
-	int	(*set_wake)(struct uart_port *, u_int);
-};
-
-#ifdef CONFIG_SERIAL_SA1100
-void sa1100_register_uart_fns(struct sa1100_port_fns *fns);
-void sa1100_register_uart(int idx, int port);
-#else
-#define sa1100_register_uart_fns(fns) do { } while (0)
-#define sa1100_register_uart(idx,port) do { } while (0)
-#endif
diff --git a/arch/arm/include/asm/mach/udc_pxa2xx.h b/arch/arm/include/asm/mach/udc_pxa2xx.h
deleted file mode 100644
index ea297ac70bc6..000000000000
--- a/arch/arm/include/asm/mach/udc_pxa2xx.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * arch/arm/include/asm/mach/udc_pxa2xx.h
- *
- * This supports machine-specific differences in how the PXA2xx
- * USB Device Controller (UDC) is wired.
- *
- * It is set in linux/arch/arm/mach-pxa/<machine>.c or in
- * linux/arch/mach-ixp4xx/<machine>.c and used in
- * the probe routine of linux/drivers/usb/gadget/pxa2xx_udc.c
- */
-
-struct pxa2xx_udc_mach_info {
-	int  (*udc_is_connected)(void);		/* do we see host? */
-	void (*udc_command)(int cmd);
-#define	PXA2XX_UDC_CMD_CONNECT		0	/* let host see us */
-#define	PXA2XX_UDC_CMD_DISCONNECT	1	/* so host won't see us */
-
-	/* Boards following the design guidelines in the developer's manual,
-	 * with on-chip GPIOs not Lubbock's weird hardware, can have a sane
-	 * VBUS IRQ and omit the methods above.  Store the GPIO number
-	 * here.  Note that sometimes the signals go through inverters...
-	 */
-	bool	gpio_pullup_inverted;
-	int	gpio_pullup;			/* high == pullup activated */
-};
-
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 14965658a923..9f77e7804f3b 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -5,18 +5,15 @@
 
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
-	unsigned int id;
-	raw_spinlock_t id_lock;
+	u64 id;
 #endif
-	unsigned int kvm_seq;
+	unsigned int vmalloc_seq;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
-#define ASID(mm)	((mm)->context.id & 255)
-
-/* init_mm.context.id_lock should be initialized. */
-#define INIT_MM_CONTEXT(name) \
-	.context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
+#define ASID_BITS	8
+#define ASID_MASK	((~0ULL) << ASID_BITS)
+#define ASID(mm)	((mm)->context.id & ~ASID_MASK)
 #else
 #define ASID(mm)	(0)
 #endif
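The switch to a 64-bit context.id keeps the hardware ASID in the low 8 bits and leaves the upper bits free for a rollover generation or version count. A stand-alone illustration of the split implied by the new masks (the packed value below is invented for the example):

	#include <stdio.h>
	#include <stdint.h>

	#define ASID_BITS	8
	#define ASID_MASK	((~0ULL) << ASID_BITS)

	int main(void)
	{
		uint64_t id = (3ULL << ASID_BITS) | 0x2a;	/* "generation" 3, ASID 0x2a */

		printf("asid       = 0x%llx\n", (unsigned long long)(id & ~ASID_MASK)); /* 0x2a */
		printf("upper bits = %llu\n",   (unsigned long long)(id >> ASID_BITS)); /* 3 */
		return 0;
	}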
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 0306bc642c0d..e1f644bc7cc5 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -20,88 +20,12 @@
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 
-void __check_kvm_seq(struct mm_struct *mm);
+void __check_vmalloc_seq(struct mm_struct *mm);
 
 #ifdef CONFIG_CPU_HAS_ASID
 
-/*
- * On ARMv6, we have the following structure in the Context ID:
- *
- * 31                         7          0
- * +-------------------------+-----------+
- * |      process ID         |   ASID    |
- * +-------------------------+-----------+
- * |              context ID             |
- * +-------------------------------------+
- *
- * The ASID is used to tag entries in the CPU caches and TLBs.
- * The context ID is used by debuggers and trace logic, and
- * should be unique within all running processes.
- */
-#define ASID_BITS		8
-#define ASID_MASK		((~0) << ASID_BITS)
-#define ASID_FIRST_VERSION	(1 << ASID_BITS)
-
-extern unsigned int cpu_last_asid;
-
-void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void __new_context(struct mm_struct *mm);
-void cpu_set_reserved_ttbr0(void);
-
-static inline void switch_new_context(struct mm_struct *mm)
-{
-	unsigned long flags;
-
-	__new_context(mm);
-
-	local_irq_save(flags);
-	cpu_switch_mm(mm->pgd, mm);
-	local_irq_restore(flags);
-}
-
-static inline void check_and_switch_context(struct mm_struct *mm,
-					    struct task_struct *tsk)
-{
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
-
-	/*
-	 * Required during context switch to avoid speculative page table
-	 * walking with the wrong TTBR.
-	 */
-	cpu_set_reserved_ttbr0();
-
-	if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-		/*
-		 * The ASID is from the current generation, just switch to the
-		 * new pgd. This condition is only true for calls from
-		 * context_switch() and interrupts are already disabled.
-		 */
-		cpu_switch_mm(mm->pgd, mm);
-	else if (irqs_disabled())
-		/*
-		 * Defer the new ASID allocation until after the context
-		 * switch critical region since __new_context() cannot be
-		 * called with interrupts disabled (it sends IPIs).
-		 */
-		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
-	else
-		/*
-		 * That is a direct call to switch_mm() or activate_mm() with
-		 * interrupts enabled and a new context.
-		 */
-		switch_new_context(mm);
-}
-
-#define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
-
-#define finish_arch_post_lock_switch \
-	finish_arch_post_lock_switch
-static inline void finish_arch_post_lock_switch(void)
-{
-	if (test_and_clear_thread_flag(TIF_SWITCH_MM))
-		switch_new_context(current->mm);
-}
+void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
+#define init_new_context(tsk,mm)	({ mm->context.id = 0; })
 
 #else	/* !CONFIG_CPU_HAS_ASID */
 
@@ -110,8 +34,8 @@ static inline void finish_arch_post_lock_switch(void)
 static inline void check_and_switch_context(struct mm_struct *mm,
 					    struct task_struct *tsk)
 {
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
+	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+		__check_vmalloc_seq(mm);
 
 	if (irqs_disabled())
 		/*
@@ -143,6 +67,7 @@ static inline void finish_arch_post_lock_switch(void)
 #endif	/* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)		do { } while(0)
+#define activate_mm(prev,next)		switch_mm(prev, next, NULL)
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
@@ -186,6 +111,5 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
-#define activate_mm(prev,next)	switch_mm(prev, next, NULL)
 
 #endif
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
new file mode 100644
index 000000000000..968c0a14e0a3
--- /dev/null
+++ b/arch/arm/include/asm/percpu.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _ASM_ARM_PERCPU_H_
+#define _ASM_ARM_PERCPU_H_
+
+/*
+ * Same as asm-generic/percpu.h, except that we store the per cpu offset
+ * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
+ */
+#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
+static inline void set_my_cpu_offset(unsigned long off)
+{
+	/* Set TPIDRPRW */
+	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
+}
+
+static inline unsigned long __my_cpu_offset(void)
+{
+	unsigned long off;
+	/* Read TPIDRPRW */
+	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
+	return off;
+}
+#define __my_cpu_offset __my_cpu_offset()
+#else
+#define set_my_cpu_offset(x)	do {} while(0)
+
+#endif /* CONFIG_SMP */
+
+#include <asm-generic/percpu.h>
+
+#endif /* _ASM_ARM_PERCPU_H_ */
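The point of the new header: on SMP V6K/V7 the per-cpu offset is cached in the TPIDRPRW register, so reaching this CPU's copy of a per-cpu variable needs no smp_processor_id() lookup. A rough sketch of the difference (illustration only; __per_cpu_offset is the generic kernel array, the other names are invented and this only builds on ARM):

	/* Generic fallback: index an array with the current cpu number. */
	extern unsigned long __per_cpu_offset[];

	static inline void *my_ptr_generic(void *base, unsigned int cpu)
	{
		return (char *)base + __per_cpu_offset[cpu];	/* needs the cpu id */
	}

	/* With the offset cached in TPIDRPRW, no cpu id is needed at all. */
	static inline void *my_ptr_tpidrprw(void *base)
	{
		unsigned long off;

		asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off));	/* read TPIDRPRW */
		return (char *)base + off;
	}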
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 625cd621a436..755877527cf9 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -21,4 +21,11 @@
 #define C(_x)				PERF_COUNT_HW_CACHE_##_x
 #define CACHE_OP_UNSUPPORTED		0xFFFF
 
+#ifdef CONFIG_HW_PERF_EVENTS
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs)	perf_misc_flags(regs)
+#endif
+
 #endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 2317a71c8f8e..f97ee02386ee 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -115,6 +115,7 @@
  * The PTE table pointer refers to the hardware entries; the "Linux"
  * entries are stored 1024 bytes below.
  */
+#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */
 #define L_PTE_PRESENT		(_AT(pteval_t, 1) << 0)
 #define L_PTE_YOUNG		(_AT(pteval_t, 1) << 1)
 #define L_PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !PRESENT */
@@ -123,6 +124,7 @@
 #define L_PTE_USER		(_AT(pteval_t, 1) << 8)
 #define L_PTE_XN		(_AT(pteval_t, 1) << 9)
 #define L_PTE_SHARED		(_AT(pteval_t, 1) << 10)	/* shared(v6), coherent(xsc3) */
+#define L_PTE_NONE		(_AT(pteval_t, 1) << 11)
 
 /*
  * These are the memory types, defined to be compatible with
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index b24903549d1c..a3f37929940a 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -67,7 +67,8 @@
  * These bits overlap with the hardware bits but the naming is preserved for
  * consistency with the classic page table format.
  */
-#define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Valid */
+#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */
+#define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Present */
 #define L_PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !PRESENT */
 #define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
 #define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
@@ -76,6 +77,7 @@
 #define L_PTE_XN		(_AT(pteval_t, 1) << 54)	/* XN */
 #define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)	/* unused */
 #define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)	/* unused */
+#define L_PTE_NONE		(_AT(pteval_t, 1) << 57)	/* PROT_NONE */
 
 /*
  * To be used in assembly code with the upper page attributes.
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 08c12312a1f9..9c82f988c0e3 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -73,7 +73,7 @@ extern pgprot_t		pgprot_kernel;
 
 #define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))
 
-#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
+#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
 #define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
 #define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
 #define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
@@ -83,7 +83,7 @@ extern pgprot_t		pgprot_kernel;
 #define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
 #define PAGE_KERNEL_EXEC	pgprot_kernel
 
-#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
+#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
 #define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
 #define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
 #define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
@@ -203,9 +203,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
 #define pte_special(pte)	(0)
 
-#define pte_present_user(pte) \
-	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
-	 (L_PTE_PRESENT | L_PTE_USER))
+#define pte_present_user(pte)  (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
 
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
@@ -242,7 +240,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
+	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index a26170dce02e..f24edad26c70 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -67,19 +67,19 @@ struct arm_pmu {
 	cpumask_t	active_irqs;
 	char		*name;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
-	void		(*enable)(struct hw_perf_event *evt, int idx);
-	void		(*disable)(struct hw_perf_event *evt, int idx);
+	void		(*enable)(struct perf_event *event);
+	void		(*disable)(struct perf_event *event);
 	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
-					 struct hw_perf_event *hwc);
+					 struct perf_event *event);
 	int		(*set_event_filter)(struct hw_perf_event *evt,
 					    struct perf_event_attr *attr);
-	u32		(*read_counter)(int idx);
-	void		(*write_counter)(int idx, u32 val);
-	void		(*start)(void);
-	void		(*stop)(void);
+	u32		(*read_counter)(struct perf_event *event);
+	void		(*write_counter)(struct perf_event *event, u32 val);
+	void		(*start)(struct arm_pmu *);
+	void		(*stop)(struct arm_pmu *);
 	void		(*reset)(void *);
-	int		(*request_irq)(irq_handler_t handler);
-	void		(*free_irq)(void);
+	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);
+	void		(*free_irq)(struct arm_pmu *);
 	int		(*map_event)(struct perf_event *event);
 	int		num_events;
 	atomic_t	active_events;
@@ -93,15 +93,11 @@ struct arm_pmu {
 
 extern const struct dev_pm_ops armpmu_dev_pm_ops;
 
-int armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+int armpmu_register(struct arm_pmu *armpmu, int type);
 
-u64 armpmu_event_update(struct perf_event *event,
-			struct hw_perf_event *hwc,
-			int idx);
+u64 armpmu_event_update(struct perf_event *event);
 
-int armpmu_event_set_period(struct perf_event *event,
-			    struct hw_perf_event *hwc,
-			    int idx);
+int armpmu_event_set_period(struct perf_event *event);
 
 int armpmu_map_event(struct perf_event *event,
 		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index 6d65ba222db9..a219227c3e43 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -17,6 +17,7 @@
 
 extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
 extern void arm_dt_memblock_reserve(void);
+extern void __init arm_dt_init_cpu_maps(void);
 
 #else /* CONFIG_OF */
 
@@ -26,6 +27,7 @@ static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
 }
 
 static inline void arm_dt_memblock_reserve(void) { }
+static inline void arm_dt_init_cpu_maps(void) { }
 
 #endif /* CONFIG_OF */
 #endif /* ASMARM_PROM_H */
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 2e3be16c6766..d3a22bebe6ce 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -79,6 +79,7 @@ extern void cpu_die(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
 
 struct smp_operations {
 #ifdef CONFIG_SMP
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 558d6c80aca9..aaa61b6f50ff 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -5,6 +5,9 @@
 #ifndef __ASMARM_SMP_PLAT_H
 #define __ASMARM_SMP_PLAT_H
 
+#include <linux/cpumask.h>
+#include <linux/err.h>
+
 #include <asm/cputype.h>
 
 /*
@@ -48,5 +51,19 @@ static inline int cache_ops_need_broadcast(void)
  */
 extern int __cpu_logical_map[];
 #define cpu_logical_map(cpu)	__cpu_logical_map[cpu]
+/*
+ * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
+ *  - mpidr: MPIDR[23:0] to be used for the look-up
+ *
+ * Returns the cpu logical index or -EINVAL on look-up error
+ */
+static inline int get_logical_index(u32 mpidr)
+{
+	int cpu;
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+		if (cpu_logical_map(cpu) == mpidr)
+			return cpu;
+	return -EINVAL;
+}
 
 #endif
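get_logical_index() is just the reverse lookup of cpu_logical_map(): logical cpu to MPIDR one way, MPIDR back to logical cpu the other. A toy user-space model of the pair (array contents invented for the example):

	#include <stdio.h>

	#define NR_CPUS 4

	/* logical cpu -> MPIDR[23:0], as arm_dt_init_cpu_maps() would set it up */
	static unsigned int cpu_logical_map[NR_CPUS] = { 0x100, 0x101, 0x000, 0x001 };

	static int get_logical_index(unsigned int mpidr)
	{
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			if (cpu_logical_map[cpu] == mpidr)
				return cpu;
		return -1;	/* the kernel helper returns -EINVAL */
	}

	int main(void)
	{
		printf("%d\n", get_logical_index(0x001));	/* prints 3 */
		return 0;
	}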
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index 9fdded6b1089..f1d96d4e8092 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -7,6 +7,8 @@
 #ifndef _ASM_ARM_SYSCALL_H
 #define _ASM_ARM_SYSCALL_H
 
+#include <linux/audit.h> /* for AUDIT_ARCH_* */
+#include <linux/elf.h> /* for ELF_EM */
 #include <linux/err.h>
 #include <linux/sched.h>
 
@@ -95,4 +97,11 @@ static inline void syscall_set_arguments(struct task_struct *task,
 	memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
 }
 
+static inline int syscall_get_arch(struct task_struct *task,
+				   struct pt_regs *regs)
+{
+	/* ARM tasks don't change audit architectures on the fly. */
+	return AUDIT_ARCH_ARM;
+}
+
 #endif /* _ASM_ARM_SYSCALL_H */
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 8477b4c1d39f..cddda1f41f0f 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -151,10 +151,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_SYSCALL_TRACE	8
 #define TIF_SYSCALL_AUDIT	9
 #define TIF_SYSCALL_TRACEPOINT	10
+#define TIF_SECCOMP		11	/* seccomp syscall filtering active */
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	20
-#define TIF_SECCOMP		21
 #define TIF_SWITCH_MM		22	/* deferred switch_mm */
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
@@ -163,11 +163,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
-#define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
 
 /* Checks for any syscall work in entry-common.S */
-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+			   _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
 
 /*
  * Change these and you break ASM code in entry-common.S
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index bee7f9d47f02..70f1bdeb241b 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -19,8 +19,10 @@
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 
+#include <asm/cputype.h>
 #include <asm/setup.h>
 #include <asm/page.h>
+#include <asm/smp_plat.h>
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
 
@@ -61,6 +63,108 @@ void __init arm_dt_memblock_reserve(void)
 	}
 }
 
+/*
+ * arm_dt_init_cpu_maps - Function retrieves cpu nodes from the device tree
+ * and builds the cpu logical map array containing MPIDR values related to
+ * logical cpus
+ *
+ * Updates the cpu possible mask with the number of parsed cpu nodes
+ */
+void __init arm_dt_init_cpu_maps(void)
+{
+	/*
+	 * Temp logical map is initialized with UINT_MAX values that are
+	 * considered invalid logical map entries since the logical map must
+	 * contain a list of MPIDR[23:0] values where MPIDR[31:24] must
+	 * read as 0.
+	 */
+	struct device_node *cpu, *cpus;
+	u32 i, j, cpuidx = 1;
+	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
+
+	u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = UINT_MAX };
+	bool bootcpu_valid = false;
+	cpus = of_find_node_by_path("/cpus");
+
+	if (!cpus)
+		return;
+
+	for_each_child_of_node(cpus, cpu) {
+		u32 hwid;
+
+		pr_debug(" * %s...\n", cpu->full_name);
+		/*
+		 * A device tree containing CPU nodes with missing "reg"
+		 * properties is considered invalid to build the
+		 * cpu_logical_map.
+		 */
+		if (of_property_read_u32(cpu, "reg", &hwid)) {
+			pr_debug(" * %s missing reg property\n",
+				     cpu->full_name);
+			return;
+		}
+
+		/*
+		 * 8 MSBs must be set to 0 in the DT since the reg property
+		 * defines the MPIDR[23:0].
+		 */
+		if (hwid & ~MPIDR_HWID_BITMASK)
+			return;
+
+		/*
+		 * Duplicate MPIDRs are a recipe for disaster.
+		 * Scan all initialized entries and check for
+		 * duplicates. If any is found just bail out.
+		 * temp values were initialized to UINT_MAX
+		 * to avoid matching valid MPIDR[23:0] values.
+		 */
+		for (j = 0; j < cpuidx; j++)
+			if (WARN(tmp_map[j] == hwid, "Duplicate /cpu reg "
+						     "properties in the DT\n"))
+				return;
+
+		/*
+		 * Build a stashed array of MPIDR values. Numbering scheme
+		 * requires that if detected the boot CPU must be assigned
+		 * logical id 0. Other CPUs get sequential indexes starting
+		 * from 1. If a CPU node with a reg property matching the
+		 * boot CPU MPIDR is detected, this is recorded so that the
+		 * logical map built from DT is validated and can be used
+		 * to override the map created in smp_setup_processor_id().
+		 */
+		if (hwid == mpidr) {
+			i = 0;
+			bootcpu_valid = true;
+		} else {
+			i = cpuidx++;
+		}
+
+		if (WARN(cpuidx > nr_cpu_ids, "DT /cpu %u nodes greater than "
+					       "max cores %u, capping them\n",
+					       cpuidx, nr_cpu_ids)) {
+			cpuidx = nr_cpu_ids;
+			break;
+		}
+
+		tmp_map[i] = hwid;
+	}
+
+	if (WARN(!bootcpu_valid, "DT missing boot CPU MPIDR[23:0], "
+				 "fall back to default cpu_logical_map\n"))
+		return;
+
+	/*
+	 * Since the boot CPU node contains proper data, and all nodes have
+	 * a reg property, the DT CPU list can be considered valid and the
+	 * logical map created in smp_setup_processor_id() can be overridden
+	 */
+	for (i = 0; i < cpuidx; i++) {
+		set_cpu_possible(i, true);
+		cpu_logical_map(i) = tmp_map[i];
+		pr_debug("cpu logical map 0x%x\n", cpu_logical_map(i));
+	}
+}
+
 /**
  * setup_machine_fdt - Machine setup when an dtb was passed to the kernel
  * @dt_phys: physical address of dt blob
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 34711757ba59..804153c0a9cf 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -417,16 +417,6 @@ local_restart:
 	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
 	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
 
-#ifdef CONFIG_SECCOMP
-	tst	r10, #_TIF_SECCOMP
-	beq	1f
-	mov	r0, scno
-	bl	__secure_computing
-	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
-	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
-1:
-#endif
-
 	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
 	bne	__sys_trace
 
@@ -458,11 +448,13 @@ __sys_trace:
 	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
 	stmccia	sp, {r4, r5}			@ and update the stack args
 	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
-	b	2b
+	cmp	scno, #-1			@ skip the syscall?
+	bne	2b
+	add	sp, sp, #S_OFF			@ restore stack
+	b	ret_slow_syscall
 
 __sys_trace_return:
 	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
-	mov	r1, scno
 	mov	r0, sp
 	bl	syscall_trace_exit
 	b	ret_slow_syscall
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 278cfc144f44..2c228a07e58c 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -68,7 +68,7 @@ __after_proc_init:
 	 * CP15 system control register value returned in r0 from
 	 * the CPU init function.
 	 */
-#ifdef CONFIG_ALIGNMENT_TRAP
+#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
 	orr	r0, r0, #CR_A
 #else
 	bic	r0, r0, #CR_A
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 281bf3301241..5ff2e77782b1 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -52,14 +52,14 @@ static u8 debug_arch;
 /* Maximum supported watchpoint length. */
 static u8 max_watchpoint_len;
 
 #define READ_WB_REG_CASE(OP2, M, VAL)			\
 	case ((OP2 << 4) + M):				\
-		ARM_DBG_READ(c ## M, OP2, VAL);		\
+		ARM_DBG_READ(c0, c ## M, OP2, VAL);	\
 		break
 
 #define WRITE_WB_REG_CASE(OP2, M, VAL)			\
 	case ((OP2 << 4) + M):				\
-		ARM_DBG_WRITE(c ## M, OP2, VAL);	\
+		ARM_DBG_WRITE(c0, c ## M, OP2, VAL);	\
 		break
 
 #define GEN_READ_WB_REG_CASES(OP2, VAL)			\
@@ -136,12 +136,12 @@ static u8 get_debug_arch(void)
 
 	/* Do we implement the extended CPUID interface? */
 	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
-		pr_warning("CPUID feature registers not supported. "
+		pr_warn_once("CPUID feature registers not supported. "
 			   "Assuming v6 debug is present.\n");
 		return ARM_DEBUG_ARCH_V6;
 	}
 
-	ARM_DBG_READ(c0, 0, didr);
+	ARM_DBG_READ(c0, c0, 0, didr);
 	return (didr >> 16) & 0xf;
 }
 
@@ -169,7 +169,7 @@ static int debug_exception_updates_fsr(void)
 static int get_num_wrp_resources(void)
 {
 	u32 didr;
-	ARM_DBG_READ(c0, 0, didr);
+	ARM_DBG_READ(c0, c0, 0, didr);
 	return ((didr >> 28) & 0xf) + 1;
 }
 
@@ -177,7 +177,7 @@ static int get_num_wrp_resources(void)
 static int get_num_brp_resources(void)
 {
 	u32 didr;
-	ARM_DBG_READ(c0, 0, didr);
+	ARM_DBG_READ(c0, c0, 0, didr);
 	return ((didr >> 24) & 0xf) + 1;
 }
 
@@ -228,19 +228,17 @@ static int get_num_brps(void)
  * be put into halting debug mode at any time by an external debugger
  * but there is nothing we can do to prevent that.
  */
-static int enable_monitor_mode(void)
+static int monitor_mode_enabled(void)
 {
 	u32 dscr;
-	int ret = 0;
-
-	ARM_DBG_READ(c1, 0, dscr);
+	ARM_DBG_READ(c0, c1, 0, dscr);
+	return !!(dscr & ARM_DSCR_MDBGEN);
+}
 
-	/* Ensure that halting mode is disabled. */
-	if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN,
-		"halting debug mode enabled. Unable to access hardware resources.\n")) {
-		ret = -EPERM;
-		goto out;
-	}
+static int enable_monitor_mode(void)
+{
+	u32 dscr;
+	ARM_DBG_READ(c0, c1, 0, dscr);
 
 	/* If monitor mode is already enabled, just return. */
 	if (dscr & ARM_DSCR_MDBGEN)
@@ -250,24 +248,27 @@ static int enable_monitor_mode(void)
 	switch (get_debug_arch()) {
 	case ARM_DEBUG_ARCH_V6:
 	case ARM_DEBUG_ARCH_V6_1:
-		ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
+		ARM_DBG_WRITE(c0, c1, 0, (dscr | ARM_DSCR_MDBGEN));
 		break;
 	case ARM_DEBUG_ARCH_V7_ECP14:
 	case ARM_DEBUG_ARCH_V7_1:
-		ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
+		ARM_DBG_WRITE(c0, c2, 2, (dscr | ARM_DSCR_MDBGEN));
+		isb();
 		break;
 	default:
-		ret = -ENODEV;
-		goto out;
+		return -ENODEV;
 	}
 
 	/* Check that the write made it through. */
-	ARM_DBG_READ(c1, 0, dscr);
-	if (!(dscr & ARM_DSCR_MDBGEN))
-		ret = -EPERM;
+	ARM_DBG_READ(c0, c1, 0, dscr);
+	if (!(dscr & ARM_DSCR_MDBGEN)) {
+		pr_warn_once("Failed to enable monitor mode on CPU %d.\n",
+				smp_processor_id());
+		return -EPERM;
+	}
 
 out:
-	return ret;
+	return 0;
 }
 
 int hw_breakpoint_slots(int type)
@@ -328,14 +329,9 @@ int arch_install_hw_breakpoint(struct perf_event *bp) | |||
328 | { | 329 | { |
329 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | 330 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); |
330 | struct perf_event **slot, **slots; | 331 | struct perf_event **slot, **slots; |
331 | int i, max_slots, ctrl_base, val_base, ret = 0; | 332 | int i, max_slots, ctrl_base, val_base; |
332 | u32 addr, ctrl; | 333 | u32 addr, ctrl; |
333 | 334 | ||
334 | /* Ensure that we are in monitor mode and halting mode is disabled. */ | ||
335 | ret = enable_monitor_mode(); | ||
336 | if (ret) | ||
337 | goto out; | ||
338 | |||
339 | addr = info->address; | 335 | addr = info->address; |
340 | ctrl = encode_ctrl_reg(info->ctrl) | 0x1; | 336 | ctrl = encode_ctrl_reg(info->ctrl) | 0x1; |
341 | 337 | ||
@@ -362,9 +358,9 @@ int arch_install_hw_breakpoint(struct perf_event *bp) | |||
362 | } | 358 | } |
363 | } | 359 | } |
364 | 360 | ||
365 | if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n")) { | 361 | if (i == max_slots) { |
366 | ret = -EBUSY; | 362 | pr_warning("Can't find any breakpoint slot\n"); |
367 | goto out; | 363 | return -EBUSY; |
368 | } | 364 | } |
369 | 365 | ||
370 | /* Override the breakpoint data with the step data. */ | 366 | /* Override the breakpoint data with the step data. */ |
@@ -383,9 +379,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) | |||
383 | 379 | ||
384 | /* Setup the control register. */ | 380 | /* Setup the control register. */ |
385 | write_wb_reg(ctrl_base + i, ctrl); | 381 | write_wb_reg(ctrl_base + i, ctrl); |
386 | 382 | return 0; | |
387 | out: | ||
388 | return ret; | ||
389 | } | 383 | } |
390 | 384 | ||
391 | void arch_uninstall_hw_breakpoint(struct perf_event *bp) | 385 | void arch_uninstall_hw_breakpoint(struct perf_event *bp) |
@@ -416,8 +410,10 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) | |||
416 | } | 410 | } |
417 | } | 411 | } |
418 | 412 | ||
419 | if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n")) | 413 | if (i == max_slots) { |
414 | pr_warning("Can't find any breakpoint slot\n"); | ||
420 | return; | 415 | return; |
416 | } | ||
421 | 417 | ||
422 | /* Ensure that we disable the mismatch breakpoint. */ | 418 | /* Ensure that we disable the mismatch breakpoint. */ |
423 | if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE && | 419 | if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE && |
@@ -596,6 +592,10 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) | |||
596 | int ret = 0; | 592 | int ret = 0; |
597 | u32 offset, alignment_mask = 0x3; | 593 | u32 offset, alignment_mask = 0x3; |
598 | 594 | ||
595 | /* Ensure that we are in monitor debug mode. */ | ||
596 | if (!monitor_mode_enabled()) | ||
597 | return -ENODEV; | ||
598 | |||
599 | /* Build the arch_hw_breakpoint. */ | 599 | /* Build the arch_hw_breakpoint. */ |
600 | ret = arch_build_bp_info(bp); | 600 | ret = arch_build_bp_info(bp); |
601 | if (ret) | 601 | if (ret) |
@@ -858,7 +858,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, | |||
858 | local_irq_enable(); | 858 | local_irq_enable(); |
859 | 859 | ||
860 | /* We only handle watchpoints and hardware breakpoints. */ | 860 | /* We only handle watchpoints and hardware breakpoints. */ |
861 | ARM_DBG_READ(c1, 0, dscr); | 861 | ARM_DBG_READ(c0, c1, 0, dscr); |
862 | 862 | ||
863 | /* Perform perf callbacks. */ | 863 | /* Perform perf callbacks. */ |
864 | switch (ARM_DSCR_MOE(dscr)) { | 864 | switch (ARM_DSCR_MOE(dscr)) { |
@@ -906,7 +906,7 @@ static struct undef_hook debug_reg_hook = { | |||
906 | static void reset_ctrl_regs(void *unused) | 906 | static void reset_ctrl_regs(void *unused) |
907 | { | 907 | { |
908 | int i, raw_num_brps, err = 0, cpu = smp_processor_id(); | 908 | int i, raw_num_brps, err = 0, cpu = smp_processor_id(); |
909 | u32 dbg_power; | 909 | u32 val; |
910 | 910 | ||
911 | /* | 911 | /* |
912 | * v7 debug contains save and restore registers so that debug state | 912 | * v7 debug contains save and restore registers so that debug state |
@@ -919,23 +919,30 @@ static void reset_ctrl_regs(void *unused) | |||
919 | switch (debug_arch) { | 919 | switch (debug_arch) { |
920 | case ARM_DEBUG_ARCH_V6: | 920 | case ARM_DEBUG_ARCH_V6: |
921 | case ARM_DEBUG_ARCH_V6_1: | 921 | case ARM_DEBUG_ARCH_V6_1: |
922 | /* ARMv6 cores just need to reset the registers. */ | 922 | /* ARMv6 cores clear the registers out of reset. */ |
923 | goto reset_regs; | 923 | goto out_mdbgen; |
924 | case ARM_DEBUG_ARCH_V7_ECP14: | 924 | case ARM_DEBUG_ARCH_V7_ECP14: |
925 | /* | 925 | /* |
926 | * Ensure sticky power-down is clear (i.e. debug logic is | 926 | * Ensure sticky power-down is clear (i.e. debug logic is |
927 | * powered up). | 927 | * powered up). |
928 | */ | 928 | */ |
929 | asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power)); | 929 | ARM_DBG_READ(c1, c5, 4, val); |
930 | if ((dbg_power & 0x1) == 0) | 930 | if ((val & 0x1) == 0) |
931 | err = -EPERM; | 931 | err = -EPERM; |
932 | |||
933 | /* | ||
934 | * Check whether we implement OS save and restore. | ||
935 | */ | ||
936 | ARM_DBG_READ(c1, c1, 4, val); | ||
937 | if ((val & 0x9) == 0) | ||
938 | goto clear_vcr; | ||
932 | break; | 939 | break; |
933 | case ARM_DEBUG_ARCH_V7_1: | 940 | case ARM_DEBUG_ARCH_V7_1: |
934 | /* | 941 | /* |
935 | * Ensure the OS double lock is clear. | 942 | * Ensure the OS double lock is clear. |
936 | */ | 943 | */ |
937 | asm volatile("mrc p14, 0, %0, c1, c3, 4" : "=r" (dbg_power)); | 944 | ARM_DBG_READ(c1, c3, 4, val); |
938 | if ((dbg_power & 0x1) == 1) | 945 | if ((val & 0x1) == 1) |
939 | err = -EPERM; | 946 | err = -EPERM; |
940 | break; | 947 | break; |
941 | } | 948 | } |
@@ -947,24 +954,29 @@ static void reset_ctrl_regs(void *unused) | |||
947 | } | 954 | } |
948 | 955 | ||
949 | /* | 956 | /* |
950 | * Unconditionally clear the lock by writing a value | 957 | * Unconditionally clear the OS lock by writing a value |
951 | * other than 0xC5ACCE55 to the access register. | 958 | * other than 0xC5ACCE55 to the access register. |
952 | */ | 959 | */ |
953 | asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0)); | 960 | ARM_DBG_WRITE(c1, c0, 4, 0); |
954 | isb(); | 961 | isb(); |
955 | 962 | ||
956 | /* | 963 | /* |
957 | * Clear any configured vector-catch events before | 964 | * Clear any configured vector-catch events before |
958 | * enabling monitor mode. | 965 | * enabling monitor mode. |
959 | */ | 966 | */ |
960 | asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0)); | 967 | clear_vcr: |
968 | ARM_DBG_WRITE(c0, c7, 0, 0); | ||
961 | isb(); | 969 | isb(); |
962 | 970 | ||
963 | reset_regs: | 971 | if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { |
964 | if (enable_monitor_mode()) | 972 | pr_warning("CPU %d failed to disable vector catch\n", cpu); |
965 | return; | 973 | return; |
974 | } | ||
966 | 975 | ||
967 | /* We must also reset any reserved registers. */ | 976 | /* |
977 | * The control/value register pairs are UNKNOWN out of reset so | ||
978 | * clear them to avoid spurious debug events. | ||
979 | */ | ||
968 | raw_num_brps = get_num_brp_resources(); | 980 | raw_num_brps = get_num_brp_resources(); |
969 | for (i = 0; i < raw_num_brps; ++i) { | 981 | for (i = 0; i < raw_num_brps; ++i) { |
970 | write_wb_reg(ARM_BASE_BCR + i, 0UL); | 982 | write_wb_reg(ARM_BASE_BCR + i, 0UL); |
@@ -975,6 +987,19 @@ reset_regs: | |||
975 | write_wb_reg(ARM_BASE_WCR + i, 0UL); | 987 | write_wb_reg(ARM_BASE_WCR + i, 0UL); |
976 | write_wb_reg(ARM_BASE_WVR + i, 0UL); | 988 | write_wb_reg(ARM_BASE_WVR + i, 0UL); |
977 | } | 989 | } |
990 | |||
991 | if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { | ||
992 | pr_warning("CPU %d failed to clear debug register pairs\n", cpu); | ||
993 | return; | ||
994 | } | ||
995 | |||
996 | /* | ||
997 | * Have a crack at enabling monitor mode. We don't actually need | ||
998 | * it yet, but reporting an error early is useful if it fails. | ||
999 | */ | ||
1000 | out_mdbgen: | ||
1001 | if (enable_monitor_mode()) | ||
1002 | cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu)); | ||
978 | } | 1003 | } |
979 | 1004 | ||
980 | static int __cpuinit dbg_reset_notify(struct notifier_block *self, | 1005 | static int __cpuinit dbg_reset_notify(struct notifier_block *self, |
@@ -992,8 +1017,6 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = { | |||
992 | 1017 | ||
993 | static int __init arch_hw_breakpoint_init(void) | 1018 | static int __init arch_hw_breakpoint_init(void) |
994 | { | 1019 | { |
995 | u32 dscr; | ||
996 | |||
997 | debug_arch = get_debug_arch(); | 1020 | debug_arch = get_debug_arch(); |
998 | 1021 | ||
999 | if (!debug_arch_supported()) { | 1022 | if (!debug_arch_supported()) { |
@@ -1028,17 +1051,10 @@ static int __init arch_hw_breakpoint_init(void) | |||
1028 | core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " : | 1051 | core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " : |
1029 | "", core_num_wrps); | 1052 | "", core_num_wrps); |
1030 | 1053 | ||
1031 | ARM_DBG_READ(c1, 0, dscr); | 1054 | /* Work out the maximum supported watchpoint length. */ |
1032 | if (dscr & ARM_DSCR_HDBGEN) { | 1055 | max_watchpoint_len = get_max_wp_len(); |
1033 | max_watchpoint_len = 4; | 1056 | pr_info("maximum watchpoint size is %u bytes.\n", |
1034 | pr_warning("halting debug mode enabled. Assuming maximum watchpoint size of %u bytes.\n", | 1057 | max_watchpoint_len); |
1035 | max_watchpoint_len); | ||
1036 | } else { | ||
1037 | /* Work out the maximum supported watchpoint length. */ | ||
1038 | max_watchpoint_len = get_max_wp_len(); | ||
1039 | pr_info("maximum watchpoint size is %u bytes.\n", | ||
1040 | max_watchpoint_len); | ||
1041 | } | ||
1042 | 1058 | ||
1043 | /* Register debug fault handler. */ | 1059 | /* Register debug fault handler. */ |
1044 | hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP, | 1060 | hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP, |
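A note on the hw_breakpoint.c hunks above: enable_monitor_mode() is split so that a cheap monitor_mode_enabled() check can be made at breakpoint-validation time, and ARM_DBG_READ/ARM_DBG_WRITE now take the CRn explicitly. Going by the open-coded mrc instructions that the new macros replace in reset_ctrl_regs(), the DSCR check boils down to roughly the following sketch, assuming ARM_DSCR_MDBGEN from the hw_breakpoint header:

    /* Sketch: what monitor_mode_enabled() expands to once the
     * four-argument ARM_DBG_READ(CRn, CRm, op2, val) macro is resolved.
     * DBGDSCR sits in the cp14 space at CRn=c0, CRm=c1, op2=0. */
    static int monitor_mode_enabled(void)
    {
            u32 dscr;

            asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (dscr));
            return !!(dscr & ARM_DSCR_MDBGEN);  /* monitor debug enabled? */
    }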
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 53c0304b734a..f9e8657dd241 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -86,12 +86,10 @@ armpmu_map_event(struct perf_event *event, | |||
86 | return -ENOENT; | 86 | return -ENOENT; |
87 | } | 87 | } |
88 | 88 | ||
89 | int | 89 | int armpmu_event_set_period(struct perf_event *event) |
90 | armpmu_event_set_period(struct perf_event *event, | ||
91 | struct hw_perf_event *hwc, | ||
92 | int idx) | ||
93 | { | 90 | { |
94 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 91 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
92 | struct hw_perf_event *hwc = &event->hw; | ||
95 | s64 left = local64_read(&hwc->period_left); | 93 | s64 left = local64_read(&hwc->period_left); |
96 | s64 period = hwc->sample_period; | 94 | s64 period = hwc->sample_period; |
97 | int ret = 0; | 95 | int ret = 0; |
@@ -119,24 +117,22 @@ armpmu_event_set_period(struct perf_event *event, | |||
119 | 117 | ||
120 | local64_set(&hwc->prev_count, (u64)-left); | 118 | local64_set(&hwc->prev_count, (u64)-left); |
121 | 119 | ||
122 | armpmu->write_counter(idx, (u64)(-left) & 0xffffffff); | 120 | armpmu->write_counter(event, (u64)(-left) & 0xffffffff); |
123 | 121 | ||
124 | perf_event_update_userpage(event); | 122 | perf_event_update_userpage(event); |
125 | 123 | ||
126 | return ret; | 124 | return ret; |
127 | } | 125 | } |
128 | 126 | ||
129 | u64 | 127 | u64 armpmu_event_update(struct perf_event *event) |
130 | armpmu_event_update(struct perf_event *event, | ||
131 | struct hw_perf_event *hwc, | ||
132 | int idx) | ||
133 | { | 128 | { |
134 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 129 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
130 | struct hw_perf_event *hwc = &event->hw; | ||
135 | u64 delta, prev_raw_count, new_raw_count; | 131 | u64 delta, prev_raw_count, new_raw_count; |
136 | 132 | ||
137 | again: | 133 | again: |
138 | prev_raw_count = local64_read(&hwc->prev_count); | 134 | prev_raw_count = local64_read(&hwc->prev_count); |
139 | new_raw_count = armpmu->read_counter(idx); | 135 | new_raw_count = armpmu->read_counter(event); |
140 | 136 | ||
141 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, | 137 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, |
142 | new_raw_count) != prev_raw_count) | 138 | new_raw_count) != prev_raw_count) |
@@ -159,7 +155,7 @@ armpmu_read(struct perf_event *event) | |||
159 | if (hwc->idx < 0) | 155 | if (hwc->idx < 0) |
160 | return; | 156 | return; |
161 | 157 | ||
162 | armpmu_event_update(event, hwc, hwc->idx); | 158 | armpmu_event_update(event); |
163 | } | 159 | } |
164 | 160 | ||
165 | static void | 161 | static void |
@@ -173,14 +169,13 @@ armpmu_stop(struct perf_event *event, int flags) | |||
173 | * PERF_EF_UPDATE, see comments in armpmu_start(). | 169 | * PERF_EF_UPDATE, see comments in armpmu_start(). |
174 | */ | 170 | */ |
175 | if (!(hwc->state & PERF_HES_STOPPED)) { | 171 | if (!(hwc->state & PERF_HES_STOPPED)) { |
176 | armpmu->disable(hwc, hwc->idx); | 172 | armpmu->disable(event); |
177 | armpmu_event_update(event, hwc, hwc->idx); | 173 | armpmu_event_update(event); |
178 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | 174 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; |
179 | } | 175 | } |
180 | } | 176 | } |
181 | 177 | ||
182 | static void | 178 | static void armpmu_start(struct perf_event *event, int flags) |
183 | armpmu_start(struct perf_event *event, int flags) | ||
184 | { | 179 | { |
185 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 180 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
186 | struct hw_perf_event *hwc = &event->hw; | 181 | struct hw_perf_event *hwc = &event->hw; |
@@ -200,8 +195,8 @@ armpmu_start(struct perf_event *event, int flags) | |||
200 | * get an interrupt too soon or *way* too late if the overflow has | 195 | * get an interrupt too soon or *way* too late if the overflow has |
201 | * happened since disabling. | 196 | * happened since disabling. |
202 | */ | 197 | */ |
203 | armpmu_event_set_period(event, hwc, hwc->idx); | 198 | armpmu_event_set_period(event); |
204 | armpmu->enable(hwc, hwc->idx); | 199 | armpmu->enable(event); |
205 | } | 200 | } |
206 | 201 | ||
207 | static void | 202 | static void |
@@ -233,7 +228,7 @@ armpmu_add(struct perf_event *event, int flags) | |||
233 | perf_pmu_disable(event->pmu); | 228 | perf_pmu_disable(event->pmu); |
234 | 229 | ||
235 | /* If we don't have a space for the counter then finish early. */ | 230 | /* If we don't have a space for the counter then finish early. */ |
236 | idx = armpmu->get_event_idx(hw_events, hwc); | 231 | idx = armpmu->get_event_idx(hw_events, event); |
237 | if (idx < 0) { | 232 | if (idx < 0) { |
238 | err = idx; | 233 | err = idx; |
239 | goto out; | 234 | goto out; |
@@ -244,7 +239,7 @@ armpmu_add(struct perf_event *event, int flags) | |||
244 | * sure it is disabled. | 239 | * sure it is disabled. |
245 | */ | 240 | */ |
246 | event->hw.idx = idx; | 241 | event->hw.idx = idx; |
247 | armpmu->disable(hwc, idx); | 242 | armpmu->disable(event); |
248 | hw_events->events[idx] = event; | 243 | hw_events->events[idx] = event; |
249 | 244 | ||
250 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | 245 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; |
@@ -264,13 +259,12 @@ validate_event(struct pmu_hw_events *hw_events, | |||
264 | struct perf_event *event) | 259 | struct perf_event *event) |
265 | { | 260 | { |
266 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 261 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
267 | struct hw_perf_event fake_event = event->hw; | ||
268 | struct pmu *leader_pmu = event->group_leader->pmu; | 262 | struct pmu *leader_pmu = event->group_leader->pmu; |
269 | 263 | ||
270 | if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) | 264 | if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) |
271 | return 1; | 265 | return 1; |
272 | 266 | ||
273 | return armpmu->get_event_idx(hw_events, &fake_event) >= 0; | 267 | return armpmu->get_event_idx(hw_events, event) >= 0; |
274 | } | 268 | } |
275 | 269 | ||
276 | static int | 270 | static int |
@@ -316,7 +310,7 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) | |||
316 | static void | 310 | static void |
317 | armpmu_release_hardware(struct arm_pmu *armpmu) | 311 | armpmu_release_hardware(struct arm_pmu *armpmu) |
318 | { | 312 | { |
319 | armpmu->free_irq(); | 313 | armpmu->free_irq(armpmu); |
320 | pm_runtime_put_sync(&armpmu->plat_device->dev); | 314 | pm_runtime_put_sync(&armpmu->plat_device->dev); |
321 | } | 315 | } |
322 | 316 | ||
@@ -330,7 +324,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) | |||
330 | return -ENODEV; | 324 | return -ENODEV; |
331 | 325 | ||
332 | pm_runtime_get_sync(&pmu_device->dev); | 326 | pm_runtime_get_sync(&pmu_device->dev); |
333 | err = armpmu->request_irq(armpmu_dispatch_irq); | 327 | err = armpmu->request_irq(armpmu, armpmu_dispatch_irq); |
334 | if (err) { | 328 | if (err) { |
335 | armpmu_release_hardware(armpmu); | 329 | armpmu_release_hardware(armpmu); |
336 | return err; | 330 | return err; |
@@ -465,13 +459,13 @@ static void armpmu_enable(struct pmu *pmu) | |||
465 | int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); | 459 | int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); |
466 | 460 | ||
467 | if (enabled) | 461 | if (enabled) |
468 | armpmu->start(); | 462 | armpmu->start(armpmu); |
469 | } | 463 | } |
470 | 464 | ||
471 | static void armpmu_disable(struct pmu *pmu) | 465 | static void armpmu_disable(struct pmu *pmu) |
472 | { | 466 | { |
473 | struct arm_pmu *armpmu = to_arm_pmu(pmu); | 467 | struct arm_pmu *armpmu = to_arm_pmu(pmu); |
474 | armpmu->stop(); | 468 | armpmu->stop(armpmu); |
475 | } | 469 | } |
476 | 470 | ||
477 | #ifdef CONFIG_PM_RUNTIME | 471 | #ifdef CONFIG_PM_RUNTIME |
@@ -517,12 +511,13 @@ static void __init armpmu_init(struct arm_pmu *armpmu) | |||
517 | }; | 511 | }; |
518 | } | 512 | } |
519 | 513 | ||
520 | int armpmu_register(struct arm_pmu *armpmu, char *name, int type) | 514 | int armpmu_register(struct arm_pmu *armpmu, int type) |
521 | { | 515 | { |
522 | armpmu_init(armpmu); | 516 | armpmu_init(armpmu); |
517 | pm_runtime_enable(&armpmu->plat_device->dev); | ||
523 | pr_info("enabled with %s PMU driver, %d counters available\n", | 518 | pr_info("enabled with %s PMU driver, %d counters available\n", |
524 | armpmu->name, armpmu->num_events); | 519 | armpmu->name, armpmu->num_events); |
525 | return perf_pmu_register(&armpmu->pmu, name, type); | 520 | return perf_pmu_register(&armpmu->pmu, armpmu->name, type); |
526 | } | 521 | } |
527 | 522 | ||
528 | /* | 523 | /* |
@@ -576,6 +571,10 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) | |||
576 | { | 571 | { |
577 | struct frame_tail __user *tail; | 572 | struct frame_tail __user *tail; |
578 | 573 | ||
574 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { | ||
575 | /* We don't support guest os callchain now */ | ||
576 | return; | ||
577 | } | ||
579 | 578 | ||
580 | tail = (struct frame_tail __user *)regs->ARM_fp - 1; | 579 | tail = (struct frame_tail __user *)regs->ARM_fp - 1; |
581 | 580 | ||
@@ -603,9 +602,41 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) | |||
603 | { | 602 | { |
604 | struct stackframe fr; | 603 | struct stackframe fr; |
605 | 604 | ||
605 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { | ||
606 | /* We don't support guest os callchain now */ | ||
607 | return; | ||
608 | } | ||
609 | |||
606 | fr.fp = regs->ARM_fp; | 610 | fr.fp = regs->ARM_fp; |
607 | fr.sp = regs->ARM_sp; | 611 | fr.sp = regs->ARM_sp; |
608 | fr.lr = regs->ARM_lr; | 612 | fr.lr = regs->ARM_lr; |
609 | fr.pc = regs->ARM_pc; | 613 | fr.pc = regs->ARM_pc; |
610 | walk_stackframe(&fr, callchain_trace, entry); | 614 | walk_stackframe(&fr, callchain_trace, entry); |
611 | } | 615 | } |
616 | |||
617 | unsigned long perf_instruction_pointer(struct pt_regs *regs) | ||
618 | { | ||
619 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) | ||
620 | return perf_guest_cbs->get_guest_ip(); | ||
621 | |||
622 | return instruction_pointer(regs); | ||
623 | } | ||
624 | |||
625 | unsigned long perf_misc_flags(struct pt_regs *regs) | ||
626 | { | ||
627 | int misc = 0; | ||
628 | |||
629 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { | ||
630 | if (perf_guest_cbs->is_user_mode()) | ||
631 | misc |= PERF_RECORD_MISC_GUEST_USER; | ||
632 | else | ||
633 | misc |= PERF_RECORD_MISC_GUEST_KERNEL; | ||
634 | } else { | ||
635 | if (user_mode(regs)) | ||
636 | misc |= PERF_RECORD_MISC_USER; | ||
637 | else | ||
638 | misc |= PERF_RECORD_MISC_KERNEL; | ||
639 | } | ||
640 | |||
641 | return misc; | ||
642 | } | ||
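The perf_event.c rework above changes the arm_pmu callback convention: enable, disable, read_counter, write_counter and get_event_idx now take the struct perf_event itself rather than a (hw_perf_event, idx) pair, and armpmu_event_update()/armpmu_event_set_period() lose their extra arguments. A minimal sketch of a backend callback under the new convention follows; example_pmu_disable_event is an illustrative name, not a function from the patch.

    /* Context is recovered from the event itself: the owning PMU via
     * to_arm_pmu(event->pmu) and the counter index via event->hw.idx. */
    static void example_pmu_disable_event(struct perf_event *event)
    {
            struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
            struct pmu_hw_events *events = cpu_pmu->get_hw_events();
            struct hw_perf_event *hwc = &event->hw;
            int idx = hwc->idx;

            /* Hardware-specific "stop counting on counter idx" goes here,
             * typically under events->pmu_lock as in the v6/v7 backends. */
    }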
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 8d7d8d4de9d6..9a4f6307a016 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | #include <linux/of.h> | 24 | #include <linux/of.h> |
25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
26 | #include <linux/slab.h> | ||
26 | #include <linux/spinlock.h> | 27 | #include <linux/spinlock.h> |
27 | 28 | ||
28 | #include <asm/cputype.h> | 29 | #include <asm/cputype.h> |
@@ -45,7 +46,7 @@ const char *perf_pmu_name(void) | |||
45 | if (!cpu_pmu) | 46 | if (!cpu_pmu) |
46 | return NULL; | 47 | return NULL; |
47 | 48 | ||
48 | return cpu_pmu->pmu.name; | 49 | return cpu_pmu->name; |
49 | } | 50 | } |
50 | EXPORT_SYMBOL_GPL(perf_pmu_name); | 51 | EXPORT_SYMBOL_GPL(perf_pmu_name); |
51 | 52 | ||
@@ -70,7 +71,7 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void) | |||
70 | return &__get_cpu_var(cpu_hw_events); | 71 | return &__get_cpu_var(cpu_hw_events); |
71 | } | 72 | } |
72 | 73 | ||
73 | static void cpu_pmu_free_irq(void) | 74 | static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) |
74 | { | 75 | { |
75 | int i, irq, irqs; | 76 | int i, irq, irqs; |
76 | struct platform_device *pmu_device = cpu_pmu->plat_device; | 77 | struct platform_device *pmu_device = cpu_pmu->plat_device; |
@@ -86,7 +87,7 @@ static void cpu_pmu_free_irq(void) | |||
86 | } | 87 | } |
87 | } | 88 | } |
88 | 89 | ||
89 | static int cpu_pmu_request_irq(irq_handler_t handler) | 90 | static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) |
90 | { | 91 | { |
91 | int i, err, irq, irqs; | 92 | int i, err, irq, irqs; |
92 | struct platform_device *pmu_device = cpu_pmu->plat_device; | 93 | struct platform_device *pmu_device = cpu_pmu->plat_device; |
@@ -147,7 +148,7 @@ static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu) | |||
147 | 148 | ||
148 | /* Ensure the PMU has sane values out of reset. */ | 149 | /* Ensure the PMU has sane values out of reset. */ |
149 | if (cpu_pmu && cpu_pmu->reset) | 150 | if (cpu_pmu && cpu_pmu->reset) |
150 | on_each_cpu(cpu_pmu->reset, NULL, 1); | 151 | on_each_cpu(cpu_pmu->reset, cpu_pmu, 1); |
151 | } | 152 | } |
152 | 153 | ||
153 | /* | 154 | /* |
@@ -163,7 +164,9 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b, | |||
163 | return NOTIFY_DONE; | 164 | return NOTIFY_DONE; |
164 | 165 | ||
165 | if (cpu_pmu && cpu_pmu->reset) | 166 | if (cpu_pmu && cpu_pmu->reset) |
166 | cpu_pmu->reset(NULL); | 167 | cpu_pmu->reset(cpu_pmu); |
168 | else | ||
169 | return NOTIFY_DONE; | ||
167 | 170 | ||
168 | return NOTIFY_OK; | 171 | return NOTIFY_OK; |
169 | } | 172 | } |
@@ -195,13 +198,13 @@ static struct platform_device_id __devinitdata cpu_pmu_plat_device_ids[] = { | |||
195 | /* | 198 | /* |
196 | * CPU PMU identification and probing. | 199 | * CPU PMU identification and probing. |
197 | */ | 200 | */ |
198 | static struct arm_pmu *__devinit probe_current_pmu(void) | 201 | static int __devinit probe_current_pmu(struct arm_pmu *pmu) |
199 | { | 202 | { |
200 | struct arm_pmu *pmu = NULL; | ||
201 | int cpu = get_cpu(); | 203 | int cpu = get_cpu(); |
202 | unsigned long cpuid = read_cpuid_id(); | 204 | unsigned long cpuid = read_cpuid_id(); |
203 | unsigned long implementor = (cpuid & 0xFF000000) >> 24; | 205 | unsigned long implementor = (cpuid & 0xFF000000) >> 24; |
204 | unsigned long part_number = (cpuid & 0xFFF0); | 206 | unsigned long part_number = (cpuid & 0xFFF0); |
207 | int ret = -ENODEV; | ||
205 | 208 | ||
206 | pr_info("probing PMU on CPU %d\n", cpu); | 209 | pr_info("probing PMU on CPU %d\n", cpu); |
207 | 210 | ||
@@ -211,25 +214,25 @@ static struct arm_pmu *__devinit probe_current_pmu(void) | |||
211 | case 0xB360: /* ARM1136 */ | 214 | case 0xB360: /* ARM1136 */ |
212 | case 0xB560: /* ARM1156 */ | 215 | case 0xB560: /* ARM1156 */ |
213 | case 0xB760: /* ARM1176 */ | 216 | case 0xB760: /* ARM1176 */ |
214 | pmu = armv6pmu_init(); | 217 | ret = armv6pmu_init(pmu); |
215 | break; | 218 | break; |
216 | case 0xB020: /* ARM11mpcore */ | 219 | case 0xB020: /* ARM11mpcore */ |
217 | pmu = armv6mpcore_pmu_init(); | 220 | ret = armv6mpcore_pmu_init(pmu); |
218 | break; | 221 | break; |
219 | case 0xC080: /* Cortex-A8 */ | 222 | case 0xC080: /* Cortex-A8 */ |
220 | pmu = armv7_a8_pmu_init(); | 223 | ret = armv7_a8_pmu_init(pmu); |
221 | break; | 224 | break; |
222 | case 0xC090: /* Cortex-A9 */ | 225 | case 0xC090: /* Cortex-A9 */ |
223 | pmu = armv7_a9_pmu_init(); | 226 | ret = armv7_a9_pmu_init(pmu); |
224 | break; | 227 | break; |
225 | case 0xC050: /* Cortex-A5 */ | 228 | case 0xC050: /* Cortex-A5 */ |
226 | pmu = armv7_a5_pmu_init(); | 229 | ret = armv7_a5_pmu_init(pmu); |
227 | break; | 230 | break; |
228 | case 0xC0F0: /* Cortex-A15 */ | 231 | case 0xC0F0: /* Cortex-A15 */ |
229 | pmu = armv7_a15_pmu_init(); | 232 | ret = armv7_a15_pmu_init(pmu); |
230 | break; | 233 | break; |
231 | case 0xC070: /* Cortex-A7 */ | 234 | case 0xC070: /* Cortex-A7 */ |
232 | pmu = armv7_a7_pmu_init(); | 235 | ret = armv7_a7_pmu_init(pmu); |
233 | break; | 236 | break; |
234 | } | 237 | } |
235 | /* Intel CPUs [xscale]. */ | 238 | /* Intel CPUs [xscale]. */ |
@@ -237,43 +240,54 @@ static struct arm_pmu *__devinit probe_current_pmu(void) | |||
237 | part_number = (cpuid >> 13) & 0x7; | 240 | part_number = (cpuid >> 13) & 0x7; |
238 | switch (part_number) { | 241 | switch (part_number) { |
239 | case 1: | 242 | case 1: |
240 | pmu = xscale1pmu_init(); | 243 | ret = xscale1pmu_init(pmu); |
241 | break; | 244 | break; |
242 | case 2: | 245 | case 2: |
243 | pmu = xscale2pmu_init(); | 246 | ret = xscale2pmu_init(pmu); |
244 | break; | 247 | break; |
245 | } | 248 | } |
246 | } | 249 | } |
247 | 250 | ||
248 | put_cpu(); | 251 | put_cpu(); |
249 | return pmu; | 252 | return ret; |
250 | } | 253 | } |
251 | 254 | ||
252 | static int __devinit cpu_pmu_device_probe(struct platform_device *pdev) | 255 | static int __devinit cpu_pmu_device_probe(struct platform_device *pdev) |
253 | { | 256 | { |
254 | const struct of_device_id *of_id; | 257 | const struct of_device_id *of_id; |
255 | struct arm_pmu *(*init_fn)(void); | 258 | int (*init_fn)(struct arm_pmu *); |
256 | struct device_node *node = pdev->dev.of_node; | 259 | struct device_node *node = pdev->dev.of_node; |
260 | struct arm_pmu *pmu; | ||
261 | int ret = -ENODEV; | ||
257 | 262 | ||
258 | if (cpu_pmu) { | 263 | if (cpu_pmu) { |
259 | pr_info("attempt to register multiple PMU devices!"); | 264 | pr_info("attempt to register multiple PMU devices!"); |
260 | return -ENOSPC; | 265 | return -ENOSPC; |
261 | } | 266 | } |
262 | 267 | ||
268 | pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL); | ||
269 | if (!pmu) { | ||
270 | pr_info("failed to allocate PMU device!"); | ||
271 | return -ENOMEM; | ||
272 | } | ||
273 | |||
263 | if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) { | 274 | if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) { |
264 | init_fn = of_id->data; | 275 | init_fn = of_id->data; |
265 | cpu_pmu = init_fn(); | 276 | ret = init_fn(pmu); |
266 | } else { | 277 | } else { |
267 | cpu_pmu = probe_current_pmu(); | 278 | ret = probe_current_pmu(pmu); |
268 | } | 279 | } |
269 | 280 | ||
270 | if (!cpu_pmu) | 281 | if (ret) { |
271 | return -ENODEV; | 282 | pr_info("failed to register PMU devices!"); |
283 | kfree(pmu); | ||
284 | return ret; | ||
285 | } | ||
272 | 286 | ||
287 | cpu_pmu = pmu; | ||
273 | cpu_pmu->plat_device = pdev; | 288 | cpu_pmu->plat_device = pdev; |
274 | cpu_pmu_init(cpu_pmu); | 289 | cpu_pmu_init(cpu_pmu); |
275 | register_cpu_notifier(&cpu_pmu_hotplug_notifier); | 290 | armpmu_register(cpu_pmu, PERF_TYPE_RAW); |
276 | armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW); | ||
277 | 291 | ||
278 | return 0; | 292 | return 0; |
279 | } | 293 | } |
@@ -290,6 +304,16 @@ static struct platform_driver cpu_pmu_driver = { | |||
290 | 304 | ||
291 | static int __init register_pmu_driver(void) | 305 | static int __init register_pmu_driver(void) |
292 | { | 306 | { |
293 | return platform_driver_register(&cpu_pmu_driver); | 307 | int err; |
308 | |||
309 | err = register_cpu_notifier(&cpu_pmu_hotplug_notifier); | ||
310 | if (err) | ||
311 | return err; | ||
312 | |||
313 | err = platform_driver_register(&cpu_pmu_driver); | ||
314 | if (err) | ||
315 | unregister_cpu_notifier(&cpu_pmu_hotplug_notifier); | ||
316 | |||
317 | return err; | ||
294 | } | 318 | } |
295 | device_initcall(register_pmu_driver); | 319 | device_initcall(register_pmu_driver); |
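perf_event_cpu.c now allocates the arm_pmu itself and has the per-core init functions fill it in and return an error code, with device-tree probing selecting the init function through the of_device_id data pointer. An illustrative match-table entry under the new int (*init_fn)(struct arm_pmu *) signature is sketched below; the compatible string and table contents are assumptions, since the real table sits outside the hunks shown here.

    /* One entry of the OF match table consumed by
     * of_match_node(cpu_pmu_of_device_ids, ...) in the probe path:
     * .data carries the init function that fills in the arm_pmu. */
    static struct of_device_id cpu_pmu_of_device_ids[] = {
            { .compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init },
            { /* sentinel */ },
    };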
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index 6ccc07971745..f3e22ff8b6a2 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c | |||
@@ -401,9 +401,10 @@ armv6_pmcr_counter_has_overflowed(unsigned long pmcr, | |||
401 | return ret; | 401 | return ret; |
402 | } | 402 | } |
403 | 403 | ||
404 | static inline u32 | 404 | static inline u32 armv6pmu_read_counter(struct perf_event *event) |
405 | armv6pmu_read_counter(int counter) | ||
406 | { | 405 | { |
406 | struct hw_perf_event *hwc = &event->hw; | ||
407 | int counter = hwc->idx; | ||
407 | unsigned long value = 0; | 408 | unsigned long value = 0; |
408 | 409 | ||
409 | if (ARMV6_CYCLE_COUNTER == counter) | 410 | if (ARMV6_CYCLE_COUNTER == counter) |
@@ -418,10 +419,11 @@ armv6pmu_read_counter(int counter) | |||
418 | return value; | 419 | return value; |
419 | } | 420 | } |
420 | 421 | ||
421 | static inline void | 422 | static inline void armv6pmu_write_counter(struct perf_event *event, u32 value) |
422 | armv6pmu_write_counter(int counter, | ||
423 | u32 value) | ||
424 | { | 423 | { |
424 | struct hw_perf_event *hwc = &event->hw; | ||
425 | int counter = hwc->idx; | ||
426 | |||
425 | if (ARMV6_CYCLE_COUNTER == counter) | 427 | if (ARMV6_CYCLE_COUNTER == counter) |
426 | asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value)); | 428 | asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value)); |
427 | else if (ARMV6_COUNTER0 == counter) | 429 | else if (ARMV6_COUNTER0 == counter) |
@@ -432,12 +434,13 @@ armv6pmu_write_counter(int counter, | |||
432 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); | 434 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); |
433 | } | 435 | } |
434 | 436 | ||
435 | static void | 437 | static void armv6pmu_enable_event(struct perf_event *event) |
436 | armv6pmu_enable_event(struct hw_perf_event *hwc, | ||
437 | int idx) | ||
438 | { | 438 | { |
439 | unsigned long val, mask, evt, flags; | 439 | unsigned long val, mask, evt, flags; |
440 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
441 | struct hw_perf_event *hwc = &event->hw; | ||
440 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 442 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
443 | int idx = hwc->idx; | ||
441 | 444 | ||
442 | if (ARMV6_CYCLE_COUNTER == idx) { | 445 | if (ARMV6_CYCLE_COUNTER == idx) { |
443 | mask = 0; | 446 | mask = 0; |
@@ -473,7 +476,8 @@ armv6pmu_handle_irq(int irq_num, | |||
473 | { | 476 | { |
474 | unsigned long pmcr = armv6_pmcr_read(); | 477 | unsigned long pmcr = armv6_pmcr_read(); |
475 | struct perf_sample_data data; | 478 | struct perf_sample_data data; |
476 | struct pmu_hw_events *cpuc; | 479 | struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; |
480 | struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events(); | ||
477 | struct pt_regs *regs; | 481 | struct pt_regs *regs; |
478 | int idx; | 482 | int idx; |
479 | 483 | ||
@@ -489,7 +493,6 @@ armv6pmu_handle_irq(int irq_num, | |||
489 | */ | 493 | */ |
490 | armv6_pmcr_write(pmcr); | 494 | armv6_pmcr_write(pmcr); |
491 | 495 | ||
492 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
493 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { | 496 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { |
494 | struct perf_event *event = cpuc->events[idx]; | 497 | struct perf_event *event = cpuc->events[idx]; |
495 | struct hw_perf_event *hwc; | 498 | struct hw_perf_event *hwc; |
@@ -506,13 +509,13 @@ armv6pmu_handle_irq(int irq_num, | |||
506 | continue; | 509 | continue; |
507 | 510 | ||
508 | hwc = &event->hw; | 511 | hwc = &event->hw; |
509 | armpmu_event_update(event, hwc, idx); | 512 | armpmu_event_update(event); |
510 | perf_sample_data_init(&data, 0, hwc->last_period); | 513 | perf_sample_data_init(&data, 0, hwc->last_period); |
511 | if (!armpmu_event_set_period(event, hwc, idx)) | 514 | if (!armpmu_event_set_period(event)) |
512 | continue; | 515 | continue; |
513 | 516 | ||
514 | if (perf_event_overflow(event, &data, regs)) | 517 | if (perf_event_overflow(event, &data, regs)) |
515 | cpu_pmu->disable(hwc, idx); | 518 | cpu_pmu->disable(event); |
516 | } | 519 | } |
517 | 520 | ||
518 | /* | 521 | /* |
@@ -527,8 +530,7 @@ armv6pmu_handle_irq(int irq_num, | |||
527 | return IRQ_HANDLED; | 530 | return IRQ_HANDLED; |
528 | } | 531 | } |
529 | 532 | ||
530 | static void | 533 | static void armv6pmu_start(struct arm_pmu *cpu_pmu) |
531 | armv6pmu_start(void) | ||
532 | { | 534 | { |
533 | unsigned long flags, val; | 535 | unsigned long flags, val; |
534 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 536 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
@@ -540,8 +542,7 @@ armv6pmu_start(void) | |||
540 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 542 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
541 | } | 543 | } |
542 | 544 | ||
543 | static void | 545 | static void armv6pmu_stop(struct arm_pmu *cpu_pmu) |
544 | armv6pmu_stop(void) | ||
545 | { | 546 | { |
546 | unsigned long flags, val; | 547 | unsigned long flags, val; |
547 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 548 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
@@ -555,10 +556,11 @@ armv6pmu_stop(void) | |||
555 | 556 | ||
556 | static int | 557 | static int |
557 | armv6pmu_get_event_idx(struct pmu_hw_events *cpuc, | 558 | armv6pmu_get_event_idx(struct pmu_hw_events *cpuc, |
558 | struct hw_perf_event *event) | 559 | struct perf_event *event) |
559 | { | 560 | { |
561 | struct hw_perf_event *hwc = &event->hw; | ||
560 | /* Always place a cycle counter into the cycle counter. */ | 562 | /* Always place a cycle counter into the cycle counter. */ |
561 | if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) { | 563 | if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) { |
562 | if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask)) | 564 | if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask)) |
563 | return -EAGAIN; | 565 | return -EAGAIN; |
564 | 566 | ||
@@ -579,12 +581,13 @@ armv6pmu_get_event_idx(struct pmu_hw_events *cpuc, | |||
579 | } | 581 | } |
580 | } | 582 | } |
581 | 583 | ||
582 | static void | 584 | static void armv6pmu_disable_event(struct perf_event *event) |
583 | armv6pmu_disable_event(struct hw_perf_event *hwc, | ||
584 | int idx) | ||
585 | { | 585 | { |
586 | unsigned long val, mask, evt, flags; | 586 | unsigned long val, mask, evt, flags; |
587 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
588 | struct hw_perf_event *hwc = &event->hw; | ||
587 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 589 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
590 | int idx = hwc->idx; | ||
588 | 591 | ||
589 | if (ARMV6_CYCLE_COUNTER == idx) { | 592 | if (ARMV6_CYCLE_COUNTER == idx) { |
590 | mask = ARMV6_PMCR_CCOUNT_IEN; | 593 | mask = ARMV6_PMCR_CCOUNT_IEN; |
@@ -613,12 +616,13 @@ armv6pmu_disable_event(struct hw_perf_event *hwc, | |||
613 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 616 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
614 | } | 617 | } |
615 | 618 | ||
616 | static void | 619 | static void armv6mpcore_pmu_disable_event(struct perf_event *event) |
617 | armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc, | ||
618 | int idx) | ||
619 | { | 620 | { |
620 | unsigned long val, mask, flags, evt = 0; | 621 | unsigned long val, mask, flags, evt = 0; |
622 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
623 | struct hw_perf_event *hwc = &event->hw; | ||
621 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 624 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
625 | int idx = hwc->idx; | ||
622 | 626 | ||
623 | if (ARMV6_CYCLE_COUNTER == idx) { | 627 | if (ARMV6_CYCLE_COUNTER == idx) { |
624 | mask = ARMV6_PMCR_CCOUNT_IEN; | 628 | mask = ARMV6_PMCR_CCOUNT_IEN; |
@@ -649,24 +653,22 @@ static int armv6_map_event(struct perf_event *event) | |||
649 | &armv6_perf_cache_map, 0xFF); | 653 | &armv6_perf_cache_map, 0xFF); |
650 | } | 654 | } |
651 | 655 | ||
652 | static struct arm_pmu armv6pmu = { | 656 | static int __devinit armv6pmu_init(struct arm_pmu *cpu_pmu) |
653 | .name = "v6", | ||
654 | .handle_irq = armv6pmu_handle_irq, | ||
655 | .enable = armv6pmu_enable_event, | ||
656 | .disable = armv6pmu_disable_event, | ||
657 | .read_counter = armv6pmu_read_counter, | ||
658 | .write_counter = armv6pmu_write_counter, | ||
659 | .get_event_idx = armv6pmu_get_event_idx, | ||
660 | .start = armv6pmu_start, | ||
661 | .stop = armv6pmu_stop, | ||
662 | .map_event = armv6_map_event, | ||
663 | .num_events = 3, | ||
664 | .max_period = (1LLU << 32) - 1, | ||
665 | }; | ||
666 | |||
667 | static struct arm_pmu *__devinit armv6pmu_init(void) | ||
668 | { | 657 | { |
669 | return &armv6pmu; | 658 | cpu_pmu->name = "v6"; |
659 | cpu_pmu->handle_irq = armv6pmu_handle_irq; | ||
660 | cpu_pmu->enable = armv6pmu_enable_event; | ||
661 | cpu_pmu->disable = armv6pmu_disable_event; | ||
662 | cpu_pmu->read_counter = armv6pmu_read_counter; | ||
663 | cpu_pmu->write_counter = armv6pmu_write_counter; | ||
664 | cpu_pmu->get_event_idx = armv6pmu_get_event_idx; | ||
665 | cpu_pmu->start = armv6pmu_start; | ||
666 | cpu_pmu->stop = armv6pmu_stop; | ||
667 | cpu_pmu->map_event = armv6_map_event; | ||
668 | cpu_pmu->num_events = 3; | ||
669 | cpu_pmu->max_period = (1LLU << 32) - 1; | ||
670 | |||
671 | return 0; | ||
670 | } | 672 | } |
671 | 673 | ||
672 | /* | 674 | /* |
@@ -683,33 +685,31 @@ static int armv6mpcore_map_event(struct perf_event *event) | |||
683 | &armv6mpcore_perf_cache_map, 0xFF); | 685 | &armv6mpcore_perf_cache_map, 0xFF); |
684 | } | 686 | } |
685 | 687 | ||
686 | static struct arm_pmu armv6mpcore_pmu = { | 688 | static int __devinit armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu) |
687 | .name = "v6mpcore", | ||
688 | .handle_irq = armv6pmu_handle_irq, | ||
689 | .enable = armv6pmu_enable_event, | ||
690 | .disable = armv6mpcore_pmu_disable_event, | ||
691 | .read_counter = armv6pmu_read_counter, | ||
692 | .write_counter = armv6pmu_write_counter, | ||
693 | .get_event_idx = armv6pmu_get_event_idx, | ||
694 | .start = armv6pmu_start, | ||
695 | .stop = armv6pmu_stop, | ||
696 | .map_event = armv6mpcore_map_event, | ||
697 | .num_events = 3, | ||
698 | .max_period = (1LLU << 32) - 1, | ||
699 | }; | ||
700 | |||
701 | static struct arm_pmu *__devinit armv6mpcore_pmu_init(void) | ||
702 | { | 689 | { |
703 | return &armv6mpcore_pmu; | 690 | cpu_pmu->name = "v6mpcore"; |
691 | cpu_pmu->handle_irq = armv6pmu_handle_irq; | ||
692 | cpu_pmu->enable = armv6pmu_enable_event; | ||
693 | cpu_pmu->disable = armv6mpcore_pmu_disable_event; | ||
694 | cpu_pmu->read_counter = armv6pmu_read_counter; | ||
695 | cpu_pmu->write_counter = armv6pmu_write_counter; | ||
696 | cpu_pmu->get_event_idx = armv6pmu_get_event_idx; | ||
697 | cpu_pmu->start = armv6pmu_start; | ||
698 | cpu_pmu->stop = armv6pmu_stop; | ||
699 | cpu_pmu->map_event = armv6mpcore_map_event; | ||
700 | cpu_pmu->num_events = 3; | ||
701 | cpu_pmu->max_period = (1LLU << 32) - 1; | ||
702 | |||
703 | return 0; | ||
704 | } | 704 | } |
705 | #else | 705 | #else |
706 | static struct arm_pmu *__devinit armv6pmu_init(void) | 706 | static int armv6pmu_init(struct arm_pmu *cpu_pmu) |
707 | { | 707 | { |
708 | return NULL; | 708 | return -ENODEV; |
709 | } | 709 | } |
710 | 710 | ||
711 | static struct arm_pmu *__devinit armv6mpcore_pmu_init(void) | 711 | static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu) |
712 | { | 712 | { |
713 | return NULL; | 713 | return -ENODEV; |
714 | } | 714 | } |
715 | #endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */ | 715 | #endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */ |
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index bd4b090ebcfd..7d0cce85d17e 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c | |||
@@ -18,8 +18,6 @@ | |||
18 | 18 | ||
19 | #ifdef CONFIG_CPU_V7 | 19 | #ifdef CONFIG_CPU_V7 |
20 | 20 | ||
21 | static struct arm_pmu armv7pmu; | ||
22 | |||
23 | /* | 21 | /* |
24 | * Common ARMv7 event types | 22 | * Common ARMv7 event types |
25 | * | 23 | * |
@@ -738,7 +736,8 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |||
738 | */ | 736 | */ |
739 | #define ARMV7_IDX_CYCLE_COUNTER 0 | 737 | #define ARMV7_IDX_CYCLE_COUNTER 0 |
740 | #define ARMV7_IDX_COUNTER0 1 | 738 | #define ARMV7_IDX_COUNTER0 1 |
741 | #define ARMV7_IDX_COUNTER_LAST (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) | 739 | #define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \ |
740 | (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) | ||
742 | 741 | ||
743 | #define ARMV7_MAX_COUNTERS 32 | 742 | #define ARMV7_MAX_COUNTERS 32 |
744 | #define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1) | 743 | #define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1) |
@@ -804,49 +803,34 @@ static inline int armv7_pmnc_has_overflowed(u32 pmnc) | |||
804 | return pmnc & ARMV7_OVERFLOWED_MASK; | 803 | return pmnc & ARMV7_OVERFLOWED_MASK; |
805 | } | 804 | } |
806 | 805 | ||
807 | static inline int armv7_pmnc_counter_valid(int idx) | 806 | static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx) |
808 | { | 807 | { |
809 | return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST; | 808 | return idx >= ARMV7_IDX_CYCLE_COUNTER && |
809 | idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); | ||
810 | } | 810 | } |
811 | 811 | ||
812 | static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx) | 812 | static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx) |
813 | { | 813 | { |
814 | int ret = 0; | 814 | return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx)); |
815 | u32 counter; | ||
816 | |||
817 | if (!armv7_pmnc_counter_valid(idx)) { | ||
818 | pr_err("CPU%u checking wrong counter %d overflow status\n", | ||
819 | smp_processor_id(), idx); | ||
820 | } else { | ||
821 | counter = ARMV7_IDX_TO_COUNTER(idx); | ||
822 | ret = pmnc & BIT(counter); | ||
823 | } | ||
824 | |||
825 | return ret; | ||
826 | } | 815 | } |
827 | 816 | ||
828 | static inline int armv7_pmnc_select_counter(int idx) | 817 | static inline int armv7_pmnc_select_counter(int idx) |
829 | { | 818 | { |
830 | u32 counter; | 819 | u32 counter = ARMV7_IDX_TO_COUNTER(idx); |
831 | |||
832 | if (!armv7_pmnc_counter_valid(idx)) { | ||
833 | pr_err("CPU%u selecting wrong PMNC counter %d\n", | ||
834 | smp_processor_id(), idx); | ||
835 | return -EINVAL; | ||
836 | } | ||
837 | |||
838 | counter = ARMV7_IDX_TO_COUNTER(idx); | ||
839 | asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter)); | 820 | asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter)); |
840 | isb(); | 821 | isb(); |
841 | 822 | ||
842 | return idx; | 823 | return idx; |
843 | } | 824 | } |
844 | 825 | ||
845 | static inline u32 armv7pmu_read_counter(int idx) | 826 | static inline u32 armv7pmu_read_counter(struct perf_event *event) |
846 | { | 827 | { |
828 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
829 | struct hw_perf_event *hwc = &event->hw; | ||
830 | int idx = hwc->idx; | ||
847 | u32 value = 0; | 831 | u32 value = 0; |
848 | 832 | ||
849 | if (!armv7_pmnc_counter_valid(idx)) | 833 | if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) |
850 | pr_err("CPU%u reading wrong counter %d\n", | 834 | pr_err("CPU%u reading wrong counter %d\n", |
851 | smp_processor_id(), idx); | 835 | smp_processor_id(), idx); |
852 | else if (idx == ARMV7_IDX_CYCLE_COUNTER) | 836 | else if (idx == ARMV7_IDX_CYCLE_COUNTER) |
@@ -857,9 +841,13 @@ static inline u32 armv7pmu_read_counter(int idx) | |||
857 | return value; | 841 | return value; |
858 | } | 842 | } |
859 | 843 | ||
860 | static inline void armv7pmu_write_counter(int idx, u32 value) | 844 | static inline void armv7pmu_write_counter(struct perf_event *event, u32 value) |
861 | { | 845 | { |
862 | if (!armv7_pmnc_counter_valid(idx)) | 846 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); |
847 | struct hw_perf_event *hwc = &event->hw; | ||
848 | int idx = hwc->idx; | ||
849 | |||
850 | if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) | ||
863 | pr_err("CPU%u writing wrong counter %d\n", | 851 | pr_err("CPU%u writing wrong counter %d\n", |
864 | smp_processor_id(), idx); | 852 | smp_processor_id(), idx); |
865 | else if (idx == ARMV7_IDX_CYCLE_COUNTER) | 853 | else if (idx == ARMV7_IDX_CYCLE_COUNTER) |
@@ -878,60 +866,28 @@ static inline void armv7_pmnc_write_evtsel(int idx, u32 val) | |||
878 | 866 | ||
879 | static inline int armv7_pmnc_enable_counter(int idx) | 867 | static inline int armv7_pmnc_enable_counter(int idx) |
880 | { | 868 | { |
881 | u32 counter; | 869 | u32 counter = ARMV7_IDX_TO_COUNTER(idx); |
882 | |||
883 | if (!armv7_pmnc_counter_valid(idx)) { | ||
884 | pr_err("CPU%u enabling wrong PMNC counter %d\n", | ||
885 | smp_processor_id(), idx); | ||
886 | return -EINVAL; | ||
887 | } | ||
888 | |||
889 | counter = ARMV7_IDX_TO_COUNTER(idx); | ||
890 | asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter))); | 870 | asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter))); |
891 | return idx; | 871 | return idx; |
892 | } | 872 | } |
893 | 873 | ||
894 | static inline int armv7_pmnc_disable_counter(int idx) | 874 | static inline int armv7_pmnc_disable_counter(int idx) |
895 | { | 875 | { |
896 | u32 counter; | 876 | u32 counter = ARMV7_IDX_TO_COUNTER(idx); |
897 | |||
898 | if (!armv7_pmnc_counter_valid(idx)) { | ||
899 | pr_err("CPU%u disabling wrong PMNC counter %d\n", | ||
900 | smp_processor_id(), idx); | ||
901 | return -EINVAL; | ||
902 | } | ||
903 | |||
904 | counter = ARMV7_IDX_TO_COUNTER(idx); | ||
905 | asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter))); | 877 | asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter))); |
906 | return idx; | 878 | return idx; |
907 | } | 879 | } |
908 | 880 | ||
909 | static inline int armv7_pmnc_enable_intens(int idx) | 881 | static inline int armv7_pmnc_enable_intens(int idx) |
910 | { | 882 | { |
911 | u32 counter; | 883 | u32 counter = ARMV7_IDX_TO_COUNTER(idx); |
912 | |||
913 | if (!armv7_pmnc_counter_valid(idx)) { | ||
914 | pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n", | ||
915 | smp_processor_id(), idx); | ||
916 | return -EINVAL; | ||
917 | } | ||
918 | |||
919 | counter = ARMV7_IDX_TO_COUNTER(idx); | ||
920 | asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter))); | 884 | asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter))); |
921 | return idx; | 885 | return idx; |
922 | } | 886 | } |
923 | 887 | ||
924 | static inline int armv7_pmnc_disable_intens(int idx) | 888 | static inline int armv7_pmnc_disable_intens(int idx) |
925 | { | 889 | { |
926 | u32 counter; | 890 | u32 counter = ARMV7_IDX_TO_COUNTER(idx); |
927 | |||
928 | if (!armv7_pmnc_counter_valid(idx)) { | ||
929 | pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n", | ||
930 | smp_processor_id(), idx); | ||
931 | return -EINVAL; | ||
932 | } | ||
933 | |||
934 | counter = ARMV7_IDX_TO_COUNTER(idx); | ||
935 | asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter))); | 891 | asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter))); |
936 | isb(); | 892 | isb(); |
937 | /* Clear the overflow flag in case an interrupt is pending. */ | 893 | /* Clear the overflow flag in case an interrupt is pending. */ |
@@ -956,7 +912,7 @@ static inline u32 armv7_pmnc_getreset_flags(void) | |||
956 | } | 912 | } |
957 | 913 | ||
958 | #ifdef DEBUG | 914 | #ifdef DEBUG |
959 | static void armv7_pmnc_dump_regs(void) | 915 | static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu) |
960 | { | 916 | { |
961 | u32 val; | 917 | u32 val; |
962 | unsigned int cnt; | 918 | unsigned int cnt; |
@@ -981,7 +937,8 @@ static void armv7_pmnc_dump_regs(void) | |||
981 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); | 937 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); |
982 | printk(KERN_INFO "CCNT =0x%08x\n", val); | 938 | printk(KERN_INFO "CCNT =0x%08x\n", val); |
983 | 939 | ||
984 | for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) { | 940 | for (cnt = ARMV7_IDX_COUNTER0; |
941 | cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) { | ||
985 | armv7_pmnc_select_counter(cnt); | 942 | armv7_pmnc_select_counter(cnt); |
986 | asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); | 943 | asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); |
987 | printk(KERN_INFO "CNT[%d] count =0x%08x\n", | 944 | printk(KERN_INFO "CNT[%d] count =0x%08x\n", |
@@ -993,10 +950,19 @@ static void armv7_pmnc_dump_regs(void) | |||
993 | } | 950 | } |
994 | #endif | 951 | #endif |
995 | 952 | ||
996 | static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) | 953 | static void armv7pmu_enable_event(struct perf_event *event) |
997 | { | 954 | { |
998 | unsigned long flags; | 955 | unsigned long flags; |
956 | struct hw_perf_event *hwc = &event->hw; | ||
957 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
999 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 958 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
959 | int idx = hwc->idx; | ||
960 | |||
961 | if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { | ||
962 | pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n", | ||
963 | smp_processor_id(), idx); | ||
964 | return; | ||
965 | } | ||
1000 | 966 | ||
1001 | /* | 967 | /* |
1002 | * Enable counter and interrupt, and set the counter to count | 968 | * Enable counter and interrupt, and set the counter to count |
@@ -1014,7 +980,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) | |||
1014 | * We only need to set the event for the cycle counter if we | 980 | * We only need to set the event for the cycle counter if we |
1015 | * have the ability to perform event filtering. | 981 | * have the ability to perform event filtering. |
1016 | */ | 982 | */ |
1017 | if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER) | 983 | if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER) |
1018 | armv7_pmnc_write_evtsel(idx, hwc->config_base); | 984 | armv7_pmnc_write_evtsel(idx, hwc->config_base); |
1019 | 985 | ||
1020 | /* | 986 | /* |
@@ -1030,10 +996,19 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) | |||
1030 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 996 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
1031 | } | 997 | } |
1032 | 998 | ||
1033 | static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx) | 999 | static void armv7pmu_disable_event(struct perf_event *event) |
1034 | { | 1000 | { |
1035 | unsigned long flags; | 1001 | unsigned long flags; |
1002 | struct hw_perf_event *hwc = &event->hw; | ||
1003 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
1036 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 1004 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
1005 | int idx = hwc->idx; | ||
1006 | |||
1007 | if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { | ||
1008 | pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n", | ||
1009 | smp_processor_id(), idx); | ||
1010 | return; | ||
1011 | } | ||
1037 | 1012 | ||
1038 | /* | 1013 | /* |
1039 | * Disable counter and interrupt | 1014 | * Disable counter and interrupt |
@@ -1057,7 +1032,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | |||
1057 | { | 1032 | { |
1058 | u32 pmnc; | 1033 | u32 pmnc; |
1059 | struct perf_sample_data data; | 1034 | struct perf_sample_data data; |
1060 | struct pmu_hw_events *cpuc; | 1035 | struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; |
1036 | struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events(); | ||
1061 | struct pt_regs *regs; | 1037 | struct pt_regs *regs; |
1062 | int idx; | 1038 | int idx; |
1063 | 1039 | ||
@@ -1077,7 +1053,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | |||
1077 | */ | 1053 | */ |
1078 | regs = get_irq_regs(); | 1054 | regs = get_irq_regs(); |
1079 | 1055 | ||
1080 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
1081 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { | 1056 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { |
1082 | struct perf_event *event = cpuc->events[idx]; | 1057 | struct perf_event *event = cpuc->events[idx]; |
1083 | struct hw_perf_event *hwc; | 1058 | struct hw_perf_event *hwc; |
@@ -1094,13 +1069,13 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | |||
1094 | continue; | 1069 | continue; |
1095 | 1070 | ||
1096 | hwc = &event->hw; | 1071 | hwc = &event->hw; |
1097 | armpmu_event_update(event, hwc, idx); | 1072 | armpmu_event_update(event); |
1098 | perf_sample_data_init(&data, 0, hwc->last_period); | 1073 | perf_sample_data_init(&data, 0, hwc->last_period); |
1099 | if (!armpmu_event_set_period(event, hwc, idx)) | 1074 | if (!armpmu_event_set_period(event)) |
1100 | continue; | 1075 | continue; |
1101 | 1076 | ||
1102 | if (perf_event_overflow(event, &data, regs)) | 1077 | if (perf_event_overflow(event, &data, regs)) |
1103 | cpu_pmu->disable(hwc, idx); | 1078 | cpu_pmu->disable(event); |
1104 | } | 1079 | } |
1105 | 1080 | ||
1106 | /* | 1081 | /* |
@@ -1115,7 +1090,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | |||
1115 | return IRQ_HANDLED; | 1090 | return IRQ_HANDLED; |
1116 | } | 1091 | } |
1117 | 1092 | ||
1118 | static void armv7pmu_start(void) | 1093 | static void armv7pmu_start(struct arm_pmu *cpu_pmu) |
1119 | { | 1094 | { |
1120 | unsigned long flags; | 1095 | unsigned long flags; |
1121 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 1096 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
@@ -1126,7 +1101,7 @@ static void armv7pmu_start(void) | |||
1126 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 1101 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
1127 | } | 1102 | } |
1128 | 1103 | ||
1129 | static void armv7pmu_stop(void) | 1104 | static void armv7pmu_stop(struct arm_pmu *cpu_pmu) |
1130 | { | 1105 | { |
1131 | unsigned long flags; | 1106 | unsigned long flags; |
1132 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 1107 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
@@ -1138,10 +1113,12 @@ static void armv7pmu_stop(void) | |||
1138 | } | 1113 | } |
1139 | 1114 | ||
1140 | static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc, | 1115 | static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc, |
1141 | struct hw_perf_event *event) | 1116 | struct perf_event *event) |
1142 | { | 1117 | { |
1143 | int idx; | 1118 | int idx; |
1144 | unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT; | 1119 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); |
1120 | struct hw_perf_event *hwc = &event->hw; | ||
1121 | unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT; | ||
1145 | 1122 | ||
1146 | /* Always place a cycle counter into the cycle counter. */ | 1123 | /* Always place a cycle counter into the cycle counter. */ |
1147 | if (evtype == ARMV7_PERFCTR_CPU_CYCLES) { | 1124 | if (evtype == ARMV7_PERFCTR_CPU_CYCLES) { |
@@ -1192,11 +1169,14 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event, | |||
1192 | 1169 | ||
1193 | static void armv7pmu_reset(void *info) | 1170 | static void armv7pmu_reset(void *info) |
1194 | { | 1171 | { |
1172 | struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; | ||
1195 | u32 idx, nb_cnt = cpu_pmu->num_events; | 1173 | u32 idx, nb_cnt = cpu_pmu->num_events; |
1196 | 1174 | ||
1197 | /* The counter and interrupt enable registers are unknown at reset. */ | 1175 | /* The counter and interrupt enable registers are unknown at reset. */ |
1198 | for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) | 1176 | for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { |
1199 | armv7pmu_disable_event(NULL, idx); | 1177 | armv7_pmnc_disable_counter(idx); |
1178 | armv7_pmnc_disable_intens(idx); | ||
1179 | } | ||
1200 | 1180 | ||
1201 | /* Initialize & Reset PMNC: C and P bits */ | 1181 | /* Initialize & Reset PMNC: C and P bits */ |
1202 | armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); | 1182 | armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); |
@@ -1232,17 +1212,18 @@ static int armv7_a7_map_event(struct perf_event *event) | |||
1232 | &armv7_a7_perf_cache_map, 0xFF); | 1212 | &armv7_a7_perf_cache_map, 0xFF); |
1233 | } | 1213 | } |
1234 | 1214 | ||
1235 | static struct arm_pmu armv7pmu = { | 1215 | static void armv7pmu_init(struct arm_pmu *cpu_pmu) |
1236 | .handle_irq = armv7pmu_handle_irq, | 1216 | { |
1237 | .enable = armv7pmu_enable_event, | 1217 | cpu_pmu->handle_irq = armv7pmu_handle_irq; |
1238 | .disable = armv7pmu_disable_event, | 1218 | cpu_pmu->enable = armv7pmu_enable_event; |
1239 | .read_counter = armv7pmu_read_counter, | 1219 | cpu_pmu->disable = armv7pmu_disable_event; |
1240 | .write_counter = armv7pmu_write_counter, | 1220 | cpu_pmu->read_counter = armv7pmu_read_counter; |
1241 | .get_event_idx = armv7pmu_get_event_idx, | 1221 | cpu_pmu->write_counter = armv7pmu_write_counter; |
1242 | .start = armv7pmu_start, | 1222 | cpu_pmu->get_event_idx = armv7pmu_get_event_idx; |
1243 | .stop = armv7pmu_stop, | 1223 | cpu_pmu->start = armv7pmu_start; |
1244 | .reset = armv7pmu_reset, | 1224 | cpu_pmu->stop = armv7pmu_stop; |
1245 | .max_period = (1LLU << 32) - 1, | 1225 | cpu_pmu->reset = armv7pmu_reset; |
1226 | cpu_pmu->max_period = (1LLU << 32) - 1; | ||
1246 | }; | 1227 | }; |
1247 | 1228 | ||
1248 | static u32 __devinit armv7_read_num_pmnc_events(void) | 1229 | static u32 __devinit armv7_read_num_pmnc_events(void) |
@@ -1256,70 +1237,75 @@ static u32 __devinit armv7_read_num_pmnc_events(void) | |||
1256 | return nb_cnt + 1; | 1237 | return nb_cnt + 1; |
1257 | } | 1238 | } |
1258 | 1239 | ||
1259 | static struct arm_pmu *__devinit armv7_a8_pmu_init(void) | 1240 | static int __devinit armv7_a8_pmu_init(struct arm_pmu *cpu_pmu) |
1260 | { | 1241 | { |
1261 | armv7pmu.name = "ARMv7 Cortex-A8"; | 1242 | armv7pmu_init(cpu_pmu); |
1262 | armv7pmu.map_event = armv7_a8_map_event; | 1243 | cpu_pmu->name = "ARMv7 Cortex-A8"; |
1263 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1244 | cpu_pmu->map_event = armv7_a8_map_event; |
1264 | return &armv7pmu; | 1245 | cpu_pmu->num_events = armv7_read_num_pmnc_events(); |
1246 | return 0; | ||
1265 | } | 1247 | } |
1266 | 1248 | ||
1267 | static struct arm_pmu *__devinit armv7_a9_pmu_init(void) | 1249 | static int __devinit armv7_a9_pmu_init(struct arm_pmu *cpu_pmu) |
1268 | { | 1250 | { |
1269 | armv7pmu.name = "ARMv7 Cortex-A9"; | 1251 | armv7pmu_init(cpu_pmu); |
1270 | armv7pmu.map_event = armv7_a9_map_event; | 1252 | cpu_pmu->name = "ARMv7 Cortex-A9"; |
1271 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1253 | cpu_pmu->map_event = armv7_a9_map_event; |
1272 | return &armv7pmu; | 1254 | cpu_pmu->num_events = armv7_read_num_pmnc_events(); |
1255 | return 0; | ||
1273 | } | 1256 | } |
1274 | 1257 | ||
1275 | static struct arm_pmu *__devinit armv7_a5_pmu_init(void) | 1258 | static int __devinit armv7_a5_pmu_init(struct arm_pmu *cpu_pmu) |
1276 | { | 1259 | { |
1277 | armv7pmu.name = "ARMv7 Cortex-A5"; | 1260 | armv7pmu_init(cpu_pmu); |
1278 | armv7pmu.map_event = armv7_a5_map_event; | 1261 | cpu_pmu->name = "ARMv7 Cortex-A5"; |
1279 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1262 | cpu_pmu->map_event = armv7_a5_map_event; |
1280 | return &armv7pmu; | 1263 | cpu_pmu->num_events = armv7_read_num_pmnc_events(); |
1264 | return 0; | ||
1281 | } | 1265 | } |
1282 | 1266 | ||
1283 | static struct arm_pmu *__devinit armv7_a15_pmu_init(void) | 1267 | static int __devinit armv7_a15_pmu_init(struct arm_pmu *cpu_pmu) |
1284 | { | 1268 | { |
1285 | armv7pmu.name = "ARMv7 Cortex-A15"; | 1269 | armv7pmu_init(cpu_pmu); |
1286 | armv7pmu.map_event = armv7_a15_map_event; | 1270 | cpu_pmu->name = "ARMv7 Cortex-A15"; |
1287 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1271 | cpu_pmu->map_event = armv7_a15_map_event; |
1288 | armv7pmu.set_event_filter = armv7pmu_set_event_filter; | 1272 | cpu_pmu->num_events = armv7_read_num_pmnc_events(); |
1289 | return &armv7pmu; | 1273 | cpu_pmu->set_event_filter = armv7pmu_set_event_filter; |
1274 | return 0; | ||
1290 | } | 1275 | } |
1291 | 1276 | ||
1292 | static struct arm_pmu *__devinit armv7_a7_pmu_init(void) | 1277 | static int __devinit armv7_a7_pmu_init(struct arm_pmu *cpu_pmu) |
1293 | { | 1278 | { |
1294 | armv7pmu.name = "ARMv7 Cortex-A7"; | 1279 | armv7pmu_init(cpu_pmu); |
1295 | armv7pmu.map_event = armv7_a7_map_event; | 1280 | cpu_pmu->name = "ARMv7 Cortex-A7"; |
1296 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1281 | cpu_pmu->map_event = armv7_a7_map_event; |
1297 | armv7pmu.set_event_filter = armv7pmu_set_event_filter; | 1282 | cpu_pmu->num_events = armv7_read_num_pmnc_events(); |
1298 | return &armv7pmu; | 1283 | cpu_pmu->set_event_filter = armv7pmu_set_event_filter; |
1284 | return 0; | ||
1299 | } | 1285 | } |
1300 | #else | 1286 | #else |
1301 | static struct arm_pmu *__devinit armv7_a8_pmu_init(void) | 1287 | static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu) |
1302 | { | 1288 | { |
1303 | return NULL; | 1289 | return -ENODEV; |
1304 | } | 1290 | } |
1305 | 1291 | ||
1306 | static struct arm_pmu *__devinit armv7_a9_pmu_init(void) | 1292 | static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu) |
1307 | { | 1293 | { |
1308 | return NULL; | 1294 | return -ENODEV; |
1309 | } | 1295 | } |
1310 | 1296 | ||
1311 | static struct arm_pmu *__devinit armv7_a5_pmu_init(void) | 1297 | static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu) |
1312 | { | 1298 | { |
1313 | return NULL; | 1299 | return -ENODEV; |
1314 | } | 1300 | } |
1315 | 1301 | ||
1316 | static struct arm_pmu *__devinit armv7_a15_pmu_init(void) | 1302 | static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu) |
1317 | { | 1303 | { |
1318 | return NULL; | 1304 | return -ENODEV; |
1319 | } | 1305 | } |
1320 | 1306 | ||
1321 | static struct arm_pmu *__devinit armv7_a7_pmu_init(void) | 1307 | static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu) |
1322 | { | 1308 | { |
1323 | return NULL; | 1309 | return -ENODEV; |
1324 | } | 1310 | } |
1325 | #endif /* CONFIG_CPU_V7 */ | 1311 | #endif /* CONFIG_CPU_V7 */ |
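
The armv7 hunks above replace the single static struct arm_pmu armv7pmu with per-core init functions that fill in a descriptor owned by the caller and report success with 0 or -ENODEV from the stubs. A minimal standalone sketch of that pattern follows; it is plain C with illustrative names (fake_pmu, a8_init), not kernel code.

    /* Sketch of the init-function pattern: populate a caller-owned
     * descriptor instead of handing back a pointer to one static object.
     */
    #include <errno.h>
    #include <stdio.h>

    struct fake_pmu {
        const char *name;
        int num_events;
        void (*start)(struct fake_pmu *pmu);
    };

    static void a8_start(struct fake_pmu *pmu)
    {
        printf("starting %s with %d counters\n", pmu->name, pmu->num_events);
    }

    /* Analogue of armv7_a8_pmu_init(): fill in the caller's descriptor. */
    static int a8_init(struct fake_pmu *pmu)
    {
        pmu->name = "fake Cortex-A8 PMU";
        pmu->num_events = 5;
        pmu->start = a8_start;
        return 0;
    }

    /* Analogue of the !CONFIG_CPU_V7 stubs: report "no such device". */
    static int missing_init(struct fake_pmu *pmu)
    {
        (void)pmu;
        return -ENODEV;
    }

    int main(void)
    {
        struct fake_pmu pmu;

        if (a8_init(&pmu) == 0)
            pmu.start(&pmu);

        if (missing_init(&pmu) == -ENODEV)
            printf("stub correctly reported -ENODEV\n");
        return 0;
    }
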
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index 426e19f380a2..0c8265e53d5f 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c | |||
@@ -224,7 +224,8 @@ xscale1pmu_handle_irq(int irq_num, void *dev) | |||
224 | { | 224 | { |
225 | unsigned long pmnc; | 225 | unsigned long pmnc; |
226 | struct perf_sample_data data; | 226 | struct perf_sample_data data; |
227 | struct pmu_hw_events *cpuc; | 227 | struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; |
228 | struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events(); | ||
228 | struct pt_regs *regs; | 229 | struct pt_regs *regs; |
229 | int idx; | 230 | int idx; |
230 | 231 | ||
@@ -248,7 +249,6 @@ xscale1pmu_handle_irq(int irq_num, void *dev) | |||
248 | 249 | ||
249 | regs = get_irq_regs(); | 250 | regs = get_irq_regs(); |
250 | 251 | ||
251 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
252 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { | 252 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { |
253 | struct perf_event *event = cpuc->events[idx]; | 253 | struct perf_event *event = cpuc->events[idx]; |
254 | struct hw_perf_event *hwc; | 254 | struct hw_perf_event *hwc; |
@@ -260,13 +260,13 @@ xscale1pmu_handle_irq(int irq_num, void *dev) | |||
260 | continue; | 260 | continue; |
261 | 261 | ||
262 | hwc = &event->hw; | 262 | hwc = &event->hw; |
263 | armpmu_event_update(event, hwc, idx); | 263 | armpmu_event_update(event); |
264 | perf_sample_data_init(&data, 0, hwc->last_period); | 264 | perf_sample_data_init(&data, 0, hwc->last_period); |
265 | if (!armpmu_event_set_period(event, hwc, idx)) | 265 | if (!armpmu_event_set_period(event)) |
266 | continue; | 266 | continue; |
267 | 267 | ||
268 | if (perf_event_overflow(event, &data, regs)) | 268 | if (perf_event_overflow(event, &data, regs)) |
269 | cpu_pmu->disable(hwc, idx); | 269 | cpu_pmu->disable(event); |
270 | } | 270 | } |
271 | 271 | ||
272 | irq_work_run(); | 272 | irq_work_run(); |
@@ -280,11 +280,13 @@ xscale1pmu_handle_irq(int irq_num, void *dev) | |||
280 | return IRQ_HANDLED; | 280 | return IRQ_HANDLED; |
281 | } | 281 | } |
282 | 282 | ||
283 | static void | 283 | static void xscale1pmu_enable_event(struct perf_event *event) |
284 | xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx) | ||
285 | { | 284 | { |
286 | unsigned long val, mask, evt, flags; | 285 | unsigned long val, mask, evt, flags; |
286 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
287 | struct hw_perf_event *hwc = &event->hw; | ||
287 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 288 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
289 | int idx = hwc->idx; | ||
288 | 290 | ||
289 | switch (idx) { | 291 | switch (idx) { |
290 | case XSCALE_CYCLE_COUNTER: | 292 | case XSCALE_CYCLE_COUNTER: |
@@ -314,11 +316,13 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx) | |||
314 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 316 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
315 | } | 317 | } |
316 | 318 | ||
317 | static void | 319 | static void xscale1pmu_disable_event(struct perf_event *event) |
318 | xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx) | ||
319 | { | 320 | { |
320 | unsigned long val, mask, evt, flags; | 321 | unsigned long val, mask, evt, flags; |
322 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
323 | struct hw_perf_event *hwc = &event->hw; | ||
321 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 324 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
325 | int idx = hwc->idx; | ||
322 | 326 | ||
323 | switch (idx) { | 327 | switch (idx) { |
324 | case XSCALE_CYCLE_COUNTER: | 328 | case XSCALE_CYCLE_COUNTER: |
@@ -348,9 +352,10 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx) | |||
348 | 352 | ||
349 | static int | 353 | static int |
350 | xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc, | 354 | xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc, |
351 | struct hw_perf_event *event) | 355 | struct perf_event *event) |
352 | { | 356 | { |
353 | if (XSCALE_PERFCTR_CCNT == event->config_base) { | 357 | struct hw_perf_event *hwc = &event->hw; |
358 | if (XSCALE_PERFCTR_CCNT == hwc->config_base) { | ||
354 | if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask)) | 359 | if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask)) |
355 | return -EAGAIN; | 360 | return -EAGAIN; |
356 | 361 | ||
@@ -366,8 +371,7 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc, | |||
366 | } | 371 | } |
367 | } | 372 | } |
368 | 373 | ||
369 | static void | 374 | static void xscale1pmu_start(struct arm_pmu *cpu_pmu) |
370 | xscale1pmu_start(void) | ||
371 | { | 375 | { |
372 | unsigned long flags, val; | 376 | unsigned long flags, val; |
373 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 377 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
@@ -379,8 +383,7 @@ xscale1pmu_start(void) | |||
379 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 383 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
380 | } | 384 | } |
381 | 385 | ||
382 | static void | 386 | static void xscale1pmu_stop(struct arm_pmu *cpu_pmu) |
383 | xscale1pmu_stop(void) | ||
384 | { | 387 | { |
385 | unsigned long flags, val; | 388 | unsigned long flags, val; |
386 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 389 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
@@ -392,9 +395,10 @@ xscale1pmu_stop(void) | |||
392 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 395 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
393 | } | 396 | } |
394 | 397 | ||
395 | static inline u32 | 398 | static inline u32 xscale1pmu_read_counter(struct perf_event *event) |
396 | xscale1pmu_read_counter(int counter) | ||
397 | { | 399 | { |
400 | struct hw_perf_event *hwc = &event->hw; | ||
401 | int counter = hwc->idx; | ||
398 | u32 val = 0; | 402 | u32 val = 0; |
399 | 403 | ||
400 | switch (counter) { | 404 | switch (counter) { |
@@ -412,9 +416,11 @@ xscale1pmu_read_counter(int counter) | |||
412 | return val; | 416 | return val; |
413 | } | 417 | } |
414 | 418 | ||
415 | static inline void | 419 | static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val) |
416 | xscale1pmu_write_counter(int counter, u32 val) | ||
417 | { | 420 | { |
421 | struct hw_perf_event *hwc = &event->hw; | ||
422 | int counter = hwc->idx; | ||
423 | |||
418 | switch (counter) { | 424 | switch (counter) { |
419 | case XSCALE_CYCLE_COUNTER: | 425 | case XSCALE_CYCLE_COUNTER: |
420 | asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val)); | 426 | asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val)); |
@@ -434,24 +440,22 @@ static int xscale_map_event(struct perf_event *event) | |||
434 | &xscale_perf_cache_map, 0xFF); | 440 | &xscale_perf_cache_map, 0xFF); |
435 | } | 441 | } |
436 | 442 | ||
437 | static struct arm_pmu xscale1pmu = { | 443 | static int __devinit xscale1pmu_init(struct arm_pmu *cpu_pmu) |
438 | .name = "xscale1", | ||
439 | .handle_irq = xscale1pmu_handle_irq, | ||
440 | .enable = xscale1pmu_enable_event, | ||
441 | .disable = xscale1pmu_disable_event, | ||
442 | .read_counter = xscale1pmu_read_counter, | ||
443 | .write_counter = xscale1pmu_write_counter, | ||
444 | .get_event_idx = xscale1pmu_get_event_idx, | ||
445 | .start = xscale1pmu_start, | ||
446 | .stop = xscale1pmu_stop, | ||
447 | .map_event = xscale_map_event, | ||
448 | .num_events = 3, | ||
449 | .max_period = (1LLU << 32) - 1, | ||
450 | }; | ||
451 | |||
452 | static struct arm_pmu *__devinit xscale1pmu_init(void) | ||
453 | { | 444 | { |
454 | return &xscale1pmu; | 445 | cpu_pmu->name = "xscale1"; |
446 | cpu_pmu->handle_irq = xscale1pmu_handle_irq; | ||
447 | cpu_pmu->enable = xscale1pmu_enable_event; | ||
448 | cpu_pmu->disable = xscale1pmu_disable_event; | ||
449 | cpu_pmu->read_counter = xscale1pmu_read_counter; | ||
450 | cpu_pmu->write_counter = xscale1pmu_write_counter; | ||
451 | cpu_pmu->get_event_idx = xscale1pmu_get_event_idx; | ||
452 | cpu_pmu->start = xscale1pmu_start; | ||
453 | cpu_pmu->stop = xscale1pmu_stop; | ||
454 | cpu_pmu->map_event = xscale_map_event; | ||
455 | cpu_pmu->num_events = 3; | ||
456 | cpu_pmu->max_period = (1LLU << 32) - 1; | ||
457 | |||
458 | return 0; | ||
455 | } | 459 | } |
456 | 460 | ||
457 | #define XSCALE2_OVERFLOWED_MASK 0x01f | 461 | #define XSCALE2_OVERFLOWED_MASK 0x01f |
@@ -567,7 +571,8 @@ xscale2pmu_handle_irq(int irq_num, void *dev) | |||
567 | { | 571 | { |
568 | unsigned long pmnc, of_flags; | 572 | unsigned long pmnc, of_flags; |
569 | struct perf_sample_data data; | 573 | struct perf_sample_data data; |
570 | struct pmu_hw_events *cpuc; | 574 | struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; |
575 | struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events(); | ||
571 | struct pt_regs *regs; | 576 | struct pt_regs *regs; |
572 | int idx; | 577 | int idx; |
573 | 578 | ||
@@ -585,7 +590,6 @@ xscale2pmu_handle_irq(int irq_num, void *dev) | |||
585 | 590 | ||
586 | regs = get_irq_regs(); | 591 | regs = get_irq_regs(); |
587 | 592 | ||
588 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
589 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { | 593 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { |
590 | struct perf_event *event = cpuc->events[idx]; | 594 | struct perf_event *event = cpuc->events[idx]; |
591 | struct hw_perf_event *hwc; | 595 | struct hw_perf_event *hwc; |
@@ -597,13 +601,13 @@ xscale2pmu_handle_irq(int irq_num, void *dev) | |||
597 | continue; | 601 | continue; |
598 | 602 | ||
599 | hwc = &event->hw; | 603 | hwc = &event->hw; |
600 | armpmu_event_update(event, hwc, idx); | 604 | armpmu_event_update(event); |
601 | perf_sample_data_init(&data, 0, hwc->last_period); | 605 | perf_sample_data_init(&data, 0, hwc->last_period); |
602 | if (!armpmu_event_set_period(event, hwc, idx)) | 606 | if (!armpmu_event_set_period(event)) |
603 | continue; | 607 | continue; |
604 | 608 | ||
605 | if (perf_event_overflow(event, &data, regs)) | 609 | if (perf_event_overflow(event, &data, regs)) |
606 | cpu_pmu->disable(hwc, idx); | 610 | cpu_pmu->disable(event); |
607 | } | 611 | } |
608 | 612 | ||
609 | irq_work_run(); | 613 | irq_work_run(); |
@@ -617,11 +621,13 @@ xscale2pmu_handle_irq(int irq_num, void *dev) | |||
617 | return IRQ_HANDLED; | 621 | return IRQ_HANDLED; |
618 | } | 622 | } |
619 | 623 | ||
620 | static void | 624 | static void xscale2pmu_enable_event(struct perf_event *event) |
621 | xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx) | ||
622 | { | 625 | { |
623 | unsigned long flags, ien, evtsel; | 626 | unsigned long flags, ien, evtsel; |
627 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
628 | struct hw_perf_event *hwc = &event->hw; | ||
624 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 629 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
630 | int idx = hwc->idx; | ||
625 | 631 | ||
626 | ien = xscale2pmu_read_int_enable(); | 632 | ien = xscale2pmu_read_int_enable(); |
627 | evtsel = xscale2pmu_read_event_select(); | 633 | evtsel = xscale2pmu_read_event_select(); |
@@ -661,11 +667,13 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx) | |||
661 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 667 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
662 | } | 668 | } |
663 | 669 | ||
664 | static void | 670 | static void xscale2pmu_disable_event(struct perf_event *event) |
665 | xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) | ||
666 | { | 671 | { |
667 | unsigned long flags, ien, evtsel, of_flags; | 672 | unsigned long flags, ien, evtsel, of_flags; |
673 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); | ||
674 | struct hw_perf_event *hwc = &event->hw; | ||
668 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 675 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
676 | int idx = hwc->idx; | ||
669 | 677 | ||
670 | ien = xscale2pmu_read_int_enable(); | 678 | ien = xscale2pmu_read_int_enable(); |
671 | evtsel = xscale2pmu_read_event_select(); | 679 | evtsel = xscale2pmu_read_event_select(); |
@@ -713,7 +721,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) | |||
713 | 721 | ||
714 | static int | 722 | static int |
715 | xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc, | 723 | xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc, |
716 | struct hw_perf_event *event) | 724 | struct perf_event *event) |
717 | { | 725 | { |
718 | int idx = xscale1pmu_get_event_idx(cpuc, event); | 726 | int idx = xscale1pmu_get_event_idx(cpuc, event); |
719 | if (idx >= 0) | 727 | if (idx >= 0) |
@@ -727,8 +735,7 @@ out: | |||
727 | return idx; | 735 | return idx; |
728 | } | 736 | } |
729 | 737 | ||
730 | static void | 738 | static void xscale2pmu_start(struct arm_pmu *cpu_pmu) |
731 | xscale2pmu_start(void) | ||
732 | { | 739 | { |
733 | unsigned long flags, val; | 740 | unsigned long flags, val; |
734 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 741 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
@@ -740,8 +747,7 @@ xscale2pmu_start(void) | |||
740 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 747 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
741 | } | 748 | } |
742 | 749 | ||
743 | static void | 750 | static void xscale2pmu_stop(struct arm_pmu *cpu_pmu) |
744 | xscale2pmu_stop(void) | ||
745 | { | 751 | { |
746 | unsigned long flags, val; | 752 | unsigned long flags, val; |
747 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 753 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
@@ -753,9 +759,10 @@ xscale2pmu_stop(void) | |||
753 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 759 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
754 | } | 760 | } |
755 | 761 | ||
756 | static inline u32 | 762 | static inline u32 xscale2pmu_read_counter(struct perf_event *event) |
757 | xscale2pmu_read_counter(int counter) | ||
758 | { | 763 | { |
764 | struct hw_perf_event *hwc = &event->hw; | ||
765 | int counter = hwc->idx; | ||
759 | u32 val = 0; | 766 | u32 val = 0; |
760 | 767 | ||
761 | switch (counter) { | 768 | switch (counter) { |
@@ -779,9 +786,11 @@ xscale2pmu_read_counter(int counter) | |||
779 | return val; | 786 | return val; |
780 | } | 787 | } |
781 | 788 | ||
782 | static inline void | 789 | static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val) |
783 | xscale2pmu_write_counter(int counter, u32 val) | ||
784 | { | 790 | { |
791 | struct hw_perf_event *hwc = &event->hw; | ||
792 | int counter = hwc->idx; | ||
793 | |||
785 | switch (counter) { | 794 | switch (counter) { |
786 | case XSCALE_CYCLE_COUNTER: | 795 | case XSCALE_CYCLE_COUNTER: |
787 | asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val)); | 796 | asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val)); |
@@ -801,33 +810,31 @@ xscale2pmu_write_counter(int counter, u32 val) | |||
801 | } | 810 | } |
802 | } | 811 | } |
803 | 812 | ||
804 | static struct arm_pmu xscale2pmu = { | 813 | static int __devinit xscale2pmu_init(struct arm_pmu *cpu_pmu) |
805 | .name = "xscale2", | ||
806 | .handle_irq = xscale2pmu_handle_irq, | ||
807 | .enable = xscale2pmu_enable_event, | ||
808 | .disable = xscale2pmu_disable_event, | ||
809 | .read_counter = xscale2pmu_read_counter, | ||
810 | .write_counter = xscale2pmu_write_counter, | ||
811 | .get_event_idx = xscale2pmu_get_event_idx, | ||
812 | .start = xscale2pmu_start, | ||
813 | .stop = xscale2pmu_stop, | ||
814 | .map_event = xscale_map_event, | ||
815 | .num_events = 5, | ||
816 | .max_period = (1LLU << 32) - 1, | ||
817 | }; | ||
818 | |||
819 | static struct arm_pmu *__devinit xscale2pmu_init(void) | ||
820 | { | 814 | { |
821 | return &xscale2pmu; | 815 | cpu_pmu->name = "xscale2"; |
816 | cpu_pmu->handle_irq = xscale2pmu_handle_irq; | ||
817 | cpu_pmu->enable = xscale2pmu_enable_event; | ||
818 | cpu_pmu->disable = xscale2pmu_disable_event; | ||
819 | cpu_pmu->read_counter = xscale2pmu_read_counter; | ||
820 | cpu_pmu->write_counter = xscale2pmu_write_counter; | ||
821 | cpu_pmu->get_event_idx = xscale2pmu_get_event_idx; | ||
822 | cpu_pmu->start = xscale2pmu_start; | ||
823 | cpu_pmu->stop = xscale2pmu_stop; | ||
824 | cpu_pmu->map_event = xscale_map_event; | ||
825 | cpu_pmu->num_events = 5; | ||
826 | cpu_pmu->max_period = (1LLU << 32) - 1; | ||
827 | |||
828 | return 0; | ||
822 | } | 829 | } |
823 | #else | 830 | #else |
824 | static struct arm_pmu *__devinit xscale1pmu_init(void) | 831 | static inline int xscale1pmu_init(struct arm_pmu *cpu_pmu) |
825 | { | 832 | { |
826 | return NULL; | 833 | return -ENODEV; |
827 | } | 834 | } |
828 | 835 | ||
829 | static struct arm_pmu *__devinit xscale2pmu_init(void) | 836 | static inline int xscale2pmu_init(struct arm_pmu *cpu_pmu) |
830 | { | 837 | { |
831 | return NULL; | 838 | return -ENODEV; |
832 | } | 839 | } |
833 | #endif /* CONFIG_CPU_XSCALE */ | 840 | #endif /* CONFIG_CPU_XSCALE */ |
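
Both the v7 and XScale enable/disable paths above now recover their arm_pmu from event->pmu via to_arm_pmu(), which is the usual container_of idiom. Below is a standalone sketch of that idiom; the structure names are made up stand-ins, not the kernel's definitions.

    /* Recover the enclosing structure from a pointer to an embedded member. */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct core_pmu {            /* stand-in for struct pmu     */
        const char *type;
    };

    struct arch_pmu {            /* stand-in for struct arm_pmu */
        int num_events;
        struct core_pmu pmu;     /* embedded member             */
    };

    #define to_arch_pmu(p) container_of(p, struct arch_pmu, pmu)

    int main(void)
    {
        struct arch_pmu cpu_pmu = { .num_events = 7, .pmu = { .type = "cpu" } };
        struct core_pmu *embedded = &cpu_pmu.pmu;  /* what event->pmu points at */

        struct arch_pmu *back = to_arch_pmu(embedded);
        printf("recovered pmu with %d events (%s)\n",
               back->num_events, back->pmu.type);
        return 0;
    }
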
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 90084a6de35a..44bc0b327e2b 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/leds.h> | 34 | #include <linux/leds.h> |
35 | 35 | ||
36 | #include <asm/cacheflush.h> | 36 | #include <asm/cacheflush.h> |
37 | #include <asm/idmap.h> | ||
37 | #include <asm/processor.h> | 38 | #include <asm/processor.h> |
38 | #include <asm/thread_notify.h> | 39 | #include <asm/thread_notify.h> |
39 | #include <asm/stacktrace.h> | 40 | #include <asm/stacktrace.h> |
@@ -56,8 +57,6 @@ static const char *isa_modes[] = { | |||
56 | "ARM" , "Thumb" , "Jazelle", "ThumbEE" | 57 | "ARM" , "Thumb" , "Jazelle", "ThumbEE" |
57 | }; | 58 | }; |
58 | 59 | ||
59 | extern void setup_mm_for_reboot(void); | ||
60 | |||
61 | static volatile int hlt_counter; | 60 | static volatile int hlt_counter; |
62 | 61 | ||
63 | void disable_hlt(void) | 62 | void disable_hlt(void) |
@@ -70,6 +69,7 @@ EXPORT_SYMBOL(disable_hlt); | |||
70 | void enable_hlt(void) | 69 | void enable_hlt(void) |
71 | { | 70 | { |
72 | hlt_counter--; | 71 | hlt_counter--; |
72 | BUG_ON(hlt_counter < 0); | ||
73 | } | 73 | } |
74 | 74 | ||
75 | EXPORT_SYMBOL(enable_hlt); | 75 | EXPORT_SYMBOL(enable_hlt); |
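
The enable_hlt() hunk above adds an underflow check on the nesting counter, so an unbalanced enable is caught at the caller's bug rather than later. A tiny standalone sketch of the same pairing, with assert() standing in for BUG_ON():

    #include <assert.h>
    #include <stdio.h>

    static int hlt_counter;

    static void disable_hlt(void) { hlt_counter++; }

    static void enable_hlt(void)
    {
        hlt_counter--;
        assert(hlt_counter >= 0);   /* analogue of BUG_ON(hlt_counter < 0) */
    }

    int main(void)
    {
        disable_hlt();
        disable_hlt();
        enable_hlt();
        enable_hlt();
        printf("balanced: hlt_counter=%d\n", hlt_counter);
        /* A third enable_hlt() here would trip the assertion. */
        return 0;
    }
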
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 739db3a1b2d2..03deeffd9f6d 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c | |||
@@ -916,16 +916,11 @@ enum ptrace_syscall_dir { | |||
916 | PTRACE_SYSCALL_EXIT, | 916 | PTRACE_SYSCALL_EXIT, |
917 | }; | 917 | }; |
918 | 918 | ||
919 | static int ptrace_syscall_trace(struct pt_regs *regs, int scno, | 919 | static int tracehook_report_syscall(struct pt_regs *regs, |
920 | enum ptrace_syscall_dir dir) | 920 | enum ptrace_syscall_dir dir) |
921 | { | 921 | { |
922 | unsigned long ip; | 922 | unsigned long ip; |
923 | 923 | ||
924 | current_thread_info()->syscall = scno; | ||
925 | |||
926 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | ||
927 | return scno; | ||
928 | |||
929 | /* | 924 | /* |
930 | * IP is used to denote syscall entry/exit: | 925 | * IP is used to denote syscall entry/exit: |
931 | * IP = 0 -> entry, =1 -> exit | 926 | * IP = 0 -> entry, =1 -> exit |
@@ -944,19 +939,41 @@ static int ptrace_syscall_trace(struct pt_regs *regs, int scno, | |||
944 | 939 | ||
945 | asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno) | 940 | asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno) |
946 | { | 941 | { |
947 | scno = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_ENTER); | 942 | current_thread_info()->syscall = scno; |
943 | |||
944 | /* Do the secure computing check first; failures should be fast. */ | ||
945 | if (secure_computing(scno) == -1) | ||
946 | return -1; | ||
947 | |||
948 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | ||
949 | scno = tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); | ||
950 | |||
948 | if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) | 951 | if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) |
949 | trace_sys_enter(regs, scno); | 952 | trace_sys_enter(regs, scno); |
953 | |||
950 | audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, regs->ARM_r1, | 954 | audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, regs->ARM_r1, |
951 | regs->ARM_r2, regs->ARM_r3); | 955 | regs->ARM_r2, regs->ARM_r3); |
956 | |||
952 | return scno; | 957 | return scno; |
953 | } | 958 | } |
954 | 959 | ||
955 | asmlinkage int syscall_trace_exit(struct pt_regs *regs, int scno) | 960 | asmlinkage void syscall_trace_exit(struct pt_regs *regs) |
956 | { | 961 | { |
957 | scno = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_EXIT); | 962 | /* |
958 | if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) | 963 | * Audit the syscall before anything else, as a debugger may |
959 | trace_sys_exit(regs, scno); | 964 | * come in and change the current registers. |
965 | */ | ||
960 | audit_syscall_exit(regs); | 966 | audit_syscall_exit(regs); |
961 | return scno; | 967 | |
968 | /* | ||
969 | * Note that we haven't updated the ->syscall field for the | ||
970 | * current thread. This isn't a problem because it will have | ||
971 | * been set on syscall entry and there hasn't been an opportunity | ||
972 | * for a PTRACE_SET_SYSCALL since then. | ||
973 | */ | ||
974 | if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) | ||
975 | trace_sys_exit(regs, regs_return_value(regs)); | ||
976 | |||
977 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | ||
978 | tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT); | ||
962 | } | 979 | } |
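
The reworked syscall_trace_enter() above runs the seccomp check before any tracing work, so a filtered call is rejected cheaply and never reaches the tracer. Below is a standalone sketch of that ordering; the helpers are stand-ins for illustration only, not the kernel's implementations.

    #include <stdio.h>

    static int secure_computing(int scno)
    {
        return scno == 999 ? -1 : 0;     /* pretend syscall 999 is filtered */
    }

    static int report_to_tracer(int scno)
    {
        printf("tracer saw syscall %d\n", scno);
        return scno;                     /* a tracer may rewrite the number */
    }

    static int trace_enter(int scno, int being_traced)
    {
        if (secure_computing(scno) == -1)
            return -1;                   /* denied: skip tracing and the call */

        if (being_traced)
            scno = report_to_tracer(scno);

        return scno;
    }

    int main(void)
    {
        printf("allowed  -> %d\n", trace_enter(4, 1));
        printf("filtered -> %d\n", trace_enter(999, 1));
        return 0;
    }
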
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index da1d1aa20ad9..9a89bf4aefe1 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -383,6 +383,12 @@ void cpu_init(void) | |||
383 | BUG(); | 383 | BUG(); |
384 | } | 384 | } |
385 | 385 | ||
386 | /* | ||
387 | * This only works on resume and secondary cores. For booting on the | ||
388 | * boot cpu, smp_prepare_boot_cpu is called after percpu area setup. | ||
389 | */ | ||
390 | set_my_cpu_offset(per_cpu_offset(cpu)); | ||
391 | |||
386 | cpu_proc_init(); | 392 | cpu_proc_init(); |
387 | 393 | ||
388 | /* | 394 | /* |
@@ -426,13 +432,14 @@ int __cpu_logical_map[NR_CPUS]; | |||
426 | void __init smp_setup_processor_id(void) | 432 | void __init smp_setup_processor_id(void) |
427 | { | 433 | { |
428 | int i; | 434 | int i; |
429 | u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0; | 435 | u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0; |
436 | u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); | ||
430 | 437 | ||
431 | cpu_logical_map(0) = cpu; | 438 | cpu_logical_map(0) = cpu; |
432 | for (i = 1; i < NR_CPUS; ++i) | 439 | for (i = 1; i < nr_cpu_ids; ++i) |
433 | cpu_logical_map(i) = i == cpu ? 0 : i; | 440 | cpu_logical_map(i) = i == cpu ? 0 : i; |
434 | 441 | ||
435 | printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu); | 442 | printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr); |
436 | } | 443 | } |
437 | 444 | ||
438 | static void __init setup_processor(void) | 445 | static void __init setup_processor(void) |
@@ -758,6 +765,7 @@ void __init setup_arch(char **cmdline_p) | |||
758 | 765 | ||
759 | unflatten_device_tree(); | 766 | unflatten_device_tree(); |
760 | 767 | ||
768 | arm_dt_init_cpu_maps(); | ||
761 | #ifdef CONFIG_SMP | 769 | #ifdef CONFIG_SMP |
762 | if (is_smp()) { | 770 | if (is_smp()) { |
763 | smp_set_ops(mdesc->smp); | 771 | smp_set_ops(mdesc->smp); |
@@ -841,12 +849,9 @@ static const char *hwcap_str[] = { | |||
841 | 849 | ||
842 | static int c_show(struct seq_file *m, void *v) | 850 | static int c_show(struct seq_file *m, void *v) |
843 | { | 851 | { |
844 | int i; | 852 | int i, j; |
853 | u32 cpuid; | ||
845 | 854 | ||
846 | seq_printf(m, "Processor\t: %s rev %d (%s)\n", | ||
847 | cpu_name, read_cpuid_id() & 15, elf_platform); | ||
848 | |||
849 | #if defined(CONFIG_SMP) | ||
850 | for_each_online_cpu(i) { | 855 | for_each_online_cpu(i) { |
851 | /* | 856 | /* |
852 | * glibc reads /proc/cpuinfo to determine the number of | 857 | * glibc reads /proc/cpuinfo to determine the number of |
@@ -854,45 +859,48 @@ static int c_show(struct seq_file *m, void *v) | |||
854 | * "processor". Give glibc what it expects. | 859 | * "processor". Give glibc what it expects. |
855 | */ | 860 | */ |
856 | seq_printf(m, "processor\t: %d\n", i); | 861 | seq_printf(m, "processor\t: %d\n", i); |
857 | seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n", | 862 | cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id(); |
863 | seq_printf(m, "model name\t: %s rev %d (%s)\n", | ||
864 | cpu_name, cpuid & 15, elf_platform); | ||
865 | |||
866 | #if defined(CONFIG_SMP) | ||
867 | seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", | ||
858 | per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ), | 868 | per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ), |
859 | (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100); | 869 | (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100); |
860 | } | 870 | #else |
861 | #else /* CONFIG_SMP */ | 871 | seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", |
862 | seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", | 872 | loops_per_jiffy / (500000/HZ), |
863 | loops_per_jiffy / (500000/HZ), | 873 | (loops_per_jiffy / (5000/HZ)) % 100); |
864 | (loops_per_jiffy / (5000/HZ)) % 100); | ||
865 | #endif | 874 | #endif |
875 | /* dump out the processor features */ | ||
876 | seq_puts(m, "Features\t: "); | ||
866 | 877 | ||
867 | /* dump out the processor features */ | 878 | for (j = 0; hwcap_str[j]; j++) |
868 | seq_puts(m, "Features\t: "); | 879 | if (elf_hwcap & (1 << j)) |
869 | 880 | seq_printf(m, "%s ", hwcap_str[j]); | |
870 | for (i = 0; hwcap_str[i]; i++) | ||
871 | if (elf_hwcap & (1 << i)) | ||
872 | seq_printf(m, "%s ", hwcap_str[i]); | ||
873 | 881 | ||
874 | seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24); | 882 | seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24); |
875 | seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]); | 883 | seq_printf(m, "CPU architecture: %s\n", |
884 | proc_arch[cpu_architecture()]); | ||
876 | 885 | ||
877 | if ((read_cpuid_id() & 0x0008f000) == 0x00000000) { | 886 | if ((cpuid & 0x0008f000) == 0x00000000) { |
878 | /* pre-ARM7 */ | 887 | /* pre-ARM7 */ |
879 | seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4); | 888 | seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4); |
880 | } else { | ||
881 | if ((read_cpuid_id() & 0x0008f000) == 0x00007000) { | ||
882 | /* ARM7 */ | ||
883 | seq_printf(m, "CPU variant\t: 0x%02x\n", | ||
884 | (read_cpuid_id() >> 16) & 127); | ||
885 | } else { | 889 | } else { |
886 | /* post-ARM7 */ | 890 | if ((cpuid & 0x0008f000) == 0x00007000) { |
887 | seq_printf(m, "CPU variant\t: 0x%x\n", | 891 | /* ARM7 */ |
888 | (read_cpuid_id() >> 20) & 15); | 892 | seq_printf(m, "CPU variant\t: 0x%02x\n", |
893 | (cpuid >> 16) & 127); | ||
894 | } else { | ||
895 | /* post-ARM7 */ | ||
896 | seq_printf(m, "CPU variant\t: 0x%x\n", | ||
897 | (cpuid >> 20) & 15); | ||
898 | } | ||
899 | seq_printf(m, "CPU part\t: 0x%03x\n", | ||
900 | (cpuid >> 4) & 0xfff); | ||
889 | } | 901 | } |
890 | seq_printf(m, "CPU part\t: 0x%03x\n", | 902 | seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15); |
891 | (read_cpuid_id() >> 4) & 0xfff); | ||
892 | } | 903 | } |
893 | seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15); | ||
894 | |||
895 | seq_puts(m, "\n"); | ||
896 | 904 | ||
897 | seq_printf(m, "Hardware\t: %s\n", machine_name); | 905 | seq_printf(m, "Hardware\t: %s\n", machine_name); |
898 | seq_printf(m, "Revision\t: %04x\n", system_rev); | 906 | seq_printf(m, "Revision\t: %04x\n", system_rev); |
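
smp_setup_processor_id() above switches to MPIDR_AFFINITY_LEVEL() to pull the boot CPU id out of the MPIDR. The following standalone sketch shows that extraction, assuming the ARMv7 layout of 8 bits per affinity level; the macro mirrors, but is not copied from, the kernel header.

    #include <stdint.h>
    #include <stdio.h>

    #define MPIDR_LEVEL_BITS   8
    #define MPIDR_LEVEL_MASK   ((1u << MPIDR_LEVEL_BITS) - 1)
    #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
        (((mpidr) >> ((level) * MPIDR_LEVEL_BITS)) & MPIDR_LEVEL_MASK)

    int main(void)
    {
        uint32_t mpidr = 0x80000102;   /* example: Aff2=0, Aff1=1, Aff0=2 */

        printf("Aff0=%u Aff1=%u Aff2=%u\n",
               MPIDR_AFFINITY_LEVEL(mpidr, 0),
               MPIDR_AFFINITY_LEVEL(mpidr, 1),
               MPIDR_AFFINITY_LEVEL(mpidr, 2));
        return 0;
    }
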
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index fbc8b2623d82..84f4cbf652e5 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -281,6 +281,7 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid) | |||
281 | struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); | 281 | struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); |
282 | 282 | ||
283 | cpu_info->loops_per_jiffy = loops_per_jiffy; | 283 | cpu_info->loops_per_jiffy = loops_per_jiffy; |
284 | cpu_info->cpuid = read_cpuid_id(); | ||
284 | 285 | ||
285 | store_cpu_topology(cpuid); | 286 | store_cpu_topology(cpuid); |
286 | } | 287 | } |
@@ -313,9 +314,10 @@ asmlinkage void __cpuinit secondary_start_kernel(void) | |||
313 | current->active_mm = mm; | 314 | current->active_mm = mm; |
314 | cpumask_set_cpu(cpu, mm_cpumask(mm)); | 315 | cpumask_set_cpu(cpu, mm_cpumask(mm)); |
315 | 316 | ||
317 | cpu_init(); | ||
318 | |||
316 | printk("CPU%u: Booted secondary processor\n", cpu); | 319 | printk("CPU%u: Booted secondary processor\n", cpu); |
317 | 320 | ||
318 | cpu_init(); | ||
319 | preempt_disable(); | 321 | preempt_disable(); |
320 | trace_hardirqs_off(); | 322 | trace_hardirqs_off(); |
321 | 323 | ||
@@ -371,6 +373,7 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
371 | 373 | ||
372 | void __init smp_prepare_boot_cpu(void) | 374 | void __init smp_prepare_boot_cpu(void) |
373 | { | 375 | { |
376 | set_my_cpu_offset(per_cpu_offset(smp_processor_id())); | ||
374 | } | 377 | } |
375 | 378 | ||
376 | void __init smp_prepare_cpus(unsigned int max_cpus) | 379 | void __init smp_prepare_cpus(unsigned int max_cpus) |
@@ -421,6 +424,11 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) | |||
421 | smp_cross_call(mask, IPI_CALL_FUNC); | 424 | smp_cross_call(mask, IPI_CALL_FUNC); |
422 | } | 425 | } |
423 | 426 | ||
427 | void arch_send_wakeup_ipi_mask(const struct cpumask *mask) | ||
428 | { | ||
429 | smp_cross_call(mask, IPI_WAKEUP); | ||
430 | } | ||
431 | |||
424 | void arch_send_call_function_single_ipi(int cpu) | 432 | void arch_send_call_function_single_ipi(int cpu) |
425 | { | 433 | { |
426 | smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); | 434 | smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); |
@@ -443,7 +451,7 @@ void show_ipi_list(struct seq_file *p, int prec) | |||
443 | for (i = 0; i < NR_IPI; i++) { | 451 | for (i = 0; i < NR_IPI; i++) { |
444 | seq_printf(p, "%*s%u: ", prec - 1, "IPI", i); | 452 | seq_printf(p, "%*s%u: ", prec - 1, "IPI", i); |
445 | 453 | ||
446 | for_each_present_cpu(cpu) | 454 | for_each_online_cpu(cpu) |
447 | seq_printf(p, "%10u ", | 455 | seq_printf(p, "%10u ", |
448 | __get_irq_stat(cpu, ipi_irqs[i])); | 456 | __get_irq_stat(cpu, ipi_irqs[i])); |
449 | 457 | ||
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index b22d700fea27..ff07879ad95d 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c | |||
@@ -31,6 +31,8 @@ static void __iomem *twd_base; | |||
31 | 31 | ||
32 | static struct clk *twd_clk; | 32 | static struct clk *twd_clk; |
33 | static unsigned long twd_timer_rate; | 33 | static unsigned long twd_timer_rate; |
34 | static bool common_setup_called; | ||
35 | static DEFINE_PER_CPU(bool, percpu_setup_called); | ||
34 | 36 | ||
35 | static struct clock_event_device __percpu **twd_evt; | 37 | static struct clock_event_device __percpu **twd_evt; |
36 | static int twd_ppi; | 38 | static int twd_ppi; |
@@ -248,17 +250,9 @@ static struct clk *twd_get_clock(void) | |||
248 | return clk; | 250 | return clk; |
249 | } | 251 | } |
250 | 252 | ||
251 | err = clk_prepare(clk); | 253 | err = clk_prepare_enable(clk); |
252 | if (err) { | 254 | if (err) { |
253 | pr_err("smp_twd: clock failed to prepare: %d\n", err); | 255 | pr_err("smp_twd: clock failed to prepare+enable: %d\n", err); |
254 | clk_put(clk); | ||
255 | return ERR_PTR(err); | ||
256 | } | ||
257 | |||
258 | err = clk_enable(clk); | ||
259 | if (err) { | ||
260 | pr_err("smp_twd: clock failed to enable: %d\n", err); | ||
261 | clk_unprepare(clk); | ||
262 | clk_put(clk); | 256 | clk_put(clk); |
263 | return ERR_PTR(err); | 257 | return ERR_PTR(err); |
264 | } | 258 | } |
@@ -272,15 +266,45 @@ static struct clk *twd_get_clock(void) | |||
272 | static int __cpuinit twd_timer_setup(struct clock_event_device *clk) | 266 | static int __cpuinit twd_timer_setup(struct clock_event_device *clk) |
273 | { | 267 | { |
274 | struct clock_event_device **this_cpu_clk; | 268 | struct clock_event_device **this_cpu_clk; |
269 | int cpu = smp_processor_id(); | ||
270 | |||
271 | /* | ||
272 | * If the basic setup for this CPU has been done before don't | ||
273 | * bother with the below. | ||
274 | */ | ||
275 | if (per_cpu(percpu_setup_called, cpu)) { | ||
276 | __raw_writel(0, twd_base + TWD_TIMER_CONTROL); | ||
277 | clockevents_register_device(*__this_cpu_ptr(twd_evt)); | ||
278 | enable_percpu_irq(clk->irq, 0); | ||
279 | return 0; | ||
280 | } | ||
281 | per_cpu(percpu_setup_called, cpu) = true; | ||
275 | 282 | ||
276 | if (!twd_clk) | 283 | /* |
284 | * This stuff only need to be done once for the entire TWD cluster | ||
285 | * during the runtime of the system. | ||
286 | */ | ||
287 | if (!common_setup_called) { | ||
277 | twd_clk = twd_get_clock(); | 288 | twd_clk = twd_get_clock(); |
278 | 289 | ||
279 | if (!IS_ERR_OR_NULL(twd_clk)) | 290 | /* |
280 | twd_timer_rate = clk_get_rate(twd_clk); | 291 | * We use IS_ERR_OR_NULL() here, because if the clock stubs |
281 | else | 292 | * are active we will get a valid clk reference which is |
282 | twd_calibrate_rate(); | 293 | * however NULL and will return the rate 0. In that case we |
294 | * need to calibrate the rate instead. | ||
295 | */ | ||
296 | if (!IS_ERR_OR_NULL(twd_clk)) | ||
297 | twd_timer_rate = clk_get_rate(twd_clk); | ||
298 | else | ||
299 | twd_calibrate_rate(); | ||
300 | |||
301 | common_setup_called = true; | ||
302 | } | ||
283 | 303 | ||
304 | /* | ||
305 | * The following is done once per CPU the first time .setup() is | ||
306 | * called. | ||
307 | */ | ||
284 | __raw_writel(0, twd_base + TWD_TIMER_CONTROL); | 308 | __raw_writel(0, twd_base + TWD_TIMER_CONTROL); |
285 | 309 | ||
286 | clk->name = "local_timer"; | 310 | clk->name = "local_timer"; |
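
twd_timer_setup() above gains two guards: a per-CPU flag so repeat calls (for example after a CPU hotplug cycle) only re-arm the timer, and a single flag for work that must run once for the whole cluster. A standalone sketch of that split, with a plain array standing in for the kernel's per-CPU variable:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 4

    static bool common_setup_called;
    static bool percpu_setup_called[NR_CPUS];

    static void timer_setup(int cpu)
    {
        if (percpu_setup_called[cpu]) {
            printf("cpu%d: re-setup, only re-enable the timer\n", cpu);
            return;
        }
        percpu_setup_called[cpu] = true;

        if (!common_setup_called) {
            printf("cpu%d: cluster-wide setup (clock rate, etc.)\n", cpu);
            common_setup_called = true;
        }

        printf("cpu%d: first-time per-cpu setup\n", cpu);
    }

    int main(void)
    {
        timer_setup(0);
        timer_setup(1);
        timer_setup(1);   /* e.g. after hotplug: takes the short path */
        return 0;
    }
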
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 26c12c6440fc..79282ebcd939 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c | |||
@@ -196,32 +196,7 @@ static inline void parse_dt_topology(void) {} | |||
196 | static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {} | 196 | static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {} |
197 | #endif | 197 | #endif |
198 | 198 | ||
199 | 199 | /* | |
200 | /* | ||
201 | * cpu topology management | ||
202 | */ | ||
203 | |||
204 | #define MPIDR_SMP_BITMASK (0x3 << 30) | ||
205 | #define MPIDR_SMP_VALUE (0x2 << 30) | ||
206 | |||
207 | #define MPIDR_MT_BITMASK (0x1 << 24) | ||
208 | |||
209 | /* | ||
210 | * These masks reflect the current use of the affinity levels. | ||
211 | * The affinity level can be up to 16 bits according to ARM ARM | ||
212 | */ | ||
213 | #define MPIDR_HWID_BITMASK 0xFFFFFF | ||
214 | |||
215 | #define MPIDR_LEVEL0_MASK 0x3 | ||
216 | #define MPIDR_LEVEL0_SHIFT 0 | ||
217 | |||
218 | #define MPIDR_LEVEL1_MASK 0xF | ||
219 | #define MPIDR_LEVEL1_SHIFT 8 | ||
220 | |||
221 | #define MPIDR_LEVEL2_MASK 0xFF | ||
222 | #define MPIDR_LEVEL2_SHIFT 16 | ||
223 | |||
224 | /* | ||
225 | * cpu topology table | 200 | * cpu topology table |
226 | */ | 201 | */ |
227 | struct cputopo_arm cpu_topology[NR_CPUS]; | 202 | struct cputopo_arm cpu_topology[NR_CPUS]; |
@@ -282,19 +257,14 @@ void store_cpu_topology(unsigned int cpuid) | |||
282 | 257 | ||
283 | if (mpidr & MPIDR_MT_BITMASK) { | 258 | if (mpidr & MPIDR_MT_BITMASK) { |
284 | /* core performance interdependency */ | 259 | /* core performance interdependency */ |
285 | cpuid_topo->thread_id = (mpidr >> MPIDR_LEVEL0_SHIFT) | 260 | cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); |
286 | & MPIDR_LEVEL0_MASK; | 261 | cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); |
287 | cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL1_SHIFT) | 262 | cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); |
288 | & MPIDR_LEVEL1_MASK; | ||
289 | cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL2_SHIFT) | ||
290 | & MPIDR_LEVEL2_MASK; | ||
291 | } else { | 263 | } else { |
292 | /* largely independent cores */ | 264 | /* largely independent cores */ |
293 | cpuid_topo->thread_id = -1; | 265 | cpuid_topo->thread_id = -1; |
294 | cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL0_SHIFT) | 266 | cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); |
295 | & MPIDR_LEVEL0_MASK; | 267 | cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); |
296 | cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL1_SHIFT) | ||
297 | & MPIDR_LEVEL1_MASK; | ||
298 | } | 268 | } |
299 | } else { | 269 | } else { |
300 | /* | 270 | /* |
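
store_cpu_topology() above now decodes the MPIDR with the shared affinity-level helper: when the MT bit is set, level 0 names a hardware thread and the core/cluster ids move up one level. A standalone sketch of that decode with illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    #define MT_BIT              (1u << 24)
    #define AFF_LEVEL(m, l)     (((m) >> ((l) * 8)) & 0xffu)

    struct topo { int thread_id, core_id, socket_id; };

    static struct topo decode(uint32_t mpidr)
    {
        struct topo t;

        if (mpidr & MT_BIT) {
            t.thread_id = AFF_LEVEL(mpidr, 0);
            t.core_id   = AFF_LEVEL(mpidr, 1);
            t.socket_id = AFF_LEVEL(mpidr, 2);
        } else {
            t.thread_id = -1;
            t.core_id   = AFF_LEVEL(mpidr, 0);
            t.socket_id = AFF_LEVEL(mpidr, 1);
        }
        return t;
    }

    int main(void)
    {
        struct topo t = decode(0x81000101);  /* MT set: thread 1, core 1, cluster 0 */

        printf("thread=%d core=%d socket=%d\n",
               t.thread_id, t.core_id, t.socket_id);
        return 0;
    }
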
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 36ff15bbfdd4..b9f38e388b43 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S | |||
@@ -114,6 +114,15 @@ SECTIONS | |||
114 | 114 | ||
115 | RO_DATA(PAGE_SIZE) | 115 | RO_DATA(PAGE_SIZE) |
116 | 116 | ||
117 | . = ALIGN(4); | ||
118 | __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { | ||
119 | __start___ex_table = .; | ||
120 | #ifdef CONFIG_MMU | ||
121 | *(__ex_table) | ||
122 | #endif | ||
123 | __stop___ex_table = .; | ||
124 | } | ||
125 | |||
117 | #ifdef CONFIG_ARM_UNWIND | 126 | #ifdef CONFIG_ARM_UNWIND |
118 | /* | 127 | /* |
119 | * Stack unwinding tables | 128 | * Stack unwinding tables |
@@ -220,16 +229,6 @@ SECTIONS | |||
220 | READ_MOSTLY_DATA(L1_CACHE_BYTES) | 229 | READ_MOSTLY_DATA(L1_CACHE_BYTES) |
221 | 230 | ||
222 | /* | 231 | /* |
223 | * The exception fixup table (might need resorting at runtime) | ||
224 | */ | ||
225 | . = ALIGN(4); | ||
226 | __start___ex_table = .; | ||
227 | #ifdef CONFIG_MMU | ||
228 | *(__ex_table) | ||
229 | #endif | ||
230 | __stop___ex_table = .; | ||
231 | |||
232 | /* | ||
233 | * and the usual data section | 232 | * and the usual data section |
234 | */ | 233 | */ |
235 | DATA_DATA | 234 | DATA_DATA |
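
Moving __ex_table next to the read-only data works because the table can be sorted ahead of time rather than rewritten at boot; the fault handler then only ever binary-searches it. A simplified standalone sketch of such a lookup follows; the two-word entry layout and the search routine are illustrative stand-ins, not the kernel's exact format.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct exception_table_entry {
        uintptr_t insn;     /* address of the instruction that may fault */
        uintptr_t fixup;    /* address to resume at if it does           */
    };

    /* Must be sorted by .insn; a build-time sort guarantees this. */
    static const struct exception_table_entry extable[] = {
        { 0x1000, 0x9000 },
        { 0x1040, 0x9010 },
        { 0x2000, 0x9020 },
    };

    static uintptr_t search_extable(uintptr_t addr)
    {
        size_t lo = 0, hi = sizeof(extable) / sizeof(extable[0]);

        while (lo < hi) {
            size_t mid = lo + (hi - lo) / 2;

            if (extable[mid].insn == addr)
                return extable[mid].fixup;
            if (extable[mid].insn < addr)
                lo = mid + 1;
            else
                hi = mid;
        }
        return 0;   /* no fixup entry: the fault is fatal */
    }

    int main(void)
    {
        printf("fixup for 0x1040 -> 0x%lx\n",
               (unsigned long)search_extable(0x1040));
        printf("fixup for 0x3000 -> 0x%lx\n",
               (unsigned long)search_extable(0x3000));
        return 0;
    }
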
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c index b3d86d7081a0..9f82f9dcbb98 100644 --- a/arch/arm/mach-integrator/impd1.c +++ b/arch/arm/mach-integrator/impd1.c | |||
@@ -342,9 +342,10 @@ static int impd1_probe(struct lm_device *dev) | |||
342 | 342 | ||
343 | pc_base = dev->resource.start + idev->offset; | 343 | pc_base = dev->resource.start + idev->offset; |
344 | snprintf(devname, 32, "lm%x:%5.5lx", dev->id, idev->offset >> 12); | 344 | snprintf(devname, 32, "lm%x:%5.5lx", dev->id, idev->offset >> 12); |
345 | d = amba_ahb_device_add(&dev->dev, devname, pc_base, SZ_4K, | 345 | d = amba_ahb_device_add_res(&dev->dev, devname, pc_base, SZ_4K, |
346 | dev->irq, dev->irq, | 346 | dev->irq, dev->irq, |
347 | idev->platform_data, idev->id); | 347 | idev->platform_data, idev->id, |
348 | &dev->resource); | ||
348 | if (IS_ERR(d)) { | 349 | if (IS_ERR(d)) { |
349 | dev_err(&dev->dev, "unable to register device: %ld\n", PTR_ERR(d)); | 350 | dev_err(&dev->dev, "unable to register device: %ld\n", PTR_ERR(d)); |
350 | continue; | 351 | continue; |
diff --git a/arch/arm/mach-ixp4xx/include/mach/udc.h b/arch/arm/mach-ixp4xx/include/mach/udc.h index 80d6da2eafac..7bd8b96c8843 100644 --- a/arch/arm/mach-ixp4xx/include/mach/udc.h +++ b/arch/arm/mach-ixp4xx/include/mach/udc.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * arch/arm/mach-ixp4xx/include/mach/udc.h | 2 | * arch/arm/mach-ixp4xx/include/mach/udc.h |
3 | * | 3 | * |
4 | */ | 4 | */ |
5 | #include <asm/mach/udc_pxa2xx.h> | 5 | #include <linux/platform_data/pxa2xx_udc.h> |
6 | 6 | ||
7 | extern void ixp4xx_set_udc_info(struct pxa2xx_udc_mach_info *info); | 7 | extern void ixp4xx_set_udc_info(struct pxa2xx_udc_mach_info *info); |
8 | 8 | ||
diff --git a/arch/arm/mach-omap2/pmu.c b/arch/arm/mach-omap2/pmu.c index 2a791766283d..031e2fbd0e19 100644 --- a/arch/arm/mach-omap2/pmu.c +++ b/arch/arm/mach-omap2/pmu.c | |||
@@ -57,8 +57,6 @@ static int __init omap2_init_pmu(unsigned oh_num, char *oh_names[]) | |||
57 | if (IS_ERR(omap_pmu_dev)) | 57 | if (IS_ERR(omap_pmu_dev)) |
58 | return PTR_ERR(omap_pmu_dev); | 58 | return PTR_ERR(omap_pmu_dev); |
59 | 59 | ||
60 | pm_runtime_enable(&omap_pmu_dev->dev); | ||
61 | |||
62 | return 0; | 60 | return 0; |
63 | } | 61 | } |
64 | 62 | ||
diff --git a/arch/arm/mach-pxa/include/mach/udc.h b/arch/arm/mach-pxa/include/mach/udc.h index 2f82332e81a0..9a827e32db98 100644 --- a/arch/arm/mach-pxa/include/mach/udc.h +++ b/arch/arm/mach-pxa/include/mach/udc.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * arch/arm/mach-pxa/include/mach/udc.h | 2 | * arch/arm/mach-pxa/include/mach/udc.h |
3 | * | 3 | * |
4 | */ | 4 | */ |
5 | #include <asm/mach/udc_pxa2xx.h> | 5 | #include <linux/platform_data/pxa2xx_udc.h> |
6 | 6 | ||
7 | extern void pxa_set_udc_info(struct pxa2xx_udc_mach_info *info); | 7 | extern void pxa_set_udc_info(struct pxa2xx_udc_mach_info *info); |
8 | 8 | ||
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c index d3b3cd216d64..28511d43637a 100644 --- a/arch/arm/mach-realview/realview_eb.c +++ b/arch/arm/mach-realview/realview_eb.c | |||
@@ -467,6 +467,7 @@ static void __init realview_eb_init(void) | |||
467 | MACHINE_START(REALVIEW_EB, "ARM-RealView EB") | 467 | MACHINE_START(REALVIEW_EB, "ARM-RealView EB") |
468 | /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */ | 468 | /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */ |
469 | .atag_offset = 0x100, | 469 | .atag_offset = 0x100, |
470 | .smp = smp_ops(realview_smp_ops), | ||
470 | .fixup = realview_fixup, | 471 | .fixup = realview_fixup, |
471 | .map_io = realview_eb_map_io, | 472 | .map_io = realview_eb_map_io, |
472 | .init_early = realview_init_early, | 473 | .init_early = realview_init_early, |
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c index 6a7ad3c2a3fc..9a23739f7026 100644 --- a/arch/arm/mach-sa1100/assabet.c +++ b/arch/arm/mach-sa1100/assabet.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
16 | #include <linux/ioport.h> | 16 | #include <linux/ioport.h> |
17 | #include <linux/platform_data/sa11x0-serial.h> | ||
17 | #include <linux/serial_core.h> | 18 | #include <linux/serial_core.h> |
18 | #include <linux/mfd/ucb1x00.h> | 19 | #include <linux/mfd/ucb1x00.h> |
19 | #include <linux/mtd/mtd.h> | 20 | #include <linux/mtd/mtd.h> |
@@ -37,7 +38,6 @@ | |||
37 | #include <asm/mach/flash.h> | 38 | #include <asm/mach/flash.h> |
38 | #include <asm/mach/irda.h> | 39 | #include <asm/mach/irda.h> |
39 | #include <asm/mach/map.h> | 40 | #include <asm/mach/map.h> |
40 | #include <asm/mach/serial_sa1100.h> | ||
41 | #include <mach/assabet.h> | 41 | #include <mach/assabet.h> |
42 | #include <linux/platform_data/mfd-mcp-sa11x0.h> | 42 | #include <linux/platform_data/mfd-mcp-sa11x0.h> |
43 | #include <mach/irqs.h> | 43 | #include <mach/irqs.h> |
diff --git a/arch/arm/mach-sa1100/badge4.c b/arch/arm/mach-sa1100/badge4.c index 038df4894b0f..b2dadf3ea3df 100644 --- a/arch/arm/mach-sa1100/badge4.c +++ b/arch/arm/mach-sa1100/badge4.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/platform_data/sa11x0-serial.h> | ||
19 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
20 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
21 | #include <linux/tty.h> | 22 | #include <linux/tty.h> |
@@ -34,7 +35,6 @@ | |||
34 | #include <asm/mach/flash.h> | 35 | #include <asm/mach/flash.h> |
35 | #include <asm/mach/map.h> | 36 | #include <asm/mach/map.h> |
36 | #include <asm/hardware/sa1111.h> | 37 | #include <asm/hardware/sa1111.h> |
37 | #include <asm/mach/serial_sa1100.h> | ||
38 | 38 | ||
39 | #include <mach/badge4.h> | 39 | #include <mach/badge4.h> |
40 | 40 | ||
diff --git a/arch/arm/mach-sa1100/cerf.c b/arch/arm/mach-sa1100/cerf.c index ad0eb08ea077..304bca4a07c0 100644 --- a/arch/arm/mach-sa1100/cerf.c +++ b/arch/arm/mach-sa1100/cerf.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/tty.h> | 15 | #include <linux/tty.h> |
16 | #include <linux/platform_data/sa11x0-serial.h> | ||
16 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
17 | #include <linux/irq.h> | 18 | #include <linux/irq.h> |
18 | #include <linux/mtd/mtd.h> | 19 | #include <linux/mtd/mtd.h> |
@@ -27,7 +28,6 @@ | |||
27 | #include <asm/mach/arch.h> | 28 | #include <asm/mach/arch.h> |
28 | #include <asm/mach/flash.h> | 29 | #include <asm/mach/flash.h> |
29 | #include <asm/mach/map.h> | 30 | #include <asm/mach/map.h> |
30 | #include <asm/mach/serial_sa1100.h> | ||
31 | 31 | ||
32 | #include <mach/cerf.h> | 32 | #include <mach/cerf.h> |
33 | #include <linux/platform_data/mfd-mcp-sa11x0.h> | 33 | #include <linux/platform_data/mfd-mcp-sa11x0.h> |
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c index 170cb6107f68..45f424f5fca6 100644 --- a/arch/arm/mach-sa1100/collie.c +++ b/arch/arm/mach-sa1100/collie.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/tty.h> | 22 | #include <linux/tty.h> |
23 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
24 | #include <linux/platform_data/sa11x0-serial.h> | ||
24 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
25 | #include <linux/mfd/ucb1x00.h> | 26 | #include <linux/mfd/ucb1x00.h> |
26 | #include <linux/mtd/mtd.h> | 27 | #include <linux/mtd/mtd.h> |
@@ -40,7 +41,6 @@ | |||
40 | #include <asm/mach/arch.h> | 41 | #include <asm/mach/arch.h> |
41 | #include <asm/mach/flash.h> | 42 | #include <asm/mach/flash.h> |
42 | #include <asm/mach/map.h> | 43 | #include <asm/mach/map.h> |
43 | #include <asm/mach/serial_sa1100.h> | ||
44 | 44 | ||
45 | #include <asm/hardware/scoop.h> | 45 | #include <asm/hardware/scoop.h> |
46 | #include <asm/mach/sharpsl_param.h> | 46 | #include <asm/mach/sharpsl_param.h> |
diff --git a/arch/arm/mach-sa1100/h3xxx.c b/arch/arm/mach-sa1100/h3xxx.c index 63150e1ffe9e..f17e7382242a 100644 --- a/arch/arm/mach-sa1100/h3xxx.c +++ b/arch/arm/mach-sa1100/h3xxx.c | |||
@@ -17,12 +17,12 @@ | |||
17 | #include <linux/mfd/htc-egpio.h> | 17 | #include <linux/mfd/htc-egpio.h> |
18 | #include <linux/mtd/mtd.h> | 18 | #include <linux/mtd/mtd.h> |
19 | #include <linux/mtd/partitions.h> | 19 | #include <linux/mtd/partitions.h> |
20 | #include <linux/platform_data/sa11x0-serial.h> | ||
20 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
21 | #include <linux/serial_core.h> | 22 | #include <linux/serial_core.h> |
22 | 23 | ||
23 | #include <asm/mach/flash.h> | 24 | #include <asm/mach/flash.h> |
24 | #include <asm/mach/map.h> | 25 | #include <asm/mach/map.h> |
25 | #include <asm/mach/serial_sa1100.h> | ||
26 | 26 | ||
27 | #include <mach/h3xxx.h> | 27 | #include <mach/h3xxx.h> |
28 | 28 | ||
diff --git a/arch/arm/mach-sa1100/hackkit.c b/arch/arm/mach-sa1100/hackkit.c index fc106aab7c7e..d005939c41fc 100644 --- a/arch/arm/mach-sa1100/hackkit.c +++ b/arch/arm/mach-sa1100/hackkit.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
20 | #include <linux/cpufreq.h> | 20 | #include <linux/cpufreq.h> |
21 | #include <linux/platform_data/sa11x0-serial.h> | ||
21 | #include <linux/serial_core.h> | 22 | #include <linux/serial_core.h> |
22 | #include <linux/mtd/mtd.h> | 23 | #include <linux/mtd/mtd.h> |
23 | #include <linux/mtd/partitions.h> | 24 | #include <linux/mtd/partitions.h> |
@@ -35,7 +36,6 @@ | |||
35 | #include <asm/mach/flash.h> | 36 | #include <asm/mach/flash.h> |
36 | #include <asm/mach/map.h> | 37 | #include <asm/mach/map.h> |
37 | #include <asm/mach/irq.h> | 38 | #include <asm/mach/irq.h> |
38 | #include <asm/mach/serial_sa1100.h> | ||
39 | 39 | ||
40 | #include <mach/hardware.h> | 40 | #include <mach/hardware.h> |
41 | #include <mach/irqs.h> | 41 | #include <mach/irqs.h> |
diff --git a/arch/arm/mach-sa1100/jornada720.c b/arch/arm/mach-sa1100/jornada720.c index e3084f47027d..35cfc428b4d4 100644 --- a/arch/arm/mach-sa1100/jornada720.c +++ b/arch/arm/mach-sa1100/jornada720.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/tty.h> | 18 | #include <linux/tty.h> |
19 | #include <linux/delay.h> | 19 | #include <linux/delay.h> |
20 | #include <linux/platform_data/sa11x0-serial.h> | ||
20 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
21 | #include <linux/ioport.h> | 22 | #include <linux/ioport.h> |
22 | #include <linux/mtd/mtd.h> | 23 | #include <linux/mtd/mtd.h> |
@@ -30,7 +31,6 @@ | |||
30 | #include <asm/mach/arch.h> | 31 | #include <asm/mach/arch.h> |
31 | #include <asm/mach/flash.h> | 32 | #include <asm/mach/flash.h> |
32 | #include <asm/mach/map.h> | 33 | #include <asm/mach/map.h> |
33 | #include <asm/mach/serial_sa1100.h> | ||
34 | 34 | ||
35 | #include <mach/hardware.h> | 35 | #include <mach/hardware.h> |
36 | #include <mach/irqs.h> | 36 | #include <mach/irqs.h> |
diff --git a/arch/arm/mach-sa1100/lart.c b/arch/arm/mach-sa1100/lart.c index 3048b17e84c5..f69f78fc3ddd 100644 --- a/arch/arm/mach-sa1100/lart.c +++ b/arch/arm/mach-sa1100/lart.c | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | #include <linux/init.h> | 5 | #include <linux/init.h> |
6 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
7 | #include <linux/platform_data/sa11x0-serial.h> | ||
7 | #include <linux/tty.h> | 8 | #include <linux/tty.h> |
8 | #include <linux/gpio.h> | 9 | #include <linux/gpio.h> |
9 | #include <linux/leds.h> | 10 | #include <linux/leds.h> |
@@ -18,7 +19,6 @@ | |||
18 | 19 | ||
19 | #include <asm/mach/arch.h> | 20 | #include <asm/mach/arch.h> |
20 | #include <asm/mach/map.h> | 21 | #include <asm/mach/map.h> |
21 | #include <asm/mach/serial_sa1100.h> | ||
22 | #include <linux/platform_data/mfd-mcp-sa11x0.h> | 22 | #include <linux/platform_data/mfd-mcp-sa11x0.h> |
23 | #include <mach/irqs.h> | 23 | #include <mach/irqs.h> |
24 | 24 | ||
diff --git a/arch/arm/mach-sa1100/nanoengine.c b/arch/arm/mach-sa1100/nanoengine.c index 41f69d97066f..102e08f7b109 100644 --- a/arch/arm/mach-sa1100/nanoengine.c +++ b/arch/arm/mach-sa1100/nanoengine.c | |||
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/platform_data/sa11x0-serial.h> | ||
16 | #include <linux/mtd/mtd.h> | 17 | #include <linux/mtd/mtd.h> |
17 | #include <linux/mtd/partitions.h> | 18 | #include <linux/mtd/partitions.h> |
18 | #include <linux/root_dev.h> | 19 | #include <linux/root_dev.h> |
@@ -24,7 +25,6 @@ | |||
24 | #include <asm/mach/arch.h> | 25 | #include <asm/mach/arch.h> |
25 | #include <asm/mach/flash.h> | 26 | #include <asm/mach/flash.h> |
26 | #include <asm/mach/map.h> | 27 | #include <asm/mach/map.h> |
27 | #include <asm/mach/serial_sa1100.h> | ||
28 | 28 | ||
29 | #include <mach/hardware.h> | 29 | #include <mach/hardware.h> |
30 | #include <mach/nanoengine.h> | 30 | #include <mach/nanoengine.h> |
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c index 266db873a4e4..88be0474f3d7 100644 --- a/arch/arm/mach-sa1100/neponset.c +++ b/arch/arm/mach-sa1100/neponset.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/irq.h> | 7 | #include <linux/irq.h> |
8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/platform_data/sa11x0-serial.h> | ||
10 | #include <linux/platform_device.h> | 11 | #include <linux/platform_device.h> |
11 | #include <linux/pm.h> | 12 | #include <linux/pm.h> |
12 | #include <linux/serial_core.h> | 13 | #include <linux/serial_core.h> |
@@ -14,7 +15,6 @@ | |||
14 | 15 | ||
15 | #include <asm/mach-types.h> | 16 | #include <asm/mach-types.h> |
16 | #include <asm/mach/map.h> | 17 | #include <asm/mach/map.h> |
17 | #include <asm/mach/serial_sa1100.h> | ||
18 | #include <asm/hardware/sa1111.h> | 18 | #include <asm/hardware/sa1111.h> |
19 | #include <asm/sizes.h> | 19 | #include <asm/sizes.h> |
20 | 20 | ||
diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c index 37fe0a0a5369..c51bb63f90fb 100644 --- a/arch/arm/mach-sa1100/pleb.c +++ b/arch/arm/mach-sa1100/pleb.c | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
7 | #include <linux/tty.h> | 7 | #include <linux/tty.h> |
8 | #include <linux/ioport.h> | 8 | #include <linux/ioport.h> |
9 | #include <linux/platform_data/sa11x0-serial.h> | ||
9 | #include <linux/platform_device.h> | 10 | #include <linux/platform_device.h> |
10 | #include <linux/irq.h> | 11 | #include <linux/irq.h> |
11 | #include <linux/io.h> | 12 | #include <linux/io.h> |
@@ -18,7 +19,6 @@ | |||
18 | #include <asm/mach/arch.h> | 19 | #include <asm/mach/arch.h> |
19 | #include <asm/mach/map.h> | 20 | #include <asm/mach/map.h> |
20 | #include <asm/mach/flash.h> | 21 | #include <asm/mach/flash.h> |
21 | #include <asm/mach/serial_sa1100.h> | ||
22 | #include <mach/irqs.h> | 22 | #include <mach/irqs.h> |
23 | 23 | ||
24 | #include "generic.h" | 24 | #include "generic.h" |
diff --git a/arch/arm/mach-sa1100/shannon.c b/arch/arm/mach-sa1100/shannon.c index ff6b7b35bca9..6460d25fbb88 100644 --- a/arch/arm/mach-sa1100/shannon.c +++ b/arch/arm/mach-sa1100/shannon.c | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/init.h> | 5 | #include <linux/init.h> |
6 | #include <linux/device.h> | 6 | #include <linux/device.h> |
7 | #include <linux/kernel.h> | 7 | #include <linux/kernel.h> |
8 | #include <linux/platform_data/sa11x0-serial.h> | ||
8 | #include <linux/tty.h> | 9 | #include <linux/tty.h> |
9 | #include <linux/mtd/mtd.h> | 10 | #include <linux/mtd/mtd.h> |
10 | #include <linux/mtd/partitions.h> | 11 | #include <linux/mtd/partitions.h> |
@@ -18,7 +19,6 @@ | |||
18 | #include <asm/mach/arch.h> | 19 | #include <asm/mach/arch.h> |
19 | #include <asm/mach/flash.h> | 20 | #include <asm/mach/flash.h> |
20 | #include <asm/mach/map.h> | 21 | #include <asm/mach/map.h> |
21 | #include <asm/mach/serial_sa1100.h> | ||
22 | #include <linux/platform_data/mfd-mcp-sa11x0.h> | 22 | #include <linux/platform_data/mfd-mcp-sa11x0.h> |
23 | #include <mach/shannon.h> | 23 | #include <mach/shannon.h> |
24 | #include <mach/irqs.h> | 24 | #include <mach/irqs.h> |
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c index 71790e581d93..6d65f65fcb23 100644 --- a/arch/arm/mach-sa1100/simpad.c +++ b/arch/arm/mach-sa1100/simpad.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/proc_fs.h> | 9 | #include <linux/proc_fs.h> |
10 | #include <linux/string.h> | 10 | #include <linux/string.h> |
11 | #include <linux/pm.h> | 11 | #include <linux/pm.h> |
12 | #include <linux/platform_data/sa11x0-serial.h> | ||
12 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
13 | #include <linux/mfd/ucb1x00.h> | 14 | #include <linux/mfd/ucb1x00.h> |
14 | #include <linux/mtd/mtd.h> | 15 | #include <linux/mtd/mtd.h> |
@@ -23,7 +24,6 @@ | |||
23 | #include <asm/mach/arch.h> | 24 | #include <asm/mach/arch.h> |
24 | #include <asm/mach/flash.h> | 25 | #include <asm/mach/flash.h> |
25 | #include <asm/mach/map.h> | 26 | #include <asm/mach/map.h> |
26 | #include <asm/mach/serial_sa1100.h> | ||
27 | #include <linux/platform_data/mfd-mcp-sa11x0.h> | 27 | #include <linux/platform_data/mfd-mcp-sa11x0.h> |
28 | #include <mach/simpad.h> | 28 | #include <mach/simpad.h> |
29 | #include <mach/irqs.h> | 29 | #include <mach/irqs.h> |
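Every SA1100 board file in the run above makes the same one-line swap: the machine-specific serial header is dropped and the generic platform-data header takes its place among the <linux/...> includes. Shown as a minimal before/after sketch (illustrative only; the surrounding includes are unchanged):

	/* before: machine-specific header, removed by this series */
	#include <asm/mach/serial_sa1100.h>

	/* after: generic platform data header, added with the other <linux/...> includes */
	#include <linux/platform_data/sa11x0-serial.h>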
diff --git a/arch/arm/mm/cache-aurora-l2.h b/arch/arm/mm/cache-aurora-l2.h new file mode 100644 index 000000000000..c86124769831 --- /dev/null +++ b/arch/arm/mm/cache-aurora-l2.h | |||
@@ -0,0 +1,55 @@ | |||
1 | /* | ||
2 | * AURORA shared L2 cache controller support | ||
3 | * | ||
4 | * Copyright (C) 2012 Marvell | ||
5 | * | ||
6 | * Yehuda Yitschak <yehuday@marvell.com> | ||
7 | * Gregory CLEMENT <gregory.clement@free-electrons.com> | ||
8 | * | ||
9 | * This file is licensed under the terms of the GNU General Public | ||
10 | * License version 2. This program is licensed "as is" without any | ||
11 | * warranty of any kind, whether express or implied. | ||
12 | */ | ||
13 | |||
14 | #ifndef __ASM_ARM_HARDWARE_AURORA_L2_H | ||
15 | #define __ASM_ARM_HARDWARE_AURORA_L2_H | ||
16 | |||
17 | #define AURORA_SYNC_REG 0x700 | ||
18 | #define AURORA_RANGE_BASE_ADDR_REG 0x720 | ||
19 | #define AURORA_FLUSH_PHY_ADDR_REG 0x7f0 | ||
20 | #define AURORA_INVAL_RANGE_REG 0x774 | ||
21 | #define AURORA_CLEAN_RANGE_REG 0x7b4 | ||
22 | #define AURORA_FLUSH_RANGE_REG 0x7f4 | ||
23 | |||
24 | #define AURORA_ACR_REPLACEMENT_OFFSET 27 | ||
25 | #define AURORA_ACR_REPLACEMENT_MASK \ | ||
26 | (0x3 << AURORA_ACR_REPLACEMENT_OFFSET) | ||
27 | #define AURORA_ACR_REPLACEMENT_TYPE_WAYRR \ | ||
28 | (0 << AURORA_ACR_REPLACEMENT_OFFSET) | ||
29 | #define AURORA_ACR_REPLACEMENT_TYPE_LFSR \ | ||
30 | (1 << AURORA_ACR_REPLACEMENT_OFFSET) | ||
31 | #define AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU \ | ||
32 | (3 << AURORA_ACR_REPLACEMENT_OFFSET) | ||
33 | |||
34 | #define AURORA_ACR_FORCE_WRITE_POLICY_OFFSET 0 | ||
35 | #define AURORA_ACR_FORCE_WRITE_POLICY_MASK \ | ||
36 | (0x3 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET) | ||
37 | #define AURORA_ACR_FORCE_WRITE_POLICY_DIS \ | ||
38 | (0 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET) | ||
39 | #define AURORA_ACR_FORCE_WRITE_BACK_POLICY \ | ||
40 | (1 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET) | ||
41 | #define AURORA_ACR_FORCE_WRITE_THRO_POLICY \ | ||
42 | (2 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET) | ||
43 | |||
44 | #define MAX_RANGE_SIZE 1024 | ||
45 | |||
46 | #define AURORA_WAY_SIZE_SHIFT 2 | ||
47 | |||
48 | #define AURORA_CTRL_FW 0x100 | ||
49 | |||
50 | /* chose a number outside L2X0_CACHE_ID_PART_MASK to be sure to make | ||
51 | * the distinction between a number coming from hardware and a number | ||
52 | * coming from the device tree */ | ||
53 | #define AURORA_CACHE_ID 0x100 | ||
54 | |||
55 | #endif /* __ASM_ARM_HARDWARE_AURORA_L2_H */ | ||
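The AURORA_ACR_* definitions above are plain offset/mask pairs, so updating the auxiliary control value is a read-modify-write on a two-bit field. A small sketch under the assumption that these macros are in scope; the function name and the acr variable are illustrative, not part of the patch:

	/* Select semi-pLRU replacement in a copy of the aux control register value. */
	static u32 aurora_acr_set_semi_plru(u32 acr)
	{
		acr &= ~AURORA_ACR_REPLACEMENT_MASK;		/* clear bits [28:27] */
		acr |= AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;	/* write 0x3 into the field */
		return acr;
	}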
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 8a97e6443c62..6911b8b2745c 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c | |||
@@ -25,6 +25,7 @@ | |||
25 | 25 | ||
26 | #include <asm/cacheflush.h> | 26 | #include <asm/cacheflush.h> |
27 | #include <asm/hardware/cache-l2x0.h> | 27 | #include <asm/hardware/cache-l2x0.h> |
28 | #include "cache-aurora-l2.h" | ||
28 | 29 | ||
29 | #define CACHE_LINE_SIZE 32 | 30 | #define CACHE_LINE_SIZE 32 |
30 | 31 | ||
@@ -34,14 +35,20 @@ static u32 l2x0_way_mask; /* Bitmask of active ways */ | |||
34 | static u32 l2x0_size; | 35 | static u32 l2x0_size; |
35 | static unsigned long sync_reg_offset = L2X0_CACHE_SYNC; | 36 | static unsigned long sync_reg_offset = L2X0_CACHE_SYNC; |
36 | 37 | ||
38 | /* Aurora doesn't have the cache ID register available, so we have to | ||
39 | * pass it through the device tree */ | ||
40 | static u32 cache_id_part_number_from_dt; | ||
41 | |||
37 | struct l2x0_regs l2x0_saved_regs; | 42 | struct l2x0_regs l2x0_saved_regs; |
38 | 43 | ||
39 | struct l2x0_of_data { | 44 | struct l2x0_of_data { |
40 | void (*setup)(const struct device_node *, u32 *, u32 *); | 45 | void (*setup)(const struct device_node *, u32 *, u32 *); |
41 | void (*save)(void); | 46 | void (*save)(void); |
42 | void (*resume)(void); | 47 | struct outer_cache_fns outer_cache; |
43 | }; | 48 | }; |
44 | 49 | ||
50 | static bool of_init = false; | ||
51 | |||
45 | static inline void cache_wait_way(void __iomem *reg, unsigned long mask) | 52 | static inline void cache_wait_way(void __iomem *reg, unsigned long mask) |
46 | { | 53 | { |
47 | /* wait for cache operation by line or way to complete */ | 54 | /* wait for cache operation by line or way to complete */ |
@@ -168,7 +175,7 @@ static void l2x0_inv_all(void) | |||
168 | /* invalidate all ways */ | 175 | /* invalidate all ways */ |
169 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 176 | raw_spin_lock_irqsave(&l2x0_lock, flags); |
170 | /* Invalidating when L2 is enabled is a nono */ | 177 | /* Invalidating when L2 is enabled is a nono */ |
171 | BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1); | 178 | BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN); |
172 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); | 179 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); |
173 | cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); | 180 | cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); |
174 | cache_sync(); | 181 | cache_sync(); |
@@ -292,11 +299,18 @@ static void l2x0_unlock(u32 cache_id) | |||
292 | int lockregs; | 299 | int lockregs; |
293 | int i; | 300 | int i; |
294 | 301 | ||
295 | if (cache_id == L2X0_CACHE_ID_PART_L310) | 302 | switch (cache_id) { |
303 | case L2X0_CACHE_ID_PART_L310: | ||
296 | lockregs = 8; | 304 | lockregs = 8; |
297 | else | 305 | break; |
306 | case AURORA_CACHE_ID: | ||
307 | lockregs = 4; | ||
308 | break; | ||
309 | default: | ||
298 | /* L210 and unknown types */ | 310 | /* L210 and unknown types */ |
299 | lockregs = 1; | 311 | lockregs = 1; |
312 | break; | ||
313 | } | ||
300 | 314 | ||
301 | for (i = 0; i < lockregs; i++) { | 315 | for (i = 0; i < lockregs; i++) { |
302 | writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE + | 316 | writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE + |
@@ -312,18 +326,22 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) | |||
312 | u32 cache_id; | 326 | u32 cache_id; |
313 | u32 way_size = 0; | 327 | u32 way_size = 0; |
314 | int ways; | 328 | int ways; |
329 | int way_size_shift = L2X0_WAY_SIZE_SHIFT; | ||
315 | const char *type; | 330 | const char *type; |
316 | 331 | ||
317 | l2x0_base = base; | 332 | l2x0_base = base; |
318 | 333 | if (cache_id_part_number_from_dt) | |
319 | cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); | 334 | cache_id = cache_id_part_number_from_dt; |
335 | else | ||
336 | cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID) | ||
337 | & L2X0_CACHE_ID_PART_MASK; | ||
320 | aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); | 338 | aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); |
321 | 339 | ||
322 | aux &= aux_mask; | 340 | aux &= aux_mask; |
323 | aux |= aux_val; | 341 | aux |= aux_val; |
324 | 342 | ||
325 | /* Determine the number of ways */ | 343 | /* Determine the number of ways */ |
326 | switch (cache_id & L2X0_CACHE_ID_PART_MASK) { | 344 | switch (cache_id) { |
327 | case L2X0_CACHE_ID_PART_L310: | 345 | case L2X0_CACHE_ID_PART_L310: |
328 | if (aux & (1 << 16)) | 346 | if (aux & (1 << 16)) |
329 | ways = 16; | 347 | ways = 16; |
@@ -340,6 +358,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) | |||
340 | ways = (aux >> 13) & 0xf; | 358 | ways = (aux >> 13) & 0xf; |
341 | type = "L210"; | 359 | type = "L210"; |
342 | break; | 360 | break; |
361 | |||
362 | case AURORA_CACHE_ID: | ||
363 | sync_reg_offset = AURORA_SYNC_REG; | ||
364 | ways = (aux >> 13) & 0xf; | ||
365 | ways = 2 << ((ways + 1) >> 2); | ||
366 | way_size_shift = AURORA_WAY_SIZE_SHIFT; | ||
367 | type = "Aurora"; | ||
368 | break; | ||
343 | default: | 369 | default: |
344 | /* Assume unknown chips have 8 ways */ | 370 | /* Assume unknown chips have 8 ways */ |
345 | ways = 8; | 371 | ways = 8; |
@@ -353,7 +379,8 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) | |||
353 | * L2 cache Size = Way size * Number of ways | 379 | * L2 cache Size = Way size * Number of ways |
354 | */ | 380 | */ |
355 | way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17; | 381 | way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17; |
356 | way_size = 1 << (way_size + 3); | 382 | way_size = 1 << (way_size + way_size_shift); |
383 | |||
357 | l2x0_size = ways * way_size * SZ_1K; | 384 | l2x0_size = ways * way_size * SZ_1K; |
358 | 385 | ||
359 | /* | 386 | /* |
@@ -361,7 +388,7 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) | |||
361 | * If you are booting from non-secure mode | 388 | * If you are booting from non-secure mode |
362 | * accessing the below registers will fault. | 389 | * accessing the below registers will fault. |
363 | */ | 390 | */ |
364 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { | 391 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { |
365 | /* Make sure that I&D is not locked down when starting */ | 392 | /* Make sure that I&D is not locked down when starting */ |
366 | l2x0_unlock(cache_id); | 393 | l2x0_unlock(cache_id); |
367 | 394 | ||
@@ -371,7 +398,7 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) | |||
371 | l2x0_inv_all(); | 398 | l2x0_inv_all(); |
372 | 399 | ||
373 | /* enable L2X0 */ | 400 | /* enable L2X0 */ |
374 | writel_relaxed(1, l2x0_base + L2X0_CTRL); | 401 | writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL); |
375 | } | 402 | } |
376 | 403 | ||
377 | /* Re-read it in case some bits are reserved. */ | 404 | /* Re-read it in case some bits are reserved. */ |
@@ -380,13 +407,15 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) | |||
380 | /* Save the value for resuming. */ | 407 | /* Save the value for resuming. */ |
381 | l2x0_saved_regs.aux_ctrl = aux; | 408 | l2x0_saved_regs.aux_ctrl = aux; |
382 | 409 | ||
383 | outer_cache.inv_range = l2x0_inv_range; | 410 | if (!of_init) { |
384 | outer_cache.clean_range = l2x0_clean_range; | 411 | outer_cache.inv_range = l2x0_inv_range; |
385 | outer_cache.flush_range = l2x0_flush_range; | 412 | outer_cache.clean_range = l2x0_clean_range; |
386 | outer_cache.sync = l2x0_cache_sync; | 413 | outer_cache.flush_range = l2x0_flush_range; |
387 | outer_cache.flush_all = l2x0_flush_all; | 414 | outer_cache.sync = l2x0_cache_sync; |
388 | outer_cache.inv_all = l2x0_inv_all; | 415 | outer_cache.flush_all = l2x0_flush_all; |
389 | outer_cache.disable = l2x0_disable; | 416 | outer_cache.inv_all = l2x0_inv_all; |
417 | outer_cache.disable = l2x0_disable; | ||
418 | } | ||
390 | 419 | ||
391 | printk(KERN_INFO "%s cache controller enabled\n", type); | 420 | printk(KERN_INFO "%s cache controller enabled\n", type); |
392 | printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", | 421 | printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", |
@@ -394,6 +423,100 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) | |||
394 | } | 423 | } |
395 | 424 | ||
396 | #ifdef CONFIG_OF | 425 | #ifdef CONFIG_OF |
426 | static int l2_wt_override; | ||
427 | |||
428 | /* | ||
429 | * Note that the end addresses passed to Linux primitives are | ||
430 | * noninclusive, while the hardware cache range operations use | ||
431 | * inclusive start and end addresses. | ||
432 | */ | ||
433 | static unsigned long calc_range_end(unsigned long start, unsigned long end) | ||
434 | { | ||
435 | /* | ||
436 | * Limit the number of cache lines processed at once, | ||
437 | * since cache range operations stall the CPU pipeline | ||
438 | * until completion. | ||
439 | */ | ||
440 | if (end > start + MAX_RANGE_SIZE) | ||
441 | end = start + MAX_RANGE_SIZE; | ||
442 | |||
443 | /* | ||
444 | * Cache range operations can't straddle a page boundary. | ||
445 | */ | ||
446 | if (end > PAGE_ALIGN(start+1)) | ||
447 | end = PAGE_ALIGN(start+1); | ||
448 | |||
449 | return end; | ||
450 | } | ||
451 | |||
452 | /* | ||
453 | * Make sure 'start' and 'end' reference the same page, as L2 is PIPT | ||
454 | * and range operations only do a TLB lookup on the start address. | ||
455 | */ | ||
456 | static void aurora_pa_range(unsigned long start, unsigned long end, | ||
457 | unsigned long offset) | ||
458 | { | ||
459 | unsigned long flags; | ||
460 | |||
461 | raw_spin_lock_irqsave(&l2x0_lock, flags); | ||
462 | writel(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG); | ||
463 | writel(end, l2x0_base + offset); | ||
464 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | ||
465 | |||
466 | cache_sync(); | ||
467 | } | ||
468 | |||
469 | static void aurora_inv_range(unsigned long start, unsigned long end) | ||
470 | { | ||
471 | /* | ||
472 | * round the start address down and the end address up to cache line size | ||
473 | */ | ||
474 | start &= ~(CACHE_LINE_SIZE - 1); | ||
475 | end = ALIGN(end, CACHE_LINE_SIZE); | ||
476 | |||
477 | /* | ||
478 | * Invalidate all full cache lines between 'start' and 'end'. | ||
479 | */ | ||
480 | while (start < end) { | ||
481 | unsigned long range_end = calc_range_end(start, end); | ||
482 | aurora_pa_range(start, range_end - CACHE_LINE_SIZE, | ||
483 | AURORA_INVAL_RANGE_REG); | ||
484 | start = range_end; | ||
485 | } | ||
486 | } | ||
487 | |||
488 | static void aurora_clean_range(unsigned long start, unsigned long end) | ||
489 | { | ||
490 | /* | ||
491 | * If L2 is forced to WT, the L2 will always be clean and we | ||
492 | * don't need to do anything here. | ||
493 | */ | ||
494 | if (!l2_wt_override) { | ||
495 | start &= ~(CACHE_LINE_SIZE - 1); | ||
496 | end = ALIGN(end, CACHE_LINE_SIZE); | ||
497 | while (start != end) { | ||
498 | unsigned long range_end = calc_range_end(start, end); | ||
499 | aurora_pa_range(start, range_end - CACHE_LINE_SIZE, | ||
500 | AURORA_CLEAN_RANGE_REG); | ||
501 | start = range_end; | ||
502 | } | ||
503 | } | ||
504 | } | ||
505 | |||
506 | static void aurora_flush_range(unsigned long start, unsigned long end) | ||
507 | { | ||
508 | if (!l2_wt_override) { | ||
509 | start &= ~(CACHE_LINE_SIZE - 1); | ||
510 | end = ALIGN(end, CACHE_LINE_SIZE); | ||
511 | while (start != end) { | ||
512 | unsigned long range_end = calc_range_end(start, end); | ||
513 | aurora_pa_range(start, range_end - CACHE_LINE_SIZE, | ||
514 | AURORA_FLUSH_RANGE_REG); | ||
515 | start = range_end; | ||
516 | } | ||
517 | } | ||
518 | } | ||
519 | |||
397 | static void __init l2x0_of_setup(const struct device_node *np, | 520 | static void __init l2x0_of_setup(const struct device_node *np, |
398 | u32 *aux_val, u32 *aux_mask) | 521 | u32 *aux_val, u32 *aux_mask) |
399 | { | 522 | { |
@@ -491,9 +614,15 @@ static void __init pl310_save(void) | |||
491 | } | 614 | } |
492 | } | 615 | } |
493 | 616 | ||
617 | static void aurora_save(void) | ||
618 | { | ||
619 | l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL); | ||
620 | l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); | ||
621 | } | ||
622 | |||
494 | static void l2x0_resume(void) | 623 | static void l2x0_resume(void) |
495 | { | 624 | { |
496 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { | 625 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { |
497 | /* restore aux ctrl and enable l2 */ | 626 | /* restore aux ctrl and enable l2 */ |
498 | l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID)); | 627 | l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID)); |
499 | 628 | ||
@@ -502,7 +631,7 @@ static void l2x0_resume(void) | |||
502 | 631 | ||
503 | l2x0_inv_all(); | 632 | l2x0_inv_all(); |
504 | 633 | ||
505 | writel_relaxed(1, l2x0_base + L2X0_CTRL); | 634 | writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL); |
506 | } | 635 | } |
507 | } | 636 | } |
508 | 637 | ||
@@ -510,7 +639,7 @@ static void pl310_resume(void) | |||
510 | { | 639 | { |
511 | u32 l2x0_revision; | 640 | u32 l2x0_revision; |
512 | 641 | ||
513 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { | 642 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { |
514 | /* restore pl310 setup */ | 643 | /* restore pl310 setup */ |
515 | writel_relaxed(l2x0_saved_regs.tag_latency, | 644 | writel_relaxed(l2x0_saved_regs.tag_latency, |
516 | l2x0_base + L2X0_TAG_LATENCY_CTRL); | 645 | l2x0_base + L2X0_TAG_LATENCY_CTRL); |
@@ -536,22 +665,108 @@ static void pl310_resume(void) | |||
536 | l2x0_resume(); | 665 | l2x0_resume(); |
537 | } | 666 | } |
538 | 667 | ||
668 | static void aurora_resume(void) | ||
669 | { | ||
670 | if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { | ||
671 | writel(l2x0_saved_regs.aux_ctrl, l2x0_base + L2X0_AUX_CTRL); | ||
672 | writel(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL); | ||
673 | } | ||
674 | } | ||
675 | |||
676 | static void __init aurora_broadcast_l2_commands(void) | ||
677 | { | ||
678 | __u32 u; | ||
679 | /* Enable Broadcasting of cache commands to L2*/ | ||
680 | __asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u)); | ||
681 | u |= AURORA_CTRL_FW; /* Set the FW bit */ | ||
682 | __asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u)); | ||
683 | isb(); | ||
684 | } | ||
685 | |||
686 | static void __init aurora_of_setup(const struct device_node *np, | ||
687 | u32 *aux_val, u32 *aux_mask) | ||
688 | { | ||
689 | u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU; | ||
690 | u32 mask = AURORA_ACR_REPLACEMENT_MASK; | ||
691 | |||
692 | of_property_read_u32(np, "cache-id-part", | ||
693 | &cache_id_part_number_from_dt); | ||
694 | |||
695 | /* Determine and save the write policy */ | ||
696 | l2_wt_override = of_property_read_bool(np, "wt-override"); | ||
697 | |||
698 | if (l2_wt_override) { | ||
699 | val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY; | ||
700 | mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK; | ||
701 | } | ||
702 | |||
703 | *aux_val &= ~mask; | ||
704 | *aux_val |= val; | ||
705 | *aux_mask &= ~mask; | ||
706 | } | ||
707 | |||
539 | static const struct l2x0_of_data pl310_data = { | 708 | static const struct l2x0_of_data pl310_data = { |
540 | pl310_of_setup, | 709 | .setup = pl310_of_setup, |
541 | pl310_save, | 710 | .save = pl310_save, |
542 | pl310_resume, | 711 | .outer_cache = { |
712 | .resume = pl310_resume, | ||
713 | .inv_range = l2x0_inv_range, | ||
714 | .clean_range = l2x0_clean_range, | ||
715 | .flush_range = l2x0_flush_range, | ||
716 | .sync = l2x0_cache_sync, | ||
717 | .flush_all = l2x0_flush_all, | ||
718 | .inv_all = l2x0_inv_all, | ||
719 | .disable = l2x0_disable, | ||
720 | .set_debug = pl310_set_debug, | ||
721 | }, | ||
543 | }; | 722 | }; |
544 | 723 | ||
545 | static const struct l2x0_of_data l2x0_data = { | 724 | static const struct l2x0_of_data l2x0_data = { |
546 | l2x0_of_setup, | 725 | .setup = l2x0_of_setup, |
547 | NULL, | 726 | .save = NULL, |
548 | l2x0_resume, | 727 | .outer_cache = { |
728 | .resume = l2x0_resume, | ||
729 | .inv_range = l2x0_inv_range, | ||
730 | .clean_range = l2x0_clean_range, | ||
731 | .flush_range = l2x0_flush_range, | ||
732 | .sync = l2x0_cache_sync, | ||
733 | .flush_all = l2x0_flush_all, | ||
734 | .inv_all = l2x0_inv_all, | ||
735 | .disable = l2x0_disable, | ||
736 | }, | ||
737 | }; | ||
738 | |||
739 | static const struct l2x0_of_data aurora_with_outer_data = { | ||
740 | .setup = aurora_of_setup, | ||
741 | .save = aurora_save, | ||
742 | .outer_cache = { | ||
743 | .resume = aurora_resume, | ||
744 | .inv_range = aurora_inv_range, | ||
745 | .clean_range = aurora_clean_range, | ||
746 | .flush_range = aurora_flush_range, | ||
747 | .sync = l2x0_cache_sync, | ||
748 | .flush_all = l2x0_flush_all, | ||
749 | .inv_all = l2x0_inv_all, | ||
750 | .disable = l2x0_disable, | ||
751 | }, | ||
752 | }; | ||
753 | |||
754 | static const struct l2x0_of_data aurora_no_outer_data = { | ||
755 | .setup = aurora_of_setup, | ||
756 | .save = aurora_save, | ||
757 | .outer_cache = { | ||
758 | .resume = aurora_resume, | ||
759 | }, | ||
549 | }; | 760 | }; |
550 | 761 | ||
551 | static const struct of_device_id l2x0_ids[] __initconst = { | 762 | static const struct of_device_id l2x0_ids[] __initconst = { |
552 | { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data }, | 763 | { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data }, |
553 | { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data }, | 764 | { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data }, |
554 | { .compatible = "arm,l210-cache", .data = (void *)&l2x0_data }, | 765 | { .compatible = "arm,l210-cache", .data = (void *)&l2x0_data }, |
766 | { .compatible = "marvell,aurora-system-cache", | ||
767 | .data = (void *)&aurora_no_outer_data}, | ||
768 | { .compatible = "marvell,aurora-outer-cache", | ||
769 | .data = (void *)&aurora_with_outer_data}, | ||
555 | {} | 770 | {} |
556 | }; | 771 | }; |
557 | 772 | ||
@@ -577,17 +792,24 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask) | |||
577 | data = of_match_node(l2x0_ids, np)->data; | 792 | data = of_match_node(l2x0_ids, np)->data; |
578 | 793 | ||
579 | /* L2 configuration can only be changed if the cache is disabled */ | 794 | /* L2 configuration can only be changed if the cache is disabled */ |
580 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { | 795 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { |
581 | if (data->setup) | 796 | if (data->setup) |
582 | data->setup(np, &aux_val, &aux_mask); | 797 | data->setup(np, &aux_val, &aux_mask); |
798 | |||
799 | /* For aurora cache in no outer mode select the | ||
800 | * correct mode using the coprocessor*/ | ||
801 | if (data == &aurora_no_outer_data) | ||
802 | aurora_broadcast_l2_commands(); | ||
583 | } | 803 | } |
584 | 804 | ||
585 | if (data->save) | 805 | if (data->save) |
586 | data->save(); | 806 | data->save(); |
587 | 807 | ||
808 | of_init = true; | ||
588 | l2x0_init(l2x0_base, aux_val, aux_mask); | 809 | l2x0_init(l2x0_base, aux_val, aux_mask); |
589 | 810 | ||
590 | outer_cache.resume = data->resume; | 811 | memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache)); |
812 | |||
591 | return 0; | 813 | return 0; |
592 | } | 814 | } |
593 | #endif | 815 | #endif |
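A note on the Aurora range helpers added above: calc_range_end() clamps each hardware operation twice, to MAX_RANGE_SIZE bytes and to the page that contains 'start', because the range registers take an inclusive end address and, per the comment in the patch, only the start address gets a TLB lookup. A standalone sketch of the same clamping, assuming 4 KiB pages and the MAX_RANGE_SIZE of 1024 from cache-aurora-l2.h (names are illustrative):

	#define PAGE_SZ		4096UL	/* assumption: 4 KiB pages */
	#define MAX_RANGE_SZ	1024UL	/* mirrors MAX_RANGE_SIZE above */

	/* Exclusive end of the next chunk to hand to the range registers. */
	static unsigned long calc_chunk_end(unsigned long start, unsigned long end)
	{
		unsigned long page_end = (start + PAGE_SZ) & ~(PAGE_SZ - 1);

		if (end > start + MAX_RANGE_SZ)		/* bound the pipeline stall per operation */
			end = start + MAX_RANGE_SZ;
		if (end > page_end)			/* never cross the page containing start */
			end = page_end;
		return end;
	}

For example, a flush of [0x1fc0, 0x3000) is issued as [0x1fc0, 0x2000), then [0x2000, 0x2400), and so on, with the address of each chunk's last cache line (range_end - CACHE_LINE_SIZE in the code above, lines being 32 bytes) written to the corresponding range register.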
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index 4e07eec1270d..bc4a5e9ebb78 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c | |||
@@ -2,6 +2,9 @@ | |||
2 | * linux/arch/arm/mm/context.c | 2 | * linux/arch/arm/mm/context.c |
3 | * | 3 | * |
4 | * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved. | 4 | * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved. |
5 | * Copyright (C) 2012 ARM Limited | ||
6 | * | ||
7 | * Author: Will Deacon <will.deacon@arm.com> | ||
5 | * | 8 | * |
6 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
@@ -14,14 +17,40 @@ | |||
14 | #include <linux/percpu.h> | 17 | #include <linux/percpu.h> |
15 | 18 | ||
16 | #include <asm/mmu_context.h> | 19 | #include <asm/mmu_context.h> |
20 | #include <asm/smp_plat.h> | ||
17 | #include <asm/thread_notify.h> | 21 | #include <asm/thread_notify.h> |
18 | #include <asm/tlbflush.h> | 22 | #include <asm/tlbflush.h> |
19 | 23 | ||
24 | /* | ||
25 | * On ARMv6, we have the following structure in the Context ID: | ||
26 | * | ||
27 | * 31 7 0 | ||
28 | * +-------------------------+-----------+ | ||
29 | * | process ID | ASID | | ||
30 | * +-------------------------+-----------+ | ||
31 | * | context ID | | ||
32 | * +-------------------------------------+ | ||
33 | * | ||
34 | * The ASID is used to tag entries in the CPU caches and TLBs. | ||
35 | * The context ID is used by debuggers and trace logic, and | ||
36 | * should be unique within all running processes. | ||
37 | */ | ||
38 | #define ASID_FIRST_VERSION (1ULL << ASID_BITS) | ||
39 | #define NUM_USER_ASIDS (ASID_FIRST_VERSION - 1) | ||
40 | |||
41 | #define ASID_TO_IDX(asid) ((asid & ~ASID_MASK) - 1) | ||
42 | #define IDX_TO_ASID(idx) ((idx + 1) & ~ASID_MASK) | ||
43 | |||
20 | static DEFINE_RAW_SPINLOCK(cpu_asid_lock); | 44 | static DEFINE_RAW_SPINLOCK(cpu_asid_lock); |
21 | unsigned int cpu_last_asid = ASID_FIRST_VERSION; | 45 | static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); |
46 | static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); | ||
47 | |||
48 | static DEFINE_PER_CPU(atomic64_t, active_asids); | ||
49 | static DEFINE_PER_CPU(u64, reserved_asids); | ||
50 | static cpumask_t tlb_flush_pending; | ||
22 | 51 | ||
23 | #ifdef CONFIG_ARM_LPAE | 52 | #ifdef CONFIG_ARM_LPAE |
24 | void cpu_set_reserved_ttbr0(void) | 53 | static void cpu_set_reserved_ttbr0(void) |
25 | { | 54 | { |
26 | unsigned long ttbl = __pa(swapper_pg_dir); | 55 | unsigned long ttbl = __pa(swapper_pg_dir); |
27 | unsigned long ttbh = 0; | 56 | unsigned long ttbh = 0; |
@@ -37,7 +66,7 @@ void cpu_set_reserved_ttbr0(void) | |||
37 | isb(); | 66 | isb(); |
38 | } | 67 | } |
39 | #else | 68 | #else |
40 | void cpu_set_reserved_ttbr0(void) | 69 | static void cpu_set_reserved_ttbr0(void) |
41 | { | 70 | { |
42 | u32 ttb; | 71 | u32 ttb; |
43 | /* Copy TTBR1 into TTBR0 */ | 72 | /* Copy TTBR1 into TTBR0 */ |
@@ -84,124 +113,104 @@ static int __init contextidr_notifier_init(void) | |||
84 | arch_initcall(contextidr_notifier_init); | 113 | arch_initcall(contextidr_notifier_init); |
85 | #endif | 114 | #endif |
86 | 115 | ||
87 | /* | 116 | static void flush_context(unsigned int cpu) |
88 | * We fork()ed a process, and we need a new context for the child | ||
89 | * to run in. | ||
90 | */ | ||
91 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) | ||
92 | { | 117 | { |
93 | mm->context.id = 0; | 118 | int i; |
94 | raw_spin_lock_init(&mm->context.id_lock); | 119 | u64 asid; |
95 | } | 120 | |
121 | /* Update the list of reserved ASIDs and the ASID bitmap. */ | ||
122 | bitmap_clear(asid_map, 0, NUM_USER_ASIDS); | ||
123 | for_each_possible_cpu(i) { | ||
124 | if (i == cpu) { | ||
125 | asid = 0; | ||
126 | } else { | ||
127 | asid = atomic64_xchg(&per_cpu(active_asids, i), 0); | ||
128 | __set_bit(ASID_TO_IDX(asid), asid_map); | ||
129 | } | ||
130 | per_cpu(reserved_asids, i) = asid; | ||
131 | } | ||
96 | 132 | ||
97 | static void flush_context(void) | 133 | /* Queue a TLB invalidate and flush the I-cache if necessary. */ |
98 | { | 134 | if (!tlb_ops_need_broadcast()) |
99 | cpu_set_reserved_ttbr0(); | 135 | cpumask_set_cpu(cpu, &tlb_flush_pending); |
100 | local_flush_tlb_all(); | 136 | else |
101 | if (icache_is_vivt_asid_tagged()) { | 137 | cpumask_setall(&tlb_flush_pending); |
138 | |||
139 | if (icache_is_vivt_asid_tagged()) | ||
102 | __flush_icache_all(); | 140 | __flush_icache_all(); |
103 | dsb(); | ||
104 | } | ||
105 | } | 141 | } |
106 | 142 | ||
107 | #ifdef CONFIG_SMP | 143 | static int is_reserved_asid(u64 asid) |
144 | { | ||
145 | int cpu; | ||
146 | for_each_possible_cpu(cpu) | ||
147 | if (per_cpu(reserved_asids, cpu) == asid) | ||
148 | return 1; | ||
149 | return 0; | ||
150 | } | ||
108 | 151 | ||
109 | static void set_mm_context(struct mm_struct *mm, unsigned int asid) | 152 | static void new_context(struct mm_struct *mm, unsigned int cpu) |
110 | { | 153 | { |
111 | unsigned long flags; | 154 | u64 asid = mm->context.id; |
155 | u64 generation = atomic64_read(&asid_generation); | ||
112 | 156 | ||
113 | /* | 157 | if (asid != 0 && is_reserved_asid(asid)) { |
114 | * Locking needed for multi-threaded applications where the | ||
115 | * same mm->context.id could be set from different CPUs during | ||
116 | * the broadcast. This function is also called via IPI so the | ||
117 | * mm->context.id_lock has to be IRQ-safe. | ||
118 | */ | ||
119 | raw_spin_lock_irqsave(&mm->context.id_lock, flags); | ||
120 | if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) { | ||
121 | /* | 158 | /* |
122 | * Old version of ASID found. Set the new one and | 159 | * Our current ASID was active during a rollover, we can |
123 | * reset mm_cpumask(mm). | 160 | * continue to use it and this was just a false alarm. |
124 | */ | 161 | */ |
125 | mm->context.id = asid; | 162 | asid = generation | (asid & ~ASID_MASK); |
163 | } else { | ||
164 | /* | ||
165 | * Allocate a free ASID. If we can't find one, take a | ||
166 | * note of the currently active ASIDs and mark the TLBs | ||
167 | * as requiring flushes. | ||
168 | */ | ||
169 | asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS); | ||
170 | if (asid == NUM_USER_ASIDS) { | ||
171 | generation = atomic64_add_return(ASID_FIRST_VERSION, | ||
172 | &asid_generation); | ||
173 | flush_context(cpu); | ||
174 | asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS); | ||
175 | } | ||
176 | __set_bit(asid, asid_map); | ||
177 | asid = generation | IDX_TO_ASID(asid); | ||
126 | cpumask_clear(mm_cpumask(mm)); | 178 | cpumask_clear(mm_cpumask(mm)); |
127 | } | 179 | } |
128 | raw_spin_unlock_irqrestore(&mm->context.id_lock, flags); | ||
129 | 180 | ||
130 | /* | 181 | mm->context.id = asid; |
131 | * Set the mm_cpumask(mm) bit for the current CPU. | ||
132 | */ | ||
133 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); | ||
134 | } | 182 | } |
135 | 183 | ||
136 | /* | 184 | void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) |
137 | * Reset the ASID on the current CPU. This function call is broadcast | ||
138 | * from the CPU handling the ASID rollover and holding cpu_asid_lock. | ||
139 | */ | ||
140 | static void reset_context(void *info) | ||
141 | { | 185 | { |
142 | unsigned int asid; | 186 | unsigned long flags; |
143 | unsigned int cpu = smp_processor_id(); | 187 | unsigned int cpu = smp_processor_id(); |
144 | struct mm_struct *mm = current->active_mm; | ||
145 | 188 | ||
146 | smp_rmb(); | 189 | if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) |
147 | asid = cpu_last_asid + cpu + 1; | 190 | __check_vmalloc_seq(mm); |
148 | 191 | ||
149 | flush_context(); | 192 | /* |
150 | set_mm_context(mm, asid); | 193 | * Required during context switch to avoid speculative page table |
151 | 194 | * walking with the wrong TTBR. | |
152 | /* set the new ASID */ | 195 | */ |
153 | cpu_switch_mm(mm->pgd, mm); | 196 | cpu_set_reserved_ttbr0(); |
154 | } | ||
155 | 197 | ||
156 | #else | 198 | if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS) |
199 | && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id)) | ||
200 | goto switch_mm_fastpath; | ||
157 | 201 | ||
158 | static inline void set_mm_context(struct mm_struct *mm, unsigned int asid) | 202 | raw_spin_lock_irqsave(&cpu_asid_lock, flags); |
159 | { | 203 | /* Check that our ASID belongs to the current generation. */ |
160 | mm->context.id = asid; | 204 | if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS) |
161 | cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); | 205 | new_context(mm, cpu); |
162 | } | ||
163 | 206 | ||
164 | #endif | 207 | atomic64_set(&per_cpu(active_asids, cpu), mm->context.id); |
208 | cpumask_set_cpu(cpu, mm_cpumask(mm)); | ||
165 | 209 | ||
166 | void __new_context(struct mm_struct *mm) | 210 | if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) |
167 | { | 211 | local_flush_tlb_all(); |
168 | unsigned int asid; | 212 | raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); |
169 | 213 | ||
170 | raw_spin_lock(&cpu_asid_lock); | 214 | switch_mm_fastpath: |
171 | #ifdef CONFIG_SMP | 215 | cpu_switch_mm(mm->pgd, mm); |
172 | /* | ||
173 | * Check the ASID again, in case the change was broadcast from | ||
174 | * another CPU before we acquired the lock. | ||
175 | */ | ||
176 | if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) { | ||
177 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); | ||
178 | raw_spin_unlock(&cpu_asid_lock); | ||
179 | return; | ||
180 | } | ||
181 | #endif | ||
182 | /* | ||
183 | * At this point, it is guaranteed that the current mm (with | ||
184 | * an old ASID) isn't active on any other CPU since the ASIDs | ||
185 | * are changed simultaneously via IPI. | ||
186 | */ | ||
187 | asid = ++cpu_last_asid; | ||
188 | if (asid == 0) | ||
189 | asid = cpu_last_asid = ASID_FIRST_VERSION; | ||
190 | |||
191 | /* | ||
192 | * If we've used up all our ASIDs, we need | ||
193 | * to start a new version and flush the TLB. | ||
194 | */ | ||
195 | if (unlikely((asid & ~ASID_MASK) == 0)) { | ||
196 | asid = cpu_last_asid + smp_processor_id() + 1; | ||
197 | flush_context(); | ||
198 | #ifdef CONFIG_SMP | ||
199 | smp_wmb(); | ||
200 | smp_call_function(reset_context, NULL, 1); | ||
201 | #endif | ||
202 | cpu_last_asid += NR_CPUS; | ||
203 | } | ||
204 | |||
205 | set_mm_context(mm, asid); | ||
206 | raw_spin_unlock(&cpu_asid_lock); | ||
207 | } | 216 | } |
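The rewritten allocator in context.c keeps a generation counter in the bits above ASID_BITS of context.id, so the common case in check_and_switch_context() is one comparison plus an xchg on the per-cpu active_asids counter. A reduced sketch of the generation test, assuming 8-bit hardware ASIDs (ASID_BITS = 8); everything except the shift is illustrative:

	#include <stdbool.h>
	#include <stdint.h>

	#define ASID_BITS 8	/* assumption: 8-bit ASID field in the Context ID register */

	/* True when 'id' was handed out in the current generation and may be reused
	 * on the switch_mm() fast path without taking cpu_asid_lock. */
	static bool asid_is_current(uint64_t id, uint64_t generation)
	{
		return ((id ^ generation) >> ASID_BITS) == 0;
	}

On rollover, asid_generation is bumped by ASID_FIRST_VERSION, each CPU's active ASID is parked in reserved_asids, and tlb_flush_pending makes every CPU flush its TLB before it runs with a reused ASID.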
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index ab88ed4f8e08..99db769307ec 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c | |||
@@ -92,6 +92,9 @@ static int __init init_static_idmap(void) | |||
92 | (long long)idmap_start, (long long)idmap_end); | 92 | (long long)idmap_start, (long long)idmap_end); |
93 | identity_mapping_add(idmap_pgd, idmap_start, idmap_end); | 93 | identity_mapping_add(idmap_pgd, idmap_start, idmap_end); |
94 | 94 | ||
95 | /* Flush L1 for the hardware to see this page table content */ | ||
96 | flush_cache_louis(); | ||
97 | |||
95 | return 0; | 98 | return 0; |
96 | } | 99 | } |
97 | early_initcall(init_static_idmap); | 100 | early_initcall(init_static_idmap); |
@@ -103,12 +106,15 @@ early_initcall(init_static_idmap); | |||
103 | */ | 106 | */ |
104 | void setup_mm_for_reboot(void) | 107 | void setup_mm_for_reboot(void) |
105 | { | 108 | { |
106 | /* Clean and invalidate L1. */ | ||
107 | flush_cache_all(); | ||
108 | |||
109 | /* Switch to the identity mapping. */ | 109 | /* Switch to the identity mapping. */ |
110 | cpu_switch_mm(idmap_pgd, &init_mm); | 110 | cpu_switch_mm(idmap_pgd, &init_mm); |
111 | 111 | ||
112 | /* Flush the TLB. */ | 112 | #ifdef CONFIG_CPU_HAS_ASID |
113 | /* | ||
114 | * We don't have a clean ASID for the identity mapping, which | ||
115 | * may clash with virtual addresses of the previous page tables | ||
116 | * and therefore potentially in the TLB. | ||
117 | */ | ||
113 | local_flush_tlb_all(); | 118 | local_flush_tlb_all(); |
119 | #endif | ||
114 | } | 120 | } |
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 5dcc2fd46c46..88fd86cf3d9a 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c | |||
@@ -47,18 +47,18 @@ int ioremap_page(unsigned long virt, unsigned long phys, | |||
47 | } | 47 | } |
48 | EXPORT_SYMBOL(ioremap_page); | 48 | EXPORT_SYMBOL(ioremap_page); |
49 | 49 | ||
50 | void __check_kvm_seq(struct mm_struct *mm) | 50 | void __check_vmalloc_seq(struct mm_struct *mm) |
51 | { | 51 | { |
52 | unsigned int seq; | 52 | unsigned int seq; |
53 | 53 | ||
54 | do { | 54 | do { |
55 | seq = init_mm.context.kvm_seq; | 55 | seq = init_mm.context.vmalloc_seq; |
56 | memcpy(pgd_offset(mm, VMALLOC_START), | 56 | memcpy(pgd_offset(mm, VMALLOC_START), |
57 | pgd_offset_k(VMALLOC_START), | 57 | pgd_offset_k(VMALLOC_START), |
58 | sizeof(pgd_t) * (pgd_index(VMALLOC_END) - | 58 | sizeof(pgd_t) * (pgd_index(VMALLOC_END) - |
59 | pgd_index(VMALLOC_START))); | 59 | pgd_index(VMALLOC_START))); |
60 | mm->context.kvm_seq = seq; | 60 | mm->context.vmalloc_seq = seq; |
61 | } while (seq != init_mm.context.kvm_seq); | 61 | } while (seq != init_mm.context.vmalloc_seq); |
62 | } | 62 | } |
63 | 63 | ||
64 | #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) | 64 | #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) |
@@ -89,13 +89,13 @@ static void unmap_area_sections(unsigned long virt, unsigned long size) | |||
89 | if (!pmd_none(pmd)) { | 89 | if (!pmd_none(pmd)) { |
90 | /* | 90 | /* |
91 | * Clear the PMD from the page table, and | 91 | * Clear the PMD from the page table, and |
92 | * increment the kvm sequence so others | 92 | * increment the vmalloc sequence so others |
93 | * notice this change. | 93 | * notice this change. |
94 | * | 94 | * |
95 | * Note: this is still racy on SMP machines. | 95 | * Note: this is still racy on SMP machines. |
96 | */ | 96 | */ |
97 | pmd_clear(pmdp); | 97 | pmd_clear(pmdp); |
98 | init_mm.context.kvm_seq++; | 98 | init_mm.context.vmalloc_seq++; |
99 | 99 | ||
100 | /* | 100 | /* |
101 | * Free the page table, if there was one. | 101 | * Free the page table, if there was one. |
@@ -112,8 +112,8 @@ static void unmap_area_sections(unsigned long virt, unsigned long size) | |||
112 | * Ensure that the active_mm is up to date - we want to | 112 | * Ensure that the active_mm is up to date - we want to |
113 | * catch any use-after-iounmap cases. | 113 | * catch any use-after-iounmap cases. |
114 | */ | 114 | */ |
115 | if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq) | 115 | if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq) |
116 | __check_kvm_seq(current->active_mm); | 116 | __check_vmalloc_seq(current->active_mm); |
117 | 117 | ||
118 | flush_tlb_kernel_range(virt, end); | 118 | flush_tlb_kernel_range(virt, end); |
119 | } | 119 | } |
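The renamed __check_vmalloc_seq() keeps the existing copy-until-stable idiom: copy the kernel's vmalloc PGD entries, record which sequence number the copy corresponds to, and retry if a writer bumped the counter mid-copy. The same idiom in isolation (names are illustrative, not kernel API):

	#include <string.h>

	/* Copy data guarded by a counter that writers increment after each change. */
	static void copy_until_stable(const volatile unsigned int *writer_seq,
				      unsigned int *reader_seq,
				      void *dst, const void *src, size_t len)
	{
		unsigned int seq;

		do {
			seq = *writer_seq;	/* snapshot the writer's counter */
			memcpy(dst, src, len);	/* copy the guarded data */
			*reader_seq = seq;	/* remember which version we hold */
		} while (seq != *writer_seq);	/* a racing writer forces a retry */
	}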
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 941dfb9e9a78..99b47b950efc 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -488,7 +488,7 @@ static void __init build_mem_type_table(void) | |||
488 | #endif | 488 | #endif |
489 | 489 | ||
490 | for (i = 0; i < 16; i++) { | 490 | for (i = 0; i < 16; i++) { |
491 | unsigned long v = pgprot_val(protection_map[i]); | 491 | pteval_t v = pgprot_val(protection_map[i]); |
492 | protection_map[i] = __pgprot(v | user_pgprot); | 492 | protection_map[i] = __pgprot(v | user_pgprot); |
493 | } | 493 | } |
494 | 494 | ||
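The one-line mmu.c change is about width rather than style: pgprot_val() yields a pteval_t, which is 64-bit when CONFIG_ARM_LPAE is enabled, so accumulating it in a 32-bit unsigned long would drop attribute bits that live above bit 31. A hedged host-side illustration; the bit position is an example, not taken from the patch:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t pte = (1ULL << 54) | 0x3;	/* a high attribute bit plus low flags */
		uint32_t kept = (uint32_t)pte;		/* what a 32-bit unsigned long would keep */

		printf("full=%#llx kept=%#x\n", (unsigned long long)pte, kept);
		return 0;
	}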
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index b29a2265af01..eb6aa73bc8b7 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S | |||
@@ -167,6 +167,10 @@ | |||
167 | tst r1, #L_PTE_YOUNG | 167 | tst r1, #L_PTE_YOUNG |
168 | tstne r1, #L_PTE_PRESENT | 168 | tstne r1, #L_PTE_PRESENT |
169 | moveq r3, #0 | 169 | moveq r3, #0 |
170 | #ifndef CONFIG_CPU_USE_DOMAINS | ||
171 | tstne r1, #L_PTE_NONE | ||
172 | movne r3, #0 | ||
173 | #endif | ||
170 | 174 | ||
171 | str r3, [r0] | 175 | str r3, [r0] |
172 | mcr p15, 0, r0, c7, c10, 1 @ flush_pte | 176 | mcr p15, 0, r0, c7, c10, 1 @ flush_pte |
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S index fd045e706390..6d98c13ab827 100644 --- a/arch/arm/mm/proc-v7-2level.S +++ b/arch/arm/mm/proc-v7-2level.S | |||
@@ -100,7 +100,11 @@ ENTRY(cpu_v7_set_pte_ext) | |||
100 | orrne r3, r3, #PTE_EXT_XN | 100 | orrne r3, r3, #PTE_EXT_XN |
101 | 101 | ||
102 | tst r1, #L_PTE_YOUNG | 102 | tst r1, #L_PTE_YOUNG |
103 | tstne r1, #L_PTE_PRESENT | 103 | tstne r1, #L_PTE_VALID |
104 | #ifndef CONFIG_CPU_USE_DOMAINS | ||
105 | eorne r1, r1, #L_PTE_NONE | ||
106 | tstne r1, #L_PTE_NONE | ||
107 | #endif | ||
104 | moveq r3, #0 | 108 | moveq r3, #0 |
105 | 109 | ||
106 | ARM( str r3, [r0, #2048]! ) | 110 | ARM( str r3, [r0, #2048]! ) |
@@ -161,11 +165,11 @@ ENDPROC(cpu_v7_set_pte_ext) | |||
161 | * TFR EV X F I D LR S | 165 | * TFR EV X F I D LR S |
162 | * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM | 166 | * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM |
163 | * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced | 167 | * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced |
164 | * 1 0 110 0011 1100 .111 1101 < we want | 168 | * 01 0 110 0011 1100 .111 1101 < we want |
165 | */ | 169 | */ |
166 | .align 2 | 170 | .align 2 |
167 | .type v7_crval, #object | 171 | .type v7_crval, #object |
168 | v7_crval: | 172 | v7_crval: |
169 | crval clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c | 173 | crval clear=0x2120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c |
170 | 174 | ||
171 | .previous | 175 | .previous |
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index 8de0f1dd1549..7b56386f9496 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S | |||
@@ -65,8 +65,11 @@ ENDPROC(cpu_v7_switch_mm) | |||
65 | */ | 65 | */ |
66 | ENTRY(cpu_v7_set_pte_ext) | 66 | ENTRY(cpu_v7_set_pte_ext) |
67 | #ifdef CONFIG_MMU | 67 | #ifdef CONFIG_MMU |
68 | tst r2, #L_PTE_PRESENT | 68 | tst r2, #L_PTE_VALID |
69 | beq 1f | 69 | beq 1f |
70 | tst r3, #1 << (57 - 32) @ L_PTE_NONE | ||
71 | bicne r2, #L_PTE_VALID | ||
72 | bne 1f | ||
70 | tst r3, #1 << (55 - 32) @ L_PTE_DIRTY | 73 | tst r3, #1 << (55 - 32) @ L_PTE_DIRTY |
71 | orreq r2, #L_PTE_RDONLY | 74 | orreq r2, #L_PTE_RDONLY |
72 | 1: strd r2, r3, [r0] | 75 | 1: strd r2, r3, [r0] |
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 846d279f3176..42cc833aa02f 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -57,7 +57,7 @@ ENTRY(cpu_v7_reset) | |||
57 | THUMB( bic r1, r1, #1 << 30 ) @ SCTLR.TE (Thumb exceptions) | 57 | THUMB( bic r1, r1, #1 << 30 ) @ SCTLR.TE (Thumb exceptions) |
58 | mcr p15, 0, r1, c1, c0, 0 @ disable MMU | 58 | mcr p15, 0, r1, c1, c0, 0 @ disable MMU |
59 | isb | 59 | isb |
60 | mov pc, r0 | 60 | bx r0 |
61 | ENDPROC(cpu_v7_reset) | 61 | ENDPROC(cpu_v7_reset) |
62 | .popsection | 62 | .popsection |
63 | 63 | ||
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index c641fb685017..b6f305e3b908 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c | |||
@@ -42,7 +42,7 @@ | |||
42 | #define r_skb_hl ARM_R8 | 42 | #define r_skb_hl ARM_R8 |
43 | 43 | ||
44 | #define SCRATCH_SP_OFFSET 0 | 44 | #define SCRATCH_SP_OFFSET 0 |
45 | #define SCRATCH_OFF(k) (SCRATCH_SP_OFFSET + (k)) | 45 | #define SCRATCH_OFF(k) (SCRATCH_SP_OFFSET + 4 * (k)) |
46 | 46 | ||
47 | #define SEEN_MEM ((1 << BPF_MEMWORDS) - 1) | 47 | #define SEEN_MEM ((1 << BPF_MEMWORDS) - 1) |
48 | #define SEEN_MEM_WORD(k) (1 << (k)) | 48 | #define SEEN_MEM_WORD(k) (1 << (k)) |
@@ -845,7 +845,7 @@ void bpf_jit_compile(struct sk_filter *fp) | |||
845 | ctx.skf = fp; | 845 | ctx.skf = fp; |
846 | ctx.ret0_fp_idx = -1; | 846 | ctx.ret0_fp_idx = -1; |
847 | 847 | ||
848 | ctx.offsets = kzalloc(GFP_KERNEL, 4 * (ctx.skf->len + 1)); | 848 | ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL); |
849 | if (ctx.offsets == NULL) | 849 | if (ctx.offsets == NULL) |
850 | return; | 850 | return; |
851 | 851 | ||
@@ -864,7 +864,7 @@ void bpf_jit_compile(struct sk_filter *fp) | |||
864 | 864 | ||
865 | ctx.idx += ctx.imm_count; | 865 | ctx.idx += ctx.imm_count; |
866 | if (ctx.imm_count) { | 866 | if (ctx.imm_count) { |
867 | ctx.imms = kzalloc(GFP_KERNEL, 4 * ctx.imm_count); | 867 | ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL); |
868 | if (ctx.imms == NULL) | 868 | if (ctx.imms == NULL) |
869 | goto out; | 869 | goto out; |
870 | } | 870 | } |
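Both bpf_jit_32.c hunks fix the same swapped-argument bug: kzalloc() takes the allocation size first and the GFP flags second, so the old calls passed GFP_KERNEL as the size and the size as the flags. The corrected shape, with 'nr' standing in for the lengths used by the JIT (illustrative only):

	/* kzalloc(size, flags): size first, flags second. */
	u32 *buf = kzalloc(4 * (nr + 1), GFP_KERNEL);
	if (buf == NULL)
		return;	/* the JIT simply bails out on allocation failure */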
diff --git a/arch/avr32/include/asm/mach/serial_at91.h b/arch/avr32/include/asm/mach/serial_at91.h deleted file mode 100644 index 55b317a89061..000000000000 --- a/arch/avr32/include/asm/mach/serial_at91.h +++ /dev/null | |||
@@ -1,33 +0,0 @@ | |||
1 | /* | ||
2 | * linux/include/asm-arm/mach/serial_at91.h | ||
3 | * | ||
4 | * Based on serial_sa1100.h by Nicolas Pitre | ||
5 | * | ||
6 | * Copyright (C) 2002 ATMEL Rousset | ||
7 | * | ||
8 | * Low level machine dependent UART functions. | ||
9 | */ | ||
10 | |||
11 | struct uart_port; | ||
12 | |||
13 | /* | ||
14 | * This is a temporary structure for registering these | ||
15 | * functions; it is intended to be discarded after boot. | ||
16 | */ | ||
17 | struct atmel_port_fns { | ||
18 | void (*set_mctrl)(struct uart_port *, u_int); | ||
19 | u_int (*get_mctrl)(struct uart_port *); | ||
20 | void (*enable_ms)(struct uart_port *); | ||
21 | void (*pm)(struct uart_port *, u_int, u_int); | ||
22 | int (*set_wake)(struct uart_port *, u_int); | ||
23 | int (*open)(struct uart_port *); | ||
24 | void (*close)(struct uart_port *); | ||
25 | }; | ||
26 | |||
27 | #if defined(CONFIG_SERIAL_ATMEL) | ||
28 | void atmel_register_uart_fns(struct atmel_port_fns *fns); | ||
29 | #else | ||
30 | #define atmel_register_uart_fns(fns) do { } while (0) | ||
31 | #endif | ||
32 | |||
33 | |||