Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile            |  2
-rw-r--r--  arch/arm/kernel/bios32.c            | 25
-rw-r--r--  arch/arm/kernel/calls.S             |  4
-rw-r--r--  arch/arm/kernel/crash_dump.c        |  3
-rw-r--r--  arch/arm/kernel/debug.S             | 19
-rw-r--r--  arch/arm/kernel/ecard.c             |  6
-rw-r--r--  arch/arm/kernel/elf.c               | 17
-rw-r--r--  arch/arm/kernel/etm.c               |  4
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c     | 21
-rw-r--r--  arch/arm/kernel/irq.c               | 70
-rw-r--r--  arch/arm/kernel/kprobes-decode.c    | 10
-rw-r--r--  arch/arm/kernel/perf_event.c        | 33
-rw-r--r--  arch/arm/kernel/perf_event_v6.c     |  2
-rw-r--r--  arch/arm/kernel/perf_event_v7.c     | 26
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c |  4
-rw-r--r--  arch/arm/kernel/process.c           |  2
-rw-r--r--  arch/arm/kernel/setup.c             | 31
-rw-r--r--  arch/arm/kernel/sleep.S             | 14
-rw-r--r--  arch/arm/kernel/smp.c               |  7
-rw-r--r--  arch/arm/kernel/swp_emulate.c       |  2
-rw-r--r--  arch/arm/kernel/traps.c             |  9
21 files changed, 126 insertions, 185 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 74554f1742d7..8d95446150a3 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o
 obj-$(CONFIG_ARTHUR) += arthur.o
 obj-$(CONFIG_ISA_DMA) += dma-isa.o
 obj-$(CONFIG_PCI) += bios32.o isa.o
-obj-$(CONFIG_PM) += sleep.o
+obj-$(CONFIG_PM_SLEEP) += sleep.o
 obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
 obj-$(CONFIG_SMP) += smp.o smp_tlb.o
 obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index d86fcd44b220..e4ee050aad7d 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -159,31 +159,6 @@ static void __devinit pci_fixup_dec21285(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, pci_fixup_dec21285);
 
 /*
- * Same as above. The PrPMC800 carrier board for the PrPMC1100
- * card maps the host-bridge @ 00:01:00 for some reason and it
- * ends up getting scanned. Note that we only want to do this
- * fixup when we find the IXP4xx on a PrPMC system, which is why
- * we check the machine type. We could be running on a board
- * with an IXP4xx target device and we don't want to kill the
- * resources in that case.
- */
-static void __devinit pci_fixup_prpmc1100(struct pci_dev *dev)
-{
-	int i;
-
-	if (machine_is_prpmc1100()) {
-		dev->class &= 0xff;
-		dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
-		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-			dev->resource[i].start = 0;
-			dev->resource[i].end = 0;
-			dev->resource[i].flags = 0;
-		}
-	}
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IXP4XX, pci_fixup_prpmc1100);
-
-/*
  * PCI IDE controllers use non-standard I/O port decoding, respect it.
  */
 static void __devinit pci_fixup_ide_bases(struct pci_dev *dev)
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 5c26eccef998..7fbf28c35bb2 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -379,6 +379,10 @@
 		CALL(sys_fanotify_init)
 		CALL(sys_fanotify_mark)
 		CALL(sys_prlimit64)
+/* 370 */	CALL(sys_name_to_handle_at)
+		CALL(sys_open_by_handle_at)
+		CALL(sys_clock_adjtime)
+		CALL(sys_syncfs)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
diff --git a/arch/arm/kernel/crash_dump.c b/arch/arm/kernel/crash_dump.c
index cd3b853a8a6d..90c50d4b43f7 100644
--- a/arch/arm/kernel/crash_dump.c
+++ b/arch/arm/kernel/crash_dump.c
@@ -18,9 +18,6 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 
-/* stores the physical address of elf header of crash image */
-unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
-
 /**
  * copy_oldmem_page() - copy one page from old kernel memory
  * @pfn: page frame number to be copied
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index d2d983be096d..bcd66e00bdbe 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -25,7 +25,7 @@
 	.macro addruart, rp, rv
 	.endm
 
-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
 
 	.macro senduart, rd, rx
 	mcr p14, 0, \rd, c0, c5, 0
@@ -49,23 +49,6 @@
 1002:
 	.endm
 
-#elif defined(CONFIG_CPU_V7)
-
-	.macro senduart, rd, rx
-	mcr p14, 0, \rd, c0, c5, 0
-	.endm
-
-	.macro busyuart, rd, rx
-busy:	mrc p14, 0, pc, c0, c1, 0
-	bcs busy
-	.endm
-
-	.macro waituart, rd, rx
-wait:	mrc p14, 0, pc, c0, c1, 0
-	bcs wait
-
-	.endm
-
 #elif defined(CONFIG_CPU_XSCALE)
 
 	.macro senduart, rd, rx
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index 2ad62df37730..d16500110ee9 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -1043,8 +1043,8 @@ ecard_probe(int slot, card_type_t type)
 	 */
 	if (slot < 8) {
 		ec->irq = 32 + slot;
-		set_irq_chip(ec->irq, &ecard_chip);
-		set_irq_handler(ec->irq, handle_level_irq);
+		irq_set_chip_and_handler(ec->irq, &ecard_chip,
+					 handle_level_irq);
 		set_irq_flags(ec->irq, IRQF_VALID);
 	}
 
@@ -1103,7 +1103,7 @@ static int __init ecard_init(void)
 
 	irqhw = ecard_probeirqhw();
 
-	set_irq_chained_handler(IRQ_EXPANSIONCARD,
+	irq_set_chained_handler(IRQ_EXPANSIONCARD,
 				irqhw ? ecard_irqexp_handler : ecard_irq_handler);
 
 	ecard_proc_init();
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index d4a0da1e48f4..9b05c6a0dcea 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -40,15 +40,22 @@ EXPORT_SYMBOL(elf_check_arch);
 void elf_set_personality(const struct elf32_hdr *x)
 {
 	unsigned int eflags = x->e_flags;
-	unsigned int personality = PER_LINUX_32BIT;
+	unsigned int personality = current->personality & ~PER_MASK;
+
+	/*
+	 * We only support Linux ELF executables, so always set the
+	 * personality to LINUX.
+	 */
+	personality |= PER_LINUX;
 
 	/*
 	 * APCS-26 is only valid for OABI executables
 	 */
-	if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
-		if (eflags & EF_ARM_APCS_26)
-			personality = PER_LINUX;
-	}
+	if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN &&
+	    (eflags & EF_ARM_APCS_26))
+		personality &= ~ADDR_LIMIT_32BIT;
+	else
+		personality |= ADDR_LIMIT_32BIT;
 
 	set_personality(personality);
 
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 052b509e2d5f..1bec8b5f22f0 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -338,7 +338,7 @@ static struct miscdevice etb_miscdev = {
 	.fops = &etb_fops,
 };
 
-static int __init etb_probe(struct amba_device *dev, const struct amba_id *id)
+static int __devinit etb_probe(struct amba_device *dev, const struct amba_id *id)
 {
 	struct tracectx *t = &tracer;
 	int ret = 0;
@@ -530,7 +530,7 @@ static ssize_t trace_mode_store(struct kobject *kobj,
 static struct kobj_attribute trace_mode_attr =
 	__ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
 
-static int __init etm_probe(struct amba_device *dev, const struct amba_id *id)
+static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id)
 {
 	struct tracectx *t = &tracer;
 	int ret = 0;
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 44b84fe6e1b0..87acc25d7a3e 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -238,8 +238,8 @@ static int enable_monitor_mode(void)
 	ARM_DBG_READ(c1, 0, dscr);
 
 	/* Ensure that halting mode is disabled. */
-	if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN, "halting debug mode enabled."
-			"Unable to access hardware resources.")) {
+	if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN,
+		"halting debug mode enabled. Unable to access hardware resources.\n")) {
 		ret = -EPERM;
 		goto out;
 	}
@@ -377,7 +377,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		}
 	}
 
-	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot")) {
+	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n")) {
 		ret = -EBUSY;
 		goto out;
 	}
@@ -423,7 +423,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 		}
 	}
 
-	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
+	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n"))
 		return;
 
 	/* Reset the control register. */
@@ -635,7 +635,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	if (WARN_ONCE(!bp->overflow_handler &&
 		(arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_brps()
 		 || !bp->hw.bp_target),
-			"overflow handler required but none found")) {
+			"overflow handler required but none found\n")) {
 		ret = -EINVAL;
 	}
 out:
@@ -868,6 +868,13 @@ static void reset_ctrl_regs(void *info)
 		 */
 		asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
 		isb();
+
+		/*
+		 * Clear any configured vector-catch events before
+		 * enabling monitor mode.
+		 */
+		asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
+		isb();
 	}
 
 	if (enable_monitor_mode())
@@ -936,8 +943,8 @@ static int __init arch_hw_breakpoint_init(void)
 	ARM_DBG_READ(c1, 0, dscr);
 	if (dscr & ARM_DSCR_HDBGEN) {
 		max_watchpoint_len = 4;
-		pr_warning("halting debug mode enabled. Assuming maximum "
-				"watchpoint size of %u bytes.", max_watchpoint_len);
+		pr_warning("halting debug mode enabled. Assuming maximum watchpoint size of %u bytes.\n",
+			   max_watchpoint_len);
 	} else {
 		/* Work out the maximum supported watchpoint length. */
 		max_watchpoint_len = get_max_wp_len();
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 3535d3793e65..83bbad03fcc6 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -51,63 +51,18 @@
 
 unsigned long irq_err_count;
 
-int show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
-	int i = *(loff_t *) v, cpu;
-	struct irq_desc *desc;
-	struct irqaction * action;
-	unsigned long flags;
-	int prec, n;
-
-	for (prec = 3, n = 1000; prec < 10 && n <= nr_irqs; prec++)
-		n *= 10;
-
-#ifdef CONFIG_SMP
-	if (prec < 4)
-		prec = 4;
-#endif
-
-	if (i == 0) {
-		char cpuname[12];
-
-		seq_printf(p, "%*s ", prec, "");
-		for_each_present_cpu(cpu) {
-			sprintf(cpuname, "CPU%d", cpu);
-			seq_printf(p, " %10s", cpuname);
-		}
-		seq_putc(p, '\n');
-	}
-
-	if (i < nr_irqs) {
-		desc = irq_to_desc(i);
-		raw_spin_lock_irqsave(&desc->lock, flags);
-		action = desc->action;
-		if (!action)
-			goto unlock;
-
-		seq_printf(p, "%*d: ", prec, i);
-		for_each_present_cpu(cpu)
-			seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
-		seq_printf(p, " %10s", desc->irq_data.chip->name ? : "-");
-		seq_printf(p, " %s", action->name);
-		for (action = action->next; action; action = action->next)
-			seq_printf(p, ", %s", action->name);
-
-		seq_putc(p, '\n');
-unlock:
-		raw_spin_unlock_irqrestore(&desc->lock, flags);
-	} else if (i == nr_irqs) {
 #ifdef CONFIG_FIQ
 	show_fiq_list(p, prec);
 #endif
 #ifdef CONFIG_SMP
 	show_ipi_list(p, prec);
 #endif
 #ifdef CONFIG_LOCAL_TIMERS
 	show_local_irqs(p, prec);
 #endif
 	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
-	}
 	return 0;
 }
 
@@ -144,24 +99,21 @@ asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 
 void set_irq_flags(unsigned int irq, unsigned int iflags)
 {
-	struct irq_desc *desc;
-	unsigned long flags;
+	unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
 
 	if (irq >= nr_irqs) {
 		printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
 		return;
 	}
 
-	desc = irq_to_desc(irq);
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
 	if (iflags & IRQF_VALID)
-		desc->status &= ~IRQ_NOREQUEST;
+		clr |= IRQ_NOREQUEST;
 	if (iflags & IRQF_PROBE)
-		desc->status &= ~IRQ_NOPROBE;
+		clr |= IRQ_NOPROBE;
 	if (!(iflags & IRQF_NOAUTOEN))
-		desc->status &= ~IRQ_NOAUTOEN;
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
+		clr |= IRQ_NOAUTOEN;
+	/* Order is clear bits in "clr" then set bits in "set" */
+	irq_modify_status(irq, clr, set & ~clr);
 }
 
 void __init init_IRQ(void)
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index 8f6ed43861f1..23891317dc4b 100644
--- a/arch/arm/kernel/kprobes-decode.c
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -594,7 +594,8 @@ static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs)
 	long cpsr = regs->ARM_cpsr;
 
 	fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn);
-	regs->uregs[rn] = fnr.r0; /* Save Rn in case of writeback. */
+	if (rn != 15)
+		regs->uregs[rn] = fnr.r0; /* Save Rn in case of writeback. */
 	rdv = fnr.r1;
 
 	if (rd == 15) {
@@ -622,10 +623,11 @@ static void __kprobes emulate_str(struct kprobe *p, struct pt_regs *regs)
 	long rdv = (rd == 15) ? iaddr + str_pc_offset : regs->uregs[rd];
 	long rnv = (rn == 15) ? iaddr + 8 : regs->uregs[rn];
 	long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */
+	long rnv_wb;
 
-	/* Save Rn in case of writeback. */
-	regs->uregs[rn] =
-		insnslot_3arg_rflags(rnv, rdv, rmv, regs->ARM_cpsr, i_fn);
+	rnv_wb = insnslot_3arg_rflags(rnv, rdv, rmv, regs->ARM_cpsr, i_fn);
+	if (rn != 15)
+		regs->uregs[rn] = rnv_wb; /* Save Rn in case of writeback. */
 }
 
 static void __kprobes emulate_mrrc(struct kprobe *p, struct pt_regs *regs)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 22e194eb8536..979da3947f42 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -79,6 +79,7 @@ struct arm_pmu {
 	void (*write_counter)(int idx, u32 val);
 	void (*start)(void);
 	void (*stop)(void);
+	void (*reset)(void *);
 	const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
 				   [PERF_COUNT_HW_CACHE_OP_MAX]
 				   [PERF_COUNT_HW_CACHE_RESULT_MAX];
@@ -204,11 +205,9 @@ armpmu_event_set_period(struct perf_event *event,
 static u64
 armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
-		    int idx)
+		    int idx, int overflow)
 {
-	int shift = 64 - 32;
-	s64 prev_raw_count, new_raw_count;
-	u64 delta;
+	u64 delta, prev_raw_count, new_raw_count;
 
 again:
 	prev_raw_count = local64_read(&hwc->prev_count);
@@ -218,8 +217,13 @@ again:
 			    new_raw_count) != prev_raw_count)
 		goto again;
 
-	delta = (new_raw_count << shift) - (prev_raw_count << shift);
-	delta >>= shift;
+	new_raw_count &= armpmu->max_period;
+	prev_raw_count &= armpmu->max_period;
+
+	if (overflow)
+		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
+	else
+		delta = new_raw_count - prev_raw_count;
 
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
@@ -236,7 +240,7 @@ armpmu_read(struct perf_event *event)
 	if (hwc->idx < 0)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx);
+	armpmu_event_update(event, hwc, hwc->idx, 0);
 }
 
 static void
@@ -254,7 +258,7 @@ armpmu_stop(struct perf_event *event, int flags)
 	if (!(hwc->state & PERF_HES_STOPPED)) {
 		armpmu->disable(hwc, hwc->idx);
 		barrier(); /* why? */
-		armpmu_event_update(event, hwc, hwc->idx);
+		armpmu_event_update(event, hwc, hwc->idx, 0);
 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	}
 }
@@ -624,6 +628,19 @@ static struct pmu pmu = {
 #include "perf_event_v6.c"
 #include "perf_event_v7.c"
 
+/*
+ * Ensure the PMU has sane values out of reset.
+ * This requires SMP to be available, so exists as a separate initcall.
+ */
+static int __init
+armpmu_reset(void)
+{
+	if (armpmu && armpmu->reset)
+		return on_each_cpu(armpmu->reset, NULL, 1);
+	return 0;
+}
+arch_initcall(armpmu_reset);
+
 static int __init
 init_hw_perf_events(void)
 {
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 6fc2d228db55..f1e8dd94afe8 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -474,7 +474,7 @@ armv6pmu_handle_irq(int irq_num,
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event, hwc, idx, 1);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 2e1402556fa0..4960686afb58 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -466,6 +466,7 @@ static inline unsigned long armv7_pmnc_read(void)
 static inline void armv7_pmnc_write(unsigned long val)
 {
 	val &= ARMV7_PMNC_MASK;
+	isb();
 	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 }
 
@@ -502,6 +503,7 @@ static inline int armv7_pmnc_select_counter(unsigned int idx)
 
 	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
 	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+	isb();
 
 	return idx;
 }
@@ -780,7 +782,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event, hwc, idx, 1);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
@@ -847,6 +849,18 @@ static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
 	}
 }
 
+static void armv7pmu_reset(void *info)
+{
+	u32 idx, nb_cnt = armpmu->num_events;
+
+	/* The counter and interrupt enable registers are unknown at reset. */
+	for (idx = 1; idx < nb_cnt; ++idx)
+		armv7pmu_disable_event(NULL, idx);
+
+	/* Initialize & Reset PMNC: C and P bits */
+	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
+}
+
 static struct arm_pmu armv7pmu = {
 	.handle_irq = armv7pmu_handle_irq,
 	.enable = armv7pmu_enable_event,
@@ -856,17 +870,15 @@ static struct arm_pmu armv7pmu = {
 	.get_event_idx = armv7pmu_get_event_idx,
 	.start = armv7pmu_start,
 	.stop = armv7pmu_stop,
+	.reset = armv7pmu_reset,
 	.raw_event_mask = 0xFF,
 	.max_period = (1LLU << 32) - 1,
 };
 
-static u32 __init armv7_reset_read_pmnc(void)
+static u32 __init armv7_read_num_pmnc_events(void)
 {
 	u32 nb_cnt;
 
-	/* Initialize & Reset PMNC: C and P bits */
-	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
-
 	/* Read the nb of CNTx counters supported from PMNC */
 	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
 
@@ -880,7 +892,7 @@ static const struct arm_pmu *__init armv7_a8_pmu_init(void)
 	armv7pmu.name = "ARMv7 Cortex-A8";
 	armv7pmu.cache_map = &armv7_a8_perf_cache_map;
 	armv7pmu.event_map = &armv7_a8_perf_map;
-	armv7pmu.num_events = armv7_reset_read_pmnc();
+	armv7pmu.num_events = armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
 
@@ -890,7 +902,7 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void)
 	armv7pmu.name = "ARMv7 Cortex-A9";
 	armv7pmu.cache_map = &armv7_a9_perf_cache_map;
 	armv7pmu.event_map = &armv7_a9_perf_map;
-	armv7pmu.num_events = armv7_reset_read_pmnc();
+	armv7pmu.num_events = armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
 #else
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 28cd3b025bc3..39affbe4fdb2 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -246,7 +246,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event, hwc, idx, 1);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
@@ -578,7 +578,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event, hwc, idx, 1);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 94bbedbed639..5e1e54197227 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -372,6 +372,8 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
 	if (clone_flags & CLONE_SETTLS)
 		thread->tp_value = regs->ARM_r3;
 
+	thread_notify(THREAD_NOTIFY_COPY, thread);
+
 	return 0;
 }
 
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index d1da92174277..006c1e884eaf 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -466,13 +466,13 @@ static struct machine_desc * __init setup_machine(unsigned int nr)
 		/* can't use cpu_relax() here as it may require MMU setup */;
 }
 
-static int __init arm_add_memory(unsigned long start, unsigned long size)
+static int __init arm_add_memory(phys_addr_t start, unsigned long size)
 {
 	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
 
 	if (meminfo.nr_banks >= NR_BANKS) {
 		printk(KERN_CRIT "NR_BANKS too low, "
-			"ignoring memory at %#lx\n", start);
+			"ignoring memory at 0x%08llx\n", (long long)start);
 		return -EINVAL;
 	}
 
@@ -502,7 +502,8 @@ static int __init arm_add_memory(unsigned long start, unsigned long size)
 static int __init early_mem(char *p)
 {
 	static int usermem __initdata = 0;
-	unsigned long size, start;
+	unsigned long size;
+	phys_addr_t start;
 	char *endp;
 
 	/*
@@ -788,30 +789,6 @@ static void __init reserve_crashkernel(void)
 static inline void reserve_crashkernel(void) {}
 #endif /* CONFIG_KEXEC */
 
-/*
- * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
- * is_kdump_kernel() to determine if we are booting after a panic. Hence
- * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
- */
-
-#ifdef CONFIG_CRASH_DUMP
-/*
- * elfcorehdr= specifies the location of elf core header stored by the crashed
- * kernel. This option will be passed by kexec loader to the capture kernel.
- */
-static int __init setup_elfcorehdr(char *arg)
-{
-	char *end;
-
-	if (!arg)
-		return -EINVAL;
-
-	elfcorehdr_addr = memparse(arg, &end);
-	return end > arg ? 0 : -EINVAL;
-}
-early_param("elfcorehdr", setup_elfcorehdr);
-#endif /* CONFIG_CRASH_DUMP */
-
 static void __init squash_mem_tags(struct tag *tag)
 {
 	for (; tag->hdr.size; tag = tag_next(tag))
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index bfad698a02e7..6398ead9d1c0 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -119,11 +119,19 @@ ENTRY(cpu_resume)
 #else
 	ldr r0, sleep_save_sp	@ stack phys addr
 #endif
-	msr cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE	@ set SVC, irqs off
+	setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1	@ set SVC, irqs off
 #ifdef MULTI_CPU
-	ldmia r0!, {r1, sp, lr, pc}	@ load v:p, stack, return fn, resume fn
+	@ load v:p, stack, return fn, resume fn
+ ARM(	ldmia r0!, {r1, sp, lr, pc}	)
+THUMB(	ldmia r0!, {r1, r2, r3, r4}	)
+THUMB(	mov sp, r2	)
+THUMB(	mov lr, r3	)
+THUMB(	bx r4	)
 #else
-	ldmia r0!, {r1, sp, lr}	@ load v:p, stack, return fn
+	@ load v:p, stack, return fn
+ ARM(	ldmia r0!, {r1, sp, lr}	)
+THUMB(	ldmia r0!, {r1, r2, lr}	)
+THUMB(	mov sp, r2	)
 	b cpu_do_resume
 #endif
 ENDPROC(cpu_resume)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 4539ebcb089f..8fe05ad932e4 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -474,13 +474,12 @@ static void smp_timer_broadcast(const struct cpumask *mask)
 #define smp_timer_broadcast NULL
 #endif
 
-#ifndef CONFIG_LOCAL_TIMERS
 static void broadcast_timer_set_mode(enum clock_event_mode mode,
 	struct clock_event_device *evt)
 {
 }
 
-static void local_timer_setup(struct clock_event_device *evt)
+static void broadcast_timer_setup(struct clock_event_device *evt)
 {
 	evt->name = "dummy_timer";
 	evt->features = CLOCK_EVT_FEAT_ONESHOT |
@@ -492,7 +491,6 @@ static void local_timer_setup(struct clock_event_device *evt)
 
 	clockevents_register_device(evt);
 }
-#endif
 
 void __cpuinit percpu_timer_setup(void)
 {
@@ -502,7 +500,8 @@ void __cpuinit percpu_timer_setup(void)
 	evt->cpumask = cpumask_of(cpu);
 	evt->broadcast = smp_timer_broadcast;
 
-	local_timer_setup(evt);
+	if (local_timer_setup(evt))
+		broadcast_timer_setup(evt);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 7a5760922914..40ee7e5045e4 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -158,7 +158,7 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
 
 	if (res == 0) {
 		/*
-		 * Barrier also required between aquiring a lock for a
+		 * Barrier also required between acquiring a lock for a
 		 * protected resource and accessing the resource. Inserted for
 		 * same reason as above.
 		 */
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 21ac43f1c2d0..3b54ad19d489 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -410,8 +410,7 @@ static int bad_syscall(int n, struct pt_regs *regs)
 	struct thread_info *thread = current_thread_info();
 	siginfo_t info;
 
-	if (current->personality != PER_LINUX &&
-	    current->personality != PER_LINUX_32BIT &&
+	if ((current->personality & PER_MASK) != PER_LINUX &&
 	    thread->exec_domain->handler) {
 		thread->exec_domain->handler(n, regs);
 		return regs->ARM_r0;
@@ -712,17 +711,17 @@ EXPORT_SYMBOL(__readwrite_bug);
 
 void __pte_error(const char *file, int line, pte_t pte)
 {
-	printk("%s:%d: bad pte %08lx.\n", file, line, pte_val(pte));
+	printk("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
 }
 
 void __pmd_error(const char *file, int line, pmd_t pmd)
 {
-	printk("%s:%d: bad pmd %08lx.\n", file, line, pmd_val(pmd));
+	printk("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
 }
 
 void __pgd_error(const char *file, int line, pgd_t pgd)
 {
-	printk("%s:%d: bad pgd %08lx.\n", file, line, pgd_val(pgd));
+	printk("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
 }
 
 asmlinkage void __div0(void)