From bc28248ee25e5c239cbe6afca35a100b08401de5 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 17 May 2009 18:58:34 +0100
Subject: [ARM] smp: move core localtimer support out of platform specific files

Signed-off-by: Russell King
---
 arch/arm/kernel/smp.c | 56 +++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 48 insertions(+), 8 deletions(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 6014dfd22af4..91130e218aef 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -22,6 +22,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
@@ -32,6 +34,7 @@
 #include
 #include
 #include
+#include
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -274,9 +277,9 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	local_fiq_enable();
 
 	/*
-	 * Setup local timer for this CPU.
+	 * Setup the percpu timer for this CPU.
 	 */
-	local_timer_setup();
+	percpu_timer_setup();
 
 	calibrate_delay();
 
@@ -383,10 +386,16 @@ void show_local_irqs(struct seq_file *p)
 	seq_putc(p, '\n');
 }
 
+/*
+ * Timer (local or broadcast) support
+ */
+static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
+
 static void ipi_timer(void)
 {
+	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
 	irq_enter();
-	local_timer_interrupt();
+	evt->event_handler(evt);
 	irq_exit();
 }
 
@@ -405,6 +414,42 @@ asmlinkage void __exception do_local_timer(struct pt_regs *regs)
 }
 #endif
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+static void smp_timer_broadcast(const struct cpumask *mask)
+{
+	send_ipi_message(mask, IPI_TIMER);
+}
+
+static void broadcast_timer_set_mode(enum clock_event_mode mode,
+	struct clock_event_device *evt)
+{
+}
+
+static void local_timer_setup(struct clock_event_device *evt)
+{
+	evt->name = "dummy_timer";
+	evt->features = CLOCK_EVT_FEAT_ONESHOT |
+			CLOCK_EVT_FEAT_PERIODIC |
+			CLOCK_EVT_FEAT_DUMMY;
+	evt->rating = 400;
+	evt->mult = 1;
+	evt->set_mode = broadcast_timer_set_mode;
+	evt->broadcast = smp_timer_broadcast;
+
+	clockevents_register_device(evt);
+}
+#endif
+
+void __cpuinit percpu_timer_setup(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
+
+	evt->cpumask = cpumask_of(cpu);
+
+	local_timer_setup(evt);
+}
+
 static DEFINE_SPINLOCK(stop_lock);
 
 /*
@@ -501,11 +546,6 @@ void smp_send_reschedule(int cpu)
 	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
-void smp_timer_broadcast(const struct cpumask *mask)
-{
-	send_ipi_message(mask, IPI_TIMER);
-}
-
 void smp_send_stop(void)
 {
 	cpumask_t mask = cpu_online_map;
-- cgit v1.2.2

From a8cbcd92bd4bf893085eddf7f58e63ea98503d94 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 16 May 2009 11:51:14 +0100
Subject: [ARM] smp: separate SCU support code from realview

Signed-off-by: Russell King
---
 arch/arm/kernel/Makefile  | 1 +
 arch/arm/kernel/smp_scu.c | 41 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 42 insertions(+)
 create mode 100644 arch/arm/kernel/smp_scu.c
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 11a5197a221f..90ffbaf23b4e 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_ARTHUR) += arthur.o
 obj-$(CONFIG_ISA_DMA) += dma-isa.o
 obj-$(CONFIG_PCI) += bios32.o isa.o
 obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
 obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_KPROBES) += kprobes.o kprobes-decode.o
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
new file mode 100644
index 000000000000..7f24ee9d7330
--- /dev/null
+++ b/arch/arm/kernel/smp_scu.c
@@ -0,0 +1,41 @@
+/*
+ * linux/arch/arm/kernel/smp_scu.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include
+#include
+
+#include
+
+#define SCU_CTRL 0x00
+#define SCU_CONFIG 0x04
+#define SCU_CPU_STATUS 0x08
+#define SCU_INVALIDATE 0x0c
+#define SCU_FPGA_REVISION 0x10
+
+/*
+ * Get the number of CPU cores from the SCU configuration
+ */
+unsigned int __init scu_get_core_count(void __iomem *scu_base)
+{
+	unsigned int ncores = __raw_readl(scu_base + SCU_CONFIG);
+	return (ncores & 0x03) + 1;
+}
+
+/*
+ * Enable the SCU
+ */
+void __init scu_enable(void __iomem *scu_base)
+{
+	u32 scu_ctrl;
+
+	scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
+	scu_ctrl |= 1;
+	__raw_writel(scu_ctrl, scu_base + SCU_CTRL);
+}
-- cgit v1.2.2

From f32f4ce25745209f16a5a6cef7442144b596c68a Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 16 May 2009 12:14:21 +0100
Subject: [ARM] smp: allow re-use of realview localtimer TWD support

Signed-off-by: Russell King
---
 arch/arm/kernel/Makefile  |   1 +
 arch/arm/kernel/smp_twd.c | 173 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 174 insertions(+)
 create mode 100644 arch/arm/kernel/smp_twd.c
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 90ffbaf23b4e..ff89d0b3abc5 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_ISA_DMA) += dma-isa.o
 obj-$(CONFIG_PCI) += bios32.o isa.o
 obj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
+obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
 obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_KPROBES) += kprobes.o kprobes-decode.o
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
new file mode 100644
index 000000000000..aabd62d6bd19
--- /dev/null
+++ b/arch/arm/kernel/smp_twd.c
@@ -0,0 +1,173 @@
+/*
+ * linux/arch/arm/kernel/smp_twd.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#define TWD_TIMER_LOAD 0x00
+#define TWD_TIMER_COUNTER 0x04
+#define TWD_TIMER_CONTROL 0x08
+#define TWD_TIMER_INTSTAT 0x0C
+
+#define TWD_WDOG_LOAD 0x20
+#define TWD_WDOG_COUNTER 0x24
+#define TWD_WDOG_CONTROL 0x28
+#define TWD_WDOG_INTSTAT 0x2C
+#define TWD_WDOG_RESETSTAT 0x30
+#define TWD_WDOG_DISABLE 0x34
+
+#define TWD_TIMER_CONTROL_ENABLE (1 << 0)
+#define TWD_TIMER_CONTROL_ONESHOT (0 << 1)
+#define TWD_TIMER_CONTROL_PERIODIC (1 << 1)
+#define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2)
+
+/* set up by the platform code */
+void __iomem *twd_base;
+
+static unsigned long twd_timer_rate;
+
+static void twd_set_mode(enum clock_event_mode mode,
+	struct clock_event_device *clk)
+{
+	unsigned long ctrl;
+
+	switch(mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		/* timer load already set up */
+		ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
+			| TWD_TIMER_CONTROL_PERIODIC;
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+		/* period set, and timer enabled in 'next_event' hook */
+		ctrl = TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_ONESHOT;
+		break;
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+	default:
+		ctrl = 0;
+	}
+
+	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+}
+
+static int twd_set_next_event(unsigned long evt,
+	struct clock_event_device *unused)
+{
+	unsigned long ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL);
+
+	__raw_writel(evt, twd_base + TWD_TIMER_COUNTER);
+	__raw_writel(ctrl | TWD_TIMER_CONTROL_ENABLE, twd_base + TWD_TIMER_CONTROL);
+
+	return 0;
+}
+
+/*
+ * local_timer_ack: checks for a local timer interrupt.
+ *
+ * If a local timer interrupt has occurred, acknowledge and return 1.
+ * Otherwise, return 0.
+ */
+int twd_timer_ack(void)
+{
+	if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) {
+		__raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
+		return 1;
+	}
+
+	return 0;
+}
+
+static void __cpuinit twd_calibrate_rate(void)
+{
+	unsigned long load, count;
+	u64 waitjiffies;
+
+	/*
+	 * If this is the first time round, we need to work out how fast
+	 * the timer ticks
+	 */
+	if (twd_timer_rate == 0) {
+		printk("Calibrating local timer... ");
+
+		/* Wait for a tick to start */
+		waitjiffies = get_jiffies_64() + 1;
+
+		while (get_jiffies_64() < waitjiffies)
+			udelay(10);
+
+		/* OK, now the tick has started, let's get the timer going */
+		waitjiffies += 5;
+
+		/* enable, no interrupt or reload */
+		__raw_writel(0x1, twd_base + TWD_TIMER_CONTROL);
+
+		/* maximum value */
+		__raw_writel(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
+
+		while (get_jiffies_64() < waitjiffies)
+			udelay(10);
+
+		count = __raw_readl(twd_base + TWD_TIMER_COUNTER);
+
+		twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
+
+		printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
+			(twd_timer_rate / 100000) % 100);
+	}
+
+	load = twd_timer_rate / HZ;
+
+	__raw_writel(load, twd_base + TWD_TIMER_LOAD);
+}
+
+/*
+ * Setup the local clock events for a CPU.
+ */
+void __cpuinit twd_timer_setup(struct clock_event_device *clk)
+{
+	unsigned long flags;
+
+	twd_calibrate_rate();
+
+	clk->name = "local_timer";
+	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+	clk->rating = 350;
+	clk->set_mode = twd_set_mode;
+	clk->set_next_event = twd_set_next_event;
+	clk->shift = 20;
+	clk->mult = div_sc(twd_timer_rate, NSEC_PER_SEC, clk->shift);
+	clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
+	clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
+
+	/* Make sure our local interrupt controller has this enabled */
+	local_irq_save(flags);
+	get_irq_chip(clk->irq)->unmask(clk->irq);
+	local_irq_restore(flags);
+
+	clockevents_register_device(clk);
+}
+
+/*
+ * take a local timer down
+ */
+void __cpuexit twd_timer_stop(void)
+{
+	__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+}
-- cgit v1.2.2

From 4c5158d4c3ab1f2927a740372a0ee9c3fed7ba47 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 17 May 2009 10:58:54 +0100
Subject: [ARM] smp: fix style issues in smp_twd.c

Signed-off-by: Russell King
---
 arch/arm/kernel/smp_twd.c | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index aabd62d6bd19..d8c88c633c6f 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -48,7 +48,7 @@ static void twd_set_mode(enum clock_event_mode mode,
 {
 	unsigned long ctrl;
 
-	switch(mode) {
+	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
 		/* timer load already set up */
 		ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
@@ -72,8 +72,10 @@ static int twd_set_next_event(unsigned long evt,
 {
 	unsigned long ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL);
 
+	ctrl |= TWD_TIMER_CONTROL_ENABLE;
+
 	__raw_writel(evt, twd_base + TWD_TIMER_COUNTER);
-	__raw_writel(ctrl | TWD_TIMER_CONTROL_ENABLE, twd_base + TWD_TIMER_CONTROL);
+	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
 
 	return 0;
 }
@@ -104,7 +106,7 @@ static void __cpuinit twd_calibrate_rate(void)
 	 * the timer ticks
 	 */
 	if (twd_timer_rate == 0) {
-		printk("Calibrating local timer... ");
+		printk(KERN_INFO "Calibrating local timer... ");
 
 		/* Wait for a tick to start */
 		waitjiffies = get_jiffies_64() + 1;
@@ -146,15 +148,15 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
 
 	twd_calibrate_rate();
 
-	clk->name = "local_timer";
-	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
-	clk->rating = 350;
-	clk->set_mode = twd_set_mode;
-	clk->set_next_event = twd_set_next_event;
-	clk->shift = 20;
-	clk->mult = div_sc(twd_timer_rate, NSEC_PER_SEC, clk->shift);
-	clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
-	clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
+	clk->name		= "local_timer";
+	clk->features		= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+	clk->rating		= 350;
+	clk->set_mode		= twd_set_mode;
+	clk->set_next_event	= twd_set_next_event;
+	clk->shift		= 20;
+	clk->mult		= div_sc(twd_timer_rate, NSEC_PER_SEC, clk->shift);
+	clk->max_delta_ns	= clockevent_delta2ns(0xffffffff, clk);
+	clk->min_delta_ns	= clockevent_delta2ns(0xf, clk);
 
 	/* Make sure our local interrupt controller has this enabled */
 	local_irq_save(flags);
-- cgit v1.2.2

From 288ddad5b095ff65812cf1060c67d23c07568871 Mon Sep 17 00:00:00 2001
From: "Eric W. Biederman"
Date: Wed, 20 May 2009 15:52:40 -0700
Subject: syscall: Sort out syscall_restart name clash.

Stephen Rothwell writes:
> Today's linux-next build of at least some av32 and arm configs failed like this:
>
> arch/avr32/kernel/signal.c:216: error: conflicting types for 'restart_syscall'
> include/linux/sched.h:2184: error: previous definition of 'restart_syscall' was here
>
> Caused by commit 690cc3ffe33ac4a2857583c22d4c6244ae11684d ("syscall:
> Implement a convinience function restart_syscall") from the net tree.

Grrr.  Some days it feels like all of the good names are already taken.

Let's just rename the two static users in arm and avr32 to get this
sorted out.

Signed-off-by: Eric W. Biederman
Signed-off-by: David S. Miller
---
 arch/arm/kernel/signal.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 80b8b5c7e07a..614c9f642878 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -532,7 +532,7 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
 	return err;
 }
 
-static inline void restart_syscall(struct pt_regs *regs)
+static inline void setup_syscall_restart(struct pt_regs *regs)
 {
 	regs->ARM_r0 = regs->ARM_ORIG_r0;
 	regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
@@ -567,7 +567,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
 		}
 		/* fallthrough */
 	case -ERESTARTNOINTR:
-		restart_syscall(regs);
+		setup_syscall_restart(regs);
 	}
 }
 
@@ -691,7 +691,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
 		if (regs->ARM_r0 == -ERESTARTNOHAND ||
 		    regs->ARM_r0 == -ERESTARTSYS ||
 		    regs->ARM_r0 == -ERESTARTNOINTR) {
-			restart_syscall(regs);
+			setup_syscall_restart(regs);
 		}
 	}
 	single_step_set(current);
-- cgit v1.2.2

From af73110d23fb54f940197d93a410e9fa0cee66e2 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Mon, 18 May 2009 16:26:27 +0100
Subject: [ARM] 5516/1: Flush the D-cache after initialising the SCU

On MP systems, the data loaded by CPU0 before the SCU was initialised
may not be visible to the other CPUs.

Signed-off-by: Catalin Marinas
Signed-off-by: Russell King

This also includes the following compile fix:

This patch includes 'asm/cacheflush.h' which is needed to use
'flush_cache_all()' function.

Signed-off-by: Santosh Shilimkar
Signed-off-by: Russell King
---
 arch/arm/kernel/smp_scu.c | 7 +++++++
 1 file changed, 7 insertions(+)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 7f24ee9d7330..d3831f616ee9 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -12,6 +12,7 @@
 #include
 #include
+#include
 
 #define SCU_CTRL 0x00
 #define SCU_CONFIG 0x04
@@ -38,4 +39,10 @@ void __init scu_enable(void __iomem *scu_base)
 	scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
 	scu_ctrl |= 1;
 	__raw_writel(scu_ctrl, scu_base + SCU_CTRL);
+
+	/*
+	 * Ensure that the data accessed by CPU0 before the SCU was
+	 * initialised is visible to the other CPUs.
+	 */
+	flush_cache_all();
 }
-- cgit v1.2.2

From e03cdade0ca945a04e982525e50fef275190b77b Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 28 May 2009 14:16:52 +0100
Subject: [ARM] smp: use new cpumask functions

Convert cpu_*_mask bit twiddling to the new set_cpu_*() API.

Signed-off-by: Russell King
---
 arch/arm/kernel/smp.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 91130e218aef..0d8097fa4ca5 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -166,7 +166,7 @@ int __cpuexit __cpu_disable(void)
 	 * Take this CPU offline. Once we clear this, we can't return,
 	 * and we must not schedule until we're ready to give up the cpu.
 	 */
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 
 	/*
	 * OK - migrate IRQs away from this CPU
@@ -288,7 +288,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	/*
 	 * OK, now it's safe to let the boot CPU continue
 	 */
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 
 	/*
 	 * OK, it's off to the idle thread for us
@@ -462,7 +462,7 @@ static void ipi_cpu_stop(unsigned int cpu)
 	dump_stack();
 	spin_unlock(&stop_lock);
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 
 	local_fiq_disable();
 	local_irq_disable();
-- cgit v1.2.2

From faa7bc51c11d5bbe440ac04710fd7a3208782000 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Sat, 30 May 2009 14:00:14 +0100
Subject: Check whether the TLB operations need broadcasting on SMP systems

ARMv7 SMP hardware can handle the TLB maintenance operations
broadcasting in hardware so that the software can avoid the costly
IPIs. This patch adds the necessary checks (the MMFR3 CPUID register)
to avoid the broadcasting if already supported by the hardware.

(this patch is based on the work done by Tony Thompson @ ARM)

Signed-off-by: Catalin Marinas
---
 arch/arm/kernel/smp.c | 69 ++++++++++++++++++++++++++++++++------------------
 1 file changed, 43 insertions(+), 26 deletions(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 6014dfd22af4..ece658e773a6 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -32,6 +32,7 @@
 #include
 #include
 #include
+#include
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -545,6 +546,12 @@ struct tlb_args {
 	unsigned long ta_end;
 };
 
+/* all SMP configurations have the extended CPUID registers */
+static inline int tlb_ops_need_broadcast(void)
+{
+	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
+}
+
 static inline void ipi_flush_tlb_all(void *ignored)
 {
 	local_flush_tlb_all();
@@ -587,51 +594,61 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	if (tlb_ops_need_broadcast())
+		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	else
+		local_flush_tlb_all();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+	if (tlb_ops_need_broadcast())
+		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+	else
+		local_flush_tlb_mm(mm);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
-	struct tlb_args ta;
-
-	ta.ta_vma = vma;
-	ta.ta_start = uaddr;
-
-	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_vma = vma;
+		ta.ta_start = uaddr;
+		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	} else
+		local_flush_tlb_page(vma, uaddr);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
 {
-	struct tlb_args ta;
-
-	ta.ta_start = kaddr;
-
-	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_start = kaddr;
+		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
+	} else
+		local_flush_tlb_kernel_page(kaddr);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
                      unsigned long start, unsigned long end)
 {
-	struct tlb_args ta;
-
-	ta.ta_vma = vma;
-	ta.ta_start = start;
-	ta.ta_end = end;
-
-	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_vma = vma;
+		ta.ta_start = start;
+		ta.ta_end = end;
+		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	} else
+		local_flush_tlb_range(vma, start, end);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	struct tlb_args ta;
-
-	ta.ta_start = start;
-	ta.ta_end = end;
-
-	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_start = start;
+		ta.ta_end = end;
+		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+	} else
+		local_flush_tlb_kernel_range(start, end);
 }
-- cgit v1.2.2

From d71e1352e240dea32d481ad8d662e8de4406ac7e Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Sat, 30 May 2009 14:00:15 +0100
Subject: Clear the IT state when invoking a Thumb-2 signal handler

If a process is interrupted during an If-Then block and a signal is
invoked, the ITSTATE bits must be cleared otherwise the handler would
not run correctly.

Signed-off-by: Catalin Marinas
Cc: Joseph S. Myers
---
 arch/arm/kernel/signal.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 80b8b5c7e07a..442b87476f97 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -426,9 +426,13 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 	 */
 	thumb = handler & 1;
 
-	if (thumb)
+	if (thumb) {
 		cpsr |= PSR_T_BIT;
-	else
+#if __LINUX_ARM_ARCH__ >= 7
+		/* clear the If-Then Thumb-2 execution state */
+		cpsr &= ~PSR_IT_MASK;
+#endif
+	} else
 		cpsr &= ~PSR_T_BIT;
 	}
 #endif
-- cgit v1.2.2

From 8c7e65742fd05f27071282d260376adb98e0e9ac Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Sat, 30 May 2009 14:00:17 +0100
Subject: arm: Provide _sdata and __bss_stop in the vmlinux.lds.S file

_sdata and __bss_stop are common symbols defined by many architectures
and made available to the kernel via asm-generic/sections.h. Kmemleak
uses these symbols when scanning the data sections.

Signed-off-by: Catalin Marinas
---
 arch/arm/kernel/vmlinux.lds.S | 2 ++
 1 file changed, 2 insertions(+)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index c90f27250ead..6c0779792546 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -141,6 +141,7 @@ SECTIONS
 	.data : AT(__data_loc) {
 		_data = .;	/* address in memory */
+		_sdata = .;
 
 		/*
 		 * first, the init task union, aligned
@@ -192,6 +193,7 @@ SECTIONS
 		__bss_start = .;	/* BSS */
 		*(.bss)
 		*(COMMON)
+		__bss_stop = .;
 		_end = .;
 	}
 	/* Stabs debugging sections. */
-- cgit v1.2.2

From 26584853a44c58f3d6ac7360d697a2ddcd1a3efa Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Sat, 30 May 2009 14:00:18 +0100
Subject: Add core support for ARMv6/v7 big-endian

Starting with ARMv6, the CPUs support the BE-8 variant of big-endian
(byte-invariant).

This patch adds the core support:

- setting of the BE-8 mode via the CPSR.E register for both kernel and
  user threads
- big-endian page table walking
- REV used to rotate instructions read from memory during fault
  processing as they are still little-endian format
- Kconfig and Makefile support for BE-8. The --be8 option must be passed
  to the final linking stage to convert the instructions to
  little-endian

Signed-off-by: Catalin Marinas
---
 arch/arm/kernel/entry-armv.S   | 3 +++
 arch/arm/kernel/entry-common.S | 3 +++
 arch/arm/kernel/process.c      | 2 +-
 3 files changed, 7 insertions(+), 1 deletion(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d662a2f1fd85..9d7ce4377c5e 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -482,6 +482,9 @@ __und_usr:
 	subeq	r4, r2, #4		@ ARM instr at LR - 4
 	subne	r4, r2, #2		@ Thumb instr at LR - 2
 1:	ldreqt	r0, [r4]
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	reveq	r0, r0			@ little endian instruction
+#endif
 	beq	call_fpe
 	@ Thumb instruction
 #if __LINUX_ARM_ARCH__ >= 7
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index b55cb0331809..366e5097a41a 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -210,6 +210,9 @@ ENTRY(vector_swi)
 A710(	teq	ip, #0x0f000000 )
 A710(	bne	.Larm710bug )
 #endif
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	rev	r10, r10		@ little endian instruction
+#endif
 
 #elif defined(CONFIG_AEABI)
 
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index c3265a2e7cd4..1585423699ee 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -365,7 +365,7 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 	regs.ARM_r2 = (unsigned long)fn;
 	regs.ARM_r3 = (unsigned long)do_exit;
 	regs.ARM_pc = (unsigned long)kernel_thread_helper;
-	regs.ARM_cpsr = SVC_MODE;
+	regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE;
 
 	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
 }
-- cgit v1.2.2

From bb1f17b0372de93758653ca3454bc0df18dc2e5c Mon Sep 17 00:00:00 2001
From: Alexey Dobriyan
Date: Tue, 16 Jun 2009 15:31:18 -0700
Subject: mm: consolidate init_mm definition

* create mm/init-mm.c, move init_mm there
* remove INIT_MM, initialize init_mm with C99 initializer
* unexport init_mm on all arches:

  init_mm is already unexported on x86.

  One strange place is some OMAP driver (drivers/video/omap/) which
  won't build modular, but it's already wants get_vm_area() export.
  Somebody should look there.

[akpm@linux-foundation.org: add missing #includes]
Signed-off-by: Alexey Dobriyan
Cc: Mike Frysinger
Cc: Americo Wang
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm/kernel/init_task.c | 4 ----
 1 file changed, 4 deletions(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/init_task.c b/arch/arm/kernel/init_task.c
index e859af349467..3f470866bb89 100644
--- a/arch/arm/kernel/init_task.c
+++ b/arch/arm/kernel/init_task.c
@@ -14,10 +14,6 @@
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial thread structure.
 *
-- cgit v1.2.2

From 41184f6a5ef0d88529904d54f06f88b67fb76f4a Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Fri, 19 Jun 2009 11:30:12 +0100
Subject: [ARM] 5556/1: Fix the irq_desc.cpu references

The cpu member of struct irq_desc was recently renamed to node.
The patch renames the ARM references to the old member.

Signed-off-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/kernel/irq.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 6874c7dca75a..096f600dc8d8 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -167,7 +167,7 @@ void __init init_IRQ(void)
 
 #ifdef CONFIG_SMP
 	cpumask_setall(bad_irq_desc.affinity);
-	bad_irq_desc.cpu = smp_processor_id();
+	bad_irq_desc.node = smp_processor_id();
 #endif
 	init_arch_irq();
 }
@@ -176,7 +176,7 @@ void __init init_IRQ(void)
 static void route_irq(struct irq_desc *desc, unsigned int irq,
 		      unsigned int cpu)
 {
-	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
+	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
 
 	spin_lock_irq(&desc->lock);
 	desc->chip->set_affinity(irq, cpumask_of(cpu));
@@ -195,7 +195,7 @@ void migrate_irqs(void)
 	for (i = 0; i < NR_IRQS; i++) {
 		struct irq_desc *desc = irq_desc + i;
 
-		if (desc->cpu == cpu) {
+		if (desc->node == cpu) {
 			unsigned int newcpu = cpumask_any_and(desc->affinity,
 							      cpu_online_mask);
 			if (newcpu >= nr_cpu_ids) {
-- cgit v1.2.2

From 7436127ce9042f95a10bb5423f726fd63a61934d Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Fri, 19 Jun 2009 16:39:29 +0100
Subject: [ARM] 5557/1: Discard some ARM.ex*.*exit.text sections when !HOTPLUG or !HOTPLUG_CPU

Not discarding these sections when hotplug isn't available prevents the
kernel from building.

Signed-off-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/kernel/vmlinux.lds.S | 8 ++++++++
 1 file changed, 8 insertions(+)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 6c0779792546..4340bf3d2c84 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -84,6 +84,14 @@ SECTIONS
 		*(.exitcall.exit)
 		*(.ARM.exidx.exit.text)
 		*(.ARM.extab.exit.text)
+#ifndef CONFIG_HOTPLUG_CPU
+		*(.ARM.exidx.cpuexit.text)
+		*(.ARM.extab.cpuexit.text)
+#endif
+#ifndef CONFIG_HOTPLUG
+		*(.ARM.exidx.devexit.text)
+		*(.ARM.extab.devexit.text)
+#endif
 #ifndef CONFIG_MMU
 		*(.fixup)
 		*(__ex_table)
-- cgit v1.2.2

From c894ed6956f126d60d888e8efc5fb3a595ba89ae Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Fri, 19 Jun 2009 16:42:11 +0100
Subject: [ARM] 5558/1: Add extra checks to ARM unwinder to avoid tracing corrupt stacks

There are situations where the unwinder goes beyond stack boundaries
and unwinds random data. This patch moves the stack boundaries check
after the unwind_exec_insn() call and adds an extra check for possible
infinite loops (like "mov pc, lr" with pc == lr).

The patch also fixes a bug in the unwind instructions interpreter. The
0xb0 instruction can only set PC to LR if this wasn't already set by a
previous instruction (this is used on exceptions taken while in kernel
mode where svc_entry is annotated with ".save {r0 - pc}").
Tested-by: Tony Lindgren
Signed-off-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/kernel/unwind.c | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 1dedc2c7ff49..dd56e11f339a 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -212,7 +212,8 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
 		ctrl->vrs[14] = *vsp++;
 		ctrl->vrs[SP] = (unsigned long)vsp;
 	} else if (insn == 0xb0) {
-		ctrl->vrs[PC] = ctrl->vrs[LR];
+		if (ctrl->vrs[PC] == 0)
+			ctrl->vrs[PC] = ctrl->vrs[LR];
 		/* no further processing */
 		ctrl->entries = 0;
 	} else if (insn == 0xb1) {
@@ -309,18 +310,20 @@ int unwind_frame(struct stackframe *frame)
 	}
 
 	while (ctrl.entries > 0) {
-		int urc;
-
-		if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
-			return -URC_FAILURE;
-		urc = unwind_exec_insn(&ctrl);
+		int urc = unwind_exec_insn(&ctrl);
 		if (urc < 0)
 			return urc;
+		if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
+			return -URC_FAILURE;
 	}
 
 	if (ctrl.vrs[PC] == 0)
 		ctrl.vrs[PC] = ctrl.vrs[LR];
 
+	/* check for infinite loop */
+	if (frame->pc == ctrl.vrs[PC])
+		return -URC_FAILURE;
+
 	frame->fp = ctrl.vrs[FP];
 	frame->sp = ctrl.vrs[SP];
 	frame->lr = ctrl.vrs[LR];
@@ -332,7 +335,6 @@ int unwind_frame(struct stackframe *frame)
 void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 {
 	struct stackframe frame;
-	unsigned long high, low;
 	register unsigned long current_sp asm ("sp");
 
 	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
@@ -362,9 +364,6 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 		frame.pc = thread_saved_pc(tsk);
 	}
 
-	low = frame.sp & ~(THREAD_SIZE - 1);
-	high = low + THREAD_SIZE;
-
 	while (1) {
 		int urc;
 		unsigned long where = frame.pc;
-- cgit v1.2.2

From feb97c3644a560ffdf9a17c65b1df807b5b4432f Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Fri, 19 Jun 2009 16:43:08 +0100
Subject: [ARM] 5559/1: Limit the stack unwinding caused by a kthread exit

When a kthread function returns, it branches to do_exit(). However, the
unwinding information isn't valid anymore and any stack trace caused by
do_exit() may be incorrect. This patch adds a kernel_thread_exit()
function and annotated with '.cantunwind' so that the unwinder stops
when reaching it.

Tested-by: Tony Lindgren
Signed-off-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/kernel/process.c | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 1585423699ee..56820cce91a4 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -352,6 +352,23 @@ asm(	".section .text\n"
"	.size	kernel_thread_helper, . - kernel_thread_helper\n"
"	.previous");
 
+#ifdef CONFIG_ARM_UNWIND
+extern void kernel_thread_exit(long code);
+asm(	".section .text\n"
+"	.align\n"
+"	.type	kernel_thread_exit, #function\n"
+"kernel_thread_exit:\n"
+"	.fnstart\n"
+"	.cantunwind\n"
+"	bl	do_exit\n"
+"	nop\n"
+"	.fnend\n"
+"	.size	kernel_thread_exit, . - kernel_thread_exit\n"
+"	.previous");
+#else
+#define kernel_thread_exit	do_exit
+#endif
+
 /*
  * Create a kernel thread.
  */
@@ -363,7 +380,7 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 	regs.ARM_r1 = (unsigned long)arg;
 	regs.ARM_r2 = (unsigned long)fn;
-	regs.ARM_r3 = (unsigned long)do_exit;
+	regs.ARM_r3 = (unsigned long)kernel_thread_exit;
 	regs.ARM_pc = (unsigned long)kernel_thread_helper;
 	regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE;
-- cgit v1.2.2

From e01916e3e7834cb51327e5e4983ff76bfce8a91f Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 20 Jun 2009 22:25:45 +0100
Subject: [ARM] wire up rt_tgsigqueueinfo and perf_counter_open

Signed-off-by: Russell King
---
 arch/arm/kernel/calls.S | 2 ++
 1 file changed, 2 insertions(+)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 1680e9e9c831..f776e72a4cb8 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -372,6 +372,8 @@
/* 360 */	CALL(sys_inotify_init1)
		CALL(sys_preadv)
		CALL(sys_pwritev)
+		CALL(sys_rt_tgsigqueueinfo)
+		CALL(sys_perf_counter_open)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
-- cgit v1.2.2

From 9ccdac3662dbf3c75e8f8851a214bdf7d365a4bd Mon Sep 17 00:00:00 2001
From: Russell King
Date: Mon, 22 Jun 2009 22:34:55 +0100
Subject: [ARM] idle: clean up pm_idle calling, obey hlt_counter

pm_idle is used by infrastructure (eg, cpuidle) which expects
architectures to call it in a certain way. Arrange for ARM to follow
x86's lead on this and call pm_idle() with interrupts already disabled.
However, we expect pm_idle() to enable interrupts before it returns.

Also, OMAP wants to be able to disable hlt-ing, so allow hlt_counter to
prevent all calls to pm_idle.

Signed-off-by: Russell King
---
 arch/arm/kernel/process.c | 58 ++++++++++++++++++++++++++---------------------
 1 file changed, 32 insertions(+), 26 deletions(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 56820cce91a4..39196dff478c 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -114,9 +114,6 @@ void arm_machine_restart(char mode, const char *cmd)
 /*
  * Function pointers to optional machine specific functions
  */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
@@ -130,20 +127,19 @@ EXPORT_SYMBOL_GPL(arm_pm_restart);
  */
 static void default_idle(void)
 {
-	if (hlt_counter)
-		cpu_relax();
-	else {
-		local_irq_disable();
-		if (!need_resched())
-			arch_idle();
-		local_irq_enable();
-	}
+	if (!need_resched())
+		arch_idle();
+	local_irq_enable();
 }
 
+void (*pm_idle)(void) = default_idle;
+EXPORT_SYMBOL(pm_idle);
+
 /*
- * The idle thread. We try to conserve power, while trying to keep
- * overall latency low. The architecture specific idle is passed
- * a value to indicate the level of "idleness" of the system.
+ * The idle thread, has rather strange semantics for calling pm_idle,
+ * but this is what x86 does and we need to do the same, so that
+ * things like cpuidle get called in the same way. The only difference
+ * is that we always respect 'hlt_counter' to prevent low power idle.
 */
 void cpu_idle(void)
 {
@@ -151,21 +147,31 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		void (*idle)(void) = pm_idle;
-
+		tick_nohz_stop_sched_tick(1);
+		leds_event(led_idle_start);
+		while (!need_resched()) {
 #ifdef CONFIG_HOTPLUG_CPU
-		if (cpu_is_offline(smp_processor_id())) {
-			leds_event(led_idle_start);
-			cpu_die();
-		}
+			if (cpu_is_offline(smp_processor_id()))
+				cpu_die();
 #endif
-		if (!idle)
-			idle = default_idle;
-		leds_event(led_idle_start);
-		tick_nohz_stop_sched_tick(1);
-		while (!need_resched())
-			idle();
+
+			local_irq_disable();
+			if (hlt_counter) {
+				local_irq_enable();
+				cpu_relax();
+			} else {
+				stop_critical_timings();
+				pm_idle();
+				start_critical_timings();
+				/*
+				 * This will eventually be removed - pm_idle
+				 * functions should always return with IRQs
+				 * enabled.
+				 */
+				WARN_ON(irqs_disabled());
+				local_irq_enable();
+			}
+		}
 		leds_event(led_idle_end);
 		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
-- cgit v1.2.2
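
The last patch above establishes a new contract for pm_idle on ARM: cpu_idle() now checks hlt_counter itself and calls pm_idle() with IRQs already disabled, and pm_idle() is expected to re-enable IRQs before returning. As a rough sketch of that contract only (not taken from the patches above; the name machine_specific_idle is a hypothetical example, while need_resched(), arch_idle() and local_irq_enable() are the same helpers used in the diffs), a platform idle hook under the new convention could look like this:

/*
 * Illustrative sketch: a platform pm_idle hook following the convention
 * set by the idle clean-up patch.  The caller (cpu_idle) has already
 * disabled IRQs and verified that hlt_counter is zero.
 */
static void machine_specific_idle(void)
{
	if (!need_resched())
		arch_idle();		/* enter low-power state, woken by the next interrupt */

	/* cpu_idle() expects IRQs to be enabled again when this returns */
	local_irq_enable();
}

/* A platform would then install it from its init code, e.g. pm_idle = machine_specific_idle; */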