Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile            3
-rw-r--r--  arch/mips/kernel/cevt-r4k.c        173
-rw-r--r--  arch/mips/kernel/cevt-smtc.c       321
-rw-r--r--  arch/mips/kernel/cpu-probe.c        28
-rw-r--r--  arch/mips/kernel/entry.S            10
-rw-r--r--  arch/mips/kernel/genex.S            45
-rw-r--r--  arch/mips/kernel/head.S              1
-rw-r--r--  arch/mips/kernel/kgdb.c              3
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c     2
-rw-r--r--  arch/mips/kernel/proc.c             13
-rw-r--r--  arch/mips/kernel/process.c          19
-rw-r--r--  arch/mips/kernel/ptrace.c          102
-rw-r--r--  arch/mips/kernel/ptrace32.c         59
-rw-r--r--  arch/mips/kernel/scall64-n32.S       2
-rw-r--r--  arch/mips/kernel/scall64-o32.S       2
-rw-r--r--  arch/mips/kernel/setup.c            33
-rw-r--r--  arch/mips/kernel/signal32.c         12
-rw-r--r--  arch/mips/kernel/smp.c               2
-rw-r--r--  arch/mips/kernel/smtc.c            260
-rw-r--r--  arch/mips/kernel/traps.c            70
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S       1
-rw-r--r--  arch/mips/kernel/watch.c           188
22 files changed, 982 insertions, 367 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 706f93974797..d9da7112aaf8 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
| @@ -6,10 +6,11 @@ extra-y := head.o init_task.o vmlinux.lds | |||
| 6 | 6 | ||
| 7 | obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ | 7 | obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ |
| 8 | ptrace.o reset.o setup.o signal.o syscall.o \ | 8 | ptrace.o reset.o setup.o signal.o syscall.o \ |
| 9 | time.o topology.o traps.o unaligned.o | 9 | time.o topology.o traps.o unaligned.o watch.o |
| 10 | 10 | ||
| 11 | obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o | 11 | obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o |
| 12 | obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o | 12 | obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o |
| 13 | obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o | ||
| 13 | obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o | 14 | obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o |
| 14 | obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o | 15 | obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o |
| 15 | obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o | 16 | obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o |
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 24a2d907aa0d..4a4c59f2737a 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
| @@ -12,6 +12,14 @@ | |||
| 12 | 12 | ||
| 13 | #include <asm/smtc_ipi.h> | 13 | #include <asm/smtc_ipi.h> |
| 14 | #include <asm/time.h> | 14 | #include <asm/time.h> |
| 15 | #include <asm/cevt-r4k.h> | ||
| 16 | |||
| 17 | /* | ||
| 18 | * The SMTC Kernel for the 34K, 1004K, et al. replaces several | ||
| 19 | * of these routines with SMTC-specific variants. | ||
| 20 | */ | ||
| 21 | |||
| 22 | #ifndef CONFIG_MIPS_MT_SMTC | ||
| 15 | 23 | ||
| 16 | static int mips_next_event(unsigned long delta, | 24 | static int mips_next_event(unsigned long delta, |
| 17 | struct clock_event_device *evt) | 25 | struct clock_event_device *evt) |
| @@ -19,60 +27,27 @@ static int mips_next_event(unsigned long delta, | |||
| 19 | unsigned int cnt; | 27 | unsigned int cnt; |
| 20 | int res; | 28 | int res; |
| 21 | 29 | ||
| 22 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 23 | { | ||
| 24 | unsigned long flags, vpflags; | ||
| 25 | local_irq_save(flags); | ||
| 26 | vpflags = dvpe(); | ||
| 27 | #endif | ||
| 28 | cnt = read_c0_count(); | 30 | cnt = read_c0_count(); |
| 29 | cnt += delta; | 31 | cnt += delta; |
| 30 | write_c0_compare(cnt); | 32 | write_c0_compare(cnt); |
| 31 | res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0; | 33 | res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0; |
| 32 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 33 | evpe(vpflags); | ||
| 34 | local_irq_restore(flags); | ||
| 35 | } | ||
| 36 | #endif | ||
| 37 | return res; | 34 | return res; |
| 38 | } | 35 | } |
| 39 | 36 | ||
| 40 | static void mips_set_mode(enum clock_event_mode mode, | 37 | #endif /* CONFIG_MIPS_MT_SMTC */ |
| 41 | struct clock_event_device *evt) | 38 | |
| 39 | void mips_set_clock_mode(enum clock_event_mode mode, | ||
| 40 | struct clock_event_device *evt) | ||
| 42 | { | 41 | { |
| 43 | /* Nothing to do ... */ | 42 | /* Nothing to do ... */ |
| 44 | } | 43 | } |
| 45 | 44 | ||
| 46 | static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); | 45 | DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); |
| 47 | static int cp0_timer_irq_installed; | 46 | int cp0_timer_irq_installed; |
| 48 | 47 | ||
| 49 | /* | 48 | #ifndef CONFIG_MIPS_MT_SMTC |
| 50 | * Timer ack for an R4k-compatible timer of a known frequency. | ||
| 51 | */ | ||
| 52 | static void c0_timer_ack(void) | ||
| 53 | { | ||
| 54 | write_c0_compare(read_c0_compare()); | ||
| 55 | } | ||
| 56 | 49 | ||
| 57 | /* | 50 | irqreturn_t c0_compare_interrupt(int irq, void *dev_id) |
| 58 | * Possibly handle a performance counter interrupt. | ||
| 59 | * Return true if the timer interrupt should not be checked | ||
| 60 | */ | ||
| 61 | static inline int handle_perf_irq(int r2) | ||
| 62 | { | ||
| 63 | /* | ||
| 64 | * The performance counter overflow interrupt may be shared with the | ||
| 65 | * timer interrupt (cp0_perfcount_irq < 0). If it is and a | ||
| 66 | * performance counter has overflowed (perf_irq() == IRQ_HANDLED) | ||
| 67 | * and we can't reliably determine if a counter interrupt has also | ||
| 68 | * happened (!r2) then don't check for a timer interrupt. | ||
| 69 | */ | ||
| 70 | return (cp0_perfcount_irq < 0) && | ||
| 71 | perf_irq() == IRQ_HANDLED && | ||
| 72 | !r2; | ||
| 73 | } | ||
| 74 | |||
| 75 | static irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | ||
| 76 | { | 51 | { |
| 77 | const int r2 = cpu_has_mips_r2; | 52 | const int r2 = cpu_has_mips_r2; |
| 78 | struct clock_event_device *cd; | 53 | struct clock_event_device *cd; |
| @@ -93,12 +68,8 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | |||
| 93 | * interrupt. Being the paranoiacs we are we check anyway. | 68 | * interrupt. Being the paranoiacs we are we check anyway. |
| 94 | */ | 69 | */ |
| 95 | if (!r2 || (read_c0_cause() & (1 << 30))) { | 70 | if (!r2 || (read_c0_cause() & (1 << 30))) { |
| 96 | c0_timer_ack(); | 71 | /* Clear Count/Compare Interrupt */ |
| 97 | #ifdef CONFIG_MIPS_MT_SMTC | 72 | write_c0_compare(read_c0_compare()); |
| 98 | if (cpu_data[cpu].vpe_id) | ||
| 99 | goto out; | ||
| 100 | cpu = 0; | ||
| 101 | #endif | ||
| 102 | cd = &per_cpu(mips_clockevent_device, cpu); | 73 | cd = &per_cpu(mips_clockevent_device, cpu); |
| 103 | cd->event_handler(cd); | 74 | cd->event_handler(cd); |
| 104 | } | 75 | } |
| @@ -107,65 +78,16 @@ out: | |||
| 107 | return IRQ_HANDLED; | 78 | return IRQ_HANDLED; |
| 108 | } | 79 | } |
| 109 | 80 | ||
| 110 | static struct irqaction c0_compare_irqaction = { | 81 | #endif /* Not CONFIG_MIPS_MT_SMTC */ |
| 82 | |||
| 83 | struct irqaction c0_compare_irqaction = { | ||
| 111 | .handler = c0_compare_interrupt, | 84 | .handler = c0_compare_interrupt, |
| 112 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 113 | .flags = IRQF_DISABLED, | ||
| 114 | #else | ||
| 115 | .flags = IRQF_DISABLED | IRQF_PERCPU, | 85 | .flags = IRQF_DISABLED | IRQF_PERCPU, |
| 116 | #endif | ||
| 117 | .name = "timer", | 86 | .name = "timer", |
| 118 | }; | 87 | }; |
| 119 | 88 | ||
| 120 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 121 | DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); | ||
| 122 | |||
| 123 | static void smtc_set_mode(enum clock_event_mode mode, | ||
| 124 | struct clock_event_device *evt) | ||
| 125 | { | ||
| 126 | } | ||
| 127 | |||
| 128 | static void mips_broadcast(cpumask_t mask) | ||
| 129 | { | ||
| 130 | unsigned int cpu; | ||
| 131 | |||
| 132 | for_each_cpu_mask(cpu, mask) | ||
| 133 | smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); | ||
| 134 | } | ||
| 135 | |||
| 136 | static void setup_smtc_dummy_clockevent_device(void) | ||
| 137 | { | ||
| 138 | //uint64_t mips_freq = mips_hpt_frequency; | ||
| 139 | unsigned int cpu = smp_processor_id(); | ||
| 140 | struct clock_event_device *cd; | ||
| 141 | 89 | ||
| 142 | cd = &per_cpu(smtc_dummy_clockevent_device, cpu); | 90 | void mips_event_handler(struct clock_event_device *dev) |
| 143 | |||
| 144 | cd->name = "SMTC"; | ||
| 145 | cd->features = CLOCK_EVT_FEAT_DUMMY; | ||
| 146 | |||
| 147 | /* Calculate the min / max delta */ | ||
| 148 | cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); | ||
| 149 | cd->shift = 0; //32; | ||
| 150 | cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd); | ||
| 151 | cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd); | ||
| 152 | |||
| 153 | cd->rating = 200; | ||
| 154 | cd->irq = 17; //-1; | ||
| 155 | // if (cpu) | ||
| 156 | // cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu); | ||
| 157 | // else | ||
| 158 | cd->cpumask = cpumask_of_cpu(cpu); | ||
| 159 | |||
| 160 | cd->set_mode = smtc_set_mode; | ||
| 161 | |||
| 162 | cd->broadcast = mips_broadcast; | ||
| 163 | |||
| 164 | clockevents_register_device(cd); | ||
| 165 | } | ||
| 166 | #endif | ||
| 167 | |||
| 168 | static void mips_event_handler(struct clock_event_device *dev) | ||
| 169 | { | 91 | { |
| 170 | } | 92 | } |
| 171 | 93 | ||
| @@ -177,7 +99,23 @@ static int c0_compare_int_pending(void) | |||
| 177 | return (read_c0_cause() >> cp0_compare_irq) & 0x100; | 99 | return (read_c0_cause() >> cp0_compare_irq) & 0x100; |
| 178 | } | 100 | } |
| 179 | 101 | ||
| 180 | static int c0_compare_int_usable(void) | 102 | /* |
| 103 | * Compare interrupt can be routed and latched outside the core, | ||
| 104 | * so a single execution hazard barrier may not be enough to give | ||
| 105 | * it time to clear as seen in the Cause register. 4 times the | ||
| 106 | * pipeline depth seems reasonably conservative, and empirically | ||
| 107 | * works better in configurations with high CPU/bus clock ratios. | ||
| 108 | */ | ||
| 109 | |||
| 110 | #define compare_change_hazard() \ | ||
| 111 | do { \ | ||
| 112 | irq_disable_hazard(); \ | ||
| 113 | irq_disable_hazard(); \ | ||
| 114 | irq_disable_hazard(); \ | ||
| 115 | irq_disable_hazard(); \ | ||
| 116 | } while (0) | ||
| 117 | |||
| 118 | int c0_compare_int_usable(void) | ||
| 181 | { | 119 | { |
| 182 | unsigned int delta; | 120 | unsigned int delta; |
| 183 | unsigned int cnt; | 121 | unsigned int cnt; |
| @@ -187,7 +125,7 @@ static int c0_compare_int_usable(void) | |||
| 187 | */ | 125 | */ |
| 188 | if (c0_compare_int_pending()) { | 126 | if (c0_compare_int_pending()) { |
| 189 | write_c0_compare(read_c0_count()); | 127 | write_c0_compare(read_c0_count()); |
| 190 | irq_disable_hazard(); | 128 | compare_change_hazard(); |
| 191 | if (c0_compare_int_pending()) | 129 | if (c0_compare_int_pending()) |
| 192 | return 0; | 130 | return 0; |
| 193 | } | 131 | } |
| @@ -196,7 +134,7 @@ static int c0_compare_int_usable(void) | |||
| 196 | cnt = read_c0_count(); | 134 | cnt = read_c0_count(); |
| 197 | cnt += delta; | 135 | cnt += delta; |
| 198 | write_c0_compare(cnt); | 136 | write_c0_compare(cnt); |
| 199 | irq_disable_hazard(); | 137 | compare_change_hazard(); |
| 200 | if ((int)(read_c0_count() - cnt) < 0) | 138 | if ((int)(read_c0_count() - cnt) < 0) |
| 201 | break; | 139 | break; |
| 202 | /* increase delta if the timer was already expired */ | 140 | /* increase delta if the timer was already expired */ |
| @@ -205,11 +143,12 @@ static int c0_compare_int_usable(void) | |||
| 205 | while ((int)(read_c0_count() - cnt) <= 0) | 143 | while ((int)(read_c0_count() - cnt) <= 0) |
| 206 | ; /* Wait for expiry */ | 144 | ; /* Wait for expiry */ |
| 207 | 145 | ||
| 146 | compare_change_hazard(); | ||
| 208 | if (!c0_compare_int_pending()) | 147 | if (!c0_compare_int_pending()) |
| 209 | return 0; | 148 | return 0; |
| 210 | 149 | ||
| 211 | write_c0_compare(read_c0_count()); | 150 | write_c0_compare(read_c0_count()); |
| 212 | irq_disable_hazard(); | 151 | compare_change_hazard(); |
| 213 | if (c0_compare_int_pending()) | 152 | if (c0_compare_int_pending()) |
| 214 | return 0; | 153 | return 0; |
| 215 | 154 | ||
| @@ -219,6 +158,8 @@ static int c0_compare_int_usable(void) | |||
| 219 | return 1; | 158 | return 1; |
| 220 | } | 159 | } |
| 221 | 160 | ||
| 161 | #ifndef CONFIG_MIPS_MT_SMTC | ||
| 162 | |||
| 222 | int __cpuinit mips_clockevent_init(void) | 163 | int __cpuinit mips_clockevent_init(void) |
| 223 | { | 164 | { |
| 224 | uint64_t mips_freq = mips_hpt_frequency; | 165 | uint64_t mips_freq = mips_hpt_frequency; |
| @@ -229,17 +170,6 @@ int __cpuinit mips_clockevent_init(void) | |||
| 229 | if (!cpu_has_counter || !mips_hpt_frequency) | 170 | if (!cpu_has_counter || !mips_hpt_frequency) |
| 230 | return -ENXIO; | 171 | return -ENXIO; |
| 231 | 172 | ||
| 232 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 233 | setup_smtc_dummy_clockevent_device(); | ||
| 234 | |||
| 235 | /* | ||
| 236 | * On SMTC we only register VPE0's compare interrupt as clockevent | ||
| 237 | * device. | ||
| 238 | */ | ||
| 239 | if (cpu) | ||
| 240 | return 0; | ||
| 241 | #endif | ||
| 242 | |||
| 243 | if (!c0_compare_int_usable()) | 173 | if (!c0_compare_int_usable()) |
| 244 | return -ENXIO; | 174 | return -ENXIO; |
| 245 | 175 | ||
| @@ -265,13 +195,9 @@ int __cpuinit mips_clockevent_init(void) | |||
| 265 | 195 | ||
| 266 | cd->rating = 300; | 196 | cd->rating = 300; |
| 267 | cd->irq = irq; | 197 | cd->irq = irq; |
| 268 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 269 | cd->cpumask = CPU_MASK_ALL; | ||
| 270 | #else | ||
| 271 | cd->cpumask = cpumask_of_cpu(cpu); | 198 | cd->cpumask = cpumask_of_cpu(cpu); |
| 272 | #endif | ||
| 273 | cd->set_next_event = mips_next_event; | 199 | cd->set_next_event = mips_next_event; |
| 274 | cd->set_mode = mips_set_mode; | 200 | cd->set_mode = mips_set_clock_mode; |
| 275 | cd->event_handler = mips_event_handler; | 201 | cd->event_handler = mips_event_handler; |
| 276 | 202 | ||
| 277 | clockevents_register_device(cd); | 203 | clockevents_register_device(cd); |
| @@ -281,12 +207,9 @@ int __cpuinit mips_clockevent_init(void) | |||
| 281 | 207 | ||
| 282 | cp0_timer_irq_installed = 1; | 208 | cp0_timer_irq_installed = 1; |
| 283 | 209 | ||
| 284 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 285 | #define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq) | ||
| 286 | setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT); | ||
| 287 | #else | ||
| 288 | setup_irq(irq, &c0_compare_irqaction); | 210 | setup_irq(irq, &c0_compare_irqaction); |
| 289 | #endif | ||
| 290 | 211 | ||
| 291 | return 0; | 212 | return 0; |
| 292 | } | 213 | } |
| 214 | |||
| 215 | #endif /* Not CONFIG_MIPS_MT_SMTC */ | ||
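The -ETIME test in mips_next_event() above relies on 32-bit modular arithmetic: casting the Count/Compare difference to a signed int keeps the comparison correct even when the counter wraps. A minimal stand-alone sketch of the same idiom (illustration only, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors: res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0; */
    static int deadline_missed(uint32_t count_now, uint32_t deadline)
    {
        /* A positive signed view of the modular difference means
         * Count has already passed the programmed Compare value. */
        return (int32_t)(count_now - deadline) > 0;
    }

    int main(void)
    {
        printf("%d\n", deadline_missed(100, 200));        /* 0: deadline still ahead */
        printf("%d\n", deadline_missed(201, 200));        /* 1: deadline missed */
        printf("%d\n", deadline_missed(0xfffffff0u, 5));  /* 0: deadline ahead, across the wrap */
        return 0;
    }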
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
new file mode 100644
index 000000000000..5162fe4b5952
--- /dev/null
+++ b/arch/mips/kernel/cevt-smtc.c
| @@ -0,0 +1,321 @@ | |||
| 1 | /* | ||
| 2 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 3 | * License. See the file "COPYING" in the main directory of this archive | ||
| 4 | * for more details. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2007 MIPS Technologies, Inc. | ||
| 7 | * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org> | ||
| 8 | * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl | ||
| 9 | */ | ||
| 10 | #include <linux/clockchips.h> | ||
| 11 | #include <linux/interrupt.h> | ||
| 12 | #include <linux/percpu.h> | ||
| 13 | |||
| 14 | #include <asm/smtc_ipi.h> | ||
| 15 | #include <asm/time.h> | ||
| 16 | #include <asm/cevt-r4k.h> | ||
| 17 | |||
| 18 | /* | ||
| 19 | * Variant clock event timer support for SMTC on MIPS 34K, 1004K | ||
| 20 | * or other MIPS MT cores. | ||
| 21 | * | ||
| 22 | * Notes on SMTC Support: | ||
| 23 | * | ||
| 24 | * SMTC has multiple microthread TCs pretending to be Linux CPUs. | ||
| 25 | * But there's only one Count/Compare pair per VPE, and Compare | ||
| 26 | * interrupts are taken opportunistically by available TCs | ||
| 27 | * bound to the VPE with the Count register. The new timer | ||
| 28 | * framework provides for global broadcasts, but we really | ||
| 29 | * want VPE-level multicasts for best behavior. So instead | ||
| 30 | * of invoking the high-level clock-event broadcast code, | ||
| 31 | * this version of SMTC support uses the historical SMTC | ||
| 32 | * multicast mechanisms "under the hood", appearing to the | ||
| 33 | * generic clock layer as if the interrupts are per-CPU. | ||
| 34 | * | ||
| 35 | * The approach taken here is to maintain a set of NR_CPUS | ||
| 36 | * virtual timers, and track which "CPU" needs to be alerted | ||
| 37 | * at each event. | ||
| 38 | * | ||
| 39 | * It's unlikely that we'll see a MIPS MT core with more than | ||
| 40 | * 2 VPEs, but we *know* that we won't need to handle more | ||
| 41 | * VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements | ||
| 42 | * are always going to be overkill, but always going to be enough. | ||
| 43 | */ | ||
| 44 | |||
| 45 | unsigned long smtc_nexttime[NR_CPUS][NR_CPUS]; | ||
| 46 | static int smtc_nextinvpe[NR_CPUS]; | ||
| 47 | |||
| 48 | /* | ||
| 49 | * Timestamps stored are absolute values to be programmed | ||
| 50 | * into Count register. Valid timestamps will never be zero. | ||
| 51 | * If a Zero Count value is actually calculated, it is converted | ||
| 52 | * to be a 1, which will introduce one or two CPU cycles of error | ||
| 53 | * roughly once every four billion events, which at 1000 HZ means | ||
| 54 | * about once every 50 days. If that's actually a problem, one | ||
| 55 | * could alternate squashing 0 to 1 and to -1. | ||
| 56 | */ | ||
| 57 | |||
| 58 | #define MAKEVALID(x) (((x) == 0L) ? 1L : (x)) | ||
| 59 | #define ISVALID(x) ((x) != 0L) | ||
| 60 | |||
| 61 | /* | ||
| 62 | * Time comparison is subtle, as it's really truncated | ||
| 63 | * modular arithmetic. | ||
| 64 | */ | ||
| 65 | |||
| 66 | #define IS_SOONER(a, b, reference) \ | ||
| 67 | (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference))) | ||
| 68 | |||
| 69 | /* | ||
| 70 | * CATCHUP_INCREMENT, used when the function falls behind the counter. | ||
| 71 | * Could be an increasing function instead of a constant; | ||
| 72 | */ | ||
| 73 | |||
| 74 | #define CATCHUP_INCREMENT 64 | ||
| 75 | |||
| 76 | static int mips_next_event(unsigned long delta, | ||
| 77 | struct clock_event_device *evt) | ||
| 78 | { | ||
| 79 | unsigned long flags; | ||
| 80 | unsigned int mtflags; | ||
| 81 | unsigned long timestamp, reference, previous; | ||
| 82 | unsigned long nextcomp = 0L; | ||
| 83 | int vpe = current_cpu_data.vpe_id; | ||
| 84 | int cpu = smp_processor_id(); | ||
| 85 | local_irq_save(flags); | ||
| 86 | mtflags = dmt(); | ||
| 87 | |||
| 88 | /* | ||
| 89 | * Maintain the per-TC virtual timer | ||
| 90 | * and program the per-VPE shared Count register | ||
| 91 | * as appropriate here... | ||
| 92 | */ | ||
| 93 | reference = (unsigned long)read_c0_count(); | ||
| 94 | timestamp = MAKEVALID(reference + delta); | ||
| 95 | /* | ||
| 96 | * To really model the clock, we have to catch the case | ||
| 97 | * where the current next-in-VPE timestamp is the old | ||
| 98 | * timestamp for the calling CPU, but the new value is | ||
| 99 | * in fact later. In that case, we have to do a full | ||
| 100 | * scan and discover the new next-in-VPE CPU id and | ||
| 101 | * timestamp. | ||
| 102 | */ | ||
| 103 | previous = smtc_nexttime[vpe][cpu]; | ||
| 104 | if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous) | ||
| 105 | && IS_SOONER(previous, timestamp, reference)) { | ||
| 106 | int i; | ||
| 107 | int soonest = cpu; | ||
| 108 | |||
| 109 | /* | ||
| 110 | * Update timestamp array here, so that new | ||
| 111 | * value gets considered along with those of | ||
| 112 | * other virtual CPUs on the VPE. | ||
| 113 | */ | ||
| 114 | smtc_nexttime[vpe][cpu] = timestamp; | ||
| 115 | for_each_online_cpu(i) { | ||
| 116 | if (ISVALID(smtc_nexttime[vpe][i]) | ||
| 117 | && IS_SOONER(smtc_nexttime[vpe][i], | ||
| 118 | smtc_nexttime[vpe][soonest], reference)) { | ||
| 119 | soonest = i; | ||
| 120 | } | ||
| 121 | } | ||
| 122 | smtc_nextinvpe[vpe] = soonest; | ||
| 123 | nextcomp = smtc_nexttime[vpe][soonest]; | ||
| 124 | /* | ||
| 125 | * Otherwise, we don't have to process the whole array rank, | ||
| 126 | * we just have to see if the event horizon has gotten closer. | ||
| 127 | */ | ||
| 128 | } else { | ||
| 129 | if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) || | ||
| 130 | IS_SOONER(timestamp, | ||
| 131 | smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) { | ||
| 132 | smtc_nextinvpe[vpe] = cpu; | ||
| 133 | nextcomp = timestamp; | ||
| 134 | } | ||
| 135 | /* | ||
| 136 | * Since next-in-VPE may be the same as the executing | ||
| 137 | * virtual CPU, we update the array *after* checking | ||
| 138 | * its value. | ||
| 139 | */ | ||
| 140 | smtc_nexttime[vpe][cpu] = timestamp; | ||
| 141 | } | ||
| 142 | |||
| 143 | /* | ||
| 144 | * It may be that, in fact, we don't need to update Compare, | ||
| 145 | * but if we do, we want to make sure we didn't fall into | ||
| 146 | * a crack just behind Count. | ||
| 147 | */ | ||
| 148 | if (ISVALID(nextcomp)) { | ||
| 149 | write_c0_compare(nextcomp); | ||
| 150 | ehb(); | ||
| 151 | /* | ||
| 152 | * We never return an error, we just make sure | ||
| 153 | * that we trigger the handlers as quickly as | ||
| 154 | * we can if we fell behind. | ||
| 155 | */ | ||
| 156 | while ((nextcomp - (unsigned long)read_c0_count()) | ||
| 157 | > (unsigned long)LONG_MAX) { | ||
| 158 | nextcomp += CATCHUP_INCREMENT; | ||
| 159 | write_c0_compare(nextcomp); | ||
| 160 | ehb(); | ||
| 161 | } | ||
| 162 | } | ||
| 163 | emt(mtflags); | ||
| 164 | local_irq_restore(flags); | ||
| 165 | return 0; | ||
| 166 | } | ||
| 167 | |||
| 168 | |||
| 169 | void smtc_distribute_timer(int vpe) | ||
| 170 | { | ||
| 171 | unsigned long flags; | ||
| 172 | unsigned int mtflags; | ||
| 173 | int cpu; | ||
| 174 | struct clock_event_device *cd; | ||
| 175 | unsigned long nextstamp = 0L; | ||
| 176 | unsigned long reference; | ||
| 177 | |||
| 178 | |||
| 179 | repeat: | ||
| 180 | for_each_online_cpu(cpu) { | ||
| 181 | /* | ||
| 182 | * Find virtual CPUs within the current VPE who have | ||
| 183 | * unserviced timer requests whose time is now past. | ||
| 184 | */ | ||
| 185 | local_irq_save(flags); | ||
| 186 | mtflags = dmt(); | ||
| 187 | if (cpu_data[cpu].vpe_id == vpe && | ||
| 188 | ISVALID(smtc_nexttime[vpe][cpu])) { | ||
| 189 | reference = (unsigned long)read_c0_count(); | ||
| 190 | if ((smtc_nexttime[vpe][cpu] - reference) | ||
| 191 | > (unsigned long)LONG_MAX) { | ||
| 192 | smtc_nexttime[vpe][cpu] = 0L; | ||
| 193 | emt(mtflags); | ||
| 194 | local_irq_restore(flags); | ||
| 195 | /* | ||
| 196 | * We don't send IPIs to ourself. | ||
| 197 | */ | ||
| 198 | if (cpu != smp_processor_id()) { | ||
| 199 | smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); | ||
| 200 | } else { | ||
| 201 | cd = &per_cpu(mips_clockevent_device, cpu); | ||
| 202 | cd->event_handler(cd); | ||
| 203 | } | ||
| 204 | } else { | ||
| 205 | /* Local to VPE but Valid Time not yet reached. */ | ||
| 206 | if (!ISVALID(nextstamp) || | ||
| 207 | IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp, | ||
| 208 | reference)) { | ||
| 209 | smtc_nextinvpe[vpe] = cpu; | ||
| 210 | nextstamp = smtc_nexttime[vpe][cpu]; | ||
| 211 | } | ||
| 212 | emt(mtflags); | ||
| 213 | local_irq_restore(flags); | ||
| 214 | } | ||
| 215 | } else { | ||
| 216 | emt(mtflags); | ||
| 217 | local_irq_restore(flags); | ||
| 218 | |||
| 219 | } | ||
| 220 | } | ||
| 221 | /* Reprogram for interrupt at next soonest timestamp for VPE */ | ||
| 222 | if (ISVALID(nextstamp)) { | ||
| 223 | write_c0_compare(nextstamp); | ||
| 224 | ehb(); | ||
| 225 | if ((nextstamp - (unsigned long)read_c0_count()) | ||
| 226 | > (unsigned long)LONG_MAX) | ||
| 227 | goto repeat; | ||
| 228 | } | ||
| 229 | } | ||
| 230 | |||
| 231 | |||
| 232 | irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | ||
| 233 | { | ||
| 234 | int cpu = smp_processor_id(); | ||
| 235 | |||
| 236 | /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */ | ||
| 237 | handle_perf_irq(1); | ||
| 238 | |||
| 239 | if (read_c0_cause() & (1 << 30)) { | ||
| 240 | /* Clear Count/Compare Interrupt */ | ||
| 241 | write_c0_compare(read_c0_compare()); | ||
| 242 | smtc_distribute_timer(cpu_data[cpu].vpe_id); | ||
| 243 | } | ||
| 244 | return IRQ_HANDLED; | ||
| 245 | } | ||
| 246 | |||
| 247 | |||
| 248 | int __cpuinit mips_clockevent_init(void) | ||
| 249 | { | ||
| 250 | uint64_t mips_freq = mips_hpt_frequency; | ||
| 251 | unsigned int cpu = smp_processor_id(); | ||
| 252 | struct clock_event_device *cd; | ||
| 253 | unsigned int irq; | ||
| 254 | int i; | ||
| 255 | int j; | ||
| 256 | |||
| 257 | if (!cpu_has_counter || !mips_hpt_frequency) | ||
| 258 | return -ENXIO; | ||
| 259 | if (cpu == 0) { | ||
| 260 | for (i = 0; i < num_possible_cpus(); i++) { | ||
| 261 | smtc_nextinvpe[i] = 0; | ||
| 262 | for (j = 0; j < num_possible_cpus(); j++) | ||
| 263 | smtc_nexttime[i][j] = 0L; | ||
| 264 | } | ||
| 265 | /* | ||
| 266 | * SMTC also can't have the usability test | ||
| 267 | * run by secondary TCs once Compare is in use. | ||
| 268 | */ | ||
| 269 | if (!c0_compare_int_usable()) | ||
| 270 | return -ENXIO; | ||
| 271 | } | ||
| 272 | |||
| 273 | /* | ||
| 274 | * With vectored interrupts things are getting platform specific. | ||
| 275 | * get_c0_compare_int is a hook to allow a platform to return the | ||
| 276 | * interrupt number of its liking. | ||
| 277 | */ | ||
| 278 | irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; | ||
| 279 | if (get_c0_compare_int) | ||
| 280 | irq = get_c0_compare_int(); | ||
| 281 | |||
| 282 | cd = &per_cpu(mips_clockevent_device, cpu); | ||
| 283 | |||
| 284 | cd->name = "MIPS"; | ||
| 285 | cd->features = CLOCK_EVT_FEAT_ONESHOT; | ||
| 286 | |||
| 287 | /* Calculate the min / max delta */ | ||
| 288 | cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); | ||
| 289 | cd->shift = 32; | ||
| 290 | cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); | ||
| 291 | cd->min_delta_ns = clockevent_delta2ns(0x300, cd); | ||
| 292 | |||
| 293 | cd->rating = 300; | ||
| 294 | cd->irq = irq; | ||
| 295 | cd->cpumask = cpumask_of_cpu(cpu); | ||
| 296 | cd->set_next_event = mips_next_event; | ||
| 297 | cd->set_mode = mips_set_clock_mode; | ||
| 298 | cd->event_handler = mips_event_handler; | ||
| 299 | |||
| 300 | clockevents_register_device(cd); | ||
| 301 | |||
| 302 | /* | ||
| 303 | * On SMTC we only want to do the data structure | ||
| 304 | * initialization and IRQ setup once. | ||
| 305 | */ | ||
| 306 | if (cpu) | ||
| 307 | return 0; | ||
| 308 | /* | ||
| 309 | * And we need the hwmask associated with the c0_compare | ||
| 310 | * vector to be initialized. | ||
| 311 | */ | ||
| 312 | irq_hwmask[irq] = (0x100 << cp0_compare_irq); | ||
| 313 | if (cp0_timer_irq_installed) | ||
| 314 | return 0; | ||
| 315 | |||
| 316 | cp0_timer_irq_installed = 1; | ||
| 317 | |||
| 318 | setup_irq(irq, &c0_compare_irqaction); | ||
| 319 | |||
| 320 | return 0; | ||
| 321 | } | ||
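The virtual-timer bookkeeping above rests on IS_SOONER(), which compares two absolute Count timestamps relative to a reference point so that the ordering survives counter wraparound. A stand-alone sketch of the same comparison (illustration only, not part of the patch; a fixed 32-bit type is used here, where the kernel macro operates on unsigned long):

    #include <stdint.h>
    #include <stdio.h>

    /* Same idea as IS_SOONER(a, b, reference) in cevt-smtc.c */
    static int is_sooner(uint32_t a, uint32_t b, uint32_t ref)
    {
        return (uint32_t)(a - ref) < (uint32_t)(b - ref);
    }

    int main(void)
    {
        uint32_t ref = 0xfffffff0u;  /* Count about to wrap */

        /* 0xfffffff8 fires 8 ticks after ref, 0x10 fires 0x20 ticks
         * after ref (across the wrap); a plain a < b comparison
         * would order these two deadlines wrongly. */
        printf("%d\n", is_sooner(0xfffffff8u, 0x10u, ref)); /* 1 */
        printf("%d\n", is_sooner(0x10u, 0xfffffff8u, ref)); /* 0 */
        return 0;
    }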
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 335a6ae3d594..0cf15457ecac 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <asm/fpu.h> | 21 | #include <asm/fpu.h> |
| 22 | #include <asm/mipsregs.h> | 22 | #include <asm/mipsregs.h> |
| 23 | #include <asm/system.h> | 23 | #include <asm/system.h> |
| 24 | #include <asm/watch.h> | ||
| 24 | 25 | ||
| 25 | /* | 26 | /* |
| 26 | * Not all of the MIPS CPUs have the "wait" instruction available. Moreover, | 27 | * Not all of the MIPS CPUs have the "wait" instruction available. Moreover, |
| @@ -45,18 +46,7 @@ static void r39xx_wait(void) | |||
| 45 | local_irq_enable(); | 46 | local_irq_enable(); |
| 46 | } | 47 | } |
| 47 | 48 | ||
| 48 | /* | 49 | extern void r4k_wait(void); |
| 49 | * There is a race when the WAIT instruction is executed with | ||
| 50 | * interrupts enabled. | ||
| 51 | * But it is implementation-dependent whether the pipeline restarts when | ||
| 52 | * a non-enabled interrupt is requested. | ||
| 53 | */ | ||
| 54 | static void r4k_wait(void) | ||
| 55 | { | ||
| 56 | __asm__(" .set mips3 \n" | ||
| 57 | " wait \n" | ||
| 58 | " .set mips0 \n"); | ||
| 59 | } | ||
| 60 | 50 | ||
| 61 | /* | 51 | /* |
| 62 | * This variant is preferable as it allows testing need_resched and going to | 52 | * This variant is preferable as it allows testing need_resched and going to |
| @@ -65,14 +55,18 @@ static void r4k_wait(void) | |||
| 65 | * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes | 55 | * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes |
| 66 | * using this version a gamble. | 56 | * using this version a gamble. |
| 67 | */ | 57 | */ |
| 68 | static void r4k_wait_irqoff(void) | 58 | void r4k_wait_irqoff(void) |
| 69 | { | 59 | { |
| 70 | local_irq_disable(); | 60 | local_irq_disable(); |
| 71 | if (!need_resched()) | 61 | if (!need_resched()) |
| 72 | __asm__(" .set mips3 \n" | 62 | __asm__(" .set push \n" |
| 63 | " .set mips3 \n" | ||
| 73 | " wait \n" | 64 | " wait \n" |
| 74 | " .set mips0 \n"); | 65 | " .set pop \n"); |
| 75 | local_irq_enable(); | 66 | local_irq_enable(); |
| 67 | __asm__(" .globl __pastwait \n" | ||
| 68 | "__pastwait: \n"); | ||
| 69 | return; | ||
| 76 | } | 70 | } |
| 77 | 71 | ||
| 78 | /* | 72 | /* |
| @@ -128,7 +122,7 @@ static int __init wait_disable(char *s) | |||
| 128 | 122 | ||
| 129 | __setup("nowait", wait_disable); | 123 | __setup("nowait", wait_disable); |
| 130 | 124 | ||
| 131 | static inline void check_wait(void) | 125 | void __init check_wait(void) |
| 132 | { | 126 | { |
| 133 | struct cpuinfo_mips *c = ¤t_cpu_data; | 127 | struct cpuinfo_mips *c = ¤t_cpu_data; |
| 134 | 128 | ||
| @@ -242,7 +236,6 @@ static inline void check_errata(void) | |||
| 242 | 236 | ||
| 243 | void __init check_bugs32(void) | 237 | void __init check_bugs32(void) |
| 244 | { | 238 | { |
| 245 | check_wait(); | ||
| 246 | check_errata(); | 239 | check_errata(); |
| 247 | } | 240 | } |
| 248 | 241 | ||
| @@ -685,6 +678,7 @@ static inline void spram_config(void) {} | |||
| 685 | static inline void cpu_probe_mips(struct cpuinfo_mips *c) | 678 | static inline void cpu_probe_mips(struct cpuinfo_mips *c) |
| 686 | { | 679 | { |
| 687 | decode_configs(c); | 680 | decode_configs(c); |
| 681 | mips_probe_watch_registers(c); | ||
| 688 | switch (c->processor_id & 0xff00) { | 682 | switch (c->processor_id & 0xff00) { |
| 689 | case PRID_IMP_4KC: | 683 | case PRID_IMP_4KC: |
| 690 | c->cputype = CPU_4KC; | 684 | c->cputype = CPU_4KC; |
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index e29598ae939d..ffa331029e08 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
| @@ -79,11 +79,6 @@ FEXPORT(syscall_exit) | |||
| 79 | 79 | ||
| 80 | FEXPORT(restore_all) # restore full frame | 80 | FEXPORT(restore_all) # restore full frame |
| 81 | #ifdef CONFIG_MIPS_MT_SMTC | 81 | #ifdef CONFIG_MIPS_MT_SMTC |
| 82 | /* Detect and execute deferred IPI "interrupts" */ | ||
| 83 | LONG_L s0, TI_REGS($28) | ||
| 84 | LONG_S sp, TI_REGS($28) | ||
| 85 | jal deferred_smtc_ipi | ||
| 86 | LONG_S s0, TI_REGS($28) | ||
| 87 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP | 82 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP |
| 88 | /* Re-arm any temporarily masked interrupts not explicitly "acked" */ | 83 | /* Re-arm any temporarily masked interrupts not explicitly "acked" */ |
| 89 | mfc0 v0, CP0_TCSTATUS | 84 | mfc0 v0, CP0_TCSTATUS |
| @@ -112,6 +107,11 @@ FEXPORT(restore_all) # restore full frame | |||
| 112 | xor t0, t0, t3 | 107 | xor t0, t0, t3 |
| 113 | mtc0 t0, CP0_TCCONTEXT | 108 | mtc0 t0, CP0_TCCONTEXT |
| 114 | #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ | 109 | #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ |
| 110 | /* Detect and execute deferred IPI "interrupts" */ | ||
| 111 | LONG_L s0, TI_REGS($28) | ||
| 112 | LONG_S sp, TI_REGS($28) | ||
| 113 | jal deferred_smtc_ipi | ||
| 114 | LONG_S s0, TI_REGS($28) | ||
| 115 | #endif /* CONFIG_MIPS_MT_SMTC */ | 115 | #endif /* CONFIG_MIPS_MT_SMTC */ |
| 116 | .set noat | 116 | .set noat |
| 117 | RESTORE_TEMP | 117 | RESTORE_TEMP |
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index c6ada98ee042..757d48f0d80f 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <asm/stackframe.h> | 20 | #include <asm/stackframe.h> |
| 21 | #include <asm/war.h> | 21 | #include <asm/war.h> |
| 22 | #include <asm/page.h> | 22 | #include <asm/page.h> |
| 23 | #include <asm/thread_info.h> | ||
| 23 | 24 | ||
| 24 | #define PANIC_PIC(msg) \ | 25 | #define PANIC_PIC(msg) \ |
| 25 | .set push; \ | 26 | .set push; \ |
| @@ -126,7 +127,42 @@ handle_vcei: | |||
| 126 | 127 | ||
| 127 | __FINIT | 128 | __FINIT |
| 128 | 129 | ||
| 130 | .align 5 /* 32 byte rollback region */ | ||
| 131 | LEAF(r4k_wait) | ||
| 132 | .set push | ||
| 133 | .set noreorder | ||
| 134 | /* start of rollback region */ | ||
| 135 | LONG_L t0, TI_FLAGS($28) | ||
| 136 | nop | ||
| 137 | andi t0, _TIF_NEED_RESCHED | ||
| 138 | bnez t0, 1f | ||
| 139 | nop | ||
| 140 | nop | ||
| 141 | nop | ||
| 142 | .set mips3 | ||
| 143 | wait | ||
| 144 | /* end of rollback region (the region size must be power of two) */ | ||
| 145 | .set pop | ||
| 146 | 1: | ||
| 147 | jr ra | ||
| 148 | END(r4k_wait) | ||
| 149 | |||
| 150 | .macro BUILD_ROLLBACK_PROLOGUE handler | ||
| 151 | FEXPORT(rollback_\handler) | ||
| 152 | .set push | ||
| 153 | .set noat | ||
| 154 | MFC0 k0, CP0_EPC | ||
| 155 | PTR_LA k1, r4k_wait | ||
| 156 | ori k0, 0x1f /* 32 byte rollback region */ | ||
| 157 | xori k0, 0x1f | ||
| 158 | bne k0, k1, 9f | ||
| 159 | MTC0 k0, CP0_EPC | ||
| 160 | 9: | ||
| 161 | .set pop | ||
| 162 | .endm | ||
| 163 | |||
| 129 | .align 5 | 164 | .align 5 |
| 165 | BUILD_ROLLBACK_PROLOGUE handle_int | ||
| 130 | NESTED(handle_int, PT_SIZE, sp) | 166 | NESTED(handle_int, PT_SIZE, sp) |
| 131 | #ifdef CONFIG_TRACE_IRQFLAGS | 167 | #ifdef CONFIG_TRACE_IRQFLAGS |
| 132 | /* | 168 | /* |
| @@ -201,6 +237,7 @@ NESTED(except_vec_ejtag_debug, 0, sp) | |||
| 201 | * This prototype is copied to ebase + n*IntCtl.VS and patched | 237 | * This prototype is copied to ebase + n*IntCtl.VS and patched |
| 202 | * to invoke the handler | 238 | * to invoke the handler |
| 203 | */ | 239 | */ |
| 240 | BUILD_ROLLBACK_PROLOGUE except_vec_vi | ||
| 204 | NESTED(except_vec_vi, 0, sp) | 241 | NESTED(except_vec_vi, 0, sp) |
| 205 | SAVE_SOME | 242 | SAVE_SOME |
| 206 | SAVE_AT | 243 | SAVE_AT |
| @@ -245,8 +282,8 @@ NESTED(except_vec_vi_handler, 0, sp) | |||
| 245 | and t0, a0, t1 | 282 | and t0, a0, t1 |
| 246 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP | 283 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP |
| 247 | mfc0 t2, CP0_TCCONTEXT | 284 | mfc0 t2, CP0_TCCONTEXT |
| 248 | or t0, t0, t2 | 285 | or t2, t0, t2 |
| 249 | mtc0 t0, CP0_TCCONTEXT | 286 | mtc0 t2, CP0_TCCONTEXT |
| 250 | #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ | 287 | #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ |
| 251 | xor t1, t1, t0 | 288 | xor t1, t1, t0 |
| 252 | mtc0 t1, CP0_STATUS | 289 | mtc0 t1, CP0_STATUS |
| @@ -416,7 +453,11 @@ NESTED(nmi_handler, PT_SIZE, sp) | |||
| 416 | BUILD_HANDLER tr tr sti silent /* #13 */ | 453 | BUILD_HANDLER tr tr sti silent /* #13 */ |
| 417 | BUILD_HANDLER fpe fpe fpe silent /* #15 */ | 454 | BUILD_HANDLER fpe fpe fpe silent /* #15 */ |
| 418 | BUILD_HANDLER mdmx mdmx sti silent /* #22 */ | 455 | BUILD_HANDLER mdmx mdmx sti silent /* #22 */ |
| 456 | #ifdef CONFIG_HARDWARE_WATCHPOINTS | ||
| 457 | BUILD_HANDLER watch watch sti silent /* #23 */ | ||
| 458 | #else | ||
| 419 | BUILD_HANDLER watch watch sti verbose /* #23 */ | 459 | BUILD_HANDLER watch watch sti verbose /* #23 */ |
| 460 | #endif | ||
| 420 | BUILD_HANDLER mcheck mcheck cli verbose /* #24 */ | 461 | BUILD_HANDLER mcheck mcheck cli verbose /* #24 */ |
| 421 | BUILD_HANDLER mt mt sti silent /* #25 */ | 462 | BUILD_HANDLER mt mt sti silent /* #25 */ |
| 422 | BUILD_HANDLER dsp dsp sti silent /* #26 */ | 463 | BUILD_HANDLER dsp dsp sti silent /* #26 */ |
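BUILD_ROLLBACK_PROLOGUE works because r4k_wait starts at a 32-byte-aligned address and fits in one 32-byte region: the ori/xori pair rounds any EPC falling inside the region down to its start, so an interrupt taken between the need_resched test and the wait instruction reruns the test instead of sleeping on a stale result. A sketch of the address computation (hypothetical addresses, illustration only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t r4k_wait = 0x803d6a20;      /* assumed 32-byte-aligned entry */
        uintptr_t epc      = r4k_wait + 0x14; /* interrupt taken mid-region */

        /* ori k0, 0x1f ; xori k0, 0x1f  ==  clear the low five bits */
        uintptr_t rounded = (epc | 0x1f) ^ 0x1f;

        printf("EPC %#lx rolls back to %#lx (== r4k_wait: %d)\n",
               (unsigned long)epc, (unsigned long)rounded,
               rounded == r4k_wait);
        return 0;
    }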
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 361364501d34..492a0a8d70fb 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <asm/irqflags.h> | 22 | #include <asm/irqflags.h> |
| 23 | #include <asm/regdef.h> | 23 | #include <asm/regdef.h> |
| 24 | #include <asm/page.h> | 24 | #include <asm/page.h> |
| 25 | #include <asm/pgtable-bits.h> | ||
| 25 | #include <asm/mipsregs.h> | 26 | #include <asm/mipsregs.h> |
| 26 | #include <asm/stackframe.h> | 27 | #include <asm/stackframe.h> |
| 27 | 28 | ||
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index 8f6d58ede33c..6e152c80cd4a 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
| @@ -236,8 +236,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code, | |||
| 236 | 236 | ||
| 237 | atomic_set(&kgdb_cpu_doing_single_step, -1); | 237 | atomic_set(&kgdb_cpu_doing_single_step, -1); |
| 238 | if (remcom_in_buffer[0] == 's') | 238 | if (remcom_in_buffer[0] == 's') |
| 239 | if (kgdb_contthread) | 239 | atomic_set(&kgdb_cpu_doing_single_step, cpu); |
| 240 | atomic_set(&kgdb_cpu_doing_single_step, cpu); | ||
| 241 | 240 | ||
| 242 | return 0; | 241 | return 0; |
| 243 | } | 242 | } |
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index df4d3f2f740c..dc9eb72ed9de 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
| @@ -159,7 +159,7 @@ __setup("fpaff=", fpaff_thresh); | |||
| 159 | /* | 159 | /* |
| 160 | * FPU Use Factor empirically derived from experiments on 34K | 160 | * FPU Use Factor empirically derived from experiments on 34K |
| 161 | */ | 161 | */ |
| 162 | #define FPUSEFACTOR 333 | 162 | #define FPUSEFACTOR 2000 |
| 163 | 163 | ||
| 164 | static __init int mt_fp_affinity_init(void) | 164 | static __init int mt_fp_affinity_init(void) |
| 165 | { | 165 | { |
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 36f065398243..75bb1300dd7a 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
| @@ -23,6 +23,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
| 23 | unsigned int version = cpu_data[n].processor_id; | 23 | unsigned int version = cpu_data[n].processor_id; |
| 24 | unsigned int fp_vers = cpu_data[n].fpu_id; | 24 | unsigned int fp_vers = cpu_data[n].fpu_id; |
| 25 | char fmt [64]; | 25 | char fmt [64]; |
| 26 | int i; | ||
| 26 | 27 | ||
| 27 | #ifdef CONFIG_SMP | 28 | #ifdef CONFIG_SMP |
| 28 | if (!cpu_isset(n, cpu_online_map)) | 29 | if (!cpu_isset(n, cpu_online_map)) |
| @@ -50,8 +51,16 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
| 50 | seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize); | 51 | seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize); |
| 51 | seq_printf(m, "extra interrupt vector\t: %s\n", | 52 | seq_printf(m, "extra interrupt vector\t: %s\n", |
| 52 | cpu_has_divec ? "yes" : "no"); | 53 | cpu_has_divec ? "yes" : "no"); |
| 53 | seq_printf(m, "hardware watchpoint\t: %s\n", | 54 | seq_printf(m, "hardware watchpoint\t: %s", |
| 54 | cpu_has_watch ? "yes" : "no"); | 55 | cpu_has_watch ? "yes, " : "no\n"); |
| 56 | if (cpu_has_watch) { | ||
| 57 | seq_printf(m, "count: %d, address/irw mask: [", | ||
| 58 | cpu_data[n].watch_reg_count); | ||
| 59 | for (i = 0; i < cpu_data[n].watch_reg_count; i++) | ||
| 60 | seq_printf(m, "%s0x%04x", i ? ", " : "" , | ||
| 61 | cpu_data[n].watch_reg_masks[i]); | ||
| 62 | seq_printf(m, "]\n"); | ||
| 63 | } | ||
| 55 | seq_printf(m, "ASEs implemented\t:%s%s%s%s%s%s\n", | 64 | seq_printf(m, "ASEs implemented\t:%s%s%s%s%s%s\n", |
| 56 | cpu_has_mips16 ? " mips16" : "", | 65 | cpu_has_mips16 ? " mips16" : "", |
| 57 | cpu_has_mdmx ? " mdmx" : "", | 66 | cpu_has_mdmx ? " mdmx" : "", |
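With this change the watchpoint line in /proc/cpuinfo carries the probed register count and the per-register address/IRW masks. On a hypothetical core with four watch register pairs the output would look roughly like:

    hardware watchpoint	: yes, count: 4, address/irw mask: [0x0ff8, 0x0ff8, 0x0ff8, 0x0ff8]

(mask values here are illustrative; they come from cpu_data[n].watch_reg_masks as filled in by the new probing code).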
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 17edc69cf5c1..ca2e4026ad20 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
| @@ -54,7 +54,7 @@ void __noreturn cpu_idle(void) | |||
| 54 | while (1) { | 54 | while (1) { |
| 55 | tick_nohz_stop_sched_tick(1); | 55 | tick_nohz_stop_sched_tick(1); |
| 56 | while (!need_resched()) { | 56 | while (!need_resched()) { |
| 57 | #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG | 57 | #ifdef CONFIG_MIPS_MT_SMTC |
| 58 | extern void smtc_idle_loop_hook(void); | 58 | extern void smtc_idle_loop_hook(void); |
| 59 | 59 | ||
| 60 | smtc_idle_loop_hook(); | 60 | smtc_idle_loop_hook(); |
| @@ -144,17 +144,18 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, | |||
| 144 | */ | 144 | */ |
| 145 | p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); | 145 | p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); |
| 146 | childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); | 146 | childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); |
| 147 | clear_tsk_thread_flag(p, TIF_USEDFPU); | ||
| 148 | 147 | ||
| 149 | #ifdef CONFIG_MIPS_MT_FPAFF | 148 | #ifdef CONFIG_MIPS_MT_SMTC |
| 150 | /* | 149 | /* |
| 151 | * FPU affinity support is cleaner if we track the | 150 | * SMTC restores TCStatus after Status, and the CU bits |
| 152 | * user-visible CPU affinity from the very beginning. | 151 | * are aliased there. |
| 153 | * The generic cpus_allowed mask will already have | ||
| 154 | * been copied from the parent before copy_thread | ||
| 155 | * is invoked. | ||
| 156 | */ | 152 | */ |
| 157 | p->thread.user_cpus_allowed = p->cpus_allowed; | 153 | childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1); |
| 154 | #endif | ||
| 155 | clear_tsk_thread_flag(p, TIF_USEDFPU); | ||
| 156 | |||
| 157 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
| 158 | clear_tsk_thread_flag(p, TIF_FPUBOUND); | ||
| 158 | #endif /* CONFIG_MIPS_MT_FPAFF */ | 159 | #endif /* CONFIG_MIPS_MT_FPAFF */ |
| 159 | 160 | ||
| 160 | if (clone_flags & CLONE_SETTLS) | 161 | if (clone_flags & CLONE_SETTLS) |
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 35234b92b9a5..054861ccb4dd 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
| @@ -46,7 +46,8 @@ | |||
| 46 | */ | 46 | */ |
| 47 | void ptrace_disable(struct task_struct *child) | 47 | void ptrace_disable(struct task_struct *child) |
| 48 | { | 48 | { |
| 49 | /* Nothing to do.. */ | 49 | /* Don't load the watchpoint registers for the ex-child. */ |
| 50 | clear_tsk_thread_flag(child, TIF_LOAD_WATCH); | ||
| 50 | } | 51 | } |
| 51 | 52 | ||
| 52 | /* | 53 | /* |
| @@ -167,6 +168,93 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) | |||
| 167 | return 0; | 168 | return 0; |
| 168 | } | 169 | } |
| 169 | 170 | ||
| 171 | int ptrace_get_watch_regs(struct task_struct *child, | ||
| 172 | struct pt_watch_regs __user *addr) | ||
| 173 | { | ||
| 174 | enum pt_watch_style style; | ||
| 175 | int i; | ||
| 176 | |||
| 177 | if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0) | ||
| 178 | return -EIO; | ||
| 179 | if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs))) | ||
| 180 | return -EIO; | ||
| 181 | |||
| 182 | #ifdef CONFIG_32BIT | ||
| 183 | style = pt_watch_style_mips32; | ||
| 184 | #define WATCH_STYLE mips32 | ||
| 185 | #else | ||
| 186 | style = pt_watch_style_mips64; | ||
| 187 | #define WATCH_STYLE mips64 | ||
| 188 | #endif | ||
| 189 | |||
| 190 | __put_user(style, &addr->style); | ||
| 191 | __put_user(current_cpu_data.watch_reg_use_cnt, | ||
| 192 | &addr->WATCH_STYLE.num_valid); | ||
| 193 | for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) { | ||
| 194 | __put_user(child->thread.watch.mips3264.watchlo[i], | ||
| 195 | &addr->WATCH_STYLE.watchlo[i]); | ||
| 196 | __put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff, | ||
| 197 | &addr->WATCH_STYLE.watchhi[i]); | ||
| 198 | __put_user(current_cpu_data.watch_reg_masks[i], | ||
| 199 | &addr->WATCH_STYLE.watch_masks[i]); | ||
| 200 | } | ||
| 201 | for (; i < 8; i++) { | ||
| 202 | __put_user(0, &addr->WATCH_STYLE.watchlo[i]); | ||
| 203 | __put_user(0, &addr->WATCH_STYLE.watchhi[i]); | ||
| 204 | __put_user(0, &addr->WATCH_STYLE.watch_masks[i]); | ||
| 205 | } | ||
| 206 | |||
| 207 | return 0; | ||
| 208 | } | ||
| 209 | |||
| 210 | int ptrace_set_watch_regs(struct task_struct *child, | ||
| 211 | struct pt_watch_regs __user *addr) | ||
| 212 | { | ||
| 213 | int i; | ||
| 214 | int watch_active = 0; | ||
| 215 | unsigned long lt[NUM_WATCH_REGS]; | ||
| 216 | u16 ht[NUM_WATCH_REGS]; | ||
| 217 | |||
| 218 | if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0) | ||
| 219 | return -EIO; | ||
| 220 | if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs))) | ||
| 221 | return -EIO; | ||
| 222 | /* Check the values. */ | ||
| 223 | for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) { | ||
| 224 | __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]); | ||
| 225 | #ifdef CONFIG_32BIT | ||
| 226 | if (lt[i] & __UA_LIMIT) | ||
| 227 | return -EINVAL; | ||
| 228 | #else | ||
| 229 | if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) { | ||
| 230 | if (lt[i] & 0xffffffff80000000UL) | ||
| 231 | return -EINVAL; | ||
| 232 | } else { | ||
| 233 | if (lt[i] & __UA_LIMIT) | ||
| 234 | return -EINVAL; | ||
| 235 | } | ||
| 236 | #endif | ||
| 237 | __get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]); | ||
| 238 | if (ht[i] & ~0xff8) | ||
| 239 | return -EINVAL; | ||
| 240 | } | ||
| 241 | /* Install them. */ | ||
| 242 | for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) { | ||
| 243 | if (lt[i] & 7) | ||
| 244 | watch_active = 1; | ||
| 245 | child->thread.watch.mips3264.watchlo[i] = lt[i]; | ||
| 246 | /* Set the G bit. */ | ||
| 247 | child->thread.watch.mips3264.watchhi[i] = ht[i]; | ||
| 248 | } | ||
| 249 | |||
| 250 | if (watch_active) | ||
| 251 | set_tsk_thread_flag(child, TIF_LOAD_WATCH); | ||
| 252 | else | ||
| 253 | clear_tsk_thread_flag(child, TIF_LOAD_WATCH); | ||
| 254 | |||
| 255 | return 0; | ||
| 256 | } | ||
| 257 | |||
| 170 | long arch_ptrace(struct task_struct *child, long request, long addr, long data) | 258 | long arch_ptrace(struct task_struct *child, long request, long addr, long data) |
| 171 | { | 259 | { |
| 172 | int ret; | 260 | int ret; |
| @@ -238,7 +326,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
| 238 | case FPC_EIR: { /* implementation / version register */ | 326 | case FPC_EIR: { /* implementation / version register */ |
| 239 | unsigned int flags; | 327 | unsigned int flags; |
| 240 | #ifdef CONFIG_MIPS_MT_SMTC | 328 | #ifdef CONFIG_MIPS_MT_SMTC |
| 241 | unsigned int irqflags; | 329 | unsigned long irqflags; |
| 242 | unsigned int mtflags; | 330 | unsigned int mtflags; |
| 243 | #endif /* CONFIG_MIPS_MT_SMTC */ | 331 | #endif /* CONFIG_MIPS_MT_SMTC */ |
| 244 | 332 | ||
| @@ -440,6 +528,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
| 440 | (unsigned long __user *) data); | 528 | (unsigned long __user *) data); |
| 441 | break; | 529 | break; |
| 442 | 530 | ||
| 531 | case PTRACE_GET_WATCH_REGS: | ||
| 532 | ret = ptrace_get_watch_regs(child, | ||
| 533 | (struct pt_watch_regs __user *) addr); | ||
| 534 | break; | ||
| 535 | |||
| 536 | case PTRACE_SET_WATCH_REGS: | ||
| 537 | ret = ptrace_set_watch_regs(child, | ||
| 538 | (struct pt_watch_regs __user *) addr); | ||
| 539 | break; | ||
| 540 | |||
| 443 | default: | 541 | default: |
| 444 | ret = ptrace_request(child, request, addr, data); | 542 | ret = ptrace_request(child, request, addr, data); |
| 445 | break; | 543 | break; |
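A user-space sketch of how a debugger might drive the new requests (hypothetical code, not part of the patch; it assumes struct pt_watch_regs and the PTRACE_GET_WATCH_REGS / PTRACE_SET_WATCH_REGS constants are exported through <asm/ptrace.h>, with union members named as in the WATCH_STYLE accesses above):

    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <asm/ptrace.h>   /* struct pt_watch_regs, PTRACE_*_WATCH_REGS */

    /* Arm watch register 0 as a write watchpoint on a 32-bit target. */
    static int set_write_watch(pid_t child, unsigned long addr)
    {
        struct pt_watch_regs wr;

        if (ptrace(PTRACE_GET_WATCH_REGS, child, &wr, NULL) < 0)
            return -1;

        /* The low three bits of watchlo are the I/R/W enables;
         * bit 0 arms a write watch on the 8-byte-aligned address. */
        wr.mips32.watchlo[0] = (addr & ~7UL) | 1;
        wr.mips32.watchhi[0] = 0;   /* no address-mask qualification */

        return ptrace(PTRACE_SET_WATCH_REGS, child, &wr, NULL);
    }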
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 76818be6ba7c..1ca34104e593 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
| @@ -15,6 +15,7 @@ | |||
| 15 | * binaries. | 15 | * binaries. |
| 16 | */ | 16 | */ |
| 17 | #include <linux/compiler.h> | 17 | #include <linux/compiler.h> |
| 18 | #include <linux/compat.h> | ||
| 18 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
| 19 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
| 20 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
| @@ -36,47 +37,17 @@ | |||
| 36 | #include <asm/uaccess.h> | 37 | #include <asm/uaccess.h> |
| 37 | #include <asm/bootinfo.h> | 38 | #include <asm/bootinfo.h> |
| 38 | 39 | ||
| 39 | int ptrace_getregs(struct task_struct *child, __s64 __user *data); | ||
| 40 | int ptrace_setregs(struct task_struct *child, __s64 __user *data); | ||
| 41 | |||
| 42 | int ptrace_getfpregs(struct task_struct *child, __u32 __user *data); | ||
| 43 | int ptrace_setfpregs(struct task_struct *child, __u32 __user *data); | ||
| 44 | |||
| 45 | /* | 40 | /* |
| 46 | * Tracing a 32-bit process with a 64-bit strace and vice versa will not | 41 | * Tracing a 32-bit process with a 64-bit strace and vice versa will not |
| 47 | * work. I don't know how to fix this. | 42 | * work. I don't know how to fix this. |
| 48 | */ | 43 | */ |
| 49 | asmlinkage int sys32_ptrace(int request, int pid, int addr, int data) | 44 | long compat_arch_ptrace(struct task_struct *child, compat_long_t request, |
| 45 | compat_ulong_t caddr, compat_ulong_t cdata) | ||
| 50 | { | 46 | { |
| 51 | struct task_struct *child; | 47 | int addr = caddr; |
| 48 | int data = cdata; | ||
| 52 | int ret; | 49 | int ret; |
| 53 | 50 | ||
| 54 | #if 0 | ||
| 55 | printk("ptrace(r=%d,pid=%d,addr=%08lx,data=%08lx)\n", | ||
| 56 | (int) request, (int) pid, (unsigned long) addr, | ||
| 57 | (unsigned long) data); | ||
| 58 | #endif | ||
| 59 | lock_kernel(); | ||
| 60 | if (request == PTRACE_TRACEME) { | ||
| 61 | ret = ptrace_traceme(); | ||
| 62 | goto out; | ||
| 63 | } | ||
| 64 | |||
| 65 | child = ptrace_get_task_struct(pid); | ||
| 66 | if (IS_ERR(child)) { | ||
| 67 | ret = PTR_ERR(child); | ||
| 68 | goto out; | ||
| 69 | } | ||
| 70 | |||
| 71 | if (request == PTRACE_ATTACH) { | ||
| 72 | ret = ptrace_attach(child); | ||
| 73 | goto out_tsk; | ||
| 74 | } | ||
| 75 | |||
| 76 | ret = ptrace_check_attach(child, request == PTRACE_KILL); | ||
| 77 | if (ret < 0) | ||
| 78 | goto out_tsk; | ||
| 79 | |||
| 80 | switch (request) { | 51 | switch (request) { |
| 81 | /* when I and D space are separate, these will need to be fixed. */ | 52 | /* when I and D space are separate, these will need to be fixed. */ |
| 82 | case PTRACE_PEEKTEXT: /* read word at location addr. */ | 53 | case PTRACE_PEEKTEXT: /* read word at location addr. */ |
| @@ -214,7 +185,7 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data) | |||
| 214 | if (!cpu_has_dsp) { | 185 | if (!cpu_has_dsp) { |
| 215 | tmp = 0; | 186 | tmp = 0; |
| 216 | ret = -EIO; | 187 | ret = -EIO; |
| 217 | goto out_tsk; | 188 | goto out; |
| 218 | } | 189 | } |
| 219 | dregs = __get_dsp_regs(child); | 190 | dregs = __get_dsp_regs(child); |
| 220 | tmp = (unsigned long) (dregs[addr - DSP_BASE]); | 191 | tmp = (unsigned long) (dregs[addr - DSP_BASE]); |
| @@ -224,14 +195,14 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data) | |||
| 224 | if (!cpu_has_dsp) { | 195 | if (!cpu_has_dsp) { |
| 225 | tmp = 0; | 196 | tmp = 0; |
| 226 | ret = -EIO; | 197 | ret = -EIO; |
| 227 | goto out_tsk; | 198 | goto out; |
| 228 | } | 199 | } |
| 229 | tmp = child->thread.dsp.dspcontrol; | 200 | tmp = child->thread.dsp.dspcontrol; |
| 230 | break; | 201 | break; |
| 231 | default: | 202 | default: |
| 232 | tmp = 0; | 203 | tmp = 0; |
| 233 | ret = -EIO; | 204 | ret = -EIO; |
| 234 | goto out_tsk; | 205 | goto out; |
| 235 | } | 206 | } |
| 236 | ret = put_user(tmp, (unsigned __user *) (unsigned long) data); | 207 | ret = put_user(tmp, (unsigned __user *) (unsigned long) data); |
| 237 | break; | 208 | break; |
| @@ -410,14 +381,20 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data) | |||
| 410 | (unsigned long __user *) (unsigned long) data); | 381 | (unsigned long __user *) (unsigned long) data); |
| 411 | break; | 382 | break; |
| 412 | 383 | ||
| 384 | case PTRACE_GET_WATCH_REGS: | ||
| 385 | ret = ptrace_get_watch_regs(child, | ||
| 386 | (struct pt_watch_regs __user *) (unsigned long) addr); | ||
| 387 | break; | ||
| 388 | |||
| 389 | case PTRACE_SET_WATCH_REGS: | ||
| 390 | ret = ptrace_set_watch_regs(child, | ||
| 391 | (struct pt_watch_regs __user *) (unsigned long) addr); | ||
| 392 | break; | ||
| 393 | |||
| 413 | default: | 394 | default: |
| 414 | ret = ptrace_request(child, request, addr, data); | 395 | ret = ptrace_request(child, request, addr, data); |
| 415 | break; | 396 | break; |
| 416 | } | 397 | } |
| 417 | |||
| 418 | out_tsk: | ||
| 419 | put_task_struct(child); | ||
| 420 | out: | 398 | out: |
| 421 | unlock_kernel(); | ||
| 422 | return ret; | 399 | return ret; |
| 423 | } | 400 | } |
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index da7f1b6ea0fb..324c5499dec2 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
| @@ -219,7 +219,7 @@ EXPORT(sysn32_call_table) | |||
| 219 | PTR compat_sys_getrusage | 219 | PTR compat_sys_getrusage |
| 220 | PTR compat_sys_sysinfo | 220 | PTR compat_sys_sysinfo |
| 221 | PTR compat_sys_times | 221 | PTR compat_sys_times |
| 222 | PTR sys32_ptrace | 222 | PTR compat_sys_ptrace |
| 223 | PTR sys_getuid /* 6100 */ | 223 | PTR sys_getuid /* 6100 */ |
| 224 | PTR sys_syslog | 224 | PTR sys_syslog |
| 225 | PTR sys_getgid | 225 | PTR sys_getgid |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index d7cd1aac9ada..85fedac99a57 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
| @@ -231,7 +231,7 @@ sys_call_table: | |||
| 231 | PTR sys_setuid | 231 | PTR sys_setuid |
| 232 | PTR sys_getuid | 232 | PTR sys_getuid |
| 233 | PTR compat_sys_stime /* 4025 */ | 233 | PTR compat_sys_stime /* 4025 */ |
| 234 | PTR sys32_ptrace | 234 | PTR compat_sys_ptrace |
| 235 | PTR sys_alarm | 235 | PTR sys_alarm |
| 236 | PTR sys_ni_syscall /* was sys_fstat */ | 236 | PTR sys_ni_syscall /* was sys_fstat */ |
| 237 | PTR sys_pause | 237 | PTR sys_pause |
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 2aae76bce293..16f8edfe5cdc 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
| @@ -160,30 +160,33 @@ early_param("rd_size", rd_size_early); | |||
| 160 | static unsigned long __init init_initrd(void) | 160 | static unsigned long __init init_initrd(void) |
| 161 | { | 161 | { |
| 162 | unsigned long end; | 162 | unsigned long end; |
| 163 | u32 *initrd_header; | ||
| 164 | 163 | ||
| 165 | /* | 164 | /* |
| 166 | * Board specific code or command line parser should have | 165 | * Board specific code or command line parser should have |
| 167 | * already set up initrd_start and initrd_end. In these cases | 166 | * already set up initrd_start and initrd_end. In these cases |
| 168 | * perform sanity checks and use them if all looks good. | 167 | * perform sanity checks and use them if all looks good. |
| 169 | */ | 168 | */ |
| 170 | if (initrd_start && initrd_end > initrd_start) | 169 | if (!initrd_start || initrd_end <= initrd_start) { |
| 171 | goto sanitize; | 170 | #ifdef CONFIG_PROBE_INITRD_HEADER |
| 171 | u32 *initrd_header; | ||
| 172 | 172 | ||
| 173 | /* | 173 | /* |
| 174 | * See if initrd has been added to the kernel image by | 174 | * See if initrd has been added to the kernel image by |
| 175 | * arch/mips/boot/addinitrd.c. In that case a header is | 175 | * arch/mips/boot/addinitrd.c. In that case a header is |
| 176 | * prepended to initrd and is made up by 8 bytes. The fisrt | 176 | * prepended to initrd and is made up by 8 bytes. The first |
| 177 | * word is a magic number and the second one is the size of | 177 | * word is a magic number and the second one is the size of |
| 178 | * initrd. Initrd start must be page aligned in any cases. | 178 | * initrd. Initrd start must be page aligned in any cases. |
| 179 | */ | 179 | */ |
| 180 | initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8; | 180 | initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8; |
| 181 | if (initrd_header[0] != 0x494E5244) | 181 | if (initrd_header[0] != 0x494E5244) |
| 182 | goto disable; | ||
| 183 | initrd_start = (unsigned long)(initrd_header + 2); | ||
| 184 | initrd_end = initrd_start + initrd_header[1]; | ||
| 185 | #else | ||
| 182 | goto disable; | 186 | goto disable; |
| 183 | initrd_start = (unsigned long)(initrd_header + 2); | 187 | #endif |
| 184 | initrd_end = initrd_start + initrd_header[1]; | 188 | } |
| 185 | 189 | ||
| 186 | sanitize: | ||
| 187 | if (initrd_start & ~PAGE_MASK) { | 190 | if (initrd_start & ~PAGE_MASK) { |
| 188 | pr_err("initrd start must be page aligned\n"); | 191 | pr_err("initrd start must be page aligned\n"); |
| 189 | goto disable; | 192 | goto disable; |
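The header probed here is the 8-byte record that arch/mips/boot/addinitrd.c prepends: a magic word (0x494E5244, "INRD" in ASCII) followed by the image size, placed so that the initrd proper starts on a page boundary. A minimal sketch of that layout; the struct name is purely illustrative, the kernel code above does the same thing with a bare u32 pointer:

    /* Illustrative layout of the CONFIG_PROBE_INITRD_HEADER record */
    struct initrd_header {
            u32 magic;              /* 0x494E5244 == "INRD" */
            u32 size;               /* initrd length in bytes */
    };

    /* The record sits in the 8 bytes just below the page-aligned
     * start of the initrd image itself: */
    struct initrd_header *hdr =
            __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
    if (hdr->magic == 0x494E5244) {
            initrd_start = (unsigned long)(hdr + 1);  /* page aligned */
            initrd_end = initrd_start + hdr->size;
    }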
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 572c610db1b1..652709b353ad 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c | |||
| @@ -482,6 +482,18 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) | |||
| 482 | return err; | 482 | return err; |
| 483 | } | 483 | } |
| 484 | 484 | ||
| 485 | int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) | ||
| 486 | { | ||
| 487 | memset(to, 0, sizeof *to); | ||
| 488 | |||
| 489 | if (copy_from_user(to, from, 3*sizeof(int)) || | ||
| 490 | copy_from_user(to->_sifields._pad, | ||
| 491 | from->_sifields._pad, SI_PAD_SIZE32)) | ||
| 492 | return -EFAULT; | ||
| 493 | |||
| 494 | return 0; | ||
| 495 | } | ||
| 496 | |||
| 485 | asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs) | 497 | asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs) |
| 486 | { | 498 | { |
| 487 | struct sigframe32 __user *frame; | 499 | struct sigframe32 __user *frame; |
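copy_siginfo_from_user32() can rely on a 3*sizeof(int) copy because the native and compat siginfo share the same three leading int fields; only the union that follows differs in padded size, which is why its pad is copied separately with SI_PAD_SIZE32. A sketch of the shared prefix being assumed (note that MIPS orders si_code before si_errno, unlike most architectures; the real definitions live in asm/siginfo.h):

    /* Illustrative prefix common to siginfo_t and compat_siginfo_t
     * on MIPS; the struct name is not a kernel type. */
    struct siginfo_prefix {
            int si_signo;
            int si_code;            /* MIPS swaps code and errno */
            int si_errno;
            /* union _sifields follows: SI_PAD_SIZE32 bytes of
             * padding on the compat side, copied as raw bytes */
    };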
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 4410f172b8ab..7b59cfb7e602 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c | |||
| @@ -121,6 +121,8 @@ asmlinkage __cpuinit void start_secondary(void) | |||
| 121 | cpu = smp_processor_id(); | 121 | cpu = smp_processor_id(); |
| 122 | cpu_data[cpu].udelay_val = loops_per_jiffy; | 122 | cpu_data[cpu].udelay_val = loops_per_jiffy; |
| 123 | 123 | ||
| 124 | notify_cpu_starting(cpu); | ||
| 125 | |||
| 124 | mp_ops->smp_finish(); | 126 | mp_ops->smp_finish(); |
| 125 | set_cpu_sibling_map(cpu); | 127 | set_cpu_sibling_map(cpu); |
| 126 | 128 | ||
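notify_cpu_starting() fires the CPU_STARTING notifier chain on the incoming CPU itself, with interrupts still disabled, before the CPU is marked online; the call had been missing on MIPS. A hedged sketch of a consumer that would now be reached on secondaries (all names illustrative):

    #include <linux/cpu.h>
    #include <linux/notifier.h>

    static int __cpuinit demo_cpu_callback(struct notifier_block *nfb,
                                           unsigned long action,
                                           void *hcpu)
    {
            /* CPU_STARTING runs on the new CPU, irqs off, not yet online */
            if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
                    printk(KERN_INFO "CPU%lu starting\n",
                           (unsigned long)hcpu);
            return NOTIFY_OK;
    }

    static struct notifier_block demo_cpu_nfb __cpuinitdata = {
            .notifier_call = demo_cpu_callback,
    };
    /* registered during init with register_cpu_notifier(&demo_cpu_nfb) */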
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index a516286532ab..897fb2b4751c 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
| @@ -1,4 +1,21 @@ | |||
| 1 | /* Copyright (C) 2004 Mips Technologies, Inc */ | 1 | /* |
| 2 | * This program is free software; you can redistribute it and/or | ||
| 3 | * modify it under the terms of the GNU General Public License | ||
| 4 | * as published by the Free Software Foundation; either version 2 | ||
| 5 | * of the License, or (at your option) any later version. | ||
| 6 | * | ||
| 7 | * This program is distributed in the hope that it will be useful, | ||
| 8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 10 | * GNU General Public License for more details. | ||
| 11 | * | ||
| 12 | * You should have received a copy of the GNU General Public License | ||
| 13 | * along with this program; if not, write to the Free Software | ||
| 14 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 15 | * | ||
| 16 | * Copyright (C) 2004 Mips Technologies, Inc | ||
| 17 | * Copyright (C) 2008 Kevin D. Kissell | ||
| 18 | */ | ||
| 2 | 19 | ||
| 3 | #include <linux/clockchips.h> | 20 | #include <linux/clockchips.h> |
| 4 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
| @@ -21,7 +38,6 @@ | |||
| 21 | #include <asm/time.h> | 38 | #include <asm/time.h> |
| 22 | #include <asm/addrspace.h> | 39 | #include <asm/addrspace.h> |
| 23 | #include <asm/smtc.h> | 40 | #include <asm/smtc.h> |
| 24 | #include <asm/smtc_ipi.h> | ||
| 25 | #include <asm/smtc_proc.h> | 41 | #include <asm/smtc_proc.h> |
| 26 | 42 | ||
| 27 | /* | 43 | /* |
| @@ -58,11 +74,6 @@ unsigned long irq_hwmask[NR_IRQS]; | |||
| 58 | 74 | ||
| 59 | asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; | 75 | asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; |
| 60 | 76 | ||
| 61 | /* | ||
| 62 | * Clock interrupt "latch" buffers, per "CPU" | ||
| 63 | */ | ||
| 64 | |||
| 65 | static atomic_t ipi_timer_latch[NR_CPUS]; | ||
| 66 | 77 | ||
| 67 | /* | 78 | /* |
| 68 | * Number of InterProcessor Interrupt (IPI) message buffers to allocate | 79 | * Number of InterProcessor Interrupt (IPI) message buffers to allocate |
| @@ -70,7 +81,7 @@ static atomic_t ipi_timer_latch[NR_CPUS]; | |||
| 70 | 81 | ||
| 71 | #define IPIBUF_PER_CPU 4 | 82 | #define IPIBUF_PER_CPU 4 |
| 72 | 83 | ||
| 73 | static struct smtc_ipi_q IPIQ[NR_CPUS]; | 84 | struct smtc_ipi_q IPIQ[NR_CPUS]; |
| 74 | static struct smtc_ipi_q freeIPIq; | 85 | static struct smtc_ipi_q freeIPIq; |
| 75 | 86 | ||
| 76 | 87 | ||
| @@ -282,7 +293,7 @@ static void smtc_configure_tlb(void) | |||
| 282 | * phys_cpu_present_map and the logical/physical mappings. | 293 | * phys_cpu_present_map and the logical/physical mappings. |
| 283 | */ | 294 | */ |
| 284 | 295 | ||
| 285 | int __init mipsmt_build_cpu_map(int start_cpu_slot) | 296 | int __init smtc_build_cpu_map(int start_cpu_slot) |
| 286 | { | 297 | { |
| 287 | int i, ntcs; | 298 | int i, ntcs; |
| 288 | 299 | ||
| @@ -325,7 +336,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu) | |||
| 325 | write_tc_c0_tcstatus((read_tc_c0_tcstatus() | 336 | write_tc_c0_tcstatus((read_tc_c0_tcstatus() |
| 326 | & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) | 337 | & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) |
| 327 | | TCSTATUS_A); | 338 | | TCSTATUS_A); |
| 328 | write_tc_c0_tccontext(0); | 339 | /* |
| 340 | * TCContext gets an offset from the base of the IPIQ array | ||
| 341 | * to be used in low-level code to detect the presence of | ||
| 342 | * an active IPI queue | ||
| 343 | */ | ||
| 344 | write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16); | ||
| 329 | /* Bind tc to vpe */ | 345 | /* Bind tc to vpe */ |
| 330 | write_tc_c0_tcbind(vpe); | 346 | write_tc_c0_tcbind(vpe); |
| 331 | /* In general, all TCs should have the same cpu_data indications */ | 347 | /* In general, all TCs should have the same cpu_data indications */ |
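Parking a pre-scaled queue offset in TCContext lets exception-level code locate this TC's IPI queue with one register read and an add, instead of a cpu-times-sizeof multiply. A C rendering of the decode the low-level code is expected to perform (the actual consumer is assembly; read_c0_tccontext() is assumed to exist alongside the other CP0 accessors in <asm/mipsregs.h>):

    /* TCContext == (sizeof(struct smtc_ipi_q) * cpu) << 16, so: */
    unsigned long tcc = read_c0_tccontext();
    struct smtc_ipi_q *q = (struct smtc_ipi_q *)
            ((unsigned long)&IPIQ[0] + (tcc >> 16));
    if (q->head != NULL) {
            /* at least one IPI is pending for this "CPU" */
    }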
| @@ -336,10 +352,18 @@ static void smtc_tc_setup(int vpe, int tc, int cpu) | |||
| 336 | cpu_data[cpu].options &= ~MIPS_CPU_FPU; | 352 | cpu_data[cpu].options &= ~MIPS_CPU_FPU; |
| 337 | cpu_data[cpu].vpe_id = vpe; | 353 | cpu_data[cpu].vpe_id = vpe; |
| 338 | cpu_data[cpu].tc_id = tc; | 354 | cpu_data[cpu].tc_id = tc; |
| 355 | /* Multi-core SMTC hasn't been tested, but be prepared */ | ||
| 356 | cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff; | ||
| 339 | } | 357 | } |
| 340 | 358 | ||
| 359 | /* | ||
| 360 | * Tweak to get Count registes in as close a sync as possible. | ||
| 361 | * Value seems good for 34K-class cores. | ||
| 362 | */ | ||
| 363 | |||
| 364 | #define CP0_SKEW 8 | ||
| 341 | 365 | ||
| 342 | void mipsmt_prepare_cpus(void) | 366 | void smtc_prepare_cpus(int cpus) |
| 343 | { | 367 | { |
| 344 | int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; | 368 | int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; |
| 345 | unsigned long flags; | 369 | unsigned long flags; |
| @@ -363,13 +387,13 @@ void mipsmt_prepare_cpus(void) | |||
| 363 | IPIQ[i].head = IPIQ[i].tail = NULL; | 387 | IPIQ[i].head = IPIQ[i].tail = NULL; |
| 364 | spin_lock_init(&IPIQ[i].lock); | 388 | spin_lock_init(&IPIQ[i].lock); |
| 365 | IPIQ[i].depth = 0; | 389 | IPIQ[i].depth = 0; |
| 366 | atomic_set(&ipi_timer_latch[i], 0); | ||
| 367 | } | 390 | } |
| 368 | 391 | ||
| 369 | /* cpu_data index starts at zero */ | 392 | /* cpu_data index starts at zero */ |
| 370 | cpu = 0; | 393 | cpu = 0; |
| 371 | cpu_data[cpu].vpe_id = 0; | 394 | cpu_data[cpu].vpe_id = 0; |
| 372 | cpu_data[cpu].tc_id = 0; | 395 | cpu_data[cpu].tc_id = 0; |
| 396 | cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff; | ||
| 373 | cpu++; | 397 | cpu++; |
| 374 | 398 | ||
| 375 | /* Report on boot-time options */ | 399 | /* Report on boot-time options */ |
| @@ -484,7 +508,8 @@ void mipsmt_prepare_cpus(void) | |||
| 484 | write_vpe_c0_compare(0); | 508 | write_vpe_c0_compare(0); |
| 485 | /* Propagate Config7 */ | 509 | /* Propagate Config7 */ |
| 486 | write_vpe_c0_config7(read_c0_config7()); | 510 | write_vpe_c0_config7(read_c0_config7()); |
| 487 | write_vpe_c0_count(read_c0_count()); | 511 | write_vpe_c0_count(read_c0_count() + CP0_SKEW); |
| 512 | ehb(); | ||
| 488 | } | 513 | } |
| 489 | /* enable multi-threading within VPE */ | 514 | /* enable multi-threading within VPE */ |
| 490 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); | 515 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); |
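The CP0_SKEW addend compensates for the cycles that pass between the master's read_c0_count() and the moment the MTTR-based write actually lands in the slave VPE's Count register; 8 is an empirical value for 34K-class pipelines, as the comment says. If a read accessor for the slave Count is available (an assumption here, mirroring the write_vpe_c0_count() used above), the residual skew could be sanity-checked rather than trusted:

    /* Illustrative check only; read_vpe_c0_count() is assumed. */
    long skew = (long)(read_vpe_c0_count() - read_c0_count());
    if (skew < 0 || skew > 2 * CP0_SKEW)
            printk(KERN_WARNING "VPE Count skew: %ld cycles\n", skew);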
| @@ -556,7 +581,7 @@ void mipsmt_prepare_cpus(void) | |||
| 556 | void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) | 581 | void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) |
| 557 | { | 582 | { |
| 558 | extern u32 kernelsp[NR_CPUS]; | 583 | extern u32 kernelsp[NR_CPUS]; |
| 559 | long flags; | 584 | unsigned long flags; |
| 560 | int mtflags; | 585 | int mtflags; |
| 561 | 586 | ||
| 562 | LOCK_MT_PRA(); | 587 | LOCK_MT_PRA(); |
| @@ -585,24 +610,22 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) | |||
| 585 | 610 | ||
| 586 | void smtc_init_secondary(void) | 611 | void smtc_init_secondary(void) |
| 587 | { | 612 | { |
| 588 | /* | ||
| 589 | * Start timer on secondary VPEs if necessary. | ||
| 590 | * plat_timer_setup has already been invoked by init/main | ||
| 591 | * on "boot" TC. Like per_cpu_trap_init() hack, this assumes that | ||
| 592 | * SMTC init code assigns TCs consecutively and in ascending order | ||
| 593 | * across available VPEs. | ||
| 594 | */ | ||
| 595 | if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && | ||
| 596 | ((read_c0_tcbind() & TCBIND_CURVPE) | ||
| 597 | != cpu_data[smp_processor_id() - 1].vpe_id)){ | ||
| 598 | write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); | ||
| 599 | } | ||
| 600 | |||
| 601 | local_irq_enable(); | 613 | local_irq_enable(); |
| 602 | } | 614 | } |
| 603 | 615 | ||
| 604 | void smtc_smp_finish(void) | 616 | void smtc_smp_finish(void) |
| 605 | { | 617 | { |
| 618 | int cpu = smp_processor_id(); | ||
| 619 | |||
| 620 | /* | ||
| 621 | * Lowest-numbered CPU per VPE starts a clock tick. | ||
| 622 | * Like per_cpu_trap_init() hack, this assumes that | ||
| 623 | * SMTC init code assigns TCs consecutively and | ||
| 624 | * in ascending order across available VPEs. | ||
| 625 | */ | ||
| 626 | if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id)) | ||
| 627 | write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); | ||
| 628 | |||
| 606 | printk("TC %d going on-line as CPU %d\n", | 629 | printk("TC %d going on-line as CPU %d\n", |
| 607 | cpu_data[smp_processor_id()].tc_id, smp_processor_id()); | 630 | cpu_data[smp_processor_id()].tc_id, smp_processor_id()); |
| 608 | } | 631 | } |
| @@ -753,8 +776,10 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
| 753 | { | 776 | { |
| 754 | int tcstatus; | 777 | int tcstatus; |
| 755 | struct smtc_ipi *pipi; | 778 | struct smtc_ipi *pipi; |
| 756 | long flags; | 779 | unsigned long flags; |
| 757 | int mtflags; | 780 | int mtflags; |
| 781 | unsigned long tcrestart; | ||
| 782 | extern void r4k_wait_irqoff(void), __pastwait(void); | ||
| 758 | 783 | ||
| 759 | if (cpu == smp_processor_id()) { | 784 | if (cpu == smp_processor_id()) { |
| 760 | printk("Cannot Send IPI to self!\n"); | 785 | printk("Cannot Send IPI to self!\n"); |
| @@ -771,8 +796,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
| 771 | pipi->arg = (void *)action; | 796 | pipi->arg = (void *)action; |
| 772 | pipi->dest = cpu; | 797 | pipi->dest = cpu; |
| 773 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { | 798 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { |
| 774 | if (type == SMTC_CLOCK_TICK) | ||
| 775 | atomic_inc(&ipi_timer_latch[cpu]); | ||
| 776 | /* If not on same VPE, enqueue and send cross-VPE interrupt */ | 799 | /* If not on same VPE, enqueue and send cross-VPE interrupt */ |
| 777 | smtc_ipi_nq(&IPIQ[cpu], pipi); | 800 | smtc_ipi_nq(&IPIQ[cpu], pipi); |
| 778 | LOCK_CORE_PRA(); | 801 | LOCK_CORE_PRA(); |
| @@ -800,22 +823,29 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
| 800 | 823 | ||
| 801 | if ((tcstatus & TCSTATUS_IXMT) != 0) { | 824 | if ((tcstatus & TCSTATUS_IXMT) != 0) { |
| 802 | /* | 825 | /* |
| 803 | * Spin-waiting here can deadlock, | 826 | * If we're in the irq-off version of the wait |
| 804 | * so we queue the message for the target TC. | 827 | * loop, we need to force exit from the wait and |
| 828 | * do a direct post of the IPI. | ||
| 829 | */ | ||
| 830 | if (cpu_wait == r4k_wait_irqoff) { | ||
| 831 | tcrestart = read_tc_c0_tcrestart(); | ||
| 832 | if (tcrestart >= (unsigned long)r4k_wait_irqoff | ||
| 833 | && tcrestart < (unsigned long)__pastwait) { | ||
| 834 | write_tc_c0_tcrestart(__pastwait); | ||
| 835 | tcstatus &= ~TCSTATUS_IXMT; | ||
| 836 | write_tc_c0_tcstatus(tcstatus); | ||
| 837 | goto postdirect; | ||
| 838 | } | ||
| 839 | } | ||
| 840 | /* | ||
| 841 | * Otherwise we queue the message for the target TC | ||
| 842 | * to pick up when it does a local_irq_restore() | ||
| 805 | */ | 843 | */ |
| 806 | write_tc_c0_tchalt(0); | 844 | write_tc_c0_tchalt(0); |
| 807 | UNLOCK_CORE_PRA(); | 845 | UNLOCK_CORE_PRA(); |
| 808 | /* Try to reduce redundant timer interrupt messages */ | ||
| 809 | if (type == SMTC_CLOCK_TICK) { | ||
| 810 | if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){ | ||
| 811 | smtc_ipi_nq(&freeIPIq, pipi); | ||
| 812 | return; | ||
| 813 | } | ||
| 814 | } | ||
| 815 | smtc_ipi_nq(&IPIQ[cpu], pipi); | 846 | smtc_ipi_nq(&IPIQ[cpu], pipi); |
| 816 | } else { | 847 | } else { |
| 817 | if (type == SMTC_CLOCK_TICK) | 848 | postdirect: |
| 818 | atomic_inc(&ipi_timer_latch[cpu]); | ||
| 819 | post_direct_ipi(cpu, pipi); | 849 | post_direct_ipi(cpu, pipi); |
| 820 | write_tc_c0_tchalt(0); | 850 | write_tc_c0_tchalt(0); |
| 821 | UNLOCK_CORE_PRA(); | 851 | UNLOCK_CORE_PRA(); |
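The new IXMT branch covers a target TC caught inside the interrupts-off idle loop: if TCRestart lies in [r4k_wait_irqoff, __pastwait), the TC is committed to (or already in) its WAIT, so the sender steers it past the wait, clears IXMT, and posts the IPI directly. A C rendering of the idle-side contract this relies on (the real code is the r4k_wait_irqoff assembly between those two labels in genex.S; this is illustrative only):

    /* Idle side, between the two labels the sender tests against: */
    local_irq_disable();            /* sets TCStatus.IXMT under SMTC */
    if (!need_resched())
            __asm__ __volatile__("wait");
    /* __pastwait: a sender that rewrote TCRestart resumes us here,
     * past the WAIT, so the freshly queued IPI gets handled. */
    local_irq_enable();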
| @@ -883,7 +913,7 @@ static void ipi_call_interrupt(void) | |||
| 883 | smp_call_function_interrupt(); | 913 | smp_call_function_interrupt(); |
| 884 | } | 914 | } |
| 885 | 915 | ||
| 886 | DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); | 916 | DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); |
| 887 | 917 | ||
| 888 | void ipi_decode(struct smtc_ipi *pipi) | 918 | void ipi_decode(struct smtc_ipi *pipi) |
| 889 | { | 919 | { |
| @@ -891,20 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi) | |||
| 891 | struct clock_event_device *cd; | 921 | struct clock_event_device *cd; |
| 892 | void *arg_copy = pipi->arg; | 922 | void *arg_copy = pipi->arg; |
| 893 | int type_copy = pipi->type; | 923 | int type_copy = pipi->type; |
| 894 | int ticks; | ||
| 895 | |||
| 896 | smtc_ipi_nq(&freeIPIq, pipi); | 924 | smtc_ipi_nq(&freeIPIq, pipi); |
| 897 | switch (type_copy) { | 925 | switch (type_copy) { |
| 898 | case SMTC_CLOCK_TICK: | 926 | case SMTC_CLOCK_TICK: |
| 899 | irq_enter(); | 927 | irq_enter(); |
| 900 | kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++; | 928 | kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++; |
| 901 | cd = &per_cpu(smtc_dummy_clockevent_device, cpu); | 929 | cd = &per_cpu(mips_clockevent_device, cpu); |
| 902 | ticks = atomic_read(&ipi_timer_latch[cpu]); | 930 | cd->event_handler(cd); |
| 903 | atomic_sub(ticks, &ipi_timer_latch[cpu]); | ||
| 904 | while (ticks) { | ||
| 905 | cd->event_handler(cd); | ||
| 906 | ticks--; | ||
| 907 | } | ||
| 908 | irq_exit(); | 931 | irq_exit(); |
| 909 | break; | 932 | break; |
| 910 | 933 | ||
| @@ -937,24 +960,48 @@ void ipi_decode(struct smtc_ipi *pipi) | |||
| 937 | } | 960 | } |
| 938 | } | 961 | } |
| 939 | 962 | ||
| 963 | /* | ||
| 964 | * Similar to smtc_ipi_replay(), but invoked from context restore, | ||
| 965 | * so it reuses the current exception frame rather than setting up a | ||
| 966 | * new one with self_ipi. | ||
| 967 | */ | ||
| 968 | |||
| 940 | void deferred_smtc_ipi(void) | 969 | void deferred_smtc_ipi(void) |
| 941 | { | 970 | { |
| 942 | struct smtc_ipi *pipi; | 971 | int cpu = smp_processor_id(); |
| 943 | unsigned long flags; | ||
| 944 | /* DEBUG */ | ||
| 945 | int q = smp_processor_id(); | ||
| 946 | 972 | ||
| 947 | /* | 973 | /* |
| 948 | * Test is not atomic, but much faster than a dequeue, | 974 | * Test is not atomic, but much faster than a dequeue, |
| 949 | * and the vast majority of invocations will have a null queue. | 975 | * and the vast majority of invocations will have a null queue. |
| 976 | * If irqs were disabled when this was called, then any IPIs queued | ||
| 977 | * after our last test will be taken on the next irq_enable/restore. | ||
| 978 | * If interrupts were enabled, then any IPIs added after the | ||
| 979 | * last test will be taken directly. | ||
| 950 | */ | 980 | */ |
| 951 | if (IPIQ[q].head != NULL) { | 981 | |
| 952 | while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) { | 982 | while (IPIQ[cpu].head != NULL) { |
| 953 | /* ipi_decode() should be called with interrupts off */ | 983 | struct smtc_ipi_q *q = &IPIQ[cpu]; |
| 954 | local_irq_save(flags); | 984 | struct smtc_ipi *pipi; |
| 985 | unsigned long flags; | ||
| 986 | |||
| 987 | /* | ||
| 988 | * It may be possible we'll come in with interrupts | ||
| 989 | * already enabled. | ||
| 990 | */ | ||
| 991 | local_irq_save(flags); | ||
| 992 | |||
| 993 | spin_lock(&q->lock); | ||
| 994 | pipi = __smtc_ipi_dq(q); | ||
| 995 | spin_unlock(&q->lock); | ||
| 996 | if (pipi != NULL) | ||
| 955 | ipi_decode(pipi); | 997 | ipi_decode(pipi); |
| 956 | local_irq_restore(flags); | 998 | /* |
| 957 | } | 999 | * The use of the __raw_local restore isn't |
| 1000 | * as obviously necessary here as in smtc_ipi_replay(), | ||
| 1001 | * but it's more efficient, given that we're already | ||
| 1002 | * running down the IPI queue. | ||
| 1003 | */ | ||
| 1004 | __raw_local_irq_restore(flags); | ||
| 958 | } | 1005 | } |
| 959 | } | 1006 | } |
| 960 | 1007 | ||
| @@ -975,7 +1022,7 @@ static irqreturn_t ipi_interrupt(int irq, void *dev_idm) | |||
| 975 | struct smtc_ipi *pipi; | 1022 | struct smtc_ipi *pipi; |
| 976 | unsigned long tcstatus; | 1023 | unsigned long tcstatus; |
| 977 | int sent; | 1024 | int sent; |
| 978 | long flags; | 1025 | unsigned long flags; |
| 979 | unsigned int mtflags; | 1026 | unsigned int mtflags; |
| 980 | unsigned int vpflags; | 1027 | unsigned int vpflags; |
| 981 | 1028 | ||
| @@ -1066,55 +1113,53 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe) | |||
| 1066 | 1113 | ||
| 1067 | /* | 1114 | /* |
| 1068 | * SMTC-specific hacks invoked from elsewhere in the kernel. | 1115 | * SMTC-specific hacks invoked from elsewhere in the kernel. |
| 1069 | * | ||
| 1070 | * smtc_ipi_replay is called from raw_local_irq_restore which is only ever | ||
| 1071 | * called with interrupts disabled. We do rely on interrupts being disabled | ||
| 1072 | * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would | ||
| 1073 | * result in a recursive call to raw_local_irq_restore(). | ||
| 1074 | */ | 1116 | */ |
| 1075 | 1117 | ||
| 1076 | static void __smtc_ipi_replay(void) | 1118 | /* |
| 1119 | * smtc_ipi_replay is called from raw_local_irq_restore | ||
| 1120 | */ | ||
| 1121 | |||
| 1122 | void smtc_ipi_replay(void) | ||
| 1077 | { | 1123 | { |
| 1078 | unsigned int cpu = smp_processor_id(); | 1124 | unsigned int cpu = smp_processor_id(); |
| 1079 | 1125 | ||
| 1080 | /* | 1126 | /* |
| 1081 | * To the extent that we've ever turned interrupts off, | 1127 | * To the extent that we've ever turned interrupts off, |
| 1082 | * we may have accumulated deferred IPIs. This is subtle. | 1128 | * we may have accumulated deferred IPIs. This is subtle. |
| 1083 | * If we use the smtc_ipi_qdepth() macro, we'll get an | ||
| 1084 | * exact number - but we'll also disable interrupts | ||
| 1085 | * and create a window of failure where a new IPI gets | ||
| 1086 | * queued after we test the depth but before we re-enable | ||
| 1087 | * interrupts. So long as IXMT never gets set, however, | ||
| 1088 | * we should be OK: If we pick up something and dispatch | 1129 | * we should be OK: If we pick up something and dispatch |
| 1089 | * it here, that's great. If we see nothing, but concurrent | 1130 | * it here, that's great. If we see nothing, but concurrent |
| 1090 | * with this operation, another TC sends us an IPI, IXMT | 1131 | * with this operation, another TC sends us an IPI, IXMT |
| 1091 | * is clear, and we'll handle it as a real pseudo-interrupt | 1132 | * is clear, and we'll handle it as a real pseudo-interrupt |
| 1092 | * and not a pseudo-pseudo interrupt. | 1133 | * and not a pseudo-pseudo interrupt. The important thing |
| 1134 | * is to do the last check for queued message *after* the | ||
| 1135 | * re-enabling of interrupts. | ||
| 1093 | */ | 1136 | */ |
| 1094 | if (IPIQ[cpu].depth > 0) { | 1137 | while (IPIQ[cpu].head != NULL) { |
| 1095 | while (1) { | 1138 | struct smtc_ipi_q *q = &IPIQ[cpu]; |
| 1096 | struct smtc_ipi_q *q = &IPIQ[cpu]; | 1139 | struct smtc_ipi *pipi; |
| 1097 | struct smtc_ipi *pipi; | 1140 | unsigned long flags; |
| 1098 | extern void self_ipi(struct smtc_ipi *); | 1141 | |
| 1099 | 1142 | /* | |
| 1100 | spin_lock(&q->lock); | 1143 | * It's just possible we'll come in with interrupts |
| 1101 | pipi = __smtc_ipi_dq(q); | 1144 | * already enabled. |
| 1102 | spin_unlock(&q->lock); | 1145 | */ |
| 1103 | if (!pipi) | 1146 | local_irq_save(flags); |
| 1104 | break; | 1147 | |
| 1148 | spin_lock(&q->lock); | ||
| 1149 | pipi = __smtc_ipi_dq(q); | ||
| 1150 | spin_unlock(&q->lock); | ||
| 1151 | /* | ||
| 1152 | * But use a raw restore here to avoid recursion. | ||
| 1153 | */ | ||
| 1154 | __raw_local_irq_restore(flags); | ||
| 1105 | 1155 | ||
| 1156 | if (pipi) { | ||
| 1106 | self_ipi(pipi); | 1157 | self_ipi(pipi); |
| 1107 | smtc_cpu_stats[cpu].selfipis++; | 1158 | smtc_cpu_stats[cpu].selfipis++; |
| 1108 | } | 1159 | } |
| 1109 | } | 1160 | } |
| 1110 | } | 1161 | } |
| 1111 | 1162 | ||
| 1112 | void smtc_ipi_replay(void) | ||
| 1113 | { | ||
| 1114 | raw_local_irq_disable(); | ||
| 1115 | __smtc_ipi_replay(); | ||
| 1116 | } | ||
| 1117 | |||
| 1118 | EXPORT_SYMBOL(smtc_ipi_replay); | 1163 | EXPORT_SYMBOL(smtc_ipi_replay); |
| 1119 | 1164 | ||
| 1120 | void smtc_idle_loop_hook(void) | 1165 | void smtc_idle_loop_hook(void) |
| @@ -1193,40 +1238,13 @@ void smtc_idle_loop_hook(void) | |||
| 1193 | } | 1238 | } |
| 1194 | } | 1239 | } |
| 1195 | 1240 | ||
| 1196 | /* | ||
| 1197 | * Now that we limit outstanding timer IPIs, check for hung TC | ||
| 1198 | */ | ||
| 1199 | for (tc = 0; tc < NR_CPUS; tc++) { | ||
| 1200 | /* Don't check ourself - we'll dequeue IPIs just below */ | ||
| 1201 | if ((tc != smp_processor_id()) && | ||
| 1202 | atomic_read(&ipi_timer_latch[tc]) > timerq_limit) { | ||
| 1203 | if (clock_hang_reported[tc] == 0) { | ||
| 1204 | pdb_msg += sprintf(pdb_msg, | ||
| 1205 | "TC %d looks hung with timer latch at %d\n", | ||
| 1206 | tc, atomic_read(&ipi_timer_latch[tc])); | ||
| 1207 | clock_hang_reported[tc]++; | ||
| 1208 | } | ||
| 1209 | } | ||
| 1210 | } | ||
| 1211 | emt(mtflags); | 1241 | emt(mtflags); |
| 1212 | local_irq_restore(flags); | 1242 | local_irq_restore(flags); |
| 1213 | if (pdb_msg != &id_ho_db_msg[0]) | 1243 | if (pdb_msg != &id_ho_db_msg[0]) |
| 1214 | printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); | 1244 | printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); |
| 1215 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ | 1245 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ |
| 1216 | 1246 | ||
| 1217 | /* | 1247 | smtc_ipi_replay(); |
| 1218 | * Replay any accumulated deferred IPIs. If "Instant Replay" | ||
| 1219 | * is in use, there should never be any. | ||
| 1220 | */ | ||
| 1221 | #ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY | ||
| 1222 | { | ||
| 1223 | unsigned long flags; | ||
| 1224 | |||
| 1225 | local_irq_save(flags); | ||
| 1226 | __smtc_ipi_replay(); | ||
| 1227 | local_irq_restore(flags); | ||
| 1228 | } | ||
| 1229 | #endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */ | ||
| 1230 | } | 1248 | } |
| 1231 | 1249 | ||
| 1232 | void smtc_soft_dump(void) | 1250 | void smtc_soft_dump(void) |
| @@ -1242,10 +1260,6 @@ void smtc_soft_dump(void) | |||
| 1242 | printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); | 1260 | printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); |
| 1243 | } | 1261 | } |
| 1244 | smtc_ipi_qdump(); | 1262 | smtc_ipi_qdump(); |
| 1245 | printk("Timer IPI Backlogs:\n"); | ||
| 1246 | for (i=0; i < NR_CPUS; i++) { | ||
| 1247 | printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i])); | ||
| 1248 | } | ||
| 1249 | printk("%d Recoveries of \"stolen\" FPU\n", | 1263 | printk("%d Recoveries of \"stolen\" FPU\n", |
| 1250 | atomic_read(&smtc_fpu_recoveries)); | 1264 | atomic_read(&smtc_fpu_recoveries)); |
| 1251 | } | 1265 | } |
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 426cced1e9dc..80b9e070c207 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
| @@ -42,10 +42,14 @@ | |||
| 42 | #include <asm/tlbdebug.h> | 42 | #include <asm/tlbdebug.h> |
| 43 | #include <asm/traps.h> | 43 | #include <asm/traps.h> |
| 44 | #include <asm/uaccess.h> | 44 | #include <asm/uaccess.h> |
| 45 | #include <asm/watch.h> | ||
| 45 | #include <asm/mmu_context.h> | 46 | #include <asm/mmu_context.h> |
| 46 | #include <asm/types.h> | 47 | #include <asm/types.h> |
| 47 | #include <asm/stacktrace.h> | 48 | #include <asm/stacktrace.h> |
| 48 | 49 | ||
| 50 | extern void check_wait(void); | ||
| 51 | extern asmlinkage void r4k_wait(void); | ||
| 52 | extern asmlinkage void rollback_handle_int(void); | ||
| 49 | extern asmlinkage void handle_int(void); | 53 | extern asmlinkage void handle_int(void); |
| 50 | extern asmlinkage void handle_tlbm(void); | 54 | extern asmlinkage void handle_tlbm(void); |
| 51 | extern asmlinkage void handle_tlbl(void); | 55 | extern asmlinkage void handle_tlbl(void); |
| @@ -373,8 +377,8 @@ void __noreturn die(const char * str, const struct pt_regs * regs) | |||
| 373 | do_exit(SIGSEGV); | 377 | do_exit(SIGSEGV); |
| 374 | } | 378 | } |
| 375 | 379 | ||
| 376 | extern const struct exception_table_entry __start___dbe_table[]; | 380 | extern struct exception_table_entry __start___dbe_table[]; |
| 377 | extern const struct exception_table_entry __stop___dbe_table[]; | 381 | extern struct exception_table_entry __stop___dbe_table[]; |
| 378 | 382 | ||
| 379 | __asm__( | 383 | __asm__( |
| 380 | " .section __dbe_table, \"a\"\n" | 384 | " .section __dbe_table, \"a\"\n" |
| @@ -822,8 +826,10 @@ static void mt_ase_fp_affinity(void) | |||
| 822 | if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { | 826 | if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { |
| 823 | cpumask_t tmask; | 827 | cpumask_t tmask; |
| 824 | 828 | ||
| 825 | cpus_and(tmask, current->thread.user_cpus_allowed, | 829 | current->thread.user_cpus_allowed |
| 826 | mt_fpu_cpumask); | 830 | = current->cpus_allowed; |
| 831 | cpus_and(tmask, current->cpus_allowed, | ||
| 832 | mt_fpu_cpumask); | ||
| 827 | set_cpus_allowed(current, tmask); | 833 | set_cpus_allowed(current, tmask); |
| 828 | set_thread_flag(TIF_FPUBOUND); | 834 | set_thread_flag(TIF_FPUBOUND); |
| 829 | } | 835 | } |
| @@ -907,13 +913,26 @@ asmlinkage void do_mdmx(struct pt_regs *regs) | |||
| 907 | 913 | ||
| 908 | asmlinkage void do_watch(struct pt_regs *regs) | 914 | asmlinkage void do_watch(struct pt_regs *regs) |
| 909 | { | 915 | { |
| 916 | u32 cause; | ||
| 917 | |||
| 910 | /* | 918 | /* |
| 911 | * We use the watch exception where available to detect stack | 919 | * Clear the WP bit (bit 22) of the Cause register so we don't loop |
| 912 | * overflows. | 920 | * forever. |
| 913 | */ | 921 | */ |
| 914 | dump_tlb_all(); | 922 | cause = read_c0_cause(); |
| 915 | show_regs(regs); | 923 | cause &= ~(1 << 22); |
| 916 | panic("Caught WATCH exception - probably caused by stack overflow."); | 924 | write_c0_cause(cause); |
| 925 | |||
| 926 | /* | ||
| 927 | * If the current thread has the watch registers loaded, save | ||
| 928 | * their values and send SIGTRAP. Otherwise another thread | ||
| 929 | * left the registers set, clear them and continue. | ||
| 930 | */ | ||
| 931 | if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) { | ||
| 932 | mips_read_watch_registers(); | ||
| 933 | force_sig(SIGTRAP, current); | ||
| 934 | } else | ||
| 935 | mips_clear_watch_registers(); | ||
| 917 | } | 936 | } |
| 918 | 937 | ||
| 919 | asmlinkage void do_mcheck(struct pt_regs *regs) | 938 | asmlinkage void do_mcheck(struct pt_regs *regs) |
| @@ -1200,7 +1219,7 @@ void *set_except_vector(int n, void *addr) | |||
| 1200 | if (n == 0 && cpu_has_divec) { | 1219 | if (n == 0 && cpu_has_divec) { |
| 1201 | *(u32 *)(ebase + 0x200) = 0x08000000 | | 1220 | *(u32 *)(ebase + 0x200) = 0x08000000 | |
| 1202 | (0x03ffffff & (handler >> 2)); | 1221 | (0x03ffffff & (handler >> 2)); |
| 1203 | flush_icache_range(ebase + 0x200, ebase + 0x204); | 1222 | local_flush_icache_range(ebase + 0x200, ebase + 0x204); |
| 1204 | } | 1223 | } |
| 1205 | return (void *)old_handler; | 1224 | return (void *)old_handler; |
| 1206 | } | 1225 | } |
| @@ -1251,6 +1270,9 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) | |||
| 1251 | 1270 | ||
| 1252 | extern char except_vec_vi, except_vec_vi_lui; | 1271 | extern char except_vec_vi, except_vec_vi_lui; |
| 1253 | extern char except_vec_vi_ori, except_vec_vi_end; | 1272 | extern char except_vec_vi_ori, except_vec_vi_end; |
| 1273 | extern char rollback_except_vec_vi; | ||
| 1274 | char *vec_start = (cpu_wait == r4k_wait) ? | ||
| 1275 | &rollback_except_vec_vi : &except_vec_vi; | ||
| 1254 | #ifdef CONFIG_MIPS_MT_SMTC | 1276 | #ifdef CONFIG_MIPS_MT_SMTC |
| 1255 | /* | 1277 | /* |
| 1256 | * We need to provide the SMTC vectored interrupt handler | 1278 | * We need to provide the SMTC vectored interrupt handler |
| @@ -1258,11 +1280,11 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) | |||
| 1258 | * Status.IM bit to be masked before going there. | 1280 | * Status.IM bit to be masked before going there. |
| 1259 | */ | 1281 | */ |
| 1260 | extern char except_vec_vi_mori; | 1282 | extern char except_vec_vi_mori; |
| 1261 | const int mori_offset = &except_vec_vi_mori - &except_vec_vi; | 1283 | const int mori_offset = &except_vec_vi_mori - vec_start; |
| 1262 | #endif /* CONFIG_MIPS_MT_SMTC */ | 1284 | #endif /* CONFIG_MIPS_MT_SMTC */ |
| 1263 | const int handler_len = &except_vec_vi_end - &except_vec_vi; | 1285 | const int handler_len = &except_vec_vi_end - vec_start; |
| 1264 | const int lui_offset = &except_vec_vi_lui - &except_vec_vi; | 1286 | const int lui_offset = &except_vec_vi_lui - vec_start; |
| 1265 | const int ori_offset = &except_vec_vi_ori - &except_vec_vi; | 1287 | const int ori_offset = &except_vec_vi_ori - vec_start; |
| 1266 | 1288 | ||
| 1267 | if (handler_len > VECTORSPACING) { | 1289 | if (handler_len > VECTORSPACING) { |
| 1268 | /* | 1290 | /* |
| @@ -1272,7 +1294,7 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) | |||
| 1272 | panic("VECTORSPACING too small"); | 1294 | panic("VECTORSPACING too small"); |
| 1273 | } | 1295 | } |
| 1274 | 1296 | ||
| 1275 | memcpy(b, &except_vec_vi, handler_len); | 1297 | memcpy(b, vec_start, handler_len); |
| 1276 | #ifdef CONFIG_MIPS_MT_SMTC | 1298 | #ifdef CONFIG_MIPS_MT_SMTC |
| 1277 | BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ | 1299 | BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ |
| 1278 | 1300 | ||
| @@ -1283,7 +1305,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) | |||
| 1283 | *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); | 1305 | *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); |
| 1284 | w = (u32 *)(b + ori_offset); | 1306 | w = (u32 *)(b + ori_offset); |
| 1285 | *w = (*w & 0xffff0000) | ((u32)handler & 0xffff); | 1307 | *w = (*w & 0xffff0000) | ((u32)handler & 0xffff); |
| 1286 | flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len)); | 1308 | local_flush_icache_range((unsigned long)b, |
| 1309 | (unsigned long)(b+handler_len)); | ||
| 1287 | } | 1310 | } |
| 1288 | else { | 1311 | else { |
| 1289 | /* | 1312 | /* |
| @@ -1295,7 +1318,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) | |||
| 1295 | w = (u32 *)b; | 1318 | w = (u32 *)b; |
| 1296 | *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */ | 1319 | *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */ |
| 1297 | *w = 0; | 1320 | *w = 0; |
| 1298 | flush_icache_range((unsigned long)b, (unsigned long)(b+8)); | 1321 | local_flush_icache_range((unsigned long)b, |
| 1322 | (unsigned long)(b+8)); | ||
| 1299 | } | 1323 | } |
| 1300 | 1324 | ||
| 1301 | return (void *)old_handler; | 1325 | return (void *)old_handler; |
| @@ -1515,7 +1539,7 @@ void __cpuinit per_cpu_trap_init(void) | |||
| 1515 | void __init set_handler(unsigned long offset, void *addr, unsigned long size) | 1539 | void __init set_handler(unsigned long offset, void *addr, unsigned long size) |
| 1516 | { | 1540 | { |
| 1517 | memcpy((void *)(ebase + offset), addr, size); | 1541 | memcpy((void *)(ebase + offset), addr, size); |
| 1518 | flush_icache_range(ebase + offset, ebase + offset + size); | 1542 | local_flush_icache_range(ebase + offset, ebase + offset + size); |
| 1519 | } | 1543 | } |
| 1520 | 1544 | ||
| 1521 | static char panic_null_cerr[] __cpuinitdata = | 1545 | static char panic_null_cerr[] __cpuinitdata = |
| @@ -1552,6 +1576,10 @@ void __init trap_init(void) | |||
| 1552 | extern char except_vec3_generic, except_vec3_r4000; | 1576 | extern char except_vec3_generic, except_vec3_r4000; |
| 1553 | extern char except_vec4; | 1577 | extern char except_vec4; |
| 1554 | unsigned long i; | 1578 | unsigned long i; |
| 1579 | int rollback; | ||
| 1580 | |||
| 1581 | check_wait(); | ||
| 1582 | rollback = (cpu_wait == r4k_wait); | ||
| 1555 | 1583 | ||
| 1556 | #if defined(CONFIG_KGDB) | 1584 | #if defined(CONFIG_KGDB) |
| 1557 | if (kgdb_early_setup) | 1585 | if (kgdb_early_setup) |
| @@ -1616,7 +1644,7 @@ void __init trap_init(void) | |||
| 1616 | if (board_be_init) | 1644 | if (board_be_init) |
| 1617 | board_be_init(); | 1645 | board_be_init(); |
| 1618 | 1646 | ||
| 1619 | set_except_vector(0, handle_int); | 1647 | set_except_vector(0, rollback ? rollback_handle_int : handle_int); |
| 1620 | set_except_vector(1, handle_tlbm); | 1648 | set_except_vector(1, handle_tlbm); |
| 1621 | set_except_vector(2, handle_tlbl); | 1649 | set_except_vector(2, handle_tlbl); |
| 1622 | set_except_vector(3, handle_tlbs); | 1650 | set_except_vector(3, handle_tlbs); |
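The rollback variant of handle_int exists because the irq-off wait has a window: an interrupt landing after the pending-work test but before the WAIT issues would otherwise be slept through. rollback_handle_int closes it by rewinding EPC whenever it interrupts that window, forcing the test to be redone. A sketch of the idea (the real check is a short assembly prologue in genex.S; the bound used here is illustrative):

    extern asmlinkage void r4k_wait(void);

    /* regs is the trap frame: if we interrupted the critical wait
     * sequence, restart it from the top so the idle test sees the
     * now-pending interrupt. */
    if (regs->cp0_epc >= (unsigned long)r4k_wait &&
        regs->cp0_epc < (unsigned long)r4k_wait + 0x20)
            regs->cp0_epc = (unsigned long)r4k_wait;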
| @@ -1680,6 +1708,8 @@ void __init trap_init(void) | |||
| 1680 | signal32_init(); | 1708 | signal32_init(); |
| 1681 | #endif | 1709 | #endif |
| 1682 | 1710 | ||
| 1683 | flush_icache_range(ebase, ebase + 0x400); | 1711 | local_flush_icache_range(ebase, ebase + 0x400); |
| 1684 | flush_tlb_handlers(); | 1712 | flush_tlb_handlers(); |
| 1713 | |||
| 1714 | sort_extable(__start___dbe_table, __stop___dbe_table); | ||
| 1685 | } | 1715 | } |
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index b5470ceb418b..afb119f35682 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S | |||
| @@ -36,6 +36,7 @@ SECTIONS | |||
| 36 | SCHED_TEXT | 36 | SCHED_TEXT |
| 37 | LOCK_TEXT | 37 | LOCK_TEXT |
| 38 | KPROBES_TEXT | 38 | KPROBES_TEXT |
| 39 | *(.text.*) | ||
| 39 | *(.fixup) | 40 | *(.fixup) |
| 40 | *(.gnu.warning) | 41 | *(.gnu.warning) |
| 41 | } :text = 0 | 42 | } :text = 0 |
diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c new file mode 100644 index 000000000000..c15406968030 --- /dev/null +++ b/arch/mips/kernel/watch.c | |||
| @@ -0,0 +1,188 @@ | |||
| 1 | /* | ||
| 2 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 3 | * License. See the file "COPYING" in the main directory of this archive | ||
| 4 | * for more details. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2008 David Daney | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/sched.h> | ||
| 10 | |||
| 11 | #include <asm/processor.h> | ||
| 12 | #include <asm/watch.h> | ||
| 13 | |||
| 14 | /* | ||
| 15 | * Install the watch registers for the current thread. A maximum of | ||
| 16 | * four registers are installed although the machine may have more. | ||
| 17 | */ | ||
| 18 | void mips_install_watch_registers(void) | ||
| 19 | { | ||
| 20 | struct mips3264_watch_reg_state *watches = | ||
| 21 | ¤t->thread.watch.mips3264; | ||
| 22 | switch (current_cpu_data.watch_reg_use_cnt) { | ||
| 23 | default: | ||
| 24 | BUG(); | ||
| 25 | case 4: | ||
| 26 | write_c0_watchlo3(watches->watchlo[3]); | ||
| 27 | /* Write 1 to the I, R, and W bits to clear them, and | ||
| 28 | 1 to G so all ASIDs are trapped. */ | ||
| 29 | write_c0_watchhi3(0x40000007 | watches->watchhi[3]); | ||
| 30 | case 3: | ||
| 31 | write_c0_watchlo2(watches->watchlo[2]); | ||
| 32 | write_c0_watchhi2(0x40000007 | watches->watchhi[2]); | ||
| 33 | case 2: | ||
| 34 | write_c0_watchlo1(watches->watchlo[1]); | ||
| 35 | write_c0_watchhi1(0x40000007 | watches->watchhi[1]); | ||
| 36 | case 1: | ||
| 37 | write_c0_watchlo0(watches->watchlo[0]); | ||
| 38 | write_c0_watchhi0(0x40000007 | watches->watchhi[0]); | ||
| 39 | } | ||
| 40 | } | ||
| 41 | |||
| 42 | /* | ||
| 43 | * Read back the watchhi registers so the user space debugger has | ||
| 44 | * access to the I, R, and W bits. A maximum of four registers are | ||
| 45 | * read although the machine may have more. | ||
| 46 | */ | ||
| 47 | void mips_read_watch_registers(void) | ||
| 48 | { | ||
| 49 | struct mips3264_watch_reg_state *watches = | ||
| 50 | ¤t->thread.watch.mips3264; | ||
| 51 | switch (current_cpu_data.watch_reg_use_cnt) { | ||
| 52 | default: | ||
| 53 | BUG(); | ||
| 54 | case 4: | ||
| 55 | watches->watchhi[3] = (read_c0_watchhi3() & 0x0fff); | ||
| 56 | case 3: | ||
| 57 | watches->watchhi[2] = (read_c0_watchhi2() & 0x0fff); | ||
| 58 | case 2: | ||
| 59 | watches->watchhi[1] = (read_c0_watchhi1() & 0x0fff); | ||
| 60 | case 1: | ||
| 61 | watches->watchhi[0] = (read_c0_watchhi0() & 0x0fff); | ||
| 62 | } | ||
| 63 | if (current_cpu_data.watch_reg_use_cnt == 1 && | ||
| 64 | (watches->watchhi[0] & 7) == 0) { | ||
| 65 | /* Pathological case of a release 1 architecture that | ||
| 66 | * doesn't set the condition bits. We assume that | ||
| 67 | * since we got here, the watch condition was met and | ||
| 68 | * signal that the conditions requested in watchlo | ||
| 69 | * were met. */ | ||
| 70 | watches->watchhi[0] |= (watches->watchlo[0] & 7); | ||
| 71 | } | ||
| 72 | } | ||
| 73 | |||
| 74 | /* | ||
| 75 | * Disable all watch registers. Although only four registers are | ||
| 76 | * installed, all are cleared to eliminate the possibility of endless | ||
| 77 | * looping in the watch handler. | ||
| 78 | */ | ||
| 79 | void mips_clear_watch_registers(void) | ||
| 80 | { | ||
| 81 | switch (current_cpu_data.watch_reg_count) { | ||
| 82 | default: | ||
| 83 | BUG(); | ||
| 84 | case 8: | ||
| 85 | write_c0_watchlo7(0); | ||
| 86 | case 7: | ||
| 87 | write_c0_watchlo6(0); | ||
| 88 | case 6: | ||
| 89 | write_c0_watchlo5(0); | ||
| 90 | case 5: | ||
| 91 | write_c0_watchlo4(0); | ||
| 92 | case 4: | ||
| 93 | write_c0_watchlo3(0); | ||
| 94 | case 3: | ||
| 95 | write_c0_watchlo2(0); | ||
| 96 | case 2: | ||
| 97 | write_c0_watchlo1(0); | ||
| 98 | case 1: | ||
| 99 | write_c0_watchlo0(0); | ||
| 100 | } | ||
| 101 | } | ||
| 102 | |||
| 103 | __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c) | ||
| 104 | { | ||
| 105 | unsigned int t; | ||
| 106 | |||
| 107 | if ((c->options & MIPS_CPU_WATCH) == 0) | ||
| 108 | return; | ||
| 109 | /* | ||
| 110 | * Check which of the I, R and W bits are supported, then | ||
| 111 | * disable the register. | ||
| 112 | */ | ||
| 113 | write_c0_watchlo0(7); | ||
| 114 | t = read_c0_watchlo0(); | ||
| 115 | write_c0_watchlo0(0); | ||
| 116 | c->watch_reg_masks[0] = t & 7; | ||
| 117 | |||
| 118 | /* Write the mask bits and read them back to determine which | ||
| 119 | * can be used. */ | ||
| 120 | c->watch_reg_count = 1; | ||
| 121 | c->watch_reg_use_cnt = 1; | ||
| 122 | t = read_c0_watchhi0(); | ||
| 123 | write_c0_watchhi0(t | 0xff8); | ||
| 124 | t = read_c0_watchhi0(); | ||
| 125 | c->watch_reg_masks[0] |= (t & 0xff8); | ||
| 126 | if ((t & 0x80000000) == 0) | ||
| 127 | return; | ||
| 128 | |||
| 129 | write_c0_watchlo1(7); | ||
| 130 | t = read_c0_watchlo1(); | ||
| 131 | write_c0_watchlo1(0); | ||
| 132 | c->watch_reg_masks[1] = t & 7; | ||
| 133 | |||
| 134 | c->watch_reg_count = 2; | ||
| 135 | c->watch_reg_use_cnt = 2; | ||
| 136 | t = read_c0_watchhi1(); | ||
| 137 | write_c0_watchhi1(t | 0xff8); | ||
| 138 | t = read_c0_watchhi1(); | ||
| 139 | c->watch_reg_masks[1] |= (t & 0xff8); | ||
| 140 | if ((t & 0x80000000) == 0) | ||
| 141 | return; | ||
| 142 | |||
| 143 | write_c0_watchlo2(7); | ||
| 144 | t = read_c0_watchlo2(); | ||
| 145 | write_c0_watchlo2(0); | ||
| 146 | c->watch_reg_masks[2] = t & 7; | ||
| 147 | |||
| 148 | c->watch_reg_count = 3; | ||
| 149 | c->watch_reg_use_cnt = 3; | ||
| 150 | t = read_c0_watchhi2(); | ||
| 151 | write_c0_watchhi2(t | 0xff8); | ||
| 152 | t = read_c0_watchhi2(); | ||
| 153 | c->watch_reg_masks[2] |= (t & 0xff8); | ||
| 154 | if ((t & 0x80000000) == 0) | ||
| 155 | return; | ||
| 156 | |||
| 157 | write_c0_watchlo3(7); | ||
| 158 | t = read_c0_watchlo3(); | ||
| 159 | write_c0_watchlo3(0); | ||
| 160 | c->watch_reg_masks[3] = t & 7; | ||
| 161 | |||
| 162 | c->watch_reg_count = 4; | ||
| 163 | c->watch_reg_use_cnt = 4; | ||
| 164 | t = read_c0_watchhi3(); | ||
| 165 | write_c0_watchhi3(t | 0xff8); | ||
| 166 | t = read_c0_watchhi3(); | ||
| 167 | c->watch_reg_masks[3] |= (t & 0xff8); | ||
| 168 | if ((t & 0x80000000) == 0) | ||
| 169 | return; | ||
| 170 | |||
| 171 | /* We use at most 4, but probe and report up to 8. */ | ||
| 172 | c->watch_reg_count = 5; | ||
| 173 | t = read_c0_watchhi4(); | ||
| 174 | if ((t & 0x80000000) == 0) | ||
| 175 | return; | ||
| 176 | |||
| 177 | c->watch_reg_count = 6; | ||
| 178 | t = read_c0_watchhi5(); | ||
| 179 | if ((t & 0x80000000) == 0) | ||
| 180 | return; | ||
| 181 | |||
| 182 | c->watch_reg_count = 7; | ||
| 183 | t = read_c0_watchhi6(); | ||
| 184 | if ((t & 0x80000000) == 0) | ||
| 185 | return; | ||
| 186 | |||
| 187 | c->watch_reg_count = 8; | ||
| 188 | } | ||
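The probe is unrolled because every watch register pair has its own CP0 accessor, but the per-register pattern is uniform; the 0x80000000 test reads WatchHi.M, which advertises that another watch register pair follows. One iteration of the pattern, annotated (register 0 shown; others differ only in the accessor):

    unsigned int t;

    write_c0_watchlo0(7);           /* ask for I|R|W... */
    c->watch_reg_masks[0] = read_c0_watchlo0() & 7; /* ...keep what stuck */
    write_c0_watchlo0(0);           /* and disable the register again */

    t = read_c0_watchhi0();
    write_c0_watchhi0(t | 0xff8);   /* which address-mask bits exist? */
    t = read_c0_watchhi0();
    c->watch_reg_masks[0] |= t & 0xff8;

    if (t & 0x80000000) {
            /* WatchHi.M set: probe watchlo1/watchhi1 the same way */
    }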
