Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Kconfig                    |  9
-rw-r--r--  arch/arm/boot/dts/versatile-ab.dts  |  2
-rw-r--r--  arch/arm/boot/dts/versatile-pb.dts  |  2
-rw-r--r--  arch/arm/include/asm/thread_info.h  |  7
-rw-r--r--  arch/arm/include/asm/tls.h          |  4
-rw-r--r--  arch/arm/kernel/irq.c               |  6
-rw-r--r--  arch/arm/kernel/signal.c            | 55
-rw-r--r--  arch/arm/kernel/smp.c               | 28
-rw-r--r--  arch/arm/mm/abort-ev6.S             | 17
-rw-r--r--  arch/arm/mm/cache-l2x0.c            | 25
-rw-r--r--  arch/arm/mm/init.c                  |  4
-rw-r--r--  arch/arm/mm/mmu.c                   |  4
-rw-r--r--  arch/arm/plat-omap/dma.c            | 14
-rw-r--r--  arch/arm/vfp/vfpmodule.c            | 99
14 files changed, 188 insertions(+), 88 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index cf006d40342c..36586dba6fa6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1186,6 +1186,15 @@ if !MMU
 source "arch/arm/Kconfig-nommu"
 endif
 
+config ARM_ERRATA_326103
+	bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
+	depends on CPU_V6
+	help
+	  Executing a SWP instruction to read-only memory does not set bit 11
+	  of the FSR on the ARM 1136 prior to r1p0. This causes the kernel to
+	  treat the access as a read, preventing a COW from occurring and
+	  causing the faulting task to livelock.
+
 config ARM_ERRATA_411920
 	bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
 	depends on CPU_V6 || CPU_V6K
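A note on the failure mode this option describes: SWP is a read-modify-write primitive, so its fault must be reported as a write for the kernel to break copy-on-write. A minimal user-space sketch of the triggering pattern (illustrative only, assuming a toolchain that still accepts the deprecated ARMv6 SWP instruction):

	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		/* Private anonymous page: the first write must COW it. */
		unsigned long *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
					MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		unsigned long old, val = 1;

		/*
		 * SWP faults on its write half. With the erratum the FSR
		 * claims a read, the kernel maps the page read-only and
		 * returns, and the retried SWP faults again: a livelock.
		 */
		__asm__ volatile("swp %0, %1, [%2]"
				 : "=&r" (old)
				 : "r" (val), "r" (p)
				 : "memory");
		printf("swapped out %lu\n", old);
		return 0;
	}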
diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
index 0b32925f2147..e2fe3195c0d1 100644
--- a/arch/arm/boot/dts/versatile-ab.dts
+++ b/arch/arm/boot/dts/versatile-ab.dts
@@ -173,7 +173,7 @@
 		mmc@5000 {
 			compatible = "arm,primecell";
 			reg = < 0x5000 0x1000>;
-			interrupts = <22>;
+			interrupts = <22 34>;
 		};
 		kmi@6000 {
 			compatible = "arm,pl050", "arm,primecell";
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 166461073b78..7e8175269064 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -41,7 +41,7 @@
 		mmc@b000 {
 			compatible = "arm,primecell";
 			reg = <0xb000 0x1000>;
-			interrupts = <23>;
+			interrupts = <23 34>;
 		};
 	};
 };
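Both board files grow a second interrupt cell because the PL180 MMCI primecell has two interrupt outputs, and the AMBA bus code hands them to the driver in the order they appear in the "interrupts" property. A hedged sketch of the consuming side (the shape follows the mmci driver, but error handling is elided and the handler wiring is simplified):

	#include <linux/amba/bus.h>
	#include <linux/interrupt.h>

	static int mmci_request_irqs(struct amba_device *dev, void *host)
	{
		/* dev->irq[0] and dev->irq[1] are filled from the two cells. */
		int ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED,
				      "mmci (cmd)", host);

		if (!ret && dev->irq[1])
			ret = request_irq(dev->irq[1], mmci_pio_irq,
					  IRQF_SHARED, "mmci (pio)", host);
		return ret;
	}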
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index d4c24d412a8d..0f04d84582e1 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -118,6 +118,13 @@ extern void iwmmxt_task_switch(struct thread_info *);
 extern void vfp_sync_hwstate(struct thread_info *);
 extern void vfp_flush_hwstate(struct thread_info *);
 
+struct user_vfp;
+struct user_vfp_exc;
+
+extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
+					   struct user_vfp_exc __user *);
+extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+				    struct user_vfp_exc __user *);
 #endif
 
 /*
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index 60843eb0f61c..73409e6c0251 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -7,6 +7,8 @@
 
 	.macro set_tls_v6k, tp, tmp1, tmp2
 	mcr	p15, 0, \tp, c13, c0, 3		@ set TLS register
+	mov	\tmp1, #0
+	mcr	p15, 0, \tmp1, c13, c0, 2	@ clear user r/w TLS register
 	.endm
 
 	.macro set_tls_v6, tp, tmp1, tmp2
@@ -15,6 +17,8 @@
 	mov	\tmp2, #0xffff0fff
 	tst	\tmp1, #HWCAP_TLS		@ hardware TLS available?
 	mcrne	p15, 0, \tp, c13, c0, 3		@ yes, set TLS register
+	movne	\tmp1, #0
+	mcrne	p15, 0, \tmp1, c13, c0, 2	@ clear user r/w TLS register
 	streq	\tp, [\tmp2, #-15]		@ set TLS value at 0xffff0ff0
 	.endm
 
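Context for the tls.h change: TPIDRURW is readable and writable from user mode, and the kernel never context-switched it, so a value left there by one task could be read by the next task to run on that CPU. Clearing it whenever the TLS register is set closes that covert channel. A sketch of the user-mode view (assuming a CPU with the ARMv6K thread ID registers):

	/* TPIDRURW: the user read/write thread ID register. */
	static inline unsigned long tpidrurw_read(void)
	{
		unsigned long v;
		__asm__ volatile("mrc p15, 0, %0, c13, c0, 2" : "=r" (v));
		return v;
	}

	static inline void tpidrurw_write(unsigned long v)
	{
		__asm__ volatile("mcr p15, 0, %0, c13, c0, 2" : : "r" (v));
	}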
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 71ccdbfed662..8349d4e97e2b 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -155,10 +155,10 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	}
 
 	c = irq_data_get_irq_chip(d);
-	if (c->irq_set_affinity)
-		c->irq_set_affinity(d, affinity, true);
-	else
+	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
+	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+		cpumask_copy(d->affinity, affinity);
 
 	return ret;
 }
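The rewritten tail also starts honouring the return value of irq_set_affinity(), which the old code threw away. A sketch of the genirq contract being relied on; the extra "&& ret" guard limits the copy to the case where the mask was just rewritten to cpu_online_mask, which is what ret records earlier in this function:

	switch (c->irq_set_affinity(d, affinity, true)) {
	case IRQ_SET_MASK_OK:
		/* Chip accepted the mask verbatim; the core caches it. */
		cpumask_copy(d->affinity, affinity);
		break;
	case IRQ_SET_MASK_OK_NOCOPY:
		/* Chip already updated d->affinity itself. */
		break;
	default:
		/* Error: leave the cached affinity untouched. */
		break;
	}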
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 7cb532fc8aa4..d68d1b694680 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -180,44 +180,23 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
 
 static int preserve_vfp_context(struct vfp_sigframe __user *frame)
 {
-	struct thread_info *thread = current_thread_info();
-	struct vfp_hard_struct *h = &thread->vfpstate.hard;
 	const unsigned long magic = VFP_MAGIC;
 	const unsigned long size = VFP_STORAGE_SIZE;
 	int err = 0;
 
-	vfp_sync_hwstate(thread);
 	__put_user_error(magic, &frame->magic, err);
 	__put_user_error(size, &frame->size, err);
 
-	/*
-	 * Copy the floating point registers. There can be unused
-	 * registers see asm/hwcap.h for details.
-	 */
-	err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
-			      sizeof(h->fpregs));
-	/*
-	 * Copy the status and control register.
-	 */
-	__put_user_error(h->fpscr, &frame->ufp.fpscr, err);
-
-	/*
-	 * Copy the exception registers.
-	 */
-	__put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
-	__put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
-	__put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+	if (err)
+		return -EFAULT;
 
-	return err ? -EFAULT : 0;
+	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
 }
 
 static int restore_vfp_context(struct vfp_sigframe __user *frame)
 {
-	struct thread_info *thread = current_thread_info();
-	struct vfp_hard_struct *h = &thread->vfpstate.hard;
 	unsigned long magic;
 	unsigned long size;
-	unsigned long fpexc;
 	int err = 0;
 
 	__get_user_error(magic, &frame->magic, err);
@@ -228,33 +207,7 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
 	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
 		return -EINVAL;
 
-	vfp_flush_hwstate(thread);
-
-	/*
-	 * Copy the floating point registers. There can be unused
-	 * registers see asm/hwcap.h for details.
-	 */
-	err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
-				sizeof(h->fpregs));
-	/*
-	 * Copy the status and control register.
-	 */
-	__get_user_error(h->fpscr, &frame->ufp.fpscr, err);
-
-	/*
-	 * Sanitise and restore the exception registers.
-	 */
-	__get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
-	/* Ensure the VFP is enabled. */
-	fpexc |= FPEXC_EN;
-	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
-	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
-	h->fpexc = fpexc;
-
-	__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
-	__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
-
-	return err ? -EFAULT : 0;
+	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
 }
 
 #endif
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index addbbe8028c2..f6a4d32b0421 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -510,10 +510,6 @@ static void ipi_cpu_stop(unsigned int cpu)
 	local_fiq_disable();
 	local_irq_disable();
 
-#ifdef CONFIG_HOTPLUG_CPU
-	platform_cpu_kill(cpu);
-#endif
-
 	while (1)
 		cpu_relax();
 }
@@ -576,17 +572,25 @@ void smp_send_reschedule(int cpu)
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static void smp_kill_cpus(cpumask_t *mask)
+{
+	unsigned int cpu;
+	for_each_cpu(cpu, mask)
+		platform_cpu_kill(cpu);
+}
+#else
+static void smp_kill_cpus(cpumask_t *mask) { }
+#endif
+
 void smp_send_stop(void)
 {
 	unsigned long timeout;
+	struct cpumask mask;
 
-	if (num_online_cpus() > 1) {
-		struct cpumask mask;
-		cpumask_copy(&mask, cpu_online_mask);
-		cpumask_clear_cpu(smp_processor_id(), &mask);
-
-		smp_cross_call(&mask, IPI_CPU_STOP);
-	}
+	cpumask_copy(&mask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &mask);
+	smp_cross_call(&mask, IPI_CPU_STOP);
 
 	/* Wait up to one second for other CPUs to stop */
 	timeout = USEC_PER_SEC;
@@ -595,6 +599,8 @@ void smp_send_stop(void)
 
 	if (num_online_cpus() > 1)
 		pr_warning("SMP: failed to stop secondary CPUs\n");
+
+	smp_kill_cpus(&mask);
 }
 
 /*
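Taken together, the smp.c hunks move platform_cpu_kill() off the dying CPU, where ipi_cpu_stop() runs in hard IRQ context with interrupts masked, onto the surviving CPU, which calls it after the one-second grace period. A hedged sketch of why the old placement was fragile, assuming a hook that polls or sleeps while waiting on a power controller:

	/* Old shape: the hook ran on the CPU being killed. */
	static void ipi_cpu_stop_old(unsigned int cpu)
	{
		local_fiq_disable();
		local_irq_disable();
	#ifdef CONFIG_HOTPLUG_CPU
		platform_cpu_kill(cpu);	/* may poll or sleep: unsafe here */
	#endif
		while (1)
			cpu_relax();
	}

Note that smp_kill_cpus() reuses the mask snapshot taken before the IPIs were sent, so exactly the CPUs that were asked to stop are the ones passed to platform_cpu_kill().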
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index ff1f7cc11f87..80741992a9fc 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -26,18 +26,23 @@ ENTRY(v6_early_abort)
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 /*
  * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR.
- * The test below covers all the write situations, including Java bytecodes
  */
-	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
+#ifdef CONFIG_ARM_ERRATA_326103
+	ldr	ip, =0x4107b36
+	mrc	p15, 0, r3, c0, c0, 0		@ get processor id
+	teq	ip, r3, lsr #4			@ r0 ARM1136?
+	bne	do_DataAbort
 	tst	r5, #PSR_J_BIT			@ Java?
+	tsteq	r5, #PSR_T_BIT			@ Thumb?
 	bne	do_DataAbort
-	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
-	ldreq	r3, [r4]			@ read aborted ARM instruction
+	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
+	ldr	r3, [r4]			@ read aborted ARM instruction
 #ifdef CONFIG_CPU_ENDIAN_BE8
-	reveq	r3, r3
+	rev	r3, r3
 #endif
 	do_ldrd_abort tmp=ip, insn=r3
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
+#endif
 	b	do_DataAbort
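For readers who don't speak ARM assembler, a hedged C rendering of the new erratum path (illustrative only; the constants and macros follow the listing above):

	/* Only ARM1136 parts, MIDR 0x4107b36x, need the fixup. */
	static int erratum_326103_applies(unsigned int midr, unsigned int psr)
	{
		return (midr >> 4) == 0x4107b36 &&
		       !(psr & (PSR_J_BIT | PSR_T_BIT));	/* ARM state only */
	}

	static unsigned int fixup_dfsr(unsigned int dfsr, unsigned int insn)
	{
		dfsr &= ~(1u << 11);		/* distrust the write bit... */
		if (!(insn & (1u << 20)))	/* ...bit 20 (L) clear = store */
			dfsr |= 1u << 11;	/* ...so re-assert "write" */
		return dfsr;
	}

Where the old code cleared and recomputed bit 11 on every ARMv6 data abort, the fixup is now gated on both CONFIG_ARM_ERRATA_326103 and the exact CPU revision, and Java or Thumb state falls straight through to do_DataAbort with the DFSR untouched.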
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index a53fd2aaa2f4..2a8e380501e8 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -32,6 +32,7 @@ static void __iomem *l2x0_base;
 static DEFINE_RAW_SPINLOCK(l2x0_lock);
 static u32 l2x0_way_mask;	/* Bitmask of active ways */
 static u32 l2x0_size;
+static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
 
 struct l2x0_regs l2x0_saved_regs;
 
@@ -61,12 +62,7 @@ static inline void cache_sync(void)
 {
 	void __iomem *base = l2x0_base;
 
-#ifdef CONFIG_PL310_ERRATA_753970
-	/* write to an unmmapped register */
-	writel_relaxed(0, base + L2X0_DUMMY_REG);
-#else
-	writel_relaxed(0, base + L2X0_CACHE_SYNC);
-#endif
+	writel_relaxed(0, base + sync_reg_offset);
 	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
@@ -85,10 +81,13 @@ static inline void l2x0_inv_line(unsigned long addr)
 }
 
 #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
+static inline void debug_writel(unsigned long val)
+{
+	if (outer_cache.set_debug)
+		outer_cache.set_debug(val);
+}
 
-#define debug_writel(val)	outer_cache.set_debug(val)
-
-static void l2x0_set_debug(unsigned long val)
+static void pl310_set_debug(unsigned long val)
 {
 	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
 }
@@ -98,7 +97,7 @@ static inline void debug_writel(unsigned long val)
 {
 }
 
-#define l2x0_set_debug	NULL
+#define pl310_set_debug	NULL
 #endif
 
 #ifdef CONFIG_PL310_ERRATA_588369
@@ -331,6 +330,11 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 		else
 			ways = 8;
 		type = "L310";
+#ifdef CONFIG_PL310_ERRATA_753970
+		/* Unmapped register. */
+		sync_reg_offset = L2X0_DUMMY_REG;
+#endif
+		outer_cache.set_debug = pl310_set_debug;
 		break;
 	case L2X0_CACHE_ID_PART_L210:
 		ways = (aux >> 13) & 0xf;
@@ -379,7 +383,6 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	outer_cache.flush_all = l2x0_flush_all;
 	outer_cache.inv_all = l2x0_inv_all;
 	outer_cache.disable = l2x0_disable;
-	outer_cache.set_debug = l2x0_set_debug;
 
 	printk(KERN_INFO "%s cache controller enabled\n", type);
 	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
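The debug_writel() wrapper matters because the debug register only exists on the PL310: outer_cache.set_debug is now installed at init time for PL310 and left NULL for L210/L220, and the wrapper quietly skips the write in the NULL case. A hedged sketch of how the erratum paths use it (reconstructed shape, not a verbatim quote of the file):

	static void l2x0_flush_line(unsigned long addr)
	{
		void __iomem *base = l2x0_base;

	#ifdef CONFIG_PL310_ERRATA_588369
		/*
		 * Bracket clean+invalidate with debug-register writes;
		 * a no-op unless pl310_set_debug was installed at init.
		 */
		debug_writel(0x03);
		writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
		writel_relaxed(addr, base + L2X0_INV_LINE_PA);
		debug_writel(0x00);
	#else
		writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
	#endif
	}

The sync_reg_offset variable applies the same idea to erratum 753970: the compile-time #ifdef in cache_sync() becomes a run-time choice made once, in l2x0_init(), only when a PL310 is actually detected.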
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 595079fa9d1d..8f5813bbffb5 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -293,11 +293,11 @@ EXPORT_SYMBOL(pfn_valid);
 #endif
 
 #ifndef CONFIG_SPARSEMEM
-static void arm_memory_present(void)
+static void __init arm_memory_present(void)
 {
 }
 #else
-static void arm_memory_present(void)
+static void __init arm_memory_present(void)
 {
 	struct memblock_region *reg;
 
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b86f8933ff91..2c7cf2f9c837 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -618,8 +618,8 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
 	}
 }
 
-static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
-	unsigned long phys, const struct mem_type *type)
+static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
+	unsigned long end, unsigned long phys, const struct mem_type *type)
 {
 	pud_t *pud = pud_offset(pgd, addr);
 	unsigned long next;
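This hunk and the init.c one fix modpost section-mismatch warnings rather than run-time bugs: code reachable only during boot should live in .init.text, both so it is freed with the rest of init memory and so that its calls into other __init code are legal. A hedged illustration of the warning class:

	static int __init boot_only_setup(void)	/* placed in .init.text */
	{
		return 0;
	}

	static void not_annotated(void)		/* .text */
	{
		boot_only_setup();	/* modpost: section mismatch */
	}

	static void __init annotated(void)	/* .init.text: legal caller */
	{
		boot_only_setup();
	}

alloc_init_pud() and arm_memory_present() are in exactly this position: both are reached only from setup code that is already __init.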
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index ecdb3da0dea9..c58d896cd5c3 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -916,6 +916,13 @@ void omap_start_dma(int lch)
 		l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
 	l |= OMAP_DMA_CCR_EN;
 
+	/*
+	 * As dma_write() uses IO accessors which are weakly ordered, there
+	 * is no guarantee that data in coherent DMA memory will be visible
+	 * to the DMA device. Add a memory barrier here to ensure that any
+	 * such data is visible prior to enabling DMA.
+	 */
+	mb();
 	p->dma_write(l, CCR, lch);
 
 	dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
@@ -965,6 +972,13 @@ void omap_stop_dma(int lch)
 		p->dma_write(l, CCR, lch);
 	}
 
+	/*
+	 * Ensure that data transferred by DMA is visible to any access
+	 * after DMA has been disabled. This is important for coherent
+	 * DMA regions.
+	 */
+	mb();
+
 	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
 		int next_lch, cur_lch = lch;
 		char dma_chan_link_map[dma_lch_count];
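A hedged driver's-eye sketch of the race the first barrier closes (the descriptor layout is made up for illustration):

	struct dma_desc *d = coherent_vaddr;	/* dma_alloc_coherent() memory */

	d->src = src_phys;	/* plain stores to coherent memory, */
	d->len = len;		/* weakly ordered against MMIO */
	d->valid = 1;

	/*
	 * dma_write() uses relaxed IO accessors, so without the mb() now
	 * inside omap_start_dma() the CCR enable could reach the
	 * controller before the stores above are visible to it, starting
	 * the channel on a stale descriptor.
	 */
	omap_start_dma(lch);

The barrier in omap_stop_dma() is the mirror image: it makes the data the controller wrote visible to the CPU before any code that runs after the channel is disabled reads it.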
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 858748eaa144..bc683b8219b5 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -17,6 +17,8 @@
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/user.h>
 
 #include <asm/cp15.h>
 #include <asm/cputype.h>
@@ -529,6 +531,103 @@ void vfp_flush_hwstate(struct thread_info *thread)
 }
 
 /*
+ * Save the current VFP state into the provided structures and prepare
+ * for entry into a new function (signal handler).
+ */
+int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
+				    struct user_vfp_exc __user *ufp_exc)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+	int err = 0;
+
+	/* Ensure that the saved hwstate is up-to-date. */
+	vfp_sync_hwstate(thread);
+
+	/*
+	 * Copy the floating point registers. There can be unused
+	 * registers see asm/hwcap.h for details.
+	 */
+	err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
+			      sizeof(hwstate->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__put_user_error(hwstate->fpscr, &ufp->fpscr, err);
+
+	/*
+	 * Copy the exception registers.
+	 */
+	__put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
+	__put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+	__put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+
+	if (err)
+		return -EFAULT;
+
+	/* Ensure that VFP is disabled. */
+	vfp_flush_hwstate(thread);
+
+	/*
+	 * As per the PCS, clear the length and stride bits for function
+	 * entry.
+	 */
+	hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
+
+	/*
+	 * Disable VFP in the hwstate so that we can detect if it gets
+	 * used.
+	 */
+	hwstate->fpexc &= ~FPEXC_EN;
+	return 0;
+}
+
+/* Sanitise and restore the current VFP state from the provided structures. */
+int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
+			     struct user_vfp_exc __user *ufp_exc)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+	unsigned long fpexc;
+	int err = 0;
+
+	/*
+	 * If VFP has been used, then disable it to avoid corrupting
+	 * the new thread state.
+	 */
+	if (hwstate->fpexc & FPEXC_EN)
+		vfp_flush_hwstate(thread);
+
+	/*
+	 * Copy the floating point registers. There can be unused
+	 * registers see asm/hwcap.h for details.
+	 */
+	err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
+				sizeof(hwstate->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__get_user_error(hwstate->fpscr, &ufp->fpscr, err);
+
+	/*
+	 * Sanitise and restore the exception registers.
+	 */
+	__get_user_error(fpexc, &ufp_exc->fpexc, err);
+
+	/* Ensure the VFP is enabled. */
+	fpexc |= FPEXC_EN;
+
+	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
+	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+	hwstate->fpexc = fpexc;
+
+	__get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+	__get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+
+	return err ? -EFAULT : 0;
+}
+
+/*
  * VFP hardware can lose all context when a CPU goes offline.
  * As we will be running in SMP mode with CPU hotplug, we will save the
  * hardware state at every thread switch. We clear our held state when
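One user-visible consequence of vfp_preserve_user_clear_hwstate() is that clearing FPSCR.LEN and FPSCR.STRIDE gives every signal handler AAPCS-conformant vector state on entry. A hedged user-space check (assuming a VFP-enabled toolchain):

	#include <assert.h>
	#include <signal.h>

	static void handler(int sig)
	{
		unsigned int fpscr;

		/* VMRS reads the user-visible VFP status/control register. */
		__asm__ volatile("vmrs %0, fpscr" : "=r" (fpscr));

		/*
		 * LEN is bits 18:16 and STRIDE bits 21:20 (mask 0x00370000);
		 * the kernel now guarantees both are zero on handler entry.
		 */
		assert((fpscr & 0x00370000) == 0);
	}

On sigreturn, vfp_restore_user_hwstate() reinstates whatever FPSCR the frame carries, so the interrupted code resumes with its own settings, while FPEXC is sanitised (EN forced on, EX and FP2V cleared) so a forged frame cannot resume into a bogus exception state.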