Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/boot/compressed/head-xscale.S  |   7
-rw-r--r--  arch/arm/kernel/entry-armv.S            |  16
-rw-r--r--  arch/arm/kernel/traps.c                 |  49
-rw-r--r--  arch/arm/lib/io-writesw-armv4.S         |   6
-rw-r--r--  arch/arm/mach-pxa/mainstone.c           |   9
-rw-r--r--  arch/arm/mach-pxa/pm.c                  |  32
-rw-r--r--  arch/arm/mach-pxa/pxa25x.c              |  29
-rw-r--r--  arch/arm/mach-pxa/pxa27x.c              |  32
-rw-r--r--  arch/arm/mach-s3c2410/dma.c             |   4
-rw-r--r--  arch/arm/mm/Kconfig                     |  15
-rw-r--r--  arch/arm/mm/Makefile                    |   2
-rw-r--r--  arch/arm/mm/copypage-xscale.S           | 113
-rw-r--r--  arch/arm/mm/copypage-xscale.c           | 131
-rw-r--r--  arch/arm/mm/minicache.c                 |  73
-rw-r--r--  arch/ppc/kernel/cputable.c              |  11
-rw-r--r--  arch/ppc64/boot/prom.c                  |  28
-rw-r--r--  arch/ppc64/kernel/kprobes.c             |   2
17 files changed, 334 insertions(+), 225 deletions(-)
diff --git a/arch/arm/boot/compressed/head-xscale.S b/arch/arm/boot/compressed/head-xscale.S
index 665bd2c20743..d3fe2533907e 100644
--- a/arch/arm/boot/compressed/head-xscale.S
+++ b/arch/arm/boot/compressed/head-xscale.S
@@ -47,3 +47,10 @@ __XScale_start:
 		orr	r7, r7, #(MACH_TYPE_GTWX5715 & 0xff00)
 #endif
 
+#ifdef CONFIG_ARCH_IXP2000
+		mov	r1, #-1
+		mov	r0, #0xd6000000
+		str	r1, [r0, #0x14]
+		str	r1, [r0, #0x18]
+#endif
+
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 4eb36155dc93..e14278d59882 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -269,7 +269,7 @@ __pabt_svc:
 	add	r5, sp, #S_PC
 	ldmia	r7, {r2 - r4}			@ Get USR pc, cpsr
 
-#if __LINUX_ARM_ARCH__ < 6
+#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 	@ make sure our user space atomic helper is aborted
 	cmp	r2, #VIRT_OFFSET
 	bichs	r3, r3, #PSR_Z_BIT
@@ -616,11 +616,17 @@ __kuser_helper_start:
 
 __kuser_cmpxchg:				@ 0xffff0fc0
 
-#if __LINUX_ARM_ARCH__ < 6
+#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 
-#ifdef CONFIG_SMP  /* sanity check */
-#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?"
-#endif
+	/*
+	 * Poor you.  No fast solution possible...
+	 * The kernel itself must perform the operation.
+	 * A special ghost syscall is used for that (see traps.c).
+	 */
+	swi	#0x9ffff0
+	mov	pc, lr
+
+#elif __LINUX_ARM_ARCH__ < 6
 
 	/*
 	 * Theory of operation:
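
For context: user programs reach __kuser_cmpxchg by calling the fixed vector-page address 0xffff0fc0 directly. A minimal sketch of that calling convention in C — the typedef and the wrapper below are illustrative, not part of this patch:

	/* Kernel-provided cmpxchg helper at a fixed address (sketch).
	 * Returns 0 (and sets the C flag) if *ptr was atomically changed
	 * from oldval to newval; returns non-zero otherwise. */
	typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
	#define __kuser_cmpxchg (*(kuser_cmpxchg_t *)0xffff0fc0)

	static int atomic_add_return(int amount, volatile int *v)
	{
		int old;
		do {
			old = *v;
		} while (__kuser_cmpxchg(old, old + amount, v) != 0);
		return old + amount;
	}

On CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG kernels the helper simply traps into the ghost syscall added in the traps.c hunk below; on other pre-ARMv6 kernels it runs the fast in-line sequence that follows.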
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 14df16b983f4..45d2a032d890 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -464,6 +464,55 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 #endif
 		return 0;
 
+#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
+	/*
+	 * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
+	 * Return zero in r0 if *MEM was changed or non-zero if no exchange
+	 * happened.  Also set the user C flag accordingly.
+	 * If access permissions have to be fixed up then non-zero is
+	 * returned and the operation has to be re-attempted.
+	 *
+	 * *NOTE*: This is a ghost syscall private to the kernel.  Only the
+	 * __kuser_cmpxchg code in entry-armv.S should be aware of its
+	 * existence.  Don't ever use this from user code.
+	 */
+	case 0xfff0:
+	{
+		extern void do_DataAbort(unsigned long addr, unsigned int fsr,
+					 struct pt_regs *regs);
+		unsigned long val;
+		unsigned long addr = regs->ARM_r2;
+		struct mm_struct *mm = current->mm;
+		pgd_t *pgd; pmd_t *pmd; pte_t *pte;
+
+		regs->ARM_cpsr &= ~PSR_C_BIT;
+		spin_lock(&mm->page_table_lock);
+		pgd = pgd_offset(mm, addr);
+		if (!pgd_present(*pgd))
+			goto bad_access;
+		pmd = pmd_offset(pgd, addr);
+		if (!pmd_present(*pmd))
+			goto bad_access;
+		pte = pte_offset_map(pmd, addr);
+		if (!pte_present(*pte) || !pte_write(*pte))
+			goto bad_access;
+		val = *(unsigned long *)addr;
+		val -= regs->ARM_r0;
+		if (val == 0) {
+			*(unsigned long *)addr = regs->ARM_r1;
+			regs->ARM_cpsr |= PSR_C_BIT;
+		}
+		spin_unlock(&mm->page_table_lock);
+		return val;
+
+	bad_access:
+		spin_unlock(&mm->page_table_lock);
+		/* simulate a read access fault */
+		do_DataAbort(addr, 15 + (1 << 11), regs);
+		return -1;
+	}
+#endif
+
 	default:
 		/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
 		   if not implemented, rather than raising SIGILL. This
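
For reference: the swi #0x9ffff0 issued by __kuser_cmpxchg carries the ARM private-syscall prefix 0x9f in its upper bits, so arm_syscall() dispatches on the low 16 bits (0xfff0) and lands in the case above. Reduced to plain C, the user-visible semantics are roughly this (a sketch of behaviour only — the real code must also walk the page tables, handle faults, and set the user-mode C flag):

	/* What case 0xfff0 computes on behalf of user space (sketch): */
	long ghost_cmpxchg(unsigned long *uaddr, unsigned long old, unsigned long new)
	{
		long diff = *uaddr - old;
		if (diff == 0)
			*uaddr = new;	/* success: C flag set, zero returned */
		return diff;		/* non-zero means no exchange happened */
	}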
diff --git a/arch/arm/lib/io-writesw-armv4.S b/arch/arm/lib/io-writesw-armv4.S
index 6d1d7c27806e..5e240e452af6 100644
--- a/arch/arm/lib/io-writesw-armv4.S
+++ b/arch/arm/lib/io-writesw-armv4.S
@@ -87,9 +87,9 @@ ENTRY(__raw_writesw)
 		subs	r2, r2, #2
 		orr	ip, ip, r3, push_hbyte1
 		strh	ip, [r0]
-		bpl	2b
+		bpl	1b
 
-3:		tst	r2, #1
-2:		movne	ip, r3, lsr #8
+		tst	r2, #1
+3:		movne	ip, r3, lsr #8
 		strneh	ip, [r0]
 		mov	pc, lr
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index 3f952237ae3d..6823ae28ae6a 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -304,6 +304,15 @@ static void __init mainstone_map_io(void)
 	PWER  = 0xC0000002;
 	PRER  = 0x00000002;
 	PFER  = 0x00000002;
+	/* to use the internal SRAM as framebuffer */
+	PSLR |= 0xF04;
+	PCFR = 0x66;
+	/* for keypad wakeup */
+	KPC &= ~KPC_ASACT;
+	KPC |= KPC_AS;
+	PKWR  = 0x000FD000;
+	/* need to read PKWR back after writing it */
+	PKWR;
 }
 
 MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)")
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
index 82a4bf34c251..9799fe80df23 100644
--- a/arch/arm/mach-pxa/pm.c
+++ b/arch/arm/mach-pxa/pm.c
@@ -29,9 +29,6 @@
  */
 #undef DEBUG
 
-extern void pxa_cpu_suspend(void);
-extern void pxa_cpu_resume(void);
-
 #define SAVE(x)		sleep_save[SLEEP_SAVE_##x] = x
 #define RESTORE(x)	x = sleep_save[SLEEP_SAVE_##x]
 
@@ -63,6 +60,12 @@ enum { SLEEP_SAVE_START = 0,
 	SLEEP_SAVE_ICMR,
 	SLEEP_SAVE_CKEN,
 
+#ifdef CONFIG_PXA27x
+	SLEEP_SAVE_MDREFR,
+	SLEEP_SAVE_PWER, SLEEP_SAVE_PCFR, SLEEP_SAVE_PRER,
+	SLEEP_SAVE_PFER, SLEEP_SAVE_PKWR,
+#endif
+
 	SLEEP_SAVE_CKSUM,
 
 	SLEEP_SAVE_SIZE
@@ -75,9 +78,7 @@ static int pxa_pm_enter(suspend_state_t state)
 	unsigned long checksum = 0;
 	struct timespec delta, rtc;
 	int i;
-
-	if (state != PM_SUSPEND_MEM)
-		return -EINVAL;
+	extern void pxa_cpu_pm_enter(suspend_state_t state);
 
 #ifdef CONFIG_IWMMXT
 	/* force any iWMMXt context to ram **/
@@ -100,16 +101,17 @@ static int pxa_pm_enter(suspend_state_t state)
 	SAVE(GAFR2_L); SAVE(GAFR2_U);
 
 #ifdef CONFIG_PXA27x
+	SAVE(MDREFR);
 	SAVE(GPLR3); SAVE(GPDR3); SAVE(GRER3); SAVE(GFER3); SAVE(PGSR3);
 	SAVE(GAFR3_L); SAVE(GAFR3_U);
+	SAVE(PWER); SAVE(PCFR); SAVE(PRER);
+	SAVE(PFER); SAVE(PKWR);
 #endif
 
 	SAVE(ICMR);
 	ICMR = 0;
 
 	SAVE(CKEN);
-	CKEN = 0;
-
 	SAVE(PSTR);
 
 	/* Note: wake up source are set up in each machine specific files */
@@ -123,16 +125,13 @@ static int pxa_pm_enter(suspend_state_t state)
 	/* Clear sleep reset status */
 	RCSR = RCSR_SMR;
 
-	/* set resume return address */
-	PSPR = virt_to_phys(pxa_cpu_resume);
-
 	/* before sleeping, calculate and save a checksum */
 	for (i = 0; i < SLEEP_SAVE_SIZE - 1; i++)
 		checksum += sleep_save[i];
 	sleep_save[SLEEP_SAVE_CKSUM] = checksum;
 
 	/* *** go zzz *** */
-	pxa_cpu_suspend();
+	pxa_cpu_pm_enter(state);
 
 	/* after sleeping, validate the checksum */
 	checksum = 0;
@@ -145,7 +144,7 @@ static int pxa_pm_enter(suspend_state_t state)
 		LUB_HEXLED = 0xbadbadc5;
 #endif
 		while (1)
-			pxa_cpu_suspend();
+			pxa_cpu_pm_enter(state);
 	}
 
 	/* ensure not to come back here if it wasn't intended */
@@ -162,8 +161,11 @@ static int pxa_pm_enter(suspend_state_t state)
 	RESTORE(PGSR0); RESTORE(PGSR1); RESTORE(PGSR2);
 
 #ifdef CONFIG_PXA27x
+	RESTORE(MDREFR);
 	RESTORE(GAFR3_L); RESTORE(GAFR3_U); RESTORE_GPLEVEL(3);
 	RESTORE(GPDR3); RESTORE(GRER3); RESTORE(GFER3); RESTORE(PGSR3);
+	RESTORE(PWER); RESTORE(PCFR); RESTORE(PRER);
+	RESTORE(PFER); RESTORE(PKWR);
 #endif
 
 	PSSR = PSSR_RDH | PSSR_PH;
@@ -197,7 +199,9 @@ unsigned long sleep_phys_sp(void *sp)
  */
 static int pxa_pm_prepare(suspend_state_t state)
 {
-	return 0;
+	extern int pxa_cpu_pm_prepare(suspend_state_t state);
+
+	return pxa_cpu_pm_prepare(state);
 }
 
 /*
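
Taken together, the pm.c changes split suspend handling into a generic layer and CPU-specific hooks. Roughly, using only names from this patch:

	/* Call flow after this patch (sketch):
	 *
	 *   pxa_pm_prepare(state)
	 *     -> pxa_cpu_pm_prepare(state)   reject anything but PM_SUSPEND_MEM
	 *   pxa_pm_enter(state)              save registers, checksum, restore on wake
	 *     -> pxa_cpu_pm_enter(state)     gate clocks, set PSPR, and call
	 *                                    pxa_cpu_suspend() for the actual sleep
	 */

This is why the extern declarations, the PSPR write, and the CKEN = 0 line all move out of pm.c and into pxa25x.c and pxa27x.c below.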
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index e887b7175ef3..b6d945a6e774 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -102,3 +102,32 @@ unsigned int get_lcdclk_frequency_10khz(void)
 }
 
 EXPORT_SYMBOL(get_lcdclk_frequency_10khz);
+
+
+int pxa_cpu_pm_prepare(suspend_state_t state)
+{
+	switch (state) {
+	case PM_SUSPEND_MEM:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void pxa_cpu_pm_enter(suspend_state_t state)
+{
+	extern void pxa_cpu_suspend(unsigned int);
+	extern void pxa_cpu_resume(void);
+
+	CKEN = 0;
+
+	switch (state) {
+	case PM_SUSPEND_MEM:
+		/* set resume return address */
+		PSPR = virt_to_phys(pxa_cpu_resume);
+		pxa_cpu_suspend(3);
+		break;
+	}
+}
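
The literal 3 passed to pxa_cpu_suspend() selects the power mode that the suspend stub writes into the PWRMODE coprocessor register; 3 is the sleep mode on these parts. The stub itself is not shown in this diff, but it presumably ends with something like the following (an assumption from the PXA developer's manuals, hypothetical code):

	@ final step of pxa_cpu_suspend(mode), assumed (sketch):
	mcr	p14, 0, r0, c7, c0, 0		@ write mode (r0) to PWRMODE; 3 = sleep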
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 7e863afefb53..aa3c3b2ab75e 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -120,6 +120,38 @@ EXPORT_SYMBOL(get_clk_frequency_khz);
 EXPORT_SYMBOL(get_memclk_frequency_10khz);
 EXPORT_SYMBOL(get_lcdclk_frequency_10khz);
 
+int pxa_cpu_pm_prepare(suspend_state_t state)
+{
+	switch (state) {
+	case PM_SUSPEND_MEM:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+void pxa_cpu_pm_enter(suspend_state_t state)
+{
+	extern void pxa_cpu_standby(void);
+	extern void pxa_cpu_suspend(unsigned int);
+	extern void pxa_cpu_resume(void);
+
+	CKEN = CKEN22_MEMC | CKEN9_OSTIMER;
+
+	/* ensure voltage-change sequencer not initiated, which hangs */
+	PCFR &= ~PCFR_FVC;
+
+	/* Clear edge-detect status register. */
+	PEDR = 0xDF12FE1B;
+
+	switch (state) {
+	case PM_SUSPEND_MEM:
+		/* set resume return address */
+		PSPR = virt_to_phys(pxa_cpu_resume);
+		pxa_cpu_suspend(3);
+		break;
+	}
+}
 
 /*
  * device registration specific to PXA27x.
diff --git a/arch/arm/mach-s3c2410/dma.c b/arch/arm/mach-s3c2410/dma.c
index bc229fab86d4..c7c28890d406 100644
--- a/arch/arm/mach-s3c2410/dma.c
+++ b/arch/arm/mach-s3c2410/dma.c
@@ -785,6 +785,10 @@ int s3c2410_dma_free(dmach_t channel, s3c2410_dma_client_t *client)
 	chan->client = NULL;
 	chan->in_use = 0;
 
+	if (chan->irq_claimed)
+		free_irq(chan->irq, (void *)chan);
+	chan->irq_claimed = 0;
+
 	local_irq_restore(flags);
 
 	return 0;
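
This completes the claim/release pairing for the channel's interrupt, so a freed channel can be re-claimed and request_irq() will succeed again. A sketch of the pairing — the claim side is paraphrased from the existing driver, not part of this diff:

	/* claim (in s3c2410_dma_request, existing code, paraphrased):
	 *	request_irq(chan->irq, s3c2410_dma_irq, 0, client->name, (void *)chan);
	 *	chan->irq_claimed = 1;
	 *
	 * release (this fix):
	 *	if (chan->irq_claimed)
	 *		free_irq(chan->irq, (void *)chan);
	 *	chan->irq_claimed = 0;
	 */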
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 48bac7da8c70..3fefb43c67f7 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -228,7 +228,6 @@ config CPU_SA1100
 	select CPU_CACHE_V4WB
 	select CPU_CACHE_VIVT
 	select CPU_TLB_V4WB
-	select CPU_MINICACHE
 
 # XScale
 config CPU_XSCALE
@@ -239,7 +238,6 @@ config CPU_XSCALE
 	select CPU_ABRT_EV5T
 	select CPU_CACHE_VIVT
 	select CPU_TLB_V4WBI
-	select CPU_MINICACHE
 
 # ARMv6
 config CPU_V6
@@ -345,11 +343,6 @@ config CPU_TLB_V4WBI
 config CPU_TLB_V6
 	bool
 
-config CPU_MINICACHE
-	bool
-	help
-	  Processor has a minicache.
-
 comment "Processor Features"
 
 config ARM_THUMB
@@ -429,3 +422,11 @@ config HAS_TLS_REG
 	  assume directly accessing that register and always obtain the
 	  expected value only on ARMv7 and above.
 
+config NEEDS_SYSCALL_FOR_CMPXCHG
+	bool
+	default y if SMP && (CPU_32v5 || CPU_32v4 || CPU_32v3)
+	help
+	  SMP on a pre-ARMv6 processor?  Well OK then.
+	  Forget about fast user space cmpxchg support.
+	  It is just not possible.
+
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index ccf316c11e02..59f47d4c2dfe 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -31,8 +31,6 @@ obj-$(CONFIG_CPU_COPY_V6) += copypage-v6.o mmu.o
 obj-$(CONFIG_CPU_SA1100)	+= copypage-v4mc.o
 obj-$(CONFIG_CPU_XSCALE)	+= copypage-xscale.o
 
-obj-$(CONFIG_CPU_MINICACHE)	+= minicache.o
-
 obj-$(CONFIG_CPU_TLB_V3)	+= tlb-v3.o
 obj-$(CONFIG_CPU_TLB_V4WT)	+= tlb-v4.o
 obj-$(CONFIG_CPU_TLB_V4WB)	+= tlb-v4wb.o
diff --git a/arch/arm/mm/copypage-xscale.S b/arch/arm/mm/copypage-xscale.S
deleted file mode 100644
index bb277316ef52..000000000000
--- a/arch/arm/mm/copypage-xscale.S
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- *  linux/arch/arm/lib/copypage-xscale.S
- *
- *  Copyright (C) 2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/constants.h>
-
-/*
- * General note:
- *  We don't really want write-allocate cache behaviour for these functions
- *  since that will just eat through 8K of the cache.
- */
-
-	.text
-	.align	5
-/*
- * XScale optimised copy_user_page
- *  r0 = destination
- *  r1 = source
- *  r2 = virtual user address of ultimate destination page
- *
- * The source page may have some clean entries in the cache already, but we
- * can safely ignore them - break_cow() will flush them out of the cache
- * if we eventually end up using our copied page.
- *
- * What we could do is use the mini-cache to buffer reads from the source
- * page.  We rely on the mini-cache being smaller than one page, so we'll
- * cycle through the complete cache anyway.
- */
-ENTRY(xscale_mc_copy_user_page)
-	stmfd	sp!, {r4, r5, lr}
-	mov	r5, r0
-	mov	r0, r1
-	bl	map_page_minicache
-	mov	r1, r5
-	mov	lr, #PAGE_SZ/64-1
-
-	/*
-	 * Strangely enough, best performance is achieved
-	 * when prefetching destination as well.  (NP)
-	 */
-	pld	[r0, #0]
-	pld	[r0, #32]
-	pld	[r1, #0]
-	pld	[r1, #32]
-
-1:	pld	[r0, #64]
-	pld	[r0, #96]
-	pld	[r1, #64]
-	pld	[r1, #96]
-
-2:	ldrd	r2, [r0], #8
-	ldrd	r4, [r0], #8
-	mov	ip, r1
-	strd	r2, [r1], #8
-	ldrd	r2, [r0], #8
-	strd	r4, [r1], #8
-	ldrd	r4, [r0], #8
-	strd	r2, [r1], #8
-	strd	r4, [r1], #8
-	mcr	p15, 0, ip, c7, c10, 1		@ clean D line
-	ldrd	r2, [r0], #8
-	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line
-	ldrd	r4, [r0], #8
-	mov	ip, r1
-	strd	r2, [r1], #8
-	ldrd	r2, [r0], #8
-	strd	r4, [r1], #8
-	ldrd	r4, [r0], #8
-	strd	r2, [r1], #8
-	strd	r4, [r1], #8
-	mcr	p15, 0, ip, c7, c10, 1		@ clean D line
-	subs	lr, lr, #1
-	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line
-	bgt	1b
-	beq	2b
-
-	ldmfd	sp!, {r4, r5, pc}
-
-	.align	5
-/*
- * XScale optimised clear_user_page
- *  r0 = destination
- *  r1 = virtual user address of ultimate destination page
- */
-ENTRY(xscale_mc_clear_user_page)
-	mov	r1, #PAGE_SZ/32
-	mov	r2, #0
-	mov	r3, #0
-1:	mov	ip, r0
-	strd	r2, [r0], #8
-	strd	r2, [r0], #8
-	strd	r2, [r0], #8
-	strd	r2, [r0], #8
-	mcr	p15, 0, ip, c7, c10, 1		@ clean D line
-	subs	r1, r1, #1
-	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line
-	bne	1b
-	mov	pc, lr
-
-	__INITDATA
-
-	.type	xscale_mc_user_fns, #object
-ENTRY(xscale_mc_user_fns)
-	.long	xscale_mc_clear_user_page
-	.long	xscale_mc_copy_user_page
-	.size	xscale_mc_user_fns, . - xscale_mc_user_fns
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
new file mode 100644
index 000000000000..42a6ee255ce0
--- /dev/null
+++ b/arch/arm/mm/copypage-xscale.c
@@ -0,0 +1,131 @@
+/*
+ *  linux/arch/arm/mm/copypage-xscale.c
+ *
+ *  Copyright (C) 1995-2005 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This handles the mini data cache, as found on SA11x0 and XScale
+ * processors.  When we copy a user page, we map it in such a way
+ * that accesses to this page will not touch the main data cache, but
+ * will be cached in the mini data cache.  This prevents us thrashing
+ * the main data cache on page faults.
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+/*
+ * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
+ * specific hacks for copying pages efficiently.
+ */
+#define COPYPAGE_MINICACHE	0xffff8000
+
+#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
+				  L_PTE_CACHEABLE)
+
+#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+
+static DEFINE_SPINLOCK(minicache_lock);
+
+/*
+ * XScale mini-dcache optimised copy_user_page
+ *
+ * We flush the destination cache lines just before we write the data into the
+ * corresponding address.  Since the Dcache is read-allocate, this removes the
+ * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
+ * and merged as appropriate.
+ */
+static void __attribute__((naked))
+mc_copy_user_page(void *from, void *to)
+{
+	/*
+	 * Strangely enough, best performance is achieved
+	 * when prefetching destination as well.  (NP)
+	 */
+	asm volatile(
+	"stmfd	sp!, {r4, r5, lr}		\n\
+	mov	lr, %2				\n\
+	pld	[r0, #0]			\n\
+	pld	[r0, #32]			\n\
+	pld	[r1, #0]			\n\
+	pld	[r1, #32]			\n\
+1:	pld	[r0, #64]			\n\
+	pld	[r0, #96]			\n\
+	pld	[r1, #64]			\n\
+	pld	[r1, #96]			\n\
+2:	ldrd	r2, [r0], #8			\n\
+	ldrd	r4, [r0], #8			\n\
+	mov	ip, r1				\n\
+	strd	r2, [r1], #8			\n\
+	ldrd	r2, [r0], #8			\n\
+	strd	r4, [r1], #8			\n\
+	ldrd	r4, [r0], #8			\n\
+	strd	r2, [r1], #8			\n\
+	strd	r4, [r1], #8			\n\
+	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
+	ldrd	r2, [r0], #8			\n\
+	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
+	ldrd	r4, [r0], #8			\n\
+	mov	ip, r1				\n\
+	strd	r2, [r1], #8			\n\
+	ldrd	r2, [r0], #8			\n\
+	strd	r4, [r1], #8			\n\
+	ldrd	r4, [r0], #8			\n\
+	strd	r2, [r1], #8			\n\
+	strd	r4, [r1], #8			\n\
+	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
+	subs	lr, lr, #1			\n\
+	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
+	bgt	1b				\n\
+	beq	2b				\n\
+	ldmfd	sp!, {r4, r5, pc}		"
+	:
+	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
+}
+
+void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+{
+	spin_lock(&minicache_lock);
+
+	set_pte(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot));
+	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
+
+	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
+
+	spin_unlock(&minicache_lock);
+}
+
+/*
+ * XScale optimised clear_user_page
+ */
+void __attribute__((naked))
+xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+{
+	asm volatile(
+	"mov	r1, %0				\n\
+	mov	r2, #0				\n\
+	mov	r3, #0				\n\
+1:	mov	ip, r0				\n\
+	strd	r2, [r0], #8			\n\
+	strd	r2, [r0], #8			\n\
+	strd	r2, [r0], #8			\n\
+	strd	r2, [r0], #8			\n\
+	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
+	subs	r1, r1, #1			\n\
+	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
+	bne	1b				\n\
+	mov	pc, lr"
+	:
+	: "I" (PAGE_SIZE / 32));
+}
+
+struct cpu_user_fns xscale_mc_user_fns __initdata = {
+	.cpu_clear_user_page	= xscale_mc_clear_user_page,
+	.cpu_copy_user_page	= xscale_mc_copy_user_page,
+};
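
Relative to the deleted assembly version above, this C rewrite serialises use of the single minicache window with minicache_lock, addressing the "probably preempt unsafe" caveat in the old map_page_minicache() comment (see the minicache.c removal below). These functions are reached through the per-CPU user-page operations; roughly, assuming the usual multi-CPU dispatch in the ARM mm layer:

	/* How the per-CPU page ops are reached (sketch of the generic side):
	 * copy_user_page(to, from, vaddr) expands to
	 *	cpu_user.cpu_copy_user_page(to, from, vaddr);
	 * with cpu_user pointed at xscale_mc_user_fns during boot on XScale.
	 */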
diff --git a/arch/arm/mm/minicache.c b/arch/arm/mm/minicache.c
index dedf2ab01b2a..e69de29bb2d1 100644
--- a/arch/arm/mm/minicache.c
+++ b/arch/arm/mm/minicache.c
@@ -1,73 +0,0 @@
-/*
- *  linux/arch/arm/mm/minicache.c
- *
- *  Copyright (C) 2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This handles the mini data cache, as found on SA11x0 and XScale
- * processors.  When we copy a user page page, we map it in such a way
- * that accesses to this page will not touch the main data cache, but
- * will be cached in the mini data cache.  This prevents us thrashing
- * the main data cache on page faults.
- */
-#include <linux/init.h>
-#include <linux/mm.h>
-
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-
-/*
- * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
- * specific hacks for copying pages efficiently.
- */
-#define minicache_address (0xffff8000)
-#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
-				  L_PTE_CACHEABLE)
-
-static pte_t *minicache_pte;
-
-/*
- * Note that this is intended to be called only from the copy_user_page
- * asm code; anything else will require special locking to prevent the
- * mini-cache space being re-used.  (Note: probably preempt unsafe).
- *
- * We rely on the fact that the minicache is 2K, and we'll be pushing
- * 4K of data through it, so we don't actually have to specifically
- * flush the minicache when we change the mapping.
- *
- * Note also: assert(PAGE_OFFSET <= virt < high_memory).
- * Unsafe: preempt, kmap.
- */
-unsigned long map_page_minicache(unsigned long virt)
-{
-	set_pte(minicache_pte, pfn_pte(__pa(virt) >> PAGE_SHIFT, minicache_pgprot));
-	flush_tlb_kernel_page(minicache_address);
-
-	return minicache_address;
-}
-
-static int __init minicache_init(void)
-{
-	pgd_t *pgd;
-	pmd_t *pmd;
-
-	spin_lock(&init_mm.page_table_lock);
-
-	pgd = pgd_offset_k(minicache_address);
-	pmd = pmd_alloc(&init_mm, pgd, minicache_address);
-	if (!pmd)
-		BUG();
-	minicache_pte = pte_alloc_kernel(&init_mm, pmd, minicache_address);
-	if (!minicache_pte)
-		BUG();
-
-	spin_unlock(&init_mm.page_table_lock);
-
-	return 0;
-}
-
-core_initcall(minicache_init);
diff --git a/arch/ppc/kernel/cputable.c b/arch/ppc/kernel/cputable.c
index 8aa5e8c69009..d44b7dc5390a 100644
--- a/arch/ppc/kernel/cputable.c
+++ b/arch/ppc/kernel/cputable.c
@@ -838,6 +838,17 @@ struct cpu_spec cpu_specs[] = {
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 	},
+	{	/* 405EP */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x51210000,
+		.cpu_name		= "405EP",
+		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
+			CPU_FTR_USE_TB,
+		.cpu_user_features	= PPC_FEATURE_32 |
+			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+		.icache_bsize		= 32,
+		.dcache_bsize		= 32,
+	},
 
 #endif /* CONFIG_40x */
 #ifdef CONFIG_44x
diff --git a/arch/ppc64/boot/prom.c b/arch/ppc64/boot/prom.c
index 7b607d1862cb..d5218b15824e 100644
--- a/arch/ppc64/boot/prom.c
+++ b/arch/ppc64/boot/prom.c
@@ -11,6 +11,23 @@
 #include <linux/string.h>
 #include <linux/ctype.h>
 
+extern __u32 __div64_32(unsigned long long *dividend, __u32 divisor);
+
+/* The unnecessary pointer compare is there
+ * to check for type safety (n must be 64bit)
+ */
+# define do_div(n,base) ({						\
+	__u32 __base = (base);						\
+	__u32 __rem;							\
+	(void)(((typeof((n)) *)0) == ((unsigned long long *)0));	\
+	if (((n) >> 32) == 0) {						\
+		__rem = (__u32)(n) % __base;				\
+		(n) = (__u32)(n) / __base;				\
+	} else								\
+		__rem = __div64_32(&(n), __base);			\
+	__rem;								\
+ })
+
 int (*prom)(void *);
 
 void *chosen_handle;
@@ -352,7 +369,7 @@ static int skip_atoi(const char **s)
 #define SPECIAL	32		/* 0x */
 #define LARGE	64		/* use 'ABCDEF' instead of 'abcdef' */
 
-static char * number(char * str, long num, int base, int size, int precision, int type)
+static char * number(char * str, unsigned long long num, int base, int size, int precision, int type)
 {
 	char c,sign,tmp[66];
 	const char *digits="0123456789abcdefghijklmnopqrstuvwxyz";
@@ -367,9 +384,9 @@ static char * number(char * str, long num, int base, int size, int precision, in
 	c = (type & ZEROPAD) ? '0' : ' ';
 	sign = 0;
 	if (type & SIGN) {
-		if (num < 0) {
+		if ((signed long long)num < 0) {
 			sign = '-';
-			num = -num;
+			num = - (signed long long)num;
 			size--;
 		} else if (type & PLUS) {
 			sign = '+';
@@ -389,8 +406,7 @@ static char * number(char * str, long num, int base, int size, int precision, in
 	if (num == 0)
 		tmp[i++]='0';
 	else while (num != 0) {
-		tmp[i++] = digits[num % base];
-		num /= base;
+		tmp[i++] = digits[do_div(num, base)];
 	}
 	if (i > precision)
 		precision = i;
@@ -426,7 +442,7 @@ int sprintf(char * buf, const char *fmt, ...);
 int vsprintf(char *buf, const char *fmt, va_list args)
 {
 	int len;
-	unsigned long num;
+	unsigned long long num;
 	int i, base;
 	char * str;
 	const char *s;
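
The do_div() macro added above mirrors the kernel's generic helper: it divides the 64-bit dividend in place and evaluates to the 32-bit remainder, with a fast path when the value already fits in 32 bits. A minimal usage sketch:

	unsigned long long n = 1234567890123ULL;
	unsigned int rem = do_div(n, 10);	/* n becomes n / 10 */
	/* here rem == 3 and n == 123456789012 */

This is also why number() now writes digits[do_div(num, base)] instead of the separate % and /= pair: a plain 64-bit modulo would pull in compiler runtime support the boot wrapper does not link against.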
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index 103daaf73573..5a9f47b18c45 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -233,8 +233,6 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
 	 */
 	preempt_disable();
 	switch (val) {
-	case DIE_IABR_MATCH:
-	case DIE_DABR_MATCH:
 	case DIE_BPT:
 		if (kprobe_handler(args->regs))
 			ret = NOTIFY_STOP;