author	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-20 17:27:58 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-20 17:27:58 -0500
commit	32f9aab8ebd886211a7b3e552753af014c3e5225 (patch)
tree	f95ca8b49a73c6c4baa8cf9d47c65db67a1f5f58
parent	e177bb587ea0fade3d9a86f6667da8d89bea3607 (diff)
parent	1b1c7409b75a8b62906b78b84c8469002072b738 (diff)
Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM updates (part one) from Russell King:

 - MMC patches from Ulf Hansson and Pawel Moll.  These add support for
   DDR mode and the latest variant found on ARM Versatile Express, as
   well as a number of cleanups.

 - A fix to improve the behaviour of ARM's sched_clock()

 - Changes to the ARM ioremap() code.  I'm not convinced with the
   primary arguments for this, but it's been around for a while, and
   people seem happy with it - and the "other" justification for this
   is at http://lkml.org/lkml/2012/12/6/184

 - Add SCHED_HRTICK to ARM's Kconfig

 - Making the ARM SHA/AES code Thumb-2 compatible

 - A collection of other small updates.

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (26 commits)
  ARM: add SCHED_HRTICK config option
  ARM: 7650/1: mm: replace direct access to mm->context.id with new macro
  ARM: 7649/1: mm: mm->context.id fix for big-endian
  ARM: 7648/1: pci: Allow passing per-controller private data
  ARM: 7647/1: pci: Keep pci_common_init() around after init
  ARM: fix warnings introduced by previous patch
  ARM: 7646/1: mm: use static_vm for managing static mapped areas
  ARM: 7645/1: ioremap: introduce an infrastructure for static mapped area
  ARM: 7644/1: vmregion: remove vmregion code entirely
  MAINTAINERS: Re-assert MMCI driver maintainer status
  MAINTAINERS: add additional file for MMCI driver
  MAINTAINERS: add maintainer entry for AMBA serial drivers
  ARM: 7637/1: memory: use SZ_ constants for defining the virtual memory layout
  ARM: 7643/1: sched: correct update_sched_clock()
  ARM: 7635/1: versatile: fix the PCI IRQ regression
  ARM: 7639/1: cache-l2x0: add missed dummy outer_resume entry
  ARM: 7630/1: mmc: mmci: Fixup and cleanup code for DMA handling
  ARM: 7632/1: spinlock: avoid exclusive accesses on unlock() path
  ARM: 7631/1: mmc: mmci: Add new VE MMCI variant
  ARM: 7623/1: mmc: mmci: Fixup clock gating when freq is 0 for ST-variants
  ...
 MAINTAINERS                        |  10
 arch/arm/Kconfig                   |   3
 arch/arm/crypto/aes-armv4.S        |  64
 arch/arm/crypto/sha1-armv4-large.S |  24
 arch/arm/include/asm/mach/pci.h    |   1
 arch/arm/include/asm/memory.h      |   8
 arch/arm/include/asm/outercache.h  |   1
 arch/arm/include/asm/spinlock.h    |  16
 arch/arm/kernel/bios32.c           |   9
 arch/arm/kernel/sched_clock.c      |   4
 arch/arm/kernel/smp.c              |  31
 arch/arm/mach-versatile/core.c     |  15
 arch/arm/mach-versatile/pci.c      |  11
 arch/arm/mm/Makefile               |   2
 arch/arm/mm/context.c              |   3
 arch/arm/mm/ioremap.c              | 135
 arch/arm/mm/mm.h                   |  12
 arch/arm/mm/mmu.c                  |  36
 arch/arm/mm/proc-macros.S          |   5
 arch/arm/mm/proc-v6.S              |   2
 arch/arm/mm/proc-v7-2level.S       |   2
 arch/arm/mm/proc-v7-3level.S       |   2
 arch/arm/mm/vmregion.c             | 205
 arch/arm/mm/vmregion.h             |  31
 drivers/mmc/host/mmci.c            | 306
 drivers/mmc/host/mmci.h            |   3
 26 files changed, 401 insertions(+), 540 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 70f025fbbd33..122418c21653 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -670,8 +670,16 @@ F: drivers/input/serio/ambakmi.*
 F:	include/linux/amba/kmi.h
 
 ARM PRIMECELL MMCI PL180/1 DRIVER
-S:	Orphan
+M:	Russell King <linux@arm.linux.org.uk>
+S:	Maintained
 F:	drivers/mmc/host/mmci.*
+F:	include/linux/amba/mmci.h
+
+ARM PRIMECELL UART PL010 AND PL011 DRIVERS
+M:	Russell King <linux@arm.linux.org.uk>
+S:	Maintained
+F:	drivers/tty/serial/amba-pl01*.c
+F:	include/linux/amba/serial.h
 
 ARM PRIMECELL BUS SUPPORT
 M:	Russell King <linux@arm.linux.org.uk>
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 6c0900a9bf5c..2e56e73a279d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1654,6 +1654,9 @@ config HZ
 	default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
 	default 100
 
+config SCHED_HRTICK
+	def_bool HIGH_RES_TIMERS
+
 config THUMB2_KERNEL
 	bool "Compile the kernel in Thumb-2 mode"
 	depends on CPU_V7 && !CPU_V6 && !CPU_V6K
diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S
index e59b1d505d6c..19d6cd6f29f9 100644
--- a/arch/arm/crypto/aes-armv4.S
+++ b/arch/arm/crypto/aes-armv4.S
@@ -34,8 +34,9 @@
 @ A little glue here to select the correct code below for the ARM CPU
 @ that is being targetted.
 
+#include <linux/linkage.h>
+
 .text
-.code	32
 
 .type	AES_Te,%object
 .align	5
@@ -145,10 +146,8 @@ AES_Te:
 
 @ void AES_encrypt(const unsigned char *in, unsigned char *out,
 @		const AES_KEY *key) {
-.global AES_encrypt
-.type   AES_encrypt,%function
 .align	5
-AES_encrypt:
+ENTRY(AES_encrypt)
 	sub	r3,pc,#8		@ AES_encrypt
 	stmdb	sp!,{r1,r4-r12,lr}
 	mov	r12,r0			@ inp
@@ -239,15 +238,8 @@ AES_encrypt:
 	strb	r6,[r12,#14]
 	strb	r3,[r12,#15]
 #endif
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia	sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e		@ interoperable with Thumb ISA:-)
-#endif
-.size	AES_encrypt,.-AES_encrypt
+ENDPROC(AES_encrypt)
 
 .type   _armv4_AES_encrypt,%function
 .align	2
@@ -386,10 +378,8 @@ _armv4_AES_encrypt:
 	ldr	pc,[sp],#4		@ pop and return
 .size	_armv4_AES_encrypt,.-_armv4_AES_encrypt
 
-.global private_AES_set_encrypt_key
-.type   private_AES_set_encrypt_key,%function
 .align	5
-private_AES_set_encrypt_key:
+ENTRY(private_AES_set_encrypt_key)
 _armv4_AES_set_encrypt_key:
 	sub	r3,pc,#8		@ AES_set_encrypt_key
 	teq	r0,#0
@@ -658,15 +648,11 @@ _armv4_AES_set_encrypt_key:
 
 .Ldone:	mov	r0,#0
 	ldmia	sp!,{r4-r12,lr}
-.Labrt:	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e		@ interoperable with Thumb ISA:-)
-.size	private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
+.Labrt:	mov	pc,lr
+ENDPROC(private_AES_set_encrypt_key)
 
-.global private_AES_set_decrypt_key
-.type   private_AES_set_decrypt_key,%function
 .align	5
-private_AES_set_decrypt_key:
+ENTRY(private_AES_set_decrypt_key)
 	str	lr,[sp,#-4]!		@ push lr
 #if 0
 	@ kernel does both of these in setkey so optimise this bit out by
@@ -748,15 +734,8 @@ private_AES_set_decrypt_key:
 	bne	.Lmix
 
 	mov	r0,#0
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia	sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e		@ interoperable with Thumb ISA:-)
-#endif
-.size	private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
+ENDPROC(private_AES_set_decrypt_key)
 
 .type	AES_Td,%object
 .align	5
@@ -862,10 +841,8 @@ AES_Td:
 
 @ void AES_decrypt(const unsigned char *in, unsigned char *out,
 @		const AES_KEY *key) {
-.global AES_decrypt
-.type   AES_decrypt,%function
 .align	5
-AES_decrypt:
+ENTRY(AES_decrypt)
 	sub	r3,pc,#8		@ AES_decrypt
 	stmdb	sp!,{r1,r4-r12,lr}
 	mov	r12,r0			@ inp
@@ -956,15 +933,8 @@ AES_decrypt:
 	strb	r6,[r12,#14]
 	strb	r3,[r12,#15]
 #endif
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia	sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e		@ interoperable with Thumb ISA:-)
-#endif
-.size	AES_decrypt,.-AES_decrypt
+ENDPROC(AES_decrypt)
 
 .type   _armv4_AES_decrypt,%function
 .align	2
@@ -1064,7 +1034,9 @@ _armv4_AES_decrypt:
 	and	r9,lr,r1,lsr#8
 
 	ldrb	r7,[r10,r7]		@ Td4[s1>>0]
-	ldrb	r1,[r10,r1,lsr#24]	@ Td4[s1>>24]
+ ARM(	ldrb	r1,[r10,r1,lsr#24]  )	@ Td4[s1>>24]
+ THUMB(	add	r1,r10,r1,lsr#24   )	@ Td4[s1>>24]
+ THUMB(	ldrb	r1,[r1]		   )
 	ldrb	r8,[r10,r8]		@ Td4[s1>>16]
 	eor	r0,r7,r0,lsl#24
 	ldrb	r9,[r10,r9]		@ Td4[s1>>8]
@@ -1077,7 +1049,9 @@ _armv4_AES_decrypt:
 	ldrb	r8,[r10,r8]		@ Td4[s2>>0]
 	and	r9,lr,r2,lsr#16
 
-	ldrb	r2,[r10,r2,lsr#24]	@ Td4[s2>>24]
+ ARM(	ldrb	r2,[r10,r2,lsr#24]  )	@ Td4[s2>>24]
+ THUMB(	add	r2,r10,r2,lsr#24   )	@ Td4[s2>>24]
+ THUMB(	ldrb	r2,[r2]		   )
 	eor	r0,r0,r7,lsl#8
 	ldrb	r9,[r10,r9]		@ Td4[s2>>16]
 	eor	r1,r8,r1,lsl#16
@@ -1090,7 +1064,9 @@ _armv4_AES_decrypt:
 	and	r9,lr,r3		@ i2
 
 	ldrb	r9,[r10,r9]		@ Td4[s3>>0]
-	ldrb	r3,[r10,r3,lsr#24]	@ Td4[s3>>24]
+ ARM(	ldrb	r3,[r10,r3,lsr#24]  )	@ Td4[s3>>24]
+ THUMB(	add	r3,r10,r3,lsr#24   )	@ Td4[s3>>24]
+ THUMB(	ldrb	r3,[r3]		   )
 	eor	r0,r0,r7,lsl#16
 	ldr	r7,[r11,#0]
 	eor	r1,r1,r8,lsl#8
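The ARM()/THUMB() markers in the Td4 hunks above select between the two instruction sets at assembly time; Thumb-2 has no ldrb addressing mode with an lsr #24 register shift, hence the two-instruction THUMB() sequence. The helpers come from the kernel's unified-assembly header and look roughly like this (a paraphrased sketch of <asm/unified.h>, not the verbatim header):

	/* Sketch: keep an instruction only when building for the matching
	 * ISA, so one source file carries both ARM and Thumb-2 encodings. */
	#ifdef CONFIG_THUMB2_KERNEL
	#define ARM(instr...)
	#define THUMB(instr...)	instr
	#else
	#define ARM(instr...)	instr
	#define THUMB(instr...)
	#endif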
diff --git a/arch/arm/crypto/sha1-armv4-large.S b/arch/arm/crypto/sha1-armv4-large.S
index 7050ab133b9d..92c6eed7aac9 100644
--- a/arch/arm/crypto/sha1-armv4-large.S
+++ b/arch/arm/crypto/sha1-armv4-large.S
@@ -51,13 +51,12 @@
 @ Profiler-assisted and platform-specific optimization resulted in 10%
 @ improvement on Cortex A8 core and 12.2 cycles per byte.
 
-.text
+#include <linux/linkage.h>
 
-.global	sha1_block_data_order
-.type	sha1_block_data_order,%function
+.text
 
 .align	2
-sha1_block_data_order:
+ENTRY(sha1_block_data_order)
 	stmdb	sp!,{r4-r12,lr}
 	add	r2,r1,r2,lsl#6	@ r2 to point at the end of r1
 	ldmia	r0,{r3,r4,r5,r6,r7}
@@ -194,7 +193,7 @@ sha1_block_data_order:
 	eor	r10,r10,r7,ror#2	@ F_00_19(B,C,D)
 	str	r9,[r14,#-4]!
 	add	r3,r3,r10		@ E+=F_00_19(B,C,D)
-	teq	r14,sp
+	cmp	r14,sp
 	bne	.L_00_15		@ [((11+4)*5+2)*3]
 #if __ARM_ARCH__<7
 	ldrb	r10,[r1,#2]
@@ -374,7 +373,9 @@ sha1_block_data_order:
 	@ F_xx_xx
 	add	r3,r3,r9		@ E+=X[i]
 	add	r3,r3,r10		@ E+=F_20_39(B,C,D)
-	teq	r14,sp			@ preserve carry
+ ARM(	teq	r14,sp		)	@ preserve carry
+ THUMB(	mov	r11,sp		)
+ THUMB(	teq	r14,r11		)	@ preserve carry
 	bne	.L_20_39_or_60_79	@ [+((12+3)*5+2)*4]
 	bcs	.L_done			@ [+((12+3)*5+2)*4], spare 300 bytes
 
@@ -466,7 +467,7 @@ sha1_block_data_order:
 	add	r3,r3,r9		@ E+=X[i]
 	add	r3,r3,r10		@ E+=F_40_59(B,C,D)
 	add	r3,r3,r11,ror#2
-	teq	r14,sp
+	cmp	r14,sp
 	bne	.L_40_59		@ [+((12+5)*5+2)*4]
 
 	ldr	r8,.LK_60_79
@@ -485,19 +486,12 @@ sha1_block_data_order:
 	teq	r1,r2
 	bne	.Lloop			@ [+18], total 1307
 
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia	sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e		@ interoperable with Thumb ISA:-)
-#endif
 .align	2
 .LK_00_19:	.word	0x5a827999
 .LK_20_39:	.word	0x6ed9eba1
 .LK_40_59:	.word	0x8f1bbcdc
 .LK_60_79:	.word	0xca62c1d6
-.size	sha1_block_data_order,.-sha1_block_data_order
+ENDPROC(sha1_block_data_order)
 .asciz	"SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
 .align	2
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index db9fedb57f2c..5cf2e979b4be 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -23,6 +23,7 @@ struct hw_pci {
 #endif
 	struct pci_ops	*ops;
 	int		nr_controllers;
+	void		**private_data;
 	int		(*setup)(int nr, struct pci_sys_data *);
 	struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
 	void		(*preinit)(void);
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 1c4df27f9332..64c770d24198 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -36,23 +36,23 @@
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
  */
 #define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)
-#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
+#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
 #define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)
 
 /*
  * The maximum size of a 26-bit user space task.
  */
-#define TASK_SIZE_26		UL(0x04000000)
+#define TASK_SIZE_26		(UL(1) << 26)
 
 /*
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 32MB of the kernel text.
  */
 #ifndef CONFIG_THUMB2_KERNEL
-#define MODULES_VADDR		(PAGE_OFFSET - 16*1024*1024)
+#define MODULES_VADDR		(PAGE_OFFSET - SZ_16M)
 #else
 /* smaller range for Thumb-2 symbols relocation (2^24)*/
-#define MODULES_VADDR		(PAGE_OFFSET - 8*1024*1024)
+#define MODULES_VADDR		(PAGE_OFFSET - SZ_8M)
 #endif
 
 #if TASK_SIZE > MODULES_VADDR
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 53426c66352a..12f71a190422 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -92,6 +92,7 @@ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
 static inline void outer_flush_all(void) { }
 static inline void outer_inv_all(void) { }
 static inline void outer_disable(void) { }
+static inline void outer_resume(void) { }
 
 #endif
 
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index b4ca707d0a69..6220e9fdf4c7 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -119,22 +119,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	unsigned long tmp;
-	u32 slock;
-
 	smp_mb();
-
-	__asm__ __volatile__(
-"	mov	%1, #1\n"
-"1:	ldrex	%0, [%2]\n"
-"	uadd16	%0, %0, %1\n"
-"	strex	%1, %0, [%2]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (slock), "=&r" (tmp)
-	: "r" (&lock->slock)
-	: "cc");
-
+	lock->tickets.owner++;
 	dsb_sev();
 }
 
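The non-exclusive unlock above is safe because ARM spinlocks are ticket locks and only the current owner ever advances the owner half-word; no other CPU writes that field concurrently, so ldrex/strex is unnecessary. A sketch of the layout this relies on (approximating arch_spinlock_t of this era; little-endian field order shown, big-endian builds swap owner/next):

	/* Sketch: waiters spin until tickets.owner equals the ticket they
	 * drew from tickets.next, so unlock is a plain owner increment. */
	typedef struct {
		union {
			u32 slock;
			struct __raw_tickets {
				u16 owner;	/* ticket now being served */
				u16 next;	/* next ticket to hand out */
			} tickets;
		};
	} arch_spinlock_t;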
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 379cf3292390..a1f73b502ef0 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -413,7 +413,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 	return irq;
 }
 
-static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
+static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
 {
 	int ret;
 	struct pci_host_bridge_window *window;
@@ -445,7 +445,7 @@ static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
 	return 0;
 }
 
-static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
+static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 {
 	struct pci_sys_data *sys = NULL;
 	int ret;
@@ -464,6 +464,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 		sys->map_irq = hw->map_irq;
 		INIT_LIST_HEAD(&sys->resources);
 
+		if (hw->private_data)
+			sys->private_data = hw->private_data[nr];
+
 		ret = hw->setup(nr, sys);
 
 		if (ret > 0) {
@@ -493,7 +496,7 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 	}
 }
 
-void __init pci_common_init(struct hw_pci *hw)
+void pci_common_init(struct hw_pci *hw)
 {
 	struct pci_sys_data *sys;
 	LIST_HEAD(head);
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index fc6692e2b603..bd6f56b9ec21 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -93,11 +93,11 @@ static void notrace update_sched_clock(void)
 	 * detectable in cyc_to_fixed_sched_clock().
 	 */
 	raw_local_irq_save(flags);
-	cd.epoch_cyc = cyc;
+	cd.epoch_cyc_copy = cyc;
 	smp_wmb();
 	cd.epoch_ns = ns;
 	smp_wmb();
-	cd.epoch_cyc_copy = cyc;
+	cd.epoch_cyc = cyc;
 	raw_local_irq_restore(flags);
 }
 
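Swapping the two stores matters because the lock-free reader detects an in-progress update by comparing epoch_cyc with epoch_cyc_copy; writing the copy first and epoch_cyc last keeps the two words unequal for the whole update. The paired read side looks roughly like this (a paraphrase of cyc_to_sched_clock() in the same file, not verbatim):

	/* Sketch: retry while the two epoch words disagree, i.e. while
	 * update_sched_clock() is between its first and last store. */
	u64 epoch_ns;
	u32 epoch_cyc;

	do {
		epoch_cyc = cd.epoch_cyc;
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);
	/* result: epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, ...) */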
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 84f4cbf652e5..365c8d92e2eb 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -125,18 +125,6 @@ void __init smp_init_cpus(void)
 	smp_ops.smp_init_cpus();
 }
 
-static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
-{
-	if (smp_ops.smp_prepare_cpus)
-		smp_ops.smp_prepare_cpus(max_cpus);
-}
-
-static void __cpuinit platform_secondary_init(unsigned int cpu)
-{
-	if (smp_ops.smp_secondary_init)
-		smp_ops.smp_secondary_init(cpu);
-}
-
 int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	if (smp_ops.smp_boot_secondary)
@@ -154,12 +142,6 @@ static int platform_cpu_kill(unsigned int cpu)
 	return 1;
 }
 
-static void platform_cpu_die(unsigned int cpu)
-{
-	if (smp_ops.cpu_die)
-		smp_ops.cpu_die(cpu);
-}
-
 static int platform_cpu_disable(unsigned int cpu)
 {
 	if (smp_ops.cpu_disable)
@@ -257,7 +239,8 @@ void __ref cpu_die(void)
 	 * actual CPU shutdown procedure is at least platform (if not
 	 * CPU) specific.
 	 */
-	platform_cpu_die(cpu);
+	if (smp_ops.cpu_die)
+		smp_ops.cpu_die(cpu);
 
 	/*
 	 * Do not return to the idle loop - jump back to the secondary
@@ -324,7 +307,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	/*
 	 * Give the platform a chance to do its own initialisation.
 	 */
-	platform_secondary_init(cpu);
+	if (smp_ops.smp_secondary_init)
+		smp_ops.smp_secondary_init(cpu);
 
 	notify_cpu_starting(cpu);
 
@@ -399,8 +383,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	/*
 	 * Initialise the present map, which describes the set of CPUs
 	 * actually populated at the present time. A platform should
-	 * re-initialize the map in platform_smp_prepare_cpus() if
-	 * present != possible (e.g. physical hotplug).
+	 * re-initialize the map in the platform's smp_prepare_cpus()
+	 * if present != possible (e.g. physical hotplug).
 	 */
 	init_cpu_present(cpu_possible_mask);
 
@@ -408,7 +392,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		 * Initialise the SCU if there are more than one CPU
 		 * and let them know where to start.
 		 */
-		platform_smp_prepare_cpus(max_cpus);
+		if (smp_ops.smp_prepare_cpus)
+			smp_ops.smp_prepare_cpus(max_cpus);
 	}
 }
 
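These hunks delete the trivial platform_*() wrappers and test the hook pointers directly at each call site. The hooks come from the per-platform ops table, roughly shaped like this (an approximation of struct smp_operations from <asm/smp.h> of this era):

	/* Sketch: platforms fill in only the callbacks they implement,
	 * which is why every call above is guarded by a NULL check. */
	struct smp_operations {
		void (*smp_init_cpus)(void);
		void (*smp_prepare_cpus)(unsigned int max_cpus);
		void (*smp_secondary_init)(unsigned int cpu);
		int  (*smp_boot_secondary)(unsigned int cpu, struct task_struct *idle);
	#ifdef CONFIG_HOTPLUG_CPU
		int  (*cpu_kill)(unsigned int cpu);
		void (*cpu_die)(unsigned int cpu);
		int  (*cpu_disable)(unsigned int cpu);
	#endif
	};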
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 5d5929450366..a78827b70270 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -36,6 +36,7 @@
 #include <linux/gfp.h>
 #include <linux/clkdev.h>
 #include <linux/mtd/physmap.h>
+#include <linux/bitops.h>
 
 #include <asm/irq.h>
 #include <asm/hardware/arm_timer.h>
@@ -65,16 +66,28 @@
 #define VA_VIC_BASE		__io_address(VERSATILE_VIC_BASE)
 #define VA_SIC_BASE		__io_address(VERSATILE_SIC_BASE)
 
+/* These PIC IRQs are valid in each configuration */
+#define PIC_VALID_ALL		BIT(SIC_INT_KMI0) | BIT(SIC_INT_KMI1) | \
+				BIT(SIC_INT_SCI3) | BIT(SIC_INT_UART3) | \
+				BIT(SIC_INT_CLCD) | BIT(SIC_INT_TOUCH) | \
+				BIT(SIC_INT_KEYPAD) | BIT(SIC_INT_DoC) | \
+				BIT(SIC_INT_USB) | BIT(SIC_INT_PCI0) | \
+				BIT(SIC_INT_PCI1) | BIT(SIC_INT_PCI2) | \
+				BIT(SIC_INT_PCI3)
 #if 1
 #define IRQ_MMCI0A	IRQ_VICSOURCE22
 #define IRQ_AACI	IRQ_VICSOURCE24
 #define IRQ_ETH		IRQ_VICSOURCE25
 #define PIC_MASK	0xFFD00000
+#define PIC_VALID	PIC_VALID_ALL
 #else
 #define IRQ_MMCI0A	IRQ_SIC_MMCI0A
 #define IRQ_AACI	IRQ_SIC_AACI
 #define IRQ_ETH		IRQ_SIC_ETH
 #define PIC_MASK	0
+#define PIC_VALID	PIC_VALID_ALL | BIT(SIC_INT_MMCI0A) | \
+			BIT(SIC_INT_MMCI1A) | BIT(SIC_INT_AACI) | \
+			BIT(SIC_INT_ETH)
 #endif
 
 /* Lookup table for finding a DT node that represents the vic instance */
@@ -102,7 +115,7 @@ void __init versatile_init_irq(void)
 					VERSATILE_SIC_BASE);
 
 	fpga_irq_init(VA_SIC_BASE, "SIC", IRQ_SIC_START,
-		IRQ_VICSOURCE31, ~PIC_MASK, np);
+		IRQ_VICSOURCE31, PIC_VALID, np);
 
 	/*
 	 * Interrupts on secondary controller from 0 to 8 are routed to
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
index 2f84f4094f13..e92e5e0705bc 100644
--- a/arch/arm/mach-versatile/pci.c
+++ b/arch/arm/mach-versatile/pci.c
@@ -23,6 +23,7 @@
 #include <linux/io.h>
 
 #include <mach/hardware.h>
+#include <mach/irqs.h>
 #include <asm/irq.h>
 #include <asm/mach/pci.h>
 
@@ -327,12 +328,12 @@ static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 	int irq;
 
 	/* slot,  pin,	irq
-	 *  24     1     27
-	 *  25     1     28
-	 *  26     1     29
-	 *  27     1     30
+	 *  24     1     IRQ_SIC_PCI0
+	 *  25     1     IRQ_SIC_PCI1
+	 *  26     1     IRQ_SIC_PCI2
+	 *  27     1     IRQ_SIC_PCI3
 	 */
-	irq = 27 + ((slot - 24 + pin - 1) & 3);
+	irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3);
 
 	return irq;
 }
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 8a9c4cb50a93..4e333fa2756f 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
 				   iomap.o
 
 obj-$(CONFIG_MMU)		+= fault-armv.o flush.o idmap.o ioremap.o \
-				   mmap.o pgd.o mmu.o vmregion.o
+				   mmap.o pgd.o mmu.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y				+= nommu.o
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index bc4a5e9ebb78..7a0511191f6b 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -34,6 +34,9 @@
  * The ASID is used to tag entries in the CPU caches and TLBs.
  * The context ID is used by debuggers and trace logic, and
  * should be unique within all running processes.
+ *
+ * In big endian operation, the two 32 bit words are swapped if accessed
+ * by non 64-bit operations.
  */
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
 #define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 88fd86cf3d9a..04d9006eab1f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -39,6 +39,70 @@
 #include <asm/mach/pci.h>
 #include "mm.h"
 
+
+LIST_HEAD(static_vmlist);
+
+static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
+			size_t size, unsigned int mtype)
+{
+	struct static_vm *svm;
+	struct vm_struct *vm;
+
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+			continue;
+
+		if (vm->phys_addr > paddr ||
+			paddr + size - 1 > vm->phys_addr + vm->size - 1)
+			continue;
+
+		return svm;
+	}
+
+	return NULL;
+}
+
+struct static_vm *find_static_vm_vaddr(void *vaddr)
+{
+	struct static_vm *svm;
+	struct vm_struct *vm;
+
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
+
+		/* static_vmlist is ascending order */
+		if (vm->addr > vaddr)
+			break;
+
+		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
+			return svm;
+	}
+
+	return NULL;
+}
+
+void __init add_static_vm_early(struct static_vm *svm)
+{
+	struct static_vm *curr_svm;
+	struct vm_struct *vm;
+	void *vaddr;
+
+	vm = &svm->vm;
+	vm_area_add_early(vm);
+	vaddr = vm->addr;
+
+	list_for_each_entry(curr_svm, &static_vmlist, list) {
+		vm = &curr_svm->vm;
+
+		if (vm->addr > vaddr)
+			break;
+	}
+	list_add_tail(&svm->list, &curr_svm->list);
+}
+
 int ioremap_page(unsigned long virt, unsigned long phys,
 		  const struct mem_type *mtype)
 {
@@ -197,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	const struct mem_type *type;
 	int err;
 	unsigned long addr;
-	struct vm_struct * area;
+	struct vm_struct *area;
+	phys_addr_t paddr = __pfn_to_phys(pfn);
 
 #ifndef CONFIG_ARM_LPAE
 	/*
 	 * High mappings must be supersection aligned
 	 */
-	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
+	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
 		return NULL;
 #endif
 
@@ -219,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	/*
 	 * Try to reuse one of the static mapping whenever possible.
 	 */
-	read_lock(&vmlist_lock);
-	for (area = vmlist; area; area = area->next) {
-		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
-			break;
-		if (!(area->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
-			continue;
-		if (__phys_to_pfn(area->phys_addr) > pfn ||
-		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
-			continue;
-		/* we can drop the lock here as we know *area is static */
-		read_unlock(&vmlist_lock);
-		addr = (unsigned long)area->addr;
-		addr += __pfn_to_phys(pfn) - area->phys_addr;
-		return (void __iomem *) (offset + addr);
+	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
+		struct static_vm *svm;
+
+		svm = find_static_vm_paddr(paddr, size, mtype);
+		if (svm) {
+			addr = (unsigned long)svm->vm.addr;
+			addr += paddr - svm->vm.phys_addr;
+			return (void __iomem *) (offset + addr);
+		}
 	}
-	read_unlock(&vmlist_lock);
 
 	/*
 	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
@@ -248,21 +305,21 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;
-	area->phys_addr = __pfn_to_phys(pfn);
+	area->phys_addr = paddr;
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 	if (DOMAIN_IO == 0 &&
 	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
 	       cpu_is_xsc3()) && pfn >= 0x100000 &&
-	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
+	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_supersections(addr, pfn, size, type);
-	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_sections(addr, pfn, size, type);
 	} else
 #endif
-		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
+		err = ioremap_page_range(addr, addr + size, paddr,
 					 __pgprot(type->prot_pte));
 
 	if (err) {
@@ -346,34 +403,28 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
 void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-	struct vm_struct *vm;
+	struct static_vm *svm;
+
+	/* If this is a static mapping, we must leave it alone */
+	svm = find_static_vm_vaddr(addr);
+	if (svm)
+		return;
 
-	read_lock(&vmlist_lock);
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (vm->addr > addr)
-			break;
-		if (!(vm->flags & VM_IOREMAP))
-			continue;
-		/* If this is a static mapping we must leave it alone */
-		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
-		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
-			read_unlock(&vmlist_lock);
-			return;
-		}
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+	{
+		struct vm_struct *vm;
+
+		vm = find_vm_area(addr);
+
 		/*
 		 * If this is a section based mapping we need to handle it
 		 * specially as the VM subsystem does not know how to handle
 		 * such a beast.
 		 */
-		if ((vm->addr == addr) &&
-		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
+		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
 			unmap_area_sections((unsigned long)vm->addr, vm->size);
-			break;
-		}
-#endif
 	}
-	read_unlock(&vmlist_lock);
+#endif
 
 	vunmap(addr);
 }
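Net effect for callers: an ioremap() that falls entirely inside a boot-time mapping is now resolved through the dedicated static_vmlist rather than a locked scan of the global vmlist, and __iounmap() leaves such mappings untouched. A hypothetical usage (base address and register offset invented for illustration):

	/* Hypothetical driver on a platform whose iotable_init() already
	 * mapped 0x10000000: ioremap() returns the boot-time virtual
	 * address via find_static_vm_paddr() without new page tables. */
	void __iomem *regs = ioremap(0x10000000, SZ_4K);

	writel(0x1, regs + 0x04);	/* invented register offset */
	iounmap(regs);			/* no-op: find_static_vm_vaddr() hit */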
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index a8ee92da3544..d5a4e9ad8f0f 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -1,4 +1,6 @@
 #ifdef CONFIG_MMU
+#include <linux/list.h>
+#include <linux/vmalloc.h>
 
 /* the upper-most page table pointer */
 extern pmd_t *top_pmd;
@@ -65,6 +67,16 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 /* consistent regions used by dma_alloc_attrs() */
 #define VM_ARM_DMA_CONSISTENT	0x20000000
 
+
+struct static_vm {
+	struct vm_struct vm;
+	struct list_head list;
+};
+
+extern struct list_head static_vmlist;
+extern struct static_vm *find_static_vm_vaddr(void *vaddr);
+extern __init void add_static_vm_early(struct static_vm *svm);
+
 #endif
 
 #ifdef CONFIG_ZONE_DMA
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ce328c7f5c94..69bb735474fe 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -757,21 +757,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 {
 	struct map_desc *md;
 	struct vm_struct *vm;
+	struct static_vm *svm;
 
 	if (!nr)
 		return;
 
-	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
 
 	for (md = io_desc; nr; md++, nr--) {
 		create_mapping(md);
+
+		vm = &svm->vm;
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 		vm->phys_addr = __pfn_to_phys(md->pfn);
 		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
-		vm_area_add_early(vm++);
+		add_static_vm_early(svm++);
 	}
 }
 
@@ -779,13 +782,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
 			     void *caller)
 {
 	struct vm_struct *vm;
+	struct static_vm *svm;
+
+	svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
 
-	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm = &svm->vm;
 	vm->addr = (void *)addr;
 	vm->size = size;
 	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
 	vm->caller = caller;
-	vm_area_add_early(vm);
+	add_static_vm_early(svm);
 }
 
 #ifndef CONFIG_ARM_LPAE
@@ -810,14 +816,13 @@ static void __init pmd_empty_section_gap(unsigned long addr)
 
 static void __init fill_pmd_gaps(void)
 {
+	struct static_vm *svm;
 	struct vm_struct *vm;
 	unsigned long addr, next = 0;
 	pmd_t *pmd;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
-			continue;
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
 		addr = (unsigned long)vm->addr;
 		if (addr < next)
 			continue;
@@ -857,19 +862,12 @@ static void __init fill_pmd_gaps(void)
 #if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
 static void __init pci_reserve_io(void)
 {
-	struct vm_struct *vm;
-	unsigned long addr;
+	struct static_vm *svm;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		addr = (unsigned long)vm->addr;
-		addr &= ~(SZ_2M - 1);
-		if (addr == PCI_IO_VIRT_BASE)
-			return;
+	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
+	if (svm)
+		return;
 
-	}
 	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
 }
 #else
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index eb6aa73bc8b7..f9a0aa725ea9 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -38,9 +38,14 @@
 
 /*
  * mmid - get context id from mm pointer (mm->context.id)
+ * note, this field is 64bit, so in big-endian the two words are swapped too.
  */
 	.macro	mmid, rd, rn
+#ifdef __ARMEB__
+	ldr	\rd, [\rn, #MM_CONTEXT_ID + 4 ]
+#else
 	ldr	\rd, [\rn, #MM_CONTEXT_ID]
+#endif
 	.endm
 
 /*
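The +4 in the big-endian path exists because mm->context.id grew to 64 bits while mmid still performs a 32-bit load: the low word of a 64-bit value sits at byte offset 4 on big-endian, offset 0 on little-endian. The same idea in C (an illustrative sketch, not kernel code):

	#include <stdint.h>

	/* Illustration: pick the byte offset of the low 32 bits of a
	 * 64-bit field, mirroring the #ifdef __ARMEB__ in the macro. */
	static inline uint32_t context_id_low32(const uint64_t *id)
	{
	#ifdef __ARMEB__		/* big-endian ARM */
		return ((const uint32_t *)id)[1];
	#else				/* little-endian ARM */
		return ((const uint32_t *)id)[0];
	#endif
	}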
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 09c5233f4dfc..bcaaa8de9325 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -101,7 +101,7 @@ ENTRY(cpu_v6_dcache_clean_area)
 ENTRY(cpu_v6_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 6d98c13ab827..78f520bc0e99 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -40,7 +40,7 @@
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 #ifdef CONFIG_ARM_ERRATA_430973
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 7b56386f9496..50bf1dafc9ea 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -47,7 +47,7 @@
  */
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	and	r3, r1, #0xff
 	mov	r3, r3, lsl #(48 - 32)		@ ASID
 	mcrr	p15, 0, r0, r3, c2		@ set TTB 0
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
deleted file mode 100644
index a631016e1f8f..000000000000
--- a/arch/arm/mm/vmregion.c
+++ /dev/null
@@ -1,205 +0,0 @@
-#include <linux/fs.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-
-#include "vmregion.h"
-
-/*
- * VM region handling support.
- *
- * This should become something generic, handling VM region allocations for
- * vmalloc and similar (ioremap, module space, etc).
- *
- * I envisage vmalloc()'s supporting vm_struct becoming:
- *
- *  struct vm_struct {
- *    struct vmregion	region;
- *    unsigned long	flags;
- *    struct page	**pages;
- *    unsigned int	nr_pages;
- *    unsigned long	phys_addr;
- *  };
- *
- * get_vm_area() would then call vmregion_alloc with an appropriate
- * struct vmregion head (eg):
- *
- *  struct vmregion vmalloc_head = {
- *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
- *	.vm_start	= VMALLOC_START,
- *	.vm_end		= VMALLOC_END,
- *  };
- *
- * However, vmalloc_head.vm_start is variable (typically, it is dependent on
- * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
- * would have to initialise this each time prior to calling vmregion_alloc().
- */
-
-struct arm_vmregion *
-arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
-		   size_t size, gfp_t gfp, const void *caller)
-{
-	unsigned long start = head->vm_start, addr = head->vm_end;
-	unsigned long flags;
-	struct arm_vmregion *c, *new;
-
-	if (head->vm_end - head->vm_start < size) {
-		printk(KERN_WARNING "%s: allocation too big (requested %#x)\n",
-			__func__, size);
-		goto out;
-	}
-
-	new = kmalloc(sizeof(struct arm_vmregion), gfp);
-	if (!new)
-		goto out;
-
-	new->caller = caller;
-
-	spin_lock_irqsave(&head->vm_lock, flags);
-
-	addr = rounddown(addr - size, align);
-	list_for_each_entry_reverse(c, &head->vm_list, vm_list) {
-		if (addr >= c->vm_end)
-			goto found;
-		addr = rounddown(c->vm_start - size, align);
-		if (addr < start)
-			goto nospc;
-	}
-
- found:
-	/*
-	 * Insert this entry after the one we found.
-	 */
-	list_add(&new->vm_list, &c->vm_list);
-	new->vm_start = addr;
-	new->vm_end = addr + size;
-	new->vm_active = 1;
-
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-	return new;
-
- nospc:
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-	kfree(new);
- out:
-	return NULL;
-}
-
-static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
-{
-	struct arm_vmregion *c;
-
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if (c->vm_active && c->vm_start == addr)
-			goto out;
-	}
-	c = NULL;
- out:
-	return c;
-}
-
-struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
-{
-	struct arm_vmregion *c;
-	unsigned long flags;
-
-	spin_lock_irqsave(&head->vm_lock, flags);
-	c = __arm_vmregion_find(head, addr);
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-	return c;
-}
-
-struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr)
-{
-	struct arm_vmregion *c;
-	unsigned long flags;
-
-	spin_lock_irqsave(&head->vm_lock, flags);
-	c = __arm_vmregion_find(head, addr);
-	if (c)
-		c->vm_active = 0;
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-	return c;
-}
-
-void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&head->vm_lock, flags);
-	list_del(&c->vm_list);
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-
-	kfree(c);
-}
-
-#ifdef CONFIG_PROC_FS
-static int arm_vmregion_show(struct seq_file *m, void *p)
-{
-	struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list);
-
-	seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end,
-		c->vm_end - c->vm_start);
-	if (c->caller)
-		seq_printf(m, " %pS", (void *)c->caller);
-	seq_putc(m, '\n');
-	return 0;
-}
-
-static void *arm_vmregion_start(struct seq_file *m, loff_t *pos)
-{
-	struct arm_vmregion_head *h = m->private;
-	spin_lock_irq(&h->vm_lock);
-	return seq_list_start(&h->vm_list, *pos);
-}
-
-static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos)
-{
-	struct arm_vmregion_head *h = m->private;
-	return seq_list_next(p, &h->vm_list, pos);
-}
-
-static void arm_vmregion_stop(struct seq_file *m, void *p)
-{
-	struct arm_vmregion_head *h = m->private;
-	spin_unlock_irq(&h->vm_lock);
-}
-
-static const struct seq_operations arm_vmregion_ops = {
-	.start	= arm_vmregion_start,
-	.stop	= arm_vmregion_stop,
-	.next	= arm_vmregion_next,
-	.show	= arm_vmregion_show,
-};
-
-static int arm_vmregion_open(struct inode *inode, struct file *file)
-{
-	struct arm_vmregion_head *h = PDE(inode)->data;
-	int ret = seq_open(file, &arm_vmregion_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-		m->private = h;
-	}
-	return ret;
-}
-
-static const struct file_operations arm_vmregion_fops = {
-	.open	= arm_vmregion_open,
-	.read	= seq_read,
-	.llseek	= seq_lseek,
-	.release = seq_release,
-};
-
-int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
-{
-	proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h);
-	return 0;
-}
-#else
-int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
-{
-	return 0;
-}
-#endif
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
deleted file mode 100644
index 0f5a5f2a2c7b..000000000000
--- a/arch/arm/mm/vmregion.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef VMREGION_H
-#define VMREGION_H
-
-#include <linux/spinlock.h>
-#include <linux/list.h>
-
-struct page;
-
-struct arm_vmregion_head {
-	spinlock_t		vm_lock;
-	struct list_head	vm_list;
-	unsigned long		vm_start;
-	unsigned long		vm_end;
-};
-
-struct arm_vmregion {
-	struct list_head	vm_list;
-	unsigned long		vm_start;
-	unsigned long		vm_end;
-	int			vm_active;
-	const void		*caller;
-};
-
-struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *);
-struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
-struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
-void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
-
-int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *);
-
-#endif
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 150772395cc6..372e921389c8 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -20,6 +20,7 @@
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/log2.h>
+#include <linux/mmc/pm.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
 #include <linux/amba/bus.h>
@@ -59,6 +60,7 @@ static unsigned int fmax = 515633;
  * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
  * @pwrreg_powerup: power up value for MMCIPOWER register
  * @signal_direction: input/out direction of bus signals can be indicated
+ * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
  */
 struct variant_data {
 	unsigned int		clkreg;
@@ -71,6 +73,7 @@ struct variant_data {
 	bool			blksz_datactrl16;
 	u32			pwrreg_powerup;
 	bool			signal_direction;
+	bool			pwrreg_clkgate;
 };
 
 static struct variant_data variant_arm = {
@@ -87,6 +90,14 @@ static struct variant_data variant_arm_extended_fifo = {
 	.pwrreg_powerup		= MCI_PWR_UP,
 };
 
+static struct variant_data variant_arm_extended_fifo_hwfc = {
+	.fifosize		= 128 * 4,
+	.fifohalfsize		= 64 * 4,
+	.clkreg_enable		= MCI_ARM_HWFCEN,
+	.datalength_bits	= 16,
+	.pwrreg_powerup		= MCI_PWR_UP,
+};
+
 static struct variant_data variant_u300 = {
 	.fifosize		= 16 * 4,
 	.fifohalfsize		= 8 * 4,
@@ -95,6 +106,7 @@ static struct variant_data variant_u300 = {
 	.sdio			= true,
 	.pwrreg_powerup		= MCI_PWR_ON,
 	.signal_direction	= true,
+	.pwrreg_clkgate		= true,
 };
 
100static struct variant_data variant_nomadik = { 112static struct variant_data variant_nomadik = {
@@ -106,6 +118,7 @@ static struct variant_data variant_nomadik = {
106 .st_clkdiv = true, 118 .st_clkdiv = true,
107 .pwrreg_powerup = MCI_PWR_ON, 119 .pwrreg_powerup = MCI_PWR_ON,
108 .signal_direction = true, 120 .signal_direction = true,
121 .pwrreg_clkgate = true,
109}; 122};
110 123
111static struct variant_data variant_ux500 = { 124static struct variant_data variant_ux500 = {
@@ -118,6 +131,7 @@ static struct variant_data variant_ux500 = {
118 .st_clkdiv = true, 131 .st_clkdiv = true,
119 .pwrreg_powerup = MCI_PWR_ON, 132 .pwrreg_powerup = MCI_PWR_ON,
120 .signal_direction = true, 133 .signal_direction = true,
134 .pwrreg_clkgate = true,
121}; 135};
122 136
123static struct variant_data variant_ux500v2 = { 137static struct variant_data variant_ux500v2 = {
@@ -131,9 +145,28 @@ static struct variant_data variant_ux500v2 = {
131 .blksz_datactrl16 = true, 145 .blksz_datactrl16 = true,
132 .pwrreg_powerup = MCI_PWR_ON, 146 .pwrreg_powerup = MCI_PWR_ON,
133 .signal_direction = true, 147 .signal_direction = true,
148 .pwrreg_clkgate = true,
134}; 149};
135 150
136/* 151/*
152 * Validate mmc prerequisites
153 */
154static int mmci_validate_data(struct mmci_host *host,
155 struct mmc_data *data)
156{
157 if (!data)
158 return 0;
159
160 if (!is_power_of_2(data->blksz)) {
161 dev_err(mmc_dev(host->mmc),
162 "unsupported block size (%d bytes)\n", data->blksz);
163 return -EINVAL;
164 }
165
166 return 0;
167}
168
169/*
137 * This must be called with host->lock held 170 * This must be called with host->lock held
138 */ 171 */
139static void mmci_write_clkreg(struct mmci_host *host, u32 clk) 172static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
@@ -202,6 +235,9 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
 	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
 		clk |= MCI_ST_8BIT_BUS;
 
+	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+		clk |= MCI_ST_UX500_NEG_EDGE;
+
 	mmci_write_clkreg(host, clk);
 }
 
@@ -352,10 +388,33 @@ static inline void mmci_dma_release(struct mmci_host *host)
 	host->dma_rx_channel = host->dma_tx_channel = NULL;
 }
 
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+	dmaengine_terminate_all(host->dma_current);
+	host->dma_current = NULL;
+	host->dma_desc_current = NULL;
+	host->data->host_cookie = 0;
+}
+
 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 {
-	struct dma_chan *chan = host->dma_current;
+	struct dma_chan *chan;
 	enum dma_data_direction dir;
+
+	if (data->flags & MMC_DATA_READ) {
+		dir = DMA_FROM_DEVICE;
+		chan = host->dma_rx_channel;
+	} else {
+		dir = DMA_TO_DEVICE;
+		chan = host->dma_tx_channel;
+	}
+
+	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+}
+
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
 	u32 status;
 	int i;
 
@@ -374,19 +433,13 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
 	 */
 	if (status & MCI_RXDATAAVLBLMASK) {
-		dmaengine_terminate_all(chan);
+		mmci_dma_data_error(host);
 		if (!data->error)
 			data->error = -EIO;
 	}
 
-	if (data->flags & MMC_DATA_WRITE) {
-		dir = DMA_TO_DEVICE;
-	} else {
-		dir = DMA_FROM_DEVICE;
-	}
-
 	if (!data->host_cookie)
-		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+		mmci_dma_unmap(host, data);
 
 	/*
 	 * Use of DMA with scatter-gather is impossible.
@@ -396,16 +449,15 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
 		mmci_dma_release(host);
 	}
-}
 
-static void mmci_dma_data_error(struct mmci_host *host)
-{
-	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
-	dmaengine_terminate_all(host->dma_current);
+	host->dma_current = NULL;
+	host->dma_desc_current = NULL;
 }
 
-static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
-			      struct mmci_host_next *next)
+/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
+static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+				struct dma_chan **dma_chan,
+				struct dma_async_tx_descriptor **dma_desc)
 {
 	struct variant_data *variant = host->variant;
 	struct dma_slave_config conf = {
@@ -423,16 +475,6 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
         enum dma_data_direction buffer_dirn;
         int nr_sg;
 
-        /* Check if next job is already prepared */
-        if (data->host_cookie && !next &&
-            host->dma_current && host->dma_desc_current)
-                return 0;
-
-        if (!next) {
-                host->dma_current = NULL;
-                host->dma_desc_current = NULL;
-        }
-
         if (data->flags & MMC_DATA_READ) {
                 conf.direction = DMA_DEV_TO_MEM;
                 buffer_dirn = DMA_FROM_DEVICE;
@@ -462,29 +504,41 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
         if (!desc)
                 goto unmap_exit;
 
-        if (next) {
-                next->dma_chan = chan;
-                next->dma_desc = desc;
-        } else {
-                host->dma_current = chan;
-                host->dma_desc_current = desc;
-        }
+        *dma_chan = chan;
+        *dma_desc = desc;
 
         return 0;
 
  unmap_exit:
-        if (!next)
-                dmaengine_terminate_all(chan);
         dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
         return -ENOMEM;
 }
 
+static inline int mmci_dma_prep_data(struct mmci_host *host,
+                                     struct mmc_data *data)
+{
+        /* Check if the next job is already prepared. */
+        if (host->dma_current && host->dma_desc_current)
+                return 0;
+
+        /* No job was prepared, so do it now. */
+        return __mmci_dma_prep_data(host, data, &host->dma_current,
+                                    &host->dma_desc_current);
+}
+
+static inline int mmci_dma_prep_next(struct mmci_host *host,
+                                     struct mmc_data *data)
+{
+        struct mmci_host_next *nd = &host->next_data;
+        return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
+}
+
 static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
 {
         int ret;
         struct mmc_data *data = host->data;
 
-        ret = mmci_dma_prep_data(host, host->data, NULL);
+        ret = mmci_dma_prep_data(host, host->data);
         if (ret)
                 return ret;
 
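The hunk above splits the old dual-purpose mmci_dma_prep_data() into one worker, __mmci_dma_prep_data(), and two thin wrappers. A minimal illustrative helper, using only names visible in this diff, shows how the two wrappers divide the work (this function is a sketch, not part of the patch):

    /* Sketch: the synchronous wrapper fills host->dma_current and
     * host->dma_desc_current (unless ->pre_req() already did), while
     * the asynchronous wrapper fills host->next_data. */
    static int mmci_dma_prep_both(struct mmci_host *host,
                                  struct mmc_data *cur, struct mmc_data *nxt)
    {
            int ret;

            ret = mmci_dma_prep_data(host, cur);    /* current request */
            if (ret)
                    return ret;

            return mmci_dma_prep_next(host, nxt);   /* next request */
    }
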
@@ -514,19 +568,11 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
 {
         struct mmci_host_next *next = &host->next_data;
 
-        if (data->host_cookie && data->host_cookie != next->cookie) {
-                pr_warning("[%s] invalid cookie: data->host_cookie %d"
-                           " host->next_data.cookie %d\n",
-                           __func__, data->host_cookie, host->next_data.cookie);
-                data->host_cookie = 0;
-        }
-
-        if (!data->host_cookie)
-                return;
+        WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
+        WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));
 
         host->dma_desc_current = next->dma_desc;
         host->dma_current = next->dma_chan;
-
         next->dma_desc = NULL;
         next->dma_chan = NULL;
 }
@@ -541,19 +587,13 @@ static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
         if (!data)
                 return;
 
-        if (data->host_cookie) {
-                data->host_cookie = 0;
+        BUG_ON(data->host_cookie);
+
+        if (mmci_validate_data(host, data))
                 return;
-        }
 
-        /* if config for dma */
-        if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
-            ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
-                if (mmci_dma_prep_data(host, data, nd))
-                        data->host_cookie = 0;
-                else
-                        data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
-        }
+        if (!mmci_dma_prep_next(host, data))
+                data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
 }
 
 static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
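The ternary kept above guards the pre-request cookie against signed wraparound, so a host_cookie of 0 always means "nothing prepared". Expanded for clarity (an illustrative rewrite of the same expression, not new behaviour):

    /* Equivalent to: data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie; */
    nd->cookie++;
    if (nd->cookie < 0)             /* overflow wrapped negative */
            nd->cookie = 1;         /* never hand out 0: it means "unprepared" */
    data->host_cookie = nd->cookie;
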
@@ -561,29 +601,23 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
 {
         struct mmci_host *host = mmc_priv(mmc);
         struct mmc_data *data = mrq->data;
-        struct dma_chan *chan;
-        enum dma_data_direction dir;
 
-        if (!data)
+        if (!data || !data->host_cookie)
                 return;
 
-        if (data->flags & MMC_DATA_READ) {
-                dir = DMA_FROM_DEVICE;
-                chan = host->dma_rx_channel;
-        } else {
-                dir = DMA_TO_DEVICE;
-                chan = host->dma_tx_channel;
-        }
+        mmci_dma_unmap(host, data);
 
+        if (err) {
+                struct mmci_host_next *next = &host->next_data;
+                struct dma_chan *chan;
+                if (data->flags & MMC_DATA_READ)
+                        chan = host->dma_rx_channel;
+                else
+                        chan = host->dma_tx_channel;
+                dmaengine_terminate_all(chan);
 
-        /* if config for dma */
-        if (chan) {
-                if (err)
-                        dmaengine_terminate_all(chan);
-                if (data->host_cookie)
-                        dma_unmap_sg(mmc_dev(host->mmc), data->sg,
-                                     data->sg_len, dir);
-                mrq->data->host_cookie = 0;
+                next->dma_desc = NULL;
+                next->dma_chan = NULL;
         }
 }
 
@@ -604,6 +638,11 @@ static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 {
 }
 
+static inline void mmci_dma_finalize(struct mmci_host *host,
+                                     struct mmc_data *data)
+{
+}
+
 static inline void mmci_dma_data_error(struct mmci_host *host)
 {
 }
@@ -680,6 +719,9 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
                 mmci_write_clkreg(host, clk);
         }
 
+        if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+                datactrl |= MCI_ST_DPSM_DDRMODE;
+
         /*
          * Attempt to use DMA operation mode, if this
          * should fail, fall back to PIO mode
@@ -751,8 +793,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
                 u32 remain, success;
 
                 /* Terminate the DMA transfer */
-                if (dma_inprogress(host))
+                if (dma_inprogress(host)) {
                         mmci_dma_data_error(host);
+                        mmci_dma_unmap(host, data);
+                }
 
                 /*
                  * Calculate how far we are into the transfer. Note that
@@ -791,7 +835,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 
         if (status & MCI_DATAEND || data->error) {
                 if (dma_inprogress(host))
-                        mmci_dma_unmap(host, data);
+                        mmci_dma_finalize(host, data);
                 mmci_stop_data(host);
 
                 if (!data->error)
@@ -828,8 +872,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
         if (!cmd->data || cmd->error) {
                 if (host->data) {
                         /* Terminate the DMA transfer */
-                        if (dma_inprogress(host))
+                        if (dma_inprogress(host)) {
                                 mmci_dma_data_error(host);
+                                mmci_dma_unmap(host, host->data);
+                        }
                         mmci_stop_data(host);
                 }
                 mmci_request_end(host, cmd->mrq);
@@ -1055,10 +1101,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
         WARN_ON(host->mrq != NULL);
 
-        if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
-                dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
-                        mrq->data->blksz);
-                mrq->cmd->error = -EINVAL;
+        mrq->cmd->error = mmci_validate_data(host, mrq->data);
+        if (mrq->cmd->error) {
                 mmc_request_done(mmc, mrq);
                 return;
         }
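mmci_validate_data() itself is introduced earlier in the patch; only its closing lines are visible at the top of this excerpt. Judging from the open-coded block-size check it replaces here, a plausible reconstruction is the following (hypothetical; the real body is outside this excerpt):

    /* Hypothetical reconstruction -- the actual definition is not shown here. */
    static int mmci_validate_data(struct mmci_host *host,
                                  struct mmc_data *data)
    {
            if (!data)
                    return 0;

            /* The old inline check this hunk removes, now centralized. */
            if (!is_power_of_2(data->blksz)) {
                    dev_err(mmc_dev(host->mmc),
                            "unsupported block size (%d bytes)\n", data->blksz);
                    return -EINVAL;
            }

            return 0;
    }
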
@@ -1086,7 +1130,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
         struct variant_data *variant = host->variant;
         u32 pwr = 0;
         unsigned long flags;
-        int ret;
 
         pm_runtime_get_sync(mmc_dev(mmc));
 
@@ -1096,23 +1139,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
         switch (ios->power_mode) {
         case MMC_POWER_OFF:
-                if (host->vcc)
-                        ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
+                if (!IS_ERR(mmc->supply.vmmc))
+                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
                 break;
         case MMC_POWER_UP:
-                if (host->vcc) {
-                        ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
-                        if (ret) {
-                                dev_err(mmc_dev(mmc), "unable to set OCR\n");
-                                /*
-                                 * The .set_ios() function in the mmc_host_ops
-                                 * struct return void, and failing to set the
-                                 * power should be rare so we print an error
-                                 * and return here.
-                                 */
-                                goto out;
-                        }
-                }
+                if (!IS_ERR(mmc->supply.vmmc))
+                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+
                 /*
                  * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
                  * and instead uses MCI_PWR_ON so apply whatever value is
@@ -1154,6 +1187,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                 }
         }
 
+        /*
+         * If clock = 0 and the variant requires the MMCIPOWER to be used for
+         * gating the clock, the MCI_PWR_ON bit is cleared.
+         */
+        if (!ios->clock && variant->pwrreg_clkgate)
+                pwr &= ~MCI_PWR_ON;
+
         spin_lock_irqsave(&host->lock, flags);
 
         mmci_set_clkreg(host, ios->clock);
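pwrreg_clkgate, consulted in the new block above, is a per-variant property. A variant opting in would carry the flag in its variant_data, roughly as sketched below (the surrounding field names are assumptions; only pwrreg_clkgate appears in this diff):

    /* Sketch: a variant that gates the bus clock through MMCIPOWER
     * whenever the requested clock frequency is 0. */
    static struct variant_data variant_example = {
            .fifosize       = 16 * 4,
            .pwrreg_clkgate = true,
    };
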
@@ -1161,7 +1201,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
         spin_unlock_irqrestore(&host->lock, flags);
 
- out:
         pm_runtime_mark_last_busy(mmc_dev(mmc));
         pm_runtime_put_autosuspend(mmc_dev(mmc));
 }
@@ -1384,32 +1423,19 @@ static int mmci_probe(struct amba_device *dev,
         } else
                 dev_warn(&dev->dev, "could not get default pinstate\n");
 
-#ifdef CONFIG_REGULATOR
-        /* If we're using the regulator framework, try to fetch a regulator */
-        host->vcc = regulator_get(&dev->dev, "vmmc");
-        if (IS_ERR(host->vcc))
-                host->vcc = NULL;
-        else {
-                int mask = mmc_regulator_get_ocrmask(host->vcc);
-
-                if (mask < 0)
-                        dev_err(&dev->dev, "error getting OCR mask (%d)\n",
-                                mask);
-                else {
-                        host->mmc->ocr_avail = (u32) mask;
-                        if (plat->ocr_mask)
-                                dev_warn(&dev->dev,
-                                         "Provided ocr_mask/setpower will not be used "
-                                         "(using regulator instead)\n");
-                }
-        }
-#endif
-        /* Fall back to platform data if no regulator is found */
-        if (host->vcc == NULL)
+        /* Get regulators and the supported OCR mask */
+        mmc_regulator_get_supply(mmc);
+        if (!mmc->ocr_avail)
                 mmc->ocr_avail = plat->ocr_mask;
+        else if (plat->ocr_mask)
+                dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
+
         mmc->caps = plat->capabilities;
         mmc->caps2 = plat->capabilities2;
 
+        /* We support these PM capabilities. */
+        mmc->pm_caps = MMC_PM_KEEP_POWER;
+
         /*
          * We can do SGIO
          */
@@ -1585,10 +1611,6 @@ static int mmci_remove(struct amba_device *dev)
                 clk_disable_unprepare(host->clk);
                 clk_put(host->clk);
 
-                if (host->vcc)
-                        mmc_regulator_set_ocr(mmc, host->vcc, 0);
-                regulator_put(host->vcc);
-
                 mmc_free_host(mmc);
 
                 amba_release_regions(dev);
@@ -1636,8 +1658,37 @@ static int mmci_resume(struct device *dev)
 }
 #endif
 
+#ifdef CONFIG_PM_RUNTIME
+static int mmci_runtime_suspend(struct device *dev)
+{
+        struct amba_device *adev = to_amba_device(dev);
+        struct mmc_host *mmc = amba_get_drvdata(adev);
+
+        if (mmc) {
+                struct mmci_host *host = mmc_priv(mmc);
+                clk_disable_unprepare(host->clk);
+        }
+
+        return 0;
+}
+
+static int mmci_runtime_resume(struct device *dev)
+{
+        struct amba_device *adev = to_amba_device(dev);
+        struct mmc_host *mmc = amba_get_drvdata(adev);
+
+        if (mmc) {
+                struct mmci_host *host = mmc_priv(mmc);
+                clk_prepare_enable(host->clk);
+        }
+
+        return 0;
+}
+#endif
+
 static const struct dev_pm_ops mmci_dev_pm_ops = {
         SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
+        SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
 };
 
 static struct amba_id mmci_ids[] = {
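With the runtime PM callbacks wired into mmci_dev_pm_ops above, the remaining hookup is the driver's .pm pointer. That code is outside this excerpt, but the standard pattern looks like this (fields other than .pm are assumed from common amba_driver usage):

    static struct amba_driver mmci_driver = {
            .drv = {
                    .name   = DRIVER_NAME,
                    .pm     = &mmci_dev_pm_ops,
            },
            .probe          = mmci_probe,
            .remove         = mmci_remove,
            .id_table       = mmci_ids,
    };
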
@@ -1652,6 +1703,11 @@ static struct amba_id mmci_ids[] = {
                 .data   = &variant_arm_extended_fifo,
         },
         {
+                .id     = 0x02041180,
+                .mask   = 0xff0fffff,
+                .data   = &variant_arm_extended_fifo_hwfc,
+        },
+        {
                 .id     = 0x00041181,
                 .mask   = 0x000fffff,
                 .data   = &variant_arm,
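The new Versatile Express entry above relies on masked matching: the AMBA core accepts a table entry when (periphid & mask) == id, so .mask = 0xff0fffff treats bits [23:20], the revision field of the peripheral ID, as don't-care. A minimal sketch of that test:

    /* How the id-table entry above is matched against hardware:
     * every bit set in ->mask must agree; bits [23:20] are ignored. */
    static bool mmci_id_matches(u32 periphid, const struct amba_id *entry)
    {
            return (periphid & entry->mask) == entry->id;
    }
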
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index d34d8c0add8e..1f33ad5333a0 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -28,6 +28,8 @@
 #define MCI_ST_UX500_NEG_EDGE   (1 << 13)
 #define MCI_ST_UX500_HWFCEN     (1 << 14)
 #define MCI_ST_UX500_CLK_INV    (1 << 15)
+/* Modified PL180 on Versatile Express platform */
+#define MCI_ARM_HWFCEN          (1 << 12)
 
 #define MMCIARGUMENT            0x008
 #define MMCICOMMAND             0x00c
@@ -193,7 +195,6 @@ struct mmci_host {
         /* pio stuff */
         struct sg_mapping_iter  sg_miter;
         unsigned int            size;
-        struct regulator        *vcc;
 
         /* pinctrl handles */
         struct pinctrl          *pinctrl;