 MAINTAINERS                        |  10
 arch/arm/Kconfig                   |   3
 arch/arm/crypto/aes-armv4.S        |  64
 arch/arm/crypto/sha1-armv4-large.S |  24
 arch/arm/include/asm/mach/pci.h    |   1
 arch/arm/include/asm/memory.h      |   8
 arch/arm/include/asm/outercache.h  |   1
 arch/arm/include/asm/spinlock.h    |  16
 arch/arm/kernel/bios32.c           |   9
 arch/arm/kernel/smp.c              |  31
 arch/arm/mach-versatile/core.c     |  15
 arch/arm/mach-versatile/pci.c      |  11
 arch/arm/mm/Makefile               |   2
 arch/arm/mm/context.c              |   3
 arch/arm/mm/ioremap.c              | 135
 arch/arm/mm/mm.h                   |  12
 arch/arm/mm/mmu.c                  |  36
 arch/arm/mm/proc-macros.S          |   5
 arch/arm/mm/proc-v6.S              |   2
 arch/arm/mm/proc-v7-2level.S       |   2
 arch/arm/mm/proc-v7-3level.S       |   2
 arch/arm/mm/vmregion.c             | 205
 arch/arm/mm/vmregion.h             |  31
 23 files changed, 216 insertions(+), 412 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 35a56bcd5e75..1f8e194bdd6c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -670,8 +670,16 @@ F:	drivers/input/serio/ambakmi.*
 F:	include/linux/amba/kmi.h
 
 ARM PRIMECELL MMCI PL180/1 DRIVER
-S:	Orphan
+M:	Russell King <linux@arm.linux.org.uk>
+S:	Maintained
 F:	drivers/mmc/host/mmci.*
+F:	include/linux/amba/mmci.h
+
+ARM PRIMECELL UART PL010 AND PL011 DRIVERS
+M:	Russell King <linux@arm.linux.org.uk>
+S:	Maintained
+F:	drivers/tty/serial/amba-pl01*.c
+F:	include/linux/amba/serial.h
 
 ARM PRIMECELL BUS SUPPORT
 M:	Russell King <linux@arm.linux.org.uk>
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 67874b82a4ed..a192a5c1984c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1655,6 +1655,9 @@ config HZ
 	default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
 	default 100
 
+config SCHED_HRTICK
+	def_bool HIGH_RES_TIMERS
+
 config THUMB2_KERNEL
 	bool "Compile the kernel in Thumb-2 mode"
 	depends on CPU_V7 && !CPU_V6 && !CPU_V6K
diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S
index e59b1d505d6c..19d6cd6f29f9 100644
--- a/arch/arm/crypto/aes-armv4.S
+++ b/arch/arm/crypto/aes-armv4.S
@@ -34,8 +34,9 @@
 @ A little glue here to select the correct code below for the ARM CPU
 @ that is being targetted.
 
+#include <linux/linkage.h>
+
 .text
-.code	32
 
 .type	AES_Te,%object
 .align	5
@@ -145,10 +146,8 @@ AES_Te:
 
 @ void AES_encrypt(const unsigned char *in, unsigned char *out,
 @ 		const AES_KEY *key) {
-.global AES_encrypt
-.type   AES_encrypt,%function
 .align	5
-AES_encrypt:
+ENTRY(AES_encrypt)
 	sub	r3,pc,#8		@ AES_encrypt
 	stmdb   sp!,{r1,r4-r12,lr}
 	mov	r12,r0		@ inp
@@ -239,15 +238,8 @@ AES_encrypt:
 	strb	r6,[r12,#14]
 	strb	r3,[r12,#15]
 #endif
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia   sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
-.size	AES_encrypt,.-AES_encrypt
+ENDPROC(AES_encrypt)
 
 .type   _armv4_AES_encrypt,%function
 .align	2
@@ -386,10 +378,8 @@ _armv4_AES_encrypt:
 	ldr	pc,[sp],#4		@ pop and return
 .size	_armv4_AES_encrypt,.-_armv4_AES_encrypt
 
-.global private_AES_set_encrypt_key
-.type   private_AES_set_encrypt_key,%function
 .align	5
-private_AES_set_encrypt_key:
+ENTRY(private_AES_set_encrypt_key)
 _armv4_AES_set_encrypt_key:
 	sub	r3,pc,#8		@ AES_set_encrypt_key
 	teq	r0,#0
@@ -658,15 +648,11 @@ _armv4_AES_set_encrypt_key:
 
 .Ldone:	mov	r0,#0
 	ldmia   sp!,{r4-r12,lr}
-.Labrt:	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-.size	private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
+.Labrt:	mov	pc,lr
+ENDPROC(private_AES_set_encrypt_key)
 
-.global private_AES_set_decrypt_key
-.type   private_AES_set_decrypt_key,%function
 .align	5
-private_AES_set_decrypt_key:
+ENTRY(private_AES_set_decrypt_key)
 	str	lr,[sp,#-4]!            @ push lr
 #if 0
 	@ kernel does both of these in setkey so optimise this bit out by
@@ -748,15 +734,8 @@ private_AES_set_decrypt_key:
 	bne	.Lmix
 
 	mov	r0,#0
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia   sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
-.size	private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
+ENDPROC(private_AES_set_decrypt_key)
 
 .type	AES_Td,%object
 .align	5
@@ -862,10 +841,8 @@ AES_Td:
 
 @ void AES_decrypt(const unsigned char *in, unsigned char *out,
 @ 		const AES_KEY *key) {
-.global AES_decrypt
-.type   AES_decrypt,%function
 .align	5
-AES_decrypt:
+ENTRY(AES_decrypt)
 	sub	r3,pc,#8		@ AES_decrypt
 	stmdb   sp!,{r1,r4-r12,lr}
 	mov	r12,r0		@ inp
@@ -956,15 +933,8 @@ AES_decrypt:
 	strb	r6,[r12,#14]
 	strb	r3,[r12,#15]
 #endif
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia   sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
-.size	AES_decrypt,.-AES_decrypt
+ENDPROC(AES_decrypt)
 
 .type   _armv4_AES_decrypt,%function
 .align	2
@@ -1064,7 +1034,9 @@ _armv4_AES_decrypt:
 	and	r9,lr,r1,lsr#8
 
 	ldrb	r7,[r10,r7]		@ Td4[s1>>0]
-	ldrb	r1,[r10,r1,lsr#24]	@ Td4[s1>>24]
+ ARM(	ldrb	r1,[r10,r1,lsr#24]  )	@ Td4[s1>>24]
+ THUMB(	add	r1,r10,r1,lsr#24   )	@ Td4[s1>>24]
+ THUMB(	ldrb	r1,[r1]		   )
 	ldrb	r8,[r10,r8]		@ Td4[s1>>16]
 	eor	r0,r7,r0,lsl#24
 	ldrb	r9,[r10,r9]		@ Td4[s1>>8]
@@ -1077,7 +1049,9 @@
 	ldrb	r8,[r10,r8]		@ Td4[s2>>0]
 	and	r9,lr,r2,lsr#16
 
-	ldrb	r2,[r10,r2,lsr#24]	@ Td4[s2>>24]
+ ARM(	ldrb	r2,[r10,r2,lsr#24]  )	@ Td4[s2>>24]
+ THUMB(	add	r2,r10,r2,lsr#24   )	@ Td4[s2>>24]
+ THUMB(	ldrb	r2,[r2]		   )
 	eor	r0,r0,r7,lsl#8
 	ldrb	r9,[r10,r9]		@ Td4[s2>>16]
 	eor	r1,r8,r1,lsl#16
@@ -1090,7 +1064,9 @@
 	and	r9,lr,r3		@ i2
 
 	ldrb	r9,[r10,r9]		@ Td4[s3>>0]
-	ldrb	r3,[r10,r3,lsr#24]	@ Td4[s3>>24]
+ ARM(	ldrb	r3,[r10,r3,lsr#24]  )	@ Td4[s3>>24]
+ THUMB(	add	r3,r10,r3,lsr#24   )	@ Td4[s3>>24]
+ THUMB(	ldrb	r3,[r3]		   )
 	eor	r0,r0,r7,lsl#16
 	ldr	r7,[r11,#0]
 	eor	r1,r1,r8,lsl#8
diff --git a/arch/arm/crypto/sha1-armv4-large.S b/arch/arm/crypto/sha1-armv4-large.S
index 7050ab133b9d..92c6eed7aac9 100644
--- a/arch/arm/crypto/sha1-armv4-large.S
+++ b/arch/arm/crypto/sha1-armv4-large.S
@@ -51,13 +51,12 @@
 @ Profiler-assisted and platform-specific optimization resulted in 10%
 @ improvement on Cortex A8 core and 12.2 cycles per byte.
 
-.text
+#include <linux/linkage.h>
 
-.global	sha1_block_data_order
-.type	sha1_block_data_order,%function
+.text
 
 .align	2
-sha1_block_data_order:
+ENTRY(sha1_block_data_order)
 	stmdb	sp!,{r4-r12,lr}
 	add	r2,r1,r2,lsl#6	@ r2 to point at the end of r1
 	ldmia	r0,{r3,r4,r5,r6,r7}
@@ -194,7 +193,7 @@ sha1_block_data_order:
 	eor	r10,r10,r7,ror#2	@ F_00_19(B,C,D)
 	str	r9,[r14,#-4]!
 	add	r3,r3,r10		@ E+=F_00_19(B,C,D)
-	teq	r14,sp
+	cmp	r14,sp
 	bne	.L_00_15		@ [((11+4)*5+2)*3]
 #if __ARM_ARCH__<7
 	ldrb	r10,[r1,#2]
@@ -374,7 +373,9 @@ sha1_block_data_order:
 	@ F_xx_xx
 	add	r3,r3,r9		@ E+=X[i]
 	add	r3,r3,r10		@ E+=F_20_39(B,C,D)
-	teq	r14,sp			@ preserve carry
+ ARM(	teq	r14,sp		)	@ preserve carry
+ THUMB(	mov	r11,sp		)
+ THUMB(	teq	r14,r11		)	@ preserve carry
 	bne	.L_20_39_or_60_79	@ [+((12+3)*5+2)*4]
 	bcs	.L_done			@ [+((12+3)*5+2)*4], spare 300 bytes
 
@@ -466,7 +467,7 @@ sha1_block_data_order:
 	add	r3,r3,r9		@ E+=X[i]
 	add	r3,r3,r10		@ E+=F_40_59(B,C,D)
 	add	r3,r3,r11,ror#2
-	teq	r14,sp
+	cmp	r14,sp
 	bne	.L_40_59		@ [+((12+5)*5+2)*4]
 
 	ldr	r8,.LK_60_79
@@ -485,19 +486,12 @@ sha1_block_data_order:
 	teq	r1,r2
 	bne	.Lloop			@ [+18], total 1307
 
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia	sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
 .align	2
 .LK_00_19:	.word	0x5a827999
 .LK_20_39:	.word	0x6ed9eba1
 .LK_40_59:	.word	0x8f1bbcdc
 .LK_60_79:	.word	0xca62c1d6
-.size	sha1_block_data_order,.-sha1_block_data_order
+ENDPROC(sha1_block_data_order)
 .asciz	"SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
 .align	2
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index db9fedb57f2c..5cf2e979b4be 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -23,6 +23,7 @@ struct hw_pci {
 #endif
 	struct pci_ops	*ops;
 	int		nr_controllers;
+	void		**private_data;
 	int		(*setup)(int nr, struct pci_sys_data *);
 	struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
 	void		(*preinit)(void);
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 1c4df27f9332..64c770d24198 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -36,23 +36,23 @@
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
  */
 #define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)
-#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
+#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
 #define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)
 
 /*
  * The maximum size of a 26-bit user space task.
  */
-#define TASK_SIZE_26		UL(0x04000000)
+#define TASK_SIZE_26		(UL(1) << 26)
 
 /*
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 32MB of the kernel text.
  */
 #ifndef CONFIG_THUMB2_KERNEL
-#define MODULES_VADDR		(PAGE_OFFSET - 16*1024*1024)
+#define MODULES_VADDR		(PAGE_OFFSET - SZ_16M)
 #else
 /* smaller range for Thumb-2 symbols relocation (2^24)*/
-#define MODULES_VADDR		(PAGE_OFFSET - 8*1024*1024)
+#define MODULES_VADDR		(PAGE_OFFSET - SZ_8M)
 #endif
 
 #if TASK_SIZE > MODULES_VADDR
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 53426c66352a..12f71a190422 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -92,6 +92,7 @@ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
 static inline void outer_flush_all(void) { }
 static inline void outer_inv_all(void) { }
 static inline void outer_disable(void) { }
+static inline void outer_resume(void) { }
 
 #endif
 
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index b4ca707d0a69..6220e9fdf4c7 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -119,22 +119,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	unsigned long tmp;
-	u32 slock;
-
 	smp_mb();
-
-	__asm__ __volatile__(
-"	mov	%1, #1\n"
-"1:	ldrex	%0, [%2]\n"
-"	uadd16	%0, %0, %1\n"
-"	strex	%1, %0, [%2]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (slock), "=&r" (tmp)
-	: "r" (&lock->slock)
-	: "cc");
-
+	lock->tickets.owner++;
 	dsb_sev();
 }
 
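The simplified unlock above works because ARM spinlocks are ticket locks: "next" is the ticket handed to the latest arrival, "owner" is the ticket being served, and only the lock holder ever stores to "owner", so a plain increment between the two barriers is race-free and the ldrex/strex loop can go. A minimal user-space model of that idea, with C11 atomics standing in for the kernel's smp_mb()/dsb_sev(); the field names mirror arch_spinlock_t, the rest is illustrative:

#include <stdatomic.h>
#include <stdint.h>

typedef struct {
	_Atomic uint16_t next;	/* ticket handed to the next arrival */
	_Atomic uint16_t owner;	/* ticket currently being served */
} ticket_lock;

static void ticket_lock_acquire(ticket_lock *l)
{
	/* take a ticket, then spin until it is our turn */
	uint16_t me = atomic_fetch_add_explicit(&l->next, 1,
						memory_order_relaxed);
	while (atomic_load_explicit(&l->owner, memory_order_acquire) != me)
		;	/* the kernel waits with wfe()/sev() instead */
}

static void ticket_lock_release(ticket_lock *l)
{
	/*
	 * Only the holder writes 'owner', so a plain increment published
	 * with release ordering is enough -- the reason the patch can
	 * drop the exclusive-access loop from arch_spin_unlock().
	 */
	atomic_store_explicit(&l->owner,
		atomic_load_explicit(&l->owner, memory_order_relaxed) + 1,
		memory_order_release);
}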
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 379cf3292390..a1f73b502ef0 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -413,7 +413,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 	return irq;
 }
 
-static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
+static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
 {
 	int ret;
 	struct pci_host_bridge_window *window;
@@ -445,7 +445,7 @@ static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
 	return 0;
 }
 
-static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
+static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 {
 	struct pci_sys_data *sys = NULL;
 	int ret;
@@ -464,6 +464,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 		sys->map_irq = hw->map_irq;
 		INIT_LIST_HEAD(&sys->resources);
 
+		if (hw->private_data)
+			sys->private_data = hw->private_data[nr];
+
 		ret = hw->setup(nr, sys);
 
 		if (ret > 0) {
@@ -493,7 +496,7 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 	}
 }
 
-void __init pci_common_init(struct hw_pci *hw)
+void pci_common_init(struct hw_pci *hw)
 {
 	struct pci_sys_data *sys;
 	LIST_HEAD(head);
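With the controller index nr selecting an entry from hw->private_data, each controller gets its own context before its setup() hook runs. A hedged sketch of how a platform might populate the new field; my_port, my_setup and the register programming are hypothetical, only the wiring into struct hw_pci comes from this patch:

/* Hypothetical per-controller state. */
struct my_port {
	void __iomem *base;
};

static struct my_port my_ports[2];
static void *my_port_data[] = { &my_ports[0], &my_ports[1] };

static int __init my_setup(int nr, struct pci_sys_data *sys)
{
	struct my_port *port = sys->private_data;	/* == my_port_data[nr] */

	/* ... program the controller described by 'port' ... */
	return 1;	/* non-zero: bus exists, core goes on to scan it */
}

static struct hw_pci my_pci __initdata = {
	.nr_controllers	= ARRAY_SIZE(my_ports),
	.private_data	= my_port_data,
	.setup		= my_setup,
};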
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 84f4cbf652e5..365c8d92e2eb 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -125,18 +125,6 @@ void __init smp_init_cpus(void)
 		smp_ops.smp_init_cpus();
 }
 
-static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
-{
-	if (smp_ops.smp_prepare_cpus)
-		smp_ops.smp_prepare_cpus(max_cpus);
-}
-
-static void __cpuinit platform_secondary_init(unsigned int cpu)
-{
-	if (smp_ops.smp_secondary_init)
-		smp_ops.smp_secondary_init(cpu);
-}
-
 int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	if (smp_ops.smp_boot_secondary)
@@ -154,12 +142,6 @@ static int platform_cpu_kill(unsigned int cpu)
 	return 1;
 }
 
-static void platform_cpu_die(unsigned int cpu)
-{
-	if (smp_ops.cpu_die)
-		smp_ops.cpu_die(cpu);
-}
-
 static int platform_cpu_disable(unsigned int cpu)
 {
 	if (smp_ops.cpu_disable)
@@ -257,7 +239,8 @@ void __ref cpu_die(void)
 	 * actual CPU shutdown procedure is at least platform (if not
 	 * CPU) specific.
 	 */
-	platform_cpu_die(cpu);
+	if (smp_ops.cpu_die)
+		smp_ops.cpu_die(cpu);
 
 	/*
 	 * Do not return to the idle loop - jump back to the secondary
@@ -324,7 +307,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	/*
 	 * Give the platform a chance to do its own initialisation.
 	 */
-	platform_secondary_init(cpu);
+	if (smp_ops.smp_secondary_init)
+		smp_ops.smp_secondary_init(cpu);
 
 	notify_cpu_starting(cpu);
 
@@ -399,8 +383,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		/*
 		 * Initialise the present map, which describes the set of CPUs
 		 * actually populated at the present time. A platform should
-		 * re-initialize the map in platform_smp_prepare_cpus() if
-		 * present != possible (e.g. physical hotplug).
+		 * re-initialize the map in the platform's smp_prepare_cpus()
+		 * if present != possible (e.g. physical hotplug).
 		 */
 		init_cpu_present(cpu_possible_mask);
 
@@ -408,7 +392,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		 * Initialise the SCU if there are more than one CPU
 		 * and let them know where to start.
 		 */
-		platform_smp_prepare_cpus(max_cpus);
+		if (smp_ops.smp_prepare_cpus)
+			smp_ops.smp_prepare_cpus(max_cpus);
 	}
 }
 
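Dropping the platform_* wrappers means the optional smp_ops hooks are now invoked directly, each guarded by a NULL check, so a platform only fills in what it actually implements. A hedged sketch of such an ops table; the foo_* callbacks are hypothetical, the field names are those tested in the hunks above:

static struct smp_operations foo_smp_ops __initdata = {
	.smp_init_cpus		= foo_smp_init_cpus,
	.smp_prepare_cpus	= foo_smp_prepare_cpus,
	.smp_boot_secondary	= foo_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= foo_cpu_die,
#endif
	/* .smp_secondary_init, .cpu_kill, .cpu_disable left NULL here */
};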
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 5d5929450366..a78827b70270 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -36,6 +36,7 @@
 #include <linux/gfp.h>
 #include <linux/clkdev.h>
 #include <linux/mtd/physmap.h>
+#include <linux/bitops.h>
 
 #include <asm/irq.h>
 #include <asm/hardware/arm_timer.h>
@@ -65,16 +66,28 @@
 #define VA_VIC_BASE		__io_address(VERSATILE_VIC_BASE)
 #define VA_SIC_BASE		__io_address(VERSATILE_SIC_BASE)
 
+/* These PIC IRQs are valid in each configuration */
+#define PIC_VALID_ALL		BIT(SIC_INT_KMI0) | BIT(SIC_INT_KMI1) | \
+				BIT(SIC_INT_SCI3) | BIT(SIC_INT_UART3) | \
+				BIT(SIC_INT_CLCD) | BIT(SIC_INT_TOUCH) | \
+				BIT(SIC_INT_KEYPAD) | BIT(SIC_INT_DoC) | \
+				BIT(SIC_INT_USB) | BIT(SIC_INT_PCI0) | \
+				BIT(SIC_INT_PCI1) | BIT(SIC_INT_PCI2) | \
+				BIT(SIC_INT_PCI3)
 #if 1
 #define IRQ_MMCI0A	IRQ_VICSOURCE22
 #define IRQ_AACI	IRQ_VICSOURCE24
 #define IRQ_ETH		IRQ_VICSOURCE25
 #define PIC_MASK	0xFFD00000
+#define PIC_VALID	PIC_VALID_ALL
 #else
 #define IRQ_MMCI0A	IRQ_SIC_MMCI0A
 #define IRQ_AACI	IRQ_SIC_AACI
 #define IRQ_ETH		IRQ_SIC_ETH
 #define PIC_MASK	0
+#define PIC_VALID	PIC_VALID_ALL | BIT(SIC_INT_MMCI0A) | \
+			BIT(SIC_INT_MMCI1A) | BIT(SIC_INT_AACI) | \
+			BIT(SIC_INT_ETH)
 #endif
 
 /* Lookup table for finding a DT node that represents the vic instance */
@@ -102,7 +115,7 @@ void __init versatile_init_irq(void)
 					VERSATILE_SIC_BASE);
 
 	fpga_irq_init(VA_SIC_BASE, "SIC", IRQ_SIC_START,
-				IRQ_VICSOURCE31, ~PIC_MASK, np);
+				IRQ_VICSOURCE31, PIC_VALID, np);
 
 	/*
 	 * Interrupts on secondary controller from 0 to 8 are routed to
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
index 2f84f4094f13..e92e5e0705bc 100644
--- a/arch/arm/mach-versatile/pci.c
+++ b/arch/arm/mach-versatile/pci.c
@@ -23,6 +23,7 @@
 #include <linux/io.h>
 
 #include <mach/hardware.h>
+#include <mach/irqs.h>
 #include <asm/irq.h>
 #include <asm/mach/pci.h>
 
@@ -327,12 +328,12 @@ static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 	int irq;
 
 	/* slot,  pin,	irq
-	 *  24     1     27
-	 *  25     1     28
-	 *  26     1     29
-	 *  27     1     30
+	 *  24     1     IRQ_SIC_PCI0
+	 *  25     1     IRQ_SIC_PCI1
+	 *  26     1     IRQ_SIC_PCI2
+	 *  27     1     IRQ_SIC_PCI3
 	 */
-	irq = 27 + ((slot - 24 + pin - 1) & 3);
+	irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3);
 
 	return irq;
 }
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 8a9c4cb50a93..4e333fa2756f 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y				:= dma-mapping.o extable.o fault.o init.o \
 				   iomap.o
 
 obj-$(CONFIG_MMU)		+= fault-armv.o flush.o idmap.o ioremap.o \
-				   mmap.o pgd.o mmu.o vmregion.o
+				   mmap.o pgd.o mmu.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y				+= nommu.o
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index bc4a5e9ebb78..7a0511191f6b 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -34,6 +34,9 @@
  * The ASID is used to tag entries in the CPU caches and TLBs.
  * The context ID is used by debuggers and trace logic, and
  * should be unique within all running processes.
+ *
+ * In big endian operation, the two 32 bit words are swapped if accessed
+ * by non 64-bit operations.
  */
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
 #define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)
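The point of the new comment: context.id is a 64-bit field, yet the assembly paths read it with a single 32-bit load, and on big-endian the low word of a 64-bit value sits at byte offset +4 -- exactly what the mmid macro change further down in proc-macros.S accounts for. A stand-alone C illustration of the word ordering (not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t context_id = 0x0000000100000002ULL; /* generation | ASID */
	uint32_t first_word;

	/* The word at offset 0 is the low half only on little-endian. */
	memcpy(&first_word, &context_id, sizeof(first_word));
	printf("word at offset 0: %#x (LE: 0x2, BE: 0x1)\n", first_word);
	return 0;
}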
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 88fd86cf3d9a..04d9006eab1f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -39,6 +39,70 @@
 #include <asm/mach/pci.h>
 #include "mm.h"
 
+
+LIST_HEAD(static_vmlist);
+
+static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
+			size_t size, unsigned int mtype)
+{
+	struct static_vm *svm;
+	struct vm_struct *vm;
+
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+			continue;
+
+		if (vm->phys_addr > paddr ||
+			paddr + size - 1 > vm->phys_addr + vm->size - 1)
+			continue;
+
+		return svm;
+	}
+
+	return NULL;
+}
+
+struct static_vm *find_static_vm_vaddr(void *vaddr)
+{
+	struct static_vm *svm;
+	struct vm_struct *vm;
+
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
+
+		/* static_vmlist is ascending order */
+		if (vm->addr > vaddr)
+			break;
+
+		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
+			return svm;
+	}
+
+	return NULL;
+}
+
+void __init add_static_vm_early(struct static_vm *svm)
+{
+	struct static_vm *curr_svm;
+	struct vm_struct *vm;
+	void *vaddr;
+
+	vm = &svm->vm;
+	vm_area_add_early(vm);
+	vaddr = vm->addr;
+
+	list_for_each_entry(curr_svm, &static_vmlist, list) {
+		vm = &curr_svm->vm;
+
+		if (vm->addr > vaddr)
+			break;
+	}
+	list_add_tail(&svm->list, &curr_svm->list);
+}
+
 int ioremap_page(unsigned long virt, unsigned long phys,
 		 const struct mem_type *mtype)
 {
@@ -197,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	const struct mem_type *type;
 	int err;
 	unsigned long addr;
-	struct vm_struct * area;
+	struct vm_struct *area;
+	phys_addr_t paddr = __pfn_to_phys(pfn);
 
 #ifndef CONFIG_ARM_LPAE
 	/*
 	 * High mappings must be supersection aligned
 	 */
-	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
+	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
 		return NULL;
 #endif
 
@@ -219,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	/*
 	 * Try to reuse one of the static mapping whenever possible.
 	 */
-	read_lock(&vmlist_lock);
-	for (area = vmlist; area; area = area->next) {
-		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
-			break;
-		if (!(area->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
-			continue;
-		if (__phys_to_pfn(area->phys_addr) > pfn ||
-		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
-			continue;
-		/* we can drop the lock here as we know *area is static */
-		read_unlock(&vmlist_lock);
-		addr = (unsigned long)area->addr;
-		addr += __pfn_to_phys(pfn) - area->phys_addr;
-		return (void __iomem *) (offset + addr);
+	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
+		struct static_vm *svm;
+
+		svm = find_static_vm_paddr(paddr, size, mtype);
+		if (svm) {
+			addr = (unsigned long)svm->vm.addr;
+			addr += paddr - svm->vm.phys_addr;
+			return (void __iomem *) (offset + addr);
+		}
 	}
-	read_unlock(&vmlist_lock);
 
 	/*
 	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
@@ -248,21 +305,21 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;
-	area->phys_addr = __pfn_to_phys(pfn);
+	area->phys_addr = paddr;
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 	if (DOMAIN_IO == 0 &&
 	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
 	       cpu_is_xsc3()) && pfn >= 0x100000 &&
-	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
+	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_supersections(addr, pfn, size, type);
-	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_sections(addr, pfn, size, type);
 	} else
 #endif
-	err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
+	err = ioremap_page_range(addr, addr + size, paddr,
 				 __pgprot(type->prot_pte));
 
 	if (err) {
@@ -346,34 +403,28 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
 void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-	struct vm_struct *vm;
+	struct static_vm *svm;
+
+	/* If this is a static mapping, we must leave it alone */
+	svm = find_static_vm_vaddr(addr);
+	if (svm)
+		return;
 
-	read_lock(&vmlist_lock);
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (vm->addr > addr)
-			break;
-		if (!(vm->flags & VM_IOREMAP))
-			continue;
-		/* If this is a static mapping we must leave it alone */
-		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
-		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
-			read_unlock(&vmlist_lock);
-			return;
-		}
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+	{
+		struct vm_struct *vm;
+
+		vm = find_vm_area(addr);
+
 		/*
 		 * If this is a section based mapping we need to handle it
 		 * specially as the VM subsystem does not know how to handle
 		 * such a beast.
 		 */
-		if ((vm->addr == addr) &&
-		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
+		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
 			unmap_area_sections((unsigned long)vm->addr, vm->size);
-			break;
-		}
-#endif
 	}
-	read_unlock(&vmlist_lock);
+#endif
 
 	vunmap(addr);
 }
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index a8ee92da3544..d5a4e9ad8f0f 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -1,4 +1,6 @@
 #ifdef CONFIG_MMU
+#include <linux/list.h>
+#include <linux/vmalloc.h>
 
 /* the upper-most page table pointer */
 extern pmd_t *top_pmd;
@@ -65,6 +67,16 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 /* consistent regions used by dma_alloc_attrs() */
 #define VM_ARM_DMA_CONSISTENT	0x20000000
 
+
+struct static_vm {
+	struct vm_struct vm;
+	struct list_head list;
+};
+
+extern struct list_head static_vmlist;
+extern struct static_vm *find_static_vm_vaddr(void *vaddr);
+extern __init void add_static_vm_early(struct static_vm *svm);
+
 #endif
 
 #ifdef CONFIG_ZONE_DMA
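struct static_vm simply pairs an early vm_struct with a list node, letting the ARM code keep its own address-sorted list of static mappings instead of scanning the core vmlist. A hedged sketch of registering one mapping through the new helper, mirroring what iotable_init does in the next file; the virtual and physical addresses are made up for illustration, and the page tables would have to be populated first (iotable_init does this via create_mapping()):

static struct static_vm uart_svm __initdata;

static void __init map_uart_early(void)
{
	struct vm_struct *vm = &uart_svm.vm;

	vm->addr	= (void *)0xf8000000;	/* hypothetical fixed VA */
	vm->size	= SZ_4K;
	vm->phys_addr	= 0x10009000;		/* hypothetical UART base */
	vm->flags	= VM_IOREMAP | VM_ARM_STATIC_MAPPING |
			  VM_ARM_MTYPE(MT_DEVICE);
	vm->caller	= map_uart_early;
	add_static_vm_early(&uart_svm);		/* insert keeps list sorted */
}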
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ce328c7f5c94..69bb735474fe 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -757,21 +757,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 {
 	struct map_desc *md;
 	struct vm_struct *vm;
+	struct static_vm *svm;
 
 	if (!nr)
 		return;
 
-	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
 
 	for (md = io_desc; nr; md++, nr--) {
 		create_mapping(md);
+
+		vm = &svm->vm;
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 		vm->phys_addr = __pfn_to_phys(md->pfn);
 		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
-		vm_area_add_early(vm++);
+		add_static_vm_early(svm++);
 	}
 }
 
@@ -779,13 +782,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
 				  void *caller)
 {
 	struct vm_struct *vm;
+	struct static_vm *svm;
+
+	svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
 
-	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm = &svm->vm;
 	vm->addr = (void *)addr;
 	vm->size = size;
 	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
 	vm->caller = caller;
-	vm_area_add_early(vm);
+	add_static_vm_early(svm);
 }
 
 #ifndef CONFIG_ARM_LPAE
@@ -810,14 +816,13 @@ static void __init pmd_empty_section_gap(unsigned long addr)
 
 static void __init fill_pmd_gaps(void)
 {
+	struct static_vm *svm;
 	struct vm_struct *vm;
 	unsigned long addr, next = 0;
 	pmd_t *pmd;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
-			continue;
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
 		addr = (unsigned long)vm->addr;
 		if (addr < next)
 			continue;
@@ -857,19 +862,12 @@ static void __init fill_pmd_gaps(void)
 #if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
 static void __init pci_reserve_io(void)
 {
-	struct vm_struct *vm;
-	unsigned long addr;
+	struct static_vm *svm;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		addr = (unsigned long)vm->addr;
-		addr &= ~(SZ_2M - 1);
-		if (addr == PCI_IO_VIRT_BASE)
-			return;
+	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
+	if (svm)
+		return;
 
-	}
 	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
 }
 #else
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index eb6aa73bc8b7..f9a0aa725ea9 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -38,9 +38,14 @@
 
 /*
  * mmid - get context id from mm pointer (mm->context.id)
+ * note, this field is 64bit, so in big-endian the two words are swapped too.
  */
 	.macro	mmid, rd, rn
+#ifdef __ARMEB__
+	ldr	\rd, [\rn, #MM_CONTEXT_ID + 4 ]
+#else
 	ldr	\rd, [\rn, #MM_CONTEXT_ID]
+#endif
 	.endm
 
 /*
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 09c5233f4dfc..bcaaa8de9325 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -101,7 +101,7 @@ ENTRY(cpu_v6_dcache_clean_area)
 ENTRY(cpu_v6_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 6d98c13ab827..78f520bc0e99 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -40,7 +40,7 @@
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 #ifdef CONFIG_ARM_ERRATA_430973
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 7b56386f9496..50bf1dafc9ea 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -47,7 +47,7 @@
  */
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	and	r3, r1, #0xff
 	mov	r3, r3, lsl #(48 - 32)		@ ASID
 	mcrr	p15, 0, r0, r3, c2		@ set TTB 0
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
deleted file mode 100644
index a631016e1f8f..000000000000
--- a/arch/arm/mm/vmregion.c
+++ /dev/null
@@ -1,205 +0,0 @@
-#include <linux/fs.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-
-#include "vmregion.h"
-
-/*
- * VM region handling support.
- *
- * This should become something generic, handling VM region allocations for
- * vmalloc and similar (ioremap, module space, etc).
- *
- * I envisage vmalloc()'s supporting vm_struct becoming:
- *
- *  struct vm_struct {
- *    struct vmregion	region;
- *    unsigned long	flags;
- *    struct page	**pages;
- *    unsigned int	nr_pages;
- *    unsigned long	phys_addr;
- *  };
- *
- * get_vm_area() would then call vmregion_alloc with an appropriate
- * struct vmregion head (eg):
- *
- *  struct vmregion vmalloc_head = {
- *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
- *	.vm_start	= VMALLOC_START,
- *	.vm_end		= VMALLOC_END,
- *  };
- *
- * However, vmalloc_head.vm_start is variable (typically, it is dependent on
- * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
- * would have to initialise this each time prior to calling vmregion_alloc().
- */
-
-struct arm_vmregion *
-arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
-		   size_t size, gfp_t gfp, const void *caller)
-{
-	unsigned long start = head->vm_start, addr = head->vm_end;
-	unsigned long flags;
-	struct arm_vmregion *c, *new;
-
-	if (head->vm_end - head->vm_start < size) {
-		printk(KERN_WARNING "%s: allocation too big (requested %#x)\n",
-			__func__, size);
-		goto out;
-	}
-
-	new = kmalloc(sizeof(struct arm_vmregion), gfp);
-	if (!new)
-		goto out;
-
-	new->caller = caller;
-
-	spin_lock_irqsave(&head->vm_lock, flags);
-
-	addr = rounddown(addr - size, align);
-	list_for_each_entry_reverse(c, &head->vm_list, vm_list) {
-		if (addr >= c->vm_end)
-			goto found;
-		addr = rounddown(c->vm_start - size, align);
-		if (addr < start)
-			goto nospc;
-	}
-
- found:
-	/*
-	 * Insert this entry after the one we found.
-	 */
-	list_add(&new->vm_list, &c->vm_list);
-	new->vm_start = addr;
-	new->vm_end = addr + size;
-	new->vm_active = 1;
-
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-	return new;
-
- nospc:
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-	kfree(new);
- out:
-	return NULL;
-}
-
-static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
-{
-	struct arm_vmregion *c;
-
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if (c->vm_active && c->vm_start == addr)
-			goto out;
-	}
-	c = NULL;
- out:
-	return c;
-}
-
-struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
-{
-	struct arm_vmregion *c;
-	unsigned long flags;
-
-	spin_lock_irqsave(&head->vm_lock, flags);
-	c = __arm_vmregion_find(head, addr);
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-	return c;
-}
-
-struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr)
-{
-	struct arm_vmregion *c;
-	unsigned long flags;
-
-	spin_lock_irqsave(&head->vm_lock, flags);
-	c = __arm_vmregion_find(head, addr);
-	if (c)
-		c->vm_active = 0;
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-	return c;
-}
-
-void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&head->vm_lock, flags);
-	list_del(&c->vm_list);
-	spin_unlock_irqrestore(&head->vm_lock, flags);
-
-	kfree(c);
-}
-
-#ifdef CONFIG_PROC_FS
-static int arm_vmregion_show(struct seq_file *m, void *p)
-{
-	struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list);
-
-	seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end,
-		c->vm_end - c->vm_start);
-	if (c->caller)
-		seq_printf(m, " %pS", (void *)c->caller);
-	seq_putc(m, '\n');
-	return 0;
-}
-
-static void *arm_vmregion_start(struct seq_file *m, loff_t *pos)
-{
-	struct arm_vmregion_head *h = m->private;
-	spin_lock_irq(&h->vm_lock);
-	return seq_list_start(&h->vm_list, *pos);
-}
-
-static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos)
-{
-	struct arm_vmregion_head *h = m->private;
-	return seq_list_next(p, &h->vm_list, pos);
-}
-
-static void arm_vmregion_stop(struct seq_file *m, void *p)
-{
-	struct arm_vmregion_head *h = m->private;
-	spin_unlock_irq(&h->vm_lock);
-}
-
-static const struct seq_operations arm_vmregion_ops = {
-	.start	= arm_vmregion_start,
-	.stop	= arm_vmregion_stop,
-	.next	= arm_vmregion_next,
-	.show	= arm_vmregion_show,
-};
-
-static int arm_vmregion_open(struct inode *inode, struct file *file)
-{
-	struct arm_vmregion_head *h = PDE(inode)->data;
-	int ret = seq_open(file, &arm_vmregion_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-		m->private = h;
-	}
-	return ret;
-}
-
-static const struct file_operations arm_vmregion_fops = {
-	.open	= arm_vmregion_open,
-	.read	= seq_read,
-	.llseek	= seq_lseek,
-	.release = seq_release,
-};
-
-int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
-{
-	proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h);
-	return 0;
-}
-#else
-int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
-{
-	return 0;
-}
-#endif
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
deleted file mode 100644
index 0f5a5f2a2c7b..000000000000
--- a/arch/arm/mm/vmregion.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef VMREGION_H
-#define VMREGION_H
-
-#include <linux/spinlock.h>
-#include <linux/list.h>
-
-struct page;
-
-struct arm_vmregion_head {
-	spinlock_t		vm_lock;
-	struct list_head	vm_list;
-	unsigned long		vm_start;
-	unsigned long		vm_end;
-};
-
-struct arm_vmregion {
-	struct list_head	vm_list;
-	unsigned long		vm_start;
-	unsigned long		vm_end;
-	int			vm_active;
-	const void		*caller;
-};
-
-struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *);
-struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
-struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
-void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
-
-int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *);
-
-#endif