author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-11-03 16:21:09 -0500
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-11-03 16:21:09 -0500
commit | 2c2b8285dcd4d0674b6e77269cf32721fffea59e (patch) |
tree | 47569549a3f1587c82401bd56a3422f553131389 /arch/arc |
parent | 0921f1efb605d8fda43d794734222d1ad39c6840 (diff) |
parent | 5a364c2a1762e8a78721fafc93144509c0b6cb84 (diff)
Merge tag 'arc-4.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull ARC updates from Vineet Gupta:

 - Support for new MM features in ARCv2 cores (THP, PAE40). Some generic
   THP bits are touched - all ACKed by Kirill

 - Platform framework updates to prepare for EZChip arrival (still in works)

 - ARC public mailing list finally set up (linux-snps-arc@lists.infradead.org)
* tag 'arc-4.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc: (42 commits)
ARC: mm: PAE40 support
ARC: mm: PAE40: tlbex.S: Explicitify the size of pte_t
ARC: mm: PAE40: switch to using phys_addr_t for physical addresses
ARC: mm: HIGHMEM: populate high memory from DT
ARC: mm: HIGHMEM: kmap API implementation
ARC: mm: preps ahead of HIGHMEM support #2
ARC: mm: preps ahead of HIGHMEM support
ARC: mm: use generic macros _BITUL()/_AC()
ARC: mm: Improve Duplicate PD Fault handler
MAINTAINERS: Add public mailing list for ARC
ARC: Ensure DT mem base is same as what kernel is built with
ARC: boot: Non Master cpus only need to call EARLY_CPU_SETUP once
ARCv2: smp: [plat-*]: No need to explicitly call mcip_init_smp()
ARC: smp: Introduce smp hook @init_irq_cpu called for all cores
ARC: smp: Rename platform hook @init_smp -> @init_cpu_smp
ARCv2: smp: [plat-*]: No need to explicitly call mcip_init_early_smp()
ARC: smp: Introduce smp hook @init_early_smp for Master core
ARC: remove @init_time, @init_irq platform callbacks
ARC: smp: irqchip: handle IPI as percpu irq like timer
ARC: boot: Support Halt-on-reset and Run-on-reset SMP booting modes
...
Diffstat (limited to 'arch/arc')
47 files changed, 993 insertions, 417 deletions
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 78c0621d5819..2c2ac3f3ff80 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -76,6 +76,10 @@ config STACKTRACE_SUPPORT
 config HAVE_LATENCYTOP_SUPPORT
 	def_bool y
 
+config HAVE_ARCH_TRANSPARENT_HUGEPAGE
+	def_bool y
+	depends on ARC_MMU_V4
+
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"
 
@@ -190,6 +194,16 @@ config NR_CPUS
 	range 2 4096
 	default "4"
 
+config ARC_SMP_HALT_ON_RESET
+	bool "Enable Halt-on-reset boot mode"
+	default y if ARC_UBOOT_SUPPORT
+	help
+	  In SMP configuration cores can be configured as Halt-on-reset
+	  or they could all start at same time. For Halt-on-reset, non
+	  masters are parked until Master kicks them so they can start of
+	  at designated entry point. For other case, all jump to common
+	  entry point and spin wait for Master's signal.
+
 endif	#SMP
 
 menuconfig ARC_CACHE
@@ -278,6 +292,8 @@ choice
 	default ARC_MMU_V2 if ARC_CPU_750D
 	default ARC_MMU_V4 if ARC_CPU_HS
 
+if ISA_ARCOMPACT
+
 config ARC_MMU_V1
 	bool "MMU v1"
 	help
@@ -297,6 +313,8 @@ config ARC_MMU_V3
 	  Variable Page size (1k-16k), var JTLB size 128 x (2 or 4)
 	  Shared Address Spaces (SASID)
 
+endif
+
 config ARC_MMU_V4
 	bool "MMU v4"
 	depends on ISA_ARCV2
@@ -428,6 +446,28 @@ config LINUX_LINK_BASE
 	  Linux needs to be scooted a bit.
 	  If you don't know what the above means, leave this setting alone.
 
+config HIGHMEM
+	bool "High Memory Support"
+	help
+	  With ARC 2G:2G address split, only upper 2G is directly addressable by
+	  kernel. Enable this to potentially allow access to rest of 2G and PAE
+	  in future
+
+config ARC_HAS_PAE40
+	bool "Support for the 40-bit Physical Address Extension"
+	default n
+	depends on ISA_ARCV2
+	select HIGHMEM
+	help
+	  Enable access to physical memory beyond 4G, only supported on
+	  ARC cores with 40 bit Physical Addressing support
+
+config ARCH_PHYS_ADDR_T_64BIT
+	def_bool ARC_HAS_PAE40
+
+config ARCH_DMA_ADDR_T_64BIT
+	bool
+
 config ARC_CURR_IN_REG
 	bool "Dedicate Register r25 for current_task pointer"
 	default y
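Context for the ARC_HAS_PAE40 / ARCH_PHYS_ADDR_T_64BIT pair above: once physical addresses are 40 bits wide on a 32-bit core, address math done in unsigned long silently truncates, which is why later hunks in this series move cache and TLB helpers over to phys_addr_t. A standalone illustration (not part of the patch):

/* Illustrative only, not from this patch: composing a physical address
 * from a PFN above the 4GB line must use 64-bit arithmetic.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pfn = 0x110000;		/* frame beyond 4GB, 8K pages */
	uint32_t bad = pfn << 13;		/* truncated to 0x20000000 */
	uint64_t good = (uint64_t)pfn << 13;	/* 0x220000000, needs 40 bits */

	printf("32-bit: %#x vs 64-bit: %#llx\n", bad, (unsigned long long)good);
	return 0;
}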
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
index a5e2726a067e..420dcfde289f 100644
--- a/arch/arc/boot/dts/axc001.dtsi
+++ b/arch/arc/boot/dts/axc001.dtsi
@@ -95,6 +95,6 @@
 		#size-cells = <1>;
 		ranges = <0x00000000 0x80000000 0x40000000>;
 		device_type = "memory";
-		reg = <0x00000000 0x20000000>;	/* 512MiB */
+		reg = <0x80000000 0x20000000>;	/* 512MiB */
 	};
 };
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index 846481f37eef..f90fadf7f94e 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -98,6 +98,6 @@
 		#size-cells = <1>;
 		ranges = <0x00000000 0x80000000 0x40000000>;
 		device_type = "memory";
-		reg = <0x00000000 0x20000000>;	/* 512MiB */
+		reg = <0x80000000 0x20000000>;	/* 512MiB */
 	};
 };
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index 2f0b33257db2..06a9f294a2e6 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -121,6 +121,6 @@
 		#size-cells = <1>;
 		ranges = <0x00000000 0x80000000 0x40000000>;
 		device_type = "memory";
-		reg = <0x00000000 0x20000000>;	/* 512MiB */
+		reg = <0x80000000 0x20000000>;	/* 512MiB */
 	};
 };
diff --git a/arch/arc/boot/dts/nsim_hs.dts b/arch/arc/boot/dts/nsim_hs.dts
index 911f069e0540..b0eb0e7fe21d 100644
--- a/arch/arc/boot/dts/nsim_hs.dts
+++ b/arch/arc/boot/dts/nsim_hs.dts
@@ -11,8 +11,16 @@
 
 / {
 	compatible = "snps,nsim_hs";
+	#address-cells = <2>;
+	#size-cells = <2>;
 	interrupt-parent = <&core_intc>;
 
+	memory {
+		device_type = "memory";
+		reg = <0x0 0x80000000 0x0 0x40000000	/* 1 GB low mem */
+		       0x1 0x00000000 0x0 0x40000000>;	/* 1 GB highmem */
+	};
+
 	chosen {
 		bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
 	};
@@ -26,8 +34,8 @@
 		#address-cells = <1>;
 		#size-cells = <1>;
 
-		/* child and parent address space 1:1 mapped */
-		ranges;
+		/* only perip space at end of low mem accessible */
+		ranges = <0x80000000 0x0 0x80000000 0x80000000>;
 
 		core_intc: core-interrupt-controller {
 			compatible = "snps,archs-intc";
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi
index a870bdd5e404..296d371a335c 100644
--- a/arch/arc/boot/dts/skeleton.dtsi
+++ b/arch/arc/boot/dts/skeleton.dtsi
@@ -32,6 +32,6 @@
 
 	memory {
 		device_type = "memory";
-		reg = <0x00000000 0x10000000>;	/* 256M */
+		reg = <0x80000000 0x10000000>;	/* 256M */
 	};
 };
diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi
index 9393fd902f0d..84226bd48baf 100644
--- a/arch/arc/boot/dts/vdk_axc003.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003.dtsi
@@ -56,6 +56,6 @@
 		#size-cells = <1>;
 		ranges = <0x00000000 0x80000000 0x40000000>;
 		device_type = "memory";
-		reg = <0x00000000 0x20000000>;	/* 512MiB */
+		reg = <0x80000000 0x20000000>;	/* 512MiB */
 	};
 };
diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
index 9bee8ed09eb0..31f0fb5fc91d 100644
--- a/arch/arc/boot/dts/vdk_axc003_idu.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
@@ -71,6 +71,6 @@
 		#size-cells = <1>;
 		ranges = <0x00000000 0x80000000 0x40000000>;
 		device_type = "memory";
-		reg = <0x00000000 0x20000000>;	/* 512MiB */
+		reg = <0x80000000 0x20000000>;	/* 512MiB */
 	};
 };
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index d8023bc8d1ad..7fac7d85ed6a 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -120,7 +120,7 @@
 
 /* gcc builtin sr needs reg param to be long immediate */
 #define write_aux_reg(reg_immed, val)		\
-		__builtin_arc_sr((unsigned int)val, reg_immed)
+		__builtin_arc_sr((unsigned int)(val), reg_immed)
 
 #else
 
@@ -327,8 +327,8 @@ struct bcr_generic {
  */
 
 struct cpuinfo_arc_mmu {
-	unsigned int ver:4, pg_sz_k:8, s_pg_sz_m:8, u_dtlb:6, u_itlb:6;
-	unsigned int num_tlb:16, sets:12, ways:4;
+	unsigned int ver:4, pg_sz_k:8, s_pg_sz_m:8, pad:10, sasid:1, pae:1;
+	unsigned int sets:12, ways:4, u_dtlb:8, u_itlb:8;
 };
 
 struct cpuinfo_arc_cache {
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index e23ea6e7633a..abf06e81c929 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -65,6 +65,7 @@ extern int ioc_exists;
 #if defined(CONFIG_ARC_MMU_V3) || defined(CONFIG_ARC_MMU_V4)
 #define ARC_REG_IC_PTAG		0x1E
 #endif
+#define ARC_REG_IC_PTAG_HI	0x1F
 
 /* Bit val in IC_CTRL */
 #define IC_CTRL_CACHE_DISABLE	0x1
@@ -77,6 +78,7 @@ extern int ioc_exists;
 #define ARC_REG_DC_FLSH		0x4B
 #define ARC_REG_DC_FLDL		0x4C
 #define ARC_REG_DC_PTAG		0x5C
+#define ARC_REG_DC_PTAG_HI	0x5F
 
 /* Bit val in DC_CTRL */
 #define DC_CTRL_INV_MODE_FLUSH	0x40
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index 0992d3dbcc65..fbe3587c4f36 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -31,10 +31,10 @@
 
 void flush_cache_all(void);
 
-void flush_icache_range(unsigned long start, unsigned long end);
-void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len);
-void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
-void __flush_dcache_page(unsigned long paddr, unsigned long vaddr);
+void flush_icache_range(unsigned long kstart, unsigned long kend);
+void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len);
+void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr);
+void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index 415443c2a8c4..1aff3be91075 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -110,13 +110,12 @@
 
 .macro FAKE_RET_FROM_EXCPN
 
-	ld	r9, [sp, PT_status32]
-	bic	r9, r9, (STATUS_U_MASK|STATUS_DE_MASK)
-	bset	r9, r9, STATUS_L_BIT
+	lr	r9, [status32]
+	bclr	r9, r9, STATUS_AE_BIT
+	or	r9, r9, (STATUS_E1_MASK|STATUS_E2_MASK)
 	sr	r9, [erstatus]
 	mov	r9, 55f
 	sr	r9, [eret]
-
 	rtie
 55:
 .endm
diff --git a/arch/arc/include/asm/highmem.h b/arch/arc/include/asm/highmem.h
new file mode 100644
index 000000000000..b1585c96324a
--- /dev/null
+++ b/arch/arc/include/asm/highmem.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#ifdef CONFIG_HIGHMEM
+
+#include <uapi/asm/page.h>
+#include <asm/kmap_types.h>
+
+/* start after vmalloc area */
+#define FIXMAP_BASE		(PAGE_OFFSET - FIXMAP_SIZE - PKMAP_SIZE)
+#define FIXMAP_SIZE		PGDIR_SIZE	/* only 1 PGD worth */
+#define KM_TYPE_NR		((FIXMAP_SIZE >> PAGE_SHIFT)/NR_CPUS)
+#define FIXMAP_ADDR(nr)		(FIXMAP_BASE + ((nr) << PAGE_SHIFT))
+
+/* start after fixmap area */
+#define PKMAP_BASE		(FIXMAP_BASE + FIXMAP_SIZE)
+#define PKMAP_SIZE		PGDIR_SIZE
+#define LAST_PKMAP		(PKMAP_SIZE >> PAGE_SHIFT)
+#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
+#define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))
+#define PKMAP_NR(virt)		(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+
+#define kmap_prot		PAGE_KERNEL
+
+
+#include <asm/cacheflush.h>
+
+extern void *kmap(struct page *page);
+extern void *kmap_high(struct page *page);
+extern void *kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void kunmap_high(struct page *page);
+
+extern void kmap_init(void);
+
+static inline void flush_cache_kmaps(void)
+{
+	flush_cache_all();
+}
+
+static inline void kunmap(struct page *page)
+{
+	BUG_ON(in_interrupt());
+	if (!PageHighMem(page))
+		return;
+	kunmap_high(page);
+}
+
+
+#endif
+
+#endif
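For orientation, kmap() above may sleep and maps through a PKMAP window, while kmap_atomic() uses the per-cpu fixmap slots. A hedged sketch of a typical consumer of this API (the helper name is hypothetical, not from this patch):

/* Hypothetical consumer of the kmap API declared above */
static void copy_from_page(struct page *page, void *dst, size_t len)
{
	void *src = kmap(page);	/* highmem: maps via PKMAP; lowmem: direct */

	memcpy(dst, src, len);
	kunmap(page);		/* no-op for lowmem pages, see above */
}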
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
new file mode 100644
index 000000000000..c5094de86403
--- /dev/null
+++ b/arch/arc/include/asm/hugepage.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#ifndef _ASM_ARC_HUGEPAGE_H
+#define _ASM_ARC_HUGEPAGE_H
+
+#include <linux/types.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+	return __pte(pmd_val(pmd));
+}
+
+static inline pmd_t pte_pmd(pte_t pte)
+{
+	return __pmd(pte_val(pte));
+}
+
+#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mkhuge(pmd)		pte_pmd(pte_mkhuge(pmd_pte(pmd)))
+#define pmd_mknotpresent(pmd)	pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
+#define pmd_mksplitting(pmd)	pte_pmd(pte_mkspecial(pmd_pte(pmd)))
+#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
+
+#define pmd_write(pmd)		pte_write(pmd_pte(pmd))
+#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
+#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
+#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
+#define pmd_special(pmd)	pte_special(pmd_pte(pmd))
+
+#define mk_pmd(page, prot)	pte_pmd(mk_pte(page, prot))
+
+#define pmd_trans_huge(pmd)	(pmd_val(pmd) & _PAGE_HW_SZ)
+#define pmd_trans_splitting(pmd)	(pmd_trans_huge(pmd) && pmd_special(pmd))
+
+#define pfn_pmd(pfn, prot)	(__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+	/*
+	 * open-coded pte_modify() with additional retaining of HW_SZ bit
+	 * so that pmd_trans_huge() remains true for this PMD
+	 */
+	return __pmd((pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HW_SZ)) | pgprot_val(newprot));
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+			      pmd_t *pmdp, pmd_t pmd)
+{
+	*pmdp = pmd;
+}
+
+extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+				 pmd_t *pmd);
+
+#define has_transparent_hugepage() 1
+
+/* Generic variants assume pgtable_t is struct page *, hence need for these */
+#define __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+				       pgtable_t pgtable);
+
+#define __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+				unsigned long end);
+
+#endif
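The scheme above encodes a huge mapping as an ordinary PTE with _PAGE_HW_SZ set, which is why the pte_* zoo is reused wholesale and why pmd_modify() goes out of its way to retain that bit. An illustrative fragment, not from the patch (assumes a `page` in scope and the ARC PAGE_U_* protections):

/* Illustrative check only: a protection change must not drop
 * _PAGE_HW_SZ, or the mapping would silently stop being "huge".
 */
pmd_t pmd = pmd_mkhuge(mk_pmd(page, PAGE_U_W_R));	/* huge + writable */

pmd = pmd_modify(pmd, PAGE_U_R);	/* mprotect-style: drop write perm */
BUG_ON(!pmd_trans_huge(pmd));		/* _PAGE_HW_SZ retained by design */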
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index bc5103637326..4fd7d62a6e30 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -16,6 +16,7 @@
 #ifdef CONFIG_ISA_ARCOMPACT
 #define TIMER0_IRQ      3
 #define TIMER1_IRQ      4
+#define IPI_IRQ		(NR_CPU_IRQS-1) /* dummy to enable SMP build for up hardware */
 #else
 #define TIMER0_IRQ      16
 #define TIMER1_IRQ      17
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index aa805575c320..d8c608174617 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -23,11 +23,13 @@
 #define STATUS_E2_BIT		2	/* Int 2 enable */
 #define STATUS_A1_BIT		3	/* Int 1 active */
 #define STATUS_A2_BIT		4	/* Int 2 active */
+#define STATUS_AE_BIT		5	/* Exception active */
 
 #define STATUS_E1_MASK		(1<<STATUS_E1_BIT)
 #define STATUS_E2_MASK		(1<<STATUS_E2_BIT)
 #define STATUS_A1_MASK		(1<<STATUS_A1_BIT)
 #define STATUS_A2_MASK		(1<<STATUS_A2_BIT)
+#define STATUS_AE_MASK		(1<<STATUS_AE_BIT)
 #define STATUS_IE_MASK		(STATUS_E1_MASK | STATUS_E2_MASK)
 
 /* Other Interrupt Handling related Aux regs */
@@ -91,7 +93,19 @@ static inline void arch_local_irq_restore(unsigned long flags)
 /*
  * Unconditionally Enable IRQs
  */
-extern void arch_local_irq_enable(void);
+static inline void arch_local_irq_enable(void)
+{
+	unsigned long temp;
+
+	__asm__ __volatile__(
+	"	lr	%0, [status32]	\n"
+	"	or	%0, %0, %1	\n"
+	"	flag	%0		\n"
+	: "=&r"(temp)
+	: "n"((STATUS_E1_MASK | STATUS_E2_MASK))
+	: "cc", "memory");
+}
+
 
 /*
  * Unconditionally Disable IRQs
diff --git a/arch/arc/include/asm/kmap_types.h b/arch/arc/include/asm/kmap_types.h
new file mode 100644
index 000000000000..f0d7f6acea4e
--- /dev/null
+++ b/arch/arc/include/asm/kmap_types.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _ASM_KMAP_TYPES_H
+#define _ASM_KMAP_TYPES_H
+
+/*
+ * We primarily need to define KM_TYPE_NR here but that in turn
+ * is a function of PGDIR_SIZE etc.
+ * To avoid circular deps issue, put everything in asm/highmem.h
+ */
+#endif
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index e8993a2be6c2..6ff657a904b6 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -23,11 +23,8 @@
  * @dt_compat:		Array of device tree 'compatible' strings
  * 			(XXX: although only 1st entry is looked at)
  * @init_early:		Very early callback [called from setup_arch()]
- * @init_irq:		setup external IRQ controllers [called from init_IRQ()]
- * @init_smp:		for each CPU (e.g. setup IPI)
+ * @init_cpu_smp:	for each CPU as it is coming up (SMP as well as UP)
  * 			[(M):init_IRQ(), (o):start_kernel_secondary()]
- * @init_time:		platform specific clocksource/clockevent registration
- * 			[called from time_init()]
  * @init_machine:	arch initcall level callback (e.g. populate static
  * 			platform devices or parse Devicetree)
  * @init_late:		Late initcall level callback
@@ -36,13 +33,10 @@
 struct machine_desc {
 	const char		*name;
 	const char		**dt_compat;
-
 	void			(*init_early)(void);
-	void			(*init_irq)(void);
 #ifdef CONFIG_SMP
-	void			(*init_smp)(unsigned int);
+	void			(*init_cpu_smp)(unsigned int);
 #endif
-	void			(*init_time)(void);
 	void			(*init_machine)(void);
 	void			(*init_late)(void);
 
diff --git a/arch/arc/include/asm/mcip.h b/arch/arc/include/asm/mcip.h
index 52c11f0bb0e5..46f4e5351b2a 100644
--- a/arch/arc/include/asm/mcip.h
+++ b/arch/arc/include/asm/mcip.h
@@ -86,9 +86,6 @@ static inline void __mcip_cmd_data(unsigned int cmd, unsigned int param,
 	__mcip_cmd(cmd, param);
 }
 
-extern void mcip_init_early_smp(void);
-extern void mcip_init_smp(unsigned int cpu);
-
 #endif
 
 #endif
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index 0f9c3eb5327e..b144d7ca7d20 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -24,6 +24,7 @@
 #if (CONFIG_ARC_MMU_VER < 4)
 #define ARC_REG_TLBPD0		0x405
 #define ARC_REG_TLBPD1		0x406
+#define ARC_REG_TLBPD1HI	0	/* Dummy: allows code sharing with ARC700 */
 #define ARC_REG_TLBINDEX	0x407
 #define ARC_REG_TLBCOMMAND	0x408
 #define ARC_REG_PID		0x409
@@ -31,6 +32,7 @@
 #else
 #define ARC_REG_TLBPD0		0x460
 #define ARC_REG_TLBPD1		0x461
+#define ARC_REG_TLBPD1HI	0x463
 #define ARC_REG_TLBINDEX	0x464
 #define ARC_REG_TLBCOMMAND	0x465
 #define ARC_REG_PID		0x468
@@ -83,6 +85,11 @@ void arc_mmu_init(void);
 extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
 void read_decode_mmu_bcr(void);
 
+static inline int is_pae40_enabled(void)
+{
+	return IS_ENABLED(CONFIG_ARC_HAS_PAE40);
+}
+
 #endif	/* !__ASSEMBLY__ */
 
 #endif
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 9c8aa41e45c2..429957f1c236 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -43,7 +43,6 @@ typedef struct {
 typedef struct {
 	unsigned long pgprot;
 } pgprot_t;
-typedef unsigned long pgtable_t;
 
 #define pte_val(x)      ((x).pte)
 #define pgd_val(x)      ((x).pgd)
@@ -57,20 +56,26 @@ typedef unsigned long pgtable_t;
 
 #else /* !STRICT_MM_TYPECHECKS */
 
+#ifdef CONFIG_ARC_HAS_PAE40
+typedef unsigned long long pte_t;
+#else
 typedef unsigned long pte_t;
+#endif
 typedef unsigned long pgd_t;
 typedef unsigned long pgprot_t;
-typedef unsigned long pgtable_t;
 
 #define pte_val(x)	(x)
 #define pgd_val(x)	(x)
 #define pgprot_val(x)	(x)
 #define __pte(x)	(x)
+#define __pgd(x)	(x)
 #define __pgprot(x)	(x)
 #define pte_pgprot(x)	(x)
 
 #endif
 
+typedef pte_t * pgtable_t;
+
 #define ARCH_PFN_OFFSET     (CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
 
 #define pfn_valid(pfn)      (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 81208bfd9dcb..86ed671286df 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -49,7 +49,7 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
 
 static inline int __get_order_pgd(void)
 {
-	return get_order(PTRS_PER_PGD * 4);
+	return get_order(PTRS_PER_PGD * sizeof(pgd_t));
 }
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
@@ -87,7 +87,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline int __get_order_pte(void)
 {
-	return get_order(PTRS_PER_PTE * 4);
+	return get_order(PTRS_PER_PTE * sizeof(pte_t));
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
@@ -107,10 +107,10 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	pgtable_t pte_pg;
 	struct page *page;
 
-	pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+	pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
 	if (!pte_pg)
 		return 0;
-	memzero((void *)pte_pg, PTRS_PER_PTE * 4);
+	memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
 	page = virt_to_page(pte_pg);
 	if (!pgtable_page_ctor(page)) {
 		__free_page(page);
@@ -128,12 +128,12 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
 {
 	pgtable_page_dtor(virt_to_page(ptep));
-	free_pages(ptep, __get_order_pte());
+	free_pages((unsigned long)ptep, __get_order_pte());
 }
 
 #define __pte_free_tlb(tlb, pte, addr)  pte_free((tlb)->mm, pte)
 
 #define check_pgt_cache()   do { } while (0)
-#define pmd_pgtable(pmd) pmd_page_vaddr(pmd)
+#define pmd_pgtable(pmd)	((pgtable_t) pmd_page_vaddr(pmd))
 
 #endif /* _ASM_ARC_PGALLOC_H */
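The recurring change from a literal 4 to sizeof(pte_t) / sizeof(pgd_t) above is PAE40 groundwork: with 40-bit physical addresses, pte_t becomes unsigned long long and a PTE table's byte size doubles. A compile-time illustration (assumed values for 8K pages, not from the patch):

/* Illustrative only: sizing that hardcodes 4 under-allocates once
 * pte_t widens to 64 bits.  With 8K pages, BITS_FOR_PTE = 8:
 */
#define MY_PTRS_PER_PTE	(1UL << 8)			/* 256 entries */

_Static_assert(MY_PTRS_PER_PTE * 4 == 1024, "old math: 1KB table");
_Static_assert(MY_PTRS_PER_PTE * sizeof(unsigned long long) == 2048,
	       "PAE40 math: 2KB table");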
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 1281718802f7..57af2f05ae84 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -38,6 +38,7 @@
 #include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm-generic/pgtable-nopmd.h>
+#include <linux/const.h>
 
 /**************************************************************************
  * Page Table Flags
@@ -60,7 +61,8 @@
 #define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
 #define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
 #define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
-#define _PAGE_MODIFIED      (1<<6)	/* Page modified (dirty) (S) */
+#define _PAGE_DIRTY         (1<<6)	/* Page modified (dirty) (S) */
+#define _PAGE_SPECIAL       (1<<7)
 #define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
 #define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */
 
@@ -71,7 +73,8 @@
 #define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
 #define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
 #define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
-#define _PAGE_MODIFIED      (1<<5)	/* Page modified (dirty) (S) */
+#define _PAGE_DIRTY         (1<<5)	/* Page modified (dirty) (S) */
+#define _PAGE_SPECIAL       (1<<6)
 
 #if (CONFIG_ARC_MMU_VER >= 4)
 #define _PAGE_WTHRU         (1<<7)	/* Page cache mode write-thru (H) */
@@ -81,32 +84,33 @@
 #define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */
 
 #if (CONFIG_ARC_MMU_VER >= 4)
-#define _PAGE_SZ            (1<<10)	/* Page Size indicator (H) */
+#define _PAGE_HW_SZ         (1<<10)	/* Page Size indicator (H): 0 normal, 1 super */
 #endif
 
 #define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
 					   usable for shared TLB entries (H) */
+
+#define _PAGE_UNUSED_BIT    (1<<12)
 #endif
 
 /* vmalloc permissions */
 #define _K_PAGE_PERMS  (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
 			_PAGE_GLOBAL | _PAGE_PRESENT)
 
-#ifdef CONFIG_ARC_CACHE_PAGES
-#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
-#else
-#define _PAGE_DEF_CACHEABLE (0)
+#ifndef CONFIG_ARC_CACHE_PAGES
+#undef _PAGE_CACHEABLE
+#define _PAGE_CACHEABLE 0
 #endif
 
-/* Helper for every "user" page
- * -kernel can R/W/X
- * -by default cached, unless config otherwise
- * -present in memory
- */
-#define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)
+#ifndef _PAGE_HW_SZ
+#define _PAGE_HW_SZ	0
+#endif
+
+/* Defaults for every user page */
+#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
 
 /* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
+#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
 
 /* More Abbrevaited helpers */
 #define PAGE_U_NONE     __pgprot(___DEF)
@@ -122,15 +126,20 @@
  * user vaddr space - visible in all addr spaces, but kernel mode only
  * Thus Global, all-kernel-access, no-user-access, cached
  */
-#define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
+#define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_CACHEABLE)
 
 /* ioremap */
 #define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)
 
 /* Masks for actual TLB "PD"s */
-#define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT)
+#define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
 #define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
+
+#ifdef CONFIG_ARC_HAS_PAE40
+#define PTE_BITS_NON_RWX_IN_PD1	(0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
+#else
 #define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
+#endif
 
 /**************************************************************************
  * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
@@ -191,26 +200,22 @@
 
 /* Optimal Sizing of Pg Tbl - based on MMU page size */
 #if defined(CONFIG_ARC_PAGE_SIZE_8K)
-#define BITS_FOR_PTE	8
+#define BITS_FOR_PTE	8		/* 11:8:13 */
 #elif defined(CONFIG_ARC_PAGE_SIZE_16K)
-#define BITS_FOR_PTE	8
+#define BITS_FOR_PTE	8		/* 10:8:14 */
 #elif defined(CONFIG_ARC_PAGE_SIZE_4K)
-#define BITS_FOR_PTE	9
+#define BITS_FOR_PTE	9		/* 11:9:12 */
 #endif
 
 #define BITS_FOR_PGD	(32 - BITS_FOR_PTE - BITS_IN_PAGE)
 
-#define PGDIR_SHIFT	(BITS_FOR_PTE + BITS_IN_PAGE)
+#define PGDIR_SHIFT	(32 - BITS_FOR_PGD)
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)	/* vaddr span, not PDG sz */
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
-#ifdef __ASSEMBLY__
-#define	PTRS_PER_PTE	(1 << BITS_FOR_PTE)
-#define	PTRS_PER_PGD	(1 << BITS_FOR_PGD)
-#else
-#define	PTRS_PER_PTE	(1UL << BITS_FOR_PTE)
-#define	PTRS_PER_PGD	(1UL << BITS_FOR_PGD)
-#endif
+#define	PTRS_PER_PTE	_BITUL(BITS_FOR_PTE)
+#define	PTRS_PER_PGD	_BITUL(BITS_FOR_PGD)
+
 /*
  * Number of entries a user land program use.
  * TASK_SIZE is the maximum vaddr that can be used by a userland program.
@@ -270,15 +275,10 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 		(unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
 				PAGE_SHIFT)))
 
-#define mk_pte(page, pgprot)						\
-({									\
-	pte_t pte;							\
-	pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot);	\
-	pte;								\
-})
-
+#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
 #define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
-#define pfn_pte(pfn, prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define pfn_pte(pfn, prot)	(__pte(((pte_t)(pfn) << PAGE_SHIFT) | \
+				 pgprot_val(prot)))
 #define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
 /*
@@ -295,23 +295,26 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 /* Zoo of pte_xxx function */
 #define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
 #define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
-#define pte_dirty(pte)		(pte_val(pte) & _PAGE_MODIFIED)
+#define pte_dirty(pte)		(pte_val(pte) & _PAGE_DIRTY)
 #define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
-#define pte_special(pte)	(0)
+#define pte_special(pte)	(pte_val(pte) & _PAGE_SPECIAL)
 
 #define PTE_BIT_FUNC(fn, op) \
 static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
 
+PTE_BIT_FUNC(mknotpresent,	&= ~(_PAGE_PRESENT));
 PTE_BIT_FUNC(wrprotect,	&= ~(_PAGE_WRITE));
 PTE_BIT_FUNC(mkwrite,	|= (_PAGE_WRITE));
-PTE_BIT_FUNC(mkclean,	&= ~(_PAGE_MODIFIED));
-PTE_BIT_FUNC(mkdirty,	|= (_PAGE_MODIFIED));
+PTE_BIT_FUNC(mkclean,	&= ~(_PAGE_DIRTY));
+PTE_BIT_FUNC(mkdirty,	|= (_PAGE_DIRTY));
 PTE_BIT_FUNC(mkold,	&= ~(_PAGE_ACCESSED));
 PTE_BIT_FUNC(mkyoung,	|= (_PAGE_ACCESSED));
 PTE_BIT_FUNC(exprotect,	&= ~(_PAGE_EXECUTE));
 PTE_BIT_FUNC(mkexec,	|= (_PAGE_EXECUTE));
+PTE_BIT_FUNC(mkspecial,	|= (_PAGE_SPECIAL));
+PTE_BIT_FUNC(mkhuge,	|= (_PAGE_HW_SZ));
 
-static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+#define __HAVE_ARCH_PTE_SPECIAL
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
@@ -357,7 +360,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 #define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
 #endif
 
-extern void paging_init(void);
 extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		      pte_t *ptep);
@@ -383,6 +385,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#include <asm/hugepage.h>
+#endif
+
 #include <asm-generic/pgtable.h>
 
 /* to cope with aliasing VIPT cache */
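The PTRS_PER_* hunk above leans on _BITUL() from linux/const.h, which exists precisely to avoid the #ifdef __ASSEMBLY__ duplication being deleted here: assemblers cannot digest C integer suffixes, so the suffix is pasted on only for the C side. Abbreviated sketch of the <uapi/linux/const.h> mechanism:

/* Abbreviated from <uapi/linux/const.h>: one definition usable from
 * both C and assembly, so PTRS_PER_PTE needs no #ifdef anymore.
 */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* asm: bare constant, no suffix */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C: paste the UL/ULL suffix on */
#endif

#define _BITUL(x)	(_AC(1, UL) << (x))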
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index ee682d8e0213..44545354e9e8 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -114,7 +114,12 @@ extern unsigned int get_wchan(struct task_struct *p);
  * -----------------------------------------------------------------------------
  */
 #define VMALLOC_START	0x70000000
-#define VMALLOC_SIZE	(PAGE_OFFSET - VMALLOC_START)
+
+/*
+ * 1 PGDIR_SIZE each for fixmap/pkmap, 2 PGDIR_SIZE gutter
+ * See asm/highmem.h for details
+ */
+#define VMALLOC_SIZE	(PAGE_OFFSET - VMALLOC_START - PGDIR_SIZE * 4)
 #define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)
 
 #define USER_KERNEL_GUTTER    0x10000000
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 6e3ef5ba4f74..307846691be6 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -33,4 +33,11 @@ extern int root_mountflags, end_mem;
 void setup_processor(void);
 void __init setup_arch_memory(void);
 
+/* Helpers used in arc_*_mumbojumbo routines */
+#define IS_AVAIL1(v, s)		((v) ? s : "")
+#define IS_DISABLED_RUN(v)	((v) ? "" : "(disabled) ")
+#define IS_USED_RUN(v)		((v) ? "" : "(not used) ")
+#define IS_USED_CFG(cfg)	IS_USED_RUN(IS_ENABLED(cfg))
+#define IS_AVAIL2(v, s, cfg)	IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
+
 #endif /* __ASMARC_SETUP_H */
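A subtlety of IS_AVAIL2() above: it expands to two comma-separated string arguments, not one, so it must pair with "%s%s" in the format string. A hypothetical arc_mmu_mumbojumbo()-style fragment (the mmu->pae field is the one this series adds in arcregs.h; the surrounding buffer bookkeeping is assumed):

/* Hypothetical printer fragment: IS_AVAIL2() yields the feature string,
 * then "(not used) " when hardware has it but the Kconfig is off.
 */
n += scnprintf(buf + n, len - n, "MMU [v%x]\t: %s%s\n",
	       mmu->ver, IS_AVAIL2(mmu->pae, "PAE40 ", CONFIG_ARC_HAS_PAE40));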
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 3845b9e94f69..133c867d15af 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -45,12 +45,19 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
  * struct plat_smp_ops	- SMP callbacks provided by platform to ARC SMP
  *
  * @info:		SoC SMP specific info for /proc/cpuinfo etc
+ * @init_early_smp:	A SMP specific h/w block can init itself
+ * 			Could be common across platforms so not covered by
+ * 			mach_desc->init_early()
+ * @init_irq_cpu:	Called for each core so SMP h/w block driver can do
+ * 			any needed setup per cpu (e.g. IPI request)
  * @cpu_kick:		For Master to kickstart a cpu (optionally at a PC)
  * @ipi_send:		To send IPI to a @cpu
  * @ips_clear:		To clear IPI received at @irq
  */
 struct plat_smp_ops {
 	const char 	*info;
+	void		(*init_early_smp)(void);
+	void		(*init_irq_cpu)(int cpu);
 	void		(*cpu_kick)(int cpu, unsigned long pc);
 	void		(*ipi_send)(int cpu);
 	void		(*ipi_clear)(int irq);
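The intended user of the two new hooks is an SMP hardware block driver such as the ARConnect/MCIP code reworked elsewhere in this series: init_early_smp runs once on the master, init_irq_cpu on every core as it comes up. A rough registration sketch (the mcip_* names are assumptions based on this series, not verified signatures):

/* Sketch: how an SMP h/w block driver would populate the new hooks */
struct plat_smp_ops plat_smp_ops = {
	.info		= smp_cpuinfo_buf,
	.init_early_smp	= mcip_probe_n_setup,	/* once, by Master */
	.init_irq_cpu	= mcip_setup_per_cpu,	/* every core coming up */
	.ipi_send	= mcip_ipi_send,
	.ipi_clear	= mcip_ipi_clear,
};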
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
index 71c7b2e4b874..1fe9c8c80280 100644
--- a/arch/arc/include/asm/tlbflush.h
+++ b/arch/arc/include/asm/tlbflush.h
@@ -17,6 +17,8 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 void local_flush_tlb_range(struct vm_area_struct *vma,
 			   unsigned long start, unsigned long end);
+void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			       unsigned long end);
 
 #ifndef CONFIG_SMP
 #define flush_tlb_range(vma, s, e)	local_flush_tlb_range(vma, s, e)
@@ -24,6 +26,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma,
 #define flush_tlb_kernel_range(s, e)	local_flush_tlb_kernel_range(s, e)
 #define flush_tlb_all()			local_flush_tlb_all()
 #define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
+#define flush_pmd_tlb_range(vma, s, e)	local_flush_pmd_tlb_range(vma, s, e)
 #else
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end);
@@ -31,5 +34,7 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+
 #endif /* CONFIG_SMP */
 #endif
diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
index 9d129a2a1351..059aff38f10a 100644
--- a/arch/arc/include/uapi/asm/page.h
+++ b/arch/arc/include/uapi/asm/page.h
@@ -9,6 +9,8 @@
 #ifndef _UAPI__ASM_ARC_PAGE_H
 #define _UAPI__ASM_ARC_PAGE_H
 
+#include <linux/const.h>
+
 /* PAGE_SHIFT determines the page size */
 #if defined(CONFIG_ARC_PAGE_SIZE_16K)
 #define PAGE_SHIFT 14
@@ -25,13 +27,8 @@
 #define PAGE_SHIFT 13
 #endif
 
-#ifdef __ASSEMBLY__
-#define PAGE_SIZE	(1 << PAGE_SHIFT)
-#define PAGE_OFFSET	(0x80000000)
-#else
-#define PAGE_SIZE	(1UL << PAGE_SHIFT)	/* Default 8K */
-#define PAGE_OFFSET	(0x80000000UL)		/* Kernel starts at 2G onwards */
-#endif
+#define PAGE_SIZE	_BITUL(PAGE_SHIFT)	/* Default 8K */
+#define PAGE_OFFSET	_AC(0x80000000, UL)	/* Kernel starts at 2G onwrds */
 
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index 8fa76567e402..445e63a10754 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -24,7 +24,7 @@
 	.align 4
 
 # Initial 16 slots are Exception Vectors
-VECTOR	stext			; Restart Vector (jump to entry point)
+VECTOR	res_service		; Reset Vector
 VECTOR	mem_service		; Mem exception
 VECTOR	instr_service		; Instrn Error
 VECTOR	EV_MachineCheck		; Fatal Machine check
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S index 15d457b4403a..59f52035b4ea 100644 --- a/arch/arc/kernel/entry-compact.S +++ b/arch/arc/kernel/entry-compact.S | |||
@@ -86,7 +86,7 @@ | |||
86 | */ | 86 | */ |
87 | 87 | ||
88 | ; ********* Critical System Events ********************** | 88 | ; ********* Critical System Events ********************** |
89 | VECTOR res_service ; 0x0, Restart Vector (0x0) | 89 | VECTOR res_service ; 0x0, Reset Vector (0x0) |
90 | VECTOR mem_service ; 0x8, Mem exception (0x1) | 90 | VECTOR mem_service ; 0x8, Mem exception (0x1) |
91 | VECTOR instr_service ; 0x10, Instrn Error (0x2) | 91 | VECTOR instr_service ; 0x10, Instrn Error (0x2) |
92 | 92 | ||
@@ -155,13 +155,9 @@ int2_saved_reg: | |||
155 | ; --------------------------------------------- | 155 | ; --------------------------------------------- |
156 | .section .text, "ax",@progbits | 156 | .section .text, "ax",@progbits |
157 | 157 | ||
158 | res_service: ; processor restart | ||
159 | flag 0x1 ; not implemented | ||
160 | nop | ||
161 | nop | ||
162 | 158 | ||
163 | reserved: ; processor restart | 159 | reserved: |
164 | rtie ; jump to processor initializations | 160 | flag 1 ; Unexpected event, halt |
165 | 161 | ||
166 | ;##################### Interrupt Handling ############################## | 162 | ;##################### Interrupt Handling ############################## |
167 | 163 | ||
@@ -175,12 +171,25 @@ ENTRY(handle_interrupt_level2) | |||
175 | 171 | ||
176 | ;------------------------------------------------------ | 172 | ;------------------------------------------------------ |
177 | ; if L2 IRQ interrupted a L1 ISR, disable preemption | 173 | ; if L2 IRQ interrupted a L1 ISR, disable preemption |
174 | ; | ||
175 | ; This is to avoid a potential L1-L2-L1 scenario | ||
176 | ; -L1 IRQ taken | ||
177 | ; -L2 interrupts L1 (before L1 ISR could run) | ||
178 | ; -preemption off IRQ, user task in syscall picked to run | ||
179 | ; -RTIE to userspace | ||
180 | ; Returns from L2 context fine | ||
181 | ; But both L1 and L2 re-enabled, so another L1 can be taken | ||
182 | ; while prev L1 is still unserviced | ||
183 | ; | ||
178 | ;------------------------------------------------------ | 184 | ;------------------------------------------------------ |
179 | 185 | ||
186 | ; L2 interrupting L1 implies both L2 and L1 active | ||
187 | ; However both A2 and A1 are NOT set in STATUS32, thus | ||
188 | ; need to check STATUS32_L2 to determine if L1 was active | ||
189 | |||
180 | ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs) | 190 | ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs) |
181 | bbit0 r9, STATUS_A1_BIT, 1f ; L1 not active when L2 IRQ, so normal | 191 | bbit0 r9, STATUS_A1_BIT, 1f ; L1 not active when L2 IRQ, so normal |
182 | 192 | ||
183 | ; A1 is set in status32_l2 | ||
184 | ; bump thread_info->preempt_count (Disable preemption) | 193 | ; bump thread_info->preempt_count (Disable preemption) |
185 | GET_CURR_THR_INFO_FROM_SP r10 | 194 | GET_CURR_THR_INFO_FROM_SP r10 |
186 | ld r9, [r10, THREAD_INFO_PREEMPT_COUNT] | 195 | ld r9, [r10, THREAD_INFO_PREEMPT_COUNT] |
@@ -320,11 +329,10 @@ END(call_do_page_fault) | |||
320 | ; Note that we use realtime STATUS32 (not pt_regs->status32) to | 329 | ; Note that we use realtime STATUS32 (not pt_regs->status32) to |
321 | ; decide that. | 330 | ; decide that. |
322 | 331 | ||
323 | ; if Returning from Exception | 332 | and.f 0, r10, (STATUS_A1_MASK|STATUS_A2_MASK) |
324 | btst r10, STATUS_AE_BIT | 333 | bz .Lexcep_or_pure_K_ret |
325 | bnz .Lexcep_ret | ||
326 | 334 | ||
327 | ; Not Exception so maybe Interrupts (Level 1 or 2) | 335 | ; Returning from Interrupts (Level 1 or 2) |
328 | 336 | ||
329 | #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS | 337 | #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS |
330 | 338 | ||
@@ -365,8 +373,7 @@ END(call_do_page_fault) | |||
365 | st r9, [r10, THREAD_INFO_PREEMPT_COUNT] | 373 | st r9, [r10, THREAD_INFO_PREEMPT_COUNT] |
366 | 374 | ||
367 | 149: | 375 | 149: |
368 | ;return from level 2 | 376 | INTERRUPT_EPILOGUE 2 ; return from level 2 interrupt |
369 | INTERRUPT_EPILOGUE 2 | ||
370 | debug_marker_l2: | 377 | debug_marker_l2: |
371 | rtie | 378 | rtie |
372 | 379 | ||
@@ -374,15 +381,11 @@ not_level2_interrupt: | |||
374 | 381 | ||
375 | #endif | 382 | #endif |
376 | 383 | ||
377 | bbit0 r10, STATUS_A1_BIT, .Lpure_k_mode_ret | 384 | INTERRUPT_EPILOGUE 1 ; return from level 1 interrupt |
378 | |||
379 | ;return from level 1 | ||
380 | INTERRUPT_EPILOGUE 1 | ||
381 | debug_marker_l1: | 385 | debug_marker_l1: |
382 | rtie | 386 | rtie |
383 | 387 | ||
384 | .Lexcep_ret: | 388 | .Lexcep_or_pure_K_ret: |
385 | .Lpure_k_mode_ret: | ||
386 | 389 | ||
387 | ;this case is for syscalls or Exceptions or pure kernel mode | 390 | ;this case is for syscalls or Exceptions or pure kernel mode |
388 | 391 | ||
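The reworked epilogue replaces the old STATUS_AE_BIT test and the separate .Lpure_k_mode_ret branch with a single test of the active-interrupt bits. A C-level restatement for orientation only (the assembly above is authoritative; the helper names here are invented):

    /* r10 = realtime STATUS32 at return time */
    if (!(status32 & (STATUS_A1_MASK | STATUS_A2_MASK))) {
        /* no interrupt level in flight: returning from an exception,
         * a syscall, or pure kernel mode */
        ret_from_excep_or_pure_k();
    } else {
        /* interrupt return: with COMPACT_IRQ_LEVELS, level 2 is
         * checked and unwound first, then level 1 */
        ret_from_interrupt();
    }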
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S index 812f95e6ae69..689dd867fdff 100644 --- a/arch/arc/kernel/head.S +++ b/arch/arc/kernel/head.S | |||
@@ -50,28 +50,37 @@ | |||
50 | .endm | 50 | .endm |
51 | 51 | ||
52 | .section .init.text, "ax",@progbits | 52 | .section .init.text, "ax",@progbits |
53 | .type stext, @function | 53 | |
54 | .globl stext | 54 | ;---------------------------------------------------------------- |
55 | stext: | 55 | ; Default Reset Handler (jumped into from Reset vector) |
56 | ;------------------------------------------------------------------- | 56 | ; - Don't clobber r0,r1,r2 as they might have u-boot provided args |
57 | ; Don't clobber r0-r2 yet. It might have bootloader provided info | 57 | ; - Platforms can override this weak version if needed |
58 | ;------------------------------------------------------------------- | 58 | ;---------------------------------------------------------------- |
59 | WEAK(res_service) | ||
60 | j stext | ||
61 | END(res_service) | ||
62 | |||
63 | ;---------------------------------------------------------------- | ||
64 | ; Kernel Entry point | ||
65 | ;---------------------------------------------------------------- | ||
66 | ENTRY(stext) | ||
59 | 67 | ||
60 | CPU_EARLY_SETUP | 68 | CPU_EARLY_SETUP |
61 | 69 | ||
62 | #ifdef CONFIG_SMP | 70 | #ifdef CONFIG_SMP |
63 | ; Ensure Boot (Master) proceeds. Others wait in platform dependent way | ||
64 | ; IDENTITY Reg [ 3 2 1 0 ] | ||
65 | ; (cpu-id) ^^^ => Zero for UP ARC700 | ||
66 | ; => #Core-ID if SMP (Master 0) | ||
67 | ; Note that non-boot CPUs might not land here if halt-on-reset and | ||
68 | ; instead breathe life from @first_lines_of_secondary, but we still | ||
69 | ; need to make sure only boot cpu takes this path. | ||
70 | GET_CPU_ID r5 | 71 | GET_CPU_ID r5 |
71 | cmp r5, 0 | 72 | cmp r5, 0 |
72 | mov.ne r0, r5 | 73 | mov.nz r0, r5 |
73 | jne arc_platform_smp_wait_to_boot | 74 | #ifdef CONFIG_ARC_SMP_HALT_ON_RESET |
75 | ; Non-Master can proceed as the system would be booted sufficiently | ||
76 | jnz first_lines_of_secondary | ||
77 | #else | ||
78 | ; Non-Masters wait for Master to boot enough and bring them up | ||
79 | jnz arc_platform_smp_wait_to_boot | ||
74 | #endif | 80 | #endif |
81 | ; Master falls thru | ||
82 | #endif | ||
83 | |||
75 | ; Clear BSS before updating any globals | 84 | ; Clear BSS before updating any globals |
76 | ; XXX: use ZOL here | 85 | ; XXX: use ZOL here |
77 | mov r5, __bss_start | 86 | mov r5, __bss_start |
@@ -102,18 +111,14 @@ stext: | |||
102 | GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output) | 111 | GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output) |
103 | 112 | ||
104 | j start_kernel ; "C" entry point | 113 | j start_kernel ; "C" entry point |
114 | END(stext) | ||
105 | 115 | ||
106 | #ifdef CONFIG_SMP | 116 | #ifdef CONFIG_SMP |
107 | ;---------------------------------------------------------------- | 117 | ;---------------------------------------------------------------- |
108 | ; First lines of code run by secondary before jumping to 'C' | 118 | ; First lines of code run by secondary before jumping to 'C' |
109 | ;---------------------------------------------------------------- | 119 | ;---------------------------------------------------------------- |
110 | .section .text, "ax",@progbits | 120 | .section .text, "ax",@progbits |
111 | .type first_lines_of_secondary, @function | 121 | ENTRY(first_lines_of_secondary) |
112 | .globl first_lines_of_secondary | ||
113 | |||
114 | first_lines_of_secondary: | ||
115 | |||
116 | CPU_EARLY_SETUP | ||
117 | 122 | ||
118 | ; setup per-cpu idle task as "current" on this CPU | 123 | ; setup per-cpu idle task as "current" on this CPU |
119 | ld r0, [@secondary_idle_tsk] | 124 | ld r0, [@secondary_idle_tsk] |
@@ -126,5 +131,5 @@ first_lines_of_secondary: | |||
126 | GET_TSK_STACK_BASE r0, sp | 131 | GET_TSK_STACK_BASE r0, sp |
127 | 132 | ||
128 | j start_kernel_secondary | 133 | j start_kernel_secondary |
129 | 134 | END(first_lines_of_secondary) | |
130 | #endif | 135 | #endif |
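In C terms the new SMP dispatch in stext reads roughly as follows; this is a readability sketch, not code from the patch (GET_CPU_ID is the assembler macro used above):

    unsigned int cpu = GET_CPU_ID();        /* core-id from IDENTITY reg */

    if (cpu != 0) {                         /* non-Master core */
    #ifdef CONFIG_ARC_SMP_HALT_ON_RESET
        first_lines_of_secondary();         /* Master booted enough already */
    #else
        arc_platform_smp_wait_to_boot(cpu); /* spin until Master kicks us */
    #endif
    }
    /* Master (cpu 0) falls through: clear BSS, set stack, j start_kernel */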
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c index 039fac30b5c1..06bcedf19b62 100644 --- a/arch/arc/kernel/intc-compact.c +++ b/arch/arc/kernel/intc-compact.c | |||
@@ -79,17 +79,16 @@ static struct irq_chip onchip_intc = { | |||
79 | static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq, | 79 | static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq, |
80 | irq_hw_number_t hw) | 80 | irq_hw_number_t hw) |
81 | { | 81 | { |
82 | /* | 82 | switch (irq) { |
83 | * XXX: the IPI IRQ needs to be handled like TIMER too. However ARC core | 83 | case TIMER0_IRQ: |
84 | * code doesn't own it (like TIMER0). ISS IDU / ezchip define it | 84 | #ifdef CONFIG_SMP |
85 | * in platform header which can't be included here as it goes | 85 | case IPI_IRQ: |
86 | * against multi-platform image philosophy | 86 | #endif |
87 | */ | ||
88 | if (irq == TIMER0_IRQ) | ||
89 | irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq); | 87 | irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq); |
90 | else | 88 | break; |
89 | default: | ||
91 | irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq); | 90 | irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq); |
92 | 91 | } | |
93 | return 0; | 92 | return 0; |
94 | } | 93 | } |
95 | 94 | ||
@@ -148,78 +147,15 @@ IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ); | |||
148 | 147 | ||
149 | void arch_local_irq_enable(void) | 148 | void arch_local_irq_enable(void) |
150 | { | 149 | { |
151 | |||
152 | unsigned long flags = arch_local_save_flags(); | 150 | unsigned long flags = arch_local_save_flags(); |
153 | 151 | ||
154 | /* Allow both L1 and L2 at the onset */ | 152 | if (flags & STATUS_A2_MASK) |
155 | flags |= (STATUS_E1_MASK | STATUS_E2_MASK); | 153 | flags |= STATUS_E2_MASK; |
156 | 154 | else if (flags & STATUS_A1_MASK) | |
157 | /* Called from hard ISR (between irq_enter and irq_exit) */ | 155 | flags |= STATUS_E1_MASK; |
158 | if (in_irq()) { | ||
159 | |||
160 | /* If in L2 ISR, don't re-enable any further IRQs as this can | ||
161 | * cause IRQ priorities to get upside down. e.g. it could allow | ||
162 | * L1 be taken while in L2 hard ISR which is wrong not only in | ||
163 | * theory, it can also cause the dreaded L1-L2-L1 scenario | ||
164 | */ | ||
165 | if (flags & STATUS_A2_MASK) | ||
166 | flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); | ||
167 | |||
168 | /* Even if in L1 ISR, allow higher prio L2 IRQs */ | ||
169 | else if (flags & STATUS_A1_MASK) | ||
170 | flags &= ~(STATUS_E1_MASK); | ||
171 | } | ||
172 | |||
173 | /* called from soft IRQ, ideally we want to re-enable all levels */ | ||
174 | |||
175 | else if (in_softirq()) { | ||
176 | |||
177 | /* However if this is case of L1 interrupted by L2, | ||
178 | * re-enabling both may cause whacko L1-L2-L1 scenario | ||
179 | * because ARC700 allows level 1 to interrupt an active L2 ISR | ||
180 | * Thus we disable both | ||
181 | * However some code, executing in soft ISR wants some IRQs | ||
182 | * to be enabled so we re-enable L2 only | ||
183 | * | ||
184 | * How do we determine L1 intr by L2 | ||
185 | * -A2 is set (means in L2 ISR) | ||
186 | * -E1 is set in this ISR's pt_regs->status32 which is | ||
187 | * saved copy of status32_l2 when l2 ISR happened | ||
188 | */ | ||
189 | struct pt_regs *pt = get_irq_regs(); | ||
190 | |||
191 | if ((flags & STATUS_A2_MASK) && pt && | ||
192 | (pt->status32 & STATUS_A1_MASK)) { | ||
193 | /*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */ | ||
194 | flags &= ~(STATUS_E1_MASK); | ||
195 | } | ||
196 | } | ||
197 | 156 | ||
198 | arch_local_irq_restore(flags); | 157 | arch_local_irq_restore(flags); |
199 | } | 158 | } |
200 | 159 | ||
201 | #else /* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */ | ||
202 | |||
203 | /* | ||
204 | * Simpler version for only 1 level of interrupt | ||
205 | * Here we only Worry about Level 1 Bits | ||
206 | */ | ||
207 | void arch_local_irq_enable(void) | ||
208 | { | ||
209 | unsigned long flags; | ||
210 | |||
211 | /* | ||
212 | * ARC IDE Drivers tries to re-enable interrupts from hard-isr | ||
213 | * context which is simply wrong | ||
214 | */ | ||
215 | if (in_irq()) { | ||
216 | WARN_ONCE(1, "IRQ enabled from hard-isr"); | ||
217 | return; | ||
218 | } | ||
219 | |||
220 | flags = arch_local_save_flags(); | ||
221 | flags |= (STATUS_E1_MASK | STATUS_E2_MASK); | ||
222 | arch_local_irq_restore(flags); | ||
223 | } | ||
224 | #endif | ||
225 | EXPORT_SYMBOL(arch_local_irq_enable); | 160 | EXPORT_SYMBOL(arch_local_irq_enable); |
161 | #endif | ||
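The surviving arch_local_irq_enable() only re-enables up to the level that is currently active, so priorities can never invert. A worked case, assuming the usual ARCompact STATUS32 bit layout (E1=bit 1, E2=bit 2, A1=bit 3, A2=bit 4):

    /* L2 ISR that interrupted an L1 ISR: flags = A2|A1 = 0x18 */
    if (flags & STATUS_A2_MASK)     /* taken */
        flags |= STATUS_E2_MASK;    /* flags -> 0x1c: E2 on, E1 still off */

E1 staying off means a fresh L1 IRQ cannot land while the earlier, still-unserviced L1 frame is live: the L1-L2-L1 hazard documented in entry-compact.S above.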
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c index 2989a7bcf8a8..2ee226546c6a 100644 --- a/arch/arc/kernel/irq.c +++ b/arch/arc/kernel/irq.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
11 | #include <linux/irqchip.h> | 11 | #include <linux/irqchip.h> |
12 | #include <asm/mach_desc.h> | 12 | #include <asm/mach_desc.h> |
13 | #include <asm/smp.h> | ||
13 | 14 | ||
14 | /* | 15 | /* |
15 | * Late Interrupt system init called from start_kernel for Boot CPU only | 16 | * Late Interrupt system init called from start_kernel for Boot CPU only |
@@ -19,17 +20,20 @@ | |||
19 | */ | 20 | */ |
20 | void __init init_IRQ(void) | 21 | void __init init_IRQ(void) |
21 | { | 22 | { |
22 | /* Any external intc can be setup here */ | 23 | /* |
23 | if (machine_desc->init_irq) | 24 | * process the entire interrupt tree in one go |
24 | machine_desc->init_irq(); | 25 | * Any external intc will be set up provided DT chains them |
25 | 26 | * properly | |
26 | /* process the entire interrupt tree in one go */ | 27 | */ |
27 | irqchip_init(); | 28 | irqchip_init(); |
28 | 29 | ||
29 | #ifdef CONFIG_SMP | 30 | #ifdef CONFIG_SMP |
30 | /* Master CPU can initialize its side of IPI */ | 31 | /* an SMP H/w block could do IPI IRQ request here */ |
31 | if (machine_desc->init_smp) | 32 | if (plat_smp_ops.init_irq_cpu) |
32 | machine_desc->init_smp(smp_processor_id()); | 33 | plat_smp_ops.init_irq_cpu(smp_processor_id()); |
34 | |||
35 | if (machine_desc->init_cpu_smp) | ||
36 | machine_desc->init_cpu_smp(smp_processor_id()); | ||
33 | #endif | 37 | #endif |
34 | } | 38 | } |
35 | 39 | ||
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c index 4ffd1855f1bd..74a9b074ac3e 100644 --- a/arch/arc/kernel/mcip.c +++ b/arch/arc/kernel/mcip.c | |||
@@ -12,20 +12,14 @@ | |||
12 | #include <linux/irq.h> | 12 | #include <linux/irq.h> |
13 | #include <linux/spinlock.h> | 13 | #include <linux/spinlock.h> |
14 | #include <asm/mcip.h> | 14 | #include <asm/mcip.h> |
15 | #include <asm/setup.h> | ||
15 | 16 | ||
16 | static char smp_cpuinfo_buf[128]; | 17 | static char smp_cpuinfo_buf[128]; |
17 | static int idu_detected; | 18 | static int idu_detected; |
18 | 19 | ||
19 | static DEFINE_RAW_SPINLOCK(mcip_lock); | 20 | static DEFINE_RAW_SPINLOCK(mcip_lock); |
20 | 21 | ||
21 | /* | 22 | static void mcip_setup_per_cpu(int cpu) |
22 | * Any SMP specific init any CPU does when it comes up. | ||
23 | * Here we setup the CPU to enable Inter-Processor-Interrupts | ||
24 | * Called for each CPU | ||
25 | * -Master : init_IRQ() | ||
26 | * -Other(s) : start_kernel_secondary() | ||
27 | */ | ||
28 | void mcip_init_smp(unsigned int cpu) | ||
29 | { | 23 | { |
30 | smp_ipi_irq_setup(cpu, IPI_IRQ); | 24 | smp_ipi_irq_setup(cpu, IPI_IRQ); |
31 | } | 25 | } |
@@ -96,34 +90,8 @@ static void mcip_ipi_clear(int irq) | |||
96 | #endif | 90 | #endif |
97 | } | 91 | } |
98 | 92 | ||
99 | volatile int wake_flag; | 93 | static void mcip_probe_n_setup(void) |
100 | |||
101 | static void mcip_wakeup_cpu(int cpu, unsigned long pc) | ||
102 | { | ||
103 | BUG_ON(cpu == 0); | ||
104 | wake_flag = cpu; | ||
105 | } | ||
106 | |||
107 | void arc_platform_smp_wait_to_boot(int cpu) | ||
108 | { | 94 | { |
109 | while (wake_flag != cpu) | ||
110 | ; | ||
111 | |||
112 | wake_flag = 0; | ||
113 | __asm__ __volatile__("j @first_lines_of_secondary \n"); | ||
114 | } | ||
115 | |||
116 | struct plat_smp_ops plat_smp_ops = { | ||
117 | .info = smp_cpuinfo_buf, | ||
118 | .cpu_kick = mcip_wakeup_cpu, | ||
119 | .ipi_send = mcip_ipi_send, | ||
120 | .ipi_clear = mcip_ipi_clear, | ||
121 | }; | ||
122 | |||
123 | void mcip_init_early_smp(void) | ||
124 | { | ||
125 | #define IS_AVAIL1(var, str) ((var) ? str : "") | ||
126 | |||
127 | struct mcip_bcr { | 95 | struct mcip_bcr { |
128 | #ifdef CONFIG_CPU_BIG_ENDIAN | 96 | #ifdef CONFIG_CPU_BIG_ENDIAN |
129 | unsigned int pad3:8, | 97 | unsigned int pad3:8, |
@@ -161,6 +129,14 @@ void mcip_init_early_smp(void) | |||
161 | panic("kernel trying to use non-existent GRTC\n"); | 129 | panic("kernel trying to use non-existent GRTC\n"); |
162 | } | 130 | } |
163 | 131 | ||
132 | struct plat_smp_ops plat_smp_ops = { | ||
133 | .info = smp_cpuinfo_buf, | ||
134 | .init_early_smp = mcip_probe_n_setup, | ||
135 | .init_irq_cpu = mcip_setup_per_cpu, | ||
136 | .ipi_send = mcip_ipi_send, | ||
137 | .ipi_clear = mcip_ipi_clear, | ||
138 | }; | ||
139 | |||
164 | /*************************************************************************** | 140 | /*************************************************************************** |
165 | * ARCv2 Interrupt Distribution Unit (IDU) | 141 | * ARCv2 Interrupt Distribution Unit (IDU) |
166 | * | 142 | * |
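With the default wakeup logic moved out to generic smp.c, a platform SMP block registers itself exactly like the MCIP instance above. A hypothetical skeleton (all myplat_* names invented for illustration):

    static void myplat_probe_n_setup(void)   { /* detect/setup the SMP h/w block */ }
    static void myplat_init_irq_cpu(int cpu) { smp_ipi_irq_setup(cpu, IPI_IRQ); }
    static void myplat_ipi_send(int cpu)     { /* raise IPI in h/w */ }
    static void myplat_ipi_clear(int irq)    { /* ack IPI in h/w */ }

    struct plat_smp_ops plat_smp_ops = {
        .info           = smp_cpuinfo_buf,
        .init_early_smp = myplat_probe_n_setup, /* once, from smp_init_cpus() */
        .init_irq_cpu   = myplat_init_irq_cpu,  /* per cpu, as it comes up */
        .ipi_send       = myplat_ipi_send,
        .ipi_clear      = myplat_ipi_clear,
    };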
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index cabde9dc0696..c33e77c0ad3e 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c | |||
@@ -160,10 +160,6 @@ static const struct cpuinfo_data arc_cpu_tbl[] = { | |||
160 | { {0x00, NULL } } | 160 | { {0x00, NULL } } |
161 | }; | 161 | }; |
162 | 162 | ||
163 | #define IS_AVAIL1(v, s) ((v) ? s : "") | ||
164 | #define IS_USED_RUN(v) ((v) ? "" : "(not used) ") | ||
165 | #define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg)) | ||
166 | #define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg)) | ||
167 | 163 | ||
168 | static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) | 164 | static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) |
169 | { | 165 | { |
@@ -415,8 +411,9 @@ void __init setup_arch(char **cmdline_p) | |||
415 | if (machine_desc->init_early) | 411 | if (machine_desc->init_early) |
416 | machine_desc->init_early(); | 412 | machine_desc->init_early(); |
417 | 413 | ||
418 | setup_processor(); | ||
419 | smp_init_cpus(); | 414 | smp_init_cpus(); |
415 | |||
416 | setup_processor(); | ||
420 | setup_arch_memory(); | 417 | setup_arch_memory(); |
421 | 418 | ||
422 | /* copy flat DT out of .init and then unflatten it */ | 419 | /* copy flat DT out of .init and then unflatten it */ |
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index be13d12420ba..580587805fa3 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c | |||
@@ -42,8 +42,13 @@ void __init smp_prepare_boot_cpu(void) | |||
42 | } | 42 | } |
43 | 43 | ||
44 | /* | 44 | /* |
45 | * Initialise the CPU possible map early - this describes the CPUs | 45 | * Called from setup_arch() before calling setup_processor() |
46 | * which may be present or become present in the system. | 46 | * |
47 | * - Initialise the CPU possible map early - this describes the CPUs | ||
48 | * which may be present or become present in the system. | ||
49 | * - Call early smp init hook. This can initialize a specific multi-core | ||
50 | * IP which is, say, common to several platforms (hence not part of | ||
51 | * platform specific init_early() hook) | ||
47 | */ | 52 | */ |
48 | void __init smp_init_cpus(void) | 53 | void __init smp_init_cpus(void) |
49 | { | 54 | { |
@@ -51,6 +56,9 @@ void __init smp_init_cpus(void) | |||
51 | 56 | ||
52 | for (i = 0; i < NR_CPUS; i++) | 57 | for (i = 0; i < NR_CPUS; i++) |
53 | set_cpu_possible(i, true); | 58 | set_cpu_possible(i, true); |
59 | |||
60 | if (plat_smp_ops.init_early_smp) | ||
61 | plat_smp_ops.init_early_smp(); | ||
54 | } | 62 | } |
55 | 63 | ||
56 | /* called from init ( ) => process 1 */ | 64 | /* called from init ( ) => process 1 */ |
@@ -72,35 +80,29 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
72 | } | 80 | } |
73 | 81 | ||
74 | /* | 82 | /* |
75 | * After power-up, a non Master CPU needs to wait for Master to kick start it | 83 | * Default smp boot helper for Run-on-reset case where all cores start off |
76 | * | 84 | * together. Non-masters need to wait for Master to start running. |
77 | * The default implementation halts | 85 | * This is implemented using a flag in memory, which Non-masters spin-wait on. |
78 | * | 86 | * Master sets it to cpu-id of core to "ungate" it. |
79 | * This relies on platform specific support allowing Master to directly set | ||
80 | * this CPU's PC (to be @first_lines_of_secondary() and kick start it. | ||
81 | * | ||
82 | * In lack of such h/w assist, platforms can override this function | ||
83 | * - make this function busy-spin on a token, eventually set by Master | ||
84 | * (from arc_platform_smp_wakeup_cpu()) | ||
85 | * - Once token is available, jump to @first_lines_of_secondary | ||
86 | * (using inline asm). | ||
87 | * | ||
88 | * Alert: can NOT use stack here as it has not been determined/setup for CPU. | ||
89 | * If it turns out to be elaborate, it's better to code it in assembly | ||
90 | * | ||
91 | */ | 87 | */ |
92 | void __weak arc_platform_smp_wait_to_boot(int cpu) | 88 | static volatile int wake_flag; |
89 | |||
90 | static void arc_default_smp_cpu_kick(int cpu, unsigned long pc) | ||
93 | { | 91 | { |
94 | /* | 92 | BUG_ON(cpu == 0); |
95 | * As a hack for debugging - since debugger will single-step over the | 93 | wake_flag = cpu; |
96 | * FLAG insn - wrap the halt itself it in a self loop | 94 | } |
97 | */ | 95 | |
98 | __asm__ __volatile__( | 96 | void arc_platform_smp_wait_to_boot(int cpu) |
99 | "1: \n" | 97 | { |
100 | " flag 1 \n" | 98 | while (wake_flag != cpu) |
101 | " b 1b \n"); | 99 | ; |
100 | |||
101 | wake_flag = 0; | ||
102 | __asm__ __volatile__("j @first_lines_of_secondary \n"); | ||
102 | } | 103 | } |
103 | 104 | ||
105 | |||
104 | const char *arc_platform_smp_cpuinfo(void) | 106 | const char *arc_platform_smp_cpuinfo(void) |
105 | { | 107 | { |
106 | return plat_smp_ops.info ? : ""; | 108 | return plat_smp_ops.info ? : ""; |
@@ -129,8 +131,12 @@ void start_kernel_secondary(void) | |||
129 | 131 | ||
130 | pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu); | 132 | pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu); |
131 | 133 | ||
132 | if (machine_desc->init_smp) | 134 | /* Some SMP H/w setup - for each cpu */ |
133 | machine_desc->init_smp(cpu); | 135 | if (plat_smp_ops.init_irq_cpu) |
136 | plat_smp_ops.init_irq_cpu(cpu); | ||
137 | |||
138 | if (machine_desc->init_cpu_smp) | ||
139 | machine_desc->init_cpu_smp(cpu); | ||
134 | 140 | ||
135 | arc_local_timer_setup(); | 141 | arc_local_timer_setup(); |
136 | 142 | ||
@@ -161,6 +167,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) | |||
161 | if (plat_smp_ops.cpu_kick) | 167 | if (plat_smp_ops.cpu_kick) |
162 | plat_smp_ops.cpu_kick(cpu, | 168 | plat_smp_ops.cpu_kick(cpu, |
163 | (unsigned long)first_lines_of_secondary); | 169 | (unsigned long)first_lines_of_secondary); |
170 | else | ||
171 | arc_default_smp_cpu_kick(cpu, (unsigned long)NULL); | ||
164 | 172 | ||
165 | /* wait for 1 sec after kicking the secondary */ | 173 | /* wait for 1 sec after kicking the secondary */ |
166 | wait_till = jiffies + HZ; | 174 | wait_till = jiffies + HZ; |
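The Run-on-reset handshake is now all generic code: the kick is a plain store, and the wait side deliberately avoids the (not yet set up) stack by jumping into assembly. A timeline sketch of the pieces above:

    /* Master (__cpu_up)                   Secondary (from stext)
     * -----------------                   ----------------------
     *                                     arc_platform_smp_wait_to_boot(cpu)
     *                                       while (wake_flag != cpu) ;
     * plat_smp_ops.cpu_kick(cpu, pc), or
     * arc_default_smp_cpu_kick(cpu, 0):
     *   wake_flag = cpu;  --------------->  spin exits
     *                                       wake_flag = 0;
     *                                       j @first_lines_of_secondary
     * wait up to 1 sec (HZ) for the cpu to come online
     */

wake_flag is declared volatile so the spinning secondary re-loads it on every pass.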
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c index 4294761a2b3e..dfad287f1db1 100644 --- a/arch/arc/kernel/time.c +++ b/arch/arc/kernel/time.c | |||
@@ -285,7 +285,4 @@ void __init time_init(void) | |||
285 | 285 | ||
286 | /* sets up the periodic event timer */ | 286 | /* sets up the periodic event timer */ |
287 | arc_local_timer_setup(); | 287 | arc_local_timer_setup(); |
288 | |||
289 | if (machine_desc->init_time) | ||
290 | machine_desc->init_time(); | ||
291 | } | 288 | } |
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S index dd35bde39f69..894e696bddaa 100644 --- a/arch/arc/kernel/vmlinux.lds.S +++ b/arch/arc/kernel/vmlinux.lds.S | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <asm/thread_info.h> | 12 | #include <asm/thread_info.h> |
13 | 13 | ||
14 | OUTPUT_ARCH(arc) | 14 | OUTPUT_ARCH(arc) |
15 | ENTRY(_stext) | 15 | ENTRY(res_service) |
16 | 16 | ||
17 | #ifdef CONFIG_CPU_BIG_ENDIAN | 17 | #ifdef CONFIG_CPU_BIG_ENDIAN |
18 | jiffies = jiffies_64 + 4; | 18 | jiffies = jiffies_64 + 4; |
diff --git a/arch/arc/mm/Makefile b/arch/arc/mm/Makefile index 7beb941556c3..3703a4969349 100644 --- a/arch/arc/mm/Makefile +++ b/arch/arc/mm/Makefile | |||
@@ -8,3 +8,4 @@ | |||
8 | 8 | ||
9 | obj-y := extable.o ioremap.o dma.o fault.o init.o | 9 | obj-y := extable.o ioremap.o dma.o fault.o init.o |
10 | obj-y += tlb.o tlbex.o cache.o mmap.o | 10 | obj-y += tlb.o tlbex.o cache.o mmap.o |
11 | obj-$(CONFIG_HIGHMEM) += highmem.o | ||
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index 0d1a6e96839f..ff7ff6cbb811 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c | |||
@@ -25,7 +25,7 @@ static int l2_line_sz; | |||
25 | int ioc_exists; | 25 | int ioc_exists; |
26 | volatile int slc_enable = 1, ioc_enable = 1; | 26 | volatile int slc_enable = 1, ioc_enable = 1; |
27 | 27 | ||
28 | void (*_cache_line_loop_ic_fn)(unsigned long paddr, unsigned long vaddr, | 28 | void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr, |
29 | unsigned long sz, const int cacheop); | 29 | unsigned long sz, const int cacheop); |
30 | 30 | ||
31 | void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz); | 31 | void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz); |
@@ -37,7 +37,6 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len) | |||
37 | int n = 0; | 37 | int n = 0; |
38 | struct cpuinfo_arc_cache *p; | 38 | struct cpuinfo_arc_cache *p; |
39 | 39 | ||
40 | #define IS_USED_RUN(v) ((v) ? "" : "(disabled) ") | ||
41 | #define PR_CACHE(p, cfg, str) \ | 40 | #define PR_CACHE(p, cfg, str) \ |
42 | if (!(p)->ver) \ | 41 | if (!(p)->ver) \ |
43 | n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \ | 42 | n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \ |
@@ -47,7 +46,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len) | |||
47 | (p)->sz_k, (p)->assoc, (p)->line_len, \ | 46 | (p)->sz_k, (p)->assoc, (p)->line_len, \ |
48 | (p)->vipt ? "VIPT" : "PIPT", \ | 47 | (p)->vipt ? "VIPT" : "PIPT", \ |
49 | (p)->alias ? " aliasing" : "", \ | 48 | (p)->alias ? " aliasing" : "", \ |
50 | IS_ENABLED(cfg) ? "" : " (not used)"); | 49 | IS_USED_CFG(cfg)); |
51 | 50 | ||
52 | PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache"); | 51 | PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache"); |
53 | PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache"); | 52 | PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache"); |
@@ -63,7 +62,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len) | |||
63 | 62 | ||
64 | if (ioc_exists) | 63 | if (ioc_exists) |
65 | n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n", | 64 | n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n", |
66 | IS_USED_RUN(ioc_enable)); | 65 | IS_DISABLED_RUN(ioc_enable)); |
67 | 66 | ||
68 | return buf; | 67 | return buf; |
69 | } | 68 | } |
@@ -217,7 +216,7 @@ slc_chk: | |||
217 | */ | 216 | */ |
218 | 217 | ||
219 | static inline | 218 | static inline |
220 | void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr, | 219 | void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr, |
221 | unsigned long sz, const int op) | 220 | unsigned long sz, const int op) |
222 | { | 221 | { |
223 | unsigned int aux_cmd; | 222 | unsigned int aux_cmd; |
@@ -254,8 +253,12 @@ void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr, | |||
254 | } | 253 | } |
255 | } | 254 | } |
256 | 255 | ||
256 | /* | ||
257 | * For ARC700 MMUv3 I-cache and D-cache flushes | ||
258 | * Also reused for HS38 aliasing I-cache configuration | ||
259 | */ | ||
257 | static inline | 260 | static inline |
258 | void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr, | 261 | void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr, |
259 | unsigned long sz, const int op) | 262 | unsigned long sz, const int op) |
260 | { | 263 | { |
261 | unsigned int aux_cmd, aux_tag; | 264 | unsigned int aux_cmd, aux_tag; |
@@ -290,6 +293,16 @@ void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr, | |||
290 | if (full_page) | 293 | if (full_page) |
291 | write_aux_reg(aux_tag, paddr); | 294 | write_aux_reg(aux_tag, paddr); |
292 | 295 | ||
296 | /* | ||
297 | * This is technically for MMU v4, using the MMU v3 programming model | ||
298 | * Special work for HS38 aliasing I-cache configuration with PAE40 | ||
299 | * - upper 8 bits of paddr need to be written into PTAG_HI | ||
300 | * - (and needs to be written before the lower 32 bits) | ||
301 | * Note that PTAG_HI is hoisted outside the line loop | ||
302 | */ | ||
303 | if (is_pae40_enabled() && op == OP_INV_IC) | ||
304 | write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32); | ||
305 | |||
293 | while (num_lines-- > 0) { | 306 | while (num_lines-- > 0) { |
294 | if (!full_page) { | 307 | if (!full_page) { |
295 | write_aux_reg(aux_tag, paddr); | 308 | write_aux_reg(aux_tag, paddr); |
@@ -302,14 +315,20 @@ void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr, | |||
302 | } | 315 | } |
303 | 316 | ||
304 | /* | 317 | /* |
305 | * In HS38x (MMU v4), although icache is VIPT, only paddr is needed for cache | 318 | * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT |
306 | * maintenance ops (in IVIL reg), as long as icache doesn't alias. | 319 | * Here's how cache ops are implemented |
320 | * | ||
321 | * - D-cache: only paddr needed (in DC_IVDL/DC_FLDL) | ||
322 | * - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL) | ||
323 | * - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG | ||
324 | * respectively, similar to MMU v3 programming model, hence | ||
325 | * __cache_line_loop_v3() is used) | ||
307 | * | 326 | * |
308 | * For Aliasing icache, vaddr is also needed (in IVIL), while paddr is | 327 | * If PAE40 is enabled, independent of aliasing considerations, the higher bits |
309 | * specified in PTAG (similar to MMU v3) | 328 | * need to be written into PTAG_HI |
310 | */ | 329 | */ |
311 | static inline | 330 | static inline |
312 | void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr, | 331 | void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr, |
313 | unsigned long sz, const int cacheop) | 332 | unsigned long sz, const int cacheop) |
314 | { | 333 | { |
315 | unsigned int aux_cmd; | 334 | unsigned int aux_cmd; |
@@ -336,6 +355,22 @@ void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr, | |||
336 | 355 | ||
337 | num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES); | 356 | num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES); |
338 | 357 | ||
358 | /* | ||
359 | * For HS38 PAE40 configuration | ||
360 | * - upper 8 bits of paddr need to be written into PTAG_HI | ||
361 | * - (and needs to be written before the lower 32 bits) | ||
362 | */ | ||
363 | if (is_pae40_enabled()) { | ||
364 | if (cacheop == OP_INV_IC) | ||
365 | /* | ||
366 | * Non aliasing I-cache in HS38, | ||
367 | * aliasing I-cache handled in __cache_line_loop_v3() | ||
368 | */ | ||
369 | write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32); | ||
370 | else | ||
371 | write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32); | ||
372 | } | ||
373 | |||
339 | while (num_lines-- > 0) { | 374 | while (num_lines-- > 0) { |
340 | write_aux_reg(aux_cmd, paddr); | 375 | write_aux_reg(aux_cmd, paddr); |
341 | paddr += L1_CACHE_BYTES; | 376 | paddr += L1_CACHE_BYTES; |
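A worked example of the PAE40 split (address hypothetical): for a 40-bit paddr of 0x1_2345_6040 the sequence above amounts to

    write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32); /* 0x01: bits 39:32 */
    write_aux_reg(aux_cmd, paddr);                       /* 0x23456040: low 32 */

with the HI word programmed first; since the upper 8 bits are constant across a page-sized range, that write can be hoisted ahead of the per-line loop, as done here.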
@@ -413,7 +448,7 @@ static inline void __dc_entire_op(const int op) | |||
413 | /* | 448 | /* |
414 | * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback) | 449 | * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback) |
415 | */ | 450 | */ |
416 | static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr, | 451 | static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr, |
417 | unsigned long sz, const int op) | 452 | unsigned long sz, const int op) |
418 | { | 453 | { |
419 | unsigned long flags; | 454 | unsigned long flags; |
@@ -446,7 +481,7 @@ static inline void __ic_entire_inv(void) | |||
446 | } | 481 | } |
447 | 482 | ||
448 | static inline void | 483 | static inline void |
449 | __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr, | 484 | __ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr, |
450 | unsigned long sz) | 485 | unsigned long sz) |
451 | { | 486 | { |
452 | unsigned long flags; | 487 | unsigned long flags; |
@@ -463,7 +498,7 @@ __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr, | |||
463 | #else | 498 | #else |
464 | 499 | ||
465 | struct ic_inv_args { | 500 | struct ic_inv_args { |
466 | unsigned long paddr, vaddr; | 501 | phys_addr_t paddr, vaddr; |
467 | int sz; | 502 | int sz; |
468 | }; | 503 | }; |
469 | 504 | ||
@@ -474,7 +509,7 @@ static void __ic_line_inv_vaddr_helper(void *info) | |||
474 | __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz); | 509 | __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz); |
475 | } | 510 | } |
476 | 511 | ||
477 | static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr, | 512 | static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr, |
478 | unsigned long sz) | 513 | unsigned long sz) |
479 | { | 514 | { |
480 | struct ic_inv_args ic_inv = { | 515 | struct ic_inv_args ic_inv = { |
@@ -495,7 +530,7 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr, | |||
495 | 530 | ||
496 | #endif /* CONFIG_ARC_HAS_ICACHE */ | 531 | #endif /* CONFIG_ARC_HAS_ICACHE */ |
497 | 532 | ||
498 | noinline void slc_op(unsigned long paddr, unsigned long sz, const int op) | 533 | noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op) |
499 | { | 534 | { |
500 | #ifdef CONFIG_ISA_ARCV2 | 535 | #ifdef CONFIG_ISA_ARCV2 |
501 | /* | 536 | /* |
@@ -585,7 +620,7 @@ void flush_dcache_page(struct page *page) | |||
585 | } else if (page_mapped(page)) { | 620 | } else if (page_mapped(page)) { |
586 | 621 | ||
587 | /* kernel reading from page with U-mapping */ | 622 | /* kernel reading from page with U-mapping */ |
588 | unsigned long paddr = (unsigned long)page_address(page); | 623 | phys_addr_t paddr = (unsigned long)page_address(page); |
589 | unsigned long vaddr = page->index << PAGE_CACHE_SHIFT; | 624 | unsigned long vaddr = page->index << PAGE_CACHE_SHIFT; |
590 | 625 | ||
591 | if (addr_not_cache_congruent(paddr, vaddr)) | 626 | if (addr_not_cache_congruent(paddr, vaddr)) |
@@ -733,14 +768,14 @@ EXPORT_SYMBOL(flush_icache_range); | |||
733 | * builtin kernel page will not have any virtual mappings. | 768 | * builtin kernel page will not have any virtual mappings. |
734 | * kprobe on loadable module will be kernel vaddr. | 769 | * kprobe on loadable module will be kernel vaddr. |
735 | */ | 770 | */ |
736 | void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len) | 771 | void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len) |
737 | { | 772 | { |
738 | __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV); | 773 | __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV); |
739 | __ic_line_inv_vaddr(paddr, vaddr, len); | 774 | __ic_line_inv_vaddr(paddr, vaddr, len); |
740 | } | 775 | } |
741 | 776 | ||
742 | /* wrapper to compile time eliminate alignment checks in flush loop */ | 777 | /* wrapper to compile time eliminate alignment checks in flush loop */ |
743 | void __inv_icache_page(unsigned long paddr, unsigned long vaddr) | 778 | void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr) |
744 | { | 779 | { |
745 | __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE); | 780 | __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE); |
746 | } | 781 | } |
@@ -749,7 +784,7 @@ void __inv_icache_page(unsigned long paddr, unsigned long vaddr) | |||
749 | * wrapper to clearout kernel or userspace mappings of a page | 784 | * wrapper to clearout kernel or userspace mappings of a page |
750 | * For kernel mappings @vaddr == @paddr | 785 | * For kernel mappings @vaddr == @paddr |
751 | */ | 786 | */ |
752 | void __flush_dcache_page(unsigned long paddr, unsigned long vaddr) | 787 | void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr) |
753 | { | 788 | { |
754 | __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV); | 789 | __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV); |
755 | } | 790 | } |
@@ -807,8 +842,8 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, | |||
807 | void copy_user_highpage(struct page *to, struct page *from, | 842 | void copy_user_highpage(struct page *to, struct page *from, |
808 | unsigned long u_vaddr, struct vm_area_struct *vma) | 843 | unsigned long u_vaddr, struct vm_area_struct *vma) |
809 | { | 844 | { |
810 | unsigned long kfrom = (unsigned long)page_address(from); | 845 | void *kfrom = kmap_atomic(from); |
811 | unsigned long kto = (unsigned long)page_address(to); | 846 | void *kto = kmap_atomic(to); |
812 | int clean_src_k_mappings = 0; | 847 | int clean_src_k_mappings = 0; |
813 | 848 | ||
814 | /* | 849 | /* |
@@ -818,13 +853,16 @@ void copy_user_highpage(struct page *to, struct page *from, | |||
818 | * | 853 | * |
819 | * Note that while @u_vaddr refers to DST page's userspace vaddr, it is | 854 | * Note that while @u_vaddr refers to DST page's userspace vaddr, it is |
820 | * equally valid for SRC page as well | 855 | * equally valid for SRC page as well |
856 | * | ||
857 | * For !VIPT cache, all of this gets compiled out as | ||
858 | * addr_not_cache_congruent() is 0 | ||
821 | */ | 859 | */ |
822 | if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) { | 860 | if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) { |
823 | __flush_dcache_page(kfrom, u_vaddr); | 861 | __flush_dcache_page((unsigned long)kfrom, u_vaddr); |
824 | clean_src_k_mappings = 1; | 862 | clean_src_k_mappings = 1; |
825 | } | 863 | } |
826 | 864 | ||
827 | copy_page((void *)kto, (void *)kfrom); | 865 | copy_page(kto, kfrom); |
828 | 866 | ||
829 | /* | 867 | /* |
830 | * Mark DST page K-mapping as dirty for a later finalization by | 868 | * Mark DST page K-mapping as dirty for a later finalization by |
@@ -841,11 +879,14 @@ void copy_user_highpage(struct page *to, struct page *from, | |||
841 | * sync the kernel mapping back to physical page | 879 | * sync the kernel mapping back to physical page |
842 | */ | 880 | */ |
843 | if (clean_src_k_mappings) { | 881 | if (clean_src_k_mappings) { |
844 | __flush_dcache_page(kfrom, kfrom); | 882 | __flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom); |
845 | set_bit(PG_dc_clean, &from->flags); | 883 | set_bit(PG_dc_clean, &from->flags); |
846 | } else { | 884 | } else { |
847 | clear_bit(PG_dc_clean, &from->flags); | 885 | clear_bit(PG_dc_clean, &from->flags); |
848 | } | 886 | } |
887 | |||
888 | kunmap_atomic(kto); | ||
889 | kunmap_atomic(kfrom); | ||
849 | } | 890 | } |
850 | 891 | ||
851 | void clear_user_page(void *to, unsigned long u_vaddr, struct page *page) | 892 | void clear_user_page(void *to, unsigned long u_vaddr, struct page *page) |
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index d948e4e9d89c..af63f4a13e60 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c | |||
@@ -18,7 +18,14 @@ | |||
18 | #include <asm/pgalloc.h> | 18 | #include <asm/pgalloc.h> |
19 | #include <asm/mmu.h> | 19 | #include <asm/mmu.h> |
20 | 20 | ||
21 | static int handle_vmalloc_fault(unsigned long address) | 21 | /* |
22 | * kernel virtual address is required to implement vmalloc/pkmap/fixmap | ||
23 | * Refer to asm/processor.h for System Memory Map | ||
24 | * | ||
25 | * It simply copies the PMD entry (pointer to 2nd level page table or hugepage) | ||
26 | * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared | ||
27 | */ | ||
28 | noinline static int handle_kernel_vaddr_fault(unsigned long address) | ||
22 | { | 29 | { |
23 | /* | 30 | /* |
24 | * Synchronize this task's top level page-table | 31 | * Synchronize this task's top level page-table |
@@ -72,8 +79,8 @@ void do_page_fault(unsigned long address, struct pt_regs *regs) | |||
72 | * only copy the information from the master page table, | 79 | * only copy the information from the master page table, |
73 | * nothing more. | 80 | * nothing more. |
74 | */ | 81 | */ |
75 | if (address >= VMALLOC_START && address <= VMALLOC_END) { | 82 | if (address >= VMALLOC_START) { |
76 | ret = handle_vmalloc_fault(address); | 83 | ret = handle_kernel_vaddr_fault(address); |
77 | if (unlikely(ret)) | 84 | if (unlikely(ret)) |
78 | goto bad_area_nosemaphore; | 85 | goto bad_area_nosemaphore; |
79 | else | 86 | else |
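The before/after condition makes the scope of the rename clear; the widened check also catches the new pkmap and fixmap windows, which sit between VMALLOC_END and PAGE_OFFSET (see the BUILD_BUG_ON in kmap_init() below):

    /* before: only the vmalloc window took the pgd-sync path */
    if (address >= VMALLOC_START && address <= VMALLOC_END)

    /* after: any kernel virtual address does */
    if (address >= VMALLOC_START)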
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c new file mode 100644 index 000000000000..065ee6bfa82a --- /dev/null +++ b/arch/arc/mm/highmem.c | |||
@@ -0,0 +1,140 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com) | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #include <linux/bootmem.h> | ||
11 | #include <linux/export.h> | ||
12 | #include <linux/highmem.h> | ||
13 | #include <asm/processor.h> | ||
14 | #include <asm/pgtable.h> | ||
15 | #include <asm/pgalloc.h> | ||
16 | #include <asm/tlbflush.h> | ||
17 | |||
18 | /* | ||
19 | * HIGHMEM API: | ||
20 | * | ||
21 | * kmap() API provides sleep semantics hence referred to as "permanent maps" | ||
22 | * It allows mapping LAST_PKMAP pages, using @last_pkmap_nr as the cursor | ||
23 | * for book-keeping | ||
24 | * | ||
25 | * kmap_atomic() can't sleep (calls pagefault_disable()), thus it provides | ||
26 | * short-lived a la "temporary mappings" which historically were implemented as | ||
27 | * fixmaps (compile time addr etc). Their book-keeping is done per cpu. | ||
28 | * | ||
29 | * Both these facts combined (preemption disabled and per-cpu allocation) | ||
30 | * means the total number of concurrent fixmaps will be limited to max | ||
31 | * such allocations in a single control path. Thus KM_TYPE_NR (another | ||
32 | * historic relic) is a small'ish number which caps max percpu fixmaps | ||
33 | * | ||
34 | * ARC HIGHMEM Details | ||
35 | * | ||
36 | * - the kernel vaddr space from 0x7z to 0x8z (currently used by vmalloc/module) | ||
37 | * is now shared between vmalloc and kmap (non overlapping though) | ||
38 | * | ||
39 | * - Both fixmap/pkmap use a dedicated page table each, hooked up to swapper PGD | ||
40 | * This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means | ||
41 | * 2M of kvaddr space for typical config (8K page and 11:8:13 traversal split) | ||
42 | * | ||
43 | * - fixmap anyhow needs a limited number of mappings. So 2M kvaddr == 256 PTE | ||
44 | * slots across NR_CPUS would be more than sufficient (generic code defines | ||
45 | * KM_TYPE_NR as 20). | ||
46 | * | ||
47 | * - pkmap being preemptible, in theory could do with more than 256 concurrent | ||
48 | * mappings. However, generic pkmap code: map_new_virtual(), doesn't traverse | ||
49 | * the PGD and only works with a single page table @pkmap_page_table, hence | ||
50 | * sets the limit | ||
51 | */ | ||
52 | |||
53 | extern pte_t * pkmap_page_table; | ||
54 | static pte_t * fixmap_page_table; | ||
55 | |||
56 | void *kmap(struct page *page) | ||
57 | { | ||
58 | BUG_ON(in_interrupt()); | ||
59 | if (!PageHighMem(page)) | ||
60 | return page_address(page); | ||
61 | |||
62 | return kmap_high(page); | ||
63 | } | ||
64 | |||
65 | void *kmap_atomic(struct page *page) | ||
66 | { | ||
67 | int idx, cpu_idx; | ||
68 | unsigned long vaddr; | ||
69 | |||
70 | preempt_disable(); | ||
71 | pagefault_disable(); | ||
72 | if (!PageHighMem(page)) | ||
73 | return page_address(page); | ||
74 | |||
75 | cpu_idx = kmap_atomic_idx_push(); | ||
76 | idx = cpu_idx + KM_TYPE_NR * smp_processor_id(); | ||
77 | vaddr = FIXMAP_ADDR(idx); | ||
78 | |||
79 | set_pte_at(&init_mm, vaddr, fixmap_page_table + idx, | ||
80 | mk_pte(page, kmap_prot)); | ||
81 | |||
82 | return (void *)vaddr; | ||
83 | } | ||
84 | EXPORT_SYMBOL(kmap_atomic); | ||
85 | |||
86 | void __kunmap_atomic(void *kv) | ||
87 | { | ||
88 | unsigned long kvaddr = (unsigned long)kv; | ||
89 | |||
90 | if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) { | ||
91 | |||
92 | /* | ||
93 | * Because preemption is disabled, this vaddr can be associated | ||
94 | * with the current allocated index. | ||
95 | * But in case of multiple live kmap_atomic(), it still relies on | ||
96 | * callers to unmap in right order. | ||
97 | */ | ||
98 | int cpu_idx = kmap_atomic_idx(); | ||
99 | int idx = cpu_idx + KM_TYPE_NR * smp_processor_id(); | ||
100 | |||
101 | WARN_ON(kvaddr != FIXMAP_ADDR(idx)); | ||
102 | |||
103 | pte_clear(&init_mm, kvaddr, fixmap_page_table + idx); | ||
104 | local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE); | ||
105 | |||
106 | kmap_atomic_idx_pop(); | ||
107 | } | ||
108 | |||
109 | pagefault_enable(); | ||
110 | preempt_enable(); | ||
111 | } | ||
112 | EXPORT_SYMBOL(__kunmap_atomic); | ||
113 | |||
114 | noinline pte_t *alloc_kmap_pgtable(unsigned long kvaddr) | ||
115 | { | ||
116 | pgd_t *pgd_k; | ||
117 | pud_t *pud_k; | ||
118 | pmd_t *pmd_k; | ||
119 | pte_t *pte_k; | ||
120 | |||
121 | pgd_k = pgd_offset_k(kvaddr); | ||
122 | pud_k = pud_offset(pgd_k, kvaddr); | ||
123 | pmd_k = pmd_offset(pud_k, kvaddr); | ||
124 | |||
125 | pte_k = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE); | ||
126 | pmd_populate_kernel(&init_mm, pmd_k, pte_k); | ||
127 | return pte_k; | ||
128 | } | ||
129 | |||
130 | void kmap_init(void) | ||
131 | { | ||
132 | /* Due to recursive include hell, we can't do this in processor.h */ | ||
133 | BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE)); | ||
134 | |||
135 | BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE); | ||
136 | pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE); | ||
137 | |||
138 | BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE); | ||
139 | fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE); | ||
140 | } | ||
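For orientation, a minimal hypothetical user of this API; copy_user_highpage() in mm/cache.c above follows the same pattern. Note the LIFO unmap order that the index push/pop in __kunmap_atomic() relies on:

    #include <linux/highmem.h>

    static void copy_one_page(struct page *dst_pg, struct page *src_pg)
    {
        void *src = kmap_atomic(src_pg);  /* no sleeping from here on */
        void *dst = kmap_atomic(dst_pg);

        copy_page(dst, src);

        kunmap_atomic(dst);               /* unmap in reverse (stack) order */
        kunmap_atomic(src);
    }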
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index d44eedd8c322..a9305b5a2cd4 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #endif | 15 | #endif |
16 | #include <linux/swap.h> | 16 | #include <linux/swap.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/highmem.h> | ||
18 | #include <asm/page.h> | 19 | #include <asm/page.h> |
19 | #include <asm/pgalloc.h> | 20 | #include <asm/pgalloc.h> |
20 | #include <asm/sections.h> | 21 | #include <asm/sections.h> |
@@ -24,16 +25,22 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE); | |||
24 | char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE); | 25 | char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE); |
25 | EXPORT_SYMBOL(empty_zero_page); | 26 | EXPORT_SYMBOL(empty_zero_page); |
26 | 27 | ||
27 | /* Default tot mem from .config */ | 28 | static const unsigned long low_mem_start = CONFIG_LINUX_LINK_BASE; |
28 | static unsigned long arc_mem_sz = 0x20000000; /* some default */ | 29 | static unsigned long low_mem_sz; |
30 | |||
31 | #ifdef CONFIG_HIGHMEM | ||
32 | static unsigned long min_high_pfn; | ||
33 | static u64 high_mem_start; | ||
34 | static u64 high_mem_sz; | ||
35 | #endif | ||
29 | 36 | ||
30 | /* User can over-ride above with "mem=nnn[KkMm]" in cmdline */ | 37 | /* User can over-ride above with "mem=nnn[KkMm]" in cmdline */ |
31 | static int __init setup_mem_sz(char *str) | 38 | static int __init setup_mem_sz(char *str) |
32 | { | 39 | { |
33 | arc_mem_sz = memparse(str, NULL) & PAGE_MASK; | 40 | low_mem_sz = memparse(str, NULL) & PAGE_MASK; |
34 | 41 | ||
35 | /* early console might not be setup yet - it will show up later */ | 42 | /* early console might not be setup yet - it will show up later */ |
36 | pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(arc_mem_sz)); | 43 | pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(low_mem_sz)); |
37 | 44 | ||
38 | return 0; | 45 | return 0; |
39 | } | 46 | } |
@@ -41,8 +48,22 @@ early_param("mem", setup_mem_sz); | |||
41 | 48 | ||
42 | void __init early_init_dt_add_memory_arch(u64 base, u64 size) | 49 | void __init early_init_dt_add_memory_arch(u64 base, u64 size) |
43 | { | 50 | { |
44 | arc_mem_sz = size & PAGE_MASK; | 51 | int in_use = 0; |
45 | pr_info("Memory size set via devicetree %ldM\n", TO_MB(arc_mem_sz)); | 52 | |
53 | if (!low_mem_sz) { | ||
54 | BUG_ON(base != low_mem_start); | ||
55 | low_mem_sz = size; | ||
56 | in_use = 1; | ||
57 | } else { | ||
58 | #ifdef CONFIG_HIGHMEM | ||
59 | high_mem_start = base; | ||
60 | high_mem_sz = size; | ||
61 | in_use = 1; | ||
62 | #endif | ||
63 | } | ||
64 | |||
65 | pr_info("Memory @ %llx [%lldM] %s\n", | ||
66 | base, TO_MB(size), !in_use ? "Not used":""); | ||
46 | } | 67 | } |
47 | 68 | ||
48 | #ifdef CONFIG_BLK_DEV_INITRD | 69 | #ifdef CONFIG_BLK_DEV_INITRD |
@@ -72,46 +93,62 @@ early_param("initrd", early_initrd); | |||
72 | void __init setup_arch_memory(void) | 93 | void __init setup_arch_memory(void) |
73 | { | 94 | { |
74 | unsigned long zones_size[MAX_NR_ZONES]; | 95 | unsigned long zones_size[MAX_NR_ZONES]; |
75 | unsigned long end_mem = CONFIG_LINUX_LINK_BASE + arc_mem_sz; | 96 | unsigned long zones_holes[MAX_NR_ZONES]; |
76 | 97 | ||
77 | init_mm.start_code = (unsigned long)_text; | 98 | init_mm.start_code = (unsigned long)_text; |
78 | init_mm.end_code = (unsigned long)_etext; | 99 | init_mm.end_code = (unsigned long)_etext; |
79 | init_mm.end_data = (unsigned long)_edata; | 100 | init_mm.end_data = (unsigned long)_edata; |
80 | init_mm.brk = (unsigned long)_end; | 101 | init_mm.brk = (unsigned long)_end; |
81 | 102 | ||
82 | /* | ||
83 | * We do it here, so that memory is correctly instantiated | ||
84 | * even if "mem=xxx" cmline over-ride is given and/or | ||
85 | * DT has memory node. Each causes an update to @arc_mem_sz | ||
86 | * and we finally add memory one here | ||
87 | */ | ||
88 | memblock_add(CONFIG_LINUX_LINK_BASE, arc_mem_sz); | ||
89 | |||
90 | /*------------- externs in mm need setting up ---------------*/ | ||
91 | |||
92 | /* first page of system - kernel .vector starts here */ | 103 | /* first page of system - kernel .vector starts here */ |
93 | min_low_pfn = ARCH_PFN_OFFSET; | 104 | min_low_pfn = ARCH_PFN_OFFSET; |
94 | 105 | ||
95 | /* Last usable page of low mem (no HIGHMEM yet for ARC port) */ | 106 | /* Last usable page of low mem */ |
96 | max_low_pfn = max_pfn = PFN_DOWN(end_mem); | 107 | max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz); |
97 | 108 | ||
98 | max_mapnr = max_low_pfn - min_low_pfn; | 109 | #ifdef CONFIG_HIGHMEM |
110 | min_high_pfn = PFN_DOWN(high_mem_start); | ||
111 | max_pfn = PFN_DOWN(high_mem_start + high_mem_sz); | ||
112 | #endif | ||
113 | |||
114 | max_mapnr = max_pfn - min_low_pfn; | ||
99 | 115 | ||
100 | /*------------- reserve kernel image -----------------------*/ | 116 | /*------------- bootmem allocator setup -----------------------*/ |
101 | memblock_reserve(CONFIG_LINUX_LINK_BASE, | 117 | |
102 | __pa(_end) - CONFIG_LINUX_LINK_BASE); | 118 | /* |
119 | * seed the bootmem allocator after any DT memory node parsing or | ||
120 | * "mem=xxx" cmdline overrides have potentially updated @arc_mem_sz | ||
121 | * | ||
122 | * Only low mem is added, otherwise we have crashes when allocating | ||
123 | * mem_map[] itself. NO_BOOTMEM allocates mem_map[] at the end of | ||
124 | * avail memory, ending in highmem with a > 32-bit address. However | ||
125 | * it then tries to memset it with a truncated 32-bit handle, causing | ||
126 | * the crash | ||
127 | */ | ||
128 | |||
129 | memblock_add(low_mem_start, low_mem_sz); | ||
130 | memblock_reserve(low_mem_start, __pa(_end) - low_mem_start); | ||
103 | 131 | ||
104 | #ifdef CONFIG_BLK_DEV_INITRD | 132 | #ifdef CONFIG_BLK_DEV_INITRD |
105 | /*------------- reserve initrd image -----------------------*/ | ||
106 | if (initrd_start) | 133 | if (initrd_start) |
107 | memblock_reserve(__pa(initrd_start), initrd_end - initrd_start); | 134 | memblock_reserve(__pa(initrd_start), initrd_end - initrd_start); |
108 | #endif | 135 | #endif |
109 | 136 | ||
110 | memblock_dump_all(); | 137 | memblock_dump_all(); |
111 | 138 | ||
112 | /*-------------- node setup --------------------------------*/ | 139 | /*----------------- node/zones setup --------------------------*/ |
113 | memset(zones_size, 0, sizeof(zones_size)); | 140 | memset(zones_size, 0, sizeof(zones_size)); |
114 | zones_size[ZONE_NORMAL] = max_mapnr; | 141 | memset(zones_holes, 0, sizeof(zones_holes)); |
142 | |||
143 | zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn; | ||
144 | zones_holes[ZONE_NORMAL] = 0; | ||
145 | |||
146 | #ifdef CONFIG_HIGHMEM | ||
147 | zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn; | ||
148 | |||
149 | /* This handles the peripheral address space hole */ | ||
150 | zones_holes[ZONE_HIGHMEM] = min_high_pfn - max_low_pfn; | ||
151 | #endif | ||
115 | 152 | ||
116 | /* | 153 | /* |
117 | * We can't use the helper free_area_init(zones[]) because it uses | 154 | * We can't use the helper free_area_init(zones[]) because it uses |
@@ -122,9 +159,12 @@ void __init setup_arch_memory(void) | |||
122 | free_area_init_node(0, /* node-id */ | 159 | free_area_init_node(0, /* node-id */ |
123 | zones_size, /* num pages per zone */ | 160 | zones_size, /* num pages per zone */ |
124 | min_low_pfn, /* first pfn of node */ | 161 | min_low_pfn, /* first pfn of node */ |
125 | NULL); /* NO holes */ | 162 | zones_holes); /* holes */ |
126 | 163 | ||
127 | high_memory = (void *)end_mem; | 164 | #ifdef CONFIG_HIGHMEM |
165 | high_memory = (void *)(min_high_pfn << PAGE_SHIFT); | ||
166 | kmap_init(); | ||
167 | #endif | ||
128 | } | 168 | } |
129 | 169 | ||
130 | /* | 170 | /* |
@@ -135,6 +175,14 @@ void __init setup_arch_memory(void) | |||
135 | */ | 175 | */ |
136 | void __init mem_init(void) | 176 | void __init mem_init(void) |
137 | { | 177 | { |
178 | #ifdef CONFIG_HIGHMEM | ||
179 | unsigned long tmp; | ||
180 | |||
181 | reset_all_zones_managed_pages(); | ||
182 | for (tmp = min_high_pfn; tmp < max_pfn; tmp++) | ||
183 | free_highmem_page(pfn_to_page(tmp)); | ||
184 | #endif | ||
185 | |||
138 | free_all_bootmem(); | 186 | free_all_bootmem(); |
139 | mem_init_print_info(NULL); | 187 | mem_init_print_info(NULL); |
140 | } | 188 | } |
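To see how the zone sizes and the HIGHMEM hole fall out, a worked example with made-up numbers: 256M of low mem at 0x8000_0000, 1G of high mem at 0x1_0000_0000, 8K pages (PAGE_SHIFT = 13):

    min_low_pfn  = 0x8000_0000 >> 13                   = 0x40000
    max_low_pfn  = (0x8000_0000 + 0x1000_0000) >> 13   = 0x48000
    min_high_pfn = 0x1_0000_0000 >> 13                 = 0x80000
    max_pfn      = (0x1_0000_0000 + 0x4000_0000) >> 13 = 0xa0000

    zones_size[ZONE_NORMAL]   = max_low_pfn - min_low_pfn  = 0x08000 pages
    zones_size[ZONE_HIGHMEM]  = max_pfn - max_low_pfn      = 0x58000 pages
    zones_holes[ZONE_HIGHMEM] = min_high_pfn - max_low_pfn = 0x38000 pages

The HIGHMEM zone thus spans the whole pfn gap but declares the peripheral-space hole, leaving 0x20000 real pages (1G) to be freed by mem_init().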
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c index 2c7ce8bb7475..0ee739846847 100644 --- a/arch/arc/mm/tlb.c +++ b/arch/arc/mm/tlb.c | |||
@@ -109,6 +109,10 @@ DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE; | |||
109 | static inline void __tlb_entry_erase(void) | 109 | static inline void __tlb_entry_erase(void) |
110 | { | 110 | { |
111 | write_aux_reg(ARC_REG_TLBPD1, 0); | 111 | write_aux_reg(ARC_REG_TLBPD1, 0); |
112 | |||
113 | if (is_pae40_enabled()) | ||
114 | write_aux_reg(ARC_REG_TLBPD1HI, 0); | ||
115 | |||
112 | write_aux_reg(ARC_REG_TLBPD0, 0); | 116 | write_aux_reg(ARC_REG_TLBPD0, 0); |
113 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite); | 117 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite); |
114 | } | 118 | } |
@@ -182,7 +186,7 @@ static void utlb_invalidate(void) | |||
182 | 186 | ||
183 | } | 187 | } |
184 | 188 | ||
185 | static void tlb_entry_insert(unsigned int pd0, unsigned int pd1) | 189 | static void tlb_entry_insert(unsigned int pd0, pte_t pd1) |
186 | { | 190 | { |
187 | unsigned int idx; | 191 | unsigned int idx; |
188 | 192 | ||
@@ -225,10 +229,14 @@ static void tlb_entry_erase(unsigned int vaddr_n_asid) | |||
225 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry); | 229 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry); |
226 | } | 230 | } |
227 | 231 | ||
228 | static void tlb_entry_insert(unsigned int pd0, unsigned int pd1) | 232 | static void tlb_entry_insert(unsigned int pd0, pte_t pd1) |
229 | { | 233 | { |
230 | write_aux_reg(ARC_REG_TLBPD0, pd0); | 234 | write_aux_reg(ARC_REG_TLBPD0, pd0); |
231 | write_aux_reg(ARC_REG_TLBPD1, pd1); | 235 | write_aux_reg(ARC_REG_TLBPD1, pd1); |
236 | |||
237 | if (is_pae40_enabled()) | ||
238 | write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32); | ||
239 | |||
232 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry); | 240 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry); |
233 | } | 241 | } |
234 | 242 | ||
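Concretely (value hypothetical, assuming the 64-bit pte_t that PAE40 implies): for pd1 = 0x1_2345_6303 the insert programs

    write_aux_reg(ARC_REG_TLBPD1,   pd1);            /* low 32 bits: 0x23456303 */
    write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32); /* 0x01: paddr bits 39:32 */

With PAE40 disabled, pte_t stays 32-bit and the PD1HI writes are simply skipped.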
@@ -240,22 +248,39 @@ static void tlb_entry_insert(unsigned int pd0, unsigned int pd1) | |||
240 | 248 | ||
241 | noinline void local_flush_tlb_all(void) | 249 | noinline void local_flush_tlb_all(void) |
242 | { | 250 | { |
251 | struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; | ||
243 | unsigned long flags; | 252 | unsigned long flags; |
244 | unsigned int entry; | 253 | unsigned int entry; |
245 | struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; | 254 | int num_tlb = mmu->sets * mmu->ways; |
246 | 255 | ||
247 | local_irq_save(flags); | 256 | local_irq_save(flags); |
248 | 257 | ||
249 | /* Load PD0 and PD1 with template for a Blank Entry */ | 258 | /* Load PD0 and PD1 with template for a Blank Entry */ |
250 | write_aux_reg(ARC_REG_TLBPD1, 0); | 259 | write_aux_reg(ARC_REG_TLBPD1, 0); |
260 | |||
261 | if (is_pae40_enabled()) | ||
262 | write_aux_reg(ARC_REG_TLBPD1HI, 0); | ||
263 | |||
251 | write_aux_reg(ARC_REG_TLBPD0, 0); | 264 | write_aux_reg(ARC_REG_TLBPD0, 0); |
252 | 265 | ||
253 | for (entry = 0; entry < mmu->num_tlb; entry++) { | 266 | for (entry = 0; entry < num_tlb; entry++) { |
254 | /* write this entry to the TLB */ | 267 | /* write this entry to the TLB */ |
255 | write_aux_reg(ARC_REG_TLBINDEX, entry); | 268 | write_aux_reg(ARC_REG_TLBINDEX, entry); |
256 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite); | 269 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite); |
257 | } | 270 | } |
258 | 271 | ||
272 | if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { | ||
273 | const int stlb_idx = 0x800; | ||
274 | |||
275 | /* Blank sTLB entry */ | ||
276 | write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ); | ||
277 | |||
278 | for (entry = stlb_idx; entry < stlb_idx + 16; entry++) { | ||
279 | write_aux_reg(ARC_REG_TLBINDEX, entry); | ||
280 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite); | ||
281 | } | ||
282 | } | ||
283 | |||
259 | utlb_invalidate(); | 284 | utlb_invalidate(); |
260 | 285 | ||
261 | local_irq_restore(flags); | 286 | local_irq_restore(flags); |
@@ -409,6 +434,15 @@ static inline void ipi_flush_tlb_range(void *arg) | |||
409 | local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); | 434 | local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); |
410 | } | 435 | } |
411 | 436 | ||
437 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
438 | static inline void ipi_flush_pmd_tlb_range(void *arg) | ||
439 | { | ||
440 | struct tlb_args *ta = arg; | ||
441 | |||
442 | local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); | ||
443 | } | ||
444 | #endif | ||
445 | |||
412 | static inline void ipi_flush_tlb_kernel_range(void *arg) | 446 | static inline void ipi_flush_tlb_kernel_range(void *arg) |
413 | { | 447 | { |
414 | struct tlb_args *ta = (struct tlb_args *)arg; | 448 | struct tlb_args *ta = (struct tlb_args *)arg; |
@@ -449,6 +483,20 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | |||
449 | on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1); | 483 | on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1); |
450 | } | 484 | } |
451 | 485 | ||
486 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
487 | void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, | ||
488 | unsigned long end) | ||
489 | { | ||
490 | struct tlb_args ta = { | ||
491 | .ta_vma = vma, | ||
492 | .ta_start = start, | ||
493 | .ta_end = end | ||
494 | }; | ||
495 | |||
496 | on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1); | ||
497 | } | ||
498 | #endif | ||
499 | |||
452 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) | 500 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) |
453 | { | 501 | { |
454 | struct tlb_args ta = { | 502 | struct tlb_args ta = { |
@@ -463,11 +511,12 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
463 | /* | 511 | /* |
464 | * Routine to create a TLB entry | 512 | * Routine to create a TLB entry |
465 | */ | 513 | */ |
466 | void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) | 514 | void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep) |
467 | { | 515 | { |
468 | unsigned long flags; | 516 | unsigned long flags; |
469 | unsigned int asid_or_sasid, rwx; | 517 | unsigned int asid_or_sasid, rwx; |
470 | unsigned long pd0, pd1; | 518 | unsigned long pd0; |
519 | pte_t pd1; | ||
471 | 520 | ||
472 | /* | 521 | /* |
473 | * create_tlb() assumes that current->mm == vma->mm, since | 522 | * create_tlb() assumes that current->mm == vma->mm, since |
@@ -499,9 +548,9 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) | |||
499 | 548 | ||
500 | local_irq_save(flags); | 549 | local_irq_save(flags); |
501 | 550 | ||
502 | tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address); | 551 | tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr); |
503 | 552 | ||
504 | address &= PAGE_MASK; | 553 | vaddr &= PAGE_MASK; |
505 | 554 | ||
506 | /* update this PTE credentials */ | 555 | /* update this PTE credentials */ |
507 | pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED); | 556 | pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED); |
@@ -511,7 +560,7 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) | |||
511 | /* ASID for this task */ | 560 | /* ASID for this task */ |
512 | asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff; | 561 | asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff; |
513 | 562 | ||
514 | pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0); | 563 | pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0); |
515 | 564 | ||
516 | /* | 565 | /* |
517 | * ARC MMU provides fully orthogonal access bits for K/U mode, | 566 | * ARC MMU provides fully orthogonal access bits for K/U mode, |
@@ -547,7 +596,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, | |||
547 | pte_t *ptep) | 596 | pte_t *ptep) |
548 | { | 597 | { |
549 | unsigned long vaddr = vaddr_unaligned & PAGE_MASK; | 598 | unsigned long vaddr = vaddr_unaligned & PAGE_MASK; |
550 | unsigned long paddr = pte_val(*ptep) & PAGE_MASK; | 599 | phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK; |
551 | struct page *page = pfn_to_page(pte_pfn(*ptep)); | 600 | struct page *page = pfn_to_page(pte_pfn(*ptep)); |
552 | 601 | ||
553 | create_tlb(vma, vaddr, ptep); | 602 | create_tlb(vma, vaddr, ptep); |
@@ -580,6 +629,95 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, | |||
580 | } | 629 | } |
581 | } | 630 | } |
582 | 631 | ||
632 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
633 | |||
634 | /* | ||
635 | * MMUv4 in HS38x cores supports Super Pages, which are the basis for Linux THP | ||
636 | * support. | ||
637 | * | ||
638 | * Normal and Super pages can co-exist (of course not overlap) in the TLB, with a | ||
639 | * new bit "SZ" in the TLB page descriptor to distinguish between them. | ||
640 | * Super Page size is configurable in hardware (4K to 16M), but fixed once | ||
641 | * the RTL is built. | ||
642 | * | ||
643 | * The exact THP size a Linux configuration will support is a function of: | ||
644 | * - MMU page size (typical 8K, RTL fixed) | ||
645 | * - software page walker address split between PGD:PTE:PFN (typical | ||
646 | * 11:8:13, but can be changed with 1 line) | ||
647 | * So for the above defaults, the supported THP size is 8K * (2^8) = 2M | ||
648 | * | ||
649 | * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime | ||
650 | * reduces to 1 level (as PTE is folded into PGD and canonically referred | ||
651 | * to as PMD). | ||
652 | * Thus THP PMD accessors are implemented in terms of PTE (just like sparc) | ||
653 | */ | ||
654 | |||
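A quick sanity check of the arithmetic quoted in the comment above, written out as compile-time constants. The values are taken from the "typical" split the comment names; the ARC_-prefixed macro names are illustrative, not the kernel's:

    #define ARC_PAGE_SHIFT   13                     /* 8K base page (assumed)   */
    #define ARC_PTE_BITS     8                      /* PGD:PTE:PFN = 11:8:13    */
    #define ARC_PTRS_PER_PTE (1UL << ARC_PTE_BITS)  /* 256 PTEs per page table  */
    #define ARC_HPAGE_SIZE   ((1UL << ARC_PAGE_SHIFT) * ARC_PTRS_PER_PTE)

    _Static_assert(ARC_HPAGE_SIZE == 2UL * 1024 * 1024, "8K * 256 = 2M");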
655 | void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, | ||
656 | pmd_t *pmd) | ||
657 | { | ||
658 | pte_t pte = __pte(pmd_val(*pmd)); | ||
659 | update_mmu_cache(vma, addr, &pte); | ||
660 | } | ||
661 | |||
662 | void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, | ||
663 | pgtable_t pgtable) | ||
664 | { | ||
665 | struct list_head *lh = (struct list_head *) pgtable; | ||
666 | |||
667 | assert_spin_locked(&mm->page_table_lock); | ||
668 | |||
669 | /* FIFO */ | ||
670 | if (!pmd_huge_pte(mm, pmdp)) | ||
671 | INIT_LIST_HEAD(lh); | ||
672 | else | ||
673 | list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp)); | ||
674 | pmd_huge_pte(mm, pmdp) = pgtable; | ||
675 | } | ||
676 | |||
677 | pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) | ||
678 | { | ||
679 | struct list_head *lh; | ||
680 | pgtable_t pgtable; | ||
681 | |||
682 | assert_spin_locked(&mm->page_table_lock); | ||
683 | |||
684 | pgtable = pmd_huge_pte(mm, pmdp); | ||
685 | lh = (struct list_head *) pgtable; | ||
686 | if (list_empty(lh)) | ||
687 | pmd_huge_pte(mm, pmdp) = NULL; | ||
688 | else { | ||
689 | pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next; | ||
690 | list_del(lh); | ||
691 | } | ||
692 | |||
693 | pte_val(pgtable[0]) = 0; | ||
694 | pte_val(pgtable[1]) = 0; | ||
695 | |||
696 | return pgtable; | ||
697 | } | ||
698 | |||
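The deposit/withdraw pair above parks pre-allocated PTE tables on a list whose linkage is stored inside the deposited page itself, so no extra allocation is needed. A standalone C sketch of the same idea with FIFO order made explicit (the head/tail bookkeeping and names are illustrative, not the kernel's list_head machinery):

    #include <stddef.h>

    struct node { struct node *next; };   /* overlaid on the deposited page */

    static struct node *head, *tail;

    static void deposit(void *page)
    {
        struct node *n = page;            /* reuse the page's own memory */
        n->next = NULL;
        if (tail)
            tail->next = n;
        else
            head = n;
        tail = n;
    }

    static void *withdraw(void)           /* FIFO: oldest deposit first */
    {
        struct node *n = head;
        if (!n)
            return NULL;
        head = n->next;
        if (!head)
            tail = NULL;
        return n;                         /* caller reuses page as a PTE table */
    }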
699 | void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, | ||
700 | unsigned long end) | ||
701 | { | ||
702 | unsigned int cpu; | ||
703 | unsigned long flags; | ||
704 | |||
705 | local_irq_save(flags); | ||
706 | |||
707 | cpu = smp_processor_id(); | ||
708 | |||
709 | if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) { | ||
710 | unsigned int asid = hw_pid(vma->vm_mm, cpu); | ||
711 | |||
712 | /* No need to loop here: this will always be for 1 Huge Page */ | ||
713 | tlb_entry_erase(start | _PAGE_HW_SZ | asid); | ||
714 | } | ||
715 | |||
716 | local_irq_restore(flags); | ||
717 | } | ||
718 | |||
719 | #endif | ||
720 | |||
583 | /* Read the Cache Build Configuration Registers, decode them and save into | 721 |
584 | * the cpuinfo structure for later use. | 722 | * the cpuinfo structure for later use. |
585 | * No Validation is done here, simply read/convert the BCRs | 723 | * No Validation is done here, simply read/convert the BCRs |
@@ -598,10 +736,10 @@ void read_decode_mmu_bcr(void) | |||
598 | 736 | ||
599 | struct bcr_mmu_3 { | 737 | struct bcr_mmu_3 { |
600 | #ifdef CONFIG_CPU_BIG_ENDIAN | 738 | #ifdef CONFIG_CPU_BIG_ENDIAN |
601 | unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4, | 739 | unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4, |
602 | u_itlb:4, u_dtlb:4; | 740 | u_itlb:4, u_dtlb:4; |
603 | #else | 741 | #else |
604 | unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4, | 742 | unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4, |
605 | ways:4, ver:8; | 743 | ways:4, ver:8; |
606 | #endif | 744 | #endif |
607 | } *mmu3; | 745 | } *mmu3; |
@@ -622,7 +760,7 @@ void read_decode_mmu_bcr(void) | |||
622 | 760 | ||
623 | if (mmu->ver <= 2) { | 761 | if (mmu->ver <= 2) { |
624 | mmu2 = (struct bcr_mmu_1_2 *)&tmp; | 762 | mmu2 = (struct bcr_mmu_1_2 *)&tmp; |
625 | mmu->pg_sz_k = TO_KB(PAGE_SIZE); | 763 | mmu->pg_sz_k = TO_KB(0x2000); |
626 | mmu->sets = 1 << mmu2->sets; | 764 | mmu->sets = 1 << mmu2->sets; |
627 | mmu->ways = 1 << mmu2->ways; | 765 | mmu->ways = 1 << mmu2->ways; |
628 | mmu->u_dtlb = mmu2->u_dtlb; | 766 | mmu->u_dtlb = mmu2->u_dtlb; |
@@ -634,6 +772,7 @@ void read_decode_mmu_bcr(void) | |||
634 | mmu->ways = 1 << mmu3->ways; | 772 | mmu->ways = 1 << mmu3->ways; |
635 | mmu->u_dtlb = mmu3->u_dtlb; | 773 | mmu->u_dtlb = mmu3->u_dtlb; |
636 | mmu->u_itlb = mmu3->u_itlb; | 774 | mmu->u_itlb = mmu3->u_itlb; |
775 | mmu->sasid = mmu3->sasid; | ||
637 | } else { | 776 | } else { |
638 | mmu4 = (struct bcr_mmu_4 *)&tmp; | 777 | mmu4 = (struct bcr_mmu_4 *)&tmp; |
639 | mmu->pg_sz_k = 1 << (mmu4->sz0 - 1); | 778 | mmu->pg_sz_k = 1 << (mmu4->sz0 - 1); |
@@ -642,9 +781,9 @@ void read_decode_mmu_bcr(void) | |||
642 | mmu->ways = mmu4->n_ways * 2; | 781 | mmu->ways = mmu4->n_ways * 2; |
643 | mmu->u_dtlb = mmu4->u_dtlb * 4; | 782 | mmu->u_dtlb = mmu4->u_dtlb * 4; |
644 | mmu->u_itlb = mmu4->u_itlb * 4; | 783 | mmu->u_itlb = mmu4->u_itlb * 4; |
784 | mmu->sasid = mmu4->sasid; | ||
785 | mmu->pae = mmu4->pae; | ||
645 | } | 786 | } |
646 | |||
647 | mmu->num_tlb = mmu->sets * mmu->ways; | ||
648 | } | 787 | } |
649 | 788 | ||
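read_decode_mmu_bcr() works by overlaying a bitfield struct on the raw 32-bit BCR word. A userspace C sketch of that decode, using the little-endian MMUv3 layout from the patch (the sample register value is made up):

    #include <stdio.h>
    #include <string.h>

    struct bcr_mmu_3_le {          /* little-endian layout, per the patch */
        unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3,
                     sets:4, ways:4, ver:8;
    };

    int main(void)
    {
        unsigned int tmp = 0x03221288;  /* made-up BCR value */
        struct bcr_mmu_3_le mmu3;

        memcpy(&mmu3, &tmp, sizeof(mmu3));
        printf("ver=%u sets=%u ways=%u sasid=%u\n",
               mmu3.ver, 1u << mmu3.sets, 1u << mmu3.ways, mmu3.sasid);
        return 0;
    }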
650 | char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len) | 789 | char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len) |
@@ -655,14 +794,15 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len) | |||
655 | 794 | ||
656 | if (p_mmu->s_pg_sz_m) | 795 | if (p_mmu->s_pg_sz_m) |
657 | scnprintf(super_pg, 64, "%dM Super Page%s, ", | 796 | scnprintf(super_pg, 64, "%dM Super Page%s, ", |
658 | p_mmu->s_pg_sz_m, " (not used)"); | 797 | p_mmu->s_pg_sz_m, |
798 | IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE)); | ||
659 | 799 | ||
660 | n += scnprintf(buf + n, len - n, | 800 | n += scnprintf(buf + n, len - n, |
661 | "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s\n", | 801 | "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s%s\n", |
662 | p_mmu->ver, p_mmu->pg_sz_k, super_pg, | 802 | p_mmu->ver, p_mmu->pg_sz_k, super_pg, |
663 | p_mmu->num_tlb, p_mmu->sets, p_mmu->ways, | 803 | p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways, |
664 | p_mmu->u_dtlb, p_mmu->u_itlb, | 804 | p_mmu->u_dtlb, p_mmu->u_itlb, |
665 | IS_ENABLED(CONFIG_ARC_MMU_SASID) ? ",SASID" : ""); | 805 | IS_AVAIL2(p_mmu->pae, "PAE40 ", CONFIG_ARC_HAS_PAE40)); |
666 | 806 | ||
667 | return buf; | 807 | return buf; |
668 | } | 808 | } |
@@ -690,6 +830,14 @@ void arc_mmu_init(void) | |||
690 | if (mmu->pg_sz_k != TO_KB(PAGE_SIZE)) | 830 | if (mmu->pg_sz_k != TO_KB(PAGE_SIZE)) |
691 | panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE)); | 831 | panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE)); |
692 | 832 | ||
833 | if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && | ||
834 | mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE)) | ||
835 | panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n", | ||
836 | (unsigned long)TO_MB(HPAGE_PMD_SIZE)); | ||
837 | |||
838 | if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae) | ||
839 | panic("Hardware doesn't support PAE40\n"); | ||
840 | |||
693 | /* Enable the MMU */ | 841 | /* Enable the MMU */ |
694 | write_aux_reg(ARC_REG_PID, MMU_ENABLE); | 842 | write_aux_reg(ARC_REG_PID, MMU_ENABLE); |
695 | 843 | ||
@@ -725,15 +873,15 @@ void arc_mmu_init(void) | |||
725 | * the duplicate one. | 873 | * the duplicate one. |
726 | * -Knob to be verbose about it (TODO: hook them up to debugfs) | 874 |
727 | */ | 875 | */ |
728 | volatile int dup_pd_verbose = 1; /* Be silent about it or complain (default) */ | 876 | volatile int dup_pd_silent; /* Be silent about it or complain (default) */ |
729 | 877 | ||
730 | void do_tlb_overlap_fault(unsigned long cause, unsigned long address, | 878 | void do_tlb_overlap_fault(unsigned long cause, unsigned long address, |
731 | struct pt_regs *regs) | 879 | struct pt_regs *regs) |
732 | { | 880 | { |
733 | int set, way, n; | ||
734 | unsigned long flags, is_valid; | ||
735 | struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; | 881 | struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; |
736 | unsigned int pd0[mmu->ways], pd1[mmu->ways]; | 882 | unsigned int pd0[mmu->ways]; |
883 | unsigned long flags; | ||
884 | int set; | ||
737 | 885 | ||
738 | local_irq_save(flags); | 886 | local_irq_save(flags); |
739 | 887 | ||
@@ -743,14 +891,16 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address, | |||
743 | /* loop thru all sets of TLB */ | 891 | /* loop thru all sets of TLB */ |
744 | for (set = 0; set < mmu->sets; set++) { | 892 | for (set = 0; set < mmu->sets; set++) { |
745 | 893 | ||
894 | int is_valid, way; | ||
895 | |||
746 | /* read out all the ways of current set */ | 896 | /* read out all the ways of current set */ |
747 | for (way = 0, is_valid = 0; way < mmu->ways; way++) { | 897 | for (way = 0, is_valid = 0; way < mmu->ways; way++) { |
748 | write_aux_reg(ARC_REG_TLBINDEX, | 898 | write_aux_reg(ARC_REG_TLBINDEX, |
749 | SET_WAY_TO_IDX(mmu, set, way)); | 899 | SET_WAY_TO_IDX(mmu, set, way)); |
750 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead); | 900 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead); |
751 | pd0[way] = read_aux_reg(ARC_REG_TLBPD0); | 901 | pd0[way] = read_aux_reg(ARC_REG_TLBPD0); |
752 | pd1[way] = read_aux_reg(ARC_REG_TLBPD1); | ||
753 | is_valid |= pd0[way] & _PAGE_PRESENT; | 902 | is_valid |= pd0[way] & _PAGE_PRESENT; |
903 | pd0[way] &= PAGE_MASK; | ||
754 | } | 904 | } |
755 | 905 | ||
756 | /* If all the WAYS in SET are empty, skip to next SET */ | 906 | /* If all the WAYS in SET are empty, skip to next SET */ |
@@ -759,30 +909,28 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address, | |||
759 | 909 | ||
760 | /* Scan the set for duplicate ways: needs a nested loop */ | 910 | /* Scan the set for duplicate ways: needs a nested loop */ |
761 | for (way = 0; way < mmu->ways - 1; way++) { | 911 | for (way = 0; way < mmu->ways - 1; way++) { |
912 | |||
913 | int n; | ||
914 | |||
762 | if (!pd0[way]) | 915 | if (!pd0[way]) |
763 | continue; | 916 | continue; |
764 | 917 | ||
765 | for (n = way + 1; n < mmu->ways; n++) { | 918 | for (n = way + 1; n < mmu->ways; n++) { |
766 | if ((pd0[way] & PAGE_MASK) == | 919 | if (pd0[way] != pd0[n]) |
767 | (pd0[n] & PAGE_MASK)) { | 920 | continue; |
768 | 921 | ||
769 | if (dup_pd_verbose) { | 922 | if (!dup_pd_silent) |
770 | pr_info("Duplicate PD's @" | 923 | pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n", |
771 | "[%d:%d]/[%d:%d]\n", | 924 | pd0[way], set, way, n); |
772 | set, way, set, n); | 925 | |
773 | pr_info("TLBPD0[%u]: %08x\n", | 926 | /* |
774 | way, pd0[way]); | 927 | * clear entry @way and not @n. |
775 | } | 928 | * This is critical to our optimised loop |
776 | 929 | */ | |
777 | /* | 930 | pd0[way] = 0; |
778 | * clear entry @way and not @n. This is | 931 | write_aux_reg(ARC_REG_TLBINDEX, |
779 | * critical to our optimised loop | ||
780 | */ | ||
781 | pd0[way] = pd1[way] = 0; | ||
782 | write_aux_reg(ARC_REG_TLBINDEX, | ||
783 | SET_WAY_TO_IDX(mmu, set, way)); | 932 | SET_WAY_TO_IDX(mmu, set, way)); |
784 | __tlb_entry_erase(); | 933 | __tlb_entry_erase(); |
785 | } | ||
786 | } | 934 | } |
787 | } | 935 | } |
788 | } | 936 | } |
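A distilled C version of the rewritten duplicate scan, showing why the patch clears pd0[way] (the earlier slot) rather than pd0[n]: the outer loop's "!pd0[way]" skip test stays valid for the slots it has yet to visit. Set/way bookkeeping and the actual TLB erase are elided:

    static void scan_set_for_dups(unsigned int pd0[], int ways)
    {
        int way, n;

        for (way = 0; way < ways - 1; way++) {
            if (!pd0[way])                  /* already erased or empty */
                continue;
            for (n = way + 1; n < ways; n++) {
                if (pd0[way] != pd0[n])
                    continue;
                pd0[way] = 0;               /* clear @way, not @n */
                /* __tlb_entry_erase() on (set, way) would go here */
            }
        }
    }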
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S index f6f4c3cb505d..63860adc4814 100644 --- a/arch/arc/mm/tlbex.S +++ b/arch/arc/mm/tlbex.S | |||
@@ -205,20 +205,38 @@ ex_saved_reg1: | |||
205 | #endif | 205 | #endif |
206 | 206 | ||
207 | lsr r0, r2, PGDIR_SHIFT ; Bits for indexing into PGD | 207 | lsr r0, r2, PGDIR_SHIFT ; Bits for indexing into PGD |
208 | ld.as r1, [r1, r0] ; PGD entry corresp to faulting addr | 208 | ld.as r3, [r1, r0] ; PGD entry corresp to faulting addr |
209 | and.f r1, r1, PAGE_MASK ; Ignoring protection and other flags | 209 | tst r3, r3 |
210 | ; contains Ptr to Page Table | 210 | bz do_slow_path_pf ; if no Page Table, do page fault |
211 | bz.d do_slow_path_pf ; if no Page Table, do page fault | 211 | |
212 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
213 | and.f 0, r3, _PAGE_HW_SZ ; Is this Huge PMD (thp) | ||
214 | add2.nz r1, r1, r0 | ||
215 | bnz.d 2f ; YES: PGD == PMD has THP PTE: stop pgd walk | ||
216 | mov.nz r0, r3 | ||
217 | |||
218 | #endif | ||
219 | and r1, r3, PAGE_MASK | ||
212 | 220 | ||
213 | ; Get the PTE entry: The idea is | 221 | ; Get the PTE entry: The idea is |
214 | ; (1) x = addr >> PAGE_SHIFT -> masks page-off bits from @fault-addr | 222 | ; (1) x = addr >> PAGE_SHIFT -> masks page-off bits from @fault-addr |
215 | ; (2) y = x & (PTRS_PER_PTE - 1) -> to get index | 223 | ; (2) y = x & (PTRS_PER_PTE - 1) -> to get index |
216 | ; (3) z = pgtbl[y] | 224 | ; (3) z = (pgtbl + y * 4) |
217 | ; To avoid the multiply at the end, we do the -2, <<2 below | 225 |
226 | #ifdef CONFIG_ARC_HAS_PAE40 | ||
227 | #define PTE_SIZE_LOG 3 /* 8 == 2 ^ 3 */ | ||
228 | #else | ||
229 | #define PTE_SIZE_LOG 2 /* 4 == 2 ^ 2 */ | ||
230 | #endif | ||
231 | |||
232 | ; multiply in step (3) above avoided by shifting less in step (1) | ||
233 | lsr r0, r2, ( PAGE_SHIFT - PTE_SIZE_LOG ) | ||
234 | and r0, r0, ( (PTRS_PER_PTE - 1) << PTE_SIZE_LOG ) | ||
235 | ld.aw r0, [r1, r0] ; r0: PTE (lower word only for PAE40) | ||
236 | ; r1: PTE ptr | ||
237 | |||
238 | 2: | ||
218 | 239 | ||
219 | lsr r0, r2, (PAGE_SHIFT - 2) | ||
220 | and r0, r0, ( (PTRS_PER_PTE - 1) << 2) | ||
221 | ld.aw r0, [r1, r0] ; get PTE and PTE ptr for fault addr | ||
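The rewritten index computation folds the multiply by the PTE size into the shift amount and a pre-shifted mask. A small C check of the equivalence, with constants assumed from the defaults quoted earlier (8K pages, 256 PTEs per table):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT    13
    #define PTRS_PER_PTE  256
    #define PTE_SIZE_LOG  2   /* 4-byte PTE; 3 under PAE40 */

    int main(void)
    {
        uint32_t vaddr = 0x12345678;
        /* naive: compute the index, then scale by the PTE size */
        uint32_t off1 = ((vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) << PTE_SIZE_LOG;
        /* tlbex.S: shift less up front, mask with a pre-shifted mask */
        uint32_t off2 = (vaddr >> (PAGE_SHIFT - PTE_SIZE_LOG))
                        & ((PTRS_PER_PTE - 1) << PTE_SIZE_LOG);
        assert(off1 == off2);
        return 0;
    }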
222 | #ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT | 240 | #ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT |
223 | and.f 0, r0, _PAGE_PRESENT | 241 | and.f 0, r0, _PAGE_PRESENT |
224 | bz 1f | 242 | bz 1f |
@@ -233,18 +251,23 @@ ex_saved_reg1: | |||
233 | ;----------------------------------------------------------------- | 251 | ;----------------------------------------------------------------- |
234 | ; Convert Linux PTE entry into TLB entry | 252 | ; Convert Linux PTE entry into TLB entry |
235 | ; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu | 253 | ; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu |
254 | ; (for PAE40, a two-word PTE but a three-word TLB Entry [PD0:PD1:PD1HI]) | ||
236 | ; IN: r0 = PTE, r1 = ptr to PTE | 255 | ; IN: r0 = PTE, r1 = ptr to PTE |
237 | 256 | ||
238 | .macro CONV_PTE_TO_TLB | 257 | .macro CONV_PTE_TO_TLB |
239 | and r3, r0, PTE_BITS_RWX ; r w x | 258 | and r3, r0, PTE_BITS_RWX ; r w x |
240 | lsl r2, r3, 3 ; r w x 0 0 0 (GLOBAL, kernel only) | 259 | lsl r2, r3, 3 ; Kr Kw Kx 0 0 0 (GLOBAL, kernel only) |
241 | and.f 0, r0, _PAGE_GLOBAL | 260 | and.f 0, r0, _PAGE_GLOBAL |
242 | or.z r2, r2, r3 ; r w x r w x (!GLOBAL, user page) | 261 | or.z r2, r2, r3 ; Kr Kw Kx Ur Uw Ux (!GLOBAL, user page) |
243 | 262 | ||
244 | and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE | 263 | and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE |
245 | or r3, r3, r2 | 264 | or r3, r3, r2 |
246 | 265 | ||
247 | sr r3, [ARC_REG_TLBPD1] ; these go in PD1 | 266 | sr r3, [ARC_REG_TLBPD1] ; paddr[31..13] | Kr Kw Kx Ur Uw Ux | C |
267 | #ifdef CONFIG_ARC_HAS_PAE40 | ||
268 | ld r3, [r1, 4] ; paddr[39..32] | ||
269 | sr r3, [ARC_REG_TLBPD1HI] | ||
270 | #endif | ||
248 | 271 | ||
249 | and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb | 272 | and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb |
250 | 273 | ||
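The permission handling in CONV_PTE_TO_TLB duplicates the rwx bits into the user-mode slots only for non-GLOBAL pages, since the ARC MMU keeps kernel and user access bits fully orthogonal. A C rendering of just that step (the bit positions below are placeholders for illustration, not the real ARC encodings):

    #include <stdint.h>

    #define PTE_BITS_RWX  0x7u        /* placeholder: r w x in bits 2..0 */
    #define PAGE_GLOBAL   (1u << 8)   /* placeholder position            */

    static uint32_t expand_rwx(uint32_t pte)
    {
        uint32_t rwx = pte & PTE_BITS_RWX;
        uint32_t pd1 = rwx << 3;      /* Kr Kw Kx 0 0 0: kernel-only */

        if (!(pte & PAGE_GLOBAL))
            pd1 |= rwx;               /* Kr Kw Kx Ur Uw Ux: user page */
        return pd1;
    }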
@@ -365,7 +388,7 @@ ENTRY(EV_TLBMissD) | |||
365 | lr r3, [ecr] | 388 | lr r3, [ecr] |
366 | or r0, r0, _PAGE_ACCESSED ; Accessed bit always | 389 | or r0, r0, _PAGE_ACCESSED ; Accessed bit always |
367 | btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; See if it was a Write Access ? | 390 | btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; See if it was a Write Access ? |
368 | or.nz r0, r0, _PAGE_MODIFIED ; if Write, set Dirty bit as well | 391 | or.nz r0, r0, _PAGE_DIRTY ; if Write, set Dirty bit as well |
369 | st_s r0, [r1] ; Write back PTE | 392 | st_s r0, [r1] ; Write back PTE |
370 | 393 | ||
371 | CONV_PTE_TO_TLB | 394 | CONV_PTE_TO_TLB |
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c index 0a77b19e1df8..1b0f0f458a2b 100644 --- a/arch/arc/plat-axs10x/axs10x.c +++ b/arch/arc/plat-axs10x/axs10x.c | |||
@@ -455,11 +455,6 @@ static void __init axs103_early_init(void) | |||
455 | axs10x_print_board_ver(AXC003_CREG + 4088, "AXC003 CPU Card"); | 455 | axs10x_print_board_ver(AXC003_CREG + 4088, "AXC003 CPU Card"); |
456 | 456 | ||
457 | axs10x_early_init(); | 457 | axs10x_early_init(); |
458 | |||
459 | #ifdef CONFIG_ARC_MCIP | ||
460 | /* No Hardware init, but filling the smp ops callbacks */ | ||
461 | mcip_init_early_smp(); | ||
462 | #endif | ||
463 | } | 458 | } |
464 | #endif | 459 | #endif |
465 | 460 | ||
@@ -487,9 +482,6 @@ static const char *axs103_compat[] __initconst = { | |||
487 | MACHINE_START(AXS103, "axs103") | 482 | MACHINE_START(AXS103, "axs103") |
488 | .dt_compat = axs103_compat, | 483 | .dt_compat = axs103_compat, |
489 | .init_early = axs103_early_init, | 484 | .init_early = axs103_early_init, |
490 | #ifdef CONFIG_ARC_MCIP | ||
491 | .init_smp = mcip_init_smp, | ||
492 | #endif | ||
493 | MACHINE_END | 485 | MACHINE_END |
494 | 486 | ||
495 | /* | 487 | /* |
diff --git a/arch/arc/plat-sim/platform.c b/arch/arc/plat-sim/platform.c index d9e35b4a2f08..dde692812bc1 100644 --- a/arch/arc/plat-sim/platform.c +++ b/arch/arc/plat-sim/platform.c | |||
@@ -30,8 +30,4 @@ static const char *simulation_compat[] __initconst = { | |||
30 | 30 | ||
31 | MACHINE_START(SIMULATION, "simulation") | 31 | MACHINE_START(SIMULATION, "simulation") |
32 | .dt_compat = simulation_compat, | 32 | .dt_compat = simulation_compat, |
33 | #ifdef CONFIG_ARC_MCIP | ||
34 | .init_early = mcip_init_early_smp, | ||
35 | .init_smp = mcip_init_smp, | ||
36 | #endif | ||
37 | MACHINE_END | 33 | MACHINE_END |