130 files changed, 11864 insertions, 21 deletions
diff --git a/Documentation/devicetree/bindings/interrupt-controller/andestech,ativic32.txt b/Documentation/devicetree/bindings/interrupt-controller/andestech,ativic32.txt new file mode 100644 index 000000000000..f4b4193d830e --- /dev/null +++ b/Documentation/devicetree/bindings/interrupt-controller/andestech,ativic32.txt | |||
| @@ -0,0 +1,19 @@ | |||
| 1 | * Andestech Internal Vector Interrupt Controller | ||
| 2 | |||
| 3 | The Internal Vector Interrupt Controller (IVIC) is a basic interrupt controller | ||
| 4 | suitable for simpler SoC platforms that do not require a more sophisticated and | ||
| 5 | larger External Vector Interrupt Controller. | ||
| 6 | |||
| 7 | |||
| 8 | Main node required properties: | ||
| 9 | |||
| 10 | - compatible : should at least contain "andestech,ativic32". | ||
| 11 | - interrupt-controller : Identifies the node as an interrupt controller | ||
| 12 | - #interrupt-cells : Should be 1; refer to interrupt-controller/interrupts | ||
| 13 | |||
| 14 | Examples: | ||
| 15 | intc: interrupt-controller { | ||
| 16 | compatible = "andestech,ativic32"; | ||
| 17 | #interrupt-cells = <1>; | ||
| 18 | interrupt-controller; | ||
| 19 | }; | ||
diff --git a/Documentation/devicetree/bindings/nds32/andestech-boards b/Documentation/devicetree/bindings/nds32/andestech-boards new file mode 100644 index 000000000000..f5d75693e3c7 --- /dev/null +++ b/Documentation/devicetree/bindings/nds32/andestech-boards | |||
| @@ -0,0 +1,40 @@ | |||
| 1 | Andestech(nds32) AE3XX Platform | ||
| 2 | ----------------------------------------------------------------------------- | ||
| 3 | The AE3XX prototype demonstrates the AE3XX example platform on an FPGA. It | ||
| 4 | is composed of one Andestech(nds32) processor and the AE3XX platform IP. | ||
| 5 | |||
| 6 | Required properties (in root node): | ||
| 7 | - compatible = "andestech,ae3xx"; | ||
| 8 | |||
| 9 | Example: | ||
| 10 | /dts-v1/; | ||
| 11 | / { | ||
| 12 | compatible = "andestech,ae3xx"; | ||
| 13 | #address-cells = <1>; | ||
| 14 | #size-cells = <1>; | ||
| 15 | interrupt-parent = <&intc>; | ||
| 16 | }; | ||
| 17 | |||
| 18 | Andestech(nds32) AG101P Platform | ||
| 19 | ----------------------------------------------------------------------------- | ||
| 20 | AG101P is a generic SoC platform IP that works with any Andestech(nds32) | ||
| 21 | processor to provide a cost-effective and high performance solution for the | ||
| 22 | majority of embedded systems in a variety of application domains. Users may | ||
| 23 | simply attach their IP to one of the system buses together with certain glue | ||
| 24 | logic to complete a SoC solution for a specific application. With | ||
| 25 | comprehensive simulation and design environments, users may evaluate the | ||
| 26 | system performance of their applications and track down bugs in their designs | ||
| 27 | efficiently. The optional hardware development platform further provides a real | ||
| 28 | system environment for early prototyping and software/hardware co-development. | ||
| 29 | |||
| 30 | Required properties (in root node): | ||
| 31 | - compatible = "andestech,ag101p"; | ||
| 32 | |||
| 33 | Example: | ||
| 34 | /dts-v1/; | ||
| 35 | / { | ||
| 36 | compatible = "andestech,ag101p"; | ||
| 37 | #address-cells = <1>; | ||
| 38 | #size-cells = <1>; | ||
| 39 | interrupt-parent = <&intc>; | ||
| 40 | }; | ||
diff --git a/Documentation/devicetree/bindings/nds32/atl2c.txt b/Documentation/devicetree/bindings/nds32/atl2c.txt new file mode 100644 index 000000000000..da8ab8e7ae9b --- /dev/null +++ b/Documentation/devicetree/bindings/nds32/atl2c.txt | |||
| @@ -0,0 +1,28 @@ | |||
| 1 | * Andestech L2 cache Controller | ||
| 2 | |||
| 3 | The level-2 cache controller plays an important role in reducing memory latency | ||
| 4 | for high performance systems, such as those designs with AndesCore processors. | ||
| 5 | A level-2 cache controller in general enhances overall system performance | ||
| 6 | significantly, and system power consumption might be reduced as well by | ||
| 7 | reducing DRAM accesses. | ||
| 8 | |||
| 9 | This binding specifies what properties must be available in the device tree | ||
| 10 | representation of an Andestech L2 cache controller. | ||
| 11 | |||
| 12 | Required properties: | ||
| 13 | - compatible: | ||
| 14 | Usage: required | ||
| 15 | Value type: <string> | ||
| 16 | Definition: "andestech,atl2c" | ||
| 17 | - reg : Physical base address and size of the cache controller's memory-mapped registers | ||
| 18 | - cache-unified : Specifies the cache is a unified cache. | ||
| 19 | - cache-level : Should be set to 2 for a level 2 cache. | ||
| 20 | |||
| 21 | * Example | ||
| 22 | |||
| 23 | cache-controller@e0500000 { | ||
| 24 | compatible = "andestech,atl2c"; | ||
| 25 | reg = <0xe0500000 0x1000>; | ||
| 26 | cache-unified; | ||
| 27 | cache-level = <2>; | ||
| 28 | }; | ||
diff --git a/Documentation/devicetree/bindings/nds32/cpus.txt b/Documentation/devicetree/bindings/nds32/cpus.txt new file mode 100644 index 000000000000..6f9e311b6589 --- /dev/null +++ b/Documentation/devicetree/bindings/nds32/cpus.txt | |||
| @@ -0,0 +1,38 @@ | |||
| 1 | * Andestech Processor Binding | ||
| 2 | |||
| 3 | This binding specifies what properties must be available in the device tree | ||
| 4 | representation of an Andestech Processor Core, which is the root node in the | ||
| 5 | tree. | ||
| 6 | |||
| 7 | Required properties: | ||
| 8 | |||
| 9 | - compatible: | ||
| 10 | Usage: required | ||
| 11 | Value type: <string> | ||
| 12 | Definition: Should be "andestech,<core_name>", "andestech,nds32v3" as fallback. | ||
| 13 | Must contain "andestech,nds32v3" as the most generic value, in addition to | ||
| 14 | one of the following identifiers for a particular CPU core: | ||
| 15 | "andestech,n13" | ||
| 16 | "andestech,n15" | ||
| 17 | "andestech,d15" | ||
| 18 | "andestech,n10" | ||
| 19 | "andestech,d10" | ||
| 20 | - device_type | ||
| 21 | Usage: required | ||
| 22 | Value type: <string> | ||
| 23 | Definition: must be "cpu" | ||
| 24 | - reg: Contains CPU index. | ||
| 25 | - clock-frequency: Contains the clock frequency for CPU, in Hz. | ||
| 26 | |||
| 27 | * Examples | ||
| 28 | |||
| 29 | / { | ||
| 30 | cpus { | ||
| 31 | cpu@0 { | ||
| 32 | device_type = "cpu"; | ||
| 33 | compatible = "andestech,n13", "andestech,nds32v3"; | ||
| 34 | reg = <0x0>; | ||
| 35 | clock-frequency = <60000000>; | ||
| 36 | }; | ||
| 37 | }; | ||
| 38 | }; | ||
diff --git a/Documentation/devicetree/bindings/timer/andestech,atcpit100-timer.txt b/Documentation/devicetree/bindings/timer/andestech,atcpit100-timer.txt new file mode 100644 index 000000000000..4c9ea5989e35 --- /dev/null +++ b/Documentation/devicetree/bindings/timer/andestech,atcpit100-timer.txt | |||
| @@ -0,0 +1,33 @@ | |||
| 1 | Andestech ATCPIT100 timer | ||
| 2 | ------------------------------------------------------------------ | ||
| 3 | ATCPIT100 is a generic IP block from Andes Technology, embedded in | ||
| 4 | Andestech AE3XX platforms and other designs. | ||
| 5 | |||
| 6 | This timer is a set of compact multi-function timers, which can be | ||
| 7 | used as pulse width modulators (PWM) as well as simple timers. | ||
| 8 | |||
| 9 | It supports up to 4 PIT channels. Each PIT channel is a | ||
| 10 | multi-function timer and provides the following usage scenarios: | ||
| 11 | One 32-bit timer | ||
| 12 | Two 16-bit timers | ||
| 13 | Four 8-bit timers | ||
| 14 | One 16-bit PWM | ||
| 15 | One 16-bit timer and one 8-bit PWM | ||
| 16 | Two 8-bit timers and one 8-bit PWM | ||
| 17 | |||
| 18 | Required properties: | ||
| 19 | - compatible : Should be "andestech,atcpit100" | ||
| 20 | - reg : Address and length of the register set | ||
| 21 | - interrupts : Reference to the timer interrupt | ||
| 22 | - clocks : A clock to provide the tick rate for "andestech,atcpit100" | ||
| 23 | - clock-names : Should be "PCLK" for the peripheral clock source. | ||
| 24 | |||
| 25 | Examples: | ||
| 26 | |||
| 27 | timer0: timer@f0400000 { | ||
| 28 | compatible = "andestech,atcpit100"; | ||
| 29 | reg = <0xf0400000 0x1000>; | ||
| 30 | interrupts = <2>; | ||
| 31 | clocks = <&apb>; | ||
| 32 | clock-names = "PCLK"; | ||
| 33 | }; | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 689f875b47fa..b587fd48b924 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -870,6 +870,17 @@ X: drivers/iio/*/adjd* | |||
| 870 | F: drivers/staging/iio/*/ad* | 870 | F: drivers/staging/iio/*/ad* |
| 871 | F: drivers/staging/iio/trigger/iio-trig-bfin-timer.c | 871 | F: drivers/staging/iio/trigger/iio-trig-bfin-timer.c |
| 872 | 872 | ||
| 873 | ANDES ARCHITECTURE | ||
| 874 | M: Greentime Hu <green.hu@gmail.com> | ||
| 875 | M: Vincent Chen <deanbo422@gmail.com> | ||
| 876 | T: git https://github.com/andestech/linux.git | ||
| 877 | S: Supported | ||
| 878 | F: arch/nds32/ | ||
| 879 | F: Documentation/devicetree/bindings/interrupt-controller/andestech,ativic32.txt | ||
| 880 | F: Documentation/devicetree/bindings/nds32/ | ||
| 881 | K: nds32 | ||
| 882 | N: nds32 | ||
| 883 | |||
| 873 | ANDROID CONFIG FRAGMENTS | 884 | ANDROID CONFIG FRAGMENTS |
| 874 | M: Rob Herring <robh@kernel.org> | 885 | M: Rob Herring <robh@kernel.org> |
| 875 | S: Supported | 886 | S: Supported |
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig new file mode 100644 index 000000000000..249f38d3388f --- /dev/null +++ b/arch/nds32/Kconfig | |||
| @@ -0,0 +1,103 @@ | |||
| 1 | # | ||
| 2 | # For a description of the syntax of this configuration file, | ||
| 3 | # see Documentation/kbuild/kconfig-language.txt. | ||
| 4 | # | ||
| 5 | |||
| 6 | config NDS32 | ||
| 7 | def_bool y | ||
| 8 | select ARCH_WANT_FRAME_POINTERS if FTRACE | ||
| 9 | select CLKSRC_MMIO | ||
| 10 | select CLONE_BACKWARDS | ||
| 11 | select COMMON_CLK | ||
| 12 | select GENERIC_ATOMIC64 | ||
| 13 | select GENERIC_CPU_DEVICES | ||
| 14 | select GENERIC_CLOCKEVENTS | ||
| 15 | select GENERIC_IRQ_CHIP | ||
| 16 | select GENERIC_IRQ_SHOW | ||
| 17 | select GENERIC_STRNCPY_FROM_USER | ||
| 18 | select GENERIC_STRNLEN_USER | ||
| 19 | select GENERIC_TIME_VSYSCALL | ||
| 20 | select HANDLE_DOMAIN_IRQ | ||
| 21 | select HAVE_ARCH_TRACEHOOK | ||
| 22 | select HAVE_DEBUG_KMEMLEAK | ||
| 23 | select HAVE_MEMBLOCK | ||
| 24 | select HAVE_REGS_AND_STACK_ACCESS_API | ||
| 25 | select IRQ_DOMAIN | ||
| 26 | select LOCKDEP_SUPPORT | ||
| 27 | select MODULES_USE_ELF_RELA | ||
| 28 | select OF | ||
| 29 | select OF_EARLY_FLATTREE | ||
| 30 | select NO_BOOTMEM | ||
| 31 | select NO_IOPORT_MAP | ||
| 32 | select RTC_LIB | ||
| 33 | select THREAD_INFO_IN_TASK | ||
| 34 | help | ||
| 35 | Andes(nds32) Linux support. | ||
| 36 | |||
| 37 | config GENERIC_CALIBRATE_DELAY | ||
| 38 | def_bool y | ||
| 39 | |||
| 40 | config GENERIC_CSUM | ||
| 41 | def_bool y | ||
| 42 | |||
| 43 | config GENERIC_HWEIGHT | ||
| 44 | def_bool y | ||
| 45 | |||
| 46 | config GENERIC_LOCKBREAK | ||
| 47 | def_bool y | ||
| 48 | depends on PREEMPT | ||
| 49 | |||
| 50 | config RWSEM_GENERIC_SPINLOCK | ||
| 51 | def_bool y | ||
| 52 | |||
| 53 | config TRACE_IRQFLAGS_SUPPORT | ||
| 54 | def_bool y | ||
| 55 | |||
| 56 | config STACKTRACE_SUPPORT | ||
| 57 | def_bool y | ||
| 58 | |||
| 59 | config FIX_EARLYCON_MEM | ||
| 60 | def_bool y | ||
| 61 | |||
| 62 | config PGTABLE_LEVELS | ||
| 63 | default 2 | ||
| 64 | |||
| 65 | source "init/Kconfig" | ||
| 66 | |||
| 67 | menu "System Type" | ||
| 68 | source "arch/nds32/Kconfig.cpu" | ||
| 69 | config NR_CPUS | ||
| 70 | int | ||
| 71 | default 1 | ||
| 72 | |||
| 73 | config MMU | ||
| 74 | def_bool y | ||
| 75 | |||
| 76 | config NDS32_BUILTIN_DTB | ||
| 77 | string "Builtin DTB" | ||
| 78 | default "" | ||
| 79 | help | ||
| 80 | Use this option to specify the device tree source (dts) of the SoC. | ||
| 81 | endmenu | ||
| 82 | |||
| 83 | menu "Kernel Features" | ||
| 84 | source "kernel/Kconfig.preempt" | ||
| 85 | source "mm/Kconfig" | ||
| 86 | source "kernel/Kconfig.hz" | ||
| 87 | endmenu | ||
| 88 | |||
| 89 | menu "Executable file formats" | ||
| 90 | source "fs/Kconfig.binfmt" | ||
| 91 | endmenu | ||
| 92 | |||
| 93 | source "net/Kconfig" | ||
| 94 | source "drivers/Kconfig" | ||
| 95 | source "fs/Kconfig" | ||
| 96 | |||
| 97 | menu "Kernel hacking" | ||
| 98 | source "lib/Kconfig.debug" | ||
| 99 | endmenu | ||
| 100 | |||
| 101 | source "security/Kconfig" | ||
| 102 | source "crypto/Kconfig" | ||
| 103 | source "lib/Kconfig" | ||
diff --git a/arch/nds32/Kconfig.cpu b/arch/nds32/Kconfig.cpu new file mode 100644 index 000000000000..ba44cc539da9 --- /dev/null +++ b/arch/nds32/Kconfig.cpu | |||
| @@ -0,0 +1,174 @@ | |||
| 1 | comment "Processor Features" | ||
| 2 | |||
| 3 | config CPU_BIG_ENDIAN | ||
| 4 | bool "Big endian" | ||
| 5 | |||
| 6 | config CPU_LITTLE_ENDIAN | ||
| 7 | def_bool !CPU_BIG_ENDIAN | ||
| 8 | |||
| 9 | config HWZOL | ||
| 10 | bool "hardware zero overhead loop support" | ||
| 11 | depends on CPU_D10 || CPU_D15 | ||
| 12 | default n | ||
| 13 | help | ||
| 14 | A set of zero-overhead loop mechanisms is provided to reduce the | ||
| 15 | instruction fetch and execution overhead of loop-control instructions. | ||
| 16 | If you say Y, 3 registers ($LB, $LC, $LE) will be added to the saved context. | ||
| 17 | You don't need to save these registers if you can make sure your user | ||
| 18 | programs don't use these registers. | ||
| 19 | |||
| 20 | If unsure, say N. | ||
| 21 | |||
| 22 | config CPU_CACHE_ALIASING | ||
| 23 | bool "Aliasing cache" | ||
| 24 | depends on CPU_N10 || CPU_D10 || CPU_N13 || CPU_V3 | ||
| 25 | default y | ||
| 26 | help | ||
| 27 | If this CPU is using a VIPT data cache and its cache way size is larger | ||
| 28 | than the page size, say Y. If it is using a PIPT data cache, say N. | ||
| 29 | |||
| 30 | If unsure, say Y. | ||
| 31 | |||
| 32 | choice | ||
| 33 | prompt "minimum CPU type" | ||
| 34 | default CPU_V3 | ||
| 35 | help | ||
| 36 | The data cache of the N15/D15 is implemented as PIPT and will not cause | ||
| 37 | the cache aliasing issue. The remaining CPUs (N13, N10 and D10) are | ||
| 38 | implemented with a VIPT data cache, which may cause the cache aliasing | ||
| 39 | issue if the cache way size is larger than the page size. You can specify | ||
| 40 | the CPU type directly or choose CPU_V3 if unsure. | ||
| 41 | |||
| 42 | A kernel built for N10 is able to run on N15, D15, N13, N10 or D10. | ||
| 43 | A kernel built for N15 is able to run on N15 or D15. | ||
| 44 | A kernel built for D10 is able to run on D10 or D15. | ||
| 45 | A kernel built for D15 is able to run on D15. | ||
| 46 | A kernel built for N13 is able to run on N15, N13 or D15. | ||
| 47 | |||
| 48 | config CPU_N15 | ||
| 49 | bool "AndesCore N15" | ||
| 50 | config CPU_N13 | ||
| 51 | bool "AndesCore N13" | ||
| 52 | select CPU_CACHE_ALIASING if ANDES_PAGE_SIZE_4KB | ||
| 53 | config CPU_N10 | ||
| 54 | bool "AndesCore N10" | ||
| 55 | select CPU_CACHE_ALIASING | ||
| 56 | config CPU_D15 | ||
| 57 | bool "AndesCore D15" | ||
| 58 | config CPU_D10 | ||
| 59 | bool "AndesCore D10" | ||
| 60 | select CPU_CACHE_ALIASING | ||
| 61 | config CPU_V3 | ||
| 62 | bool "AndesCore v3 compatible" | ||
| 63 | select CPU_CACHE_ALIASING | ||
| 64 | endchoice | ||
| 65 | choice | ||
| 66 | prompt "Paging -- page size " | ||
| 67 | default ANDES_PAGE_SIZE_4KB | ||
| 68 | config ANDES_PAGE_SIZE_4KB | ||
| 69 | bool "use 4KB page size" | ||
| 70 | config ANDES_PAGE_SIZE_8KB | ||
| 71 | bool "use 8KB page size" | ||
| 72 | endchoice | ||
| 73 | |||
| 74 | config CPU_ICACHE_DISABLE | ||
| 75 | bool "Disable I-Cache" | ||
| 76 | help | ||
| 77 | Say Y here to disable the processor instruction cache. Unless | ||
| 78 | you have a reason not to or are unsure, say N. | ||
| 79 | |||
| 80 | config CPU_DCACHE_DISABLE | ||
| 81 | bool "Disable D-Cache" | ||
| 82 | help | ||
| 83 | Say Y here to disable the processor data cache. Unless | ||
| 84 | you have a reason not to or are unsure, say N. | ||
| 85 | |||
| 86 | config CPU_DCACHE_WRITETHROUGH | ||
| 87 | bool "Force write through D-cache" | ||
| 88 | depends on !CPU_DCACHE_DISABLE | ||
| 89 | help | ||
| 90 | Say Y here to use the data cache in writethrough mode. Unless you | ||
| 91 | specifically require this or are unsure, say N. | ||
| 92 | |||
| 93 | config WBNA | ||
| 94 | bool "WBNA" | ||
| 95 | default n | ||
| 96 | help | ||
| 97 | Say Y here to enable write-back memory with no-write-allocation policy. | ||
| 98 | |||
| 99 | config ALIGNMENT_TRAP | ||
| 100 | bool "Kernel support for unaligned access handling by sw" | ||
| 101 | depends on PROC_FS | ||
| 102 | default n | ||
| 103 | help | ||
| 104 | Andes processors cannot load/store information which is not | ||
| 105 | naturally aligned on the bus, i.e., a 4 byte load must start at an | ||
| 106 | address divisible by 4. On 32-bit Andes processors, these non-aligned | ||
| 107 | load/store instructions will be emulated in software if you say Y | ||
| 108 | here, which has a severe performance impact. With an IP-only | ||
| 109 | configuration it is safe to say N, otherwise say Y. | ||
| 110 | |||
| 111 | config HW_SUPPORT_UNALIGNMENT_ACCESS | ||
| 112 | bool "Kernel support for unaligned access handling by hw" | ||
| 113 | depends on !ALIGNMENT_TRAP | ||
| 114 | default n | ||
| 115 | help | ||
| 116 | The load/store word and half-word instructions of Andes processors can | ||
| 117 | access unaligned memory locations without generating Data Alignment | ||
| 118 | Check exceptions. With an IP-only configuration it is safe to say N, | ||
| 119 | otherwise say Y. | ||
| 120 | |||
| 121 | config HIGHMEM | ||
| 122 | bool "High Memory Support" | ||
| 123 | depends on MMU && !CPU_CACHE_ALIASING | ||
| 124 | help | ||
| 125 | The address space of Andes processors is only 4 Gigabytes large | ||
| 126 | and it has to accommodate user address space, kernel address | ||
| 127 | space as well as some memory mapped IO. That means that, if you | ||
| 128 | have a large amount of physical memory and/or IO, not all of the | ||
| 129 | memory can be "permanently mapped" by the kernel. The physical | ||
| 130 | memory that is not permanently mapped is called "high memory". | ||
| 131 | |||
| 132 | Depending on the selected kernel/user memory split, minimum | ||
| 133 | vmalloc space and actual amount of RAM, you may not need this | ||
| 134 | option which should result in a slightly faster kernel. | ||
| 135 | |||
| 136 | If unsure, say N. | ||
| 137 | |||
| 138 | config CACHE_L2 | ||
| 139 | bool "Support L2 cache" | ||
| 140 | default y | ||
| 141 | help | ||
| 142 | Say Y here to enable the L2 cache if your SoC is integrated with an L2CC. | ||
| 143 | If unsure, say N. | ||
| 144 | |||
| 145 | menu "Memory configuration" | ||
| 146 | |||
| 147 | choice | ||
| 148 | prompt "Memory split" | ||
| 149 | depends on MMU | ||
| 150 | default VMSPLIT_3G_OPT | ||
| 151 | help | ||
| 152 | Select the desired split between kernel and user memory. | ||
| 153 | |||
| 154 | If you are not absolutely sure what you are doing, leave this | ||
| 155 | option alone! | ||
| 156 | |||
| 157 | config VMSPLIT_3G | ||
| 158 | bool "3G/1G user/kernel split" | ||
| 159 | config VMSPLIT_3G_OPT | ||
| 160 | bool "3G/1G user/kernel split (for full 1G low memory)" | ||
| 161 | config VMSPLIT_2G | ||
| 162 | bool "2G/2G user/kernel split" | ||
| 163 | config VMSPLIT_1G | ||
| 164 | bool "1G/3G user/kernel split" | ||
| 165 | endchoice | ||
| 166 | |||
| 167 | config PAGE_OFFSET | ||
| 168 | hex | ||
| 169 | default 0x40000000 if VMSPLIT_1G | ||
| 170 | default 0x80000000 if VMSPLIT_2G | ||
| 171 | default 0xB0000000 if VMSPLIT_3G_OPT | ||
| 172 | default 0xC0000000 | ||
| 173 | |||
| 174 | endmenu | ||
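The cache-aliasing help texts above come down to a single arithmetic rule: a VIPT data cache can alias only when the bytes covered by one cache way exceed the page size, because two virtual aliases of the same physical page may then index different cache lines. A minimal C sketch of that rule (the function and parameter names are illustrative and not part of this patch):

	#include <stdbool.h>

	/* True when aliasing handling (CPU_CACHE_ALIASING) is needed. */
	static bool vipt_dcache_may_alias(unsigned int cache_size_bytes,
					  unsigned int nr_ways,
					  unsigned int page_size_bytes)
	{
		unsigned int way_size = cache_size_bytes / nr_ways;

		return way_size > page_size_bytes;
	}

	/*
	 * Example: a 32 KiB, 4-way cache has an 8 KiB way, so it aliases
	 * with 4 KiB pages but not with 8 KiB pages.  This is consistent
	 * with CPU_N13 above selecting CPU_CACHE_ALIASING only when
	 * ANDES_PAGE_SIZE_4KB is chosen.
	 */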
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile new file mode 100644 index 000000000000..91f933d5a962 --- /dev/null +++ b/arch/nds32/Makefile | |||
| @@ -0,0 +1,67 @@ | |||
| 1 | LDFLAGS_vmlinux := --no-undefined -X | ||
| 2 | OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment -S | ||
| 3 | |||
| 4 | KBUILD_DEFCONFIG := defconfig | ||
| 5 | |||
| 6 | comma = , | ||
| 7 | |||
| 8 | KBUILD_CFLAGS += $(call cc-option, -mno-sched-prolog-epilog) | ||
| 9 | KBUILD_CFLAGS += -mcmodel=large | ||
| 10 | |||
| 11 | KBUILD_CFLAGS +=$(arch-y) $(tune-y) | ||
| 12 | KBUILD_AFLAGS +=$(arch-y) $(tune-y) | ||
| 13 | |||
| 14 | #Default value | ||
| 15 | head-y := arch/nds32/kernel/head.o | ||
| 16 | textaddr-y := $(CONFIG_PAGE_OFFSET)+0xc000 | ||
| 17 | |||
| 18 | TEXTADDR := $(textaddr-y) | ||
| 19 | |||
| 20 | export TEXTADDR | ||
| 21 | |||
| 22 | |||
| 23 | # If we have a machine-specific directory, then include it in the build. | ||
| 24 | core-y += arch/nds32/kernel/ arch/nds32/mm/ | ||
| 25 | libs-y += arch/nds32/lib/ | ||
| 26 | LIBGCC_PATH := \ | ||
| 27 | $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name) | ||
| 28 | libs-y += $(LIBGCC_PATH) | ||
| 29 | |||
| 30 | ifneq '$(CONFIG_NDS32_BUILTIN_DTB)' '""' | ||
| 31 | BUILTIN_DTB := y | ||
| 32 | else | ||
| 33 | BUILTIN_DTB := n | ||
| 34 | endif | ||
| 35 | |||
| 36 | ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
| 37 | KBUILD_CFLAGS += $(call cc-option, -EL) | ||
| 38 | else | ||
| 39 | KBUILD_CFLAGS += $(call cc-option, -EB) | ||
| 40 | endif | ||
| 41 | |||
| 42 | boot := arch/nds32/boot | ||
| 43 | core-$(BUILTIN_DTB) += $(boot)/dts/ | ||
| 44 | |||
| 45 | .PHONY: FORCE | ||
| 46 | |||
| 47 | Image: vmlinux | ||
| 48 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | ||
| 49 | |||
| 50 | |||
| 51 | PHONY += vdso_install | ||
| 52 | vdso_install: | ||
| 53 | $(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso $@ | ||
| 54 | |||
| 55 | prepare: vdso_prepare | ||
| 56 | vdso_prepare: prepare0 | ||
| 57 | $(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h | ||
| 58 | |||
| 59 | CLEAN_FILES += include/asm-nds32/constants.h* | ||
| 60 | |||
| 61 | # We use MRPROPER_FILES and CLEAN_FILES now | ||
| 62 | archclean: | ||
| 63 | $(Q)$(MAKE) $(clean)=$(boot) | ||
| 64 | |||
| 65 | define archhelp | ||
| 66 | echo ' Image - kernel image (arch/$(ARCH)/boot/Image)' | ||
| 67 | endef | ||
diff --git a/arch/nds32/boot/Makefile b/arch/nds32/boot/Makefile new file mode 100644 index 000000000000..3f9b86f68d8f --- /dev/null +++ b/arch/nds32/boot/Makefile | |||
| @@ -0,0 +1,15 @@ | |||
| 1 | targets := Image Image.gz | ||
| 2 | |||
| 3 | $(obj)/Image: vmlinux FORCE | ||
| 4 | $(call if_changed,objcopy) | ||
| 5 | |||
| 6 | $(obj)/Image.gz: $(obj)/Image FORCE | ||
| 7 | $(call if_changed,gzip) | ||
| 8 | |||
| 9 | install: $(obj)/Image | ||
| 10 | $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ | ||
| 11 | $(obj)/Image System.map "$(INSTALL_PATH)" | ||
| 12 | |||
| 13 | zinstall: $(obj)/Image.gz | ||
| 14 | $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ | ||
| 15 | $(obj)/Image.gz System.map "$(INSTALL_PATH)" | ||
diff --git a/arch/nds32/boot/dts/Makefile b/arch/nds32/boot/dts/Makefile new file mode 100644 index 000000000000..d31faa8a1d50 --- /dev/null +++ b/arch/nds32/boot/dts/Makefile | |||
| @@ -0,0 +1,8 @@ | |||
| 1 | ifneq '$(CONFIG_NDS32_BUILTIN_DTB)' '""' | ||
| 2 | BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_NDS32_BUILTIN_DTB)).dtb.o | ||
| 3 | else | ||
| 4 | BUILTIN_DTB := | ||
| 5 | endif | ||
| 6 | obj-$(CONFIG_OF) += $(BUILTIN_DTB) | ||
| 7 | |||
| 8 | clean-files := *.dtb *.dtb.S | ||
diff --git a/arch/nds32/boot/dts/ae3xx.dts b/arch/nds32/boot/dts/ae3xx.dts new file mode 100644 index 000000000000..bb39749a6673 --- /dev/null +++ b/arch/nds32/boot/dts/ae3xx.dts | |||
| @@ -0,0 +1,85 @@ | |||
| 1 | /dts-v1/; | ||
| 2 | / { | ||
| 3 | compatible = "andestech,ae3xx"; | ||
| 4 | #address-cells = <1>; | ||
| 5 | #size-cells = <1>; | ||
| 6 | interrupt-parent = <&intc>; | ||
| 7 | |||
| 8 | chosen { | ||
| 9 | stdout-path = &serial0; | ||
| 10 | }; | ||
| 11 | |||
| 12 | memory@0 { | ||
| 13 | device_type = "memory"; | ||
| 14 | reg = <0x00000000 0x40000000>; | ||
| 15 | }; | ||
| 16 | |||
| 17 | cpus { | ||
| 18 | #address-cells = <1>; | ||
| 19 | #size-cells = <0>; | ||
| 20 | cpu@0 { | ||
| 21 | device_type = "cpu"; | ||
| 22 | compatible = "andestech,n13", "andestech,nds32v3"; | ||
| 23 | reg = <0>; | ||
| 24 | clock-frequency = <60000000>; | ||
| 25 | next-level-cache = <&L2>; | ||
| 26 | }; | ||
| 27 | }; | ||
| 28 | |||
| 29 | intc: interrupt-controller { | ||
| 30 | compatible = "andestech,ativic32"; | ||
| 31 | #interrupt-cells = <1>; | ||
| 32 | interrupt-controller; | ||
| 33 | }; | ||
| 34 | |||
| 35 | clock: clk { | ||
| 36 | #clock-cells = <0>; | ||
| 37 | compatible = "fixed-clock"; | ||
| 38 | clock-frequency = <30000000>; | ||
| 39 | }; | ||
| 40 | |||
| 41 | apb { | ||
| 42 | compatible = "simple-bus"; | ||
| 43 | #address-cells = <1>; | ||
| 44 | #size-cells = <1>; | ||
| 45 | ranges; | ||
| 46 | |||
| 47 | serial0: serial@f0300000 { | ||
| 48 | compatible = "andestech,uart16550", "ns16550a"; | ||
| 49 | reg = <0xf0300000 0x1000>; | ||
| 50 | interrupts = <8>; | ||
| 51 | clock-frequency = <14745600>; | ||
| 52 | reg-shift = <2>; | ||
| 53 | reg-offset = <32>; | ||
| 54 | no-loopback-test = <1>; | ||
| 55 | }; | ||
| 56 | |||
| 57 | timer0: timer@f0400000 { | ||
| 58 | compatible = "andestech,atcpit100"; | ||
| 59 | reg = <0xf0400000 0x1000>; | ||
| 60 | interrupts = <2>; | ||
| 61 | clocks = <&clock>; | ||
| 62 | clock-names = "PCLK"; | ||
| 63 | }; | ||
| 64 | }; | ||
| 65 | |||
| 66 | ahb { | ||
| 67 | compatible = "simple-bus"; | ||
| 68 | #address-cells = <1>; | ||
| 69 | #size-cells = <1>; | ||
| 70 | ranges; | ||
| 71 | |||
| 72 | L2: cache-controller@e0500000 { | ||
| 73 | compatible = "andestech,atl2c"; | ||
| 74 | reg = <0xe0500000 0x1000>; | ||
| 75 | cache-unified; | ||
| 76 | cache-level = <2>; | ||
| 77 | }; | ||
| 78 | |||
| 79 | mac0: ethernet@e0100000 { | ||
| 80 | compatible = "andestech,atmac100"; | ||
| 81 | reg = <0xe0100000 0x1000>; | ||
| 82 | interrupts = <18>; | ||
| 83 | }; | ||
| 84 | }; | ||
| 85 | }; | ||
diff --git a/arch/nds32/configs/defconfig b/arch/nds32/configs/defconfig new file mode 100644 index 000000000000..2546d8770785 --- /dev/null +++ b/arch/nds32/configs/defconfig | |||
| @@ -0,0 +1,104 @@ | |||
| 1 | CONFIG_CROSS_COMPILE="nds32le-linux-" | ||
| 2 | CONFIG_SYSVIPC=y | ||
| 3 | CONFIG_POSIX_MQUEUE=y | ||
| 4 | CONFIG_HIGH_RES_TIMERS=y | ||
| 5 | CONFIG_BSD_PROCESS_ACCT=y | ||
| 6 | CONFIG_BSD_PROCESS_ACCT_V3=y | ||
| 7 | CONFIG_IKCONFIG=y | ||
| 8 | CONFIG_IKCONFIG_PROC=y | ||
| 9 | CONFIG_LOG_BUF_SHIFT=14 | ||
| 10 | CONFIG_USER_NS=y | ||
| 11 | CONFIG_RELAY=y | ||
| 12 | CONFIG_BLK_DEV_INITRD=y | ||
| 13 | CONFIG_KALLSYMS_ALL=y | ||
| 14 | CONFIG_PROFILING=y | ||
| 15 | CONFIG_MODULES=y | ||
| 16 | CONFIG_MODULE_UNLOAD=y | ||
| 17 | # CONFIG_BLK_DEV_BSG is not set | ||
| 18 | # CONFIG_CACHE_L2 is not set | ||
| 19 | CONFIG_PREEMPT=y | ||
| 20 | # CONFIG_COMPACTION is not set | ||
| 21 | CONFIG_HZ_100=y | ||
| 22 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | ||
| 23 | CONFIG_NET=y | ||
| 24 | CONFIG_PACKET=y | ||
| 25 | CONFIG_UNIX=y | ||
| 26 | CONFIG_NET_KEY=y | ||
| 27 | CONFIG_INET=y | ||
| 28 | CONFIG_IP_MULTICAST=y | ||
| 29 | # CONFIG_INET_XFRM_MODE_TRANSPORT is not set | ||
| 30 | # CONFIG_INET_XFRM_MODE_TUNNEL is not set | ||
| 31 | # CONFIG_INET_XFRM_MODE_BEET is not set | ||
| 32 | # CONFIG_INET_DIAG is not set | ||
| 33 | # CONFIG_IPV6 is not set | ||
| 34 | # CONFIG_BLK_DEV is not set | ||
| 35 | CONFIG_NETDEVICES=y | ||
| 36 | # CONFIG_NET_CADENCE is not set | ||
| 37 | # CONFIG_NET_VENDOR_BROADCOM is not set | ||
| 38 | CONFIG_FTMAC100=y | ||
| 39 | # CONFIG_NET_VENDOR_INTEL is not set | ||
| 40 | # CONFIG_NET_VENDOR_MARVELL is not set | ||
| 41 | # CONFIG_NET_VENDOR_MICREL is not set | ||
| 42 | # CONFIG_NET_VENDOR_NATSEMI is not set | ||
| 43 | # CONFIG_NET_VENDOR_SEEQ is not set | ||
| 44 | # CONFIG_NET_VENDOR_STMICRO is not set | ||
| 45 | # CONFIG_NET_VENDOR_WIZNET is not set | ||
| 46 | CONFIG_INPUT_EVDEV=y | ||
| 47 | # CONFIG_INPUT_KEYBOARD is not set | ||
| 48 | # CONFIG_INPUT_MOUSE is not set | ||
| 49 | CONFIG_INPUT_TOUCHSCREEN=y | ||
| 50 | # CONFIG_SERIO is not set | ||
| 51 | CONFIG_VT_HW_CONSOLE_BINDING=y | ||
| 52 | CONFIG_SERIAL_8250=y | ||
| 53 | # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set | ||
| 54 | CONFIG_SERIAL_8250_CONSOLE=y | ||
| 55 | CONFIG_SERIAL_8250_NR_UARTS=3 | ||
| 56 | CONFIG_SERIAL_8250_RUNTIME_UARTS=3 | ||
| 57 | CONFIG_SERIAL_OF_PLATFORM=y | ||
| 58 | # CONFIG_HW_RANDOM is not set | ||
| 59 | # CONFIG_HWMON is not set | ||
| 60 | # CONFIG_HID_A4TECH is not set | ||
| 61 | # CONFIG_HID_APPLE is not set | ||
| 62 | # CONFIG_HID_BELKIN is not set | ||
| 63 | # CONFIG_HID_CHERRY is not set | ||
| 64 | # CONFIG_HID_CHICONY is not set | ||
| 65 | # CONFIG_HID_CYPRESS is not set | ||
| 66 | # CONFIG_HID_EZKEY is not set | ||
| 67 | # CONFIG_HID_ITE is not set | ||
| 68 | # CONFIG_HID_KENSINGTON is not set | ||
| 69 | # CONFIG_HID_LOGITECH is not set | ||
| 70 | # CONFIG_HID_MICROSOFT is not set | ||
| 71 | # CONFIG_HID_MONTEREY is not set | ||
| 72 | # CONFIG_USB_SUPPORT is not set | ||
| 73 | CONFIG_GENERIC_PHY=y | ||
| 74 | CONFIG_EXT4_FS=y | ||
| 75 | CONFIG_EXT4_FS_POSIX_ACL=y | ||
| 76 | CONFIG_EXT4_FS_SECURITY=y | ||
| 77 | CONFIG_EXT4_ENCRYPTION=y | ||
| 78 | CONFIG_FUSE_FS=y | ||
| 79 | CONFIG_MSDOS_FS=y | ||
| 80 | CONFIG_VFAT_FS=y | ||
| 81 | CONFIG_TMPFS=y | ||
| 82 | CONFIG_TMPFS_POSIX_ACL=y | ||
| 83 | CONFIG_CONFIGFS_FS=y | ||
| 84 | CONFIG_NFS_FS=y | ||
| 85 | CONFIG_NFS_V3_ACL=y | ||
| 86 | CONFIG_NFS_V4=y | ||
| 87 | CONFIG_NFS_V4_1=y | ||
| 88 | CONFIG_NFS_USE_LEGACY_DNS=y | ||
| 89 | CONFIG_NLS_CODEPAGE_437=y | ||
| 90 | CONFIG_NLS_ISO8859_1=y | ||
| 91 | CONFIG_DEBUG_INFO=y | ||
| 92 | CONFIG_DEBUG_INFO_DWARF4=y | ||
| 93 | CONFIG_GDB_SCRIPTS=y | ||
| 94 | CONFIG_READABLE_ASM=y | ||
| 95 | CONFIG_HEADERS_CHECK=y | ||
| 96 | CONFIG_DEBUG_SECTION_MISMATCH=y | ||
| 97 | CONFIG_MAGIC_SYSRQ=y | ||
| 98 | CONFIG_DEBUG_KERNEL=y | ||
| 99 | CONFIG_PANIC_ON_OOPS=y | ||
| 100 | # CONFIG_SCHED_DEBUG is not set | ||
| 101 | # CONFIG_DEBUG_PREEMPT is not set | ||
| 102 | CONFIG_STACKTRACE=y | ||
| 103 | CONFIG_RCU_CPU_STALL_TIMEOUT=300 | ||
| 104 | # CONFIG_CRYPTO_HW is not set | ||
diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild new file mode 100644 index 000000000000..06bdf8167f5a --- /dev/null +++ b/arch/nds32/include/asm/Kbuild | |||
| @@ -0,0 +1,55 @@ | |||
| 1 | generic-y += asm-offsets.h | ||
| 2 | generic-y += atomic.h | ||
| 3 | generic-y += bitops.h | ||
| 4 | generic-y += bitsperlong.h | ||
| 5 | generic-y += bpf_perf_event.h | ||
| 6 | generic-y += bug.h | ||
| 7 | generic-y += bugs.h | ||
| 8 | generic-y += checksum.h | ||
| 9 | generic-y += clkdev.h | ||
| 10 | generic-y += cmpxchg.h | ||
| 11 | generic-y += cmpxchg-local.h | ||
| 12 | generic-y += cputime.h | ||
| 13 | generic-y += device.h | ||
| 14 | generic-y += div64.h | ||
| 15 | generic-y += dma.h | ||
| 16 | generic-y += emergency-restart.h | ||
| 17 | generic-y += errno.h | ||
| 18 | generic-y += exec.h | ||
| 19 | generic-y += fb.h | ||
| 20 | generic-y += fcntl.h | ||
| 21 | generic-y += ftrace.h | ||
| 22 | generic-y += gpio.h | ||
| 23 | generic-y += hardirq.h | ||
| 24 | generic-y += hw_irq.h | ||
| 25 | generic-y += ioctl.h | ||
| 26 | generic-y += ioctls.h | ||
| 27 | generic-y += irq.h | ||
| 28 | generic-y += irq_regs.h | ||
| 29 | generic-y += irq_work.h | ||
| 30 | generic-y += kdebug.h | ||
| 31 | generic-y += kmap_types.h | ||
| 32 | generic-y += kprobes.h | ||
| 33 | generic-y += kvm_para.h | ||
| 34 | generic-y += limits.h | ||
| 35 | generic-y += local.h | ||
| 36 | generic-y += mm-arch-hooks.h | ||
| 37 | generic-y += mman.h | ||
| 38 | generic-y += parport.h | ||
| 39 | generic-y += pci.h | ||
| 40 | generic-y += percpu.h | ||
| 41 | generic-y += preempt.h | ||
| 42 | generic-y += sections.h | ||
| 43 | generic-y += segment.h | ||
| 44 | generic-y += serial.h | ||
| 45 | generic-y += shmbuf.h | ||
| 46 | generic-y += sizes.h | ||
| 47 | generic-y += stat.h | ||
| 48 | generic-y += switch_to.h | ||
| 49 | generic-y += timex.h | ||
| 50 | generic-y += topology.h | ||
| 51 | generic-y += trace_clock.h | ||
| 52 | generic-y += unaligned.h | ||
| 53 | generic-y += user.h | ||
| 54 | generic-y += vga.h | ||
| 55 | generic-y += word-at-a-time.h | ||
diff --git a/arch/nds32/include/asm/assembler.h b/arch/nds32/include/asm/assembler.h new file mode 100644 index 000000000000..c3855782a541 --- /dev/null +++ b/arch/nds32/include/asm/assembler.h | |||
| @@ -0,0 +1,39 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __NDS32_ASSEMBLER_H__ | ||
| 5 | #define __NDS32_ASSEMBLER_H__ | ||
| 6 | |||
| 7 | .macro gie_disable | ||
| 8 | setgie.d | ||
| 9 | dsb | ||
| 10 | .endm | ||
| 11 | |||
| 12 | .macro gie_enable | ||
| 13 | setgie.e | ||
| 14 | dsb | ||
| 15 | .endm | ||
| 16 | |||
| 17 | .macro gie_save oldpsw | ||
| 18 | mfsr \oldpsw, $ir0 | ||
| 19 | setgie.d | ||
| 20 | dsb | ||
| 21 | .endm | ||
| 22 | |||
| 23 | .macro gie_restore oldpsw | ||
| 24 | andi \oldpsw, \oldpsw, #0x1 | ||
| 25 | beqz \oldpsw, 7001f | ||
| 26 | setgie.e | ||
| 27 | dsb | ||
| 28 | 7001: | ||
| 29 | .endm | ||
| 30 | |||
| 31 | |||
| 32 | #define USER(insn, reg, addr, opr) \ | ||
| 33 | 9999: insn reg, addr, opr; \ | ||
| 34 | .section __ex_table,"a"; \ | ||
| 35 | .align 3; \ | ||
| 36 | .long 9999b, 9001f; \ | ||
| 37 | .previous | ||
| 38 | |||
| 39 | #endif /* __NDS32_ASSEMBLER_H__ */ | ||
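The USER() macro above wraps a single user-space access and emits one record into the __ex_table section: the address of the access (local label 9999) paired with the address of a fixup label (9001) that each caller is expected to define. A minimal C sketch of how such records are consumed at fault time, assuming the conventional two-word { insn, fixup } layout (the names below are illustrative, not part of this patch):

	struct exception_table_entry {
		unsigned long insn;	/* address of the 9999: user access */
		unsigned long fixup;	/* address of the 9001: fixup code  */
	};

	/*
	 * Linear scan for clarity; a real implementation would bisect a
	 * sorted table.  Returns the fixup address to resume at, or 0 if
	 * the faulting instruction is not a marked user access.
	 */
	static unsigned long search_fixup(const struct exception_table_entry *start,
					  const struct exception_table_entry *end,
					  unsigned long faulting_pc)
	{
		const struct exception_table_entry *e;

		for (e = start; e < end; e++)
			if (e->insn == faulting_pc)
				return e->fixup;
		return 0;
	}

If a match is found, the fault handler redirects execution to the fixup code instead of treating the fault as fatal.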
diff --git a/arch/nds32/include/asm/barrier.h b/arch/nds32/include/asm/barrier.h new file mode 100644 index 000000000000..faafc373ea6c --- /dev/null +++ b/arch/nds32/include/asm/barrier.h | |||
| @@ -0,0 +1,15 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __NDS32_ASM_BARRIER_H | ||
| 5 | #define __NDS32_ASM_BARRIER_H | ||
| 6 | |||
| 7 | #ifndef __ASSEMBLY__ | ||
| 8 | #define mb() asm volatile("msync all":::"memory") | ||
| 9 | #define rmb() asm volatile("msync all":::"memory") | ||
| 10 | #define wmb() asm volatile("msync store":::"memory") | ||
| 11 | #include <asm-generic/barrier.h> | ||
| 12 | |||
| 13 | #endif /* __ASSEMBLY__ */ | ||
| 14 | |||
| 15 | #endif /* __NDS32_ASM_BARRIER_H */ | ||
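These macros map onto the nds32 "msync" instruction: mb() and rmb() use "msync all", while wmb() only needs "msync store". A minimal producer/consumer sketch showing where the barriers matter (the variables and functions are hypothetical, not part of this patch):

	static int payload;
	static volatile int ready;

	static void produce(int value)
	{
		payload = value;
		wmb();		/* make the data visible before the flag */
		ready = 1;
	}

	static int consume(void)
	{
		while (!ready)
			;	/* spin until the producer sets the flag */
		rmb();		/* don't let the data read pass the flag read */
		return payload;
	}

Without the barriers, the store to payload could become visible after ready, or the load of payload could be satisfied before ready is observed as set.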
diff --git a/arch/nds32/include/asm/bitfield.h b/arch/nds32/include/asm/bitfield.h new file mode 100644 index 000000000000..c73f71d67744 --- /dev/null +++ b/arch/nds32/include/asm/bitfield.h | |||
| @@ -0,0 +1,963 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __NDS32_BITFIELD_H__ | ||
| 5 | #define __NDS32_BITFIELD_H__ | ||
| 6 | /****************************************************************************** | ||
| 7 | * cr0: CPU_VER (CPU Version Register) | ||
| 8 | *****************************************************************************/ | ||
| 9 | #define CPU_VER_offCFGID 0 /* Minor configuration */ | ||
| 10 | #define CPU_VER_offREV 16 /* Revision of the CPU version */ | ||
| 11 | #define CPU_VER_offCPUID 24 /* Major CPU versions */ | ||
| 12 | |||
| 13 | #define CPU_VER_mskCFGID ( 0xFFFF << CPU_VER_offCFGID ) | ||
| 14 | #define CPU_VER_mskREV ( 0xFF << CPU_VER_offREV ) | ||
| 15 | #define CPU_VER_mskCPUID ( 0xFF << CPU_VER_offCPUID ) | ||
| 16 | |||
| 17 | /****************************************************************************** | ||
| 18 | * cr1: ICM_CFG (Instruction Cache/Memory Configuration Register) | ||
| 19 | *****************************************************************************/ | ||
| 20 | #define ICM_CFG_offISET 0 /* I-cache sets (# of cache lines) per way */ | ||
| 21 | #define ICM_CFG_offIWAY 3 /* I-cache ways */ | ||
| 22 | #define ICM_CFG_offISZ 6 /* I-cache line size */ | ||
| 23 | #define ICM_CFG_offILCK 9 /* I-cache locking support */ | ||
| 24 | #define ICM_CFG_offILMB 10 /* On-chip ILM banks */ | ||
| 25 | #define ICM_CFG_offBSAV 13 /* ILM base register alignment version */ | ||
| 26 | /* bit 15:31 reserved */ | ||
| 27 | |||
| 28 | #define ICM_CFG_mskISET ( 0x7 << ICM_CFG_offISET ) | ||
| 29 | #define ICM_CFG_mskIWAY ( 0x7 << ICM_CFG_offIWAY ) | ||
| 30 | #define ICM_CFG_mskISZ ( 0x7 << ICM_CFG_offISZ ) | ||
| 31 | #define ICM_CFG_mskILCK ( 0x1 << ICM_CFG_offILCK ) | ||
| 32 | #define ICM_CFG_mskILMB ( 0x7 << ICM_CFG_offILMB ) | ||
| 33 | #define ICM_CFG_mskBSAV ( 0x3 << ICM_CFG_offBSAV ) | ||
| 34 | |||
| 35 | /****************************************************************************** | ||
| 36 | * cr2: DCM_CFG (Data Cache/Memory Configuration Register) | ||
| 37 | *****************************************************************************/ | ||
| 38 | #define DCM_CFG_offDSET 0 /* D-cache sets (# of cache lines) per way */ | ||
| 39 | #define DCM_CFG_offDWAY 3 /* D-cache ways */ | ||
| 40 | #define DCM_CFG_offDSZ 6 /* D-cache line size */ | ||
| 41 | #define DCM_CFG_offDLCK 9 /* D-cache locking support */ | ||
| 42 | #define DCM_CFG_offDLMB 10 /* On-chip DLM banks */ | ||
| 43 | #define DCM_CFG_offBSAV 13 /* DLM base register alignment version */ | ||
| 44 | /* bit 15:31 reserved */ | ||
| 45 | |||
| 46 | #define DCM_CFG_mskDSET ( 0x7 << DCM_CFG_offDSET ) | ||
| 47 | #define DCM_CFG_mskDWAY ( 0x7 << DCM_CFG_offDWAY ) | ||
| 48 | #define DCM_CFG_mskDSZ ( 0x7 << DCM_CFG_offDSZ ) | ||
| 49 | #define DCM_CFG_mskDLCK ( 0x1 << DCM_CFG_offDLCK ) | ||
| 50 | #define DCM_CFG_mskDLMB ( 0x7 << DCM_CFG_offDLMB ) | ||
| 51 | #define DCM_CFG_mskBSAV ( 0x3 << DCM_CFG_offBSAV ) | ||
| 52 | |||
| 53 | /****************************************************************************** | ||
| 54 | * cr3: MMU_CFG (MMU Configuration Register) | ||
| 55 | *****************************************************************************/ | ||
| 56 | #define MMU_CFG_offMMPS 0 /* Memory management protection scheme */ | ||
| 57 | #define MMU_CFG_offMMPV 2 /* Memory management protection version number */ | ||
| 58 | #define MMU_CFG_offFATB 7 /* Fully-associative or non-fully-associative TLB */ | ||
| 59 | |||
| 60 | #define MMU_CFG_offTBW 8 /* TLB ways(non-associative) TBS */ | ||
| 61 | #define MMU_CFG_offTBS 11 /* TLB sets per way(non-associative) TBS */ | ||
| 62 | /* bit 14:14 reserved */ | ||
| 63 | |||
| 64 | #define MMU_CFG_offEP8MIN4 15 /* 8KB page supported while minimum page is 4KB */ | ||
| 65 | #define MMU_CFG_offfEPSZ 16 /* Extra page size supported */ | ||
| 66 | #define MMU_CFG_offTLBLCK 24 /* TLB locking support */ | ||
| 67 | #define MMU_CFG_offHPTWK 25 /* Hardware Page Table Walker implemented */ | ||
| 68 | #define MMU_CFG_offDE 26 /* Default endian */ | ||
| 69 | #define MMU_CFG_offNTPT 27 /* Partitions for non-translated attributes */ | ||
| 70 | #define MMU_CFG_offIVTB 28 /* Invisible TLB */ | ||
| 71 | #define MMU_CFG_offVLPT 29 /* VLPT for fast TLB fill handling implemented */ | ||
| 72 | #define MMU_CFG_offNTME 30 /* Non-translated VA to PA mapping */ | ||
| 73 | /* bit 31 reserved */ | ||
| 74 | |||
| 75 | #define MMU_CFG_mskMMPS ( 0x3 << MMU_CFG_offMMPS ) | ||
| 76 | #define MMU_CFG_mskMMPV ( 0x1F << MMU_CFG_offMMPV ) | ||
| 77 | #define MMU_CFG_mskFATB ( 0x1 << MMU_CFG_offFATB ) | ||
| 78 | #define MMU_CFG_mskTBW ( 0x7 << MMU_CFG_offTBW ) | ||
| 79 | #define MMU_CFG_mskTBS ( 0x7 << MMU_CFG_offTBS ) | ||
| 80 | #define MMU_CFG_mskEP8MIN4 ( 0x1 << MMU_CFG_offEP8MIN4 ) | ||
| 81 | #define MMU_CFG_mskfEPSZ ( 0xFF << MMU_CFG_offfEPSZ ) | ||
| 82 | #define MMU_CFG_mskTLBLCK ( 0x1 << MMU_CFG_offTLBLCK ) | ||
| 83 | #define MMU_CFG_mskHPTWK ( 0x1 << MMU_CFG_offHPTWK ) | ||
| 84 | #define MMU_CFG_mskDE ( 0x1 << MMU_CFG_offDE ) | ||
| 85 | #define MMU_CFG_mskNTPT ( 0x1 << MMU_CFG_offNTPT ) | ||
| 86 | #define MMU_CFG_mskIVTB ( 0x1 << MMU_CFG_offIVTB ) | ||
| 87 | #define MMU_CFG_mskVLPT ( 0x1 << MMU_CFG_offVLPT ) | ||
| 88 | #define MMU_CFG_mskNTME ( 0x1 << MMU_CFG_offNTME ) | ||
| 89 | |||
| 90 | /****************************************************************************** | ||
| 91 | * cr4: MSC_CFG (Misc Configuration Register) | ||
| 92 | *****************************************************************************/ | ||
| 93 | #define MSC_CFG_offEDM 0 | ||
| 94 | #define MSC_CFG_offLMDMA 1 | ||
| 95 | #define MSC_CFG_offPFM 2 | ||
| 96 | #define MSC_CFG_offHSMP 3 | ||
| 97 | #define MSC_CFG_offTRACE 4 | ||
| 98 | #define MSC_CFG_offDIV 5 | ||
| 99 | #define MSC_CFG_offMAC 6 | ||
| 100 | #define MSC_CFG_offAUDIO 7 | ||
| 101 | #define MSC_CFG_offL2C 9 | ||
| 102 | #define MSC_CFG_offRDREG 10 | ||
| 103 | #define MSC_CFG_offADR24 11 | ||
| 104 | #define MSC_CFG_offINTLC 12 | ||
| 105 | #define MSC_CFG_offBASEV 13 | ||
| 106 | #define MSC_CFG_offNOD 16 | ||
| 107 | /* bit 13:31 reserved */ | ||
| 108 | |||
| 109 | #define MSC_CFG_mskEDM ( 0x1 << MSC_CFG_offEDM ) | ||
| 110 | #define MSC_CFG_mskLMDMA ( 0x1 << MSC_CFG_offLMDMA ) | ||
| 111 | #define MSC_CFG_mskPFM ( 0x1 << MSC_CFG_offPFM ) | ||
| 112 | #define MSC_CFG_mskHSMP ( 0x1 << MSC_CFG_offHSMP ) | ||
| 113 | #define MSC_CFG_mskTRACE ( 0x1 << MSC_CFG_offTRACE ) | ||
| 114 | #define MSC_CFG_mskDIV ( 0x1 << MSC_CFG_offDIV ) | ||
| 115 | #define MSC_CFG_mskMAC ( 0x1 << MSC_CFG_offMAC ) | ||
| 116 | #define MSC_CFG_mskAUDIO ( 0x3 << MSC_CFG_offAUDIO ) | ||
| 117 | #define MSC_CFG_mskL2C ( 0x1 << MSC_CFG_offL2C ) | ||
| 118 | #define MSC_CFG_mskRDREG ( 0x1 << MSC_CFG_offRDREG ) | ||
| 119 | #define MSC_CFG_mskADR24 ( 0x1 << MSC_CFG_offADR24 ) | ||
| 120 | #define MSC_CFG_mskINTLC ( 0x1 << MSC_CFG_offINTLC ) | ||
| 121 | #define MSC_CFG_mskBASEV ( 0x7 << MSC_CFG_offBASEV ) | ||
| 122 | #define MSC_CFG_mskNOD ( 0x1 << MSC_CFG_offNOD ) | ||
| 123 | |||
| 124 | /****************************************************************************** | ||
| 125 | * cr5: CORE_CFG (Core Identification Register) | ||
| 126 | *****************************************************************************/ | ||
| 127 | #define CORE_ID_offCOREID 0 | ||
| 128 | /* bit 4:31 reserved */ | ||
| 129 | |||
| 130 | #define CORE_ID_mskCOREID ( 0xF << CORE_ID_offCOREID ) | ||
| 131 | |||
| 132 | /****************************************************************************** | ||
| 133 | * cr6: FUCOP_EXIST (FPU and Coprocessor Existence Configuration Register) | ||
| 134 | *****************************************************************************/ | ||
| 135 | #define FUCOP_EXIST_offCP0EX 0 | ||
| 136 | #define FUCOP_EXIST_offCP1EX 1 | ||
| 137 | #define FUCOP_EXIST_offCP2EX 2 | ||
| 138 | #define FUCOP_EXIST_offCP3EX 3 | ||
| 139 | #define FUCOP_EXIST_offCP0ISFPU 31 | ||
| 140 | |||
| 141 | #define FUCOP_EXIST_mskCP0EX ( 0x1 << FUCOP_EXIST_offCP0EX ) | ||
| 142 | #define FUCOP_EXIST_mskCP1EX ( 0x1 << FUCOP_EXIST_offCP1EX ) | ||
| 143 | #define FUCOP_EXIST_mskCP2EX ( 0x1 << FUCOP_EXIST_offCP2EX ) | ||
| 144 | #define FUCOP_EXIST_mskCP3EX ( 0x1 << FUCOP_EXIST_offCP3EX ) | ||
| 145 | #define FUCOP_EXIST_mskCP0ISFPU ( 0x1 << FUCOP_EXIST_offCP0ISFPU ) | ||
| 146 | |||
| 147 | /****************************************************************************** | ||
| 148 | * ir0: PSW (Processor Status Word Register) | ||
| 149 | * ir1: IPSW (Interruption PSW Register) | ||
| 150 | * ir2: P_IPSW (Previous IPSW Register) | ||
| 151 | *****************************************************************************/ | ||
| 152 | #define PSW_offGIE 0 /* Global Interrupt Enable */ | ||
| 153 | #define PSW_offINTL 1 /* Interruption Stack Level */ | ||
| 154 | #define PSW_offPOM 3 /* Processor Operation Mode, User/Superuser */ | ||
| 155 | #define PSW_offBE 5 /* Endianness for data memory access, 1:MSB, 0:LSB */ | ||
| 156 | #define PSW_offIT 6 /* Enable instruction address translation */ | ||
| 157 | #define PSW_offDT 7 /* Enable data address translation */ | ||
| 158 | #define PSW_offIME 8 /* Instruction Machine Error flag */ | ||
| 159 | #define PSW_offDME 9 /* Data Machine Error flag */ | ||
| 160 | #define PSW_offDEX 10 /* Debug Exception */ | ||
| 161 | #define PSW_offHSS 11 /* Hardware Single Stepping */ | ||
| 162 | #define PSW_offDRBE 12 /* Device Register Endian Mode */ | ||
| 163 | #define PSW_offAEN 13 /* Audio ISA special feature */ | ||
| 164 | #define PSW_offWBNA 14 /* Write Back Non-Allocate */ | ||
| 165 | #define PSW_offIFCON 15 /* IFC On */ | ||
| 166 | #define PSW_offCPL 16 /* Current Priority Level */ | ||
| 167 | /* bit 19:31 reserved */ | ||
| 168 | |||
| 169 | #define PSW_mskGIE ( 0x1 << PSW_offGIE ) | ||
| 170 | #define PSW_mskINTL ( 0x3 << PSW_offINTL ) | ||
| 171 | #define PSW_mskPOM ( 0x3 << PSW_offPOM ) | ||
| 172 | #define PSW_mskBE ( 0x1 << PSW_offBE ) | ||
| 173 | #define PSW_mskIT ( 0x1 << PSW_offIT ) | ||
| 174 | #define PSW_mskDT ( 0x1 << PSW_offDT ) | ||
| 175 | #define PSW_mskIME ( 0x1 << PSW_offIME ) | ||
| 176 | #define PSW_mskDME ( 0x1 << PSW_offDME ) | ||
| 177 | #define PSW_mskDEX ( 0x1 << PSW_offDEX ) | ||
| 178 | #define PSW_mskHSS ( 0x1 << PSW_offHSS ) | ||
| 179 | #define PSW_mskDRBE ( 0x1 << PSW_offDRBE ) | ||
| 180 | #define PSW_mskAEN ( 0x1 << PSW_offAEN ) | ||
| 181 | #define PSW_mskWBNA ( 0x1 << PSW_offWBNA ) | ||
| 182 | #define PSW_mskIFCON ( 0x1 << PSW_offIFCON ) | ||
| 183 | #define PSW_mskCPL ( 0x7 << PSW_offCPL ) | ||
| 184 | |||
| 185 | #define PSW_SYSTEM ( 1 << PSW_offPOM ) | ||
| 186 | #define PSW_INTL_1 ( 1 << PSW_offINTL ) | ||
| 187 | #define PSW_CPL_NO ( 0 << PSW_offCPL ) | ||
| 188 | #define PSW_CPL_ANY ( 7 << PSW_offCPL ) | ||
| 189 | |||
| 190 | #define PSW_clr (PSW_mskGIE|PSW_mskINTL|PSW_mskPOM|PSW_mskIT|PSW_mskDT|PSW_mskIME|PSW_mskWBNA) | ||
| 191 | #ifdef __NDS32_EB__ | ||
| 192 | #ifdef CONFIG_WBNA | ||
| 193 | #define PSW_init (PSW_mskWBNA|(1<<PSW_offINTL)|(1<<PSW_offPOM)|PSW_mskIT|PSW_mskDT|PSW_mskBE) | ||
| 194 | #else | ||
| 195 | #define PSW_init ((1<<PSW_offINTL)|(1<<PSW_offPOM)|PSW_mskIT|PSW_mskDT|PSW_mskBE) | ||
| 196 | #endif | ||
| 197 | #else | ||
| 198 | #ifdef CONFIG_WBNA | ||
| 199 | #define PSW_init (PSW_mskWBNA|(1<<PSW_offINTL)|(1<<PSW_offPOM)|PSW_mskIT|PSW_mskDT) | ||
| 200 | #else | ||
| 201 | #define PSW_init ((1<<PSW_offINTL)|(1<<PSW_offPOM)|PSW_mskIT|PSW_mskDT) | ||
| 202 | #endif | ||
| 203 | #endif | ||
| 204 | /****************************************************************************** | ||
| 205 | * ir3: IVB (Interruption Vector Base Register) | ||
| 206 | *****************************************************************************/ | ||
| 207 | /* bit 0:12 reserved */ | ||
| 208 | #define IVB_offNIVIC 1 /* Number of inputs for the IVIC controller */ | ||
| 209 | #define IVB_offIVIC_VER 11 /* IVIC Version */ | ||
| 210 | #define IVB_offEVIC 13 /* External Vector Interrupt Controller mode */ | ||
| 211 | #define IVB_offESZ 14 /* Size of each vector entry */ | ||
| 212 | #define IVB_offIVBASE 16 /* BasePA of interrupt vector table */ | ||
| 213 | |||
| 214 | #define IVB_mskNIVIC ( 0x7 << IVB_offNIVIC ) | ||
| 215 | #define IVB_mskIVIC_VER ( 0x3 << IVB_offIVIC_VER ) | ||
| 216 | #define IVB_mskEVIC ( 0x1 << IVB_offEVIC ) | ||
| 217 | #define IVB_mskESZ ( 0x3 << IVB_offESZ ) | ||
| 218 | #define IVB_mskIVBASE ( 0xFFFF << IVB_offIVBASE ) | ||
| 219 | |||
| 220 | #define IVB_valESZ4 0 | ||
| 221 | #define IVB_valESZ16 1 | ||
| 222 | #define IVB_valESZ64 2 | ||
| 223 | #define IVB_valESZ256 3 | ||
| 224 | /****************************************************************************** | ||
| 225 | * ir4: EVA (Exception Virtual Address Register) | ||
| 226 | * ir5: P_EVA (Previous EVA Register) | ||
| 227 | *****************************************************************************/ | ||
| 228 | |||
| 229 | /* This register contains the VA that causes the exception */ | ||
| 230 | |||
| 231 | /****************************************************************************** | ||
| 232 | * ir6: ITYPE (Interruption Type Register) | ||
| 233 | * ir7: P_ITYPE (Previous ITYPE Register) | ||
| 234 | *****************************************************************************/ | ||
| 235 | #define ITYPE_offETYPE 0 /* Exception Type */ | ||
| 236 | #define ITYPE_offINST 4 /* Exception caused by insn fetch or data access */ | ||
| 237 | /* bit 5:15 reserved */ | ||
| 238 | #define ITYPE_offVECTOR 5 /* Vector */ | ||
| 239 | #define ITYPE_offSWID 16 /* SWID of debugging exception */ | ||
| 240 | /* bit 31:31 reserved */ | ||
| 241 | |||
| 242 | #define ITYPE_mskETYPE ( 0xF << ITYPE_offETYPE ) | ||
| 243 | #define ITYPE_mskINST ( 0x1 << ITYPE_offINST ) | ||
| 244 | #define ITYPE_mskVECTOR ( 0x7F << ITYPE_offVECTOR ) | ||
| 245 | #define ITYPE_mskSWID ( 0x7FFF << ITYPE_offSWID ) | ||
| 246 | |||
| 247 | /* Additional definitions for ITYPE register */ | ||
| 248 | #define ITYPE_offSTYPE 16 /* Arithmetic Sub Type */ | ||
| 249 | #define ITYPE_offCPID 20 /* Co-Processor ID which generate the exception */ | ||
| 250 | |||
| 251 | #define ITYPE_mskSTYPE ( 0xF << ITYPE_offSTYPE ) | ||
| 252 | #define ITYPE_mskCPID ( 0x3 << ITYPE_offCPID ) | ||
| 253 | |||
| 254 | #define NDS32_VECTOR_mskNONEXCEPTION 0x78 | ||
| 255 | #define NDS32_VECTOR_offEXCEPTION 8 | ||
| 256 | #define NDS32_VECTOR_offINTERRUPT 9 | ||
| 257 | |||
| 258 | /* Interrupt vector entry */ | ||
| 259 | #define ENTRY_RESET_NMI 0 | ||
| 260 | #define ENTRY_TLB_FILL 1 | ||
| 261 | #define ENTRY_PTE_NOT_PRESENT 2 | ||
| 262 | #define ENTRY_TLB_MISC 3 | ||
| 263 | #define ENTRY_TLB_VLPT_MISS 4 | ||
| 264 | #define ENTRY_MACHINE_ERROR 5 | ||
| 265 | #define ENTRY_DEBUG_RELATED 6 | ||
| 266 | #define ENTRY_GENERAL_EXCPETION 7 | ||
| 267 | #define ENTRY_SYSCALL 8 | ||
| 268 | |||
| 269 | /* PTE not present exception definition */ | ||
| 270 | #define ETYPE_NON_LEAF_PTE_NOT_PRESENT 0 | ||
| 271 | #define ETYPE_LEAF_PTE_NOT_PRESENT 1 | ||
| 272 | |||
| 273 | /* General exception ETYPE definition */ | ||
| 274 | #define ETYPE_ALIGNMENT_CHECK 0 | ||
| 275 | #define ETYPE_RESERVED_INSTRUCTION 1 | ||
| 276 | #define ETYPE_TRAP 2 | ||
| 277 | #define ETYPE_ARITHMETIC 3 | ||
| 278 | #define ETYPE_PRECISE_BUS_ERROR 4 | ||
| 279 | #define ETYPE_IMPRECISE_BUS_ERROR 5 | ||
| 280 | #define ETYPE_COPROCESSOR 6 | ||
| 281 | #define ETYPE_RESERVED_VALUE 7 | ||
| 282 | #define ETYPE_NONEXISTENT_MEM_ADDRESS 8 | ||
| 283 | #define ETYPE_MPZIU_CONTROL 9 | ||
| 284 | #define ETYPE_NEXT_PRECISE_STACK_OFL 10 | ||
| 285 | |||
| 286 | /* Kernel reserved software IDs */ | ||
| 287 | #define SWID_RAISE_INTERRUPT_LEVEL 0x1a /* SWID_RAISE_INTERRUPT_LEVEL is used to | ||
| 288 | * raise interrupt level for debug exception | ||
| 289 | */ | ||
| 290 | |||
| 291 | /****************************************************************************** | ||
| 292 | * ir8: MERR (Machine Error Log Register) | ||
| 293 | *****************************************************************************/ | ||
| 294 | /* bit 0:30 reserved */ | ||
| 295 | #define MERR_offBUSERR 31 /* Bus error caused by a load insn */ | ||
| 296 | |||
| 297 | #define MERR_mskBUSERR ( 0x1 << MERR_offBUSERR ) | ||
| 298 | |||
| 299 | /****************************************************************************** | ||
| 300 | * ir9: IPC (Interruption Program Counter Register) | ||
| 301 | * ir10: P_IPC (Previous IPC Register) | ||
| 302 | * ir11: OIPC (Overflow Interruption Program Counter Register) | ||
| 303 | *****************************************************************************/ | ||
| 304 | |||
| 305 | /* This is the shadow stack register of the Program Counter */ | ||
| 306 | |||
| 307 | /****************************************************************************** | ||
| 308 | * ir12: P_P0 (Previous P0 Register) | ||
| 309 | * ir13: P_P1 (Previous P1 Register) | ||
| 310 | *****************************************************************************/ | ||
| 311 | |||
| 312 | /* These are shadow registers of $p0 and $p1 */ | ||
| 313 | |||
| 314 | /****************************************************************************** | ||
| 315 | * ir14: INT_MASK (Interruption Masking Register) | ||
| 316 | *****************************************************************************/ | ||
| 317 | #define INT_MASK_offH0IM 0 /* Hardware Interrupt 0 Mask bit */ | ||
| 318 | #define INT_MASK_offH1IM 1 /* Hardware Interrupt 1 Mask bit */ | ||
| 319 | #define INT_MASK_offH2IM 2 /* Hardware Interrupt 2 Mask bit */ | ||
| 320 | #define INT_MASK_offH3IM 3 /* Hardware Interrupt 3 Mask bit */ | ||
| 321 | #define INT_MASK_offH4IM 4 /* Hardware Interrupt 4 Mask bit */ | ||
| 322 | #define INT_MASK_offH5IM 5 /* Hardware Interrupt 5 Mask bit */ | ||
| 323 | /* bit 6:15 reserved */ | ||
| 324 | #define INT_MASK_offSIM 16 /* Software Interrupt Mask bit */ | ||
| 325 | /* bit 17:29 reserved */ | ||
| 326 | #define INT_MASK_offIDIVZE 30 /* Enable detection for Divide-By-Zero */ | ||
| 327 | #define INT_MASK_offDSSIM 31 /* Default Single Stepping Interruption Mask */ | ||
| 328 | |||
| 329 | #define INT_MASK_mskH0IM ( 0x1 << INT_MASK_offH0IM ) | ||
| 330 | #define INT_MASK_mskH1IM ( 0x1 << INT_MASK_offH1IM ) | ||
| 331 | #define INT_MASK_mskH2IM ( 0x1 << INT_MASK_offH2IM ) | ||
| 332 | #define INT_MASK_mskH3IM ( 0x1 << INT_MASK_offH3IM ) | ||
| 333 | #define INT_MASK_mskH4IM ( 0x1 << INT_MASK_offH4IM ) | ||
| 334 | #define INT_MASK_mskH5IM ( 0x1 << INT_MASK_offH5IM ) | ||
| 335 | #define INT_MASK_mskSIM ( 0x1 << INT_MASK_offSIM ) | ||
| 336 | #define INT_MASK_mskIDIVZE ( 0x1 << INT_MASK_offIDIVZE ) | ||
| 337 | #define INT_MASK_mskDSSIM ( 0x1 << INT_MASK_offDSSIM ) | ||
| 338 | |||
| 339 | #define INT_MASK_INITAIAL_VAL 0x10003 | ||
| 340 | |||
| 341 | /****************************************************************************** | ||
| 342 | * ir15: INT_PEND (Interrupt Pending Register) | ||
| 343 | *****************************************************************************/ | ||
| 344 | #define INT_PEND_offH0I 0 /* Hardware Interrupt 0 pending bit */ | ||
| 345 | #define INT_PEND_offH1I 1 /* Hardware Interrupt 1 pending bit */ | ||
| 346 | #define INT_PEND_offH2I 2 /* Hardware Interrupt 2 pending bit */ | ||
| 347 | #define INT_PEND_offH3I 3 /* Hardware Interrupt 3 pending bit */ | ||
| 348 | #define INT_PEND_offH4I 4 /* Hardware Interrupt 4 pending bit */ | ||
| 349 | #define INT_PEND_offH5I 5 /* Hardware Interrupt 5 pending bit */ | ||
| 350 | |||
| 351 | #define INT_PEND_offCIPL 0 /* Current Interrupt Priority Level */ | ||
| 352 | |||
| 353 | /* bit 6:15 reserved */ | ||
| 354 | #define INT_PEND_offSWI 16 /* Software Interrupt pending bit */ | ||
| 355 | /* bit 17:31 reserved */ | ||
| 356 | |||
| 357 | #define INT_PEND_mskH0I ( 0x1 << INT_PEND_offH0I ) | ||
| 358 | #define INT_PEND_mskH1I ( 0x1 << INT_PEND_offH1I ) | ||
| 359 | #define INT_PEND_mskH2I ( 0x1 << INT_PEND_offH2I ) | ||
| 360 | #define INT_PEND_mskH3I ( 0x1 << INT_PEND_offH3I ) | ||
| 361 | #define INT_PEND_mskH4I ( 0x1 << INT_PEND_offH4I ) | ||
| 362 | #define INT_PEND_mskH5I ( 0x1 << INT_PEND_offH5I ) | ||
| 363 | #define INT_PEND_mskCIPL ( 0x1 << INT_PEND_offCIPL ) | ||
| 364 | #define INT_PEND_mskSWI ( 0x1 << INT_PEND_offSWI ) | ||
| 365 | |||
| 366 | /****************************************************************************** | ||
| 367 | * mr0: MMU_CTL (MMU Control Register) | ||
| 368 | *****************************************************************************/ | ||
| 369 | #define MMU_CTL_offD 0 /* Default minimum page size */ | ||
| 370 | #define MMU_CTL_offNTC0 1 /* Non-Translated Cacheability of partition 0 */ | ||
| 371 | #define MMU_CTL_offNTC1 3 /* Non-Translated Cacheability of partition 1 */ | ||
| 372 | #define MMU_CTL_offNTC2 5 /* Non-Translated Cacheability of partition 2 */ | ||
| 373 | #define MMU_CTL_offNTC3 7 /* Non-Translated Cacheability of partition 3 */ | ||
| 374 | #define MMU_CTL_offTBALCK 9 /* TLB all-lock resolution scheme */ | ||
| 375 | #define MMU_CTL_offMPZIU 10 /* Multiple Page Size In Use bit */ | ||
| 376 | #define MMU_CTL_offNTM0 11 /* Non-Translated VA to PA of partition 0 */ | ||
| 377 | #define MMU_CTL_offNTM1 13 /* Non-Translated VA to PA of partition 1 */ | ||
| 378 | #define MMU_CTL_offNTM2 15 /* Non-Translated VA to PA of partition 2 */ | ||
| 379 | #define MMU_CTL_offNTM3 17 /* Non-Translated VA to PA of partition 3 */ | ||
| 380 | #define MMU_CTL_offUNA 23 /* Unaligned access */ | ||
| 381 | /* bit 24:31 reserved */ | ||
| 382 | |||
| 383 | #define MMU_CTL_mskD ( 0x1 << MMU_CTL_offD ) | ||
| 384 | #define MMU_CTL_mskNTC0 ( 0x3 << MMU_CTL_offNTC0 ) | ||
| 385 | #define MMU_CTL_mskNTC1 ( 0x3 << MMU_CTL_offNTC1 ) | ||
| 386 | #define MMU_CTL_mskNTC2 ( 0x3 << MMU_CTL_offNTC2 ) | ||
| 387 | #define MMU_CTL_mskNTC3 ( 0x3 << MMU_CTL_offNTC3 ) | ||
| 388 | #define MMU_CTL_mskTBALCK ( 0x1 << MMU_CTL_offTBALCK ) | ||
| 389 | #define MMU_CTL_mskMPZIU ( 0x1 << MMU_CTL_offMPZIU ) | ||
| 390 | #define MMU_CTL_mskNTM0 ( 0x3 << MMU_CTL_offNTM0 ) | ||
| 391 | #define MMU_CTL_mskNTM1 ( 0x3 << MMU_CTL_offNTM1 ) | ||
| 392 | #define MMU_CTL_mskNTM2 ( 0x3 << MMU_CTL_offNTM2 ) | ||
| 393 | #define MMU_CTL_mskNTM3 ( 0x3 << MMU_CTL_offNTM3 ) | ||
| 394 | |||
| 395 | #define MMU_CTL_D4KB 0 | ||
| 396 | #define MMU_CTL_D8KB 1 | ||
| 397 | #define MMU_CTL_UNA ( 0x1 << MMU_CTL_offUNA ) | ||
| 398 | |||
| 399 | #define MMU_CTL_CACHEABLE_WB 2 | ||
| 400 | #define MMU_CTL_CACHEABLE_WT 3 | ||
| 401 | |||
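Editor's note (not part of the patch): MMU_CTL mixes one-bit flags with two-bit fields (NTCn/NTMn), so a value is built by shifting an encoding into its field offset and masking it. A standalone sketch using only definitions shown above; the chosen settings are arbitrary examples:

	/* Illustration only: compose an MMU_CTL value with the 4KB default
	 * page size and write-back cacheability for partition 0. */
	#include <stdio.h>

	#define MMU_CTL_offD		0
	#define MMU_CTL_offNTC0		1
	#define MMU_CTL_mskNTC0		(0x3 << MMU_CTL_offNTC0)
	#define MMU_CTL_D4KB		0
	#define MMU_CTL_CACHEABLE_WB	2

	int main(void)
	{
		unsigned long mmu_ctl = 0;

		mmu_ctl |= MMU_CTL_D4KB << MMU_CTL_offD;	/* 4KB pages */
		mmu_ctl |= (MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0) & MMU_CTL_mskNTC0;

		printf("MMU_CTL = 0x%lx\n", mmu_ctl);	/* prints 0x4 */
		return 0;
	}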
| 402 | /****************************************************************************** | ||
| 403 | * mr1: L1_PPTB (L1 Physical Page Table Base Register) | ||
| 404 | *****************************************************************************/ | ||
| 405 | #define L1_PPTB_offNV 0 /* Enable Hardware Page Table Walker (HPTWK) */ | ||
| 406 | /* bit 1:11 reserved */ | ||
| 407 | #define L1_PPTB_offBASE 12 /* First level physical page table base address */ | ||
| 408 | |||
| 409 | #define L1_PPTB_mskNV ( 0x1 << L1_PPTB_offNV ) | ||
| 410 | #define L1_PPTB_mskBASE ( 0xFFFFF << L1_PPTB_offBASE ) | ||
| 411 | |||
| 412 | /****************************************************************************** | ||
| 413 | * mr2: TLB_VPN (TLB Access VPN Register) | ||
| 414 | *****************************************************************************/ | ||
| 415 | /* bit 0:11 reserved */ | ||
| 416 | #define TLB_VPN_offVPN 12 /* Virtual Page Number */ | ||
| 417 | |||
| 418 | #define TLB_VPN_mskVPN ( 0xFFFFF << TLB_VPN_offVPN ) | ||
| 419 | |||
| 420 | /****************************************************************************** | ||
| 421 | * mr3: TLB_DATA (TLB Access Data Register) | ||
| 422 | *****************************************************************************/ | ||
| 423 | #define TLB_DATA_offV 0 /* PTE is valid and present */ | ||
| 424 | #define TLB_DATA_offM 1 /* Page read/write access privilege */ | ||
| 425 | #define TLB_DATA_offD 4 /* Dirty bit */ | ||
| 426 | #define TLB_DATA_offX 5 /* Executable bit */ | ||
| 427 | #define TLB_DATA_offA 6 /* Access bit */ | ||
| 428 | #define TLB_DATA_offG 7 /* Global page (shared across contexts) */ | ||
| 429 | #define TLB_DATA_offC 8 /* Cacheability attribute */ | ||
| 430 | /* bit 11:11 reserved */ | ||
| 431 | #define TLB_DATA_offPPN 12 /* Physical Page Number */ | ||
| 432 | |||
| 433 | #define TLB_DATA_mskV ( 0x1 << TLB_DATA_offV ) | ||
| 434 | #define TLB_DATA_mskM ( 0x7 << TLB_DATA_offM ) | ||
| 435 | #define TLB_DATA_mskD ( 0x1 << TLB_DATA_offD ) | ||
| 436 | #define TLB_DATA_mskX ( 0x1 << TLB_DATA_offX ) | ||
| 437 | #define TLB_DATA_mskA ( 0x1 << TLB_DATA_offA ) | ||
| 438 | #define TLB_DATA_mskG ( 0x1 << TLB_DATA_offG ) | ||
| 439 | #define TLB_DATA_mskC ( 0x7 << TLB_DATA_offC ) | ||
| 440 | #define TLB_DATA_mskPPN ( 0xFFFFF << TLB_DATA_offPPN ) | ||
| 441 | |||
| 442 | #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH | ||
| 443 | #define TLB_DATA_kernel_text_attr (TLB_DATA_mskV|TLB_DATA_mskM|TLB_DATA_mskD|TLB_DATA_mskX|TLB_DATA_mskG|TLB_DATA_mskC) | ||
| 444 | #else | ||
| 445 | #define TLB_DATA_kernel_text_attr (TLB_DATA_mskV|TLB_DATA_mskM|TLB_DATA_mskD|TLB_DATA_mskX|TLB_DATA_mskG|(0x6 << TLB_DATA_offC)) | ||
| 446 | #endif | ||
| 447 | |||
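Editor's note (not part of the patch): TLB_DATA_kernel_text_attr packs a different cacheability (C) encoding depending on CONFIG_CPU_DCACHE_WRITETHROUGH; pulling a multi-bit field back out is the usual mask-then-shift. A standalone sketch with the two macros it needs re-declared:

	/* Illustration only: extract the 3-bit cacheability field from a
	 * TLB_DATA-style attribute word. */
	#include <stdio.h>

	#define TLB_DATA_offC	8
	#define TLB_DATA_mskC	(0x7 << TLB_DATA_offC)

	int main(void)
	{
		/* the encoding used in the non-write-through case above */
		unsigned long attr = 0x6 << TLB_DATA_offC;
		unsigned long c = (attr & TLB_DATA_mskC) >> TLB_DATA_offC;

		printf("C field = %lu\n", c);	/* prints 6 */
		return 0;
	}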
| 448 | /****************************************************************************** | ||
| 449 | * mr4: TLB_MISC (TLB Access Misc Register) | ||
| 450 | *****************************************************************************/ | ||
| 451 | #define TLB_MISC_offACC_PSZ 0 /* Page size of a PTE entry */ | ||
| 452 | #define TLB_MISC_offCID 4 /* Context id */ | ||
| 453 | /* bit 13:31 reserved */ | ||
| 454 | |||
| 455 | #define TLB_MISC_mskACC_PSZ ( 0xF << TLB_MISC_offACC_PSZ ) | ||
| 456 | #define TLB_MISC_mskCID ( 0x1FF << TLB_MISC_offCID ) | ||
| 457 | |||
| 458 | /****************************************************************************** | ||
| 459 | * mr5: VLPT_IDX (Virtual Linear Page Table Index Register) | ||
| 460 | *****************************************************************************/ | ||
| 461 | #define VLPT_IDX_offZERO 0 /* Always 0 */ | ||
| 462 | #define VLPT_IDX_offEVPN 2 /* Exception Virtual Page Number */ | ||
| 463 | #define VLPT_IDX_offVLPTB 22 /* Base VA of VLPT */ | ||
| 464 | |||
| 465 | #define VLPT_IDX_mskZERO ( 0x3 << VLPT_IDX_offZERO ) | ||
| 466 | #define VLPT_IDX_mskEVPN ( 0xFFFFF << VLPT_IDX_offEVPN ) | ||
| 467 | #define VLPT_IDX_mskVLPTB ( 0x3FF << VLPT_IDX_offVLPTB ) | ||
| 468 | |||
| 469 | /****************************************************************************** | ||
| 470 | * mr6: ILMB (Instruction Local Memory Base Register) | ||
| 471 | *****************************************************************************/ | ||
| 472 | #define ILMB_offIEN 0 /* Enable ILM */ | ||
| 473 | #define ILMB_offILMSZ 1 /* Size of ILM */ | ||
| 474 | /* bit 5:19 reserved */ | ||
| 475 | #define ILMB_offIBPA 20 /* Base PA of ILM */ | ||
| 476 | |||
| 477 | #define ILMB_mskIEN ( 0x1 << ILMB_offIEN ) | ||
| 478 | #define ILMB_mskILMSZ ( 0xF << ILMB_offILMSZ ) | ||
| 479 | #define ILMB_mskIBPA ( 0xFFF << ILMB_offIBPA ) | ||
| 480 | |||
| 481 | /****************************************************************************** | ||
| 482 | * mr7: DLMB (Data Local Memory Base Register) | ||
| 483 | *****************************************************************************/ | ||
| 484 | #define DLMB_offDEN 0 /* Enable DLM */ | ||
| 485 | #define DLMB_offDLMSZ 1 /* Size of DLM */ | ||
| 486 | #define DLMB_offDBM 5 /* Enable Double-Buffer Mode for DLM */ | ||
| 487 | #define DLMB_offDBB 6 /* Double-buffer bank which can be accessed by the processor */ | ||
| 488 | /* bit 7:19 reserved */ | ||
| 489 | #define DLMB_offDBPA 20 /* Base PA of DLM */ | ||
| 490 | |||
| 491 | #define DLMB_mskDEN ( 0x1 << DLMB_offDEN ) | ||
| 492 | #define DLMB_mskDLMSZ ( 0xF << DLMB_offDLMSZ ) | ||
| 493 | #define DLMB_mskDBM ( 0x1 << DLMB_offDBM ) | ||
| 494 | #define DLMB_mskDBB ( 0x1 << DLMB_offDBB ) | ||
| 495 | #define DLMB_mskDBPA ( 0xFFF << DLMB_offDBPA ) | ||
| 496 | |||
| 497 | /****************************************************************************** | ||
| 498 | * mr8: CACHE_CTL (Cache Control Register) | ||
| 499 | *****************************************************************************/ | ||
| 500 | #define CACHE_CTL_offIC_EN 0 /* Enable I-cache */ | ||
| 501 | #define CACHE_CTL_offDC_EN 1 /* Enable D-cache */ | ||
| 502 | #define CACHE_CTL_offICALCK 2 /* I-cache all-lock resolution scheme */ | ||
| 503 | #define CACHE_CTL_offDCALCK 3 /* D-cache all-lock resolution scheme */ | ||
| 504 | #define CACHE_CTL_offDCCWF 4 /* Enable D-cache Critical Word Forwarding */ | ||
| 505 | #define CACHE_CTL_offDCPMW 5 /* Enable D-cache concurrent miss and write-back processing */ | ||
| 506 | /* bit 6:31 reserved */ | ||
| 507 | |||
| 508 | #define CACHE_CTL_mskIC_EN ( 0x1 << CACHE_CTL_offIC_EN ) | ||
| 509 | #define CACHE_CTL_mskDC_EN ( 0x1 << CACHE_CTL_offDC_EN ) | ||
| 510 | #define CACHE_CTL_mskICALCK ( 0x1 << CACHE_CTL_offICALCK ) | ||
| 511 | #define CACHE_CTL_mskDCALCK ( 0x1 << CACHE_CTL_offDCALCK ) | ||
| 512 | #define CACHE_CTL_mskDCCWF ( 0x1 << CACHE_CTL_offDCCWF ) | ||
| 513 | #define CACHE_CTL_mskDCPMW ( 0x1 << CACHE_CTL_offDCPMW ) | ||
| 514 | |||
| 515 | /****************************************************************************** | ||
| 516 | * mr9: HSMP_SADDR (High Speed Memory Port Starting Address) | ||
| 517 | *****************************************************************************/ | ||
| 518 | #define HSMP_SADDR_offEN 0 /* Enable control bit for the High Speed Memory port */ | ||
| 519 | /* bit 1:19 reserved */ | ||
| 520 | |||
| 521 | #define HSMP_SADDR_offRANGE 1 /* Denotes the address range (only defined in HSMP v2) */ | ||
| 522 | #define HSMP_SADDR_offSADDR 20 /* Starting base PA of the High Speed Memory Port region */ | ||
| 523 | |||
| 524 | #define HSMP_SADDR_mskEN ( 0x1 << HSMP_SADDR_offEN ) | ||
| 525 | #define HSMP_SADDR_mskRANGE ( 0xFFF << HSMP_SADDR_offRANGE ) | ||
| 526 | #define HSMP_SADDR_mskSADDR ( 0xFFF << HSMP_SADDR_offSADDR ) | ||
| 527 | |||
| 528 | /****************************************************************************** | ||
| 529 | * mr10: HSMP_EADDR (High Speed Memory Port Ending Address) | ||
| 530 | *****************************************************************************/ | ||
| 531 | /* bit 0:19 reserved */ | ||
| 532 | #define HSMP_EADDR_offEADDR 20 | ||
| 533 | |||
| 534 | #define HSMP_EADDR_mskEADDR ( 0xFFF << HSMP_EADDR_offEADDR ) | ||
| 535 | |||
| 536 | /****************************************************************************** | ||
| 537 | * dr0+(n*5): BPCn (n=0-7) (Breakpoint Control Register) | ||
| 538 | *****************************************************************************/ | ||
| 539 | #define BPC_offWP 0 /* Configuration of BPAn */ | ||
| 540 | #define BPC_offEL 1 /* Enable BPAn */ | ||
| 541 | #define BPC_offS 2 /* Data address comparison for a store instruction */ | ||
| 542 | #define BPC_offP 3 /* Compared data address is PA */ | ||
| 543 | #define BPC_offC 4 /* CID value is compared with the BPCIDn register */ | ||
| 544 | #define BPC_offBE0 5 /* Enable byte mask 0 for the register comparison */ | ||
| 545 | #define BPC_offBE1 6 /* Enable byte mask 1 for the register comparison */ | ||
| 546 | #define BPC_offBE2 7 /* Enable byte mask 2 for the register comparison */ | ||
| 547 | #define BPC_offBE3 8 /* Enable byte mask 3 for the register comparison */ | ||
| 548 | #define BPC_offT 9 /* Enable breakpoint Embedded Tracer triggering operation */ | ||
| 549 | |||
| 550 | #define BPC_mskWP ( 0x1 << BPC_offWP ) | ||
| 551 | #define BPC_mskEL ( 0x1 << BPC_offEL ) | ||
| 552 | #define BPC_mskS ( 0x1 << BPC_offS ) | ||
| 553 | #define BPC_mskP ( 0x1 << BPC_offP ) | ||
| 554 | #define BPC_mskC ( 0x1 << BPC_offC ) | ||
| 555 | #define BPC_mskBE0 ( 0x1 << BPC_offBE0 ) | ||
| 556 | #define BPC_mskBE1 ( 0x1 << BPC_offBE1 ) | ||
| 557 | #define BPC_mskBE2 ( 0x1 << BPC_offBE2 ) | ||
| 558 | #define BPC_mskBE3 ( 0x1 << BPC_offBE3 ) | ||
| 559 | #define BPC_mskT ( 0x1 << BPC_offT ) | ||
| 560 | |||
| 561 | /****************************************************************************** | ||
| 562 | * dr1+(n*5): BPAn (n=0-7) (Breakpoint Address Register) | ||
| 563 | *****************************************************************************/ | ||
| 564 | |||
| 565 | /* These registers contain break point address */ | ||
| 566 | |||
| 567 | /****************************************************************************** | ||
| 568 | * dr2+(n*5): BPAMn (n=0-7) (Breakpoint Address Mask Register) | ||
| 569 | *****************************************************************************/ | ||
| 570 | |||
| 571 | /* These registers contain the address comparison mask for the BPAn register */ | ||
| 572 | |||
| 573 | /****************************************************************************** | ||
| 574 | * dr3+(n*5): BPVn (n=0-7) Breakpoint Data Value Register | ||
| 575 | *****************************************************************************/ | ||
| 576 | |||
| 577 | /* The BPVn register contains the data value that will be compared with the | ||
| 578 | * incoming load/store data value */ | ||
| 579 | |||
| 580 | /****************************************************************************** | ||
| 581 | * dr4+(n*5): BPCIDn (n=0-7) (Breakpoint Context ID Register) | ||
| 582 | *****************************************************************************/ | ||
| 583 | #define BPCID_offCID 0 /* CID that will be compared with a process's CID */ | ||
| 584 | /* bit 9:31 reserved */ | ||
| 585 | |||
| 586 | #define BPCID_mskCID ( 0x1FF << BPCID_offCID ) | ||
| 587 | |||
| 588 | /****************************************************************************** | ||
| 589 | * dr40: EDM_CFG (EDM Configuration Register) | ||
| 590 | *****************************************************************************/ | ||
| 591 | #define EDM_CFG_offBC 0 /* Number of hardware breakpoint sets implemented */ | ||
| 592 | #define EDM_CFG_offDIMU 3 /* Debug Instruction Memory Unit exists */ | ||
| 593 | /* bit 4:15 reserved */ | ||
| 594 | #define EDM_CFG_offVER 16 /* EDM version */ | ||
| 595 | |||
| 596 | #define EDM_CFG_mskBC ( 0x7 << EDM_CFG_offBC ) | ||
| 597 | #define EDM_CFG_mskDIMU ( 0x1 << EDM_CFG_offDIMU ) | ||
| 598 | #define EDM_CFG_mskVER ( 0xFFFF << EDM_CFG_offVER ) | ||
| 599 | |||
| 600 | /****************************************************************************** | ||
| 601 | * dr41: EDMSW (EDM Status Word) | ||
| 602 | *****************************************************************************/ | ||
| 603 | #define EDMSW_offWV 0 /* Write Valid */ | ||
| 604 | #define EDMSW_offRV 1 /* Read Valid */ | ||
| 605 | #define EDMSW_offDE 2 /* Debug exception has occurred for this core */ | ||
| 606 | /* bit 3:31 reserved */ | ||
| 607 | |||
| 608 | #define EDMSW_mskWV ( 0x1 << EDMSW_offWV ) | ||
| 609 | #define EDMSW_mskRV ( 0x1 << EDMSW_offRV ) | ||
| 610 | #define EDMSW_mskDE ( 0x1 << EDMSW_offDE ) | ||
| 611 | |||
| 612 | /****************************************************************************** | ||
| 613 | * dr42: EDM_CTL (EDM Control Register) | ||
| 614 | *****************************************************************************/ | ||
| 615 | /* bit 0:5, 7:30 reserved */ | ||
| 616 | #define EDM_CTL_offV3_EDM_MODE 6 /* EDM compatibility control bit */ | ||
| 617 | #define EDM_CTL_offDEH_SEL 31 /* Controls where debug exception is directed to */ | ||
| 618 | |||
| 619 | #define EDM_CTL_mskV3_EDM_MODE ( 0x1 << EDM_CTL_offV3_EDM_MODE ) | ||
| 620 | #define EDM_CTL_mskDEH_SEL ( 0x1 << EDM_CTL_offDEH_SEL ) | ||
| 621 | |||
| 622 | /****************************************************************************** | ||
| 623 | * dr43: EDM_DTR (EDM Data Transfer Register) | ||
| 624 | *****************************************************************************/ | ||
| 625 | |||
| 626 | /* This is used to exchange data between the embedded EDM logic | ||
| 627 | * and the processor core */ | ||
| 628 | |||
| 629 | /****************************************************************************** | ||
| 630 | * dr44: BPMTC (Breakpoint Match Trigger Counter Register) | ||
| 631 | *****************************************************************************/ | ||
| 632 | #define BPMTC_offBPMTC 0 /* Breakpoint match trigger counter value */ | ||
| 633 | /* bit 16:31 reserved */ | ||
| 634 | |||
| 635 | #define BPMTC_mskBPMTC ( 0xFFFF << BPMTC_offBPMTC ) | ||
| 636 | |||
| 637 | /****************************************************************************** | ||
| 638 | * dr45: DIMBR (Debug Instruction Memory Base Register) | ||
| 639 | *****************************************************************************/ | ||
| 640 | /* bit 0:11 reserved */ | ||
| 641 | #define DIMBR_offDIMB 12 /* Base address of the Debug Instruction Memory (DIM) */ | ||
| 642 | #define DIMBR_mskDIMB ( 0xFFFFF << DIMBR_offDIMB ) | ||
| 643 | |||
| 644 | /****************************************************************************** | ||
| 645 | * dr46: TECR0(Trigger Event Control register 0) | ||
| 646 | * dr47: TECR1 (Trigger Event Control register 1) | ||
| 647 | *****************************************************************************/ | ||
| 648 | #define TECR_offBP 0 /* Controls which BP is used as a trigger source */ | ||
| 649 | #define TECR_offNMI 8 /* Use NMI as a trigger source */ | ||
| 650 | #define TECR_offHWINT 9 /* Corresponding interrupt is used as a trigger source */ | ||
| 651 | #define TECR_offEVIC 15 /* Enable HWINT as a trigger source in EVIC mode */ | ||
| 652 | #define TECR_offSYS 16 /* Enable SYSCALL instruction as a trigger source */ | ||
| 653 | #define TECR_offDBG 17 /* Enable debug exception as a trigger source */ | ||
| 654 | #define TECR_offMRE 18 /* Enable MMU related exception as a trigger source */ | ||
| 655 | #define TECR_offE 19 /* An exception is used as a trigger source */ | ||
| 656 | /* bit 20:30 reserved */ | ||
| 657 | #define TECR_offL 31 /* Link/Cascade TECR0 trigger event to TECR1 trigger event */ | ||
| 658 | |||
| 659 | #define TECR_mskBP ( 0xFF << TECR_offBP ) | ||
| 660 | #define TECR_mskNMI ( 0x1 << TECR_offNMI ) | ||
| 661 | #define TECR_mskHWINT ( 0x3F << TECR_offHWINT ) | ||
| 662 | #define TECR_mskEVIC ( 0x1 << TECR_offEVIC ) | ||
| 663 | #define TECR_mskSYS ( 0x1 << TECR_offSYS ) | ||
| 664 | #define TECR_mskDBG ( 0x1 << TECR_offDBG ) | ||
| 665 | #define TECR_mskMRE ( 0x1 << TECR_offMRE ) | ||
| 666 | #define TECR_mskE ( 0x1 << TECR_offE ) | ||
| 667 | #define TECR_mskL ( 0x1 << TECR_offL ) | ||
| 668 | |||
| 669 | /****************************************************************************** | ||
| 670 | * pfr0-2: PFMC0-2 (Performance Counter Register 0-2) | ||
| 671 | *****************************************************************************/ | ||
| 672 | |||
| 673 | /* These registers contains performance event count */ | ||
| 674 | |||
| 675 | /****************************************************************************** | ||
| 676 | * pfr3: PFM_CTL (Performance Counter Control Register) | ||
| 677 | *****************************************************************************/ | ||
| 678 | #define PFM_CTL_offEN0 0 /* Enable PFMC0 */ | ||
| 679 | #define PFM_CTL_offEN1 1 /* Enable PFMC1 */ | ||
| 680 | #define PFM_CTL_offEN2 2 /* Enable PFMC2 */ | ||
| 681 | #define PFM_CTL_offIE0 3 /* Enable interrupt for PFMC0 */ | ||
| 682 | #define PFM_CTL_offIE1 4 /* Enable interrupt for PFMC1 */ | ||
| 683 | #define PFM_CTL_offIE2 5 /* Enable interrupt for PFMC2 */ | ||
| 684 | #define PFM_CTL_offOVF0 6 /* Overflow bit of PFMC0 */ | ||
| 685 | #define PFM_CTL_offOVF1 7 /* Overflow bit of PFMC1 */ | ||
| 686 | #define PFM_CTL_offOVF2 8 /* Overflow bit of PFMC2 */ | ||
| 687 | #define PFM_CTL_offKS0 9 /* Enable superuser mode event counting for PFMC0 */ | ||
| 688 | #define PFM_CTL_offKS1 10 /* Enable superuser mode event counting for PFMC1 */ | ||
| 689 | #define PFM_CTL_offKS2 11 /* Enable superuser mode event counting for PFMC2 */ | ||
| 690 | #define PFM_CTL_offKU0 12 /* Enable user mode event counting for PFMC0 */ | ||
| 691 | #define PFM_CTL_offKU1 13 /* Enable user mode event counting for PFMC1 */ | ||
| 692 | #define PFM_CTL_offKU2 14 /* Enable user mode event counting for PFMC2 */ | ||
| 693 | #define PFM_CTL_offSEL0 15 /* The event selection for PFMC0 */ | ||
| 694 | #define PFM_CTL_offSEL1 21 /* The event selection for PFMC1 */ | ||
| 695 | #define PFM_CTL_offSEL2 27 /* The event selection for PFMC2 */ | ||
| 696 | /* bit 28:31 reserved */ | ||
| 697 | |||
| 698 | #define PFM_CTL_mskEN0 ( 0x01 << PFM_CTL_offEN0 ) | ||
| 699 | #define PFM_CTL_mskEN1 ( 0x01 << PFM_CTL_offEN1 ) | ||
| 700 | #define PFM_CTL_mskEN2 ( 0x01 << PFM_CTL_offEN2 ) | ||
| 701 | #define PFM_CTL_mskIE0 ( 0x01 << PFM_CTL_offIE0 ) | ||
| 702 | #define PFM_CTL_mskIE1 ( 0x01 << PFM_CTL_offIE1 ) | ||
| 703 | #define PFM_CTL_mskIE2 ( 0x01 << PFM_CTL_offIE2 ) | ||
| 704 | #define PFM_CTL_mskOVF0 ( 0x01 << PFM_CTL_offOVF0 ) | ||
| 705 | #define PFM_CTL_mskOVF1 ( 0x01 << PFM_CTL_offOVF1 ) | ||
| 706 | #define PFM_CTL_mskOVF2 ( 0x01 << PFM_CTL_offOVF2 ) | ||
| 707 | #define PFM_CTL_mskKS0 ( 0x01 << PFM_CTL_offKS0 ) | ||
| 708 | #define PFM_CTL_mskKS1 ( 0x01 << PFM_CTL_offKS1 ) | ||
| 709 | #define PFM_CTL_mskKS2 ( 0x01 << PFM_CTL_offKS2 ) | ||
| 710 | #define PFM_CTL_mskKU0 ( 0x01 << PFM_CTL_offKU0 ) | ||
| 711 | #define PFM_CTL_mskKU1 ( 0x01 << PFM_CTL_offKU1 ) | ||
| 712 | #define PFM_CTL_mskKU2 ( 0x01 << PFM_CTL_offKU2 ) | ||
| 713 | #define PFM_CTL_mskSEL0 ( 0x01 << PFM_CTL_offSEL0 ) | ||
| 714 | #define PFM_CTL_mskSEL1 ( 0x3F << PFM_CTL_offSEL1 ) | ||
| 715 | #define PFM_CTL_mskSEL2 ( 0x3F << PFM_CTL_offSEL2 ) | ||
| 716 | |||
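Editor's note (not part of the patch): PFM_CTL is laid out per counter: enable (ENn), overflow interrupt (IEn), overflow flag (OVFn), privilege filters (KSn/KUn) and an event selector (SELn). A standalone sketch that builds a control word for counter 1; the event number is arbitrary:

	/* Illustration only: build a PFM_CTL word for counter 1. */
	#include <stdio.h>

	#define PFM_CTL_offEN1	1
	#define PFM_CTL_offIE1	4
	#define PFM_CTL_offKS1	10
	#define PFM_CTL_offKU1	13
	#define PFM_CTL_offSEL1	21
	#define PFM_CTL_mskSEL1	(0x3F << PFM_CTL_offSEL1)

	int main(void)
	{
		unsigned long sel = 0x2;	/* hypothetical event number */
		unsigned long ctl = 0;

		ctl |= 0x1 << PFM_CTL_offEN1;	/* enable PFMC1 */
		ctl |= 0x1 << PFM_CTL_offIE1;	/* interrupt on overflow */
		ctl |= 0x1 << PFM_CTL_offKS1;	/* count in superuser mode */
		ctl |= 0x1 << PFM_CTL_offKU1;	/* count in user mode */
		ctl |= (sel << PFM_CTL_offSEL1) & PFM_CTL_mskSEL1;

		printf("PFM_CTL = 0x%lx\n", ctl);	/* prints 0x402412 */
		return 0;
	}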
| 717 | /****************************************************************************** | ||
| 718 | * SDZ_CTL (Structure Downsizing Control Register) | ||
| 719 | *****************************************************************************/ | ||
| 720 | #define SDZ_CTL_offICDZ 0 /* I-cache downsizing control */ | ||
| 721 | #define SDZ_CTL_offDCDZ 3 /* D-cache downsizing control */ | ||
| 722 | #define SDZ_CTL_offMTBDZ 6 /* MTLB downsizing control */ | ||
| 723 | #define SDZ_CTL_offBTBDZ 9 /* Branch Target Table downsizing control */ | ||
| 724 | /* bit 12:31 reserved */ | ||
| 725 | #define SDZ_CTL_mskICDZ ( 0x07 << SDZ_CTL_offICDZ ) | ||
| 726 | #define SDZ_CTL_mskDCDZ ( 0x07 << SDZ_CTL_offDCDZ ) | ||
| 727 | #define SDZ_CTL_mskMTBDZ ( 0x07 << SDZ_CTL_offMTBDZ ) | ||
| 728 | #define SDZ_CTL_mskBTBDZ ( 0x07 << SDZ_CTL_offBTBDZ ) | ||
| 729 | |||
| 730 | /****************************************************************************** | ||
| 731 | * N13MISC_CTL (N13 Miscellaneous Control Register) | ||
| 732 | *****************************************************************************/ | ||
| 733 | #define N13MISC_CTL_offBTB 0 /* Disable Branch Target Buffer */ | ||
| 734 | #define N13MISC_CTL_offRTP 1 /* Disable Return Target Predictor */ | ||
| 735 | #define N13MISC_CTL_offPTEPF 2 /* Disable HPTWK L2 PTE prefetch */ | ||
| 736 | #define N13MISC_CTL_offSP_SHADOW_EN 4 /* Enable shadow stack pointers */ | ||
| 737 | /* bit 6, 9:31 reserved */ | ||
| 738 | |||
| 739 | #define N13MISC_CTL_makBTB ( 0x1 << N13MISC_CTL_offBTB ) | ||
| 740 | #define N13MISC_CTL_makRTP ( 0x1 << N13MISC_CTL_offRTP ) | ||
| 741 | #define N13MISC_CTL_makPTEPF ( 0x1 << N13MISC_CTL_offPTEPF ) | ||
| 742 | #define N13MISC_CTL_makSP_SHADOW_EN ( 0x1 << N13MISC_CTL_offSP_SHADOW_EN ) | ||
| 743 | |||
| 744 | #define MISC_init (N13MISC_CTL_makBTB|N13MISC_CTL_makRTP|N13MISC_CTL_makSP_SHADOW_EN) | ||
| 745 | |||
| 746 | /****************************************************************************** | ||
| 747 | * PRUSR_ACC_CTL (Privileged Resource User Access Control Registers) | ||
| 748 | *****************************************************************************/ | ||
| 749 | #define PRUSR_ACC_CTL_offDMA_EN 0 /* Allow user mode access of DMA registers */ | ||
| 750 | #define PRUSR_ACC_CTL_offPFM_EN 1 /* Allow user mode access of PFM registers */ | ||
| 751 | |||
| 752 | #define PRUSR_ACC_CTL_mskDMA_EN ( 0x1 << PRUSR_ACC_CTL_offDMA_EN ) | ||
| 753 | #define PRUSR_ACC_CTL_mskPFM_EN ( 0x1 << PRUSR_ACC_CTL_offPFM_EN ) | ||
| 754 | |||
| 755 | /****************************************************************************** | ||
| 756 | * dmar0: DMA_CFG (DMA Configuration Register) | ||
| 757 | *****************************************************************************/ | ||
| 758 | #define DMA_CFG_offNCHN 0 /* The number of DMA channels implemented */ | ||
| 759 | #define DMA_CFG_offUNEA 2 /* Un-aligned External Address transfer feature */ | ||
| 760 | #define DMA_CFG_off2DET 3 /* 2-D Element Transfer feature */ | ||
| 761 | /* bit 4:15 reserved */ | ||
| 762 | #define DMA_CFG_offVER 16 /* DMA architecture and implementation version */ | ||
| 763 | |||
| 764 | #define DMA_CFG_mskNCHN ( 0x3 << DMA_CFG_offNCHN ) | ||
| 765 | #define DMA_CFG_mskUNEA ( 0x1 << DMA_CFG_offUNEA ) | ||
| 766 | #define DMA_CFG_msk2DET ( 0x1 << DMA_CFG_off2DET ) | ||
| 767 | #define DMA_CFG_mskVER ( 0xFFFF << DMA_CFG_offVER ) | ||
| 768 | |||
| 769 | /****************************************************************************** | ||
| 770 | * dmar1: DMA_GCSW (DMA Global Control and Status Word Register) | ||
| 771 | *****************************************************************************/ | ||
| 772 | #define DMA_GCSW_offC0STAT 0 /* DMA channel 0 state */ | ||
| 773 | #define DMA_GCSW_offC1STAT 3 /* DMA channel 1 state */ | ||
| 774 | /* bit 6:11 reserved */ | ||
| 775 | #define DMA_GCSW_offC0INT 12 /* DMA channel 0 generated an interrupt */ | ||
| 776 | #define DMA_GCSW_offC1INT 13 /* DMA channel 1 generated an interrupt */ | ||
| 777 | /* bit 14:30 reserved */ | ||
| 778 | #define DMA_GCSW_offEN 31 /* Enable DMA engine */ | ||
| 779 | |||
| 780 | #define DMA_GCSW_mskC0STAT ( 0x7 << DMA_GCSW_offC0STAT ) | ||
| 781 | #define DMA_GCSW_mskC1STAT ( 0x7 << DMA_GCSW_offC1STAT ) | ||
| 782 | #define DMA_GCSW_mskC0INT ( 0x1 << DMA_GCSW_offC0INT ) | ||
| 783 | #define DMA_GCSW_mskC1INT ( 0x1 << DMA_GCSW_offC1INT ) | ||
| 784 | #define DMA_GCSW_mskEN ( 0x1 << DMA_GCSW_offEN ) | ||
| 785 | |||
| 786 | /****************************************************************************** | ||
| 787 | * dmar2: DMA_CHNSEL (DMA Channel Selection Register) | ||
| 788 | *****************************************************************************/ | ||
| 789 | #define DMA_CHNSEL_offCHAN 0 /* Selected channel number */ | ||
| 790 | /* bit 2:31 reserved */ | ||
| 791 | |||
| 792 | #define DMA_CHNSEL_mskCHAN ( 0x3 << DMA_CHNSEL_offCHAN ) | ||
| 793 | |||
| 794 | /****************************************************************************** | ||
| 795 | * dmar3: DMA_ACT (DMA Action Register) | ||
| 796 | *****************************************************************************/ | ||
| 797 | #define DMA_ACT_offACMD 0 /* DMA Action Command */ | ||
| 798 | /* bit 2:31 reserved */ | ||
| 799 | #define DMA_ACT_mskACMD ( 0x3 << DMA_ACT_offACMD ) | ||
| 800 | |||
| 801 | /****************************************************************************** | ||
| 802 | * dmar4: DMA_SETUP (DMA Setup Register) | ||
| 803 | *****************************************************************************/ | ||
| 804 | #define DMA_SETUP_offLM 0 /* Local Memory Selection */ | ||
| 805 | #define DMA_SETUP_offTDIR 1 /* Transfer Direction */ | ||
| 806 | #define DMA_SETUP_offTES 2 /* Transfer Element Size */ | ||
| 807 | #define DMA_SETUP_offESTR 4 /* External memory transfer Stride */ | ||
| 808 | #define DMA_SETUP_offCIE 16 /* Interrupt Enable on Completion */ | ||
| 809 | #define DMA_SETUP_offSIE 17 /* Interrupt Enable on explicit Stop */ | ||
| 810 | #define DMA_SETUP_offEIE 18 /* Interrupt Enable on Error */ | ||
| 811 | #define DMA_SETUP_offUE 19 /* Enable the Un-aligned External Address */ | ||
| 812 | #define DMA_SETUP_off2DE 20 /* Enable the 2-D External Transfer */ | ||
| 813 | #define DMA_SETUP_offCOA 21 /* Transfer Coalescable */ | ||
| 814 | /* bit 22:31 reserved */ | ||
| 815 | |||
| 816 | #define DMA_SETUP_mskLM ( 0x1 << DMA_SETUP_offLM ) | ||
| 817 | #define DMA_SETUP_mskTDIR ( 0x1 << DMA_SETUP_offTDIR ) | ||
| 818 | #define DMA_SETUP_mskTES ( 0x3 << DMA_SETUP_offTES ) | ||
| 819 | #define DMA_SETUP_mskESTR ( 0xFFF << DMA_SETUP_offESTR ) | ||
| 820 | #define DMA_SETUP_mskCIE ( 0x1 << DMA_SETUP_offCIE ) | ||
| 821 | #define DMA_SETUP_mskSIE ( 0x1 << DMA_SETUP_offSIE ) | ||
| 822 | #define DMA_SETUP_mskEIE ( 0x1 << DMA_SETUP_offEIE ) | ||
| 823 | #define DMA_SETUP_mskUE ( 0x1 << DMA_SETUP_offUE ) | ||
| 824 | #define DMA_SETUP_msk2DE ( 0x1 << DMA_SETUP_off2DE ) | ||
| 825 | #define DMA_SETUP_mskCOA ( 0x1 << DMA_SETUP_offCOA ) | ||
| 826 | |||
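Editor's note (not part of the patch): DMA_SETUP combines one-bit flags with the 12-bit ESTR stride field; round-tripping the stride through its mask is a quick sanity check on the layout. A standalone sketch with arbitrary values:

	/* Illustration only: pack and unpack the external stride (ESTR)
	 * field of a DMA_SETUP-style word. */
	#include <stdio.h>

	#define DMA_SETUP_offTDIR	1
	#define DMA_SETUP_offESTR	4
	#define DMA_SETUP_offCIE	16
	#define DMA_SETUP_mskESTR	(0xFFF << DMA_SETUP_offESTR)

	int main(void)
	{
		unsigned long stride = 256;	/* hypothetical stride in bytes */
		unsigned long setup = 0;

		setup |= 0x1 << DMA_SETUP_offTDIR;		/* direction bit */
		setup |= (stride << DMA_SETUP_offESTR) & DMA_SETUP_mskESTR;
		setup |= 0x1 << DMA_SETUP_offCIE;		/* irq on completion */

		printf("ESTR read back = %lu\n",
		       (setup & DMA_SETUP_mskESTR) >> DMA_SETUP_offESTR);	/* 256 */
		return 0;
	}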
| 827 | /****************************************************************************** | ||
| 828 | * dmar5: DMA_ISADDR (DMA Internal Start Address Register) | ||
| 829 | *****************************************************************************/ | ||
| 830 | #define DMA_ISADDR_offISADDR 0 /* Internal Start Address */ | ||
| 831 | /* bit 20:31 reserved */ | ||
| 832 | #define DMA_ISADDR_mskISADDR ( 0xFFFFF << DMA_ISADDR_offISADDR ) | ||
| 833 | |||
| 834 | /****************************************************************************** | ||
| 835 | * dmar6: DMA_ESADDR (DMA External Start Address Register) | ||
| 836 | *****************************************************************************/ | ||
| 837 | /* This register holds External Start Address */ | ||
| 838 | |||
| 839 | /****************************************************************************** | ||
| 840 | * dmar7: DMA_TCNT (DMA Transfer Element Count Register) | ||
| 841 | *****************************************************************************/ | ||
| 842 | #define DMA_TCNT_offTCNT 0 /* DMA transfer element count */ | ||
| 843 | /* bit 18:31 reserved */ | ||
| 844 | #define DMA_TCNT_mskTCNT ( 0x3FFFF << DMA_TCNT_offTCNT ) | ||
| 845 | |||
| 846 | /****************************************************************************** | ||
| 847 | * dmar8: DMA_STATUS (DMA Status Register) | ||
| 848 | *****************************************************************************/ | ||
| 849 | #define DMA_STATUS_offSTAT 0 /* DMA channel state */ | ||
| 850 | #define DMA_STATUS_offSTUNA 3 /* Un-aligned error on External Stride value */ | ||
| 851 | #define DMA_STATUS_offDERR 4 /* DMA Transfer Disruption Error */ | ||
| 852 | #define DMA_STATUS_offEUNA 5 /* Un-aligned error on the External address */ | ||
| 853 | #define DMA_STATUS_offIUNA 6 /* Un-aligned error on the Internal address */ | ||
| 854 | #define DMA_STATUS_offIOOR 7 /* Out-Of-Range error on the Internal address */ | ||
| 855 | #define DMA_STATUS_offEBUS 8 /* Bus Error on an External DMA transfer */ | ||
| 856 | #define DMA_STATUS_offESUP 9 /* DMA setup error */ | ||
| 857 | /* bit 10:31 reserved */ | ||
| 858 | |||
| 859 | #define DMA_STATUS_mskSTAT ( 0x7 << DMA_STATUS_offSTAT ) | ||
| 860 | #define DMA_STATUS_mskSTUNA ( 0x1 << DMA_STATUS_offSTUNA ) | ||
| 861 | #define DMA_STATUS_mskDERR ( 0x1 << DMA_STATUS_offDERR ) | ||
| 862 | #define DMA_STATUS_mskEUNA ( 0x1 << DMA_STATUS_offEUNA ) | ||
| 863 | #define DMA_STATUS_mskIUNA ( 0x1 << DMA_STATUS_offIUNA ) | ||
| 864 | #define DMA_STATUS_mskIOOR ( 0x1 << DMA_STATUS_offIOOR ) | ||
| 865 | #define DMA_STATUS_mskEBUS ( 0x1 << DMA_STATUS_offEBUS ) | ||
| 866 | #define DMA_STATUS_mskESUP ( 0x1 << DMA_STATUS_offESUP ) | ||
| 867 | |||
| 868 | /****************************************************************************** | ||
| 869 | * dmar9: DMA_2DSET (DMA 2D Setup Register) | ||
| 870 | *****************************************************************************/ | ||
| 871 | #define DMA_2DSET_offWECNT 0 /* The Width Element Count for a 2-D region */ | ||
| 872 | #define DMA_2DSET_offHTSTR 16 /* The Height Stride for a 2-D region */ | ||
| 873 | |||
| 874 | #define DMA_2DSET_mskHTSTR ( 0xFFFF << DMA_2DSET_offHTSTR ) | ||
| 875 | #define DMA_2DSET_mskWECNT ( 0xFFFF << DMA_2DSET_offWECNT ) | ||
| 876 | |||
| 877 | /****************************************************************************** | ||
| 878 | * dmar10: DMA_2DSCTL (DMA 2D Startup Control Register) | ||
| 879 | *****************************************************************************/ | ||
| 880 | #define DMA_2DSCTL_offSTWECNT 0 /* Startup Width Element Count for a 2-D region */ | ||
| 881 | /* bit 16:31 reserved */ | ||
| 882 | |||
| 883 | #define DMA_2DSCTL_mskSTWECNT ( 0xFFFF << DMA_2DSCTL_offSTWECNT ) | ||
| 884 | |||
| 885 | /****************************************************************************** | ||
| 886 | * fpcsr: FPCSR (Floating-Point Control Status Register) | ||
| 887 | *****************************************************************************/ | ||
| 888 | #define FPCSR_offRM 0 | ||
| 889 | #define FPCSR_offIVO 2 | ||
| 890 | #define FPCSR_offDBZ 3 | ||
| 891 | #define FPCSR_offOVF 4 | ||
| 892 | #define FPCSR_offUDF 5 | ||
| 893 | #define FPCSR_offIEX 6 | ||
| 894 | #define FPCSR_offIVOE 7 | ||
| 895 | #define FPCSR_offDBZE 8 | ||
| 896 | #define FPCSR_offOVFE 9 | ||
| 897 | #define FPCSR_offUDFE 10 | ||
| 898 | #define FPCSR_offIEXE 11 | ||
| 899 | #define FPCSR_offDNZ 12 | ||
| 900 | #define FPCSR_offIVOT 13 | ||
| 901 | #define FPCSR_offDBZT 14 | ||
| 902 | #define FPCSR_offOVFT 15 | ||
| 903 | #define FPCSR_offUDFT 16 | ||
| 904 | #define FPCSR_offIEXT 17 | ||
| 905 | #define FPCSR_offDNIT 18 | ||
| 906 | #define FPCSR_offRIT 19 | ||
| 907 | |||
| 908 | #define FPCSR_mskRM ( 0x3 << FPCSR_offRM ) | ||
| 909 | #define FPCSR_mskIVO ( 0x1 << FPCSR_offIVO ) | ||
| 910 | #define FPCSR_mskDBZ ( 0x1 << FPCSR_offDBZ ) | ||
| 911 | #define FPCSR_mskOVF ( 0x1 << FPCSR_offOVF ) | ||
| 912 | #define FPCSR_mskUDF ( 0x1 << FPCSR_offUDF ) | ||
| 913 | #define FPCSR_mskIEX ( 0x1 << FPCSR_offIEX ) | ||
| 914 | #define FPCSR_mskIVOE ( 0x1 << FPCSR_offIVOE ) | ||
| 915 | #define FPCSR_mskDBZE ( 0x1 << FPCSR_offDBZE ) | ||
| 916 | #define FPCSR_mskOVFE ( 0x1 << FPCSR_offOVFE ) | ||
| 917 | #define FPCSR_mskUDFE ( 0x1 << FPCSR_offUDFE ) | ||
| 918 | #define FPCSR_mskIEXE ( 0x1 << FPCSR_offIEXE ) | ||
| 919 | #define FPCSR_mskDNZ ( 0x1 << FPCSR_offDNZ ) | ||
| 920 | #define FPCSR_mskIVOT ( 0x1 << FPCSR_offIVOT ) | ||
| 921 | #define FPCSR_mskDBZT ( 0x1 << FPCSR_offDBZT ) | ||
| 922 | #define FPCSR_mskOVFT ( 0x1 << FPCSR_offOVFT ) | ||
| 923 | #define FPCSR_mskUDFT ( 0x1 << FPCSR_offUDFT ) | ||
| 924 | #define FPCSR_mskIEXT ( 0x1 << FPCSR_offIEXT ) | ||
| 925 | #define FPCSR_mskDNIT ( 0x1 << FPCSR_offDNIT ) | ||
| 926 | #define FPCSR_mskRIT ( 0x1 << FPCSR_offRIT ) | ||
| 927 | #define FPCSR_mskALL (FPCSR_mskIVO | FPCSR_mskDBZ | FPCSR_mskOVF | FPCSR_mskUDF | FPCSR_mskIEX) | ||
| 928 | #define FPCSR_mskALLE (FPCSR_mskIVOE | FPCSR_mskDBZE | FPCSR_mskOVFE | FPCSR_mskUDFE | FPCSR_mskIEXE) | ||
| 929 | #define FPCSR_mskALLT (FPCSR_mskIVOT | FPCSR_mskDBZT | FPCSR_mskOVFT | FPCSR_mskUDFT | FPCSR_mskIEXT |FPCSR_mskDNIT | FPCSR_mskRIT) | ||
| 930 | |||
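Editor's note (not part of the patch): FPCSR groups the sticky exception flags (mskALL), their trap enables (mskALLE) and the trap-cause bits (mskALLT); testing and clearing the sticky flags is plain mask arithmetic. A standalone sketch on a made-up FPCSR value:

	/* Illustration only: test and clear the sticky FP exception flags. */
	#include <stdio.h>

	#define FPCSR_offIVO	2
	#define FPCSR_offDBZ	3
	#define FPCSR_offOVF	4
	#define FPCSR_offUDF	5
	#define FPCSR_offIEX	6
	#define FPCSR_mskIVO	(0x1 << FPCSR_offIVO)
	#define FPCSR_mskDBZ	(0x1 << FPCSR_offDBZ)
	#define FPCSR_mskOVF	(0x1 << FPCSR_offOVF)
	#define FPCSR_mskUDF	(0x1 << FPCSR_offUDF)
	#define FPCSR_mskIEX	(0x1 << FPCSR_offIEX)
	#define FPCSR_mskALL	(FPCSR_mskIVO | FPCSR_mskDBZ | FPCSR_mskOVF | \
				 FPCSR_mskUDF | FPCSR_mskIEX)

	int main(void)
	{
		unsigned long fpcsr = FPCSR_mskDBZ | FPCSR_mskIEX;	/* sample */

		if (fpcsr & FPCSR_mskALL)
			printf("pending FP exceptions: 0x%lx\n", fpcsr & FPCSR_mskALL);

		fpcsr &= ~FPCSR_mskALL;		/* clear the sticky flags */
		printf("after clearing: 0x%lx\n", fpcsr);
		return 0;
	}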
| 931 | /****************************************************************************** | ||
| 932 | * fpcfg: FPCFG (Floating-Point Configuration Register) | ||
| 933 | *****************************************************************************/ | ||
| 934 | #define FPCFG_offSP 0 | ||
| 935 | #define FPCFG_offDP 1 | ||
| 936 | #define FPCFG_offFREG 2 | ||
| 937 | #define FPCFG_offFMA 4 | ||
| 938 | #define FPCFG_offIMVER 22 | ||
| 939 | #define FPCFG_offAVER 27 | ||
| 940 | |||
| 941 | #define FPCFG_mskSP ( 0x1 << FPCFG_offSP ) | ||
| 942 | #define FPCFG_mskDP ( 0x1 << FPCFG_offDP ) | ||
| 943 | #define FPCFG_mskFREG ( 0x3 << FPCFG_offFREG ) | ||
| 944 | #define FPCFG_mskFMA ( 0x1 << FPCFG_offFMA ) | ||
| 945 | #define FPCFG_mskIMVER ( 0x1F << FPCFG_offIMVER ) | ||
| 946 | #define FPCFG_mskAVER ( 0x1F << FPCFG_offAVER ) | ||
| 947 | |||
| 948 | /****************************************************************************** | ||
| 949 | * fucpr: FUCOP_CTL (FPU and Coprocessor Enable Control Register) | ||
| 950 | *****************************************************************************/ | ||
| 951 | #define FUCOP_CTL_offCP0EN 0 | ||
| 952 | #define FUCOP_CTL_offCP1EN 1 | ||
| 953 | #define FUCOP_CTL_offCP2EN 2 | ||
| 954 | #define FUCOP_CTL_offCP3EN 3 | ||
| 955 | #define FUCOP_CTL_offAUEN 31 | ||
| 956 | |||
| 957 | #define FUCOP_CTL_mskCP0EN ( 0x1 << FUCOP_CTL_offCP0EN ) | ||
| 958 | #define FUCOP_CTL_mskCP1EN ( 0x1 << FUCOP_CTL_offCP1EN ) | ||
| 959 | #define FUCOP_CTL_mskCP2EN ( 0x1 << FUCOP_CTL_offCP2EN ) | ||
| 960 | #define FUCOP_CTL_mskCP3EN ( 0x1 << FUCOP_CTL_offCP3EN ) | ||
| 961 | #define FUCOP_CTL_mskAUEN ( 0x1 << FUCOP_CTL_offAUEN ) | ||
| 962 | |||
| 963 | #endif /* __NDS32_BITFIELD_H__ */ | ||
diff --git a/arch/nds32/include/asm/cache.h b/arch/nds32/include/asm/cache.h new file mode 100644 index 000000000000..347db4881c5f --- /dev/null +++ b/arch/nds32/include/asm/cache.h | |||
| @@ -0,0 +1,12 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __NDS32_CACHE_H__ | ||
| 5 | #define __NDS32_CACHE_H__ | ||
| 6 | |||
| 7 | #define L1_CACHE_BYTES 32 | ||
| 8 | #define L1_CACHE_SHIFT 5 | ||
| 9 | |||
| 10 | #define ARCH_DMA_MINALIGN L1_CACHE_BYTES | ||
| 11 | |||
| 12 | #endif /* __NDS32_CACHE_H__ */ | ||
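Editor's note (not part of the patch): L1_CACHE_BYTES/ARCH_DMA_MINALIGN is the alignment drivers use so a DMA buffer does not share a 32-byte cache line with unrelated data. A userspace stand-in, with the constant re-declared, just to show the aligned declaration:

	/* Illustration only: align a buffer to the 32-byte L1 line size. */
	#include <stdio.h>
	#include <stdint.h>

	#define ARCH_DMA_MINALIGN 32

	static char dma_buf[128] __attribute__((aligned(ARCH_DMA_MINALIGN)));

	int main(void)
	{
		printf("dma_buf %% 32 = %lu\n",
		       (unsigned long)((uintptr_t)dma_buf % ARCH_DMA_MINALIGN));	/* 0 */
		return 0;
	}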
diff --git a/arch/nds32/include/asm/cache_info.h b/arch/nds32/include/asm/cache_info.h new file mode 100644 index 000000000000..38ec458ba543 --- /dev/null +++ b/arch/nds32/include/asm/cache_info.h | |||
| @@ -0,0 +1,13 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | struct cache_info { | ||
| 5 | unsigned char ways; | ||
| 6 | unsigned char line_size; | ||
| 7 | unsigned short sets; | ||
| 8 | unsigned short size; | ||
| 9 | #if defined(CONFIG_CPU_CACHE_ALIASING) | ||
| 10 | unsigned short aliasing_num; | ||
| 11 | unsigned int aliasing_mask; | ||
| 12 | #endif | ||
| 13 | }; | ||
diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h new file mode 100644 index 000000000000..7b9b20a381cb --- /dev/null +++ b/arch/nds32/include/asm/cacheflush.h | |||
| @@ -0,0 +1,44 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __NDS32_CACHEFLUSH_H__ | ||
| 5 | #define __NDS32_CACHEFLUSH_H__ | ||
| 6 | |||
| 7 | #include <linux/mm.h> | ||
| 8 | |||
| 9 | #define PG_dcache_dirty PG_arch_1 | ||
| 10 | |||
| 11 | #ifdef CONFIG_CPU_CACHE_ALIASING | ||
| 12 | void flush_cache_mm(struct mm_struct *mm); | ||
| 13 | void flush_cache_dup_mm(struct mm_struct *mm); | ||
| 14 | void flush_cache_range(struct vm_area_struct *vma, | ||
| 15 | unsigned long start, unsigned long end); | ||
| 16 | void flush_cache_page(struct vm_area_struct *vma, | ||
| 17 | unsigned long addr, unsigned long pfn); | ||
| 18 | void flush_cache_kmaps(void); | ||
| 19 | void flush_cache_vmap(unsigned long start, unsigned long end); | ||
| 20 | void flush_cache_vunmap(unsigned long start, unsigned long end); | ||
| 21 | |||
| 22 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 | ||
| 23 | void flush_dcache_page(struct page *page); | ||
| 24 | void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | ||
| 25 | unsigned long vaddr, void *dst, void *src, int len); | ||
| 26 | void copy_from_user_page(struct vm_area_struct *vma, struct page *page, | ||
| 27 | unsigned long vaddr, void *dst, void *src, int len); | ||
| 28 | |||
| 29 | #define ARCH_HAS_FLUSH_ANON_PAGE | ||
| 30 | void flush_anon_page(struct vm_area_struct *vma, | ||
| 31 | struct page *page, unsigned long vaddr); | ||
| 32 | |||
| 33 | #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE | ||
| 34 | void flush_kernel_dcache_page(struct page *page); | ||
| 35 | void flush_icache_range(unsigned long start, unsigned long end); | ||
| 36 | void flush_icache_page(struct vm_area_struct *vma, struct page *page); | ||
| 37 | #define flush_dcache_mmap_lock(mapping) spin_lock_irq(&(mapping)->tree_lock) | ||
| 38 | #define flush_dcache_mmap_unlock(mapping) spin_unlock_irq(&(mapping)->tree_lock) | ||
| 39 | |||
| 40 | #else | ||
| 41 | #include <asm-generic/cacheflush.h> | ||
| 42 | #endif | ||
| 43 | |||
| 44 | #endif /* __NDS32_CACHEFLUSH_H__ */ | ||
diff --git a/arch/nds32/include/asm/current.h b/arch/nds32/include/asm/current.h new file mode 100644 index 000000000000..b4dcd22b7bcb --- /dev/null +++ b/arch/nds32/include/asm/current.h | |||
| @@ -0,0 +1,12 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef _ASM_NDS32_CURRENT_H | ||
| 5 | #define _ASM_NDS32_CURRENT_H | ||
| 6 | |||
| 7 | #ifndef __ASSEMBLY__ | ||
| 8 | register struct task_struct *current asm("$r25"); | ||
| 9 | #endif /* __ASSEMBLY__ */ | ||
| 10 | #define tsk $r25 | ||
| 11 | |||
| 12 | #endif /* _ASM_NDS32_CURRENT_H */ | ||
diff --git a/arch/nds32/include/asm/delay.h b/arch/nds32/include/asm/delay.h new file mode 100644 index 000000000000..519ba97acb6e --- /dev/null +++ b/arch/nds32/include/asm/delay.h | |||
| @@ -0,0 +1,39 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __NDS32_DELAY_H__ | ||
| 5 | #define __NDS32_DELAY_H__ | ||
| 6 | |||
| 7 | #include <asm/param.h> | ||
| 8 | |||
| 9 | /* There is no clocksource cycle counter in the CPU. */ | ||
| 10 | static inline void __delay(unsigned long loops) | ||
| 11 | { | ||
| 12 | __asm__ __volatile__(".align 2\n" | ||
| 13 | "1:\n" | ||
| 14 | "\taddi\t%0, %0, -1\n" | ||
| 15 | "\tbgtz\t%0, 1b\n" | ||
| 16 | :"=r"(loops) | ||
| 17 | :"0"(loops)); | ||
| 18 | } | ||
| 19 | |||
| 20 | static inline void __udelay(unsigned long usecs, unsigned long lpj) | ||
| 21 | { | ||
| 22 | usecs *= (unsigned long)(((0x8000000000000000ULL / (500000 / HZ)) + | ||
| 23 | 0x80000000ULL) >> 32); | ||
| 24 | usecs = (unsigned long)(((unsigned long long)usecs * lpj) >> 32); | ||
| 25 | __delay(usecs); | ||
| 26 | } | ||
| 27 | |||
| 28 | #define udelay(usecs) __udelay((usecs), loops_per_jiffy) | ||
| 29 | |||
| 30 | /* make sure "usecs *= ..." in udelay do not overflow. */ | ||
| 31 | #if HZ >= 1000 | ||
| 32 | #define MAX_UDELAY_MS 1 | ||
| 33 | #elif HZ <= 200 | ||
| 34 | #define MAX_UDELAY_MS 5 | ||
| 35 | #else | ||
| 36 | #define MAX_UDELAY_MS (1000 / HZ) | ||
| 37 | #endif | ||
| 38 | |||
| 39 | #endif | ||
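Editor's note (not part of the patch): the arithmetic in __udelay is a 32.32 fixed-point conversion. The constant (2^63 / (500000/HZ) + 2^31) >> 32 is approximately 2^32 * HZ / 1000000, so after the second multiply-and-shift the result is roughly usecs * HZ * loops_per_jiffy / 10^6, i.e. the loop count handed to __delay. A standalone check of the math with made-up HZ and loops_per_jiffy values:

	/* Illustration only: verify the fixed-point math used by __udelay. */
	#include <stdio.h>

	#define HZ 100				/* hypothetical config value */

	int main(void)
	{
		unsigned long lpj = 500000;	/* hypothetical loops_per_jiffy */
		unsigned long usecs = 1000;

		usecs *= (unsigned long)(((0x8000000000000000ULL / (500000 / HZ)) +
					  0x80000000ULL) >> 32);
		usecs = (unsigned long)(((unsigned long long)usecs * lpj) >> 32);

		/* expected: 1000us * 100Hz * 500000 / 1e6 = 50000 loops */
		printf("loops = %lu\n", usecs);
		return 0;
	}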
diff --git a/arch/nds32/include/asm/dma-mapping.h b/arch/nds32/include/asm/dma-mapping.h new file mode 100644 index 000000000000..2dd47d245c25 --- /dev/null +++ b/arch/nds32/include/asm/dma-mapping.h | |||
| @@ -0,0 +1,14 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef ASMNDS32_DMA_MAPPING_H | ||
| 5 | #define ASMNDS32_DMA_MAPPING_H | ||
| 6 | |||
| 7 | extern struct dma_map_ops nds32_dma_ops; | ||
| 8 | |||
| 9 | static inline struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) | ||
| 10 | { | ||
| 11 | return &nds32_dma_ops; | ||
| 12 | } | ||
| 13 | |||
| 14 | #endif | ||
diff --git a/arch/nds32/include/asm/elf.h b/arch/nds32/include/asm/elf.h new file mode 100644 index 000000000000..56c479058802 --- /dev/null +++ b/arch/nds32/include/asm/elf.h | |||
| @@ -0,0 +1,171 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __ASMNDS32_ELF_H | ||
| 5 | #define __ASMNDS32_ELF_H | ||
| 6 | |||
| 7 | /* | ||
| 8 | * ELF register definitions.. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <asm/ptrace.h> | ||
| 12 | |||
| 13 | typedef unsigned long elf_greg_t; | ||
| 14 | typedef unsigned long elf_freg_t[3]; | ||
| 15 | |||
| 16 | extern unsigned int elf_hwcap; | ||
| 17 | |||
| 18 | #define EM_NDS32 167 | ||
| 19 | |||
| 20 | #define R_NDS32_NONE 0 | ||
| 21 | #define R_NDS32_16_RELA 19 | ||
| 22 | #define R_NDS32_32_RELA 20 | ||
| 23 | #define R_NDS32_9_PCREL_RELA 22 | ||
| 24 | #define R_NDS32_15_PCREL_RELA 23 | ||
| 25 | #define R_NDS32_17_PCREL_RELA 24 | ||
| 26 | #define R_NDS32_25_PCREL_RELA 25 | ||
| 27 | #define R_NDS32_HI20_RELA 26 | ||
| 28 | #define R_NDS32_LO12S3_RELA 27 | ||
| 29 | #define R_NDS32_LO12S2_RELA 28 | ||
| 30 | #define R_NDS32_LO12S1_RELA 29 | ||
| 31 | #define R_NDS32_LO12S0_RELA 30 | ||
| 32 | #define R_NDS32_SDA15S3_RELA 31 | ||
| 33 | #define R_NDS32_SDA15S2_RELA 32 | ||
| 34 | #define R_NDS32_SDA15S1_RELA 33 | ||
| 35 | #define R_NDS32_SDA15S0_RELA 34 | ||
| 36 | #define R_NDS32_GOT20 37 | ||
| 37 | #define R_NDS32_25_PLTREL 38 | ||
| 38 | #define R_NDS32_COPY 39 | ||
| 39 | #define R_NDS32_GLOB_DAT 40 | ||
| 40 | #define R_NDS32_JMP_SLOT 41 | ||
| 41 | #define R_NDS32_RELATIVE 42 | ||
| 42 | #define R_NDS32_GOTOFF 43 | ||
| 43 | #define R_NDS32_GOTPC20 44 | ||
| 44 | #define R_NDS32_GOT_HI20 45 | ||
| 45 | #define R_NDS32_GOT_LO12 46 | ||
| 46 | #define R_NDS32_GOTPC_HI20 47 | ||
| 47 | #define R_NDS32_GOTPC_LO12 48 | ||
| 48 | #define R_NDS32_GOTOFF_HI20 49 | ||
| 49 | #define R_NDS32_GOTOFF_LO12 50 | ||
| 50 | #define R_NDS32_INSN16 51 | ||
| 51 | #define R_NDS32_LABEL 52 | ||
| 52 | #define R_NDS32_LONGCALL1 53 | ||
| 53 | #define R_NDS32_LONGCALL2 54 | ||
| 54 | #define R_NDS32_LONGCALL3 55 | ||
| 55 | #define R_NDS32_LONGJUMP1 56 | ||
| 56 | #define R_NDS32_LONGJUMP2 57 | ||
| 57 | #define R_NDS32_LONGJUMP3 58 | ||
| 58 | #define R_NDS32_LOADSTORE 59 | ||
| 59 | #define R_NDS32_9_FIXED_RELA 60 | ||
| 60 | #define R_NDS32_15_FIXED_RELA 61 | ||
| 61 | #define R_NDS32_17_FIXED_RELA 62 | ||
| 62 | #define R_NDS32_25_FIXED_RELA 63 | ||
| 63 | #define R_NDS32_PLTREL_HI20 64 | ||
| 64 | #define R_NDS32_PLTREL_LO12 65 | ||
| 65 | #define R_NDS32_PLT_GOTREL_HI20 66 | ||
| 66 | #define R_NDS32_PLT_GOTREL_LO12 67 | ||
| 67 | #define R_NDS32_LO12S0_ORI_RELA 72 | ||
| 68 | #define R_NDS32_DWARF2_OP1_RELA 77 | ||
| 69 | #define R_NDS32_DWARF2_OP2_RELA 78 | ||
| 70 | #define R_NDS32_DWARF2_LEB_RELA 79 | ||
| 71 | #define R_NDS32_WORD_9_PCREL_RELA 94 | ||
| 72 | #define R_NDS32_LONGCALL4 107 | ||
| 73 | #define R_NDS32_RELA_NOP_MIX 192 | ||
| 74 | #define R_NDS32_RELA_NOP_MAX 255 | ||
| 75 | |||
| 76 | #define ELF_NGREG (sizeof (struct user_pt_regs) / sizeof(elf_greg_t)) | ||
| 77 | #define ELF_CORE_COPY_REGS(dest, regs) \ | ||
| 78 | *(struct user_pt_regs *)&(dest) = (regs)->user_regs; | ||
| 79 | |||
| 80 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | ||
| 81 | |||
| 82 | /* Core file format: The core file is written in such a way that gdb | ||
| 83 | can understand it and provide useful information to the user (under | ||
| 84 | linux we use the 'trad-core' bfd). There are quite a number of | ||
| 85 | obstacles to being able to view the contents of the floating point | ||
| 86 | registers, and until these are solved you will not be able to view the | ||
| 87 | contents of them. Actually, you can read in the core file and look at | ||
| 88 | the contents of the user struct to find out what the floating point | ||
| 89 | registers contain. | ||
| 90 | The actual file contents are as follows: | ||
| 91 | UPAGE: 1 page consisting of a user struct that tells gdb what is present | ||
| 92 | in the file. Directly after this is a copy of the task_struct, which | ||
| 93 | is currently not used by gdb, but it may come in useful at some point. | ||
| 94 | All of the registers are stored as part of the upage. The upage should | ||
| 95 | always be only one page. | ||
| 96 | DATA: The data area is stored. We use current->end_text to | ||
| 97 | current->brk to pick up all of the user variables, plus any memory | ||
| 98 | that may have been malloced. No attempt is made to determine if a page | ||
| 99 | is demand-zero or if a page is totally unused, we just cover the entire | ||
| 100 | range. All of the addresses are rounded in such a way that an integral | ||
| 101 | number of pages is written. | ||
| 102 | STACK: We need the stack information in order to get a meaningful | ||
| 103 | backtrace. We need to write the data from (esp) to | ||
| 104 | current->start_stack, so we round each of these off in order to be able | ||
| 105 | to write an integer number of pages. | ||
| 106 | The minimum core file size is 3 pages, or 12288 bytes. | ||
| 107 | */ | ||
| 108 | |||
| 109 | struct user_fp { | ||
| 110 | unsigned long long fd_regs[32]; | ||
| 111 | unsigned long fpcsr; | ||
| 112 | }; | ||
| 113 | |||
| 114 | typedef struct user_fp elf_fpregset_t; | ||
| 115 | |||
| 116 | struct elf32_hdr; | ||
| 117 | #define elf_check_arch(x) ((x)->e_machine == EM_NDS32) | ||
| 118 | |||
| 119 | /* | ||
| 120 | * These are used to set parameters in the core dumps. | ||
| 121 | */ | ||
| 122 | #define ELF_CLASS ELFCLASS32 | ||
| 123 | #ifdef __NDS32_EB__ | ||
| 124 | #define ELF_DATA ELFDATA2MSB | ||
| 125 | #else | ||
| 126 | #define ELF_DATA ELFDATA2LSB | ||
| 127 | #endif | ||
| 128 | #define ELF_ARCH EM_NDS32 | ||
| 129 | #define USE_ELF_CORE_DUMP | ||
| 130 | #define ELF_EXEC_PAGESIZE PAGE_SIZE | ||
| 131 | |||
| 132 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical | ||
| 133 | use of this is to invoke "./ld.so someprog" to test out a new version of | ||
| 134 | the loader. We need to make sure that it is out of the way of the program | ||
| 135 | that it will "exec", and that there is sufficient room for the brk. */ | ||
| 136 | |||
| 137 | #define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) | ||
| 138 | |||
| 139 | /* When the program starts, a1 contains a pointer to a function to be | ||
| 140 | registered with atexit, as per the SVR4 ABI. A value of 0 means we | ||
| 141 | have no such handler. */ | ||
| 142 | #define ELF_PLAT_INIT(_r, load_addr) (_r)->uregs[0] = 0 | ||
| 143 | |||
| 144 | /* This yields a mask that user programs can use to figure out what | ||
| 145 | instruction set this cpu supports. */ | ||
| 146 | |||
| 147 | #define ELF_HWCAP (elf_hwcap) | ||
| 148 | |||
| 149 | #ifdef __KERNEL__ | ||
| 150 | |||
| 151 | #define ELF_PLATFORM (NULL) | ||
| 152 | |||
| 153 | /* Old NetWinder binaries were compiled in such a way that the iBCS | ||
| 154 | heuristic always trips on them. Until these binaries become uncommon | ||
| 155 | enough not to care, don't trust the `ibcs' flag here. In any case | ||
| 156 | there is no other ELF system currently supported by iBCS. | ||
| 157 | @@ Could print a warning message to encourage users to upgrade. */ | ||
| 158 | #define SET_PERSONALITY(ex) set_personality(PER_LINUX) | ||
| 159 | |||
| 160 | #endif | ||
| 161 | |||
| 162 | #define ARCH_DLINFO \ | ||
| 163 | do { \ | ||
| 164 | NEW_AUX_ENT(AT_SYSINFO_EHDR, \ | ||
| 165 | (elf_addr_t)current->mm->context.vdso); \ | ||
| 166 | } while (0) | ||
| 167 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 | ||
| 168 | struct linux_binprm; | ||
| 169 | int arch_setup_additional_pages(struct linux_binprm *, int); | ||
| 170 | |||
| 171 | #endif | ||
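Editor's note (not part of the patch): elf_check_arch() is just an e_machine comparison against EM_NDS32 (167). For illustration, a userspace sketch using the standard <elf.h> that performs the same check on a file given on the command line; it re-declares the machine number locally and ignores EI_DATA byte order for brevity:

	/* Illustration only: check whether an ELF file targets e_machine 167. */
	#include <elf.h>
	#include <stdio.h>

	#define EM_NDS32_LOCAL 167	/* same value the kernel header defines */

	int main(int argc, char **argv)
	{
		Elf32_Ehdr ehdr;
		FILE *f;

		if (argc < 2)
			return 1;
		f = fopen(argv[1], "rb");
		if (!f)
			return 1;
		if (fread(&ehdr, sizeof(ehdr), 1, f) != 1) {
			fclose(f);
			return 1;
		}
		printf("%s: %s\n", argv[1],
		       ehdr.e_machine == EM_NDS32_LOCAL ? "nds32" : "not nds32");
		fclose(f);
		return 0;
	}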
diff --git a/arch/nds32/include/asm/fixmap.h b/arch/nds32/include/asm/fixmap.h new file mode 100644 index 000000000000..0e60e153a71a --- /dev/null +++ b/arch/nds32/include/asm/fixmap.h | |||
| @@ -0,0 +1,29 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __ASM_NDS32_FIXMAP_H | ||
| 5 | #define __ASM_NDS32_FIXMAP_H | ||
| 6 | |||
| 7 | #ifdef CONFIG_HIGHMEM | ||
| 8 | #include <linux/threads.h> | ||
| 9 | #include <asm/kmap_types.h> | ||
| 10 | #endif | ||
| 11 | |||
| 12 | enum fixed_addresses { | ||
| 13 | FIX_HOLE, | ||
| 14 | FIX_KMAP_RESERVED, | ||
| 15 | FIX_KMAP_BEGIN, | ||
| 16 | #ifdef CONFIG_HIGHMEM | ||
| 17 | FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS), | ||
| 18 | #endif | ||
| 19 | FIX_EARLYCON_MEM_BASE, | ||
| 20 | __end_of_fixed_addresses | ||
| 21 | }; | ||
| 22 | #define FIXADDR_TOP ((unsigned long) (-(16 * PAGE_SIZE))) | ||
| 23 | #define FIXADDR_SIZE ((__end_of_fixed_addresses) << PAGE_SHIFT) | ||
| 24 | #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) | ||
| 25 | #define FIXMAP_PAGE_IO __pgprot(PAGE_DEVICE) | ||
| 26 | void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot); | ||
| 27 | |||
| 28 | #include <asm-generic/fixmap.h> | ||
| 29 | #endif /* __ASM_NDS32_FIXMAP_H */ | ||
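Editor's note (not part of the patch): the fixmap region grows downward from FIXADDR_TOP, and asm-generic/fixmap.h (included at the end of the header) maps an index to FIXADDR_TOP - (idx << PAGE_SHIFT). A standalone sketch of that arithmetic, assuming a 4 KB page size:

	/* Illustration only: fixmap address arithmetic with PAGE_SHIFT = 12. */
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define FIXADDR_TOP	((unsigned long)(-(16 * PAGE_SIZE)))

	/* mirrors the __fix_to_virt() formula from asm-generic/fixmap.h */
	#define FIX_TO_VIRT(idx)	(FIXADDR_TOP - ((idx) << PAGE_SHIFT))

	int main(void)
	{
		/* FIXADDR_TOP works out to 0xffff0000 when unsigned long is 32-bit */
		printf("FIXADDR_TOP   = 0x%lx\n", FIXADDR_TOP);
		printf("fixmap slot 2 = 0x%lx\n", FIX_TO_VIRT(2UL));
		return 0;
	}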
diff --git a/arch/nds32/include/asm/futex.h b/arch/nds32/include/asm/futex.h new file mode 100644 index 000000000000..eab5e84bd991 --- /dev/null +++ b/arch/nds32/include/asm/futex.h | |||
| @@ -0,0 +1,103 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __NDS32_FUTEX_H__ | ||
| 5 | #define __NDS32_FUTEX_H__ | ||
| 6 | |||
| 7 | #include <linux/futex.h> | ||
| 8 | #include <linux/uaccess.h> | ||
| 9 | #include <asm/errno.h> | ||
| 10 | |||
| 11 | #define __futex_atomic_ex_table(err_reg) \ | ||
| 12 | " .pushsection __ex_table,\"a\"\n" \ | ||
| 13 | " .align 3\n" \ | ||
| 14 | " .long 1b, 4f\n" \ | ||
| 15 | " .long 2b, 4f\n" \ | ||
| 16 | " .popsection\n" \ | ||
| 17 | " .pushsection .fixup,\"ax\"\n" \ | ||
| 18 | "4: move %0, " err_reg "\n" \ | ||
| 19 | " j 3b\n" \ | ||
| 20 | " .popsection" | ||
| 21 | |||
| 22 | #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ | ||
| 23 | smp_mb(); \ | ||
| 24 | asm volatile( \ | ||
| 25 | " movi $ta, #0\n" \ | ||
| 26 | "1: llw %1, [%2+$ta]\n" \ | ||
| 27 | " " insn "\n" \ | ||
| 28 | "2: scw %0, [%2+$ta]\n" \ | ||
| 29 | " beqz %0, 1b\n" \ | ||
| 30 | " movi %0, #0\n" \ | ||
| 31 | "3:\n" \ | ||
| 32 | __futex_atomic_ex_table("%4") \ | ||
| 33 | : "=&r" (ret), "=&r" (oldval) \ | ||
| 34 | : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \ | ||
| 35 | : "cc", "memory") | ||
| 36 | static inline int | ||
| 37 | futex_atomic_cmpxchg_inatomic(u32 * uval, u32 __user * uaddr, | ||
| 38 | u32 oldval, u32 newval) | ||
| 39 | { | ||
| 40 | int ret = 0; | ||
| 41 | u32 val, tmp, flags; | ||
| 42 | |||
| 43 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | ||
| 44 | return -EFAULT; | ||
| 45 | |||
| 46 | smp_mb(); | ||
| 47 | asm volatile (" movi $ta, #0\n" | ||
| 48 | "1: llw %1, [%6 + $ta]\n" | ||
| 49 | " sub %3, %1, %4\n" | ||
| 50 | " cmovz %2, %5, %3\n" | ||
| 51 | " cmovn %2, %1, %3\n" | ||
| 52 | "2: scw %2, [%6 + $ta]\n" | ||
| 53 | " beqz %2, 1b\n" | ||
| 54 | "3:\n " __futex_atomic_ex_table("%7") | ||
| 55 | :"+&r"(ret), "=&r"(val), "=&r"(tmp), "=&r"(flags) | ||
| 56 | :"r"(oldval), "r"(newval), "r"(uaddr), "i"(-EFAULT) | ||
| 57 | :"$ta", "memory"); | ||
| 58 | smp_mb(); | ||
| 59 | |||
| 60 | *uval = val; | ||
| 61 | return ret; | ||
| 62 | } | ||
| 63 | |||
| 64 | static inline int | ||
| 65 | arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) | ||
| 66 | { | ||
| 67 | int oldval = 0, ret; | ||
| 68 | |||
| 69 | |||
| 70 | pagefault_disable(); | ||
| 71 | switch (op) { | ||
| 72 | case FUTEX_OP_SET: | ||
| 73 | __futex_atomic_op("move %0, %3", ret, oldval, tmp, uaddr, | ||
| 74 | oparg); | ||
| 75 | break; | ||
| 76 | case FUTEX_OP_ADD: | ||
| 77 | __futex_atomic_op("add %0, %1, %3", ret, oldval, tmp, uaddr, | ||
| 78 | oparg); | ||
| 79 | break; | ||
| 80 | case FUTEX_OP_OR: | ||
| 81 | __futex_atomic_op("or %0, %1, %3", ret, oldval, tmp, uaddr, | ||
| 82 | oparg); | ||
| 83 | break; | ||
| 84 | case FUTEX_OP_ANDN: | ||
| 85 | __futex_atomic_op("and %0, %1, %3", ret, oldval, tmp, uaddr, | ||
| 86 | ~oparg); | ||
| 87 | break; | ||
| 88 | case FUTEX_OP_XOR: | ||
| 89 | __futex_atomic_op("xor %0, %1, %3", ret, oldval, tmp, uaddr, | ||
| 90 | oparg); | ||
| 91 | break; | ||
| 92 | default: | ||
| 93 | ret = -ENOSYS; | ||
| 94 | } | ||
| 95 | |||
| 96 | pagefault_enable(); | ||
| 97 | |||
| 98 | if (!ret) | ||
| 99 | *oval = oldval; | ||
| 100 | |||
| 101 | return ret; | ||
| 102 | } | ||
| 103 | #endif /* __NDS32_FUTEX_H__ */ | ||
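Editor's note (not part of the patch): the cmpxchg asm above is an LL/SC loop: llw loads the current value, the sub/cmovz/cmovn sequence selects either the new value (when it matched oldval) or the unchanged old value to store back, and scw retries until the conditional store succeeds. A plain-C model of just that selection logic for a single, non-atomic iteration:

	/* Illustration only: the value-selection logic the ll/sc loop implements. */
	#include <stdio.h>
	#include <stdint.h>

	static uint32_t cmpxchg_once(uint32_t *addr, uint32_t oldval, uint32_t newval)
	{
		uint32_t cur = *addr;			/* llw  */
		uint32_t diff = cur - oldval;		/* sub  */
		uint32_t store = diff ? cur : newval;	/* cmovz/cmovn pair */

		*addr = store;				/* scw, assumed to succeed */
		return cur;				/* caller compares with oldval */
	}

	int main(void)
	{
		uint32_t word = 5, ret;

		ret = cmpxchg_once(&word, 5, 9);	/* match: word becomes 9 */
		printf("returned %u, word now %u\n", ret, word);
		ret = cmpxchg_once(&word, 5, 7);	/* mismatch: word stays 9 */
		printf("returned %u, word now %u\n", ret, word);
		return 0;
	}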
diff --git a/arch/nds32/include/asm/highmem.h b/arch/nds32/include/asm/highmem.h new file mode 100644 index 000000000000..425d546cb059 --- /dev/null +++ b/arch/nds32/include/asm/highmem.h | |||
| @@ -0,0 +1,65 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef _ASM_HIGHMEM_H | ||
| 5 | #define _ASM_HIGHMEM_H | ||
| 6 | |||
| 7 | #include <asm/proc-fns.h> | ||
| 8 | #include <asm/kmap_types.h> | ||
| 9 | #include <asm/fixmap.h> | ||
| 10 | #include <asm/pgtable.h> | ||
| 11 | |||
| 12 | /* | ||
| 13 | * Right now we initialize only a single pte table. It can be extended | ||
| 14 | * easily, subsequent pte tables have to be allocated in one physical | ||
| 15 | * chunk of RAM. | ||
| 16 | */ | ||
| 17 | /* | ||
| 18 | * Ordering is (from lower to higher memory addresses): | ||
| 19 | * | ||
| 20 | * high_memory | ||
| 21 | * Persistent kmap area | ||
| 22 | * PKMAP_BASE | ||
| 23 | * fixed_addresses | ||
| 24 | * FIXADDR_START | ||
| 25 | * FIXADDR_TOP | ||
| 26 | * Vmalloc area | ||
| 27 | * VMALLOC_START | ||
| 28 | * VMALLOC_END | ||
| 29 | */ | ||
| 30 | #define PKMAP_BASE ((FIXADDR_START - PGDIR_SIZE) & (PGDIR_MASK)) | ||
| 31 | #define LAST_PKMAP PTRS_PER_PTE | ||
| 32 | #define LAST_PKMAP_MASK (LAST_PKMAP - 1) | ||
| 33 | #define PKMAP_NR(virt) (((virt) - (PKMAP_BASE)) >> PAGE_SHIFT) | ||
| 34 | #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) | ||
| 35 | #define kmap_prot PAGE_KERNEL | ||
| 36 | |||
| 37 | static inline void flush_cache_kmaps(void) | ||
| 38 | { | ||
| 39 | cpu_dcache_wbinval_all(); | ||
| 40 | } | ||
| 41 | |||
| 42 | /* declarations for highmem.c */ | ||
| 43 | extern unsigned long highstart_pfn, highend_pfn; | ||
| 44 | |||
| 45 | extern pte_t *pkmap_page_table; | ||
| 46 | |||
| 47 | extern void *kmap_high(struct page *page); | ||
| 48 | extern void kunmap_high(struct page *page); | ||
| 49 | |||
| 50 | extern void kmap_init(void); | ||
| 51 | |||
| 52 | /* | ||
| 53 | * The following functions are already defined by <linux/highmem.h> | ||
| 54 | * when CONFIG_HIGHMEM is not set. | ||
| 55 | */ | ||
| 56 | #ifdef CONFIG_HIGHMEM | ||
| 57 | extern void *kmap(struct page *page); | ||
| 58 | extern void kunmap(struct page *page); | ||
| 59 | extern void *kmap_atomic(struct page *page); | ||
| 60 | extern void __kunmap_atomic(void *kvaddr); | ||
| 61 | extern void *kmap_atomic_pfn(unsigned long pfn); | ||
| 62 | extern struct page *kmap_atomic_to_page(void *ptr); | ||
| 63 | #endif | ||
| 64 | |||
| 65 | #endif | ||
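Editor's note (not part of the patch): PKMAP_NR() and PKMAP_ADDR() are inverse mappings between a persistent-kmap virtual address and its slot index. A standalone round-trip check; the PKMAP_BASE value here is hypothetical, since the real one is derived from FIXADDR_START and PGDIR_SIZE:

	/* Illustration only: PKMAP slot <-> address arithmetic. */
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PKMAP_BASE	0xffc00000UL	/* hypothetical, for the sketch */

	#define PKMAP_NR(virt)	(((virt) - (PKMAP_BASE)) >> PAGE_SHIFT)
	#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))

	int main(void)
	{
		unsigned long addr = PKMAP_ADDR(3UL);

		printf("slot 3 -> 0x%lx -> slot %lu\n", addr, PKMAP_NR(addr));
		return 0;
	}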
diff --git a/arch/nds32/include/asm/io.h b/arch/nds32/include/asm/io.h new file mode 100644 index 000000000000..966e71b3c960 --- /dev/null +++ b/arch/nds32/include/asm/io.h | |||
| @@ -0,0 +1,83 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __ASM_NDS32_IO_H | ||
| 5 | #define __ASM_NDS32_IO_H | ||
| 6 | |||
| 7 | extern void iounmap(volatile void __iomem *addr); | ||
| 8 | #define __raw_writeb __raw_writeb | ||
| 9 | static inline void __raw_writeb(u8 val, volatile void __iomem *addr) | ||
| 10 | { | ||
| 11 | asm volatile("sbi %0, [%1]" : : "r" (val), "r" (addr)); | ||
| 12 | } | ||
| 13 | |||
| 14 | #define __raw_writew __raw_writew | ||
| 15 | static inline void __raw_writew(u16 val, volatile void __iomem *addr) | ||
| 16 | { | ||
| 17 | asm volatile("shi %0, [%1]" : : "r" (val), "r" (addr)); | ||
| 18 | } | ||
| 19 | |||
| 20 | #define __raw_writel __raw_writel | ||
| 21 | static inline void __raw_writel(u32 val, volatile void __iomem *addr) | ||
| 22 | { | ||
| 23 | asm volatile("swi %0, [%1]" : : "r" (val), "r" (addr)); | ||
| 24 | } | ||
| 25 | |||
| 26 | #define __raw_readb __raw_readb | ||
| 27 | static inline u8 __raw_readb(const volatile void __iomem *addr) | ||
| 28 | { | ||
| 29 | u8 val; | ||
| 30 | |||
| 31 | asm volatile("lbi %0, [%1]" : "=r" (val) : "r" (addr)); | ||
| 32 | return val; | ||
| 33 | } | ||
| 34 | |||
| 35 | #define __raw_readw __raw_readw | ||
| 36 | static inline u16 __raw_readw(const volatile void __iomem *addr) | ||
| 37 | { | ||
| 38 | u16 val; | ||
| 39 | |||
| 40 | asm volatile("lhi %0, [%1]" : "=r" (val) : "r" (addr)); | ||
| 41 | return val; | ||
| 42 | } | ||
| 43 | |||
| 44 | #define __raw_readl __raw_readl | ||
| 45 | static inline u32 __raw_readl(const volatile void __iomem *addr) | ||
| 46 | { | ||
| 47 | u32 val; | ||
| 48 | |||
| 49 | asm volatile("lwi %0, [%1]" : "=r" (val) : "r" (addr)); | ||
| 50 | return val; | ||
| 51 | } | ||
| 52 | |||
| 53 | #define __iormb() rmb() | ||
| 54 | #define __iowmb() wmb() | ||
| 55 | |||
| 56 | #define mmiowb() __asm__ __volatile__ ("msync all" : : : "memory"); | ||
| 57 | |||
| 58 | /* | ||
| 59 | * {read,write}{b,w,l,q}_relaxed() are like the regular version, but | ||
| 60 | * are not guaranteed to provide ordering against spinlocks or memory | ||
| 61 | * accesses. | ||
| 62 | */ | ||
| 63 | |||
| 64 | #define readb_relaxed(c) ({ u8 __v = __raw_readb(c); __v; }) | ||
| 65 | #define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; }) | ||
| 66 | #define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; }) | ||
| 67 | #define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c))) | ||
| 68 | #define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c))) | ||
| 69 | #define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c))) | ||
| 70 | |||
| 71 | /* | ||
| 72 | * {read,write}{b,w,l,q}() access little endian memory and return result in | ||
| 73 | * native endianness. | ||
| 74 | */ | ||
| 75 | #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) | ||
| 76 | #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) | ||
| 77 | #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) | ||
| 78 | |||
| 79 | #define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); }) | ||
| 80 | #define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); }) | ||
| 81 | #define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); }) | ||
| 82 | #include <asm-generic/io.h> | ||
| 83 | #endif /* __ASM_NDS32_IO_H */ | ||
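The accessors above follow the usual split: __raw_* maps to a single load/store instruction, the _relaxed forms add only endianness conversion, and readl()/writel() wrap them with __iormb()/__iowmb(). A hypothetical driver snippet showing where the barriers matter; the device, its register offsets and the enable bit are invented here purely for illustration:

#include <linux/io.h>
#include <linux/types.h>

#define DEV_CTRL	0x10	/* hypothetical control register */
#define DEV_STATUS	0x14	/* hypothetical status register  */
#define DEV_CTRL_EN	0x1

static u32 dev_enable(void __iomem *base)
{
	/*
	 * writel() = __iowmb() + writel_relaxed(): earlier normal memory
	 * stores (e.g. a descriptor filled in RAM) are ordered before the
	 * device observes this register write.
	 */
	writel(DEV_CTRL_EN, base + DEV_CTRL);

	/*
	 * readl() = readl_relaxed() + __iormb(); the _relaxed variants are
	 * for cases where this ordering is not needed, e.g. polling a
	 * status register in a tight loop.
	 */
	return readl(base + DEV_STATUS);
}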
diff --git a/arch/nds32/include/asm/irqflags.h b/arch/nds32/include/asm/irqflags.h new file mode 100644 index 000000000000..2bfd00f8bc48 --- /dev/null +++ b/arch/nds32/include/asm/irqflags.h | |||
| @@ -0,0 +1,36 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <asm/nds32.h> | ||
| 5 | #include <nds32_intrinsic.h> | ||
| 6 | |||
| 7 | #define arch_local_irq_disable() \ | ||
| 8 | GIE_DISABLE(); | ||
| 9 | |||
| 10 | #define arch_local_irq_enable() \ | ||
| 11 | GIE_ENABLE(); | ||
| 12 | static inline unsigned long arch_local_irq_save(void) | ||
| 13 | { | ||
| 14 | unsigned long flags; | ||
| 15 | flags = __nds32__mfsr(NDS32_SR_PSW) & PSW_mskGIE; | ||
| 16 | GIE_DISABLE(); | ||
| 17 | return flags; | ||
| 18 | } | ||
| 19 | |||
| 20 | static inline unsigned long arch_local_save_flags(void) | ||
| 21 | { | ||
| 22 | unsigned long flags; | ||
| 23 | flags = __nds32__mfsr(NDS32_SR_PSW) & PSW_mskGIE; | ||
| 24 | return flags; | ||
| 25 | } | ||
| 26 | |||
| 27 | static inline void arch_local_irq_restore(unsigned long flags) | ||
| 28 | { | ||
| 29 | if(flags) | ||
| 30 | GIE_ENABLE(); | ||
| 31 | } | ||
| 32 | |||
| 33 | static inline int arch_irqs_disabled_flags(unsigned long flags) | ||
| 34 | { | ||
| 35 | return !flags; | ||
| 36 | } | ||
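Since the saved flags hold only the GIE bit of PSW, restore is simply "re-enable if it was enabled on entry". A small sketch of the pattern these helpers back once the generic local_irq_save()/local_irq_restore() wrappers from <linux/irqflags.h> expand to them:

#include <linux/irqflags.h>

static unsigned long event_count;

static void event_count_inc(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* arch_local_irq_save(): save GIE, disable */
	event_count++;			/* short critical section, IRQs masked      */
	local_irq_restore(flags);	/* arch_local_irq_restore(): re-enable only
					 * if GIE was set before                    */
}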
diff --git a/arch/nds32/include/asm/l2_cache.h b/arch/nds32/include/asm/l2_cache.h new file mode 100644 index 000000000000..37dd5ef61de8 --- /dev/null +++ b/arch/nds32/include/asm/l2_cache.h | |||
| @@ -0,0 +1,137 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef L2_CACHE_H | ||
| 5 | #define L2_CACHE_H | ||
| 6 | |||
| 7 | /* CCTL_CMD_OP */ | ||
| 8 | #define L2_CA_CONF_OFF 0x0 | ||
| 9 | #define L2_IF_CONF_OFF 0x4 | ||
| 10 | #define L2CC_SETUP_OFF 0x8 | ||
| 11 | #define L2CC_PROT_OFF 0xC | ||
| 12 | #define L2CC_CTRL_OFF 0x10 | ||
| 13 | #define L2_INT_EN_OFF 0x20 | ||
| 14 | #define L2_STA_OFF 0x24 | ||
| 15 | #define RDERR_ADDR_OFF 0x28 | ||
| 16 | #define WRERR_ADDR_OFF 0x2c | ||
| 17 | #define EVDPTERR_ADDR_OFF 0x30 | ||
| 18 | #define IMPL3ERR_ADDR_OFF 0x34 | ||
| 19 | #define L2_CNT0_CTRL_OFF 0x40 | ||
| 20 | #define L2_EVNT_CNT0_OFF 0x44 | ||
| 21 | #define L2_CNT1_CTRL_OFF 0x48 | ||
| 22 | #define L2_EVNT_CNT1_OFF 0x4c | ||
| 23 | #define L2_CCTL_CMD_OFF 0x60 | ||
| 24 | #define L2_CCTL_STATUS_OFF 0x64 | ||
| 25 | #define L2_LINE_TAG_OFF 0x68 | ||
| 26 | #define L2_LINE_DPT_OFF 0x70 | ||
| 27 | |||
| 28 | #define CCTL_CMD_L2_IX_INVAL 0x0 | ||
| 29 | #define CCTL_CMD_L2_PA_INVAL 0x1 | ||
| 30 | #define CCTL_CMD_L2_IX_WB 0x2 | ||
| 31 | #define CCTL_CMD_L2_PA_WB 0x3 | ||
| 32 | #define CCTL_CMD_L2_PA_WBINVAL 0x5 | ||
| 33 | #define CCTL_CMD_L2_SYNC 0xa | ||
| 34 | |||
| 35 | /* CCTL_CMD_TYPE */ | ||
| 36 | #define CCTL_SINGLE_CMD 0 | ||
| 37 | #define CCTL_BLOCK_CMD 0x10 | ||
| 38 | #define CCTL_ALL_CMD 0x10 | ||
| 39 | |||
| 40 | /****************************************************************************** | ||
| 41 | * L2_CA_CONF (Cache architecture configuration) | ||
| 42 | *****************************************************************************/ | ||
| 43 | #define L2_CA_CONF_offL2SET 0 | ||
| 44 | #define L2_CA_CONF_offL2WAY 4 | ||
| 45 | #define L2_CA_CONF_offL2CLSZ 8 | ||
| 46 | #define L2_CA_CONF_offL2DW 11 | ||
| 47 | #define L2_CA_CONF_offL2PT 14 | ||
| 48 | #define L2_CA_CONF_offL2VER 16 | ||
| 49 | |||
| 50 | #define L2_CA_CONF_mskL2SET (0xFUL << L2_CA_CONF_offL2SET) | ||
| 51 | #define L2_CA_CONF_mskL2WAY (0xFUL << L2_CA_CONF_offL2WAY) | ||
| 52 | #define L2_CA_CONF_mskL2CLSZ (0x7UL << L2_CA_CONF_offL2CLSZ) | ||
| 53 | #define L2_CA_CONF_mskL2DW (0x7UL << L2_CA_CONF_offL2DW) | ||
| 54 | #define L2_CA_CONF_mskL2PT (0x3UL << L2_CA_CONF_offL2PT) | ||
| 55 | #define L2_CA_CONF_mskL2VER (0xFFFFUL << L2_CA_CONF_offL2VER) | ||
| 56 | |||
| 57 | /****************************************************************************** | ||
| 58 | * L2CC_SETUP (L2CC Setup register) | ||
| 59 | *****************************************************************************/ | ||
| 60 | #define L2CC_SETUP_offPART 0 | ||
| 61 | #define L2CC_SETUP_mskPART (0x3UL << L2CC_SETUP_offPART) | ||
| 62 | #define L2CC_SETUP_offDDLATC 4 | ||
| 63 | #define L2CC_SETUP_mskDDLATC (0x3UL << L2CC_SETUP_offDDLATC) | ||
| 64 | #define L2CC_SETUP_offTDLATC 8 | ||
| 65 | #define L2CC_SETUP_mskTDLATC (0x3UL << L2CC_SETUP_offTDLATC) | ||
| 66 | |||
| 67 | /****************************************************************************** | ||
| 68 | * L2CC_PROT (L2CC Protect register) | ||
| 69 | *****************************************************************************/ | ||
| 70 | #define L2CC_PROT_offMRWEN 31 | ||
| 71 | #define L2CC_PROT_mskMRWEN (0x1UL << L2CC_PROT_offMRWEN) | ||
| 72 | |||
| 73 | /****************************************************************************** | ||
| 74 | * L2_CCTL_STATUS_Mn (The L2CCTL command working status for Master n) | ||
| 75 | *****************************************************************************/ | ||
| 76 | #define L2CC_CTRL_offEN 31 | ||
| 77 | #define L2CC_CTRL_mskEN (0x1UL << L2CC_CTRL_offEN) | ||
| 78 | |||
| 79 | /****************************************************************************** | ||
| 80 | * L2_CCTL_STATUS_Mn (The L2CCTL command working status for Master n) | ||
| 81 | *****************************************************************************/ | ||
| 82 | #define L2_CCTL_STATUS_offCMD_COMP 31 | ||
| 83 | #define L2_CCTL_STATUS_mskCMD_COMP (0x1 << L2_CCTL_STATUS_offCMD_COMP) | ||
| 84 | |||
| 85 | extern void __iomem *atl2c_base; | ||
| 86 | #include <linux/smp.h> | ||
| 87 | #include <asm/io.h> | ||
| 88 | #include <asm/bitfield.h> | ||
| 89 | |||
| 90 | #define L2C_R_REG(offset) readl(atl2c_base + offset) | ||
| 91 | #define L2C_W_REG(offset, value) writel(value, atl2c_base + offset) | ||
| 92 | |||
| 93 | #define L2_CMD_RDY() \ | ||
| 94 | do{;}while((L2C_R_REG(L2_CCTL_STATUS_OFF) & L2_CCTL_STATUS_mskCMD_COMP) == 0) | ||
| 95 | |||
| 96 | static inline unsigned long L2_CACHE_SET(void) | ||
| 97 | { | ||
| 98 | return 64 << ((L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2SET) >> | ||
| 99 | L2_CA_CONF_offL2SET); | ||
| 100 | } | ||
| 101 | |||
| 102 | static inline unsigned long L2_CACHE_WAY(void) | ||
| 103 | { | ||
| 104 | return 1 + | ||
| 105 | ((L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2WAY) >> | ||
| 106 | L2_CA_CONF_offL2WAY); | ||
| 107 | } | ||
| 108 | |||
| 109 | static inline unsigned long L2_CACHE_LINE_SIZE(void) | ||
| 110 | { | ||
| 111 | |||
| 112 | return 4 << ((L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2CLSZ) >> | ||
| 113 | L2_CA_CONF_offL2CLSZ); | ||
| 114 | } | ||
| 115 | |||
| 116 | static inline unsigned long GET_L2CC_CTRL_CPU(unsigned long cpu) | ||
| 117 | { | ||
| 118 | if (cpu == smp_processor_id()) | ||
| 119 | return L2C_R_REG(L2CC_CTRL_OFF); | ||
| 120 | return L2C_R_REG(L2CC_CTRL_OFF + (cpu << 8)); | ||
| 121 | } | ||
| 122 | |||
| 123 | static inline void SET_L2CC_CTRL_CPU(unsigned long cpu, unsigned long val) | ||
| 124 | { | ||
| 125 | if (cpu == smp_processor_id()) | ||
| 126 | L2C_W_REG(L2CC_CTRL_OFF, val); | ||
| 127 | else | ||
| 128 | L2C_W_REG(L2CC_CTRL_OFF + (cpu << 8), val); | ||
| 129 | } | ||
| 130 | |||
| 131 | static inline unsigned long GET_L2CC_STATUS_CPU(unsigned long cpu) | ||
| 132 | { | ||
| 133 | if (cpu == smp_processor_id()) | ||
| 134 | return L2C_R_REG(L2_CCTL_STATUS_OFF); | ||
| 135 | return L2C_R_REG(L2_CCTL_STATUS_OFF + (cpu << 8)); | ||
| 136 | } | ||
| 137 | #endif | ||
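The three geometry helpers decode the L2_CA_CONF fields into sets, ways and line size, so the total L2 size is their product. A host-side sketch with made-up field values (they are not taken from the patch):

#include <stdio.h>

int main(void)
{
	/* Field values below are invented; real ones come from L2_CA_CONF. */
	unsigned long sets = 64 << 4;	/* L2SET == 4  -> 1024 sets     */
	unsigned long ways = 1 + 15;	/* L2WAY == 15 -> 16 ways       */
	unsigned long line = 4 << 3;	/* L2CLSZ == 3 -> 32-byte lines */

	printf("L2 size = %lu KiB\n", sets * ways * line / 1024);	/* 512 KiB */
	return 0;
}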
diff --git a/arch/nds32/include/asm/linkage.h b/arch/nds32/include/asm/linkage.h new file mode 100644 index 000000000000..e708c8bdb926 --- /dev/null +++ b/arch/nds32/include/asm/linkage.h | |||
| @@ -0,0 +1,11 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __ASM_LINKAGE_H | ||
| 5 | #define __ASM_LINKAGE_H | ||
| 6 | |||
| 7 | /* This file is required by include/linux/linkage.h */ | ||
| 8 | #define __ALIGN .align 2 | ||
| 9 | #define __ALIGN_STR ".align 2" | ||
| 10 | |||
| 11 | #endif | ||
diff --git a/arch/nds32/include/asm/memory.h b/arch/nds32/include/asm/memory.h new file mode 100644 index 000000000000..60efc726b56e --- /dev/null +++ b/arch/nds32/include/asm/memory.h | |||
| @@ -0,0 +1,105 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __ASM_NDS32_MEMORY_H | ||
| 5 | #define __ASM_NDS32_MEMORY_H | ||
| 6 | |||
| 7 | #include <linux/compiler.h> | ||
| 8 | #include <linux/sizes.h> | ||
| 9 | |||
| 10 | #ifndef __ASSEMBLY__ | ||
| 11 | #include <asm/page.h> | ||
| 12 | #endif | ||
| 13 | |||
| 14 | #ifndef PHYS_OFFSET | ||
| 15 | #define PHYS_OFFSET (0x0) | ||
| 16 | #endif | ||
| 17 | |||
| 18 | #ifndef __virt_to_bus | ||
| 19 | #define __virt_to_bus __virt_to_phys | ||
| 20 | #endif | ||
| 21 | |||
| 22 | #ifndef __bus_to_virt | ||
| 23 | #define __bus_to_virt __phys_to_virt | ||
| 24 | #endif | ||
| 25 | |||
| 26 | /* | ||
| 27 | * TASK_SIZE - the maximum size of a user space task. | ||
| 28 | * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area | ||
| 29 | */ | ||
| 30 | #define TASK_SIZE ((CONFIG_PAGE_OFFSET) - (SZ_32M)) | ||
| 31 | #define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_32M) | ||
| 32 | #define PAGE_OFFSET (CONFIG_PAGE_OFFSET) | ||
| 33 | |||
| 34 | /* | ||
| 35 | * Physical vs virtual RAM address space conversion. These are | ||
| 36 | * private definitions which should NOT be used outside memory.h | ||
| 37 | * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. | ||
| 38 | */ | ||
| 39 | #ifndef __virt_to_phys | ||
| 40 | #define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) | ||
| 41 | #define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) | ||
| 42 | #endif | ||
| 43 | |||
| 44 | /* | ||
| 45 | * The module space lives between the addresses given by TASK_SIZE | ||
| 46 | * and PAGE_OFFSET - it must be within 32MB of the kernel text. | ||
| 47 | */ | ||
| 48 | #define MODULES_END (PAGE_OFFSET) | ||
| 49 | #define MODULES_VADDR (MODULES_END - SZ_32M) | ||
| 50 | |||
| 51 | #if TASK_SIZE > MODULES_VADDR | ||
| 52 | #error Top of user space clashes with start of module space | ||
| 53 | #endif | ||
| 54 | |||
| 55 | #ifndef __ASSEMBLY__ | ||
| 56 | |||
| 57 | /* | ||
| 58 | * PFNs are used to describe any physical page; this means | ||
| 59 | * PFN 0 == physical address 0. | ||
| 60 | * | ||
| 61 | * This is the PFN of the first RAM page in the kernel | ||
| 62 | * direct-mapped view. We assume this is the first page | ||
| 63 | * of RAM in the mem_map as well. | ||
| 64 | */ | ||
| 65 | #define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) | ||
| 66 | |||
| 67 | /* | ||
| 68 | * Drivers should NOT use these either. | ||
| 69 | */ | ||
| 70 | #define __pa(x) __virt_to_phys((unsigned long)(x)) | ||
| 71 | #define __va(x) ((void *)__phys_to_virt((unsigned long)(x))) | ||
| 72 | |||
| 73 | /* | ||
| 74 | * Conversion between a struct page and a physical address. | ||
| 75 | * | ||
| 76 | * Note: when converting an unknown physical address to a | ||
| 77 | * struct page, the resulting pointer must be validated | ||
| 78 | * using VALID_PAGE(). It must return an invalid struct page | ||
| 79 | * for any physical address not corresponding to a system | ||
| 80 | * RAM address. | ||
| 81 | * | ||
| 82 | * pfn_valid(pfn) indicates whether a PFN number is valid | ||
| 83 | * | ||
| 84 | * virt_to_page(k) convert a _valid_ virtual address to struct page * | ||
| 85 | * virt_addr_valid(k) indicates whether a virtual address is valid | ||
| 86 | */ | ||
| 87 | #ifndef CONFIG_DISCONTIGMEM | ||
| 88 | |||
| 89 | #define ARCH_PFN_OFFSET PHYS_PFN_OFFSET | ||
| 90 | #define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr)) | ||
| 91 | |||
| 92 | #define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)) | ||
| 93 | #define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) | ||
| 94 | |||
| 95 | #else /* CONFIG_DISCONTIGMEM */ | ||
| 96 | #error CONFIG_DISCONTIGMEM is not supported yet. | ||
| 97 | #endif /* !CONFIG_DISCONTIGMEM */ | ||
| 98 | |||
| 99 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) | ||
| 100 | |||
| 101 | #endif | ||
| 102 | |||
| 103 | #include <asm-generic/memory_model.h> | ||
| 104 | |||
| 105 | #endif | ||
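__pa()/__va() are a plain linear offset between the kernel's virtual window and physical RAM. A host-side sketch of the arithmetic, assuming PAGE_OFFSET = 0xc0000000 and PHYS_OFFSET = 0 purely for illustration (both are configuration-dependent):

#include <stdio.h>

#define PHYS_OFFSET	0x00000000UL	/* assumed for illustration */
#define PAGE_OFFSET	0xc0000000UL	/* assumed for illustration */

#define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
#define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)

int main(void)
{
	unsigned long va = 0xc0100000UL;	/* a kernel lowmem address */
	unsigned long pa = __virt_to_phys(va);	/* -> 0x00100000           */

	printf("va %#lx -> pa %#lx -> va %#lx\n", va, pa, __phys_to_virt(pa));
	return 0;
}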
diff --git a/arch/nds32/include/asm/mmu.h b/arch/nds32/include/asm/mmu.h new file mode 100644 index 000000000000..88b9ee8c1064 --- /dev/null +++ b/arch/nds32/include/asm/mmu.h | |||
| @@ -0,0 +1,12 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __NDS32_MMU_H | ||
| 5 | #define __NDS32_MMU_H | ||
| 6 | |||
| 7 | typedef struct { | ||
| 8 | unsigned int id; | ||
| 9 | void *vdso; | ||
| 10 | } mm_context_t; | ||
| 11 | |||
| 12 | #endif | ||
diff --git a/arch/nds32/include/asm/mmu_context.h b/arch/nds32/include/asm/mmu_context.h new file mode 100644 index 000000000000..fd7d13cefccc --- /dev/null +++ b/arch/nds32/include/asm/mmu_context.h | |||
| @@ -0,0 +1,68 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __ASM_NDS32_MMU_CONTEXT_H | ||
| 5 | #define __ASM_NDS32_MMU_CONTEXT_H | ||
| 6 | |||
| 7 | #include <linux/spinlock.h> | ||
| 8 | #include <asm/tlbflush.h> | ||
| 9 | #include <asm/proc-fns.h> | ||
| 10 | #include <asm-generic/mm_hooks.h> | ||
| 11 | |||
| 12 | static inline int | ||
| 13 | init_new_context(struct task_struct *tsk, struct mm_struct *mm) | ||
| 14 | { | ||
| 15 | mm->context.id = 0; | ||
| 16 | return 0; | ||
| 17 | } | ||
| 18 | |||
| 19 | #define destroy_context(mm) do { } while(0) | ||
| 20 | |||
| 21 | #define CID_BITS 9 | ||
| 22 | extern spinlock_t cid_lock; | ||
| 23 | extern unsigned int cpu_last_cid; | ||
| 24 | |||
| 25 | static inline void __new_context(struct mm_struct *mm) | ||
| 26 | { | ||
| 27 | unsigned int cid; | ||
| 28 | unsigned long flags; | ||
| 29 | |||
| 30 | spin_lock_irqsave(&cid_lock, flags); | ||
| 31 | cid = cpu_last_cid; | ||
| 32 | cpu_last_cid += 1 << TLB_MISC_offCID; | ||
| 33 | if (cpu_last_cid == 0) | ||
| 34 | cpu_last_cid = 1 << TLB_MISC_offCID << CID_BITS; | ||
| 35 | |||
| 36 | if ((cid & TLB_MISC_mskCID) == 0) | ||
| 37 | flush_tlb_all(); | ||
| 38 | spin_unlock_irqrestore(&cid_lock, flags); | ||
| 39 | |||
| 40 | mm->context.id = cid; | ||
| 41 | } | ||
| 42 | |||
| 43 | static inline void check_context(struct mm_struct *mm) | ||
| 44 | { | ||
| 45 | if (unlikely | ||
| 46 | ((mm->context.id ^ cpu_last_cid) >> TLB_MISC_offCID >> CID_BITS)) | ||
| 47 | __new_context(mm); | ||
| 48 | } | ||
| 49 | |||
| 50 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | ||
| 51 | { | ||
| 52 | } | ||
| 53 | |||
| 54 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | ||
| 55 | struct task_struct *tsk) | ||
| 56 | { | ||
| 57 | unsigned int cpu = smp_processor_id(); | ||
| 58 | |||
| 59 | if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { | ||
| 60 | check_context(next); | ||
| 61 | cpu_switch_mm(next); | ||
| 62 | } | ||
| 63 | } | ||
| 64 | |||
| 65 | #define deactivate_mm(tsk,mm) do { } while (0) | ||
| 66 | #define activate_mm(prev,next) switch_mm(prev, next, NULL) | ||
| 67 | |||
| 68 | #endif | ||
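__new_context()/check_context() implement a generation-style context-ID allocator: the low CID_BITS go into the TLB, the upper bits of cpu_last_cid act as a generation counter, and the TLB is flushed when a new generation starts handing out CIDs. A host-side sketch of the idea, assuming TLB_MISC_offCID == 0 and an initial generation of 1 purely for illustration (the real offset comes from the TLB_MISC register layout, which is not shown here):

#include <stdio.h>

#define CID_BITS	9
#define CID_MASK	((1u << CID_BITS) - 1)

static unsigned int cpu_last_cid = 1u << CID_BITS;	/* assumed: start in generation 1 */

static unsigned int new_context(void)
{
	unsigned int cid = cpu_last_cid;

	cpu_last_cid += 1;			/* 1 << TLB_MISC_offCID with offCID == 0 */
	if (cpu_last_cid == 0)			/* 32-bit counter wrapped around         */
		cpu_last_cid = 1u << CID_BITS;
	if ((cid & CID_MASK) == 0)		/* first CID of a new generation         */
		printf("flush_tlb_all()\n");
	return cid;
}

int main(void)
{
	unsigned int mm_cid = new_context();

	/* check_context(): the cached CID is stale once the generation differs */
	if ((mm_cid ^ cpu_last_cid) >> CID_BITS)
		mm_cid = new_context();

	printf("cid = %#x (hardware CID %#x)\n", mm_cid, mm_cid & CID_MASK);
	return 0;
}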
diff --git a/arch/nds32/include/asm/module.h b/arch/nds32/include/asm/module.h new file mode 100644 index 000000000000..16cf9c7237ad --- /dev/null +++ b/arch/nds32/include/asm/module.h | |||
| @@ -0,0 +1,11 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef _ASM_NDS32_MODULE_H | ||
| 5 | #define _ASM_NDS32_MODULE_H | ||
| 6 | |||
| 7 | #include <asm-generic/module.h> | ||
| 8 | |||
| 9 | #define MODULE_ARCH_VERMAGIC "NDS32v3" | ||
| 10 | |||
| 11 | #endif /* _ASM_NDS32_MODULE_H */ | ||
diff --git a/arch/nds32/include/asm/nds32.h b/arch/nds32/include/asm/nds32.h new file mode 100644 index 000000000000..19b19394a936 --- /dev/null +++ b/arch/nds32/include/asm/nds32.h | |||
| @@ -0,0 +1,81 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef _ASM_NDS32_NDS32_H_ | ||
| 5 | #define _ASM_NDS32_NDS32_H_ | ||
| 6 | |||
| 7 | #include <asm/bitfield.h> | ||
| 8 | #include <asm/cachectl.h> | ||
| 9 | |||
| 10 | #ifndef __ASSEMBLY__ | ||
| 11 | #include <linux/init.h> | ||
| 12 | #include <asm/barrier.h> | ||
| 13 | #include <nds32_intrinsic.h> | ||
| 14 | |||
| 15 | #ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE | ||
| 16 | #define FP_OFFSET (-3) | ||
| 17 | #else | ||
| 18 | #define FP_OFFSET (-2) | ||
| 19 | #endif | ||
| 20 | |||
| 21 | extern void __init early_trap_init(void); | ||
| 22 | static inline void GIE_ENABLE(void) | ||
| 23 | { | ||
| 24 | mb(); | ||
| 25 | __nds32__gie_en(); | ||
| 26 | } | ||
| 27 | |||
| 28 | static inline void GIE_DISABLE(void) | ||
| 29 | { | ||
| 30 | mb(); | ||
| 31 | __nds32__gie_dis(); | ||
| 32 | } | ||
| 33 | |||
| 34 | static inline unsigned long CACHE_SET(unsigned char cache) | ||
| 35 | { | ||
| 36 | |||
| 37 | if (cache == ICACHE) | ||
| 38 | return 64 << ((__nds32__mfsr(NDS32_SR_ICM_CFG) & ICM_CFG_mskISET) >> | ||
| 39 | ICM_CFG_offISET); | ||
| 40 | else | ||
| 41 | return 64 << ((__nds32__mfsr(NDS32_SR_DCM_CFG) & DCM_CFG_mskDSET) >> | ||
| 42 | DCM_CFG_offDSET); | ||
| 43 | } | ||
| 44 | |||
| 45 | static inline unsigned long CACHE_WAY(unsigned char cache) | ||
| 46 | { | ||
| 47 | |||
| 48 | if (cache == ICACHE) | ||
| 49 | return 1 + | ||
| 50 | ((__nds32__mfsr(NDS32_SR_ICM_CFG) & ICM_CFG_mskIWAY) >> ICM_CFG_offIWAY); | ||
| 51 | else | ||
| 52 | return 1 + | ||
| 53 | ((__nds32__mfsr(NDS32_SR_DCM_CFG) & DCM_CFG_mskDWAY) >> DCM_CFG_offDWAY); | ||
| 54 | } | ||
| 55 | |||
| 56 | static inline unsigned long CACHE_LINE_SIZE(unsigned char cache) | ||
| 57 | { | ||
| 58 | |||
| 59 | if (cache == ICACHE) | ||
| 60 | return 8 << | ||
| 61 | (((__nds32__mfsr(NDS32_SR_ICM_CFG) & ICM_CFG_mskISZ) >> ICM_CFG_offISZ) - 1); | ||
| 62 | else | ||
| 63 | return 8 << | ||
| 64 | (((__nds32__mfsr(NDS32_SR_DCM_CFG) & DCM_CFG_mskDSZ) >> DCM_CFG_offDSZ) - 1); | ||
| 65 | } | ||
| 66 | |||
| 67 | #endif /* __ASSEMBLY__ */ | ||
| 68 | |||
| 69 | #define IVB_BASE PHYS_OFFSET /* interrupt/exception/trap/break vector table base in user space, | ||
| 70 | * 64KB aligned; it is placed at the start of physical memory */ | ||
| 71 | |||
| 72 | /* dispatched sub-entry exception handler numbering */ | ||
| 73 | #define RD_PROT 0 /* read protection */ | ||
| 74 | #define WRT_PROT 1 /* write protection */ | ||
| 75 | #define NOEXEC 2 /* non executable */ | ||
| 76 | #define PAGE_MODIFY 3 /* page modified */ | ||
| 77 | #define ACC_BIT 4 /* access bit */ | ||
| 78 | #define RESVED_PTE 5 /* reserved PTE attribute */ | ||
| 79 | /* reserved 6 ~ 16 */ | ||
| 80 | |||
| 81 | #endif /* _ASM_NDS32_NDS32_H_ */ | ||
diff --git a/arch/nds32/include/asm/page.h b/arch/nds32/include/asm/page.h new file mode 100644 index 000000000000..e27365c097b6 --- /dev/null +++ b/arch/nds32/include/asm/page.h | |||
| @@ -0,0 +1,67 @@ | |||
| 1 | /* | ||
| 2 | * SPDX-License-Identifier: GPL-2.0 | ||
| 3 | * Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 4 | */ | ||
| 5 | |||
| 6 | #ifndef _ASMNDS32_PAGE_H | ||
| 7 | #define _ASMNDS32_PAGE_H | ||
| 8 | |||
| 9 | #ifdef CONFIG_ANDES_PAGE_SIZE_4KB | ||
| 10 | #define PAGE_SHIFT 12 | ||
| 11 | #endif | ||
| 12 | #ifdef CONFIG_ANDES_PAGE_SIZE_8KB | ||
| 13 | #define PAGE_SHIFT 13 | ||
| 14 | #endif | ||
| 15 | #include <linux/const.h> | ||
| 16 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) | ||
| 17 | #define PAGE_MASK (~(PAGE_SIZE-1)) | ||
| 18 | |||
| 19 | #ifdef __KERNEL__ | ||
| 20 | |||
| 21 | #ifndef __ASSEMBLY__ | ||
| 22 | |||
| 23 | struct page; | ||
| 24 | struct vm_area_struct; | ||
| 25 | #ifdef CONFIG_CPU_CACHE_ALIASING | ||
| 26 | extern void copy_user_highpage(struct page *to, struct page *from, | ||
| 27 | unsigned long vaddr, struct vm_area_struct *vma); | ||
| 28 | extern void clear_user_highpage(struct page *page, unsigned long vaddr); | ||
| 29 | |||
| 30 | #define __HAVE_ARCH_COPY_USER_HIGHPAGE | ||
| 31 | #define clear_user_highpage clear_user_highpage | ||
| 32 | #else | ||
| 33 | #define clear_user_page(page, vaddr, pg) clear_page(page) | ||
| 34 | #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) | ||
| 35 | #endif | ||
| 36 | |||
| 37 | void clear_page(void *page); | ||
| 38 | void copy_page(void *to, void *from); | ||
| 39 | |||
| 40 | typedef unsigned long pte_t; | ||
| 41 | typedef unsigned long pmd_t; | ||
| 42 | typedef unsigned long pgd_t; | ||
| 43 | typedef unsigned long pgprot_t; | ||
| 44 | |||
| 45 | #define pte_val(x) (x) | ||
| 46 | #define pmd_val(x) (x) | ||
| 47 | #define pgd_val(x) (x) | ||
| 48 | #define pgprot_val(x) (x) | ||
| 49 | |||
| 50 | #define __pte(x) (x) | ||
| 51 | #define __pmd(x) (x) | ||
| 52 | #define __pgd(x) (x) | ||
| 53 | #define __pgprot(x) (x) | ||
| 54 | |||
| 55 | typedef struct page *pgtable_t; | ||
| 56 | |||
| 57 | #include <asm/memory.h> | ||
| 58 | #include <asm-generic/getorder.h> | ||
| 59 | |||
| 60 | #endif /* !__ASSEMBLY__ */ | ||
| 61 | |||
| 62 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ | ||
| 63 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
| 64 | |||
| 65 | #endif /* __KERNEL__ */ | ||
| 66 | |||
| 67 | #endif | ||
diff --git a/arch/nds32/include/asm/pgalloc.h b/arch/nds32/include/asm/pgalloc.h new file mode 100644 index 000000000000..27448869131a --- /dev/null +++ b/arch/nds32/include/asm/pgalloc.h | |||
| @@ -0,0 +1,96 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef _ASMNDS32_PGALLOC_H | ||
| 5 | #define _ASMNDS32_PGALLOC_H | ||
| 6 | |||
| 7 | #include <asm/processor.h> | ||
| 8 | #include <asm/cacheflush.h> | ||
| 9 | #include <asm/tlbflush.h> | ||
| 10 | #include <asm/proc-fns.h> | ||
| 11 | |||
| 12 | /* | ||
| 13 | * Since we have only two-level page tables, these are trivial | ||
| 14 | */ | ||
| 15 | #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) | ||
| 16 | #define pmd_free(mm, pmd) do { } while (0) | ||
| 17 | #define pgd_populate(mm, pmd, pte) BUG() | ||
| 18 | #define pmd_pgtable(pmd) pmd_page(pmd) | ||
| 19 | |||
| 20 | extern pgd_t *pgd_alloc(struct mm_struct *mm); | ||
| 21 | extern void pgd_free(struct mm_struct *mm, pgd_t * pgd); | ||
| 22 | |||
| 23 | #define check_pgt_cache() do { } while (0) | ||
| 24 | |||
| 25 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | ||
| 26 | unsigned long addr) | ||
| 27 | { | ||
| 28 | pte_t *pte; | ||
| 29 | |||
| 30 | pte = | ||
| 31 | (pte_t *) __get_free_page(GFP_KERNEL | __GFP_RETRY_MAYFAIL | | ||
| 32 | __GFP_ZERO); | ||
| 33 | |||
| 34 | return pte; | ||
| 35 | } | ||
| 36 | |||
| 37 | static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr) | ||
| 38 | { | ||
| 39 | pgtable_t pte; | ||
| 40 | |||
| 41 | pte = alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO, 0); | ||
| 42 | if (pte) | ||
| 43 | cpu_dcache_wb_page((unsigned long)page_address(pte)); | ||
| 44 | |||
| 45 | return pte; | ||
| 46 | } | ||
| 47 | |||
| 48 | /* | ||
| 49 | * Free one PTE table. | ||
| 50 | */ | ||
| 51 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t * pte) | ||
| 52 | { | ||
| 53 | if (pte) { | ||
| 54 | free_page((unsigned long)pte); | ||
| 55 | } | ||
| 56 | } | ||
| 57 | |||
| 58 | static inline void pte_free(struct mm_struct *mm, pgtable_t pte) | ||
| 59 | { | ||
| 60 | __free_page(pte); | ||
| 61 | } | ||
| 62 | |||
| 63 | /* | ||
| 64 | * Populate the pmdp entry with a pointer to the pte. This pmd is part | ||
| 65 | * of the mm address space. | ||
| 66 | * | ||
| 67 | * Ensure that we always set both PMD entries. | ||
| 68 | */ | ||
| 69 | static inline void | ||
| 70 | pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmdp, pte_t * ptep) | ||
| 71 | { | ||
| 72 | unsigned long pte_ptr = (unsigned long)ptep; | ||
| 73 | unsigned long pmdval; | ||
| 74 | |||
| 75 | BUG_ON(mm != &init_mm); | ||
| 76 | |||
| 77 | /* | ||
| 78 | * The pmd must be loaded with the physical | ||
| 79 | * address of the PTE table | ||
| 80 | */ | ||
| 81 | pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE; | ||
| 82 | set_pmd(pmdp, __pmd(pmdval)); | ||
| 83 | } | ||
| 84 | |||
| 85 | static inline void | ||
| 86 | pmd_populate(struct mm_struct *mm, pmd_t * pmdp, pgtable_t ptep) | ||
| 87 | { | ||
| 88 | unsigned long pmdval; | ||
| 89 | |||
| 90 | BUG_ON(mm == &init_mm); | ||
| 91 | |||
| 92 | pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE; | ||
| 93 | set_pmd(pmdp, __pmd(pmdval)); | ||
| 94 | } | ||
| 95 | |||
| 96 | #endif | ||
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h new file mode 100644 index 000000000000..6783937edbeb --- /dev/null +++ b/arch/nds32/include/asm/pgtable.h | |||
| @@ -0,0 +1,409 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef _ASMNDS32_PGTABLE_H | ||
| 5 | #define _ASMNDS32_PGTABLE_H | ||
| 6 | |||
| 7 | #define __PAGETABLE_PMD_FOLDED | ||
| 8 | #include <asm-generic/4level-fixup.h> | ||
| 9 | #include <asm-generic/sizes.h> | ||
| 10 | |||
| 11 | #include <asm/memory.h> | ||
| 12 | #include <asm/nds32.h> | ||
| 13 | #ifndef __ASSEMBLY__ | ||
| 14 | #include <asm/fixmap.h> | ||
| 15 | #include <asm/io.h> | ||
| 16 | #include <nds32_intrinsic.h> | ||
| 17 | #endif | ||
| 18 | |||
| 19 | #ifdef CONFIG_ANDES_PAGE_SIZE_4KB | ||
| 20 | #define PGDIR_SHIFT 22 | ||
| 21 | #define PTRS_PER_PGD 1024 | ||
| 22 | #define PMD_SHIFT 22 | ||
| 23 | #define PTRS_PER_PMD 1 | ||
| 24 | #define PTRS_PER_PTE 1024 | ||
| 25 | #endif | ||
| 26 | |||
| 27 | #ifdef CONFIG_ANDES_PAGE_SIZE_8KB | ||
| 28 | #define PGDIR_SHIFT 24 | ||
| 29 | #define PTRS_PER_PGD 256 | ||
| 30 | #define PMD_SHIFT 24 | ||
| 31 | #define PTRS_PER_PMD 1 | ||
| 32 | #define PTRS_PER_PTE 2048 | ||
| 33 | #endif | ||
| 34 | |||
| 35 | #ifndef __ASSEMBLY__ | ||
| 36 | extern void __pte_error(const char *file, int line, unsigned long val); | ||
| 37 | extern void __pmd_error(const char *file, int line, unsigned long val); | ||
| 38 | extern void __pgd_error(const char *file, int line, unsigned long val); | ||
| 39 | |||
| 40 | #define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte)) | ||
| 41 | #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd)) | ||
| 42 | #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd)) | ||
| 43 | #endif /* !__ASSEMBLY__ */ | ||
| 44 | |||
| 45 | #define PMD_SIZE (1UL << PMD_SHIFT) | ||
| 46 | #define PMD_MASK (~(PMD_SIZE-1)) | ||
| 47 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | ||
| 48 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | ||
| 49 | |||
| 50 | /* | ||
| 51 | * This is the lowest virtual address we can permit any user space | ||
| 52 | * mapping to be mapped at. This is particularly important for | ||
| 53 | * non-high vector CPUs. | ||
| 54 | */ | ||
| 55 | #define FIRST_USER_ADDRESS 0x8000 | ||
| 56 | |||
| 57 | #ifdef CONFIG_HIGHMEM | ||
| 58 | #define CONSISTENT_BASE ((PKMAP_BASE) - (SZ_2M)) | ||
| 59 | #define CONSISTENT_END (PKMAP_BASE) | ||
| 60 | #else | ||
| 61 | #define CONSISTENT_BASE (FIXADDR_START - SZ_2M) | ||
| 62 | #define CONSISTENT_END (FIXADDR_START) | ||
| 63 | #endif | ||
| 64 | #define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) | ||
| 65 | |||
| 66 | #ifdef CONFIG_HIGHMEM | ||
| 67 | #ifndef __ASSEMBLY__ | ||
| 68 | #include <asm/highmem.h> | ||
| 69 | #endif | ||
| 70 | #endif | ||
| 71 | |||
| 72 | #define VMALLOC_RESERVE SZ_128M | ||
| 73 | #define VMALLOC_END (CONSISTENT_BASE - PAGE_SIZE) | ||
| 74 | #define VMALLOC_START ((VMALLOC_END) - VMALLOC_RESERVE) | ||
| 75 | #define VMALLOC_VMADDR(x) ((unsigned long)(x)) | ||
| 76 | #define MAXMEM __pa(VMALLOC_START) | ||
| 77 | #define MAXMEM_PFN PFN_DOWN(MAXMEM) | ||
| 78 | |||
| 79 | #define FIRST_USER_PGD_NR 0 | ||
| 80 | #define USER_PTRS_PER_PGD ((TASK_SIZE/PGDIR_SIZE) + FIRST_USER_PGD_NR) | ||
| 81 | |||
| 82 | /* L2 PTE */ | ||
| 83 | #define _PAGE_V (1UL << 0) | ||
| 84 | |||
| 85 | #define _PAGE_M_XKRW (0UL << 1) | ||
| 86 | #define _PAGE_M_UR_KR (1UL << 1) | ||
| 87 | #define _PAGE_M_UR_KRW (2UL << 1) | ||
| 88 | #define _PAGE_M_URW_KRW (3UL << 1) | ||
| 89 | #define _PAGE_M_KR (5UL << 1) | ||
| 90 | #define _PAGE_M_KRW (7UL << 1) | ||
| 91 | |||
| 92 | #define _PAGE_D (1UL << 4) | ||
| 93 | #define _PAGE_E (1UL << 5) | ||
| 94 | #define _PAGE_A (1UL << 6) | ||
| 95 | #define _PAGE_G (1UL << 7) | ||
| 96 | |||
| 97 | #define _PAGE_C_DEV (0UL << 8) | ||
| 98 | #define _PAGE_C_DEV_WB (1UL << 8) | ||
| 99 | #define _PAGE_C_MEM (2UL << 8) | ||
| 100 | #define _PAGE_C_MEM_SHRD_WB (4UL << 8) | ||
| 101 | #define _PAGE_C_MEM_SHRD_WT (5UL << 8) | ||
| 102 | #define _PAGE_C_MEM_WB (6UL << 8) | ||
| 103 | #define _PAGE_C_MEM_WT (7UL << 8) | ||
| 104 | |||
| 105 | #define _PAGE_L (1UL << 11) | ||
| 106 | |||
| 107 | #define _HAVE_PAGE_L (_PAGE_L) | ||
| 108 | #define _PAGE_FILE (1UL << 1) | ||
| 109 | #define _PAGE_YOUNG 0 | ||
| 110 | #define _PAGE_M_MASK _PAGE_M_KRW | ||
| 111 | #define _PAGE_C_MASK _PAGE_C_MEM_WT | ||
| 112 | |||
| 113 | #ifdef CONFIG_SMP | ||
| 114 | #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH | ||
| 115 | #define _PAGE_CACHE_SHRD _PAGE_C_MEM_SHRD_WT | ||
| 116 | #else | ||
| 117 | #define _PAGE_CACHE_SHRD _PAGE_C_MEM_SHRD_WB | ||
| 118 | #endif | ||
| 119 | #else | ||
| 120 | #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH | ||
| 121 | #define _PAGE_CACHE_SHRD _PAGE_C_MEM_WT | ||
| 122 | #else | ||
| 123 | #define _PAGE_CACHE_SHRD _PAGE_C_MEM_WB | ||
| 124 | #endif | ||
| 125 | #endif | ||
| 126 | |||
| 127 | #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH | ||
| 128 | #define _PAGE_CACHE _PAGE_C_MEM_WT | ||
| 129 | #else | ||
| 130 | #define _PAGE_CACHE _PAGE_C_MEM_WB | ||
| 131 | #endif | ||
| 132 | |||
| 133 | /* | ||
| 134 | * + Level 1 descriptor (PMD) | ||
| 135 | */ | ||
| 136 | #define PMD_TYPE_TABLE 0 | ||
| 137 | |||
| 138 | #ifndef __ASSEMBLY__ | ||
| 139 | |||
| 140 | #define _PAGE_USER_TABLE PMD_TYPE_TABLE | ||
| 141 | #define _PAGE_KERNEL_TABLE PMD_TYPE_TABLE | ||
| 142 | |||
| 143 | #define PAGE_EXEC __pgprot(_PAGE_V | _PAGE_M_XKRW | _PAGE_E) | ||
| 144 | #define PAGE_NONE __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_A) | ||
| 145 | #define PAGE_READ __pgprot(_PAGE_V | _PAGE_M_UR_KR) | ||
| 146 | #define PAGE_RDWR __pgprot(_PAGE_V | _PAGE_M_URW_KRW | _PAGE_D) | ||
| 147 | #define PAGE_COPY __pgprot(_PAGE_V | _PAGE_M_UR_KR) | ||
| 148 | |||
| 149 | #define PAGE_UXKRWX_V1 __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) | ||
| 150 | #define PAGE_UXKRWX_V2 __pgprot(_PAGE_V | _PAGE_M_XKRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) | ||
| 151 | #define PAGE_URXKRWX_V2 __pgprot(_PAGE_V | _PAGE_M_UR_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) | ||
| 152 | #define PAGE_CACHE_L1 __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE) | ||
| 153 | #define PAGE_MEMORY __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) | ||
| 154 | #define PAGE_KERNEL __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) | ||
| 155 | #define PAGE_DEVICE __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV) | ||
| 156 | #endif /* __ASSEMBLY__ */ | ||
| 157 | |||
| 158 | /* xwr */ | ||
| 159 | #define __P000 (PAGE_NONE | _PAGE_CACHE_SHRD) | ||
| 160 | #define __P001 (PAGE_READ | _PAGE_CACHE_SHRD) | ||
| 161 | #define __P010 (PAGE_COPY | _PAGE_CACHE_SHRD) | ||
| 162 | #define __P011 (PAGE_COPY | _PAGE_CACHE_SHRD) | ||
| 163 | #define __P100 (PAGE_EXEC | _PAGE_CACHE_SHRD) | ||
| 164 | #define __P101 (PAGE_READ | _PAGE_E | _PAGE_CACHE_SHRD) | ||
| 165 | #define __P110 (PAGE_COPY | _PAGE_E | _PAGE_CACHE_SHRD) | ||
| 166 | #define __P111 (PAGE_COPY | _PAGE_E | _PAGE_CACHE_SHRD) | ||
| 167 | |||
| 168 | #define __S000 (PAGE_NONE | _PAGE_CACHE_SHRD) | ||
| 169 | #define __S001 (PAGE_READ | _PAGE_CACHE_SHRD) | ||
| 170 | #define __S010 (PAGE_RDWR | _PAGE_CACHE_SHRD) | ||
| 171 | #define __S011 (PAGE_RDWR | _PAGE_CACHE_SHRD) | ||
| 172 | #define __S100 (PAGE_EXEC | _PAGE_CACHE_SHRD) | ||
| 173 | #define __S101 (PAGE_READ | _PAGE_E | _PAGE_CACHE_SHRD) | ||
| 174 | #define __S110 (PAGE_RDWR | _PAGE_E | _PAGE_CACHE_SHRD) | ||
| 175 | #define __S111 (PAGE_RDWR | _PAGE_E | _PAGE_CACHE_SHRD) | ||
| 176 | |||
| 177 | #ifndef __ASSEMBLY__ | ||
| 178 | /* | ||
| 179 | * ZERO_PAGE is a global shared page that is always zero: used | ||
| 180 | * for zero-mapped memory areas etc.. | ||
| 181 | */ | ||
| 182 | extern struct page *empty_zero_page; | ||
| 183 | extern void paging_init(void); | ||
| 184 | #define ZERO_PAGE(vaddr) (empty_zero_page) | ||
| 185 | |||
| 186 | #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) | ||
| 187 | #define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))) | ||
| 188 | |||
| 189 | #define pte_none(pte) !(pte_val(pte)) | ||
| 190 | #define pte_clear(mm,addr,ptep) set_pte_at((mm),(addr),(ptep), __pte(0)) | ||
| 191 | #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) | ||
| 192 | |||
| 193 | #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | ||
| 194 | #define pte_offset_kernel(dir, address) ((pte_t *)pmd_page_kernel(*(dir)) + pte_index(address)) | ||
| 195 | #define pte_offset_map(dir, address) ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address)) | ||
| 196 | #define pte_offset_map_nested(dir, address) pte_offset_map(dir, address) | ||
| 197 | #define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) | ||
| 198 | |||
| 199 | #define pte_unmap(pte) do { } while (0) | ||
| 200 | #define pte_unmap_nested(pte) do { } while (0) | ||
| 201 | |||
| 202 | #define pmd_off_k(address) pmd_offset(pgd_offset_k(address), address) | ||
| 203 | |||
| 204 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) | ||
| 205 | /* | ||
| 206 | * Set a level 1 translation table entry, and clean it out of | ||
| 207 | * any caches such that the MMUs can load it correctly. | ||
| 208 | */ | ||
| 209 | static inline void set_pmd(pmd_t * pmdp, pmd_t pmd) | ||
| 210 | { | ||
| 211 | |||
| 212 | *pmdp = pmd; | ||
| 213 | #if !defined(CONFIG_CPU_DCACHE_DISABLE) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH) | ||
| 214 | __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (pmdp):"memory"); | ||
| 215 | __nds32__msync_all(); | ||
| 216 | __nds32__dsb(); | ||
| 217 | #endif | ||
| 218 | } | ||
| 219 | |||
| 220 | /* | ||
| 221 | * Set a PTE and flush it out | ||
| 222 | */ | ||
| 223 | static inline void set_pte(pte_t * ptep, pte_t pte) | ||
| 224 | { | ||
| 225 | |||
| 226 | *ptep = pte; | ||
| 227 | #if !defined(CONFIG_CPU_DCACHE_DISABLE) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH) | ||
| 228 | __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (ptep):"memory"); | ||
| 229 | __nds32__msync_all(); | ||
| 230 | __nds32__dsb(); | ||
| 231 | #endif | ||
| 232 | } | ||
| 233 | |||
| 234 | /* | ||
| 235 | * The following only work if pte_present() is true. | ||
| 236 | * Undefined behaviour if not.. | ||
| 237 | */ | ||
| 238 | |||
| 239 | /* | ||
| 240 | * pte_write: this page is writeable for user mode | ||
| 241 | * pte_read: this page is readable for user mode | ||
| 242 | * pte_kernel_write: this page is writeable for kernel mode | ||
| 243 | * | ||
| 244 | * We don't have pte_kernel_read because the kernel can always read. | ||
| 245 | * | ||
| 246 | * */ | ||
| 247 | |||
| 248 | #define pte_present(pte) (pte_val(pte) & _PAGE_V) | ||
| 249 | #define pte_write(pte) ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_URW_KRW) | ||
| 250 | #define pte_read(pte) (((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_UR_KR) || \ | ||
| 251 | ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_UR_KRW) || \ | ||
| 252 | ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_URW_KRW)) | ||
| 253 | #define pte_kernel_write(pte) (((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_URW_KRW) || \ | ||
| 254 | ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_UR_KRW) || \ | ||
| 255 | ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_KRW) || \ | ||
| 256 | (((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_XKRW) && pte_exec(pte))) | ||
| 257 | #define pte_exec(pte) (pte_val(pte) & _PAGE_E) | ||
| 258 | #define pte_dirty(pte) (pte_val(pte) & _PAGE_D) | ||
| 259 | #define pte_young(pte) (pte_val(pte) & _PAGE_YOUNG) | ||
| 260 | |||
| 261 | /* | ||
| 262 | * The following only works if pte_present() is not true. | ||
| 263 | */ | ||
| 264 | #define pte_file(pte) (pte_val(pte) & _PAGE_FILE) | ||
| 265 | #define pte_to_pgoff(x) (pte_val(x) >> 2) | ||
| 266 | #define pgoff_to_pte(x) __pte(((x) << 2) | _PAGE_FILE) | ||
| 267 | |||
| 268 | #define PTE_FILE_MAX_BITS 29 | ||
| 269 | |||
| 270 | #define PTE_BIT_FUNC(fn,op) \ | ||
| 271 | static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } | ||
| 272 | |||
| 273 | static inline pte_t pte_wrprotect(pte_t pte) | ||
| 274 | { | ||
| 275 | pte_val(pte) = pte_val(pte) & ~_PAGE_M_MASK; | ||
| 276 | pte_val(pte) = pte_val(pte) | _PAGE_M_UR_KR; | ||
| 277 | return pte; | ||
| 278 | } | ||
| 279 | |||
| 280 | static inline pte_t pte_mkwrite(pte_t pte) | ||
| 281 | { | ||
| 282 | pte_val(pte) = pte_val(pte) & ~_PAGE_M_MASK; | ||
| 283 | pte_val(pte) = pte_val(pte) | _PAGE_M_URW_KRW; | ||
| 284 | return pte; | ||
| 285 | } | ||
| 286 | |||
| 287 | PTE_BIT_FUNC(exprotect, &=~_PAGE_E); | ||
| 288 | PTE_BIT_FUNC(mkexec, |=_PAGE_E); | ||
| 289 | PTE_BIT_FUNC(mkclean, &=~_PAGE_D); | ||
| 290 | PTE_BIT_FUNC(mkdirty, |=_PAGE_D); | ||
| 291 | PTE_BIT_FUNC(mkold, &=~_PAGE_YOUNG); | ||
| 292 | PTE_BIT_FUNC(mkyoung, |=_PAGE_YOUNG); | ||
| 293 | static inline int pte_special(pte_t pte) | ||
| 294 | { | ||
| 295 | return 0; | ||
| 296 | } | ||
| 297 | |||
| 298 | static inline pte_t pte_mkspecial(pte_t pte) | ||
| 299 | { | ||
| 300 | return pte; | ||
| 301 | } | ||
| 302 | |||
| 303 | /* | ||
| 304 | * Mark the prot value as uncacheable and unbufferable. | ||
| 305 | */ | ||
| 306 | #define pgprot_noncached(prot) __pgprot((pgprot_val(prot)&~_PAGE_C_MASK) | _PAGE_C_DEV) | ||
| 307 | #define pgprot_writecombine(prot) __pgprot((pgprot_val(prot)&~_PAGE_C_MASK) | _PAGE_C_DEV_WB) | ||
| 308 | |||
| 309 | #define pmd_none(pmd) (pmd_val(pmd)&0x1) | ||
| 310 | #define pmd_present(pmd) (!pmd_none(pmd)) | ||
| 311 | #define pmd_bad(pmd) pmd_none(pmd) | ||
| 312 | |||
| 313 | #define copy_pmd(pmdpd,pmdps) set_pmd((pmdpd), *(pmdps)) | ||
| 314 | #define pmd_clear(pmdp) set_pmd((pmdp), __pmd(1)) | ||
| 315 | |||
| 316 | static inline pmd_t __mk_pmd(pte_t * ptep, unsigned long prot) | ||
| 317 | { | ||
| 318 | unsigned long ptr = (unsigned long)ptep; | ||
| 319 | pmd_t pmd; | ||
| 320 | |||
| 321 | /* | ||
| 322 | * The pmd must be loaded with the physical | ||
| 323 | * address of the PTE table | ||
| 324 | */ | ||
| 325 | |||
| 326 | pmd_val(pmd) = __virt_to_phys(ptr) | prot; | ||
| 327 | return pmd; | ||
| 328 | } | ||
| 329 | |||
| 330 | #define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd))) | ||
| 331 | |||
| 332 | /* | ||
| 333 | * Permanent address of a page. We never have highmem, so this is trivial. | ||
| 334 | */ | ||
| 335 | #define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) | ||
| 336 | |||
| 337 | /* | ||
| 338 | * Conversion functions: convert a page and protection to a page entry, | ||
| 339 | * and a page entry and page directory to the page they refer to. | ||
| 340 | */ | ||
| 341 | #define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot) | ||
| 342 | |||
| 343 | /* | ||
| 344 | * The "pgd_xxx()" functions here are trivial for a folded two-level | ||
| 345 | * setup: the pgd is never bad, and a pmd always exists (as it's folded | ||
| 346 | * into the pgd entry) | ||
| 347 | */ | ||
| 348 | #define pgd_none(pgd) (0) | ||
| 349 | #define pgd_bad(pgd) (0) | ||
| 350 | #define pgd_present(pgd) (1) | ||
| 351 | #define pgd_clear(pgdp) do { } while (0) | ||
| 352 | |||
| 353 | #define page_pte_prot(page,prot) mk_pte(page, prot) | ||
| 354 | #define page_pte(page) mk_pte(page, __pgprot(0)) | ||
| 355 | /* | ||
| 356 | * L1PTE = $mr1 + ((virt >> PMD_SHIFT) << 2); | ||
| 357 | * L2PTE = (((virt >> PAGE_SHIFT) & (PTRS_PER_PTE -1 )) << 2); | ||
| 358 | * PPN = (phys & 0xfffff000); | ||
| 359 | * | ||
| 360 | */ | ||
| 361 | |||
| 362 | /* to find an entry in a page-table-directory */ | ||
| 363 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) | ||
| 364 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) | ||
| 365 | /* to find an entry in a kernel page-table-directory */ | ||
| 366 | #define pgd_offset_k(addr) pgd_offset(&init_mm, addr) | ||
| 367 | |||
| 368 | /* Find an entry in the second-level page table.. */ | ||
| 369 | #define pmd_offset(dir, addr) ((pmd_t *)(dir)) | ||
| 370 | |||
| 371 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | ||
| 372 | { | ||
| 373 | const unsigned long mask = 0xfff; | ||
| 374 | pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); | ||
| 375 | return pte; | ||
| 376 | } | ||
| 377 | |||
| 378 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | ||
| 379 | |||
| 380 | /* Encode and decode a swap entry. | ||
| 381 | * | ||
| 382 | * We support up to 32GB of swap on 4k machines | ||
| 383 | */ | ||
| 384 | #define __swp_type(x) (((x).val >> 2) & 0x7f) | ||
| 385 | #define __swp_offset(x) ((x).val >> 9) | ||
| 386 | #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) }) | ||
| 387 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) | ||
| 388 | #define __swp_entry_to_pte(swp) ((pte_t) { (swp).val }) | ||
| 389 | |||
| 390 | /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ | ||
| 391 | #define kern_addr_valid(addr) (1) | ||
| 392 | |||
| 393 | #include <asm-generic/pgtable.h> | ||
| 394 | |||
| 395 | /* | ||
| 396 | * We provide our own arch_get_unmapped_area to cope with VIPT caches. | ||
| 397 | */ | ||
| 398 | #define HAVE_ARCH_UNMAPPED_AREA | ||
| 399 | |||
| 400 | /* | ||
| 401 | * remap a physical address `phys' of size `size' with page protection `prot' | ||
| 402 | * into virtual address `from' | ||
| 403 | */ | ||
| 404 | |||
| 405 | #define pgtable_cache_init() do { } while (0) | ||
| 406 | |||
| 407 | #endif /* !__ASSEMBLY__ */ | ||
| 408 | |||
| 409 | #endif /* _ASMNDS32_PGTABLE_H */ | ||
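The swap-entry packing keeps the present bit clear, puts the type in bits [8:2] and the offset in bits [31:9]; with 4 KiB pages the 23 offset bits give the 32 GB mentioned in the comment above. A host-side sketch of the encode/decode round trip and that size calculation:

#include <stdio.h>

#define __swp_entry(type, offset)	(((type) << 2) | ((offset) << 9))
#define __swp_type(x)			(((x) >> 2) & 0x7f)
#define __swp_offset(x)			((x) >> 9)

int main(void)
{
	unsigned long e = __swp_entry(3UL, 0x12345UL);

	printf("type=%lu offset=%#lx\n", __swp_type(e), __swp_offset(e));

	/* 32 - 9 = 23 offset bits; 2^23 pages * 4 KiB = 32 GiB */
	printf("max swap size = %llu GiB\n", ((1ULL << 23) * 4096ULL) >> 30);
	return 0;
}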
diff --git a/arch/nds32/include/asm/proc-fns.h b/arch/nds32/include/asm/proc-fns.h new file mode 100644 index 000000000000..bedc4f59e064 --- /dev/null +++ b/arch/nds32/include/asm/proc-fns.h | |||
| @@ -0,0 +1,44 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __NDS32_PROCFNS_H__ | ||
| 5 | #define __NDS32_PROCFNS_H__ | ||
| 6 | |||
| 7 | #ifdef __KERNEL__ | ||
| 8 | #include <asm/page.h> | ||
| 9 | |||
| 10 | struct mm_struct; | ||
| 11 | struct vm_area_struct; | ||
| 12 | extern void cpu_proc_init(void); | ||
| 13 | extern void cpu_proc_fin(void); | ||
| 14 | extern void cpu_do_idle(void); | ||
| 15 | extern void cpu_reset(unsigned long reset); | ||
| 16 | extern void cpu_switch_mm(struct mm_struct *mm); | ||
| 17 | |||
| 18 | extern void cpu_dcache_inval_all(void); | ||
| 19 | extern void cpu_dcache_wbinval_all(void); | ||
| 20 | extern void cpu_dcache_inval_page(unsigned long page); | ||
| 21 | extern void cpu_dcache_wb_page(unsigned long page); | ||
| 22 | extern void cpu_dcache_wbinval_page(unsigned long page); | ||
| 23 | extern void cpu_dcache_inval_range(unsigned long start, unsigned long end); | ||
| 24 | extern void cpu_dcache_wb_range(unsigned long start, unsigned long end); | ||
| 25 | extern void cpu_dcache_wbinval_range(unsigned long start, unsigned long end); | ||
| 26 | |||
| 27 | extern void cpu_icache_inval_all(void); | ||
| 28 | extern void cpu_icache_inval_page(unsigned long page); | ||
| 29 | extern void cpu_icache_inval_range(unsigned long start, unsigned long end); | ||
| 30 | |||
| 31 | extern void cpu_cache_wbinval_page(unsigned long page, int flushi); | ||
| 32 | extern void cpu_cache_wbinval_range(unsigned long start, | ||
| 33 | unsigned long end, int flushi); | ||
| 34 | extern void cpu_cache_wbinval_range_check(struct vm_area_struct *vma, | ||
| 35 | unsigned long start, | ||
| 36 | unsigned long end, bool flushi, | ||
| 37 | bool wbd); | ||
| 38 | |||
| 39 | extern void cpu_dma_wb_range(unsigned long start, unsigned long end); | ||
| 40 | extern void cpu_dma_inval_range(unsigned long start, unsigned long end); | ||
| 41 | extern void cpu_dma_wbinval_range(unsigned long start, unsigned long end); | ||
| 42 | |||
| 43 | #endif /* __KERNEL__ */ | ||
| 44 | #endif /* __NDS32_PROCFNS_H__ */ | ||
diff --git a/arch/nds32/include/asm/processor.h b/arch/nds32/include/asm/processor.h new file mode 100644 index 000000000000..9c83caf4269f --- /dev/null +++ b/arch/nds32/include/asm/processor.h | |||
| @@ -0,0 +1,103 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __ASM_NDS32_PROCESSOR_H | ||
| 5 | #define __ASM_NDS32_PROCESSOR_H | ||
| 6 | |||
| 7 | /* | ||
| 8 | * Default implementation of macro that returns current | ||
| 9 | * instruction pointer ("program counter"). | ||
| 10 | */ | ||
| 11 | #define current_text_addr() ({ __label__ _l; _l: &&_l;}) | ||
| 12 | |||
| 13 | #ifdef __KERNEL__ | ||
| 14 | |||
| 15 | #include <asm/ptrace.h> | ||
| 16 | #include <asm/types.h> | ||
| 17 | #include <asm/sigcontext.h> | ||
| 18 | |||
| 19 | #define KERNEL_STACK_SIZE PAGE_SIZE | ||
| 20 | #define STACK_TOP TASK_SIZE | ||
| 21 | #define STACK_TOP_MAX TASK_SIZE | ||
| 22 | |||
| 23 | struct cpu_context { | ||
| 24 | unsigned long r6; | ||
| 25 | unsigned long r7; | ||
| 26 | unsigned long r8; | ||
| 27 | unsigned long r9; | ||
| 28 | unsigned long r10; | ||
| 29 | unsigned long r11; | ||
| 30 | unsigned long r12; | ||
| 31 | unsigned long r13; | ||
| 32 | unsigned long r14; | ||
| 33 | unsigned long fp; | ||
| 34 | unsigned long pc; | ||
| 35 | unsigned long sp; | ||
| 36 | }; | ||
| 37 | |||
| 38 | struct thread_struct { | ||
| 39 | struct cpu_context cpu_context; /* cpu context */ | ||
| 40 | /* fault info */ | ||
| 41 | unsigned long address; | ||
| 42 | unsigned long trap_no; | ||
| 43 | unsigned long error_code; | ||
| 44 | }; | ||
| 45 | |||
| 46 | #define INIT_THREAD { } | ||
| 47 | |||
| 48 | #ifdef __NDS32_EB__ | ||
| 49 | #define PSW_DE PSW_mskBE | ||
| 50 | #else | ||
| 51 | #define PSW_DE 0x0 | ||
| 52 | #endif | ||
| 53 | |||
| 54 | #ifdef CONFIG_WBNA | ||
| 55 | #define PSW_valWBNA PSW_mskWBNA | ||
| 56 | #else | ||
| 57 | #define PSW_valWBNA 0x0 | ||
| 58 | #endif | ||
| 59 | |||
| 60 | #ifdef CONFIG_HWZOL | ||
| 61 | #define PSW_valINIT (PSW_CPL_ANY | PSW_mskAEN | PSW_valWBNA | PSW_mskDT | PSW_mskIT | PSW_DE | PSW_mskGIE) | ||
| 62 | #else | ||
| 63 | #define PSW_valINIT (PSW_CPL_ANY | PSW_valWBNA | PSW_mskDT | PSW_mskIT | PSW_DE | PSW_mskGIE) | ||
| 64 | #endif | ||
| 65 | |||
| 66 | #define start_thread(regs,pc,stack) \ | ||
| 67 | ({ \ | ||
| 68 | memzero(regs, sizeof(struct pt_regs)); \ | ||
| 69 | forget_syscall(regs); \ | ||
| 70 | regs->ipsw = PSW_valINIT; \ | ||
| 71 | regs->ir0 = (PSW_CPL_ANY | PSW_valWBNA | PSW_mskDT | PSW_mskIT | PSW_DE | PSW_SYSTEM | PSW_INTL_1); \ | ||
| 72 | regs->ipc = pc; \ | ||
| 73 | regs->sp = stack; \ | ||
| 74 | }) | ||
| 75 | |||
| 76 | /* Forward declaration, a strange C thing */ | ||
| 77 | struct task_struct; | ||
| 78 | |||
| 79 | /* Free all resources held by a thread. */ | ||
| 80 | #define release_thread(thread) do { } while(0) | ||
| 81 | |||
| 82 | /* Prepare to copy thread state - unlazy all lazy status */ | ||
| 83 | #define prepare_to_copy(tsk) do { } while (0) | ||
| 84 | |||
| 85 | unsigned long get_wchan(struct task_struct *p); | ||
| 86 | |||
| 87 | #define cpu_relax() barrier() | ||
| 88 | |||
| 89 | #define task_pt_regs(task) \ | ||
| 90 | ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \ | ||
| 91 | - 8) - 1) | ||
| 92 | |||
| 93 | /* | ||
| 94 | * Create a new kernel thread | ||
| 95 | */ | ||
| 96 | extern int kernel_thread(int (*fn) (void *), void *arg, unsigned long flags); | ||
| 97 | |||
| 98 | #define KSTK_EIP(tsk) instruction_pointer(task_pt_regs(tsk)) | ||
| 99 | #define KSTK_ESP(tsk) user_stack_pointer(task_pt_regs(tsk)) | ||
| 100 | |||
| 101 | #endif | ||
| 102 | |||
| 103 | #endif /* __ASM_NDS32_PROCESSOR_H */ | ||
diff --git a/arch/nds32/include/asm/ptrace.h b/arch/nds32/include/asm/ptrace.h new file mode 100644 index 000000000000..c4538839055c --- /dev/null +++ b/arch/nds32/include/asm/ptrace.h | |||
| @@ -0,0 +1,77 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __ASM_NDS32_PTRACE_H | ||
| 5 | #define __ASM_NDS32_PTRACE_H | ||
| 6 | |||
| 7 | #include <uapi/asm/ptrace.h> | ||
| 8 | |||
| 9 | /* | ||
| 10 | * If pt_regs.syscallno == NO_SYSCALL, then the thread is not executing | ||
| 11 | * a syscall -- i.e., its most recent entry into the kernel from | ||
| 12 | * userspace was not via syscall, or otherwise a tracer cancelled the | ||
| 13 | * syscall. | ||
| 14 | * | ||
| 15 | * This must have the value -1, for ABI compatibility with ptrace etc. | ||
| 16 | */ | ||
| 17 | #define NO_SYSCALL (-1) | ||
| 18 | #ifndef __ASSEMBLY__ | ||
| 19 | #include <linux/types.h> | ||
| 20 | |||
| 21 | struct pt_regs { | ||
| 22 | union { | ||
| 23 | struct user_pt_regs user_regs; | ||
| 24 | struct { | ||
| 25 | long uregs[26]; | ||
| 26 | long fp; | ||
| 27 | long gp; | ||
| 28 | long lp; | ||
| 29 | long sp; | ||
| 30 | long ipc; | ||
| 31 | #if defined(CONFIG_HWZOL) | ||
| 32 | long lb; | ||
| 33 | long le; | ||
| 34 | long lc; | ||
| 35 | #else | ||
| 36 | long dummy[3]; | ||
| 37 | #endif | ||
| 38 | long syscallno; | ||
| 39 | }; | ||
| 40 | }; | ||
| 41 | long orig_r0; | ||
| 42 | long ir0; | ||
| 43 | long ipsw; | ||
| 44 | long pipsw; | ||
| 45 | long pipc; | ||
| 46 | long pp0; | ||
| 47 | long pp1; | ||
| 48 | long fucop_ctl; | ||
| 49 | long osp; | ||
| 50 | }; | ||
| 51 | |||
| 52 | static inline bool in_syscall(struct pt_regs const *regs) | ||
| 53 | { | ||
| 54 | return regs->syscallno != NO_SYSCALL; | ||
| 55 | } | ||
| 56 | |||
| 57 | static inline void forget_syscall(struct pt_regs *regs) | ||
| 58 | { | ||
| 59 | regs->syscallno = NO_SYSCALL; | ||
| 60 | } | ||
| 61 | static inline unsigned long regs_return_value(struct pt_regs *regs) | ||
| 62 | { | ||
| 63 | return regs->uregs[0]; | ||
| 64 | } | ||
| 65 | extern void show_regs(struct pt_regs *); | ||
| 66 | /* Avoid circular header include via sched.h */ | ||
| 67 | struct task_struct; | ||
| 68 | |||
| 69 | #define arch_has_single_step() (1) | ||
| 70 | #define user_mode(regs) (((regs)->ipsw & PSW_mskPOM) == 0) | ||
| 71 | #define interrupts_enabled(regs) (!!((regs)->ipsw & PSW_mskGIE)) | ||
| 72 | #define user_stack_pointer(regs) ((regs)->sp) | ||
| 73 | #define instruction_pointer(regs) ((regs)->ipc) | ||
| 74 | #define profile_pc(regs) instruction_pointer(regs) | ||
| 75 | |||
| 76 | #endif /* __ASSEMBLY__ */ | ||
| 77 | #endif | ||
diff --git a/arch/nds32/include/asm/shmparam.h b/arch/nds32/include/asm/shmparam.h new file mode 100644 index 000000000000..fd1cff64b68e --- /dev/null +++ b/arch/nds32/include/asm/shmparam.h | |||
| @@ -0,0 +1,19 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef _ASMNDS32_SHMPARAM_H | ||
| 5 | #define _ASMNDS32_SHMPARAM_H | ||
| 6 | |||
| 7 | /* | ||
| 8 | * This should be the size of the virtually indexed cache divided | ||
| 9 | * by its number of ways, since the cache aliases every size/ways | ||
| 10 | * bytes. | ||
| 11 | */ | ||
| 12 | #define SHMLBA (4 * SZ_8K) /* attach addr a multiple of this */ | ||
| 13 | |||
| 14 | /* | ||
| 15 | * Enforce SHMLBA in shmat | ||
| 16 | */ | ||
| 17 | #define __ARCH_FORCE_SHMLBA | ||
| 18 | |||
| 19 | #endif /* _ASMNDS32_SHMPARAM_H */ | ||
diff --git a/arch/nds32/include/asm/string.h b/arch/nds32/include/asm/string.h new file mode 100644 index 000000000000..179272caa540 --- /dev/null +++ b/arch/nds32/include/asm/string.h | |||
| @@ -0,0 +1,17 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __ASM_NDS32_STRING_H | ||
| 5 | #define __ASM_NDS32_STRING_H | ||
| 6 | |||
| 7 | #define __HAVE_ARCH_MEMCPY | ||
| 8 | extern void *memcpy(void *, const void *, __kernel_size_t); | ||
| 9 | |||
| 10 | #define __HAVE_ARCH_MEMMOVE | ||
| 11 | extern void *memmove(void *, const void *, __kernel_size_t); | ||
| 12 | |||
| 13 | #define __HAVE_ARCH_MEMSET | ||
| 14 | extern void *memset(void *, int, __kernel_size_t); | ||
| 15 | |||
| 16 | extern void *memzero(void *ptr, __kernel_size_t n); | ||
| 17 | #endif | ||
diff --git a/arch/nds32/include/asm/swab.h b/arch/nds32/include/asm/swab.h new file mode 100644 index 000000000000..e01a755a37d2 --- /dev/null +++ b/arch/nds32/include/asm/swab.h | |||
| @@ -0,0 +1,35 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __NDS32_SWAB_H__ | ||
| 5 | #define __NDS32_SWAB_H__ | ||
| 6 | |||
| 7 | #include <linux/types.h> | ||
| 8 | #include <linux/compiler.h> | ||
| 9 | |||
| 10 | static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) | ||
| 11 | { | ||
| 12 | __asm__("wsbh %0, %0\n\t" /* word swap byte within halfword */ | ||
| 13 | "rotri %0, %0, #16\n" | ||
| 14 | :"=r"(x) | ||
| 15 | :"0"(x)); | ||
| 16 | return x; | ||
| 17 | } | ||
| 18 | |||
| 19 | static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x) | ||
| 20 | { | ||
| 21 | __asm__("wsbh %0, %0\n" /* word swap byte within halfword */ | ||
| 22 | :"=r"(x) | ||
| 23 | :"0"(x)); | ||
| 24 | return x; | ||
| 25 | } | ||
| 26 | |||
| 27 | #define __arch_swab32(x) ___arch__swab32(x) | ||
| 28 | #define __arch_swab16(x) ___arch__swab16(x) | ||
| 29 | |||
| 30 | #if !defined(__STRICT_ANSI__) || defined(__KERNEL__) | ||
| 31 | #define __BYTEORDER_HAS_U64__ | ||
| 32 | #define __SWAB_64_THRU_32__ | ||
| 33 | #endif | ||
| 34 | |||
| 35 | #endif /* __NDS32_SWAB_H__ */ | ||
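The wsbh/rotri pair is equivalent to the generic byte reversal; a plain-C restatement, useful as a cross-check of the inline assembly (illustrative helper, not part of the patch):

    /* Equivalent of ___arch__swab32(): wsbh swaps the bytes inside each 16-bit
     * half (AB CD -> BA DC), and the rotate by 16 then exchanges the two
     * halves, giving the full reversal DC BA. */
    static inline __u32 example_swab32(__u32 x)
    {
    	__u32 h = ((x & 0x00ff00ffU) << 8) | ((x & 0xff00ff00U) >> 8); /* wsbh */
    	return (h << 16) | (h >> 16);                                  /* rotri 16 */
    }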
diff --git a/arch/nds32/include/asm/syscall.h b/arch/nds32/include/asm/syscall.h new file mode 100644 index 000000000000..f7e5e86765fe --- /dev/null +++ b/arch/nds32/include/asm/syscall.h | |||
| @@ -0,0 +1,188 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. | ||
| 3 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 4 | |||
| 5 | #ifndef _ASM_NDS32_SYSCALL_H | ||
| 6 | #define _ASM_NDS32_SYSCALL_H 1 | ||
| 7 | |||
| 8 | #include <linux/err.h> | ||
| 9 | struct task_struct; | ||
| 10 | struct pt_regs; | ||
| 11 | |||
| 12 | /** | ||
| 13 | * syscall_get_nr - find what system call a task is executing | ||
| 14 | * @task: task of interest, must be blocked | ||
| 15 | * @regs: task_pt_regs() of @task | ||
| 16 | * | ||
| 17 | * If @task is executing a system call or is at system call | ||
| 18 | * tracing about to attempt one, returns the system call number. | ||
| 19 | * If @task is not executing a system call, i.e. it's blocked | ||
| 20 | * inside the kernel for a fault or signal, returns -1. | ||
| 21 | * | ||
| 22 | * Note this returns int even on 64-bit machines. Only 32 bits of | ||
| 23 | * system call number can be meaningful. If the actual arch value | ||
| 24 | * is 64 bits, this truncates to 32 bits so 0xffffffff means -1. | ||
| 25 | * | ||
| 26 | * It's only valid to call this when @task is known to be blocked. | ||
| 27 | */ | ||
| 28 | static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) | ||
| 29 | { | ||
| 30 | return regs->syscallno; | ||
| 31 | } | ||
| 32 | |||
| 33 | /** | ||
| 34 | * syscall_rollback - roll back registers after an aborted system call | ||
| 35 | * @task: task of interest, must be in system call exit tracing | ||
| 36 | * @regs: task_pt_regs() of @task | ||
| 37 | * | ||
| 38 | * It's only valid to call this when @task is stopped for system | ||
| 39 | * call exit tracing (due to TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT), | ||
| 40 | * after tracehook_report_syscall_entry() returned nonzero to prevent | ||
| 41 | * the system call from taking place. | ||
| 42 | * | ||
| 43 | * This rolls back the register state in @regs so it's as if the | ||
| 44 | * system call instruction was a no-op. The registers containing | ||
| 45 | * the system call number and arguments are as they were before the | ||
| 46 | * system call instruction. This may not be the same as what the | ||
| 47 | * register state looked like at system call entry tracing. | ||
| 48 | */ | ||
| 49 | static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) | ||
| 50 | { | ||
| 51 | regs->uregs[0] = regs->orig_r0; | ||
| 52 | } | ||
| 53 | |||
| 54 | /** | ||
| 55 | * syscall_get_error - check result of traced system call | ||
| 56 | * @task: task of interest, must be blocked | ||
| 57 | * @regs: task_pt_regs() of @task | ||
| 58 | * | ||
| 59 | * Returns 0 if the system call succeeded, or -ERRORCODE if it failed. | ||
| 60 | * | ||
| 61 | * It's only valid to call this when @task is stopped for tracing on exit | ||
| 62 | * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | ||
| 63 | */ | ||
| 64 | static inline long syscall_get_error(struct task_struct *task, struct pt_regs *regs) | ||
| 65 | { | ||
| 66 | unsigned long error = regs->uregs[0]; | ||
| 67 | return IS_ERR_VALUE(error) ? error : 0; | ||
| 68 | } | ||
| 69 | |||
| 70 | /** | ||
| 71 | * syscall_get_return_value - get the return value of a traced system call | ||
| 72 | * @task: task of interest, must be blocked | ||
| 73 | * @regs: task_pt_regs() of @task | ||
| 74 | * | ||
| 75 | * Returns the return value of the successful system call. | ||
| 76 | * This value is meaningless if syscall_get_error() returned nonzero. | ||
| 77 | * | ||
| 78 | * It's only valid to call this when @task is stopped for tracing on exit | ||
| 79 | * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | ||
| 80 | */ | ||
| 81 | static inline long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) | ||
| 82 | { | ||
| 83 | return regs->uregs[0]; | ||
| 84 | } | ||
| 85 | |||
| 86 | /** | ||
| 87 | * syscall_set_return_value - change the return value of a traced system call | ||
| 88 | * @task: task of interest, must be blocked | ||
| 89 | * @regs: task_pt_regs() of @task | ||
| 90 | * @error: negative error code, or zero to indicate success | ||
| 91 | * @val: user return value if @error is zero | ||
| 92 | * | ||
| 93 | * This changes the results of the system call that user mode will see. | ||
| 94 | * If @error is zero, the user sees a successful system call with a | ||
| 95 | * return value of @val. If @error is nonzero, it's a negated errno | ||
| 96 | * code; the user sees a failed system call with this errno code. | ||
| 97 | * | ||
| 98 | * It's only valid to call this when @task is stopped for tracing on exit | ||
| 99 | * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | ||
| 100 | */ | ||
| 101 | static inline void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, | ||
| 102 | int error, long val) | ||
| 103 | { | ||
| 104 | regs->uregs[0] = (long)error ? error : val; | ||
| 105 | } | ||
| 106 | |||
| 107 | /** | ||
| 108 | * syscall_get_arguments - extract system call parameter values | ||
| 109 | * @task: task of interest, must be blocked | ||
| 110 | * @regs: task_pt_regs() of @task | ||
| 111 | * @i: argument index [0,5] | ||
| 112 | * @n: number of arguments; n+i must be [1,6]. | ||
| 113 | * @args: array filled with argument values | ||
| 114 | * | ||
| 115 | * Fetches @n arguments to the system call starting with the @i'th argument | ||
| 116 | * (from 0 through 5). Argument @i is stored in @args[0], and so on. | ||
| 117 | * An arch inline version is probably optimal when @i and @n are constants. | ||
| 118 | * | ||
| 119 | * It's only valid to call this when @task is stopped for tracing on | ||
| 120 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | ||
| 121 | * It's invalid to call this with @i + @n > 6; we only support system calls | ||
| 122 | * taking up to 6 arguments. | ||
| 123 | */ | ||
| 124 | #define SYSCALL_MAX_ARGS 6 | ||
| 125 | static inline void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, | ||
| 126 | unsigned int i, unsigned int n, unsigned long *args) | ||
| 127 | { | ||
| 128 | if (n == 0) | ||
| 129 | return; | ||
| 130 | if (i + n > SYSCALL_MAX_ARGS) { | ||
| 131 | unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i; | ||
| 132 | unsigned int n_bad = n + i - SYSCALL_MAX_ARGS; | ||
| 133 | pr_warn("%s called with max args %d, handling only %d\n", | ||
| 134 | __func__, i + n, SYSCALL_MAX_ARGS); | ||
| 135 | memset(args_bad, 0, n_bad * sizeof(args[0])); | ||
| 137 | } | ||
| 138 | |||
| 139 | if (i == 0) { | ||
| 140 | args[0] = regs->orig_r0; | ||
| 141 | args++; | ||
| 142 | i++; | ||
| 143 | n--; | ||
| 144 | } | ||
| 145 | |||
| 146 | memcpy(args, ®s->uregs[0] + i, n * sizeof(args[0])); | ||
| 147 | } | ||
| 148 | |||
| 149 | /** | ||
| 150 | * syscall_set_arguments - change system call parameter value | ||
| 151 | * @task: task of interest, must be in system call entry tracing | ||
| 152 | * @regs: task_pt_regs() of @task | ||
| 153 | * @i: argument index [0,5] | ||
| 154 | * @n: number of arguments; n+i must be [1,6]. | ||
| 155 | * @args: array of argument values to store | ||
| 156 | * | ||
| 157 | * Changes @n arguments to the system call starting with the @i'th argument. | ||
| 158 | * Argument @i gets value @args[0], and so on. | ||
| 159 | * An arch inline version is probably optimal when @i and @n are constants. | ||
| 160 | * | ||
| 161 | * It's only valid to call this when @task is stopped for tracing on | ||
| 162 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | ||
| 163 | * It's invalid to call this with @i + @n > 6; we only support system calls | ||
| 164 | * taking up to 6 arguments. | ||
| 165 | */ | ||
| 166 | static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, | ||
| 167 | unsigned int i, unsigned int n, | ||
| 168 | const unsigned long *args) | ||
| 169 | { | ||
| 170 | if (n == 0) | ||
| 171 | return; | ||
| 172 | |||
| 173 | if (i + n > SYSCALL_MAX_ARGS) { | ||
| 174 | pr_warn("%s called with max args %d, handling only %d\n", | ||
| 175 | __func__, i + n, SYSCALL_MAX_ARGS); | ||
| 176 | n = SYSCALL_MAX_ARGS - i; | ||
| 177 | } | ||
| 178 | |||
| 179 | if (i == 0) { | ||
| 180 | regs->orig_r0 = args[0]; | ||
| 181 | args++; | ||
| 182 | i++; | ||
| 183 | n--; | ||
| 184 | } | ||
| 185 | |||
| 186 | memcpy(®s->uregs[0] + i, args, n * sizeof(args[0])); | ||
| 187 | } | ||
| 188 | #endif /* _ASM_NDS32_SYSCALL_H */ | ||
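These accessors are the hooks that generic ptrace, seccomp and audit code calls to inspect a stopped task. A hedged sketch of a caller, using only the interfaces defined above (the function itself is hypothetical):

    /* Sketch: read the syscall number and the first three arguments of a
     * task stopped at syscall-entry tracing. */
    static void example_trace_entry(struct task_struct *task, struct pt_regs *regs)
    {
    	unsigned long args[3];
    	int nr = syscall_get_nr(task, regs);

    	syscall_get_arguments(task, regs, 0, 3, args);
    	pr_debug("sys_%d(%lx, %lx, %lx)\n", nr, args[0], args[1], args[2]);
    }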
diff --git a/arch/nds32/include/asm/syscalls.h b/arch/nds32/include/asm/syscalls.h new file mode 100644 index 000000000000..78778ecff60c --- /dev/null +++ b/arch/nds32/include/asm/syscalls.h | |||
| @@ -0,0 +1,13 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __ASM_NDS32_SYSCALLS_H | ||
| 5 | #define __ASM_NDS32_SYSCALLS_H | ||
| 6 | |||
| 7 | asmlinkage long sys_cacheflush(unsigned long addr, unsigned long len, unsigned int op); | ||
| 8 | asmlinkage long sys_fadvise64_64_wrapper(int fd, int advice, loff_t offset, loff_t len); | ||
| 9 | asmlinkage long sys_rt_sigreturn_wrapper(void); | ||
| 10 | |||
| 11 | #include <asm-generic/syscalls.h> | ||
| 12 | |||
| 13 | #endif /* __ASM_NDS32_SYSCALLS_H */ | ||
diff --git a/arch/nds32/include/asm/thread_info.h b/arch/nds32/include/asm/thread_info.h new file mode 100644 index 000000000000..bff741ff337b --- /dev/null +++ b/arch/nds32/include/asm/thread_info.h | |||
| @@ -0,0 +1,76 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __ASM_NDS32_THREAD_INFO_H | ||
| 5 | #define __ASM_NDS32_THREAD_INFO_H | ||
| 6 | |||
| 7 | #ifdef __KERNEL__ | ||
| 8 | |||
| 9 | #define THREAD_SIZE_ORDER (1) | ||
| 10 | #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) | ||
| 11 | |||
| 12 | #ifndef __ASSEMBLY__ | ||
| 13 | |||
| 14 | struct task_struct; | ||
| 15 | |||
| 16 | #include <asm/ptrace.h> | ||
| 17 | #include <asm/types.h> | ||
| 18 | |||
| 19 | typedef unsigned long mm_segment_t; | ||
| 20 | |||
| 21 | /* | ||
| 22 | * low level task data that entry.S needs immediate access to. | ||
| 23 | * __switch_to() assumes cpu_context follows immediately after cpu_domain. | ||
| 24 | */ | ||
| 25 | struct thread_info { | ||
| 26 | unsigned long flags; /* low level flags */ | ||
| 27 | __s32 preempt_count; /* 0 => preemptable, <0 => bug */ | ||
| 28 | mm_segment_t addr_limit; /* address limit */ | ||
| 29 | }; | ||
| 30 | #define INIT_THREAD_INFO(tsk) \ | ||
| 31 | { \ | ||
| 32 | .preempt_count = INIT_PREEMPT_COUNT, \ | ||
| 33 | .addr_limit = KERNEL_DS, \ | ||
| 34 | } | ||
| 35 | #define thread_saved_pc(tsk) ((unsigned long)(tsk->thread.cpu_context.pc)) | ||
| 36 | #define thread_saved_fp(tsk) ((unsigned long)(tsk->thread.cpu_context.fp)) | ||
| 37 | #endif | ||
| 38 | |||
| 39 | /* | ||
| 40 | * thread information flags: | ||
| 41 | * TIF_SYSCALL_TRACE - syscall trace active | ||
| 42 | * TIF_SIGPENDING - signal pending | ||
| 43 | * TIF_NEED_RESCHED - rescheduling necessary | ||
| 44 | * TIF_NOTIFY_RESUME - callback before returning to user | ||
| 45 | * TIF_USEDFPU - FPU was used by this task this quantum (SMP) | ||
| 46 | * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED | ||
| 47 | */ | ||
| 48 | #define TIF_SIGPENDING 1 | ||
| 49 | #define TIF_NEED_RESCHED 2 | ||
| 50 | #define TIF_SINGLESTEP 3 | ||
| 51 | #define TIF_NOTIFY_RESUME 4 /* callback before returning to user */ | ||
| 52 | #define TIF_SYSCALL_TRACE 8 | ||
| 53 | #define TIF_USEDFPU 16 | ||
| 54 | #define TIF_POLLING_NRFLAG 17 | ||
| 55 | #define TIF_MEMDIE 18 | ||
| 56 | #define TIF_FREEZE 19 | ||
| 57 | #define TIF_RESTORE_SIGMASK 20 | ||
| 58 | |||
| 59 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | ||
| 60 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) | ||
| 61 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | ||
| 62 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) | ||
| 63 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | ||
| 64 | #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) | ||
| 65 | #define _TIF_FREEZE (1 << TIF_FREEZE) | ||
| 66 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) | ||
| 67 | |||
| 68 | /* | ||
| 69 | * Change these and you break ASM code in entry-common.S | ||
| 70 | */ | ||
| 71 | #define _TIF_WORK_MASK 0x000000ff | ||
| 72 | #define _TIF_WORK_SYSCALL_ENTRY (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP) | ||
| 73 | #define _TIF_WORK_SYSCALL_LEAVE (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP) | ||
| 74 | |||
| 75 | #endif /* __KERNEL__ */ | ||
| 76 | #endif /* __ASM_NDS32_THREAD_INFO_H */ | ||
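The single-bit TIF numbers above are chosen so that everything the return-to-user path cares about fits in the low byte, letting the assembly in entry-common.S test it with one masked compare. A quick hedged illustration of how the masks compose (not part of the patch):

    /* _TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME
     *   = (1 << 1) | (1 << 2) | (1 << 4) = 0x16 */
    #define EXAMPLE_RETURN_WORK \
    	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
    /* BUILD_BUG_ON(EXAMPLE_RETURN_WORK & ~_TIF_WORK_MASK) compiles cleanly,
     * since the expression evaluates to 0: the work bits all sit below 0xff. */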
diff --git a/arch/nds32/include/asm/tlb.h b/arch/nds32/include/asm/tlb.h new file mode 100644 index 000000000000..b35ae5eae3ab --- /dev/null +++ b/arch/nds32/include/asm/tlb.h | |||
| @@ -0,0 +1,28 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __ASMNDS32_TLB_H | ||
| 5 | #define __ASMNDS32_TLB_H | ||
| 6 | |||
| 7 | #define tlb_start_vma(tlb,vma) \ | ||
| 8 | do { \ | ||
| 9 | if (!tlb->fullmm) \ | ||
| 10 | flush_cache_range(vma, vma->vm_start, vma->vm_end); \ | ||
| 11 | } while (0) | ||
| 12 | |||
| 13 | #define tlb_end_vma(tlb,vma) \ | ||
| 14 | do { \ | ||
| 15 | if (!tlb->fullmm) \ | ||
| 16 | flush_tlb_range(vma, vma->vm_start, vma->vm_end); \ | ||
| 17 | } while (0) | ||
| 18 | |||
| 19 | #define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0) | ||
| 20 | |||
| 21 | #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) | ||
| 22 | |||
| 23 | #include <asm-generic/tlb.h> | ||
| 24 | |||
| 25 | #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) | ||
| 26 | #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) | ||
| 27 | |||
| 28 | #endif | ||
diff --git a/arch/nds32/include/asm/tlbflush.h b/arch/nds32/include/asm/tlbflush.h new file mode 100644 index 000000000000..9b411f401903 --- /dev/null +++ b/arch/nds32/include/asm/tlbflush.h | |||
| @@ -0,0 +1,47 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef _ASMNDS32_TLBFLUSH_H | ||
| 5 | #define _ASMNDS32_TLBFLUSH_H | ||
| 6 | |||
| 7 | #include <linux/spinlock.h> | ||
| 8 | #include <linux/mm.h> | ||
| 9 | #include <nds32_intrinsic.h> | ||
| 10 | |||
| 11 | static inline void local_flush_tlb_all(void) | ||
| 12 | { | ||
| 13 | __nds32__tlbop_flua(); | ||
| 14 | __nds32__isb(); | ||
| 15 | } | ||
| 16 | |||
| 17 | static inline void local_flush_tlb_mm(struct mm_struct *mm) | ||
| 18 | { | ||
| 19 | __nds32__tlbop_flua(); | ||
| 20 | __nds32__isb(); | ||
| 21 | } | ||
| 22 | |||
| 23 | static inline void local_flush_tlb_kernel_range(unsigned long start, | ||
| 24 | unsigned long end) | ||
| 25 | { | ||
| 26 | while (start < end) { | ||
| 27 | __nds32__tlbop_inv(start); | ||
| 28 | __nds32__isb(); | ||
| 29 | start += PAGE_SIZE; | ||
| 30 | } | ||
| 31 | } | ||
| 32 | |||
| 33 | void local_flush_tlb_range(struct vm_area_struct *vma, | ||
| 34 | unsigned long start, unsigned long end); | ||
| 35 | void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); | ||
| 36 | |||
| 37 | #define flush_tlb_all local_flush_tlb_all | ||
| 38 | #define flush_tlb_mm local_flush_tlb_mm | ||
| 39 | #define flush_tlb_range local_flush_tlb_range | ||
| 40 | #define flush_tlb_page local_flush_tlb_page | ||
| 41 | #define flush_tlb_kernel_range local_flush_tlb_kernel_range | ||
| 42 | |||
| 43 | void update_mmu_cache(struct vm_area_struct *vma, | ||
| 44 | unsigned long address, pte_t * pte); | ||
| 45 | void tlb_migrate_finish(struct mm_struct *mm); | ||
| 46 | |||
| 47 | #endif | ||
diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h new file mode 100644 index 000000000000..18a009f3804d --- /dev/null +++ b/arch/nds32/include/asm/uaccess.h | |||
| @@ -0,0 +1,283 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef _ASMANDES_UACCESS_H | ||
| 5 | #define _ASMANDES_UACCESS_H | ||
| 6 | |||
| 7 | /* | ||
| 8 | * User space memory access functions | ||
| 9 | */ | ||
| 10 | #include <linux/sched.h> | ||
| 11 | #include <asm/errno.h> | ||
| 12 | #include <asm/memory.h> | ||
| 13 | #include <asm/types.h> | ||
| 14 | #include <linux/mm.h> | ||
| 15 | |||
| 16 | #define VERIFY_READ 0 | ||
| 17 | #define VERIFY_WRITE 1 | ||
| 18 | |||
| 19 | #define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" | ||
| 20 | |||
| 21 | /* | ||
| 22 | * The exception table consists of pairs of addresses: the first is the | ||
| 23 | * address of an instruction that is allowed to fault, and the second is | ||
| 24 | * the address at which the program should continue. No registers are | ||
| 25 | * modified, so it is entirely up to the continuation code to figure out | ||
| 26 | * what to do. | ||
| 27 | * | ||
| 28 | * All the routines below use bits of fixup code that are out of line | ||
| 29 | * with the main instruction path. This means when everything is well, | ||
| 30 | * we don't even have to jump over them. Further, they do not intrude | ||
| 31 | * on our cache or tlb entries. | ||
| 32 | */ | ||
| 33 | |||
| 34 | struct exception_table_entry { | ||
| 35 | unsigned long insn, fixup; | ||
| 36 | }; | ||
| 37 | |||
| 38 | extern int fixup_exception(struct pt_regs *regs); | ||
| 39 | |||
| 40 | #define KERNEL_DS ((mm_segment_t) { ~0UL }) | ||
| 41 | #define USER_DS ((mm_segment_t) {TASK_SIZE - 1}) | ||
| 42 | |||
| 43 | #define get_ds() (KERNEL_DS) | ||
| 44 | #define get_fs() (current_thread_info()->addr_limit) | ||
| 45 | #define user_addr_max get_fs | ||
| 46 | |||
| 47 | static inline void set_fs(mm_segment_t fs) | ||
| 48 | { | ||
| 49 | current_thread_info()->addr_limit = fs; | ||
| 50 | } | ||
| 51 | |||
| 52 | #define segment_eq(a, b) ((a) == (b)) | ||
| 53 | |||
| 54 | #define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() - size)) | ||
| 55 | |||
| 56 | #define access_ok(type, addr, size) \ | ||
| 57 | __range_ok((unsigned long)addr, (unsigned long)size) | ||
| 58 | /* | ||
| 59 | * Single-value transfer routines. They automatically use the right | ||
| 60 | * size if we just have the right pointer type. Note that the functions | ||
| 61 | * which read from user space (*get_*) need to take care not to leak | ||
| 62 | * kernel data even if the calling code is buggy and fails to check | ||
| 63 | * the return value. This means zeroing out the destination variable | ||
| 64 | * or buffer on error. Normally this is done out of line by the | ||
| 65 | * fixup code, but there are a few places where it intrudes on the | ||
| 66 | * main code path. When we only write to user space, there is no | ||
| 67 | * problem. | ||
| 68 | * | ||
| 69 | * The "__xxx" versions of the user access functions do not verify the | ||
| 70 | * address space - it must have been done previously with a separate | ||
| 71 | * "access_ok()" call. | ||
| 72 | * | ||
| 73 | * The "xxx_error" versions set the third argument to EFAULT if an | ||
| 74 | * error occurs, and leave it unchanged on success. Note that these | ||
| 75 | * versions are void (ie, don't return a value as such). | ||
| 76 | */ | ||
| 77 | |||
| 78 | #define get_user(x,p) \ | ||
| 79 | ({ \ | ||
| 80 | long __e = -EFAULT; \ | ||
| 81 | if (likely(access_ok(VERIFY_READ, p, sizeof(*p)))) { \ | ||
| 82 | __e = __get_user(x,p); \ | ||
| 83 | } else \ | ||
| 84 | x = 0; \ | ||
| 85 | __e; \ | ||
| 86 | }) | ||
| 87 | #define __get_user(x,ptr) \ | ||
| 88 | ({ \ | ||
| 89 | long __gu_err = 0; \ | ||
| 90 | __get_user_err((x),(ptr),__gu_err); \ | ||
| 91 | __gu_err; \ | ||
| 92 | }) | ||
| 93 | |||
| 94 | #define __get_user_error(x,ptr,err) \ | ||
| 95 | ({ \ | ||
| 96 | __get_user_err((x),(ptr),err); \ | ||
| 97 | (void) 0; \ | ||
| 98 | }) | ||
| 99 | |||
| 100 | #define __get_user_err(x,ptr,err) \ | ||
| 101 | do { \ | ||
| 102 | unsigned long __gu_addr = (unsigned long)(ptr); \ | ||
| 103 | unsigned long __gu_val; \ | ||
| 104 | __chk_user_ptr(ptr); \ | ||
| 105 | switch (sizeof(*(ptr))) { \ | ||
| 106 | case 1: \ | ||
| 107 | __get_user_asm("lbi",__gu_val,__gu_addr,err); \ | ||
| 108 | break; \ | ||
| 109 | case 2: \ | ||
| 110 | __get_user_asm("lhi",__gu_val,__gu_addr,err); \ | ||
| 111 | break; \ | ||
| 112 | case 4: \ | ||
| 113 | __get_user_asm("lwi",__gu_val,__gu_addr,err); \ | ||
| 114 | break; \ | ||
| 115 | case 8: \ | ||
| 116 | __get_user_asm_dword(__gu_val,__gu_addr,err); \ | ||
| 117 | break; \ | ||
| 118 | default: \ | ||
| 119 | BUILD_BUG(); \ | ||
| 120 | break; \ | ||
| 121 | } \ | ||
| 122 | (x) = (__typeof__(*(ptr)))__gu_val; \ | ||
| 123 | } while (0) | ||
| 124 | |||
| 125 | #define __get_user_asm(inst,x,addr,err) \ | ||
| 126 | asm volatile( \ | ||
| 127 | "1: "inst" %1,[%2]\n" \ | ||
| 128 | "2:\n" \ | ||
| 129 | " .section .fixup,\"ax\"\n" \ | ||
| 130 | " .align 2\n" \ | ||
| 131 | "3: move %0, %3\n" \ | ||
| 132 | " move %1, #0\n" \ | ||
| 133 | " b 2b\n" \ | ||
| 134 | " .previous\n" \ | ||
| 135 | " .section __ex_table,\"a\"\n" \ | ||
| 136 | " .align 3\n" \ | ||
| 137 | " .long 1b, 3b\n" \ | ||
| 138 | " .previous" \ | ||
| 139 | : "+r" (err), "=&r" (x) \ | ||
| 140 | : "r" (addr), "i" (-EFAULT) \ | ||
| 141 | : "cc") | ||
| 142 | |||
| 143 | #ifdef __NDS32_EB__ | ||
| 144 | #define __gu_reg_oper0 "%H1" | ||
| 145 | #define __gu_reg_oper1 "%L1" | ||
| 146 | #else | ||
| 147 | #define __gu_reg_oper0 "%L1" | ||
| 148 | #define __gu_reg_oper1 "%H1" | ||
| 149 | #endif | ||
| 150 | |||
| 151 | #define __get_user_asm_dword(x, addr, err) \ | ||
| 152 | asm volatile( \ | ||
| 153 | "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n" \ | ||
| 154 | "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n" \ | ||
| 155 | "3:\n" \ | ||
| 156 | " .section .fixup,\"ax\"\n" \ | ||
| 157 | " .align 2\n" \ | ||
| 158 | "4: move %0, %3\n" \ | ||
| 159 | " b 3b\n" \ | ||
| 160 | " .previous\n" \ | ||
| 161 | " .section __ex_table,\"a\"\n" \ | ||
| 162 | " .align 3\n" \ | ||
| 163 | " .long 1b, 4b\n" \ | ||
| 164 | " .long 2b, 4b\n" \ | ||
| 165 | " .previous" \ | ||
| 166 | : "+r"(err), "=&r"(x) \ | ||
| 167 | : "r"(addr), "i"(-EFAULT) \ | ||
| 168 | : "cc") | ||
| 169 | #define put_user(x,p) \ | ||
| 170 | ({ \ | ||
| 171 | long __e = -EFAULT; \ | ||
| 172 | if (likely(access_ok(VERIFY_WRITE, p, sizeof(*p)))) { \ | ||
| 173 | __e = __put_user(x,p); \ | ||
| 174 | } \ | ||
| 175 | __e; \ | ||
| 176 | }) | ||
| 177 | #define __put_user(x,ptr) \ | ||
| 178 | ({ \ | ||
| 179 | long __pu_err = 0; \ | ||
| 180 | __put_user_err((x),(ptr),__pu_err); \ | ||
| 181 | __pu_err; \ | ||
| 182 | }) | ||
| 183 | |||
| 184 | #define __put_user_error(x,ptr,err) \ | ||
| 185 | ({ \ | ||
| 186 | __put_user_err((x),(ptr),err); \ | ||
| 187 | (void) 0; \ | ||
| 188 | }) | ||
| 189 | |||
| 190 | #define __put_user_err(x,ptr,err) \ | ||
| 191 | do { \ | ||
| 192 | unsigned long __pu_addr = (unsigned long)(ptr); \ | ||
| 193 | __typeof__(*(ptr)) __pu_val = (x); \ | ||
| 194 | __chk_user_ptr(ptr); \ | ||
| 195 | switch (sizeof(*(ptr))) { \ | ||
| 196 | case 1: \ | ||
| 197 | __put_user_asm("sbi",__pu_val,__pu_addr,err); \ | ||
| 198 | break; \ | ||
| 199 | case 2: \ | ||
| 200 | __put_user_asm("shi",__pu_val,__pu_addr,err); \ | ||
| 201 | break; \ | ||
| 202 | case 4: \ | ||
| 203 | __put_user_asm("swi",__pu_val,__pu_addr,err); \ | ||
| 204 | break; \ | ||
| 205 | case 8: \ | ||
| 206 | __put_user_asm_dword(__pu_val,__pu_addr,err); \ | ||
| 207 | break; \ | ||
| 208 | default: \ | ||
| 209 | BUILD_BUG(); \ | ||
| 210 | break; \ | ||
| 211 | } \ | ||
| 212 | } while (0) | ||
| 213 | |||
| 214 | #define __put_user_asm(inst,x,addr,err) \ | ||
| 215 | asm volatile( \ | ||
| 216 | "1: "inst" %1,[%2]\n" \ | ||
| 217 | "2:\n" \ | ||
| 218 | " .section .fixup,\"ax\"\n" \ | ||
| 219 | " .align 2\n" \ | ||
| 220 | "3: move %0, %3\n" \ | ||
| 221 | " b 2b\n" \ | ||
| 222 | " .previous\n" \ | ||
| 223 | " .section __ex_table,\"a\"\n" \ | ||
| 224 | " .align 3\n" \ | ||
| 225 | " .long 1b, 3b\n" \ | ||
| 226 | " .previous" \ | ||
| 227 | : "+r" (err) \ | ||
| 228 | : "r" (x), "r" (addr), "i" (-EFAULT) \ | ||
| 229 | : "cc") | ||
| 230 | |||
| 231 | #ifdef __NDS32_EB__ | ||
| 232 | #define __pu_reg_oper0 "%H2" | ||
| 233 | #define __pu_reg_oper1 "%L2" | ||
| 234 | #else | ||
| 235 | #define __pu_reg_oper0 "%L2" | ||
| 236 | #define __pu_reg_oper1 "%H2" | ||
| 237 | #endif | ||
| 238 | |||
| 239 | #define __put_user_asm_dword(x, addr, err) \ | ||
| 240 | asm volatile( \ | ||
| 241 | "\n1:\tswi " __pu_reg_oper0 ",[%1]\n" \ | ||
| 242 | "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n" \ | ||
| 243 | "3:\n" \ | ||
| 244 | " .section .fixup,\"ax\"\n" \ | ||
| 245 | " .align 2\n" \ | ||
| 246 | "4: move %0, %3\n" \ | ||
| 247 | " b 3b\n" \ | ||
| 248 | " .previous\n" \ | ||
| 249 | " .section __ex_table,\"a\"\n" \ | ||
| 250 | " .align 3\n" \ | ||
| 251 | " .long 1b, 4b\n" \ | ||
| 252 | " .long 2b, 4b\n" \ | ||
| 253 | " .previous" \ | ||
| 254 | : "+r"(err) \ | ||
| 255 | : "r"(addr), "r"(x), "i"(-EFAULT) \ | ||
| 256 | : "cc") | ||
| 257 | extern unsigned long __arch_clear_user(void __user * addr, unsigned long n); | ||
| 258 | extern long strncpy_from_user(char *dest, const char __user * src, long count); | ||
| 259 | extern __must_check long strlen_user(const char __user * str); | ||
| 260 | extern __must_check long strnlen_user(const char __user * str, long n); | ||
| 261 | extern unsigned long __arch_copy_from_user(void *to, const void __user * from, | ||
| 262 | unsigned long n); | ||
| 263 | extern unsigned long __arch_copy_to_user(void __user * to, const void *from, | ||
| 264 | unsigned long n); | ||
| 265 | |||
| 266 | #define raw_copy_from_user __arch_copy_from_user | ||
| 267 | #define raw_copy_to_user __arch_copy_to_user | ||
| 268 | |||
| 269 | #define INLINE_COPY_FROM_USER | ||
| 270 | #define INLINE_COPY_TO_USER | ||
| 271 | static inline unsigned long clear_user(void __user * to, unsigned long n) | ||
| 272 | { | ||
| 273 | if (access_ok(VERIFY_WRITE, to, n)) | ||
| 274 | n = __arch_clear_user(to, n); | ||
| 275 | return n; | ||
| 276 | } | ||
| 277 | |||
| 278 | static inline unsigned long __clear_user(void __user * to, unsigned long n) | ||
| 279 | { | ||
| 280 | return __arch_clear_user(to, n); | ||
| 281 | } | ||
| 282 | |||
| 283 | #endif /* _ASMNDS32_UACCESS_H */ | ||
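The __ex_table pairs emitted by the asm above are what fixup_exception() walks when a user access faults; callers only ever see the -EFAULT result. A hedged usage sketch (the function is hypothetical, not from this series):

    /* Sketch: read a u32 from user space, modify it and write it back,
     * relying on the fixup machinery above to turn a bad pointer into -EFAULT. */
    static long example_touch_user(u32 __user *uptr)
    {
    	u32 val;

    	if (get_user(val, uptr))	/* access_ok() + lwi with a fixup entry */
    		return -EFAULT;
    	val |= 0x1;
    	if (put_user(val, uptr))	/* swi with a fixup entry */
    		return -EFAULT;
    	return 0;
    }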
diff --git a/arch/nds32/include/asm/unistd.h b/arch/nds32/include/asm/unistd.h new file mode 100644 index 000000000000..b586a2862beb --- /dev/null +++ b/arch/nds32/include/asm/unistd.h | |||
| @@ -0,0 +1,6 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #define __ARCH_WANT_SYS_CLONE | ||
| 5 | |||
| 6 | #include <uapi/asm/unistd.h> | ||
diff --git a/arch/nds32/include/asm/vdso.h b/arch/nds32/include/asm/vdso.h new file mode 100644 index 000000000000..af2c6afc2469 --- /dev/null +++ b/arch/nds32/include/asm/vdso.h | |||
| @@ -0,0 +1,24 @@ | |||
| 1 | /* | ||
| 2 | * SPDX-License-Identifier: GPL-2.0 | ||
| 3 | * Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 4 | */ | ||
| 5 | |||
| 6 | #ifndef __ASM_VDSO_H | ||
| 7 | #define __ASM_VDSO_H | ||
| 8 | |||
| 9 | #ifdef __KERNEL__ | ||
| 10 | |||
| 11 | #ifndef __ASSEMBLY__ | ||
| 12 | |||
| 13 | #include <generated/vdso-offsets.h> | ||
| 14 | |||
| 15 | #define VDSO_SYMBOL(base, name) \ | ||
| 16 | ({ \ | ||
| 17 | (unsigned long)(vdso_offset_##name + (unsigned long)(base)); \ | ||
| 18 | }) | ||
| 19 | |||
| 20 | #endif /* !__ASSEMBLY__ */ | ||
| 21 | |||
| 22 | #endif /* __KERNEL__ */ | ||
| 23 | |||
| 24 | #endif /* __ASM_VDSO_H */ | ||
diff --git a/arch/nds32/include/asm/vdso_datapage.h b/arch/nds32/include/asm/vdso_datapage.h new file mode 100644 index 000000000000..79db5a12ca5e --- /dev/null +++ b/arch/nds32/include/asm/vdso_datapage.h | |||
| @@ -0,0 +1,36 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2012 ARM Limited | ||
| 3 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 4 | #ifndef __ASM_VDSO_DATAPAGE_H | ||
| 5 | #define __ASM_VDSO_DATAPAGE_H | ||
| 6 | |||
| 7 | #ifdef __KERNEL__ | ||
| 8 | |||
| 9 | #ifndef __ASSEMBLY__ | ||
| 10 | |||
| 11 | struct vdso_data { | ||
| 12 | bool cycle_count_down; /* timer cycle counter decreases over time */ | ||
| 13 | u32 cycle_count_offset; /* offset of timer cycle counter register */ | ||
| 14 | u32 seq_count; /* sequence count - odd during updates */ | ||
| 15 | u32 xtime_coarse_sec; /* coarse time */ | ||
| 16 | u32 xtime_coarse_nsec; | ||
| 17 | |||
| 18 | u32 wtm_clock_sec; /* wall to monotonic offset */ | ||
| 19 | u32 wtm_clock_nsec; | ||
| 20 | u32 xtime_clock_sec; /* CLOCK_REALTIME - seconds */ | ||
| 21 | u32 cs_mult; /* clocksource multiplier */ | ||
| 22 | u32 cs_shift; /* Cycle to nanosecond divisor (power of two) */ | ||
| 23 | |||
| 24 | u64 cs_cycle_last; /* last cycle value */ | ||
| 25 | u64 cs_mask; /* clocksource mask */ | ||
| 26 | |||
| 27 | u64 xtime_clock_nsec; /* CLOCK_REALTIME sub-ns base */ | ||
| 28 | u32 tz_minuteswest; /* timezone info for gettimeofday(2) */ | ||
| 29 | u32 tz_dsttime; | ||
| 30 | }; | ||
| 31 | |||
| 32 | #endif /* !__ASSEMBLY__ */ | ||
| 33 | |||
| 34 | #endif /* __KERNEL__ */ | ||
| 35 | |||
| 36 | #endif /* __ASM_VDSO_DATAPAGE_H */ | ||
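seq_count implements the usual seqlock-style protocol: the kernel makes it odd while it rewrites the data page, so the vDSO side must spin and retry any read that overlaps an update. A hedged sketch of the reader half this layout assumes (names are illustrative):

    /* Reader protocol assumed by seq_count: wait out odd (in-progress) values,
     * then re-check after copying the fields of interest. */
    static inline u32 example_vdso_read_begin(const struct vdso_data *vd)
    {
    	u32 seq;

    	while ((seq = READ_ONCE(vd->seq_count)) & 1)
    		cpu_relax();		/* update in progress */
    	smp_rmb();
    	return seq;
    }

    static inline bool example_vdso_read_retry(const struct vdso_data *vd, u32 start)
    {
    	smp_rmb();
    	return READ_ONCE(vd->seq_count) != start;	/* true => retry the read */
    }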
diff --git a/arch/nds32/include/asm/vdso_timer_info.h b/arch/nds32/include/asm/vdso_timer_info.h new file mode 100644 index 000000000000..50ba117cff12 --- /dev/null +++ b/arch/nds32/include/asm/vdso_timer_info.h | |||
| @@ -0,0 +1,14 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | extern struct timer_info_t timer_info; | ||
| 5 | #define EMPTY_VALUE ~(0UL) | ||
| 6 | #define EMPTY_TIMER_MAPPING EMPTY_VALUE | ||
| 7 | #define EMPTY_REG_OFFSET EMPTY_VALUE | ||
| 8 | |||
| 9 | struct timer_info_t | ||
| 10 | { | ||
| 11 | bool cycle_count_down; | ||
| 12 | unsigned long mapping_base; | ||
| 13 | unsigned long cycle_count_reg_offset; | ||
| 14 | }; | ||
diff --git a/arch/nds32/include/uapi/asm/Kbuild b/arch/nds32/include/uapi/asm/Kbuild new file mode 100644 index 000000000000..40be972faf9e --- /dev/null +++ b/arch/nds32/include/uapi/asm/Kbuild | |||
| @@ -0,0 +1,29 @@ | |||
| 1 | # UAPI Header export list | ||
| 2 | include include/uapi/asm-generic/Kbuild.asm | ||
| 3 | |||
| 4 | generic-y += bpf_perf_event.h | ||
| 5 | generic-y += errno.h | ||
| 6 | generic-y += ioctl.h | ||
| 7 | generic-y += ioctls.h | ||
| 8 | generic-y += ipcbuf.h | ||
| 9 | generic-y += shmbuf.h | ||
| 10 | generic-y += bitsperlong.h | ||
| 11 | generic-y += fcntl.h | ||
| 12 | generic-y += stat.h | ||
| 13 | generic-y += mman.h | ||
| 14 | generic-y += msgbuf.h | ||
| 15 | generic-y += poll.h | ||
| 16 | generic-y += posix_types.h | ||
| 17 | generic-y += resource.h | ||
| 18 | generic-y += sembuf.h | ||
| 19 | generic-y += setup.h | ||
| 20 | generic-y += siginfo.h | ||
| 21 | generic-y += signal.h | ||
| 22 | generic-y += socket.h | ||
| 23 | generic-y += sockios.h | ||
| 24 | generic-y += swab.h | ||
| 25 | generic-y += statfs.h | ||
| 26 | generic-y += termbits.h | ||
| 27 | generic-y += termios.h | ||
| 28 | generic-y += types.h | ||
| 29 | generic-y += ucontext.h | ||
diff --git a/arch/nds32/include/uapi/asm/auxvec.h b/arch/nds32/include/uapi/asm/auxvec.h new file mode 100644 index 000000000000..56043ce4972f --- /dev/null +++ b/arch/nds32/include/uapi/asm/auxvec.h | |||
| @@ -0,0 +1,12 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __ASM_AUXVEC_H | ||
| 5 | #define __ASM_AUXVEC_H | ||
| 6 | |||
| 7 | /* VDSO location */ | ||
| 8 | #define AT_SYSINFO_EHDR 33 | ||
| 9 | |||
| 10 | #define AT_VECTOR_SIZE_ARCH 1 | ||
| 11 | |||
| 12 | #endif | ||
diff --git a/arch/nds32/include/uapi/asm/byteorder.h b/arch/nds32/include/uapi/asm/byteorder.h new file mode 100644 index 000000000000..a23f6f3a2468 --- /dev/null +++ b/arch/nds32/include/uapi/asm/byteorder.h | |||
| @@ -0,0 +1,13 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __NDS32_BYTEORDER_H__ | ||
| 5 | #define __NDS32_BYTEORDER_H__ | ||
| 6 | |||
| 7 | #ifdef __NDS32_EB__ | ||
| 8 | #include <linux/byteorder/big_endian.h> | ||
| 9 | #else | ||
| 10 | #include <linux/byteorder/little_endian.h> | ||
| 11 | #endif | ||
| 12 | |||
| 13 | #endif /* __NDS32_BYTEORDER_H__ */ | ||
diff --git a/arch/nds32/include/uapi/asm/cachectl.h b/arch/nds32/include/uapi/asm/cachectl.h new file mode 100644 index 000000000000..4cdca9b23974 --- /dev/null +++ b/arch/nds32/include/uapi/asm/cachectl.h | |||
| @@ -0,0 +1,14 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 1994, 1995, 1996 by Ralf Baechle | ||
| 3 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 4 | #ifndef _ASM_CACHECTL | ||
| 5 | #define _ASM_CACHECTL | ||
| 6 | |||
| 7 | /* | ||
| 8 | * Options for cacheflush system call | ||
| 9 | */ | ||
| 10 | #define ICACHE 0 /* flush instruction cache */ | ||
| 11 | #define DCACHE 1 /* writeback and flush data cache */ | ||
| 12 | #define BCACHE 2 /* flush instruction cache + writeback and flush data cache */ | ||
| 13 | |||
| 14 | #endif /* _ASM_CACHECTL */ | ||
diff --git a/arch/nds32/include/uapi/asm/param.h b/arch/nds32/include/uapi/asm/param.h new file mode 100644 index 000000000000..e3fb723ee362 --- /dev/null +++ b/arch/nds32/include/uapi/asm/param.h | |||
| @@ -0,0 +1,11 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __ASM_NDS32_PARAM_H | ||
| 5 | #define __ASM_NDS32_PARAM_H | ||
| 6 | |||
| 7 | #define EXEC_PAGESIZE 8192 | ||
| 8 | |||
| 9 | #include <asm-generic/param.h> | ||
| 10 | |||
| 11 | #endif /* __ASM_NDS32_PARAM_H */ | ||
diff --git a/arch/nds32/include/uapi/asm/ptrace.h b/arch/nds32/include/uapi/asm/ptrace.h new file mode 100644 index 000000000000..358c99e399d0 --- /dev/null +++ b/arch/nds32/include/uapi/asm/ptrace.h | |||
| @@ -0,0 +1,25 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef __UAPI_ASM_NDS32_PTRACE_H | ||
| 5 | #define __UAPI_ASM_NDS32_PTRACE_H | ||
| 6 | |||
| 7 | #ifndef __ASSEMBLY__ | ||
| 8 | |||
| 9 | /* | ||
| 10 | * User structures for general purpose register. | ||
| 11 | */ | ||
| 12 | struct user_pt_regs { | ||
| 13 | long uregs[26]; | ||
| 14 | long fp; | ||
| 15 | long gp; | ||
| 16 | long lp; | ||
| 17 | long sp; | ||
| 18 | long ipc; | ||
| 19 | long lb; | ||
| 20 | long le; | ||
| 21 | long lc; | ||
| 22 | long syscallno; | ||
| 23 | }; | ||
| 24 | #endif | ||
| 25 | #endif | ||
diff --git a/arch/nds32/include/uapi/asm/sigcontext.h b/arch/nds32/include/uapi/asm/sigcontext.h new file mode 100644 index 000000000000..00567b237b0c --- /dev/null +++ b/arch/nds32/include/uapi/asm/sigcontext.h | |||
| @@ -0,0 +1,60 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #ifndef _ASMNDS32_SIGCONTEXT_H | ||
| 5 | #define _ASMNDS32_SIGCONTEXT_H | ||
| 6 | |||
| 7 | /* | ||
| 8 | * Signal context structure - contains all info to do with the state | ||
| 9 | * before the signal handler was invoked. Note: only add new entries | ||
| 10 | * to the end of the structure. | ||
| 11 | */ | ||
| 12 | |||
| 13 | struct zol_struct { | ||
| 14 | unsigned long nds32_lc; /* $LC */ | ||
| 15 | unsigned long nds32_le; /* $LE */ | ||
| 16 | unsigned long nds32_lb; /* $LB */ | ||
| 17 | }; | ||
| 18 | |||
| 19 | struct sigcontext { | ||
| 20 | unsigned long trap_no; | ||
| 21 | unsigned long error_code; | ||
| 22 | unsigned long oldmask; | ||
| 23 | unsigned long nds32_r0; | ||
| 24 | unsigned long nds32_r1; | ||
| 25 | unsigned long nds32_r2; | ||
| 26 | unsigned long nds32_r3; | ||
| 27 | unsigned long nds32_r4; | ||
| 28 | unsigned long nds32_r5; | ||
| 29 | unsigned long nds32_r6; | ||
| 30 | unsigned long nds32_r7; | ||
| 31 | unsigned long nds32_r8; | ||
| 32 | unsigned long nds32_r9; | ||
| 33 | unsigned long nds32_r10; | ||
| 34 | unsigned long nds32_r11; | ||
| 35 | unsigned long nds32_r12; | ||
| 36 | unsigned long nds32_r13; | ||
| 37 | unsigned long nds32_r14; | ||
| 38 | unsigned long nds32_r15; | ||
| 39 | unsigned long nds32_r16; | ||
| 40 | unsigned long nds32_r17; | ||
| 41 | unsigned long nds32_r18; | ||
| 42 | unsigned long nds32_r19; | ||
| 43 | unsigned long nds32_r20; | ||
| 44 | unsigned long nds32_r21; | ||
| 45 | unsigned long nds32_r22; | ||
| 46 | unsigned long nds32_r23; | ||
| 47 | unsigned long nds32_r24; | ||
| 48 | unsigned long nds32_r25; | ||
| 49 | unsigned long nds32_fp; /* $r28 */ | ||
| 50 | unsigned long nds32_gp; /* $r29 */ | ||
| 51 | unsigned long nds32_lp; /* $r30 */ | ||
| 52 | unsigned long nds32_sp; /* $r31 */ | ||
| 53 | unsigned long nds32_ipc; | ||
| 54 | unsigned long fault_address; | ||
| 55 | unsigned long used_math_flag; | ||
| 56 | /* FPU Registers */ | ||
| 57 | struct zol_struct zol; | ||
| 58 | }; | ||
| 59 | |||
| 60 | #endif | ||
diff --git a/arch/nds32/include/uapi/asm/unistd.h b/arch/nds32/include/uapi/asm/unistd.h new file mode 100644 index 000000000000..6e95901cabe3 --- /dev/null +++ b/arch/nds32/include/uapi/asm/unistd.h | |||
| @@ -0,0 +1,11 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #define __ARCH_WANT_SYNC_FILE_RANGE2 | ||
| 5 | |||
| 6 | /* Use the standard ABI for syscalls */ | ||
| 7 | #include <asm-generic/unistd.h> | ||
| 8 | |||
| 9 | /* Additional NDS32 specific syscalls. */ | ||
| 10 | #define __NR_cacheflush (__NR_arch_specific_syscall) | ||
| 11 | __SYSCALL(__NR_cacheflush, sys_cacheflush) | ||
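Together with the ICACHE/DCACHE/BCACHE constants from cachectl.h above, __NR_cacheflush gives user space (JITs in particular) a way to synchronize the caches after writing code. A hedged user-space sketch, not part of the patch:

    /* Flush a freshly written code buffer before executing it.
     * BCACHE writes back the D-cache and invalidates the I-cache. */
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/unistd.h>
    #include <asm/cachectl.h>

    static int example_cacheflush(void *buf, unsigned long len)
    {
    	return syscall(__NR_cacheflush, (unsigned long)buf, len, BCACHE);
    }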
diff --git a/arch/nds32/kernel/Makefile b/arch/nds32/kernel/Makefile new file mode 100644 index 000000000000..42792743e8b9 --- /dev/null +++ b/arch/nds32/kernel/Makefile | |||
| @@ -0,0 +1,23 @@ | |||
| 1 | # | ||
| 2 | # Makefile for the linux kernel. | ||
| 3 | # | ||
| 4 | |||
| 5 | CPPFLAGS_vmlinux.lds := -DTEXTADDR=$(TEXTADDR) | ||
| 6 | AFLAGS_head.o := -DTEXTADDR=$(TEXTADDR) | ||
| 7 | |||
| 8 | # Object file lists. | ||
| 9 | |||
| 10 | obj-y := ex-entry.o ex-exit.o ex-scall.o irq.o \ | ||
| 11 | process.o ptrace.o setup.o signal.o \ | ||
| 12 | sys_nds32.o time.o traps.o cacheinfo.o \ | ||
| 13 | dma.o syscall_table.o vdso.o | ||
| 14 | |||
| 15 | obj-$(CONFIG_MODULES) += nds32_ksyms.o module.o | ||
| 16 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | ||
| 17 | obj-$(CONFIG_OF) += devtree.o | ||
| 18 | obj-$(CONFIG_CACHE_L2) += atl2c.o | ||
| 19 | |||
| 20 | extra-y := head.o vmlinux.lds | ||
| 21 | |||
| 22 | |||
| 23 | obj-y += vdso/ | ||
diff --git a/arch/nds32/kernel/asm-offsets.c b/arch/nds32/kernel/asm-offsets.c new file mode 100644 index 000000000000..3541d5981de7 --- /dev/null +++ b/arch/nds32/kernel/asm-offsets.c | |||
| @@ -0,0 +1,28 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/sched.h> | ||
| 5 | #include <linux/sched/task_stack.h> | ||
| 6 | #include <linux/kbuild.h> | ||
| 7 | #include <asm/thread_info.h> | ||
| 8 | #include <asm/ptrace.h> | ||
| 9 | |||
| 10 | int main(void) | ||
| 11 | { | ||
| 12 | DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags)); | ||
| 13 | DEFINE(TSK_TI_PREEMPT, | ||
| 14 | offsetof(struct task_struct, thread_info.preempt_count)); | ||
| 15 | DEFINE(THREAD_CPU_CONTEXT, | ||
| 16 | offsetof(struct task_struct, thread.cpu_context)); | ||
| 17 | DEFINE(OSP_OFFSET, offsetof(struct pt_regs, osp)); | ||
| 18 | DEFINE(SP_OFFSET, offsetof(struct pt_regs, sp)); | ||
| 19 | DEFINE(FUCOP_CTL_OFFSET, offsetof(struct pt_regs, fucop_ctl)); | ||
| 20 | DEFINE(IPSW_OFFSET, offsetof(struct pt_regs, ipsw)); | ||
| 21 | DEFINE(SYSCALLNO_OFFSET, offsetof(struct pt_regs, syscallno)); | ||
| 22 | DEFINE(IPC_OFFSET, offsetof(struct pt_regs, ipc)); | ||
| 23 | DEFINE(R0_OFFSET, offsetof(struct pt_regs, uregs[0])); | ||
| 24 | DEFINE(R15_OFFSET, offsetof(struct pt_regs, uregs[15])); | ||
| 25 | DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); | ||
| 26 | DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC); | ||
| 27 | return 0; | ||
| 28 | } | ||
diff --git a/arch/nds32/kernel/atl2c.c b/arch/nds32/kernel/atl2c.c new file mode 100644 index 000000000000..0c6d031a1c4a --- /dev/null +++ b/arch/nds32/kernel/atl2c.c | |||
| @@ -0,0 +1,64 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/compiler.h> | ||
| 5 | #include <linux/of_address.h> | ||
| 6 | #include <linux/of_fdt.h> | ||
| 7 | #include <linux/of_platform.h> | ||
| 8 | #include <asm/l2_cache.h> | ||
| 9 | |||
| 10 | void __iomem *atl2c_base; | ||
| 11 | static const struct of_device_id atl2c_ids[] __initconst = { | ||
| 12 | {.compatible = "andestech,atl2c",}, { /* sentinel terminates the match table */ } | ||
| 13 | }; | ||
| 14 | |||
| 15 | static int __init atl2c_of_init(void) | ||
| 16 | { | ||
| 17 | struct device_node *np; | ||
| 18 | struct resource res; | ||
| 19 | unsigned long tmp = 0; | ||
| 20 | unsigned long l2set, l2way, l2clsz; | ||
| 21 | |||
| 22 | if (!(__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskL2C)) | ||
| 23 | return -ENODEV; | ||
| 24 | |||
| 25 | np = of_find_matching_node(NULL, atl2c_ids); | ||
| 26 | if (!np) | ||
| 27 | return -ENODEV; | ||
| 28 | |||
| 29 | if (of_address_to_resource(np, 0, &res)) | ||
| 30 | return -ENODEV; | ||
| 31 | |||
| 32 | atl2c_base = ioremap(res.start, resource_size(&res)); | ||
| 33 | if (!atl2c_base) | ||
| 34 | return -ENOMEM; | ||
| 35 | |||
| 36 | l2set = | ||
| 37 | 64 << ((L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2SET) >> | ||
| 38 | L2_CA_CONF_offL2SET); | ||
| 39 | l2way = | ||
| 40 | 1 + | ||
| 41 | ((L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2WAY) >> | ||
| 42 | L2_CA_CONF_offL2WAY); | ||
| 43 | l2clsz = | ||
| 44 | 4 << ((L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2CLSZ) >> | ||
| 45 | L2_CA_CONF_offL2CLSZ); | ||
| 46 | pr_info("L2:%luKB/%luS/%luW/%luB\n", | ||
| 47 | l2set * l2way * l2clsz / 1024, l2set, l2way, l2clsz); | ||
| 48 | |||
| 49 | tmp = L2C_R_REG(L2CC_PROT_OFF); | ||
| 50 | tmp &= ~L2CC_PROT_mskMRWEN; | ||
| 51 | L2C_W_REG(L2CC_PROT_OFF, tmp); | ||
| 52 | |||
| 53 | tmp = L2C_R_REG(L2CC_SETUP_OFF); | ||
| 54 | tmp &= ~L2CC_SETUP_mskPART; | ||
| 55 | L2C_W_REG(L2CC_SETUP_OFF, tmp); | ||
| 56 | |||
| 57 | tmp = L2C_R_REG(L2CC_CTRL_OFF); | ||
| 58 | tmp |= L2CC_CTRL_mskEN; | ||
| 59 | L2C_W_REG(L2CC_CTRL_OFF, tmp); | ||
| 60 | |||
| 61 | return 0; | ||
| 62 | } | ||
| 63 | |||
| 64 | subsys_initcall(atl2c_of_init); | ||
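The geometry decode above expands the L2_CA_CONF fields as sets = 64 << SET, ways = WAY + 1 and line size = 4 << CLSZ. A worked example with made-up field values, purely to show how the pr_info() line comes out:

    /* Hypothetical configuration: SET = 4, WAY = 7, CLSZ = 4
     *   l2set  = 64 << 4 = 1024 sets
     *   l2way  = 7 + 1   = 8 ways
     *   l2clsz = 4 << 4  = 64-byte lines
     *   total  = 1024 * 8 * 64 = 512 KiB, printed as "L2:512KB/1024S/8W/64B" */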
diff --git a/arch/nds32/kernel/cacheinfo.c b/arch/nds32/kernel/cacheinfo.c new file mode 100644 index 000000000000..0a7bc696dd55 --- /dev/null +++ b/arch/nds32/kernel/cacheinfo.c | |||
| @@ -0,0 +1,49 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/bitops.h> | ||
| 5 | #include <linux/cacheinfo.h> | ||
| 6 | #include <linux/cpu.h> | ||
| 7 | |||
| 8 | static void ci_leaf_init(struct cacheinfo *this_leaf, | ||
| 9 | enum cache_type type, unsigned int level) | ||
| 10 | { | ||
| 11 | char cache_type = (type & CACHE_TYPE_INST ? ICACHE : DCACHE); | ||
| 12 | |||
| 13 | this_leaf->level = level; | ||
| 14 | this_leaf->type = type; | ||
| 15 | this_leaf->coherency_line_size = CACHE_LINE_SIZE(cache_type); | ||
| 16 | this_leaf->number_of_sets = CACHE_SET(cache_type); | ||
| 17 | this_leaf->ways_of_associativity = CACHE_WAY(cache_type); | ||
| 18 | this_leaf->size = this_leaf->number_of_sets * | ||
| 19 | this_leaf->coherency_line_size * this_leaf->ways_of_associativity; | ||
| 20 | #if defined(CONFIG_CPU_DCACHE_WRITETHROUGH) | ||
| 21 | this_leaf->attributes = CACHE_WRITE_THROUGH; | ||
| 22 | #else | ||
| 23 | this_leaf->attributes = CACHE_WRITE_BACK; | ||
| 24 | #endif | ||
| 25 | } | ||
| 26 | |||
| 27 | int init_cache_level(unsigned int cpu) | ||
| 28 | { | ||
| 29 | struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); | ||
| 30 | |||
| 31 | /* Only 1 level, with separate I and D caches. */ | ||
| 32 | this_cpu_ci->num_levels = 1; | ||
| 33 | this_cpu_ci->num_leaves = 2; | ||
| 34 | return 0; | ||
| 35 | } | ||
| 36 | |||
| 37 | int populate_cache_leaves(unsigned int cpu) | ||
| 38 | { | ||
| 39 | unsigned int level, idx; | ||
| 40 | struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); | ||
| 41 | struct cacheinfo *this_leaf = this_cpu_ci->info_list; | ||
| 42 | |||
| 43 | for (idx = 0, level = 1; level <= this_cpu_ci->num_levels && | ||
| 44 | idx < this_cpu_ci->num_leaves; idx++, level++) { | ||
| 45 | ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level); | ||
| 46 | ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level); | ||
| 47 | } | ||
| 48 | return 0; | ||
| 49 | } | ||
diff --git a/arch/nds32/kernel/devtree.c b/arch/nds32/kernel/devtree.c new file mode 100644 index 000000000000..bdce0fe5af9f --- /dev/null +++ b/arch/nds32/kernel/devtree.c | |||
| @@ -0,0 +1,19 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/bug.h> | ||
| 5 | #include <linux/printk.h> | ||
| 6 | #include <linux/of_fdt.h> | ||
| 7 | |||
| 8 | void __init early_init_devtree(void *params) | ||
| 9 | { | ||
| 10 | if (!params || !early_init_dt_scan(params)) { | ||
| 11 | pr_crit("\n" | ||
| 12 | "Error: invalid device tree blob at (virtual address 0x%p)\n" | ||
| 13 | "\nPlease check your bootloader.", params); | ||
| 14 | |||
| 15 | BUG_ON(1); | ||
| 16 | } | ||
| 17 | |||
| 18 | dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name()); | ||
| 19 | } | ||
diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c new file mode 100644 index 000000000000..d291800fc621 --- /dev/null +++ b/arch/nds32/kernel/dma.c | |||
| @@ -0,0 +1,477 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/types.h> | ||
| 5 | #include <linux/mm.h> | ||
| 6 | #include <linux/export.h> | ||
| 7 | #include <linux/string.h> | ||
| 8 | #include <linux/scatterlist.h> | ||
| 9 | #include <linux/dma-mapping.h> | ||
| 10 | #include <linux/io.h> | ||
| 11 | #include <linux/cache.h> | ||
| 12 | #include <linux/highmem.h> | ||
| 13 | #include <linux/slab.h> | ||
| 14 | #include <asm/cacheflush.h> | ||
| 15 | #include <asm/tlbflush.h> | ||
| 16 | #include <asm/dma-mapping.h> | ||
| 17 | #include <asm/proc-fns.h> | ||
| 18 | |||
| 19 | /* | ||
| 20 | * This is the page table (2MB) covering uncached, DMA consistent allocations | ||
| 21 | */ | ||
| 22 | static pte_t *consistent_pte; | ||
| 23 | static DEFINE_RAW_SPINLOCK(consistent_lock); | ||
| 24 | |||
| 25 | enum master_type { | ||
| 26 | FOR_CPU = 0, | ||
| 27 | FOR_DEVICE = 1, | ||
| 28 | }; | ||
| 29 | |||
| 30 | /* | ||
| 31 | * VM region handling support. | ||
| 32 | * | ||
| 33 | * This should become something generic, handling VM region allocations for | ||
| 34 | * vmalloc and similar (ioremap, module space, etc). | ||
| 35 | * | ||
| 36 | * I envisage vmalloc()'s supporting vm_struct becoming: | ||
| 37 | * | ||
| 38 | * struct vm_struct { | ||
| 39 | * struct vm_region region; | ||
| 40 | * unsigned long flags; | ||
| 41 | * struct page **pages; | ||
| 42 | * unsigned int nr_pages; | ||
| 43 | * unsigned long phys_addr; | ||
| 44 | * }; | ||
| 45 | * | ||
| 46 | * get_vm_area() would then call vm_region_alloc with an appropriate | ||
| 47 | * struct vm_region head (eg): | ||
| 48 | * | ||
| 49 | * struct vm_region vmalloc_head = { | ||
| 50 | * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list), | ||
| 51 | * .vm_start = VMALLOC_START, | ||
| 52 | * .vm_end = VMALLOC_END, | ||
| 53 | * }; | ||
| 54 | * | ||
| 55 | * However, vmalloc_head.vm_start is variable (typically, it is dependent on | ||
| 56 | * the amount of RAM found at boot time.) I would imagine that get_vm_area() | ||
| 57 | * would have to initialise this each time prior to calling vm_region_alloc(). | ||
| 58 | */ | ||
| 59 | struct arch_vm_region { | ||
| 60 | struct list_head vm_list; | ||
| 61 | unsigned long vm_start; | ||
| 62 | unsigned long vm_end; | ||
| 63 | struct page *vm_pages; | ||
| 64 | }; | ||
| 65 | |||
| 66 | static struct arch_vm_region consistent_head = { | ||
| 67 | .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), | ||
| 68 | .vm_start = CONSISTENT_BASE, | ||
| 69 | .vm_end = CONSISTENT_END, | ||
| 70 | }; | ||
| 71 | |||
| 72 | static struct arch_vm_region *vm_region_alloc(struct arch_vm_region *head, | ||
| 73 | size_t size, int gfp) | ||
| 74 | { | ||
| 75 | unsigned long addr = head->vm_start, end = head->vm_end - size; | ||
| 76 | unsigned long flags; | ||
| 77 | struct arch_vm_region *c, *new; | ||
| 78 | |||
| 79 | new = kmalloc(sizeof(struct arch_vm_region), gfp); | ||
| 80 | if (!new) | ||
| 81 | goto out; | ||
| 82 | |||
| 83 | raw_spin_lock_irqsave(&consistent_lock, flags); | ||
| 84 | |||
| 85 | list_for_each_entry(c, &head->vm_list, vm_list) { | ||
| 86 | if ((addr + size) < addr) | ||
| 87 | goto nospc; | ||
| 88 | if ((addr + size) <= c->vm_start) | ||
| 89 | goto found; | ||
| 90 | addr = c->vm_end; | ||
| 91 | if (addr > end) | ||
| 92 | goto nospc; | ||
| 93 | } | ||
| 94 | |||
| 95 | found: | ||
| 96 | /* | ||
| 97 | * Insert this entry _before_ the one we found. | ||
| 98 | */ | ||
| 99 | list_add_tail(&new->vm_list, &c->vm_list); | ||
| 100 | new->vm_start = addr; | ||
| 101 | new->vm_end = addr + size; | ||
| 102 | |||
| 103 | raw_spin_unlock_irqrestore(&consistent_lock, flags); | ||
| 104 | return new; | ||
| 105 | |||
| 106 | nospc: | ||
| 107 | raw_spin_unlock_irqrestore(&consistent_lock, flags); | ||
| 108 | kfree(new); | ||
| 109 | out: | ||
| 110 | return NULL; | ||
| 111 | } | ||
| 112 | |||
| 113 | static struct arch_vm_region *vm_region_find(struct arch_vm_region *head, | ||
| 114 | unsigned long addr) | ||
| 115 | { | ||
| 116 | struct arch_vm_region *c; | ||
| 117 | |||
| 118 | list_for_each_entry(c, &head->vm_list, vm_list) { | ||
| 119 | if (c->vm_start == addr) | ||
| 120 | goto out; | ||
| 121 | } | ||
| 122 | c = NULL; | ||
| 123 | out: | ||
| 124 | return c; | ||
| 125 | } | ||
| 126 | |||
| 127 | /* FIXME: attrs is not used. */ | ||
| 128 | static void *nds32_dma_alloc_coherent(struct device *dev, size_t size, | ||
| 129 | dma_addr_t * handle, gfp_t gfp, | ||
| 130 | unsigned long attrs) | ||
| 131 | { | ||
| 132 | struct page *page; | ||
| 133 | struct arch_vm_region *c; | ||
| 134 | unsigned long order; | ||
| 135 | u64 mask = ~0ULL, limit; | ||
| 136 | pgprot_t prot = pgprot_noncached(PAGE_KERNEL); | ||
| 137 | |||
| 138 | if (!consistent_pte) { | ||
| 139 | pr_err("%s: not initialized\n", __func__); | ||
| 140 | dump_stack(); | ||
| 141 | return NULL; | ||
| 142 | } | ||
| 143 | |||
| 144 | if (dev) { | ||
| 145 | mask = dev->coherent_dma_mask; | ||
| 146 | |||
| 147 | /* | ||
| 148 | * Sanity check the DMA mask - it must be non-zero, and | ||
| 149 | * must be able to be satisfied by a DMA allocation. | ||
| 150 | */ | ||
| 151 | if (mask == 0) { | ||
| 152 | dev_warn(dev, "coherent DMA mask is unset\n"); | ||
| 153 | goto no_page; | ||
| 154 | } | ||
| 155 | |||
| 156 | } | ||
| 157 | |||
| 158 | /* | ||
| 159 | * Sanity check the allocation size. | ||
| 160 | */ | ||
| 161 | size = PAGE_ALIGN(size); | ||
| 162 | limit = (mask + 1) & ~mask; | ||
| 163 | if ((limit && size >= limit) || | ||
| 164 | size >= (CONSISTENT_END - CONSISTENT_BASE)) { | ||
| 165 | pr_warn("coherent allocation too big " | ||
| 166 | "(requested %#zx mask %#llx)\n", size, mask); | ||
| 167 | goto no_page; | ||
| 168 | } | ||
| 169 | |||
| 170 | order = get_order(size); | ||
| 171 | |||
| 172 | if (mask != 0xffffffff) | ||
| 173 | gfp |= GFP_DMA; | ||
| 174 | |||
| 175 | page = alloc_pages(gfp, order); | ||
| 176 | if (!page) | ||
| 177 | goto no_page; | ||
| 178 | |||
| 179 | /* | ||
| 180 | * Invalidate any data that might be lurking in the | ||
| 181 | * kernel direct-mapped region for device DMA. | ||
| 182 | */ | ||
| 183 | { | ||
| 184 | unsigned long kaddr = (unsigned long)page_address(page); | ||
| 185 | memset(page_address(page), 0, size); | ||
| 186 | cpu_dma_wbinval_range(kaddr, kaddr + size); | ||
| 187 | } | ||
| 188 | |||
| 189 | /* | ||
| 190 | * Allocate a virtual address in the consistent mapping region. | ||
| 191 | */ | ||
| 192 | c = vm_region_alloc(&consistent_head, size, | ||
| 193 | gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); | ||
| 194 | if (c) { | ||
| 195 | pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start); | ||
| 196 | struct page *end = page + (1 << order); | ||
| 197 | |||
| 198 | c->vm_pages = page; | ||
| 199 | |||
| 200 | /* | ||
| 201 | * Set the "dma handle" | ||
| 202 | */ | ||
| 203 | *handle = page_to_phys(page); | ||
| 204 | |||
| 205 | do { | ||
| 206 | BUG_ON(!pte_none(*pte)); | ||
| 207 | |||
| 208 | /* | ||
| 209 | * x86 does not mark the pages reserved... | ||
| 210 | */ | ||
| 211 | SetPageReserved(page); | ||
| 212 | set_pte(pte, mk_pte(page, prot)); | ||
| 213 | page++; | ||
| 214 | pte++; | ||
| 215 | } while (size -= PAGE_SIZE); | ||
| 216 | |||
| 217 | /* | ||
| 218 | * Free the otherwise unused pages. | ||
| 219 | */ | ||
| 220 | while (page < end) { | ||
| 221 | __free_page(page); | ||
| 222 | page++; | ||
| 223 | } | ||
| 224 | |||
| 225 | return (void *)c->vm_start; | ||
| 226 | } | ||
| 227 | |||
| 228 | if (page) | ||
| 229 | __free_pages(page, order); | ||
| 230 | no_page: | ||
| 231 | *handle = ~0; | ||
| 232 | return NULL; | ||
| 233 | } | ||
| 234 | |||
| 235 | static void nds32_dma_free(struct device *dev, size_t size, void *cpu_addr, | ||
| 236 | dma_addr_t handle, unsigned long attrs) | ||
| 237 | { | ||
| 238 | struct arch_vm_region *c; | ||
| 239 | unsigned long flags, addr; | ||
| 240 | pte_t *ptep; | ||
| 241 | |||
| 242 | size = PAGE_ALIGN(size); | ||
| 243 | |||
| 244 | raw_spin_lock_irqsave(&consistent_lock, flags); | ||
| 245 | |||
| 246 | c = vm_region_find(&consistent_head, (unsigned long)cpu_addr); | ||
| 247 | if (!c) | ||
| 248 | goto no_area; | ||
| 249 | |||
| 250 | if ((c->vm_end - c->vm_start) != size) { | ||
| 251 | pr_err("%s: freeing wrong coherent size (%lu != %zu)\n", | ||
| 252 | __func__, c->vm_end - c->vm_start, size); | ||
| 253 | dump_stack(); | ||
| 254 | size = c->vm_end - c->vm_start; | ||
| 255 | } | ||
| 256 | |||
| 257 | ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start); | ||
| 258 | addr = c->vm_start; | ||
| 259 | do { | ||
| 260 | pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep); | ||
| 261 | unsigned long pfn; | ||
| 262 | |||
| 263 | ptep++; | ||
| 264 | addr += PAGE_SIZE; | ||
| 265 | |||
| 266 | if (!pte_none(pte) && pte_present(pte)) { | ||
| 267 | pfn = pte_pfn(pte); | ||
| 268 | |||
| 269 | if (pfn_valid(pfn)) { | ||
| 270 | struct page *page = pfn_to_page(pfn); | ||
| 271 | |||
| 272 | /* | ||
| 273 | * x86 does not mark the pages reserved... | ||
| 274 | */ | ||
| 275 | ClearPageReserved(page); | ||
| 276 | |||
| 277 | __free_page(page); | ||
| 278 | continue; | ||
| 279 | } | ||
| 280 | } | ||
| 281 | |||
| 282 | pr_crit("%s: bad page in kernel page table\n", __func__); | ||
| 283 | } while (size -= PAGE_SIZE); | ||
| 284 | |||
| 285 | flush_tlb_kernel_range(c->vm_start, c->vm_end); | ||
| 286 | |||
| 287 | list_del(&c->vm_list); | ||
| 288 | |||
| 289 | raw_spin_unlock_irqrestore(&consistent_lock, flags); | ||
| 290 | |||
| 291 | kfree(c); | ||
| 292 | return; | ||
| 293 | |||
| 294 | no_area: | ||
| 295 | raw_spin_unlock_irqrestore(&consistent_lock, flags); | ||
| 296 | pr_err("%s: trying to free invalid coherent area: %p\n", | ||
| 297 | __func__, cpu_addr); | ||
| 298 | dump_stack(); | ||
| 299 | } | ||
| 300 | |||
| 301 | /* | ||
| 302 | * Initialise the consistent memory allocation. | ||
| 303 | */ | ||
| 304 | static int __init consistent_init(void) | ||
| 305 | { | ||
| 306 | pgd_t *pgd; | ||
| 307 | pmd_t *pmd; | ||
| 308 | pte_t *pte; | ||
| 309 | int ret = 0; | ||
| 310 | |||
| 311 | do { | ||
| 312 | pgd = pgd_offset(&init_mm, CONSISTENT_BASE); | ||
| 313 | pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE); | ||
| 314 | if (!pmd) { | ||
| 315 | pr_err("%s: no pmd tables\n", __func__); | ||
| 316 | ret = -ENOMEM; | ||
| 317 | break; | ||
| 318 | } | ||
| 319 | 		/* The first-level mapping may already have been created elsewhere, | ||
| 320 | 		 * so it is not necessary to warn here. */ | ||
| 321 | /* WARN_ON(!pmd_none(*pmd)); */ | ||
| 322 | |||
| 323 | pte = pte_alloc_kernel(pmd, CONSISTENT_BASE); | ||
| 324 | if (!pte) { | ||
| 325 | ret = -ENOMEM; | ||
| 326 | break; | ||
| 327 | } | ||
| 328 | |||
| 329 | consistent_pte = pte; | ||
| 330 | } while (0); | ||
| 331 | |||
| 332 | return ret; | ||
| 333 | } | ||
| 334 | |||
| 335 | core_initcall(consistent_init); | ||
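consistent_init() above wires up a single PTE table covering the CONSISTENT_BASE..CONSISTENT_END window, and the allocator indexes it with CONSISTENT_OFFSET(). That macro is defined earlier in this file, outside this hunk; on ARM-derived ports it is typically just the page index of a virtual address within the window, roughly as sketched below (an assumption about its shape, not the patch's actual definition):

        /* Hypothetical shape of the helper, for orientation only. */
        #define CONSISTENT_OFFSET(x) \
                (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)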
| 336 | static void consistent_sync(void *vaddr, size_t size, int direction, int master_type); | ||
| 337 | static dma_addr_t nds32_dma_map_page(struct device *dev, struct page *page, | ||
| 338 | unsigned long offset, size_t size, | ||
| 339 | enum dma_data_direction dir, | ||
| 340 | unsigned long attrs) | ||
| 341 | { | ||
| 342 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | ||
| 343 | consistent_sync((void *)(page_address(page) + offset), size, dir, FOR_DEVICE); | ||
| 344 | return page_to_phys(page) + offset; | ||
| 345 | } | ||
| 346 | |||
| 347 | static void nds32_dma_unmap_page(struct device *dev, dma_addr_t handle, | ||
| 348 | size_t size, enum dma_data_direction dir, | ||
| 349 | unsigned long attrs) | ||
| 350 | { | ||
| 351 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) | ||
| 352 | consistent_sync(phys_to_virt(handle), size, dir, FOR_CPU); | ||
| 353 | } | ||
| 354 | |||
| 355 | /* | ||
| 356 | * Make an area consistent for devices. | ||
| 357 | */ | ||
| 358 | static void consistent_sync(void *vaddr, size_t size, int direction, int master_type) | ||
| 359 | { | ||
| 360 | unsigned long start = (unsigned long)vaddr; | ||
| 361 | unsigned long end = start + size; | ||
| 362 | |||
| 363 | if (master_type == FOR_CPU) { | ||
| 364 | switch (direction) { | ||
| 365 | case DMA_TO_DEVICE: | ||
| 366 | break; | ||
| 367 | case DMA_FROM_DEVICE: | ||
| 368 | case DMA_BIDIRECTIONAL: | ||
| 369 | cpu_dma_inval_range(start, end); | ||
| 370 | break; | ||
| 371 | default: | ||
| 372 | BUG(); | ||
| 373 | } | ||
| 374 | } else { | ||
| 375 | /* FOR_DEVICE */ | ||
| 376 | switch (direction) { | ||
| 377 | case DMA_FROM_DEVICE: | ||
| 378 | break; | ||
| 379 | case DMA_TO_DEVICE: | ||
| 380 | case DMA_BIDIRECTIONAL: | ||
| 381 | cpu_dma_wb_range(start, end); | ||
| 382 | break; | ||
| 383 | default: | ||
| 384 | BUG(); | ||
| 385 | } | ||
| 386 | } | ||
| 387 | } | ||
| 388 | |||
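consistent_sync() implements the usual cache policy for non-coherent streaming DMA: write back before the device reads (FOR_DEVICE with DMA_TO_DEVICE/BIDIRECTIONAL), invalidate before the CPU reads what the device wrote (FOR_CPU with DMA_FROM_DEVICE/BIDIRECTIONAL). A minimal driver-side sketch of the generic API calls that end up here (illustrative driver code, not part of this patch):

        #include <linux/dma-mapping.h>
        #include <linux/errno.h>

        /* Receive one buffer from a device using streaming DMA. */
        static int example_rx(struct device *dev, void *buf, size_t len)
        {
                dma_addr_t dma;

                /* .map_page -> consistent_sync(..., FOR_DEVICE) */
                dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, dma))
                        return -ENOMEM;

                /* ... program the device with 'dma' and wait for completion ... */

                /* .sync_single_for_cpu -> consistent_sync(..., FOR_CPU),
                 * i.e. cpu_dma_inval_range() for DMA_FROM_DEVICE. */
                dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
                dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
                return 0;       /* buf now holds the device's data */
        }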
| 389 | static int nds32_dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
| 390 | int nents, enum dma_data_direction dir, | ||
| 391 | unsigned long attrs) | ||
| 392 | { | ||
| 393 | int i; | ||
| 394 | |||
| 395 | for (i = 0; i < nents; i++, sg++) { | ||
| 396 | void *virt; | ||
| 397 | unsigned long pfn; | ||
| 398 | struct page *page = sg_page(sg); | ||
| 399 | |||
| 400 | sg->dma_address = sg_phys(sg); | ||
| 401 | pfn = page_to_pfn(page) + sg->offset / PAGE_SIZE; | ||
| 402 | page = pfn_to_page(pfn); | ||
| 403 | if (PageHighMem(page)) { | ||
| 404 | virt = kmap_atomic(page); | ||
| 405 | consistent_sync(virt, sg->length, dir, FOR_CPU); | ||
| 406 | kunmap_atomic(virt); | ||
| 407 | } else { | ||
| 408 | if (sg->offset > PAGE_SIZE) | ||
| 409 | panic("sg->offset:%08x > PAGE_SIZE\n", | ||
| 410 | sg->offset); | ||
| 411 | virt = page_address(page) + sg->offset; | ||
| 412 | consistent_sync(virt, sg->length, dir, FOR_CPU); | ||
| 413 | } | ||
| 414 | } | ||
| 415 | return nents; | ||
| 416 | } | ||
| 417 | |||
| 418 | static void nds32_dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
| 419 | int nhwentries, enum dma_data_direction dir, | ||
| 420 | unsigned long attrs) | ||
| 421 | { | ||
| 422 | } | ||
| 423 | |||
| 424 | static void | ||
| 425 | nds32_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, | ||
| 426 | size_t size, enum dma_data_direction dir) | ||
| 427 | { | ||
| 428 | consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_CPU); | ||
| 429 | } | ||
| 430 | |||
| 431 | static void | ||
| 432 | nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle, | ||
| 433 | size_t size, enum dma_data_direction dir) | ||
| 434 | { | ||
| 435 | consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_DEVICE); | ||
| 436 | } | ||
| 437 | |||
| 438 | static void | ||
| 439 | nds32_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, | ||
| 440 | enum dma_data_direction dir) | ||
| 441 | { | ||
| 442 | int i; | ||
| 443 | |||
| 444 | for (i = 0; i < nents; i++, sg++) { | ||
| 445 | char *virt = | ||
| 446 | page_address((struct page *)sg->page_link) + sg->offset; | ||
| 447 | consistent_sync(virt, sg->length, dir, FOR_CPU); | ||
| 448 | } | ||
| 449 | } | ||
| 450 | |||
| 451 | static void | ||
| 452 | nds32_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | ||
| 453 | int nents, enum dma_data_direction dir) | ||
| 454 | { | ||
| 455 | int i; | ||
| 456 | |||
| 457 | for (i = 0; i < nents; i++, sg++) { | ||
| 458 | char *virt = | ||
| 459 | page_address((struct page *)sg->page_link) + sg->offset; | ||
| 460 | consistent_sync(virt, sg->length, dir, FOR_DEVICE); | ||
| 461 | } | ||
| 462 | } | ||
| 463 | |||
| 464 | struct dma_map_ops nds32_dma_ops = { | ||
| 465 | .alloc = nds32_dma_alloc_coherent, | ||
| 466 | .free = nds32_dma_free, | ||
| 467 | .map_page = nds32_dma_map_page, | ||
| 468 | .unmap_page = nds32_dma_unmap_page, | ||
| 469 | .map_sg = nds32_dma_map_sg, | ||
| 470 | .unmap_sg = nds32_dma_unmap_sg, | ||
| 471 | .sync_single_for_device = nds32_dma_sync_single_for_device, | ||
| 472 | .sync_single_for_cpu = nds32_dma_sync_single_for_cpu, | ||
| 473 | .sync_sg_for_cpu = nds32_dma_sync_sg_for_cpu, | ||
| 474 | .sync_sg_for_device = nds32_dma_sync_sg_for_device, | ||
| 475 | }; | ||
| 476 | |||
| 477 | EXPORT_SYMBOL(nds32_dma_ops); | ||
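The .alloc/.free hooks exported above sit behind the generic coherent API; a minimal sketch of how a driver would reach nds32_dma_alloc_coherent() (function and names illustrative):

        #include <linux/dma-mapping.h>

        /* Allocate a descriptor ring visible to both CPU and device. */
        static void *example_alloc_ring(struct device *dev, size_t size,
                                        dma_addr_t *dma)
        {
                /* Ends up in nds32_dma_alloc_coherent(): the pages are
                 * remapped uncached in the consistent region and *dma is
                 * the physical address handed to the device. */
                return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
        }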
diff --git a/arch/nds32/kernel/ex-entry.S b/arch/nds32/kernel/ex-entry.S new file mode 100644 index 000000000000..a72e83d804f5 --- /dev/null +++ b/arch/nds32/kernel/ex-entry.S | |||
| @@ -0,0 +1,157 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/linkage.h> | ||
| 5 | #include <asm/memory.h> | ||
| 6 | #include <asm/nds32.h> | ||
| 7 | #include <asm/errno.h> | ||
| 8 | #include <asm/asm-offsets.h> | ||
| 9 | #include <asm/page.h> | ||
| 10 | |||
| 11 | #ifdef CONFIG_HWZOL | ||
| 12 | .macro push_zol | ||
| 13 | mfusr $r14, $LB | ||
| 14 | mfusr $r15, $LE | ||
| 15 | mfusr $r16, $LC | ||
| 16 | .endm | ||
| 17 | #endif | ||
| 18 | |||
| 19 | .macro save_user_regs | ||
| 20 | |||
| 21 | smw.adm $sp, [$sp], $sp, #0x1 | ||
| 22 | /* move $SP to the bottom of pt_regs */ | ||
| 23 | addi $sp, $sp, -OSP_OFFSET | ||
| 24 | |||
| 25 | /* push $r0 ~ $r25 */ | ||
| 26 | smw.bim $r0, [$sp], $r25 | ||
| 27 | /* push $fp, $gp, $lp */ | ||
| 28 | smw.bim $sp, [$sp], $sp, #0xe | ||
| 29 | |||
| 30 | mfsr $r12, $SP_USR | ||
| 31 | mfsr $r13, $IPC | ||
| 32 | #ifdef CONFIG_HWZOL | ||
| 33 | push_zol | ||
| 34 | #endif | ||
| 35 | movi $r17, -1 | ||
| 36 | move $r18, $r0 | ||
| 37 | mfsr $r19, $PSW | ||
| 38 | mfsr $r20, $IPSW | ||
| 39 | mfsr $r21, $P_IPSW | ||
| 40 | mfsr $r22, $P_IPC | ||
| 41 | mfsr $r23, $P_P0 | ||
| 42 | mfsr $r24, $P_P1 | ||
| 43 | smw.bim $r12, [$sp], $r24, #0 | ||
| 44 | addi $sp, $sp, -FUCOP_CTL_OFFSET | ||
| 45 | |||
| 46 | /* Initialize kernel space $fp */ | ||
| 47 | andi $p0, $r20, #PSW_mskPOM | ||
| 48 | movi $p1, #0x0 | ||
| 49 | cmovz $fp, $p1, $p0 | ||
| 50 | |||
| 51 | andi $r16, $r19, #PSW_mskINTL | ||
| 52 | slti $r17, $r16, #4 | ||
| 53 | bnez $r17, 1f | ||
| 54 | addi $r17, $r19, #-2 | ||
| 55 | mtsr $r17, $PSW | ||
| 56 | isb | ||
| 57 | 1: | ||
| 58 | /* If it was superuser mode, we don't need to update $r25 */ | ||
| 59 | bnez $p0, 2f | ||
| 60 | la $p0, __entry_task | ||
| 61 | lw $r25, [$p0] | ||
| 62 | 2: | ||
| 63 | .endm | ||
| 64 | |||
| 65 | .text | ||
| 66 | |||
| 67 | /* | ||
| 68 | * Exception Vector | ||
| 69 | */ | ||
| 70 | exception_handlers: | ||
| 71 | .long unhandled_exceptions !Reset/NMI | ||
| 72 | .long unhandled_exceptions !TLB fill | ||
| 73 | .long do_page_fault !PTE not present | ||
| 74 | .long do_dispatch_tlb_misc !TLB misc | ||
| 75 | .long unhandled_exceptions !TLB VLPT | ||
| 76 | .long unhandled_exceptions !Machine Error | ||
| 77 | .long do_debug_trap !Debug related | ||
| 78 | .long do_dispatch_general !General exception | ||
| 79 | .long eh_syscall !Syscall | ||
| 80 | .long asm_do_IRQ !IRQ | ||
| 81 | |||
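common_exception_handler below indexes this table with the vector number pulled out of $ITYPE. A rough C rendering of the dispatch, assuming the same ITYPE_mskVECTOR/ITYPE_offVECTOR macros the assembly uses (sketch only, not code from the patch; the real handlers take the vector, EVA, ITYPE, regs and OIPC as arguments):

        /* Illustrative C equivalent of the table dispatch below. */
        typedef void (*exc_handler_t)(void);
        extern exc_handler_t exception_handlers[];

        static void dispatch(unsigned long itype)
        {
                unsigned long vector =
                        (itype & ITYPE_mskVECTOR) >> ITYPE_offVECTOR;

                /* entries 0..8 are exceptions and the syscall entry,
                 * entry 9 is the interrupt entry (asm_do_IRQ)        */
                exception_handlers[vector]();
        }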
| 82 | common_exception_handler: | ||
| 83 | save_user_regs | ||
| 84 | mfsr $p0, $ITYPE | ||
| 85 | andi $p0, $p0, #ITYPE_mskVECTOR | ||
| 86 | srli $p0, $p0, #ITYPE_offVECTOR | ||
| 87 | andi $p1, $p0, #NDS32_VECTOR_mskNONEXCEPTION | ||
| 88 | bnez $p1, 1f | ||
| 89 | sethi $lp, hi20(ret_from_exception) | ||
| 90 | ori $lp, $lp, lo12(ret_from_exception) | ||
| 91 | sethi $p1, hi20(exception_handlers) | ||
| 92 | ori $p1, $p1, lo12(exception_handlers) | ||
| 93 | lw $p1, [$p1+$p0<<2] | ||
| 94 | move $r0, $p0 | ||
| 95 | mfsr $r1, $EVA | ||
| 96 | mfsr $r2, $ITYPE | ||
| 97 | move $r3, $sp | ||
| 98 | mfsr $r4, $OIPC | ||
| 99 | /* enable gie if it is enabled in IPSW. */ | ||
| 100 | mfsr $r21, $PSW | ||
| 101 | andi $r20, $r20, #PSW_mskGIE /* r20 is $IPSW*/ | ||
| 102 | or $r21, $r21, $r20 | ||
| 103 | mtsr $r21, $PSW | ||
| 104 | dsb | ||
| 105 | jr $p1 | ||
| 106 | |||
| 107 | /* syscall */ | ||
| 108 | 1: | ||
| 109 | addi $p1, $p0, #-NDS32_VECTOR_offEXCEPTION | ||
| 110 | bnez $p1, 2f | ||
| 111 | sethi $lp, hi20(ret_from_exception) | ||
| 112 | ori $lp, $lp, lo12(ret_from_exception) | ||
| 113 | sethi $p1, hi20(exception_handlers) | ||
| 114 | ori $p1, $p1, lo12(exception_handlers) | ||
| 115 | lwi $p1, [$p1+#NDS32_VECTOR_offEXCEPTION<<2] | ||
| 116 | jr $p1 | ||
| 117 | |||
| 118 | /* interrupt */ | ||
| 119 | 2: | ||
| 120 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
| 121 | jal arch_trace_hardirqs_off | ||
| 122 | #endif | ||
| 123 | move $r0, $sp | ||
| 124 | sethi $lp, hi20(ret_from_intr) | ||
| 125 | ori $lp, $lp, lo12(ret_from_intr) | ||
| 126 | sethi $p0, hi20(exception_handlers) | ||
| 127 | ori $p0, $p0, lo12(exception_handlers) | ||
| 128 | lwi $p0, [$p0+#NDS32_VECTOR_offINTERRUPT<<2] | ||
| 129 | jr $p0 | ||
| 130 | |||
| 131 | .macro EXCEPTION_VECTOR_DEBUG | ||
| 132 | .align 4 | ||
| 133 | mfsr $p0, $EDM_CTL | ||
| 134 | andi $p0, $p0, EDM_CTL_mskV3_EDM_MODE | ||
| 135 | tnez $p0, SWID_RAISE_INTERRUPT_LEVEL | ||
| 136 | .endm | ||
| 137 | |||
| 138 | .macro EXCEPTION_VECTOR | ||
| 139 | .align 4 | ||
| 140 | sethi $p0, hi20(common_exception_handler) | ||
| 141 | ori $p0, $p0, lo12(common_exception_handler) | ||
| 142 | jral.ton $p0, $p0 | ||
| 143 | .endm | ||
| 144 | |||
| 145 | .section ".text.init", #alloc, #execinstr | ||
| 146 | .global exception_vector | ||
| 147 | exception_vector: | ||
| 148 | .rept 6 | ||
| 149 | EXCEPTION_VECTOR | ||
| 150 | .endr | ||
| 151 | EXCEPTION_VECTOR_DEBUG | ||
| 152 | .rept 121 | ||
| 153 | EXCEPTION_VECTOR | ||
| 154 | .endr | ||
| 155 | .align 4 | ||
| 156 | .global exception_vector_end | ||
| 157 | exception_vector_end: | ||
diff --git a/arch/nds32/kernel/ex-exit.S b/arch/nds32/kernel/ex-exit.S new file mode 100644 index 000000000000..03e4f7788a18 --- /dev/null +++ b/arch/nds32/kernel/ex-exit.S | |||
| @@ -0,0 +1,184 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/linkage.h> | ||
| 5 | #include <asm/unistd.h> | ||
| 6 | #include <asm/assembler.h> | ||
| 7 | #include <asm/nds32.h> | ||
| 8 | #include <asm/asm-offsets.h> | ||
| 9 | #include <asm/thread_info.h> | ||
| 10 | #include <asm/current.h> | ||
| 11 | |||
| 12 | |||
| 13 | |||
| 14 | #ifdef CONFIG_HWZOL | ||
| 15 | .macro pop_zol | ||
| 16 | mtusr $r14, $LB | ||
| 17 | mtusr $r15, $LE | ||
| 18 | mtusr $r16, $LC | ||
| 19 | .endm | ||
| 20 | #endif | ||
| 21 | |||
| 22 | .macro restore_user_regs_first | ||
| 23 | setgie.d | ||
| 24 | isb | ||
| 25 | |||
| 26 | addi $sp, $sp, FUCOP_CTL_OFFSET | ||
| 27 | |||
| 28 | lmw.adm $r12, [$sp], $r24, #0x0 | ||
| 29 | mtsr $r12, $SP_USR | ||
| 30 | mtsr $r13, $IPC | ||
| 31 | #ifdef CONFIG_HWZOL | ||
| 32 | pop_zol | ||
| 33 | #endif | ||
| 34 | mtsr $r19, $PSW | ||
| 35 | mtsr $r20, $IPSW | ||
| 36 | mtsr $r21, $P_IPSW | ||
| 37 | mtsr $r22, $P_IPC | ||
| 38 | mtsr $r23, $P_P0 | ||
| 39 | mtsr $r24, $P_P1 | ||
| 40 | lmw.adm $sp, [$sp], $sp, #0xe | ||
| 41 | .endm | ||
| 42 | |||
| 43 | .macro restore_user_regs_last | ||
| 44 | pop $p0 | ||
| 45 | cmovn $sp, $p0, $p0 | ||
| 46 | |||
| 47 | iret | ||
| 48 | nop | ||
| 49 | |||
| 50 | .endm | ||
| 51 | |||
| 52 | .macro restore_user_regs | ||
| 53 | restore_user_regs_first | ||
| 54 | lmw.adm $r0, [$sp], $r25, #0x0 | ||
| 55 | addi $sp, $sp, OSP_OFFSET | ||
| 56 | restore_user_regs_last | ||
| 57 | .endm | ||
| 58 | |||
| 59 | .macro fast_restore_user_regs | ||
| 60 | restore_user_regs_first | ||
| 61 | lmw.adm $r1, [$sp], $r25, #0x0 | ||
| 62 | addi $sp, $sp, OSP_OFFSET-4 | ||
| 63 | restore_user_regs_last | ||
| 64 | .endm | ||
| 65 | |||
| 66 | #ifdef CONFIG_PREEMPT | ||
| 67 | .macro preempt_stop | ||
| 68 | .endm | ||
| 69 | #else | ||
| 70 | .macro preempt_stop | ||
| 71 | setgie.d | ||
| 72 | isb | ||
| 73 | .endm | ||
| 74 | #define resume_kernel no_work_pending | ||
| 75 | #endif | ||
| 76 | |||
| 77 | ENTRY(ret_from_exception) | ||
| 78 | preempt_stop | ||
| 79 | ENTRY(ret_from_intr) | ||
| 80 | |||
| 81 | /* | ||
| 82 | * Decide whether we are returning to kernel or user mode | ||
| 83 | * | ||
| 84 | */ | ||
| 85 | lwi $p0, [$sp+(#IPSW_OFFSET)] ! Check if in nested interrupt | ||
| 86 | andi $p0, $p0, #PSW_mskINTL | ||
| 87 | bnez $p0, resume_kernel ! done with iret | ||
| 88 | j resume_userspace | ||
| 89 | |||
| 90 | |||
| 91 | /* | ||
| 92 | * This is the fast syscall return path. We do as little as | ||
| 93 | * possible here, and this includes saving $r0 back onto the kernel | ||
| 94 | * stack. | ||
| 95 | * fixed: tsk - $r25, syscall # - $r7, syscall table pointer - $r8 | ||
| 96 | */ | ||
| 97 | ENTRY(ret_fast_syscall) | ||
| 98 | gie_disable | ||
| 99 | lwi $r1, [tsk+#TSK_TI_FLAGS] | ||
| 100 | andi $p1, $r1, #_TIF_WORK_MASK | ||
| 101 | bnez $p1, fast_work_pending | ||
| 102 | fast_restore_user_regs ! iret | ||
| 103 | |||
| 104 | /* | ||
| 105 | * Ok, we need to do extra processing: | ||
| 106 | * enter the slow syscall-return path to handle the pending work. | ||
| 107 | */ | ||
| 108 | fast_work_pending: | ||
| 109 | swi $r0, [$sp+(#R0_OFFSET)] ! what is different from ret_from_exception | ||
| 110 | work_pending: | ||
| 111 | andi $p1, $r1, #_TIF_NEED_RESCHED | ||
| 112 | bnez $p1, work_resched | ||
| 113 | |||
| 114 | andi $p1, $r1, #_TIF_SIGPENDING|#_TIF_NOTIFY_RESUME | ||
| 115 | beqz $p1, no_work_pending | ||
| 116 | |||
| 117 | move $r0, $sp ! 'regs' | ||
| 118 | gie_enable | ||
| 119 | bal do_notify_resume | ||
| 120 | b ret_slow_syscall | ||
| 121 | work_resched: | ||
| 122 | bal schedule ! path, return to user mode | ||
| 123 | |||
| 124 | /* | ||
| 125 | * "slow" syscall return path. | ||
| 126 | */ | ||
| 127 | ENTRY(resume_userspace) | ||
| 128 | ENTRY(ret_slow_syscall) | ||
| 129 | gie_disable | ||
| 130 | lwi $p0, [$sp+(#IPSW_OFFSET)] ! Check if in nested interrupt | ||
| 131 | andi $p0, $p0, #PSW_mskINTL | ||
| 132 | bnez $p0, no_work_pending ! done with iret | ||
| 133 | lwi $r1, [tsk+#TSK_TI_FLAGS] | ||
| 134 | andi $p1, $r1, #_TIF_WORK_MASK | ||
| 135 | bnez $p1, work_pending ! handle work_resched, sig_pend | ||
| 136 | |||
| 137 | no_work_pending: | ||
| 138 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
| 139 | lwi $p0, [$sp+(#IPSW_OFFSET)] | ||
| 140 | andi $p0, $p0, #0x1 | ||
| 141 | la $r10, trace_hardirqs_off | ||
| 142 | la $r9, trace_hardirqs_on | ||
| 143 | cmovz $r9, $p0, $r10 | ||
| 144 | jral $r9 | ||
| 145 | #endif | ||
| 146 | restore_user_regs ! return from iret | ||
| 147 | |||
| 148 | |||
| 149 | /* | ||
| 150 | * preemptive kernel | ||
| 151 | */ | ||
| 152 | #ifdef CONFIG_PREEMPT | ||
| 153 | resume_kernel: | ||
| 154 | gie_disable | ||
| 155 | lwi $t0, [tsk+#TSK_TI_PREEMPT] | ||
| 156 | bnez $t0, no_work_pending | ||
| 157 | need_resched: | ||
| 158 | lwi $t0, [tsk+#TSK_TI_FLAGS] | ||
| 159 | andi $p1, $t0, #_TIF_NEED_RESCHED | ||
| 160 | beqz $p1, no_work_pending | ||
| 161 | |||
| 162 | lwi $t0, [$sp+(#IPSW_OFFSET)] ! Interrupts off? | ||
| 163 | andi $t0, $t0, #1 | ||
| 164 | beqz $t0, no_work_pending | ||
| 165 | |||
| 166 | jal preempt_schedule_irq | ||
| 167 | b need_resched | ||
| 168 | #endif | ||
| 169 | |||
| 170 | /* | ||
| 171 | * This is how we return from a fork. | ||
| 172 | */ | ||
| 173 | ENTRY(ret_from_fork) | ||
| 174 | bal schedule_tail | ||
| 175 | beqz $r6, 1f ! r6 stores fn for kernel thread | ||
| 176 | move $r0, $r7 ! prepare kernel thread arg | ||
| 177 | jral $r6 | ||
| 178 | 1: | ||
| 179 | lwi $r1, [tsk+#TSK_TI_FLAGS] ! check for syscall tracing | ||
| 180 | andi $p1, $r1, #_TIF_WORK_SYSCALL_LEAVE ! are we tracing syscalls? | ||
| 181 | beqz $p1, ret_slow_syscall | ||
| 182 | move $r0, $sp | ||
| 183 | bal syscall_trace_leave | ||
| 184 | b ret_slow_syscall | ||
diff --git a/arch/nds32/kernel/ex-scall.S b/arch/nds32/kernel/ex-scall.S new file mode 100644 index 000000000000..36aa87ecdabd --- /dev/null +++ b/arch/nds32/kernel/ex-scall.S | |||
| @@ -0,0 +1,98 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/linkage.h> | ||
| 5 | #include <asm/unistd.h> | ||
| 6 | #include <asm/assembler.h> | ||
| 7 | #include <asm/nds32.h> | ||
| 8 | #include <asm/asm-offsets.h> | ||
| 9 | #include <asm/thread_info.h> | ||
| 10 | #include <asm/current.h> | ||
| 11 | |||
| 12 | /* | ||
| 13 | * $r0 = previous task_struct, | ||
| 14 | * $r1 = next task_struct, | ||
| 15 | * previous and next are guaranteed not to be the same. | ||
| 16 | */ | ||
| 17 | |||
| 18 | ENTRY(__switch_to) | ||
| 19 | |||
| 20 | la $p0, __entry_task | ||
| 21 | sw $r1, [$p0] | ||
| 22 | move $p1, $r0 | ||
| 23 | addi $p1, $p1, #THREAD_CPU_CONTEXT | ||
| 24 | smw.bi $r6, [$p1], $r14, #0xb ! push r6~r14, fp, lp, sp | ||
| 25 | move $r25, $r1 | ||
| 26 | addi $r1, $r1, #THREAD_CPU_CONTEXT | ||
| 27 | lmw.bi $r6, [$r1], $r14, #0xb ! pop r6~r14, fp, lp, sp | ||
| 28 | ret | ||
| 29 | |||
| 30 | |||
| 31 | #define tbl $r8 | ||
| 32 | |||
| 33 | /* | ||
| 34 | * $r7 will be written with the syscall number | ||
| 35 | */ | ||
| 36 | .macro get_scno | ||
| 37 | lwi $r7, [$sp + R15_OFFSET] | ||
| 38 | swi $r7, [$sp + SYSCALLNO_OFFSET] | ||
| 39 | .endm | ||
| 40 | |||
| 41 | .macro updateipc | ||
| 42 | addi $r17, $r13, #4 ! $r13 is $IPC | ||
| 43 | swi $r17, [$sp + IPC_OFFSET] | ||
| 44 | .endm | ||
| 45 | |||
| 46 | ENTRY(eh_syscall) | ||
| 47 | updateipc | ||
| 48 | |||
| 49 | get_scno | ||
| 50 | gie_enable | ||
| 51 | |||
| 52 | lwi $p0, [tsk+#TSK_TI_FLAGS] ! check for syscall tracing | ||
| 53 | |||
| 54 | andi $p1, $p0, #_TIF_WORK_SYSCALL_ENTRY ! are we tracing syscalls? | ||
| 55 | bnez $p1, __sys_trace | ||
| 56 | |||
| 57 | la $lp, ret_fast_syscall ! return address | ||
| 58 | jmp_systbl: | ||
| 59 | addi $p1, $r7, #-__NR_syscalls ! the syscall number encoded in the syscall instruction is checked by the assembler | ||
| 60 | bgez $p1, _SCNO_EXCEED ! call sys_* routine | ||
| 61 | la tbl, sys_call_table ! load syscall table pointer | ||
| 62 | slli $p1, $r7, #2 | ||
| 63 | add $p1, tbl, $p1 | ||
| 64 | lwi $p1, [$p1] | ||
| 65 | jr $p1 ! no return | ||
| 66 | |||
| 67 | _SCNO_EXCEED: | ||
| 68 | ori $r0, $r7, #0 | ||
| 69 | ori $r1, $sp, #0 | ||
| 70 | b bad_syscall | ||
| 71 | |||
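In C terms the jmp_systbl path above is a bounds-checked table call; a rough sketch (the bad_syscall prototype is inferred from the register setup in _SCNO_EXCEED, and the real path returns through $lp = ret_fast_syscall):

        /* Illustrative only. */
        typedef long (*syscall_fn_t)(long, long, long, long, long, long);
        extern syscall_fn_t sys_call_table[];
        extern long bad_syscall(unsigned long scno, struct pt_regs *regs);

        static long dispatch_syscall(unsigned long scno, struct pt_regs *regs)
        {
                if (scno >= __NR_syscalls)              /* _SCNO_EXCEED path */
                        return bad_syscall(scno, regs);

                return sys_call_table[scno](regs->uregs[0], regs->uregs[1],
                                            regs->uregs[2], regs->uregs[3],
                                            regs->uregs[4], regs->uregs[5]);
        }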
| 72 | /* | ||
| 73 | * This is the really slow path. We're going to be doing | ||
| 74 | * context switches, and waiting for our parent to respond. | ||
| 75 | */ | ||
| 76 | __sys_trace: | ||
| 77 | move $r0, $sp | ||
| 78 | bal syscall_trace_enter | ||
| 79 | move $r7, $r0 | ||
| 80 | la $lp, __sys_trace_return ! return address | ||
| 81 | |||
| 82 | addi $p1, $r7, #1 | ||
| 83 | beqz $p1, ret_slow_syscall ! fatal signal is pending | ||
| 84 | |||
| 85 | addi $p1, $sp, #R0_OFFSET ! pointer to regs | ||
| 86 | lmw.bi $r0, [$p1], $r5 ! have to reload $r0 - $r5 | ||
| 87 | b jmp_systbl | ||
| 88 | |||
| 89 | __sys_trace_return: | ||
| 90 | swi $r0, [$sp+#R0_OFFSET] ! T: save returned $r0 | ||
| 91 | move $r0, $sp ! set pt_regs for syscall_trace_leave | ||
| 92 | bal syscall_trace_leave | ||
| 93 | b ret_slow_syscall | ||
| 94 | |||
| 95 | ENTRY(sys_rt_sigreturn_wrapper) | ||
| 96 | addi $r0, $sp, #0 | ||
| 97 | b sys_rt_sigreturn | ||
| 98 | ENDPROC(sys_rt_sigreturn_wrapper) | ||
diff --git a/arch/nds32/kernel/head.S b/arch/nds32/kernel/head.S new file mode 100644 index 000000000000..71f57bd70f3b --- /dev/null +++ b/arch/nds32/kernel/head.S | |||
| @@ -0,0 +1,188 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/linkage.h> | ||
| 5 | #include <linux/init.h> | ||
| 6 | #include <asm/ptrace.h> | ||
| 7 | #include <asm/asm-offsets.h> | ||
| 8 | #include <asm/page.h> | ||
| 9 | #include <asm/pgtable.h> | ||
| 10 | #include <asm/sizes.h> | ||
| 11 | #include <asm/thread_info.h> | ||
| 12 | |||
| 13 | #ifdef CONFIG_CPU_BIG_ENDIAN | ||
| 14 | #define OF_DT_MAGIC 0xd00dfeed | ||
| 15 | #else | ||
| 16 | #define OF_DT_MAGIC 0xedfe0dd0 | ||
| 17 | #endif | ||
| 18 | |||
| 19 | .globl swapper_pg_dir | ||
| 20 | .equ swapper_pg_dir, TEXTADDR - 0x4000 | ||
| 21 | |||
| 22 | /* | ||
| 23 | * Kernel startup entry point. | ||
| 24 | */ | ||
| 25 | .section ".head.text", "ax" | ||
| 26 | .type _stext, %function | ||
| 27 | ENTRY(_stext) | ||
| 28 | setgie.d ! Disable interrupt | ||
| 29 | isb | ||
| 30 | /* | ||
| 31 | * Disable the I- and D-caches; they are re-enabled later at the proper time | ||
| 32 | */ | ||
| 33 | mfsr $r0, $mr8 | ||
| 34 | li $r1, #~(CACHE_CTL_mskIC_EN|CACHE_CTL_mskDC_EN) | ||
| 35 | and $r0, $r0, $r1 | ||
| 36 | mtsr $r0, $mr8 | ||
| 37 | |||
| 38 | /* | ||
| 39 | * Process device tree blob | ||
| 40 | */ | ||
| 41 | andi $r0,$r2,#0x3 | ||
| 42 | li $r10, 0 | ||
| 43 | bne $r0, $r10, _nodtb | ||
| 44 | lwi $r0, [$r2] | ||
| 45 | li $r1, OF_DT_MAGIC | ||
| 46 | bne $r0, $r1, _nodtb | ||
| 47 | move $r10, $r2 | ||
| 48 | _nodtb: | ||
| 49 | |||
| 50 | /* | ||
| 51 | * Create a temporary mapping area for booting, before start_kernel | ||
| 52 | */ | ||
| 53 | sethi $r4, hi20(swapper_pg_dir) | ||
| 54 | li $p0, (PAGE_OFFSET - PHYS_OFFSET) | ||
| 55 | sub $r4, $r4, $p0 | ||
| 56 | tlbop FlushAll ! invalidate TLB | ||
| 57 | isb | ||
| 58 | mtsr $r4, $L1_PPTB ! load page table pointer | ||
| 59 | |||
| 60 | /* set NTC0 cacheable/writeback, multiple page sizes in use */ | ||
| 61 | mfsr $r3, $MMU_CTL | ||
| 62 | li $r0, #~MMU_CTL_mskNTC0 | ||
| 63 | and $r3, $r3, $r0 | ||
| 64 | #ifdef CONFIG_ANDES_PAGE_SIZE_4KB | ||
| 65 | ori $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0)) | ||
| 66 | #else | ||
| 67 | ori $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0)|MMU_CTL_D8KB) | ||
| 68 | #endif | ||
| 69 | #ifdef CONFIG_HW_SUPPORT_UNALIGNMENT_ACCESS | ||
| 70 | li $r0, #MMU_CTL_UNA | ||
| 71 | or $r3, $r3, $r0 | ||
| 72 | #endif | ||
| 73 | mtsr $r3, $MMU_CTL | ||
| 74 | isb | ||
| 75 | |||
| 76 | /* set page size and size of kernel image */ | ||
| 77 | mfsr $r0, $MMU_CFG | ||
| 78 | srli $r3, $r0, MMU_CFG_offfEPSZ | ||
| 79 | zeb $r3, $r3 | ||
| 80 | bnez $r3, _extra_page_size_support | ||
| 81 | #ifdef CONFIG_ANDES_PAGE_SIZE_4KB | ||
| 82 | li $r5, #SZ_4K ! Use 4KB page size | ||
| 83 | #else | ||
| 84 | li $r5, #SZ_8K ! Use 8KB page size | ||
| 85 | li $r3, #1 | ||
| 86 | #endif | ||
| 87 | mtsr $r3, $TLB_MISC | ||
| 88 | b _image_size_check | ||
| 89 | |||
| 90 | _extra_page_size_support: ! Use the extra page size (EPSZ) | ||
| 91 | clz $r6, $r3 | ||
| 92 | subri $r2, $r6, #31 | ||
| 93 | li $r3, #1 | ||
| 94 | sll $r3, $r3, $r2 | ||
| 95 | /* MMU_CFG.EPSZ value -> meaning */ | ||
| 96 | mul $r5, $r3, $r3 | ||
| 97 | slli $r5, $r5, #14 | ||
| 98 | /* MMU_CFG.EPSZ -> TLB_MISC.ACC_PSZ */ | ||
| 99 | addi $r3, $r2, #0x2 | ||
| 100 | mtsr $r3, $TLB_MISC | ||
| 101 | |||
| 102 | _image_size_check: | ||
| 103 | /* calculate the image maximum size accepted by TLB config */ | ||
| 104 | andi $r6, $r0, MMU_CFG_mskTBW | ||
| 105 | andi $r0, $r0, MMU_CFG_mskTBS | ||
| 106 | srli $r6, $r6, MMU_CFG_offTBW | ||
| 107 | srli $r0, $r0, MMU_CFG_offTBS | ||
| 108 | /* | ||
| 109 | * we map the kernel into at most (ways - 1) of the TLB and | ||
| 110 | * reserve one way for the UART VA mapping; a page fault would | ||
| 111 | * occur if the UART mapping covered the kernel mapping. | ||
| 112 | * | ||
| 113 | * direct mapping is not supported for now. | ||
| 114 | */ | ||
| 115 | li $r2, 't' | ||
| 116 | beqz $r6, __error ! MMU_CFG.TBW = 0 means direct mapping | ||
| 117 | addi $r0, $r0, #0x2 ! MMU_CFG.TBS value -> meaning | ||
| 118 | sll $r0, $r6, $r0 ! entries = k-way * n-set | ||
| 119 | mul $r6, $r0, $r5 ! max size = entries * page size | ||
| 120 | /* check kernel image size */ | ||
| 121 | la $r3, (_end - PAGE_OFFSET) | ||
| 122 | li $r2, 's' | ||
| 123 | bgt $r3, $r6, __error | ||
| 124 | |||
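To make the bound computed above concrete with illustrative numbers (not a statement about any particular core): if MMU_CFG reports a 4-way TLB and the TBS field decodes to 32 sets, the TLB holds 4 x 32 = 128 entries, so with 8 KB pages the kernel image may occupy at most 128 x 8 KB = 1 MB before the size check branches to __error.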
| 125 | li $r2, #(PHYS_OFFSET + TLB_DATA_kernel_text_attr) | ||
| 126 | li $r3, PAGE_OFFSET | ||
| 127 | add $r6, $r6, $r3 | ||
| 128 | |||
| 129 | _tlb: | ||
| 130 | mtsr $r3, $TLB_VPN | ||
| 131 | dsb | ||
| 132 | tlbop $r2, RWR | ||
| 133 | isb | ||
| 134 | add $r3, $r3, $r5 | ||
| 135 | add $r2, $r2, $r5 | ||
| 136 | bgt $r6, $r3, _tlb | ||
| 137 | mfsr $r3, $TLB_MISC ! setup access page size | ||
| 138 | li $r2, #~0xf | ||
| 139 | and $r3, $r3, $r2 | ||
| 140 | #ifdef CONFIG_ANDES_PAGE_SIZE_8KB | ||
| 141 | ori $r3, $r3, #0x1 | ||
| 142 | #endif | ||
| 143 | mtsr $r3, $TLB_MISC | ||
| 144 | |||
| 145 | mfsr $r0, $MISC_CTL ! Enable BTB and RTP and shadow sp | ||
| 146 | ori $r0, $r0, #MISC_init | ||
| 147 | mtsr $r0, $MISC_CTL | ||
| 148 | |||
| 149 | mfsr $p1, $PSW | ||
| 150 | li $r15, #~PSW_clr ! clear WBNA|DME|IME|DT|IT|POM|INTL|GIE | ||
| 151 | and $p1, $p1, $r15 | ||
| 152 | ori $p1, $p1, #PSW_init | ||
| 153 | mtsr $p1, $IPSW ! when iret, it will automatically enable MMU | ||
| 154 | la $lp, __mmap_switched | ||
| 155 | mtsr $lp, $IPC | ||
| 156 | iret | ||
| 157 | nop | ||
| 158 | |||
| 159 | .type __switch_data, %object | ||
| 160 | __switch_data: | ||
| 161 | .long __bss_start ! $r6 | ||
| 162 | .long _end ! $r7 | ||
| 163 | .long __atags_pointer ! $atag_pointer | ||
| 164 | .long init_task ! $r9, move to $r25 | ||
| 165 | .long init_thread_union + THREAD_SIZE ! $sp | ||
| 166 | |||
| 167 | |||
| 168 | /* | ||
| 169 | * The following fragment of code is executed with the MMU enabled, | ||
| 170 | * and uses absolute addresses; it is not position-independent. | ||
| 171 | */ | ||
| 172 | .align | ||
| 173 | .type __mmap_switched, %function | ||
| 174 | __mmap_switched: | ||
| 175 | la $r3, __switch_data | ||
| 176 | lmw.bim $r6, [$r3], $r9, #0b0001 | ||
| 177 | move $r25, $r9 | ||
| 178 | move $fp, #0 ! Clear BSS (and zero $fp) | ||
| 179 | beq $r7, $r6, _RRT | ||
| 180 | 1: swi.bi $fp, [$r6], #4 | ||
| 181 | bne $r7, $r6, 1b | ||
| 182 | swi $r10, [$r8] | ||
| 183 | |||
| 184 | _RRT: | ||
| 185 | b start_kernel | ||
| 186 | |||
| 187 | __error: | ||
| 188 | b __error | ||
diff --git a/arch/nds32/kernel/irq.c b/arch/nds32/kernel/irq.c new file mode 100644 index 000000000000..6ff5a672be27 --- /dev/null +++ b/arch/nds32/kernel/irq.c | |||
| @@ -0,0 +1,9 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/irqchip.h> | ||
| 5 | |||
| 6 | void __init init_IRQ(void) | ||
| 7 | { | ||
| 8 | irqchip_init(); | ||
| 9 | } | ||
diff --git a/arch/nds32/kernel/module.c b/arch/nds32/kernel/module.c new file mode 100644 index 000000000000..4167283d8293 --- /dev/null +++ b/arch/nds32/kernel/module.c | |||
| @@ -0,0 +1,278 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/module.h> | ||
| 5 | #include <linux/elf.h> | ||
| 6 | #include <linux/vmalloc.h> | ||
| 7 | #include <linux/moduleloader.h> | ||
| 8 | #include <asm/pgtable.h> | ||
| 9 | |||
| 10 | void *module_alloc(unsigned long size) | ||
| 11 | { | ||
| 12 | return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, | ||
| 13 | GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, | ||
| 14 | __builtin_return_address(0)); | ||
| 15 | } | ||
| 16 | |||
| 17 | void module_free(struct module *module, void *region) | ||
| 18 | { | ||
| 19 | vfree(region); | ||
| 20 | } | ||
| 21 | |||
| 22 | int module_frob_arch_sections(Elf_Ehdr * hdr, | ||
| 23 | Elf_Shdr * sechdrs, | ||
| 24 | char *secstrings, struct module *mod) | ||
| 25 | { | ||
| 26 | return 0; | ||
| 27 | } | ||
| 28 | |||
| 29 | void do_reloc16(unsigned int val, unsigned int *loc, unsigned int val_mask, | ||
| 30 | unsigned int val_shift, unsigned int loc_mask, | ||
| 31 | unsigned int partial_in_place, unsigned int swap) | ||
| 32 | { | ||
| 33 | unsigned int tmp = 0, tmp2 = 0; | ||
| 34 | |||
| 35 | __asm__ __volatile__("\tlhi.bi\t%0, [%2], 0\n" | ||
| 36 | "\tbeqz\t%3, 1f\n" | ||
| 37 | "\twsbh\t%0, %1\n" | ||
| 38 | "1:\n":"=r"(tmp):"0"(tmp), "r"(loc), "r"(swap) | ||
| 39 | ); | ||
| 40 | |||
| 41 | tmp2 = tmp & loc_mask; | ||
| 42 | if (partial_in_place) { | ||
| 43 | tmp &= (!loc_mask); | ||
| 44 | tmp = | ||
| 45 | tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask); | ||
| 46 | } else { | ||
| 47 | tmp = tmp2 | ((val & val_mask) >> val_shift); | ||
| 48 | } | ||
| 49 | |||
| 50 | __asm__ __volatile__("\tbeqz\t%3, 2f\n" | ||
| 51 | "\twsbh\t%0, %1\n" | ||
| 52 | "2:\n" | ||
| 53 | "\tshi.bi\t%0, [%2], 0\n":"=r"(tmp):"0"(tmp), | ||
| 54 | "r"(loc), "r"(swap) | ||
| 55 | ); | ||
| 56 | } | ||
| 57 | |||
| 58 | void do_reloc32(unsigned int val, unsigned int *loc, unsigned int val_mask, | ||
| 59 | unsigned int val_shift, unsigned int loc_mask, | ||
| 60 | unsigned int partial_in_place, unsigned int swap) | ||
| 61 | { | ||
| 62 | unsigned int tmp = 0, tmp2 = 0; | ||
| 63 | |||
| 64 | __asm__ __volatile__("\tlmw.bi\t%0, [%2], %0, 0\n" | ||
| 65 | "\tbeqz\t%3, 1f\n" | ||
| 66 | "\twsbh\t%0, %1\n" | ||
| 67 | "\trotri\t%0, %1, 16\n" | ||
| 68 | "1:\n":"=r"(tmp):"0"(tmp), "r"(loc), "r"(swap) | ||
| 69 | ); | ||
| 70 | |||
| 71 | tmp2 = tmp & loc_mask; | ||
| 72 | if (partial_in_place) { | ||
| 73 | tmp &= (!loc_mask); | ||
| 74 | tmp = | ||
| 75 | tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask); | ||
| 76 | } else { | ||
| 77 | tmp = tmp2 | ((val & val_mask) >> val_shift); | ||
| 78 | } | ||
| 79 | |||
| 80 | __asm__ __volatile__("\tbeqz\t%3, 2f\n" | ||
| 81 | "\twsbh\t%0, %1\n" | ||
| 82 | "\trotri\t%0, %1, 16\n" | ||
| 83 | "2:\n" | ||
| 84 | "\tsmw.bi\t%0, [%2], %0, 0\n":"=r"(tmp):"0"(tmp), | ||
| 85 | "r"(loc), "r"(swap) | ||
| 86 | ); | ||
| 87 | } | ||
| 88 | |||
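The HI20/LO12 relocation cases handled below split a 32-bit value into the fields of a sethi/ori pair; a worked example with an illustrative address:

        /* Illustrative only: splitting v = 0x12345678 with the masks and
         * shifts passed to do_reloc32() below.
         *
         *   R_NDS32_HI20_RELA:   (v & 0xfffff000) >> 12  = 0x12345
         *   R_NDS32_LO12S0_RELA: (v & 0x00000fff) >>  0  =   0x678
         *
         * Together the two patched instructions rebuild the full 32-bit
         * address, matching the sethi/ori sequences used elsewhere in
         * this patch.
         */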
| 89 | static inline int exceed_limit(int offset, unsigned int val_mask, | ||
| 90 | struct module *module, Elf32_Rela * rel, | ||
| 91 | unsigned int relindex, unsigned int reloc_order) | ||
| 92 | { | ||
| 93 | int abs_off = offset < 0 ? ~offset : offset; | ||
| 94 | |||
| 95 | if (abs_off & (~val_mask)) { | ||
| 96 | pr_err("\n%s: relocation type %d out of range.\n" | ||
| 97 | "please rebuild the kernel module with gcc option \"-Wa,-mno-small-text\".\n", | ||
| 98 | module->name, ELF32_R_TYPE(rel->r_info)); | ||
| 99 | pr_err("section %d reloc %d offset 0x%x relative 0x%x.\n", | ||
| 100 | relindex, reloc_order, rel->r_offset, offset); | ||
| 101 | return true; | ||
| 102 | } | ||
| 103 | return false; | ||
| 104 | } | ||
| 105 | |||
| 106 | #ifdef __NDS32_EL__ | ||
| 107 | #define NEED_SWAP 1 | ||
| 108 | #else | ||
| 109 | #define NEED_SWAP 0 | ||
| 110 | #endif | ||
| 111 | |||
| 112 | int | ||
| 113 | apply_relocate_add(Elf32_Shdr * sechdrs, const char *strtab, | ||
| 114 | unsigned int symindex, unsigned int relindex, | ||
| 115 | struct module *module) | ||
| 116 | { | ||
| 117 | Elf32_Shdr *symsec = sechdrs + symindex; | ||
| 118 | Elf32_Shdr *relsec = sechdrs + relindex; | ||
| 119 | Elf32_Shdr *dstsec = sechdrs + relsec->sh_info; | ||
| 120 | Elf32_Rela *rel = (void *)relsec->sh_addr; | ||
| 121 | unsigned int i; | ||
| 122 | |||
| 123 | for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rela); i++, rel++) { | ||
| 124 | Elf32_Addr *loc; | ||
| 125 | Elf32_Sym *sym; | ||
| 126 | Elf32_Addr v; | ||
| 127 | s32 offset; | ||
| 128 | |||
| 129 | offset = ELF32_R_SYM(rel->r_info); | ||
| 130 | if (offset < 0 | ||
| 131 | || offset > (symsec->sh_size / sizeof(Elf32_Sym))) { | ||
| 132 | pr_err("%s: bad relocation\n", module->name); | ||
| 133 | pr_err("section %d reloc %d\n", relindex, i); | ||
| 134 | return -ENOEXEC; | ||
| 135 | } | ||
| 136 | |||
| 137 | sym = ((Elf32_Sym *) symsec->sh_addr) + offset; | ||
| 138 | |||
| 139 | if (rel->r_offset < 0 | ||
| 140 | || rel->r_offset > dstsec->sh_size - sizeof(u16)) { | ||
| 141 | pr_err("%s: out of bounds relocation\n", module->name); | ||
| 142 | pr_err("section %d reloc %d offset 0x%0x size %d\n", | ||
| 143 | relindex, i, rel->r_offset, dstsec->sh_size); | ||
| 144 | return -ENOEXEC; | ||
| 145 | } | ||
| 146 | |||
| 147 | loc = (Elf32_Addr *) (dstsec->sh_addr + rel->r_offset); | ||
| 148 | v = sym->st_value + rel->r_addend; | ||
| 149 | |||
| 150 | switch (ELF32_R_TYPE(rel->r_info)) { | ||
| 151 | case R_NDS32_NONE: | ||
| 152 | case R_NDS32_INSN16: | ||
| 153 | case R_NDS32_LABEL: | ||
| 154 | case R_NDS32_LONGCALL1: | ||
| 155 | case R_NDS32_LONGCALL2: | ||
| 156 | case R_NDS32_LONGCALL3: | ||
| 157 | case R_NDS32_LONGCALL4: | ||
| 158 | case R_NDS32_LONGJUMP1: | ||
| 159 | case R_NDS32_LONGJUMP2: | ||
| 160 | case R_NDS32_LONGJUMP3: | ||
| 161 | case R_NDS32_9_FIXED_RELA: | ||
| 162 | case R_NDS32_15_FIXED_RELA: | ||
| 163 | case R_NDS32_17_FIXED_RELA: | ||
| 164 | case R_NDS32_25_FIXED_RELA: | ||
| 165 | case R_NDS32_LOADSTORE: | ||
| 166 | case R_NDS32_DWARF2_OP1_RELA: | ||
| 167 | case R_NDS32_DWARF2_OP2_RELA: | ||
| 168 | case R_NDS32_DWARF2_LEB_RELA: | ||
| 169 | case R_NDS32_RELA_NOP_MIX ... R_NDS32_RELA_NOP_MAX: | ||
| 170 | break; | ||
| 171 | |||
| 172 | case R_NDS32_32_RELA: | ||
| 173 | do_reloc32(v, loc, 0xffffffff, 0, 0, 0, 0); | ||
| 174 | break; | ||
| 175 | |||
| 176 | case R_NDS32_HI20_RELA: | ||
| 177 | do_reloc32(v, loc, 0xfffff000, 12, 0xfff00000, 0, | ||
| 178 | NEED_SWAP); | ||
| 179 | break; | ||
| 180 | |||
| 181 | case R_NDS32_LO12S3_RELA: | ||
| 182 | do_reloc32(v, loc, 0x00000fff, 3, 0xfffff000, 0, | ||
| 183 | NEED_SWAP); | ||
| 184 | break; | ||
| 185 | |||
| 186 | case R_NDS32_LO12S2_RELA: | ||
| 187 | do_reloc32(v, loc, 0x00000fff, 2, 0xfffff000, 0, | ||
| 188 | NEED_SWAP); | ||
| 189 | break; | ||
| 190 | |||
| 191 | case R_NDS32_LO12S1_RELA: | ||
| 192 | do_reloc32(v, loc, 0x00000fff, 1, 0xfffff000, 0, | ||
| 193 | NEED_SWAP); | ||
| 194 | break; | ||
| 195 | |||
| 196 | case R_NDS32_LO12S0_RELA: | ||
| 197 | case R_NDS32_LO12S0_ORI_RELA: | ||
| 198 | do_reloc32(v, loc, 0x00000fff, 0, 0xfffff000, 0, | ||
| 199 | NEED_SWAP); | ||
| 200 | break; | ||
| 201 | |||
| 202 | case R_NDS32_9_PCREL_RELA: | ||
| 203 | if (exceed_limit | ||
| 204 | ((v - (Elf32_Addr) loc), 0x000000ff, module, rel, | ||
| 205 | relindex, i)) | ||
| 206 | return -ENOEXEC; | ||
| 207 | do_reloc16(v - (Elf32_Addr) loc, loc, 0x000001ff, 1, | ||
| 208 | 0xffffff00, 0, NEED_SWAP); | ||
| 209 | break; | ||
| 210 | |||
| 211 | case R_NDS32_15_PCREL_RELA: | ||
| 212 | if (exceed_limit | ||
| 213 | ((v - (Elf32_Addr) loc), 0x00003fff, module, rel, | ||
| 214 | relindex, i)) | ||
| 215 | return -ENOEXEC; | ||
| 216 | do_reloc32(v - (Elf32_Addr) loc, loc, 0x00007fff, 1, | ||
| 217 | 0xffffc000, 0, NEED_SWAP); | ||
| 218 | break; | ||
| 219 | |||
| 220 | case R_NDS32_17_PCREL_RELA: | ||
| 221 | if (exceed_limit | ||
| 222 | ((v - (Elf32_Addr) loc), 0x0000ffff, module, rel, | ||
| 223 | relindex, i)) | ||
| 224 | return -ENOEXEC; | ||
| 225 | do_reloc32(v - (Elf32_Addr) loc, loc, 0x0001ffff, 1, | ||
| 226 | 0xffff0000, 0, NEED_SWAP); | ||
| 227 | break; | ||
| 228 | |||
| 229 | case R_NDS32_25_PCREL_RELA: | ||
| 230 | if (exceed_limit | ||
| 231 | ((v - (Elf32_Addr) loc), 0x00ffffff, module, rel, | ||
| 232 | relindex, i)) | ||
| 233 | return -ENOEXEC; | ||
| 234 | do_reloc32(v - (Elf32_Addr) loc, loc, 0x01ffffff, 1, | ||
| 235 | 0xff000000, 0, NEED_SWAP); | ||
| 236 | break; | ||
| 237 | case R_NDS32_WORD_9_PCREL_RELA: | ||
| 238 | if (exceed_limit | ||
| 239 | ((v - (Elf32_Addr) loc), 0x000000ff, module, rel, | ||
| 240 | relindex, i)) | ||
| 241 | return -ENOEXEC; | ||
| 242 | do_reloc32(v - (Elf32_Addr) loc, loc, 0x000001ff, 1, | ||
| 243 | 0xffffff00, 0, NEED_SWAP); | ||
| 244 | break; | ||
| 245 | |||
| 246 | case R_NDS32_SDA15S3_RELA: | ||
| 247 | case R_NDS32_SDA15S2_RELA: | ||
| 248 | case R_NDS32_SDA15S1_RELA: | ||
| 249 | case R_NDS32_SDA15S0_RELA: | ||
| 250 | pr_err("%s: unsupported relocation type %d.\n", | ||
| 251 | module->name, ELF32_R_TYPE(rel->r_info)); | ||
| 252 | 			pr_err("Small data section access doesn't work in the kernel space; " | ||
| 253 | 			       "please rebuild the kernel module with " | ||
| 254 | 			       "gcc option -mcmodel=large.\n"); | ||
| 255 | pr_err("section %d reloc %d offset 0x%x size %d\n", | ||
| 256 | relindex, i, rel->r_offset, dstsec->sh_size); | ||
| 257 | break; | ||
| 258 | |||
| 259 | default: | ||
| 260 | pr_err("%s: unsupported relocation type %d.\n", | ||
| 261 | module->name, ELF32_R_TYPE(rel->r_info)); | ||
| 262 | pr_err("section %d reloc %d offset 0x%x size %d\n", | ||
| 263 | relindex, i, rel->r_offset, dstsec->sh_size); | ||
| 264 | } | ||
| 265 | } | ||
| 266 | return 0; | ||
| 267 | } | ||
| 268 | |||
| 269 | int | ||
| 270 | module_finalize(const Elf32_Ehdr * hdr, const Elf_Shdr * sechdrs, | ||
| 271 | struct module *module) | ||
| 272 | { | ||
| 273 | return 0; | ||
| 274 | } | ||
| 275 | |||
| 276 | void module_arch_cleanup(struct module *mod) | ||
| 277 | { | ||
| 278 | } | ||
diff --git a/arch/nds32/kernel/nds32_ksyms.c b/arch/nds32/kernel/nds32_ksyms.c new file mode 100644 index 000000000000..5ecebd0e60cb --- /dev/null +++ b/arch/nds32/kernel/nds32_ksyms.c | |||
| @@ -0,0 +1,31 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/module.h> | ||
| 5 | #include <linux/string.h> | ||
| 6 | #include <linux/delay.h> | ||
| 7 | #include <linux/in6.h> | ||
| 8 | #include <linux/syscalls.h> | ||
| 9 | #include <linux/uaccess.h> | ||
| 10 | |||
| 11 | #include <asm/checksum.h> | ||
| 12 | #include <asm/io.h> | ||
| 13 | #include <asm/ftrace.h> | ||
| 14 | #include <asm/proc-fns.h> | ||
| 15 | |||
| 16 | /* mem functions */ | ||
| 17 | EXPORT_SYMBOL(memset); | ||
| 18 | EXPORT_SYMBOL(memcpy); | ||
| 19 | EXPORT_SYMBOL(memmove); | ||
| 20 | EXPORT_SYMBOL(memzero); | ||
| 21 | |||
| 22 | /* user mem (segment) */ | ||
| 23 | EXPORT_SYMBOL(__arch_copy_from_user); | ||
| 24 | EXPORT_SYMBOL(__arch_copy_to_user); | ||
| 25 | EXPORT_SYMBOL(__arch_clear_user); | ||
| 26 | |||
| 27 | /* cache handling */ | ||
| 28 | EXPORT_SYMBOL(cpu_icache_inval_all); | ||
| 29 | EXPORT_SYMBOL(cpu_dcache_wbinval_all); | ||
| 30 | EXPORT_SYMBOL(cpu_dma_inval_range); | ||
| 31 | EXPORT_SYMBOL(cpu_dma_wb_range); | ||
diff --git a/arch/nds32/kernel/process.c b/arch/nds32/kernel/process.c new file mode 100644 index 000000000000..65fda986e55f --- /dev/null +++ b/arch/nds32/kernel/process.c | |||
| @@ -0,0 +1,208 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/sched.h> | ||
| 5 | #include <linux/sched/debug.h> | ||
| 6 | #include <linux/sched/task_stack.h> | ||
| 7 | #include <linux/delay.h> | ||
| 8 | #include <linux/kallsyms.h> | ||
| 9 | #include <linux/uaccess.h> | ||
| 10 | #include <asm/elf.h> | ||
| 11 | #include <asm/proc-fns.h> | ||
| 12 | #include <linux/ptrace.h> | ||
| 13 | #include <linux/reboot.h> | ||
| 14 | |||
| 15 | extern void setup_mm_for_reboot(char mode); | ||
| 16 | #ifdef CONFIG_PROC_FS | ||
| 17 | struct proc_dir_entry *proc_dir_cpu; | ||
| 18 | EXPORT_SYMBOL(proc_dir_cpu); | ||
| 19 | #endif | ||
| 20 | |||
| 21 | extern inline void arch_reset(char mode) | ||
| 22 | { | ||
| 23 | if (mode == 's') { | ||
| 24 | /* Use cpu handler, jump to 0 */ | ||
| 25 | cpu_reset(0); | ||
| 26 | } | ||
| 27 | } | ||
| 28 | |||
| 29 | void (*pm_power_off) (void); | ||
| 30 | EXPORT_SYMBOL(pm_power_off); | ||
| 31 | |||
| 32 | static char reboot_mode_nds32 = 'h'; | ||
| 33 | |||
| 34 | int __init reboot_setup(char *str) | ||
| 35 | { | ||
| 36 | reboot_mode_nds32 = str[0]; | ||
| 37 | return 1; | ||
| 38 | } | ||
| 39 | |||
| 40 | static int cpub_pwroff(void) | ||
| 41 | { | ||
| 42 | return 0; | ||
| 43 | } | ||
| 44 | |||
| 45 | __setup("reboot=", reboot_setup); | ||
| 46 | |||
| 47 | void machine_halt(void) | ||
| 48 | { | ||
| 49 | cpub_pwroff(); | ||
| 50 | } | ||
| 51 | |||
| 52 | EXPORT_SYMBOL(machine_halt); | ||
| 53 | |||
| 54 | void machine_power_off(void) | ||
| 55 | { | ||
| 56 | if (pm_power_off) | ||
| 57 | pm_power_off(); | ||
| 58 | } | ||
| 59 | |||
| 60 | EXPORT_SYMBOL(machine_power_off); | ||
| 61 | |||
| 62 | void machine_restart(char *cmd) | ||
| 63 | { | ||
| 64 | /* | ||
| 65 | * Clean and disable cache, and turn off interrupts | ||
| 66 | */ | ||
| 67 | cpu_proc_fin(); | ||
| 68 | |||
| 69 | /* | ||
| 70 | * Tell the mm system that we are going to reboot - | ||
| 71 | * we may need it to insert some 1:1 mappings so that | ||
| 72 | * soft boot works. | ||
| 73 | */ | ||
| 74 | setup_mm_for_reboot(reboot_mode_nds32); | ||
| 75 | |||
| 76 | /* Execute kernel restart handler call chain */ | ||
| 77 | do_kernel_restart(cmd); | ||
| 78 | |||
| 79 | /* | ||
| 80 | * Now call the architecture specific reboot code. | ||
| 81 | */ | ||
| 82 | arch_reset(reboot_mode_nds32); | ||
| 83 | |||
| 84 | /* | ||
| 85 | * Whoops - the architecture was unable to reboot. | ||
| 86 | * Tell the user! | ||
| 87 | */ | ||
| 88 | mdelay(1000); | ||
| 89 | pr_info("Reboot failed -- System halted\n"); | ||
| 90 | while (1) ; | ||
| 91 | } | ||
| 92 | |||
| 93 | EXPORT_SYMBOL(machine_restart); | ||
| 94 | |||
| 95 | void show_regs(struct pt_regs *regs) | ||
| 96 | { | ||
| 97 | printk("PC is at %pS\n", (void *)instruction_pointer(regs)); | ||
| 98 | printk("LP is at %pS\n", (void *)regs->lp); | ||
| 99 | pr_info("pc : [<%08lx>] lp : [<%08lx>] %s\n" | ||
| 100 | "sp : %08lx fp : %08lx gp : %08lx\n", | ||
| 101 | instruction_pointer(regs), | ||
| 102 | regs->lp, print_tainted(), regs->sp, regs->fp, regs->gp); | ||
| 103 | pr_info("r25: %08lx r24: %08lx\n", regs->uregs[25], regs->uregs[24]); | ||
| 104 | |||
| 105 | pr_info("r23: %08lx r22: %08lx r21: %08lx r20: %08lx\n", | ||
| 106 | regs->uregs[23], regs->uregs[22], | ||
| 107 | regs->uregs[21], regs->uregs[20]); | ||
| 108 | pr_info("r19: %08lx r18: %08lx r17: %08lx r16: %08lx\n", | ||
| 109 | regs->uregs[19], regs->uregs[18], | ||
| 110 | regs->uregs[17], regs->uregs[16]); | ||
| 111 | pr_info("r15: %08lx r14: %08lx r13: %08lx r12: %08lx\n", | ||
| 112 | regs->uregs[15], regs->uregs[14], | ||
| 113 | regs->uregs[13], regs->uregs[12]); | ||
| 114 | pr_info("r11: %08lx r10: %08lx r9 : %08lx r8 : %08lx\n", | ||
| 115 | regs->uregs[11], regs->uregs[10], | ||
| 116 | regs->uregs[9], regs->uregs[8]); | ||
| 117 | pr_info("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n", | ||
| 118 | regs->uregs[7], regs->uregs[6], regs->uregs[5], regs->uregs[4]); | ||
| 119 | pr_info("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n", | ||
| 120 | regs->uregs[3], regs->uregs[2], regs->uregs[1], regs->uregs[0]); | ||
| 121 | pr_info(" IRQs o%s Segment %s\n", | ||
| 122 | interrupts_enabled(regs) ? "n" : "ff", | ||
| 123 | segment_eq(get_fs(), get_ds())? "kernel" : "user"); | ||
| 124 | } | ||
| 125 | |||
| 126 | EXPORT_SYMBOL(show_regs); | ||
| 127 | |||
| 128 | void flush_thread(void) | ||
| 129 | { | ||
| 130 | } | ||
| 131 | |||
| 132 | DEFINE_PER_CPU(struct task_struct *, __entry_task); | ||
| 133 | |||
| 134 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); | ||
| 135 | int copy_thread(unsigned long clone_flags, unsigned long stack_start, | ||
| 136 | unsigned long stk_sz, struct task_struct *p) | ||
| 137 | { | ||
| 138 | struct pt_regs *childregs = task_pt_regs(p); | ||
| 139 | |||
| 140 | memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); | ||
| 141 | |||
| 142 | if (unlikely(p->flags & PF_KTHREAD)) { | ||
| 143 | memset(childregs, 0, sizeof(struct pt_regs)); | ||
| 144 | /* kernel thread fn */ | ||
| 145 | p->thread.cpu_context.r6 = stack_start; | ||
| 146 | /* kernel thread argument */ | ||
| 147 | p->thread.cpu_context.r7 = stk_sz; | ||
| 148 | } else { | ||
| 149 | *childregs = *current_pt_regs(); | ||
| 150 | if (stack_start) | ||
| 151 | childregs->sp = stack_start; | ||
| 152 | /* child get zero as ret. */ | ||
| 153 | childregs->uregs[0] = 0; | ||
| 154 | childregs->osp = 0; | ||
| 155 | if (clone_flags & CLONE_SETTLS) | ||
| 156 | childregs->uregs[25] = childregs->uregs[3]; | ||
| 157 | } | ||
| 158 | /* cpu context switching */ | ||
| 159 | p->thread.cpu_context.pc = (unsigned long)ret_from_fork; | ||
| 160 | p->thread.cpu_context.sp = (unsigned long)childregs; | ||
| 161 | |||
| 162 | #ifdef CONFIG_HWZOL | ||
| 163 | childregs->lb = 0; | ||
| 164 | childregs->le = 0; | ||
| 165 | childregs->lc = 0; | ||
| 166 | #endif | ||
| 167 | |||
| 168 | return 0; | ||
| 169 | } | ||
| 170 | |||
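For kernel threads, copy_thread() above parks the thread function in cpu_context.r6 and its argument in r7, and points pc at ret_from_fork. A rough C rendering of what ret_from_fork (ex-exit.S) then does with those values (illustrative; the real code operates on the registers restored by __switch_to, not on the struct fields):

        /* Sketch of the kernel-thread path taken by ret_from_fork. */
        void ret_from_fork_c(struct task_struct *prev,
                             int (*fn)(void *), void *arg)
        {
                schedule_tail(prev);    /* "bal schedule_tail"             */
                if (fn)                 /* "beqz $r6, 1f"  ($r6 holds fn)  */
                        fn(arg);        /* "jral $r6"      ($r7 holds arg) */
                /* otherwise fall through to the user-mode return path */
        }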
| 171 | /* | ||
| 172 | * fill in the fpe structure for a core dump... | ||
| 173 | */ | ||
| 174 | int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpu) | ||
| 175 | { | ||
| 176 | int fpvalid = 0; | ||
| 177 | return fpvalid; | ||
| 178 | } | ||
| 179 | |||
| 180 | EXPORT_SYMBOL(dump_fpu); | ||
| 181 | |||
| 182 | unsigned long get_wchan(struct task_struct *p) | ||
| 183 | { | ||
| 184 | unsigned long fp, lr; | ||
| 185 | unsigned long stack_start, stack_end; | ||
| 186 | int count = 0; | ||
| 187 | |||
| 188 | if (!p || p == current || p->state == TASK_RUNNING) | ||
| 189 | return 0; | ||
| 190 | |||
| 191 | if (IS_ENABLED(CONFIG_FRAME_POINTER)) { | ||
| 192 | stack_start = (unsigned long)end_of_stack(p); | ||
| 193 | stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE; | ||
| 194 | |||
| 195 | fp = thread_saved_fp(p); | ||
| 196 | do { | ||
| 197 | if (fp < stack_start || fp > stack_end) | ||
| 198 | return 0; | ||
| 199 | lr = ((unsigned long *)fp)[0]; | ||
| 200 | if (!in_sched_functions(lr)) | ||
| 201 | return lr; | ||
| 202 | fp = *(unsigned long *)(fp + 4); | ||
| 203 | } while (count++ < 16); | ||
| 204 | } | ||
| 205 | return 0; | ||
| 206 | } | ||
| 207 | |||
| 208 | EXPORT_SYMBOL(get_wchan); | ||
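get_wchan() above relies on the frame layout this port's function prologue produces: the saved return address sits at [fp] and the caller's frame pointer at [fp + 4], so each step of the (16-frame-bounded) walk is simply:

        lr = ((unsigned long *)fp)[0];   /* saved return address at [fp]     */
        fp = ((unsigned long *)fp)[1];   /* caller's frame pointer at [fp+4] */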
diff --git a/arch/nds32/kernel/ptrace.c b/arch/nds32/kernel/ptrace.c new file mode 100644 index 000000000000..eaaf7a999b20 --- /dev/null +++ b/arch/nds32/kernel/ptrace.c | |||
| @@ -0,0 +1,119 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/ptrace.h> | ||
| 5 | #include <linux/regset.h> | ||
| 6 | #include <linux/tracehook.h> | ||
| 7 | #include <linux/elf.h> | ||
| 8 | #include <linux/sched/task_stack.h> | ||
| 9 | |||
| 10 | enum nds32_regset { | ||
| 11 | REGSET_GPR, | ||
| 12 | }; | ||
| 13 | |||
| 14 | static int gpr_get(struct task_struct *target, | ||
| 15 | const struct user_regset *regset, | ||
| 16 | unsigned int pos, unsigned int count, | ||
| 17 | void *kbuf, void __user * ubuf) | ||
| 18 | { | ||
| 19 | struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs; | ||
| 20 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1); | ||
| 21 | } | ||
| 22 | |||
| 23 | static int gpr_set(struct task_struct *target, const struct user_regset *regset, | ||
| 24 | unsigned int pos, unsigned int count, | ||
| 25 | const void *kbuf, const void __user * ubuf) | ||
| 26 | { | ||
| 27 | int err; | ||
| 28 | struct user_pt_regs newregs = task_pt_regs(target)->user_regs; | ||
| 29 | |||
| 30 | err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1); | ||
| 31 | if (err) | ||
| 32 | return err; | ||
| 33 | |||
| 34 | task_pt_regs(target)->user_regs = newregs; | ||
| 35 | return 0; | ||
| 36 | } | ||
| 37 | |||
| 38 | static const struct user_regset nds32_regsets[] = { | ||
| 39 | [REGSET_GPR] = { | ||
| 40 | .core_note_type = NT_PRSTATUS, | ||
| 41 | .n = sizeof(struct user_pt_regs) / sizeof(u32), | ||
| 42 | .size = sizeof(elf_greg_t), | ||
| 43 | .align = sizeof(elf_greg_t), | ||
| 44 | .get = gpr_get, | ||
| 45 | .set = gpr_set} | ||
| 46 | }; | ||
| 47 | |||
| 48 | static const struct user_regset_view nds32_user_view = { | ||
| 49 | .name = "nds32", | ||
| 50 | .e_machine = EM_NDS32, | ||
| 51 | .regsets = nds32_regsets, | ||
| 52 | .n = ARRAY_SIZE(nds32_regsets) | ||
| 53 | }; | ||
| 54 | |||
| 55 | const struct user_regset_view *task_user_regset_view(struct task_struct *task) | ||
| 56 | { | ||
| 57 | return &nds32_user_view; | ||
| 58 | } | ||
| 59 | |||
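The regset above is what a debugger reaches with PTRACE_GETREGSET and NT_PRSTATUS; a minimal userspace sketch of a tracer reading the GPRs (hypothetical helper, error handling omitted; user_pt_regs comes from the uapi asm/ptrace.h):

        #include <sys/ptrace.h>
        #include <sys/types.h>
        #include <sys/uio.h>
        #include <elf.h>
        #include <asm/ptrace.h>

        /* Read the traced child's general-purpose registers. */
        static long read_gprs(pid_t pid, struct user_pt_regs *regs)
        {
                struct iovec iov = {
                        .iov_base = regs,
                        .iov_len  = sizeof(*regs),
                };

                return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
        }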
| 60 | void ptrace_disable(struct task_struct *child) | ||
| 61 | { | ||
| 62 | user_disable_single_step(child); | ||
| 63 | } | ||
| 64 | |||
| 65 | /* arch_ptrace() | ||
| 66 | * | ||
| 67 | * Handle architecture-specific ptrace requests. | ||
| 68 | */ | ||
| 69 | long arch_ptrace(struct task_struct *child, long request, unsigned long addr, | ||
| 70 | unsigned long data) | ||
| 71 | { | ||
| 72 | int ret = -EIO; | ||
| 73 | |||
| 74 | switch (request) { | ||
| 75 | default: | ||
| 76 | ret = ptrace_request(child, request, addr, data); | ||
| 77 | break; | ||
| 78 | } | ||
| 79 | |||
| 80 | return ret; | ||
| 81 | } | ||
| 82 | |||
| 83 | void user_enable_single_step(struct task_struct *child) | ||
| 84 | { | ||
| 85 | struct pt_regs *regs; | ||
| 86 | regs = task_pt_regs(child); | ||
| 87 | regs->ipsw |= PSW_mskHSS; | ||
| 88 | set_tsk_thread_flag(child, TIF_SINGLESTEP); | ||
| 89 | } | ||
| 90 | |||
| 91 | void user_disable_single_step(struct task_struct *child) | ||
| 92 | { | ||
| 93 | struct pt_regs *regs; | ||
| 94 | regs = task_pt_regs(child); | ||
| 95 | regs->ipsw &= ~PSW_mskHSS; | ||
| 96 | clear_tsk_thread_flag(child, TIF_SINGLESTEP); | ||
| 97 | } | ||
| 98 | |||
| 99 | /* syscall_trace_enter()/syscall_trace_leave() | ||
| 100 | * | ||
| 101 | * Syscall tracing handlers. | ||
| 102 | */ | ||
| 103 | |||
| 104 | asmlinkage int syscall_trace_enter(struct pt_regs *regs) | ||
| 105 | { | ||
| 106 | if (test_thread_flag(TIF_SYSCALL_TRACE)) { | ||
| 107 | if (tracehook_report_syscall_entry(regs)) | ||
| 108 | forget_syscall(regs); | ||
| 109 | } | ||
| 110 | return regs->syscallno; | ||
| 111 | } | ||
| 112 | |||
| 113 | asmlinkage void syscall_trace_leave(struct pt_regs *regs) | ||
| 114 | { | ||
| 115 | int step = test_thread_flag(TIF_SINGLESTEP); | ||
| 116 | if (step || test_thread_flag(TIF_SYSCALL_TRACE)) | ||
| 117 | tracehook_report_syscall_exit(regs, step); | ||
| 118 | |||
| 119 | } | ||
diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c new file mode 100644 index 000000000000..ba910e9e4ecb --- /dev/null +++ b/arch/nds32/kernel/setup.c | |||
| @@ -0,0 +1,363 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/cpu.h> | ||
| 5 | #include <linux/bootmem.h> | ||
| 6 | #include <linux/seq_file.h> | ||
| 7 | #include <linux/memblock.h> | ||
| 8 | #include <linux/console.h> | ||
| 9 | #include <linux/screen_info.h> | ||
| 10 | #include <linux/delay.h> | ||
| 11 | #include <linux/dma-mapping.h> | ||
| 12 | #include <linux/of_fdt.h> | ||
| 13 | #include <linux/of_platform.h> | ||
| 14 | #include <asm/setup.h> | ||
| 15 | #include <asm/sections.h> | ||
| 16 | #include <asm/proc-fns.h> | ||
| 17 | #include <asm/cache_info.h> | ||
| 18 | #include <asm/elf.h> | ||
| 19 | #include <nds32_intrinsic.h> | ||
| 20 | |||
| 21 | #define HWCAP_MFUSR_PC 0x000001 | ||
| 22 | #define HWCAP_EXT 0x000002 | ||
| 23 | #define HWCAP_EXT2 0x000004 | ||
| 24 | #define HWCAP_FPU 0x000008 | ||
| 25 | #define HWCAP_AUDIO 0x000010 | ||
| 26 | #define HWCAP_BASE16 0x000020 | ||
| 27 | #define HWCAP_STRING 0x000040 | ||
| 28 | #define HWCAP_REDUCED_REGS 0x000080 | ||
| 29 | #define HWCAP_VIDEO 0x000100 | ||
| 30 | #define HWCAP_ENCRYPT 0x000200 | ||
| 31 | #define HWCAP_EDM 0x000400 | ||
| 32 | #define HWCAP_LMDMA 0x000800 | ||
| 33 | #define HWCAP_PFM 0x001000 | ||
| 34 | #define HWCAP_HSMP 0x002000 | ||
| 35 | #define HWCAP_TRACE 0x004000 | ||
| 36 | #define HWCAP_DIV 0x008000 | ||
| 37 | #define HWCAP_MAC 0x010000 | ||
| 38 | #define HWCAP_L2C 0x020000 | ||
| 39 | #define HWCAP_FPU_DP 0x040000 | ||
| 40 | #define HWCAP_V2 0x080000 | ||
| 41 | #define HWCAP_DX_REGS 0x100000 | ||
| 42 | |||
| 43 | unsigned long cpu_id, cpu_rev, cpu_cfgid; | ||
| 44 | char cpu_series; | ||
| 45 | char *endianness = NULL; | ||
| 46 | |||
| 47 | unsigned int __atags_pointer __initdata; | ||
| 48 | unsigned int elf_hwcap; | ||
| 49 | EXPORT_SYMBOL(elf_hwcap); | ||
| 50 | |||
| 51 | /* | ||
| 52 | * The following string table must be kept in sync with the HWCAP_xx | ||
| 53 | * bitmask, which is defined in <asm/procinfo.h> | ||
| 54 | */ | ||
| 55 | static const char *hwcap_str[] = { | ||
| 56 | "mfusr_pc", | ||
| 57 | "perf1", | ||
| 58 | "perf2", | ||
| 59 | "fpu", | ||
| 60 | "audio", | ||
| 61 | "16b", | ||
| 62 | "string", | ||
| 63 | "reduced_regs", | ||
| 64 | "video", | ||
| 65 | "encrypt", | ||
| 66 | "edm", | ||
| 67 | "lmdma", | ||
| 68 | "pfm", | ||
| 69 | "hsmp", | ||
| 70 | "trace", | ||
| 71 | "div", | ||
| 72 | "mac", | ||
| 73 | "l2c", | ||
| 74 | "dx_regs", | ||
| 75 | "v2", | ||
| 76 | NULL, | ||
| 77 | }; | ||
| 78 | |||
| 79 | #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH | ||
| 80 | #define WRITE_METHOD "write through" | ||
| 81 | #else | ||
| 82 | #define WRITE_METHOD "write back" | ||
| 83 | #endif | ||
| 84 | |||
| 85 | struct cache_info L1_cache_info[2]; | ||
| 86 | static void __init dump_cpu_info(int cpu) | ||
| 87 | { | ||
| 88 | int i, p = 0; | ||
| 89 | char str[sizeof(hwcap_str) + 16]; | ||
| 90 | |||
| 91 | for (i = 0; hwcap_str[i]; i++) { | ||
| 92 | if (elf_hwcap & (1 << i)) { | ||
| 93 | sprintf(str + p, "%s ", hwcap_str[i]); | ||
| 94 | p += strlen(hwcap_str[i]) + 1; | ||
| 95 | } | ||
| 96 | } | ||
| 97 | |||
| 98 | pr_info("CPU%d Features: %s\n", cpu, str); | ||
| 99 | |||
| 100 | L1_cache_info[ICACHE].ways = CACHE_WAY(ICACHE); | ||
| 101 | L1_cache_info[ICACHE].line_size = CACHE_LINE_SIZE(ICACHE); | ||
| 102 | L1_cache_info[ICACHE].sets = CACHE_SET(ICACHE); | ||
| 103 | L1_cache_info[ICACHE].size = | ||
| 104 | L1_cache_info[ICACHE].ways * L1_cache_info[ICACHE].line_size * | ||
| 105 | L1_cache_info[ICACHE].sets / 1024; | ||
| 106 | pr_info("L1I:%dKB/%dS/%dW/%dB\n", L1_cache_info[ICACHE].size, | ||
| 107 | L1_cache_info[ICACHE].sets, L1_cache_info[ICACHE].ways, | ||
| 108 | L1_cache_info[ICACHE].line_size); | ||
| 109 | L1_cache_info[DCACHE].ways = CACHE_WAY(DCACHE); | ||
| 110 | L1_cache_info[DCACHE].line_size = CACHE_LINE_SIZE(DCACHE); | ||
| 111 | L1_cache_info[DCACHE].sets = CACHE_SET(DCACHE); | ||
| 112 | L1_cache_info[DCACHE].size = | ||
| 113 | L1_cache_info[DCACHE].ways * L1_cache_info[DCACHE].line_size * | ||
| 114 | L1_cache_info[DCACHE].sets / 1024; | ||
| 115 | pr_info("L1D:%dKB/%dS/%dW/%dB\n", L1_cache_info[DCACHE].size, | ||
| 116 | L1_cache_info[DCACHE].sets, L1_cache_info[DCACHE].ways, | ||
| 117 | L1_cache_info[DCACHE].line_size); | ||
| 118 | pr_info("L1 D-Cache is %s\n", WRITE_METHOD); | ||
| 119 | if (L1_cache_info[DCACHE].line_size != L1_CACHE_BYTES) | ||
| 120 | pr_crit | ||
| 121 | ("The cache line size(%d) of this processor is not the same as L1_CACHE_BYTES(%d).\n", | ||
| 122 | L1_cache_info[DCACHE].line_size, L1_CACHE_BYTES); | ||
| 123 | #ifdef CONFIG_CPU_CACHE_ALIASING | ||
| 124 | { | ||
| 125 | int aliasing_num; | ||
| 126 | aliasing_num = | ||
| 127 | L1_cache_info[ICACHE].size * 1024 / PAGE_SIZE / | ||
| 128 | L1_cache_info[ICACHE].ways; | ||
| 129 | L1_cache_info[ICACHE].aliasing_num = aliasing_num; | ||
| 130 | L1_cache_info[ICACHE].aliasing_mask = | ||
| 131 | (aliasing_num - 1) << PAGE_SHIFT; | ||
| 132 | aliasing_num = | ||
| 133 | L1_cache_info[DCACHE].size * 1024 / PAGE_SIZE / | ||
| 134 | L1_cache_info[DCACHE].ways; | ||
| 135 | L1_cache_info[DCACHE].aliasing_num = aliasing_num; | ||
| 136 | L1_cache_info[DCACHE].aliasing_mask = | ||
| 137 | (aliasing_num - 1) << PAGE_SHIFT; | ||
| 138 | } | ||
| 139 | #endif | ||
| 140 | } | ||
| 141 | |||
| 142 | static void __init setup_cpuinfo(void) | ||
| 143 | { | ||
| 144 | unsigned long tmp = 0, cpu_name; | ||
| 145 | |||
| 146 | cpu_dcache_inval_all(); | ||
| 147 | cpu_icache_inval_all(); | ||
| 148 | __nds32__isb(); | ||
| 149 | |||
| 150 | cpu_id = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskCPUID) >> CPU_VER_offCPUID; | ||
| 151 | cpu_name = ((cpu_id) & 0xf0) >> 4; | ||
| 152 | cpu_series = cpu_name ? cpu_name - 10 + 'A' : 'N'; | ||
| 153 | cpu_id = cpu_id & 0xf; | ||
| 154 | cpu_rev = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskREV) >> CPU_VER_offREV; | ||
| 155 | cpu_cfgid = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskCFGID) >> CPU_VER_offCFGID; | ||
| 156 | |||
| 157 | pr_info("CPU:%c%ld, CPU_VER 0x%08x(id %lu, rev %lu, cfg %lu)\n", | ||
| 158 | cpu_series, cpu_id, __nds32__mfsr(NDS32_SR_CPU_VER), cpu_id, cpu_rev, cpu_cfgid); | ||
| 159 | |||
| 160 | elf_hwcap |= HWCAP_MFUSR_PC; | ||
| 161 | |||
| 162 | if (((__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskBASEV) >> MSC_CFG_offBASEV) == 0) { | ||
| 163 | if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskDIV) | ||
| 164 | elf_hwcap |= HWCAP_DIV; | ||
| 165 | |||
| 166 | if ((__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskMAC) | ||
| 167 | || (cpu_id == 12 && cpu_rev < 4)) | ||
| 168 | elf_hwcap |= HWCAP_MAC; | ||
| 169 | } else { | ||
| 170 | elf_hwcap |= HWCAP_V2; | ||
| 171 | elf_hwcap |= HWCAP_DIV; | ||
| 172 | elf_hwcap |= HWCAP_MAC; | ||
| 173 | } | ||
| 174 | |||
| 175 | if (cpu_cfgid & 0x0001) | ||
| 176 | elf_hwcap |= HWCAP_EXT; | ||
| 177 | |||
| 178 | if (cpu_cfgid & 0x0002) | ||
| 179 | elf_hwcap |= HWCAP_BASE16; | ||
| 180 | |||
| 181 | if (cpu_cfgid & 0x0004) | ||
| 182 | elf_hwcap |= HWCAP_EXT2; | ||
| 183 | |||
| 184 | if (cpu_cfgid & 0x0008) | ||
| 185 | elf_hwcap |= HWCAP_FPU; | ||
| 186 | |||
| 187 | if (cpu_cfgid & 0x0010) | ||
| 188 | elf_hwcap |= HWCAP_STRING; | ||
| 189 | |||
| 190 | if (__nds32__mfsr(NDS32_SR_MMU_CFG) & MMU_CFG_mskDE) | ||
| 191 | endianness = "MSB"; | ||
| 192 | else | ||
| 193 | endianness = "LSB"; | ||
| 194 | |||
| 195 | if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskEDM) | ||
| 196 | elf_hwcap |= HWCAP_EDM; | ||
| 197 | |||
| 198 | if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskLMDMA) | ||
| 199 | elf_hwcap |= HWCAP_LMDMA; | ||
| 200 | |||
| 201 | if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskPFM) | ||
| 202 | elf_hwcap |= HWCAP_PFM; | ||
| 203 | |||
| 204 | if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskHSMP) | ||
| 205 | elf_hwcap |= HWCAP_HSMP; | ||
| 206 | |||
| 207 | if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskTRACE) | ||
| 208 | elf_hwcap |= HWCAP_TRACE; | ||
| 209 | |||
| 210 | if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskAUDIO) | ||
| 211 | elf_hwcap |= HWCAP_AUDIO; | ||
| 212 | |||
| 213 | if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskL2C) | ||
| 214 | elf_hwcap |= HWCAP_L2C; | ||
| 215 | |||
| 216 | tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL); | ||
| 217 | if (!IS_ENABLED(CONFIG_CPU_DCACHE_DISABLE)) | ||
| 218 | tmp |= CACHE_CTL_mskDC_EN; | ||
| 219 | |||
| 220 | if (!IS_ENABLED(CONFIG_CPU_ICACHE_DISABLE)) | ||
| 221 | tmp |= CACHE_CTL_mskIC_EN; | ||
| 222 | __nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL); | ||
| 223 | |||
| 224 | dump_cpu_info(smp_processor_id()); | ||
| 225 | } | ||
| 226 | |||
| 227 | static void __init setup_memory(void) | ||
| 228 | { | ||
| 229 | unsigned long ram_start_pfn; | ||
| 230 | unsigned long free_ram_start_pfn; | ||
| 231 | phys_addr_t memory_start, memory_end; | ||
| 232 | struct memblock_region *region; | ||
| 233 | |||
| 234 | memory_end = memory_start = 0; | ||
| 235 | |||
| 236 | /* Find the main memory region that holds the kernel */ | ||
| 237 | for_each_memblock(memory, region) { | ||
| 238 | memory_start = region->base; | ||
| 239 | memory_end = region->base + region->size; | ||
| 240 | pr_info("%s: Memory: 0x%x-0x%x\n", __func__, | ||
| 241 | memory_start, memory_end); | ||
| 242 | } | ||
| 243 | |||
| 244 | if (!memory_end) { | ||
| 245 | panic("No memory!"); | ||
| 246 | } | ||
| 247 | |||
| 248 | ram_start_pfn = PFN_UP(memblock_start_of_DRAM()); | ||
| 249 | /* free_ram_start_pfn is first page after kernel */ | ||
| 250 | free_ram_start_pfn = PFN_UP(__pa(&_end)); | ||
| 251 | max_pfn = PFN_DOWN(memblock_end_of_DRAM()); | ||
| 252 | /* the check below may also cap max_pfn when HIGHMEM is disabled */ | ||
| 253 | if (max_pfn - ram_start_pfn <= MAXMEM_PFN) | ||
| 254 | max_low_pfn = max_pfn; | ||
| 255 | else { | ||
| 256 | max_low_pfn = MAXMEM_PFN + ram_start_pfn; | ||
| 257 | if (!IS_ENABLED(CONFIG_HIGHMEM)) | ||
| 258 | max_pfn = MAXMEM_PFN + ram_start_pfn; | ||
| 259 | } | ||
| 260 | /* high_memory bounds low memory; the VMALLOC area lives above it */ | ||
| 261 | high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); | ||
| 262 | min_low_pfn = free_ram_start_pfn; | ||
| 263 | |||
| 264 | /* | ||
| 265 | * Initialize the boot-time allocator (memblock, low memory only). | ||
| 266 | * | ||
| 267 | * Reserving the kernel image makes the memory from the end of the | ||
| 268 | * kernel to the end of RAM available for allocation. | ||
| 269 | */ | ||
| 270 | memblock_set_bottom_up(true); | ||
| 271 | memblock_reserve(PFN_PHYS(ram_start_pfn), PFN_PHYS(free_ram_start_pfn - ram_start_pfn)); | ||
| 272 | |||
| 273 | early_init_fdt_reserve_self(); | ||
| 274 | early_init_fdt_scan_reserved_mem(); | ||
| 275 | |||
| 276 | memblock_dump_all(); | ||
| 277 | } | ||
| 278 | |||
| 279 | void __init setup_arch(char **cmdline_p) | ||
| 280 | { | ||
| 281 | early_init_devtree(__dtb_start); | ||
| 282 | |||
| 283 | setup_cpuinfo(); | ||
| 284 | |||
| 285 | init_mm.start_code = (unsigned long)&_stext; | ||
| 286 | init_mm.end_code = (unsigned long)&_etext; | ||
| 287 | init_mm.end_data = (unsigned long)&_edata; | ||
| 288 | init_mm.brk = (unsigned long)&_end; | ||
| 289 | |||
| 290 | /* set up the boot memory allocator */ | ||
| 291 | setup_memory(); | ||
| 292 | |||
| 293 | /* paging_init() sets up the MMU and marks all pages as reserved */ | ||
| 294 | paging_init(); | ||
| 295 | |||
| 296 | /* use generic way to parse */ | ||
| 297 | parse_early_param(); | ||
| 298 | |||
| 299 | unflatten_and_copy_device_tree(); | ||
| 300 | |||
| 301 | if (IS_ENABLED(CONFIG_VT)) { | ||
| 302 | if (IS_ENABLED(CONFIG_DUMMY_CONSOLE)) | ||
| 303 | conswitchp = &dummy_con; | ||
| 304 | } | ||
| 305 | |||
| 306 | *cmdline_p = boot_command_line; | ||
| 307 | early_trap_init(); | ||
| 308 | } | ||
| 309 | |||
| 310 | static int c_show(struct seq_file *m, void *v) | ||
| 311 | { | ||
| 312 | int i; | ||
| 313 | |||
| 314 | seq_printf(m, "Processor\t: %c%ld (id %lu, rev %lu, cfg %lu)\n", | ||
| 315 | cpu_series, cpu_id, cpu_id, cpu_rev, cpu_cfgid); | ||
| 316 | |||
| 317 | seq_printf(m, "L1I\t\t: %luKB/%luS/%luW/%luB\n", | ||
| 318 | CACHE_SET(ICACHE) * CACHE_WAY(ICACHE) * | ||
| 319 | CACHE_LINE_SIZE(ICACHE) / 1024, CACHE_SET(ICACHE), | ||
| 320 | CACHE_WAY(ICACHE), CACHE_LINE_SIZE(ICACHE)); | ||
| 321 | |||
| 322 | seq_printf(m, "L1D\t\t: %luKB/%luS/%luW/%luB\n", | ||
| 323 | CACHE_SET(DCACHE) * CACHE_WAY(DCACHE) * | ||
| 324 | CACHE_LINE_SIZE(DCACHE) / 1024, CACHE_SET(DCACHE), | ||
| 325 | CACHE_WAY(DCACHE), CACHE_LINE_SIZE(DCACHE)); | ||
| 326 | |||
| 327 | seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", | ||
| 328 | loops_per_jiffy / (500000 / HZ), | ||
| 329 | (loops_per_jiffy / (5000 / HZ)) % 100); | ||
| 330 | |||
| 331 | /* dump out the processor features */ | ||
| 332 | seq_puts(m, "Features\t: "); | ||
| 333 | |||
| 334 | for (i = 0; hwcap_str[i]; i++) | ||
| 335 | if (elf_hwcap & (1 << i)) | ||
| 336 | seq_printf(m, "%s ", hwcap_str[i]); | ||
| 337 | |||
| 338 | seq_puts(m, "\n\n"); | ||
| 339 | |||
| 340 | return 0; | ||
| 341 | } | ||
| 342 | |||
| 343 | static void *c_start(struct seq_file *m, loff_t * pos) | ||
| 344 | { | ||
| 345 | return *pos < 1 ? (void *)1 : NULL; | ||
| 346 | } | ||
| 347 | |||
| 348 | static void *c_next(struct seq_file *m, void *v, loff_t * pos) | ||
| 349 | { | ||
| 350 | ++*pos; | ||
| 351 | return NULL; | ||
| 352 | } | ||
| 353 | |||
| 354 | static void c_stop(struct seq_file *m, void *v) | ||
| 355 | { | ||
| 356 | } | ||
| 357 | |||
| 358 | struct seq_operations cpuinfo_op = { | ||
| 359 | .start = c_start, | ||
| 360 | .next = c_next, | ||
| 361 | .stop = c_stop, | ||
| 362 | .show = c_show | ||
| 363 | }; | ||
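
The HWCAP_* bits collected into elf_hwcap above are the feature flags user space can test. Assuming this port exposes elf_hwcap through the ELF auxiliary vector as AT_HWCAP, as other architectures do via ELF_HWCAP (that glue is not shown in this excerpt), a minimal user-space probe could look like the sketch below; the HWCAP_FPU/HWCAP_DIV values simply mirror the defines at the top of setup.c.

    /* Hedged sketch: assumes elf_hwcap reaches user space as AT_HWCAP. */
    #include <stdio.h>
    #include <sys/auxv.h>

    #define HWCAP_FPU 0x000008	/* mirrors the kernel define above */
    #define HWCAP_DIV 0x008000

    int main(void)
    {
    	unsigned long hwcap = getauxval(AT_HWCAP);

    	printf("fpu: %s, div: %s\n",
    	       (hwcap & HWCAP_FPU) ? "yes" : "no",
    	       (hwcap & HWCAP_DIV) ? "yes" : "no");
    	return 0;
    }
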
diff --git a/arch/nds32/kernel/signal.c b/arch/nds32/kernel/signal.c new file mode 100644 index 000000000000..5d01f6e33cb8 --- /dev/null +++ b/arch/nds32/kernel/signal.c | |||
| @@ -0,0 +1,324 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/errno.h> | ||
| 5 | #include <linux/signal.h> | ||
| 6 | #include <linux/ptrace.h> | ||
| 7 | #include <linux/personality.h> | ||
| 8 | #include <linux/freezer.h> | ||
| 9 | #include <linux/tracehook.h> | ||
| 10 | #include <linux/uaccess.h> | ||
| 11 | |||
| 12 | #include <asm/cacheflush.h> | ||
| 13 | #include <asm/ucontext.h> | ||
| 14 | #include <asm/unistd.h> | ||
| 15 | |||
| 16 | #include <asm/ptrace.h> | ||
| 17 | #include <asm/vdso.h> | ||
| 18 | |||
| 19 | struct rt_sigframe { | ||
| 20 | struct siginfo info; | ||
| 21 | struct ucontext uc; | ||
| 22 | }; | ||
| 23 | |||
| 24 | static int restore_sigframe(struct pt_regs *regs, | ||
| 25 | struct rt_sigframe __user * sf) | ||
| 26 | { | ||
| 27 | sigset_t set; | ||
| 28 | int err; | ||
| 29 | |||
| 30 | err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); | ||
| 31 | if (err == 0) { | ||
| 32 | set_current_blocked(&set); | ||
| 33 | } | ||
| 34 | |||
| 35 | __get_user_error(regs->uregs[0], &sf->uc.uc_mcontext.nds32_r0, err); | ||
| 36 | __get_user_error(regs->uregs[1], &sf->uc.uc_mcontext.nds32_r1, err); | ||
| 37 | __get_user_error(regs->uregs[2], &sf->uc.uc_mcontext.nds32_r2, err); | ||
| 38 | __get_user_error(regs->uregs[3], &sf->uc.uc_mcontext.nds32_r3, err); | ||
| 39 | __get_user_error(regs->uregs[4], &sf->uc.uc_mcontext.nds32_r4, err); | ||
| 40 | __get_user_error(regs->uregs[5], &sf->uc.uc_mcontext.nds32_r5, err); | ||
| 41 | __get_user_error(regs->uregs[6], &sf->uc.uc_mcontext.nds32_r6, err); | ||
| 42 | __get_user_error(regs->uregs[7], &sf->uc.uc_mcontext.nds32_r7, err); | ||
| 43 | __get_user_error(regs->uregs[8], &sf->uc.uc_mcontext.nds32_r8, err); | ||
| 44 | __get_user_error(regs->uregs[9], &sf->uc.uc_mcontext.nds32_r9, err); | ||
| 45 | __get_user_error(regs->uregs[10], &sf->uc.uc_mcontext.nds32_r10, err); | ||
| 46 | __get_user_error(regs->uregs[11], &sf->uc.uc_mcontext.nds32_r11, err); | ||
| 47 | __get_user_error(regs->uregs[12], &sf->uc.uc_mcontext.nds32_r12, err); | ||
| 48 | __get_user_error(regs->uregs[13], &sf->uc.uc_mcontext.nds32_r13, err); | ||
| 49 | __get_user_error(regs->uregs[14], &sf->uc.uc_mcontext.nds32_r14, err); | ||
| 50 | __get_user_error(regs->uregs[15], &sf->uc.uc_mcontext.nds32_r15, err); | ||
| 51 | __get_user_error(regs->uregs[16], &sf->uc.uc_mcontext.nds32_r16, err); | ||
| 52 | __get_user_error(regs->uregs[17], &sf->uc.uc_mcontext.nds32_r17, err); | ||
| 53 | __get_user_error(regs->uregs[18], &sf->uc.uc_mcontext.nds32_r18, err); | ||
| 54 | __get_user_error(regs->uregs[19], &sf->uc.uc_mcontext.nds32_r19, err); | ||
| 55 | __get_user_error(regs->uregs[20], &sf->uc.uc_mcontext.nds32_r20, err); | ||
| 56 | __get_user_error(regs->uregs[21], &sf->uc.uc_mcontext.nds32_r21, err); | ||
| 57 | __get_user_error(regs->uregs[22], &sf->uc.uc_mcontext.nds32_r22, err); | ||
| 58 | __get_user_error(regs->uregs[23], &sf->uc.uc_mcontext.nds32_r23, err); | ||
| 59 | __get_user_error(regs->uregs[24], &sf->uc.uc_mcontext.nds32_r24, err); | ||
| 60 | __get_user_error(regs->uregs[25], &sf->uc.uc_mcontext.nds32_r25, err); | ||
| 61 | |||
| 62 | __get_user_error(regs->fp, &sf->uc.uc_mcontext.nds32_fp, err); | ||
| 63 | __get_user_error(regs->gp, &sf->uc.uc_mcontext.nds32_gp, err); | ||
| 64 | __get_user_error(regs->lp, &sf->uc.uc_mcontext.nds32_lp, err); | ||
| 65 | __get_user_error(regs->sp, &sf->uc.uc_mcontext.nds32_sp, err); | ||
| 66 | __get_user_error(regs->ipc, &sf->uc.uc_mcontext.nds32_ipc, err); | ||
| 67 | #if defined(CONFIG_HWZOL) | ||
| 68 | __get_user_error(regs->lc, &sf->uc.uc_mcontext.zol.nds32_lc, err); | ||
| 69 | __get_user_error(regs->le, &sf->uc.uc_mcontext.zol.nds32_le, err); | ||
| 70 | __get_user_error(regs->lb, &sf->uc.uc_mcontext.zol.nds32_lb, err); | ||
| 71 | #endif | ||
| 72 | |||
| 73 | /* | ||
| 74 | * Avoid sys_rt_sigreturn() restarting. | ||
| 75 | */ | ||
| 76 | forget_syscall(regs); | ||
| 77 | return err; | ||
| 78 | } | ||
| 79 | |||
| 80 | asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) | ||
| 81 | { | ||
| 82 | struct rt_sigframe __user *frame; | ||
| 83 | |||
| 84 | /* Always make any pending restarted system calls return -EINTR */ | ||
| 85 | current->restart_block.fn = do_no_restart_syscall; | ||
| 86 | |||
| 87 | /* | ||
| 88 | * Since we stacked the signal frame on a 64-bit boundary, | ||
| 89 | * 'sp' should be two-word aligned here. If it's not, | ||
| 90 | * the user is trying to mess with us. | ||
| 91 | */ | ||
| 92 | if (regs->sp & 7) | ||
| 93 | goto badframe; | ||
| 94 | |||
| 95 | frame = (struct rt_sigframe __user *)regs->sp; | ||
| 96 | |||
| 97 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
| 98 | goto badframe; | ||
| 99 | |||
| 100 | if (restore_sigframe(regs, frame)) | ||
| 101 | goto badframe; | ||
| 102 | |||
| 103 | if (restore_altstack(&frame->uc.uc_stack)) | ||
| 104 | goto badframe; | ||
| 105 | |||
| 106 | return regs->uregs[0]; | ||
| 107 | |||
| 108 | badframe: | ||
| 109 | force_sig(SIGSEGV, current); | ||
| 110 | return 0; | ||
| 111 | } | ||
| 112 | |||
| 113 | static int | ||
| 114 | setup_sigframe(struct rt_sigframe __user * sf, struct pt_regs *regs, | ||
| 115 | sigset_t * set) | ||
| 116 | { | ||
| 117 | int err = 0; | ||
| 118 | |||
| 119 | __put_user_error(regs->uregs[0], &sf->uc.uc_mcontext.nds32_r0, err); | ||
| 120 | __put_user_error(regs->uregs[1], &sf->uc.uc_mcontext.nds32_r1, err); | ||
| 121 | __put_user_error(regs->uregs[2], &sf->uc.uc_mcontext.nds32_r2, err); | ||
| 122 | __put_user_error(regs->uregs[3], &sf->uc.uc_mcontext.nds32_r3, err); | ||
| 123 | __put_user_error(regs->uregs[4], &sf->uc.uc_mcontext.nds32_r4, err); | ||
| 124 | __put_user_error(regs->uregs[5], &sf->uc.uc_mcontext.nds32_r5, err); | ||
| 125 | __put_user_error(regs->uregs[6], &sf->uc.uc_mcontext.nds32_r6, err); | ||
| 126 | __put_user_error(regs->uregs[7], &sf->uc.uc_mcontext.nds32_r7, err); | ||
| 127 | __put_user_error(regs->uregs[8], &sf->uc.uc_mcontext.nds32_r8, err); | ||
| 128 | __put_user_error(regs->uregs[9], &sf->uc.uc_mcontext.nds32_r9, err); | ||
| 129 | __put_user_error(regs->uregs[10], &sf->uc.uc_mcontext.nds32_r10, err); | ||
| 130 | __put_user_error(regs->uregs[11], &sf->uc.uc_mcontext.nds32_r11, err); | ||
| 131 | __put_user_error(regs->uregs[12], &sf->uc.uc_mcontext.nds32_r12, err); | ||
| 132 | __put_user_error(regs->uregs[13], &sf->uc.uc_mcontext.nds32_r13, err); | ||
| 133 | __put_user_error(regs->uregs[14], &sf->uc.uc_mcontext.nds32_r14, err); | ||
| 134 | __put_user_error(regs->uregs[15], &sf->uc.uc_mcontext.nds32_r15, err); | ||
| 135 | __put_user_error(regs->uregs[16], &sf->uc.uc_mcontext.nds32_r16, err); | ||
| 136 | __put_user_error(regs->uregs[17], &sf->uc.uc_mcontext.nds32_r17, err); | ||
| 137 | __put_user_error(regs->uregs[18], &sf->uc.uc_mcontext.nds32_r18, err); | ||
| 138 | __put_user_error(regs->uregs[19], &sf->uc.uc_mcontext.nds32_r19, err); | ||
| 139 | __put_user_error(regs->uregs[20], &sf->uc.uc_mcontext.nds32_r20, err); | ||
| 140 | |||
| 141 | __put_user_error(regs->uregs[21], &sf->uc.uc_mcontext.nds32_r21, err); | ||
| 142 | __put_user_error(regs->uregs[22], &sf->uc.uc_mcontext.nds32_r22, err); | ||
| 143 | __put_user_error(regs->uregs[23], &sf->uc.uc_mcontext.nds32_r23, err); | ||
| 144 | __put_user_error(regs->uregs[24], &sf->uc.uc_mcontext.nds32_r24, err); | ||
| 145 | __put_user_error(regs->uregs[25], &sf->uc.uc_mcontext.nds32_r25, err); | ||
| 146 | __put_user_error(regs->fp, &sf->uc.uc_mcontext.nds32_fp, err); | ||
| 147 | __put_user_error(regs->gp, &sf->uc.uc_mcontext.nds32_gp, err); | ||
| 148 | __put_user_error(regs->lp, &sf->uc.uc_mcontext.nds32_lp, err); | ||
| 149 | __put_user_error(regs->sp, &sf->uc.uc_mcontext.nds32_sp, err); | ||
| 150 | __put_user_error(regs->ipc, &sf->uc.uc_mcontext.nds32_ipc, err); | ||
| 151 | #if defined(CONFIG_HWZOL) | ||
| 152 | __put_user_error(regs->lc, &sf->uc.uc_mcontext.zol.nds32_lc, err); | ||
| 153 | __put_user_error(regs->le, &sf->uc.uc_mcontext.zol.nds32_le, err); | ||
| 154 | __put_user_error(regs->lb, &sf->uc.uc_mcontext.zol.nds32_lb, err); | ||
| 155 | #endif | ||
| 156 | |||
| 157 | __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, | ||
| 158 | err); | ||
| 159 | __put_user_error(current->thread.error_code, | ||
| 160 | &sf->uc.uc_mcontext.error_code, err); | ||
| 161 | __put_user_error(current->thread.address, | ||
| 162 | &sf->uc.uc_mcontext.fault_address, err); | ||
| 163 | __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err); | ||
| 164 | |||
| 165 | err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set)); | ||
| 166 | |||
| 167 | return err; | ||
| 168 | } | ||
| 169 | |||
| 170 | static inline void __user *get_sigframe(struct ksignal *ksig, | ||
| 171 | struct pt_regs *regs, int framesize) | ||
| 172 | { | ||
| 173 | unsigned long sp; | ||
| 174 | |||
| 175 | /* Default to using normal stack */ | ||
| 176 | sp = regs->sp; | ||
| 177 | |||
| 178 | /* | ||
| 179 | * If we are on the alternate signal stack and would overflow it, don't. | ||
| 180 | * Return an always-bogus address instead so we will die with SIGSEGV. | ||
| 181 | */ | ||
| 182 | if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize))) | ||
| 183 | return (void __user __force *)(-1UL); | ||
| 184 | |||
| 185 | /* This is the X/Open sanctioned signal stack switching. */ | ||
| 186 | sp = (sigsp(sp, ksig) - framesize); | ||
| 187 | |||
| 188 | /* | ||
| 189 | * nds32 mandates 8-byte alignment | ||
| 190 | */ | ||
| 191 | sp &= ~0x7UL; | ||
| 192 | |||
| 193 | return (void __user *)sp; | ||
| 194 | } | ||
| 195 | |||
| 196 | static int | ||
| 197 | setup_return(struct pt_regs *regs, struct ksignal *ksig, void __user * frame) | ||
| 198 | { | ||
| 199 | unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler; | ||
| 200 | unsigned long retcode; | ||
| 201 | |||
| 202 | retcode = VDSO_SYMBOL(current->mm->context.vdso, rt_sigtramp); | ||
| 203 | regs->uregs[0] = ksig->sig; | ||
| 204 | regs->sp = (unsigned long)frame; | ||
| 205 | regs->lp = retcode; | ||
| 206 | regs->ipc = handler; | ||
| 207 | |||
| 208 | return 0; | ||
| 209 | } | ||
| 210 | |||
| 211 | static int | ||
| 212 | setup_rt_frame(struct ksignal *ksig, sigset_t * set, struct pt_regs *regs) | ||
| 213 | { | ||
| 214 | struct rt_sigframe __user *frame = | ||
| 215 | get_sigframe(ksig, regs, sizeof(*frame)); | ||
| 216 | int err = 0; | ||
| 217 | |||
| 218 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | ||
| 219 | return -EFAULT; | ||
| 220 | |||
| 221 | __put_user_error(0, &frame->uc.uc_flags, err); | ||
| 222 | __put_user_error(NULL, &frame->uc.uc_link, err); | ||
| 223 | |||
| 224 | err |= __save_altstack(&frame->uc.uc_stack, regs->sp); | ||
| 225 | err |= setup_sigframe(frame, regs, set); | ||
| 226 | if (err == 0) { | ||
| 227 | setup_return(regs, ksig, frame); | ||
| 228 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) { | ||
| 229 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); | ||
| 230 | regs->uregs[1] = (unsigned long)&frame->info; | ||
| 231 | regs->uregs[2] = (unsigned long)&frame->uc; | ||
| 232 | } | ||
| 233 | } | ||
| 234 | return err; | ||
| 235 | } | ||
| 236 | |||
| 237 | /* | ||
| 238 | * OK, we're invoking a handler | ||
| 239 | */ | ||
| 240 | static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) | ||
| 241 | { | ||
| 242 | int ret; | ||
| 243 | sigset_t *oldset = sigmask_to_save(); | ||
| 244 | |||
| 245 | if (in_syscall(regs)) { | ||
| 246 | /* Avoid additional syscall restarting on the syscall return path. */ | ||
| 247 | forget_syscall(regs); | ||
| 248 | |||
| 249 | switch (regs->uregs[0]) { | ||
| 250 | case -ERESTART_RESTARTBLOCK: | ||
| 251 | case -ERESTARTNOHAND: | ||
| 252 | regs->uregs[0] = -EINTR; | ||
| 253 | break; | ||
| 254 | case -ERESTARTSYS: | ||
| 255 | if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { | ||
| 256 | regs->uregs[0] = -EINTR; | ||
| 257 | break; | ||
| 258 | } | ||
| 259 | case -ERESTARTNOINTR: | ||
| 260 | regs->uregs[0] = regs->orig_r0; | ||
| 261 | regs->ipc -= 4; | ||
| 262 | break; | ||
| 263 | } | ||
| 264 | } | ||
| 265 | /* | ||
| 266 | * Set up the stack frame | ||
| 267 | */ | ||
| 268 | ret = setup_rt_frame(ksig, oldset, regs); | ||
| 269 | |||
| 270 | signal_setup_done(ret, ksig, 0); | ||
| 271 | } | ||
| 272 | |||
| 273 | /* | ||
| 274 | * Note that 'init' is a special process: it doesn't get signals it doesn't | ||
| 275 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | ||
| 276 | * mistake. | ||
| 277 | * | ||
| 278 | * Note that we go through the signals twice: once to check the signals that | ||
| 279 | * the kernel can handle, and then we build all the user-level signal handling | ||
| 280 | * stack-frames in one go after that. | ||
| 281 | */ | ||
| 282 | static void do_signal(struct pt_regs *regs) | ||
| 283 | { | ||
| 284 | struct ksignal ksig; | ||
| 285 | |||
| 286 | if (get_signal(&ksig)) { | ||
| 287 | handle_signal(&ksig, regs); | ||
| 288 | return; | ||
| 289 | } | ||
| 290 | |||
| 291 | /* | ||
| 292 | * If we were from a system call, check for system call restarting... | ||
| 293 | */ | ||
| 294 | if (in_syscall(regs)) { | ||
| 295 | /* Restart the system call - no handlers present */ | ||
| 296 | |||
| 297 | /* Avoid additional syscall restarting on the syscall return path. */ | ||
| 298 | forget_syscall(regs); | ||
| 299 | |||
| 300 | switch (regs->uregs[0]) { | ||
| 301 | case -ERESTART_RESTARTBLOCK: | ||
| 302 | regs->uregs[15] = __NR_restart_syscall; | ||
| 303 | case -ERESTARTNOHAND: | ||
| 304 | case -ERESTARTSYS: | ||
| 305 | case -ERESTARTNOINTR: | ||
| 306 | regs->uregs[0] = regs->orig_r0; | ||
| 307 | regs->ipc -= 0x4; | ||
| 308 | break; | ||
| 309 | } | ||
| 310 | } | ||
| 311 | restore_saved_sigmask(); | ||
| 312 | } | ||
| 313 | |||
| 314 | asmlinkage void | ||
| 315 | do_notify_resume(struct pt_regs *regs, unsigned int thread_flags) | ||
| 316 | { | ||
| 317 | if (thread_flags & _TIF_SIGPENDING) | ||
| 318 | do_signal(regs); | ||
| 319 | |||
| 320 | if (thread_flags & _TIF_NOTIFY_RESUME) { | ||
| 321 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
| 322 | tracehook_notify_resume(regs); | ||
| 323 | } | ||
| 324 | } | ||
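
setup_rt_frame()/setup_return() above hand the signal number to the handler in r0 and, for SA_SIGINFO handlers, the siginfo and ucontext pointers in r1 and r2, which is exactly the standard three-argument handler ABI. A minimal, portable user-space counterpart (plain POSIX sigaction, nothing architecture-specific assumed):

    #include <signal.h>
    #include <string.h>
    #include <unistd.h>

    static void handler(int sig, siginfo_t *info, void *uc)
    {
    	/* sig arrives in r0, info in r1, uc in r2 (see setup_rt_frame above) */
    	static const char msg[] = "caught signal\n";
    	(void)sig; (void)info; (void)uc;
    	write(STDOUT_FILENO, msg, sizeof(msg) - 1);
    }

    int main(void)
    {
    	struct sigaction sa;

    	memset(&sa, 0, sizeof(sa));
    	sa.sa_sigaction = handler;
    	sa.sa_flags = SA_SIGINFO;
    	sigaction(SIGUSR1, &sa, NULL);
    	raise(SIGUSR1);
    	return 0;
    }
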
diff --git a/arch/nds32/kernel/stacktrace.c b/arch/nds32/kernel/stacktrace.c new file mode 100644 index 000000000000..bc70113c0e84 --- /dev/null +++ b/arch/nds32/kernel/stacktrace.c | |||
| @@ -0,0 +1,47 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/sched/debug.h> | ||
| 5 | #include <linux/sched/task_stack.h> | ||
| 6 | #include <linux/stacktrace.h> | ||
| 7 | |||
| 8 | void save_stack_trace(struct stack_trace *trace) | ||
| 9 | { | ||
| 10 | save_stack_trace_tsk(current, trace); | ||
| 11 | } | ||
| 12 | |||
| 13 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | ||
| 14 | { | ||
| 15 | unsigned long *fpn; | ||
| 16 | int skip = trace->skip; | ||
| 17 | int savesched; | ||
| 18 | |||
| 19 | if (tsk == current) { | ||
| 20 | __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(fpn)); | ||
| 21 | savesched = 1; | ||
| 22 | } else { | ||
| 23 | fpn = (unsigned long *)thread_saved_fp(tsk); | ||
| 24 | savesched = 0; | ||
| 25 | } | ||
| 26 | |||
| 27 | while (!kstack_end(fpn) && !((unsigned long)fpn & 0x3) | ||
| 28 | && (fpn >= (unsigned long *)TASK_SIZE)) { | ||
| 29 | unsigned long lpp, fpp; | ||
| 30 | |||
| 31 | lpp = fpn[-1]; | ||
| 32 | fpp = fpn[FP_OFFSET]; | ||
| 33 | if (!__kernel_text_address(lpp)) | ||
| 34 | break; | ||
| 35 | |||
| 36 | if (savesched || !in_sched_functions(lpp)) { | ||
| 37 | if (skip) { | ||
| 38 | skip--; | ||
| 39 | } else { | ||
| 40 | trace->entries[trace->nr_entries++] = lpp; | ||
| 41 | if (trace->nr_entries >= trace->max_entries) | ||
| 42 | break; | ||
| 43 | } | ||
| 44 | } | ||
| 45 | fpn = (unsigned long *)fpp; | ||
| 46 | } | ||
| 47 | } | ||
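
save_stack_trace_tsk() fills a caller-provided entries[] array by walking frame pointers. A hedged kernel-side sketch of how the stacktrace API of this kernel generation is typically consumed; struct stack_trace, save_stack_trace() and print_stack_trace() are the generic <linux/stacktrace.h> interface, not something added by this patch:

    #include <linux/kernel.h>
    #include <linux/stacktrace.h>

    static void dump_current_stack(void)
    {
    	unsigned long entries[16];
    	struct stack_trace trace = {
    		.entries     = entries,
    		.max_entries = ARRAY_SIZE(entries),
    		.skip        = 2,	/* drop this helper and its caller */
    	};

    	save_stack_trace(&trace);
    	print_stack_trace(&trace, 0);
    }
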
diff --git a/arch/nds32/kernel/sys_nds32.c b/arch/nds32/kernel/sys_nds32.c new file mode 100644 index 000000000000..9de93ab4c52b --- /dev/null +++ b/arch/nds32/kernel/sys_nds32.c | |||
| @@ -0,0 +1,50 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/syscalls.h> | ||
| 5 | #include <linux/uaccess.h> | ||
| 6 | |||
| 7 | #include <asm/cachectl.h> | ||
| 8 | #include <asm/proc-fns.h> | ||
| 9 | |||
| 10 | SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, | ||
| 11 | unsigned long, prot, unsigned long, flags, | ||
| 12 | unsigned long, fd, unsigned long, pgoff) | ||
| 13 | { | ||
| 14 | if (pgoff & (~PAGE_MASK >> 12)) | ||
| 15 | return -EINVAL; | ||
| 16 | |||
| 17 | return sys_mmap_pgoff(addr, len, prot, flags, fd, | ||
| 18 | pgoff >> (PAGE_SHIFT - 12)); | ||
| 19 | } | ||
| 20 | |||
| 21 | SYSCALL_DEFINE4(fadvise64_64_wrapper, int, fd, int, advice, loff_t, offset, | ||
| 22 | loff_t, len) | ||
| 23 | { | ||
| 24 | return sys_fadvise64_64(fd, offset, len, advice); | ||
| 25 | } | ||
| 26 | |||
| 27 | SYSCALL_DEFINE3(cacheflush, unsigned int, start, unsigned int, end, int, cache) | ||
| 28 | { | ||
| 29 | struct vm_area_struct *vma; | ||
| 30 | bool flushi = true, wbd = true; | ||
| 31 | |||
| 32 | vma = find_vma(current->mm, start); | ||
| 33 | if (!vma) | ||
| 34 | return -EFAULT; | ||
| 35 | switch (cache) { | ||
| 36 | case ICACHE: | ||
| 37 | wbd = false; | ||
| 38 | break; | ||
| 39 | case DCACHE: | ||
| 40 | flushi = false; | ||
| 41 | break; | ||
| 42 | case BCACHE: | ||
| 43 | break; | ||
| 44 | default: | ||
| 45 | return -EINVAL; | ||
| 46 | } | ||
| 47 | cpu_cache_wbinval_range_check(vma, start, end, flushi, wbd); | ||
| 48 | |||
| 49 | return 0; | ||
| 50 | } | ||
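
sys_cacheflush() above takes a start address, an end address and a cache selector (ICACHE, DCACHE or BCACHE from <asm/cachectl.h>). A hedged user-space sketch of calling it after generating code at run time; __NR_cacheflush and the BCACHE value come from the architecture's uapi headers, which are not part of this excerpt:

    #include <sys/syscall.h>
    #include <unistd.h>
    #include <asm/cachectl.h>	/* ICACHE, DCACHE, BCACHE */

    /* Write back the D-cache and invalidate the I-cache over [start, end). */
    static int flush_code_range(void *start, void *end)
    {
    	return syscall(__NR_cacheflush, start, end, BCACHE);
    }
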
diff --git a/arch/nds32/kernel/syscall_table.c b/arch/nds32/kernel/syscall_table.c new file mode 100644 index 000000000000..7879c061b87f --- /dev/null +++ b/arch/nds32/kernel/syscall_table.c | |||
| @@ -0,0 +1,17 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/syscalls.h> | ||
| 5 | #include <linux/signal.h> | ||
| 6 | #include <linux/unistd.h> | ||
| 7 | #include <asm/syscalls.h> | ||
| 8 | |||
| 9 | #undef __SYSCALL | ||
| 10 | #define __SYSCALL(nr, call) [nr] = (call), | ||
| 11 | |||
| 12 | #define sys_rt_sigreturn sys_rt_sigreturn_wrapper | ||
| 13 | #define sys_fadvise64_64 sys_fadvise64_64_wrapper | ||
| 14 | void *sys_call_table[__NR_syscalls] __aligned(8192) = { | ||
| 15 | [0 ... __NR_syscalls - 1] = sys_ni_syscall, | ||
| 16 | #include <asm/unistd.h> | ||
| 17 | }; | ||
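
The table is produced by redefining __SYSCALL so that including <asm/unistd.h> expands each __SYSCALL(nr, call) entry into a designated initializer, layered on top of the [0 ... __NR_syscalls - 1] default (a GNU C range initializer). The same trick in stand-alone toy form, with made-up names:

    /* Illustration only: toy names, same designated-initializer technique. */
    #include <stdio.h>

    typedef int (*handler_t)(void);

    static int do_nothing(void) { return -1; }
    static int do_read(void)    { return 3; }
    static int do_write(void)   { return 4; }

    #define TOY_NR_CALLS 8
    #define TOY_CALL(nr, fn) [nr] = (fn),

    static handler_t toy_table[TOY_NR_CALLS] = {
    	[0 ... TOY_NR_CALLS - 1] = do_nothing,	/* default for every slot */
    	TOY_CALL(3, do_read)			/* later entries override it */
    	TOY_CALL(4, do_write)
    };

    int main(void)
    {
    	printf("%d %d %d\n", toy_table[0](), toy_table[3](), toy_table[4]());
    	return 0;
    }
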
diff --git a/arch/nds32/kernel/time.c b/arch/nds32/kernel/time.c new file mode 100644 index 000000000000..ac9d78ce3a81 --- /dev/null +++ b/arch/nds32/kernel/time.c | |||
| @@ -0,0 +1,11 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/clocksource.h> | ||
| 5 | #include <linux/clk-provider.h> | ||
| 6 | |||
| 7 | void __init time_init(void) | ||
| 8 | { | ||
| 9 | of_clk_init(NULL); | ||
| 10 | timer_probe(); | ||
| 11 | } | ||
diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c new file mode 100644 index 000000000000..6e34eb9824a4 --- /dev/null +++ b/arch/nds32/kernel/traps.c | |||
| @@ -0,0 +1,430 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/module.h> | ||
| 5 | #include <linux/personality.h> | ||
| 6 | #include <linux/kallsyms.h> | ||
| 7 | #include <linux/hardirq.h> | ||
| 8 | #include <linux/kdebug.h> | ||
| 9 | #include <linux/sched/task_stack.h> | ||
| 10 | #include <linux/uaccess.h> | ||
| 11 | |||
| 12 | #include <asm/proc-fns.h> | ||
| 13 | #include <asm/unistd.h> | ||
| 14 | |||
| 15 | #include <linux/ptrace.h> | ||
| 16 | #include <nds32_intrinsic.h> | ||
| 17 | |||
| 18 | extern void show_pte(struct mm_struct *mm, unsigned long addr); | ||
| 19 | |||
| 20 | /* | ||
| 21 | * Dump out the contents of some memory nicely... | ||
| 22 | */ | ||
| 23 | void dump_mem(const char *lvl, unsigned long bottom, unsigned long top) | ||
| 24 | { | ||
| 25 | unsigned long first; | ||
| 26 | mm_segment_t fs; | ||
| 27 | int i; | ||
| 28 | |||
| 29 | /* | ||
| 30 | * We need to switch to kernel mode so that we can use __get_user | ||
| 31 | * to safely read from kernel space. Note that we now dump the | ||
| 32 | * code first, just in case the backtrace kills us. | ||
| 33 | */ | ||
| 34 | fs = get_fs(); | ||
| 35 | set_fs(KERNEL_DS); | ||
| 36 | |||
| 37 | pr_emerg("%s(0x%08lx to 0x%08lx)\n", lvl, bottom, top); | ||
| 38 | |||
| 39 | for (first = bottom & ~31; first < top; first += 32) { | ||
| 40 | unsigned long p; | ||
| 41 | char str[sizeof(" 12345678") * 8 + 1]; | ||
| 42 | |||
| 43 | memset(str, ' ', sizeof(str)); | ||
| 44 | str[sizeof(str) - 1] = '\0'; | ||
| 45 | |||
| 46 | for (p = first, i = 0; i < 8 && p < top; i++, p += 4) { | ||
| 47 | if (p >= bottom && p < top) { | ||
| 48 | unsigned long val; | ||
| 49 | if (__get_user(val, (unsigned long *)p) == 0) | ||
| 50 | sprintf(str + i * 9, " %08lx", val); | ||
| 51 | else | ||
| 52 | sprintf(str + i * 9, " ????????"); | ||
| 53 | } | ||
| 54 | } | ||
| 55 | pr_emerg("%s%04lx:%s\n", lvl, first & 0xffff, str); | ||
| 56 | } | ||
| 57 | |||
| 58 | set_fs(fs); | ||
| 59 | } | ||
| 60 | |||
| 61 | EXPORT_SYMBOL(dump_mem); | ||
| 62 | |||
| 63 | static void dump_instr(struct pt_regs *regs) | ||
| 64 | { | ||
| 65 | unsigned long addr = instruction_pointer(regs); | ||
| 66 | mm_segment_t fs; | ||
| 67 | char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; | ||
| 68 | int i; | ||
| 69 | |||
| 70 | return; | ||
| 71 | /* | ||
| 72 | * We need to switch to kernel mode so that we can use __get_user | ||
| 73 | * to safely read from kernel space. Note that we now dump the | ||
| 74 | * code first, just in case the backtrace kills us. | ||
| 75 | */ | ||
| 76 | fs = get_fs(); | ||
| 77 | set_fs(KERNEL_DS); | ||
| 78 | |||
| 79 | pr_emerg("Code: "); | ||
| 80 | for (i = -4; i < 1; i++) { | ||
| 81 | unsigned int val, bad; | ||
| 82 | |||
| 83 | bad = __get_user(val, &((u32 *) addr)[i]); | ||
| 84 | |||
| 85 | if (!bad) { | ||
| 86 | p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val); | ||
| 87 | } else { | ||
| 88 | p += sprintf(p, "bad PC value"); | ||
| 89 | break; | ||
| 90 | } | ||
| 91 | } | ||
| 92 | pr_emerg("Code: %s\n", str); | ||
| 93 | |||
| 94 | set_fs(fs); | ||
| 95 | } | ||
| 96 | |||
| 97 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
| 98 | #include <linux/ftrace.h> | ||
| 99 | static void | ||
| 100 | get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph) | ||
| 101 | { | ||
| 102 | if (*addr == (unsigned long)return_to_handler) { | ||
| 103 | int index = tsk->curr_ret_stack; | ||
| 104 | |||
| 105 | if (tsk->ret_stack && index >= *graph) { | ||
| 106 | index -= *graph; | ||
| 107 | *addr = tsk->ret_stack[index].ret; | ||
| 108 | (*graph)++; | ||
| 109 | } | ||
| 110 | } | ||
| 111 | } | ||
| 112 | #else | ||
| 113 | static inline void | ||
| 114 | get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph) | ||
| 115 | { | ||
| 116 | } | ||
| 117 | #endif | ||
| 118 | |||
| 119 | #define LOOP_TIMES (100) | ||
| 120 | static void __dump(struct task_struct *tsk, unsigned long *base_reg) | ||
| 121 | { | ||
| 122 | unsigned long ret_addr; | ||
| 123 | int cnt = LOOP_TIMES, graph = 0; | ||
| 124 | pr_emerg("Call Trace:\n"); | ||
| 125 | if (!IS_ENABLED(CONFIG_FRAME_POINTER)) { | ||
| 126 | while (!kstack_end(base_reg)) { | ||
| 127 | ret_addr = *base_reg++; | ||
| 128 | if (__kernel_text_address(ret_addr)) { | ||
| 129 | get_real_ret_addr(&ret_addr, tsk, &graph); | ||
| 130 | print_ip_sym(ret_addr); | ||
| 131 | } | ||
| 132 | if (--cnt < 0) | ||
| 133 | break; | ||
| 134 | } | ||
| 135 | } else { | ||
| 136 | while (!kstack_end((void *)base_reg) && | ||
| 137 | !((unsigned long)base_reg & 0x3) && | ||
| 138 | ((unsigned long)base_reg >= TASK_SIZE)) { | ||
| 139 | unsigned long next_fp; | ||
| 140 | #if !defined(NDS32_ABI_2) | ||
| 141 | ret_addr = base_reg[0]; | ||
| 142 | next_fp = base_reg[1]; | ||
| 143 | #else | ||
| 144 | ret_addr = base_reg[-1]; | ||
| 145 | next_fp = base_reg[FP_OFFSET]; | ||
| 146 | #endif | ||
| 147 | if (__kernel_text_address(ret_addr)) { | ||
| 148 | get_real_ret_addr(&ret_addr, tsk, &graph); | ||
| 149 | print_ip_sym(ret_addr); | ||
| 150 | } | ||
| 151 | if (--cnt < 0) | ||
| 152 | break; | ||
| 153 | base_reg = (unsigned long *)next_fp; | ||
| 154 | } | ||
| 155 | } | ||
| 156 | pr_emerg("\n"); | ||
| 157 | } | ||
| 158 | |||
| 159 | void show_stack(struct task_struct *tsk, unsigned long *sp) | ||
| 160 | { | ||
| 161 | unsigned long *base_reg; | ||
| 162 | |||
| 163 | if (!tsk) | ||
| 164 | tsk = current; | ||
| 165 | if (!IS_ENABLED(CONFIG_FRAME_POINTER)) { | ||
| 166 | if (tsk != current) | ||
| 167 | base_reg = (unsigned long *)(tsk->thread.cpu_context.sp); | ||
| 168 | else | ||
| 169 | __asm__ __volatile__("\tori\t%0, $sp, #0\n":"=r"(base_reg)); | ||
| 170 | } else { | ||
| 171 | if (tsk != current) | ||
| 172 | base_reg = (unsigned long *)(tsk->thread.cpu_context.fp); | ||
| 173 | else | ||
| 174 | __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg)); | ||
| 175 | } | ||
| 176 | __dump(tsk, base_reg); | ||
| 177 | barrier(); | ||
| 178 | } | ||
| 179 | |||
| 180 | DEFINE_SPINLOCK(die_lock); | ||
| 181 | |||
| 182 | /* | ||
| 183 | * This function is protected against re-entrancy. | ||
| 184 | */ | ||
| 185 | void die(const char *str, struct pt_regs *regs, int err) | ||
| 186 | { | ||
| 187 | struct task_struct *tsk = current; | ||
| 188 | static int die_counter; | ||
| 189 | |||
| 190 | console_verbose(); | ||
| 191 | spin_lock_irq(&die_lock); | ||
| 192 | bust_spinlocks(1); | ||
| 193 | |||
| 194 | pr_emerg("Internal error: %s: %x [#%d]\n", str, err, ++die_counter); | ||
| 195 | print_modules(); | ||
| 196 | pr_emerg("CPU: %i\n", smp_processor_id()); | ||
| 197 | show_regs(regs); | ||
| 198 | pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n", | ||
| 199 | tsk->comm, tsk->pid, task_thread_info(tsk) + 1); | ||
| 200 | |||
| 201 | if (!user_mode(regs) || in_interrupt()) { | ||
| 202 | dump_mem("Stack: ", regs->sp, | ||
| 203 | THREAD_SIZE + (unsigned long)task_thread_info(tsk)); | ||
| 204 | dump_instr(regs); | ||
| 205 | dump_stack(); | ||
| 206 | } | ||
| 207 | |||
| 208 | bust_spinlocks(0); | ||
| 209 | spin_unlock_irq(&die_lock); | ||
| 210 | do_exit(SIGSEGV); | ||
| 211 | } | ||
| 212 | |||
| 213 | EXPORT_SYMBOL(die); | ||
| 214 | |||
| 215 | void die_if_kernel(const char *str, struct pt_regs *regs, int err) | ||
| 216 | { | ||
| 217 | if (user_mode(regs)) | ||
| 218 | return; | ||
| 219 | |||
| 220 | die(str, regs, err); | ||
| 221 | } | ||
| 222 | |||
| 223 | int bad_syscall(int n, struct pt_regs *regs) | ||
| 224 | { | ||
| 225 | siginfo_t info; | ||
| 226 | |||
| 227 | if (current->personality != PER_LINUX) { | ||
| 228 | send_sig(SIGSEGV, current, 1); | ||
| 229 | return regs->uregs[0]; | ||
| 230 | } | ||
| 231 | |||
| 232 | info.si_signo = SIGILL; | ||
| 233 | info.si_errno = 0; | ||
| 234 | info.si_code = ILL_ILLTRP; | ||
| 235 | info.si_addr = (void __user *)instruction_pointer(regs) - 4; | ||
| 236 | |||
| 237 | force_sig_info(SIGILL, &info, current); | ||
| 238 | die_if_kernel("Oops - bad syscall", regs, n); | ||
| 239 | return regs->uregs[0]; | ||
| 240 | } | ||
| 241 | |||
| 242 | void __pte_error(const char *file, int line, unsigned long val) | ||
| 243 | { | ||
| 244 | pr_emerg("%s:%d: bad pte %08lx.\n", file, line, val); | ||
| 245 | } | ||
| 246 | |||
| 247 | void __pmd_error(const char *file, int line, unsigned long val) | ||
| 248 | { | ||
| 249 | pr_emerg("%s:%d: bad pmd %08lx.\n", file, line, val); | ||
| 250 | } | ||
| 251 | |||
| 252 | void __pgd_error(const char *file, int line, unsigned long val) | ||
| 253 | { | ||
| 254 | pr_emerg("%s:%d: bad pgd %08lx.\n", file, line, val); | ||
| 255 | } | ||
| 256 | |||
| 257 | extern char *exception_vector, *exception_vector_end; | ||
| 258 | void __init trap_init(void) | ||
| 259 | { | ||
| 260 | return; | ||
| 261 | } | ||
| 262 | |||
| 263 | void __init early_trap_init(void) | ||
| 264 | { | ||
| 265 | unsigned long ivb = 0; | ||
| 266 | unsigned long base = PAGE_OFFSET; | ||
| 267 | |||
| 268 | memcpy((unsigned long *)base, (unsigned long *)&exception_vector, | ||
| 269 | ((unsigned long)&exception_vector_end - | ||
| 270 | (unsigned long)&exception_vector)); | ||
| 271 | ivb = __nds32__mfsr(NDS32_SR_IVB); | ||
| 272 | /* Check platform support. */ | ||
| 273 | if (((ivb & IVB_mskNIVIC) >> IVB_offNIVIC) < 2) | ||
| 274 | panic | ||
| 275 | ("IVIC mode is not allowed on the platform with interrupt controller\n"); | ||
| 276 | __nds32__mtsr((ivb & ~IVB_mskESZ) | (IVB_valESZ16 << IVB_offESZ) | | ||
| 277 | IVB_BASE, NDS32_SR_IVB); | ||
| 278 | __nds32__mtsr(INT_MASK_INITAIAL_VAL, NDS32_SR_INT_MASK); | ||
| 279 | |||
| 280 | /* | ||
| 281 | * 0x800 = 128 vectors * 16 bytes, so the copied vector table | ||
| 282 | * fits in one page; flushing a single page is enough. | ||
| 283 | */ | ||
| 284 | cpu_cache_wbinval_page(base, true); | ||
| 285 | } | ||
| 286 | |||
| 287 | void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, | ||
| 288 | int error_code, int si_code) | ||
| 289 | { | ||
| 290 | struct siginfo info; | ||
| 291 | |||
| 292 | tsk->thread.trap_no = ENTRY_DEBUG_RELATED; | ||
| 293 | tsk->thread.error_code = error_code; | ||
| 294 | |||
| 295 | memset(&info, 0, sizeof(info)); | ||
| 296 | info.si_signo = SIGTRAP; | ||
| 297 | info.si_code = si_code; | ||
| 298 | info.si_addr = (void __user *)instruction_pointer(regs); | ||
| 299 | force_sig_info(SIGTRAP, &info, tsk); | ||
| 300 | } | ||
| 301 | |||
| 302 | void do_debug_trap(unsigned long entry, unsigned long addr, | ||
| 303 | unsigned long type, struct pt_regs *regs) | ||
| 304 | { | ||
| 305 | if (notify_die(DIE_OOPS, "Oops", regs, addr, type, SIGTRAP) | ||
| 306 | == NOTIFY_STOP) | ||
| 307 | return; | ||
| 308 | |||
| 309 | if (user_mode(regs)) { | ||
| 310 | /* trap_signal */ | ||
| 311 | send_sigtrap(current, regs, 0, TRAP_BRKPT); | ||
| 312 | } else { | ||
| 313 | /* kernel_trap */ | ||
| 314 | if (!fixup_exception(regs)) | ||
| 315 | die("unexpected kernel_trap", regs, 0); | ||
| 316 | } | ||
| 317 | } | ||
| 318 | |||
| 319 | void unhandled_interruption(struct pt_regs *regs) | ||
| 320 | { | ||
| 321 | siginfo_t si; | ||
| 322 | pr_emerg("unhandled_interruption\n"); | ||
| 323 | show_regs(regs); | ||
| 324 | if (!user_mode(regs)) | ||
| 325 | do_exit(SIGKILL); | ||
| 326 | si.si_signo = SIGKILL; | ||
| 327 | si.si_errno = 0; | ||
| 328 | force_sig_info(SIGKILL, &si, current); | ||
| 329 | } | ||
| 330 | |||
| 331 | void unhandled_exceptions(unsigned long entry, unsigned long addr, | ||
| 332 | unsigned long type, struct pt_regs *regs) | ||
| 333 | { | ||
| 334 | siginfo_t si; | ||
| 335 | pr_emerg("Unhandled Exception: entry: %lx addr:%lx itype:%lx\n", entry, | ||
| 336 | addr, type); | ||
| 337 | show_regs(regs); | ||
| 338 | if (!user_mode(regs)) | ||
| 339 | do_exit(SIGKILL); | ||
| 340 | si.si_signo = SIGKILL; | ||
| 341 | si.si_errno = 0; | ||
| 342 | si.si_addr = (void *)addr; | ||
| 343 | force_sig_info(SIGKILL, &si, current); | ||
| 344 | } | ||
| 345 | |||
| 346 | extern int do_page_fault(unsigned long entry, unsigned long addr, | ||
| 347 | unsigned int error_code, struct pt_regs *regs); | ||
| 348 | |||
| 349 | /* | ||
| 350 | * 2:DEF dispatch for TLB MISC exception handler | ||
| 351 | */ | ||
| 352 | |||
| 353 | void do_dispatch_tlb_misc(unsigned long entry, unsigned long addr, | ||
| 354 | unsigned long type, struct pt_regs *regs) | ||
| 355 | { | ||
| 356 | type = type & (ITYPE_mskINST | ITYPE_mskETYPE); | ||
| 357 | if ((type & ITYPE_mskETYPE) < 5) { | ||
| 358 | /* Permission exceptions */ | ||
| 359 | do_page_fault(entry, addr, type, regs); | ||
| 360 | } else | ||
| 361 | unhandled_exceptions(entry, addr, type, regs); | ||
| 362 | } | ||
| 363 | |||
| 364 | void do_revinsn(struct pt_regs *regs) | ||
| 365 | { | ||
| 366 | siginfo_t si; | ||
| 367 | pr_emerg("Reserved Instruction\n"); | ||
| 368 | show_regs(regs); | ||
| 369 | if (!user_mode(regs)) | ||
| 370 | do_exit(SIGILL); | ||
| 371 | si.si_signo = SIGILL; | ||
| 372 | si.si_errno = 0; | ||
| 373 | force_sig_info(SIGILL, &si, current); | ||
| 374 | } | ||
| 375 | |||
| 376 | #ifdef CONFIG_ALIGNMENT_TRAP | ||
| 377 | extern int unalign_access_mode; | ||
| 378 | extern int do_unaligned_access(unsigned long addr, struct pt_regs *regs); | ||
| 379 | #endif | ||
| 380 | void do_dispatch_general(unsigned long entry, unsigned long addr, | ||
| 381 | unsigned long itype, struct pt_regs *regs, | ||
| 382 | unsigned long oipc) | ||
| 383 | { | ||
| 384 | unsigned int swid = itype >> ITYPE_offSWID; | ||
| 385 | unsigned long type = itype & (ITYPE_mskINST | ITYPE_mskETYPE); | ||
| 386 | if (type == ETYPE_ALIGNMENT_CHECK) { | ||
| 387 | #ifdef CONFIG_ALIGNMENT_TRAP | ||
| 388 | /* Alignment check */ | ||
| 389 | if (user_mode(regs) && unalign_access_mode) { | ||
| 390 | int ret; | ||
| 391 | ret = do_unaligned_access(addr, regs); | ||
| 392 | |||
| 393 | if (ret == 0) | ||
| 394 | return; | ||
| 395 | |||
| 396 | if (ret == -EFAULT) | ||
| 397 | pr_emerg | ||
| 398 | ("Unhandled unaligned access exception\n"); | ||
| 399 | } | ||
| 400 | #endif | ||
| 401 | do_page_fault(entry, addr, type, regs); | ||
| 402 | } else if (type == ETYPE_RESERVED_INSTRUCTION) { | ||
| 403 | /* Reserved instruction */ | ||
| 404 | do_revinsn(regs); | ||
| 405 | } else if (type == ETYPE_TRAP && swid == SWID_RAISE_INTERRUPT_LEVEL) { | ||
| 406 | /* trap, used on v3 EDM target debugging workaround */ | ||
| 407 | /* | ||
| 408 | * DIPC(OIPC) is passed as parameter before | ||
| 409 | * interrupt is enabled, so the DIPC will not be corrupted | ||
| 410 | * even though interrupts are coming in | ||
| 411 | */ | ||
| 412 | /* | ||
| 413 | * 1. update ipc | ||
| 414 | * 2. update pt_regs ipc with oipc | ||
| 415 | * 3. update pt_regs ipsw (clear DEX) | ||
| 416 | */ | ||
| 417 | __asm__ volatile ("mtsr %0, $IPC\n\t"::"r" (oipc)); | ||
| 418 | regs->ipc = oipc; | ||
| 419 | if (regs->pipsw & PSW_mskDEX) { | ||
| 420 | pr_emerg | ||
| 421 | ("A nested debug exception may have occurred\n"); | ||
| 422 | pr_emerg("ipc:%08x pipc:%08x\n", | ||
| 423 | (unsigned int)regs->ipc, | ||
| 424 | (unsigned int)regs->pipc); | ||
| 425 | } | ||
| 426 | do_debug_trap(entry, addr, itype, regs); | ||
| 427 | regs->ipsw &= ~PSW_mskDEX; | ||
| 428 | } else | ||
| 429 | unhandled_exceptions(entry, addr, type, regs); | ||
| 430 | } | ||
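
do_debug_trap() first offers the event to the die notifier chain via notify_die(DIE_OOPS, ...). A hedged sketch of a subscriber using the generic <linux/kdebug.h> notifier interface; register_die_notifier() and struct die_args are core kernel API, not part of this patch:

    #include <linux/init.h>
    #include <linux/kdebug.h>
    #include <linux/notifier.h>
    #include <linux/printk.h>

    static int my_die_handler(struct notifier_block *nb, unsigned long action,
    			  void *data)
    {
    	struct die_args *args = data;

    	if (action == DIE_OOPS)
    		pr_info("die notifier: %s (trap %d)\n", args->str, args->trapnr);

    	return NOTIFY_DONE;
    }

    static struct notifier_block my_die_nb = {
    	.notifier_call = my_die_handler,
    };

    static int __init my_die_notifier_init(void)
    {
    	return register_die_notifier(&my_die_nb);
    }
    device_initcall(my_die_notifier_init);
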
diff --git a/arch/nds32/kernel/vdso.c b/arch/nds32/kernel/vdso.c new file mode 100644 index 000000000000..f1198d7a5654 --- /dev/null +++ b/arch/nds32/kernel/vdso.c | |||
| @@ -0,0 +1,230 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2012 ARM Limited | ||
| 3 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 4 | |||
| 5 | #include <linux/cache.h> | ||
| 6 | #include <linux/clocksource.h> | ||
| 7 | #include <linux/elf.h> | ||
| 8 | #include <linux/err.h> | ||
| 9 | #include <linux/errno.h> | ||
| 10 | #include <linux/gfp.h> | ||
| 11 | #include <linux/kernel.h> | ||
| 12 | #include <linux/mm.h> | ||
| 13 | #include <linux/sched.h> | ||
| 14 | #include <linux/signal.h> | ||
| 15 | #include <linux/slab.h> | ||
| 16 | #include <linux/timekeeper_internal.h> | ||
| 17 | #include <linux/vmalloc.h> | ||
| 18 | #include <linux/random.h> | ||
| 19 | |||
| 20 | #include <asm/cacheflush.h> | ||
| 21 | #include <asm/vdso.h> | ||
| 22 | #include <asm/vdso_datapage.h> | ||
| 23 | #include <asm/vdso_timer_info.h> | ||
| 24 | #include <asm/cache_info.h> | ||
| 25 | extern struct cache_info L1_cache_info[2]; | ||
| 26 | extern char vdso_start, vdso_end; | ||
| 27 | static unsigned long vdso_pages __ro_after_init; | ||
| 28 | static unsigned long timer_mapping_base; | ||
| 29 | |||
| 30 | struct timer_info_t timer_info = { | ||
| 31 | .cycle_count_down = true, | ||
| 32 | .mapping_base = EMPTY_TIMER_MAPPING, | ||
| 33 | .cycle_count_reg_offset = EMPTY_REG_OFFSET | ||
| 34 | }; | ||
| 35 | /* | ||
| 36 | * The vDSO data page. | ||
| 37 | */ | ||
| 38 | static struct page *no_pages[] = { NULL }; | ||
| 39 | |||
| 40 | static union { | ||
| 41 | struct vdso_data data; | ||
| 42 | u8 page[PAGE_SIZE]; | ||
| 43 | } vdso_data_store __page_aligned_data; | ||
| 44 | struct vdso_data *vdso_data = &vdso_data_store.data; | ||
| 45 | static struct vm_special_mapping vdso_spec[2] __ro_after_init = { | ||
| 46 | { | ||
| 47 | .name = "[vvar]", | ||
| 48 | .pages = no_pages, | ||
| 49 | }, | ||
| 50 | { | ||
| 51 | .name = "[vdso]", | ||
| 52 | }, | ||
| 53 | }; | ||
| 54 | |||
| 55 | static void get_timer_node_info(void) | ||
| 56 | { | ||
| 57 | timer_mapping_base = timer_info.mapping_base; | ||
| 58 | vdso_data->cycle_count_offset = | ||
| 59 | timer_info.cycle_count_reg_offset; | ||
| 60 | vdso_data->cycle_count_down = | ||
| 61 | timer_info.cycle_count_down; | ||
| 62 | } | ||
| 63 | |||
| 64 | static int __init vdso_init(void) | ||
| 65 | { | ||
| 66 | int i; | ||
| 67 | struct page **vdso_pagelist; | ||
| 68 | |||
| 69 | if (memcmp(&vdso_start, "\177ELF", 4)) { | ||
| 70 | pr_err("vDSO is not a valid ELF object!\n"); | ||
| 71 | return -EINVAL; | ||
| 72 | } | ||
| 73 | /* Create a timer I/O mapping to read the clock cycle counter */ | ||
| 74 | get_timer_node_info(); | ||
| 75 | |||
| 76 | vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT; | ||
| 77 | pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n", | ||
| 78 | vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data); | ||
| 79 | |||
| 80 | /* Allocate the vDSO pagelist */ | ||
| 81 | vdso_pagelist = kcalloc(vdso_pages, sizeof(struct page *), GFP_KERNEL); | ||
| 82 | if (vdso_pagelist == NULL) | ||
| 83 | return -ENOMEM; | ||
| 84 | |||
| 85 | for (i = 0; i < vdso_pages; i++) | ||
| 86 | vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE); | ||
| 87 | vdso_spec[1].pages = &vdso_pagelist[0]; | ||
| 88 | |||
| 89 | return 0; | ||
| 90 | } | ||
| 91 | |||
| 92 | arch_initcall(vdso_init); | ||
| 93 | |||
| 94 | inline unsigned long vdso_random_addr(unsigned long vdso_mapping_len) | ||
| 95 | { | ||
| 96 | unsigned long start = current->mm->mmap_base, end, offset, addr; | ||
| 97 | start = PAGE_ALIGN(start); | ||
| 98 | |||
| 99 | /* Round the lowest possible end address up to a PMD boundary. */ | ||
| 100 | end = (start + vdso_mapping_len + PMD_SIZE - 1) & PMD_MASK; | ||
| 101 | if (end >= TASK_SIZE) | ||
| 102 | end = TASK_SIZE; | ||
| 103 | end -= vdso_mapping_len; | ||
| 104 | |||
| 105 | if (end > start) { | ||
| 106 | offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1); | ||
| 107 | addr = start + (offset << PAGE_SHIFT); | ||
| 108 | } else { | ||
| 109 | addr = start; | ||
| 110 | } | ||
| 111 | return addr; | ||
| 112 | } | ||
| 113 | |||
| 114 | int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | ||
| 115 | { | ||
| 116 | struct mm_struct *mm = current->mm; | ||
| 117 | unsigned long vdso_base, vdso_text_len, vdso_mapping_len; | ||
| 118 | struct vm_area_struct *vma; | ||
| 119 | unsigned long addr = 0; | ||
| 120 | pgprot_t prot; | ||
| 121 | int ret, vvar_page_num = 2; | ||
| 122 | |||
| 123 | vdso_text_len = vdso_pages << PAGE_SHIFT; | ||
| 124 | |||
| 125 | if (timer_mapping_base == EMPTY_VALUE) | ||
| 126 | vvar_page_num = 1; | ||
| 127 | /* Be sure to map the data page */ | ||
| 128 | vdso_mapping_len = vdso_text_len + vvar_page_num * PAGE_SIZE; | ||
| 129 | #ifdef CONFIG_CPU_CACHE_ALIASING | ||
| 130 | vdso_mapping_len += L1_cache_info[DCACHE].aliasing_num - 1; | ||
| 131 | #endif | ||
| 132 | |||
| 133 | if (down_write_killable(&mm->mmap_sem)) | ||
| 134 | return -EINTR; | ||
| 135 | |||
| 136 | addr = vdso_random_addr(vdso_mapping_len); | ||
| 137 | vdso_base = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0); | ||
| 138 | if (IS_ERR_VALUE(vdso_base)) { | ||
| 139 | ret = vdso_base; | ||
| 140 | goto up_fail; | ||
| 141 | } | ||
| 142 | |||
| 143 | #ifdef CONFIG_CPU_CACHE_ALIASING | ||
| 144 | { | ||
| 145 | unsigned int aliasing_mask = | ||
| 146 | L1_cache_info[DCACHE].aliasing_mask; | ||
| 147 | unsigned int page_colour_ofs; | ||
| 148 | page_colour_ofs = ((unsigned int)vdso_data & aliasing_mask) - | ||
| 149 | (vdso_base & aliasing_mask); | ||
| 150 | vdso_base += page_colour_ofs & aliasing_mask; | ||
| 151 | } | ||
| 152 | #endif | ||
| 153 | |||
| 154 | vma = _install_special_mapping(mm, vdso_base, vvar_page_num * PAGE_SIZE, | ||
| 155 | VM_READ | VM_MAYREAD, &vdso_spec[0]); | ||
| 156 | if (IS_ERR(vma)) { | ||
| 157 | ret = PTR_ERR(vma); | ||
| 158 | goto up_fail; | ||
| 159 | } | ||
| 160 | |||
| 161 | /* Map the vDSO data page to user space */ | ||
| 162 | ret = io_remap_pfn_range(vma, vdso_base, | ||
| 163 | virt_to_phys(vdso_data) >> PAGE_SHIFT, | ||
| 164 | PAGE_SIZE, vma->vm_page_prot); | ||
| 165 | if (ret) | ||
| 166 | goto up_fail; | ||
| 167 | |||
| 168 | /* Map the timer registers to user space */ | ||
| 169 | vdso_base += PAGE_SIZE; | ||
| 170 | prot = __pgprot(_PAGE_V | _PAGE_M_UR_KR | _PAGE_D | _PAGE_C_DEV); | ||
| 171 | ret = io_remap_pfn_range(vma, vdso_base, timer_mapping_base >> PAGE_SHIFT, | ||
| 172 | PAGE_SIZE, prot); | ||
| 173 | if (ret) | ||
| 174 | goto up_fail; | ||
| 175 | |||
| 176 | /* Map the vDSO text to user space */ | ||
| 177 | vdso_base += PAGE_SIZE; | ||
| 178 | mm->context.vdso = (void *)vdso_base; | ||
| 179 | vma = _install_special_mapping(mm, vdso_base, vdso_text_len, | ||
| 180 | VM_READ | VM_EXEC | | ||
| 181 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, | ||
| 182 | &vdso_spec[1]); | ||
| 183 | if (IS_ERR(vma)) { | ||
| 184 | ret = PTR_ERR(vma); | ||
| 185 | goto up_fail; | ||
| 186 | } | ||
| 187 | |||
| 188 | up_write(&mm->mmap_sem); | ||
| 189 | return 0; | ||
| 190 | |||
| 191 | up_fail: | ||
| 192 | mm->context.vdso = NULL; | ||
| 193 | up_write(&mm->mmap_sem); | ||
| 194 | return ret; | ||
| 195 | } | ||
| 196 | |||
| 197 | static void vdso_write_begin(struct vdso_data *vdata) | ||
| 198 | { | ||
| 199 | ++vdata->seq_count; | ||
| 200 | smp_wmb(); /* Pairs with smp_rmb in vdso_read_retry */ | ||
| 201 | } | ||
| 202 | |||
| 203 | static void vdso_write_end(struct vdso_data *vdata) | ||
| 204 | { | ||
| 205 | smp_wmb(); /* Pairs with smp_rmb in vdso_read_begin */ | ||
| 206 | ++vdata->seq_count; | ||
| 207 | } | ||
| 208 | |||
| 209 | void update_vsyscall(struct timekeeper *tk) | ||
| 210 | { | ||
| 211 | vdso_write_begin(vdso_data); | ||
| 212 | vdso_data->cs_mask = tk->tkr_mono.mask; | ||
| 213 | vdso_data->cs_mult = tk->tkr_mono.mult; | ||
| 214 | vdso_data->cs_shift = tk->tkr_mono.shift; | ||
| 215 | vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; | ||
| 216 | vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; | ||
| 217 | vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; | ||
| 218 | vdso_data->xtime_clock_sec = tk->xtime_sec; | ||
| 219 | vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec; | ||
| 220 | vdso_data->xtime_coarse_sec = tk->xtime_sec; | ||
| 221 | vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >> | ||
| 222 | tk->tkr_mono.shift; | ||
| 223 | vdso_write_end(vdso_data); | ||
| 224 | } | ||
| 225 | |||
| 226 | void update_vsyscall_tz(void) | ||
| 227 | { | ||
| 228 | vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; | ||
| 229 | vdso_data->tz_dsttime = sys_tz.tz_dsttime; | ||
| 230 | } | ||
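
arch_setup_additional_pages() installs two special mappings, "[vvar]" (the data page plus, when available, the timer page) followed by "[vdso]" (the vDSO text). Assuming only those mapping names, the layout is easy to confirm from user space by filtering /proc/self/maps:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	char line[256];
    	FILE *f = fopen("/proc/self/maps", "r");

    	if (!f)
    		return 1;
    	while (fgets(line, sizeof(line), f))
    		if (strstr(line, "[vdso]") || strstr(line, "[vvar]"))
    			fputs(line, stdout);
    	fclose(f);
    	return 0;
    }
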
diff --git a/arch/nds32/kernel/vdso/Makefile b/arch/nds32/kernel/vdso/Makefile new file mode 100644 index 000000000000..e6c50a701313 --- /dev/null +++ b/arch/nds32/kernel/vdso/Makefile | |||
| @@ -0,0 +1,82 @@ | |||
| 1 | # | ||
| 2 | # Building a vDSO image for nds32. | ||
| 3 | # | ||
| 4 | # Author: Will Deacon <will.deacon@arm.com> | ||
| 5 | # Heavily based on the vDSO Makefiles for other archs. | ||
| 6 | # | ||
| 7 | |||
| 8 | obj-vdso := note.o datapage.o sigreturn.o gettimeofday.o | ||
| 9 | |||
| 10 | # Build rules | ||
| 11 | targets := $(obj-vdso) vdso.so vdso.so.dbg | ||
| 12 | obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) | ||
| 13 | |||
| 14 | ccflags-y := -shared -fno-common -fno-builtin | ||
| 15 | ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \ | ||
| 16 | $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) | ||
| 17 | ccflags-y += -fPIC -Wl,-shared -g | ||
| 18 | |||
| 19 | # Disable gcov profiling for VDSO code | ||
| 20 | GCOV_PROFILE := n | ||
| 21 | |||
| 22 | |||
| 23 | obj-y += vdso.o | ||
| 24 | extra-y += vdso.lds | ||
| 25 | CPPFLAGS_vdso.lds += -P -C -U$(ARCH) | ||
| 26 | |||
| 27 | # Force dependency | ||
| 28 | $(obj)/vdso.o : $(obj)/vdso.so | ||
| 29 | |||
| 30 | # Link rule for the .so file, .lds has to be first | ||
| 31 | $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) | ||
| 32 | $(call if_changed,vdsold) | ||
| 33 | |||
| 34 | |||
| 35 | # Strip rule for the .so file | ||
| 36 | $(obj)/%.so: OBJCOPYFLAGS := -S | ||
| 37 | $(obj)/%.so: $(obj)/%.so.dbg FORCE | ||
| 38 | $(call if_changed,objcopy) | ||
| 39 | |||
| 40 | # Generate VDSO offsets using helper script | ||
| 41 | gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh | ||
| 42 | quiet_cmd_vdsosym = VDSOSYM $@ | ||
| 43 | define cmd_vdsosym | ||
| 44 | $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ | ||
| 45 | endef | ||
| 46 | |||
| 47 | include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE | ||
| 48 | $(call if_changed,vdsosym) | ||
| 49 | |||
| 50 | |||
| 51 | |||
| 52 | # Assembly rules for the .S files | ||
| 53 | |||
| 54 | sigreturn.o : sigreturn.S | ||
| 55 | $(call if_changed_dep,vdsoas) | ||
| 56 | |||
| 57 | note.o : note.S | ||
| 58 | $(call if_changed_dep,vdsoas) | ||
| 59 | |||
| 60 | datapage.o : datapage.S | ||
| 61 | $(call if_changed_dep,vdsoas) | ||
| 62 | |||
| 63 | gettimeofday.o : gettimeofday.c FORCE | ||
| 64 | $(call if_changed_dep,vdsocc) | ||
| 65 | |||
| 66 | # Actual build commands | ||
| 67 | quiet_cmd_vdsold = VDSOL $@ | ||
| 68 | cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@ | ||
| 69 | quiet_cmd_vdsoas = VDSOA $@ | ||
| 70 | cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $< | ||
| 71 | quiet_cmd_vdsocc = VDSOC $@ | ||
| 72 | cmd_vdsocc = $(CC) $(c_flags) -c -o $@ $< | ||
| 73 | |||
| 74 | # Install commands for the unstripped file | ||
| 75 | quiet_cmd_vdso_install = INSTALL $@ | ||
| 76 | cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ | ||
| 77 | |||
| 78 | vdso.so: $(obj)/vdso.so.dbg | ||
| 79 | @mkdir -p $(MODLIB)/vdso | ||
| 80 | $(call cmd,vdso_install) | ||
| 81 | |||
| 82 | vdso_install: vdso.so | ||
diff --git a/arch/nds32/kernel/vdso/datapage.S b/arch/nds32/kernel/vdso/datapage.S new file mode 100644 index 000000000000..4a62c3cab1c8 --- /dev/null +++ b/arch/nds32/kernel/vdso/datapage.S | |||
| @@ -0,0 +1,21 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/linkage.h> | ||
| 5 | #include <asm/page.h> | ||
| 6 | |||
| 7 | ENTRY(__get_timerpage) | ||
| 8 | sethi $r0, hi20(. + PAGE_SIZE + 8) | ||
| 9 | ori $r0, $r0, lo12(. + PAGE_SIZE + 4) | ||
| 10 | mfusr $r1, $pc | ||
| 11 | sub $r0, $r1, $r0 | ||
| 12 | ret | ||
| 13 | ENDPROC(__get_timerpage) | ||
| 14 | |||
| 15 | ENTRY(__get_datapage) | ||
| 16 | sethi $r0, hi20(. + 2*PAGE_SIZE + 8) | ||
| 17 | ori $r0, $r0, lo12(. + 2*PAGE_SIZE + 4) | ||
| 18 | mfusr $r1, $pc | ||
| 19 | sub $r0, $r1, $r0 | ||
| 20 | ret | ||
| 21 | ENDPROC(__get_datapage) | ||
diff --git a/arch/nds32/kernel/vdso/gen_vdso_offsets.sh b/arch/nds32/kernel/vdso/gen_vdso_offsets.sh new file mode 100755 index 000000000000..01924ff071ad --- /dev/null +++ b/arch/nds32/kernel/vdso/gen_vdso_offsets.sh | |||
| @@ -0,0 +1,15 @@ | |||
| 1 | #!/bin/sh | ||
| 2 | |||
| 3 | # | ||
| 4 | # Match symbols in the DSO that look like VDSO_*; produce a header file | ||
| 5 | # of constant offsets into the shared object. | ||
| 6 | # | ||
| 7 | # Doing this inside the Makefile will break the $(filter-out) function, | ||
| 8 | # causing Kbuild to rebuild the vdso-offsets header file every time. | ||
| 9 | # | ||
| 10 | # Author: Will Deacon <will.deacon@arm.com> | ||
| 11 | # | ||
| 12 | |||
| 13 | LC_ALL=C | ||
| 14 | sed -n -e 's/^00*/0/' -e \ | ||
| 15 | 's/^\([0-9a-fA-F]*\) . VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso_offset_\2\t0x\1/p' | ||
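For reference, the script turns every VDSO_* symbol reported by $(NM) into a "#define vdso_offset_<name>" line in include/generated/vdso-offsets.h (see the Makefile rule above). A minimal sketch of how such an offset is consumed, assuming the VDSO_rt_sigtramp symbol exported by vdso.lds.S later in this patch; the numeric value in the comment is invented for illustration:

    /*
     * Hypothetical consumer of the generated header.  The header name is the
     * one produced by the vdsosym Makefile rule; the 0x52c value shown in the
     * comment is a placeholder, not taken from this patch.
     */
    #include <generated/vdso-offsets.h>  /* e.g. #define vdso_offset_rt_sigtramp 0x52c */

    static unsigned long sigtramp_address(unsigned long vdso_text_base)
    {
            /* Offsets are relative to the start of vdso.so, so the runtime
             * address is the mapping base plus the generated constant. */
            return vdso_text_base + vdso_offset_rt_sigtramp;
    }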
diff --git a/arch/nds32/kernel/vdso/gettimeofday.c b/arch/nds32/kernel/vdso/gettimeofday.c new file mode 100644 index 000000000000..038721af40e3 --- /dev/null +++ b/arch/nds32/kernel/vdso/gettimeofday.c | |||
| @@ -0,0 +1,270 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/compiler.h> | ||
| 5 | #include <linux/hrtimer.h> | ||
| 6 | #include <linux/time.h> | ||
| 7 | #include <asm/io.h> | ||
| 8 | #include <asm/barrier.h> | ||
| 9 | #include <asm/bug.h> | ||
| 10 | #include <asm/page.h> | ||
| 11 | #include <asm/unistd.h> | ||
| 12 | #include <asm/vdso_datapage.h> | ||
| 13 | #include <asm/vdso_timer_info.h> | ||
| 14 | #include <asm/asm-offsets.h> | ||
| 15 | |||
| 16 | #define X(x) #x | ||
| 17 | #define Y(x) X(x) | ||
| 18 | |||
| 19 | extern struct vdso_data *__get_datapage(void); | ||
| 20 | extern struct vdso_data *__get_timerpage(void); | ||
| 21 | |||
| 22 | static notrace unsigned int __vdso_read_begin(const struct vdso_data *vdata) | ||
| 23 | { | ||
| 24 | u32 seq; | ||
| 25 | repeat: | ||
| 26 | seq = READ_ONCE(vdata->seq_count); | ||
| 27 | if (seq & 1) { | ||
| 28 | cpu_relax(); | ||
| 29 | goto repeat; | ||
| 30 | } | ||
| 31 | return seq; | ||
| 32 | } | ||
| 33 | |||
| 34 | static notrace unsigned int vdso_read_begin(const struct vdso_data *vdata) | ||
| 35 | { | ||
| 36 | unsigned int seq; | ||
| 37 | |||
| 38 | seq = __vdso_read_begin(vdata); | ||
| 39 | |||
| 40 | smp_rmb(); /* Pairs with smp_wmb in vdso_write_end */ | ||
| 41 | return seq; | ||
| 42 | } | ||
| 43 | |||
| 44 | static notrace int vdso_read_retry(const struct vdso_data *vdata, u32 start) | ||
| 45 | { | ||
| 46 | smp_rmb(); /* Pairs with smp_wmb in vdso_write_begin */ | ||
| 47 | return vdata->seq_count != start; | ||
| 48 | } | ||
| 49 | |||
| 50 | static notrace long clock_gettime_fallback(clockid_t _clkid, | ||
| 51 | struct timespec *_ts) | ||
| 52 | { | ||
| 53 | register struct timespec *ts asm("$r1") = _ts; | ||
| 54 | register clockid_t clkid asm("$r0") = _clkid; | ||
| 55 | register long ret asm("$r0"); | ||
| 56 | |||
| 57 | asm volatile ("movi $r15, %3\n" | ||
| 58 | "syscall 0x0\n" | ||
| 59 | :"=r" (ret) | ||
| 60 | :"r"(clkid), "r"(ts), "i"(__NR_clock_gettime) | ||
| 61 | :"$r15", "memory"); | ||
| 62 | |||
| 63 | return ret; | ||
| 64 | } | ||
| 65 | |||
| 66 | static notrace int do_realtime_coarse(struct timespec *ts, | ||
| 67 | struct vdso_data *vdata) | ||
| 68 | { | ||
| 69 | u32 seq; | ||
| 70 | |||
| 71 | do { | ||
| 72 | seq = vdso_read_begin(vdata); | ||
| 73 | |||
| 74 | ts->tv_sec = vdata->xtime_coarse_sec; | ||
| 75 | ts->tv_nsec = vdata->xtime_coarse_nsec; | ||
| 76 | |||
| 77 | } while (vdso_read_retry(vdata, seq)); | ||
| 78 | return 0; | ||
| 79 | } | ||
| 80 | |||
| 81 | static notrace int do_monotonic_coarse(struct timespec *ts, | ||
| 82 | struct vdso_data *vdata) | ||
| 83 | { | ||
| 84 | struct timespec tomono; | ||
| 85 | u32 seq; | ||
| 86 | |||
| 87 | do { | ||
| 88 | seq = vdso_read_begin(vdata); | ||
| 89 | |||
| 90 | ts->tv_sec = vdata->xtime_coarse_sec; | ||
| 91 | ts->tv_nsec = vdata->xtime_coarse_nsec; | ||
| 92 | |||
| 93 | tomono.tv_sec = vdata->wtm_clock_sec; | ||
| 94 | tomono.tv_nsec = vdata->wtm_clock_nsec; | ||
| 95 | |||
| 96 | } while (vdso_read_retry(vdata, seq)); | ||
| 97 | |||
| 98 | ts->tv_sec += tomono.tv_sec; | ||
| 99 | timespec_add_ns(ts, tomono.tv_nsec); | ||
| 100 | return 0; | ||
| 101 | } | ||
| 102 | |||
| 103 | static notrace inline u64 vgetsns(struct vdso_data *vdso) | ||
| 104 | { | ||
| 105 | u32 cycle_now; | ||
| 106 | u32 cycle_delta; | ||
| 107 | u32 *timer_cycle_base; | ||
| 108 | |||
| 109 | timer_cycle_base = | ||
| 110 | (u32 *) ((char *)__get_timerpage() + vdso->cycle_count_offset); | ||
| 111 | cycle_now = readl_relaxed(timer_cycle_base); | ||
| 112 | if (true == vdso->cycle_count_down) | ||
| 113 | cycle_now = ~cycle_now; | ||
| 114 | cycle_delta = cycle_now - (u32) vdso->cs_cycle_last; | ||
| 115 | return ((u64) cycle_delta & vdso->cs_mask) * vdso->cs_mult; | ||
| 116 | } | ||
| 117 | |||
| 118 | static notrace int do_realtime(struct timespec *ts, struct vdso_data *vdata) | ||
| 119 | { | ||
| 120 | unsigned count; | ||
| 121 | u64 ns; | ||
| 122 | do { | ||
| 123 | count = vdso_read_begin(vdata); | ||
| 124 | ts->tv_sec = vdata->xtime_clock_sec; | ||
| 125 | ns = vdata->xtime_clock_nsec; | ||
| 126 | ns += vgetsns(vdata); | ||
| 127 | ns >>= vdata->cs_shift; | ||
| 128 | } while (vdso_read_retry(vdata, count)); | ||
| 129 | |||
| 130 | ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); | ||
| 131 | ts->tv_nsec = ns; | ||
| 132 | |||
| 133 | return 0; | ||
| 134 | } | ||
| 135 | |||
| 136 | static notrace int do_monotonic(struct timespec *ts, struct vdso_data *vdata) | ||
| 137 | { | ||
| 138 | struct timespec tomono; | ||
| 139 | u64 nsecs; | ||
| 140 | u32 seq; | ||
| 141 | |||
| 142 | do { | ||
| 143 | seq = vdso_read_begin(vdata); | ||
| 144 | |||
| 145 | ts->tv_sec = vdata->xtime_clock_sec; | ||
| 146 | nsecs = vdata->xtime_clock_nsec; | ||
| 147 | nsecs += vgetsns(vdata); | ||
| 148 | nsecs >>= vdata->cs_shift; | ||
| 149 | |||
| 150 | tomono.tv_sec = vdata->wtm_clock_sec; | ||
| 151 | tomono.tv_nsec = vdata->wtm_clock_nsec; | ||
| 152 | |||
| 153 | } while (vdso_read_retry(vdata, seq)); | ||
| 154 | |||
| 155 | ts->tv_sec += tomono.tv_sec; | ||
| 156 | ts->tv_nsec = 0; | ||
| 157 | timespec_add_ns(ts, nsecs + tomono.tv_nsec); | ||
| 158 | return 0; | ||
| 159 | } | ||
| 160 | |||
| 161 | notrace int __vdso_clock_gettime(clockid_t clkid, struct timespec *ts) | ||
| 162 | { | ||
| 163 | struct vdso_data *vdata; | ||
| 164 | int ret = -1; | ||
| 165 | |||
| 166 | vdata = __get_datapage(); | ||
| 167 | if (vdata->cycle_count_offset == EMPTY_REG_OFFSET) | ||
| 168 | return clock_gettime_fallback(clkid, ts); | ||
| 169 | |||
| 170 | switch (clkid) { | ||
| 171 | case CLOCK_REALTIME_COARSE: | ||
| 172 | ret = do_realtime_coarse(ts, vdata); | ||
| 173 | break; | ||
| 174 | case CLOCK_MONOTONIC_COARSE: | ||
| 175 | ret = do_monotonic_coarse(ts, vdata); | ||
| 176 | break; | ||
| 177 | case CLOCK_REALTIME: | ||
| 178 | ret = do_realtime(ts, vdata); | ||
| 179 | break; | ||
| 180 | case CLOCK_MONOTONIC: | ||
| 181 | ret = do_monotonic(ts, vdata); | ||
| 182 | break; | ||
| 183 | default: | ||
| 184 | break; | ||
| 185 | } | ||
| 186 | |||
| 187 | if (ret) | ||
| 188 | ret = clock_gettime_fallback(clkid, ts); | ||
| 189 | |||
| 190 | return ret; | ||
| 191 | } | ||
| 192 | |||
| 193 | static notrace int clock_getres_fallback(clockid_t _clk_id, | ||
| 194 | struct timespec *_res) | ||
| 195 | { | ||
| 196 | register clockid_t clk_id asm("$r0") = _clk_id; | ||
| 197 | register struct timespec *res asm("$r1") = _res; | ||
| 198 | register int ret asm("$r0"); | ||
| 199 | |||
| 200 | asm volatile ("movi $r15, %3\n" | ||
| 201 | "syscall 0x0\n" | ||
| 202 | :"=r" (ret) | ||
| 203 | :"r"(clk_id), "r"(res), "i"(__NR_clock_getres) | ||
| 204 | :"$r15", "memory"); | ||
| 205 | |||
| 206 | return ret; | ||
| 207 | } | ||
| 208 | |||
| 209 | notrace int __vdso_clock_getres(clockid_t clk_id, struct timespec *res) | ||
| 210 | { | ||
| 211 | if (res == NULL) | ||
| 212 | return 0; | ||
| 213 | switch (clk_id) { | ||
| 214 | case CLOCK_REALTIME: | ||
| 215 | case CLOCK_MONOTONIC: | ||
| 216 | case CLOCK_MONOTONIC_RAW: | ||
| 217 | res->tv_sec = 0; | ||
| 218 | res->tv_nsec = CLOCK_REALTIME_RES; | ||
| 219 | break; | ||
| 220 | case CLOCK_REALTIME_COARSE: | ||
| 221 | case CLOCK_MONOTONIC_COARSE: | ||
| 222 | res->tv_sec = 0; | ||
| 223 | res->tv_nsec = CLOCK_COARSE_RES; | ||
| 224 | break; | ||
| 225 | default: | ||
| 226 | return clock_getres_fallback(clk_id, res); | ||
| 227 | } | ||
| 228 | return 0; | ||
| 229 | } | ||
| 230 | |||
| 231 | static notrace inline int gettimeofday_fallback(struct timeval *_tv, | ||
| 232 | struct timezone *_tz) | ||
| 233 | { | ||
| 234 | register struct timeval *tv asm("$r0") = _tv; | ||
| 235 | register struct timezone *tz asm("$r1") = _tz; | ||
| 236 | register int ret asm("$r0"); | ||
| 237 | |||
| 238 | asm volatile ("movi $r15, %3\n" | ||
| 239 | "syscall 0x0\n" | ||
| 240 | :"=r" (ret) | ||
| 241 | :"r"(tv), "r"(tz), "i"(__NR_gettimeofday) | ||
| 242 | :"$r15", "memory"); | ||
| 243 | |||
| 244 | return ret; | ||
| 245 | } | ||
| 246 | |||
| 247 | notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) | ||
| 248 | { | ||
| 249 | struct timespec ts; | ||
| 250 | struct vdso_data *vdata; | ||
| 251 | int ret; | ||
| 252 | |||
| 253 | vdata = __get_datapage(); | ||
| 254 | |||
| 255 | if (vdata->cycle_count_offset == EMPTY_REG_OFFSET) | ||
| 256 | return gettimeofday_fallback(tv, tz); | ||
| 257 | |||
| 258 | ret = do_realtime(&ts, vdata); | ||
| 259 | |||
| 260 | if (tv) { | ||
| 261 | tv->tv_sec = ts.tv_sec; | ||
| 262 | tv->tv_usec = ts.tv_nsec / 1000; | ||
| 263 | } | ||
| 264 | if (tz) { | ||
| 265 | tz->tz_minuteswest = vdata->tz_minuteswest; | ||
| 266 | tz->tz_dsttime = vdata->tz_dsttime; | ||
| 267 | } | ||
| 268 | |||
| 269 | return ret; | ||
| 270 | } | ||
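For reference, the conversion in do_realtime()/do_monotonic() follows the usual clocksource arithmetic: xtime_clock_nsec is kept left-shifted by cs_shift, the cycle delta from vgetsns() is scaled by cs_mult in the same shifted domain, and the sum is shifted back down to nanoseconds before being split into seconds and nanoseconds. A stand-alone sketch of that arithmetic (the helper name and its stand-alone form are illustrative; the real code uses __iter_div_u64_rem() instead of plain division):

    #include <stdint.h>

    /*
     * Sketch of the cycles-to-timespec math used above: ns stays in the
     * shifted domain until the final right shift, then is split into
     * whole seconds and leftover nanoseconds.
     */
    static void cycles_to_timespec(uint64_t xtime_sec, uint64_t xtime_nsec,
                                   uint64_t cycle_delta, uint32_t mult,
                                   uint32_t shift,
                                   uint64_t *sec, uint64_t *nsec)
    {
            uint64_t ns = xtime_nsec + cycle_delta * mult;

            ns >>= shift;                       /* back to nanoseconds */
            *sec = xtime_sec + ns / 1000000000ULL;
            *nsec = ns % 1000000000ULL;
    }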
diff --git a/arch/nds32/kernel/vdso/note.S b/arch/nds32/kernel/vdso/note.S new file mode 100644 index 000000000000..0aeaa19b05f0 --- /dev/null +++ b/arch/nds32/kernel/vdso/note.S | |||
| @@ -0,0 +1,11 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2012 ARM Limited | ||
| 3 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 4 | |||
| 5 | #include <linux/uts.h> | ||
| 6 | #include <linux/version.h> | ||
| 7 | #include <linux/elfnote.h> | ||
| 8 | |||
| 9 | ELFNOTE_START(Linux, 0, "a") | ||
| 10 | .long LINUX_VERSION_CODE | ||
| 11 | ELFNOTE_END | ||
diff --git a/arch/nds32/kernel/vdso/sigreturn.S b/arch/nds32/kernel/vdso/sigreturn.S new file mode 100644 index 000000000000..67e4d1d1612a --- /dev/null +++ b/arch/nds32/kernel/vdso/sigreturn.S | |||
| @@ -0,0 +1,19 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2012 ARM Limited | ||
| 3 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 4 | |||
| 5 | #include <linux/linkage.h> | ||
| 6 | #include <asm/unistd.h> | ||
| 7 | |||
| 8 | .text | ||
| 9 | |||
| 10 | ENTRY(__kernel_rt_sigreturn) | ||
| 11 | .cfi_startproc | ||
| 12 | movi $r15, __NR_rt_sigreturn | ||
| 13 | /* | ||
| 14 | * The SWID of the syscall should be __NR_rt_sigreturn so that it | ||
| 15 | * stays consistent with the unwinding scheme used by gcc. | ||
| 16 | */ | ||
| 17 | syscall __NR_rt_sigreturn | ||
| 18 | .cfi_endproc | ||
| 19 | ENDPROC(__kernel_rt_sigreturn) | ||
diff --git a/arch/nds32/kernel/vdso/vdso.S b/arch/nds32/kernel/vdso/vdso.S new file mode 100644 index 000000000000..16737c11e55b --- /dev/null +++ b/arch/nds32/kernel/vdso/vdso.S | |||
| @@ -0,0 +1,18 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2012 ARM Limited | ||
| 3 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 4 | |||
| 5 | #include <linux/init.h> | ||
| 6 | #include <linux/linkage.h> | ||
| 7 | #include <linux/const.h> | ||
| 8 | #include <asm/page.h> | ||
| 9 | |||
| 10 | .globl vdso_start, vdso_end | ||
| 11 | .section .rodata | ||
| 12 | .balign PAGE_SIZE | ||
| 13 | vdso_start: | ||
| 14 | .incbin "arch/nds32/kernel/vdso/vdso.so" | ||
| 15 | .balign PAGE_SIZE | ||
| 16 | vdso_end: | ||
| 17 | |||
| 18 | .previous | ||
diff --git a/arch/nds32/kernel/vdso/vdso.lds.S b/arch/nds32/kernel/vdso/vdso.lds.S new file mode 100644 index 000000000000..1f2b16004594 --- /dev/null +++ b/arch/nds32/kernel/vdso/vdso.lds.S | |||
| @@ -0,0 +1,75 @@ | |||
| 1 | /* | ||
| 2 | * SPDX-License-Identifier: GPL-2.0 | ||
| 3 | * Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 4 | */ | ||
| 5 | |||
| 6 | |||
| 7 | #include <linux/const.h> | ||
| 8 | #include <asm/page.h> | ||
| 9 | #include <asm/vdso.h> | ||
| 10 | |||
| 11 | OUTPUT_ARCH(nds32) | ||
| 12 | |||
| 13 | SECTIONS | ||
| 14 | { | ||
| 15 | . = SIZEOF_HEADERS; | ||
| 16 | |||
| 17 | .hash : { *(.hash) } :text | ||
| 18 | .gnu.hash : { *(.gnu.hash) } | ||
| 19 | .dynsym : { *(.dynsym) } | ||
| 20 | .dynstr : { *(.dynstr) } | ||
| 21 | .gnu.version : { *(.gnu.version) } | ||
| 22 | .gnu.version_d : { *(.gnu.version_d) } | ||
| 23 | .gnu.version_r : { *(.gnu.version_r) } | ||
| 24 | |||
| 25 | .note : { *(.note.*) } :text :note | ||
| 26 | |||
| 27 | |||
| 28 | .text : { *(.text*) } :text | ||
| 29 | |||
| 30 | .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr | ||
| 31 | .eh_frame : { KEEP (*(.eh_frame)) } :text | ||
| 32 | |||
| 33 | .dynamic : { *(.dynamic) } :text :dynamic | ||
| 34 | |||
| 35 | .rodata : { *(.rodata*) } :text | ||
| 36 | |||
| 37 | |||
| 38 | /DISCARD/ : { | ||
| 39 | *(.note.GNU-stack) | ||
| 40 | *(.data .data.* .gnu.linkonce.d.* .sdata*) | ||
| 41 | *(.bss .sbss .dynbss .dynsbss) | ||
| 42 | } | ||
| 43 | } | ||
| 44 | |||
| 45 | /* | ||
| 46 | * We must supply the ELF program headers explicitly to get just one | ||
| 47 | * PT_LOAD segment, and set the flags explicitly to make segments read-only. | ||
| 48 | */ | ||
| 49 | PHDRS | ||
| 50 | { | ||
| 51 | text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ | ||
| 52 | dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ | ||
| 53 | note PT_NOTE FLAGS(4); /* PF_R */ | ||
| 54 | eh_frame_hdr PT_GNU_EH_FRAME; | ||
| 55 | } | ||
| 56 | |||
| 57 | /* | ||
| 58 | * This controls what symbols we export from the DSO. | ||
| 59 | */ | ||
| 60 | VERSION | ||
| 61 | { | ||
| 62 | LINUX_4 { | ||
| 63 | global: | ||
| 64 | __kernel_rt_sigreturn; | ||
| 65 | __vdso_gettimeofday; | ||
| 66 | __vdso_clock_getres; | ||
| 67 | __vdso_clock_gettime; | ||
| 68 | local: *; | ||
| 69 | }; | ||
| 70 | } | ||
| 71 | |||
| 72 | /* | ||
| 73 | * Make the rt_sigreturn code visible to the kernel. | ||
| 74 | */ | ||
| 75 | VDSO_rt_sigtramp = __kernel_rt_sigreturn; | ||
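The VERSION block above is what makes the four entry points visible to user space. Applications do not look them up themselves: a libc finds the vDSO through the AT_SYSINFO_EHDR auxiliary vector and binds clock_gettime()/gettimeofday() to these symbols, so ordinary code keeps making plain library calls. A user-space sketch (not part of the patch):

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec ts;

            /* With the vDSO mapped, this call can be satisfied entirely in
             * user space; the syscall fallback is only taken when the
             * clocksource cannot be read from user space. */
            if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
                    printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
            return 0;
    }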
diff --git a/arch/nds32/kernel/vmlinux.lds.S b/arch/nds32/kernel/vmlinux.lds.S new file mode 100644 index 000000000000..288313b886ef --- /dev/null +++ b/arch/nds32/kernel/vmlinux.lds.S | |||
| @@ -0,0 +1,57 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <asm/page.h> | ||
| 5 | #include <asm/thread_info.h> | ||
| 6 | #include <asm/cache.h> | ||
| 7 | #include <asm/memory.h> | ||
| 8 | |||
| 9 | #define LOAD_OFFSET (PAGE_OFFSET - PHYS_OFFSET) | ||
| 10 | #include <asm-generic/vmlinux.lds.h> | ||
| 11 | |||
| 12 | OUTPUT_ARCH(nds32) | ||
| 13 | ENTRY(_stext_lma) | ||
| 14 | jiffies = jiffies_64; | ||
| 15 | |||
| 16 | SECTIONS | ||
| 17 | { | ||
| 18 | _stext_lma = TEXTADDR - LOAD_OFFSET; | ||
| 19 | . = TEXTADDR; | ||
| 20 | __init_begin = .; | ||
| 21 | HEAD_TEXT_SECTION | ||
| 22 | INIT_TEXT_SECTION(PAGE_SIZE) | ||
| 23 | INIT_DATA_SECTION(16) | ||
| 24 | PERCPU_SECTION(L1_CACHE_BYTES) | ||
| 25 | __init_end = .; | ||
| 26 | |||
| 27 | . = ALIGN(PAGE_SIZE); | ||
| 28 | _stext = .; | ||
| 29 | /* Real text segment */ | ||
| 30 | .text : AT(ADDR(.text) - LOAD_OFFSET) { | ||
| 31 | _text = .; /* Text and read-only data */ | ||
| 32 | TEXT_TEXT | ||
| 33 | SCHED_TEXT | ||
| 34 | CPUIDLE_TEXT | ||
| 35 | LOCK_TEXT | ||
| 36 | KPROBES_TEXT | ||
| 37 | IRQENTRY_TEXT | ||
| 38 | *(.fixup) | ||
| 39 | } | ||
| 40 | |||
| 41 | _etext = .; /* End of text and rodata section */ | ||
| 42 | |||
| 43 | _sdata = .; | ||
| 44 | RO_DATA_SECTION(PAGE_SIZE) | ||
| 45 | RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) | ||
| 46 | _edata = .; | ||
| 47 | |||
| 48 | EXCEPTION_TABLE(16) | ||
| 49 | NOTES | ||
| 50 | BSS_SECTION(4, 4, 4) | ||
| 51 | _end = .; | ||
| 52 | |||
| 53 | STABS_DEBUG | ||
| 54 | DWARF_DEBUG | ||
| 55 | |||
| 56 | DISCARDS | ||
| 57 | } | ||
diff --git a/arch/nds32/lib/Makefile b/arch/nds32/lib/Makefile new file mode 100644 index 000000000000..0f9840103f03 --- /dev/null +++ b/arch/nds32/lib/Makefile | |||
| @@ -0,0 +1,3 @@ | |||
| 1 | lib-y := copy_page.o memcpy.o memmove.o \ | ||
| 2 | memset.o memzero.o \ | ||
| 3 | copy_from_user.o copy_to_user.o clear_user.o | ||
diff --git a/arch/nds32/lib/clear_user.S b/arch/nds32/lib/clear_user.S new file mode 100644 index 000000000000..805dfcd25bf8 --- /dev/null +++ b/arch/nds32/lib/clear_user.S | |||
| @@ -0,0 +1,42 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/linkage.h> | ||
| 5 | #include <asm/assembler.h> | ||
| 6 | #include <asm/errno.h> | ||
| 7 | |||
| 8 | /* Prototype: int __arch_clear_user(void *addr, size_t sz) | ||
| 9 | * Purpose : clear some user memory | ||
| 10 | * Params : addr - user memory address to clear | ||
| 11 | * : sz - number of bytes to clear | ||
| 12 | * Returns : number of bytes NOT cleared | ||
| 13 | */ | ||
| 14 | .text | ||
| 15 | .align 5 | ||
| 16 | ENTRY(__arch_clear_user) | ||
| 17 | add $r5, $r0, $r1 | ||
| 18 | beqz $r1, clear_exit | ||
| 19 | xor $p1, $p1, $p1 ! Use $p1=0 to clear mem | ||
| 20 | srli $p0, $r1, #2 ! $p0 = number of words to clear | ||
| 21 | andi $r1, $r1, #3 ! Bytes less than a word to clear | ||
| 22 | beqz $p0, byte_clear ! Less than a word left to clear | ||
| 23 | word_clear: | ||
| 24 | USER( smw.bim,$p1, [$r0], $p1) ! Clear the word | ||
| 25 | addi $p0, $p0, #-1 ! Decrease word count | ||
| 26 | bnez $p0, word_clear ! Continue looping to clear all words | ||
| 27 | beqz $r1, clear_exit ! No bytes left to clear | ||
| 28 | byte_clear: | ||
| 29 | USER( sbi.bi, $p1, [$r0], #1) ! Clear the byte | ||
| 30 | addi $r1, $r1, #-1 ! Decrease byte count | ||
| 31 | bnez $r1, byte_clear ! Continue looping to clear all remaining bytes | ||
| 32 | clear_exit: | ||
| 33 | move $r0, $r1 ! Set return value | ||
| 34 | ret | ||
| 35 | |||
| 36 | .section .fixup,"ax" | ||
| 37 | .align 0 | ||
| 38 | 9001: | ||
| 39 | sub $r0, $r5, $r0 ! Bytes left to clear | ||
| 40 | ret | ||
| 41 | .previous | ||
| 42 | ENDPROC(__arch_clear_user) | ||
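As the header comment says, the routine returns the number of bytes it could not clear; callers normally turn any non-zero return into -EFAULT. A hedged sketch of that calling pattern, using the generic clear_user() wrapper name and an invented helper:

    #include <linux/uaccess.h>
    #include <linux/errno.h>

    /* Sketch only: buffer and helper name are illustrative. */
    static int zero_user_buffer(void __user *buf, unsigned long len)
    {
            if (clear_user(buf, len))
                    return -EFAULT;      /* part of the range was not writable */
            return 0;
    }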
diff --git a/arch/nds32/lib/copy_from_user.S b/arch/nds32/lib/copy_from_user.S new file mode 100644 index 000000000000..ad1857b20067 --- /dev/null +++ b/arch/nds32/lib/copy_from_user.S | |||
| @@ -0,0 +1,45 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/linkage.h> | ||
| 5 | #include <asm/assembler.h> | ||
| 6 | #include <asm/errno.h> | ||
| 7 | |||
| 8 | .macro lbi1 dst, addr, adj | ||
| 9 | USER( lbi.bi, \dst, [\addr], \adj) | ||
| 10 | .endm | ||
| 11 | |||
| 12 | .macro sbi1 src, addr, adj | ||
| 13 | sbi.bi \src, [\addr], \adj | ||
| 14 | .endm | ||
| 15 | |||
| 16 | .macro lmw1 start_reg, addr, end_reg | ||
| 17 | USER( lmw.bim, \start_reg, [\addr], \end_reg) | ||
| 18 | .endm | ||
| 19 | |||
| 20 | .macro smw1 start_reg, addr, end_reg | ||
| 21 | smw.bim \start_reg, [\addr], \end_reg | ||
| 22 | .endm | ||
| 23 | |||
| 24 | |||
| 25 | /* Prototype: int __arch_copy_from_user(void *to, const char *from, size_t n) | ||
| 26 | * Purpose : copy a block from user memory to kernel memory | ||
| 27 | * Params : to - kernel memory | ||
| 28 | * : from - user memory | ||
| 29 | * : n - number of bytes to copy | ||
| 30 | * Returns : Number of bytes NOT copied. | ||
| 31 | */ | ||
| 32 | |||
| 33 | .text | ||
| 34 | ENTRY(__arch_copy_from_user) | ||
| 35 | add $r5, $r0, $r2 | ||
| 36 | #include "copy_template.S" | ||
| 37 | move $r0, $r2 | ||
| 38 | ret | ||
| 39 | .section .fixup,"ax" | ||
| 40 | .align 2 | ||
| 41 | 9001: | ||
| 42 | sub $r0, $r5, $r0 | ||
| 43 | ret | ||
| 44 | .previous | ||
| 45 | ENDPROC(__arch_copy_from_user) | ||
diff --git a/arch/nds32/lib/copy_page.S b/arch/nds32/lib/copy_page.S new file mode 100644 index 000000000000..4a2ff85f17ee --- /dev/null +++ b/arch/nds32/lib/copy_page.S | |||
| @@ -0,0 +1,37 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/linkage.h> | ||
| 5 | #include <asm/page.h> | ||
| 6 | |||
| 7 | .text | ||
| 8 | ENTRY(copy_page) | ||
| 9 | pushm $r2, $r10 | ||
| 10 | movi $r2, PAGE_SIZE >> 5 | ||
| 11 | .Lcopy_loop: | ||
| 12 | lmw.bim $r3, [$r1], $r10 | ||
| 13 | smw.bim $r3, [$r0], $r10 | ||
| 14 | subi45 $r2, #1 | ||
| 15 | bnez38 $r2, .Lcopy_loop | ||
| 16 | popm $r2, $r10 | ||
| 17 | ret | ||
| 18 | ENDPROC(copy_page) | ||
| 19 | |||
| 20 | ENTRY(clear_page) | ||
| 21 | pushm $r1, $r9 | ||
| 22 | movi $r1, PAGE_SIZE >> 5 | ||
| 23 | movi55 $r2, #0 | ||
| 24 | movi55 $r3, #0 | ||
| 25 | movi55 $r4, #0 | ||
| 26 | movi55 $r5, #0 | ||
| 27 | movi55 $r6, #0 | ||
| 28 | movi55 $r7, #0 | ||
| 29 | movi55 $r8, #0 | ||
| 30 | movi55 $r9, #0 | ||
| 31 | .Lclear_loop: | ||
| 32 | smw.bim $r2, [$r0], $r9 | ||
| 33 | subi45 $r1, #1 | ||
| 34 | bnez38 $r1, .Lclear_loop | ||
| 35 | popm $r1, $r9 | ||
| 36 | ret | ||
| 37 | ENDPROC(clear_page) | ||
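Both copy_page() and clear_page() move PAGE_SIZE bytes in 32-byte bursts (each burst is a single lmw/smw over eight registers), so the loop runs PAGE_SIZE >> 5 times. A reference-only C equivalent of the copy loop, with the page size assumed for the sketch:

    #include <string.h>

    #define PAGE_SIZE 4096   /* page size assumed for this sketch */

    /* Reference-only C version of the 32-byte-burst page copy above. */
    static void copy_page_ref(void *dst, const void *src)
    {
            char *d = dst;
            const char *s = src;
            unsigned int i;

            for (i = 0; i < (PAGE_SIZE >> 5); i++) {
                    memcpy(d, s, 32);   /* one burst */
                    d += 32;
                    s += 32;
            }
    }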
diff --git a/arch/nds32/lib/copy_template.S b/arch/nds32/lib/copy_template.S new file mode 100644 index 000000000000..3a9a2de468c2 --- /dev/null +++ b/arch/nds32/lib/copy_template.S | |||
| @@ -0,0 +1,69 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | |||
| 5 | beq $r1, $r0, quit_memcpy | ||
| 6 | beqz $r2, quit_memcpy | ||
| 7 | srli $r3, $r2, #5 ! check if len < cache-line size (32 bytes) | ||
| 8 | beqz $r3, word_copy_entry | ||
| 9 | andi $r4, $r0, #0x3 ! check byte-align | ||
| 10 | beqz $r4, unalign_word_copy_entry | ||
| 11 | |||
| 12 | addi $r4, $r4,#-4 | ||
| 13 | abs $r4, $r4 ! how many unaligned bytes to copy | ||
| 14 | sub $r2, $r2, $r4 ! update $R2 | ||
| 15 | |||
| 16 | unalign_byte_copy: | ||
| 17 | lbi1 $r3, $r1, #1 | ||
| 18 | addi $r4, $r4, #-1 | ||
| 19 | sbi1 $r3, $r0, #1 | ||
| 20 | bnez $r4, unalign_byte_copy | ||
| 21 | beqz $r2, quit_memcpy | ||
| 22 | |||
| 23 | unalign_word_copy_entry: | ||
| 24 | andi $r3, $r0, 0x1f ! check cache-line unaligned byte count | ||
| 25 | beqz $r3, cache_copy | ||
| 26 | |||
| 27 | addi $r3, $r3, #-32 | ||
| 28 | abs $r3, $r3 | ||
| 29 | sub $r2, $r2, $r3 ! update $R2 | ||
| 30 | |||
| 31 | unalign_word_copy: | ||
| 32 | lmw1 $r4, $r1, $r4 | ||
| 33 | addi $r3, $r3, #-4 | ||
| 34 | smw1 $r4, $r0, $r4 | ||
| 35 | bnez $r3, unalign_word_copy | ||
| 36 | beqz $r2, quit_memcpy | ||
| 37 | |||
| 38 | addi $r3, $r2, #-32 ! if $r2 < cache-line size, then go to word_copy | ||
| 39 | bltz $r3, word_copy_entry | ||
| 40 | cache_copy: | ||
| 41 | srli $r3, $r2, #5 | ||
| 42 | beqz $r3, word_copy_entry | ||
| 43 | 3: | ||
| 44 | lmw1 $r17, $r1, $r24 | ||
| 45 | addi $r3, $r3, #-1 | ||
| 46 | smw1 $r17, $r0, $r24 | ||
| 47 | bnez $r3, 3b | ||
| 48 | |||
| 49 | word_copy_entry: | ||
| 50 | andi $r2, $r2, #31 | ||
| 51 | |||
| 52 | beqz $r2, quit_memcpy | ||
| 53 | 5: | ||
| 54 | srli $r3, $r2, #2 | ||
| 55 | beqz $r3, byte_copy | ||
| 56 | word_copy: | ||
| 57 | lmw1 $r4, $r1, $r4 | ||
| 58 | addi $r3, $r3, #-1 | ||
| 59 | smw1 $r4, $r0, $r4 | ||
| 60 | bnez $r3, word_copy | ||
| 61 | andi $r2, $r2, #3 | ||
| 62 | beqz $r2, quit_memcpy | ||
| 63 | byte_copy: | ||
| 64 | lbi1 $r3, $r1, #1 | ||
| 65 | addi $r2, $r2, #-1 | ||
| 66 | |||
| 67 | sbi1 $r3, $r0, #1 | ||
| 68 | bnez $r2, byte_copy | ||
| 69 | quit_memcpy: | ||
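The template proceeds in stages: leading bytes until the destination is word-aligned, word copies up to a cache-line boundary, 32-byte (cache-line) bursts while at least a line remains, then whole words, then trailing bytes. A C sketch of the same staging, with the alignment-to-cache-line step folded into the burst loop for brevity and the USER()/fixup error handling omitted:

    #include <stddef.h>
    #include <string.h>

    /* Reference sketch of the staged copy in copy_template.S. */
    static void *staged_copy(void *dst, const void *src, size_t n)
    {
            unsigned char *d = dst;
            const unsigned char *s = src;

            /* 1. leading bytes until dst is 4-byte aligned */
            while (n && ((unsigned long)d & 3)) {
                    *d++ = *s++;
                    n--;
            }
            /* 2. 32-byte (cache-line sized) bursts */
            while (n >= 32) {
                    memcpy(d, s, 32);
                    d += 32;
                    s += 32;
                    n -= 32;
            }
            /* 3. remaining whole words */
            while (n >= 4) {
                    memcpy(d, s, 4);
                    d += 4;
                    s += 4;
                    n -= 4;
            }
            /* 4. trailing bytes */
            while (n) {
                    *d++ = *s++;
                    n--;
            }
            return dst;
    }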
diff --git a/arch/nds32/lib/copy_to_user.S b/arch/nds32/lib/copy_to_user.S new file mode 100644 index 000000000000..3230044dcfb8 --- /dev/null +++ b/arch/nds32/lib/copy_to_user.S | |||
| @@ -0,0 +1,45 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/linkage.h> | ||
| 5 | #include <asm/assembler.h> | ||
| 6 | #include <asm/errno.h> | ||
| 7 | |||
| 8 | .macro lbi1 dst, addr, adj | ||
| 9 | lbi.bi \dst, [\addr], \adj | ||
| 10 | .endm | ||
| 11 | |||
| 12 | .macro sbi1 src, addr, adj | ||
| 13 | USER( sbi.bi, \src, [\addr], \adj) | ||
| 14 | .endm | ||
| 15 | |||
| 16 | .macro lmw1 start_reg, addr, end_reg | ||
| 17 | lmw.bim \start_reg, [\addr], \end_reg | ||
| 18 | .endm | ||
| 19 | |||
| 20 | .macro smw1 start_reg, addr, end_reg | ||
| 21 | USER( smw.bim, \start_reg, [\addr], \end_reg) | ||
| 22 | .endm | ||
| 23 | |||
| 24 | |||
| 25 | /* Prototype: int __arch_copy_to_user(void *to, const char *from, size_t n) | ||
| 26 | * Purpose : copy a block to user memory from kernel memory | ||
| 27 | * Params : to - user memory | ||
| 28 | * : from - kernel memory | ||
| 29 | * : n - number of bytes to copy | ||
| 30 | * Returns : Number of bytes NOT copied. | ||
| 31 | */ | ||
| 32 | |||
| 33 | .text | ||
| 34 | ENTRY(__arch_copy_to_user) | ||
| 35 | add $r5, $r0, $r2 | ||
| 36 | #include "copy_template.S" | ||
| 37 | move $r0, $r2 | ||
| 38 | ret | ||
| 39 | .section .fixup,"ax" | ||
| 40 | .align 2 | ||
| 41 | 9001: | ||
| 42 | sub $r0, $r5, $r0 | ||
| 43 | ret | ||
| 44 | .previous | ||
| 45 | ENDPROC(__arch_copy_to_user) | ||
diff --git a/arch/nds32/lib/memcpy.S b/arch/nds32/lib/memcpy.S new file mode 100644 index 000000000000..a2345ea721e4 --- /dev/null +++ b/arch/nds32/lib/memcpy.S | |||
| @@ -0,0 +1,30 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/linkage.h> | ||
| 5 | |||
| 6 | |||
| 7 | .macro lbi1 dst, addr, adj | ||
| 8 | lbi.bi \dst, [\addr], \adj | ||
| 9 | .endm | ||
| 10 | |||
| 11 | .macro sbi1 src, addr, adj | ||
| 12 | sbi.bi \src, [\addr], \adj | ||
| 13 | .endm | ||
| 14 | |||
| 15 | .macro lmw1 start_reg, addr, end_reg | ||
| 16 | lmw.bim \start_reg, [\addr], \end_reg | ||
| 17 | .endm | ||
| 18 | |||
| 19 | .macro smw1 start_reg, addr, end_reg | ||
| 20 | smw.bim \start_reg, [\addr], \end_reg | ||
| 21 | .endm | ||
| 22 | |||
| 23 | .text | ||
| 24 | ENTRY(memcpy) | ||
| 25 | move $r5, $r0 | ||
| 26 | #include "copy_template.S" | ||
| 27 | move $r0, $r5 | ||
| 28 | ret | ||
| 29 | |||
| 30 | ENDPROC(memcpy) | ||
diff --git a/arch/nds32/lib/memmove.S b/arch/nds32/lib/memmove.S new file mode 100644 index 000000000000..c823aada2271 --- /dev/null +++ b/arch/nds32/lib/memmove.S | |||
| @@ -0,0 +1,70 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/linkage.h> | ||
| 5 | |||
| 6 | /* | ||
| 7 | void *memmove(void *dst, const void *src, size_t n); | ||
| 8 | |||
| 9 | dst: $r0 | ||
| 10 | src: $r1 | ||
| 11 | n : $r2 | ||
| 12 | ret: $r0 - pointer to the memory area dst. | ||
| 13 | */ | ||
| 14 | .text | ||
| 15 | |||
| 16 | ENTRY(memmove) | ||
| 17 | move $r5, $r0 ! Set return value = dst | ||
| 18 | beq $r0, $r1, exit_memcpy ! Exit when dst = src | ||
| 19 | beqz $r2, exit_memcpy ! Exit when n = 0 | ||
| 20 | pushm $t0, $t1 ! Save reg | ||
| 21 | srli $p1, $r2, #2 ! $p1 is how many words to copy | ||
| 22 | |||
| 23 | ! Avoid data lost when memory overlap | ||
| 24 | ! Copy data reversely when src < dst | ||
| 25 | slt $p0, $r0, $r1 ! check if $r0 < $r1 | ||
| 26 | beqz $p0, do_reverse ! branch if dst > src | ||
| 27 | |||
| 28 | ! No reverse, dst < src | ||
| 29 | andi $r2, $r2, #3 ! How many bytes are less than a word | ||
| 30 | li $t0, #1 ! Determining copy direction in byte_cpy | ||
| 31 | beqz $p1, byte_cpy ! When n is less than a word | ||
| 32 | |||
| 33 | word_cpy: | ||
| 34 | lmw.bim $p0, [$r1], $p0 ! Read a word from src | ||
| 35 | addi $p1, $p1, #-1 ! How many words left to copy | ||
| 36 | smw.bim $p0, [$r0], $p0 ! Copy the word to dst | ||
| 37 | bnez $p1, word_cpy ! If remained words > 0 | ||
| 38 | beqz $r2, end_memcpy ! No left bytes to copy | ||
| 39 | b byte_cpy | ||
| 40 | |||
| 41 | do_reverse: | ||
| 42 | add $r0, $r0, $r2 ! Start with the end of $r0 | ||
| 43 | add $r1, $r1, $r2 ! Start with the end of $r1 | ||
| 44 | andi $r2, $r2, #3 ! How many bytes are less than a word | ||
| 45 | li $t0, #-1 ! Determining copy direction in byte_cpy | ||
| 46 | beqz $p1, reverse_byte_cpy ! When n is less than a word | ||
| 47 | |||
| 48 | reverse_word_cpy: | ||
| 49 | lmw.adm $p0, [$r1], $p0 ! Read a word from src | ||
| 50 | addi $p1, $p1, #-1 ! How many words left to copy | ||
| 51 | smw.adm $p0, [$r0], $p0 ! Copy the word to dst | ||
| 52 | bnez $p1, reverse_word_cpy ! If remained words > 0 | ||
| 53 | beqz $r2, end_memcpy ! No left bytes to copy | ||
| 54 | |||
| 55 | reverse_byte_cpy: | ||
| 56 | addi $r0, $r0, #-1 | ||
| 57 | addi $r1, $r1, #-1 | ||
| 58 | byte_cpy: ! Less than 4 bytes to copy now | ||
| 59 | lb.bi $p0, [$r1], $t0 ! Read a byte from src | ||
| 60 | addi $r2, $r2, #-1 ! How many bytes left to copy | ||
| 61 | sb.bi $p0, [$r0], $t0 ! Copy the byte to dst | ||
| 62 | bnez $r2, byte_cpy ! If remained bytes > 0 | ||
| 63 | |||
| 64 | end_memcpy: | ||
| 65 | popm $t0, $t1 | ||
| 66 | exit_memcpy: | ||
| 67 | move $r0, $r5 | ||
| 68 | ret | ||
| 69 | |||
| 70 | ENDPROC(memmove) | ||
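memmove() handles overlap by comparing the pointers: when the destination is below the source it copies forward, otherwise it starts at the end and copies backward. A compact C sketch of that decision (byte-at-a-time for clarity; the assembly above also has a word loop):

    #include <stddef.h>

    /* Sketch of the overlap handling in memmove.S: forward when dst < src,
     * backward otherwise, so overlapping ranges stay intact. */
    static void *memmove_ref(void *dst, const void *src, size_t n)
    {
            unsigned char *d = dst;
            const unsigned char *s = src;

            if (d < s) {
                    while (n--)
                            *d++ = *s++;
            } else {
                    d += n;
                    s += n;
                    while (n--)
                            *--d = *--s;
            }
            return dst;
    }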
diff --git a/arch/nds32/lib/memset.S b/arch/nds32/lib/memset.S new file mode 100644 index 000000000000..193cb6ce21a9 --- /dev/null +++ b/arch/nds32/lib/memset.S | |||
| @@ -0,0 +1,33 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/linkage.h> | ||
| 5 | |||
| 6 | .text | ||
| 7 | ENTRY(memset) | ||
| 8 | move $r5, $r0 ! Return value | ||
| 9 | beqz $r2, end_memset ! Exit when len = 0 | ||
| 10 | srli $p1, $r2, 2 ! $p1 is how many words to set | ||
| 11 | andi $r2, $r2, 3 ! How many bytes are less than a word | ||
| 12 | beqz $p1, byte_set ! When n is less than a word | ||
| 13 | |||
| 14 | ! set $r1 from ??????ab to abababab | ||
| 15 | andi $r1, $r1, #0x00ff ! $r1 = 000000ab | ||
| 16 | slli $p0, $r1, #8 ! $p0 = 0000ab00 | ||
| 17 | or $r1, $r1, $p0 ! $r1 = 0000abab | ||
| 18 | slli $p0, $r1, #16 ! $p0 = abab0000 | ||
| 19 | or $r1, $r1, $p0 ! $r1 = abababab | ||
| 20 | word_set: | ||
| 21 | addi $p1, $p1, #-1 ! How many words left to set | ||
| 22 | smw.bim $r1, [$r0], $r1 ! Store the word to dst | ||
| 23 | bnez $p1, word_set ! Still words to set, continue looping | ||
| 24 | beqz $r2, end_memset ! No bytes left to set | ||
| 25 | byte_set: ! Less than 4 bytes left to set | ||
| 26 | addi $r2, $r2, #-1 ! Decrease len by 1 | ||
| 27 | sbi.bi $r1, [$r0], #1 ! Store the low byte of $r1 to the next byte | ||
| 28 | bnez $r2, byte_set ! Still bytes left to set | ||
| 29 | end_memset: | ||
| 30 | move $r0, $r5 | ||
| 31 | ret | ||
| 32 | |||
| 33 | ENDPROC(memset) | ||
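The andi/slli/or sequence above replicates the fill byte into every byte of a 32-bit word so the word loop can store four bytes per iteration. The same splat in C (a sketch, assuming 32-bit words as on nds32):

    #include <stdint.h>

    /* Byte splat used by memset.S: turn 0x000000ab into 0xabababab so a
     * single word store fills four bytes at once. */
    static uint32_t splat_byte(uint8_t c)
    {
            uint32_t v = c;          /* 000000ab */

            v |= v << 8;             /* 0000abab */
            v |= v << 16;            /* abababab */
            return v;
    }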
diff --git a/arch/nds32/lib/memzero.S b/arch/nds32/lib/memzero.S new file mode 100644 index 000000000000..f055972c9343 --- /dev/null +++ b/arch/nds32/lib/memzero.S | |||
| @@ -0,0 +1,18 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/linkage.h> | ||
| 5 | |||
| 6 | .text | ||
| 7 | ENTRY(memzero) | ||
| 8 | beqz $r1, 1f | ||
| 9 | push $lp | ||
| 10 | move $r2, $r1 | ||
| 11 | move $r1, #0 | ||
| 12 | push $r0 | ||
| 13 | bal memset | ||
| 14 | pop $r0 | ||
| 15 | pop $lp | ||
| 16 | 1: | ||
| 17 | ret | ||
| 18 | ENDPROC(memzero) | ||
diff --git a/arch/nds32/mm/Makefile b/arch/nds32/mm/Makefile new file mode 100644 index 000000000000..6b6855852223 --- /dev/null +++ b/arch/nds32/mm/Makefile | |||
| @@ -0,0 +1,7 @@ | |||
| 1 | obj-y := extable.o tlb.o \ | ||
| 2 | fault.o init.o ioremap.o mmap.o \ | ||
| 3 | mm-nds32.o cacheflush.o proc.o | ||
| 4 | |||
| 5 | obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o | ||
| 6 | obj-$(CONFIG_HIGHMEM) += highmem.o | ||
| 7 | CFLAGS_proc-n13.o += -fomit-frame-pointer | ||
diff --git a/arch/nds32/mm/alignment.c b/arch/nds32/mm/alignment.c new file mode 100644 index 000000000000..b96a01b10ca7 --- /dev/null +++ b/arch/nds32/mm/alignment.c | |||
| @@ -0,0 +1,576 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/proc_fs.h> | ||
| 5 | #include <linux/uaccess.h> | ||
| 6 | #include <linux/sysctl.h> | ||
| 7 | #include <asm/unaligned.h> | ||
| 8 | |||
| 9 | #define DEBUG(enable, tagged, ...) \ | ||
| 10 | do{ \ | ||
| 11 | if (enable) { \ | ||
| 12 | if (tagged) \ | ||
| 13 | pr_warn("[ %30s() ] ", __func__); \ | ||
| 14 | pr_warn(__VA_ARGS__); \ | ||
| 15 | } \ | ||
| 16 | } while (0) | ||
| 17 | |||
| 18 | #define RT(inst) (((inst) >> 20) & 0x1FUL) | ||
| 19 | #define RA(inst) (((inst) >> 15) & 0x1FUL) | ||
| 20 | #define RB(inst) (((inst) >> 10) & 0x1FUL) | ||
| 21 | #define SV(inst) (((inst) >> 8) & 0x3UL) | ||
| 22 | #define IMM(inst) (((inst) >> 0) & 0x3FFFUL) | ||
| 23 | |||
| 24 | #define RA3(inst) (((inst) >> 3) & 0x7UL) | ||
| 25 | #define RT3(inst) (((inst) >> 6) & 0x7UL) | ||
| 26 | #define IMM3U(inst) (((inst) >> 0) & 0x7UL) | ||
| 27 | |||
| 28 | #define RA5(inst) (((inst) >> 0) & 0x1FUL) | ||
| 29 | #define RT4(inst) (((inst) >> 5) & 0xFUL) | ||
| 30 | |||
| 31 | #define __get8_data(val,addr,err) \ | ||
| 32 | __asm__( \ | ||
| 33 | "1: lbi.bi %1, [%2], #1\n" \ | ||
| 34 | "2:\n" \ | ||
| 35 | " .pushsection .text.fixup,\"ax\"\n" \ | ||
| 36 | " .align 2\n" \ | ||
| 37 | "3: movi %0, #1\n" \ | ||
| 38 | " j 2b\n" \ | ||
| 39 | " .popsection\n" \ | ||
| 40 | " .pushsection __ex_table,\"a\"\n" \ | ||
| 41 | " .align 3\n" \ | ||
| 42 | " .long 1b, 3b\n" \ | ||
| 43 | " .popsection\n" \ | ||
| 44 | : "=r" (err), "=&r" (val), "=r" (addr) \ | ||
| 45 | : "0" (err), "2" (addr)) | ||
| 46 | |||
| 47 | #define get16_data(addr, val_ptr) \ | ||
| 48 | do { \ | ||
| 49 | unsigned int err = 0, v, a = addr; \ | ||
| 50 | __get8_data(v,a,err); \ | ||
| 51 | *val_ptr = v << 0; \ | ||
| 52 | __get8_data(v,a,err); \ | ||
| 53 | *val_ptr |= v << 8; \ | ||
| 54 | if (err) \ | ||
| 55 | goto fault; \ | ||
| 56 | *val_ptr = le16_to_cpu(*val_ptr); \ | ||
| 57 | } while(0) | ||
| 58 | |||
| 59 | #define get32_data(addr, val_ptr) \ | ||
| 60 | do { \ | ||
| 61 | unsigned int err = 0, v, a = addr; \ | ||
| 62 | __get8_data(v,a,err); \ | ||
| 63 | *val_ptr = v << 0; \ | ||
| 64 | __get8_data(v,a,err); \ | ||
| 65 | *val_ptr |= v << 8; \ | ||
| 66 | __get8_data(v,a,err); \ | ||
| 67 | *val_ptr |= v << 16; \ | ||
| 68 | __get8_data(v,a,err); \ | ||
| 69 | *val_ptr |= v << 24; \ | ||
| 70 | if (err) \ | ||
| 71 | goto fault; \ | ||
| 72 | *val_ptr = le32_to_cpu(*val_ptr); \ | ||
| 73 | } while(0) | ||
| 74 | |||
| 75 | #define get_data(addr, val_ptr, len) \ | ||
| 76 | if (len == 2) \ | ||
| 77 | get16_data(addr, val_ptr); \ | ||
| 78 | else \ | ||
| 79 | get32_data(addr, val_ptr); | ||
| 80 | |||
| 81 | #define set16_data(addr, val) \ | ||
| 82 | do { \ | ||
| 83 | unsigned int err = 0, *ptr = addr ; \ | ||
| 84 | val = le32_to_cpu(val); \ | ||
| 85 | __asm__( \ | ||
| 86 | "1: sbi.bi %2, [%1], #1\n" \ | ||
| 87 | " srli %2, %2, #8\n" \ | ||
| 88 | "2: sbi %2, [%1]\n" \ | ||
| 89 | "3:\n" \ | ||
| 90 | " .pushsection .text.fixup,\"ax\"\n" \ | ||
| 91 | " .align 2\n" \ | ||
| 92 | "4: movi %0, #1\n" \ | ||
| 93 | " j 3b\n" \ | ||
| 94 | " .popsection\n" \ | ||
| 95 | " .pushsection __ex_table,\"a\"\n" \ | ||
| 96 | " .align 3\n" \ | ||
| 97 | " .long 1b, 4b\n" \ | ||
| 98 | " .long 2b, 4b\n" \ | ||
| 99 | " .popsection\n" \ | ||
| 100 | : "=r" (err), "+r" (ptr), "+r" (val) \ | ||
| 101 | : "0" (err) \ | ||
| 102 | ); \ | ||
| 103 | if (err) \ | ||
| 104 | goto fault; \ | ||
| 105 | } while(0) | ||
| 106 | |||
| 107 | #define set32_data(addr, val) \ | ||
| 108 | do { \ | ||
| 109 | unsigned int err = 0, *ptr = addr ; \ | ||
| 110 | val = le32_to_cpu(val); \ | ||
| 111 | __asm__( \ | ||
| 112 | "1: sbi.bi %2, [%1], #1\n" \ | ||
| 113 | " srli %2, %2, #8\n" \ | ||
| 114 | "2: sbi.bi %2, [%1], #1\n" \ | ||
| 115 | " srli %2, %2, #8\n" \ | ||
| 116 | "3: sbi.bi %2, [%1], #1\n" \ | ||
| 117 | " srli %2, %2, #8\n" \ | ||
| 118 | "4: sbi %2, [%1]\n" \ | ||
| 119 | "5:\n" \ | ||
| 120 | " .pushsection .text.fixup,\"ax\"\n" \ | ||
| 121 | " .align 2\n" \ | ||
| 122 | "6: movi %0, #1\n" \ | ||
| 123 | " j 5b\n" \ | ||
| 124 | " .popsection\n" \ | ||
| 125 | " .pushsection __ex_table,\"a\"\n" \ | ||
| 126 | " .align 3\n" \ | ||
| 127 | " .long 1b, 6b\n" \ | ||
| 128 | " .long 2b, 6b\n" \ | ||
| 129 | " .long 3b, 6b\n" \ | ||
| 130 | " .long 4b, 6b\n" \ | ||
| 131 | " .popsection\n" \ | ||
| 132 | : "=r" (err), "+r" (ptr), "+r" (val) \ | ||
| 133 | : "0" (err) \ | ||
| 134 | ); \ | ||
| 135 | if (err) \ | ||
| 136 | goto fault; \ | ||
| 137 | } while(0) | ||
| 138 | #define set_data(addr, val, len) \ | ||
| 139 | if (len == 2) \ | ||
| 140 | set16_data(addr, val); \ | ||
| 141 | else \ | ||
| 142 | set32_data(addr, val); | ||
| 143 | #define NDS32_16BIT_INSTRUCTION 0x80000000 | ||
| 144 | |||
| 145 | extern pte_t va_present(struct mm_struct *mm, unsigned long addr); | ||
| 146 | extern pte_t va_kernel_present(unsigned long addr); | ||
| 147 | extern int va_readable(struct pt_regs *regs, unsigned long addr); | ||
| 148 | extern int va_writable(struct pt_regs *regs, unsigned long addr); | ||
| 149 | |||
| 150 | int unalign_access_mode = 0, unalign_access_debug = 0; | ||
| 151 | |||
| 152 | static inline unsigned long *idx_to_addr(struct pt_regs *regs, int idx) | ||
| 153 | { | ||
| 154 | /* this should be consistent with ptrace.h */ | ||
| 155 | if (idx >= 0 && idx <= 25) /* R0-R25 */ | ||
| 156 | return ®s->uregs[0] + idx; | ||
| 157 | else if (idx >= 28 && idx <= 30) /* FP, GP, LP */ | ||
| 158 | return ®s->fp + (idx - 28); | ||
| 159 | else if (idx == 31) /* SP */ | ||
| 160 | return ®s->sp; | ||
| 161 | else | ||
| 162 | return NULL; /* cause a segfault */ | ||
| 163 | } | ||
| 164 | |||
| 165 | static inline unsigned long get_inst(unsigned long addr) | ||
| 166 | { | ||
| 167 | return be32_to_cpu(get_unaligned((u32 *) addr)); | ||
| 168 | } | ||
| 169 | |||
| 170 | static inline unsigned long sign_extend(unsigned long val, int len) | ||
| 171 | { | ||
| 172 | unsigned long ret = 0; | ||
| 173 | unsigned char *s, *t; | ||
| 174 | int i = 0; | ||
| 175 | |||
| 176 | val = cpu_to_le32(val); | ||
| 177 | |||
| 178 | s = (void *)&val; | ||
| 179 | t = (void *)&ret; | ||
| 180 | |||
| 181 | while (i++ < len) | ||
| 182 | *t++ = *s++; | ||
| 183 | |||
| 184 | if (((*(t - 1)) & 0x80) && (i < 4)) { | ||
| 185 | |||
| 186 | while (i++ <= 4) | ||
| 187 | *t++ = 0xff; | ||
| 188 | } | ||
| 189 | |||
| 190 | return le32_to_cpu(ret); | ||
| 191 | } | ||
| 192 | |||
| 193 | static inline int do_16(unsigned long inst, struct pt_regs *regs) | ||
| 194 | { | ||
| 195 | int imm, regular, load, len, addr_mode, idx_mode; | ||
| 196 | unsigned long unaligned_addr, target_val, source_idx, target_idx, | ||
| 197 | shift = 0; | ||
| 198 | switch ((inst >> 9) & 0x3F) { | ||
| 199 | |||
| 200 | case 0x12: /* LHI333 */ | ||
| 201 | imm = 1; | ||
| 202 | regular = 1; | ||
| 203 | load = 1; | ||
| 204 | len = 2; | ||
| 205 | addr_mode = 3; | ||
| 206 | idx_mode = 3; | ||
| 207 | break; | ||
| 208 | case 0x10: /* LWI333 */ | ||
| 209 | imm = 1; | ||
| 210 | regular = 1; | ||
| 211 | load = 1; | ||
| 212 | len = 4; | ||
| 213 | addr_mode = 3; | ||
| 214 | idx_mode = 3; | ||
| 215 | break; | ||
| 216 | case 0x11: /* LWI333.bi */ | ||
| 217 | imm = 1; | ||
| 218 | regular = 0; | ||
| 219 | load = 1; | ||
| 220 | len = 4; | ||
| 221 | addr_mode = 3; | ||
| 222 | idx_mode = 3; | ||
| 223 | break; | ||
| 224 | case 0x1A: /* LWI450 */ | ||
| 225 | imm = 0; | ||
| 226 | regular = 1; | ||
| 227 | load = 1; | ||
| 228 | len = 4; | ||
| 229 | addr_mode = 5; | ||
| 230 | idx_mode = 4; | ||
| 231 | break; | ||
| 232 | case 0x16: /* SHI333 */ | ||
| 233 | imm = 1; | ||
| 234 | regular = 1; | ||
| 235 | load = 0; | ||
| 236 | len = 2; | ||
| 237 | addr_mode = 3; | ||
| 238 | idx_mode = 3; | ||
| 239 | break; | ||
| 240 | case 0x14: /* SWI333 */ | ||
| 241 | imm = 1; | ||
| 242 | regular = 1; | ||
| 243 | load = 0; | ||
| 244 | len = 4; | ||
| 245 | addr_mode = 3; | ||
| 246 | idx_mode = 3; | ||
| 247 | break; | ||
| 248 | case 0x15: /* SWI333.bi */ | ||
| 249 | imm = 1; | ||
| 250 | regular = 0; | ||
| 251 | load = 0; | ||
| 252 | len = 4; | ||
| 253 | addr_mode = 3; | ||
| 254 | idx_mode = 3; | ||
| 255 | break; | ||
| 256 | case 0x1B: /* SWI450 */ | ||
| 257 | imm = 0; | ||
| 258 | regular = 1; | ||
| 259 | load = 0; | ||
| 260 | len = 4; | ||
| 261 | addr_mode = 5; | ||
| 262 | idx_mode = 4; | ||
| 263 | break; | ||
| 264 | |||
| 265 | default: | ||
| 266 | return -EFAULT; | ||
| 267 | } | ||
| 268 | |||
| 269 | if (addr_mode == 3) { | ||
| 270 | unaligned_addr = *idx_to_addr(regs, RA3(inst)); | ||
| 271 | source_idx = RA3(inst); | ||
| 272 | } else { | ||
| 273 | unaligned_addr = *idx_to_addr(regs, RA5(inst)); | ||
| 274 | source_idx = RA5(inst); | ||
| 275 | } | ||
| 276 | |||
| 277 | if (idx_mode == 3) | ||
| 278 | target_idx = RT3(inst); | ||
| 279 | else | ||
| 280 | target_idx = RT4(inst); | ||
| 281 | |||
| 282 | if (imm) | ||
| 283 | shift = IMM3U(inst) * len; | ||
| 284 | |||
| 285 | if (regular) | ||
| 286 | unaligned_addr += shift; | ||
| 287 | |||
| 288 | if (load) { | ||
| 289 | if (!access_ok(VERIFY_READ, (void *)unaligned_addr, len)) | ||
| 290 | return -EACCES; | ||
| 291 | |||
| 292 | get_data(unaligned_addr, &target_val, len); | ||
| 293 | *idx_to_addr(regs, target_idx) = target_val; | ||
| 294 | } else { | ||
| 295 | if (!access_ok(VERIFY_WRITE, (void *)unaligned_addr, len)) | ||
| 296 | return -EACCES; | ||
| 297 | target_val = *idx_to_addr(regs, target_idx); | ||
| 298 | set_data((void *)unaligned_addr, target_val, len); | ||
| 299 | } | ||
| 300 | |||
| 301 | if (!regular) | ||
| 302 | *idx_to_addr(regs, source_idx) = unaligned_addr + shift; | ||
| 303 | regs->ipc += 2; | ||
| 304 | |||
| 305 | return 0; | ||
| 306 | fault: | ||
| 307 | return -EACCES; | ||
| 308 | } | ||
| 309 | |||
| 310 | static inline int do_32(unsigned long inst, struct pt_regs *regs) | ||
| 311 | { | ||
| 312 | int imm, regular, load, len, sign_ext; | ||
| 313 | unsigned long unaligned_addr, target_val, shift; | ||
| 314 | |||
| 315 | unaligned_addr = *idx_to_addr(regs, RA(inst)); | ||
| 316 | |||
| 317 | switch ((inst >> 25) << 1) { | ||
| 318 | |||
| 319 | case 0x02: /* LHI */ | ||
| 320 | imm = 1; | ||
| 321 | regular = 1; | ||
| 322 | load = 1; | ||
| 323 | len = 2; | ||
| 324 | sign_ext = 0; | ||
| 325 | break; | ||
| 326 | case 0x0A: /* LHI.bi */ | ||
| 327 | imm = 1; | ||
| 328 | regular = 0; | ||
| 329 | load = 1; | ||
| 330 | len = 2; | ||
| 331 | sign_ext = 0; | ||
| 332 | break; | ||
| 333 | case 0x22: /* LHSI */ | ||
| 334 | imm = 1; | ||
| 335 | regular = 1; | ||
| 336 | load = 1; | ||
| 337 | len = 2; | ||
| 338 | sign_ext = 1; | ||
| 339 | break; | ||
| 340 | case 0x2A: /* LHSI.bi */ | ||
| 341 | imm = 1; | ||
| 342 | regular = 0; | ||
| 343 | load = 1; | ||
| 344 | len = 2; | ||
| 345 | sign_ext = 1; | ||
| 346 | break; | ||
| 347 | case 0x04: /* LWI */ | ||
| 348 | imm = 1; | ||
| 349 | regular = 1; | ||
| 350 | load = 1; | ||
| 351 | len = 4; | ||
| 352 | sign_ext = 0; | ||
| 353 | break; | ||
| 354 | case 0x0C: /* LWI.bi */ | ||
| 355 | imm = 1; | ||
| 356 | regular = 0; | ||
| 357 | load = 1; | ||
| 358 | len = 4; | ||
| 359 | sign_ext = 0; | ||
| 360 | break; | ||
| 361 | case 0x12: /* SHI */ | ||
| 362 | imm = 1; | ||
| 363 | regular = 1; | ||
| 364 | load = 0; | ||
| 365 | len = 2; | ||
| 366 | sign_ext = 0; | ||
| 367 | break; | ||
| 368 | case 0x1A: /* SHI.bi */ | ||
| 369 | imm = 1; | ||
| 370 | regular = 0; | ||
| 371 | load = 0; | ||
| 372 | len = 2; | ||
| 373 | sign_ext = 0; | ||
| 374 | break; | ||
| 375 | case 0x14: /* SWI */ | ||
| 376 | imm = 1; | ||
| 377 | regular = 1; | ||
| 378 | load = 0; | ||
| 379 | len = 4; | ||
| 380 | sign_ext = 0; | ||
| 381 | break; | ||
| 382 | case 0x1C: /* SWI.bi */ | ||
| 383 | imm = 1; | ||
| 384 | regular = 0; | ||
| 385 | load = 0; | ||
| 386 | len = 4; | ||
| 387 | sign_ext = 0; | ||
| 388 | break; | ||
| 389 | |||
| 390 | default: | ||
| 391 | switch (inst & 0xff) { | ||
| 392 | |||
| 393 | case 0x01: /* LH */ | ||
| 394 | imm = 0; | ||
| 395 | regular = 1; | ||
| 396 | load = 1; | ||
| 397 | len = 2; | ||
| 398 | sign_ext = 0; | ||
| 399 | break; | ||
| 400 | case 0x05: /* LH.bi */ | ||
| 401 | imm = 0; | ||
| 402 | regular = 0; | ||
| 403 | load = 1; | ||
| 404 | len = 2; | ||
| 405 | sign_ext = 0; | ||
| 406 | break; | ||
| 407 | case 0x11: /* LHS */ | ||
| 408 | imm = 0; | ||
| 409 | regular = 1; | ||
| 410 | load = 1; | ||
| 411 | len = 2; | ||
| 412 | sign_ext = 1; | ||
| 413 | break; | ||
| 414 | case 0x15: /* LHS.bi */ | ||
| 415 | imm = 0; | ||
| 416 | regular = 0; | ||
| 417 | load = 1; | ||
| 418 | len = 2; | ||
| 419 | sign_ext = 1; | ||
| 420 | break; | ||
| 421 | case 0x02: /* LW */ | ||
| 422 | imm = 0; | ||
| 423 | regular = 1; | ||
| 424 | load = 1; | ||
| 425 | len = 4; | ||
| 426 | sign_ext = 0; | ||
| 427 | break; | ||
| 428 | case 0x06: /* LW.bi */ | ||
| 429 | imm = 0; | ||
| 430 | regular = 0; | ||
| 431 | load = 1; | ||
| 432 | len = 4; | ||
| 433 | sign_ext = 0; | ||
| 434 | break; | ||
| 435 | case 0x09: /* SH */ | ||
| 436 | imm = 0; | ||
| 437 | regular = 1; | ||
| 438 | load = 0; | ||
| 439 | len = 2; | ||
| 440 | sign_ext = 0; | ||
| 441 | break; | ||
| 442 | case 0x0D: /* SH.bi */ | ||
| 443 | imm = 0; | ||
| 444 | regular = 0; | ||
| 445 | load = 0; | ||
| 446 | len = 2; | ||
| 447 | sign_ext = 0; | ||
| 448 | break; | ||
| 449 | case 0x0A: /* SW */ | ||
| 450 | imm = 0; | ||
| 451 | regular = 1; | ||
| 452 | load = 0; | ||
| 453 | len = 4; | ||
| 454 | sign_ext = 0; | ||
| 455 | break; | ||
| 456 | case 0x0E: /* SW.bi */ | ||
| 457 | imm = 0; | ||
| 458 | regular = 0; | ||
| 459 | load = 0; | ||
| 460 | len = 4; | ||
| 461 | sign_ext = 0; | ||
| 462 | break; | ||
| 463 | |||
| 464 | default: | ||
| 465 | return -EFAULT; | ||
| 466 | } | ||
| 467 | } | ||
| 468 | |||
| 469 | if (imm) | ||
| 470 | shift = IMM(inst) * len; | ||
| 471 | else | ||
| 472 | shift = *idx_to_addr(regs, RB(inst)) << SV(inst); | ||
| 473 | |||
| 474 | if (regular) | ||
| 475 | unaligned_addr += shift; | ||
| 476 | |||
| 477 | if (load) { | ||
| 478 | |||
| 479 | if (!access_ok(VERIFY_READ, (void *)unaligned_addr, len)) | ||
| 480 | return -EACCES; | ||
| 481 | |||
| 482 | get_data(unaligned_addr, &target_val, len); | ||
| 483 | |||
| 484 | if (sign_ext) | ||
| 485 | *idx_to_addr(regs, RT(inst)) = | ||
| 486 | sign_extend(target_val, len); | ||
| 487 | else | ||
| 488 | *idx_to_addr(regs, RT(inst)) = target_val; | ||
| 489 | } else { | ||
| 490 | |||
| 491 | if (!access_ok(VERIFY_WRITE, (void *)unaligned_addr, len)) | ||
| 492 | return -EACCES; | ||
| 493 | |||
| 494 | target_val = *idx_to_addr(regs, RT(inst)); | ||
| 495 | set_data((void *)unaligned_addr, target_val, len); | ||
| 496 | } | ||
| 497 | |||
| 498 | if (!regular) | ||
| 499 | *idx_to_addr(regs, RA(inst)) = unaligned_addr + shift; | ||
| 500 | |||
| 501 | regs->ipc += 4; | ||
| 502 | |||
| 503 | return 0; | ||
| 504 | fault: | ||
| 505 | return -EACCES; | ||
| 506 | } | ||
| 507 | |||
| 508 | int do_unaligned_access(unsigned long addr, struct pt_regs *regs) | ||
| 509 | { | ||
| 510 | unsigned long inst; | ||
| 511 | int ret = -EFAULT; | ||
| 512 | mm_segment_t seg = get_fs(); | ||
| 513 | |||
| 514 | inst = get_inst(regs->ipc); | ||
| 515 | |||
| 516 | DEBUG((unalign_access_debug > 0), 1, | ||
| 517 | "Faulting addr: 0x%08lx, pc: 0x%08lx [inst: 0x%08lx ]\n", addr, | ||
| 518 | regs->ipc, inst); | ||
| 519 | |||
| 520 | set_fs(USER_DS); | ||
| 521 | |||
| 522 | if (inst & NDS32_16BIT_INSTRUCTION) | ||
| 523 | ret = do_16((inst >> 16) & 0xffff, regs); | ||
| 524 | else | ||
| 525 | ret = do_32(inst, regs); | ||
| 526 | set_fs(seg); | ||
| 527 | |||
| 528 | return ret; | ||
| 529 | } | ||
| 530 | |||
| 531 | #ifdef CONFIG_PROC_FS | ||
| 532 | |||
| 533 | static struct ctl_table alignment_tbl[3] = { | ||
| 534 | { | ||
| 535 | .procname = "enable", | ||
| 536 | .data = &unalign_access_mode, | ||
| 537 | .maxlen = sizeof(unalign_access_mode), | ||
| 538 | .mode = 0666, | ||
| 539 | .proc_handler = &proc_dointvec | ||
| 540 | } | ||
| 541 | , | ||
| 542 | { | ||
| 543 | .procname = "debug_info", | ||
| 544 | .data = &unalign_access_debug, | ||
| 545 | .maxlen = sizeof(unalign_access_debug), | ||
| 546 | .mode = 0644, | ||
| 547 | .proc_handler = &proc_dointvec | ||
| 548 | } | ||
| 549 | , | ||
| 550 | {} | ||
| 551 | }; | ||
| 552 | |||
| 553 | static struct ctl_table nds32_sysctl_table[2] = { | ||
| 554 | { | ||
| 555 | .procname = "unaligned_acess", | ||
| 556 | .mode = 0555, | ||
| 557 | .child = alignment_tbl}, | ||
| 558 | {} | ||
| 559 | }; | ||
| 560 | |||
| 561 | static struct ctl_path nds32_path[2] = { | ||
| 562 | {.procname = "nds32"}, | ||
| 563 | {} | ||
| 564 | }; | ||
| 565 | |||
| 566 | /* | ||
| 567 | * Initialize nds32 alignment-correction interface | ||
| 568 | */ | ||
| 569 | static int __init nds32_sysctl_init(void) | ||
| 570 | { | ||
| 571 | register_sysctl_paths(nds32_path, nds32_sysctl_table); | ||
| 572 | return 0; | ||
| 573 | } | ||
| 574 | |||
| 575 | __initcall(nds32_sysctl_init); | ||
| 576 | #endif /* CONFIG_PROC_FS */ | ||
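For the immediate load/store forms handled by do_32(), the effective address is the base register RA plus the immediate field scaled by the access size, which is what the "shift = IMM(inst) * len" path computes before the access_ok() check. A small sketch of that computation; the IMM() definition is copied from the top of the file, and the instruction word and register value are placeholders:

    /* Same field extraction as in alignment.c; values are illustrative. */
    #define IMM(inst)       (((inst) >> 0) & 0x3FFFUL)

    static unsigned long imm_form_address(unsigned long inst,
                                          unsigned long ra_value, int len)
    {
            /* shift = IMM(inst) * len, then added to the base register,
             * as in the "if (imm)" path of do_32(). */
            return ra_value + IMM(inst) * len;
    }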
diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c new file mode 100644 index 000000000000..6eb786a399a2 --- /dev/null +++ b/arch/nds32/mm/cacheflush.c | |||
| @@ -0,0 +1,322 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/mm.h> | ||
| 5 | #include <linux/sched.h> | ||
| 6 | #include <linux/fs.h> | ||
| 7 | #include <linux/pagemap.h> | ||
| 8 | #include <linux/module.h> | ||
| 9 | #include <asm/cacheflush.h> | ||
| 10 | #include <asm/proc-fns.h> | ||
| 11 | #include <asm/shmparam.h> | ||
| 12 | #include <asm/cache_info.h> | ||
| 13 | |||
| 14 | extern struct cache_info L1_cache_info[2]; | ||
| 15 | |||
| 16 | #ifndef CONFIG_CPU_CACHE_ALIASING | ||
| 17 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, | ||
| 18 | pte_t * pte) | ||
| 19 | { | ||
| 20 | struct page *page; | ||
| 21 | unsigned long pfn = pte_pfn(*pte); | ||
| 22 | unsigned long flags; | ||
| 23 | |||
| 24 | if (!pfn_valid(pfn)) | ||
| 25 | return; | ||
| 26 | |||
| 27 | if (vma->vm_mm == current->active_mm) { | ||
| 28 | local_irq_save(flags); | ||
| 29 | __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN); | ||
| 30 | __nds32__tlbop_rwr(*pte); | ||
| 31 | __nds32__isb(); | ||
| 32 | local_irq_restore(flags); | ||
| 33 | } | ||
| 34 | page = pfn_to_page(pfn); | ||
| 35 | |||
| 36 | if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) || | ||
| 37 | (vma->vm_flags & VM_EXEC)) { | ||
| 38 | |||
| 39 | if (!PageHighMem(page)) { | ||
| 40 | cpu_cache_wbinval_page((unsigned long) | ||
| 41 | page_address(page), | ||
| 42 | vma->vm_flags & VM_EXEC); | ||
| 43 | } else { | ||
| 44 | unsigned long kaddr = (unsigned long)kmap_atomic(page); | ||
| 45 | cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC); | ||
| 46 | kunmap_atomic((void *)kaddr); | ||
| 47 | } | ||
| 48 | } | ||
| 49 | } | ||
| 50 | #else | ||
| 51 | extern pte_t va_present(struct mm_struct *mm, unsigned long addr); | ||
| 52 | |||
| 53 | static inline unsigned long aliasing(unsigned long addr, unsigned long page) | ||
| 54 | { | ||
| 55 | return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1); | ||
| 56 | } | ||
| 57 | |||
| 58 | static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa) | ||
| 59 | { | ||
| 60 | unsigned long kaddr, pte; | ||
| 61 | |||
| 62 | #define BASE_ADDR0 0xffffc000 | ||
| 63 | kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask); | ||
| 64 | pte = (pa | PAGE_KERNEL); | ||
| 65 | __nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN); | ||
| 66 | __nds32__tlbop_rwlk(pte); | ||
| 67 | __nds32__isb(); | ||
| 68 | return kaddr; | ||
| 69 | } | ||
| 70 | |||
| 71 | static inline void kunmap01(unsigned long kaddr) | ||
| 72 | { | ||
| 73 | __nds32__tlbop_unlk(kaddr); | ||
| 74 | __nds32__tlbop_inv(kaddr); | ||
| 75 | __nds32__isb(); | ||
| 76 | } | ||
| 77 | |||
| 78 | static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa) | ||
| 79 | { | ||
| 80 | unsigned long kaddr, pte; | ||
| 81 | |||
| 82 | #define BASE_ADDR1 0xffff8000 | ||
| 83 | kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask); | ||
| 84 | pte = (pa | PAGE_KERNEL); | ||
| 85 | __nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN); | ||
| 86 | __nds32__tlbop_rwlk(pte); | ||
| 87 | __nds32__isb(); | ||
| 88 | return kaddr; | ||
| 89 | } | ||
| 90 | |||
| 91 | void flush_cache_mm(struct mm_struct *mm) | ||
| 92 | { | ||
| 93 | unsigned long flags; | ||
| 94 | |||
| 95 | local_irq_save(flags); | ||
| 96 | cpu_dcache_wbinval_all(); | ||
| 97 | cpu_icache_inval_all(); | ||
| 98 | local_irq_restore(flags); | ||
| 99 | } | ||
| 100 | |||
| 101 | void flush_cache_dup_mm(struct mm_struct *mm) | ||
| 102 | { | ||
| 103 | } | ||
| 104 | |||
| 105 | void flush_cache_range(struct vm_area_struct *vma, | ||
| 106 | unsigned long start, unsigned long end) | ||
| 107 | { | ||
| 108 | unsigned long flags; | ||
| 109 | |||
| 110 | if ((end - start) > 8 * PAGE_SIZE) { | ||
| 111 | cpu_dcache_wbinval_all(); | ||
| 112 | if (vma->vm_flags & VM_EXEC) | ||
| 113 | cpu_icache_inval_all(); | ||
| 114 | return; | ||
| 115 | } | ||
| 116 | local_irq_save(flags); | ||
| 117 | while (start < end) { | ||
| 118 | if (va_present(vma->vm_mm, start)) | ||
| 119 | cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC); | ||
| 120 | start += PAGE_SIZE; | ||
| 121 | } | ||
| 122 | local_irq_restore(flags); | ||
| 123 | return; | ||
| 124 | } | ||
| 125 | |||
| 126 | void flush_cache_page(struct vm_area_struct *vma, | ||
| 127 | unsigned long addr, unsigned long pfn) | ||
| 128 | { | ||
| 129 | unsigned long vto, flags; | ||
| 130 | |||
| 131 | local_irq_save(flags); | ||
| 132 | vto = kremap0(addr, pfn << PAGE_SHIFT); | ||
| 133 | cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC); | ||
| 134 | kunmap01(vto); | ||
| 135 | local_irq_restore(flags); | ||
| 136 | } | ||
| 137 | |||
| 138 | void flush_cache_vmap(unsigned long start, unsigned long end) | ||
| 139 | { | ||
| 140 | cpu_dcache_wbinval_all(); | ||
| 141 | cpu_icache_inval_all(); | ||
| 142 | } | ||
| 143 | |||
| 144 | void flush_cache_vunmap(unsigned long start, unsigned long end) | ||
| 145 | { | ||
| 146 | cpu_dcache_wbinval_all(); | ||
| 147 | cpu_icache_inval_all(); | ||
| 148 | } | ||
| 149 | |||
| 150 | void copy_user_highpage(struct page *to, struct page *from, | ||
| 151 | unsigned long vaddr, struct vm_area_struct *vma) | ||
| 152 | { | ||
| 153 | unsigned long vto, vfrom, flags, kto, kfrom, pfrom, pto; | ||
| 154 | kto = ((unsigned long)page_address(to) & PAGE_MASK); | ||
| 155 | kfrom = ((unsigned long)page_address(from) & PAGE_MASK); | ||
| 156 | pto = page_to_phys(to); | ||
| 157 | pfrom = page_to_phys(from); | ||
| 158 | |||
| 159 | if (aliasing(vaddr, (unsigned long)kfrom)) | ||
| 160 | cpu_dcache_wb_page((unsigned long)kfrom); | ||
| 161 | if (aliasing(vaddr, (unsigned long)kto)) | ||
| 162 | cpu_dcache_inval_page((unsigned long)kto); | ||
| 163 | local_irq_save(flags); | ||
| 164 | vto = kremap0(vaddr, pto); | ||
| 165 | vfrom = kremap1(vaddr, pfrom); | ||
| 166 | copy_page((void *)vto, (void *)vfrom); | ||
| 167 | kunmap01(vfrom); | ||
| 168 | kunmap01(vto); | ||
| 169 | local_irq_restore(flags); | ||
| 170 | } | ||
| 171 | |||
| 172 | EXPORT_SYMBOL(copy_user_highpage); | ||
| 173 | |||
| 174 | void clear_user_highpage(struct page *page, unsigned long vaddr) | ||
| 175 | { | ||
| 176 | unsigned long vto, flags, kto; | ||
| 177 | |||
| 178 | kto = ((unsigned long)page_address(page) & PAGE_MASK); | ||
| 179 | |||
| 180 | local_irq_save(flags); | ||
| 181 | if (aliasing(kto, vaddr) && kto != 0) { | ||
| 182 | cpu_dcache_inval_page(kto); | ||
| 183 | cpu_icache_inval_page(kto); | ||
| 184 | } | ||
| 185 | vto = kremap0(vaddr, page_to_phys(page)); | ||
| 186 | clear_page((void *)vto); | ||
| 187 | kunmap01(vto); | ||
| 188 | local_irq_restore(flags); | ||
| 189 | } | ||
| 190 | |||
| 191 | EXPORT_SYMBOL(clear_user_highpage); | ||
| 192 | |||
| 193 | void flush_dcache_page(struct page *page) | ||
| 194 | { | ||
| 195 | struct address_space *mapping; | ||
| 196 | |||
| 197 | mapping = page_mapping(page); | ||
| 198 | if (mapping && !mapping_mapped(mapping)) | ||
| 199 | set_bit(PG_dcache_dirty, &page->flags); | ||
| 200 | else { | ||
| 201 | int i, pc; | ||
| 202 | unsigned long vto, kaddr, flags; | ||
| 203 | kaddr = (unsigned long)page_address(page); | ||
| 204 | cpu_dcache_wbinval_page(kaddr); | ||
| 205 | pc = CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE) / PAGE_SIZE; | ||
| 206 | local_irq_save(flags); | ||
| 207 | for (i = 0; i < pc; i++) { | ||
| 208 | vto = | ||
| 209 | kremap0(kaddr + i * PAGE_SIZE, page_to_phys(page)); | ||
| 210 | cpu_dcache_wbinval_page(vto); | ||
| 211 | kunmap01(vto); | ||
| 212 | } | ||
| 213 | local_irq_restore(flags); | ||
| 214 | } | ||
| 215 | } | ||
| 216 | |||
| 217 | void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | ||
| 218 | unsigned long vaddr, void *dst, void *src, int len) | ||
| 219 | { | ||
| 220 | unsigned long line_size, start, end, vto, flags; | ||
| 221 | |||
| 222 | local_irq_save(flags); | ||
| 223 | vto = kremap0(vaddr, page_to_phys(page)); | ||
| 224 | dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1))); | ||
| 225 | memcpy(dst, src, len); | ||
| 226 | if (vma->vm_flags & VM_EXEC) { | ||
| 227 | line_size = L1_cache_info[DCACHE].line_size; | ||
| 228 | start = (unsigned long)dst & ~(line_size - 1); | ||
| 229 | end = | ||
| 230 | ((unsigned long)dst + len + line_size - 1) & ~(line_size - | ||
| 231 | 1); | ||
| 232 | cpu_cache_wbinval_range(start, end, 1); | ||
| 233 | } | ||
| 234 | kunmap01(vto); | ||
| 235 | local_irq_restore(flags); | ||
| 236 | } | ||
| 237 | |||
| 238 | void copy_from_user_page(struct vm_area_struct *vma, struct page *page, | ||
| 239 | unsigned long vaddr, void *dst, void *src, int len) | ||
| 240 | { | ||
| 241 | unsigned long vto, flags; | ||
| 242 | |||
| 243 | local_irq_save(flags); | ||
| 244 | vto = kremap0(vaddr, page_to_phys(page)); | ||
| 245 | src = (void *)(vto | (vaddr & (PAGE_SIZE - 1))); | ||
| 246 | memcpy(dst, src, len); | ||
| 247 | kunmap01(vto); | ||
| 248 | local_irq_restore(flags); | ||
| 249 | } | ||
| 250 | |||
| 251 | void flush_anon_page(struct vm_area_struct *vma, | ||
| 252 | struct page *page, unsigned long vaddr) | ||
| 253 | { | ||
| 254 | unsigned long flags; | ||
| 255 | if (!PageAnon(page)) | ||
| 256 | return; | ||
| 257 | |||
| 258 | if (vma->vm_mm != current->active_mm) | ||
| 259 | return; | ||
| 260 | |||
| 261 | local_irq_save(flags); | ||
| 262 | if (vma->vm_flags & VM_EXEC) | ||
| 263 | cpu_icache_inval_page(vaddr & PAGE_MASK); | ||
| 264 | cpu_dcache_wbinval_page((unsigned long)page_address(page)); | ||
| 265 | local_irq_restore(flags); | ||
| 266 | } | ||
| 267 | |||
| 268 | void flush_kernel_dcache_page(struct page *page) | ||
| 269 | { | ||
| 270 | unsigned long flags; | ||
| 271 | local_irq_save(flags); | ||
| 272 | cpu_dcache_wbinval_page((unsigned long)page_address(page)); | ||
| 273 | local_irq_restore(flags); | ||
| 274 | } | ||
| 275 | |||
| 276 | void flush_icache_range(unsigned long start, unsigned long end) | ||
| 277 | { | ||
| 278 | unsigned long line_size, flags; | ||
| 279 | line_size = L1_cache_info[DCACHE].line_size; | ||
| 280 | start = start & ~(line_size - 1); | ||
| 281 | end = (end + line_size - 1) & ~(line_size - 1); | ||
| 282 | local_irq_save(flags); | ||
| 283 | cpu_cache_wbinval_range(start, end, 1); | ||
| 284 | local_irq_restore(flags); | ||
| 285 | } | ||
| 286 | |||
| 287 | void flush_icache_page(struct vm_area_struct *vma, struct page *page) | ||
| 288 | { | ||
| 289 | unsigned long flags; | ||
| 290 | local_irq_save(flags); | ||
| 291 | cpu_cache_wbinval_page((unsigned long)page_address(page), | ||
| 292 | vma->vm_flags & VM_EXEC); | ||
| 293 | local_irq_restore(flags); | ||
| 294 | } | ||
| 295 | |||
| 296 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, | ||
| 297 | pte_t * pte) | ||
| 298 | { | ||
| 299 | struct page *page; | ||
| 300 | unsigned long flags; | ||
| 301 | unsigned long pfn = pte_pfn(*pte); | ||
| 302 | |||
| 303 | if (!pfn_valid(pfn)) | ||
| 304 | return; | ||
| 305 | |||
| 306 | if (vma->vm_mm == current->active_mm) { | ||
| 307 | local_irq_save(flags); | ||
| 308 | __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN); | ||
| 309 | __nds32__tlbop_rwr(*pte); | ||
| 310 | __nds32__isb(); | ||
| 311 | local_irq_restore(flags); | ||
| 312 | } | ||
| 313 | |||
| 314 | page = pfn_to_page(pfn); | ||
| 315 | if (test_and_clear_bit(PG_dcache_dirty, &page->flags) || | ||
| 316 | (vma->vm_flags & VM_EXEC)) { | ||
| 317 | local_irq_save(flags); | ||
| 318 | cpu_dcache_wbinval_page((unsigned long)page_address(page)); | ||
| 319 | local_irq_restore(flags); | ||
| 320 | } | ||
| 321 | } | ||
| 322 | #endif | ||
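The helpers above (kremap0()/kremap1() and the flush_* entry points) exist because this port supports VIPT-aliasing D-caches: one physical page mapped at two virtual addresses can occupy two different cache colours, so the kernel either flushes or builds a temporary, colour-matched kernel alias through a locked TLB entry before touching user pages. The aliasing() predicate and L1_cache_info[DCACHE].aliasing_mask are defined elsewhere in this patch; the stand-alone sketch below only illustrates the colour test, using an assumed cache geometry rather than the values probed at boot.

#include <stdio.h>

#define PAGE_SIZE      4096UL
#define DCACHE_WAYSIZE 16384UL                      /* assumed 16 KiB per way, illustration only */
#define ALIASING_MASK  (DCACHE_WAYSIZE - PAGE_SIZE) /* colour bits above the page offset */

/* Two virtual mappings of one physical page can hold stale copies in a
 * VIPT data cache only when they land on different cache colours. */
static int colours_differ(unsigned long va1, unsigned long va2)
{
        return (va1 & ALIASING_MASK) != (va2 & ALIASING_MASK);
}

int main(void)
{
        printf("%d\n", colours_differ(0x00011000UL, 0xc0012000UL)); /* 1: different colour */
        printf("%d\n", colours_differ(0x00011000UL, 0xc0015000UL)); /* 0: same colour */
        return 0;
}

When the colours match, copy_user_highpage() above skips the extra write-back/invalidate and relies solely on the locked aliases created by kremap0()/kremap1().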
diff --git a/arch/nds32/mm/extable.c b/arch/nds32/mm/extable.c new file mode 100644 index 000000000000..db7f0a7c8966 --- /dev/null +++ b/arch/nds32/mm/extable.c | |||
| @@ -0,0 +1,16 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/extable.h> | ||
| 5 | #include <linux/uaccess.h> | ||
| 6 | |||
| 7 | int fixup_exception(struct pt_regs *regs) | ||
| 8 | { | ||
| 9 | const struct exception_table_entry *fixup; | ||
| 10 | |||
| 11 | fixup = search_exception_tables(instruction_pointer(regs)); | ||
| 12 | if (fixup) | ||
| 13 | regs->ipc = fixup->fixup; | ||
| 14 | |||
| 15 | return fixup != NULL; | ||
| 16 | } | ||
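fixup_exception() is the whole of this port's exception-table handling: if the faulting instruction address appears in the table that the uaccess macros emit at build time, execution is redirected by rewriting regs->ipc to the matching fixup stub. The stand-alone sketch below models only the lookup idea; the entry addresses are hypothetical, and the real search_exception_tables() bisects a sorted table rather than scanning it linearly.

#include <stdio.h>

/* Simplified stand-in for struct exception_table_entry: a potentially
 * faulting instruction address paired with the address of its fixup code. */
struct ex_entry {
        unsigned long insn;
        unsigned long fixup;
};

static const struct ex_entry table[] = {
        { 0x8000b0f0UL, 0x8000c200UL },   /* hypothetical uaccess load and its fixup */
        { 0x8000b1a4UL, 0x8000c220UL },
};

/* Linear-search analogue of search_exception_tables(). */
static unsigned long find_fixup(unsigned long ip)
{
        for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (table[i].insn == ip)
                        return table[i].fixup;
        return 0;
}

int main(void)
{
        /* On a match, fixup_exception() above rewrites regs->ipc to this value. */
        printf("fixup = 0x%lx\n", find_fixup(0x8000b0f0UL));
        return 0;
}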
diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c new file mode 100644 index 000000000000..3a246fb8098c --- /dev/null +++ b/arch/nds32/mm/fault.c | |||
| @@ -0,0 +1,410 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/extable.h> | ||
| 5 | #include <linux/module.h> | ||
| 6 | #include <linux/signal.h> | ||
| 7 | #include <linux/ptrace.h> | ||
| 8 | #include <linux/mm.h> | ||
| 9 | #include <linux/init.h> | ||
| 10 | #include <linux/hardirq.h> | ||
| 11 | #include <linux/uaccess.h> | ||
| 12 | |||
| 13 | #include <asm/pgtable.h> | ||
| 14 | #include <asm/tlbflush.h> | ||
| 15 | |||
| 16 | extern void die(const char *str, struct pt_regs *regs, long err); | ||
| 17 | |||
| 18 | /* | ||
| 19 | * This is useful to dump out the page tables associated with | ||
| 20 | * 'addr' in mm 'mm'. | ||
| 21 | */ | ||
| 22 | void show_pte(struct mm_struct *mm, unsigned long addr) | ||
| 23 | { | ||
| 24 | pgd_t *pgd; | ||
| 25 | if (!mm) | ||
| 26 | mm = &init_mm; | ||
| 27 | |||
| 28 | pr_alert("pgd = %p\n", mm->pgd); | ||
| 29 | pgd = pgd_offset(mm, addr); | ||
| 30 | pr_alert("[%08lx] *pgd=%08lx", addr, pgd_val(*pgd)); | ||
| 31 | |||
| 32 | do { | ||
| 33 | pmd_t *pmd; | ||
| 34 | |||
| 35 | if (pgd_none(*pgd)) | ||
| 36 | break; | ||
| 37 | |||
| 38 | if (pgd_bad(*pgd)) { | ||
| 39 | pr_alert("(bad)"); | ||
| 40 | break; | ||
| 41 | } | ||
| 42 | |||
| 43 | pmd = pmd_offset(pgd, addr); | ||
| 44 | #if PTRS_PER_PMD != 1 | ||
| 45 | pr_alert(", *pmd=%08lx", pmd_val(*pmd)); | ||
| 46 | #endif | ||
| 47 | |||
| 48 | if (pmd_none(*pmd)) | ||
| 49 | break; | ||
| 50 | |||
| 51 | if (pmd_bad(*pmd)) { | ||
| 52 | pr_alert("(bad)"); | ||
| 53 | break; | ||
| 54 | } | ||
| 55 | |||
| 56 | if (IS_ENABLED(CONFIG_HIGHMEM)) | ||
| 57 | { | ||
| 58 | pte_t *pte; | ||
| 59 | /* We must not map this if we have highmem enabled */ | ||
| 60 | pte = pte_offset_map(pmd, addr); | ||
| 61 | pr_alert(", *pte=%08lx", pte_val(*pte)); | ||
| 62 | pte_unmap(pte); | ||
| 63 | } | ||
| 64 | } while (0); | ||
| 65 | |||
| 66 | pr_alert("\n"); | ||
| 67 | } | ||
| 68 | |||
| 69 | void do_page_fault(unsigned long entry, unsigned long addr, | ||
| 70 | unsigned int error_code, struct pt_regs *regs) | ||
| 71 | { | ||
| 72 | struct task_struct *tsk; | ||
| 73 | struct mm_struct *mm; | ||
| 74 | struct vm_area_struct *vma; | ||
| 75 | siginfo_t info; | ||
| 76 | int fault; | ||
| 77 | unsigned int mask = VM_READ | VM_WRITE | VM_EXEC; | ||
| 78 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; | ||
| 79 | |||
| 80 | error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE); | ||
| 81 | tsk = current; | ||
| 82 | mm = tsk->mm; | ||
| 83 | info.si_code = SEGV_MAPERR; | ||
| 84 | /* | ||
| 85 | * We fault-in kernel-space virtual memory on-demand. The | ||
| 86 | * 'reference' page table is init_mm.pgd. | ||
| 87 | * | ||
| 88 | * NOTE! We MUST NOT take any locks for this case. We may | ||
| 89 | * be in an interrupt or a critical region, and should | ||
| 90 | * only copy the information from the master page table, | ||
| 91 | * nothing more. | ||
| 92 | */ | ||
| 93 | if (addr >= TASK_SIZE) { | ||
| 94 | if (user_mode(regs)) | ||
| 95 | goto bad_area_nosemaphore; | ||
| 96 | |||
| 97 | if (addr >= TASK_SIZE && addr < VMALLOC_END | ||
| 98 | && (entry == ENTRY_PTE_NOT_PRESENT)) | ||
| 99 | goto vmalloc_fault; | ||
| 100 | else | ||
| 101 | goto no_context; | ||
| 102 | } | ||
| 103 | |||
| 104 | /* Send a signal to the task to handle the unaligned access. */ | ||
| 105 | if (entry == ENTRY_GENERAL_EXCPETION | ||
| 106 | && error_code == ETYPE_ALIGNMENT_CHECK) { | ||
| 107 | if (user_mode(regs)) | ||
| 108 | goto bad_area_nosemaphore; | ||
| 109 | else | ||
| 110 | goto no_context; | ||
| 111 | } | ||
| 112 | |||
| 113 | /* | ||
| 114 | * If we're in an interrupt or have no user | ||
| 115 | * context, we must not take the fault.. | ||
| 116 | */ | ||
| 117 | if (unlikely(faulthandler_disabled() || !mm)) | ||
| 118 | goto no_context; | ||
| 119 | |||
| 120 | /* | ||
| 121 | * As per x86, we may deadlock here. However, since the kernel only | ||
| 122 | * validly references user space from well defined areas of the code, | ||
| 123 | * we can bug out early if this is from code which shouldn't. | ||
| 124 | */ | ||
| 125 | if (unlikely(!down_read_trylock(&mm->mmap_sem))) { | ||
| 126 | if (!user_mode(regs) && | ||
| 127 | !search_exception_tables(instruction_pointer(regs))) | ||
| 128 | goto no_context; | ||
| 129 | retry: | ||
| 130 | down_read(&mm->mmap_sem); | ||
| 131 | } else { | ||
| 132 | /* | ||
| 133 | * The above down_read_trylock() might have succeeded in which | ||
| 134 | * case, we'll have missed the might_sleep() from down_read(). | ||
| 135 | */ | ||
| 136 | might_sleep(); | ||
| 137 | if (IS_ENABLED(CONFIG_DEBUG_VM)) { | ||
| 138 | if (!user_mode(regs) && | ||
| 139 | !search_exception_tables(instruction_pointer(regs))) | ||
| 140 | goto no_context; | ||
| 141 | } | ||
| 142 | } | ||
| 143 | |||
| 144 | vma = find_vma(mm, addr); | ||
| 145 | |||
| 146 | if (unlikely(!vma)) | ||
| 147 | goto bad_area; | ||
| 148 | |||
| 149 | if (vma->vm_start <= addr) | ||
| 150 | goto good_area; | ||
| 151 | |||
| 152 | if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) | ||
| 153 | goto bad_area; | ||
| 154 | |||
| 155 | if (unlikely(expand_stack(vma, addr))) | ||
| 156 | goto bad_area; | ||
| 157 | |||
| 158 | /* | ||
| 159 | * Ok, we have a good vm_area for this memory access, so | ||
| 160 | * we can handle it.. | ||
| 161 | */ | ||
| 162 | |||
| 163 | good_area: | ||
| 164 | info.si_code = SEGV_ACCERR; | ||
| 165 | |||
| 166 | /* first do some preliminary protection checks */ | ||
| 167 | if (entry == ENTRY_PTE_NOT_PRESENT) { | ||
| 168 | if (error_code & ITYPE_mskINST) | ||
| 169 | mask = VM_EXEC; | ||
| 170 | else { | ||
| 171 | mask = VM_READ | VM_WRITE; | ||
| 172 | if (vma->vm_flags & VM_WRITE) | ||
| 173 | flags |= FAULT_FLAG_WRITE; | ||
| 174 | } | ||
| 175 | } else if (entry == ENTRY_TLB_MISC) { | ||
| 176 | switch (error_code & ITYPE_mskETYPE) { | ||
| 177 | case RD_PROT: | ||
| 178 | mask = VM_READ; | ||
| 179 | break; | ||
| 180 | case WRT_PROT: | ||
| 181 | mask = VM_WRITE; | ||
| 182 | flags |= FAULT_FLAG_WRITE; | ||
| 183 | break; | ||
| 184 | case NOEXEC: | ||
| 185 | mask = VM_EXEC; | ||
| 186 | break; | ||
| 187 | case PAGE_MODIFY: | ||
| 188 | mask = VM_WRITE; | ||
| 189 | flags |= FAULT_FLAG_WRITE; | ||
| 190 | break; | ||
| 191 | case ACC_BIT: | ||
| 192 | BUG(); | ||
| 193 | default: | ||
| 194 | break; | ||
| 195 | } | ||
| 196 | |||
| 197 | } | ||
| 198 | if (!(vma->vm_flags & mask)) | ||
| 199 | goto bad_area; | ||
| 200 | |||
| 201 | /* | ||
| 202 | * If for any reason at all we couldn't handle the fault, | ||
| 203 | * make sure we exit gracefully rather than endlessly redo | ||
| 204 | * the fault. | ||
| 205 | */ | ||
| 206 | |||
| 207 | fault = handle_mm_fault(vma, addr, flags); | ||
| 208 | |||
| 209 | /* | ||
| 210 | * If we need to retry but a fatal signal is pending, handle the | ||
| 211 | * signal first. We do not need to release the mmap_sem because it | ||
| 212 | * would already be released in __lock_page_or_retry in mm/filemap.c. | ||
| 213 | */ | ||
| 214 | if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) { | ||
| 215 | if (!user_mode(regs)) | ||
| 216 | goto no_context; | ||
| 217 | return; | ||
| 218 | } | ||
| 219 | |||
| 220 | if (unlikely(fault & VM_FAULT_ERROR)) { | ||
| 221 | if (fault & VM_FAULT_OOM) | ||
| 222 | goto out_of_memory; | ||
| 223 | else if (fault & VM_FAULT_SIGBUS) | ||
| 224 | goto do_sigbus; | ||
| 225 | else | ||
| 226 | goto bad_area; | ||
| 227 | } | ||
| 228 | |||
| 229 | /* | ||
| 230 | * Major/minor page fault accounting is only done on the initial | ||
| 231 | * attempt. If we go through a retry, it is extremely likely that the | ||
| 232 | * page will be found in page cache at that point. | ||
| 233 | */ | ||
| 234 | if (flags & FAULT_FLAG_ALLOW_RETRY) { | ||
| 235 | if (fault & VM_FAULT_MAJOR) | ||
| 236 | tsk->maj_flt++; | ||
| 237 | else | ||
| 238 | tsk->min_flt++; | ||
| 239 | if (fault & VM_FAULT_RETRY) { | ||
| 240 | flags &= ~FAULT_FLAG_ALLOW_RETRY; | ||
| 241 | flags |= FAULT_FLAG_TRIED; | ||
| 242 | |||
| 243 | /* No need to up_read(&mm->mmap_sem) as we would | ||
| 244 | * have already released it in __lock_page_or_retry | ||
| 245 | * in mm/filemap.c. | ||
| 246 | */ | ||
| 247 | goto retry; | ||
| 248 | } | ||
| 249 | } | ||
| 250 | |||
| 251 | up_read(&mm->mmap_sem); | ||
| 252 | return; | ||
| 253 | |||
| 254 | /* | ||
| 255 | * Something tried to access memory that isn't in our memory map.. | ||
| 256 | * Fix it, but check if it's kernel or user first.. | ||
| 257 | */ | ||
| 258 | bad_area: | ||
| 259 | up_read(&mm->mmap_sem); | ||
| 260 | |||
| 261 | bad_area_nosemaphore: | ||
| 262 | |||
| 263 | /* User mode accesses just cause a SIGSEGV */ | ||
| 264 | |||
| 265 | if (user_mode(regs)) { | ||
| 266 | tsk->thread.address = addr; | ||
| 267 | tsk->thread.error_code = error_code; | ||
| 268 | tsk->thread.trap_no = entry; | ||
| 269 | info.si_signo = SIGSEGV; | ||
| 270 | info.si_errno = 0; | ||
| 271 | /* info.si_code has been set above */ | ||
| 272 | info.si_addr = (void *)addr; | ||
| 273 | force_sig_info(SIGSEGV, &info, tsk); | ||
| 274 | return; | ||
| 275 | } | ||
| 276 | |||
| 277 | no_context: | ||
| 278 | |||
| 279 | /* Are we prepared to handle this kernel fault? | ||
| 280 | * | ||
| 281 | * (The kernel has valid exception-points in the source | ||
| 282 | * when it accesses user-memory. When it fails at one | ||
| 283 | * of those points, we find it in a table and do a jump | ||
| 284 | * to some fixup code that loads an appropriate error | ||
| 285 | * code) | ||
| 286 | */ | ||
| 287 | |||
| 288 | { | ||
| 289 | const struct exception_table_entry *entry; | ||
| 290 | |||
| 291 | if ((entry = | ||
| 292 | search_exception_tables(instruction_pointer(regs))) != | ||
| 293 | NULL) { | ||
| 294 | /* Adjust the instruction pointer in the stackframe */ | ||
| 295 | instruction_pointer(regs) = entry->fixup; | ||
| 296 | return; | ||
| 297 | } | ||
| 298 | } | ||
| 299 | |||
| 300 | /* | ||
| 301 | * Oops. The kernel tried to access some bad page. We'll have to | ||
| 302 | * terminate things with extreme prejudice. | ||
| 303 | */ | ||
| 304 | |||
| 305 | bust_spinlocks(1); | ||
| 306 | pr_alert("Unable to handle kernel %s at virtual address %08lx\n", | ||
| 307 | (addr < PAGE_SIZE) ? "NULL pointer dereference" : | ||
| 308 | "paging request", addr); | ||
| 309 | |||
| 310 | show_pte(mm, addr); | ||
| 311 | die("Oops", regs, error_code); | ||
| 312 | bust_spinlocks(0); | ||
| 313 | do_exit(SIGKILL); | ||
| 314 | |||
| 315 | return; | ||
| 316 | |||
| 317 | /* | ||
| 318 | * We ran out of memory, or some other thing happened to us that made | ||
| 319 | * us unable to handle the page fault gracefully. | ||
| 320 | */ | ||
| 321 | |||
| 322 | out_of_memory: | ||
| 323 | up_read(&mm->mmap_sem); | ||
| 324 | if (!user_mode(regs)) | ||
| 325 | goto no_context; | ||
| 326 | pagefault_out_of_memory(); | ||
| 327 | return; | ||
| 328 | |||
| 329 | do_sigbus: | ||
| 330 | up_read(&mm->mmap_sem); | ||
| 331 | |||
| 332 | /* Kernel mode? Handle exceptions or die */ | ||
| 333 | if (!user_mode(regs)) | ||
| 334 | goto no_context; | ||
| 335 | |||
| 336 | /* | ||
| 337 | * Send a SIGBUS | ||
| 338 | */ | ||
| 339 | tsk->thread.address = addr; | ||
| 340 | tsk->thread.error_code = error_code; | ||
| 341 | tsk->thread.trap_no = entry; | ||
| 342 | info.si_signo = SIGBUS; | ||
| 343 | info.si_errno = 0; | ||
| 344 | info.si_code = BUS_ADRERR; | ||
| 345 | info.si_addr = (void *)addr; | ||
| 346 | force_sig_info(SIGBUS, &info, tsk); | ||
| 347 | |||
| 348 | return; | ||
| 349 | |||
| 350 | vmalloc_fault: | ||
| 351 | { | ||
| 352 | /* | ||
| 353 | * Synchronize this task's top level page-table | ||
| 354 | * with the 'reference' page table. | ||
| 355 | * | ||
| 356 | * Use current_pgd instead of tsk->active_mm->pgd | ||
| 357 | * since the latter might be unavailable if this | ||
| 358 | * code is executed in a misfortunately run irq | ||
| 359 | * (like inside schedule() between switch_mm and | ||
| 360 | * switch_to...). | ||
| 361 | */ | ||
| 362 | |||
| 363 | unsigned int index = pgd_index(addr); | ||
| 364 | pgd_t *pgd, *pgd_k; | ||
| 365 | pud_t *pud, *pud_k; | ||
| 366 | pmd_t *pmd, *pmd_k; | ||
| 367 | pte_t *pte_k; | ||
| 368 | |||
| 369 | pgd = (pgd_t *) __va(__nds32__mfsr(NDS32_SR_L1_PPTB)) + index; | ||
| 370 | pgd_k = init_mm.pgd + index; | ||
| 371 | |||
| 372 | if (!pgd_present(*pgd_k)) | ||
| 373 | goto no_context; | ||
| 374 | |||
| 375 | pud = pud_offset(pgd, addr); | ||
| 376 | pud_k = pud_offset(pgd_k, addr); | ||
| 377 | if (!pud_present(*pud_k)) | ||
| 378 | goto no_context; | ||
| 379 | |||
| 380 | pmd = pmd_offset(pud, addr); | ||
| 381 | pmd_k = pmd_offset(pud_k, addr); | ||
| 382 | if (!pmd_present(*pmd_k)) | ||
| 383 | goto no_context; | ||
| 384 | |||
| 385 | if (!pmd_present(*pmd)) | ||
| 386 | set_pmd(pmd, *pmd_k); | ||
| 387 | else | ||
| 388 | BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); | ||
| 389 | |||
| 390 | /* | ||
| 391 | * Since the vmalloc area is global, we don't | ||
| 392 | * need to copy individual PTE's, it is enough to | ||
| 393 | * copy the pgd pointer into the pte page of the | ||
| 394 | * root task. If that is there, we'll find our pte if | ||
| 395 | * it exists. | ||
| 396 | */ | ||
| 397 | |||
| 398 | /* Make sure the actual PTE exists as well to | ||
| 399 | * catch kernel vmalloc-area accesses to non-mapped | ||
| 400 | * address. If we don't do this, this will just | ||
| 401 | * silently loop forever. | ||
| 402 | */ | ||
| 403 | |||
| 404 | pte_k = pte_offset_kernel(pmd_k, addr); | ||
| 405 | if (!pte_present(*pte_k)) | ||
| 406 | goto no_context; | ||
| 407 | |||
| 408 | return; | ||
| 409 | } | ||
| 410 | } | ||
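The user-visible half of the SIGSEGV path above can be observed from an ordinary program: do_page_fault() fills si_addr with the faulting address and si_code with SEGV_MAPERR or SEGV_ACCERR before force_sig_info(). The small POSIX test program below is a demonstration only; nothing in it is nds32-specific, and calling printf() from a signal handler is tolerated here purely for brevity.

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *info, void *ctx)
{
        (void)sig; (void)ctx;
        /* si_addr and si_code are the fields the fault handler fills in;
         * SEGV_MAPERR means "address not mapped to object". */
        printf("SIGSEGV at %p, si_code=%d\n", info->si_addr, info->si_code);
        _exit(0);
}

int main(void)
{
        struct sigaction sa = { 0 };

        sa.sa_sigaction = segv_handler;
        sa.sa_flags = SA_SIGINFO;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, NULL);

        *(volatile int *)0x10 = 1;      /* unmapped address: faults, kernel sends SIGSEGV */
        return 1;
}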
diff --git a/arch/nds32/mm/highmem.c b/arch/nds32/mm/highmem.c new file mode 100644 index 000000000000..e17cb8a69315 --- /dev/null +++ b/arch/nds32/mm/highmem.c | |||
| @@ -0,0 +1,79 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/export.h> | ||
| 5 | #include <linux/highmem.h> | ||
| 6 | #include <linux/sched.h> | ||
| 7 | #include <linux/smp.h> | ||
| 8 | #include <linux/interrupt.h> | ||
| 9 | #include <linux/bootmem.h> | ||
| 10 | #include <asm/fixmap.h> | ||
| 11 | #include <asm/tlbflush.h> | ||
| 12 | |||
| 13 | void *kmap(struct page *page) | ||
| 14 | { | ||
| 15 | unsigned long vaddr; | ||
| 16 | might_sleep(); | ||
| 17 | if (!PageHighMem(page)) | ||
| 18 | return page_address(page); | ||
| 19 | vaddr = (unsigned long)kmap_high(page); | ||
| 20 | return (void *)vaddr; | ||
| 21 | } | ||
| 22 | |||
| 23 | EXPORT_SYMBOL(kmap); | ||
| 24 | |||
| 25 | void kunmap(struct page *page) | ||
| 26 | { | ||
| 27 | BUG_ON(in_interrupt()); | ||
| 28 | if (!PageHighMem(page)) | ||
| 29 | return; | ||
| 30 | kunmap_high(page); | ||
| 31 | } | ||
| 32 | |||
| 33 | EXPORT_SYMBOL(kunmap); | ||
| 34 | |||
| 35 | void *kmap_atomic(struct page *page) | ||
| 36 | { | ||
| 37 | unsigned int idx; | ||
| 38 | unsigned long vaddr, pte; | ||
| 39 | int type; | ||
| 40 | pte_t *ptep; | ||
| 41 | |||
| 42 | preempt_disable(); | ||
| 43 | pagefault_disable(); | ||
| 44 | if (!PageHighMem(page)) | ||
| 45 | return page_address(page); | ||
| 46 | |||
| 47 | type = kmap_atomic_idx_push(); | ||
| 48 | |||
| 49 | idx = type + KM_TYPE_NR * smp_processor_id(); | ||
| 50 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
| 51 | pte = (page_to_pfn(page) << PAGE_SHIFT) | (PAGE_KERNEL); | ||
| 52 | ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr); | ||
| 53 | set_pte(ptep, pte); | ||
| 54 | |||
| 55 | __nds32__tlbop_inv(vaddr); | ||
| 56 | __nds32__mtsr_dsb(vaddr, NDS32_SR_TLB_VPN); | ||
| 57 | __nds32__tlbop_rwr(pte); | ||
| 58 | __nds32__isb(); | ||
| 59 | return (void *)vaddr; | ||
| 60 | } | ||
| 61 | |||
| 62 | EXPORT_SYMBOL(kmap_atomic); | ||
| 63 | |||
| 64 | void __kunmap_atomic(void *kvaddr) | ||
| 65 | { | ||
| 66 | if (kvaddr >= (void *)FIXADDR_START) { | ||
| 67 | unsigned long vaddr = (unsigned long)kvaddr; | ||
| 68 | pte_t *ptep; | ||
| 69 | kmap_atomic_idx_pop(); | ||
| 70 | __nds32__tlbop_inv(vaddr); | ||
| 71 | __nds32__isb(); | ||
| 72 | ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr); | ||
| 73 | set_pte(ptep, 0); | ||
| 74 | } | ||
| 75 | pagefault_enable(); | ||
| 76 | preempt_enable(); | ||
| 77 | } | ||
| 78 | |||
| 79 | EXPORT_SYMBOL(__kunmap_atomic); | ||
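kmap_atomic()/__kunmap_atomic() above follow the usual kernel contract: the mapping lives in a per-CPU fixmap slot, preemption and page faults stay disabled for its lifetime, and the caller must not sleep between the two calls. The sketch below shows the conventional calling pattern as it would appear in generic code; fill_page() is a hypothetical caller, not part of this patch.

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/string.h>

static void fill_page(struct page *page, const void *src, size_t len)
{
        /* Maps the page at a fixmap slot with preemption and page faults
         * disabled; must be paired with kunmap_atomic() and must not sleep. */
        void *dst = kmap_atomic(page);

        memcpy(dst, src, min_t(size_t, len, PAGE_SIZE));
        kunmap_atomic(dst);     /* generic wrapper that ends in __kunmap_atomic() */
}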
diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c new file mode 100644 index 000000000000..93ee0160720b --- /dev/null +++ b/arch/nds32/mm/init.c | |||
| @@ -0,0 +1,277 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 1995-2005 Russell King | ||
| 3 | // Copyright (C) 2012 ARM Ltd. | ||
| 4 | // Copyright (C) 2013-2017 Andes Technology Corporation | ||
| 5 | |||
| 6 | #include <linux/kernel.h> | ||
| 7 | #include <linux/errno.h> | ||
| 8 | #include <linux/swap.h> | ||
| 9 | #include <linux/init.h> | ||
| 10 | #include <linux/bootmem.h> | ||
| 11 | #include <linux/mman.h> | ||
| 12 | #include <linux/nodemask.h> | ||
| 13 | #include <linux/initrd.h> | ||
| 14 | #include <linux/highmem.h> | ||
| 15 | #include <linux/memblock.h> | ||
| 16 | |||
| 17 | #include <asm/sections.h> | ||
| 18 | #include <asm/setup.h> | ||
| 19 | #include <asm/tlb.h> | ||
| 20 | #include <asm/page.h> | ||
| 21 | |||
| 22 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
| 23 | DEFINE_SPINLOCK(anon_alias_lock); | ||
| 24 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | ||
| 25 | extern unsigned long phys_initrd_start; | ||
| 26 | extern unsigned long phys_initrd_size; | ||
| 27 | |||
| 28 | /* | ||
| 29 | * empty_zero_page is a special page that is used for | ||
| 30 | * zero-initialized data and COW. | ||
| 31 | */ | ||
| 32 | struct page *empty_zero_page; | ||
| 33 | |||
| 34 | static void __init zone_sizes_init(void) | ||
| 35 | { | ||
| 36 | unsigned long zones_size[MAX_NR_ZONES]; | ||
| 37 | |||
| 38 | /* Clear the zone sizes */ | ||
| 39 | memset(zones_size, 0, sizeof(zones_size)); | ||
| 40 | |||
| 41 | zones_size[ZONE_NORMAL] = max_low_pfn; | ||
| 42 | #ifdef CONFIG_HIGHMEM | ||
| 43 | zones_size[ZONE_HIGHMEM] = max_pfn; | ||
| 44 | #endif | ||
| 45 | free_area_init(zones_size); | ||
| 46 | |||
| 47 | } | ||
| 48 | |||
| 49 | /* | ||
| 50 | * Map all physical memory under high_memory into kernel's address space. | ||
| 51 | * | ||
| 52 | * This is explicitly coded for two-level page tables, so if you need | ||
| 53 | * something else then this needs to change. | ||
| 54 | */ | ||
| 55 | static void __init map_ram(void) | ||
| 56 | { | ||
| 57 | unsigned long v, p, e; | ||
| 58 | pgd_t *pge; | ||
| 59 | pud_t *pue; | ||
| 60 | pmd_t *pme; | ||
| 61 | pte_t *pte; | ||
| 62 | /* These mark extents of read-only kernel pages... | ||
| 63 | * ...from vmlinux.lds.S | ||
| 64 | */ | ||
| 65 | |||
| 66 | p = (u32) memblock_start_of_DRAM() & PAGE_MASK; | ||
| 67 | e = min((u32) memblock_end_of_DRAM(), (u32) __pa(high_memory)); | ||
| 68 | |||
| 69 | v = (u32) __va(p); | ||
| 70 | pge = pgd_offset_k(v); | ||
| 71 | |||
| 72 | while (p < e) { | ||
| 73 | int j; | ||
| 74 | pue = pud_offset(pge, v); | ||
| 75 | pme = pmd_offset(pue, v); | ||
| 76 | |||
| 77 | if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) { | ||
| 78 | panic("%s: Kernel hardcoded for " | ||
| 79 | "two-level page tables", __func__); | ||
| 80 | } | ||
| 81 | |||
| 82 | /* Alloc one page for holding PTE's... */ | ||
| 83 | pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE)); | ||
| 84 | memset(pte, 0, PAGE_SIZE); | ||
| 85 | set_pmd(pme, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE)); | ||
| 86 | |||
| 87 | /* Fill the newly allocated page with PTE'S */ | ||
| 88 | for (j = 0; p < e && j < PTRS_PER_PTE; | ||
| 89 | v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) { | ||
| 90 | /* Create mapping between p and v. */ | ||
| 91 | /* TODO: finer-grained page access permissions */ | ||
| 92 | set_pte(pte, __pte(p + pgprot_val(PAGE_KERNEL))); | ||
| 93 | } | ||
| 94 | |||
| 95 | pge++; | ||
| 96 | } | ||
| 97 | } | ||
| 98 | static pmd_t *fixmap_pmd_p; | ||
| 99 | static void __init fixedrange_init(void) | ||
| 100 | { | ||
| 101 | unsigned long vaddr; | ||
| 102 | pgd_t *pgd; | ||
| 103 | pud_t *pud; | ||
| 104 | pmd_t *pmd; | ||
| 105 | #ifdef CONFIG_HIGHMEM | ||
| 106 | pte_t *pte; | ||
| 107 | #endif /* CONFIG_HIGHMEM */ | ||
| 108 | |||
| 109 | /* | ||
| 110 | * Fixed mappings: | ||
| 111 | */ | ||
| 112 | vaddr = __fix_to_virt(__end_of_fixed_addresses - 1); | ||
| 113 | pgd = swapper_pg_dir + pgd_index(vaddr); | ||
| 114 | pud = pud_offset(pgd, vaddr); | ||
| 115 | pmd = pmd_offset(pud, vaddr); | ||
| 116 | fixmap_pmd_p = (pmd_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE)); | ||
| 117 | memset(fixmap_pmd_p, 0, PAGE_SIZE); | ||
| 118 | set_pmd(pmd, __pmd(__pa(fixmap_pmd_p) + _PAGE_KERNEL_TABLE)); | ||
| 119 | |||
| 120 | #ifdef CONFIG_HIGHMEM | ||
| 121 | /* | ||
| 122 | * Permanent kmaps: | ||
| 123 | */ | ||
| 124 | vaddr = PKMAP_BASE; | ||
| 125 | |||
| 126 | pgd = swapper_pg_dir + pgd_index(vaddr); | ||
| 127 | pud = pud_offset(pgd, vaddr); | ||
| 128 | pmd = pmd_offset(pud, vaddr); | ||
| 129 | pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE)); | ||
| 130 | memset(pte, 0, PAGE_SIZE); | ||
| 131 | set_pmd(pmd, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE)); | ||
| 132 | pkmap_page_table = pte; | ||
| 133 | #endif /* CONFIG_HIGHMEM */ | ||
| 134 | } | ||
| 135 | |||
| 136 | /* | ||
| 137 | * paging_init() sets up the page tables, initialises the zone memory | ||
| 138 | * maps, and sets up the zero page, bad page and bad page tables. | ||
| 139 | */ | ||
| 140 | void __init paging_init(void) | ||
| 141 | { | ||
| 142 | int i; | ||
| 143 | void *zero_page; | ||
| 144 | |||
| 145 | pr_info("Setting up paging and PTEs.\n"); | ||
| 146 | /* clear out the init_mm.pgd that will contain the kernel's mappings */ | ||
| 147 | for (i = 0; i < PTRS_PER_PGD; i++) | ||
| 148 | swapper_pg_dir[i] = __pgd(1); | ||
| 149 | |||
| 150 | map_ram(); | ||
| 151 | |||
| 152 | fixedrange_init(); | ||
| 153 | |||
| 154 | /* allocate space for empty_zero_page */ | ||
| 155 | zero_page = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE)); | ||
| 156 | memset(zero_page, 0, PAGE_SIZE); | ||
| 157 | zone_sizes_init(); | ||
| 158 | |||
| 159 | empty_zero_page = virt_to_page(zero_page); | ||
| 160 | flush_dcache_page(empty_zero_page); | ||
| 161 | } | ||
| 162 | |||
| 163 | static inline void __init free_highmem(void) | ||
| 164 | { | ||
| 165 | #ifdef CONFIG_HIGHMEM | ||
| 166 | unsigned long pfn; | ||
| 167 | for (pfn = PFN_UP(__pa(high_memory)); pfn < max_pfn; pfn++) { | ||
| 168 | phys_addr_t paddr = (phys_addr_t) pfn << PAGE_SHIFT; | ||
| 169 | if (!memblock_is_reserved(paddr)) | ||
| 170 | free_highmem_page(pfn_to_page(pfn)); | ||
| 171 | } | ||
| 172 | #endif | ||
| 173 | } | ||
| 174 | |||
| 175 | static void __init set_max_mapnr_init(void) | ||
| 176 | { | ||
| 177 | max_mapnr = max_pfn; | ||
| 178 | } | ||
| 179 | |||
| 180 | /* | ||
| 181 | * mem_init() marks the free areas in the mem_map and tells us how much | ||
| 182 | * memory is free. This is done after various parts of the system have | ||
| 183 | * claimed their memory after the kernel image. | ||
| 184 | */ | ||
| 185 | void __init mem_init(void) | ||
| 186 | { | ||
| 187 | phys_addr_t memory_start = memblock_start_of_DRAM(); | ||
| 188 | BUG_ON(!mem_map); | ||
| 189 | set_max_mapnr_init(); | ||
| 190 | |||
| 191 | free_highmem(); | ||
| 192 | |||
| 193 | /* this will put all low memory onto the freelists */ | ||
| 194 | free_all_bootmem(); | ||
| 195 | mem_init_print_info(NULL); | ||
| 196 | |||
| 197 | pr_info("virtual kernel memory layout:\n" | ||
| 198 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
| 199 | #ifdef CONFIG_HIGHMEM | ||
| 200 | " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
| 201 | #endif | ||
| 202 | " consist : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
| 203 | " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
| 204 | " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
| 205 | " .init : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
| 206 | " .data : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
| 207 | " .text : 0x%08lx - 0x%08lx (%4ld kB)\n", | ||
| 208 | FIXADDR_START, FIXADDR_TOP, (FIXADDR_TOP - FIXADDR_START) >> 10, | ||
| 209 | #ifdef CONFIG_HIGHMEM | ||
| 210 | PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE, | ||
| 211 | (LAST_PKMAP * PAGE_SIZE) >> 10, | ||
| 212 | #endif | ||
| 213 | CONSISTENT_BASE, CONSISTENT_END, | ||
| 214 | ((CONSISTENT_END) - (CONSISTENT_BASE)) >> 20, VMALLOC_START, | ||
| 215 | (unsigned long)VMALLOC_END, (VMALLOC_END - VMALLOC_START) >> 20, | ||
| 216 | (unsigned long)__va(memory_start), (unsigned long)high_memory, | ||
| 217 | ((unsigned long)high_memory - | ||
| 218 | (unsigned long)__va(memory_start)) >> 20, | ||
| 219 | (unsigned long)&__init_begin, (unsigned long)&__init_end, | ||
| 220 | ((unsigned long)&__init_end - | ||
| 221 | (unsigned long)&__init_begin) >> 10, (unsigned long)&_etext, | ||
| 222 | (unsigned long)&_edata, | ||
| 223 | ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, | ||
| 224 | (unsigned long)&_text, (unsigned long)&_etext, | ||
| 225 | ((unsigned long)&_etext - (unsigned long)&_text) >> 10); | ||
| 226 | |||
| 227 | /* | ||
| 228 | * Check boundaries twice: Some fundamental inconsistencies can | ||
| 229 | * be detected at build time already. | ||
| 230 | */ | ||
| 231 | #ifdef CONFIG_HIGHMEM | ||
| 232 | BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > FIXADDR_START); | ||
| 233 | BUILD_BUG_ON((CONSISTENT_END) > PKMAP_BASE); | ||
| 234 | #endif | ||
| 235 | BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE); | ||
| 236 | BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END); | ||
| 237 | |||
| 238 | #ifdef CONFIG_HIGHMEM | ||
| 239 | BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > FIXADDR_START); | ||
| 240 | BUG_ON(CONSISTENT_END > PKMAP_BASE); | ||
| 241 | #endif | ||
| 242 | BUG_ON(VMALLOC_END > CONSISTENT_BASE); | ||
| 243 | BUG_ON(VMALLOC_START >= VMALLOC_END); | ||
| 244 | BUG_ON((unsigned long)high_memory > VMALLOC_START); | ||
| 245 | |||
| 246 | return; | ||
| 247 | } | ||
| 248 | |||
| 249 | void free_initmem(void) | ||
| 250 | { | ||
| 251 | free_initmem_default(-1); | ||
| 252 | } | ||
| 253 | |||
| 254 | #ifdef CONFIG_BLK_DEV_INITRD | ||
| 255 | void free_initrd_mem(unsigned long start, unsigned long end) | ||
| 256 | { | ||
| 257 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); | ||
| 258 | } | ||
| 259 | #endif | ||
| 260 | |||
| 261 | void __set_fixmap(enum fixed_addresses idx, | ||
| 262 | phys_addr_t phys, pgprot_t flags) | ||
| 263 | { | ||
| 264 | unsigned long addr = __fix_to_virt(idx); | ||
| 265 | pte_t *pte; | ||
| 266 | |||
| 267 | BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); | ||
| 268 | |||
| 269 | pte = (pte_t *)&fixmap_pmd_p[pte_index(addr)]; | ||
| 270 | |||
| 271 | if (pgprot_val(flags)) { | ||
| 272 | set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); | ||
| 273 | } else { | ||
| 274 | pte_clear(&init_mm, addr, pte); | ||
| 275 | flush_tlb_kernel_range(addr, addr + PAGE_SIZE); | ||
| 276 | } | ||
| 277 | } | ||
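fixedrange_init() and __set_fixmap() both rely on __fix_to_virt(), which by long-standing kernel convention hands out one page per index downwards from FIXADDR_TOP. The arithmetic below is a minimal stand-alone illustration with assumed values; the real FIXADDR_TOP and index layout come from this port's asm/fixmap.h, which is not shown here.

#include <stdio.h>

#define PAGE_SHIFT   12
#define FIXADDR_TOP  0xfffff000UL   /* assumed value, for illustration only */

/* Conventional definition: fixmap slots grow downwards from the top. */
static unsigned long fix_to_virt(unsigned int idx)
{
        return FIXADDR_TOP - ((unsigned long)idx << PAGE_SHIFT);
}

int main(void)
{
        printf("idx 0 -> 0x%lx\n", fix_to_virt(0));   /* just below FIXADDR_TOP */
        printf("idx 3 -> 0x%lx\n", fix_to_virt(3));   /* three pages lower */
        return 0;
}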
diff --git a/arch/nds32/mm/ioremap.c b/arch/nds32/mm/ioremap.c new file mode 100644 index 000000000000..690140bb23a2 --- /dev/null +++ b/arch/nds32/mm/ioremap.c | |||
| @@ -0,0 +1,62 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/vmalloc.h> | ||
| 5 | #include <linux/io.h> | ||
| 6 | #include <linux/mm.h> | ||
| 7 | #include <asm/pgtable.h> | ||
| 8 | |||
| 9 | void __iomem *ioremap(phys_addr_t phys_addr, size_t size); | ||
| 10 | |||
| 11 | static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size, | ||
| 12 | void *caller) | ||
| 13 | { | ||
| 14 | struct vm_struct *area; | ||
| 15 | unsigned long addr, offset, last_addr; | ||
| 16 | pgprot_t prot; | ||
| 17 | |||
| 18 | /* Don't allow wraparound or zero size */ | ||
| 19 | last_addr = phys_addr + size - 1; | ||
| 20 | if (!size || last_addr < phys_addr) | ||
| 21 | return NULL; | ||
| 22 | |||
| 23 | /* | ||
| 24 | * Mappings have to be page-aligned | ||
| 25 | */ | ||
| 26 | offset = phys_addr & ~PAGE_MASK; | ||
| 27 | phys_addr &= PAGE_MASK; | ||
| 28 | size = PAGE_ALIGN(last_addr + 1) - phys_addr; | ||
| 29 | |||
| 30 | /* | ||
| 31 | * Ok, go for it.. | ||
| 32 | */ | ||
| 33 | area = get_vm_area_caller(size, VM_IOREMAP, caller); | ||
| 34 | if (!area) | ||
| 35 | return NULL; | ||
| 36 | |||
| 37 | area->phys_addr = phys_addr; | ||
| 38 | addr = (unsigned long)area->addr; | ||
| 39 | prot = __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | | ||
| 40 | _PAGE_G | _PAGE_C_DEV); | ||
| 41 | if (ioremap_page_range(addr, addr + size, phys_addr, prot)) { | ||
| 42 | vunmap((void *)addr); | ||
| 43 | return NULL; | ||
| 44 | } | ||
| 45 | return (__force void __iomem *)(offset + (char *)addr); | ||
| 46 | |||
| 47 | } | ||
| 48 | |||
| 49 | void __iomem *ioremap(phys_addr_t phys_addr, size_t size) | ||
| 50 | { | ||
| 51 | return __ioremap_caller(phys_addr, size, | ||
| 52 | __builtin_return_address(0)); | ||
| 53 | } | ||
| 54 | |||
| 55 | EXPORT_SYMBOL(ioremap); | ||
| 56 | |||
| 57 | void iounmap(volatile void __iomem * addr) | ||
| 58 | { | ||
| 59 | vunmap((void *)(PAGE_MASK & (unsigned long)addr)); | ||
| 60 | } | ||
| 61 | |||
| 62 | EXPORT_SYMBOL(iounmap); | ||
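A typical consumer of the ioremap()/iounmap() pair above is a driver that maps a block of device registers and accesses them through the ordered MMIO accessors. The sketch below is illustrative only: the physical base, size, and register offset are made-up values, not addresses from any Andes platform.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/types.h>

#define DEMO_PHYS_BASE  0x90000000UL    /* hypothetical device base */
#define DEMO_REG_ID     0x00            /* hypothetical register offset */

static int demo_probe(void)
{
        void __iomem *regs = ioremap(DEMO_PHYS_BASE, 0x1000);
        u32 id;

        if (!regs)
                return -ENOMEM;

        id = readl(regs + DEMO_REG_ID); /* uncached, device-ordered access */
        pr_info("demo device id: %#x\n", id);

        iounmap(regs);
        return 0;
}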
diff --git a/arch/nds32/mm/mm-nds32.c b/arch/nds32/mm/mm-nds32.c new file mode 100644 index 000000000000..3b43798d754f --- /dev/null +++ b/arch/nds32/mm/mm-nds32.c | |||
| @@ -0,0 +1,90 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/init_task.h> | ||
| 5 | #include <asm/pgalloc.h> | ||
| 6 | |||
| 7 | #define FIRST_KERNEL_PGD_NR (USER_PTRS_PER_PGD) | ||
| 8 | |||
| 9 | /* | ||
| 10 | * need to get a page for level 1 | ||
| 11 | */ | ||
| 12 | |||
| 13 | pgd_t *pgd_alloc(struct mm_struct *mm) | ||
| 14 | { | ||
| 15 | pgd_t *new_pgd, *init_pgd; | ||
| 16 | int i; | ||
| 17 | |||
| 18 | new_pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, 0); | ||
| 19 | if (!new_pgd) | ||
| 20 | return NULL; | ||
| 21 | for (i = 0; i < PTRS_PER_PGD; i++) { | ||
| 22 | (*new_pgd) = 1; | ||
| 23 | new_pgd++; | ||
| 24 | } | ||
| 25 | new_pgd -= PTRS_PER_PGD; | ||
| 26 | |||
| 27 | init_pgd = pgd_offset_k(0); | ||
| 28 | |||
| 29 | memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR, | ||
| 30 | (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t)); | ||
| 31 | |||
| 32 | cpu_dcache_wb_range((unsigned long)new_pgd, | ||
| 33 | (unsigned long)new_pgd + | ||
| 34 | PTRS_PER_PGD * sizeof(pgd_t)); | ||
| 35 | inc_zone_page_state(virt_to_page((unsigned long *)new_pgd), | ||
| 36 | NR_PAGETABLE); | ||
| 37 | |||
| 38 | return new_pgd; | ||
| 39 | } | ||
| 40 | |||
| 41 | void pgd_free(struct mm_struct *mm, pgd_t * pgd) | ||
| 42 | { | ||
| 43 | pmd_t *pmd; | ||
| 44 | struct page *pte; | ||
| 45 | |||
| 46 | if (!pgd) | ||
| 47 | return; | ||
| 48 | |||
| 49 | pmd = (pmd_t *) pgd; | ||
| 50 | if (pmd_none(*pmd)) | ||
| 51 | goto free; | ||
| 52 | if (pmd_bad(*pmd)) { | ||
| 53 | pmd_ERROR(*pmd); | ||
| 54 | pmd_clear(pmd); | ||
| 55 | goto free; | ||
| 56 | } | ||
| 57 | |||
| 58 | pte = pmd_page(*pmd); | ||
| 59 | pmd_clear(pmd); | ||
| 60 | dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE); | ||
| 61 | pte_free(mm, pte); | ||
| 62 | mm_dec_nr_ptes(mm); | ||
| 63 | pmd_free(mm, pmd); | ||
| 64 | free: | ||
| 65 | free_pages((unsigned long)pgd, 0); | ||
| 66 | } | ||
| 67 | |||
| 68 | /* | ||
| 69 | * In order to soft-boot, we need to insert a 1:1 mapping in place of | ||
| 70 | * the user-mode pages. This will then ensure that we have predictable | ||
| 71 | * results when turning the mmu off | ||
| 72 | */ | ||
| 73 | void setup_mm_for_reboot(char mode) | ||
| 74 | { | ||
| 75 | unsigned long pmdval; | ||
| 76 | pgd_t *pgd; | ||
| 77 | pmd_t *pmd; | ||
| 78 | int i; | ||
| 79 | |||
| 80 | if (current->mm && current->mm->pgd) | ||
| 81 | pgd = current->mm->pgd; | ||
| 82 | else | ||
| 83 | pgd = init_mm.pgd; | ||
| 84 | |||
| 85 | for (i = 0; i < USER_PTRS_PER_PGD; i++) { | ||
| 86 | pmdval = (i << PGDIR_SHIFT); | ||
| 87 | pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT); | ||
| 88 | set_pmd(pmd, __pmd(pmdval)); | ||
| 89 | } | ||
| 90 | } | ||
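setup_mm_for_reboot() builds section-sized identity mappings: user PMD slot i is pointed straight at physical address i << PGDIR_SHIFT, so instruction fetches keep resolving to the same addresses while the MMU is being turned off. The arithmetic below just makes that mapping visible; PGDIR_SHIFT is assumed to be 22 (4 MiB sections) for illustration, and the real value comes from this port's asm/pgtable.h.

#include <stdio.h>

#define PGDIR_SHIFT 22   /* assumed section size, illustration only */

int main(void)
{
        /* Mirrors the loop in setup_mm_for_reboot(): slot i covers the
         * virtual range [i << PGDIR_SHIFT, (i + 1) << PGDIR_SHIFT) and is
         * pointed at the identical physical range. */
        for (unsigned int i = 0; i < 4; i++)
                printf("slot %u: VA 0x%08lx -> PA 0x%08lx\n", i,
                       (unsigned long)i << PGDIR_SHIFT,
                       (unsigned long)i << PGDIR_SHIFT);
        return 0;
}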
diff --git a/arch/nds32/mm/mmap.c b/arch/nds32/mm/mmap.c new file mode 100644 index 000000000000..c206b31ce07a --- /dev/null +++ b/arch/nds32/mm/mmap.c | |||
| @@ -0,0 +1,73 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/sched.h> | ||
| 5 | #include <linux/mman.h> | ||
| 6 | #include <linux/shm.h> | ||
| 7 | |||
| 8 | #define COLOUR_ALIGN(addr,pgoff) \ | ||
| 9 | ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \ | ||
| 10 | (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1))) | ||
| 11 | |||
| 12 | /* | ||
| 13 | * We need to ensure that shared mappings are correctly aligned to | ||
| 14 | * avoid aliasing issues with VIPT caches. We need to ensure that | ||
| 15 | * a specific page of an object is always mapped at a multiple of | ||
| 16 | * SHMLBA bytes. | ||
| 17 | * | ||
| 18 | * We unconditionally provide this function for all cases, however | ||
| 19 | * in the VIVT case, we optimise out the alignment rules. | ||
| 20 | */ | ||
| 21 | unsigned long | ||
| 22 | arch_get_unmapped_area(struct file *filp, unsigned long addr, | ||
| 23 | unsigned long len, unsigned long pgoff, | ||
| 24 | unsigned long flags) | ||
| 25 | { | ||
| 26 | struct mm_struct *mm = current->mm; | ||
| 27 | struct vm_area_struct *vma; | ||
| 28 | int do_align = 0; | ||
| 29 | struct vm_unmapped_area_info info; | ||
| 30 | int aliasing = 0; | ||
| 31 | if (IS_ENABLED(CONFIG_CPU_CACHE_ALIASING)) | ||
| 32 | aliasing = 1; | ||
| 33 | |||
| 34 | /* | ||
| 35 | * We only need to do colour alignment if either the I or D | ||
| 36 | * caches alias. | ||
| 37 | */ | ||
| 38 | if (aliasing) | ||
| 39 | do_align = filp || (flags & MAP_SHARED); | ||
| 40 | |||
| 41 | /* | ||
| 42 | * We enforce the MAP_FIXED case. | ||
| 43 | */ | ||
| 44 | if (flags & MAP_FIXED) { | ||
| 45 | if (aliasing && flags & MAP_SHARED && | ||
| 46 | (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) | ||
| 47 | return -EINVAL; | ||
| 48 | return addr; | ||
| 49 | } | ||
| 50 | |||
| 51 | if (len > TASK_SIZE) | ||
| 52 | return -ENOMEM; | ||
| 53 | |||
| 54 | if (addr) { | ||
| 55 | if (do_align) | ||
| 56 | addr = COLOUR_ALIGN(addr, pgoff); | ||
| 57 | else | ||
| 58 | addr = PAGE_ALIGN(addr); | ||
| 59 | |||
| 60 | vma = find_vma(mm, addr); | ||
| 61 | if (TASK_SIZE - len >= addr && | ||
| 62 | (!vma || addr + len <= vma->vm_start)) | ||
| 63 | return addr; | ||
| 64 | } | ||
| 65 | |||
| 66 | info.flags = 0; | ||
| 67 | info.length = len; | ||
| 68 | info.low_limit = mm->mmap_base; | ||
| 69 | info.high_limit = TASK_SIZE; | ||
| 70 | info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; | ||
| 71 | info.align_offset = pgoff << PAGE_SHIFT; | ||
| 72 | return vm_unmapped_area(&info); | ||
| 73 | } | ||
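COLOUR_ALIGN() rounds a candidate address up to an SHMLBA boundary and then adds back the colour bits of the file offset, so every shared mapping of a given page lands on the same D-cache colour. The stand-alone computation below shows the effect; SHMLBA and PAGE_SHIFT are assumed values for illustration, not the ones defined by this port.

#include <stdio.h>

#define PAGE_SHIFT 12
#define SHMLBA     (4 << PAGE_SHIFT)    /* assumed: four page colours */

#define COLOUR_ALIGN(addr, pgoff) \
        ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
         (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

int main(void)
{
        unsigned long addr = 0x12345000UL, pgoff = 3;
        unsigned long aligned = COLOUR_ALIGN(addr, pgoff);

        /* The aligned address carries the same colour bits as the file
         * offset, so every SHMLBA-aligned mapping of that page shares one
         * cache colour and cannot alias in the VIPT D-cache. */
        printf("0x%lx -> 0x%lx (colour 0x%lx)\n",
               addr, aligned, aligned & (SHMLBA - 1));
        return 0;
}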
diff --git a/arch/nds32/mm/proc.c b/arch/nds32/mm/proc.c new file mode 100644 index 000000000000..ba80992d13a2 --- /dev/null +++ b/arch/nds32/mm/proc.c | |||
| @@ -0,0 +1,533 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/module.h> | ||
| 5 | #include <linux/sched.h> | ||
| 6 | #include <linux/mm.h> | ||
| 7 | #include <asm/nds32.h> | ||
| 8 | #include <asm/pgtable.h> | ||
| 9 | #include <asm/tlbflush.h> | ||
| 10 | #include <asm/cacheflush.h> | ||
| 11 | #include <asm/l2_cache.h> | ||
| 12 | #include <nds32_intrinsic.h> | ||
| 13 | |||
| 14 | #include <asm/cache_info.h> | ||
| 15 | extern struct cache_info L1_cache_info[2]; | ||
| 16 | |||
| 17 | int va_kernel_present(unsigned long addr) | ||
| 18 | { | ||
| 19 | pmd_t *pmd; | ||
| 20 | pte_t *ptep, pte; | ||
| 21 | |||
| 22 | pmd = pmd_offset(pgd_offset_k(addr), addr); | ||
| 23 | if (!pmd_none(*pmd)) { | ||
| 24 | ptep = pte_offset_map(pmd, addr); | ||
| 25 | pte = *ptep; | ||
| 26 | if (pte_present(pte)) | ||
| 27 | return pte; | ||
| 28 | } | ||
| 29 | return 0; | ||
| 30 | } | ||
| 31 | |||
| 32 | pte_t va_present(struct mm_struct * mm, unsigned long addr) | ||
| 33 | { | ||
| 34 | pgd_t *pgd; | ||
| 35 | pud_t *pud; | ||
| 36 | pmd_t *pmd; | ||
| 37 | pte_t *ptep, pte; | ||
| 38 | |||
| 39 | pgd = pgd_offset(mm, addr); | ||
| 40 | if (!pgd_none(*pgd)) { | ||
| 41 | pud = pud_offset(pgd, addr); | ||
| 42 | if (!pud_none(*pud)) { | ||
| 43 | pmd = pmd_offset(pud, addr); | ||
| 44 | if (!pmd_none(*pmd)) { | ||
| 45 | ptep = pte_offset_map(pmd, addr); | ||
| 46 | pte = *ptep; | ||
| 47 | if (pte_present(pte)) | ||
| 48 | return pte; | ||
| 49 | } | ||
| 50 | } | ||
| 51 | } | ||
| 52 | return 0; | ||
| 53 | |||
| 54 | } | ||
| 55 | |||
| 56 | int va_readable(struct pt_regs *regs, unsigned long addr) | ||
| 57 | { | ||
| 58 | struct mm_struct *mm = current->mm; | ||
| 59 | pte_t pte; | ||
| 60 | int ret = 0; | ||
| 61 | |||
| 62 | if (user_mode(regs)) { | ||
| 63 | /* user mode */ | ||
| 64 | pte = va_present(mm, addr); | ||
| 65 | if (!pte && pte_read(pte)) | ||
| 66 | ret = 1; | ||
| 67 | } else { | ||
| 68 | /* superuser mode is always readable, so we only need | ||
| 69 | * to check whether it is present */ | ||
| 70 | return !!va_kernel_present(addr); | ||
| 71 | } | ||
| 72 | return ret; | ||
| 73 | } | ||
| 74 | |||
| 75 | int va_writable(struct pt_regs *regs, unsigned long addr) | ||
| 76 | { | ||
| 77 | struct mm_struct *mm = current->mm; | ||
| 78 | pte_t pte; | ||
| 79 | int ret = 0; | ||
| 80 | |||
| 81 | if (user_mode(regs)) { | ||
| 82 | /* user mode */ | ||
| 83 | pte = va_present(mm, addr); | ||
| 84 | if (!pte && pte_write(pte)) | ||
| 85 | ret = 1; | ||
| 86 | } else { | ||
| 87 | /* superuser mode */ | ||
| 88 | pte = va_kernel_present(addr); | ||
| 89 | if (!pte && pte_kernel_write(pte)) | ||
| 90 | ret = 1; | ||
| 91 | } | ||
| 92 | return ret; | ||
| 93 | } | ||
| 94 | |||
| 95 | /* | ||
| 96 | * All | ||
| 97 | */ | ||
| 98 | void cpu_icache_inval_all(void) | ||
| 99 | { | ||
| 100 | unsigned long end, line_size; | ||
| 101 | |||
| 102 | line_size = L1_cache_info[ICACHE].line_size; | ||
| 103 | end = | ||
| 104 | line_size * L1_cache_info[ICACHE].ways * L1_cache_info[ICACHE].sets; | ||
| 105 | |||
| 106 | do { | ||
| 107 | end -= line_size; | ||
| 108 | __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end)); | ||
| 109 | end -= line_size; | ||
| 110 | __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end)); | ||
| 111 | end -= line_size; | ||
| 112 | __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end)); | ||
| 113 | end -= line_size; | ||
| 114 | __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end)); | ||
| 115 | } while (end > 0); | ||
| 116 | __nds32__isb(); | ||
| 117 | } | ||
| 118 | |||
| 119 | void cpu_dcache_inval_all(void) | ||
| 120 | { | ||
| 121 | __nds32__cctl_l1d_invalall(); | ||
| 122 | } | ||
| 123 | |||
| 124 | #ifdef CONFIG_CACHE_L2 | ||
| 125 | void dcache_wb_all_level(void) | ||
| 126 | { | ||
| 127 | unsigned long flags, cmd; | ||
| 128 | local_irq_save(flags); | ||
| 129 | __nds32__cctl_l1d_wball_alvl(); | ||
| 130 | /* Section 1: Ensure that the code in sections 2 & 3 executes only after this */ | ||
| 131 | __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0); | ||
| 132 | |||
| 133 | /* Section 2: Confirm the writeback all level is done in CPU and L2C */ | ||
| 134 | cmd = CCTL_CMD_L2_SYNC; | ||
| 135 | L2_CMD_RDY(); | ||
| 136 | L2C_W_REG(L2_CCTL_CMD_OFF, cmd); | ||
| 137 | L2_CMD_RDY(); | ||
| 138 | |||
| 139 | /* Section 3: Writeback whole L2 cache */ | ||
| 140 | cmd = CCTL_ALL_CMD | CCTL_CMD_L2_IX_WB; | ||
| 141 | L2_CMD_RDY(); | ||
| 142 | L2C_W_REG(L2_CCTL_CMD_OFF, cmd); | ||
| 143 | L2_CMD_RDY(); | ||
| 144 | __nds32__msync_all(); | ||
| 145 | local_irq_restore(flags); | ||
| 146 | } | ||
| 147 | EXPORT_SYMBOL(dcache_wb_all_level); | ||
| 148 | #endif | ||
| 149 | |||
| 150 | void cpu_dcache_wb_all(void) | ||
| 151 | { | ||
| 152 | __nds32__cctl_l1d_wball_one_lvl(); | ||
| 153 | __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0); | ||
| 154 | } | ||
| 155 | |||
| 156 | void cpu_dcache_wbinval_all(void) | ||
| 157 | { | ||
| 158 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | ||
| 159 | unsigned long flags; | ||
| 160 | local_irq_save(flags); | ||
| 161 | #endif | ||
| 162 | cpu_dcache_wb_all(); | ||
| 163 | cpu_dcache_inval_all(); | ||
| 164 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | ||
| 165 | local_irq_restore(flags); | ||
| 166 | #endif | ||
| 167 | } | ||
| 168 | |||
| 169 | /* | ||
| 170 | * Page | ||
| 171 | */ | ||
| 172 | void cpu_icache_inval_page(unsigned long start) | ||
| 173 | { | ||
| 174 | unsigned long line_size, end; | ||
| 175 | |||
| 176 | line_size = L1_cache_info[ICACHE].line_size; | ||
| 177 | end = start + PAGE_SIZE; | ||
| 178 | |||
| 179 | do { | ||
| 180 | end -= line_size; | ||
| 181 | __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end)); | ||
| 182 | end -= line_size; | ||
| 183 | __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end)); | ||
| 184 | end -= line_size; | ||
| 185 | __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end)); | ||
| 186 | end -= line_size; | ||
| 187 | __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end)); | ||
| 188 | } while (end != start); | ||
| 189 | __nds32__isb(); | ||
| 190 | } | ||
| 191 | |||
| 192 | void cpu_dcache_inval_page(unsigned long start) | ||
| 193 | { | ||
| 194 | unsigned long line_size, end; | ||
| 195 | |||
| 196 | line_size = L1_cache_info[DCACHE].line_size; | ||
| 197 | end = start + PAGE_SIZE; | ||
| 198 | |||
| 199 | do { | ||
| 200 | end -= line_size; | ||
| 201 | __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end)); | ||
| 202 | end -= line_size; | ||
| 203 | __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end)); | ||
| 204 | end -= line_size; | ||
| 205 | __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end)); | ||
| 206 | end -= line_size; | ||
| 207 | __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end)); | ||
| 208 | } while (end != start); | ||
| 209 | } | ||
| 210 | |||
| 211 | void cpu_dcache_wb_page(unsigned long start) | ||
| 212 | { | ||
| 213 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | ||
| 214 | unsigned long line_size, end; | ||
| 215 | |||
| 216 | line_size = L1_cache_info[DCACHE].line_size; | ||
| 217 | end = start + PAGE_SIZE; | ||
| 218 | |||
| 219 | do { | ||
| 220 | end -= line_size; | ||
| 221 | __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end)); | ||
| 222 | end -= line_size; | ||
| 223 | __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end)); | ||
| 224 | end -= line_size; | ||
| 225 | __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end)); | ||
| 226 | end -= line_size; | ||
| 227 | __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end)); | ||
| 228 | } while (end != start); | ||
| 229 | __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0); | ||
| 230 | #endif | ||
| 231 | } | ||
| 232 | |||
| 233 | void cpu_dcache_wbinval_page(unsigned long start) | ||
| 234 | { | ||
| 235 | unsigned long line_size, end; | ||
| 236 | |||
| 237 | line_size = L1_cache_info[DCACHE].line_size; | ||
| 238 | end = start + PAGE_SIZE; | ||
| 239 | |||
| 240 | do { | ||
| 241 | end -= line_size; | ||
| 242 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | ||
| 243 | __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end)); | ||
| 244 | #endif | ||
| 245 | __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end)); | ||
| 246 | end -= line_size; | ||
| 247 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | ||
| 248 | __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end)); | ||
| 249 | #endif | ||
| 250 | __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end)); | ||
| 251 | end -= line_size; | ||
| 252 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | ||
| 253 | __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end)); | ||
| 254 | #endif | ||
| 255 | __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end)); | ||
| 256 | end -= line_size; | ||
| 257 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | ||
| 258 | __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end)); | ||
| 259 | #endif | ||
| 260 | __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end)); | ||
| 261 | } while (end != start); | ||
| 262 | __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0); | ||
| 263 | } | ||
| 264 | |||
| 265 | void cpu_cache_wbinval_page(unsigned long page, int flushi) | ||
| 266 | { | ||
| 267 | cpu_dcache_wbinval_page(page); | ||
| 268 | if (flushi) | ||
| 269 | cpu_icache_inval_page(page); | ||
| 270 | } | ||
| 271 | |||
| 272 | /* | ||
| 273 | * Range | ||
| 274 | */ | ||
| 275 | void cpu_icache_inval_range(unsigned long start, unsigned long end) | ||
| 276 | { | ||
| 277 | unsigned long line_size; | ||
| 278 | |||
| 279 | line_size = L1_cache_info[ICACHE].line_size; | ||
| 280 | |||
| 281 | while (end > start) { | ||
| 282 | __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (start)); | ||
| 283 | start += line_size; | ||
| 284 | } | ||
| 285 | __nds32__isb(); | ||
| 286 | } | ||
| 287 | |||
| 288 | void cpu_dcache_inval_range(unsigned long start, unsigned long end) | ||
| 289 | { | ||
| 290 | unsigned long line_size; | ||
| 291 | |||
| 292 | line_size = L1_cache_info[DCACHE].line_size; | ||
| 293 | |||
| 294 | while (end > start) { | ||
| 295 | __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start)); | ||
| 296 | start += line_size; | ||
| 297 | } | ||
| 298 | } | ||
| 299 | |||
| 300 | void cpu_dcache_wb_range(unsigned long start, unsigned long end) | ||
| 301 | { | ||
| 302 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | ||
| 303 | unsigned long line_size; | ||
| 304 | |||
| 305 | line_size = L1_cache_info[DCACHE].line_size; | ||
| 306 | |||
| 307 | while (end > start) { | ||
| 308 | __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start)); | ||
| 309 | start += line_size; | ||
| 310 | } | ||
| 311 | __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0); | ||
| 312 | #endif | ||
| 313 | } | ||
| 314 | |||
| 315 | void cpu_dcache_wbinval_range(unsigned long start, unsigned long end) | ||
| 316 | { | ||
| 317 | unsigned long line_size; | ||
| 318 | |||
| 319 | line_size = L1_cache_info[DCACHE].line_size; | ||
| 320 | |||
| 321 | while (end > start) { | ||
| 322 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | ||
| 323 | __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start)); | ||
| 324 | #endif | ||
| 325 | __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start)); | ||
| 326 | start += line_size; | ||
| 327 | } | ||
| 328 | __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0); | ||
| 329 | } | ||
| 330 | |||
| 331 | void cpu_cache_wbinval_range(unsigned long start, unsigned long end, int flushi) | ||
| 332 | { | ||
| 333 | unsigned long line_size, align_start, align_end; | ||
| 334 | |||
| 335 | line_size = L1_cache_info[DCACHE].line_size; | ||
| 336 | align_start = start & ~(line_size - 1); | ||
| 337 | align_end = (end + line_size - 1) & ~(line_size - 1); | ||
| 338 | cpu_dcache_wbinval_range(align_start, align_end); | ||
| 339 | |||
| 340 | if (flushi) { | ||
| 341 | line_size = L1_cache_info[ICACHE].line_size; | ||
| 342 | align_start = start & ~(line_size - 1); | ||
| 343 | align_end = (end + line_size - 1) & ~(line_size - 1); | ||
| 344 | cpu_icache_inval_range(align_start, align_end); | ||
| 345 | } | ||
| 346 | } | ||
| 347 | |||
| 348 | void cpu_cache_wbinval_range_check(struct vm_area_struct *vma, | ||
| 349 | unsigned long start, unsigned long end, | ||
| 350 | bool flushi, bool wbd) | ||
| 351 | { | ||
| 352 | unsigned long line_size, t_start, t_end; | ||
| 353 | |||
| 354 | if (!flushi && !wbd) | ||
| 355 | return; | ||
| 356 | line_size = L1_cache_info[DCACHE].line_size; | ||
| 357 | start = start & ~(line_size - 1); | ||
| 358 | end = (end + line_size - 1) & ~(line_size - 1); | ||
| 359 | |||
| 360 | if ((end - start) > (8 * PAGE_SIZE)) { | ||
| 361 | if (wbd) | ||
| 362 | cpu_dcache_wbinval_all(); | ||
| 363 | if (flushi) | ||
| 364 | cpu_icache_inval_all(); | ||
| 365 | return; | ||
| 366 | } | ||
| 367 | |||
| 368 | t_start = (start + PAGE_SIZE) & PAGE_MASK; | ||
| 369 | t_end = ((end - 1) & PAGE_MASK); | ||
| 370 | |||
| 371 | if ((start & PAGE_MASK) == t_end) { | ||
| 372 | if (va_present(vma->vm_mm, start)) { | ||
| 373 | if (wbd) | ||
| 374 | cpu_dcache_wbinval_range(start, end); | ||
| 375 | if (flushi) | ||
| 376 | cpu_icache_inval_range(start, end); | ||
| 377 | } | ||
| 378 | return; | ||
| 379 | } | ||
| 380 | |||
| 381 | if (va_present(vma->vm_mm, start)) { | ||
| 382 | if (wbd) | ||
| 383 | cpu_dcache_wbinval_range(start, t_start); | ||
| 384 | if (flushi) | ||
| 385 | cpu_icache_inval_range(start, t_start); | ||
| 386 | } | ||
| 387 | |||
| 388 | if (va_present(vma->vm_mm, end - 1)) { | ||
| 389 | if (wbd) | ||
| 390 | cpu_dcache_wbinval_range(t_end, end); | ||
| 391 | if (flushi) | ||
| 392 | cpu_icache_inval_range(t_end, end); | ||
| 393 | } | ||
| 394 | |||
| 395 | while (t_start < t_end) { | ||
| 396 | if (va_present(vma->vm_mm, t_start)) { | ||
| 397 | if (wbd) | ||
| 398 | cpu_dcache_wbinval_page(t_start); | ||
| 399 | if (flushi) | ||
| 400 | cpu_icache_inval_page(t_start); | ||
| 401 | } | ||
| 402 | t_start += PAGE_SIZE; | ||
| 403 | } | ||
| 404 | } | ||
| 405 | |||
| 406 | #ifdef CONFIG_CACHE_L2 | ||
| 407 | static inline void cpu_l2cache_op(unsigned long start, unsigned long end, unsigned long op) | ||
| 408 | { | ||
| 409 | if (atl2c_base) { | ||
| 410 | unsigned long p_start = __pa(start); | ||
| 411 | unsigned long p_end = __pa(end); | ||
| 412 | unsigned long cmd; | ||
| 413 | unsigned long line_size; | ||
| 414 | /* TODO: use PAGE mode to optimize when the range is larger than PAGE_SIZE */ | ||
| 415 | line_size = L2_CACHE_LINE_SIZE(); | ||
| 416 | p_start = p_start & (~(line_size - 1)); | ||
| 417 | p_end = (p_end + line_size - 1) & (~(line_size - 1)); | ||
| 418 | cmd = | ||
| 419 | (p_start & ~(line_size - 1)) | op | | ||
| 420 | CCTL_SINGLE_CMD; | ||
| 421 | do { | ||
| 422 | L2_CMD_RDY(); | ||
| 423 | L2C_W_REG(L2_CCTL_CMD_OFF, cmd); | ||
| 424 | cmd += line_size; | ||
| 425 | p_start += line_size; | ||
| 426 | } while (p_end > p_start); | ||
| 427 | cmd = CCTL_CMD_L2_SYNC; | ||
| 428 | L2_CMD_RDY(); | ||
| 429 | L2C_W_REG(L2_CCTL_CMD_OFF, cmd); | ||
| 430 | L2_CMD_RDY(); | ||
| 431 | } | ||
| 432 | } | ||
| 433 | #else | ||
| 434 | #define cpu_l2cache_op(start, end, op) do { } while (0) | ||
| 435 | #endif | ||
| 436 | /* | ||
| 437 | * DMA | ||
| 438 | */ | ||
| 439 | void cpu_dma_wb_range(unsigned long start, unsigned long end) | ||
| 440 | { | ||
| 441 | unsigned long line_size; | ||
| 442 | unsigned long flags; | ||
| 443 | line_size = L1_cache_info[DCACHE].line_size; | ||
| 444 | start = start & (~(line_size - 1)); | ||
| 445 | end = (end + line_size - 1) & (~(line_size - 1)); | ||
| 446 | if (unlikely(start == end)) | ||
| 447 | return; | ||
| 448 | |||
| 449 | local_irq_save(flags); | ||
| 450 | cpu_dcache_wb_range(start, end); | ||
| 451 | cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WB); | ||
| 452 | __nds32__msync_all(); | ||
| 453 | local_irq_restore(flags); | ||
| 454 | } | ||
| 455 | |||
| 456 | void cpu_dma_inval_range(unsigned long start, unsigned long end) | ||
| 457 | { | ||
| 458 | unsigned long line_size; | ||
| 459 | unsigned long old_start = start; | ||
| 460 | unsigned long old_end = end; | ||
| 461 | unsigned long flags; | ||
| 462 | line_size = L1_cache_info[DCACHE].line_size; | ||
| 463 | start = start & (~(line_size - 1)); | ||
| 464 | end = (end + line_size - 1) & (~(line_size - 1)); | ||
| 465 | if (unlikely(start == end)) | ||
| 466 | return; | ||
| 467 | local_irq_save(flags); | ||
| 468 | if (start != old_start) { | ||
| 469 | cpu_dcache_wbinval_range(start, start + line_size); | ||
| 470 | cpu_l2cache_op(start, start + line_size, CCTL_CMD_L2_PA_WBINVAL); | ||
| 471 | } | ||
| 472 | if (end != old_end) { | ||
| 473 | cpu_dcache_wbinval_range(end - line_size, end); | ||
| 474 | cpu_l2cache_op(end - line_size, end, CCTL_CMD_L2_PA_WBINVAL); | ||
| 475 | } | ||
| 476 | cpu_dcache_inval_range(start, end); | ||
| 477 | cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_INVAL); | ||
| 478 | __nds32__msync_all(); | ||
| 479 | local_irq_restore(flags); | ||
| 480 | |||
| 481 | } | ||
| 482 | |||
| 483 | void cpu_dma_wbinval_range(unsigned long start, unsigned long end) | ||
| 484 | { | ||
| 485 | unsigned long line_size; | ||
| 486 | unsigned long flags; | ||
| 487 | line_size = L1_cache_info[DCACHE].line_size; | ||
| 488 | start = start & (~(line_size - 1)); | ||
| 489 | end = (end + line_size - 1) & (~(line_size - 1)); | ||
| 490 | if (unlikely(start == end)) | ||
| 491 | return; | ||
| 492 | |||
| 493 | local_irq_save(flags); | ||
| 494 | cpu_dcache_wbinval_range(start, end); | ||
| 495 | cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WBINVAL); | ||
| 496 | __nds32__msync_all(); | ||
| 497 | local_irq_restore(flags); | ||
| 498 | } | ||
| 499 | |||
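/*
 * Editor's sketch, not part of this patch (hypothetical names): how the
 * three cpu_dma_*_range() helpers above would typically be selected from a
 * DMA direction in the arch dma-mapping glue.
 */
#include <linux/dma-direction.h>

static void sketch_dma_sync(unsigned long start, unsigned long end,
			    enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:		/* CPU filled the buffer, device reads */
		cpu_dma_wb_range(start, end);
		break;
	case DMA_FROM_DEVICE:		/* device writes, CPU will read */
		cpu_dma_inval_range(start, end);
		break;
	default:			/* DMA_BIDIRECTIONAL */
		cpu_dma_wbinval_range(start, end);
		break;
	}
}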
| 500 | void cpu_proc_init(void) | ||
| 501 | { | ||
| 502 | } | ||
| 503 | |||
| 504 | void cpu_proc_fin(void) | ||
| 505 | { | ||
| 506 | } | ||
| 507 | |||
| 508 | void cpu_do_idle(void) | ||
| 509 | { | ||
| 510 | __nds32__standby_no_wake_grant(); | ||
| 511 | } | ||
| 512 | |||
| 513 | void cpu_reset(unsigned long reset) | ||
| 514 | { | ||
| 515 | u32 tmp; | ||
| 516 | GIE_DISABLE(); | ||
| 517 | tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL); | ||
| 518 | tmp &= ~(CACHE_CTL_mskIC_EN | CACHE_CTL_mskDC_EN); | ||
| 519 | __nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL); | ||
| 520 | cpu_dcache_wbinval_all(); | ||
| 521 | cpu_icache_inval_all(); | ||
| 522 | |||
| 523 | __asm__ __volatile__("jr.toff %0\n\t"::"r"(reset)); | ||
| 524 | } | ||
| 525 | |||
| 526 | void cpu_switch_mm(struct mm_struct *mm) | ||
| 527 | { | ||
| 528 | unsigned long cid; | ||
| 529 | cid = __nds32__mfsr(NDS32_SR_TLB_MISC); | ||
| 530 | cid = (cid & ~TLB_MISC_mskCID) | mm->context.id; | ||
| 531 | __nds32__mtsr_dsb(cid, NDS32_SR_TLB_MISC); | ||
| 532 | __nds32__mtsr_isb(__pa(mm->pgd), NDS32_SR_L1_PPTB); | ||
| 533 | } | ||
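cpu_switch_mm() above is the low-level half of a context switch: it folds the new mm's context id into TLB_MISC and points L1_PPTB at the physical address of the new page directory. A minimal sketch of how the arch switch_mm() glue (outside this hunk; names here are assumptions) would use it:

#include <linux/mm_types.h>

static inline void sketch_switch_mm(struct mm_struct *prev,
				    struct mm_struct *next)
{
	if (prev != next)
		cpu_switch_mm(next);	/* program new CID and L1 page-table base */
}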
diff --git a/arch/nds32/mm/tlb.c b/arch/nds32/mm/tlb.c new file mode 100644 index 000000000000..dd41f5e0712f --- /dev/null +++ b/arch/nds32/mm/tlb.c | |||
| @@ -0,0 +1,50 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/spinlock_types.h> | ||
| 5 | #include <linux/mm.h> | ||
| 6 | #include <linux/sched.h> | ||
| 7 | #include <asm/nds32.h> | ||
| 8 | #include <nds32_intrinsic.h> | ||
| 9 | |||
| 10 | unsigned int cpu_last_cid = { TLB_MISC_mskCID + (2 << TLB_MISC_offCID) }; | ||
| 11 | |||
| 12 | DEFINE_SPINLOCK(cid_lock); | ||
| 13 | |||
| 14 | void local_flush_tlb_range(struct vm_area_struct *vma, | ||
| 15 | unsigned long start, unsigned long end) | ||
| 16 | { | ||
| 17 | unsigned long flags, ocid, ncid; | ||
| 18 | |||
| 19 | if ((end - start) > 0x400000) { | ||
| 20 | __nds32__tlbop_flua(); | ||
| 21 | __nds32__isb(); | ||
| 22 | return; | ||
| 23 | } | ||
| 24 | |||
| 25 | spin_lock_irqsave(&cid_lock, flags); | ||
| 26 | ocid = __nds32__mfsr(NDS32_SR_TLB_MISC); | ||
| 27 | ncid = (ocid & ~TLB_MISC_mskCID) | vma->vm_mm->context.id; | ||
| 28 | __nds32__mtsr_dsb(ncid, NDS32_SR_TLB_MISC); | ||
| 29 | while (start < end) { | ||
| 30 | __nds32__tlbop_inv(start); | ||
| 31 | __nds32__isb(); | ||
| 32 | start += PAGE_SIZE; | ||
| 33 | } | ||
| 34 | __nds32__mtsr_dsb(ocid, NDS32_SR_TLB_MISC); | ||
| 35 | spin_unlock_irqrestore(&cid_lock, flags); | ||
| 36 | } | ||
| 37 | |||
| 38 | void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) | ||
| 39 | { | ||
| 40 | unsigned long flags, ocid, ncid; | ||
| 41 | |||
| 42 | spin_lock_irqsave(&cid_lock, flags); | ||
| 43 | ocid = __nds32__mfsr(NDS32_SR_TLB_MISC); | ||
| 44 | ncid = (ocid & ~TLB_MISC_mskCID) | vma->vm_mm->context.id; | ||
| 45 | __nds32__mtsr_dsb(ncid, NDS32_SR_TLB_MISC); | ||
| 46 | __nds32__tlbop_inv(addr); | ||
| 47 | __nds32__isb(); | ||
| 48 | __nds32__mtsr_dsb(ocid, NDS32_SR_TLB_MISC); | ||
| 49 | spin_unlock_irqrestore(&cid_lock, flags); | ||
| 50 | } | ||
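Both helpers above briefly switch TLB_MISC to the target mm's context id, invalidate the affected entries, then restore the previous id under cid_lock. A hedged sketch of a typical caller (hypothetical names, not from this series): after rewriting a single PTE, only that page's stale translation needs to be dropped, so a full TLB flush is avoided.

#include <linux/mm.h>
#include <asm/tlbflush.h>

static void sketch_replace_pte(struct vm_area_struct *vma, unsigned long addr,
			       pte_t *ptep, pte_t pteval)
{
	set_pte_at(vma->vm_mm, addr, ptep, pteval);	/* install the new mapping */
	local_flush_tlb_page(vma, addr);		/* drop the stale TLB entry */
}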
diff --git a/arch/nios2/include/asm/io.h b/arch/nios2/include/asm/io.h index ce072ba0f8dd..9010243077ab 100644 --- a/arch/nios2/include/asm/io.h +++ b/arch/nios2/include/asm/io.h | |||
| @@ -45,6 +45,7 @@ static inline void iounmap(void __iomem *addr) | |||
| 45 | __iounmap(addr); | 45 | __iounmap(addr); |
| 46 | } | 46 | } |
| 47 | 47 | ||
| 48 | #define ioremap_nocache ioremap_nocache | ||
| 48 | #define ioremap_wc ioremap_nocache | 49 | #define ioremap_wc ioremap_nocache |
| 49 | #define ioremap_wt ioremap_nocache | 50 | #define ioremap_wt ioremap_nocache |
| 50 | 51 | ||
diff --git a/arch/openrisc/include/asm/io.h b/arch/openrisc/include/asm/io.h index 7c691399da3f..6709b28a0221 100644 --- a/arch/openrisc/include/asm/io.h +++ b/arch/openrisc/include/asm/io.h | |||
| @@ -29,13 +29,14 @@ | |||
| 29 | #define PIO_OFFSET 0 | 29 | #define PIO_OFFSET 0 |
| 30 | #define PIO_MASK 0 | 30 | #define PIO_MASK 0 |
| 31 | 31 | ||
| 32 | #define ioremap_nocache ioremap_nocache | ||
| 32 | #include <asm-generic/io.h> | 33 | #include <asm-generic/io.h> |
| 33 | #include <asm/pgtable.h> | 34 | #include <asm/pgtable.h> |
| 34 | 35 | ||
| 35 | extern void __iomem *__ioremap(phys_addr_t offset, unsigned long size, | 36 | extern void __iomem *__ioremap(phys_addr_t offset, unsigned long size, |
| 36 | pgprot_t prot); | 37 | pgprot_t prot); |
| 37 | 38 | ||
| 38 | static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size) | 39 | static inline void __iomem *ioremap(phys_addr_t offset, size_t size) |
| 39 | { | 40 | { |
| 40 | return __ioremap(offset, size, PAGE_KERNEL); | 41 | return __ioremap(offset, size, PAGE_KERNEL); |
| 41 | } | 42 | } |
diff --git a/arch/sparc/include/asm/io_32.h b/arch/sparc/include/asm/io_32.h index cd51a89b393c..df2dc1784673 100644 --- a/arch/sparc/include/asm/io_32.h +++ b/arch/sparc/include/asm/io_32.h | |||
| @@ -127,12 +127,7 @@ static inline void sbus_memcpy_toio(volatile void __iomem *dst, | |||
| 127 | * Bus number may be embedded in the higher bits of the physical address. | 127 | * Bus number may be embedded in the higher bits of the physical address. |
| 128 | * This is why we have no bus number argument to ioremap(). | 128 | * This is why we have no bus number argument to ioremap(). |
| 129 | */ | 129 | */ |
| 130 | void __iomem *ioremap(unsigned long offset, unsigned long size); | ||
| 131 | #define ioremap_nocache(X,Y) ioremap((X),(Y)) | ||
| 132 | #define ioremap_wc(X,Y) ioremap((X),(Y)) | ||
| 133 | #define ioremap_wt(X,Y) ioremap((X),(Y)) | ||
| 134 | void iounmap(volatile void __iomem *addr); | 130 | void iounmap(volatile void __iomem *addr); |
| 135 | |||
| 136 | /* Create a virtual mapping cookie for an IO port range */ | 131 | /* Create a virtual mapping cookie for an IO port range */ |
| 137 | void __iomem *ioport_map(unsigned long port, unsigned int nr); | 132 | void __iomem *ioport_map(unsigned long port, unsigned int nr); |
| 138 | void ioport_unmap(void __iomem *); | 133 | void ioport_unmap(void __iomem *); |
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 7eeef80c02f7..3bcef9ce74df 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c | |||
| @@ -122,12 +122,12 @@ static void xres_free(struct xresource *xrp) { | |||
| 122 | * | 122 | * |
| 123 | * Bus type is always zero on IIep. | 123 | * Bus type is always zero on IIep. |
| 124 | */ | 124 | */ |
| 125 | void __iomem *ioremap(unsigned long offset, unsigned long size) | 125 | void __iomem *ioremap(phys_addr_t offset, size_t size) |
| 126 | { | 126 | { |
| 127 | char name[14]; | 127 | char name[14]; |
| 128 | 128 | ||
| 129 | sprintf(name, "phys_%08x", (u32)offset); | 129 | sprintf(name, "phys_%08x", (u32)offset); |
| 130 | return _sparc_alloc_io(0, offset, size, name); | 130 | return _sparc_alloc_io(0, (unsigned long)offset, size, name); |
| 131 | } | 131 | } |
| 132 | EXPORT_SYMBOL(ioremap); | 132 | EXPORT_SYMBOL(ioremap); |
| 133 | 133 | ||
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h index c38e5a732d86..acc5bb2cf1c7 100644 --- a/arch/xtensa/include/asm/io.h +++ b/arch/xtensa/include/asm/io.h | |||
| @@ -52,6 +52,7 @@ static inline void __iomem *ioremap_cache(unsigned long offset, | |||
| 52 | return xtensa_ioremap_cache(offset, size); | 52 | return xtensa_ioremap_cache(offset, size); |
| 53 | } | 53 | } |
| 54 | #define ioremap_cache ioremap_cache | 54 | #define ioremap_cache ioremap_cache |
| 55 | #define ioremap_nocache ioremap_nocache | ||
| 55 | 56 | ||
| 56 | #define ioremap_wc ioremap_nocache | 57 | #define ioremap_wc ioremap_nocache |
| 57 | #define ioremap_wt ioremap_nocache | 58 | #define ioremap_wt ioremap_nocache |
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index d2e5382821a4..14c9796e37ac 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
| @@ -592,4 +592,13 @@ config CLKSRC_ST_LPC | |||
| 592 | Enable this option to use the Low Power controller timer | 592 | Enable this option to use the Low Power controller timer |
| 593 | as clocksource. | 593 | as clocksource. |
| 594 | 594 | ||
| 595 | config ATCPIT100_TIMER | ||
| 596 | bool "ATCPIT100 timer driver" | ||
| 597 | depends on NDS32 || COMPILE_TEST | ||
| 598 | depends on HAS_IOMEM | ||
| 599 | select TIMER_OF | ||
| 600 | default NDS32 | ||
| 601 | help | ||
| 602 | This option enables support for the Andestech ATCPIT100 timers. | ||
| 603 | |||
| 595 | endmenu | 604 | endmenu |
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index d6dec4489d66..a79523b22e52 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile | |||
| @@ -76,3 +76,4 @@ obj-$(CONFIG_H8300_TMR16) += h8300_timer16.o | |||
| 76 | obj-$(CONFIG_H8300_TPU) += h8300_tpu.o | 76 | obj-$(CONFIG_H8300_TPU) += h8300_tpu.o |
| 77 | obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o | 77 | obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o |
| 78 | obj-$(CONFIG_X86_NUMACHIP) += numachip.o | 78 | obj-$(CONFIG_X86_NUMACHIP) += numachip.o |
| 79 | obj-$(CONFIG_ATCPIT100_TIMER) += timer-atcpit100.o | ||
diff --git a/drivers/clocksource/timer-atcpit100.c b/drivers/clocksource/timer-atcpit100.c new file mode 100644 index 000000000000..5e23d7b4a722 --- /dev/null +++ b/drivers/clocksource/timer-atcpit100.c | |||
| @@ -0,0 +1,266 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | /* | ||
| 4 | * Andestech ATCPIT100 Timer Device Driver Implementation | ||
| 5 | * Rick Chen, Andes Technology Corporation <rick@andestech.com> | ||
| 6 | * | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/irq.h> | ||
| 10 | #include <linux/clocksource.h> | ||
| 11 | #include <linux/clockchips.h> | ||
| 12 | #include <linux/interrupt.h> | ||
| 13 | #include <linux/ioport.h> | ||
| 14 | #include <linux/cpufreq.h> | ||
| 15 | #include <linux/sched.h> | ||
| 16 | #include <linux/sched_clock.h> | ||
| 17 | #include <linux/of_address.h> | ||
| 18 | #include <linux/of_irq.h> | ||
| 19 | #include <linux/of_platform.h> | ||
| 20 | #include "timer-of.h" | ||
| 21 | #ifdef CONFIG_NDS32 | ||
| 22 | #include <asm/vdso_timer_info.h> | ||
| 23 | #endif | ||
| 24 | |||
| 25 | /* | ||
| 26 | * Definition of register offsets | ||
| 27 | */ | ||
| 28 | |||
| 29 | /* ID and Revision Register */ | ||
| 30 | #define ID_REV 0x0 | ||
| 31 | |||
| 32 | /* Configuration Register */ | ||
| 33 | #define CFG 0x10 | ||
| 34 | |||
| 35 | /* Interrupt Enable Register */ | ||
| 36 | #define INT_EN 0x14 | ||
| 37 | #define CH_INT_EN(c, i) ((1 << (i)) << (4 * (c))) | ||
| 38 | #define CH0INT0EN 0x01 | ||
| 39 | |||
| 40 | /* Interrupt Status Register */ | ||
| 41 | #define INT_STA 0x18 | ||
| 42 | #define CH0INT0 0x01 | ||
| 43 | |||
| 44 | /* Channel Enable Register */ | ||
| 45 | #define CH_EN 0x1C | ||
| 46 | #define CH0TMR0EN 0x1 | ||
| 47 | #define CH1TMR0EN 0x10 | ||
| 48 | |||
| 49 | /* Channel 0, 1 Control Register */ | ||
| 50 | #define CH0_CTL (0x20) | ||
| 51 | #define CH1_CTL (0x20 + 0x10) | ||
| 52 | |||
| 53 | /* Channel clock source, bit 3: 0 = external clock, 1 = APB clock */ | ||
| 54 | #define APB_CLK BIT(3) | ||
| 55 | |||
| 56 | /* Channel mode, bits 0~2 */ | ||
| 57 | #define TMR_32 0x1 | ||
| 58 | #define TMR_16 0x2 | ||
| 59 | #define TMR_8 0x3 | ||
| 60 | |||
| 61 | /* Channel 0, 1 Reload Register */ | ||
| 62 | #define CH0_REL (0x24) | ||
| 63 | #define CH1_REL (0x24 + 0x10) | ||
| 64 | |||
| 65 | /* Channel 0, 1 Counter Register */ | ||
| 66 | #define CH0_CNT (0x28) | ||
| 67 | #define CH1_CNT (0x28 + 0x10) | ||
| 68 | |||
| 69 | #define TIMER_SYNC_TICKS 3 | ||
| 70 | |||
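/*
 * Editor's note on the layout above (illustration, not driver code): each
 * channel owns one nibble of INT_EN, so channel c / timer i maps to bit
 * (4 * c + i); e.g. CH_INT_EN(0, 0) == CH0INT0EN == 0x01. The channel
 * enable bits in CH_EN follow the same nibble layout (CH0TMR0EN == 0x1,
 * CH1TMR0EN == 0x10).
 */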
| 71 | static void atcpit100_ch1_tmr0_en(void __iomem *base) | ||
| 72 | { | ||
| 73 | writel(~0, base + CH1_REL); | ||
| 74 | writel(APB_CLK|TMR_32, base + CH1_CTL); | ||
| 75 | } | ||
| 76 | |||
| 77 | static void atcpit100_ch0_tmr0_en(void __iomem *base) | ||
| 78 | { | ||
| 79 | writel(APB_CLK|TMR_32, base + CH0_CTL); | ||
| 80 | } | ||
| 81 | |||
| 82 | static void atcpit100_clkevt_time_setup(void __iomem *base, unsigned long delay) | ||
| 83 | { | ||
| 84 | writel(delay, base + CH0_CNT); | ||
| 85 | writel(delay, base + CH0_REL); | ||
| 86 | } | ||
| 87 | |||
| 88 | static void atcpit100_timer_clear_interrupt(void __iomem *base) | ||
| 89 | { | ||
| 90 | u32 val; | ||
| 91 | |||
| 92 | val = readl(base + INT_STA); | ||
| 93 | writel(val | CH0INT0, base + INT_STA); | ||
| 94 | } | ||
| 95 | |||
| 96 | static void atcpit100_clocksource_start(void __iomem *base) | ||
| 97 | { | ||
| 98 | u32 val; | ||
| 99 | |||
| 100 | val = readl(base + CH_EN); | ||
| 101 | writel(val | CH1TMR0EN, base + CH_EN); | ||
| 102 | } | ||
| 103 | |||
| 104 | static void atcpit100_clkevt_time_start(void __iomem *base) | ||
| 105 | { | ||
| 106 | u32 val; | ||
| 107 | |||
| 108 | val = readl(base + CH_EN); | ||
| 109 | writel(val | CH0TMR0EN, base + CH_EN); | ||
| 110 | } | ||
| 111 | |||
| 112 | static void atcpit100_clkevt_time_stop(void __iomem *base) | ||
| 113 | { | ||
| 114 | u32 val; | ||
| 115 | |||
| 116 | atcpit100_timer_clear_interrupt(base); | ||
| 117 | val = readl(base + CH_EN); | ||
| 118 | writel(val & ~CH0TMR0EN, base + CH_EN); | ||
| 119 | } | ||
| 120 | |||
| 121 | static int atcpit100_clkevt_next_event(unsigned long evt, | ||
| 122 | struct clock_event_device *clkevt) | ||
| 123 | { | ||
| 124 | u32 val; | ||
| 125 | struct timer_of *to = to_timer_of(clkevt); | ||
| 126 | |||
| 127 | val = readl(timer_of_base(to) + CH_EN); | ||
| 128 | writel(val & ~CH0TMR0EN, timer_of_base(to) + CH_EN); | ||
| 129 | writel(evt, timer_of_base(to) + CH0_REL); | ||
| 130 | writel(val | CH0TMR0EN, timer_of_base(to) + CH_EN); | ||
| 131 | |||
| 132 | return 0; | ||
| 133 | } | ||
| 134 | |||
| 135 | static int atcpit100_clkevt_set_periodic(struct clock_event_device *evt) | ||
| 136 | { | ||
| 137 | struct timer_of *to = to_timer_of(evt); | ||
| 138 | |||
| 139 | atcpit100_clkevt_time_setup(timer_of_base(to), timer_of_period(to)); | ||
| 140 | atcpit100_clkevt_time_start(timer_of_base(to)); | ||
| 141 | |||
| 142 | return 0; | ||
| 143 | } | ||
| 144 | static int atcpit100_clkevt_shutdown(struct clock_event_device *evt) | ||
| 145 | { | ||
| 146 | struct timer_of *to = to_timer_of(evt); | ||
| 147 | |||
| 148 | atcpit100_clkevt_time_stop(timer_of_base(to)); | ||
| 149 | |||
| 150 | return 0; | ||
| 151 | } | ||
| 152 | static int atcpit100_clkevt_set_oneshot(struct clock_event_device *evt) | ||
| 153 | { | ||
| 154 | struct timer_of *to = to_timer_of(evt); | ||
| 155 | u32 val; | ||
| 156 | |||
| 157 | writel(~0x0, timer_of_base(to) + CH0_REL); | ||
| 158 | val = readl(timer_of_base(to) + CH_EN); | ||
| 159 | writel(val | CH0TMR0EN, timer_of_base(to) + CH_EN); | ||
| 160 | |||
| 161 | return 0; | ||
| 162 | } | ||
| 163 | |||
| 164 | static irqreturn_t atcpit100_timer_interrupt(int irq, void *dev_id) | ||
| 165 | { | ||
| 166 | struct clock_event_device *evt = (struct clock_event_device *)dev_id; | ||
| 167 | struct timer_of *to = to_timer_of(evt); | ||
| 168 | |||
| 169 | atcpit100_timer_clear_interrupt(timer_of_base(to)); | ||
| 170 | |||
| 171 | evt->event_handler(evt); | ||
| 172 | |||
| 173 | return IRQ_HANDLED; | ||
| 174 | } | ||
| 175 | |||
| 176 | static struct timer_of to = { | ||
| 177 | .flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE, | ||
| 178 | |||
| 179 | .clkevt = { | ||
| 180 | .name = "atcpit100_tick", | ||
| 181 | .rating = 300, | ||
| 182 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | ||
| 183 | .set_state_shutdown = atcpit100_clkevt_shutdown, | ||
| 184 | .set_state_periodic = atcpit100_clkevt_set_periodic, | ||
| 185 | .set_state_oneshot = atcpit100_clkevt_set_oneshot, | ||
| 186 | .tick_resume = atcpit100_clkevt_shutdown, | ||
| 187 | .set_next_event = atcpit100_clkevt_next_event, | ||
| 188 | .cpumask = cpu_all_mask, | ||
| 189 | }, | ||
| 190 | |||
| 191 | .of_irq = { | ||
| 192 | .handler = atcpit100_timer_interrupt, | ||
| 193 | .flags = IRQF_TIMER | IRQF_IRQPOLL, | ||
| 194 | }, | ||
| 195 | |||
| 196 | /* | ||
| 197 | * FIXME: only clocking from PCLK is currently supported; | ||
| 198 | * EXTCLK is not yet supported by the driver. | ||
| 199 | */ | ||
| 200 | .of_clk = { | ||
| 201 | .name = "PCLK", | ||
| 202 | } | ||
| 203 | }; | ||
| 204 | |||
| 205 | static u64 notrace atcpit100_timer_sched_read(void) | ||
| 206 | { | ||
| 207 | return ~readl(timer_of_base(&to) + CH1_CNT); | ||
| 208 | } | ||
| 209 | |||
| 210 | #ifdef CONFIG_NDS32 | ||
| 211 | static void fill_vdso_need_info(struct device_node *node) | ||
| 212 | { | ||
| 213 | struct resource timer_res; | ||
| 214 | of_address_to_resource(node, 0, &timer_res); | ||
| 215 | timer_info.mapping_base = (unsigned long)timer_res.start; | ||
| 216 | timer_info.cycle_count_down = true; | ||
| 217 | timer_info.cycle_count_reg_offset = CH1_CNT; | ||
| 218 | } | ||
| 219 | #endif | ||
| 220 | |||
| 221 | static int __init atcpit100_timer_init(struct device_node *node) | ||
| 222 | { | ||
| 223 | int ret; | ||
| 224 | u32 val; | ||
| 225 | void __iomem *base; | ||
| 226 | |||
| 227 | ret = timer_of_init(node, &to); | ||
| 228 | if (ret) | ||
| 229 | return ret; | ||
| 230 | |||
| 231 | base = timer_of_base(&to); | ||
| 232 | |||
| 233 | sched_clock_register(atcpit100_timer_sched_read, 32, | ||
| 234 | timer_of_rate(&to)); | ||
| 235 | |||
| 236 | ret = clocksource_mmio_init(base + CH1_CNT, | ||
| 237 | node->name, timer_of_rate(&to), 300, 32, | ||
| 238 | clocksource_mmio_readl_down); | ||
| 239 | |||
| 240 | if (ret) { | ||
| 241 | pr_err("Failed to register clocksource\n"); | ||
| 242 | return ret; | ||
| 243 | } | ||
| 244 | |||
| 245 | /* clear channel 0 timer0 interrupt */ | ||
| 246 | atcpit100_timer_clear_interrupt(base); | ||
| 247 | |||
| 248 | clockevents_config_and_register(&to.clkevt, timer_of_rate(&to), | ||
| 249 | TIMER_SYNC_TICKS, 0xffffffff); | ||
| 250 | atcpit100_ch0_tmr0_en(base); | ||
| 251 | atcpit100_ch1_tmr0_en(base); | ||
| 252 | atcpit100_clocksource_start(base); | ||
| 253 | atcpit100_clkevt_time_start(base); | ||
| 254 | |||
| 255 | /* Enable channel 0 timer0 interrupt */ | ||
| 256 | val = readl(base + INT_EN); | ||
| 257 | writel(val | CH0INT0EN, base + INT_EN); | ||
| 258 | |||
| 259 | #ifdef CONFIG_NDS32 | ||
| 260 | fill_vdso_need_info(node); | ||
| 261 | #endif | ||
| 262 | |||
| 263 | return ret; | ||
| 264 | } | ||
| 265 | |||
| 266 | TIMER_OF_DECLARE(atcpit100, "andestech,atcpit100", atcpit100_timer_init); | ||
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index d27e3e3619e0..de7cf483e1b7 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile | |||
| @@ -85,3 +85,4 @@ obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o | |||
| 85 | obj-$(CONFIG_ARCH_SYNQUACER) += irq-sni-exiu.o | 85 | obj-$(CONFIG_ARCH_SYNQUACER) += irq-sni-exiu.o |
| 86 | obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o | 86 | obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o |
| 87 | obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o | 87 | obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o |
| 88 | obj-$(CONFIG_NDS32) += irq-ativic32.o | ||
diff --git a/drivers/irqchip/irq-ativic32.c b/drivers/irqchip/irq-ativic32.c new file mode 100644 index 000000000000..f69a8588521c --- /dev/null +++ b/drivers/irqchip/irq-ativic32.c | |||
| @@ -0,0 +1,107 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | ||
| 3 | |||
| 4 | #include <linux/irq.h> | ||
| 5 | #include <linux/of.h> | ||
| 6 | #include <linux/of_irq.h> | ||
| 7 | #include <linux/of_address.h> | ||
| 8 | #include <linux/interrupt.h> | ||
| 9 | #include <linux/irqdomain.h> | ||
| 10 | #include <linux/irqchip.h> | ||
| 11 | #include <nds32_intrinsic.h> | ||
| 12 | |||
| 13 | static void ativic32_ack_irq(struct irq_data *data) | ||
| 14 | { | ||
| 15 | __nds32__mtsr_dsb(BIT(data->hwirq), NDS32_SR_INT_PEND2); | ||
| 16 | } | ||
| 17 | |||
| 18 | static void ativic32_mask_irq(struct irq_data *data) | ||
| 19 | { | ||
| 20 | unsigned long int_mask2 = __nds32__mfsr(NDS32_SR_INT_MASK2); | ||
| 21 | __nds32__mtsr_dsb(int_mask2 & (~(BIT(data->hwirq))), NDS32_SR_INT_MASK2); | ||
| 22 | } | ||
| 23 | |||
| 24 | static void ativic32_unmask_irq(struct irq_data *data) | ||
| 25 | { | ||
| 26 | unsigned long int_mask2 = __nds32__mfsr(NDS32_SR_INT_MASK2); | ||
| 27 | __nds32__mtsr_dsb(int_mask2 | (BIT(data->hwirq)), NDS32_SR_INT_MASK2); | ||
| 28 | } | ||
| 29 | |||
| 30 | static struct irq_chip ativic32_chip = { | ||
| 31 | .name = "ativic32", | ||
| 32 | .irq_ack = ativic32_ack_irq, | ||
| 33 | .irq_mask = ativic32_mask_irq, | ||
| 34 | .irq_unmask = ativic32_unmask_irq, | ||
| 35 | }; | ||
| 36 | |||
| 37 | static unsigned int __initdata nivic_map[6] = { 6, 2, 10, 16, 24, 32 }; | ||
| 38 | |||
| 39 | static struct irq_domain *root_domain; | ||
| 40 | static int ativic32_irq_domain_map(struct irq_domain *id, unsigned int virq, | ||
| 41 | irq_hw_number_t hw) | ||
| 42 | { | ||
| 43 | |||
| 44 | unsigned long int_trigger_type; | ||
| 45 | u32 type; | ||
| 46 | struct irq_data *irq_data; | ||
| 47 | int_trigger_type = __nds32__mfsr(NDS32_SR_INT_TRIGGER); | ||
| 48 | irq_data = irq_get_irq_data(virq); | ||
| 49 | if (!irq_data) | ||
| 50 | return -EINVAL; | ||
| 51 | |||
| 52 | if (int_trigger_type & (BIT(hw))) { | ||
| 53 | irq_set_chip_and_handler(virq, &ativic32_chip, handle_edge_irq); | ||
| 54 | type = IRQ_TYPE_EDGE_RISING; | ||
| 55 | } else { | ||
| 56 | irq_set_chip_and_handler(virq, &ativic32_chip, handle_level_irq); | ||
| 57 | type = IRQ_TYPE_LEVEL_HIGH; | ||
| 58 | } | ||
| 59 | |||
| 60 | irqd_set_trigger_type(irq_data, type); | ||
| 61 | return 0; | ||
| 62 | } | ||
| 63 | |||
| 64 | static struct irq_domain_ops ativic32_ops = { | ||
| 65 | .map = ativic32_irq_domain_map, | ||
| 66 | .xlate = irq_domain_xlate_onecell | ||
| 67 | }; | ||
| 68 | |||
| 69 | static irq_hw_number_t get_intr_src(void) | ||
| 70 | { | ||
| 71 | return ((__nds32__mfsr(NDS32_SR_ITYPE) & ITYPE_mskVECTOR) >> ITYPE_offVECTOR) | ||
| 72 | - NDS32_VECTOR_offINTERRUPT; | ||
| 73 | } | ||
| 74 | |||
| 75 | asmlinkage void asm_do_IRQ(struct pt_regs *regs) | ||
| 76 | { | ||
| 77 | irq_hw_number_t hwirq = get_intr_src(); | ||
| 78 | handle_domain_irq(root_domain, hwirq, regs); | ||
| 79 | } | ||
| 80 | |||
| 81 | int __init ativic32_init_irq(struct device_node *node, struct device_node *parent) | ||
| 82 | { | ||
| 83 | unsigned long int_vec_base, nivic, nr_ints; | ||
| 84 | |||
| 85 | if (WARN(parent, "non-root ativic32 is not supported")) | ||
| 86 | return -EINVAL; | ||
| 87 | |||
| 88 | int_vec_base = __nds32__mfsr(NDS32_SR_IVB); | ||
| 89 | |||
| 90 | if (((int_vec_base & IVB_mskIVIC_VER) >> IVB_offIVIC_VER) == 0) | ||
| 91 | panic("Unable to use ativic32 for this cpu.\n"); | ||
| 92 | |||
| 93 | nivic = (int_vec_base & IVB_mskNIVIC) >> IVB_offNIVIC; | ||
| 94 | if (nivic >= ARRAY_SIZE(nivic_map)) | ||
| 95 | panic("The number of inputs for ativic32 is not supported.\n"); | ||
| 96 | |||
| 97 | nr_ints = nivic_map[nivic]; | ||
| 98 | |||
| 99 | root_domain = irq_domain_add_linear(node, nr_ints, | ||
| 100 | &ativic32_ops, NULL); | ||
| 101 | |||
| 102 | if (!root_domain) | ||
| 103 | panic("%s: unable to create IRQ domain\n", node->full_name); | ||
| 104 | |||
| 105 | return 0; | ||
| 106 | } | ||
| 107 | IRQCHIP_DECLARE(ativic32, "andestech,ativic32", ativic32_init_irq); | ||
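The domain uses irq_domain_xlate_onecell, matching the single-cell ativic32 binding, so a consumer's lone interrupt cell becomes the hwirq directly. A rough sketch of a consumer driver (hypothetical names, not part of this series):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t sketch_isr(int irq, void *dev_id)
{
	/* device-specific acknowledge would go here */
	return IRQ_HANDLED;
}

static int sketch_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;
	return devm_request_irq(&pdev->dev, irq, sketch_isr, 0,
				dev_name(&pdev->dev), pdev);
}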
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig index 040c7f163325..0fb8df656677 100644 --- a/drivers/net/ethernet/faraday/Kconfig +++ b/drivers/net/ethernet/faraday/Kconfig | |||
| @@ -5,7 +5,7 @@ | |||
| 5 | config NET_VENDOR_FARADAY | 5 | config NET_VENDOR_FARADAY |
| 6 | bool "Faraday devices" | 6 | bool "Faraday devices" |
| 7 | default y | 7 | default y |
| 8 | depends on ARM | 8 | depends on ARM || NDS32 || COMPILE_TEST |
| 9 | ---help--- | 9 | ---help--- |
| 10 | If you have a network (Ethernet) card belonging to this class, say Y. | 10 | If you have a network (Ethernet) card belonging to this class, say Y. |
| 11 | 11 | ||
| @@ -18,7 +18,8 @@ if NET_VENDOR_FARADAY | |||
| 18 | 18 | ||
| 19 | config FTMAC100 | 19 | config FTMAC100 |
| 20 | tristate "Faraday FTMAC100 10/100 Ethernet support" | 20 | tristate "Faraday FTMAC100 10/100 Ethernet support" |
| 21 | depends on ARM | 21 | depends on ARM || NDS32 || COMPILE_TEST |
| 22 | depends on !64BIT || BROKEN | ||
| 22 | select MII | 23 | select MII |
| 23 | ---help--- | 24 | ---help--- |
| 24 | This driver supports the FTMAC100 10/100 Ethernet controller | 25 | This driver supports the FTMAC100 10/100 Ethernet controller |
| @@ -27,7 +28,8 @@ config FTMAC100 | |||
| 27 | 28 | ||
| 28 | config FTGMAC100 | 29 | config FTGMAC100 |
| 29 | tristate "Faraday FTGMAC100 Gigabit Ethernet support" | 30 | tristate "Faraday FTGMAC100 Gigabit Ethernet support" |
| 30 | depends on ARM | 31 | depends on ARM || NDS32 || COMPILE_TEST |
| 32 | depends on !64BIT || BROKEN | ||
| 31 | select PHYLIB | 33 | select PHYLIB |
| 32 | ---help--- | 34 | ---help--- |
| 33 | This driver supports the FTGMAC100 Gigabit Ethernet controller | 35 | This driver supports the FTGMAC100 Gigabit Ethernet controller |
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index 7f1f1fbcef9e..27bb893cf6b2 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig | |||
| @@ -9,7 +9,7 @@ config VGA_CONSOLE | |||
| 9 | depends on !4xx && !PPC_8xx && !SPARC && !M68K && !PARISC && !FRV && \ | 9 | depends on !4xx && !PPC_8xx && !SPARC && !M68K && !PARISC && !FRV && \ |
| 10 | !SUPERH && !BLACKFIN && !AVR32 && !MN10300 && !CRIS && \ | 10 | !SUPERH && !BLACKFIN && !AVR32 && !MN10300 && !CRIS && \ |
| 11 | (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && \ | 11 | (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && \ |
| 12 | !ARM64 && !ARC && !MICROBLAZE && !OPENRISC | 12 | !ARM64 && !ARC && !MICROBLAZE && !OPENRISC && !NDS32 |
| 13 | default y | 13 | default y |
| 14 | help | 14 | help |
| 15 | Saying Y here will allow you to use Linux in text mode through a | 15 | Saying Y here will allow you to use Linux in text mode through a |
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index b4531e3b2120..7c6a39e64749 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h | |||
| @@ -852,7 +852,16 @@ static inline void __iomem *__ioremap(phys_addr_t offset, size_t size, | |||
| 852 | } | 852 | } |
| 853 | #endif | 853 | #endif |
| 854 | 854 | ||
| 855 | #ifndef iounmap | ||
| 856 | #define iounmap iounmap | ||
| 857 | |||
| 858 | static inline void iounmap(void __iomem *addr) | ||
| 859 | { | ||
| 860 | } | ||
| 861 | #endif | ||
| 862 | #endif /* CONFIG_MMU */ | ||
| 855 | #ifndef ioremap_nocache | 863 | #ifndef ioremap_nocache |
| 864 | void __iomem *ioremap(phys_addr_t phys_addr, size_t size); | ||
| 856 | #define ioremap_nocache ioremap_nocache | 865 | #define ioremap_nocache ioremap_nocache |
| 857 | static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size) | 866 | static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size) |
| 858 | { | 867 | { |
| @@ -884,15 +893,6 @@ static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size) | |||
| 884 | } | 893 | } |
| 885 | #endif | 894 | #endif |
| 886 | 895 | ||
| 887 | #ifndef iounmap | ||
| 888 | #define iounmap iounmap | ||
| 889 | |||
| 890 | static inline void iounmap(void __iomem *addr) | ||
| 891 | { | ||
| 892 | } | ||
| 893 | #endif | ||
| 894 | #endif /* CONFIG_MMU */ | ||
| 895 | |||
| 896 | #ifdef CONFIG_HAS_IOPORT_MAP | 896 | #ifdef CONFIG_HAS_IOPORT_MAP |
| 897 | #ifndef CONFIG_GENERIC_IOMAP | 897 | #ifndef CONFIG_GENERIC_IOMAP |
| 898 | #ifndef ioport_map | 898 | #ifndef ioport_map |
