 arch/tile/Kconfig                   | 12
 arch/tile/Makefile                  |  2
 arch/tile/include/arch/spr_def.h    | 85
 arch/tile/include/arch/spr_def_32.h | 39
 arch/tile/include/asm/irqflags.h    | 64
 arch/tile/include/asm/page.h        | 27
 arch/tile/include/asm/processor.h   | 11
 arch/tile/include/asm/ptrace.h      |  4
 arch/tile/include/asm/system.h      |  2
 arch/tile/include/hv/hypervisor.h   | 30
 arch/tile/kernel/entry.S            | 12
 arch/tile/kernel/head_32.S          |  5
 arch/tile/kernel/intvec_32.S        | 67
 arch/tile/kernel/irq.c              | 16
 arch/tile/kernel/messaging.c        |  2
 arch/tile/kernel/process.c          | 12
 arch/tile/kernel/regs_32.S          |  2
 arch/tile/kernel/setup.c            | 28
 arch/tile/kernel/smp.c              |  2
 arch/tile/kernel/traps.c            |  2
 arch/tile/kvm/Kconfig               | 38
 arch/tile/mm/fault.c                |  6
 arch/tile/mm/init.c                 |  2
 23 files changed, 338 insertions(+), 132 deletions(-)
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 2361e3e80bd5..89cfee07efa9 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -96,6 +96,7 @@ config HVC_TILE
 
 config TILE
 	def_bool y
+	select HAVE_KVM if !TILEGX
 	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_FIND_NEXT_BIT
 	select USE_GENERIC_SMP_HELPERS
@@ -314,6 +315,15 @@ config HARDWALL
 	bool "Hardwall support to allow access to user dynamic network"
 	default y
 
+config KERNEL_PL
+	int "Processor protection level for kernel"
+	range 1 2
+	default "1"
+	---help---
+	  This setting determines the processor protection level the
+	  kernel will be built to run at.  Generally you should use
+	  the default value here.
+
 endmenu  # Tilera-specific configuration
 
 menu "Bus options"
@@ -354,3 +364,5 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+source "arch/tile/kvm/Kconfig"
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
index a700f7f6cf5b..17acce70569b 100644
--- a/arch/tile/Makefile
+++ b/arch/tile/Makefile
@@ -53,6 +53,8 @@ libs-y += $(LIBGCC_PATH)
 # See arch/tile/Kbuild for content of core part of the kernel
 core-y += arch/tile/
 
+core-$(CONFIG_KVM) += arch/tile/kvm/
+
 ifdef TILERA_ROOT
 INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot
 endif
diff --git a/arch/tile/include/arch/spr_def.h b/arch/tile/include/arch/spr_def.h
index c8fdbd9a45e6..442fcba0d122 100644
--- a/arch/tile/include/arch/spr_def.h
+++ b/arch/tile/include/arch/spr_def.h
@@ -12,8 +12,93 @@
  * more details.
  */
 
+/*
+ * In addition to including the proper base SPR definition file, depending
+ * on machine architecture, this file defines several macros which allow
+ * kernel code to use protection-level dependent SPRs without worrying
+ * about which PL it's running at.  In these macros, the PL that the SPR
+ * or interrupt number applies to is replaced by K.
+ */
+
+#if CONFIG_KERNEL_PL != 1 && CONFIG_KERNEL_PL != 2
+#error CONFIG_KERNEL_PL must be 1 or 2
+#endif
+
+/* Concatenate 4 strings. */
+#define __concat4(a, b, c, d) a ## b ## c ## d
+#define _concat4(a, b, c, d)  __concat4(a, b, c, d)
+
 #ifdef __tilegx__
 #include <arch/spr_def_64.h>
+
+/* TILE-Gx dependent, protection-level dependent SPRs. */
+
+#define SPR_INTERRUPT_MASK_K \
+	_concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL,,)
+#define SPR_INTERRUPT_MASK_SET_K \
+	_concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define SPR_INTERRUPT_MASK_RESET_K \
+	_concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL,,)
+#define SPR_INTERRUPT_VECTOR_BASE_K \
+	_concat4(SPR_INTERRUPT_VECTOR_BASE_, CONFIG_KERNEL_PL,,)
+
+#define SPR_IPI_MASK_K \
+	_concat4(SPR_IPI_MASK_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_MASK_RESET_K \
+	_concat4(SPR_IPI_MASK_RESET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_MASK_SET_K \
+	_concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_EVENT_K \
+	_concat4(SPR_IPI_EVENT_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_EVENT_RESET_K \
+	_concat4(SPR_IPI_EVENT_RESET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_MASK_SET_K \
+	_concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define INT_IPI_K \
+	_concat4(INT_IPI_, CONFIG_KERNEL_PL,,)
+
+#define SPR_SINGLE_STEP_CONTROL_K \
+	_concat4(SPR_SINGLE_STEP_CONTROL_, CONFIG_KERNEL_PL,,)
+#define SPR_SINGLE_STEP_EN_K_K \
+	_concat4(SPR_SINGLE_STEP_EN_, CONFIG_KERNEL_PL, _, CONFIG_KERNEL_PL)
+#define INT_SINGLE_STEP_K \
+	_concat4(INT_SINGLE_STEP_, CONFIG_KERNEL_PL,,)
+
 #else
 #include <arch/spr_def_32.h>
+
+/* TILEPro dependent, protection-level dependent SPRs. */
+
+#define SPR_INTERRUPT_MASK_K_0 \
+	_concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL, _0,)
+#define SPR_INTERRUPT_MASK_K_1 \
+	_concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL, _1,)
+#define SPR_INTERRUPT_MASK_SET_K_0 \
+	_concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL, _0,)
+#define SPR_INTERRUPT_MASK_SET_K_1 \
+	_concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL, _1,)
+#define SPR_INTERRUPT_MASK_RESET_K_0 \
+	_concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL, _0,)
+#define SPR_INTERRUPT_MASK_RESET_K_1 \
+	_concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL, _1,)
+
 #endif
+
+/* Generic protection-level dependent SPRs. */
+
+#define SPR_SYSTEM_SAVE_K_0 \
+	_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _0,)
+#define SPR_SYSTEM_SAVE_K_1 \
+	_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _1,)
+#define SPR_SYSTEM_SAVE_K_2 \
+	_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _2,)
+#define SPR_SYSTEM_SAVE_K_3 \
+	_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _3,)
+#define SPR_EX_CONTEXT_K_0 \
+	_concat4(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _0,)
+#define SPR_EX_CONTEXT_K_1 \
+	_concat4(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _1,)
+#define SPR_INTCTRL_K_STATUS \
+	_concat4(SPR_INTCTRL_, CONFIG_KERNEL_PL, _STATUS,)
+#define INT_INTCTRL_K \
+	_concat4(INT_INTCTRL_, CONFIG_KERNEL_PL,,)
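
The _concat4()/__concat4() pair above is the usual two-level C preprocessor idiom: ## pastes tokens before argument macros are expanded, so the wrapper layer is needed to turn CONFIG_KERNEL_PL into its numeric value first. A standalone sketch of the expansion (hosted C with a hypothetical CONFIG_KERNEL_PL value; not kernel code):

	#include <stdio.h>

	#define CONFIG_KERNEL_PL 2	/* hypothetical; normally comes from Kconfig */

	/* Same idiom as the patch: _concat4() expands its arguments
	 * (CONFIG_KERNEL_PL -> 2) before __concat4() pastes them. */
	#define __concat4(a, b, c, d) a ## b ## c ## d
	#define _concat4(a, b, c, d)  __concat4(a, b, c, d)

	#define SPR_SYSTEM_SAVE_2_0 0x4700	/* value from spr_def_32.h below */
	#define SPR_SYSTEM_SAVE_K_0 \
		_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _0,)

	int main(void)
	{
		/* Pastes to SPR_SYSTEM_SAVE_2_0, i.e. 0x4700. */
		printf("SPR_SYSTEM_SAVE_K_0 = %#x\n", SPR_SYSTEM_SAVE_K_0);
		return 0;
	}

Calling __concat4() directly would paste the literal token CONFIG_KERNEL_PL rather than its value, which is exactly what the extra indirection avoids.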
diff --git a/arch/tile/include/arch/spr_def_32.h b/arch/tile/include/arch/spr_def_32.h
index b4fc06864df6..bbc1f4c924ee 100644
--- a/arch/tile/include/arch/spr_def_32.h
+++ b/arch/tile/include/arch/spr_def_32.h
@@ -56,58 +56,93 @@
 #define SPR_EX_CONTEXT_1_1__ICS_SHIFT 2
 #define SPR_EX_CONTEXT_1_1__ICS_RMASK 0x1
 #define SPR_EX_CONTEXT_1_1__ICS_MASK 0x4
+#define SPR_EX_CONTEXT_2_0 0x4605
+#define SPR_EX_CONTEXT_2_1 0x4606
+#define SPR_EX_CONTEXT_2_1__PL_SHIFT 0
+#define SPR_EX_CONTEXT_2_1__PL_RMASK 0x3
+#define SPR_EX_CONTEXT_2_1__PL_MASK 0x3
+#define SPR_EX_CONTEXT_2_1__ICS_SHIFT 2
+#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
+#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
 #define SPR_FAIL 0x4e09
 #define SPR_INTCTRL_0_STATUS 0x4a07
 #define SPR_INTCTRL_1_STATUS 0x4807
+#define SPR_INTCTRL_2_STATUS 0x4607
 #define SPR_INTERRUPT_CRITICAL_SECTION 0x4e0a
 #define SPR_INTERRUPT_MASK_0_0 0x4a08
 #define SPR_INTERRUPT_MASK_0_1 0x4a09
 #define SPR_INTERRUPT_MASK_1_0 0x4809
 #define SPR_INTERRUPT_MASK_1_1 0x480a
+#define SPR_INTERRUPT_MASK_2_0 0x4608
+#define SPR_INTERRUPT_MASK_2_1 0x4609
 #define SPR_INTERRUPT_MASK_RESET_0_0 0x4a0a
 #define SPR_INTERRUPT_MASK_RESET_0_1 0x4a0b
 #define SPR_INTERRUPT_MASK_RESET_1_0 0x480b
 #define SPR_INTERRUPT_MASK_RESET_1_1 0x480c
+#define SPR_INTERRUPT_MASK_RESET_2_0 0x460a
+#define SPR_INTERRUPT_MASK_RESET_2_1 0x460b
 #define SPR_INTERRUPT_MASK_SET_0_0 0x4a0c
 #define SPR_INTERRUPT_MASK_SET_0_1 0x4a0d
 #define SPR_INTERRUPT_MASK_SET_1_0 0x480d
 #define SPR_INTERRUPT_MASK_SET_1_1 0x480e
+#define SPR_INTERRUPT_MASK_SET_2_0 0x460c
+#define SPR_INTERRUPT_MASK_SET_2_1 0x460d
 #define SPR_MPL_DMA_CPL_SET_0 0x5800
 #define SPR_MPL_DMA_CPL_SET_1 0x5801
+#define SPR_MPL_DMA_CPL_SET_2 0x5802
 #define SPR_MPL_DMA_NOTIFY_SET_0 0x3800
 #define SPR_MPL_DMA_NOTIFY_SET_1 0x3801
+#define SPR_MPL_DMA_NOTIFY_SET_2 0x3802
 #define SPR_MPL_INTCTRL_0_SET_0 0x4a00
 #define SPR_MPL_INTCTRL_0_SET_1 0x4a01
+#define SPR_MPL_INTCTRL_0_SET_2 0x4a02
 #define SPR_MPL_INTCTRL_1_SET_0 0x4800
 #define SPR_MPL_INTCTRL_1_SET_1 0x4801
+#define SPR_MPL_INTCTRL_1_SET_2 0x4802
+#define SPR_MPL_INTCTRL_2_SET_0 0x4600
+#define SPR_MPL_INTCTRL_2_SET_1 0x4601
+#define SPR_MPL_INTCTRL_2_SET_2 0x4602
 #define SPR_MPL_SN_ACCESS_SET_0 0x0800
 #define SPR_MPL_SN_ACCESS_SET_1 0x0801
+#define SPR_MPL_SN_ACCESS_SET_2 0x0802
 #define SPR_MPL_SN_CPL_SET_0 0x5a00
 #define SPR_MPL_SN_CPL_SET_1 0x5a01
+#define SPR_MPL_SN_CPL_SET_2 0x5a02
 #define SPR_MPL_SN_FIREWALL_SET_0 0x2c00
 #define SPR_MPL_SN_FIREWALL_SET_1 0x2c01
+#define SPR_MPL_SN_FIREWALL_SET_2 0x2c02
 #define SPR_MPL_SN_NOTIFY_SET_0 0x2a00
 #define SPR_MPL_SN_NOTIFY_SET_1 0x2a01
+#define SPR_MPL_SN_NOTIFY_SET_2 0x2a02
 #define SPR_MPL_UDN_ACCESS_SET_0 0x0c00
 #define SPR_MPL_UDN_ACCESS_SET_1 0x0c01
+#define SPR_MPL_UDN_ACCESS_SET_2 0x0c02
 #define SPR_MPL_UDN_AVAIL_SET_0 0x4000
 #define SPR_MPL_UDN_AVAIL_SET_1 0x4001
+#define SPR_MPL_UDN_AVAIL_SET_2 0x4002
 #define SPR_MPL_UDN_CA_SET_0 0x3c00
 #define SPR_MPL_UDN_CA_SET_1 0x3c01
+#define SPR_MPL_UDN_CA_SET_2 0x3c02
 #define SPR_MPL_UDN_COMPLETE_SET_0 0x1400
 #define SPR_MPL_UDN_COMPLETE_SET_1 0x1401
+#define SPR_MPL_UDN_COMPLETE_SET_2 0x1402
 #define SPR_MPL_UDN_FIREWALL_SET_0 0x3000
 #define SPR_MPL_UDN_FIREWALL_SET_1 0x3001
+#define SPR_MPL_UDN_FIREWALL_SET_2 0x3002
 #define SPR_MPL_UDN_REFILL_SET_0 0x1000
 #define SPR_MPL_UDN_REFILL_SET_1 0x1001
+#define SPR_MPL_UDN_REFILL_SET_2 0x1002
 #define SPR_MPL_UDN_TIMER_SET_0 0x3600
 #define SPR_MPL_UDN_TIMER_SET_1 0x3601
+#define SPR_MPL_UDN_TIMER_SET_2 0x3602
 #define SPR_MPL_WORLD_ACCESS_SET_0 0x4e00
 #define SPR_MPL_WORLD_ACCESS_SET_1 0x4e01
+#define SPR_MPL_WORLD_ACCESS_SET_2 0x4e02
 #define SPR_PASS 0x4e0b
 #define SPR_PERF_COUNT_0 0x4205
 #define SPR_PERF_COUNT_1 0x4206
 #define SPR_PERF_COUNT_CTL 0x4207
+#define SPR_PERF_COUNT_DN_CTL 0x4210
 #define SPR_PERF_COUNT_STS 0x4208
 #define SPR_PROC_STATUS 0x4f00
 #define SPR_SIM_CONTROL 0x4e0c
@@ -124,6 +159,10 @@
 #define SPR_SYSTEM_SAVE_1_1 0x4901
 #define SPR_SYSTEM_SAVE_1_2 0x4902
 #define SPR_SYSTEM_SAVE_1_3 0x4903
+#define SPR_SYSTEM_SAVE_2_0 0x4700
+#define SPR_SYSTEM_SAVE_2_1 0x4701
+#define SPR_SYSTEM_SAVE_2_2 0x4702
+#define SPR_SYSTEM_SAVE_2_3 0x4703
 #define SPR_TILE_COORD 0x4c17
 #define SPR_TILE_RTF_HWM 0x4e10
 #define SPR_TILE_TIMER_CONTROL 0x3205
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 45cf67c2f286..6ebdd7d1e67a 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -47,53 +47,53 @@
 	int __n = (n); \
 	int __mask = 1 << (__n & 0x1f); \
 	if (__n < 32) \
-		__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, __mask); \
 	else \
-		__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, __mask); \
 } while (0)
 #define interrupt_mask_reset(n) do { \
 	int __n = (n); \
 	int __mask = 1 << (__n & 0x1f); \
 	if (__n < 32) \
-		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, __mask); \
 	else \
-		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, __mask); \
 } while (0)
 #define interrupt_mask_check(n) ({ \
 	int __n = (n); \
 	(((__n < 32) ? \
-		__insn_mfspr(SPR_INTERRUPT_MASK_1_0) : \
-		__insn_mfspr(SPR_INTERRUPT_MASK_1_1)) \
+		__insn_mfspr(SPR_INTERRUPT_MASK_K_0) : \
+		__insn_mfspr(SPR_INTERRUPT_MASK_K_1)) \
 	  >> (__n & 0x1f)) & 1; \
 })
 #define interrupt_mask_set_mask(mask) do { \
 	unsigned long long __m = (mask); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, (unsigned long)(__m)); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, (unsigned long)(__m>>32)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, (unsigned long)(__m)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, (unsigned long)(__m>>32)); \
 } while (0)
 #define interrupt_mask_reset_mask(mask) do { \
 	unsigned long long __m = (mask); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, (unsigned long)(__m)); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, (unsigned long)(__m>>32)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
 } while (0)
 #else
 #define interrupt_mask_set(n) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (1UL << (n)))
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
 #define interrupt_mask_reset(n) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (1UL << (n)))
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (1UL << (n)))
 #define interrupt_mask_check(n) \
-	((__insn_mfspr(SPR_INTERRUPT_MASK_1) >> (n)) & 1)
+	((__insn_mfspr(SPR_INTERRUPT_MASK_K) >> (n)) & 1)
 #define interrupt_mask_set_mask(mask) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (mask))
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
 #define interrupt_mask_reset_mask(mask) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (mask))
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
 #endif
 
 /*
  * The set of interrupts we want active if irqs are enabled.
  * Note that in particular, the tile timer interrupt comes and goes
  * from this set, since we have no other way to turn off the timer.
- * Likewise, INTCTRL_1 is removed and re-added during device
+ * Likewise, INTCTRL_K is removed and re-added during device
  * interrupts, as is the hardwall UDN_FIREWALL interrupt.
  * We use a low bit (MEM_ERROR) as our sentinel value and make sure it
  * is always claimed as an "active interrupt" so we can query that bit
@@ -168,14 +168,14 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Return 0 or 1 to indicate whether interrupts are currently disabled. */
 #define IRQS_DISABLED(tmp)					\
-	mfspr   tmp, INTERRUPT_MASK_1;				\
+	mfspr   tmp, SPR_INTERRUPT_MASK_K;			\
 	andi    tmp, tmp, 1
 
 /* Load up a pointer to &interrupts_enabled_mask. */
 #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg)			\
 	moveli reg, hw2_last(interrupts_enabled_mask);		\
 	shl16insli reg, reg, hw1(interrupts_enabled_mask);	\
 	shl16insli reg, reg, hw0(interrupts_enabled_mask);	\
 	add     reg, reg, tp
 
 /* Disable interrupts. */
@@ -183,18 +183,18 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 	moveli  tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS);	\
 	shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS);	\
 	shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS);	\
-	mtspr   INTERRUPT_MASK_SET_1, tmp0
+	mtspr   SPR_INTERRUPT_MASK_SET_K, tmp0
 
 /* Disable ALL synchronous interrupts (used by NMI entry). */
 #define IRQ_DISABLE_ALL(tmp)					\
 	movei   tmp, -1;					\
-	mtspr   INTERRUPT_MASK_SET_1, tmp
+	mtspr   SPR_INTERRUPT_MASK_SET_K, tmp
 
 /* Enable interrupts. */
 #define IRQ_ENABLE(tmp0, tmp1)					\
 	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0);			\
 	ld      tmp0, tmp0;					\
-	mtspr   INTERRUPT_MASK_RESET_1, tmp0
+	mtspr   SPR_INTERRUPT_MASK_RESET_K, tmp0
 
 #else /* !__tilegx__ */
 
@@ -208,14 +208,14 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
  * (making the original code's write of the "high" mask word idempotent).
  */
 #define IRQS_DISABLED(tmp)					\
-	mfspr   tmp, INTERRUPT_MASK_1_0;			\
+	mfspr   tmp, SPR_INTERRUPT_MASK_K_0;			\
 	shri    tmp, tmp, INT_MEM_ERROR;			\
 	andi    tmp, tmp, 1
 
 /* Load up a pointer to &interrupts_enabled_mask. */
 #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg)			\
 	moveli  reg, lo16(interrupts_enabled_mask);		\
-	auli    reg, reg, ha16(interrupts_enabled_mask);\
+	auli    reg, reg, ha16(interrupts_enabled_mask);	\
 	add     reg, reg, tp
 
 /* Disable interrupts. */
@@ -225,16 +225,16 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 	 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS)		\
 	};							\
 	{							\
-	 mtspr  INTERRUPT_MASK_SET_1_0, tmp0;			\
+	 mtspr  SPR_INTERRUPT_MASK_SET_K_0, tmp0;		\
 	 auli   tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS)	\
 	};							\
-	mtspr   INTERRUPT_MASK_SET_1_1, tmp1
+	mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp1
 
 /* Disable ALL synchronous interrupts (used by NMI entry). */
 #define IRQ_DISABLE_ALL(tmp)					\
 	movei   tmp, -1;					\
-	mtspr   INTERRUPT_MASK_SET_1_0, tmp;			\
-	mtspr   INTERRUPT_MASK_SET_1_1, tmp
+	mtspr   SPR_INTERRUPT_MASK_SET_K_0, tmp;		\
+	mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp
 
 /* Enable interrupts. */
 #define IRQ_ENABLE(tmp0, tmp1)					\
@@ -244,8 +244,8 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 	 addi   tmp1, tmp0, 4					\
 	};							\
 	lw      tmp1, tmp1;					\
-	mtspr   INTERRUPT_MASK_RESET_1_0, tmp0;			\
-	mtspr   INTERRUPT_MASK_RESET_1_1, tmp1
+	mtspr   SPR_INTERRUPT_MASK_RESET_K_0, tmp0;		\
+	mtspr   SPR_INTERRUPT_MASK_RESET_K_1, tmp1
 #endif
 
 /*
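
On TILEPro the 64-bit interrupt mask lives in a pair of 32-bit SPRs, so the helpers above pick the mask word by interrupt number and shift within it; 1 << (__n & 0x1f) relies on the word choice to disambiguate bits 32-63. The same arithmetic in a standalone model (plain variables standing in for the _K_0/_K_1 SPR pair; illustrative only):

	#include <assert.h>
	#include <stdint.h>

	static uint32_t mask_k_0, mask_k_1;	/* stand-ins for the two mask SPRs */

	/* Mirrors interrupt_mask_set(n): word 0 covers interrupts 0-31,
	 * word 1 covers 32-63; only the low five bits of n form the shift. */
	static void model_interrupt_mask_set(int n)
	{
		uint32_t bit = 1u << (n & 0x1f);
		if (n < 32)
			mask_k_0 |= bit;
		else
			mask_k_1 |= bit;
	}

	int main(void)
	{
		model_interrupt_mask_set(3);
		model_interrupt_mask_set(35);
		assert(mask_k_0 == 1u << 3);
		assert(mask_k_1 == 1u << 3);	/* 35 & 0x1f == 3, but in word 1 */
		return 0;
	}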
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index 7d90641cf18d..7979a45430d3 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -199,17 +199,17 @@ static inline __attribute_const__ int get_order(unsigned long size)
  * If you want more physical memory than this then see the CONFIG_HIGHMEM
  * option in the kernel configuration.
  *
- * The top two 16MB chunks in the table below (VIRT and HV) are
- * unavailable to Linux.  Since the kernel interrupt vectors must live
- * at 0xfd000000, we map all of the bottom of RAM at this address with
- * a huge page table entry to minimize its ITLB footprint (as well as
- * at PAGE_OFFSET).  The last architected requirement is that user
- * interrupt vectors live at 0xfc000000, so we make that range of
- * memory available to user processes.  The remaining regions are sized
- * as shown; after the first four addresses, we show "typical" values,
- * since the actual addresses depend on kernel #defines.
+ * The top 16MB chunk in the table below is unavailable to Linux.  Since
+ * the kernel interrupt vectors must live at either 0xfe000000 or 0xfd000000
+ * (depending on whether the kernel is at PL2 or PL1), we map all of the
+ * bottom of RAM at this address with a huge page table entry to minimize
+ * its ITLB footprint (as well as at PAGE_OFFSET).  The last architected
+ * requirement is that user interrupt vectors live at 0xfc000000, so we
+ * make that range of memory available to user processes.  The remaining
+ * regions are sized as shown; the first four addresses use the PL 1
+ * values, and after that, we show "typical" values, since the actual
+ * addresses depend on kernel #defines.
  *
- * MEM_VIRT_INTRPT                 0xff000000
  * MEM_HV_INTRPT                   0xfe000000
  * MEM_SV_INTRPT (kernel code)     0xfd000000
  * MEM_USER_INTRPT (user vector)   0xfc000000
@@ -221,9 +221,14 @@ static inline __attribute_const__ int get_order(unsigned long size)
  */
 
 #define MEM_USER_INTRPT		_AC(0xfc000000, UL)
+#if CONFIG_KERNEL_PL == 1
 #define MEM_SV_INTRPT		_AC(0xfd000000, UL)
 #define MEM_HV_INTRPT		_AC(0xfe000000, UL)
-#define MEM_VIRT_INTRPT		_AC(0xff000000, UL)
+#else
+#define MEM_GUEST_INTRPT	_AC(0xfd000000, UL)
+#define MEM_SV_INTRPT		_AC(0xfe000000, UL)
+#define MEM_HV_INTRPT		_AC(0xff000000, UL)
+#endif
 
 #define INTRPT_SIZE 0x4000
 
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index ccd5f8425688..1747ff3946b2 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -328,18 +328,21 @@ extern int kdata_huge;
  * Note that assembly code assumes that USER_PL is zero.
  */
 #define USER_PL 0
-#define KERNEL_PL 1
+#if CONFIG_KERNEL_PL == 2
+#define GUEST_PL 1
+#endif
+#define KERNEL_PL CONFIG_KERNEL_PL
 
-/* SYSTEM_SAVE_1_0 holds the current cpu number ORed with ksp0. */
+/* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */
 #define CPU_LOG_MASK_VALUE 12
 #define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1)
 #if CONFIG_NR_CPUS > CPU_MASK_VALUE
 # error Too many cpus!
 #endif
 #define raw_smp_processor_id() \
-	((int)__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & CPU_MASK_VALUE)
+	((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & CPU_MASK_VALUE)
 #define get_current_ksp0() \
-	(__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & ~CPU_MASK_VALUE)
+	(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~CPU_MASK_VALUE)
 #define next_current_ksp0(task) ({ \
 	unsigned long __ksp0 = task_ksp0(task); \
 	int __cpu = raw_smp_processor_id(); \
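
The packing decoded by raw_smp_processor_id() and get_current_ksp0() above works because ksp0, the top of a kernel stack, is aligned well past CPU_LOG_MASK_VALUE bits, leaving the low 12 bits free to carry the cpu number in the same SPR. A worked model (the ksp0 value is hypothetical; the masks are the ones defined above):

	#include <assert.h>

	#define CPU_LOG_MASK_VALUE 12
	#define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1)

	int main(void)
	{
		unsigned long ksp0 = 0xfd008000UL;	/* aligned: low 12 bits clear */
		int cpu = 5;
		unsigned long save_k_0 = ksp0 | cpu;	/* what SYSTEM_SAVE_K_0 holds */

		/* raw_smp_processor_id() and get_current_ksp0(), respectively. */
		assert((int)(save_k_0 & CPU_MASK_VALUE) == cpu);
		assert((save_k_0 & ~CPU_MASK_VALUE) == ksp0);
		return 0;
	}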
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h
index 4a02bb073979..ac6d343129d3 100644
--- a/arch/tile/include/asm/ptrace.h
+++ b/arch/tile/include/asm/ptrace.h
@@ -62,8 +62,8 @@ struct pt_regs {
 	pt_reg_t lr;		/* aliases regs[TREG_LR] */
 
 	/* Saved special registers. */
-	pt_reg_t pc;		/* stored in EX_CONTEXT_1_0 */
-	pt_reg_t ex1;		/* stored in EX_CONTEXT_1_1 (PL and ICS bit) */
+	pt_reg_t pc;		/* stored in EX_CONTEXT_K_0 */
+	pt_reg_t ex1;		/* stored in EX_CONTEXT_K_1 (PL and ICS bit) */
 	pt_reg_t faultnum;	/* fault number (INT_SWINT_1 for syscall) */
 	pt_reg_t orig_r0;	/* r0 at syscall entry, else zero */
 	pt_reg_t flags;		/* flags (see below) */
diff --git a/arch/tile/include/asm/system.h b/arch/tile/include/asm/system.h
index fb7ff9574d76..5388850deeb2 100644
--- a/arch/tile/include/asm/system.h
+++ b/arch/tile/include/asm/system.h
@@ -164,7 +164,7 @@ extern struct task_struct *_switch_to(struct task_struct *prev,
 /* Helper function for _switch_to(). */
 extern struct task_struct *__switch_to(struct task_struct *prev,
 				       struct task_struct *next,
-				       unsigned long new_system_save_1_0);
+				       unsigned long new_system_save_k_0);
 
 /* Address that switched-away from tasks are at. */
 extern unsigned long get_switch_to_pc(void);
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index 9bd303a141b2..f672544cd4f9 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -1003,37 +1003,37 @@ int hv_console_write(HV_VirtAddr bytes, int len);
  * when these occur in a client's interrupt critical section, they must
  * be delivered through the downcall mechanism.
  *
- * A downcall is initially delivered to the client as an INTCTRL_1
- * interrupt.  Upon entry to the INTCTRL_1 vector, the client must
- * immediately invoke the hv_downcall_dispatch service.  This service
- * will not return; instead it will cause one of the client's actual
- * downcall-handling interrupt vectors to be entered.  The EX_CONTEXT
- * registers in the client will be set so that when the client irets,
- * it will return to the code which was interrupted by the INTCTRL_1
- * interrupt.
+ * A downcall is initially delivered to the client as an INTCTRL_CL
+ * interrupt, where CL is the client's PL.  Upon entry to the INTCTRL_CL
+ * vector, the client must immediately invoke the hv_downcall_dispatch
+ * service.  This service will not return; instead it will cause one of
+ * the client's actual downcall-handling interrupt vectors to be entered.
+ * The EX_CONTEXT registers in the client will be set so that when the
+ * client irets, it will return to the code which was interrupted by the
+ * INTCTRL_CL interrupt.
  *
- * Under some circumstances, the firing of INTCTRL_1 can race with
+ * Under some circumstances, the firing of INTCTRL_CL can race with
  * the lowering of a device interrupt.  In such a case, the
  * hv_downcall_dispatch service may issue an iret instruction instead
  * of entering one of the client's actual downcall-handling interrupt
  * vectors.  This will return execution to the location that was
- * interrupted by INTCTRL_1.
+ * interrupted by INTCTRL_CL.
  *
  * Any saving of registers should be done by the actual handling
- * vectors; no registers should be changed by the INTCTRL_1 handler.
+ * vectors; no registers should be changed by the INTCTRL_CL handler.
  * In particular, the client should not use a jal instruction to invoke
  * the hv_downcall_dispatch service, as that would overwrite the client's
  * lr register.  Note that the hv_downcall_dispatch service may overwrite
  * one or more of the client's system save registers.
  *
- * The client must not modify the INTCTRL_1_STATUS SPR.  The hypervisor
+ * The client must not modify the INTCTRL_CL_STATUS SPR.  The hypervisor
  * will set this register to cause a downcall to happen, and will clear
  * it when no further downcalls are pending.
  *
- * When a downcall vector is entered, the INTCTRL_1 interrupt will be
+ * When a downcall vector is entered, the INTCTRL_CL interrupt will be
  * masked.  When the client is done processing a downcall, and is ready
  * to accept another, it must unmask this interrupt; if more downcalls
- * are pending, this will cause the INTCTRL_1 vector to be reentered.
+ * are pending, this will cause the INTCTRL_CL vector to be reentered.
  * Currently the following interrupt vectors can be entered through a
  * downcall:
  *
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 80d13f013bb2..fd8dc42abdcb 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -15,7 +15,9 @@
 #include <linux/linkage.h>
 #include <linux/unistd.h>
 #include <asm/irqflags.h>
+#include <asm/processor.h>
 #include <arch/abi.h>
+#include <arch/spr_def.h>
 
 #ifdef __tilegx__
 #define bnzt bnezt
@@ -80,7 +82,7 @@ STD_ENTRY(KBacktraceIterator_init_current)
 STD_ENTRY(cpu_idle_on_new_stack)
 	{
 	 move sp, r1
-	 mtspr SYSTEM_SAVE_1_0, r2
+	 mtspr SPR_SYSTEM_SAVE_K_0, r2
 	}
 	jal free_thread_info
 	j cpu_idle
@@ -102,15 +104,15 @@ STD_ENTRY(smp_nap)
 STD_ENTRY(_cpu_idle)
 	{
 	 lnk r0
-	 movei r1, 1
+	 movei r1, KERNEL_PL
 	}
 	{
 	 addli r0, r0, _cpu_idle_nap - .
 	 mtspr INTERRUPT_CRITICAL_SECTION, r1
 	}
 	IRQ_ENABLE(r2, r3)             /* unmask, but still with ICS set */
-	mtspr EX_CONTEXT_1_1, r1       /* PL1, ICS clear */
-	mtspr EX_CONTEXT_1_0, r0
+	mtspr SPR_EX_CONTEXT_K_1, r1   /* Kernel PL, ICS clear */
+	mtspr SPR_EX_CONTEXT_K_0, r0
 	iret
 	.global _cpu_idle_nap
 _cpu_idle_nap:
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index 2b4f6c091701..90e7c4435693 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -23,6 +23,7 @@
 #include <asm/asm-offsets.h>
 #include <hv/hypervisor.h>
 #include <arch/chip.h>
+#include <arch/spr_def.h>
 
 /*
  * This module contains the entry code for kernel images. It performs the
@@ -76,7 +77,7 @@ ENTRY(_start)
 	}
 1:
 
-	/* Get our processor number and save it away in SAVE_1_0. */
+	/* Get our processor number and save it away in SAVE_K_0. */
 	jal hv_inquire_topology
 	mulll_uu r4, r1, r2        /* r1 == y, r2 == width */
 	add r4, r4, r0             /* r0 == x, so r4 == cpu == y*width + x */
@@ -124,7 +125,7 @@ ENTRY(_start)
 	lw r0, r0
 	lw sp, r1
 	or r4, sp, r4
-	mtspr SYSTEM_SAVE_1_0, r4  /* save ksp0 + cpu */
+	mtspr SPR_SYSTEM_SAVE_K_0, r4  /* save ksp0 + cpu */
 	addi sp, sp, -STACK_TOP_DELTA
 	{
 	 move lr, zero   /* stop backtraces in the called function */
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index f87c5c044d6b..206dc7e1fe36 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -32,8 +32,8 @@
 # error "No support for kernel preemption currently"
 #endif
 
-#if INT_INTCTRL_1 < 32 || INT_INTCTRL_1 >= 48
-# error INT_INTCTRL_1 coded to set high interrupt mask
+#if INT_INTCTRL_K < 32 || INT_INTCTRL_K >= 48
+# error INT_INTCTRL_K coded to set high interrupt mask
 #endif
 
 #define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
@@ -132,8 +132,8 @@ intvec_\vecname:
 
 	/* Temporarily save a register so we have somewhere to work. */
 
-	mtspr   SYSTEM_SAVE_1_1, r0
-	mfspr   r0, EX_CONTEXT_1_1
+	mtspr   SPR_SYSTEM_SAVE_K_1, r0
+	mfspr   r0, SPR_EX_CONTEXT_K_1
 
 	/* The cmpxchg code clears sp to force us to reset it here on fault. */
 	{
@@ -167,18 +167,18 @@ intvec_\vecname:
 	 * The page_fault handler may be downcalled directly by the
 	 * hypervisor even when Linux is running and has ICS set.
 	 *
-	 * In this case the contents of EX_CONTEXT_1_1 reflect the
+	 * In this case the contents of EX_CONTEXT_K_1 reflect the
 	 * previous fault and can't be relied on to choose whether or
 	 * not to reinitialize the stack pointer.  So we add a test
-	 * to see whether SYSTEM_SAVE_1_2 has the high bit set,
+	 * to see whether SYSTEM_SAVE_K_2 has the high bit set,
 	 * and if so we don't reinitialize sp, since we must be coming
 	 * from Linux.  (In fact the precise case is !(val & ~1),
 	 * but any Linux PC has to have the high bit set.)
 	 *
-	 * Note that the hypervisor *always* sets SYSTEM_SAVE_1_2 for
+	 * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
 	 * any path that turns into a downcall to one of our TLB handlers.
 	 */
-	mfspr   r0, SYSTEM_SAVE_1_2
+	mfspr   r0, SPR_SYSTEM_SAVE_K_2
 	{
 	 blz    r0, 0f    /* high bit in S_S_1_2 is for a PC to use */
 	 move   r0, sp
@@ -187,12 +187,12 @@ intvec_\vecname:
 
 2:
 	/*
-	 * SYSTEM_SAVE_1_0 holds the cpu number in the low bits, and
+	 * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
 	 * the current stack top in the higher bits.  So we recover
 	 * our stack top by just masking off the low bits, then
 	 * point sp at the top aligned address on the actual stack page.
 	 */
-	mfspr   r0, SYSTEM_SAVE_1_0
+	mfspr   r0, SPR_SYSTEM_SAVE_K_0
 	mm      r0, r0, zero, LOG2_THREAD_SIZE, 31
 
 0:
@@ -254,7 +254,7 @@ intvec_\vecname:
 	 sw     sp, r3
 	 addli  sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
 	}
-	mfspr   r0, EX_CONTEXT_1_0
+	mfspr   r0, SPR_EX_CONTEXT_K_0
 	.ifc \processing,handle_syscall
 	/*
 	 * Bump the saved PC by one bundle so that when we return, we won't
@@ -267,7 +267,7 @@ intvec_\vecname:
 	 sw     sp, r0
 	 addli  sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
 	}
-	mfspr   r0, EX_CONTEXT_1_1
+	mfspr   r0, SPR_EX_CONTEXT_K_1
 	{
 	 sw     sp, r0
 	 addi   sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
@@ -289,7 +289,7 @@ intvec_\vecname:
 	.endif
 	 addli  sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
 	}
-	mfspr   r0, SYSTEM_SAVE_1_1    /* Original r0 */
+	mfspr   r0, SPR_SYSTEM_SAVE_K_1  /* Original r0 */
 	{
 	 sw     sp, r0
 	 addi   sp, sp, -PTREGS_OFFSET_REG(0) - 4
@@ -309,12 +309,12 @@ intvec_\vecname:
 	 * See discussion below at "finish_interrupt_save".
 	 */
 	.ifc \c_routine, do_page_fault
-	mfspr   r2, SYSTEM_SAVE_1_3    /* address of page fault */
-	mfspr   r3, SYSTEM_SAVE_1_2    /* info about page fault */
+	mfspr   r2, SPR_SYSTEM_SAVE_K_3  /* address of page fault */
+	mfspr   r3, SPR_SYSTEM_SAVE_K_2  /* info about page fault */
 	.else
 	.ifc \vecnum, INT_DOUBLE_FAULT
 	{
-	 mfspr  r2, SYSTEM_SAVE_1_2    /* double fault info from HV */
+	 mfspr  r2, SPR_SYSTEM_SAVE_K_2  /* double fault info from HV */
 	 movei  r3, 0
 	}
 	.else
@@ -467,7 +467,7 @@ intvec_\vecname:
 	/* Load tp with our per-cpu offset. */
 #ifdef CONFIG_SMP
 	{
-	 mfspr  r20, SYSTEM_SAVE_1_0
+	 mfspr  r20, SPR_SYSTEM_SAVE_K_0
 	 moveli r21, lo16(__per_cpu_offset)
 	}
 	{
@@ -487,7 +487,7 @@ intvec_\vecname:
 	 * We load flags in r32 here so we can jump to .Lrestore_regs
 	 * directly after do_page_fault_ics() if necessary.
 	 */
-	mfspr   r32, EX_CONTEXT_1_1
+	mfspr   r32, SPR_EX_CONTEXT_K_1
 	{
 	 andi   r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
 	 PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
@@ -957,11 +957,11 @@ STD_ENTRY(interrupt_return)
 	pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
 	pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
 	{
-	 mtspr  EX_CONTEXT_1_0, r21
+	 mtspr  SPR_EX_CONTEXT_K_0, r21
 	 move   r5, zero
 	}
 	{
-	 mtspr  EX_CONTEXT_1_1, lr
+	 mtspr  SPR_EX_CONTEXT_K_1, lr
 	 andi   lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
 	}
 
@@ -1199,7 +1199,7 @@ STD_ENTRY(interrupt_return)
 	STD_ENDPROC(interrupt_return)
 
 	/*
-	 * This interrupt variant clears the INT_INTCTRL_1 interrupt mask bit
+	 * This interrupt variant clears the INT_INTCTRL_K interrupt mask bit
 	 * before returning, so we can properly get more downcalls.
 	 */
 	.pushsection .text.handle_interrupt_downcall,"ax"
@@ -1208,11 +1208,11 @@ handle_interrupt_downcall:
 	check_single_stepping normal, .Ldispatch_downcall
 .Ldispatch_downcall:
 
-	/* Clear INTCTRL_1 from the set of interrupts we ever enable. */
+	/* Clear INTCTRL_K from the set of interrupts we ever enable. */
 	GET_INTERRUPTS_ENABLED_MASK_PTR(r30)
 	{
 	 addi   r30, r30, 4
-	 movei  r31, INT_MASK(INT_INTCTRL_1)
+	 movei  r31, INT_MASK(INT_INTCTRL_K)
 	}
 	{
 	 lw     r20, r30
@@ -1227,7 +1227,7 @@ handle_interrupt_downcall:
 	}
 	FEEDBACK_REENTER(handle_interrupt_downcall)
 
-	/* Allow INTCTRL_1 to be enabled next time we enable interrupts. */
+	/* Allow INTCTRL_K to be enabled next time we enable interrupts. */
 	lw      r20, r30
 	or      r20, r20, r31
 	sw      r30, r20
@@ -1509,7 +1509,7 @@ handle_ill:
 /* Various stub interrupt handlers and syscall handlers */
 
 STD_ENTRY_LOCAL(_kernel_double_fault)
-	mfspr   r1, EX_CONTEXT_1_0
+	mfspr   r1, SPR_EX_CONTEXT_K_0
 	move    r2, lr
 	move    r3, sp
 	move    r4, r52
@@ -1518,7 +1518,7 @@ STD_ENTRY_LOCAL(_kernel_double_fault)
 	STD_ENDPROC(_kernel_double_fault)
 
 STD_ENTRY_LOCAL(bad_intr)
-	mfspr   r2, EX_CONTEXT_1_0
+	mfspr   r2, SPR_EX_CONTEXT_K_0
 	panic   "Unhandled interrupt %#x: PC %#lx"
 	STD_ENDPROC(bad_intr)
 
@@ -1560,7 +1560,7 @@ STD_ENTRY(_sys_clone)
  * a page fault which would assume the stack was valid, it does
  * save/restore the stack pointer and zero it out to make sure it gets reset.
  * Since we always keep interrupts disabled, the hypervisor won't
- * clobber our EX_CONTEXT_1_x registers, so we don't save/restore them
+ * clobber our EX_CONTEXT_K_x registers, so we don't save/restore them
  * (other than to advance the PC on return).
  *
  * We have to manually validate the user vs kernel address range
@@ -1766,7 +1766,7 @@ ENTRY(sys_cmpxchg)
 	/* Do slow mtspr here so the following "mf" waits less. */
 	{
 	 move   sp, r27
-	 mtspr  EX_CONTEXT_1_0, r28
+	 mtspr  SPR_EX_CONTEXT_K_0, r28
 	}
 	mf
 
@@ -1785,7 +1785,7 @@ ENTRY(sys_cmpxchg)
 	}
 	{
 	 move   sp, r27
-	 mtspr  EX_CONTEXT_1_0, r28
+	 mtspr  SPR_EX_CONTEXT_K_0, r28
 	}
 	iret
 
@@ -1813,7 +1813,7 @@ ENTRY(sys_cmpxchg)
 #endif
 
 	/* Issue the slow SPR here while the tns result is in flight. */
-	mfspr   r28, EX_CONTEXT_1_0
+	mfspr   r28, SPR_EX_CONTEXT_K_0
 
 	{
 	 addi   r28, r28, 8    /* return to the instruction after the swint1 */
@@ -1901,7 +1901,7 @@ ENTRY(sys_cmpxchg)
 .Lcmpxchg64_mismatch:
 	{
 	 move   sp, r27
-	 mtspr  EX_CONTEXT_1_0, r28
+	 mtspr  SPR_EX_CONTEXT_K_0, r28
 	}
 	mf
 	{
@@ -1982,8 +1982,13 @@ int_unalign:
 	int_hand     INT_PERF_COUNT, PERF_COUNT, \
 		     op_handle_perf_interrupt, handle_nmi
 	int_hand     INT_INTCTRL_3, INTCTRL_3, bad_intr
+#if CONFIG_KERNEL_PL == 2
+	dc_dispatch  INT_INTCTRL_2, INTCTRL_2
+	int_hand     INT_INTCTRL_1, INTCTRL_1, bad_intr
+#else
 	int_hand     INT_INTCTRL_2, INTCTRL_2, bad_intr
 	dc_dispatch  INT_INTCTRL_1, INTCTRL_1
+#endif
 	int_hand     INT_INTCTRL_0, INTCTRL_0, bad_intr
 	int_hand     INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
 		     hv_message_intr, handle_interrupt_downcall
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 596c60086930..394d554f21aa 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -61,9 +61,9 @@ static DEFINE_SPINLOCK(available_irqs_lock);
 
 #if CHIP_HAS_IPI()
 /* Use SPRs to manipulate device interrupts. */
-#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_1, irq_mask)
-#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_1, irq_mask)
-#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_1, irq_mask)
+#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
+#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
+#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
 #else
 /* Use HV to manipulate device interrupts. */
 #define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
@@ -89,16 +89,16 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
 	 * masked by a previous interrupt.  Then, mask out the ones
 	 * we're going to handle.
 	 */
-	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_1);
-	original_irqs = __insn_mfspr(SPR_IPI_EVENT_1) & ~masked;
-	__insn_mtspr(SPR_IPI_MASK_SET_1, original_irqs);
+	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
+	original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
+	__insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
 #else
 	/*
 	 * Hypervisor performs the equivalent of the Gx code above and
 	 * then puts the pending interrupt mask into a system save reg
 	 * for us to find.
 	 */
-	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_1_3);
+	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
 #endif
 	remaining_irqs = original_irqs;
 
@@ -225,7 +225,7 @@ void __cpuinit setup_irq_regs(void)
 	/* Enable interrupt delivery. */
 	unmask_irqs(~0UL);
 #if CHIP_HAS_IPI()
-	raw_local_irq_unmask(INT_IPI_1);
+	raw_local_irq_unmask(INT_IPI_K);
 #endif
 }
 
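
In the CHIP_HAS_IPI() branch of tile_dev_intr() above, the handler computes the set of interrupts that fired and were not already masked, then masks exactly that set before dispatching, so a nested interrupt cannot re-deliver them. The set arithmetic in a standalone model (variables in place of the IPI SPRs; illustrative only):

	#include <assert.h>
	#include <stdint.h>

	static uint64_t ipi_event;	/* stand-in for SPR_IPI_EVENT_K: raised IRQs */
	static uint64_t ipi_mask;	/* stand-in for SPR_IPI_MASK_K: blocked IRQs */

	static uint64_t model_dev_intr(void)
	{
		/* IRQs that fired and were not masked by a previous interrupt... */
		uint64_t original_irqs = ipi_event & ~ipi_mask;
		/* ...now get masked themselves, as SPR_IPI_MASK_SET_K does. */
		ipi_mask |= original_irqs;
		return original_irqs;
	}

	int main(void)
	{
		ipi_event = 0x0f;	/* IRQs 0-3 raised */
		ipi_mask  = 0x01;	/* IRQ 0 masked by an earlier handler */
		assert(model_dev_intr() == 0x0e);	/* handle only IRQs 1-3 */
		assert(ipi_mask == 0x0f);		/* and they are now masked */
		return 0;
	}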
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 6d23ed271d10..997e3933f726 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -34,7 +34,7 @@ void __cpuinit init_messaging(void)
 		panic("hv_register_message_state: error %d", rc);
 
 	/* Make sure downcall interrupts will be enabled. */
-	raw_local_irq_unmask(INT_INTCTRL_1);
+	raw_local_irq_unmask(INT_INTCTRL_K);
 }
 
 void hv_message_intr(struct pt_regs *regs, int intnum)
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 221f12bd27fa..8430f45daea6 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -305,15 +305,25 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 /* Allow user processes to access the DMA SPRs */
 void grant_dma_mpls(void)
 {
+#if CONFIG_KERNEL_PL == 2
+	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
+	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
+#else
 	__insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
 	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
+#endif
 }
 
 /* Forbid user processes from accessing the DMA SPRs */
 void restrict_dma_mpls(void)
 {
+#if CONFIG_KERNEL_PL == 2
+	__insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
+	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
+#else
 	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
 	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
+#endif
 }
 
 /* Pause the DMA engine, then save off its state registers. */
@@ -524,7 +534,7 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
 	 * Switch kernel SP, PC, and callee-saved registers.
 	 * In the context of the new task, return the old task pointer
 	 * (i.e. the task that actually called __switch_to).
-	 * Pass the value to use for SYSTEM_SAVE_1_0 when we reset our sp.
+	 * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
 	 */
 	return __switch_to(prev, next, next_current_ksp0(next));
 }
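
One pattern worth noting in the grant/restrict hunk above: writing 1 to an SPR_MPL_<class>_SET_<n> register routes that interrupt class to protection level n, and in spr_def_32.h the SET registers for a class sit at consecutive SPR numbers. "Grant" therefore always targets one level below the kernel's PL and "restrict" targets the kernel's PL itself, which is what the two #if arms encode. A sketch of that relationship (arithmetic for illustration only; the real code must spell out the literal SPR names, since __insn_mtspr() requires a compile-time SPR number):

	#include <stdio.h>

	#define CONFIG_KERNEL_PL 1		/* hypothetical build setting */
	#define SPR_MPL_DMA_CPL_SET_0 0x5800	/* from spr_def_32.h above */

	int main(void)
	{
		/* grant_dma_mpls(): route DMA interrupts one PL below the kernel. */
		printf("grant    -> SPR %#x\n",
		       SPR_MPL_DMA_CPL_SET_0 + CONFIG_KERNEL_PL - 1);
		/* restrict_dma_mpls(): route them back to the kernel's own PL. */
		printf("restrict -> SPR %#x\n",
		       SPR_MPL_DMA_CPL_SET_0 + CONFIG_KERNEL_PL);
		return 0;
	}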
diff --git a/arch/tile/kernel/regs_32.S b/arch/tile/kernel/regs_32.S
index e88d6e122783..caa13101c264 100644
--- a/arch/tile/kernel/regs_32.S
+++ b/arch/tile/kernel/regs_32.S
@@ -85,7 +85,7 @@ STD_ENTRY_SECTION(__switch_to, .sched.text)
 	{
 	 /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */
 	 move sp, r13
-	 mtspr SYSTEM_SAVE_1_0, r2
+	 mtspr SPR_SYSTEM_SAVE_K_0, r2
 	}
 	FOR_EACH_CALLEE_SAVED_REG(LOAD_REG)
 .L__switch_to_pc:
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index c1a697e68546..6444a2bbe1a4 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -187,11 +187,11 @@ early_param("vmalloc", parse_vmalloc);
 
 #ifdef CONFIG_HIGHMEM
 /*
- * Determine for each controller where its lowmem is mapped and how
- * much of it is mapped there.  On controller zero, the first few
- * megabytes are mapped at 0xfd000000 as code, so in principle we
- * could start our data mappings higher up, but for now we don't
- * bother, to avoid additional confusion.
+ * Determine for each controller where its lowmem is mapped and how much of
+ * it is mapped there.  On controller zero, the first few megabytes are
+ * already mapped in as code at MEM_SV_INTRPT, so in principle we could
+ * start our data mappings higher up, but for now we don't bother, to avoid
+ * additional confusion.
  *
  * One question is whether, on systems with more than 768 Mb and
  * controllers of different sizes, to map in a proportionate amount of
@@ -876,6 +876,9 @@ void __cpuinit setup_cpu(int boot)
 #if CHIP_HAS_SN_PROC()
 	raw_local_irq_unmask(INT_SNITLB_MISS);
 #endif
+#ifdef __tilegx__
+	raw_local_irq_unmask(INT_SINGLE_STEP_K);
+#endif
 
 	/*
 	 * Allow user access to many generic SPRs, like the cycle
@@ -893,11 +896,12 @@ void __cpuinit setup_cpu(int boot)
 #endif
 
 	/*
-	 * Set the MPL for interrupt control 0 to user level.
-	 * This includes access to the SYSTEM_SAVE and EX_CONTEXT SPRs,
-	 * as well as the PL 0 interrupt mask.
+	 * Set the MPL for interrupt control 0 & 1 to the corresponding
+	 * values.  This includes access to the SYSTEM_SAVE and EX_CONTEXT
+	 * SPRs, as well as the interrupt mask.
 	 */
 	__insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
+	__insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);
 
 	/* Initialize IRQ support for this cpu. */
 	setup_irq_regs();
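
Each SPR_MPL_<int>_SET_<pl> register sets the minimum protection level of the named interrupt to <pl>, which also governs access to the associated SYSTEM_SAVE and EX_CONTEXT SPRs, so the new line grants level 1 control of INTCTRL_1 alongside the existing user-level grant. Assuming the PL-independent _K names sketched earlier are available (an assumption; the diff spells the levels out), the pair could equivalently be written so the kernel half tracks CONFIG_KERNEL_PL:

/* Sketch (assumes an SPR_MPL_INTCTRL_K_SET_K helper name exists). */
static void sketch_grant_intctrl_mpls(void)
{
	__insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);	/* user (PL 0) */
	__insn_mtspr(SPR_MPL_INTCTRL_K_SET_K, 1);	/* kernel (PL K) */
}
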
@@ -1033,7 +1037,7 @@ static void __init validate_va(void)
 	 * In addition, make sure we CAN'T use the end of memory, since
 	 * we use the last chunk of each pgd for the pgd_list.
 	 */
-	int i, fc_fd_ok = 0;
+	int i, user_kernel_ok = 0;
 	unsigned long max_va = 0;
 	unsigned long list_va =
 		((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);
@@ -1044,13 +1048,13 @@ static void __init validate_va(void)
 			break;
 		if (range.start <= MEM_USER_INTRPT &&
 		    range.start + range.size >= MEM_HV_INTRPT)
-			fc_fd_ok = 1;
+			user_kernel_ok = 1;
 		if (range.start == 0)
 			max_va = range.size;
 		BUG_ON(range.start + range.size > list_va);
 	}
-	if (!fc_fd_ok)
-		early_panic("Hypervisor not configured for VAs 0xfc/0xfd\n");
+	if (!user_kernel_ok)
+		early_panic("Hypervisor not configured for user/kernel VAs\n");
 	if (max_va == 0)
 		early_panic("Hypervisor not configured for low VAs\n");
 	if (max_va < KERNEL_HIGH_VADDR)
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 1cb5ec79de04..75255d90aff3 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -212,7 +212,7 @@ void __init ipi_init(void)
 
 		tile.x = cpu_x(cpu);
 		tile.y = cpu_y(cpu);
-		if (hv_get_ipi_pte(tile, 1, &pte) != 0)
+		if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
 			panic("Failed to initialize IPI for cpu %d\n", cpu);
 
 		offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
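
Passing KERNEL_PL instead of a literal 1 lets the IPI mapping request follow whatever protection level the kernel was configured for. A plausible definition, shown as a sketch (the real symbol would come from the tile processor headers):

/* Sketch: KERNEL_PL simply mirrors the Kconfig choice, defaulting to
 * the historical PL 1 when nothing is configured. */
#ifndef CONFIG_KERNEL_PL
#define CONFIG_KERNEL_PL 1
#endif
#define KERNEL_PL CONFIG_KERNEL_PL
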
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 0f362dc2c57f..7826a8b17997 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -278,7 +278,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 	case INT_DOUBLE_FAULT:
 		/*
 		 * For double fault, "reason" is actually passed as
-		 * SYSTEM_SAVE_1_2, the hypervisor's double-fault info, so
+		 * SYSTEM_SAVE_K_2, the hypervisor's double-fault info, so
 		 * we can provide the original fault number rather than
 		 * the uninteresting "INT_DOUBLE_FAULT" so the user can
 		 * learn what actually struck while PL0 ICS was set.
diff --git a/arch/tile/kvm/Kconfig b/arch/tile/kvm/Kconfig
new file mode 100644
index 000000000000..b88f9c047781
--- /dev/null
+++ b/arch/tile/kvm/Kconfig
@@ -0,0 +1,38 @@
+#
+# KVM configuration
+#
+
+source "virt/kvm/Kconfig"
+
+menuconfig VIRTUALIZATION
+	bool "Virtualization"
+	---help---
+	  Say Y here to get to see options for using your Linux host to run
+	  other operating systems inside virtual machines (guests).
+	  This option alone does not add any kernel code.
+
+	  If you say N, all options in this submenu will be skipped and
+	  disabled.
+
+if VIRTUALIZATION
+
+config KVM
+	tristate "Kernel-based Virtual Machine (KVM) support"
+	depends on HAVE_KVM && MODULES && EXPERIMENTAL
+	select PREEMPT_NOTIFIERS
+	select ANON_INODES
+	---help---
+	  Support hosting paravirtualized guest machines.
+
+	  This module provides access to the hardware capabilities through
+	  a character device node named /dev/kvm.
+
+	  To compile this as a module, choose M here: the module
+	  will be called kvm.
+
+	  If unsure, say N.
+
+source drivers/vhost/Kconfig
+source drivers/virtio/Kconfig
+
+endif # VIRTUALIZATION
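
With this file in place, enabling the support follows the usual Kconfig flow: switch on VIRTUALIZATION, then set KVM. Because KVM here depends on HAVE_KVM, MODULES and EXPERIMENTAL, those must be satisfied as well. An illustrative .config fragment (exact symbols beyond these depend on the rest of the tree):

CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
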
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 94f579d0a494..f295b4ac941d 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -563,10 +563,10 @@ do_sigbus:
 /*
  * When we take an ITLB or DTLB fault or access violation in the
  * supervisor while the critical section bit is set, the hypervisor is
- * reluctant to write new values into the EX_CONTEXT_1_x registers,
+ * reluctant to write new values into the EX_CONTEXT_K_x registers,
  * since that might indicate we have not yet squirreled the SPR
  * contents away and can thus safely take a recursive interrupt.
- * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_1_2.
+ * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_K_2.
  *
  * Note that this routine is called before homecache_tlb_defer_enter(),
  * which means that we can properly unlock any atomics that might
@@ -610,7 +610,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
 	 * fault.  We didn't set up a kernel stack on initial entry to
 	 * sys_cmpxchg, but instead had one set up by the fault, which
 	 * (because sys_cmpxchg never releases ICS) came to us via the
-	 * SYSTEM_SAVE_1_2 mechanism, and thus EX_CONTEXT_1_[01] are
+	 * SYSTEM_SAVE_K_2 mechanism, and thus EX_CONTEXT_K_[01] are
 	 * still referencing the original user code.  We release the
 	 * atomic lock and rewrite pt_regs so that it appears that we
 	 * came from user-space directly, and after we finish the
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index d89c9eacd162..78e1982cb6c9 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -1060,7 +1060,7 @@ void free_initmem(void)
 
 	/*
 	 * Free the pages mapped from 0xc0000000 that correspond to code
-	 * pages from 0xfd000000 that we won't use again after init.
+	 * pages from MEM_SV_INTRPT that we won't use again after init.
 	 */
 	free_init_pages("unused kernel text",
 			(unsigned long)_sinittext - text_delta,