author    Chris Metcalf <cmetcalf@tilera.com>    2012-03-30 16:01:48 -0400
committer Chris Metcalf <cmetcalf@tilera.com>    2012-05-25 12:48:27 -0400
commit    b8ace0833feb308b1cb69d8b33ab08e0602dd2d2 (patch)
tree      f65f6914dd2c5d7dcdb233178ca5bc101003982d /arch/tile
parent    621b19551507c8fd9d721f4038509c5bb155a983 (diff)
arch/tile: fix hardwall for tilegx and generalize for idn and ipi
The hardwall drain code was not properly implemented for tilegx, just
tilepro, so you couldn't reliably restart an application that made use
of the udn.

In addition, the code was only applicable to the udn (user dynamic
network).  On tilegx there is a second user network that is available
(the "idn"), and there is support for having I/O shims deliver
user-level interrupts to applications ("ipi"), which functions in a
very similar way to the inter-core permissions used for udn/idn.  So
this change also generalizes the code from supporting just the udn to
supporting udn, idn, and ipi on tilegx.

By default we now use /dev/hardwall/{udn,idn,ipi} with separate minor
numbers for the three devices.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
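[Editorial note: the following is a hedged userspace sketch, not part of the
patch.  It assumes the HARDWALL_CREATE(size) and HARDWALL_ACTIVATE ioctl
wrappers exported by <asm/hardwall.h> (only HARDWALL_GET_ID's definition
appears in the hardwall.h hunk below), and an 8-cpu-wide chip so that cpus
0,1,8,9 form a 2x2 rectangle.]

/* Hypothetical usage of the new per-type /dev/hardwall/udn device. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <asm/hardwall.h>

int main(void)
{
	/* Kernel-style cpu bitmask; "size" below is its byte count. */
	unsigned long mask = (1UL << 0) | (1UL << 1) | (1UL << 8) | (1UL << 9);
	cpu_set_t one;
	int fd, id;

	fd = open("/dev/hardwall/udn", O_RDWR);
	if (fd < 0)
		return perror("open"), 1;

	/* Reserve the rectangle of cpus for this file. */
	if (ioctl(fd, HARDWALL_CREATE(sizeof(mask)), &mask) < 0)
		return perror("HARDWALL_CREATE"), 1;

	/* Activation requires being bound to exactly one cpu in the mask. */
	CPU_ZERO(&one);
	CPU_SET(0, &one);
	if (sched_setaffinity(0, sizeof(one), &one) < 0)
		return perror("sched_setaffinity"), 1;
	if (ioctl(fd, HARDWALL_ACTIVATE) < 0)
		return perror("HARDWALL_ACTIVATE"), 1;

	/* The returned id names the /proc/tile/hardwall/udn/<id> entry. */
	id = ioctl(fd, HARDWALL_GET_ID);
	printf("activated in udn hardwall %d\n", id);
	return 0;
}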
Diffstat (limited to 'arch/tile')
-rw-r--r--  arch/tile/include/arch/spr_def_32.h |  56
-rw-r--r--  arch/tile/include/arch/spr_def_64.h |  43
-rw-r--r--  arch/tile/include/asm/hardwall.h    |  18
-rw-r--r--  arch/tile/include/asm/processor.h   |  17
-rw-r--r--  arch/tile/include/asm/setup.h       |  10
-rw-r--r--  arch/tile/kernel/hardwall.c         | 754
-rw-r--r--  arch/tile/kernel/intvec_64.S        |   2
-rw-r--r--  arch/tile/kernel/process.c          |  16
8 files changed, 636 insertions(+), 280 deletions(-)
diff --git a/arch/tile/include/arch/spr_def_32.h b/arch/tile/include/arch/spr_def_32.h
index bbc1f4c924ee..78bbce2fb19a 100644
--- a/arch/tile/include/arch/spr_def_32.h
+++ b/arch/tile/include/arch/spr_def_32.h
@@ -65,6 +65,31 @@
 #define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
 #define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
 #define SPR_FAIL 0x4e09
+#define SPR_IDN_AVAIL_EN 0x3e05
+#define SPR_IDN_CA_DATA 0x0b00
+#define SPR_IDN_DATA_AVAIL 0x0b03
+#define SPR_IDN_DEADLOCK_TIMEOUT 0x3406
+#define SPR_IDN_DEMUX_CA_COUNT 0x0a05
+#define SPR_IDN_DEMUX_COUNT_0 0x0a06
+#define SPR_IDN_DEMUX_COUNT_1 0x0a07
+#define SPR_IDN_DEMUX_CTL 0x0a08
+#define SPR_IDN_DEMUX_QUEUE_SEL 0x0a0a
+#define SPR_IDN_DEMUX_STATUS 0x0a0b
+#define SPR_IDN_DEMUX_WRITE_FIFO 0x0a0c
+#define SPR_IDN_DIRECTION_PROTECT 0x2e05
+#define SPR_IDN_PENDING 0x0a0e
+#define SPR_IDN_REFILL_EN 0x0e05
+#define SPR_IDN_SP_FIFO_DATA 0x0a0f
+#define SPR_IDN_SP_FIFO_SEL 0x0a10
+#define SPR_IDN_SP_FREEZE 0x0a11
+#define SPR_IDN_SP_FREEZE__SP_FRZ_MASK 0x1
+#define SPR_IDN_SP_FREEZE__DEMUX_FRZ_MASK 0x2
+#define SPR_IDN_SP_FREEZE__NON_DEST_EXT_MASK 0x4
+#define SPR_IDN_SP_STATE 0x0a12
+#define SPR_IDN_TAG_0 0x0a13
+#define SPR_IDN_TAG_1 0x0a14
+#define SPR_IDN_TAG_VALID 0x0a15
+#define SPR_IDN_TILE_COORD 0x0a16
 #define SPR_INTCTRL_0_STATUS 0x4a07
 #define SPR_INTCTRL_1_STATUS 0x4807
 #define SPR_INTCTRL_2_STATUS 0x4607
@@ -87,12 +112,36 @@
 #define SPR_INTERRUPT_MASK_SET_1_1 0x480e
 #define SPR_INTERRUPT_MASK_SET_2_0 0x460c
 #define SPR_INTERRUPT_MASK_SET_2_1 0x460d
+#define SPR_MPL_AUX_PERF_COUNT_SET_0 0x6000
+#define SPR_MPL_AUX_PERF_COUNT_SET_1 0x6001
+#define SPR_MPL_AUX_PERF_COUNT_SET_2 0x6002
 #define SPR_MPL_DMA_CPL_SET_0 0x5800
 #define SPR_MPL_DMA_CPL_SET_1 0x5801
 #define SPR_MPL_DMA_CPL_SET_2 0x5802
 #define SPR_MPL_DMA_NOTIFY_SET_0 0x3800
 #define SPR_MPL_DMA_NOTIFY_SET_1 0x3801
 #define SPR_MPL_DMA_NOTIFY_SET_2 0x3802
+#define SPR_MPL_IDN_ACCESS_SET_0 0x0a00
+#define SPR_MPL_IDN_ACCESS_SET_1 0x0a01
+#define SPR_MPL_IDN_ACCESS_SET_2 0x0a02
+#define SPR_MPL_IDN_AVAIL_SET_0 0x3e00
+#define SPR_MPL_IDN_AVAIL_SET_1 0x3e01
+#define SPR_MPL_IDN_AVAIL_SET_2 0x3e02
+#define SPR_MPL_IDN_CA_SET_0 0x3a00
+#define SPR_MPL_IDN_CA_SET_1 0x3a01
+#define SPR_MPL_IDN_CA_SET_2 0x3a02
+#define SPR_MPL_IDN_COMPLETE_SET_0 0x1200
+#define SPR_MPL_IDN_COMPLETE_SET_1 0x1201
+#define SPR_MPL_IDN_COMPLETE_SET_2 0x1202
+#define SPR_MPL_IDN_FIREWALL_SET_0 0x2e00
+#define SPR_MPL_IDN_FIREWALL_SET_1 0x2e01
+#define SPR_MPL_IDN_FIREWALL_SET_2 0x2e02
+#define SPR_MPL_IDN_REFILL_SET_0 0x0e00
+#define SPR_MPL_IDN_REFILL_SET_1 0x0e01
+#define SPR_MPL_IDN_REFILL_SET_2 0x0e02
+#define SPR_MPL_IDN_TIMER_SET_0 0x3400
+#define SPR_MPL_IDN_TIMER_SET_1 0x3401
+#define SPR_MPL_IDN_TIMER_SET_2 0x3402
 #define SPR_MPL_INTCTRL_0_SET_0 0x4a00
 #define SPR_MPL_INTCTRL_0_SET_1 0x4a01
 #define SPR_MPL_INTCTRL_0_SET_2 0x4a02
@@ -102,6 +151,9 @@
 #define SPR_MPL_INTCTRL_2_SET_0 0x4600
 #define SPR_MPL_INTCTRL_2_SET_1 0x4601
 #define SPR_MPL_INTCTRL_2_SET_2 0x4602
+#define SPR_MPL_PERF_COUNT_SET_0 0x4200
+#define SPR_MPL_PERF_COUNT_SET_1 0x4201
+#define SPR_MPL_PERF_COUNT_SET_2 0x4202
 #define SPR_MPL_SN_ACCESS_SET_0 0x0800
 #define SPR_MPL_SN_ACCESS_SET_1 0x0801
 #define SPR_MPL_SN_ACCESS_SET_2 0x0802
@@ -181,6 +233,7 @@
 #define SPR_UDN_DEMUX_STATUS 0x0c0d
 #define SPR_UDN_DEMUX_WRITE_FIFO 0x0c0e
 #define SPR_UDN_DIRECTION_PROTECT 0x3005
+#define SPR_UDN_PENDING 0x0c10
 #define SPR_UDN_REFILL_EN 0x1005
 #define SPR_UDN_SP_FIFO_DATA 0x0c11
 #define SPR_UDN_SP_FIFO_SEL 0x0c12
@@ -195,6 +248,9 @@
 #define SPR_UDN_TAG_3 0x0c18
 #define SPR_UDN_TAG_VALID 0x0c19
 #define SPR_UDN_TILE_COORD 0x0c1a
+#define SPR_WATCH_CTL 0x4209
+#define SPR_WATCH_MASK 0x420a
+#define SPR_WATCH_VAL 0x420b
 
 #endif /* !defined(__ARCH_SPR_DEF_H__) */
 
diff --git a/arch/tile/include/arch/spr_def_64.h b/arch/tile/include/arch/spr_def_64.h
index cd3e5f95d5fd..0da86faa3370 100644
--- a/arch/tile/include/arch/spr_def_64.h
+++ b/arch/tile/include/arch/spr_def_64.h
@@ -52,6 +52,13 @@
 #define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
 #define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
 #define SPR_FAIL 0x2707
+#define SPR_IDN_AVAIL_EN 0x1a05
+#define SPR_IDN_DATA_AVAIL 0x0a80
+#define SPR_IDN_DEADLOCK_TIMEOUT 0x1806
+#define SPR_IDN_DEMUX_COUNT_0 0x0a05
+#define SPR_IDN_DEMUX_COUNT_1 0x0a06
+#define SPR_IDN_DIRECTION_PROTECT 0x1405
+#define SPR_IDN_PENDING 0x0a08
 #define SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK 0x1
 #define SPR_INTCTRL_0_STATUS 0x2505
 #define SPR_INTCTRL_1_STATUS 0x2405
@@ -88,9 +95,27 @@
 #define SPR_IPI_MASK_SET_0 0x1f0a
 #define SPR_IPI_MASK_SET_1 0x1e0a
 #define SPR_IPI_MASK_SET_2 0x1d0a
+#define SPR_MPL_AUX_PERF_COUNT_SET_0 0x2100
+#define SPR_MPL_AUX_PERF_COUNT_SET_1 0x2101
+#define SPR_MPL_AUX_PERF_COUNT_SET_2 0x2102
 #define SPR_MPL_AUX_TILE_TIMER_SET_0 0x1700
 #define SPR_MPL_AUX_TILE_TIMER_SET_1 0x1701
 #define SPR_MPL_AUX_TILE_TIMER_SET_2 0x1702
+#define SPR_MPL_IDN_ACCESS_SET_0 0x0a00
+#define SPR_MPL_IDN_ACCESS_SET_1 0x0a01
+#define SPR_MPL_IDN_ACCESS_SET_2 0x0a02
+#define SPR_MPL_IDN_AVAIL_SET_0 0x1a00
+#define SPR_MPL_IDN_AVAIL_SET_1 0x1a01
+#define SPR_MPL_IDN_AVAIL_SET_2 0x1a02
+#define SPR_MPL_IDN_COMPLETE_SET_0 0x0500
+#define SPR_MPL_IDN_COMPLETE_SET_1 0x0501
+#define SPR_MPL_IDN_COMPLETE_SET_2 0x0502
+#define SPR_MPL_IDN_FIREWALL_SET_0 0x1400
+#define SPR_MPL_IDN_FIREWALL_SET_1 0x1401
+#define SPR_MPL_IDN_FIREWALL_SET_2 0x1402
+#define SPR_MPL_IDN_TIMER_SET_0 0x1800
+#define SPR_MPL_IDN_TIMER_SET_1 0x1801
+#define SPR_MPL_IDN_TIMER_SET_2 0x1802
 #define SPR_MPL_INTCTRL_0_SET_0 0x2500
 #define SPR_MPL_INTCTRL_0_SET_1 0x2501
 #define SPR_MPL_INTCTRL_0_SET_2 0x2502
@@ -100,6 +125,21 @@
 #define SPR_MPL_INTCTRL_2_SET_0 0x2300
 #define SPR_MPL_INTCTRL_2_SET_1 0x2301
 #define SPR_MPL_INTCTRL_2_SET_2 0x2302
+#define SPR_MPL_IPI_0 0x1f04
+#define SPR_MPL_IPI_0_SET_0 0x1f00
+#define SPR_MPL_IPI_0_SET_1 0x1f01
+#define SPR_MPL_IPI_0_SET_2 0x1f02
+#define SPR_MPL_IPI_1 0x1e04
+#define SPR_MPL_IPI_1_SET_0 0x1e00
+#define SPR_MPL_IPI_1_SET_1 0x1e01
+#define SPR_MPL_IPI_1_SET_2 0x1e02
+#define SPR_MPL_IPI_2 0x1d04
+#define SPR_MPL_IPI_2_SET_0 0x1d00
+#define SPR_MPL_IPI_2_SET_1 0x1d01
+#define SPR_MPL_IPI_2_SET_2 0x1d02
+#define SPR_MPL_PERF_COUNT_SET_0 0x2000
+#define SPR_MPL_PERF_COUNT_SET_1 0x2001
+#define SPR_MPL_PERF_COUNT_SET_2 0x2002
 #define SPR_MPL_UDN_ACCESS_SET_0 0x0b00
 #define SPR_MPL_UDN_ACCESS_SET_1 0x0b01
 #define SPR_MPL_UDN_ACCESS_SET_2 0x0b02
@@ -167,6 +207,9 @@
 #define SPR_UDN_DEMUX_COUNT_2 0x0b07
 #define SPR_UDN_DEMUX_COUNT_3 0x0b08
 #define SPR_UDN_DIRECTION_PROTECT 0x1505
+#define SPR_UDN_PENDING 0x0b0a
+#define SPR_WATCH_MASK 0x200a
+#define SPR_WATCH_VAL 0x200b
 
 #endif /* !defined(__ARCH_SPR_DEF_H__) */
 
diff --git a/arch/tile/include/asm/hardwall.h b/arch/tile/include/asm/hardwall.h
index 2ac422848c7d..47514a58d685 100644
--- a/arch/tile/include/asm/hardwall.h
+++ b/arch/tile/include/asm/hardwall.h
@@ -11,12 +11,14 @@
  * NON INFRINGEMENT. See the GNU General Public License for
  * more details.
  *
- * Provide methods for the HARDWALL_FILE for accessing the UDN.
+ * Provide methods for access control of per-cpu resources like
+ * UDN, IDN, or IPI.
  */
 
 #ifndef _ASM_TILE_HARDWALL_H
 #define _ASM_TILE_HARDWALL_H
 
+#include <arch/chip.h>
 #include <linux/ioctl.h>
 
 #define HARDWALL_IOCTL_BASE 0xa2
@@ -24,8 +26,9 @@
 /*
  * The HARDWALL_CREATE() ioctl is a macro with a "size" argument.
  * The resulting ioctl value is passed to the kernel in conjunction
- * with a pointer to a little-endian bitmask of cpus, which must be
- * physically in a rectangular configuration on the chip.
+ * with a pointer to a standard kernel bitmask of cpus.
+ * For network resources (UDN or IDN) the bitmask must physically
+ * represent a rectangular configuration on the chip.
  * The "size" is the number of bytes of cpu mask data.
  */
 #define _HARDWALL_CREATE 1
@@ -44,13 +47,7 @@
 #define HARDWALL_GET_ID \
 	_IO(HARDWALL_IOCTL_BASE, _HARDWALL_GET_ID)
 
-#ifndef __KERNEL__
-
-/* This is the canonical name expected by userspace. */
-#define HARDWALL_FILE "/dev/hardwall"
-
-#else
-
+#ifdef __KERNEL__
 /* /proc hooks for hardwall. */
 struct proc_dir_entry;
 #ifdef CONFIG_HARDWALL
@@ -59,7 +56,6 @@ int proc_pid_hardwall(struct task_struct *task, char *buffer);
 #else
 static inline void proc_tile_hardwall_init(struct proc_dir_entry *root) {}
 #endif
-
 #endif
 
 #endif /* _ASM_TILE_HARDWALL_H */
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 34c1e01ffb5e..e85a9af12968 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -76,6 +76,17 @@ struct async_tlb {
 
 #ifdef CONFIG_HARDWALL
 struct hardwall_info;
+struct hardwall_task {
+	/* Which hardwall is this task tied to? (or NULL if none) */
+	struct hardwall_info *info;
+	/* Chains this task into the list at info->task_head. */
+	struct list_head list;
+};
+#ifdef __tilepro__
+#define HARDWALL_TYPES 1   /* udn */
+#else
+#define HARDWALL_TYPES 3   /* udn, idn, and ipi */
+#endif
 #endif
 
 struct thread_struct {
@@ -116,10 +127,8 @@ struct thread_struct {
 	unsigned long dstream_pf;
 #endif
 #ifdef CONFIG_HARDWALL
-	/* Is this task tied to an activated hardwall? */
-	struct hardwall_info *hardwall;
-	/* Chains this task into the list at hardwall->list. */
-	struct list_head hardwall_list;
+	/* Hardwall information for various resources. */
+	struct hardwall_task hardwall[HARDWALL_TYPES];
 #endif
 #if CHIP_HAS_TILE_DMA()
 	/* Async DMA TLB fault information */
diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h
index e58613e0752f..c67eb70ea78e 100644
--- a/arch/tile/include/asm/setup.h
+++ b/arch/tile/include/asm/setup.h
@@ -41,15 +41,15 @@ void restrict_dma_mpls(void);
 #ifdef CONFIG_HARDWALL
 /* User-level network management functions */
 void reset_network_state(void);
-void grant_network_mpls(void);
-void restrict_network_mpls(void);
 struct task_struct;
-int hardwall_deactivate(struct task_struct *task);
+void hardwall_switch_tasks(struct task_struct *prev, struct task_struct *next);
+void hardwall_deactivate_all(struct task_struct *task);
+int hardwall_ipi_valid(int cpu);
 
 /* Hook hardwall code into changes in affinity. */
 #define arch_set_cpus_allowed(p, new_mask) do { \
-	if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \
-		hardwall_deactivate(p); \
+	if (!cpumask_equal(&p->cpus_allowed, new_mask)) \
+		hardwall_deactivate_all(p); \
 } while (0)
 #endif
 
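[Editorial note: one consequence of the setup.h hunk above is that any
affinity change now deactivates all of a task's hardwalls, across all three
resource types.  A minimal userspace sketch of that interaction, assuming
the device/ioctl names from the example near the top of this page: after
rebind() below, the task must re-issue HARDWALL_ACTIVATE.]

#define _GNU_SOURCE
#include <sched.h>

static void rebind(int cpu)
{
	cpu_set_t set;
	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	/* Triggers arch_set_cpus_allowed() -> hardwall_deactivate_all(). */
	sched_setaffinity(0, sizeof(set), &set);
}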
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 8c41891aab34..20273ee37deb 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -33,59 +33,157 @@
 
 
 /*
- * This data structure tracks the rectangle data, etc., associated
- * one-to-one with a "struct file *" from opening HARDWALL_FILE.
+ * Implement a per-cpu "hardwall" resource class such as UDN or IPI.
+ * We use "hardwall" nomenclature throughout for historical reasons.
+ * The lock here controls access to the list data structure as well as
+ * to the items on the list.
+ */
+struct hardwall_type {
+	int index;
+	int is_xdn;
+	int is_idn;
+	int disabled;
+	const char *name;
+	struct list_head list;
+	spinlock_t lock;
+	struct proc_dir_entry *proc_dir;
+};
+
+enum hardwall_index {
+	HARDWALL_UDN = 0,
+#ifndef __tilepro__
+	HARDWALL_IDN = 1,
+	HARDWALL_IPI = 2,
+#endif
+	_HARDWALL_TYPES
+};
+
+static struct hardwall_type hardwall_types[] = {
+	{ /* user-space access to UDN */
+		0,
+		1,
+		0,
+		0,
+		"udn",
+		LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
+		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock),
+		NULL
+	},
+#ifndef __tilepro__
+	{ /* user-space access to IDN */
+		1,
+		1,
+		1,
+		1, /* disabled pending hypervisor support */
+		"idn",
+		LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
+		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock),
+		NULL
+	},
+	{ /* access to user-space IPI */
+		2,
+		0,
+		0,
+		0,
+		"ipi",
+		LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
+		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock),
+		NULL
+	},
+#endif
+};
+
+/*
+ * This data structure tracks the cpu data, etc., associated
+ * one-to-one with a "struct file *" from opening a hardwall device file.
  * Note that the file's private data points back to this structure.
  */
 struct hardwall_info {
-	struct list_head list;             /* "rectangles" list */
+	struct list_head list;             /* for hardwall_types.list */
 	struct list_head task_head;        /* head of tasks in this hardwall */
-	struct cpumask cpumask;            /* cpus in the rectangle */
+	struct hardwall_type *type;        /* type of this resource */
+	struct cpumask cpumask;            /* cpus reserved */
+	int id;                            /* integer id for this hardwall */
+	int teardown_in_progress;          /* are we tearing this one down? */
+
+	/* Remaining fields only valid for user-network resources. */
 	int ulhc_x;                        /* upper left hand corner x coord */
 	int ulhc_y;                        /* upper left hand corner y coord */
 	int width;                         /* rectangle width */
 	int height;                        /* rectangle height */
-	int id;                            /* integer id for this hardwall */
-	int teardown_in_progress;          /* are we tearing this one down? */
+#if CHIP_HAS_REV1_XDN()
+	atomic_t xdn_pending_count;        /* cores in phase 1 of drain */
+#endif
 };
 
-/* Currently allocated hardwall rectangles */
-static LIST_HEAD(rectangles);
 
 /* /proc/tile/hardwall */
 static struct proc_dir_entry *hardwall_proc_dir;
 
 /* Functions to manage files in /proc/tile/hardwall. */
-static void hardwall_add_proc(struct hardwall_info *rect);
-static void hardwall_remove_proc(struct hardwall_info *rect);
-
-/*
- * Guard changes to the hardwall data structures.
- * This could be finer grained (e.g. one lock for the list of hardwall
- * rectangles, then separate embedded locks for each one's list of tasks),
- * but there are subtle correctness issues when trying to start with
- * a task's "hardwall" pointer and lock the correct rectangle's embedded
- * lock in the presence of a simultaneous deactivation, so it seems
- * easier to have a single lock, given that none of these data
- * structures are touched very frequently during normal operation.
- */
-static DEFINE_SPINLOCK(hardwall_lock);
+static void hardwall_add_proc(struct hardwall_info *);
+static void hardwall_remove_proc(struct hardwall_info *);
 
 /* Allow disabling UDN access. */
-static int udn_disabled;
 static int __init noudn(char *str)
 {
 	pr_info("User-space UDN access is disabled\n");
-	udn_disabled = 1;
+	hardwall_types[HARDWALL_UDN].disabled = 1;
 	return 0;
 }
 early_param("noudn", noudn);
 
+#ifndef __tilepro__
+/* Allow disabling IDN access. */
+static int __init noidn(char *str)
+{
+	pr_info("User-space IDN access is disabled\n");
+	hardwall_types[HARDWALL_IDN].disabled = 1;
+	return 0;
+}
+early_param("noidn", noidn);
+
+/* Allow disabling IPI access. */
+static int __init noipi(char *str)
+{
+	pr_info("User-space IPI access is disabled\n");
+	hardwall_types[HARDWALL_IPI].disabled = 1;
+	return 0;
+}
+early_param("noipi", noipi);
+#endif
+
 
 /*
- * Low-level primitives
+ * Low-level primitives for UDN/IDN
  */
 
+#ifdef __tilepro__
+#define mtspr_XDN(hwt, name, val) \
+	do { (void)(hwt); __insn_mtspr(SPR_UDN_##name, (val)); } while (0)
+#define mtspr_MPL_XDN(hwt, name, val) \
+	do { (void)(hwt); __insn_mtspr(SPR_MPL_UDN_##name, (val)); } while (0)
+#define mfspr_XDN(hwt, name) \
+	((void)(hwt), __insn_mfspr(SPR_UDN_##name))
+#else
+#define mtspr_XDN(hwt, name, val) \
+	do { \
+		if ((hwt)->is_idn) \
+			__insn_mtspr(SPR_IDN_##name, (val)); \
+		else \
+			__insn_mtspr(SPR_UDN_##name, (val)); \
+	} while (0)
+#define mtspr_MPL_XDN(hwt, name, val) \
+	do { \
+		if ((hwt)->is_idn) \
+			__insn_mtspr(SPR_MPL_IDN_##name, (val)); \
+		else \
+			__insn_mtspr(SPR_MPL_UDN_##name, (val)); \
+	} while (0)
+#define mfspr_XDN(hwt, name) \
+	((hwt)->is_idn ? __insn_mfspr(SPR_IDN_##name) : __insn_mfspr(SPR_UDN_##name))
+#endif
+
 /* Set a CPU bit if the CPU is online. */
 #define cpu_online_set(cpu, dst) do { \
 	if (cpu_online(cpu)) \
@@ -101,7 +199,7 @@ static int contains(struct hardwall_info *r, int x, int y)
 }
 
 /* Compute the rectangle parameters and validate the cpumask. */
-static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
+static int check_rectangle(struct hardwall_info *r, struct cpumask *mask)
 {
 	int x, y, cpu, ulhc, lrhc;
 
@@ -114,8 +212,6 @@ static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
 	r->ulhc_y = cpu_y(ulhc);
 	r->width = cpu_x(lrhc) - r->ulhc_x + 1;
 	r->height = cpu_y(lrhc) - r->ulhc_y + 1;
-	cpumask_copy(&r->cpumask, mask);
-	r->id = ulhc;   /* The ulhc cpu id can be the hardwall id. */
 
 	/* Width and height must be positive */
 	if (r->width <= 0 || r->height <= 0)
@@ -128,7 +224,7 @@ static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
 		return -EINVAL;
 
 	/*
-	 * Note that offline cpus can't be drained when this UDN
+	 * Note that offline cpus can't be drained when this user network
 	 * rectangle eventually closes. We used to detect this
 	 * situation and print a warning, but it annoyed users and
 	 * they ignored it anyway, so now we just return without a
@@ -137,16 +233,6 @@ static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
 	return 0;
 }
 
-/* Do the two given rectangles overlap on any cpu? */
-static int overlaps(struct hardwall_info *a, struct hardwall_info *b)
-{
-	return a->ulhc_x + a->width > b->ulhc_x &&      /* A not to the left */
-		b->ulhc_x + b->width > a->ulhc_x &&     /* B not to the left */
-		a->ulhc_y + a->height > b->ulhc_y &&    /* A not above */
-		b->ulhc_y + b->height > a->ulhc_y;      /* B not above */
-}
-
-
 /*
  * Hardware management of hardwall setup, teardown, trapping,
  * and enabling/disabling PL0 access to the networks.
@@ -157,23 +243,35 @@ enum direction_protect {
 	N_PROTECT = (1 << 0),
 	E_PROTECT = (1 << 1),
 	S_PROTECT = (1 << 2),
-	W_PROTECT = (1 << 3)
+	W_PROTECT = (1 << 3),
+	C_PROTECT = (1 << 4),
 };
 
-static void enable_firewall_interrupts(void)
+static inline int xdn_which_interrupt(struct hardwall_type *hwt)
+{
+#ifndef __tilepro__
+	if (hwt->is_idn)
+		return INT_IDN_FIREWALL;
+#endif
+	return INT_UDN_FIREWALL;
+}
+
+static void enable_firewall_interrupts(struct hardwall_type *hwt)
 {
-	arch_local_irq_unmask_now(INT_UDN_FIREWALL);
+	arch_local_irq_unmask_now(xdn_which_interrupt(hwt));
 }
 
-static void disable_firewall_interrupts(void)
+static void disable_firewall_interrupts(struct hardwall_type *hwt)
 {
-	arch_local_irq_mask_now(INT_UDN_FIREWALL);
+	arch_local_irq_mask_now(xdn_which_interrupt(hwt));
 }
 
 /* Set up hardwall on this cpu based on the passed hardwall_info. */
-static void hardwall_setup_ipi_func(void *info)
+static void hardwall_setup_func(void *info)
 {
 	struct hardwall_info *r = info;
+	struct hardwall_type *hwt = r->type;
+
 	int cpu = smp_processor_id();
 	int x = cpu % smp_width;
 	int y = cpu / smp_width;
@@ -187,13 +285,12 @@ static void hardwall_setup_ipi_func(void *info)
 	if (y == r->ulhc_y + r->height - 1)
 		bits |= S_PROTECT;
 	BUG_ON(bits == 0);
-	__insn_mtspr(SPR_UDN_DIRECTION_PROTECT, bits);
-	enable_firewall_interrupts();
-
+	mtspr_XDN(hwt, DIRECTION_PROTECT, bits);
+	enable_firewall_interrupts(hwt);
 }
 
 /* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */
-static void hardwall_setup(struct hardwall_info *r)
+static void hardwall_protect_rectangle(struct hardwall_info *r)
 {
 	int x, y, cpu, delta;
 	struct cpumask rect_cpus;
@@ -217,37 +314,50 @@ static void hardwall_setup(struct hardwall_info *r)
 	}
 
 	/* Then tell all the cpus to set up their protection SPR */
-	on_each_cpu_mask(&rect_cpus, hardwall_setup_ipi_func, r, 1);
+	on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
 }
 
 void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
 {
 	struct hardwall_info *rect;
+	struct hardwall_type *hwt;
 	struct task_struct *p;
 	struct siginfo info;
-	int x, y;
 	int cpu = smp_processor_id();
 	int found_processes;
 	unsigned long flags;
-
 	struct pt_regs *old_regs = set_irq_regs(regs);
+
 	irq_enter();
 
+	/* Figure out which network trapped. */
+	switch (fault_num) {
+#ifndef __tilepro__
+	case INT_IDN_FIREWALL:
+		hwt = &hardwall_types[HARDWALL_IDN];
+		break;
+#endif
+	case INT_UDN_FIREWALL:
+		hwt = &hardwall_types[HARDWALL_UDN];
+		break;
+	default:
+		BUG();
+	}
+	BUG_ON(hwt->disabled);
+
 	/* This tile trapped a network access; find the rectangle. */
-	x = cpu % smp_width;
-	y = cpu / smp_width;
-	spin_lock_irqsave(&hardwall_lock, flags);
-	list_for_each_entry(rect, &rectangles, list) {
-		if (contains(rect, x, y))
+	spin_lock_irqsave(&hwt->lock, flags);
+	list_for_each_entry(rect, &hwt->list, list) {
+		if (cpumask_test_cpu(cpu, &rect->cpumask))
 			break;
 	}
 
 	/*
 	 * It shouldn't be possible not to find this cpu on the
 	 * rectangle list, since only cpus in rectangles get hardwalled.
-	 * The hardwall is only removed after the UDN is drained.
+	 * The hardwall is only removed after the user network is drained.
 	 */
-	BUG_ON(&rect->list == &rectangles);
+	BUG_ON(&rect->list == &hwt->list);
 
 	/*
 	 * If we already started teardown on this hardwall, don't worry;
@@ -255,30 +365,32 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
 	 * to quiesce.
 	 */
 	if (rect->teardown_in_progress) {
-		pr_notice("cpu %d: detected hardwall violation %#lx"
+		pr_notice("cpu %d: detected %s hardwall violation %#lx"
 			  " while teardown already in progress\n",
-			  cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
+			  cpu, hwt->name,
+			  (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
 		goto done;
 	}
 
 	/*
 	 * Kill off any process that is activated in this rectangle.
 	 * We bypass security to deliver the signal, since it must be
-	 * one of the activated processes that generated the UDN
+	 * one of the activated processes that generated the user network
 	 * message that caused this trap, and all the activated
 	 * processes shared a single open file so are pretty tightly
 	 * bound together from a security point of view to begin with.
 	 */
 	rect->teardown_in_progress = 1;
 	wmb(); /* Ensure visibility of rectangle before notifying processes. */
-	pr_notice("cpu %d: detected hardwall violation %#lx...\n",
-		  cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
+	pr_notice("cpu %d: detected %s hardwall violation %#lx...\n",
+		  cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
 	info.si_signo = SIGILL;
 	info.si_errno = 0;
 	info.si_code = ILL_HARDWALL;
 	found_processes = 0;
-	list_for_each_entry(p, &rect->task_head, thread.hardwall_list) {
-		BUG_ON(p->thread.hardwall != rect);
+	list_for_each_entry(p, &rect->task_head,
+			    thread.hardwall[hwt->index].list) {
+		BUG_ON(p->thread.hardwall[hwt->index].info != rect);
 		if (!(p->flags & PF_EXITING)) {
 			found_processes = 1;
 			pr_notice("hardwall: killing %d\n", p->pid);
@@ -289,7 +401,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
 		pr_notice("hardwall: no associated processes!\n");
 
  done:
-	spin_unlock_irqrestore(&hardwall_lock, flags);
+	spin_unlock_irqrestore(&hwt->lock, flags);
 
 	/*
 	 * We have to disable firewall interrupts now, or else when we
@@ -298,48 +410,87 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
 	 * haven't yet drained the network, and that would allow packets
 	 * to cross out of the hardwall region.
 	 */
-	disable_firewall_interrupts();
+	disable_firewall_interrupts(hwt);
 
 	irq_exit();
 	set_irq_regs(old_regs);
 }
 
-/* Allow access from user space to the UDN. */
-void grant_network_mpls(void)
+/* Allow access from user space to the user network. */
+void grant_hardwall_mpls(struct hardwall_type *hwt)
 {
-	__insn_mtspr(SPR_MPL_UDN_ACCESS_SET_0, 1);
-	__insn_mtspr(SPR_MPL_UDN_AVAIL_SET_0, 1);
-	__insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_0, 1);
-	__insn_mtspr(SPR_MPL_UDN_TIMER_SET_0, 1);
+#ifndef __tilepro__
+	if (!hwt->is_xdn) {
+		__insn_mtspr(SPR_MPL_IPI_0_SET_0, 1);
+		return;
+	}
+#endif
+	mtspr_MPL_XDN(hwt, ACCESS_SET_0, 1);
+	mtspr_MPL_XDN(hwt, AVAIL_SET_0, 1);
+	mtspr_MPL_XDN(hwt, COMPLETE_SET_0, 1);
+	mtspr_MPL_XDN(hwt, TIMER_SET_0, 1);
 #if !CHIP_HAS_REV1_XDN()
-	__insn_mtspr(SPR_MPL_UDN_REFILL_SET_0, 1);
-	__insn_mtspr(SPR_MPL_UDN_CA_SET_0, 1);
+	mtspr_MPL_XDN(hwt, REFILL_SET_0, 1);
+	mtspr_MPL_XDN(hwt, CA_SET_0, 1);
 #endif
 }
 
-/* Deny access from user space to the UDN. */
-void restrict_network_mpls(void)
+/* Deny access from user space to the user network. */
+void restrict_hardwall_mpls(struct hardwall_type *hwt)
 {
-	__insn_mtspr(SPR_MPL_UDN_ACCESS_SET_1, 1);
-	__insn_mtspr(SPR_MPL_UDN_AVAIL_SET_1, 1);
-	__insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_1, 1);
-	__insn_mtspr(SPR_MPL_UDN_TIMER_SET_1, 1);
+#ifndef __tilepro__
+	if (!hwt->is_xdn) {
+		__insn_mtspr(SPR_MPL_IPI_0_SET_1, 1);
+		return;
+	}
+#endif
+	mtspr_MPL_XDN(hwt, ACCESS_SET_1, 1);
+	mtspr_MPL_XDN(hwt, AVAIL_SET_1, 1);
+	mtspr_MPL_XDN(hwt, COMPLETE_SET_1, 1);
+	mtspr_MPL_XDN(hwt, TIMER_SET_1, 1);
 #if !CHIP_HAS_REV1_XDN()
-	__insn_mtspr(SPR_MPL_UDN_REFILL_SET_1, 1);
-	__insn_mtspr(SPR_MPL_UDN_CA_SET_1, 1);
+	mtspr_MPL_XDN(hwt, REFILL_SET_1, 1);
+	mtspr_MPL_XDN(hwt, CA_SET_1, 1);
 #endif
 }
 
+/* Restrict or deny as necessary for the task we're switching to. */
+void hardwall_switch_tasks(struct task_struct *prev,
+			   struct task_struct *next)
+{
+	int i;
+	for (i = 0; i < HARDWALL_TYPES; ++i) {
+		if (prev->thread.hardwall[i].info != NULL) {
+			if (next->thread.hardwall[i].info == NULL)
+				restrict_hardwall_mpls(&hardwall_types[i]);
+		} else if (next->thread.hardwall[i].info != NULL) {
+			grant_hardwall_mpls(&hardwall_types[i]);
+		}
+	}
+}
+
+/* Does this task have the right to IPI the given cpu? */
+int hardwall_ipi_valid(int cpu)
+{
+#ifdef __tilegx__
+	struct hardwall_info *info =
+		current->thread.hardwall[HARDWALL_IPI].info;
+	return info && cpumask_test_cpu(cpu, &info->cpumask);
+#else
+	return 0;
+#endif
+}
 
 /*
- * Code to create, activate, deactivate, and destroy hardwall rectangles.
+ * Code to create, activate, deactivate, and destroy hardwall resources.
  */
 
-/* Create a hardwall for the given rectangle */
-static struct hardwall_info *hardwall_create(
-	size_t size, const unsigned char __user *bits)
+/* Create a hardwall for the given resource */
+static struct hardwall_info *hardwall_create(struct hardwall_type *hwt,
+					     size_t size,
+					     const unsigned char __user *bits)
 {
-	struct hardwall_info *iter, *rect;
+	struct hardwall_info *iter, *info;
 	struct cpumask mask;
 	unsigned long flags;
 	int rc;
@@ -370,55 +521,62 @@ static struct hardwall_info *hardwall_create(
 		}
 	}
 
-	/* Allocate a new rectangle optimistically. */
-	rect = kmalloc(sizeof(struct hardwall_info),
-			GFP_KERNEL | __GFP_ZERO);
-	if (rect == NULL)
+	/* Allocate a new hardwall_info optimistically. */
+	info = kmalloc(sizeof(struct hardwall_info),
+			GFP_KERNEL | __GFP_ZERO);
+	if (info == NULL)
 		return ERR_PTR(-ENOMEM);
-	INIT_LIST_HEAD(&rect->task_head);
+	INIT_LIST_HEAD(&info->task_head);
+	info->type = hwt;
 
 	/* Compute the rectangle size and validate that it's plausible. */
-	rc = setup_rectangle(rect, &mask);
-	if (rc != 0) {
-		kfree(rect);
-		return ERR_PTR(rc);
+	cpumask_copy(&info->cpumask, &mask);
+	info->id = find_first_bit(cpumask_bits(&mask), nr_cpumask_bits);
+	if (hwt->is_xdn) {
+		rc = check_rectangle(info, &mask);
+		if (rc != 0) {
+			kfree(info);
+			return ERR_PTR(rc);
+		}
 	}
 
 	/* Confirm it doesn't overlap and add it to the list. */
-	spin_lock_irqsave(&hardwall_lock, flags);
-	list_for_each_entry(iter, &rectangles, list) {
-		if (overlaps(iter, rect)) {
-			spin_unlock_irqrestore(&hardwall_lock, flags);
-			kfree(rect);
+	spin_lock_irqsave(&hwt->lock, flags);
+	list_for_each_entry(iter, &hwt->list, list) {
+		if (cpumask_intersects(&iter->cpumask, &info->cpumask)) {
+			spin_unlock_irqrestore(&hwt->lock, flags);
+			kfree(info);
 			return ERR_PTR(-EBUSY);
 		}
 	}
-	list_add_tail(&rect->list, &rectangles);
-	spin_unlock_irqrestore(&hardwall_lock, flags);
+	list_add_tail(&info->list, &hwt->list);
+	spin_unlock_irqrestore(&hwt->lock, flags);
 
 	/* Set up appropriate hardwalling on all affected cpus. */
-	hardwall_setup(rect);
+	if (hwt->is_xdn)
+		hardwall_protect_rectangle(info);
 
 	/* Create a /proc/tile/hardwall entry. */
-	hardwall_add_proc(rect);
+	hardwall_add_proc(info);
 
-	return rect;
+	return info;
 }
 
 /* Activate a given hardwall on this cpu for this process. */
-static int hardwall_activate(struct hardwall_info *rect)
+static int hardwall_activate(struct hardwall_info *info)
 {
-	int cpu, x, y;
+	int cpu;
 	unsigned long flags;
 	struct task_struct *p = current;
 	struct thread_struct *ts = &p->thread;
+	struct hardwall_type *hwt;
 
-	/* Require a rectangle. */
-	if (rect == NULL)
+	/* Require a hardwall. */
+	if (info == NULL)
 		return -ENODATA;
 
-	/* Not allowed to activate a rectangle that is being torn down. */
-	if (rect->teardown_in_progress)
+	/* Not allowed to activate a hardwall that is being torn down. */
+	if (info->teardown_in_progress)
 		return -EINVAL;
 
 	/*
@@ -428,78 +586,87 @@ static int hardwall_activate(struct hardwall_info *rect)
 	if (cpumask_weight(&p->cpus_allowed) != 1)
 		return -EPERM;
 
-	/* Make sure we are bound to a cpu in this rectangle. */
+	/* Make sure we are bound to a cpu assigned to this resource. */
 	cpu = smp_processor_id();
 	BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
-	x = cpu_x(cpu);
-	y = cpu_y(cpu);
-	if (!contains(rect, x, y))
+	if (!cpumask_test_cpu(cpu, &info->cpumask))
 		return -EINVAL;
 
 	/* If we are already bound to this hardwall, it's a no-op. */
-	if (ts->hardwall) {
-		BUG_ON(ts->hardwall != rect);
+	hwt = info->type;
+	if (ts->hardwall[hwt->index].info) {
+		BUG_ON(ts->hardwall[hwt->index].info != info);
 		return 0;
 	}
 
-	/* Success! This process gets to use the user networks on this cpu. */
-	ts->hardwall = rect;
-	spin_lock_irqsave(&hardwall_lock, flags);
-	list_add(&ts->hardwall_list, &rect->task_head);
-	spin_unlock_irqrestore(&hardwall_lock, flags);
-	grant_network_mpls();
-	printk(KERN_DEBUG "Pid %d (%s) activated for hardwall: cpu %d\n",
-	       p->pid, p->comm, cpu);
+	/* Success! This process gets to use the resource on this cpu. */
+	ts->hardwall[hwt->index].info = info;
+	spin_lock_irqsave(&hwt->lock, flags);
+	list_add(&ts->hardwall[hwt->index].list, &info->task_head);
+	spin_unlock_irqrestore(&hwt->lock, flags);
+	grant_hardwall_mpls(hwt);
+	printk(KERN_DEBUG "Pid %d (%s) activated for %s hardwall: cpu %d\n",
+	       p->pid, p->comm, hwt->name, cpu);
 	return 0;
 }
 
 /*
- * Deactivate a task's hardwall. Must hold hardwall_lock.
+ * Deactivate a task's hardwall. Must hold lock for hardwall_type.
  * This method may be called from free_task(), so we don't want to
  * rely on too many fields of struct task_struct still being valid.
  * We assume the cpus_allowed, pid, and comm fields are still valid.
  */
-static void _hardwall_deactivate(struct task_struct *task)
+static void _hardwall_deactivate(struct hardwall_type *hwt,
+				 struct task_struct *task)
 {
 	struct thread_struct *ts = &task->thread;
 
 	if (cpumask_weight(&task->cpus_allowed) != 1) {
-		pr_err("pid %d (%s) releasing networks with"
+		pr_err("pid %d (%s) releasing %s hardwall with"
 		       " an affinity mask containing %d cpus!\n",
-		       task->pid, task->comm,
+		       task->pid, task->comm, hwt->name,
 		       cpumask_weight(&task->cpus_allowed));
 		BUG();
 	}
 
-	BUG_ON(ts->hardwall == NULL);
-	ts->hardwall = NULL;
-	list_del(&ts->hardwall_list);
+	BUG_ON(ts->hardwall[hwt->index].info == NULL);
+	ts->hardwall[hwt->index].info = NULL;
+	list_del(&ts->hardwall[hwt->index].list);
 	if (task == current)
-		restrict_network_mpls();
+		restrict_hardwall_mpls(hwt);
 }
 
 /* Deactivate a task's hardwall. */
-int hardwall_deactivate(struct task_struct *task)
+static int hardwall_deactivate(struct hardwall_type *hwt,
+			       struct task_struct *task)
 {
 	unsigned long flags;
 	int activated;
 
-	spin_lock_irqsave(&hardwall_lock, flags);
-	activated = (task->thread.hardwall != NULL);
+	spin_lock_irqsave(&hwt->lock, flags);
+	activated = (task->thread.hardwall[hwt->index].info != NULL);
 	if (activated)
-		_hardwall_deactivate(task);
-	spin_unlock_irqrestore(&hardwall_lock, flags);
+		_hardwall_deactivate(hwt, task);
+	spin_unlock_irqrestore(&hwt->lock, flags);
 
 	if (!activated)
 		return -EINVAL;
 
-	printk(KERN_DEBUG "Pid %d (%s) deactivated for hardwall: cpu %d\n",
-	       task->pid, task->comm, smp_processor_id());
+	printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
+	       task->pid, task->comm, hwt->name, smp_processor_id());
 	return 0;
 }
 
-/* Stop a UDN switch before draining the network. */
-static void stop_udn_switch(void *ignored)
+void hardwall_deactivate_all(struct task_struct *task)
+{
+	int i;
+	for (i = 0; i < HARDWALL_TYPES; ++i)
+		if (task->thread.hardwall[i].info)
+			hardwall_deactivate(&hardwall_types[i], task);
+}
+
+/* Stop the switch before draining the network. */
+static void stop_xdn_switch(void *arg)
 {
 #if !CHIP_HAS_REV1_XDN()
 	/* Freeze the switch and the demux. */
@@ -507,13 +674,71 @@ static void stop_udn_switch(void *ignored)
 		SPR_UDN_SP_FREEZE__SP_FRZ_MASK |
 		SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK |
 		SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK);
+#else
+	/*
+	 * Drop all packets bound for the core or off the edge.
+	 * We rely on the normal hardwall protection setup code
+	 * to have set the low four bits to trigger firewall interrupts,
+	 * and shift those bits up to trigger "drop on send" semantics,
+	 * plus adding "drop on send to core" for all switches.
+	 * In practice it seems the switches latch the DIRECTION_PROTECT
+	 * SPR so they won't start dropping if they're already
+	 * delivering the last message to the core, but it doesn't
+	 * hurt to enable it here.
+	 */
+	struct hardwall_type *hwt = arg;
+	unsigned long protect = mfspr_XDN(hwt, DIRECTION_PROTECT);
+	mtspr_XDN(hwt, DIRECTION_PROTECT, (protect | C_PROTECT) << 5);
 #endif
 }
 
+static void empty_xdn_demuxes(struct hardwall_type *hwt)
+{
+#ifndef __tilepro__
+	if (hwt->is_idn) {
+		while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 0))
+			(void) __tile_idn0_receive();
+		while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 1))
+			(void) __tile_idn1_receive();
+		return;
+	}
+#endif
+	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
+		(void) __tile_udn0_receive();
+	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
+		(void) __tile_udn1_receive();
+	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
+		(void) __tile_udn2_receive();
+	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
+		(void) __tile_udn3_receive();
+}
+
 /* Drain all the state from a stopped switch. */
-static void drain_udn_switch(void *ignored)
+static void drain_xdn_switch(void *arg)
 {
-#if !CHIP_HAS_REV1_XDN()
+	struct hardwall_info *info = arg;
+	struct hardwall_type *hwt = info->type;
+
+#if CHIP_HAS_REV1_XDN()
+	/*
+	 * The switches have been configured to drop any messages
+	 * destined for cores (or off the edge of the rectangle).
+	 * But the current message may continue to be delivered,
+	 * so we wait until all the cores have finished any pending
+	 * messages before we stop draining.
+	 */
+	int pending = mfspr_XDN(hwt, PENDING);
+	while (pending--) {
+		empty_xdn_demuxes(hwt);
+		if (hwt->is_idn)
+			__tile_idn_send(0);
+		else
+			__tile_udn_send(0);
+	}
+	atomic_dec(&info->xdn_pending_count);
+	while (atomic_read(&info->xdn_pending_count))
+		empty_xdn_demuxes(hwt);
+#else
 	int i;
 	int from_tile_words, ca_count;
 
@@ -533,15 +758,7 @@ static void drain_udn_switch(void *ignored)
 	(void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO);
 
 	/* Empty out demuxes. */
-	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
-		(void) __tile_udn0_receive();
-	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
-		(void) __tile_udn1_receive();
-	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
-		(void) __tile_udn2_receive();
-	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
-		(void) __tile_udn3_receive();
-	BUG_ON((__insn_mfspr(SPR_UDN_DATA_AVAIL) & 0xF) != 0);
+	empty_xdn_demuxes(hwt);
 
 	/* Empty out catch all. */
 	ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT);
@@ -563,21 +780,25 @@ static void drain_udn_switch(void *ignored)
 #endif
 }
 
-/* Reset random UDN state registers at boot up and during hardwall teardown. */
-void reset_network_state(void)
+/* Reset random XDN state registers at boot up and during hardwall teardown. */
+static void reset_xdn_network_state(struct hardwall_type *hwt)
 {
-#if !CHIP_HAS_REV1_XDN()
-	/* Reset UDN coordinates to their standard value */
-	unsigned int cpu = smp_processor_id();
-	unsigned int x = cpu % smp_width;
-	unsigned int y = cpu / smp_width;
-#endif
-
-	if (udn_disabled)
+	if (hwt->disabled)
 		return;
 
+	/* Clear out other random registers so we have a clean slate. */
+	mtspr_XDN(hwt, DIRECTION_PROTECT, 0);
+	mtspr_XDN(hwt, AVAIL_EN, 0);
+	mtspr_XDN(hwt, DEADLOCK_TIMEOUT, 0);
+
 #if !CHIP_HAS_REV1_XDN()
-	__insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
+	/* Reset UDN coordinates to their standard value */
+	{
+		unsigned int cpu = smp_processor_id();
+		unsigned int x = cpu % smp_width;
+		unsigned int y = cpu / smp_width;
+		__insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
+	}
 
 	/* Set demux tags to predefined values and enable them. */
 	__insn_mtspr(SPR_UDN_TAG_VALID, 0xf);
@@ -585,56 +806,50 @@ void reset_network_state(void)
 	__insn_mtspr(SPR_UDN_TAG_1, (1 << 1));
 	__insn_mtspr(SPR_UDN_TAG_2, (1 << 2));
 	__insn_mtspr(SPR_UDN_TAG_3, (1 << 3));
-#endif
 
-	/* Clear out other random registers so we have a clean slate. */
-	__insn_mtspr(SPR_UDN_AVAIL_EN, 0);
-	__insn_mtspr(SPR_UDN_DEADLOCK_TIMEOUT, 0);
-#if !CHIP_HAS_REV1_XDN()
+	/* Set other rev0 random registers to a clean state. */
 	__insn_mtspr(SPR_UDN_REFILL_EN, 0);
 	__insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0);
 	__insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0);
-#endif
 
 	/* Start the switch and demux. */
-#if !CHIP_HAS_REV1_XDN()
 	__insn_mtspr(SPR_UDN_SP_FREEZE, 0);
 #endif
 }
 
-/* Restart a UDN switch after draining. */
-static void restart_udn_switch(void *ignored)
+void reset_network_state(void)
 {
-	reset_network_state();
-
-	/* Disable firewall interrupts. */
-	__insn_mtspr(SPR_UDN_DIRECTION_PROTECT, 0);
-	disable_firewall_interrupts();
+	reset_xdn_network_state(&hardwall_types[HARDWALL_UDN]);
+#ifndef __tilepro__
+	reset_xdn_network_state(&hardwall_types[HARDWALL_IDN]);
+#endif
 }
 
-/* Build a struct cpumask containing all valid tiles in bounding rectangle. */
-static void fill_mask(struct hardwall_info *r, struct cpumask *result)
+/* Restart an XDN switch after draining. */
+static void restart_xdn_switch(void *arg)
 {
-	int x, y, cpu;
+	struct hardwall_type *hwt = arg;
 
-	cpumask_clear(result);
+#if CHIP_HAS_REV1_XDN()
+	/* One last drain step to avoid races with injection and draining. */
+	empty_xdn_demuxes(hwt);
+#endif
 
-	cpu = r->ulhc_y * smp_width + r->ulhc_x;
-	for (y = 0; y < r->height; ++y, cpu += smp_width - r->width) {
-		for (x = 0; x < r->width; ++x, ++cpu)
-			cpu_online_set(cpu, result);
-	}
+	reset_xdn_network_state(hwt);
+
+	/* Disable firewall interrupts. */
+	disable_firewall_interrupts(hwt);
 }
 
 /* Last reference to a hardwall is gone, so clear the network. */
-static void hardwall_destroy(struct hardwall_info *rect)
+static void hardwall_destroy(struct hardwall_info *info)
 {
 	struct task_struct *task;
+	struct hardwall_type *hwt;
 	unsigned long flags;
-	struct cpumask mask;
 
-	/* Make sure this file actually represents a rectangle. */
-	if (rect == NULL)
+	/* Make sure this file actually represents a hardwall. */
+	if (info == NULL)
 		return;
 
 	/*
@@ -644,39 +859,53 @@ static void hardwall_destroy(struct hardwall_info *rect)
 	 * deactivate any remaining tasks before freeing the
 	 * hardwall_info object itself.
 	 */
-	spin_lock_irqsave(&hardwall_lock, flags);
-	list_for_each_entry(task, &rect->task_head, thread.hardwall_list)
-		_hardwall_deactivate(task);
-	spin_unlock_irqrestore(&hardwall_lock, flags);
-
-	/* Drain the UDN. */
-	printk(KERN_DEBUG "Clearing hardwall rectangle %dx%d %d,%d\n",
-	       rect->width, rect->height, rect->ulhc_x, rect->ulhc_y);
-	fill_mask(rect, &mask);
-	on_each_cpu_mask(&mask, stop_udn_switch, NULL, 1);
-	on_each_cpu_mask(&mask, drain_udn_switch, NULL, 1);
+	hwt = info->type;
+	info->teardown_in_progress = 1;
+	spin_lock_irqsave(&hwt->lock, flags);
+	list_for_each_entry(task, &info->task_head,
+			    thread.hardwall[hwt->index].list)
+		_hardwall_deactivate(hwt, task);
+	spin_unlock_irqrestore(&hwt->lock, flags);
+
+	if (hwt->is_xdn) {
+		/* Configure the switches for draining the user network. */
+		printk(KERN_DEBUG
+		       "Clearing %s hardwall rectangle %dx%d %d,%d\n",
+		       hwt->name, info->width, info->height,
+		       info->ulhc_x, info->ulhc_y);
+		on_each_cpu_mask(&info->cpumask, stop_xdn_switch, hwt, 1);
+
+		/* Drain the network. */
+#if CHIP_HAS_REV1_XDN()
+		atomic_set(&info->xdn_pending_count,
+			   cpumask_weight(&info->cpumask));
+		on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 0);
+#else
+		on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 1);
+#endif
 
-	/* Restart switch and disable firewall. */
-	on_each_cpu_mask(&mask, restart_udn_switch, NULL, 1);
+		/* Restart switch and disable firewall. */
+		on_each_cpu_mask(&info->cpumask, restart_xdn_switch, hwt, 1);
+	}
 
 	/* Remove the /proc/tile/hardwall entry. */
-	hardwall_remove_proc(rect);
+	hardwall_remove_proc(info);
 
-	/* Now free the rectangle from the list. */
-	spin_lock_irqsave(&hardwall_lock, flags);
-	BUG_ON(!list_empty(&rect->task_head));
-	list_del(&rect->list);
-	spin_unlock_irqrestore(&hardwall_lock, flags);
-	kfree(rect);
+	/* Now free the hardwall from the list. */
+	spin_lock_irqsave(&hwt->lock, flags);
+	BUG_ON(!list_empty(&info->task_head));
+	list_del(&info->list);
+	spin_unlock_irqrestore(&hwt->lock, flags);
+	kfree(info);
 }
 
 
 static int hardwall_proc_show(struct seq_file *sf, void *v)
 {
-	struct hardwall_info *rect = sf->private;
+	struct hardwall_info *info = sf->private;
 	char buf[256];
 
-	int rc = cpulist_scnprintf(buf, sizeof(buf), &rect->cpumask);
+	int rc = cpulist_scnprintf(buf, sizeof(buf), &info->cpumask);
 	buf[rc++] = '\n';
 	seq_write(sf, buf, rc);
 	return 0;
@@ -695,31 +924,45 @@ static const struct file_operations hardwall_proc_fops = {
         .release = single_release,
 };
 
-static void hardwall_add_proc(struct hardwall_info *rect)
+static void hardwall_add_proc(struct hardwall_info *info)
 {
         char buf[64];
-        snprintf(buf, sizeof(buf), "%d", rect->id);
-        proc_create_data(buf, 0444, hardwall_proc_dir,
-                         &hardwall_proc_fops, rect);
+        snprintf(buf, sizeof(buf), "%d", info->id);
+        proc_create_data(buf, 0444, info->type->proc_dir,
+                         &hardwall_proc_fops, info);
 }
 
-static void hardwall_remove_proc(struct hardwall_info *rect)
+static void hardwall_remove_proc(struct hardwall_info *info)
 {
         char buf[64];
-        snprintf(buf, sizeof(buf), "%d", rect->id);
-        remove_proc_entry(buf, hardwall_proc_dir);
+        snprintf(buf, sizeof(buf), "%d", info->id);
+        remove_proc_entry(buf, info->type->proc_dir);
 }
 
 int proc_pid_hardwall(struct task_struct *task, char *buffer)
 {
-        struct hardwall_info *rect = task->thread.hardwall;
-        return rect ? sprintf(buffer, "%d\n", rect->id) : 0;
+        int i;
+        int n = 0;
+        for (i = 0; i < HARDWALL_TYPES; ++i) {
+                struct hardwall_info *info = task->thread.hardwall[i].info;
+                if (info)
+                        n += sprintf(&buffer[n], "%s: %d\n",
+                                     info->type->name, info->id);
+        }
+        return n;
 }
 
 void proc_tile_hardwall_init(struct proc_dir_entry *root)
 {
-        if (!udn_disabled)
-                hardwall_proc_dir = proc_mkdir("hardwall", root);
+        int i;
+        for (i = 0; i < HARDWALL_TYPES; ++i) {
+                struct hardwall_type *hwt = &hardwall_types[i];
+                if (hwt->disabled)
+                        continue;
+                if (hardwall_proc_dir == NULL)
+                        hardwall_proc_dir = proc_mkdir("hardwall", root);
+                hwt->proc_dir = proc_mkdir(hwt->name, hardwall_proc_dir);
+        }
 }
 
 
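
With per-type proc directories in place, a wall's cpu list is published at
/proc/tile/hardwall/<type>/<id> (written by hardwall_proc_show() above), and
proc_pid_hardwall() emits one "name: id" line per network type the task has
activated. Plausible output, with made-up ids, cpus, and pid:

        # cat /proc/tile/hardwall/udn/0
        0-3,8-11
        # cat /proc/4231/hardwall
        udn: 0
        ipi: 2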
@@ -729,34 +972,45 @@ void proc_tile_hardwall_init(struct proc_dir_entry *root)
 
 static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
 {
-        struct hardwall_info *rect = file->private_data;
+        struct hardwall_info *info = file->private_data;
+        int minor = iminor(file->f_mapping->host);
+        struct hardwall_type *hwt;
 
         if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE)
                 return -EINVAL;
 
+        BUILD_BUG_ON(HARDWALL_TYPES != _HARDWALL_TYPES);
+        BUILD_BUG_ON(HARDWALL_TYPES !=
+                     sizeof(hardwall_types)/sizeof(hardwall_types[0]));
+
+        if (minor < 0 || minor >= HARDWALL_TYPES)
+                return -EINVAL;
+        hwt = &hardwall_types[minor];
+        WARN_ON(info && hwt != info->type);
+
         switch (_IOC_NR(a)) {
         case _HARDWALL_CREATE:
-                if (udn_disabled)
+                if (hwt->disabled)
                         return -ENOSYS;
-                if (rect != NULL)
+                if (info != NULL)
                         return -EALREADY;
-                rect = hardwall_create(_IOC_SIZE(a),
-                                       (const unsigned char __user *)b);
-                if (IS_ERR(rect))
-                        return PTR_ERR(rect);
-                file->private_data = rect;
+                info = hardwall_create(hwt, _IOC_SIZE(a),
+                                       (const unsigned char __user *)b);
+                if (IS_ERR(info))
+                        return PTR_ERR(info);
+                file->private_data = info;
                 return 0;
 
         case _HARDWALL_ACTIVATE:
-                return hardwall_activate(rect);
+                return hardwall_activate(info);
 
         case _HARDWALL_DEACTIVATE:
-                if (current->thread.hardwall != rect)
+                if (current->thread.hardwall[hwt->index].info != info)
                         return -EINVAL;
-                return hardwall_deactivate(current);
+                return hardwall_deactivate(hwt, current);
 
         case _HARDWALL_GET_ID:
-                return rect ? rect->id : -EINVAL;
+                return info ? info->id : -EINVAL;
 
         default:
                 return -EINVAL;
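
Since the handler keys off iminor() above, user code picks a network purely
by which device node it opens, and the request encoding carries the cpumask
size in _IOC_SIZE(). A rough user-space sketch of the create/activate
sequence follows; it assumes the HARDWALL_CREATE()/HARDWALL_ACTIVATE request
macros exported by asm/hardwall.h and a /dev/hardwall/udn node, so treat
those spellings and the mask layout as assumptions rather than a reference:

/* Hypothetical sketch: claim a small wall on the UDN and activate it. */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <asm/hardwall.h>       /* HARDWALL_CREATE(), HARDWALL_ACTIVATE */

int main(void)
{
        unsigned char bits[8];  /* cpumask bytes for the desired tiles */
        int fd = open("/dev/hardwall/udn", O_RDWR);

        if (fd < 0) {
                perror("open /dev/hardwall/udn");
                return 1;
        }

        memset(bits, 0, sizeof(bits));
        bits[0] = 0x0f;         /* cpus 0-3; the set must form a rectangle */

        if (ioctl(fd, HARDWALL_CREATE(sizeof(bits)), bits) < 0) {
                perror("HARDWALL_CREATE");
                return 1;
        }
        if (ioctl(fd, HARDWALL_ACTIVATE) < 0) {
                perror("HARDWALL_ACTIVATE");
                return 1;
        }
        /* ... use the user network; closing fd revokes access ... */
        close(fd);
        return 0;
}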
@@ -775,26 +1029,28 @@ static long hardwall_compat_ioctl(struct file *file,
 /* The user process closed the file; revoke access to user networks. */
 static int hardwall_flush(struct file *file, fl_owner_t owner)
 {
-        struct hardwall_info *rect = file->private_data;
+        struct hardwall_info *info = file->private_data;
         struct task_struct *task, *tmp;
         unsigned long flags;
 
-        if (rect) {
+        if (info) {
                 /*
                  * NOTE: if multiple threads are activated on this hardwall
                  * file, the other threads will continue having access to the
-                 * UDN until they are context-switched out and back in again.
+                 * user network until they are context-switched out and back
+                 * in again.
                  *
                  * NOTE: A NULL files pointer means the task is being torn
                  * down, so in that case we also deactivate it.
                  */
-                spin_lock_irqsave(&hardwall_lock, flags);
-                list_for_each_entry_safe(task, tmp, &rect->task_head,
-                                         thread.hardwall_list) {
+                struct hardwall_type *hwt = info->type;
+                spin_lock_irqsave(&hwt->lock, flags);
+                list_for_each_entry_safe(task, tmp, &info->task_head,
+                                         thread.hardwall[hwt->index].list) {
                         if (task->files == owner || task->files == NULL)
-                                _hardwall_deactivate(task);
+                                _hardwall_deactivate(hwt, task);
                 }
-                spin_unlock_irqrestore(&hardwall_lock, flags);
+                spin_unlock_irqrestore(&hwt->lock, flags);
         }
 
         return 0;
@@ -824,11 +1080,11 @@ static int __init dev_hardwall_init(void)
         int rc;
         dev_t dev;
 
-        rc = alloc_chrdev_region(&dev, 0, 1, "hardwall");
+        rc = alloc_chrdev_region(&dev, 0, HARDWALL_TYPES, "hardwall");
         if (rc < 0)
                 return rc;
         cdev_init(&hardwall_dev, &dev_hardwall_fops);
-        rc = cdev_add(&hardwall_dev, dev, 1);
+        rc = cdev_add(&hardwall_dev, dev, HARDWALL_TYPES);
         if (rc < 0)
                 return rc;
 
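
With HARDWALL_TYPES minors reserved in one region, the minor number indexes
hardwall_types[] directly (see the iminor() dispatch in hardwall_ioctl()),
so the three device nodes differ only in their minor. Illustrative only:
the major below is invented, since alloc_chrdev_region() hands out a
dynamic one that udev would normally consume:

        # mknod /dev/hardwall/udn c 254 0       (minor 0 -> hardwall_types[0])
        # mknod /dev/hardwall/idn c 254 1       (minor 1 -> hardwall_types[1])
        # mknod /dev/hardwall/ipi c 254 2       (minor 2 -> hardwall_types[2])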
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 0ae8723ea578..7c06d597ffd0 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -1257,7 +1257,7 @@ STD_ENTRY(fill_ra_stack)
         int_hand INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
         int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault
         int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
-        int_hand INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
+        int_hand INT_IDN_FIREWALL, IDN_FIREWALL, do_hardwall_trap
         int_hand INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
         int_hand INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
         int_hand INT_IDN_TIMER, IDN_TIMER, bad_intr
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 54e6c64b85cc..03448eb189a7 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -145,10 +145,10 @@ void free_thread_info(struct thread_info *info)
          * Calling deactivate here just frees up the data structures.
          * If the task we're freeing held the last reference to a
          * hardwall fd, it would have been released prior to this point
-         * anyway via exit_files(), and "hardwall" would be NULL by now.
+         * anyway via exit_files(), and the hardwall_task.info pointers
+         * would be NULL by now.
          */
-        if (info->task->thread.hardwall)
-                hardwall_deactivate(info->task);
+        hardwall_deactivate_all(info->task);
 #endif
 
         if (step_state) {
@@ -264,7 +264,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
 #ifdef CONFIG_HARDWALL
         /* New thread does not own any networks. */
-        p->thread.hardwall = NULL;
+        memset(&p->thread.hardwall[0], 0,
+               sizeof(struct hardwall_task) * HARDWALL_TYPES);
 #endif
 
 
@@ -534,12 +535,7 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
 
 #ifdef CONFIG_HARDWALL
         /* Enable or disable access to the network registers appropriately. */
-        if (prev->thread.hardwall != NULL) {
-                if (next->thread.hardwall == NULL)
-                        restrict_network_mpls();
-        } else if (next->thread.hardwall != NULL) {
-                grant_network_mpls();
-        }
+        hardwall_switch_tasks(prev, next);
 #endif
 
         /*
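
The single hardwall_switch_tasks() call replaces the old UDN-only
grant/restrict dance with a loop over every hardwall type. A plausible
sketch of the helper's shape, inferred from the code it replaces rather
than copied from this patch (grant_hardwall_mpls()/restrict_hardwall_mpls()
are assumed names for the per-type MPL toggles):

/* Sketch only: the per-type generalization of the logic removed above. */
void hardwall_switch_tasks(struct task_struct *prev,
                           struct task_struct *next)
{
        int i;

        for (i = 0; i < HARDWALL_TYPES; ++i) {
                struct hardwall_type *hwt = &hardwall_types[i];

                if (prev->thread.hardwall[i].info != NULL) {
                        if (next->thread.hardwall[i].info == NULL)
                                restrict_hardwall_mpls(hwt);    /* assumed */
                } else if (next->thread.hardwall[i].info != NULL) {
                        grant_hardwall_mpls(hwt);               /* assumed */
                }
        }
}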