author     Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
commit     8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree       a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /arch/tile/kernel/hardwall.c
parent     406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'arch/tile/kernel/hardwall.c')
-rw-r--r--  arch/tile/kernel/hardwall.c  754
1 file changed, 249 insertions(+), 505 deletions(-)
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 20273ee37de..8c41891aab3 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -33,157 +33,59 @@ | |||
33 | 33 | ||
34 | 34 | ||
35 | /* | 35 | /* |
36 | * Implement a per-cpu "hardwall" resource class such as UDN or IPI. | 36 | * This data structure tracks the rectangle data, etc., associated |
37 | * We use "hardwall" nomenclature throughout for historical reasons. | 37 | * one-to-one with a "struct file *" from opening HARDWALL_FILE. |
38 | * The lock here controls access to the list data structure as well as | ||
39 | * to the items on the list. | ||
40 | */ | ||
41 | struct hardwall_type { | ||
42 | int index; | ||
43 | int is_xdn; | ||
44 | int is_idn; | ||
45 | int disabled; | ||
46 | const char *name; | ||
47 | struct list_head list; | ||
48 | spinlock_t lock; | ||
49 | struct proc_dir_entry *proc_dir; | ||
50 | }; | ||
51 | |||
52 | enum hardwall_index { | ||
53 | HARDWALL_UDN = 0, | ||
54 | #ifndef __tilepro__ | ||
55 | HARDWALL_IDN = 1, | ||
56 | HARDWALL_IPI = 2, | ||
57 | #endif | ||
58 | _HARDWALL_TYPES | ||
59 | }; | ||
60 | |||
61 | static struct hardwall_type hardwall_types[] = { | ||
62 | { /* user-space access to UDN */ | ||
63 | 0, | ||
64 | 1, | ||
65 | 0, | ||
66 | 0, | ||
67 | "udn", | ||
68 | LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list), | ||
69 | __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock), | ||
70 | NULL | ||
71 | }, | ||
72 | #ifndef __tilepro__ | ||
73 | { /* user-space access to IDN */ | ||
74 | 1, | ||
75 | 1, | ||
76 | 1, | ||
77 | 1, /* disabled pending hypervisor support */ | ||
78 | "idn", | ||
79 | LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list), | ||
80 | __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock), | ||
81 | NULL | ||
82 | }, | ||
83 | { /* access to user-space IPI */ | ||
84 | 2, | ||
85 | 0, | ||
86 | 0, | ||
87 | 0, | ||
88 | "ipi", | ||
89 | LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list), | ||
90 | __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock), | ||
91 | NULL | ||
92 | }, | ||
93 | #endif | ||
94 | }; | ||
95 | |||
96 | /* | ||
97 | * This data structure tracks the cpu data, etc., associated | ||
98 | * one-to-one with a "struct file *" from opening a hardwall device file. | ||
99 | * Note that the file's private data points back to this structure. | 38 | * Note that the file's private data points back to this structure. |
100 | */ | 39 | */ |
101 | struct hardwall_info { | 40 | struct hardwall_info { |
102 | struct list_head list; /* for hardwall_types.list */ | 41 | struct list_head list; /* "rectangles" list */ |
103 | struct list_head task_head; /* head of tasks in this hardwall */ | 42 | struct list_head task_head; /* head of tasks in this hardwall */ |
104 | struct hardwall_type *type; /* type of this resource */ | 43 | struct cpumask cpumask; /* cpus in the rectangle */ |
105 | struct cpumask cpumask; /* cpus reserved */ | ||
106 | int id; /* integer id for this hardwall */ | ||
107 | int teardown_in_progress; /* are we tearing this one down? */ | ||
108 | |||
109 | /* Remaining fields only valid for user-network resources. */ | ||
110 | int ulhc_x; /* upper left hand corner x coord */ | 44 | int ulhc_x; /* upper left hand corner x coord */ |
111 | int ulhc_y; /* upper left hand corner y coord */ | 45 | int ulhc_y; /* upper left hand corner y coord */ |
112 | int width; /* rectangle width */ | 46 | int width; /* rectangle width */ |
113 | int height; /* rectangle height */ | 47 | int height; /* rectangle height */ |
114 | #if CHIP_HAS_REV1_XDN() | 48 | int id; /* integer id for this hardwall */ |
115 | atomic_t xdn_pending_count; /* cores in phase 1 of drain */ | 49 | int teardown_in_progress; /* are we tearing this one down? */ |
116 | #endif | ||
117 | }; | 50 | }; |
118 | 51 | ||
52 | /* Currently allocated hardwall rectangles */ | ||
53 | static LIST_HEAD(rectangles); | ||
119 | 54 | ||
120 | /* /proc/tile/hardwall */ | 55 | /* /proc/tile/hardwall */ |
121 | static struct proc_dir_entry *hardwall_proc_dir; | 56 | static struct proc_dir_entry *hardwall_proc_dir; |
122 | 57 | ||
123 | /* Functions to manage files in /proc/tile/hardwall. */ | 58 | /* Functions to manage files in /proc/tile/hardwall. */ |
124 | static void hardwall_add_proc(struct hardwall_info *); | 59 | static void hardwall_add_proc(struct hardwall_info *rect); |
125 | static void hardwall_remove_proc(struct hardwall_info *); | 60 | static void hardwall_remove_proc(struct hardwall_info *rect); |
61 | |||
62 | /* | ||
63 | * Guard changes to the hardwall data structures. | ||
64 | * This could be finer grained (e.g. one lock for the list of hardwall | ||
65 | * rectangles, then separate embedded locks for each one's list of tasks), | ||
66 | * but there are subtle correctness issues when trying to start with | ||
67 | * a task's "hardwall" pointer and lock the correct rectangle's embedded | ||
68 | * lock in the presence of a simultaneous deactivation, so it seems | ||
69 | * easier to have a single lock, given that none of these data | ||
70 | * structures are touched very frequently during normal operation. | ||
71 | */ | ||
72 | static DEFINE_SPINLOCK(hardwall_lock); | ||
126 | 73 | ||
127 | /* Allow disabling UDN access. */ | 74 | /* Allow disabling UDN access. */ |
75 | static int udn_disabled; | ||
128 | static int __init noudn(char *str) | 76 | static int __init noudn(char *str) |
129 | { | 77 | { |
130 | pr_info("User-space UDN access is disabled\n"); | 78 | pr_info("User-space UDN access is disabled\n"); |
131 | hardwall_types[HARDWALL_UDN].disabled = 1; | 79 | udn_disabled = 1; |
132 | return 0; | 80 | return 0; |
133 | } | 81 | } |
134 | early_param("noudn", noudn); | 82 | early_param("noudn", noudn); |
135 | 83 | ||
136 | #ifndef __tilepro__ | ||
137 | /* Allow disabling IDN access. */ | ||
138 | static int __init noidn(char *str) | ||
139 | { | ||
140 | pr_info("User-space IDN access is disabled\n"); | ||
141 | hardwall_types[HARDWALL_IDN].disabled = 1; | ||
142 | return 0; | ||
143 | } | ||
144 | early_param("noidn", noidn); | ||
145 | |||
146 | /* Allow disabling IPI access. */ | ||
147 | static int __init noipi(char *str) | ||
148 | { | ||
149 | pr_info("User-space IPI access is disabled\n"); | ||
150 | hardwall_types[HARDWALL_IPI].disabled = 1; | ||
151 | return 0; | ||
152 | } | ||
153 | early_param("noipi", noipi); | ||
154 | #endif | ||
155 | |||
156 | 84 | ||
157 | /* | 85 | /* |
158 | * Low-level primitives for UDN/IDN | 86 | * Low-level primitives |
159 | */ | 87 | */ |
160 | 88 | ||
161 | #ifdef __tilepro__ | ||
162 | #define mtspr_XDN(hwt, name, val) \ | ||
163 | do { (void)(hwt); __insn_mtspr(SPR_UDN_##name, (val)); } while (0) | ||
164 | #define mtspr_MPL_XDN(hwt, name, val) \ | ||
165 | do { (void)(hwt); __insn_mtspr(SPR_MPL_UDN_##name, (val)); } while (0) | ||
166 | #define mfspr_XDN(hwt, name) \ | ||
167 | ((void)(hwt), __insn_mfspr(SPR_UDN_##name)) | ||
168 | #else | ||
169 | #define mtspr_XDN(hwt, name, val) \ | ||
170 | do { \ | ||
171 | if ((hwt)->is_idn) \ | ||
172 | __insn_mtspr(SPR_IDN_##name, (val)); \ | ||
173 | else \ | ||
174 | __insn_mtspr(SPR_UDN_##name, (val)); \ | ||
175 | } while (0) | ||
176 | #define mtspr_MPL_XDN(hwt, name, val) \ | ||
177 | do { \ | ||
178 | if ((hwt)->is_idn) \ | ||
179 | __insn_mtspr(SPR_MPL_IDN_##name, (val)); \ | ||
180 | else \ | ||
181 | __insn_mtspr(SPR_MPL_UDN_##name, (val)); \ | ||
182 | } while (0) | ||
183 | #define mfspr_XDN(hwt, name) \ | ||
184 | ((hwt)->is_idn ? __insn_mfspr(SPR_IDN_##name) : __insn_mfspr(SPR_UDN_##name)) | ||
185 | #endif | ||
186 | |||
187 | /* Set a CPU bit if the CPU is online. */ | 89 | /* Set a CPU bit if the CPU is online. */ |
188 | #define cpu_online_set(cpu, dst) do { \ | 90 | #define cpu_online_set(cpu, dst) do { \ |
189 | if (cpu_online(cpu)) \ | 91 | if (cpu_online(cpu)) \ |
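The hardwall_types[] table in the hunk above (old side) initializes struct hardwall_type positionally, so each constant is keyed only by its adjacent comment. As a readability aid, here is the UDN entry rewritten with designated initializers — a sketch for reference, not part of the patch; the field names come from the struct hardwall_type declaration shown above.

    /* Sketch only: the UDN entry from hardwall_types[], with each value
     * tied to its field name in struct hardwall_type. */
    static struct hardwall_type hardwall_types_sketch[] = {
            [HARDWALL_UDN] = {
                    .index    = 0,
                    .is_xdn   = 1,   /* UDN is a user-accessible XDN network */
                    .is_idn   = 0,
                    .disabled = 0,
                    .name     = "udn",
                    .list = LIST_HEAD_INIT(hardwall_types_sketch[HARDWALL_UDN].list),
                    .lock = __SPIN_LOCK_INITIALIZER(hardwall_types_sketch[HARDWALL_UDN].lock),
                    .proc_dir = NULL,
            },
    };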
@@ -199,7 +101,7 @@ static int contains(struct hardwall_info *r, int x, int y) | |||
199 | } | 101 | } |
200 | 102 | ||
201 | /* Compute the rectangle parameters and validate the cpumask. */ | 103 | /* Compute the rectangle parameters and validate the cpumask. */ |
202 | static int check_rectangle(struct hardwall_info *r, struct cpumask *mask) | 104 | static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask) |
203 | { | 105 | { |
204 | int x, y, cpu, ulhc, lrhc; | 106 | int x, y, cpu, ulhc, lrhc; |
205 | 107 | ||
@@ -212,6 +114,8 @@ static int check_rectangle(struct hardwall_info *r, struct cpumask *mask) | |||
212 | r->ulhc_y = cpu_y(ulhc); | 114 | r->ulhc_y = cpu_y(ulhc); |
213 | r->width = cpu_x(lrhc) - r->ulhc_x + 1; | 115 | r->width = cpu_x(lrhc) - r->ulhc_x + 1; |
214 | r->height = cpu_y(lrhc) - r->ulhc_y + 1; | 116 | r->height = cpu_y(lrhc) - r->ulhc_y + 1; |
117 | cpumask_copy(&r->cpumask, mask); | ||
118 | r->id = ulhc; /* The ulhc cpu id can be the hardwall id. */ | ||
215 | 119 | ||
216 | /* Width and height must be positive */ | 120 | /* Width and height must be positive */ |
217 | if (r->width <= 0 || r->height <= 0) | 121 | if (r->width <= 0 || r->height <= 0) |
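A worked example of the geometry computed in this hunk — a sketch, assuming cpu_x(cpu) == cpu % smp_width and cpu_y(cpu) == cpu / smp_width, the mapping used elsewhere in this file:

    /* Sketch: a cpumask covering cpus {9, 10, 17, 18} on an 8-wide mesh.
     * ulhc (upper-left cpu in the mask) is 9, lrhc (lower-right) is 18. */
    int smp_width = 8, ulhc = 9, lrhc = 18;
    int ulhc_x = ulhc % smp_width;               /* 1 */
    int ulhc_y = ulhc / smp_width;               /* 1 */
    int width  = lrhc % smp_width - ulhc_x + 1;  /* 2 - 1 + 1 = 2 */
    int height = lrhc / smp_width - ulhc_y + 1;  /* 2 - 1 + 1 = 2 */
    /* The new side also records id = ulhc = 9, reusing the upper-left
     * cpu number as the hardwall id. */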
@@ -224,7 +128,7 @@ static int check_rectangle(struct hardwall_info *r, struct cpumask *mask) | |||
224 | return -EINVAL; | 128 | return -EINVAL; |
225 | 129 | ||
226 | /* | 130 | /* |
227 | * Note that offline cpus can't be drained when this user network | 131 | * Note that offline cpus can't be drained when this UDN |
228 | * rectangle eventually closes. We used to detect this | 132 | * rectangle eventually closes. We used to detect this |
229 | * situation and print a warning, but it annoyed users and | 133 | * situation and print a warning, but it annoyed users and |
230 | * they ignored it anyway, so now we just return without a | 134 | * they ignored it anyway, so now we just return without a |
@@ -233,6 +137,16 @@ static int check_rectangle(struct hardwall_info *r, struct cpumask *mask) | |||
233 | return 0; | 137 | return 0; |
234 | } | 138 | } |
235 | 139 | ||
140 | /* Do the two given rectangles overlap on any cpu? */ | ||
141 | static int overlaps(struct hardwall_info *a, struct hardwall_info *b) | ||
142 | { | ||
143 | return a->ulhc_x + a->width > b->ulhc_x && /* A not to the left */ | ||
144 | b->ulhc_x + b->width > a->ulhc_x && /* B not to the left */ | ||
145 | a->ulhc_y + a->height > b->ulhc_y && /* A not above */ | ||
146 | b->ulhc_y + b->height > a->ulhc_y; /* B not above */ | ||
147 | } | ||
148 | |||
149 | |||
236 | /* | 150 | /* |
237 | * Hardware management of hardwall setup, teardown, trapping, | 151 | * Hardware management of hardwall setup, teardown, trapping, |
238 | * and enabling/disabling PL0 access to the networks. | 152 | * and enabling/disabling PL0 access to the networks. |
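The new overlaps() helper above is the usual axis-separation test: two rectangles intersect exactly when neither lies entirely to the left of, or above, the other. A quick sketch of the boundary case:

    /* Sketch, not in the patch: rectangles that merely touch along an
     * edge do not overlap. */
    struct hardwall_info a = { .ulhc_x = 0, .ulhc_y = 0, .width = 2, .height = 2 };
    struct hardwall_info b = { .ulhc_x = 2, .ulhc_y = 0, .width = 2, .height = 2 };
    /* a->ulhc_x + a->width > b->ulhc_x is 2 > 2, i.e. false, so
     * overlaps(&a, &b) == 0.  Shift b one column left (ulhc_x = 1) and
     * all four conditions hold, so the rectangles conflict. */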
@@ -243,35 +157,23 @@ enum direction_protect { | |||
243 | N_PROTECT = (1 << 0), | 157 | N_PROTECT = (1 << 0), |
244 | E_PROTECT = (1 << 1), | 158 | E_PROTECT = (1 << 1), |
245 | S_PROTECT = (1 << 2), | 159 | S_PROTECT = (1 << 2), |
246 | W_PROTECT = (1 << 3), | 160 | W_PROTECT = (1 << 3) |
247 | C_PROTECT = (1 << 4), | ||
248 | }; | 161 | }; |
249 | 162 | ||
250 | static inline int xdn_which_interrupt(struct hardwall_type *hwt) | 163 | static void enable_firewall_interrupts(void) |
251 | { | ||
252 | #ifndef __tilepro__ | ||
253 | if (hwt->is_idn) | ||
254 | return INT_IDN_FIREWALL; | ||
255 | #endif | ||
256 | return INT_UDN_FIREWALL; | ||
257 | } | ||
258 | |||
259 | static void enable_firewall_interrupts(struct hardwall_type *hwt) | ||
260 | { | 164 | { |
261 | arch_local_irq_unmask_now(xdn_which_interrupt(hwt)); | 165 | arch_local_irq_unmask_now(INT_UDN_FIREWALL); |
262 | } | 166 | } |
263 | 167 | ||
264 | static void disable_firewall_interrupts(struct hardwall_type *hwt) | 168 | static void disable_firewall_interrupts(void) |
265 | { | 169 | { |
266 | arch_local_irq_mask_now(xdn_which_interrupt(hwt)); | 170 | arch_local_irq_mask_now(INT_UDN_FIREWALL); |
267 | } | 171 | } |
268 | 172 | ||
269 | /* Set up hardwall on this cpu based on the passed hardwall_info. */ | 173 | /* Set up hardwall on this cpu based on the passed hardwall_info. */ |
270 | static void hardwall_setup_func(void *info) | 174 | static void hardwall_setup_ipi_func(void *info) |
271 | { | 175 | { |
272 | struct hardwall_info *r = info; | 176 | struct hardwall_info *r = info; |
273 | struct hardwall_type *hwt = r->type; | ||
274 | |||
275 | int cpu = smp_processor_id(); | 177 | int cpu = smp_processor_id(); |
276 | int x = cpu % smp_width; | 178 | int x = cpu % smp_width; |
277 | int y = cpu / smp_width; | 179 | int y = cpu / smp_width; |
@@ -285,12 +187,13 @@ static void hardwall_setup_func(void *info) | |||
285 | if (y == r->ulhc_y + r->height - 1) | 187 | if (y == r->ulhc_y + r->height - 1) |
286 | bits |= S_PROTECT; | 188 | bits |= S_PROTECT; |
287 | BUG_ON(bits == 0); | 189 | BUG_ON(bits == 0); |
288 | mtspr_XDN(hwt, DIRECTION_PROTECT, bits); | 190 | __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, bits); |
289 | enable_firewall_interrupts(hwt); | 191 | enable_firewall_interrupts(); |
192 | |||
290 | } | 193 | } |
291 | 194 | ||
292 | /* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */ | 195 | /* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */ |
293 | static void hardwall_protect_rectangle(struct hardwall_info *r) | 196 | static void hardwall_setup(struct hardwall_info *r) |
294 | { | 197 | { |
295 | int x, y, cpu, delta; | 198 | int x, y, cpu, delta; |
296 | struct cpumask rect_cpus; | 199 | struct cpumask rect_cpus; |
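Only the S_PROTECT check is visible in the hunk above; the sibling checks for the other three edges sit in the omitted context lines and presumably mirror it. Under that assumption, the protection bits for a tile at the rectangle's upper-left corner would be computed as in this sketch:

    /* Sketch (assumes the omitted N/E/W checks mirror the visible
     * S_PROTECT one): a corner tile firewalls its two outward links. */
    int bits = 0;
    int x = r->ulhc_x, y = r->ulhc_y;             /* upper-left corner */
    if (x == r->ulhc_x)
            bits |= W_PROTECT;                    /* left column */
    if (x == r->ulhc_x + r->width - 1)
            bits |= E_PROTECT;                    /* right column */
    if (y == r->ulhc_y)
            bits |= N_PROTECT;                    /* top row */
    if (y == r->ulhc_y + r->height - 1)
            bits |= S_PROTECT;                    /* bottom row (shown above) */
    /* For a rectangle larger than 1x1 this yields
     * N_PROTECT | W_PROTECT == (1 << 0) | (1 << 3) == 0x9. */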
@@ -314,50 +217,37 @@ static void hardwall_protect_rectangle(struct hardwall_info *r) | |||
314 | } | 217 | } |
315 | 218 | ||
316 | /* Then tell all the cpus to set up their protection SPR */ | 219 | /* Then tell all the cpus to set up their protection SPR */ |
317 | on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1); | 220 | on_each_cpu_mask(&rect_cpus, hardwall_setup_ipi_func, r, 1); |
318 | } | 221 | } |
319 | 222 | ||
320 | void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num) | 223 | void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num) |
321 | { | 224 | { |
322 | struct hardwall_info *rect; | 225 | struct hardwall_info *rect; |
323 | struct hardwall_type *hwt; | ||
324 | struct task_struct *p; | 226 | struct task_struct *p; |
325 | struct siginfo info; | 227 | struct siginfo info; |
228 | int x, y; | ||
326 | int cpu = smp_processor_id(); | 229 | int cpu = smp_processor_id(); |
327 | int found_processes; | 230 | int found_processes; |
328 | unsigned long flags; | 231 | unsigned long flags; |
329 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
330 | 232 | ||
233 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
331 | irq_enter(); | 234 | irq_enter(); |
332 | 235 | ||
333 | /* Figure out which network trapped. */ | ||
334 | switch (fault_num) { | ||
335 | #ifndef __tilepro__ | ||
336 | case INT_IDN_FIREWALL: | ||
337 | hwt = &hardwall_types[HARDWALL_IDN]; | ||
338 | break; | ||
339 | #endif | ||
340 | case INT_UDN_FIREWALL: | ||
341 | hwt = &hardwall_types[HARDWALL_UDN]; | ||
342 | break; | ||
343 | default: | ||
344 | BUG(); | ||
345 | } | ||
346 | BUG_ON(hwt->disabled); | ||
347 | |||
348 | /* This tile trapped a network access; find the rectangle. */ | 236 | /* This tile trapped a network access; find the rectangle. */ |
349 | spin_lock_irqsave(&hwt->lock, flags); | 237 | x = cpu % smp_width; |
350 | list_for_each_entry(rect, &hwt->list, list) { | 238 | y = cpu / smp_width; |
351 | if (cpumask_test_cpu(cpu, &rect->cpumask)) | 239 | spin_lock_irqsave(&hardwall_lock, flags); |
240 | list_for_each_entry(rect, &rectangles, list) { | ||
241 | if (contains(rect, x, y)) | ||
352 | break; | 242 | break; |
353 | } | 243 | } |
354 | 244 | ||
355 | /* | 245 | /* |
356 | * It shouldn't be possible not to find this cpu on the | 246 | * It shouldn't be possible not to find this cpu on the |
357 | * rectangle list, since only cpus in rectangles get hardwalled. | 247 | * rectangle list, since only cpus in rectangles get hardwalled. |
358 | * The hardwall is only removed after the user network is drained. | 248 | * The hardwall is only removed after the UDN is drained. |
359 | */ | 249 | */ |
360 | BUG_ON(&rect->list == &hwt->list); | 250 | BUG_ON(&rect->list == &rectangles); |
361 | 251 | ||
362 | /* | 252 | /* |
363 | * If we already started teardown on this hardwall, don't worry; | 253 | * If we already started teardown on this hardwall, don't worry; |
@@ -365,32 +255,30 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num) | |||
365 | * to quiesce. | 255 | * to quiesce. |
366 | */ | 256 | */ |
367 | if (rect->teardown_in_progress) { | 257 | if (rect->teardown_in_progress) { |
368 | pr_notice("cpu %d: detected %s hardwall violation %#lx" | 258 | pr_notice("cpu %d: detected hardwall violation %#lx" |
369 | " while teardown already in progress\n", | 259 | " while teardown already in progress\n", |
370 | cpu, hwt->name, | 260 | cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT)); |
371 | (long)mfspr_XDN(hwt, DIRECTION_PROTECT)); | ||
372 | goto done; | 261 | goto done; |
373 | } | 262 | } |
374 | 263 | ||
375 | /* | 264 | /* |
376 | * Kill off any process that is activated in this rectangle. | 265 | * Kill off any process that is activated in this rectangle. |
377 | * We bypass security to deliver the signal, since it must be | 266 | * We bypass security to deliver the signal, since it must be |
378 | * one of the activated processes that generated the user network | 267 | * one of the activated processes that generated the UDN |
379 | * message that caused this trap, and all the activated | 268 | * message that caused this trap, and all the activated |
380 | * processes shared a single open file so are pretty tightly | 269 | * processes shared a single open file so are pretty tightly |
381 | * bound together from a security point of view to begin with. | 270 | * bound together from a security point of view to begin with. |
382 | */ | 271 | */ |
383 | rect->teardown_in_progress = 1; | 272 | rect->teardown_in_progress = 1; |
384 | wmb(); /* Ensure visibility of rectangle before notifying processes. */ | 273 | wmb(); /* Ensure visibility of rectangle before notifying processes. */ |
385 | pr_notice("cpu %d: detected %s hardwall violation %#lx...\n", | 274 | pr_notice("cpu %d: detected hardwall violation %#lx...\n", |
386 | cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT)); | 275 | cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT)); |
387 | info.si_signo = SIGILL; | 276 | info.si_signo = SIGILL; |
388 | info.si_errno = 0; | 277 | info.si_errno = 0; |
389 | info.si_code = ILL_HARDWALL; | 278 | info.si_code = ILL_HARDWALL; |
390 | found_processes = 0; | 279 | found_processes = 0; |
391 | list_for_each_entry(p, &rect->task_head, | 280 | list_for_each_entry(p, &rect->task_head, thread.hardwall_list) { |
392 | thread.hardwall[hwt->index].list) { | 281 | BUG_ON(p->thread.hardwall != rect); |
393 | BUG_ON(p->thread.hardwall[hwt->index].info != rect); | ||
394 | if (!(p->flags & PF_EXITING)) { | 282 | if (!(p->flags & PF_EXITING)) { |
395 | found_processes = 1; | 283 | found_processes = 1; |
396 | pr_notice("hardwall: killing %d\n", p->pid); | 284 | pr_notice("hardwall: killing %d\n", p->pid); |
@@ -401,7 +289,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num) | |||
401 | pr_notice("hardwall: no associated processes!\n"); | 289 | pr_notice("hardwall: no associated processes!\n"); |
402 | 290 | ||
403 | done: | 291 | done: |
404 | spin_unlock_irqrestore(&hwt->lock, flags); | 292 | spin_unlock_irqrestore(&hardwall_lock, flags); |
405 | 293 | ||
406 | /* | 294 | /* |
407 | * We have to disable firewall interrupts now, or else when we | 295 | * We have to disable firewall interrupts now, or else when we |
@@ -410,87 +298,48 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num) | |||
410 | * haven't yet drained the network, and that would allow packets | 298 | * haven't yet drained the network, and that would allow packets |
411 | * to cross out of the hardwall region. | 299 | * to cross out of the hardwall region. |
412 | */ | 300 | */ |
413 | disable_firewall_interrupts(hwt); | 301 | disable_firewall_interrupts(); |
414 | 302 | ||
415 | irq_exit(); | 303 | irq_exit(); |
416 | set_irq_regs(old_regs); | 304 | set_irq_regs(old_regs); |
417 | } | 305 | } |
418 | 306 | ||
419 | /* Allow access from user space to the user network. */ | 307 | /* Allow access from user space to the UDN. */ |
420 | void grant_hardwall_mpls(struct hardwall_type *hwt) | 308 | void grant_network_mpls(void) |
421 | { | 309 | { |
422 | #ifndef __tilepro__ | 310 | __insn_mtspr(SPR_MPL_UDN_ACCESS_SET_0, 1); |
423 | if (!hwt->is_xdn) { | 311 | __insn_mtspr(SPR_MPL_UDN_AVAIL_SET_0, 1); |
424 | __insn_mtspr(SPR_MPL_IPI_0_SET_0, 1); | 312 | __insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_0, 1); |
425 | return; | 313 | __insn_mtspr(SPR_MPL_UDN_TIMER_SET_0, 1); |
426 | } | ||
427 | #endif | ||
428 | mtspr_MPL_XDN(hwt, ACCESS_SET_0, 1); | ||
429 | mtspr_MPL_XDN(hwt, AVAIL_SET_0, 1); | ||
430 | mtspr_MPL_XDN(hwt, COMPLETE_SET_0, 1); | ||
431 | mtspr_MPL_XDN(hwt, TIMER_SET_0, 1); | ||
432 | #if !CHIP_HAS_REV1_XDN() | 314 | #if !CHIP_HAS_REV1_XDN() |
433 | mtspr_MPL_XDN(hwt, REFILL_SET_0, 1); | 315 | __insn_mtspr(SPR_MPL_UDN_REFILL_SET_0, 1); |
434 | mtspr_MPL_XDN(hwt, CA_SET_0, 1); | 316 | __insn_mtspr(SPR_MPL_UDN_CA_SET_0, 1); |
435 | #endif | 317 | #endif |
436 | } | 318 | } |
437 | 319 | ||
438 | /* Deny access from user space to the user network. */ | 320 | /* Deny access from user space to the UDN. */ |
439 | void restrict_hardwall_mpls(struct hardwall_type *hwt) | 321 | void restrict_network_mpls(void) |
440 | { | 322 | { |
441 | #ifndef __tilepro__ | 323 | __insn_mtspr(SPR_MPL_UDN_ACCESS_SET_1, 1); |
442 | if (!hwt->is_xdn) { | 324 | __insn_mtspr(SPR_MPL_UDN_AVAIL_SET_1, 1); |
443 | __insn_mtspr(SPR_MPL_IPI_0_SET_1, 1); | 325 | __insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_1, 1); |
444 | return; | 326 | __insn_mtspr(SPR_MPL_UDN_TIMER_SET_1, 1); |
445 | } | ||
446 | #endif | ||
447 | mtspr_MPL_XDN(hwt, ACCESS_SET_1, 1); | ||
448 | mtspr_MPL_XDN(hwt, AVAIL_SET_1, 1); | ||
449 | mtspr_MPL_XDN(hwt, COMPLETE_SET_1, 1); | ||
450 | mtspr_MPL_XDN(hwt, TIMER_SET_1, 1); | ||
451 | #if !CHIP_HAS_REV1_XDN() | 327 | #if !CHIP_HAS_REV1_XDN() |
452 | mtspr_MPL_XDN(hwt, REFILL_SET_1, 1); | 328 | __insn_mtspr(SPR_MPL_UDN_REFILL_SET_1, 1); |
453 | mtspr_MPL_XDN(hwt, CA_SET_1, 1); | 329 | __insn_mtspr(SPR_MPL_UDN_CA_SET_1, 1); |
454 | #endif | 330 | #endif |
455 | } | 331 | } |
456 | 332 | ||
457 | /* Restrict or deny as necessary for the task we're switching to. */ | ||
458 | void hardwall_switch_tasks(struct task_struct *prev, | ||
459 | struct task_struct *next) | ||
460 | { | ||
461 | int i; | ||
462 | for (i = 0; i < HARDWALL_TYPES; ++i) { | ||
463 | if (prev->thread.hardwall[i].info != NULL) { | ||
464 | if (next->thread.hardwall[i].info == NULL) | ||
465 | restrict_hardwall_mpls(&hardwall_types[i]); | ||
466 | } else if (next->thread.hardwall[i].info != NULL) { | ||
467 | grant_hardwall_mpls(&hardwall_types[i]); | ||
468 | } | ||
469 | } | ||
470 | } | ||
471 | |||
472 | /* Does this task have the right to IPI the given cpu? */ | ||
473 | int hardwall_ipi_valid(int cpu) | ||
474 | { | ||
475 | #ifdef __tilegx__ | ||
476 | struct hardwall_info *info = | ||
477 | current->thread.hardwall[HARDWALL_IPI].info; | ||
478 | return info && cpumask_test_cpu(cpu, &info->cpumask); | ||
479 | #else | ||
480 | return 0; | ||
481 | #endif | ||
482 | } | ||
483 | 333 | ||
484 | /* | 334 | /* |
485 | * Code to create, activate, deactivate, and destroy hardwall resources. | 335 | * Code to create, activate, deactivate, and destroy hardwall rectangles. |
486 | */ | 336 | */ |
487 | 337 | ||
488 | /* Create a hardwall for the given resource */ | 338 | /* Create a hardwall for the given rectangle */ |
489 | static struct hardwall_info *hardwall_create(struct hardwall_type *hwt, | 339 | static struct hardwall_info *hardwall_create( |
490 | size_t size, | 340 | size_t size, const unsigned char __user *bits) |
491 | const unsigned char __user *bits) | ||
492 | { | 341 | { |
493 | struct hardwall_info *iter, *info; | 342 | struct hardwall_info *iter, *rect; |
494 | struct cpumask mask; | 343 | struct cpumask mask; |
495 | unsigned long flags; | 344 | unsigned long flags; |
496 | int rc; | 345 | int rc; |
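hardwall_switch_tasks() earlier in this hunk touches the MPLs only when hardwall ownership actually changes hands across a context switch. A sketch restating that per-type rule in isolation:

    /* Sketch restating the decision hardwall_switch_tasks() makes for
     * each hardwall type on a context switch. */
    static void switch_one_type_sketch(struct hardwall_info *prev_info,
                                       struct hardwall_info *next_info,
                                       struct hardwall_type *hwt)
    {
            if (prev_info && !next_info)
                    restrict_hardwall_mpls(hwt);  /* next task loses access */
            else if (!prev_info && next_info)
                    grant_hardwall_mpls(hwt);     /* next task gains access */
            /* both set or both NULL: PL0 access is already correct */
    }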
@@ -521,62 +370,55 @@ static struct hardwall_info *hardwall_create(struct hardwall_type *hwt, | |||
521 | } | 370 | } |
522 | } | 371 | } |
523 | 372 | ||
524 | /* Allocate a new hardwall_info optimistically. */ | 373 | /* Allocate a new rectangle optimistically. */ |
525 | info = kmalloc(sizeof(struct hardwall_info), | 374 | rect = kmalloc(sizeof(struct hardwall_info), |
526 | GFP_KERNEL | __GFP_ZERO); | 375 | GFP_KERNEL | __GFP_ZERO); |
527 | if (info == NULL) | 376 | if (rect == NULL) |
528 | return ERR_PTR(-ENOMEM); | 377 | return ERR_PTR(-ENOMEM); |
529 | INIT_LIST_HEAD(&info->task_head); | 378 | INIT_LIST_HEAD(&rect->task_head); |
530 | info->type = hwt; | ||
531 | 379 | ||
532 | /* Compute the rectangle size and validate that it's plausible. */ | 380 | /* Compute the rectangle size and validate that it's plausible. */ |
533 | cpumask_copy(&info->cpumask, &mask); | 381 | rc = setup_rectangle(rect, &mask); |
534 | info->id = find_first_bit(cpumask_bits(&mask), nr_cpumask_bits); | 382 | if (rc != 0) { |
535 | if (hwt->is_xdn) { | 383 | kfree(rect); |
536 | rc = check_rectangle(info, &mask); | 384 | return ERR_PTR(rc); |
537 | if (rc != 0) { | ||
538 | kfree(info); | ||
539 | return ERR_PTR(rc); | ||
540 | } | ||
541 | } | 385 | } |
542 | 386 | ||
543 | /* Confirm it doesn't overlap and add it to the list. */ | 387 | /* Confirm it doesn't overlap and add it to the list. */ |
544 | spin_lock_irqsave(&hwt->lock, flags); | 388 | spin_lock_irqsave(&hardwall_lock, flags); |
545 | list_for_each_entry(iter, &hwt->list, list) { | 389 | list_for_each_entry(iter, &rectangles, list) { |
546 | if (cpumask_intersects(&iter->cpumask, &info->cpumask)) { | 390 | if (overlaps(iter, rect)) { |
547 | spin_unlock_irqrestore(&hwt->lock, flags); | 391 | spin_unlock_irqrestore(&hardwall_lock, flags); |
548 | kfree(info); | 392 | kfree(rect); |
549 | return ERR_PTR(-EBUSY); | 393 | return ERR_PTR(-EBUSY); |
550 | } | 394 | } |
551 | } | 395 | } |
552 | list_add_tail(&info->list, &hwt->list); | 396 | list_add_tail(&rect->list, &rectangles); |
553 | spin_unlock_irqrestore(&hwt->lock, flags); | 397 | spin_unlock_irqrestore(&hardwall_lock, flags); |
554 | 398 | ||
555 | /* Set up appropriate hardwalling on all affected cpus. */ | 399 | /* Set up appropriate hardwalling on all affected cpus. */ |
556 | if (hwt->is_xdn) | 400 | hardwall_setup(rect); |
557 | hardwall_protect_rectangle(info); | ||
558 | 401 | ||
559 | /* Create a /proc/tile/hardwall entry. */ | 402 | /* Create a /proc/tile/hardwall entry. */ |
560 | hardwall_add_proc(info); | 403 | hardwall_add_proc(rect); |
561 | 404 | ||
562 | return info; | 405 | return rect; |
563 | } | 406 | } |
564 | 407 | ||
565 | /* Activate a given hardwall on this cpu for this process. */ | 408 | /* Activate a given hardwall on this cpu for this process. */ |
566 | static int hardwall_activate(struct hardwall_info *info) | 409 | static int hardwall_activate(struct hardwall_info *rect) |
567 | { | 410 | { |
568 | int cpu; | 411 | int cpu, x, y; |
569 | unsigned long flags; | 412 | unsigned long flags; |
570 | struct task_struct *p = current; | 413 | struct task_struct *p = current; |
571 | struct thread_struct *ts = &p->thread; | 414 | struct thread_struct *ts = &p->thread; |
572 | struct hardwall_type *hwt; | ||
573 | 415 | ||
574 | /* Require a hardwall. */ | 416 | /* Require a rectangle. */ |
575 | if (info == NULL) | 417 | if (rect == NULL) |
576 | return -ENODATA; | 418 | return -ENODATA; |
577 | 419 | ||
578 | /* Not allowed to activate a hardwall that is being torn down. */ | 420 | /* Not allowed to activate a rectangle that is being torn down. */ |
579 | if (info->teardown_in_progress) | 421 | if (rect->teardown_in_progress) |
580 | return -EINVAL; | 422 | return -EINVAL; |
581 | 423 | ||
582 | /* | 424 | /* |
@@ -586,87 +428,78 @@ static int hardwall_activate(struct hardwall_info *info) | |||
586 | if (cpumask_weight(&p->cpus_allowed) != 1) | 428 | if (cpumask_weight(&p->cpus_allowed) != 1) |
587 | return -EPERM; | 429 | return -EPERM; |
588 | 430 | ||
589 | /* Make sure we are bound to a cpu assigned to this resource. */ | 431 | /* Make sure we are bound to a cpu in this rectangle. */ |
590 | cpu = smp_processor_id(); | 432 | cpu = smp_processor_id(); |
591 | BUG_ON(cpumask_first(&p->cpus_allowed) != cpu); | 433 | BUG_ON(cpumask_first(&p->cpus_allowed) != cpu); |
592 | if (!cpumask_test_cpu(cpu, &info->cpumask)) | 434 | x = cpu_x(cpu); |
435 | y = cpu_y(cpu); | ||
436 | if (!contains(rect, x, y)) | ||
593 | return -EINVAL; | 437 | return -EINVAL; |
594 | 438 | ||
595 | /* If we are already bound to this hardwall, it's a no-op. */ | 439 | /* If we are already bound to this hardwall, it's a no-op. */ |
596 | hwt = info->type; | 440 | if (ts->hardwall) { |
597 | if (ts->hardwall[hwt->index].info) { | 441 | BUG_ON(ts->hardwall != rect); |
598 | BUG_ON(ts->hardwall[hwt->index].info != info); | ||
599 | return 0; | 442 | return 0; |
600 | } | 443 | } |
601 | 444 | ||
602 | /* Success! This process gets to use the resource on this cpu. */ | 445 | /* Success! This process gets to use the user networks on this cpu. */ |
603 | ts->hardwall[hwt->index].info = info; | 446 | ts->hardwall = rect; |
604 | spin_lock_irqsave(&hwt->lock, flags); | 447 | spin_lock_irqsave(&hardwall_lock, flags); |
605 | list_add(&ts->hardwall[hwt->index].list, &info->task_head); | 448 | list_add(&ts->hardwall_list, &rect->task_head); |
606 | spin_unlock_irqrestore(&hwt->lock, flags); | 449 | spin_unlock_irqrestore(&hardwall_lock, flags); |
607 | grant_hardwall_mpls(hwt); | 450 | grant_network_mpls(); |
608 | printk(KERN_DEBUG "Pid %d (%s) activated for %s hardwall: cpu %d\n", | 451 | printk(KERN_DEBUG "Pid %d (%s) activated for hardwall: cpu %d\n", |
609 | p->pid, p->comm, hwt->name, cpu); | 452 | p->pid, p->comm, cpu); |
610 | return 0; | 453 | return 0; |
611 | } | 454 | } |
612 | 455 | ||
613 | /* | 456 | /* |
614 | * Deactivate a task's hardwall. Must hold lock for hardwall_type. | 457 | * Deactivate a task's hardwall. Must hold hardwall_lock. |
615 | * This method may be called from free_task(), so we don't want to | 458 | * This method may be called from free_task(), so we don't want to |
616 | * rely on too many fields of struct task_struct still being valid. | 459 | * rely on too many fields of struct task_struct still being valid. |
617 | * We assume the cpus_allowed, pid, and comm fields are still valid. | 460 | * We assume the cpus_allowed, pid, and comm fields are still valid. |
618 | */ | 461 | */ |
619 | static void _hardwall_deactivate(struct hardwall_type *hwt, | 462 | static void _hardwall_deactivate(struct task_struct *task) |
620 | struct task_struct *task) | ||
621 | { | 463 | { |
622 | struct thread_struct *ts = &task->thread; | 464 | struct thread_struct *ts = &task->thread; |
623 | 465 | ||
624 | if (cpumask_weight(&task->cpus_allowed) != 1) { | 466 | if (cpumask_weight(&task->cpus_allowed) != 1) { |
625 | pr_err("pid %d (%s) releasing %s hardwall with" | 467 | pr_err("pid %d (%s) releasing networks with" |
626 | " an affinity mask containing %d cpus!\n", | 468 | " an affinity mask containing %d cpus!\n", |
627 | task->pid, task->comm, hwt->name, | 469 | task->pid, task->comm, |
628 | cpumask_weight(&task->cpus_allowed)); | 470 | cpumask_weight(&task->cpus_allowed)); |
629 | BUG(); | 471 | BUG(); |
630 | } | 472 | } |
631 | 473 | ||
632 | BUG_ON(ts->hardwall[hwt->index].info == NULL); | 474 | BUG_ON(ts->hardwall == NULL); |
633 | ts->hardwall[hwt->index].info = NULL; | 475 | ts->hardwall = NULL; |
634 | list_del(&ts->hardwall[hwt->index].list); | 476 | list_del(&ts->hardwall_list); |
635 | if (task == current) | 477 | if (task == current) |
636 | restrict_hardwall_mpls(hwt); | 478 | restrict_network_mpls(); |
637 | } | 479 | } |
638 | 480 | ||
639 | /* Deactivate a task's hardwall. */ | 481 | /* Deactivate a task's hardwall. */ |
640 | static int hardwall_deactivate(struct hardwall_type *hwt, | 482 | int hardwall_deactivate(struct task_struct *task) |
641 | struct task_struct *task) | ||
642 | { | 483 | { |
643 | unsigned long flags; | 484 | unsigned long flags; |
644 | int activated; | 485 | int activated; |
645 | 486 | ||
646 | spin_lock_irqsave(&hwt->lock, flags); | 487 | spin_lock_irqsave(&hardwall_lock, flags); |
647 | activated = (task->thread.hardwall[hwt->index].info != NULL); | 488 | activated = (task->thread.hardwall != NULL); |
648 | if (activated) | 489 | if (activated) |
649 | _hardwall_deactivate(hwt, task); | 490 | _hardwall_deactivate(task); |
650 | spin_unlock_irqrestore(&hwt->lock, flags); | 491 | spin_unlock_irqrestore(&hardwall_lock, flags); |
651 | 492 | ||
652 | if (!activated) | 493 | if (!activated) |
653 | return -EINVAL; | 494 | return -EINVAL; |
654 | 495 | ||
655 | printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n", | 496 | printk(KERN_DEBUG "Pid %d (%s) deactivated for hardwall: cpu %d\n", |
656 | task->pid, task->comm, hwt->name, smp_processor_id()); | 497 | task->pid, task->comm, smp_processor_id()); |
657 | return 0; | 498 | return 0; |
658 | } | 499 | } |
659 | 500 | ||
660 | void hardwall_deactivate_all(struct task_struct *task) | 501 | /* Stop a UDN switch before draining the network. */ |
661 | { | 502 | static void stop_udn_switch(void *ignored) |
662 | int i; | ||
663 | for (i = 0; i < HARDWALL_TYPES; ++i) | ||
664 | if (task->thread.hardwall[i].info) | ||
665 | hardwall_deactivate(&hardwall_types[i], task); | ||
666 | } | ||
667 | |||
668 | /* Stop the switch before draining the network. */ | ||
669 | static void stop_xdn_switch(void *arg) | ||
670 | { | 503 | { |
671 | #if !CHIP_HAS_REV1_XDN() | 504 | #if !CHIP_HAS_REV1_XDN() |
672 | /* Freeze the switch and the demux. */ | 505 | /* Freeze the switch and the demux. */ |
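hardwall_activate() in the hunk above requires the caller's affinity mask to contain exactly one cpu (else -EPERM), and that cpu must belong to the hardwall (else -EINVAL). From user space that implies pinning before activating — a hedged sketch, assuming the HARDWALL_ACTIVATE ioctl macro lives in the uapi header, which this diff does not show:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <sys/ioctl.h>
    #include <asm/hardwall.h>   /* assumed home of HARDWALL_ACTIVATE */

    /* User-space sketch: pin to one cpu inside the wall, then activate. */
    static int activate_on_cpu(int fd, int cpu)
    {
            cpu_set_t set;
            CPU_ZERO(&set);
            CPU_SET(cpu, &set);  /* exactly one cpu, or -EPERM results */
            if (sched_setaffinity(0, sizeof(set), &set) != 0)
                    return -1;
            return ioctl(fd, HARDWALL_ACTIVATE);
    }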
@@ -674,71 +507,13 @@ static void stop_xdn_switch(void *arg) | |||
674 | SPR_UDN_SP_FREEZE__SP_FRZ_MASK | | 507 | SPR_UDN_SP_FREEZE__SP_FRZ_MASK | |
675 | SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK | | 508 | SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK | |
676 | SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK); | 509 | SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK); |
677 | #else | ||
678 | /* | ||
679 | * Drop all packets bound for the core or off the edge. | ||
680 | * We rely on the normal hardwall protection setup code | ||
681 | * to have set the low four bits to trigger firewall interrupts, | ||
682 | * and shift those bits up to trigger "drop on send" semantics, | ||
683 | * plus adding "drop on send to core" for all switches. | ||
684 | * In practice it seems the switches latch the DIRECTION_PROTECT | ||
685 | * SPR so they won't start dropping if they're already | ||
686 | * delivering the last message to the core, but it doesn't | ||
687 | * hurt to enable it here. | ||
688 | */ | ||
689 | struct hardwall_type *hwt = arg; | ||
690 | unsigned long protect = mfspr_XDN(hwt, DIRECTION_PROTECT); | ||
691 | mtspr_XDN(hwt, DIRECTION_PROTECT, (protect | C_PROTECT) << 5); | ||
692 | #endif | 510 | #endif |
693 | } | 511 | } |
694 | 512 | ||
695 | static void empty_xdn_demuxes(struct hardwall_type *hwt) | ||
696 | { | ||
697 | #ifndef __tilepro__ | ||
698 | if (hwt->is_idn) { | ||
699 | while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 0)) | ||
700 | (void) __tile_idn0_receive(); | ||
701 | while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 1)) | ||
702 | (void) __tile_idn1_receive(); | ||
703 | return; | ||
704 | } | ||
705 | #endif | ||
706 | while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0)) | ||
707 | (void) __tile_udn0_receive(); | ||
708 | while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1)) | ||
709 | (void) __tile_udn1_receive(); | ||
710 | while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2)) | ||
711 | (void) __tile_udn2_receive(); | ||
712 | while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3)) | ||
713 | (void) __tile_udn3_receive(); | ||
714 | } | ||
715 | |||
716 | /* Drain all the state from a stopped switch. */ | 513 | /* Drain all the state from a stopped switch. */ |
717 | static void drain_xdn_switch(void *arg) | 514 | static void drain_udn_switch(void *ignored) |
718 | { | 515 | { |
719 | struct hardwall_info *info = arg; | 516 | #if !CHIP_HAS_REV1_XDN() |
720 | struct hardwall_type *hwt = info->type; | ||
721 | |||
722 | #if CHIP_HAS_REV1_XDN() | ||
723 | /* | ||
724 | * The switches have been configured to drop any messages | ||
725 | * destined for cores (or off the edge of the rectangle). | ||
726 | * But the current message may continue to be delivered, | ||
727 | * so we wait until all the cores have finished any pending | ||
728 | * messages before we stop draining. | ||
729 | */ | ||
730 | int pending = mfspr_XDN(hwt, PENDING); | ||
731 | while (pending--) { | ||
732 | empty_xdn_demuxes(hwt); | ||
733 | if (hwt->is_idn) | ||
734 | __tile_idn_send(0); | ||
735 | else | ||
736 | __tile_udn_send(0); | ||
737 | } | ||
738 | atomic_dec(&info->xdn_pending_count); | ||
739 | while (atomic_read(&info->xdn_pending_count)) | ||
740 | empty_xdn_demuxes(hwt); | ||
741 | #else | ||
742 | int i; | 517 | int i; |
743 | int from_tile_words, ca_count; | 518 | int from_tile_words, ca_count; |
744 | 519 | ||
@@ -758,7 +533,15 @@ static void drain_xdn_switch(void *arg) | |||
758 | (void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO); | 533 | (void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO); |
759 | 534 | ||
760 | /* Empty out demuxes. */ | 535 | /* Empty out demuxes. */ |
761 | empty_xdn_demuxes(hwt); | 536 | while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0)) |
537 | (void) __tile_udn0_receive(); | ||
538 | while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1)) | ||
539 | (void) __tile_udn1_receive(); | ||
540 | while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2)) | ||
541 | (void) __tile_udn2_receive(); | ||
542 | while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3)) | ||
543 | (void) __tile_udn3_receive(); | ||
544 | BUG_ON((__insn_mfspr(SPR_UDN_DATA_AVAIL) & 0xF) != 0); | ||
762 | 545 | ||
763 | /* Empty out catch all. */ | 546 | /* Empty out catch all. */ |
764 | ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT); | 547 | ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT); |
@@ -780,25 +563,21 @@ static void drain_xdn_switch(void *arg) | |||
780 | #endif | 563 | #endif |
781 | } | 564 | } |
782 | 565 | ||
783 | /* Reset random XDN state registers at boot up and during hardwall teardown. */ | 566 | /* Reset random UDN state registers at boot up and during hardwall teardown. */ |
784 | static void reset_xdn_network_state(struct hardwall_type *hwt) | 567 | void reset_network_state(void) |
785 | { | 568 | { |
786 | if (hwt->disabled) | 569 | #if !CHIP_HAS_REV1_XDN() |
787 | return; | 570 | /* Reset UDN coordinates to their standard value */ |
571 | unsigned int cpu = smp_processor_id(); | ||
572 | unsigned int x = cpu % smp_width; | ||
573 | unsigned int y = cpu / smp_width; | ||
574 | #endif | ||
788 | 575 | ||
789 | /* Clear out other random registers so we have a clean slate. */ | 576 | if (udn_disabled) |
790 | mtspr_XDN(hwt, DIRECTION_PROTECT, 0); | 577 | return; |
791 | mtspr_XDN(hwt, AVAIL_EN, 0); | ||
792 | mtspr_XDN(hwt, DEADLOCK_TIMEOUT, 0); | ||
793 | 578 | ||
794 | #if !CHIP_HAS_REV1_XDN() | 579 | #if !CHIP_HAS_REV1_XDN() |
795 | /* Reset UDN coordinates to their standard value */ | 580 | __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7)); |
796 | { | ||
797 | unsigned int cpu = smp_processor_id(); | ||
798 | unsigned int x = cpu % smp_width; | ||
799 | unsigned int y = cpu / smp_width; | ||
800 | __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7)); | ||
801 | } | ||
802 | 581 | ||
803 | /* Set demux tags to predefined values and enable them. */ | 582 | /* Set demux tags to predefined values and enable them. */ |
804 | __insn_mtspr(SPR_UDN_TAG_VALID, 0xf); | 583 | __insn_mtspr(SPR_UDN_TAG_VALID, 0xf); |
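A worked example of the SPR_UDN_TILE_COORD write above — a sketch; the 18- and 7-bit shift positions are taken from the code as-is:

    /* Sketch: cpu 10 on an 8-wide mesh. */
    unsigned int cpu = 10, smp_width = 8;
    unsigned int x = cpu % smp_width;             /* 2 */
    unsigned int y = cpu / smp_width;             /* 1 */
    unsigned int coord = (x << 18) | (y << 7);    /* 0x80080 */
    /* i.e. __insn_mtspr(SPR_UDN_TILE_COORD, 0x80080) for that tile */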
@@ -806,50 +585,56 @@ static void reset_xdn_network_state(struct hardwall_type *hwt) | |||
806 | __insn_mtspr(SPR_UDN_TAG_1, (1 << 1)); | 585 | __insn_mtspr(SPR_UDN_TAG_1, (1 << 1)); |
807 | __insn_mtspr(SPR_UDN_TAG_2, (1 << 2)); | 586 | __insn_mtspr(SPR_UDN_TAG_2, (1 << 2)); |
808 | __insn_mtspr(SPR_UDN_TAG_3, (1 << 3)); | 587 | __insn_mtspr(SPR_UDN_TAG_3, (1 << 3)); |
588 | #endif | ||
809 | 589 | ||
810 | /* Set other rev0 random registers to a clean state. */ | 590 | /* Clear out other random registers so we have a clean slate. */ |
591 | __insn_mtspr(SPR_UDN_AVAIL_EN, 0); | ||
592 | __insn_mtspr(SPR_UDN_DEADLOCK_TIMEOUT, 0); | ||
593 | #if !CHIP_HAS_REV1_XDN() | ||
811 | __insn_mtspr(SPR_UDN_REFILL_EN, 0); | 594 | __insn_mtspr(SPR_UDN_REFILL_EN, 0); |
812 | __insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0); | 595 | __insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0); |
813 | __insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0); | 596 | __insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0); |
597 | #endif | ||
814 | 598 | ||
815 | /* Start the switch and demux. */ | 599 | /* Start the switch and demux. */ |
600 | #if !CHIP_HAS_REV1_XDN() | ||
816 | __insn_mtspr(SPR_UDN_SP_FREEZE, 0); | 601 | __insn_mtspr(SPR_UDN_SP_FREEZE, 0); |
817 | #endif | 602 | #endif |
818 | } | 603 | } |
819 | 604 | ||
820 | void reset_network_state(void) | 605 | /* Restart a UDN switch after draining. */ |
606 | static void restart_udn_switch(void *ignored) | ||
821 | { | 607 | { |
822 | reset_xdn_network_state(&hardwall_types[HARDWALL_UDN]); | 608 | reset_network_state(); |
823 | #ifndef __tilepro__ | 609 | |
824 | reset_xdn_network_state(&hardwall_types[HARDWALL_IDN]); | 610 | /* Disable firewall interrupts. */ |
825 | #endif | 611 | __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, 0); |
612 | disable_firewall_interrupts(); | ||
826 | } | 613 | } |
827 | 614 | ||
828 | /* Restart an XDN switch after draining. */ | 615 | /* Build a struct cpumask containing all valid tiles in bounding rectangle. */ |
829 | static void restart_xdn_switch(void *arg) | 616 | static void fill_mask(struct hardwall_info *r, struct cpumask *result) |
830 | { | 617 | { |
831 | struct hardwall_type *hwt = arg; | 618 | int x, y, cpu; |
832 | 619 | ||
833 | #if CHIP_HAS_REV1_XDN() | 620 | cpumask_clear(result); |
834 | /* One last drain step to avoid races with injection and draining. */ | ||
835 | empty_xdn_demuxes(hwt); | ||
836 | #endif | ||
837 | 621 | ||
838 | reset_xdn_network_state(hwt); | 622 | cpu = r->ulhc_y * smp_width + r->ulhc_x; |
839 | 623 | for (y = 0; y < r->height; ++y, cpu += smp_width - r->width) { | |
840 | /* Disable firewall interrupts. */ | 624 | for (x = 0; x < r->width; ++x, ++cpu) |
841 | disable_firewall_interrupts(hwt); | 625 | cpu_online_set(cpu, result); |
626 | } | ||
842 | } | 627 | } |
843 | 628 | ||
844 | /* Last reference to a hardwall is gone, so clear the network. */ | 629 | /* Last reference to a hardwall is gone, so clear the network. */ |
845 | static void hardwall_destroy(struct hardwall_info *info) | 630 | static void hardwall_destroy(struct hardwall_info *rect) |
846 | { | 631 | { |
847 | struct task_struct *task; | 632 | struct task_struct *task; |
848 | struct hardwall_type *hwt; | ||
849 | unsigned long flags; | 633 | unsigned long flags; |
634 | struct cpumask mask; | ||
850 | 635 | ||
851 | /* Make sure this file actually represents a hardwall. */ | 636 | /* Make sure this file actually represents a rectangle. */ |
852 | if (info == NULL) | 637 | if (rect == NULL) |
853 | return; | 638 | return; |
854 | 639 | ||
855 | /* | 640 | /* |
@@ -859,53 +644,39 @@ static void hardwall_destroy(struct hardwall_info *info) | |||
859 | * deactivate any remaining tasks before freeing the | 644 | * deactivate any remaining tasks before freeing the |
860 | * hardwall_info object itself. | 645 | * hardwall_info object itself. |
861 | */ | 646 | */ |
862 | hwt = info->type; | 647 | spin_lock_irqsave(&hardwall_lock, flags); |
863 | info->teardown_in_progress = 1; | 648 | list_for_each_entry(task, &rect->task_head, thread.hardwall_list) |
864 | spin_lock_irqsave(&hwt->lock, flags); | 649 | _hardwall_deactivate(task); |
865 | list_for_each_entry(task, &info->task_head, | 650 | spin_unlock_irqrestore(&hardwall_lock, flags); |
866 | thread.hardwall[hwt->index].list) | ||
867 | _hardwall_deactivate(hwt, task); | ||
868 | spin_unlock_irqrestore(&hwt->lock, flags); | ||
869 | |||
870 | if (hwt->is_xdn) { | ||
871 | /* Configure the switches for draining the user network. */ | ||
872 | printk(KERN_DEBUG | ||
873 | "Clearing %s hardwall rectangle %dx%d %d,%d\n", | ||
874 | hwt->name, info->width, info->height, | ||
875 | info->ulhc_x, info->ulhc_y); | ||
876 | on_each_cpu_mask(&info->cpumask, stop_xdn_switch, hwt, 1); | ||
877 | |||
878 | /* Drain the network. */ | ||
879 | #if CHIP_HAS_REV1_XDN() | ||
880 | atomic_set(&info->xdn_pending_count, | ||
881 | cpumask_weight(&info->cpumask)); | ||
882 | on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 0); | ||
883 | #else | ||
884 | on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 1); | ||
885 | #endif | ||
886 | 651 | ||
887 | /* Restart switch and disable firewall. */ | 652 | /* Drain the UDN. */ |
888 | on_each_cpu_mask(&info->cpumask, restart_xdn_switch, hwt, 1); | 653 | printk(KERN_DEBUG "Clearing hardwall rectangle %dx%d %d,%d\n", |
889 | } | 654 | rect->width, rect->height, rect->ulhc_x, rect->ulhc_y); |
655 | fill_mask(rect, &mask); | ||
656 | on_each_cpu_mask(&mask, stop_udn_switch, NULL, 1); | ||
657 | on_each_cpu_mask(&mask, drain_udn_switch, NULL, 1); | ||
658 | |||
659 | /* Restart switch and disable firewall. */ | ||
660 | on_each_cpu_mask(&mask, restart_udn_switch, NULL, 1); | ||
890 | 661 | ||
891 | /* Remove the /proc/tile/hardwall entry. */ | 662 | /* Remove the /proc/tile/hardwall entry. */ |
892 | hardwall_remove_proc(info); | 663 | hardwall_remove_proc(rect); |
893 | 664 | ||
894 | /* Now free the hardwall from the list. */ | 665 | /* Now free the rectangle from the list. */ |
895 | spin_lock_irqsave(&hwt->lock, flags); | 666 | spin_lock_irqsave(&hardwall_lock, flags); |
896 | BUG_ON(!list_empty(&info->task_head)); | 667 | BUG_ON(!list_empty(&rect->task_head)); |
897 | list_del(&info->list); | 668 | list_del(&rect->list); |
898 | spin_unlock_irqrestore(&hwt->lock, flags); | 669 | spin_unlock_irqrestore(&hardwall_lock, flags); |
899 | kfree(info); | 670 | kfree(rect); |
900 | } | 671 | } |
901 | 672 | ||
902 | 673 | ||
903 | static int hardwall_proc_show(struct seq_file *sf, void *v) | 674 | static int hardwall_proc_show(struct seq_file *sf, void *v) |
904 | { | 675 | { |
905 | struct hardwall_info *info = sf->private; | 676 | struct hardwall_info *rect = sf->private; |
906 | char buf[256]; | 677 | char buf[256]; |
907 | 678 | ||
908 | int rc = cpulist_scnprintf(buf, sizeof(buf), &info->cpumask); | 679 | int rc = cpulist_scnprintf(buf, sizeof(buf), &rect->cpumask); |
909 | buf[rc++] = '\n'; | 680 | buf[rc++] = '\n'; |
910 | seq_write(sf, buf, rc); | 681 | seq_write(sf, buf, rc); |
911 | return 0; | 682 | return 0; |
@@ -924,45 +695,31 @@ static const struct file_operations hardwall_proc_fops = { | |||
924 | .release = single_release, | 695 | .release = single_release, |
925 | }; | 696 | }; |
926 | 697 | ||
927 | static void hardwall_add_proc(struct hardwall_info *info) | 698 | static void hardwall_add_proc(struct hardwall_info *rect) |
928 | { | 699 | { |
929 | char buf[64]; | 700 | char buf[64]; |
930 | snprintf(buf, sizeof(buf), "%d", info->id); | 701 | snprintf(buf, sizeof(buf), "%d", rect->id); |
931 | proc_create_data(buf, 0444, info->type->proc_dir, | 702 | proc_create_data(buf, 0444, hardwall_proc_dir, |
932 | &hardwall_proc_fops, info); | 703 | &hardwall_proc_fops, rect); |
933 | } | 704 | } |
934 | 705 | ||
935 | static void hardwall_remove_proc(struct hardwall_info *info) | 706 | static void hardwall_remove_proc(struct hardwall_info *rect) |
936 | { | 707 | { |
937 | char buf[64]; | 708 | char buf[64]; |
938 | snprintf(buf, sizeof(buf), "%d", info->id); | 709 | snprintf(buf, sizeof(buf), "%d", rect->id); |
939 | remove_proc_entry(buf, info->type->proc_dir); | 710 | remove_proc_entry(buf, hardwall_proc_dir); |
940 | } | 711 | } |
941 | 712 | ||
942 | int proc_pid_hardwall(struct task_struct *task, char *buffer) | 713 | int proc_pid_hardwall(struct task_struct *task, char *buffer) |
943 | { | 714 | { |
944 | int i; | 715 | struct hardwall_info *rect = task->thread.hardwall; |
945 | int n = 0; | 716 | return rect ? sprintf(buffer, "%d\n", rect->id) : 0; |
946 | for (i = 0; i < HARDWALL_TYPES; ++i) { | ||
947 | struct hardwall_info *info = task->thread.hardwall[i].info; | ||
948 | if (info) | ||
949 | n += sprintf(&buffer[n], "%s: %d\n", | ||
950 | info->type->name, info->id); | ||
951 | } | ||
952 | return n; | ||
953 | } | 717 | } |
954 | 718 | ||
955 | void proc_tile_hardwall_init(struct proc_dir_entry *root) | 719 | void proc_tile_hardwall_init(struct proc_dir_entry *root) |
956 | { | 720 | { |
957 | int i; | 721 | if (!udn_disabled) |
958 | for (i = 0; i < HARDWALL_TYPES; ++i) { | 722 | hardwall_proc_dir = proc_mkdir("hardwall", root); |
959 | struct hardwall_type *hwt = &hardwall_types[i]; | ||
960 | if (hwt->disabled) | ||
961 | continue; | ||
962 | if (hardwall_proc_dir == NULL) | ||
963 | hardwall_proc_dir = proc_mkdir("hardwall", root); | ||
964 | hwt->proc_dir = proc_mkdir(hwt->name, hardwall_proc_dir); | ||
965 | } | ||
966 | } | 723 | } |
967 | 724 | ||
968 | 725 | ||
@@ -972,45 +729,34 @@ void proc_tile_hardwall_init(struct proc_dir_entry *root) | |||
972 | 729 | ||
973 | static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b) | 730 | static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b) |
974 | { | 731 | { |
975 | struct hardwall_info *info = file->private_data; | 732 | struct hardwall_info *rect = file->private_data; |
976 | int minor = iminor(file->f_mapping->host); | ||
977 | struct hardwall_type* hwt; | ||
978 | 733 | ||
979 | if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE) | 734 | if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE) |
980 | return -EINVAL; | 735 | return -EINVAL; |
981 | 736 | ||
982 | BUILD_BUG_ON(HARDWALL_TYPES != _HARDWALL_TYPES); | ||
983 | BUILD_BUG_ON(HARDWALL_TYPES != | ||
984 | sizeof(hardwall_types)/sizeof(hardwall_types[0])); | ||
985 | |||
986 | if (minor < 0 || minor >= HARDWALL_TYPES) | ||
987 | return -EINVAL; | ||
988 | hwt = &hardwall_types[minor]; | ||
989 | WARN_ON(info && hwt != info->type); | ||
990 | |||
991 | switch (_IOC_NR(a)) { | 737 | switch (_IOC_NR(a)) { |
992 | case _HARDWALL_CREATE: | 738 | case _HARDWALL_CREATE: |
993 | if (hwt->disabled) | 739 | if (udn_disabled) |
994 | return -ENOSYS; | 740 | return -ENOSYS; |
995 | if (info != NULL) | 741 | if (rect != NULL) |
996 | return -EALREADY; | 742 | return -EALREADY; |
997 | info = hardwall_create(hwt, _IOC_SIZE(a), | 743 | rect = hardwall_create(_IOC_SIZE(a), |
998 | (const unsigned char __user *)b); | 744 | (const unsigned char __user *)b); |
999 | if (IS_ERR(info)) | 745 | if (IS_ERR(rect)) |
1000 | return PTR_ERR(info); | 746 | return PTR_ERR(rect); |
1001 | file->private_data = info; | 747 | file->private_data = rect; |
1002 | return 0; | 748 | return 0; |
1003 | 749 | ||
1004 | case _HARDWALL_ACTIVATE: | 750 | case _HARDWALL_ACTIVATE: |
1005 | return hardwall_activate(info); | 751 | return hardwall_activate(rect); |
1006 | 752 | ||
1007 | case _HARDWALL_DEACTIVATE: | 753 | case _HARDWALL_DEACTIVATE: |
1008 | if (current->thread.hardwall[hwt->index].info != info) | 754 | if (current->thread.hardwall != rect) |
1009 | return -EINVAL; | 755 | return -EINVAL; |
1010 | return hardwall_deactivate(hwt, current); | 756 | return hardwall_deactivate(current); |
1011 | 757 | ||
1012 | case _HARDWALL_GET_ID: | 758 | case _HARDWALL_GET_ID: |
1013 | return info ? info->id : -EINVAL; | 759 | return rect ? rect->id : -EINVAL; |
1014 | 760 | ||
1015 | default: | 761 | default: |
1016 | return -EINVAL; | 762 | return -EINVAL; |
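The ioctl switch above implies the user-space call sequence. A hedged sketch, assuming the HARDWALL_CREATE(size) and HARDWALL_GET_ID ioctl macros come from the uapi header and that a /dev/hardwall node exists for the device — neither is shown in this diff:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <asm/hardwall.h>   /* assumed home of the HARDWALL_* macros */

    /* User-space sketch: create a hardwall from a cpumask bitmap. */
    static int create_hardwall(void)
    {
            /* cpus {1, 2, 9, 10}: a 2x2 block on an 8-wide mesh */
            unsigned char bits[8] = { 0x06, 0x06 };
            int fd = open("/dev/hardwall", O_RDWR);  /* assumed node name */
            if (fd < 0)
                    return -1;
            if (ioctl(fd, HARDWALL_CREATE(sizeof(bits)), bits) != 0)
                    return -1;  /* e.g. -EBUSY on overlap, -EALREADY if reused */
            return ioctl(fd, HARDWALL_GET_ID);       /* the ulhc cpu, here 1 */
    }

Once created, the wall is also visible under /proc, per the earlier hunks: the old (a/) side registers /proc/tile/hardwall/<type>/<id>, while the new (b/) side uses a flat /proc/tile/hardwall/<id>; either file prints the cpumask in list form via cpulist_scnprintf().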
@@ -1029,28 +775,26 @@ static long hardwall_compat_ioctl(struct file *file, | |||
1029 | /* The user process closed the file; revoke access to user networks. */ | 775 | /* The user process closed the file; revoke access to user networks. */ |
1030 | static int hardwall_flush(struct file *file, fl_owner_t owner) | 776 | static int hardwall_flush(struct file *file, fl_owner_t owner) |
1031 | { | 777 | { |
1032 | struct hardwall_info *info = file->private_data; | 778 | struct hardwall_info *rect = file->private_data; |
1033 | struct task_struct *task, *tmp; | 779 | struct task_struct *task, *tmp; |
1034 | unsigned long flags; | 780 | unsigned long flags; |
1035 | 781 | ||
1036 | if (info) { | 782 | if (rect) { |
1037 | /* | 783 | /* |
1038 | * NOTE: if multiple threads are activated on this hardwall | 784 | * NOTE: if multiple threads are activated on this hardwall |
1039 | * file, the other threads will continue having access to the | 785 | * file, the other threads will continue having access to the |
1040 | * user network until they are context-switched out and back | 786 | * UDN until they are context-switched out and back in again. |
1041 | * in again. | ||
1042 | * | 787 | * |
1043 | * NOTE: A NULL files pointer means the task is being torn | 788 | * NOTE: A NULL files pointer means the task is being torn |
1044 | * down, so in that case we also deactivate it. | 789 | * down, so in that case we also deactivate it. |
1045 | */ | 790 | */ |
1046 | struct hardwall_type *hwt = info->type; | 791 | spin_lock_irqsave(&hardwall_lock, flags); |
1047 | spin_lock_irqsave(&hwt->lock, flags); | 792 | list_for_each_entry_safe(task, tmp, &rect->task_head, |
1048 | list_for_each_entry_safe(task, tmp, &info->task_head, | 793 | thread.hardwall_list) { |
1049 | thread.hardwall[hwt->index].list) { | ||
1050 | if (task->files == owner || task->files == NULL) | 794 | if (task->files == owner || task->files == NULL) |
1051 | _hardwall_deactivate(hwt, task); | 795 | _hardwall_deactivate(task); |
1052 | } | 796 | } |
1053 | spin_unlock_irqrestore(&hwt->lock, flags); | 797 | spin_unlock_irqrestore(&hardwall_lock, flags); |
1054 | } | 798 | } |
1055 | 799 | ||
1056 | return 0; | 800 | return 0; |
@@ -1080,11 +824,11 @@ static int __init dev_hardwall_init(void) | |||
1080 | int rc; | 824 | int rc; |
1081 | dev_t dev; | 825 | dev_t dev; |
1082 | 826 | ||
1083 | rc = alloc_chrdev_region(&dev, 0, HARDWALL_TYPES, "hardwall"); | 827 | rc = alloc_chrdev_region(&dev, 0, 1, "hardwall"); |
1084 | if (rc < 0) | 828 | if (rc < 0) |
1085 | return rc; | 829 | return rc; |
1086 | cdev_init(&hardwall_dev, &dev_hardwall_fops); | 830 | cdev_init(&hardwall_dev, &dev_hardwall_fops); |
1087 | rc = cdev_add(&hardwall_dev, dev, HARDWALL_TYPES); | 831 | rc = cdev_add(&hardwall_dev, dev, 1); |
1088 | if (rc < 0) | 832 | if (rc < 0) |
1089 | return rc; | 833 | return rc; |
1090 | 834 | ||