author    Linus Torvalds <torvalds@linux-foundation.org>  2012-05-25 18:59:38 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-05-25 18:59:38 -0400
commit    fa2af6e4fe0c4d2f8875d42625b25675e8584010 (patch)
tree      ef9a92949858ab763aa1bfda7cb11a5f7b84d123 /arch/tile/kernel
parent    109b9b0408e5f1dd327a44f446841a9fbe0bcd83 (diff)
parent    1fcb78e9da714d96f65edd37b29dae3b1f7df508 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
Pull tile updates from Chris Metcalf:
 "These changes cover a range of new arch/tile features and
  optimizations.  They've been through LKML review and on linux-next for
  a month or so.  There's also one bug-fix that just missed 3.4, which
  I've marked for stable."

Fixed up trivial conflict in arch/tile/Kconfig (new added tile Kconfig
entries clashing with the generic timer/clockevents changes).

* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  tile: default to tilegx_defconfig for ARCH=tile
  tile: fix bug where fls(0) was not returning 0
  arch/tile: mark TILEGX as not EXPERIMENTAL
  tile/mm/fault.c: Port OOM changes to handle_page_fault
  arch/tile: add descriptive text if the kernel reports a bad trap
  arch/tile: allow querying cpu module information from the hypervisor
  arch/tile: fix hardwall for tilegx and generalize for idn and ipi
  arch/tile: support multiple huge page sizes dynamically
  mm: add new arch_make_huge_pte() method for tile support
  arch/tile: support kexec() for tilegx
  arch/tile: support <asm/cachectl.h> header for cacheflush() syscall
  arch/tile: Allow tilegx to build with either 16K or 64K page size
  arch/tile: optimize get_user/put_user and friends
  arch/tile: support building big-endian kernel
  arch/tile: allow building Linux with transparent huge pages enabled
  arch/tile: use interrupt critical sections less
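(A note on the "fls(0)" fix in the list above: fls() is defined to return one more than the bit index of the most-significant set bit, and 0 when no bit is set. A minimal generic C sketch of those semantics — not the tile patch itself, which fixes the arch-specific implementation:

#include <assert.h>

/* Generic fls(): 1-based index of the highest set bit; 0 if x == 0. */
static int fls_generic(unsigned int x)
{
    int r = 0;
    while (x) {
        x >>= 1;
        r++;
    }
    return r;
}

int main(void)
{
    assert(fls_generic(0) == 0);            /* the case the fix restores */
    assert(fls_generic(1) == 1);
    assert(fls_generic(0x80000000u) == 32);
    return 0;
}
)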
Diffstat (limited to 'arch/tile/kernel')
-rw-r--r--  arch/tile/kernel/Makefile                                          |    3
-rw-r--r--  arch/tile/kernel/entry.S                                           |    3
-rw-r--r--  arch/tile/kernel/hardwall.c                                        |  754
-rw-r--r--  arch/tile/kernel/head_32.S                                         |    8
-rw-r--r--  arch/tile/kernel/head_64.S                                         |   22
-rw-r--r--  arch/tile/kernel/hvglue.lds                                        |    3
-rw-r--r--  arch/tile/kernel/intvec_64.S                                       |   80
-rw-r--r--  arch/tile/kernel/machine_kexec.c                                   |   42
-rw-r--r--  arch/tile/kernel/module.c                                          |   12
-rw-r--r--  arch/tile/kernel/proc.c                                            |    1
-rw-r--r--  arch/tile/kernel/process.c                                         |   16
-rw-r--r--  arch/tile/kernel/relocate_kernel_32.S (renamed from arch/tile/kernel/relocate_kernel.S) | 0
-rw-r--r--  arch/tile/kernel/relocate_kernel_64.S                              |  260
-rw-r--r--  arch/tile/kernel/setup.c                                           |  169
-rw-r--r--  arch/tile/kernel/single_step.c                                     |   16
-rw-r--r--  arch/tile/kernel/smp.c                                             |    2
-rw-r--r--  arch/tile/kernel/sys.c                                             |   10
-rw-r--r--  arch/tile/kernel/sysfs.c                                           |    8
-rw-r--r--  arch/tile/kernel/tlb.c                                             |   11
-rw-r--r--  arch/tile/kernel/traps.c                                           |   30
20 files changed, 1057 insertions, 393 deletions
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
index 0d826faf8f35..5de99248d8df 100644
--- a/arch/tile/kernel/Makefile
+++ b/arch/tile/kernel/Makefile
@@ -9,10 +9,9 @@ obj-y := backtrace.o entry.o irq.o messaging.o \
 intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o
 
 obj-$(CONFIG_HARDWALL) += hardwall.o
-obj-$(CONFIG_TILEGX) += futex_64.o
 obj-$(CONFIG_COMPAT) += compat.o compat_signal.o
 obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel_$(BITS).o
 obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index ec91568df880..133c4b56a99e 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -100,8 +100,9 @@ STD_ENTRY(smp_nap)
  */
 STD_ENTRY(_cpu_idle)
  movei r1, 1
+ IRQ_ENABLE_LOAD(r2, r3)
  mtspr INTERRUPT_CRITICAL_SECTION, r1
- IRQ_ENABLE(r2, r3) /* unmask, but still with ICS set */
+ IRQ_ENABLE_APPLY(r2, r3) /* unmask, but still with ICS set */
  mtspr INTERRUPT_CRITICAL_SECTION, zero
  .global _cpu_idle_nap
 _cpu_idle_nap:
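(The hunk above splits the old IRQ_ENABLE step in two: IRQ_ENABLE_LOAD does the memory loads before the interrupt critical section (ICS) is set, and IRQ_ENABLE_APPLY then performs only register/SPR writes while ICS is held, since a page fault taken under ICS would be fatal. A rough C-shaped sketch of that ordering constraint; ics_set(), irq_unmask_load() and irq_unmask_apply() are hypothetical stand-ins for the assembly macros, not real kernel APIs:

#include <stdio.h>

struct irq_mask_words { unsigned long lo, hi; };

static struct irq_mask_words irq_unmask_load(void)
{
    /* Like IRQ_ENABLE_LOAD: loads the enable masks from memory. */
    struct irq_mask_words w = { ~0UL, ~0UL };
    return w;
}

static void ics_set(int on) { printf("ICS=%d\n", on); }

static void irq_unmask_apply(const struct irq_mask_words *w)
{
    /* Like IRQ_ENABLE_APPLY: register-only SPR writes, safe under ICS. */
    printf("unmask %#lx/%#lx\n", w->lo, w->hi);
}

static void cpu_idle_sequence(void)
{
    struct irq_mask_words w = irq_unmask_load(); /* memory loads first */
    ics_set(1);            /* no memory references allowed past here */
    irq_unmask_apply(&w);  /* unmask, but still with ICS set */
    ics_set(0);
}

int main(void) { cpu_idle_sequence(); return 0; }
)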
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 8c41891aab34..20273ee37deb 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -33,59 +33,157 @@
 
 
 /*
- * This data structure tracks the rectangle data, etc., associated
- * one-to-one with a "struct file *" from opening HARDWALL_FILE.
+ * Implement a per-cpu "hardwall" resource class such as UDN or IPI.
+ * We use "hardwall" nomenclature throughout for historical reasons.
+ * The lock here controls access to the list data structure as well as
+ * to the items on the list.
+ */
+struct hardwall_type {
+ int index;
+ int is_xdn;
+ int is_idn;
+ int disabled;
+ const char *name;
+ struct list_head list;
+ spinlock_t lock;
+ struct proc_dir_entry *proc_dir;
+};
+
+enum hardwall_index {
+ HARDWALL_UDN = 0,
+#ifndef __tilepro__
+ HARDWALL_IDN = 1,
+ HARDWALL_IPI = 2,
+#endif
+ _HARDWALL_TYPES
+};
+
+static struct hardwall_type hardwall_types[] = {
+ { /* user-space access to UDN */
+ 0,
+ 1,
+ 0,
+ 0,
+ "udn",
+ LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
+ __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock),
+ NULL
+ },
+#ifndef __tilepro__
+ { /* user-space access to IDN */
+ 1,
+ 1,
+ 1,
+ 1, /* disabled pending hypervisor support */
+ "idn",
+ LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
+ __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock),
+ NULL
+ },
+ { /* access to user-space IPI */
+ 2,
+ 0,
+ 0,
+ 0,
+ "ipi",
+ LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
+ __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock),
+ NULL
+ },
+#endif
+};
+
+/*
+ * This data structure tracks the cpu data, etc., associated
+ * one-to-one with a "struct file *" from opening a hardwall device file.
  * Note that the file's private data points back to this structure.
  */
 struct hardwall_info {
- struct list_head list; /* "rectangles" list */
+ struct list_head list; /* for hardwall_types.list */
  struct list_head task_head; /* head of tasks in this hardwall */
- struct cpumask cpumask; /* cpus in the rectangle */
+ struct hardwall_type *type; /* type of this resource */
+ struct cpumask cpumask; /* cpus reserved */
+ int id; /* integer id for this hardwall */
+ int teardown_in_progress; /* are we tearing this one down? */
+
+ /* Remaining fields only valid for user-network resources. */
  int ulhc_x; /* upper left hand corner x coord */
  int ulhc_y; /* upper left hand corner y coord */
  int width; /* rectangle width */
  int height; /* rectangle height */
- int id; /* integer id for this hardwall */
- int teardown_in_progress; /* are we tearing this one down? */
+#if CHIP_HAS_REV1_XDN()
+ atomic_t xdn_pending_count; /* cores in phase 1 of drain */
+#endif
 };
 
-/* Currently allocated hardwall rectangles */
-static LIST_HEAD(rectangles);
 
 /* /proc/tile/hardwall */
 static struct proc_dir_entry *hardwall_proc_dir;
 
 /* Functions to manage files in /proc/tile/hardwall. */
-static void hardwall_add_proc(struct hardwall_info *rect);
-static void hardwall_remove_proc(struct hardwall_info *rect);
-
-/*
- * Guard changes to the hardwall data structures.
- * This could be finer grained (e.g. one lock for the list of hardwall
- * rectangles, then separate embedded locks for each one's list of tasks),
- * but there are subtle correctness issues when trying to start with
- * a task's "hardwall" pointer and lock the correct rectangle's embedded
- * lock in the presence of a simultaneous deactivation, so it seems
- * easier to have a single lock, given that none of these data
- * structures are touched very frequently during normal operation.
- */
-static DEFINE_SPINLOCK(hardwall_lock);
+static void hardwall_add_proc(struct hardwall_info *);
+static void hardwall_remove_proc(struct hardwall_info *);
 
 /* Allow disabling UDN access. */
-static int udn_disabled;
 static int __init noudn(char *str)
 {
  pr_info("User-space UDN access is disabled\n");
- udn_disabled = 1;
+ hardwall_types[HARDWALL_UDN].disabled = 1;
  return 0;
 }
 early_param("noudn", noudn);
 
+#ifndef __tilepro__
+/* Allow disabling IDN access. */
+static int __init noidn(char *str)
+{
+ pr_info("User-space IDN access is disabled\n");
+ hardwall_types[HARDWALL_IDN].disabled = 1;
+ return 0;
+}
+early_param("noidn", noidn);
+
+/* Allow disabling IPI access. */
+static int __init noipi(char *str)
+{
+ pr_info("User-space IPI access is disabled\n");
+ hardwall_types[HARDWALL_IPI].disabled = 1;
+ return 0;
+}
+early_param("noipi", noipi);
+#endif
+
 
 /*
- * Low-level primitives
+ * Low-level primitives for UDN/IDN
  */
 
+#ifdef __tilepro__
+#define mtspr_XDN(hwt, name, val) \
+ do { (void)(hwt); __insn_mtspr(SPR_UDN_##name, (val)); } while (0)
+#define mtspr_MPL_XDN(hwt, name, val) \
+ do { (void)(hwt); __insn_mtspr(SPR_MPL_UDN_##name, (val)); } while (0)
+#define mfspr_XDN(hwt, name) \
+ ((void)(hwt), __insn_mfspr(SPR_UDN_##name))
+#else
+#define mtspr_XDN(hwt, name, val) \
+ do { \
+ if ((hwt)->is_idn) \
+ __insn_mtspr(SPR_IDN_##name, (val)); \
+ else \
+ __insn_mtspr(SPR_UDN_##name, (val)); \
+ } while (0)
+#define mtspr_MPL_XDN(hwt, name, val) \
+ do { \
+ if ((hwt)->is_idn) \
+ __insn_mtspr(SPR_MPL_IDN_##name, (val)); \
+ else \
+ __insn_mtspr(SPR_MPL_UDN_##name, (val)); \
+ } while (0)
+#define mfspr_XDN(hwt, name) \
+ ((hwt)->is_idn ? __insn_mfspr(SPR_IDN_##name) : __insn_mfspr(SPR_UDN_##name))
+#endif
+
 /* Set a CPU bit if the CPU is online. */
 #define cpu_online_set(cpu, dst) do { \
  if (cpu_online(cpu)) \
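(A note on the mtspr_XDN()/mfspr_XDN() macros added above: they select the UDN or IDN SPR by token-pasting the register name onto the SPR_UDN_/SPR_IDN_ prefix, so one call site serves both networks. A standalone sketch of the same dispatch pattern, with a printf stub in place of the real __insn_mtspr() intrinsic and made-up SPR numbers:

#include <stdio.h>

/* Stub "SPR numbers" standing in for the real SPR_* constants. */
enum { SPR_UDN_DIRECTION_PROTECT = 1, SPR_IDN_DIRECTION_PROTECT = 2 };

struct hardwall_type { int is_idn; };

/* Stub for the __insn_mtspr() compiler intrinsic. */
#define __insn_mtspr(spr, val) \
    printf("mtspr %d <- %lu\n", (spr), (unsigned long)(val))

/* Same shape as the kernel macro: ## pastes the name onto each prefix. */
#define mtspr_XDN(hwt, name, val)                            \
    do {                                                     \
        if ((hwt)->is_idn)                                   \
            __insn_mtspr(SPR_IDN_##name, (val));             \
        else                                                 \
            __insn_mtspr(SPR_UDN_##name, (val));             \
    } while (0)

int main(void)
{
    struct hardwall_type udn = { 0 }, idn = { 1 };
    mtspr_XDN(&udn, DIRECTION_PROTECT, 0xf); /* -> SPR_UDN_... */
    mtspr_XDN(&idn, DIRECTION_PROTECT, 0xf); /* -> SPR_IDN_... */
    return 0;
}
)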
@@ -101,7 +199,7 @@ static int contains(struct hardwall_info *r, int x, int y)
 }
 
 /* Compute the rectangle parameters and validate the cpumask. */
-static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
+static int check_rectangle(struct hardwall_info *r, struct cpumask *mask)
 {
  int x, y, cpu, ulhc, lrhc;
 
@@ -114,8 +212,6 @@ static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
  r->ulhc_y = cpu_y(ulhc);
  r->width = cpu_x(lrhc) - r->ulhc_x + 1;
  r->height = cpu_y(lrhc) - r->ulhc_y + 1;
- cpumask_copy(&r->cpumask, mask);
- r->id = ulhc; /* The ulhc cpu id can be the hardwall id. */
 
  /* Width and height must be positive */
  if (r->width <= 0 || r->height <= 0)
@@ -128,7 +224,7 @@ static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
  return -EINVAL;
 
  /*
- * Note that offline cpus can't be drained when this UDN
+ * Note that offline cpus can't be drained when this user network
  * rectangle eventually closes. We used to detect this
  * situation and print a warning, but it annoyed users and
  * they ignored it anyway, so now we just return without a
@@ -137,16 +233,6 @@ static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
  return 0;
 }
 
-/* Do the two given rectangles overlap on any cpu? */
-static int overlaps(struct hardwall_info *a, struct hardwall_info *b)
-{
- return a->ulhc_x + a->width > b->ulhc_x && /* A not to the left */
- b->ulhc_x + b->width > a->ulhc_x && /* B not to the left */
- a->ulhc_y + a->height > b->ulhc_y && /* A not above */
- b->ulhc_y + b->height > a->ulhc_y; /* B not above */
-}
-
-
 /*
  * Hardware management of hardwall setup, teardown, trapping,
  * and enabling/disabling PL0 access to the networks.
@@ -157,23 +243,35 @@ enum direction_protect {
  N_PROTECT = (1 << 0),
  E_PROTECT = (1 << 1),
  S_PROTECT = (1 << 2),
- W_PROTECT = (1 << 3)
+ W_PROTECT = (1 << 3),
+ C_PROTECT = (1 << 4),
 };
 
-static void enable_firewall_interrupts(void)
+static inline int xdn_which_interrupt(struct hardwall_type *hwt)
+{
+#ifndef __tilepro__
+ if (hwt->is_idn)
+ return INT_IDN_FIREWALL;
+#endif
+ return INT_UDN_FIREWALL;
+}
+
+static void enable_firewall_interrupts(struct hardwall_type *hwt)
 {
- arch_local_irq_unmask_now(INT_UDN_FIREWALL);
+ arch_local_irq_unmask_now(xdn_which_interrupt(hwt));
 }
 
-static void disable_firewall_interrupts(void)
+static void disable_firewall_interrupts(struct hardwall_type *hwt)
 {
- arch_local_irq_mask_now(INT_UDN_FIREWALL);
+ arch_local_irq_mask_now(xdn_which_interrupt(hwt));
 }
 
 /* Set up hardwall on this cpu based on the passed hardwall_info. */
-static void hardwall_setup_ipi_func(void *info)
+static void hardwall_setup_func(void *info)
 {
  struct hardwall_info *r = info;
+ struct hardwall_type *hwt = r->type;
+
  int cpu = smp_processor_id();
  int x = cpu % smp_width;
  int y = cpu / smp_width;
@@ -187,13 +285,12 @@ static void hardwall_setup_ipi_func(void *info)
  if (y == r->ulhc_y + r->height - 1)
  bits |= S_PROTECT;
  BUG_ON(bits == 0);
- __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, bits);
- enable_firewall_interrupts();
-
+ mtspr_XDN(hwt, DIRECTION_PROTECT, bits);
+ enable_firewall_interrupts(hwt);
 }
 
 /* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */
-static void hardwall_setup(struct hardwall_info *r)
+static void hardwall_protect_rectangle(struct hardwall_info *r)
 {
  int x, y, cpu, delta;
  struct cpumask rect_cpus;
@@ -217,37 +314,50 @@ static void hardwall_setup(struct hardwall_info *r)
  }
 
  /* Then tell all the cpus to set up their protection SPR */
- on_each_cpu_mask(&rect_cpus, hardwall_setup_ipi_func, r, 1);
+ on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
 }
 
 void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
 {
  struct hardwall_info *rect;
+ struct hardwall_type *hwt;
  struct task_struct *p;
  struct siginfo info;
- int x, y;
  int cpu = smp_processor_id();
  int found_processes;
  unsigned long flags;
-
  struct pt_regs *old_regs = set_irq_regs(regs);
 
  irq_enter();
 
+ /* Figure out which network trapped. */
+ switch (fault_num) {
+#ifndef __tilepro__
+ case INT_IDN_FIREWALL:
+ hwt = &hardwall_types[HARDWALL_IDN];
+ break;
+#endif
+ case INT_UDN_FIREWALL:
+ hwt = &hardwall_types[HARDWALL_UDN];
+ break;
+ default:
+ BUG();
+ }
+ BUG_ON(hwt->disabled);
+
  /* This tile trapped a network access; find the rectangle. */
- x = cpu % smp_width;
- y = cpu / smp_width;
- spin_lock_irqsave(&hardwall_lock, flags);
- list_for_each_entry(rect, &rectangles, list) {
- if (contains(rect, x, y))
+ spin_lock_irqsave(&hwt->lock, flags);
+ list_for_each_entry(rect, &hwt->list, list) {
+ if (cpumask_test_cpu(cpu, &rect->cpumask))
  break;
  }
 
  /*
  * It shouldn't be possible not to find this cpu on the
  * rectangle list, since only cpus in rectangles get hardwalled.
- * The hardwall is only removed after the UDN is drained.
+ * The hardwall is only removed after the user network is drained.
  */
- BUG_ON(&rect->list == &rectangles);
+ BUG_ON(&rect->list == &hwt->list);
 
  /*
  * If we already started teardown on this hardwall, don't worry;
@@ -255,30 +365,32 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
  * to quiesce.
  */
  if (rect->teardown_in_progress) {
- pr_notice("cpu %d: detected hardwall violation %#lx"
+ pr_notice("cpu %d: detected %s hardwall violation %#lx"
  " while teardown already in progress\n",
- cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
+ cpu, hwt->name,
+ (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
  goto done;
  }
 
  /*
  * Kill off any process that is activated in this rectangle.
  * We bypass security to deliver the signal, since it must be
- * one of the activated processes that generated the UDN
+ * one of the activated processes that generated the user network
  * message that caused this trap, and all the activated
  * processes shared a single open file so are pretty tightly
  * bound together from a security point of view to begin with.
  */
  rect->teardown_in_progress = 1;
  wmb(); /* Ensure visibility of rectangle before notifying processes. */
- pr_notice("cpu %d: detected hardwall violation %#lx...\n",
- cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
+ pr_notice("cpu %d: detected %s hardwall violation %#lx...\n",
+ cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
  info.si_signo = SIGILL;
  info.si_errno = 0;
  info.si_code = ILL_HARDWALL;
  found_processes = 0;
- list_for_each_entry(p, &rect->task_head, thread.hardwall_list) {
- BUG_ON(p->thread.hardwall != rect);
+ list_for_each_entry(p, &rect->task_head,
+ thread.hardwall[hwt->index].list) {
+ BUG_ON(p->thread.hardwall[hwt->index].info != rect);
  if (!(p->flags & PF_EXITING)) {
  found_processes = 1;
  pr_notice("hardwall: killing %d\n", p->pid);
@@ -289,7 +401,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
  pr_notice("hardwall: no associated processes!\n");
 
  done:
- spin_unlock_irqrestore(&hardwall_lock, flags);
+ spin_unlock_irqrestore(&hwt->lock, flags);
 
  /*
  * We have to disable firewall interrupts now, or else when we
@@ -298,48 +410,87 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
  * haven't yet drained the network, and that would allow packets
  * to cross out of the hardwall region.
  */
- disable_firewall_interrupts();
+ disable_firewall_interrupts(hwt);
 
  irq_exit();
  set_irq_regs(old_regs);
 }
 
-/* Allow access from user space to the UDN. */
-void grant_network_mpls(void)
+/* Allow access from user space to the user network. */
+void grant_hardwall_mpls(struct hardwall_type *hwt)
 {
- __insn_mtspr(SPR_MPL_UDN_ACCESS_SET_0, 1);
- __insn_mtspr(SPR_MPL_UDN_AVAIL_SET_0, 1);
- __insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_0, 1);
- __insn_mtspr(SPR_MPL_UDN_TIMER_SET_0, 1);
+#ifndef __tilepro__
+ if (!hwt->is_xdn) {
+ __insn_mtspr(SPR_MPL_IPI_0_SET_0, 1);
+ return;
+ }
+#endif
+ mtspr_MPL_XDN(hwt, ACCESS_SET_0, 1);
+ mtspr_MPL_XDN(hwt, AVAIL_SET_0, 1);
+ mtspr_MPL_XDN(hwt, COMPLETE_SET_0, 1);
+ mtspr_MPL_XDN(hwt, TIMER_SET_0, 1);
 #if !CHIP_HAS_REV1_XDN()
- __insn_mtspr(SPR_MPL_UDN_REFILL_SET_0, 1);
- __insn_mtspr(SPR_MPL_UDN_CA_SET_0, 1);
+ mtspr_MPL_XDN(hwt, REFILL_SET_0, 1);
+ mtspr_MPL_XDN(hwt, CA_SET_0, 1);
 #endif
 }
 
-/* Deny access from user space to the UDN. */
-void restrict_network_mpls(void)
+/* Deny access from user space to the user network. */
+void restrict_hardwall_mpls(struct hardwall_type *hwt)
 {
- __insn_mtspr(SPR_MPL_UDN_ACCESS_SET_1, 1);
- __insn_mtspr(SPR_MPL_UDN_AVAIL_SET_1, 1);
- __insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_1, 1);
- __insn_mtspr(SPR_MPL_UDN_TIMER_SET_1, 1);
+#ifndef __tilepro__
+ if (!hwt->is_xdn) {
+ __insn_mtspr(SPR_MPL_IPI_0_SET_1, 1);
+ return;
+ }
+#endif
+ mtspr_MPL_XDN(hwt, ACCESS_SET_1, 1);
+ mtspr_MPL_XDN(hwt, AVAIL_SET_1, 1);
+ mtspr_MPL_XDN(hwt, COMPLETE_SET_1, 1);
+ mtspr_MPL_XDN(hwt, TIMER_SET_1, 1);
 #if !CHIP_HAS_REV1_XDN()
- __insn_mtspr(SPR_MPL_UDN_REFILL_SET_1, 1);
- __insn_mtspr(SPR_MPL_UDN_CA_SET_1, 1);
+ mtspr_MPL_XDN(hwt, REFILL_SET_1, 1);
+ mtspr_MPL_XDN(hwt, CA_SET_1, 1);
 #endif
 }
 
+/* Restrict or deny as necessary for the task we're switching to. */
+void hardwall_switch_tasks(struct task_struct *prev,
+ struct task_struct *next)
+{
+ int i;
+ for (i = 0; i < HARDWALL_TYPES; ++i) {
+ if (prev->thread.hardwall[i].info != NULL) {
+ if (next->thread.hardwall[i].info == NULL)
+ restrict_hardwall_mpls(&hardwall_types[i]);
+ } else if (next->thread.hardwall[i].info != NULL) {
+ grant_hardwall_mpls(&hardwall_types[i]);
+ }
+ }
+}
+
+/* Does this task have the right to IPI the given cpu? */
+int hardwall_ipi_valid(int cpu)
+{
+#ifdef __tilegx__
+ struct hardwall_info *info =
+ current->thread.hardwall[HARDWALL_IPI].info;
+ return info && cpumask_test_cpu(cpu, &info->cpumask);
+#else
+ return 0;
+#endif
+}
 
 /*
- * Code to create, activate, deactivate, and destroy hardwall rectangles.
+ * Code to create, activate, deactivate, and destroy hardwall resources.
  */
 
-/* Create a hardwall for the given rectangle */
-static struct hardwall_info *hardwall_create(
- size_t size, const unsigned char __user *bits)
+/* Create a hardwall for the given resource */
+static struct hardwall_info *hardwall_create(struct hardwall_type *hwt,
+ size_t size,
+ const unsigned char __user *bits)
 {
- struct hardwall_info *iter, *rect;
+ struct hardwall_info *iter, *info;
  struct cpumask mask;
  unsigned long flags;
  int rc;
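(The hardwall_switch_tasks() helper added in the hunk above is a per-type handoff: on context switch, each resource class is restricted if the outgoing task owned it and the incoming one does not, and granted in the opposite case. A self-contained sketch of that state machine with printf stubs for the MPL writes; the types and names here are illustrative, not the kernel's:

#include <stdio.h>

#define HARDWALL_TYPES 3
static const char *names[HARDWALL_TYPES] = { "udn", "idn", "ipi" };

struct task { const void *hardwall_info[HARDWALL_TYPES]; };

/* Stubs standing in for the grant/restrict MPL SPR writes. */
static void grant(int i)     { printf("grant %s MPLs\n", names[i]); }
static void restrict_(int i) { printf("restrict %s MPLs\n", names[i]); }

/* Same shape as hardwall_switch_tasks() above: per-type handoff. */
static void switch_tasks(const struct task *prev, const struct task *next)
{
    int i;
    for (i = 0; i < HARDWALL_TYPES; ++i) {
        if (prev->hardwall_info[i] != NULL) {
            if (next->hardwall_info[i] == NULL)
                restrict_(i);
        } else if (next->hardwall_info[i] != NULL) {
            grant(i);
        }
    }
}

int main(void)
{
    int dummy;
    struct task a = { { &dummy, NULL, NULL } };  /* owns udn */
    struct task b = { { NULL, NULL, &dummy } };  /* owns ipi */
    switch_tasks(&a, &b);  /* prints: restrict udn, grant ipi */
    return 0;
}
)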
@@ -370,55 +521,62 @@ static struct hardwall_info *hardwall_create(
  }
  }
 
- /* Allocate a new rectangle optimistically. */
- rect = kmalloc(sizeof(struct hardwall_info),
+ /* Allocate a new hardwall_info optimistically. */
+ info = kmalloc(sizeof(struct hardwall_info),
  GFP_KERNEL | __GFP_ZERO);
- if (rect == NULL)
+ if (info == NULL)
  return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&rect->task_head);
+ INIT_LIST_HEAD(&info->task_head);
+ info->type = hwt;
 
  /* Compute the rectangle size and validate that it's plausible. */
- rc = setup_rectangle(rect, &mask);
- if (rc != 0) {
- kfree(rect);
- return ERR_PTR(rc);
+ cpumask_copy(&info->cpumask, &mask);
+ info->id = find_first_bit(cpumask_bits(&mask), nr_cpumask_bits);
+ if (hwt->is_xdn) {
+ rc = check_rectangle(info, &mask);
+ if (rc != 0) {
+ kfree(info);
+ return ERR_PTR(rc);
+ }
  }
 
  /* Confirm it doesn't overlap and add it to the list. */
- spin_lock_irqsave(&hardwall_lock, flags);
- list_for_each_entry(iter, &rectangles, list) {
- if (overlaps(iter, rect)) {
- spin_unlock_irqrestore(&hardwall_lock, flags);
- kfree(rect);
+ spin_lock_irqsave(&hwt->lock, flags);
+ list_for_each_entry(iter, &hwt->list, list) {
+ if (cpumask_intersects(&iter->cpumask, &info->cpumask)) {
+ spin_unlock_irqrestore(&hwt->lock, flags);
+ kfree(info);
  return ERR_PTR(-EBUSY);
  }
  }
- list_add_tail(&rect->list, &rectangles);
- spin_unlock_irqrestore(&hardwall_lock, flags);
+ list_add_tail(&info->list, &hwt->list);
+ spin_unlock_irqrestore(&hwt->lock, flags);
 
  /* Set up appropriate hardwalling on all affected cpus. */
- hardwall_setup(rect);
+ if (hwt->is_xdn)
+ hardwall_protect_rectangle(info);
 
  /* Create a /proc/tile/hardwall entry. */
- hardwall_add_proc(rect);
+ hardwall_add_proc(info);
 
- return rect;
+ return info;
 }
 
 /* Activate a given hardwall on this cpu for this process. */
-static int hardwall_activate(struct hardwall_info *rect)
+static int hardwall_activate(struct hardwall_info *info)
 {
- int cpu, x, y;
+ int cpu;
  unsigned long flags;
  struct task_struct *p = current;
  struct thread_struct *ts = &p->thread;
+ struct hardwall_type *hwt;
 
- /* Require a rectangle. */
- if (rect == NULL)
+ /* Require a hardwall. */
+ if (info == NULL)
  return -ENODATA;
 
- /* Not allowed to activate a rectangle that is being torn down. */
- if (rect->teardown_in_progress)
+ /* Not allowed to activate a hardwall that is being torn down. */
+ if (info->teardown_in_progress)
  return -EINVAL;
 
  /*
@@ -428,78 +586,87 @@ static int hardwall_activate(struct hardwall_info *rect)
  if (cpumask_weight(&p->cpus_allowed) != 1)
  return -EPERM;
 
- /* Make sure we are bound to a cpu in this rectangle. */
+ /* Make sure we are bound to a cpu assigned to this resource. */
  cpu = smp_processor_id();
  BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
- x = cpu_x(cpu);
- y = cpu_y(cpu);
- if (!contains(rect, x, y))
+ if (!cpumask_test_cpu(cpu, &info->cpumask))
  return -EINVAL;
 
  /* If we are already bound to this hardwall, it's a no-op. */
- if (ts->hardwall) {
- BUG_ON(ts->hardwall != rect);
+ hwt = info->type;
+ if (ts->hardwall[hwt->index].info) {
+ BUG_ON(ts->hardwall[hwt->index].info != info);
  return 0;
  }
 
- /* Success! This process gets to use the user networks on this cpu. */
- ts->hardwall = rect;
- spin_lock_irqsave(&hardwall_lock, flags);
- list_add(&ts->hardwall_list, &rect->task_head);
- spin_unlock_irqrestore(&hardwall_lock, flags);
- grant_network_mpls();
- printk(KERN_DEBUG "Pid %d (%s) activated for hardwall: cpu %d\n",
- p->pid, p->comm, cpu);
+ /* Success! This process gets to use the resource on this cpu. */
+ ts->hardwall[hwt->index].info = info;
+ spin_lock_irqsave(&hwt->lock, flags);
+ list_add(&ts->hardwall[hwt->index].list, &info->task_head);
+ spin_unlock_irqrestore(&hwt->lock, flags);
+ grant_hardwall_mpls(hwt);
+ printk(KERN_DEBUG "Pid %d (%s) activated for %s hardwall: cpu %d\n",
+ p->pid, p->comm, hwt->name, cpu);
  return 0;
 }
 
 /*
- * Deactivate a task's hardwall. Must hold hardwall_lock.
+ * Deactivate a task's hardwall. Must hold lock for hardwall_type.
  * This method may be called from free_task(), so we don't want to
  * rely on too many fields of struct task_struct still being valid.
  * We assume the cpus_allowed, pid, and comm fields are still valid.
  */
-static void _hardwall_deactivate(struct task_struct *task)
+static void _hardwall_deactivate(struct hardwall_type *hwt,
+ struct task_struct *task)
 {
  struct thread_struct *ts = &task->thread;
 
  if (cpumask_weight(&task->cpus_allowed) != 1) {
- pr_err("pid %d (%s) releasing networks with"
+ pr_err("pid %d (%s) releasing %s hardwall with"
  " an affinity mask containing %d cpus!\n",
- task->pid, task->comm,
+ task->pid, task->comm, hwt->name,
  cpumask_weight(&task->cpus_allowed));
  BUG();
  }
 
- BUG_ON(ts->hardwall == NULL);
- ts->hardwall = NULL;
- list_del(&ts->hardwall_list);
+ BUG_ON(ts->hardwall[hwt->index].info == NULL);
+ ts->hardwall[hwt->index].info = NULL;
+ list_del(&ts->hardwall[hwt->index].list);
  if (task == current)
- restrict_network_mpls();
+ restrict_hardwall_mpls(hwt);
 }
 
 /* Deactivate a task's hardwall. */
-int hardwall_deactivate(struct task_struct *task)
+static int hardwall_deactivate(struct hardwall_type *hwt,
+ struct task_struct *task)
 {
  unsigned long flags;
  int activated;
 
- spin_lock_irqsave(&hardwall_lock, flags);
- activated = (task->thread.hardwall != NULL);
+ spin_lock_irqsave(&hwt->lock, flags);
+ activated = (task->thread.hardwall[hwt->index].info != NULL);
  if (activated)
- _hardwall_deactivate(task);
- spin_unlock_irqrestore(&hardwall_lock, flags);
+ _hardwall_deactivate(hwt, task);
+ spin_unlock_irqrestore(&hwt->lock, flags);
 
  if (!activated)
  return -EINVAL;
 
- printk(KERN_DEBUG "Pid %d (%s) deactivated for hardwall: cpu %d\n",
- task->pid, task->comm, smp_processor_id());
+ printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
+ task->pid, task->comm, hwt->name, smp_processor_id());
  return 0;
 }
 
-/* Stop a UDN switch before draining the network. */
-static void stop_udn_switch(void *ignored)
+void hardwall_deactivate_all(struct task_struct *task)
+{
+ int i;
+ for (i = 0; i < HARDWALL_TYPES; ++i)
+ if (task->thread.hardwall[i].info)
+ hardwall_deactivate(&hardwall_types[i], task);
+}
+
+/* Stop the switch before draining the network. */
+static void stop_xdn_switch(void *arg)
 {
 #if !CHIP_HAS_REV1_XDN()
  /* Freeze the switch and the demux. */
@@ -507,13 +674,71 @@ static void stop_udn_switch(void *ignored)
  SPR_UDN_SP_FREEZE__SP_FRZ_MASK |
  SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK |
  SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK);
+#else
+ /*
+ * Drop all packets bound for the core or off the edge.
+ * We rely on the normal hardwall protection setup code
+ * to have set the low four bits to trigger firewall interrupts,
+ * and shift those bits up to trigger "drop on send" semantics,
+ * plus adding "drop on send to core" for all switches.
+ * In practice it seems the switches latch the DIRECTION_PROTECT
+ * SPR so they won't start dropping if they're already
+ * delivering the last message to the core, but it doesn't
+ * hurt to enable it here.
+ */
+ struct hardwall_type *hwt = arg;
+ unsigned long protect = mfspr_XDN(hwt, DIRECTION_PROTECT);
+ mtspr_XDN(hwt, DIRECTION_PROTECT, (protect | C_PROTECT) << 5);
 #endif
 }
 
+static void empty_xdn_demuxes(struct hardwall_type *hwt)
+{
+#ifndef __tilepro__
+ if (hwt->is_idn) {
+ while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 0))
+ (void) __tile_idn0_receive();
+ while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 1))
+ (void) __tile_idn1_receive();
+ return;
+ }
+#endif
+ while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
+ (void) __tile_udn0_receive();
+ while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
+ (void) __tile_udn1_receive();
+ while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
+ (void) __tile_udn2_receive();
+ while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
+ (void) __tile_udn3_receive();
+}
+
 /* Drain all the state from a stopped switch. */
-static void drain_udn_switch(void *ignored)
+static void drain_xdn_switch(void *arg)
 {
-#if !CHIP_HAS_REV1_XDN()
+ struct hardwall_info *info = arg;
+ struct hardwall_type *hwt = info->type;
+
+#if CHIP_HAS_REV1_XDN()
+ /*
+ * The switches have been configured to drop any messages
+ * destined for cores (or off the edge of the rectangle).
+ * But the current message may continue to be delivered,
+ * so we wait until all the cores have finished any pending
+ * messages before we stop draining.
+ */
+ int pending = mfspr_XDN(hwt, PENDING);
+ while (pending--) {
+ empty_xdn_demuxes(hwt);
+ if (hwt->is_idn)
+ __tile_idn_send(0);
+ else
+ __tile_udn_send(0);
+ }
+ atomic_dec(&info->xdn_pending_count);
+ while (atomic_read(&info->xdn_pending_count))
+ empty_xdn_demuxes(hwt);
+#else
  int i;
  int from_tile_words, ca_count;
 
@@ -533,15 +758,7 @@ static void drain_udn_switch(void *ignored)
  (void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO);
 
  /* Empty out demuxes. */
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
- (void) __tile_udn0_receive();
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
- (void) __tile_udn1_receive();
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
- (void) __tile_udn2_receive();
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
- (void) __tile_udn3_receive();
- BUG_ON((__insn_mfspr(SPR_UDN_DATA_AVAIL) & 0xF) != 0);
+ empty_xdn_demuxes(hwt);
 
  /* Empty out catch all. */
  ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT);
@@ -563,21 +780,25 @@ static void drain_udn_switch(void *ignored)
 #endif
 }
 
-/* Reset random UDN state registers at boot up and during hardwall teardown. */
-void reset_network_state(void)
+/* Reset random XDN state registers at boot up and during hardwall teardown. */
+static void reset_xdn_network_state(struct hardwall_type *hwt)
 {
-#if !CHIP_HAS_REV1_XDN()
- /* Reset UDN coordinates to their standard value */
- unsigned int cpu = smp_processor_id();
- unsigned int x = cpu % smp_width;
- unsigned int y = cpu / smp_width;
-#endif
-
- if (udn_disabled)
+ if (hwt->disabled)
  return;
 
+ /* Clear out other random registers so we have a clean slate. */
+ mtspr_XDN(hwt, DIRECTION_PROTECT, 0);
+ mtspr_XDN(hwt, AVAIL_EN, 0);
+ mtspr_XDN(hwt, DEADLOCK_TIMEOUT, 0);
+
 #if !CHIP_HAS_REV1_XDN()
- __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
+ /* Reset UDN coordinates to their standard value */
+ {
+ unsigned int cpu = smp_processor_id();
+ unsigned int x = cpu % smp_width;
+ unsigned int y = cpu / smp_width;
+ __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
+ }
 
  /* Set demux tags to predefined values and enable them. */
  __insn_mtspr(SPR_UDN_TAG_VALID, 0xf);
@@ -585,56 +806,50 @@ void reset_network_state(void)
  __insn_mtspr(SPR_UDN_TAG_1, (1 << 1));
  __insn_mtspr(SPR_UDN_TAG_2, (1 << 2));
  __insn_mtspr(SPR_UDN_TAG_3, (1 << 3));
-#endif
 
- /* Clear out other random registers so we have a clean slate. */
- __insn_mtspr(SPR_UDN_AVAIL_EN, 0);
- __insn_mtspr(SPR_UDN_DEADLOCK_TIMEOUT, 0);
-#if !CHIP_HAS_REV1_XDN()
+ /* Set other rev0 random registers to a clean state. */
  __insn_mtspr(SPR_UDN_REFILL_EN, 0);
  __insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0);
  __insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0);
-#endif
 
  /* Start the switch and demux. */
-#if !CHIP_HAS_REV1_XDN()
  __insn_mtspr(SPR_UDN_SP_FREEZE, 0);
 #endif
 }
 
-/* Restart a UDN switch after draining. */
-static void restart_udn_switch(void *ignored)
+void reset_network_state(void)
 {
- reset_network_state();
-
- /* Disable firewall interrupts. */
- __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, 0);
- disable_firewall_interrupts();
+ reset_xdn_network_state(&hardwall_types[HARDWALL_UDN]);
+#ifndef __tilepro__
+ reset_xdn_network_state(&hardwall_types[HARDWALL_IDN]);
+#endif
 }
 
-/* Build a struct cpumask containing all valid tiles in bounding rectangle. */
-static void fill_mask(struct hardwall_info *r, struct cpumask *result)
+/* Restart an XDN switch after draining. */
+static void restart_xdn_switch(void *arg)
 {
- int x, y, cpu;
+ struct hardwall_type *hwt = arg;
 
- cpumask_clear(result);
+#if CHIP_HAS_REV1_XDN()
+ /* One last drain step to avoid races with injection and draining. */
+ empty_xdn_demuxes(hwt);
+#endif
 
- cpu = r->ulhc_y * smp_width + r->ulhc_x;
- for (y = 0; y < r->height; ++y, cpu += smp_width - r->width) {
- for (x = 0; x < r->width; ++x, ++cpu)
- cpu_online_set(cpu, result);
- }
+ reset_xdn_network_state(hwt);
+
+ /* Disable firewall interrupts. */
+ disable_firewall_interrupts(hwt);
 }
 
 /* Last reference to a hardwall is gone, so clear the network. */
-static void hardwall_destroy(struct hardwall_info *rect)
+static void hardwall_destroy(struct hardwall_info *info)
 {
  struct task_struct *task;
+ struct hardwall_type *hwt;
  unsigned long flags;
- struct cpumask mask;
 
- /* Make sure this file actually represents a rectangle. */
- if (rect == NULL)
+ /* Make sure this file actually represents a hardwall. */
+ if (info == NULL)
  return;
 
  /*
@@ -644,39 +859,53 @@ static void hardwall_destroy(struct hardwall_info *rect)
  * deactivate any remaining tasks before freeing the
  * hardwall_info object itself.
  */
- spin_lock_irqsave(&hardwall_lock, flags);
- list_for_each_entry(task, &rect->task_head, thread.hardwall_list)
- _hardwall_deactivate(task);
- spin_unlock_irqrestore(&hardwall_lock, flags);
-
- /* Drain the UDN. */
- printk(KERN_DEBUG "Clearing hardwall rectangle %dx%d %d,%d\n",
- rect->width, rect->height, rect->ulhc_x, rect->ulhc_y);
- fill_mask(rect, &mask);
- on_each_cpu_mask(&mask, stop_udn_switch, NULL, 1);
- on_each_cpu_mask(&mask, drain_udn_switch, NULL, 1);
+ hwt = info->type;
+ info->teardown_in_progress = 1;
+ spin_lock_irqsave(&hwt->lock, flags);
+ list_for_each_entry(task, &info->task_head,
+ thread.hardwall[hwt->index].list)
+ _hardwall_deactivate(hwt, task);
+ spin_unlock_irqrestore(&hwt->lock, flags);
+
+ if (hwt->is_xdn) {
+ /* Configure the switches for draining the user network. */
+ printk(KERN_DEBUG
+ "Clearing %s hardwall rectangle %dx%d %d,%d\n",
+ hwt->name, info->width, info->height,
+ info->ulhc_x, info->ulhc_y);
+ on_each_cpu_mask(&info->cpumask, stop_xdn_switch, hwt, 1);
+
+ /* Drain the network. */
+#if CHIP_HAS_REV1_XDN()
+ atomic_set(&info->xdn_pending_count,
+ cpumask_weight(&info->cpumask));
+ on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 0);
+#else
+ on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 1);
+#endif
 
  /* Restart switch and disable firewall. */
- on_each_cpu_mask(&mask, restart_udn_switch, NULL, 1);
+ on_each_cpu_mask(&info->cpumask, restart_xdn_switch, hwt, 1);
+ }
 
  /* Remove the /proc/tile/hardwall entry. */
- hardwall_remove_proc(rect);
+ hardwall_remove_proc(info);
 
- /* Now free the rectangle from the list. */
- spin_lock_irqsave(&hardwall_lock, flags);
- BUG_ON(!list_empty(&rect->task_head));
- list_del(&rect->list);
- spin_unlock_irqrestore(&hardwall_lock, flags);
- kfree(rect);
+ /* Now free the hardwall from the list. */
+ spin_lock_irqsave(&hwt->lock, flags);
+ BUG_ON(!list_empty(&info->task_head));
+ list_del(&info->list);
+ spin_unlock_irqrestore(&hwt->lock, flags);
+ kfree(info);
 }
 
 
 static int hardwall_proc_show(struct seq_file *sf, void *v)
 {
- struct hardwall_info *rect = sf->private;
+ struct hardwall_info *info = sf->private;
  char buf[256];
 
- int rc = cpulist_scnprintf(buf, sizeof(buf), &rect->cpumask);
+ int rc = cpulist_scnprintf(buf, sizeof(buf), &info->cpumask);
  buf[rc++] = '\n';
  seq_write(sf, buf, rc);
  return 0;
@@ -695,31 +924,45 @@ static const struct file_operations hardwall_proc_fops = {
  .release = single_release,
 };
 
-static void hardwall_add_proc(struct hardwall_info *rect)
+static void hardwall_add_proc(struct hardwall_info *info)
 {
  char buf[64];
- snprintf(buf, sizeof(buf), "%d", rect->id);
- proc_create_data(buf, 0444, hardwall_proc_dir,
- &hardwall_proc_fops, rect);
+ snprintf(buf, sizeof(buf), "%d", info->id);
+ proc_create_data(buf, 0444, info->type->proc_dir,
+ &hardwall_proc_fops, info);
 }
 
-static void hardwall_remove_proc(struct hardwall_info *rect)
+static void hardwall_remove_proc(struct hardwall_info *info)
 {
  char buf[64];
- snprintf(buf, sizeof(buf), "%d", rect->id);
- remove_proc_entry(buf, hardwall_proc_dir);
+ snprintf(buf, sizeof(buf), "%d", info->id);
+ remove_proc_entry(buf, info->type->proc_dir);
 }
 
 int proc_pid_hardwall(struct task_struct *task, char *buffer)
 {
- struct hardwall_info *rect = task->thread.hardwall;
- return rect ? sprintf(buffer, "%d\n", rect->id) : 0;
+ int i;
+ int n = 0;
+ for (i = 0; i < HARDWALL_TYPES; ++i) {
+ struct hardwall_info *info = task->thread.hardwall[i].info;
+ if (info)
+ n += sprintf(&buffer[n], "%s: %d\n",
+ info->type->name, info->id);
+ }
+ return n;
 }
 
 void proc_tile_hardwall_init(struct proc_dir_entry *root)
 {
- if (!udn_disabled)
- hardwall_proc_dir = proc_mkdir("hardwall", root);
+ int i;
+ for (i = 0; i < HARDWALL_TYPES; ++i) {
+ struct hardwall_type *hwt = &hardwall_types[i];
+ if (hwt->disabled)
+ continue;
+ if (hardwall_proc_dir == NULL)
+ hardwall_proc_dir = proc_mkdir("hardwall", root);
+ hwt->proc_dir = proc_mkdir(hwt->name, hardwall_proc_dir);
+ }
 }
 
 
@@ -729,34 +972,45 @@ void proc_tile_hardwall_init(struct proc_dir_entry *root)
 
 static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
 {
- struct hardwall_info *rect = file->private_data;
+ struct hardwall_info *info = file->private_data;
+ int minor = iminor(file->f_mapping->host);
+ struct hardwall_type* hwt;
 
  if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE)
  return -EINVAL;
 
+ BUILD_BUG_ON(HARDWALL_TYPES != _HARDWALL_TYPES);
+ BUILD_BUG_ON(HARDWALL_TYPES !=
+ sizeof(hardwall_types)/sizeof(hardwall_types[0]));
+
+ if (minor < 0 || minor >= HARDWALL_TYPES)
+ return -EINVAL;
+ hwt = &hardwall_types[minor];
+ WARN_ON(info && hwt != info->type);
+
  switch (_IOC_NR(a)) {
  case _HARDWALL_CREATE:
- if (udn_disabled)
+ if (hwt->disabled)
  return -ENOSYS;
- if (rect != NULL)
+ if (info != NULL)
  return -EALREADY;
- rect = hardwall_create(_IOC_SIZE(a),
- (const unsigned char __user *)b);
- if (IS_ERR(rect))
- return PTR_ERR(rect);
- file->private_data = rect;
+ info = hardwall_create(hwt, _IOC_SIZE(a),
+ (const unsigned char __user *)b);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+ file->private_data = info;
  return 0;
 
  case _HARDWALL_ACTIVATE:
- return hardwall_activate(rect);
+ return hardwall_activate(info);
 
  case _HARDWALL_DEACTIVATE:
- if (current->thread.hardwall != rect)
+ if (current->thread.hardwall[hwt->index].info != info)
  return -EINVAL;
- return hardwall_deactivate(current);
+ return hardwall_deactivate(hwt, current);
 
  case _HARDWALL_GET_ID:
- return rect ? rect->id : -EINVAL;
+ return info ? info->id : -EINVAL;
 
  default:
  return -EINVAL;
@@ -775,26 +1029,28 @@ static long hardwall_compat_ioctl(struct file *file,
 /* The user process closed the file; revoke access to user networks. */
 static int hardwall_flush(struct file *file, fl_owner_t owner)
 {
- struct hardwall_info *rect = file->private_data;
+ struct hardwall_info *info = file->private_data;
  struct task_struct *task, *tmp;
  unsigned long flags;
 
- if (rect) {
+ if (info) {
  /*
  * NOTE: if multiple threads are activated on this hardwall
  * file, the other threads will continue having access to the
- * UDN until they are context-switched out and back in again.
+ * user network until they are context-switched out and back
+ * in again.
  *
  * NOTE: A NULL files pointer means the task is being torn
  * down, so in that case we also deactivate it.
  */
- spin_lock_irqsave(&hardwall_lock, flags);
- list_for_each_entry_safe(task, tmp, &rect->task_head,
- thread.hardwall_list) {
+ struct hardwall_type *hwt = info->type;
+ spin_lock_irqsave(&hwt->lock, flags);
+ list_for_each_entry_safe(task, tmp, &info->task_head,
+ thread.hardwall[hwt->index].list) {
  if (task->files == owner || task->files == NULL)
- _hardwall_deactivate(task);
+ _hardwall_deactivate(hwt, task);
  }
- spin_unlock_irqrestore(&hardwall_lock, flags);
+ spin_unlock_irqrestore(&hwt->lock, flags);
  }
 
  return 0;
@@ -824,11 +1080,11 @@ static int __init dev_hardwall_init(void)
  int rc;
  dev_t dev;
 
- rc = alloc_chrdev_region(&dev, 0, 1, "hardwall");
+ rc = alloc_chrdev_region(&dev, 0, HARDWALL_TYPES, "hardwall");
  if (rc < 0)
  return rc;
  cdev_init(&hardwall_dev, &dev_hardwall_fops);
- rc = cdev_add(&hardwall_dev, dev, 1);
+ rc = cdev_add(&hardwall_dev, dev, HARDWALL_TYPES);
  if (rc < 0)
  return rc;
 
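(One design note on this file: the old rectangle-geometry overlaps() test is gone because cpumask_intersects() subsumes it — two reservations of the same hardwall type conflict exactly when their reserved cpu sets share a bit, whether or not those sets form rectangles. A tiny illustration with plain 64-bit masks standing in for the kernel's cpumask API:

#include <assert.h>
#include <stdint.h>

/* Two same-type reservations conflict iff their cpu sets share a cpu. */
static int masks_intersect(uint64_t a, uint64_t b)
{
    return (a & b) != 0;
}

int main(void)
{
    uint64_t rect_a = 0x0000000000000f0fULL; /* e.g. a 4x2 rectangle on an 8-wide mesh */
    uint64_t set_b  = 0x00000000000f0000ULL; /* a disjoint cpu set */

    assert(!masks_intersect(rect_a, set_b)); /* may coexist */
    assert(masks_intersect(rect_a, rect_a)); /* self-overlap conflicts */
    return 0;
}
)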
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index 1a39b7c1c87e..f71bfeeaf1a9 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -69,7 +69,7 @@ ENTRY(_start)
  }
  {
  moveli lr, lo16(1f)
- move r5, zero
+ moveli r5, CTX_PAGE_FLAG
  }
  {
  auli lr, lr, ha16(1f)
@@ -141,11 +141,11 @@ ENTRY(empty_zero_page)
 
  .macro PTE va, cpa, bits1, no_org=0
  .ifeq \no_org
- .org swapper_pg_dir + HV_L1_INDEX(\va) * HV_PTE_SIZE
+ .org swapper_pg_dir + PGD_INDEX(\va) * HV_PTE_SIZE
  .endif
  .word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
  (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
- .word (\bits1) | (HV_CPA_TO_PFN(\cpa) << (HV_PTE_INDEX_PFN - 32))
+ .word (\bits1) | (HV_CPA_TO_PTFN(\cpa) << (HV_PTE_INDEX_PTFN - 32))
  .endm
 
 __PAGE_ALIGNED_DATA
@@ -166,7 +166,7 @@ ENTRY(swapper_pg_dir)
  /* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
  PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
  (1 << (HV_PTE_INDEX_EXECUTABLE - 32))
- .org swapper_pg_dir + HV_L1_SIZE
+ .org swapper_pg_dir + PGDIR_SIZE
 END(swapper_pg_dir)
 
  /*
diff --git a/arch/tile/kernel/head_64.S b/arch/tile/kernel/head_64.S
index 6bc3a932fe45..f9a2734f7b82 100644
--- a/arch/tile/kernel/head_64.S
+++ b/arch/tile/kernel/head_64.S
@@ -114,7 +114,7 @@ ENTRY(_start)
  shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET)
  }
  {
- move r3, zero
+ moveli r3, CTX_PAGE_FLAG
  j hv_install_context
  }
 1:
@@ -210,19 +210,19 @@ ENTRY(empty_zero_page)
  .macro PTE cpa, bits1
  .quad HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED |\
  HV_PTE_GLOBAL | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) |\
- (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
+ (\bits1) | (HV_CPA_TO_PTFN(\cpa) << HV_PTE_INDEX_PTFN)
  .endm
 
 __PAGE_ALIGNED_DATA
  .align PAGE_SIZE
 ENTRY(swapper_pg_dir)
- .org swapper_pg_dir + HV_L0_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
+ .org swapper_pg_dir + PGD_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
 .Lsv_data_pmd:
  .quad 0 /* PTE temp_data_pmd - PAGE_OFFSET, 0 */
- .org swapper_pg_dir + HV_L0_INDEX(MEM_SV_START) * HV_PTE_SIZE
+ .org swapper_pg_dir + PGD_INDEX(MEM_SV_START) * HV_PTE_SIZE
 .Lsv_code_pmd:
  .quad 0 /* PTE temp_code_pmd - PAGE_OFFSET, 0 */
- .org swapper_pg_dir + HV_L0_SIZE
+ .org swapper_pg_dir + SIZEOF_PGD
  END(swapper_pg_dir)
 
  .align HV_PAGE_TABLE_ALIGN
@@ -233,11 +233,11 @@ ENTRY(temp_data_pmd)
  * permissions later.
  */
  .set addr, 0
- .rept HV_L1_ENTRIES
+ .rept PTRS_PER_PMD
  PTE addr, HV_PTE_READABLE | HV_PTE_WRITABLE
- .set addr, addr + HV_PAGE_SIZE_LARGE
+ .set addr, addr + HPAGE_SIZE
  .endr
- .org temp_data_pmd + HV_L1_SIZE
+ .org temp_data_pmd + SIZEOF_PMD
 END(temp_data_pmd)
 
  .align HV_PAGE_TABLE_ALIGN
@@ -248,11 +248,11 @@ ENTRY(temp_code_pmd)
  * permissions later.
  */
  .set addr, 0
- .rept HV_L1_ENTRIES
+ .rept PTRS_PER_PMD
  PTE addr, HV_PTE_READABLE | HV_PTE_EXECUTABLE
- .set addr, addr + HV_PAGE_SIZE_LARGE
+ .set addr, addr + HPAGE_SIZE
  .endr
- .org temp_code_pmd + HV_L1_SIZE
+ .org temp_code_pmd + SIZEOF_PMD
 END(temp_code_pmd)
 
  /*
diff --git a/arch/tile/kernel/hvglue.lds b/arch/tile/kernel/hvglue.lds
index 2b7cd0a659a9..d44c5a67a1ed 100644
--- a/arch/tile/kernel/hvglue.lds
+++ b/arch/tile/kernel/hvglue.lds
@@ -55,4 +55,5 @@ hv_store_mapping = TEXT_OFFSET + 0x106a0;
55hv_inquire_realpa = TEXT_OFFSET + 0x106c0; 55hv_inquire_realpa = TEXT_OFFSET + 0x106c0;
56hv_flush_all = TEXT_OFFSET + 0x106e0; 56hv_flush_all = TEXT_OFFSET + 0x106e0;
57hv_get_ipi_pte = TEXT_OFFSET + 0x10700; 57hv_get_ipi_pte = TEXT_OFFSET + 0x10700;
58hv_glue_internals = TEXT_OFFSET + 0x10720; 58hv_set_pte_super_shift = TEXT_OFFSET + 0x10720;
59hv_glue_internals = TEXT_OFFSET + 0x10740;
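hvglue.lds lays the hypervisor call stubs out at a fixed 0x20-byte stride, so inserting hv_set_pte_super_shift takes over the slot at 0x10720 and pushes the hv_glue_internals placeholder up by one stride. A hedged sketch of that fixed-stride table idea (the base address and slot type are illustrative; the real stubs come from the hypervisor glue, and converting an integer to a function pointer is implementation-defined):

#include <stdint.h>

#define GLUE_BASE   0x10000020UL  /* stand-in for TEXT_OFFSET + 0x10020 */
#define GLUE_STRIDE 0x20UL        /* every stub is 32 bytes apart */

typedef long (*hv_stub_t)(void);

/* Slot n of the table; appending a new service never moves old ones,
 * which is why the ABI can grow without breaking existing kernels. */
static hv_stub_t hv_slot(unsigned int n)
{
        return (hv_stub_t)(GLUE_BASE + (uintptr_t)n * GLUE_STRIDE);
}

int main(void)
{
        /* The address a caller would branch to for slot 56. */
        return hv_slot(56) == (hv_stub_t)0x10000720UL ? 0 : 1;
}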
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 30ae76e50c44..7c06d597ffd0 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -220,7 +220,9 @@ intvec_\vecname:
220 * This routine saves just the first four registers, plus the 220 * This routine saves just the first four registers, plus the
221 * stack context so we can do proper backtracing right away, 221 * stack context so we can do proper backtracing right away,
222 * and defers to handle_interrupt to save the rest. 222 * and defers to handle_interrupt to save the rest.
223 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum. 223 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum,
224 * and needs sp set to its final location at the bottom of
225 * the stack frame.
224 */ 226 */
225 addli r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP) 227 addli r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
226 wh64 r0 /* cache line 7 */ 228 wh64 r0 /* cache line 7 */
@@ -450,23 +452,6 @@ intvec_\vecname:
450 push_reg r5, r52 452 push_reg r5, r52
451 st r52, r4 453 st r52, r4
452 454
453 /* Load tp with our per-cpu offset. */
454#ifdef CONFIG_SMP
455 {
456 mfspr r20, SPR_SYSTEM_SAVE_K_0
457 moveli r21, hw2_last(__per_cpu_offset)
458 }
459 {
460 shl16insli r21, r21, hw1(__per_cpu_offset)
461 bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
462 }
463 shl16insli r21, r21, hw0(__per_cpu_offset)
464 shl3add r20, r20, r21
465 ld tp, r20
466#else
467 move tp, zero
468#endif
469
470 /* 455 /*
471 * If we will be returning to the kernel, we will need to 456 * If we will be returning to the kernel, we will need to
472 * reset the interrupt masks to the state they had before. 457 * reset the interrupt masks to the state they had before.
@@ -489,6 +474,44 @@ intvec_\vecname:
489 .endif 474 .endif
490 st r21, r32 475 st r21, r32
491 476
477 /*
478 * we've captured enough state to the stack (including in
479 * particular our EX_CONTEXT state) that we can now release
480 * the interrupt critical section and replace it with our
481 * standard "interrupts disabled" mask value. This allows
482 * synchronous interrupts (and profile interrupts) to punch
483 * through from this point onwards.
484 *
485 * It's important that no code before this point touch memory
486 * other than our own stack (to keep the invariant that this
487 * is all that gets touched under ICS), and that no code after
488 * this point reference any interrupt-specific SPR, in particular
489 * the EX_CONTEXT_K_ values.
490 */
491 .ifc \function,handle_nmi
492 IRQ_DISABLE_ALL(r20)
493 .else
494 IRQ_DISABLE(r20, r21)
495 .endif
496 mtspr INTERRUPT_CRITICAL_SECTION, zero
497
498 /* Load tp with our per-cpu offset. */
499#ifdef CONFIG_SMP
500 {
501 mfspr r20, SPR_SYSTEM_SAVE_K_0
502 moveli r21, hw2_last(__per_cpu_offset)
503 }
504 {
505 shl16insli r21, r21, hw1(__per_cpu_offset)
506 bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
507 }
508 shl16insli r21, r21, hw0(__per_cpu_offset)
509 shl3add r20, r20, r21
510 ld tp, r20
511#else
512 move tp, zero
513#endif
514
492#ifdef __COLLECT_LINKER_FEEDBACK__ 515#ifdef __COLLECT_LINKER_FEEDBACK__
493 /* 516 /*
494 * Notify the feedback routines that we were in the 517 * Notify the feedback routines that we were in the
@@ -513,21 +536,6 @@ intvec_\vecname:
513#endif 536#endif
514 537
515 /* 538 /*
516 * we've captured enough state to the stack (including in
517 * particular our EX_CONTEXT state) that we can now release
518 * the interrupt critical section and replace it with our
519 * standard "interrupts disabled" mask value. This allows
520 * synchronous interrupts (and profile interrupts) to punch
521 * through from this point onwards.
522 */
523 .ifc \function,handle_nmi
524 IRQ_DISABLE_ALL(r20)
525 .else
526 IRQ_DISABLE(r20, r21)
527 .endif
528 mtspr INTERRUPT_CRITICAL_SECTION, zero
529
530 /*
531 * Prepare the first 256 stack bytes to be rapidly accessible 539 * Prepare the first 256 stack bytes to be rapidly accessible
532 * without having to fetch the background data. 540 * without having to fetch the background data.
533 */ 541 */
@@ -736,9 +744,10 @@ STD_ENTRY(interrupt_return)
736 beqzt r30, .Lrestore_regs 744 beqzt r30, .Lrestore_regs
737 j 3f 745 j 3f
7382: TRACE_IRQS_ON 7462: TRACE_IRQS_ON
747 IRQ_ENABLE_LOAD(r20, r21)
739 movei r0, 1 748 movei r0, 1
740 mtspr INTERRUPT_CRITICAL_SECTION, r0 749 mtspr INTERRUPT_CRITICAL_SECTION, r0
741 IRQ_ENABLE(r20, r21) 750 IRQ_ENABLE_APPLY(r20, r21)
742 beqzt r30, .Lrestore_regs 751 beqzt r30, .Lrestore_regs
7433: 7523:
744 753
@@ -755,7 +764,6 @@ STD_ENTRY(interrupt_return)
755 * that will save some cycles if this turns out to be a syscall. 764 * that will save some cycles if this turns out to be a syscall.
756 */ 765 */
757.Lrestore_regs: 766.Lrestore_regs:
758 FEEDBACK_REENTER(interrupt_return) /* called from elsewhere */
759 767
760 /* 768 /*
761 * Rotate so we have one high bit and one low bit to test. 769 * Rotate so we have one high bit and one low bit to test.
@@ -1249,7 +1257,7 @@ STD_ENTRY(fill_ra_stack)
1249 int_hand INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign 1257 int_hand INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
1250 int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault 1258 int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault
1251 int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault 1259 int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
1252 int_hand INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr 1260 int_hand INT_IDN_FIREWALL, IDN_FIREWALL, do_hardwall_trap
1253 int_hand INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap 1261 int_hand INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
1254 int_hand INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt 1262 int_hand INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
1255 int_hand INT_IDN_TIMER, IDN_TIMER, bad_intr 1263 int_hand INT_IDN_TIMER, IDN_TIMER, bad_intr
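The intvec_64.S reordering above moves the per-cpu tp load to after the point where the interrupt critical section (ICS) is dropped, so that everything executed under ICS touches only the current stack, as the relocated comment requires. A C outline of the resulting entry sequence (stub functions, not kernel symbols; this compiles but only documents the ordering):

static void save_caller_regs_to_own_stack(void) { }  /* wh64/push_reg above      */
static void capture_ex_context(void)            { }  /* last use of EX_CONTEXT_K */
static void irq_disable_mask(void)              { }  /* IRQ_DISABLE(_ALL)        */
static void clear_ics(void)                     { }  /* mtspr ICS, zero          */
static void load_per_cpu_tp(void)               { }  /* may now take a fault     */
static void dispatch_handler(void)              { }

void interrupt_entry_order(void)
{
        save_caller_regs_to_own_stack();   /* ICS set: own stack only      */
        capture_ex_context();              /* no SPR reads after this      */
        irq_disable_mask();                /* install "disabled" mask      */
        clear_ics();                       /* sync interrupts can punch in */
        load_per_cpu_tp();                 /* safe outside ICS             */
        dispatch_handler();
}

int main(void) { interrupt_entry_order(); return 0; }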
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index 6255f2eab112..f0b54a934712 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -31,6 +31,8 @@
31#include <asm/pgalloc.h> 31#include <asm/pgalloc.h>
32#include <asm/cacheflush.h> 32#include <asm/cacheflush.h>
33#include <asm/checksum.h> 33#include <asm/checksum.h>
34#include <asm/tlbflush.h>
35#include <asm/homecache.h>
34#include <hv/hypervisor.h> 36#include <hv/hypervisor.h>
35 37
36 38
@@ -222,11 +224,22 @@ struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order)
222 return alloc_pages_node(0, gfp_mask, order); 224 return alloc_pages_node(0, gfp_mask, order);
223} 225}
224 226
227/*
228 * Address range in which pa=va mapping is set in setup_quasi_va_is_pa().
 229 * For tilepro, PAGE_OFFSET is used since this is the largest possible value
 230 * for tilepro, while for tilegx we limit it to the entire middle-level page
 231 * table, which we assume has been allocated and is undoubtedly large enough.
232 */
233#ifndef __tilegx__
234#define QUASI_VA_IS_PA_ADDR_RANGE PAGE_OFFSET
235#else
236#define QUASI_VA_IS_PA_ADDR_RANGE PGDIR_SIZE
237#endif
238
225static void setup_quasi_va_is_pa(void) 239static void setup_quasi_va_is_pa(void)
226{ 240{
227 HV_PTE *pgtable;
228 HV_PTE pte; 241 HV_PTE pte;
229 int i; 242 unsigned long i;
230 243
231 /* 244 /*
232 * Flush our TLB to prevent conflicts between the previous contents 245 * Flush our TLB to prevent conflicts between the previous contents
@@ -234,16 +247,22 @@ static void setup_quasi_va_is_pa(void)
234 */ 247 */
235 local_flush_tlb_all(); 248 local_flush_tlb_all();
236 249
237 /* setup VA is PA, at least up to PAGE_OFFSET */ 250 /*
238 251 * setup VA is PA, at least up to QUASI_VA_IS_PA_ADDR_RANGE.
 239 pgtable = (HV_PTE *)current->mm->pgd; 252 * Note here we assume that the level-1 page table is
 253 * defined by HPAGE_SIZE.
254 */
240 pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE); 255 pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
241 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3); 256 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
242 257 for (i = 0; i < (QUASI_VA_IS_PA_ADDR_RANGE >> HPAGE_SHIFT); i++) {
243 for (i = 0; i < pgd_index(PAGE_OFFSET); i++) { 258 unsigned long vaddr = i << HPAGE_SHIFT;
259 pgd_t *pgd = pgd_offset(current->mm, vaddr);
260 pud_t *pud = pud_offset(pgd, vaddr);
261 pte_t *ptep = (pte_t *) pmd_offset(pud, vaddr);
244 unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT); 262 unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT);
263
245 if (pfn_valid(pfn)) 264 if (pfn_valid(pfn))
246 __set_pte(&pgtable[i], pfn_pte(pfn, pte)); 265 __set_pte(ptep, pfn_pte(pfn, pte));
247 } 266 }
248} 267}
249 268
@@ -251,6 +270,7 @@ static void setup_quasi_va_is_pa(void)
251void machine_kexec(struct kimage *image) 270void machine_kexec(struct kimage *image)
252{ 271{
253 void *reboot_code_buffer; 272 void *reboot_code_buffer;
273 pte_t *ptep;
254 void (*rnk)(unsigned long, void *, unsigned long) 274 void (*rnk)(unsigned long, void *, unsigned long)
255 __noreturn; 275 __noreturn;
256 276
@@ -266,8 +286,10 @@ void machine_kexec(struct kimage *image)
266 */ 286 */
267 homecache_change_page_home(image->control_code_page, 0, 287 homecache_change_page_home(image->control_code_page, 0,
268 smp_processor_id()); 288 smp_processor_id());
269 reboot_code_buffer = vmap(&image->control_code_page, 1, 0, 289 reboot_code_buffer = page_address(image->control_code_page);
270 __pgprot(_PAGE_KERNEL | _PAGE_EXECUTABLE)); 290 BUG_ON(reboot_code_buffer == NULL);
291 ptep = virt_to_pte(NULL, (unsigned long)reboot_code_buffer);
292 __set_pte(ptep, pte_mkexec(*ptep));
271 memcpy(reboot_code_buffer, relocate_new_kernel, 293 memcpy(reboot_code_buffer, relocate_new_kernel,
272 relocate_new_kernel_size); 294 relocate_new_kernel_size);
273 __flush_icache_range( 295 __flush_icache_range(
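setup_quasi_va_is_pa() now walks the real pgd/pud/pmd chain instead of treating the pgd as a flat HV_PTE array, and bounds the identity map by QUASI_VA_IS_PA_ADDR_RANGE. A stand-alone sketch of the loop's addressing arithmetic (shift values and the range are assumed for illustration; the kernel installs real PTEs via __set_pte()):

#include <stdio.h>

#define PAGE_SHIFT  16                 /* assumed base page shift */
#define HPAGE_SHIFT 24                 /* assumed huge page shift */
#define RANGE       (1UL << 30)        /* stand-in for PGDIR_SIZE */

int main(void)
{
        unsigned long i;
        for (i = 0; i < (RANGE >> HPAGE_SHIFT); i++) {
                unsigned long vaddr = i << HPAGE_SHIFT;
                unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT);
                /* identity mapping: va == pa for each valid huge page */
                printf("va %#010lx -> pfn %#lx\n", vaddr, pfn);
        }
        return 0;
}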
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index 98d476920106..001cbfa10ac6 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -159,7 +159,17 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
159 159
160 switch (ELF_R_TYPE(rel[i].r_info)) { 160 switch (ELF_R_TYPE(rel[i].r_info)) {
161 161
162#define MUNGE(func) (*location = ((*location & ~func(-1)) | func(value))) 162#ifdef __LITTLE_ENDIAN
163# define MUNGE(func) \
164 (*location = ((*location & ~func(-1)) | func(value)))
165#else
166/*
167 * Instructions are always little-endian, so when we read them as data,
168 * we have to swap them around before and after modifying them.
169 */
170# define MUNGE(func) \
171 (*location = swab64((swab64(*location) & ~func(-1)) | func(value)))
172#endif
163 173
164#ifndef __tilegx__ 174#ifndef __tilegx__
165 case R_TILE_32: 175 case R_TILE_32:
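Tile instruction bundles are stored little-endian regardless of the kernel's data endianness, so the big-endian MUNGE swaps the bundle into instruction order, patches the relocation field, and swaps it back. A self-contained rendering of that read-swap-modify-swap-write shape (the field() encoder is a stand-in for the tile-desc create_* helpers the real macro is handed):

#include <stdint.h>

static inline uint64_t swab64(uint64_t x) { return __builtin_bswap64(x); }

/* Illustrative relocation field: field(-1) is the mask, field(value)
 * the encoded operand, matching how MUNGE(func) uses its argument. */
static uint64_t field(int64_t v) { return (uint64_t)v & 0xffffULL; }

static void munge_big_endian(uint64_t *location, int64_t value)
{
        uint64_t bundle = swab64(*location);            /* to instruction order */
        bundle = (bundle & ~field(-1)) | field(value);  /* patch the field      */
        *location = swab64(bundle);                     /* back to memory order */
}

int main(void)
{
        uint64_t insn = 0;
        munge_big_endian(&insn, 0x1234);
        return insn != 0 ? 0 : 1;
}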
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c
index 446a7f52cc11..dafc447b5125 100644
--- a/arch/tile/kernel/proc.c
+++ b/arch/tile/kernel/proc.c
@@ -22,6 +22,7 @@
22#include <linux/proc_fs.h> 22#include <linux/proc_fs.h>
23#include <linux/sysctl.h> 23#include <linux/sysctl.h>
24#include <linux/hardirq.h> 24#include <linux/hardirq.h>
25#include <linux/hugetlb.h>
25#include <linux/mman.h> 26#include <linux/mman.h>
26#include <asm/unaligned.h> 27#include <asm/unaligned.h>
27#include <asm/pgtable.h> 28#include <asm/pgtable.h>
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index f572c19c4082..ba1023d8a021 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -128,10 +128,10 @@ void arch_release_thread_info(struct thread_info *info)
128 * Calling deactivate here just frees up the data structures. 128 * Calling deactivate here just frees up the data structures.
129 * If the task we're freeing held the last reference to a 129 * If the task we're freeing held the last reference to a
130 * hardwall fd, it would have been released prior to this point 130 * hardwall fd, it would have been released prior to this point
131 * anyway via exit_files(), and "hardwall" would be NULL by now. 131 * anyway via exit_files(), and the hardwall_task.info pointers
132 * would be NULL by now.
132 */ 133 */
133 if (info->task->thread.hardwall) 134 hardwall_deactivate_all(info->task);
134 hardwall_deactivate(info->task);
135#endif 135#endif
136 136
137 if (step_state) { 137 if (step_state) {
@@ -245,7 +245,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
245 245
246#ifdef CONFIG_HARDWALL 246#ifdef CONFIG_HARDWALL
247 /* New thread does not own any networks. */ 247 /* New thread does not own any networks. */
248 p->thread.hardwall = NULL; 248 memset(&p->thread.hardwall[0], 0,
249 sizeof(struct hardwall_task) * HARDWALL_TYPES);
249#endif 250#endif
250 251
251 252
@@ -515,12 +516,7 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
515 516
516#ifdef CONFIG_HARDWALL 517#ifdef CONFIG_HARDWALL
517 /* Enable or disable access to the network registers appropriately. */ 518 /* Enable or disable access to the network registers appropriately. */
518 if (prev->thread.hardwall != NULL) { 519 hardwall_switch_tasks(prev, next);
519 if (next->thread.hardwall == NULL)
520 restrict_network_mpls();
521 } else if (next->thread.hardwall != NULL) {
522 grant_network_mpls();
523 }
524#endif 520#endif
525 521
526 /* 522 /*
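With hardwall generalized beyond the UDN, per-thread state becomes an array indexed by hardwall type, which is why copy_thread() memsets the whole array and the context-switch path collapses to a single hardwall_switch_tasks() call. A hedged sketch of that per-type layout (three types, per the tilegx UDN/IDN/IPI description in this merge; the struct fields are illustrative):

#include <string.h>

enum { HARDWALL_TYPE_UDN, HARDWALL_TYPE_IDN, HARDWALL_TYPE_IPI,
       HARDWALL_TYPES };

struct hardwall_task_sketch {
        void *info;             /* owning hardwall, or NULL if none */
};

struct thread_sketch {
        struct hardwall_task_sketch hardwall[HARDWALL_TYPES];
};

/* Mirrors the copy_thread() hunk: a new thread owns no networks. */
static void init_thread_hardwall(struct thread_sketch *t)
{
        memset(&t->hardwall[0], 0,
               sizeof(struct hardwall_task_sketch) * HARDWALL_TYPES);
}

int main(void)
{
        struct thread_sketch t;
        init_thread_hardwall(&t);
        return t.hardwall[HARDWALL_TYPE_UDN].info == 0 ? 0 : 1;
}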
diff --git a/arch/tile/kernel/relocate_kernel.S b/arch/tile/kernel/relocate_kernel_32.S
index 010b418515f8..010b418515f8 100644
--- a/arch/tile/kernel/relocate_kernel.S
+++ b/arch/tile/kernel/relocate_kernel_32.S
diff --git a/arch/tile/kernel/relocate_kernel_64.S b/arch/tile/kernel/relocate_kernel_64.S
new file mode 100644
index 000000000000..1c09a4f5a4ea
--- /dev/null
+++ b/arch/tile/kernel/relocate_kernel_64.S
@@ -0,0 +1,260 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * copy new kernel into place and then call hv_reexec
15 *
16 */
17
18#include <linux/linkage.h>
19#include <arch/chip.h>
20#include <asm/page.h>
21#include <hv/hypervisor.h>
22
23#undef RELOCATE_NEW_KERNEL_VERBOSE
24
25STD_ENTRY(relocate_new_kernel)
26
27 move r30, r0 /* page list */
28 move r31, r1 /* address of page we are on */
29 move r32, r2 /* start address of new kernel */
30
31 shrui r1, r1, PAGE_SHIFT
32 addi r1, r1, 1
33 shli sp, r1, PAGE_SHIFT
34 addi sp, sp, -8
35 /* we now have a stack (whether we need one or not) */
36
37 moveli r40, hw2_last(hv_console_putc)
38 shl16insli r40, r40, hw1(hv_console_putc)
39 shl16insli r40, r40, hw0(hv_console_putc)
40
41#ifdef RELOCATE_NEW_KERNEL_VERBOSE
42 moveli r0, 'r'
43 jalr r40
44
45 moveli r0, '_'
46 jalr r40
47
48 moveli r0, 'n'
49 jalr r40
50
51 moveli r0, '_'
52 jalr r40
53
54 moveli r0, 'k'
55 jalr r40
56
57 moveli r0, '\n'
58 jalr r40
59#endif
60
61 /*
62 * Throughout this code r30 is pointer to the element of page
63 * list we are working on.
64 *
65 * Normally we get to the next element of the page list by
66 * incrementing r30 by eight. The exception is if the element
67 * on the page list is an IND_INDIRECTION in which case we use
68 * the element with the low bits masked off as the new value
69 * of r30.
70 *
71 * To get this started, we need the value passed to us (which
72 * will always be an IND_INDIRECTION) in memory somewhere with
73 * r30 pointing at it. To do that, we push the value passed
74 * to us on the stack and make r30 point to it.
75 */
76
77 st sp, r30
78 move r30, sp
79 addi sp, sp, -16
80
81#if CHIP_HAS_CBOX_HOME_MAP()
82 /*
83 * On TILE-GX, we need to flush all tiles' caches, since we may
84 * have been doing hash-for-home caching there. Note that we
85 * must do this _after_ we're completely done modifying any memory
86 * other than our output buffer (which we know is locally cached).
87 * We want the caches to be fully clean when we do the reexec,
88 * because the hypervisor is going to do this flush again at that
89 * point, and we don't want that second flush to overwrite any memory.
90 */
91 {
92 move r0, zero /* cache_pa */
93 moveli r1, hw2_last(HV_FLUSH_EVICT_L2)
94 }
95 {
96 shl16insli r1, r1, hw1(HV_FLUSH_EVICT_L2)
97 movei r2, -1 /* cache_cpumask; -1 means all client tiles */
98 }
99 {
100 shl16insli r1, r1, hw0(HV_FLUSH_EVICT_L2) /* cache_control */
101 move r3, zero /* tlb_va */
102 }
103 {
104 move r4, zero /* tlb_length */
105 move r5, zero /* tlb_pgsize */
106 }
107 {
108 move r6, zero /* tlb_cpumask */
109 move r7, zero /* asids */
110 }
111 {
112 moveli r20, hw2_last(hv_flush_remote)
113 move r8, zero /* asidcount */
114 }
115 shl16insli r20, r20, hw1(hv_flush_remote)
116 shl16insli r20, r20, hw0(hv_flush_remote)
117
118 jalr r20
119#endif
120
121 /* r33 is destination pointer, default to zero */
122
123 moveli r33, 0
124
125.Lloop: ld r10, r30
126
127 andi r9, r10, 0xf /* low 4 bits tell us what type it is */
128 xor r10, r10, r9 /* r10 is now value with low 4 bits stripped */
129
130 cmpeqi r0, r9, 0x1 /* IND_DESTINATION */
131 beqzt r0, .Ltry2
132
133 move r33, r10
134
135#ifdef RELOCATE_NEW_KERNEL_VERBOSE
136 moveli r0, 'd'
137 jalr r40
138#endif
139
140 addi r30, r30, 8
141 j .Lloop
142
143.Ltry2:
144 cmpeqi r0, r9, 0x2 /* IND_INDIRECTION */
145 beqzt r0, .Ltry4
146
147 move r30, r10
148
149#ifdef RELOCATE_NEW_KERNEL_VERBOSE
150 moveli r0, 'i'
151 jalr r40
152#endif
153
154 j .Lloop
155
156.Ltry4:
157 cmpeqi r0, r9, 0x4 /* IND_DONE */
158 beqzt r0, .Ltry8
159
160 mf
161
162#ifdef RELOCATE_NEW_KERNEL_VERBOSE
163 moveli r0, 'D'
164 jalr r40
165 moveli r0, '\n'
166 jalr r40
167#endif
168
169 move r0, r32
170
171 moveli r41, hw2_last(hv_reexec)
172 shl16insli r41, r41, hw1(hv_reexec)
173 shl16insli r41, r41, hw0(hv_reexec)
174
175 jalr r41
176
177 /* we should not get here */
178
179 moveli r0, '?'
180 jalr r40
181 moveli r0, '\n'
182 jalr r40
183
184 j .Lhalt
185
186.Ltry8: cmpeqi r0, r9, 0x8 /* IND_SOURCE */
187 beqz r0, .Lerr /* unknown type */
188
189 /* copy page at r10 to page at r33 */
190
191 move r11, r33
192
193 moveli r0, hw2_last(PAGE_SIZE)
194 shl16insli r0, r0, hw1(PAGE_SIZE)
195 shl16insli r0, r0, hw0(PAGE_SIZE)
196 add r33, r33, r0
197
198 /* copy word at r10 to word at r11 until r11 equals r33 */
199
 200 /* We know the page size must be a multiple of 64 bytes, so we can
 201 * unroll the copy 8 times safely without any edge-case checking.
202 *
203 * Issue a flush of the destination every 8 words to avoid
204 * incoherence when starting the new kernel. (Now this is
205 * just good paranoia because the hv_reexec call will also
206 * take care of this.)
207 */
208
2091:
210 { ld r0, r10; addi r10, r10, 8 }
211 { st r11, r0; addi r11, r11, 8 }
212 { ld r0, r10; addi r10, r10, 8 }
213 { st r11, r0; addi r11, r11, 8 }
214 { ld r0, r10; addi r10, r10, 8 }
215 { st r11, r0; addi r11, r11, 8 }
216 { ld r0, r10; addi r10, r10, 8 }
217 { st r11, r0; addi r11, r11, 8 }
218 { ld r0, r10; addi r10, r10, 8 }
219 { st r11, r0; addi r11, r11, 8 }
220 { ld r0, r10; addi r10, r10, 8 }
221 { st r11, r0; addi r11, r11, 8 }
222 { ld r0, r10; addi r10, r10, 8 }
223 { st r11, r0; addi r11, r11, 8 }
224 { ld r0, r10; addi r10, r10, 8 }
225 { st r11, r0 }
226 { flush r11 ; addi r11, r11, 8 }
227
228 cmpeq r0, r33, r11
229 beqzt r0, 1b
230
231#ifdef RELOCATE_NEW_KERNEL_VERBOSE
232 moveli r0, 's'
233 jalr r40
234#endif
235
236 addi r30, r30, 8
237 j .Lloop
238
239
240.Lerr: moveli r0, 'e'
241 jalr r40
242 moveli r0, 'r'
243 jalr r40
244 moveli r0, 'r'
245 jalr r40
246 moveli r0, '\n'
247 jalr r40
248.Lhalt:
249 moveli r41, hw2_last(hv_halt)
250 shl16insli r41, r41, hw1(hv_halt)
251 shl16insli r41, r41, hw0(hv_halt)
252
253 jalr r41
254 STD_ENDPROC(relocate_new_kernel)
255
256 .section .rodata,"a"
257
258 .globl relocate_new_kernel_size
259relocate_new_kernel_size:
260 .long .Lend_relocate_new_kernel - relocate_new_kernel
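The IND_SOURCE path above copies one page in 64-byte chunks, flushing an address within each line just written so the image is clean in memory before hv_reexec re-flushes and jumps. A C rendering of the loop shape (cacheline_flush() stands in for the tile flush instruction and is a no-op here so the sketch stays portable):

#include <stddef.h>
#include <stdint.h>

static void cacheline_flush(const void *addr) { (void)addr; }

static void copy_page_flushing(uint64_t *dst, const uint64_t *src,
                               size_t page_size)
{
        /* page_size is a multiple of 64 bytes, so no edge cases. */
        const uint64_t *end = dst + page_size / sizeof(*dst);
        while (dst < end) {
                for (int i = 0; i < 8; i++)   /* 8 doublewords = 64 bytes */
                        *dst++ = *src++;
                cacheline_flush(dst - 1);     /* within the line just stored */
        }
}

int main(void)
{
        static uint64_t from[8192 / 8], to[8192 / 8];
        copy_page_flushing(to, from, sizeof(from));
        return 0;
}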
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 98d80eb49ddb..6098ccc59be2 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -28,6 +28,7 @@
28#include <linux/highmem.h> 28#include <linux/highmem.h>
29#include <linux/smp.h> 29#include <linux/smp.h>
30#include <linux/timex.h> 30#include <linux/timex.h>
31#include <linux/hugetlb.h>
31#include <asm/setup.h> 32#include <asm/setup.h>
32#include <asm/sections.h> 33#include <asm/sections.h>
33#include <asm/cacheflush.h> 34#include <asm/cacheflush.h>
@@ -49,9 +50,6 @@ char chip_model[64] __write_once;
49struct pglist_data node_data[MAX_NUMNODES] __read_mostly; 50struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
50EXPORT_SYMBOL(node_data); 51EXPORT_SYMBOL(node_data);
51 52
52/* We only create bootmem data on node 0. */
53static bootmem_data_t __initdata node0_bdata;
54
55/* Information on the NUMA nodes that we compute early */ 53/* Information on the NUMA nodes that we compute early */
56unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES]; 54unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES];
57unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES]; 55unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES];
@@ -534,37 +532,96 @@ static void __init setup_memory(void)
534#endif 532#endif
535} 533}
536 534
537static void __init setup_bootmem_allocator(void) 535/*
536 * On 32-bit machines, we only put bootmem on the low controller,
537 * since PAs > 4GB can't be used in bootmem. In principle one could
538 * imagine, e.g., multiple 1 GB controllers all of which could support
539 * bootmem, but in practice using controllers this small isn't a
540 * particularly interesting scenario, so we just keep it simple and
541 * use only the first controller for bootmem on 32-bit machines.
542 */
543static inline int node_has_bootmem(int nid)
538{ 544{
539 unsigned long bootmap_size, first_alloc_pfn, last_alloc_pfn; 545#ifdef CONFIG_64BIT
546 return 1;
547#else
548 return nid == 0;
549#endif
550}
540 551
541 /* Provide a node 0 bdata. */ 552static inline unsigned long alloc_bootmem_pfn(int nid,
542 NODE_DATA(0)->bdata = &node0_bdata; 553 unsigned long size,
554 unsigned long goal)
555{
556 void *kva = __alloc_bootmem_node(NODE_DATA(nid), size,
557 PAGE_SIZE, goal);
558 unsigned long pfn = kaddr_to_pfn(kva);
559 BUG_ON(goal && PFN_PHYS(pfn) != goal);
560 return pfn;
561}
543 562
544#ifdef CONFIG_PCI 563static void __init setup_bootmem_allocator_node(int i)
545 /* Don't let boot memory alias the PCI region. */ 564{
546 last_alloc_pfn = min(max_low_pfn, pci_reserve_start_pfn); 565 unsigned long start, end, mapsize, mapstart;
566
567 if (node_has_bootmem(i)) {
568 NODE_DATA(i)->bdata = &bootmem_node_data[i];
569 } else {
570 /* Share controller zero's bdata for now. */
571 NODE_DATA(i)->bdata = &bootmem_node_data[0];
572 return;
573 }
574
575 /* Skip up to after the bss in node 0. */
576 start = (i == 0) ? min_low_pfn : node_start_pfn[i];
577
578 /* Only lowmem, if we're a HIGHMEM build. */
579#ifdef CONFIG_HIGHMEM
580 end = node_lowmem_end_pfn[i];
547#else 581#else
548 last_alloc_pfn = max_low_pfn; 582 end = node_end_pfn[i];
549#endif 583#endif
550 584
551 /* 585 /* No memory here. */
552 * Initialize the boot-time allocator (with low memory only): 586 if (end == start)
553 * The first argument says where to put the bitmap, and the 587 return;
554 * second says where the end of allocatable memory is. 588
555 */ 589 /* Figure out where the bootmem bitmap is located. */
556 bootmap_size = init_bootmem(min_low_pfn, last_alloc_pfn); 590 mapsize = bootmem_bootmap_pages(end - start);
591 if (i == 0) {
592 /* Use some space right before the heap on node 0. */
593 mapstart = start;
594 start += mapsize;
595 } else {
596 /* Allocate bitmap on node 0 to avoid page table issues. */
597 mapstart = alloc_bootmem_pfn(0, PFN_PHYS(mapsize), 0);
598 }
557 599
600 /* Initialize a node. */
601 init_bootmem_node(NODE_DATA(i), mapstart, start, end);
602
603 /* Free all the space back into the allocator. */
604 free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start));
605
606#if defined(CONFIG_PCI)
558 /* 607 /*
559 * Let the bootmem allocator use all the space we've given it 608 * Throw away any memory aliased by the PCI region. FIXME: this
560 * except for its own bitmap. 609 * is a temporary hack to work around bug 10502, and needs to be
610 * fixed properly.
561 */ 611 */
562 first_alloc_pfn = min_low_pfn + PFN_UP(bootmap_size); 612 if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start)
563 if (first_alloc_pfn >= last_alloc_pfn) 613 reserve_bootmem(PFN_PHYS(pci_reserve_start_pfn),
564 early_panic("Not enough memory on controller 0 for bootmem\n"); 614 PFN_PHYS(pci_reserve_end_pfn -
615 pci_reserve_start_pfn),
616 BOOTMEM_EXCLUSIVE);
617#endif
618}
565 619
566 free_bootmem(PFN_PHYS(first_alloc_pfn), 620static void __init setup_bootmem_allocator(void)
567 PFN_PHYS(last_alloc_pfn - first_alloc_pfn)); 621{
622 int i;
623 for (i = 0; i < MAX_NUMNODES; ++i)
624 setup_bootmem_allocator_node(i);
568 625
569#ifdef CONFIG_KEXEC 626#ifdef CONFIG_KEXEC
570 if (crashk_res.start != crashk_res.end) 627 if (crashk_res.start != crashk_res.end)
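setup_bootmem_allocator_node() sizes each node's bitmap with bootmem_bootmap_pages() and, for nodes other than 0, places the bitmap on node 0 so no page tables are needed on the high-PA controllers. A sketch of the sizing arithmetic (one bit per page, rounded up to whole pages; the PAGE_SHIFT here is an assumed value):

#include <stdio.h>

#define PAGE_SHIFT 16
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned long bootmap_pages(unsigned long pages)
{
        unsigned long bytes = (pages + 7) / 8;           /* one bit per page  */
        return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;    /* round up to pages */
}

int main(void)
{
        /* e.g. a 16 GB controller with 64 KB pages */
        unsigned long node_pages = (16UL << 30) >> PAGE_SHIFT;
        printf("bootmem bitmap: %lu page(s)\n", bootmap_pages(node_pages));
        return 0;
}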
@@ -595,14 +652,6 @@ static int __init percpu_size(void)
595 return size; 652 return size;
596} 653}
597 654
598static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal)
599{
600 void *kva = __alloc_bootmem(size, PAGE_SIZE, goal);
601 unsigned long pfn = kaddr_to_pfn(kva);
602 BUG_ON(goal && PFN_PHYS(pfn) != goal);
603 return pfn;
604}
605
606static void __init zone_sizes_init(void) 655static void __init zone_sizes_init(void)
607{ 656{
608 unsigned long zones_size[MAX_NR_ZONES] = { 0 }; 657 unsigned long zones_size[MAX_NR_ZONES] = { 0 };
@@ -640,21 +689,22 @@ static void __init zone_sizes_init(void)
640 * though, there'll be no lowmem, so we just alloc_bootmem 689 * though, there'll be no lowmem, so we just alloc_bootmem
641 * the memmap. There will be no percpu memory either. 690 * the memmap. There will be no percpu memory either.
642 */ 691 */
643 if (__pfn_to_highbits(start) == 0) { 692 if (i != 0 && cpu_isset(i, isolnodes)) {
644 /* In low PAs, allocate via bootmem. */ 693 node_memmap_pfn[i] =
694 alloc_bootmem_pfn(0, memmap_size, 0);
695 BUG_ON(node_percpu[i] != 0);
696 } else if (node_has_bootmem(start)) {
645 unsigned long goal = 0; 697 unsigned long goal = 0;
646 node_memmap_pfn[i] = 698 node_memmap_pfn[i] =
647 alloc_bootmem_pfn(memmap_size, goal); 699 alloc_bootmem_pfn(i, memmap_size, 0);
648 if (kdata_huge) 700 if (kdata_huge)
649 goal = PFN_PHYS(lowmem_end) - node_percpu[i]; 701 goal = PFN_PHYS(lowmem_end) - node_percpu[i];
650 if (node_percpu[i]) 702 if (node_percpu[i])
651 node_percpu_pfn[i] = 703 node_percpu_pfn[i] =
652 alloc_bootmem_pfn(node_percpu[i], goal); 704 alloc_bootmem_pfn(i, node_percpu[i],
653 } else if (cpu_isset(i, isolnodes)) { 705 goal);
654 node_memmap_pfn[i] = alloc_bootmem_pfn(memmap_size, 0);
655 BUG_ON(node_percpu[i] != 0);
656 } else { 706 } else {
657 /* In high PAs, just reserve some pages. */ 707 /* In non-bootmem zones, just reserve some pages. */
658 node_memmap_pfn[i] = node_free_pfn[i]; 708 node_memmap_pfn[i] = node_free_pfn[i];
659 node_free_pfn[i] += PFN_UP(memmap_size); 709 node_free_pfn[i] += PFN_UP(memmap_size);
660 if (!kdata_huge) { 710 if (!kdata_huge) {
@@ -678,16 +728,9 @@ static void __init zone_sizes_init(void)
678 zones_size[ZONE_NORMAL] = end - start; 728 zones_size[ZONE_NORMAL] = end - start;
679#endif 729#endif
680 730
681 /* 731 /* Take zone metadata from controller 0 if we're isolnode. */
682 * Everyone shares node 0's bootmem allocator, but 732 if (node_isset(i, isolnodes))
683 * we use alloc_remap(), above, to put the actual 733 NODE_DATA(i)->bdata = &bootmem_node_data[0];
684 * struct page array on the individual controllers,
685 * which is most of the data that we actually care about.
686 * We can't place bootmem allocators on the other
687 * controllers since the bootmem allocator can only
688 * operate on 32-bit physical addresses.
689 */
690 NODE_DATA(i)->bdata = NODE_DATA(0)->bdata;
691 734
692 free_area_init_node(i, zones_size, start, NULL); 735 free_area_init_node(i, zones_size, start, NULL);
693 printk(KERN_DEBUG " Normal zone: %ld per-cpu pages\n", 736 printk(KERN_DEBUG " Normal zone: %ld per-cpu pages\n",
@@ -870,6 +913,22 @@ subsys_initcall(topology_init);
870 913
871#endif /* CONFIG_NUMA */ 914#endif /* CONFIG_NUMA */
872 915
916/*
917 * Initialize hugepage support on this cpu. We do this on all cores
918 * early in boot: before argument parsing for the boot cpu, and after
919 * argument parsing but before the init functions run on the secondaries.
920 * So the values we set up here in the hypervisor may be overridden on
921 * the boot cpu as arguments are parsed.
922 */
923static __cpuinit void init_super_pages(void)
924{
925#ifdef CONFIG_HUGETLB_SUPER_PAGES
926 int i;
927 for (i = 0; i < HUGE_SHIFT_ENTRIES; ++i)
928 hv_set_pte_super_shift(i, huge_shift[i]);
929#endif
930}
931
873/** 932/**
874 * setup_cpu() - Do all necessary per-cpu, tile-specific initialization. 933 * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
875 * @boot: Is this the boot cpu? 934 * @boot: Is this the boot cpu?
@@ -924,6 +983,8 @@ void __cpuinit setup_cpu(int boot)
924 /* Reset the network state on this cpu. */ 983 /* Reset the network state on this cpu. */
925 reset_network_state(); 984 reset_network_state();
926#endif 985#endif
986
987 init_super_pages();
927} 988}
928 989
929#ifdef CONFIG_BLK_DEV_INITRD 990#ifdef CONFIG_BLK_DEV_INITRD
@@ -1412,13 +1473,13 @@ void __init setup_per_cpu_areas(void)
1412 for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) { 1473 for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {
1413 1474
1414 /* Update the vmalloc mapping and page home. */ 1475 /* Update the vmalloc mapping and page home. */
1415 pte_t *ptep = 1476 unsigned long addr = (unsigned long)ptr + i;
1416 virt_to_pte(NULL, (unsigned long)ptr + i); 1477 pte_t *ptep = virt_to_pte(NULL, addr);
1417 pte_t pte = *ptep; 1478 pte_t pte = *ptep;
1418 BUG_ON(pfn != pte_pfn(pte)); 1479 BUG_ON(pfn != pte_pfn(pte));
1419 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3); 1480 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
1420 pte = set_remote_cache_cpu(pte, cpu); 1481 pte = set_remote_cache_cpu(pte, cpu);
1421 set_pte(ptep, pte); 1482 set_pte_at(&init_mm, addr, ptep, pte);
1422 1483
1423 /* Update the lowmem mapping for consistency. */ 1484 /* Update the lowmem mapping for consistency. */
1424 lowmem_va = (unsigned long)pfn_to_kaddr(pfn); 1485 lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
@@ -1431,7 +1492,7 @@ void __init setup_per_cpu_areas(void)
1431 BUG_ON(pte_huge(*ptep)); 1492 BUG_ON(pte_huge(*ptep));
1432 } 1493 }
1433 BUG_ON(pfn != pte_pfn(*ptep)); 1494 BUG_ON(pfn != pte_pfn(*ptep));
1434 set_pte(ptep, pte); 1495 set_pte_at(&init_mm, lowmem_va, ptep, pte);
1435 } 1496 }
1436 } 1497 }
1437 1498
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 89529c9f0605..27742e87e255 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -172,9 +172,6 @@ static tile_bundle_bits rewrite_load_store_unaligned(
172 return (tilepro_bundle_bits) 0; 172 return (tilepro_bundle_bits) 0;
173 } 173 }
174 174
175#ifndef __LITTLE_ENDIAN
176# error We assume little-endian representation with copy_xx_user size 2 here
177#endif
178 /* Handle unaligned load/store */ 175 /* Handle unaligned load/store */
179 if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) { 176 if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
180 unsigned short val_16; 177 unsigned short val_16;
@@ -195,8 +192,19 @@ static tile_bundle_bits rewrite_load_store_unaligned(
195 state->update = 1; 192 state->update = 1;
196 } 193 }
197 } else { 194 } else {
195 unsigned short val_16;
198 val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg]; 196 val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
199 err = copy_to_user(addr, &val, size); 197 switch (size) {
198 case 2:
199 val_16 = val;
200 err = copy_to_user(addr, &val_16, sizeof(val_16));
201 break;
202 case 4:
203 err = copy_to_user(addr, &val, sizeof(val));
204 break;
205 default:
206 BUG();
207 }
200 } 208 }
201 209
202 if (err) { 210 if (err) {
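The removed #error marks why the old store path was endian-specific: copying `size` bytes straight out of a full-width long only yields the low halfword on little-endian. The rewrite goes through a size-matched temporary, which this small demonstration contrasts:

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned long val = 0x1234;
        unsigned short val_16 = (unsigned short)val;
        unsigned short from_long, from_short;

        memcpy(&from_long, &val, sizeof(from_long));       /* endian-dependent */
        memcpy(&from_short, &val_16, sizeof(from_short));  /* always 0x1234    */

        printf("first two bytes of long: %#x, of short: %#x\n",
               from_long, from_short);
        return 0;
}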
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 91da0f721958..cbc73a8b8fe1 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -203,7 +203,7 @@ void __init ipi_init(void)
203 if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0) 203 if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
204 panic("Failed to initialize IPI for cpu %d\n", cpu); 204 panic("Failed to initialize IPI for cpu %d\n", cpu);
205 205
206 offset = hv_pte_get_pfn(pte) << PAGE_SHIFT; 206 offset = PFN_PHYS(pte_pfn(pte));
207 ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte); 207 ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
208 } 208 }
209#endif 209#endif
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index cb44ba7ccd2d..b08095b402d6 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -32,11 +32,17 @@
32#include <asm/syscalls.h> 32#include <asm/syscalls.h>
33#include <asm/pgtable.h> 33#include <asm/pgtable.h>
34#include <asm/homecache.h> 34#include <asm/homecache.h>
35#include <asm/cachectl.h>
35#include <arch/chip.h> 36#include <arch/chip.h>
36 37
37SYSCALL_DEFINE0(flush_cache) 38SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len,
39 unsigned long, flags)
38{ 40{
39 homecache_evict(cpumask_of(smp_processor_id())); 41 if (flags & DCACHE)
42 homecache_evict(cpumask_of(smp_processor_id()));
43 if (flags & ICACHE)
44 flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(current->mm),
45 0, 0, 0, NULL, NULL, 0);
40 return 0; 46 return 0;
41} 47}
42 48
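The new cacheflush() entry point takes addr/len/flags with the flags drawn from <asm/cachectl.h>; note the body shown flushes at cache granularity and ignores addr/len. A hypothetical user-space wrapper (the flag values follow the common cachectl.h convention and __NR_cacheflush is a placeholder, not the real tile syscall number):

#include <unistd.h>
#include <sys/syscall.h>

#define ICACHE (1 << 0)          /* assumed flag value */
#define DCACHE (1 << 1)          /* assumed flag value */
#define BCACHE (ICACHE | DCACHE)

#ifndef __NR_cacheflush
#define __NR_cacheflush -1       /* placeholder: consult asm/unistd.h */
#endif

static long tile_cacheflush(void *addr, unsigned long len,
                            unsigned long flags)
{
        return syscall(__NR_cacheflush, addr, len, flags);
}

int main(void)
{
        char buf[64];
        /* Evict both data and instruction caches for this range. */
        return tile_cacheflush(buf, sizeof(buf), BCACHE) == 0 ? 0 : 1;
}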
diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c
index 71ae728e9d0b..e25b0a89c18f 100644
--- a/arch/tile/kernel/sysfs.c
+++ b/arch/tile/kernel/sysfs.c
@@ -93,6 +93,10 @@ HV_CONF_ATTR(mezz_part, HV_CONFSTR_MEZZ_PART_NUM)
93HV_CONF_ATTR(mezz_serial, HV_CONFSTR_MEZZ_SERIAL_NUM) 93HV_CONF_ATTR(mezz_serial, HV_CONFSTR_MEZZ_SERIAL_NUM)
94HV_CONF_ATTR(mezz_revision, HV_CONFSTR_MEZZ_REV) 94HV_CONF_ATTR(mezz_revision, HV_CONFSTR_MEZZ_REV)
95HV_CONF_ATTR(mezz_description, HV_CONFSTR_MEZZ_DESC) 95HV_CONF_ATTR(mezz_description, HV_CONFSTR_MEZZ_DESC)
96HV_CONF_ATTR(cpumod_part, HV_CONFSTR_CPUMOD_PART_NUM)
97HV_CONF_ATTR(cpumod_serial, HV_CONFSTR_CPUMOD_SERIAL_NUM)
98HV_CONF_ATTR(cpumod_revision, HV_CONFSTR_CPUMOD_REV)
 99HV_CONF_ATTR(cpumod_description, HV_CONFSTR_CPUMOD_DESC)
96HV_CONF_ATTR(switch_control, HV_CONFSTR_SWITCH_CONTROL) 100HV_CONF_ATTR(switch_control, HV_CONFSTR_SWITCH_CONTROL)
97 101
98static struct attribute *board_attrs[] = { 102static struct attribute *board_attrs[] = {
@@ -104,6 +108,10 @@ static struct attribute *board_attrs[] = {
104 &dev_attr_mezz_serial.attr, 108 &dev_attr_mezz_serial.attr,
105 &dev_attr_mezz_revision.attr, 109 &dev_attr_mezz_revision.attr,
106 &dev_attr_mezz_description.attr, 110 &dev_attr_mezz_description.attr,
111 &dev_attr_cpumod_part.attr,
112 &dev_attr_cpumod_serial.attr,
113 &dev_attr_cpumod_revision.attr,
114 &dev_attr_cpumod_description.attr,
107 &dev_attr_switch_control.attr, 115 &dev_attr_switch_control.attr,
108 NULL 116 NULL
109}; 117};
diff --git a/arch/tile/kernel/tlb.c b/arch/tile/kernel/tlb.c
index a5f241c24cac..3fd54d5bbd4c 100644
--- a/arch/tile/kernel/tlb.c
+++ b/arch/tile/kernel/tlb.c
@@ -15,6 +15,7 @@
15 15
16#include <linux/cpumask.h> 16#include <linux/cpumask.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/hugetlb.h>
18#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
19#include <asm/homecache.h> 20#include <asm/homecache.h>
20#include <hv/hypervisor.h> 21#include <hv/hypervisor.h>
@@ -49,25 +50,25 @@ void flush_tlb_current_task(void)
49 flush_tlb_mm(current->mm); 50 flush_tlb_mm(current->mm);
50} 51}
51 52
52void flush_tlb_page_mm(const struct vm_area_struct *vma, struct mm_struct *mm, 53void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm,
53 unsigned long va) 54 unsigned long va)
54{ 55{
55 unsigned long size = hv_page_size(vma); 56 unsigned long size = vma_kernel_pagesize(vma);
56 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; 57 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
57 flush_remote(0, cache, mm_cpumask(mm), 58 flush_remote(0, cache, mm_cpumask(mm),
58 va, size, size, mm_cpumask(mm), NULL, 0); 59 va, size, size, mm_cpumask(mm), NULL, 0);
59} 60}
60 61
61void flush_tlb_page(const struct vm_area_struct *vma, unsigned long va) 62void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
62{ 63{
63 flush_tlb_page_mm(vma, vma->vm_mm, va); 64 flush_tlb_page_mm(vma, vma->vm_mm, va);
64} 65}
65EXPORT_SYMBOL(flush_tlb_page); 66EXPORT_SYMBOL(flush_tlb_page);
66 67
67void flush_tlb_range(const struct vm_area_struct *vma, 68void flush_tlb_range(struct vm_area_struct *vma,
68 unsigned long start, unsigned long end) 69 unsigned long start, unsigned long end)
69{ 70{
70 unsigned long size = hv_page_size(vma); 71 unsigned long size = vma_kernel_pagesize(vma);
71 struct mm_struct *mm = vma->vm_mm; 72 struct mm_struct *mm = vma->vm_mm;
72 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; 73 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
73 flush_remote(0, cache, mm_cpumask(mm), start, end - start, size, 74 flush_remote(0, cache, mm_cpumask(mm), start, end - start, size,
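With multiple huge page sizes in play, the flush granularity now comes from vma_kernel_pagesize() per VMA rather than a single hv_page_size, and executable mappings additionally evict the L1 I-cache. A stand-in for that per-VMA choice (sizes are assumed; the kernel helper returns the hugetlb page size or PAGE_SIZE):

#include <stdio.h>

#define PAGE_SIZE  (1UL << 16)   /* assumed base page size */
#define HPAGE_SIZE (1UL << 24)   /* assumed huge page size */

struct vma_sketch { int is_hugetlb; int is_exec; };

static unsigned long vma_pagesize(const struct vma_sketch *vma)
{
        return vma->is_hugetlb ? HPAGE_SIZE : PAGE_SIZE;
}

int main(void)
{
        struct vma_sketch text = { .is_hugetlb = 1, .is_exec = 1 };
        printf("flush size %#lx, evict L1I: %s\n", vma_pagesize(&text),
               text.is_exec ? "yes" : "no");
        return 0;
}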
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 73cff814ac57..5b19a23c8908 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -195,6 +195,25 @@ static int special_ill(bundle_bits bundle, int *sigp, int *codep)
195 return 1; 195 return 1;
196} 196}
197 197
198static const char *const int_name[] = {
199 [INT_MEM_ERROR] = "Memory error",
200 [INT_ILL] = "Illegal instruction",
201 [INT_GPV] = "General protection violation",
202 [INT_UDN_ACCESS] = "UDN access",
203 [INT_IDN_ACCESS] = "IDN access",
204#if CHIP_HAS_SN()
205 [INT_SN_ACCESS] = "SN access",
206#endif
207 [INT_SWINT_3] = "Software interrupt 3",
208 [INT_SWINT_2] = "Software interrupt 2",
209 [INT_SWINT_0] = "Software interrupt 0",
210 [INT_UNALIGN_DATA] = "Unaligned data",
211 [INT_DOUBLE_FAULT] = "Double fault",
212#ifdef __tilegx__
213 [INT_ILL_TRANS] = "Illegal virtual address",
214#endif
215};
216
198void __kprobes do_trap(struct pt_regs *regs, int fault_num, 217void __kprobes do_trap(struct pt_regs *regs, int fault_num,
199 unsigned long reason) 218 unsigned long reason)
200{ 219{
@@ -211,10 +230,17 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
211 * current process and hope for the best. 230 * current process and hope for the best.
212 */ 231 */
213 if (!user_mode(regs)) { 232 if (!user_mode(regs)) {
233 const char *name;
214 if (fixup_exception(regs)) /* only UNALIGN_DATA in practice */ 234 if (fixup_exception(regs)) /* only UNALIGN_DATA in practice */
215 return; 235 return;
216 pr_alert("Kernel took bad trap %d at PC %#lx\n", 236 if (fault_num >= 0 &&
217 fault_num, regs->pc); 237 fault_num < sizeof(int_name)/sizeof(int_name[0]) &&
238 int_name[fault_num] != NULL)
239 name = int_name[fault_num];
240 else
241 name = "Unknown interrupt";
242 pr_alert("Kernel took bad trap %d (%s) at PC %#lx\n",
243 fault_num, name, regs->pc);
218 if (fault_num == INT_GPV) 244 if (fault_num == INT_GPV)
219 pr_alert("GPV_REASON is %#lx\n", reason); 245 pr_alert("GPV_REASON is %#lx\n", reason);
220 show_regs(regs); 246 show_regs(regs);
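The trap report indexes a sparse, designator-initialized name table, so the lookup guards both the bound and a NULL slot before dereferencing. The same idiom, stand-alone (the slot numbers are illustrative, not the tile INT_* values):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *const int_name[] = {
        [2] = "Illegal instruction",          /* sparse: other slots NULL */
        [3] = "General protection violation",
};

static const char *trap_name(long fault_num)
{
        if (fault_num >= 0 && fault_num < (long)ARRAY_SIZE(int_name) &&
            int_name[fault_num] != NULL)
                return int_name[fault_num];
        return "Unknown interrupt";
}

int main(void)
{
        printf("%s\n", trap_name(3));    /* General protection violation */
        printf("%s\n", trap_name(99));   /* Unknown interrupt            */
        return 0;
}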