author     Len Brown <len.brown@intel.com>   2005-09-08 01:45:47 -0400
committer  Len Brown <len.brown@intel.com>   2005-09-08 01:45:47 -0400
commit     64e47488c913ac704d465a6af86a26786d1412a5 (patch)
tree       d3b0148592963dcde26e4bb35ddfec8b1eaf8e23 /arch/ia64
parent     4a35a46bf1cda4737c428380d1db5d15e2590d18 (diff)
parent     caf39e87cc1182f7dae84eefc43ca14d54c78ef9 (diff)

Merge linux-2.6 with linux-acpi-2.6
Diffstat (limited to 'arch/ia64')
-rw-r--r--   arch/ia64/Kconfig                     5
-rw-r--r--   arch/ia64/hp/sim/simserial.c          2
-rw-r--r--   arch/ia64/ia32/ia32_entry.S           2
-rw-r--r--   arch/ia64/ia32/sys_ia32.c            31
-rw-r--r--   arch/ia64/kernel/Makefile             2
-rw-r--r--   arch/ia64/kernel/domain.c           396
-rw-r--r--   arch/ia64/kernel/irq.c               39
-rw-r--r--   arch/ia64/kernel/jprobes.S            1
-rw-r--r--   arch/ia64/kernel/kprobes.c          124
-rw-r--r--   arch/ia64/kernel/traps.c              5
-rw-r--r--   arch/ia64/kernel/vmlinux.lds.S        1
-rw-r--r--   arch/ia64/lib/flush.S                 1
-rw-r--r--   arch/ia64/mm/fault.c                  3
-rw-r--r--   arch/ia64/sn/kernel/io_init.c         2
-rw-r--r--   arch/ia64/sn/kernel/tiocx.c           2
-rw-r--r--   arch/ia64/sn/pci/tioca_provider.c     8
16 files changed, 120 insertions, 504 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index e1c9ea03f31f..00151a8320d8 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -408,6 +408,11 @@ config GENERIC_IRQ_PROBE
         bool
         default y
 
+config GENERIC_PENDING_IRQ
+        bool
+        depends on GENERIC_HARDIRQS && SMP
+        default y
+
 source "arch/ia64/hp/sim/Kconfig"
 
 source "arch/ia64/oprofile/Kconfig"
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 7dcb8582ae0d..b42ec37be51c 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -130,7 +130,7 @@ static void rs_stop(struct tty_struct *tty)
 
 static void rs_start(struct tty_struct *tty)
 {
-#if SIMSERIAL_DEBUG
+#ifdef SIMSERIAL_DEBUG
         printk("rs_start: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n",
                tty->stopped, tty->hw_stopped, tty->flow_stopped);
 #endif
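For reference (not part of this patch): the simserial.c hunk above matters because "#ifdef" keys only on whether SIMSERIAL_DEBUG is defined, whereas "#if" also evaluates its value, so the usual "define it with no value to turn debugging on" idiom breaks under "#if". A small stand-alone C sketch of the difference:

#include <stdio.h>

#define SIMSERIAL_DEBUG         /* defined, but expands to nothing */

int main(void)
{
#ifdef SIMSERIAL_DEBUG
        /* compiled in: the macro is defined, its value is irrelevant */
        printf("debugging enabled\n");
#endif

#if 0
        /* "#if SIMSERIAL_DEBUG" would be a preprocessor error here, because
         * the macro expands to an empty expression; with the macro left
         * undefined it would silently evaluate to 0 and skip the block. */
#endif
        return 0;
}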
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index 829a6d80711c..0708edb06cc4 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -215,7 +215,7 @@ ia32_syscall_table:
         data8 sys32_fork
         data8 sys_read
         data8 sys_write
-        data8 sys32_open          /* 5 */
+        data8 compat_sys_open     /* 5 */
         data8 sys_close
         data8 sys32_waitpid
         data8 sys_creat
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index c1e20d65dd6c..e29a8a55486a 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -2359,37 +2359,6 @@ sys32_brk (unsigned int brk)
         return ret;
 }
 
-/*
- * Exactly like fs/open.c:sys_open(), except that it doesn't set the O_LARGEFILE flag.
- */
-asmlinkage long
-sys32_open (const char __user * filename, int flags, int mode)
-{
-        char * tmp;
-        int fd, error;
-
-        tmp = getname(filename);
-        fd = PTR_ERR(tmp);
-        if (!IS_ERR(tmp)) {
-                fd = get_unused_fd();
-                if (fd >= 0) {
-                        struct file *f = filp_open(tmp, flags, mode);
-                        error = PTR_ERR(f);
-                        if (IS_ERR(f))
-                                goto out_error;
-                        fd_install(fd, f);
-                }
-out:
-                putname(tmp);
-        }
-        return fd;
-
-out_error:
-        put_unused_fd(fd);
-        fd = error;
-        goto out;
-}
-
 /* Structure for ia32 emulation on ia64 */
 struct epoll_event32
 {
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index b242594be55b..307514f7a282 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -16,7 +16,7 @@ obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
 obj-$(CONFIG_IA64_PALINFO)      += palinfo.o
 obj-$(CONFIG_IOSAPIC)           += iosapic.o
 obj-$(CONFIG_MODULES)           += module.o
-obj-$(CONFIG_SMP)               += smp.o smpboot.o domain.o
+obj-$(CONFIG_SMP)               += smp.o smpboot.o
 obj-$(CONFIG_NUMA)              += numa.o
 obj-$(CONFIG_PERFMON)           += perfmon_default_smpl.o
 obj-$(CONFIG_IA64_CYCLONE)      += cyclone.o
diff --git a/arch/ia64/kernel/domain.c b/arch/ia64/kernel/domain.c
deleted file mode 100644
index bbb8efe126b7..000000000000
--- a/arch/ia64/kernel/domain.c
+++ /dev/null
@@ -1,396 +0,0 @@
-/*
- * arch/ia64/kernel/domain.c
- * Architecture specific sched-domains builder.
- *
- * Copyright (C) 2004 Jesse Barnes
- * Copyright (C) 2004 Silicon Graphics, Inc.
- */
-
-#include <linux/sched.h>
-#include <linux/percpu.h>
-#include <linux/slab.h>
-#include <linux/cpumask.h>
-#include <linux/init.h>
-#include <linux/topology.h>
-#include <linux/nodemask.h>
-
-#define SD_NODES_PER_DOMAIN 16
-
-#ifdef CONFIG_NUMA
-/**
- * find_next_best_node - find the next node to include in a sched_domain
- * @node: node whose sched_domain we're building
- * @used_nodes: nodes already in the sched_domain
- *
- * Find the next node to include in a given scheduling domain.  Simply
- * finds the closest node not already in the @used_nodes map.
- *
- * Should use nodemask_t.
- */
-static int find_next_best_node(int node, unsigned long *used_nodes)
-{
-        int i, n, val, min_val, best_node = 0;
-
-        min_val = INT_MAX;
-
-        for (i = 0; i < MAX_NUMNODES; i++) {
-                /* Start at @node */
-                n = (node + i) % MAX_NUMNODES;
-
-                if (!nr_cpus_node(n))
-                        continue;
-
-                /* Skip already used nodes */
-                if (test_bit(n, used_nodes))
-                        continue;
-
-                /* Simple min distance search */
-                val = node_distance(node, n);
-
-                if (val < min_val) {
-                        min_val = val;
-                        best_node = n;
-                }
-        }
-
-        set_bit(best_node, used_nodes);
-        return best_node;
-}
-
-/**
- * sched_domain_node_span - get a cpumask for a node's sched_domain
- * @node: node whose cpumask we're constructing
- * @size: number of nodes to include in this span
- *
- * Given a node, construct a good cpumask for its sched_domain to span.  It
- * should be one that prevents unnecessary balancing, but also spreads tasks
- * out optimally.
- */
-static cpumask_t sched_domain_node_span(int node)
-{
-        int i;
-        cpumask_t span, nodemask;
-        DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
-
-        cpus_clear(span);
-        bitmap_zero(used_nodes, MAX_NUMNODES);
-
-        nodemask = node_to_cpumask(node);
-        cpus_or(span, span, nodemask);
-        set_bit(node, used_nodes);
-
-        for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
-                int next_node = find_next_best_node(node, used_nodes);
-                nodemask = node_to_cpumask(next_node);
-                cpus_or(span, span, nodemask);
-        }
-
-        return span;
-}
-#endif
-
-/*
- * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we
- * can switch it on easily if needed.
- */
-#ifdef CONFIG_SCHED_SMT
-static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
-static struct sched_group sched_group_cpus[NR_CPUS];
-static int cpu_to_cpu_group(int cpu)
-{
-        return cpu;
-}
-#endif
-
-static DEFINE_PER_CPU(struct sched_domain, phys_domains);
-static struct sched_group sched_group_phys[NR_CPUS];
-static int cpu_to_phys_group(int cpu)
-{
-#ifdef CONFIG_SCHED_SMT
-        return first_cpu(cpu_sibling_map[cpu]);
-#else
-        return cpu;
-#endif
-}
-
-#ifdef CONFIG_NUMA
-/*
- * The init_sched_build_groups can't handle what we want to do with node
- * groups, so roll our own. Now each node has its own list of groups which
- * gets dynamically allocated.
- */
-static DEFINE_PER_CPU(struct sched_domain, node_domains);
-static struct sched_group *sched_group_nodes[MAX_NUMNODES];
-
-static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
-static struct sched_group sched_group_allnodes[MAX_NUMNODES];
-
-static int cpu_to_allnodes_group(int cpu)
-{
-        return cpu_to_node(cpu);
-}
-#endif
-
-/*
- * Build sched domains for a given set of cpus and attach the sched domains
- * to the individual cpus
- */
-void build_sched_domains(const cpumask_t *cpu_map)
-{
-        int i;
-
-        /*
-         * Set up domains for cpus specified by the cpu_map.
-         */
-        for_each_cpu_mask(i, *cpu_map) {
-                int group;
-                struct sched_domain *sd = NULL, *p;
-                cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
-
-                cpus_and(nodemask, nodemask, *cpu_map);
-
-#ifdef CONFIG_NUMA
-                if (num_online_cpus()
-                                > SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
-                        sd = &per_cpu(allnodes_domains, i);
-                        *sd = SD_ALLNODES_INIT;
-                        sd->span = *cpu_map;
-                        group = cpu_to_allnodes_group(i);
-                        sd->groups = &sched_group_allnodes[group];
-                        p = sd;
-                } else
-                        p = NULL;
-
-                sd = &per_cpu(node_domains, i);
-                *sd = SD_NODE_INIT;
-                sd->span = sched_domain_node_span(cpu_to_node(i));
-                sd->parent = p;
-                cpus_and(sd->span, sd->span, *cpu_map);
-#endif
-
-                p = sd;
-                sd = &per_cpu(phys_domains, i);
-                group = cpu_to_phys_group(i);
-                *sd = SD_CPU_INIT;
-                sd->span = nodemask;
-                sd->parent = p;
-                sd->groups = &sched_group_phys[group];
-
-#ifdef CONFIG_SCHED_SMT
-                p = sd;
-                sd = &per_cpu(cpu_domains, i);
-                group = cpu_to_cpu_group(i);
-                *sd = SD_SIBLING_INIT;
-                sd->span = cpu_sibling_map[i];
-                cpus_and(sd->span, sd->span, *cpu_map);
-                sd->parent = p;
-                sd->groups = &sched_group_cpus[group];
-#endif
-        }
-
-#ifdef CONFIG_SCHED_SMT
-        /* Set up CPU (sibling) groups */
-        for_each_cpu_mask(i, *cpu_map) {
-                cpumask_t this_sibling_map = cpu_sibling_map[i];
-                cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
-                if (i != first_cpu(this_sibling_map))
-                        continue;
-
-                init_sched_build_groups(sched_group_cpus, this_sibling_map,
-                                        &cpu_to_cpu_group);
-        }
-#endif
-
-        /* Set up physical groups */
-        for (i = 0; i < MAX_NUMNODES; i++) {
-                cpumask_t nodemask = node_to_cpumask(i);
-
-                cpus_and(nodemask, nodemask, *cpu_map);
-                if (cpus_empty(nodemask))
-                        continue;
-
-                init_sched_build_groups(sched_group_phys, nodemask,
-                                        &cpu_to_phys_group);
-        }
-
-#ifdef CONFIG_NUMA
-        init_sched_build_groups(sched_group_allnodes, *cpu_map,
-                                &cpu_to_allnodes_group);
-
-        for (i = 0; i < MAX_NUMNODES; i++) {
-                /* Set up node groups */
-                struct sched_group *sg, *prev;
-                cpumask_t nodemask = node_to_cpumask(i);
-                cpumask_t domainspan;
-                cpumask_t covered = CPU_MASK_NONE;
-                int j;
-
-                cpus_and(nodemask, nodemask, *cpu_map);
-                if (cpus_empty(nodemask))
-                        continue;
-
-                domainspan = sched_domain_node_span(i);
-                cpus_and(domainspan, domainspan, *cpu_map);
-
-                sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
-                sched_group_nodes[i] = sg;
-                for_each_cpu_mask(j, nodemask) {
-                        struct sched_domain *sd;
-                        sd = &per_cpu(node_domains, j);
-                        sd->groups = sg;
-                        if (sd->groups == NULL) {
-                                /* Turn off balancing if we have no groups */
-                                sd->flags = 0;
-                        }
-                }
-                if (!sg) {
-                        printk(KERN_WARNING
-                        "Can not alloc domain group for node %d\n", i);
-                        continue;
-                }
-                sg->cpu_power = 0;
-                sg->cpumask = nodemask;
-                cpus_or(covered, covered, nodemask);
-                prev = sg;
-
-                for (j = 0; j < MAX_NUMNODES; j++) {
-                        cpumask_t tmp, notcovered;
-                        int n = (i + j) % MAX_NUMNODES;
-
-                        cpus_complement(notcovered, covered);
-                        cpus_and(tmp, notcovered, *cpu_map);
-                        cpus_and(tmp, tmp, domainspan);
-                        if (cpus_empty(tmp))
-                                break;
-
-                        nodemask = node_to_cpumask(n);
-                        cpus_and(tmp, tmp, nodemask);
-                        if (cpus_empty(tmp))
-                                continue;
-
-                        sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
-                        if (!sg) {
-                                printk(KERN_WARNING
-                                "Can not alloc domain group for node %d\n", j);
-                                break;
-                        }
-                        sg->cpu_power = 0;
-                        sg->cpumask = tmp;
-                        cpus_or(covered, covered, tmp);
-                        prev->next = sg;
-                        prev = sg;
-                }
-                prev->next = sched_group_nodes[i];
-        }
-#endif
-
-        /* Calculate CPU power for physical packages and nodes */
-        for_each_cpu_mask(i, *cpu_map) {
-                int power;
-                struct sched_domain *sd;
-#ifdef CONFIG_SCHED_SMT
-                sd = &per_cpu(cpu_domains, i);
-                power = SCHED_LOAD_SCALE;
-                sd->groups->cpu_power = power;
-#endif
-
-                sd = &per_cpu(phys_domains, i);
-                power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
-                                (cpus_weight(sd->groups->cpumask)-1) / 10;
-                sd->groups->cpu_power = power;
-
-#ifdef CONFIG_NUMA
-                sd = &per_cpu(allnodes_domains, i);
-                if (sd->groups) {
-                        power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
-                                (cpus_weight(sd->groups->cpumask)-1) / 10;
-                        sd->groups->cpu_power = power;
-                }
-#endif
-        }
-
-#ifdef CONFIG_NUMA
-        for (i = 0; i < MAX_NUMNODES; i++) {
-                struct sched_group *sg = sched_group_nodes[i];
-                int j;
-
-                if (sg == NULL)
-                        continue;
-next_sg:
-                for_each_cpu_mask(j, sg->cpumask) {
-                        struct sched_domain *sd;
-                        int power;
-
-                        sd = &per_cpu(phys_domains, j);
-                        if (j != first_cpu(sd->groups->cpumask)) {
-                                /*
-                                 * Only add "power" once for each
-                                 * physical package.
-                                 */
-                                continue;
-                        }
-                        power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
-                                (cpus_weight(sd->groups->cpumask)-1) / 10;
-
-                        sg->cpu_power += power;
-                }
-                sg = sg->next;
-                if (sg != sched_group_nodes[i])
-                        goto next_sg;
-        }
-#endif
-
-        /* Attach the domains */
-        for_each_cpu_mask(i, *cpu_map) {
-                struct sched_domain *sd;
-#ifdef CONFIG_SCHED_SMT
-                sd = &per_cpu(cpu_domains, i);
-#else
-                sd = &per_cpu(phys_domains, i);
-#endif
-                cpu_attach_domain(sd, i);
-        }
-}
-/*
- * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
- */
-void arch_init_sched_domains(const cpumask_t *cpu_map)
-{
-        cpumask_t cpu_default_map;
-
-        /*
-         * Setup mask for cpus without special case scheduling requirements.
-         * For now this just excludes isolated cpus, but could be used to
-         * exclude other special cases in the future.
-         */
-        cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
-
-        build_sched_domains(&cpu_default_map);
-}
-
-void arch_destroy_sched_domains(const cpumask_t *cpu_map)
-{
-#ifdef CONFIG_NUMA
-        int i;
-        for (i = 0; i < MAX_NUMNODES; i++) {
-                cpumask_t nodemask = node_to_cpumask(i);
-                struct sched_group *oldsg, *sg = sched_group_nodes[i];
-
-                cpus_and(nodemask, nodemask, *cpu_map);
-                if (cpus_empty(nodemask))
-                        continue;
-
-                if (sg == NULL)
-                        continue;
-                sg = sg->next;
-next_sg:
-                oldsg = sg;
-                sg = sg->next;
-                kfree(oldsg);
-                if (oldsg != sched_group_nodes[i])
-                        goto next_sg;
-                sched_group_nodes[i] = NULL;
-        }
-#endif
-}
-
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 28f2aadc38d0..205d98028261 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -91,23 +91,8 @@ skip:
 }
 
 #ifdef CONFIG_SMP
-/*
- * This is updated when the user sets irq affinity via /proc
- */
-static cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
-static unsigned long pending_irq_redir[BITS_TO_LONGS(NR_IRQS)];
-
 static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 
-/*
- * Arch specific routine for deferred write to iosapic rte to reprogram
- * intr destination.
- */
-void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
-{
-        pending_irq_cpumask[irq] = mask_val;
-}
-
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
         cpumask_t mask = CPU_MASK_NONE;
@@ -116,32 +101,10 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 
         if (irq < NR_IRQS) {
                 irq_affinity[irq] = mask;
+                set_irq_info(irq, mask);
                 irq_redir[irq] = (char) (redir & 0xff);
         }
 }
-
-
-void move_irq(int irq)
-{
-        /* note - we hold desc->lock */
-        cpumask_t tmp;
-        irq_desc_t *desc = irq_descp(irq);
-        int redir = test_bit(irq, pending_irq_redir);
-
-        if (unlikely(!desc->handler->set_affinity))
-                return;
-
-        if (!cpus_empty(pending_irq_cpumask[irq])) {
-                cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
-                if (unlikely(!cpus_empty(tmp))) {
-                        desc->handler->set_affinity(irq | (redir ? IA64_IRQ_REDIRECTED : 0),
-                                                    pending_irq_cpumask[irq]);
-                }
-                cpus_clear(pending_irq_cpumask[irq]);
-        }
-}
-
-
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_HOTPLUG_CPU
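For reference (not part of this patch): together with the GENERIC_PENDING_IRQ entry added to Kconfig above, this hunk hands the "remember the requested affinity, apply it later from the interrupt path" bookkeeping that proc_set_irq_affinity()/move_irq() used to do over to generic kernel code. A stand-alone conceptual sketch of that deferral pattern (all names below are illustrative, not the generic kernel interface):

#define MAX_IRQS 256

/* CPU mask requested from /proc but not yet written to the interrupt
 * controller; 0 means "nothing pending". */
static unsigned long pending_mask[MAX_IRQS];

/* /proc write path: only record the request, do not touch hardware. */
static void request_affinity(unsigned int irq, unsigned long cpumask)
{
        pending_mask[irq] = cpumask;
}

/* Interrupt path, where reprogramming the route is safe: apply and clear. */
static void apply_pending_affinity(unsigned int irq,
                                   void (*set_affinity)(unsigned int irq,
                                                        unsigned long cpumask))
{
        if (pending_mask[irq]) {
                set_affinity(irq, pending_mask[irq]);
                pending_mask[irq] = 0;
        }
}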
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S
index b7fa3ccd2b0f..2323377e3695 100644
--- a/arch/ia64/kernel/jprobes.S
+++ b/arch/ia64/kernel/jprobes.S
@@ -49,6 +49,7 @@
         /*
          * void jprobe_break(void)
          */
+        .section .kprobes.text, "ax"
 ENTRY(jprobe_break)
         break.m 0x80300
 END(jprobe_break)
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 884f5cd27d8a..471086b808a4 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -87,12 +87,25 @@ static enum instruction_type bundle_encoding[32][3] = {
  * is IP relative instruction and update the kprobe
  * inst flag accordingly
  */
-static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode,
-        unsigned long kprobe_inst, struct kprobe *p)
+static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
+                                              uint major_opcode,
+                                              unsigned long kprobe_inst,
+                                              struct kprobe *p)
 {
         p->ainsn.inst_flag = 0;
         p->ainsn.target_br_reg = 0;
 
+        /* Check for Break instruction
+         * Bits 37:40 Major opcode to be zero
+         * Bits 27:32 X6 to be zero
+         * Bits 32:35 X3 to be zero
+         */
+        if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF)) ) {
+                /* is a break instruction */
+                p->ainsn.inst_flag |= INST_FLAG_BREAK_INST;
+                return;
+        }
+
         if (bundle_encoding[template][slot] == B) {
                 switch (major_opcode) {
                 case INDIRECT_CALL_OPCODE:
@@ -126,8 +139,10 @@ static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode
  * Returns 0 if supported
  * Returns -EINVAL if unsupported
  */
-static int unsupported_inst(uint template, uint slot, uint major_opcode,
-        unsigned long kprobe_inst, struct kprobe *p)
+static int __kprobes unsupported_inst(uint template, uint slot,
+                                      uint major_opcode,
+                                      unsigned long kprobe_inst,
+                                      struct kprobe *p)
 {
         unsigned long addr = (unsigned long)p->addr;
 
@@ -168,8 +183,9 @@ static int unsupported_inst(uint template, uint slot, uint major_opcode,
  * on which we are inserting kprobe is cmp instruction
  * with ctype as unc.
  */
-static uint is_cmp_ctype_unc_inst(uint template, uint slot, uint major_opcode,
-unsigned long kprobe_inst)
+static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot,
+                                            uint major_opcode,
+                                            unsigned long kprobe_inst)
 {
         cmp_inst_t cmp_inst;
         uint ctype_unc = 0;
@@ -201,8 +217,10 @@ out:
  * In this function we override the bundle with
  * the break instruction at the given slot.
  */
-static void prepare_break_inst(uint template, uint slot, uint major_opcode,
-        unsigned long kprobe_inst, struct kprobe *p)
+static void __kprobes prepare_break_inst(uint template, uint slot,
+                                         uint major_opcode,
+                                         unsigned long kprobe_inst,
+                                         struct kprobe *p)
 {
         unsigned long break_inst = BREAK_INST;
         bundle_t *bundle = &p->ainsn.insn.bundle;
@@ -271,7 +289,8 @@ static inline int in_ivt_functions(unsigned long addr)
                 && addr < (unsigned long)__end_ivt_text);
 }
 
-static int valid_kprobe_addr(int template, int slot, unsigned long addr)
+static int __kprobes valid_kprobe_addr(int template, int slot,
+                                       unsigned long addr)
 {
         if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
                 printk(KERN_WARNING "Attempting to insert unaligned kprobe "
@@ -323,7 +342,7 @@ static void kretprobe_trampoline(void)
  * - cleanup by marking the instance as unused
  * - long jump back to the original return address
  */
-int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
         struct kretprobe_instance *ri = NULL;
         struct hlist_head *head;
@@ -381,7 +400,8 @@ int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
         return 1;
 }
 
-void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
+                                      struct pt_regs *regs)
 {
         struct kretprobe_instance *ri;
 
@@ -399,7 +419,7 @@ void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
         }
 }
 
-int arch_prepare_kprobe(struct kprobe *p)
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
         unsigned long addr = (unsigned long) p->addr;
         unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
@@ -430,7 +450,7 @@ int arch_prepare_kprobe(struct kprobe *p)
         return 0;
 }
 
-void arch_arm_kprobe(struct kprobe *p)
+void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
         unsigned long addr = (unsigned long)p->addr;
         unsigned long arm_addr = addr & ~0xFULL;
@@ -439,7 +459,7 @@ void arch_arm_kprobe(struct kprobe *p)
         flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
 }
 
-void arch_disarm_kprobe(struct kprobe *p)
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
         unsigned long addr = (unsigned long)p->addr;
         unsigned long arm_addr = addr & ~0xFULL;
@@ -449,7 +469,7 @@ void arch_disarm_kprobe(struct kprobe *p)
         flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
 }
 
-void arch_remove_kprobe(struct kprobe *p)
+void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 }
 
@@ -461,7 +481,7 @@ void arch_remove_kprobe(struct kprobe *p)
  * to original stack address, handle the case where we need to fixup the
  * relative IP address and/or fixup branch register.
  */
-static void resume_execution(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
         unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL;
         unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
@@ -528,13 +548,16 @@ turn_ss_off:
         ia64_psr(regs)->ss = 0;
 }
 
-static void prepare_ss(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
 {
         unsigned long bundle_addr = (unsigned long) &p->opcode.bundle;
         unsigned long slot = (unsigned long)p->addr & 0xf;
 
-        /* Update instruction pointer (IIP) and slot number (IPSR.ri) */
-        regs->cr_iip = bundle_addr & ~0xFULL;
+        /* single step inline if break instruction */
+        if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)
+                regs->cr_iip = (unsigned long)p->addr & ~0xFULL;
+        else
+                regs->cr_iip = bundle_addr & ~0xFULL;
 
         if (slot > 2)
                 slot = 0;
@@ -545,7 +568,39 @@ static void prepare_ss(struct kprobe *p, struct pt_regs *regs)
         ia64_psr(regs)->ss = 1;
 }
 
-static int pre_kprobes_handler(struct die_args *args)
+static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
+{
+        unsigned int slot = ia64_psr(regs)->ri;
+        unsigned int template, major_opcode;
+        unsigned long kprobe_inst;
+        unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
+        bundle_t bundle;
+
+        memcpy(&bundle, kprobe_addr, sizeof(bundle_t));
+        template = bundle.quad0.template;
+
+        /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
+        if (slot == 1 && bundle_encoding[template][1] == L)
+                slot++;
+
+        /* Get Kprobe probe instruction at given slot*/
+        get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode);
+
+        /* For break instruction,
+         * Bits 37:40 Major opcode to be zero
+         * Bits 27:32 X6 to be zero
+         * Bits 32:35 X3 to be zero
+         */
+        if (major_opcode || ((kprobe_inst >> 27) & 0x1FF) ) {
+                /* Not a break instruction */
+                return 0;
+        }
+
+        /* Is a break instruction */
+        return 1;
+}
+
+static int __kprobes pre_kprobes_handler(struct die_args *args)
 {
         struct kprobe *p;
         int ret = 0;
@@ -558,7 +613,9 @@ static int pre_kprobes_handler(struct die_args *args)
         if (kprobe_running()) {
                 p = get_kprobe(addr);
                 if (p) {
-                        if (kprobe_status == KPROBE_HIT_SS) {
+                        if ( (kprobe_status == KPROBE_HIT_SS) &&
+                             (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
+                                ia64_psr(regs)->ss = 0;
                                 unlock_kprobes();
                                 goto no_kprobe;
                         }
@@ -592,6 +649,19 @@ static int pre_kprobes_handler(struct die_args *args)
         p = get_kprobe(addr);
         if (!p) {
                 unlock_kprobes();
+                if (!is_ia64_break_inst(regs)) {
+                        /*
+                         * The breakpoint instruction was removed right
+                         * after we hit it. Another cpu has removed
+                         * either a probepoint or a debugger breakpoint
+                         * at this address. In either case, no further
+                         * handling of this interrupt is appropriate.
+                         */
+                        ret = 1;
+
+                }
+
+                /* Not one of our break, let kernel handle it */
                 goto no_kprobe;
         }
 
@@ -616,7 +686,7 @@ no_kprobe:
         return ret;
 }
 
-static int post_kprobes_handler(struct pt_regs *regs)
+static int __kprobes post_kprobes_handler(struct pt_regs *regs)
 {
         if (!kprobe_running())
                 return 0;
@@ -641,7 +711,7 @@ out:
         return 1;
 }
 
-static int kprobes_fault_handler(struct pt_regs *regs, int trapnr)
+static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
 {
         if (!kprobe_running())
                 return 0;
@@ -659,8 +729,8 @@ static int kprobes_fault_handler(struct pt_regs *regs, int trapnr)
         return 0;
 }
 
-int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
-                             void *data)
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+                                       unsigned long val, void *data)
 {
         struct die_args *args = (struct die_args *)data;
         switch(val) {
@@ -681,7 +751,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
         return NOTIFY_DONE;
 }
 
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
         struct jprobe *jp = container_of(p, struct jprobe, kp);
         unsigned long addr = ((struct fnptr *)(jp->entry))->ip;
@@ -703,7 +773,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
         return 1;
 }
 
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
         *regs = jprobe_saved_regs;
         return 1;
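For reference (not part of this patch): the __kprobes tag added throughout this file moves the marked handlers into the .kprobes.text section, so the kprobes core can refuse to plant a probe inside the very code that services probes (which would recurse). A hedged sketch of how such a marker is typically defined and used; the authoritative definition lives in include/linux/kprobes.h and may differ in detail:

/* Sketch only: tag a function so the linker groups it into a section
 * the kprobes core treats as off-limits for new probes. */
#define __kprobes __attribute__((__section__(".kprobes.text")))

static int __kprobes my_break_handler(void)
{
        /* runs while a probe is being serviced, so it must never be
         * probed itself */
        return 0;
}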
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 4440c8343fa4..f970359e7edf 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -15,6 +15,7 @@
 #include <linux/vt_kern.h>      /* For unblank_screen() */
 #include <linux/module.h>       /* for EXPORT_SYMBOL */
 #include <linux/hardirq.h>
+#include <linux/kprobes.h>
 
 #include <asm/fpswa.h>
 #include <asm/ia32.h>
@@ -122,7 +123,7 @@ die_if_kernel (char *str, struct pt_regs *regs, long err)
 }
 
 void
-ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
+__kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
 {
         siginfo_t siginfo;
         int sig, code;
@@ -444,7 +445,7 @@ ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3,
         return rv;
 }
 
-void
+void __kprobes
 ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
             unsigned long iim, unsigned long itir, long arg5, long arg6,
             long arg7, struct pt_regs regs)
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index a676e79e0681..30d8564e9603 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -48,6 +48,7 @@ SECTIONS
                 *(.text)
                 SCHED_TEXT
                 LOCK_TEXT
+                KPROBES_TEXT
                 *(.gnu.linkonce.t*)
         }
   .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
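For reference (not part of this patch): KPROBES_TEXT is the generic linker-script helper that gathers every .kprobes.text input section (populated by the __kprobes attribute and the explicit .section directives elsewhere in this diff) into the kernel image, bracketed by symbols the kprobes core can compare probe addresses against. A rough sketch of the shape of that helper; the authoritative version is in include/asm-generic/vmlinux.lds.h and may differ:

/* Sketch only: collect .kprobes.text between start/end markers so a
 * simple range check can reject probes placed inside it. */
#define KPROBES_TEXT                            \
                __kprobes_text_start = .;       \
                *(.kprobes.text)                \
                __kprobes_text_end = .;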
diff --git a/arch/ia64/lib/flush.S b/arch/ia64/lib/flush.S
index 3e2cfa2c6d39..2a0d27f2f21b 100644
--- a/arch/ia64/lib/flush.S
+++ b/arch/ia64/lib/flush.S
@@ -20,6 +20,7 @@
  *
  * Note: "in0" and "in1" are preserved for debugging purposes.
  */
+        .section .kprobes.text,"ax"
 GLOBAL_ENTRY(flush_icache_range)
 
         .prologue
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index ff62551eb3a1..24614869e866 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -9,6 +9,7 @@
 #include <linux/mm.h>
 #include <linux/smp_lock.h>
 #include <linux/interrupt.h>
+#include <linux/kprobes.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
@@ -76,7 +77,7 @@ mapped_kernel_page_is_present (unsigned long address)
         return pte_present(pte);
 }
 
-void
+void __kprobes
 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
 {
         int signal = SIGSEGV, code = SEGV_MAPERR;
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 4564ed0b5ff3..906622d9f933 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -431,7 +431,7 @@ void sn_bus_store_sysdata(struct pci_dev *dev)
 {
         struct sysdata_el *element;
 
-        element = kcalloc(1, sizeof(struct sysdata_el), GFP_KERNEL);
+        element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
         if (!element) {
                 dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__);
                 return;
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index 254fe15c064b..b45db5133f55 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -191,7 +191,7 @@ cx_device_register(nasid_t nasid, int part_num, int mfg_num,
 {
         struct cx_dev *cx_dev;
 
-        cx_dev = kcalloc(1, sizeof(struct cx_dev), GFP_KERNEL);
+        cx_dev = kzalloc(sizeof(struct cx_dev), GFP_KERNEL);
         DBG("cx_dev= 0x%p\n", cx_dev);
         if (cx_dev == NULL)
                 return -ENOMEM;
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index ea09c12f0258..19bced34d5f1 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -148,7 +148,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
         tioca_kern->ca_pcigart_entries =
             tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize;
         tioca_kern->ca_pcigart_pagemap =
-            kcalloc(1, tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
+            kzalloc(tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
         if (!tioca_kern->ca_pcigart_pagemap) {
                 free_pages((unsigned long)tioca_kern->ca_gart,
                            get_order(tioca_kern->ca_gart_size));
@@ -392,7 +392,7 @@ tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size)
          * allocate a map struct
          */
 
-        ca_dmamap = kcalloc(1, sizeof(struct tioca_dmamap), GFP_ATOMIC);
+        ca_dmamap = kzalloc(sizeof(struct tioca_dmamap), GFP_ATOMIC);
         if (!ca_dmamap)
                 goto map_return;
 
@@ -600,7 +600,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
          * Allocate kernel bus soft and copy from prom.
          */
 
-        tioca_common = kcalloc(1, sizeof(struct tioca_common), GFP_KERNEL);
+        tioca_common = kzalloc(sizeof(struct tioca_common), GFP_KERNEL);
         if (!tioca_common)
                 return NULL;
 
@@ -609,7 +609,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
 
         /* init kernel-private area */
 
-        tioca_kern = kcalloc(1, sizeof(struct tioca_kernel), GFP_KERNEL);
+        tioca_kern = kzalloc(sizeof(struct tioca_kernel), GFP_KERNEL);
         if (!tioca_kern) {
                 kfree(tioca_common);
                 return NULL;
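For reference (not part of this patch): the kcalloc(1, ...) to kzalloc(...) conversions in the three sn/ files are behavior-preserving; both calls return zero-filled memory (or NULL on failure), and kzalloc simply drops the redundant element count when a single object is allocated. A minimal sketch of the equivalence (the struct name is illustrative):

#include <linux/slab.h>

struct my_obj {                 /* illustrative type, not from this patch */
        int field;
};

static struct my_obj *alloc_one(void)
{
        /* Equivalent to kcalloc(1, sizeof(struct my_obj), GFP_KERNEL):
         * one zero-filled object, NULL on failure. */
        return kzalloc(sizeof(struct my_obj), GFP_KERNEL);
}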