Diffstat (limited to 'arch/sh/kernel')

 arch/sh/kernel/cpu/init.c              |  2
 arch/sh/kernel/cpu/irq/imask.c         | 14
 arch/sh/kernel/cpu/irq/intc-sh5.c      | 49
 arch/sh/kernel/cpu/irq/ipr.c           | 29
 arch/sh/kernel/cpu/sh4/perf_event.c    |  2
 arch/sh/kernel/cpu/sh4a/clock-sh7724.c |  6
 arch/sh/kernel/cpu/sh4a/perf_event.c   |  2
 arch/sh/kernel/irq.c                   | 37
 arch/sh/kernel/irq_64.c                | 16
 arch/sh/kernel/ptrace_32.c             | 45
 arch/sh/kernel/ptrace_64.c             | 25
 arch/sh/kernel/setup.c                 |  4
 12 files changed, 112 insertions, 119 deletions
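
Most of the churn below comes from converting the SH interrupt controllers
(imask, intc-sh5, ipr) to the genirq 'struct irq_data' based irq_chip
callbacks. As a rough sketch only -- the names here are made up for
illustration and do not appear in the patch -- a converted chip ends up
shaped like this:

	#include <linux/irq.h>

	/* New-style callbacks take a struct irq_data instead of an IRQ number. */
	static void example_mask_irq(struct irq_data *data)
	{
		unsigned int irq = data->irq;	/* the number the old callback received */

		/* ... write the controller's mask register for 'irq' here ... */
		(void)irq;
	}

	static void example_unmask_irq(struct irq_data *data)
	{
		/* ... clear the mask bit for data->irq ... */
	}

	static struct irq_chip example_chip = {
		.name		= "EXAMPLE",
		.irq_mask	= example_mask_irq,	/* was .mask */
		.irq_unmask	= example_unmask_irq,	/* was .unmask */
		.irq_mask_ack	= example_mask_irq,	/* was .mask_ack */
	};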
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 97661061ff20..fac742e514ee 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -340,6 +340,8 @@ asmlinkage void __cpuinit cpu_init(void)
 	 */
 	current_cpu_data.asid_cache = NO_CONTEXT;
 
+	current_cpu_data.phys_bits = __in_29bit_mode() ? 29 : 32;
+
 	speculative_execution_init();
 	expmask_init();
 
diff --git a/arch/sh/kernel/cpu/irq/imask.c b/arch/sh/kernel/cpu/irq/imask.c
index a351ed84eec5..32c825c9488e 100644
--- a/arch/sh/kernel/cpu/irq/imask.c
+++ b/arch/sh/kernel/cpu/irq/imask.c
@@ -51,16 +51,20 @@ static inline void set_interrupt_registers(int ip)
51 : "t"); 51 : "t");
52} 52}
53 53
54static void mask_imask_irq(unsigned int irq) 54static void mask_imask_irq(struct irq_data *data)
55{ 55{
56 unsigned int irq = data->irq;
57
56 clear_bit(irq, imask_mask); 58 clear_bit(irq, imask_mask);
57 if (interrupt_priority < IMASK_PRIORITY - irq) 59 if (interrupt_priority < IMASK_PRIORITY - irq)
58 interrupt_priority = IMASK_PRIORITY - irq; 60 interrupt_priority = IMASK_PRIORITY - irq;
59 set_interrupt_registers(interrupt_priority); 61 set_interrupt_registers(interrupt_priority);
60} 62}
61 63
62static void unmask_imask_irq(unsigned int irq) 64static void unmask_imask_irq(struct irq_data *data)
63{ 65{
66 unsigned int irq = data->irq;
67
64 set_bit(irq, imask_mask); 68 set_bit(irq, imask_mask);
65 interrupt_priority = IMASK_PRIORITY - 69 interrupt_priority = IMASK_PRIORITY -
66 find_first_zero_bit(imask_mask, IMASK_PRIORITY); 70 find_first_zero_bit(imask_mask, IMASK_PRIORITY);
@@ -69,9 +73,9 @@ static void unmask_imask_irq(unsigned int irq)
 
 static struct irq_chip imask_irq_chip = {
 	.name		= "SR.IMASK",
-	.mask		= mask_imask_irq,
-	.unmask		= unmask_imask_irq,
-	.mask_ack	= mask_imask_irq,
+	.irq_mask	= mask_imask_irq,
+	.irq_unmask	= unmask_imask_irq,
+	.irq_mask_ack	= mask_imask_irq,
 };
 
 void make_imask_irq(unsigned int irq)
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
index 96a239583948..5af48f8357e5 100644
--- a/arch/sh/kernel/cpu/irq/intc-sh5.c
+++ b/arch/sh/kernel/cpu/irq/intc-sh5.c
@@ -76,39 +76,11 @@ int intc_evt_to_irq[(0xE20/0x20)+1] = {
 };
 
 static unsigned long intc_virt;
-
-static unsigned int startup_intc_irq(unsigned int irq);
-static void shutdown_intc_irq(unsigned int irq);
-static void enable_intc_irq(unsigned int irq);
-static void disable_intc_irq(unsigned int irq);
-static void mask_and_ack_intc(unsigned int);
-static void end_intc_irq(unsigned int irq);
-
-static struct irq_chip intc_irq_type = {
-	.name = "INTC",
-	.startup = startup_intc_irq,
-	.shutdown = shutdown_intc_irq,
-	.enable = enable_intc_irq,
-	.disable = disable_intc_irq,
-	.ack = mask_and_ack_intc,
-	.end = end_intc_irq
-};
-
 static int irlm;	/* IRL mode */
 
-static unsigned int startup_intc_irq(unsigned int irq)
-{
-	enable_intc_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void shutdown_intc_irq(unsigned int irq)
-{
-	disable_intc_irq(irq);
-}
-
-static void enable_intc_irq(unsigned int irq)
+static void enable_intc_irq(struct irq_data *data)
 {
+	unsigned int irq = data->irq;
 	unsigned long reg;
 	unsigned long bitmask;
 
@@ -126,8 +98,9 @@ static void enable_intc_irq(unsigned int irq)
 	__raw_writel(bitmask, reg);
 }
 
-static void disable_intc_irq(unsigned int irq)
+static void disable_intc_irq(struct irq_data *data)
 {
+	unsigned int irq = data->irq;
 	unsigned long reg;
 	unsigned long bitmask;
 
@@ -142,15 +115,11 @@ static void disable_intc_irq(unsigned int irq)
 	__raw_writel(bitmask, reg);
 }
 
-static void mask_and_ack_intc(unsigned int irq)
-{
-	disable_intc_irq(irq);
-}
-
-static void end_intc_irq(unsigned int irq)
-{
-	enable_intc_irq(irq);
-}
+static struct irq_chip intc_irq_type = {
+	.name = "INTC",
+	.irq_enable = enable_intc_irq,
+	.irq_disable = disable_intc_irq,
+};
 
 void __init plat_irq_setup(void)
 {
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c
index 9282d965a1b6..7516c35ee514 100644
--- a/arch/sh/kernel/cpu/irq/ipr.c
+++ b/arch/sh/kernel/cpu/irq/ipr.c
@@ -24,25 +24,25 @@
 #include <linux/module.h>
 #include <linux/topology.h>
 
-static inline struct ipr_desc *get_ipr_desc(unsigned int irq)
+static inline struct ipr_desc *get_ipr_desc(struct irq_data *data)
 {
-	struct irq_chip *chip = get_irq_chip(irq);
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
 	return container_of(chip, struct ipr_desc, chip);
 }
 
-static void disable_ipr_irq(unsigned int irq)
+static void disable_ipr_irq(struct irq_data *data)
 {
-	struct ipr_data *p = get_irq_chip_data(irq);
-	unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx];
+	struct ipr_data *p = irq_data_get_irq_chip_data(data);
+	unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx];
 	/* Set the priority in IPR to 0 */
 	__raw_writew(__raw_readw(addr) & (0xffff ^ (0xf << p->shift)), addr);
 	(void)__raw_readw(addr);	/* Read back to flush write posting */
 }
 
-static void enable_ipr_irq(unsigned int irq)
+static void enable_ipr_irq(struct irq_data *data)
 {
-	struct ipr_data *p = get_irq_chip_data(irq);
-	unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx];
+	struct ipr_data *p = irq_data_get_irq_chip_data(data);
+	unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx];
 	/* Set priority in IPR back to original value */
 	__raw_writew(__raw_readw(addr) | (p->priority << p->shift), addr);
 }
@@ -56,19 +56,18 @@ void register_ipr_controller(struct ipr_desc *desc)
 {
 	int i;
 
-	desc->chip.mask = disable_ipr_irq;
-	desc->chip.unmask = enable_ipr_irq;
-	desc->chip.mask_ack = disable_ipr_irq;
+	desc->chip.irq_mask = disable_ipr_irq;
+	desc->chip.irq_unmask = enable_ipr_irq;
 
 	for (i = 0; i < desc->nr_irqs; i++) {
 		struct ipr_data *p = desc->ipr_data + i;
-		struct irq_desc *irq_desc;
+		int res;
 
 		BUG_ON(p->ipr_idx >= desc->nr_offsets);
 		BUG_ON(!desc->ipr_offsets[p->ipr_idx]);
 
-		irq_desc = irq_to_desc_alloc_node(p->irq, numa_node_id());
-		if (unlikely(!irq_desc)) {
+		res = irq_alloc_desc_at(p->irq, numa_node_id());
+		if (unlikely(res != p->irq && res != -EEXIST)) {
 			printk(KERN_INFO "can not get irq_desc for %d\n",
 			       p->irq);
 			continue;
@@ -78,7 +77,7 @@ void register_ipr_controller(struct ipr_desc *desc)
 		set_irq_chip_and_handler_name(p->irq, &desc->chip,
 					      handle_level_irq, "level");
 		set_irq_chip_data(p->irq, p);
-		disable_ipr_irq(p->irq);
+		disable_ipr_irq(irq_get_irq_data(p->irq));
 	}
 }
 EXPORT_SYMBOL(register_ipr_controller);
diff --git a/arch/sh/kernel/cpu/sh4/perf_event.c b/arch/sh/kernel/cpu/sh4/perf_event.c
index 7f9ecc9c2d02..dbf3b4bb71fe 100644
--- a/arch/sh/kernel/cpu/sh4/perf_event.c
+++ b/arch/sh/kernel/cpu/sh4/perf_event.c
@@ -225,7 +225,7 @@ static void sh7750_pmu_enable_all(void)
 }
 
 static struct sh_pmu sh7750_pmu = {
-	.name		= "SH7750",
+	.name		= "sh7750",
 	.num_events	= 2,
 	.event_map	= sh7750_event_map,
 	.max_events	= ARRAY_SIZE(sh7750_general_events),
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index 2d9700c6b53a..0fe2e9329cb2 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -48,7 +48,7 @@ static struct clk r_clk = {
  * Default rate for the root input clock, reset this with clk_set_rate()
  * from the platform code.
  */
-struct clk extal_clk = {
+static struct clk extal_clk = {
 	.rate		= 33333333,
 };
 
@@ -111,7 +111,7 @@ static struct clk div3_clk = {
 	.parent		= &pll_clk,
 };
 
-struct clk *main_clks[] = {
+static struct clk *main_clks[] = {
 	&r_clk,
 	&extal_clk,
 	&fll_clk,
@@ -156,7 +156,7 @@ struct clk div4_clks[DIV4_NR] = {
 
 enum { DIV6_V, DIV6_FA, DIV6_FB, DIV6_I, DIV6_S, DIV6_NR };
 
-struct clk div6_clks[DIV6_NR] = {
+static struct clk div6_clks[DIV6_NR] = {
 	[DIV6_V] = SH_CLK_DIV6(&div3_clk, VCLKCR, 0),
 	[DIV6_FA] = SH_CLK_DIV6(&div3_clk, FCLKACR, 0),
 	[DIV6_FB] = SH_CLK_DIV6(&div3_clk, FCLKBCR, 0),
diff --git a/arch/sh/kernel/cpu/sh4a/perf_event.c b/arch/sh/kernel/cpu/sh4a/perf_event.c
index b8b873d8d6b5..580276525731 100644
--- a/arch/sh/kernel/cpu/sh4a/perf_event.c
+++ b/arch/sh/kernel/cpu/sh4a/perf_event.c
@@ -259,7 +259,7 @@ static void sh4a_pmu_enable_all(void)
 }
 
 static struct sh_pmu sh4a_pmu = {
-	.name		= "SH-4A",
+	.name		= "sh4a",
 	.num_events	= 2,
 	.event_map	= sh4a_event_map,
 	.max_events	= ARRAY_SIZE(sh4a_general_events),
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 9dc447db8a44..68ecbe6c881a 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -56,6 +56,8 @@ int show_interrupts(struct seq_file *p, void *v)
 	int i = *(loff_t *)v, j, prec;
 	struct irqaction *action;
 	struct irq_desc *desc;
+	struct irq_data *data;
+	struct irq_chip *chip;
 
 	if (i > nr_irqs)
 		return 0;
@@ -77,6 +79,9 @@ int show_interrupts(struct seq_file *p, void *v)
 	if (!desc)
 		return 0;
 
+	data = irq_get_irq_data(i);
+	chip = irq_data_get_irq_chip(data);
+
 	raw_spin_lock_irqsave(&desc->lock, flags);
 	for_each_online_cpu(j)
 		any_count |= kstat_irqs_cpu(i, j);
@@ -87,7 +92,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	seq_printf(p, "%*d: ", prec, i);
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-	seq_printf(p, " %14s", desc->chip->name);
+	seq_printf(p, " %14s", chip->name);
 	seq_printf(p, "-%-8s", desc->name);
 
 	if (action) {
@@ -273,12 +278,6 @@ void __init init_IRQ(void)
 {
 	plat_irq_setup();
 
-	/*
-	 * Pin any of the legacy IRQ vectors that haven't already been
-	 * grabbed by the platform
-	 */
-	reserve_irq_legacy();
-
 	/* Perform the machine specific initialisation */
 	if (sh_mv.mv_init_irq)
 		sh_mv.mv_init_irq();
@@ -297,13 +296,16 @@ int __init arch_probe_nr_irqs(void)
 #endif
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
+static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
 {
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
+
 	printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
-	       irq, desc->node, cpu);
+	       irq, data->node, cpu);
 
 	raw_spin_lock_irq(&desc->lock);
-	desc->chip->set_affinity(irq, cpumask_of(cpu));
+	chip->irq_set_affinity(data, cpumask_of(cpu), false);
 	raw_spin_unlock_irq(&desc->lock);
 }
 
309 311
@@ -314,24 +316,25 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
314 */ 316 */
315void migrate_irqs(void) 317void migrate_irqs(void)
316{ 318{
317 struct irq_desc *desc;
318 unsigned int irq, cpu = smp_processor_id(); 319 unsigned int irq, cpu = smp_processor_id();
319 320
320 for_each_irq_desc(irq, desc) { 321 for_each_active_irq(irq) {
321 if (desc->node == cpu) { 322 struct irq_data *data = irq_get_irq_data(irq);
322 unsigned int newcpu = cpumask_any_and(desc->affinity, 323
324 if (data->node == cpu) {
325 unsigned int newcpu = cpumask_any_and(data->affinity,
323 cpu_online_mask); 326 cpu_online_mask);
324 if (newcpu >= nr_cpu_ids) { 327 if (newcpu >= nr_cpu_ids) {
325 if (printk_ratelimit()) 328 if (printk_ratelimit())
326 printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n", 329 printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
327 irq, cpu); 330 irq, cpu);
328 331
329 cpumask_setall(desc->affinity); 332 cpumask_setall(data->affinity);
330 newcpu = cpumask_any_and(desc->affinity, 333 newcpu = cpumask_any_and(data->affinity,
331 cpu_online_mask); 334 cpu_online_mask);
332 } 335 }
333 336
334 route_irq(desc, irq, newcpu); 337 route_irq(data, irq, newcpu);
335 } 338 }
336 } 339 }
337} 340}
diff --git a/arch/sh/kernel/irq_64.c b/arch/sh/kernel/irq_64.c
index 32365ba0e039..8fc05b997b6d 100644
--- a/arch/sh/kernel/irq_64.c
+++ b/arch/sh/kernel/irq_64.c
@@ -11,17 +11,17 @@
 #include <linux/module.h>
 #include <cpu/registers.h>
 
-void notrace raw_local_irq_restore(unsigned long flags)
+void notrace arch_local_irq_restore(unsigned long flags)
 {
 	unsigned long long __dummy;
 
-	if (flags == RAW_IRQ_DISABLED) {
+	if (flags == ARCH_IRQ_DISABLED) {
 		__asm__ __volatile__ (
 			"getcon " __SR ", %0\n\t"
 			"or %0, %1, %0\n\t"
 			"putcon %0, " __SR "\n\t"
 			: "=&r" (__dummy)
-			: "r" (RAW_IRQ_DISABLED)
+			: "r" (ARCH_IRQ_DISABLED)
 		);
 	} else {
 		__asm__ __volatile__ (
@@ -29,13 +29,13 @@ void notrace raw_local_irq_restore(unsigned long flags)
29 "and %0, %1, %0\n\t" 29 "and %0, %1, %0\n\t"
30 "putcon %0, " __SR "\n\t" 30 "putcon %0, " __SR "\n\t"
31 : "=&r" (__dummy) 31 : "=&r" (__dummy)
32 : "r" (~RAW_IRQ_DISABLED) 32 : "r" (~ARCH_IRQ_DISABLED)
33 ); 33 );
34 } 34 }
35} 35}
36EXPORT_SYMBOL(raw_local_irq_restore); 36EXPORT_SYMBOL(arch_local_irq_restore);
37 37
38unsigned long notrace __raw_local_save_flags(void) 38unsigned long notrace arch_local_save_flags(void)
39{ 39{
40 unsigned long flags; 40 unsigned long flags;
41 41
@@ -43,9 +43,9 @@ unsigned long notrace __raw_local_save_flags(void)
43 "getcon " __SR ", %0\n\t" 43 "getcon " __SR ", %0\n\t"
44 "and %0, %1, %0" 44 "and %0, %1, %0"
45 : "=&r" (flags) 45 : "=&r" (flags)
46 : "r" (RAW_IRQ_DISABLED) 46 : "r" (ARCH_IRQ_DISABLED)
47 ); 47 );
48 48
49 return flags; 49 return flags;
50} 50}
51EXPORT_SYMBOL(__raw_local_save_flags); 51EXPORT_SYMBOL(arch_local_save_flags);
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 2cd42b58cb20..90a15d29feeb 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -365,9 +365,9 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
 	return &user_sh_native_view;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+		 unsigned long addr, unsigned long data)
 {
-	struct user * dummy = NULL;
 	unsigned long __user *datap = (unsigned long __user *)data;
 	int ret;
 
@@ -383,17 +383,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
 		if (addr < sizeof(struct pt_regs))
 			tmp = get_stack_long(child, addr);
-		else if (addr >= (long) &dummy->fpu &&
-			 addr < (long) &dummy->u_fpvalid) {
+		else if (addr >= offsetof(struct user, fpu) &&
+			 addr < offsetof(struct user, u_fpvalid)) {
 			if (!tsk_used_math(child)) {
-				if (addr == (long)&dummy->fpu.fpscr)
+				if (addr == offsetof(struct user, fpu.fpscr))
 					tmp = FPSCR_INIT;
 				else
 					tmp = 0;
-			} else
-				tmp = ((long *)child->thread.xstate)
-					[(addr - (long)&dummy->fpu) >> 2];
-		} else if (addr == (long) &dummy->u_fpvalid)
+			} else {
+				unsigned long index;
+				index = addr - offsetof(struct user, fpu);
+				tmp = ((unsigned long *)child->thread.xstate)
+					[index >> 2];
+			}
+		} else if (addr == offsetof(struct user, u_fpvalid))
 			tmp = !!tsk_used_math(child);
 		else if (addr == PT_TEXT_ADDR)
 			tmp = child->mm->start_code;
@@ -417,13 +420,15 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
 		if (addr < sizeof(struct pt_regs))
 			ret = put_stack_long(child, addr, data);
-		else if (addr >= (long) &dummy->fpu &&
-			 addr < (long) &dummy->u_fpvalid) {
+		else if (addr >= offsetof(struct user, fpu) &&
+			 addr < offsetof(struct user, u_fpvalid)) {
+			unsigned long index;
+			index = addr - offsetof(struct user, fpu);
 			set_stopped_child_used_math(child);
-			((long *)child->thread.xstate)
-				[(addr - (long)&dummy->fpu) >> 2] = data;
+			((unsigned long *)child->thread.xstate)
+				[index >> 2] = data;
 			ret = 0;
-		} else if (addr == (long) &dummy->u_fpvalid) {
+		} else if (addr == offsetof(struct user, u_fpvalid)) {
 			conditional_stopped_child_used_math(data, child);
 			ret = 0;
 		}
@@ -433,35 +438,35 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		return copy_regset_to_user(child, &user_sh_native_view,
 					   REGSET_GENERAL,
 					   0, sizeof(struct pt_regs),
-					   (void __user *)data);
+					   datap);
 	case PTRACE_SETREGS:
 		return copy_regset_from_user(child, &user_sh_native_view,
 					     REGSET_GENERAL,
 					     0, sizeof(struct pt_regs),
-					     (const void __user *)data);
+					     datap);
 #ifdef CONFIG_SH_FPU
 	case PTRACE_GETFPREGS:
 		return copy_regset_to_user(child, &user_sh_native_view,
 					   REGSET_FPU,
 					   0, sizeof(struct user_fpu_struct),
-					   (void __user *)data);
+					   datap);
 	case PTRACE_SETFPREGS:
 		return copy_regset_from_user(child, &user_sh_native_view,
 					     REGSET_FPU,
 					     0, sizeof(struct user_fpu_struct),
-					     (const void __user *)data);
+					     datap);
 #endif
 #ifdef CONFIG_SH_DSP
 	case PTRACE_GETDSPREGS:
 		return copy_regset_to_user(child, &user_sh_native_view,
 					   REGSET_DSP,
 					   0, sizeof(struct pt_dspregs),
-					   (void __user *)data);
+					   datap);
 	case PTRACE_SETDSPREGS:
 		return copy_regset_from_user(child, &user_sh_native_view,
 					     REGSET_DSP,
 					     0, sizeof(struct pt_dspregs),
-					     (const void __user *)data);
+					     datap);
 #endif
 	default:
 		ret = ptrace_request(child, request, addr, data);
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index e0fb065914aa..4436eacddb15 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -383,9 +383,11 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
 	return &user_sh64_native_view;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+		 unsigned long addr, unsigned long data)
 {
 	int ret;
+	unsigned long __user *datap = (unsigned long __user *) data;
 
 	switch (request) {
 	/* read the word at location addr in the USER area. */
@@ -400,13 +402,15 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 			tmp = get_stack_long(child, addr);
 		else if ((addr >= offsetof(struct user, fpu)) &&
 			 (addr < offsetof(struct user, u_fpvalid))) {
-			tmp = get_fpu_long(child, addr - offsetof(struct user, fpu));
+			unsigned long index;
+			index = addr - offsetof(struct user, fpu);
+			tmp = get_fpu_long(child, index);
 		} else if (addr == offsetof(struct user, u_fpvalid)) {
 			tmp = !!tsk_used_math(child);
 		} else {
 			break;
 		}
-		ret = put_user(tmp, (unsigned long *)data);
+		ret = put_user(tmp, datap);
 		break;
 	}
 
@@ -437,7 +441,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		}
 		else if ((addr >= offsetof(struct user, fpu)) &&
 			 (addr < offsetof(struct user, u_fpvalid))) {
-			ret = put_fpu_long(child, addr - offsetof(struct user, fpu), data);
+			unsigned long index;
+			index = addr - offsetof(struct user, fpu);
+			ret = put_fpu_long(child, index, data);
 		}
 		break;
 
@@ -445,23 +451,23 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		return copy_regset_to_user(child, &user_sh64_native_view,
 					   REGSET_GENERAL,
 					   0, sizeof(struct pt_regs),
-					   (void __user *)data);
+					   datap);
 	case PTRACE_SETREGS:
 		return copy_regset_from_user(child, &user_sh64_native_view,
 					     REGSET_GENERAL,
 					     0, sizeof(struct pt_regs),
-					     (const void __user *)data);
+					     datap);
 #ifdef CONFIG_SH_FPU
 	case PTRACE_GETFPREGS:
 		return copy_regset_to_user(child, &user_sh64_native_view,
 					   REGSET_FPU,
 					   0, sizeof(struct user_fpu_struct),
-					   (void __user *)data);
+					   datap);
 	case PTRACE_SETFPREGS:
 		return copy_regset_from_user(child, &user_sh64_native_view,
 					     REGSET_FPU,
 					     0, sizeof(struct user_fpu_struct),
-					     (const void __user *)data);
+					     datap);
 #endif
 	default:
 		ret = ptrace_request(child, request, addr, data);
@@ -471,7 +477,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	return ret;
 }
 
-asmlinkage int sh64_ptrace(long request, long pid, long addr, long data)
+asmlinkage int sh64_ptrace(long request, long pid,
+			   unsigned long addr, unsigned long data)
 {
 #define WPC_DBRMODE 0x0d104008
 	static unsigned long first_call;
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 4e278467f76c..d6b018c7ebdc 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -41,6 +41,7 @@
 #include <asm/smp.h>
 #include <asm/mmu_context.h>
 #include <asm/mmzone.h>
+#include <asm/sparsemem.h>
 
 /*
  * Initialize loops_per_jiffy as 10000000 (1000MIPS).
@@ -52,6 +53,7 @@ struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = {
 		.type			= CPU_SH_NONE,
 		.family			= CPU_FAMILY_UNKNOWN,
 		.loops_per_jiffy	= 10000000,
+		.phys_bits		= MAX_PHYSMEM_BITS,
 	},
 };
 EXPORT_SYMBOL(cpu_data);
@@ -432,6 +434,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	if (c->flags & CPU_HAS_L2_CACHE)
 		show_cacheinfo(m, "scache", c->scache);
 
+	seq_printf(m, "address sizes\t: %u bits physical\n", c->phys_bits);
+
 	seq_printf(m, "bogomips\t: %lu.%02lu\n",
 		   c->loops_per_jiffy/(500000/HZ),
 		   (c->loops_per_jiffy/(5000/HZ)) % 100);
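
The setup.c hunks above add an "address sizes" line to /proc/cpuinfo, with
phys_bits seeded from MAX_PHYSMEM_BITS and then set to 29 or 32 in cpu_init()
depending on __in_29bit_mode(). A minimal userspace check for the new field
might look like this (illustrative only; just the "address sizes" key comes
from the patch):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/cpuinfo", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "address sizes", 13))
				fputs(line, stdout);	/* e.g. "address sizes : 32 bits physical" */
		fclose(f);
		return 0;
	}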