Diffstat (limited to 'arch/m32r/kernel')
-rw-r--r--  arch/m32r/kernel/entry.S        |  7
-rw-r--r--  arch/m32r/kernel/head.S         |  4
-rw-r--r--  arch/m32r/kernel/init_task.c    |  5
-rw-r--r--  arch/m32r/kernel/m32r_ksyms.c   |  6
-rw-r--r--  arch/m32r/kernel/ptrace.c       |  5
-rw-r--r--  arch/m32r/kernel/smp.c          | 33
-rw-r--r--  arch/m32r/kernel/smpboot.c      |  4
-rw-r--r--  arch/m32r/kernel/time.c         | 89
-rw-r--r--  arch/m32r/kernel/traps.c        |  4
-rw-r--r--  arch/m32r/kernel/vmlinux.lds.S  | 78
10 files changed, 59 insertions(+), 176 deletions(-)
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 612d35b082a6..403869833b98 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -118,6 +118,13 @@
 #define resume_kernel		restore_all
 #endif
 
+/* how to get the thread information struct from ASM */
+#define GET_THREAD_INFO(reg)	GET_THREAD_INFO reg
+	.macro GET_THREAD_INFO reg
+	ldi	\reg, #-THREAD_SIZE
+	and	\reg, sp
+	.endm
+
 ENTRY(ret_from_fork)
 	pop	r0
 	bl	schedule_tail
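
[Annotation, not part of the patch] The GET_THREAD_INFO macro added above relies on the kernel stack being THREAD_SIZE-aligned: loading -THREAD_SIZE (which equals ~(THREAD_SIZE - 1) in two's complement) and AND-ing it with the stack pointer rounds sp down to the stack base, where thread_info lives. A minimal C sketch of the same computation, mirroring what current_thread_info() does on m32r (the function name here is hypothetical):

    #include <asm/thread_info.h>

    static inline struct thread_info *current_thread_info_sketch(void)
    {
            unsigned long sp;

            __asm__ __volatile__ ("mv %0, sp" : "=r" (sp));  /* read stack pointer */
            /* round down to the THREAD_SIZE-aligned stack base */
            return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
    }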
diff --git a/arch/m32r/kernel/head.S b/arch/m32r/kernel/head.S
index 0a7194439eb1..a46652dd83e6 100644
--- a/arch/m32r/kernel/head.S
+++ b/arch/m32r/kernel/head.S
@@ -268,13 +268,13 @@ ENTRY(empty_zero_page)
 /*------------------------------------------------------------------------
  * Stack area
  */
-	.section .spi
+	.section .init.data, "aw"
 	ALIGN
 	.global spi_stack_top
 	.zero	1024
 spi_stack_top:
 
-	.section .spu
+	.section .init.data, "aw"
 	ALIGN
 	.global spu_stack_top
 	.zero	1024
diff --git a/arch/m32r/kernel/init_task.c b/arch/m32r/kernel/init_task.c
index fce57e5d3f91..6c42d5f8df50 100644
--- a/arch/m32r/kernel/init_task.c
+++ b/arch/m32r/kernel/init_task.c
@@ -20,9 +20,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
  * way process stacks are handled. This is done by having a special
  * "init_task" linker map entry..
  */
-union thread_union init_thread_union
-	__attribute__((__section__(".data.init_task"))) =
-		{ INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+	{ INIT_THREAD_INFO(init_task) };
 
 /*
  * Initial task structure.
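
[Annotation, not part of the patch] __init_task_data comes from <linux/init_task.h> and is shorthand for the section attribute the old code spelled out by hand, so init_thread_union still lands in the init_task data section that the linker script places with THREAD_SIZE alignment. Paraphrased from the header of this era:

    /* paraphrased sketch of the helper macro, not copied from the patch */
    #define __init_task_data __attribute__((__section__(".data.init_task")))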
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index 22624b51d4d3..700570747a90 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -23,12 +23,6 @@ EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(kernel_thread);
 
-/* Networking helper routines. */
-/* Delay loops */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__delay);
-EXPORT_SYMBOL(__const_udelay);
-
 EXPORT_SYMBOL(strncpy_from_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(clear_user);
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 98b8feb12ed8..98682bba0ed9 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -77,7 +77,7 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
 	struct user * dummy = NULL;
 #endif
 
-	if ((off & 3) || (off < 0) || (off > sizeof(struct user) - 3))
+	if ((off & 3) || off > sizeof(struct user) - 3)
 		return -EIO;
 
 	off >>= 2;
@@ -139,8 +139,7 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
 	struct user * dummy = NULL;
 #endif
 
-	if ((off & 3) || off < 0 ||
-	    off > sizeof(struct user) - 3)
+	if ((off & 3) || off > sizeof(struct user) - 3)
 		return -EIO;
 
 	off >>= 2;
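
[Annotation, not part of the patch] The dropped off < 0 tests were dead code: off is an unsigned long, so the comparison can never be true (and gcc warns about it). A "negative" value passed in from userspace wraps to a huge unsigned number and is already rejected by the upper-bound check. A self-contained illustration, with a hypothetical check() helper:

    #include <stdio.h>

    /* hypothetical helper mirroring the ptrace bounds test */
    static int check(unsigned long off, unsigned long usize)
    {
            return (off & 3) || off > usize - 3;  /* "off < 0" would always be false */
    }

    int main(void)
    {
            /* -4 wraps to ULONG_MAX - 3; the upper-bound test still rejects it */
            printf("%d\n", check((unsigned long)-4, 1024));  /* prints 1 (rejected) */
            return 0;
    }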
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 929e5c9d3ad9..31cef20b2996 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -17,6 +17,7 @@
 
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
@@ -85,7 +86,7 @@ void smp_ipi_timer_interrupt(struct pt_regs *);
 void smp_local_timer_interrupt(void);
 
 static void send_IPI_allbutself(int, int);
-static void send_IPI_mask(cpumask_t, int, int);
+static void send_IPI_mask(const struct cpumask *, int, int);
 unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -113,7 +114,7 @@ unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 void smp_send_reschedule(int cpu_id)
 {
 	WARN_ON(cpu_is_offline(cpu_id));
-	send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
+	send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
 }
 
 /*==========================================================================*
@@ -168,7 +169,7 @@ void smp_flush_cache_all(void)
 	spin_lock(&flushcache_lock);
 	mask=cpus_addr(cpumask);
 	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
-	send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
+	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
 	_flush_cache_copyback_all();
 	while (flushcache_cpumask)
 		mb();
@@ -264,7 +265,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 	if (*mmc != NO_CONTEXT) {
@@ -273,7 +274,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 		if (mm == current->mm)
 			activate_context(mm);
 		else
-			cpu_clear(cpu_id, mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
 		local_irq_restore(flags);
 	}
 	if (!cpus_empty(cpu_mask))
@@ -334,7 +335,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 #ifdef DEBUG_SMP
@@ -424,7 +425,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
-	send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);
+	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
 
 	while (!cpus_empty(flush_cpumask)) {
 		/* nothing. lockup detection does not belong here */
@@ -469,7 +470,7 @@ void smp_invalidate_interrupt(void)
 		if (flush_mm == current->active_mm)
 			activate_context(flush_mm);
 		else
-			cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
 	} else {
 		unsigned long va = flush_va;
 
@@ -546,14 +547,14 @@ static void stop_this_cpu(void *dummy)
 	for ( ; ; );
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
+	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
 }
 
 /*==========================================================================*
@@ -729,7 +730,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
 	cpumask = cpu_online_map;
 	cpu_clear(smp_processor_id(), cpumask);
 
-	send_IPI_mask(cpumask, ipi_num, try);
+	send_IPI_mask(&cpumask, ipi_num, try);
 }
 
 /*==========================================================================*
@@ -752,7 +753,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
  * ---------- --- --------------------------------------------------------
  *
  *==========================================================================*/
-static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
+static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
 {
 	cpumask_t physid_mask, tmp;
 	int cpu_id, phys_id;
@@ -761,11 +762,11 @@ static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
 	if (num_cpus <= 1)	/* NO MP */
 		return;
 
-	cpus_and(tmp, cpumask, cpu_online_map);
-	BUG_ON(!cpus_equal(cpumask, tmp));
+	cpumask_and(&tmp, cpumask, cpu_online_mask);
+	BUG_ON(!cpumask_equal(cpumask, &tmp));
 
 	physid_mask = CPU_MASK_NONE;
-	for_each_cpu_mask(cpu_id, cpumask){
+	for_each_cpu(cpu_id, cpumask) {
 		if ((phys_id = cpu_to_physid(cpu_id)) != -1)
 			cpu_set(phys_id, physid_mask);
 	}
@@ -805,7 +806,7 @@ unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
 
 	if (mask & ~physids_coerce(phys_cpu_present_map))
 		BUG();
-	if (ipi_num >= NR_IPIS)
+	if (ipi_num >= NR_IPIS || ipi_num < 0)
 		BUG();
 
 	mask <<= IPI_SHIFT;
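
[Annotation, not part of the patch] The running theme in smp.c is the cpumask API conversion of that period: a cpumask_t is an NR_CPUS-bit bitmap, so passing it by value copies the whole bitmap onto the stack at every call. Taking const struct cpumask * avoids the copy and lets callers use cpumask_of(cpu), which returns a pointer to a shared constant single-CPU mask instead of building one. A sketch of the resulting calling convention (example_caller is hypothetical):

    static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try);

    static void example_caller(int cpu)
    {
            cpumask_t local = CPU_MASK_NONE;

            cpu_set(cpu, local);                       /* build a local mask... */
            send_IPI_mask(&local, RESCHEDULE_IPI, 1);  /* ...and pass its address */

            /* or, for a single CPU, use the shared constant mask */
            send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI, 1);
    }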
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 2547d6c4a827..e034844cfc0d 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
 		physid_set(phys_id, phys_cpu_present_map);
 #ifndef CONFIG_HOTPLUG_CPU
-	cpu_present_map = cpu_possible_map;
+	init_cpu_present(&cpu_possible_map);
 #endif
 
 	show_mp_info(nr_cpu);
@@ -213,7 +213,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		if (!physid_isset(phys_id, phys_cpu_present_map))
 			continue;
 
-		if ((max_cpus >= 0) && (max_cpus <= cpucount + 1))
+		if (max_cpus <= cpucount + 1)
 			continue;
 
 		do_boot_cpu(phys_id);
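
[Annotation, not part of the patch] Both smpboot.c changes are fallout from the same cleanup: max_cpus >= 0 was dead code because max_cpus is an unsigned int, and init_cpu_present() is the accessor that replaced direct assignment once the present map moved behind helpers. The accessor is essentially the following (paraphrased from kernel/cpu.c of this era; the bitmap name is internal):

    /* paraphrased sketch, not from this patch */
    void init_cpu_present(const struct cpumask *src)
    {
            cpumask_copy(to_cpumask(cpu_present_bits), src);
    }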
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index cada3ba4b990..9cedcef11575 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -33,6 +33,15 @@
 
 #include <asm/hw_irq.h>
 
+#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)
+/* this needs a better home */
+DEFINE_SPINLOCK(rtc_lock);
+
+#ifdef CONFIG_RTC_DRV_CMOS_MODULE
+EXPORT_SYMBOL(rtc_lock);
+#endif
+#endif	/* pc-style 'CMOS' RTC support */
+
 #ifdef CONFIG_SMP
 extern void smp_local_timer_interrupt(void);
 #endif
@@ -48,7 +57,7 @@ extern void smp_local_timer_interrupt(void);
 
 static unsigned long latch;
 
-static unsigned long do_gettimeoffset(void)
+u32 arch_gettimeoffset(void)
 {
 	unsigned long  elapsed_time = 0;	/* [us] */
 
@@ -66,7 +75,7 @@ static unsigned long do_gettimeoffset(void)
 		count = 0;
 
 	count = (latch - count) * TICK_SIZE;
-	elapsed_time = (count + latch / 2) / latch;
+	elapsed_time = DIV_ROUND_CLOSEST(count, latch);
 	/* NOTE: LATCH is equal to the "interval" value (= reload count). */
 
 #else /* CONFIG_SMP */
@@ -84,7 +93,7 @@ static unsigned long do_gettimeoffset(void)
 	p_count = count;
 
 	count = (latch - count) * TICK_SIZE;
-	elapsed_time = (count + latch / 2) / latch;
+	elapsed_time = DIV_ROUND_CLOSEST(count, latch);
 	/* NOTE: LATCH is equal to the "interval" value (= reload count). */
 #endif /* CONFIG_SMP */
 #elif defined(CONFIG_CHIP_M32310)
@@ -93,78 +102,9 @@ static unsigned long do_gettimeoffset(void)
 #error no chip configuration
 #endif
 
-	return elapsed_time;
-}
-
-/*
- * This version of gettimeofday has near microsecond resolution.
- */
-void do_gettimeofday(struct timeval *tv)
-{
-	unsigned long seq;
-	unsigned long usec, sec;
-	unsigned long max_ntp_tick = tick_usec - tickadj;
-
-	do {
-		seq = read_seqbegin(&xtime_lock);
-
-		usec = do_gettimeoffset();
-
-		/*
-		 * If time_adjust is negative then NTP is slowing the clock
-		 * so make sure not to go into next possible interval.
-		 * Better to lose some accuracy than have time go backwards..
-		 */
-		if (unlikely(time_adjust < 0))
-			usec = min(usec, max_ntp_tick);
-
-		sec = xtime.tv_sec;
-		usec += (xtime.tv_nsec / 1000);
-	} while (read_seqretry(&xtime_lock, seq));
-
-	while (usec >= 1000000) {
-		usec -= 1000000;
-		sec++;
-	}
-
-	tv->tv_sec = sec;
-	tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
-	time_t wtm_sec, sec = tv->tv_sec;
-	long wtm_nsec, nsec = tv->tv_nsec;
-
-	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-		return -EINVAL;
-
-	write_seqlock_irq(&xtime_lock);
-	/*
-	 * This is revolting. We need to set "xtime" correctly. However, the
-	 * value in this location is the value at the most recent update of
-	 * wall time. Discover what correction gettimeofday() would have
-	 * made, and then undo it!
-	 */
-	nsec -= do_gettimeoffset() * NSEC_PER_USEC;
-
-	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
-	set_normalized_timespec(&xtime, sec, nsec);
-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
-	ntp_clear();
-	write_sequnlock_irq(&xtime_lock);
-	clock_was_set();
-
-	return 0;
-}
+	return elapsed_time * 1000;
+}
 
-EXPORT_SYMBOL(do_settimeofday);
-
 /*
  * In order to set the CMOS clock precisely, set_rtc_mmss has to be
  * called 500 ms after the second nowtime has started, because when
@@ -192,6 +132,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 #ifndef CONFIG_SMP
 	profile_tick(CPU_PROFILING);
 #endif
+	/* XXX FIXME. Uh, the xtime_lock should be held here, no? */
 	do_timer(1);
 
 #ifndef CONFIG_SMP
@@ -270,7 +211,7 @@ void __init time_init(void)
 
 	bus_clock = boot_cpu_data.bus_clock;
 	divide = boot_cpu_data.timer_divide;
-	latch = (bus_clock/divide + HZ / 2) / HZ;
+	latch = DIV_ROUND_CLOSEST(bus_clock/divide, HZ);
 
 	printk("Timer start : latch = %ld\n", latch);
 
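
[Annotation, not part of the patch] This hunk converts m32r to the generic ARCH_USES_GETTIMEOFFSET path: arch_gettimeoffset() must return nanoseconds since the last tick (hence the * 1000 applied to a microsecond value), and the generic timekeeping core then provides do_gettimeofday()/do_settimeofday(), which is why the hand-rolled xtime_lock/seqlock versions could be deleted wholesale. The DIV_ROUND_CLOSEST() substitutions are behavior-preserving; for these unsigned operands the macro from <linux/kernel.h> computes exactly the rounding the open-coded expressions performed. Roughly:

    /* paraphrased; the real macro also evaluates the divisor only once */
    #define DIV_ROUND_CLOSEST(x, divisor)  (((x) + ((divisor) / 2)) / (divisor))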
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index 03b14e55cd89..fbd109031df3 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -104,8 +104,8 @@ static void set_eit_vector_entries(void)
 	eit_vector[186] = (unsigned long)smp_call_function_interrupt;
 	eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
 	eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
-	eit_vector[189] = (unsigned long)smp_call_function_single_interrupt;
-	eit_vector[190] = 0;
+	eit_vector[189] = 0;	/* CPU_BOOT_IPI */
+	eit_vector[190] = (unsigned long)smp_call_function_single_interrupt;
 	eit_vector[191] = 0;
 #endif
 	_flush_cache_copyback_all();
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index de5e21cca6a5..7da94eaa082b 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -4,6 +4,7 @@
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/addrspace.h>
 #include <asm/page.h>
+#include <asm/thread_info.h>
 
 OUTPUT_ARCH(m32r)
 #if defined(__LITTLE_ENDIAN__)
@@ -40,83 +41,24 @@ SECTIONS
 #endif
   _etext = .;			/* End of text section */
 
-  . = ALIGN(16);		/* Exception table */
-  __start___ex_table = .;
-  __ex_table : { *(__ex_table) }
-  __stop___ex_table = .;
+  EXCEPTION_TABLE(16)
+  NOTES
 
   RODATA
-
-  /* writeable */
-  .data : {			/* Data */
-	*(.spu)
-	*(.spi)
-	DATA_DATA
-	CONSTRUCTORS
-  }
-
-  . = ALIGN(4096);
-  __nosave_begin = .;
-  .data_nosave : { *(.data.nosave) }
-  . = ALIGN(4096);
-  __nosave_end = .;
-
-  . = ALIGN(32);
-  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
-
+  RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
   _edata = .;			/* End of data section */
 
-  . = ALIGN(8192);		/* init_task */
-  .data.init_task : { *(.data.init_task) }
-
   /* will be freed after init */
-  . = ALIGN(4096);		/* Init code and data */
+  . = ALIGN(PAGE_SIZE);		/* Init code and data */
   __init_begin = .;
-  .init.text : {
-	_sinittext = .;
-	INIT_TEXT
-	_einittext = .;
-  }
-  .init.data : { INIT_DATA }
-  . = ALIGN(16);
-  __setup_start = .;
-  .init.setup : { *(.init.setup) }
-  __setup_end = .;
-  __initcall_start = .;
-  .initcall.init : {
-	INITCALLS
-  }
-  __initcall_end = .;
-  __con_initcall_start = .;
-  .con_initcall.init : { *(.con_initcall.init) }
-  __con_initcall_end = .;
-  SECURITY_INIT
-  . = ALIGN(4);
-  __alt_instructions = .;
-  .altinstructions : { *(.altinstructions) }
-  __alt_instructions_end = .;
-  .altinstr_replacement : { *(.altinstr_replacement) }
-  /* .exit.text is discard at runtime, not link time, to deal with references
-     from .altinstructions and .eh_frame */
-  .exit.text : { EXIT_TEXT }
-  .exit.data : { EXIT_DATA }
-
-#ifdef CONFIG_BLK_DEV_INITRD
-  . = ALIGN(4096);
-  __initramfs_start = .;
-  .init.ramfs : { *(.init.ramfs) }
-  __initramfs_end = .;
-#endif
-
-  PERCPU(4096)
-  . = ALIGN(4096);
+  INIT_TEXT_SECTION(PAGE_SIZE)
+  INIT_DATA_SECTION(16)
+  PERCPU(PAGE_SIZE)
+  . = ALIGN(PAGE_SIZE);
   __init_end = .;
   /* freed after init ends here */
 
-  __bss_start = .;		/* BSS */
-  .bss : { *(.bss) }
-  . = ALIGN(4);
-  __bss_stop = .;
+  BSS_SECTION(0, 0, 4)
 
   _end = . ;
 
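
[Annotation, not part of the patch] The linker-script rewrite replaces roughly sixty open-coded lines with the shared macros from <asm-generic/vmlinux.lds.h>; the new <asm/thread_info.h> include supplies THREAD_SIZE for the init_task data alignment. RW_DATA_SECTION(cacheline, pagealigned, inittask) bundles the data, nosave, page-aligned, cacheline-aligned and init_task sections that were previously spelled out. Its definition from that era is roughly the following; note it no longer lists the .spu/.spi input sections, which is why head.S above moves those boot stacks into .init.data:

    /* paraphrased from asm-generic/vmlinux.lds.h of this era */
    #define RW_DATA_SECTION(cacheline, pagealigned, inittask)	\
            . = ALIGN(PAGE_SIZE);					\
            .data : AT(ADDR(.data) - LOAD_OFFSET) {			\
                    INIT_TASK_DATA(inittask)			\
                    NOSAVE_DATA					\
                    PAGE_ALIGNED_DATA(pagealigned)			\
                    CACHELINE_ALIGNED_DATA(cacheline)		\
                    READ_MOSTLY_DATA(cacheline)			\
                    DATA_DATA					\
                    CONSTRUCTORS					\
            }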