path: root/arch/m32r
author    Herbert Xu <herbert@gondor.apana.org.au>    2009-12-01 02:16:22 -0500
committer Herbert Xu <herbert@gondor.apana.org.au>    2009-12-01 02:16:22 -0500
commit    838632438145ac6863377eb12d8b8eef9c55d288 (patch)
tree      fbb0757df837f3c75a99c518a3596c38daef162d /arch/m32r
parent    9996508b3353063f2d6c48c1a28a84543d72d70b (diff)
parent    29e553631b2a0d4eebd23db630572e1027a9967a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/m32r')
-rw-r--r--  arch/m32r/Kconfig                      |   9
-rw-r--r--  arch/m32r/boot/compressed/Makefile     |  18
-rw-r--r--  arch/m32r/boot/compressed/install.sh   |   4
-rw-r--r--  arch/m32r/boot/compressed/misc.c       | 143
-rw-r--r--  arch/m32r/include/asm/hardirq.h        |  15
-rw-r--r--  arch/m32r/include/asm/io.h             |   7
-rw-r--r--  arch/m32r/include/asm/mman.h           |  18
-rw-r--r--  arch/m32r/include/asm/mmu_context.h    |   4
-rw-r--r--  arch/m32r/include/asm/page.h           |   4
-rw-r--r--  arch/m32r/include/asm/processor.h      |   2
-rw-r--r--  arch/m32r/include/asm/smp.h            |   2
-rw-r--r--  arch/m32r/include/asm/thread_info.h    |  15
-rw-r--r--  arch/m32r/kernel/entry.S               |   7
-rw-r--r--  arch/m32r/kernel/head.S                |   4
-rw-r--r--  arch/m32r/kernel/init_task.c           |   5
-rw-r--r--  arch/m32r/kernel/m32r_ksyms.c          |   6
-rw-r--r--  arch/m32r/kernel/ptrace.c              |   5
-rw-r--r--  arch/m32r/kernel/smp.c                 |  33
-rw-r--r--  arch/m32r/kernel/smpboot.c             |   4
-rw-r--r--  arch/m32r/kernel/time.c                |  89
-rw-r--r--  arch/m32r/kernel/traps.c               |   4
-rw-r--r--  arch/m32r/kernel/vmlinux.lds.S         |  78
-rw-r--r--  arch/m32r/lib/delay.c                  |   4
-rw-r--r--  arch/m32r/mm/discontig.c               |   5
-rw-r--r--  arch/m32r/mm/init.c                    |   2
-rw-r--r--  arch/m32r/mm/mmu.S                     |  12
26 files changed, 145 insertions(+), 354 deletions(-)
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index cabba332cc48..3a9319f93e89 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -11,6 +11,9 @@ config M32R
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select INIT_ALL_POSSIBLE
+	select HAVE_KERNEL_GZIP
+	select HAVE_KERNEL_BZIP2
+	select HAVE_KERNEL_LZMA
 
 config SBUS
 	bool
@@ -41,6 +44,12 @@ config HZ
 	int
 	default 100
 
+config GENERIC_TIME
+	def_bool y
+
+config ARCH_USES_GETTIMEOFFSET
+	def_bool y
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
diff --git a/arch/m32r/boot/compressed/Makefile b/arch/m32r/boot/compressed/Makefile
index 560484ae35ec..177716b1d613 100644
--- a/arch/m32r/boot/compressed/Makefile
+++ b/arch/m32r/boot/compressed/Makefile
@@ -1,11 +1,11 @@
 #
-# linux/arch/sh/boot/compressed/Makefile
+# linux/arch/m32r/boot/compressed/Makefile
 #
 # create a compressed vmlinux image from the original vmlinux
 #
 
-targets := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o \
-	piggy.o vmlinux.lds
+targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
+	vmlinux.bin.lzma head.o misc.o piggy.o vmlinux.lds
 
 OBJECTS = $(obj)/head.o $(obj)/misc.o
 
@@ -27,6 +27,12 @@ $(obj)/vmlinux.bin: vmlinux FORCE
 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,gzip)
 
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,bzip2)
+
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,lzma)
+
 CFLAGS_misc.o += -fpic
 
 ifdef CONFIG_MMU
@@ -37,5 +43,9 @@ endif
 
 OBJCOPYFLAGS += -R .empty_zero_page
 
-$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
+suffix_$(CONFIG_KERNEL_GZIP) = gz
+suffix_$(CONFIG_KERNEL_BZIP2) = bz2
+suffix_$(CONFIG_KERNEL_LZMA) = lzma
+
+$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) FORCE
 	$(call if_changed,ld)
diff --git a/arch/m32r/boot/compressed/install.sh b/arch/m32r/boot/compressed/install.sh
index 6d72e9e72697..16e5a0a13437 100644
--- a/arch/m32r/boot/compressed/install.sh
+++ b/arch/m32r/boot/compressed/install.sh
@@ -24,8 +24,8 @@
 
 # User may have a custom install script
 
-if [ -x /sbin/installkernel ]; then
-  exec /sbin/installkernel "$@"
+if [ -x /sbin/${INSTALLKERNEL} ]; then
+  exec /sbin/${INSTALLKERNEL} "$@"
 fi
 
 if [ "$2" = "zImage" ]; then
diff --git a/arch/m32r/boot/compressed/misc.c b/arch/m32r/boot/compressed/misc.c
index d394292498c0..370d60881977 100644
--- a/arch/m32r/boot/compressed/misc.c
+++ b/arch/m32r/boot/compressed/misc.c
@@ -9,140 +9,49 @@
  * Adapted for SH by Stuart Menefy, Aug 1999
  *
  * 2003-02-12: Support M32R by Takeo Takahashi
- *             This is based on arch/sh/boot/compressed/misc.c.
  */
 
-#include <linux/string.h>
-
 /*
  * gzip declarations
  */
-
-#define OF(args)  args
 #define STATIC static
 
 #undef memset
 #undef memcpy
 #define memzero(s, n)     memset ((s), 0, (n))
 
-typedef unsigned char  uch;
-typedef unsigned short ush;
-typedef unsigned long  ulg;
-
-#define WSIZE 0x8000		/* Window size must be at least 32k, */
-				/* and a power of two */
-
-static uch *inbuf;		/* input buffer */
-static uch window[WSIZE];	/* Sliding window buffer */
-
-static unsigned insize = 0;	/* valid bytes in inbuf */
-static unsigned inptr = 0;	/* index of next byte to be processed in inbuf */
-static unsigned outcnt = 0;	/* bytes in output buffer */
-
-/* gzip flag byte */
-#define ASCII_FLAG   0x01	/* bit 0 set: file probably ASCII text */
-#define CONTINUATION 0x02	/* bit 1 set: continuation of multi-part gzip file */
-#define EXTRA_FIELD  0x04	/* bit 2 set: extra field present */
-#define ORIG_NAME    0x08	/* bit 3 set: original file name present */
-#define COMMENT      0x10	/* bit 4 set: file comment present */
-#define ENCRYPTED    0x20	/* bit 5 set: file is encrypted */
-#define RESERVED     0xC0	/* bit 6,7: reserved */
-
-#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
-
-/* Diagnostic functions */
-#ifdef DEBUG
-#  define Assert(cond,msg) {if(!(cond)) error(msg);}
-#  define Trace(x) fprintf x
-#  define Tracev(x) {if (verbose) fprintf x ;}
-#  define Tracevv(x) {if (verbose>1) fprintf x ;}
-#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
-#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
-#else
-#  define Assert(cond,msg)
-#  define Trace(x)
-#  define Tracev(x)
-#  define Tracevv(x)
-#  define Tracec(c,x)
-#  define Tracecv(c,x)
-#endif
-
-static int  fill_inbuf(void);
-static void flush_window(void);
 static void error(char *m);
 
-static unsigned char *input_data;
-static int input_len;
-
-static long bytes_out = 0;
-static uch *output_data;
-static unsigned long output_ptr = 0;
-
 #include "m32r_sio.c"
 
 static unsigned long free_mem_ptr;
 static unsigned long free_mem_end_ptr;
 
-#define HEAP_SIZE 0x10000
-
-#include "../../../../lib/inflate.c"
-
-void* memset(void* s, int c, size_t n)
+#ifdef CONFIG_KERNEL_BZIP2
+static void *memset(void *s, int c, size_t n)
 {
-	int i;
-	char *ss = (char*)s;
+	char *ss = s;
 
-	for (i=0;i<n;i++) ss[i] = c;
+	while (n--)
+		*ss++ = c;
 	return s;
 }
+#endif
 
-void* memcpy(void* __dest, __const void* __src,
-	size_t __n)
-{
-	int i;
-	char *d = (char *)__dest, *s = (char *)__src;
-
-	for (i=0;i<__n;i++) d[i] = s[i];
-	return __dest;
-}
-
-/* ===========================================================================
- * Fill the input buffer. This is called only when the buffer is empty
- * and at least one byte is really needed.
- */
-static int fill_inbuf(void)
-{
-	if (insize != 0) {
-		error("ran out of input data");
-	}
-
-	inbuf = input_data;
-	insize = input_len;
-	inptr = 1;
-	return inbuf[0];
-}
+#ifdef CONFIG_KERNEL_GZIP
+#define BOOT_HEAP_SIZE 0x10000
+#include "../../../../lib/decompress_inflate.c"
+#endif
 
-/* ===========================================================================
- * Write the output window window[0..outcnt-1] and update crc and bytes_out.
- * (Used for the decompressed data only.)
- */
-static void flush_window(void)
-{
-	ulg c = crc;		/* temporary variable */
-	unsigned n;
-	uch *in, *out, ch;
+#ifdef CONFIG_KERNEL_BZIP2
+#define BOOT_HEAP_SIZE 0x400000
+#include "../../../../lib/decompress_bunzip2.c"
+#endif
 
-	in = window;
-	out = &output_data[output_ptr];
-	for (n = 0; n < outcnt; n++) {
-		ch = *out++ = *in++;
-		c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
-	}
-	crc = c;
-	bytes_out += (ulg)outcnt;
-	output_ptr += (ulg)outcnt;
-	outcnt = 0;
-}
+#ifdef CONFIG_KERNEL_LZMA
+#define BOOT_HEAP_SIZE 0x10000
+#include "../../../../lib/decompress_unlzma.c"
+#endif
 
 static void error(char *x)
 {
@@ -153,20 +62,20 @@ static void error(char *x)
 	while(1);	/* Halt */
 }
 
-/* return decompressed size */
 void
 decompress_kernel(int mmu_on, unsigned char *zimage_data,
 		  unsigned int zimage_len, unsigned long heap)
 {
+	unsigned char *input_data = zimage_data;
+	int input_len = zimage_len;
+	unsigned char *output_data;
+
 	output_data = (unsigned char *)CONFIG_MEMORY_START + 0x2000
 		+ (mmu_on ? 0x80000000 : 0);
 	free_mem_ptr = heap;
-	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
-	input_data = zimage_data;
-	input_len = zimage_len;
+	free_mem_end_ptr = free_mem_ptr + BOOT_HEAP_SIZE;
 
-	makecrc();
-	puts("Uncompressing Linux... ");
-	gunzip();
-	puts("Ok, booting the kernel.\n");
+	puts("\nDecompressing Linux... ");
+	decompress(input_data, input_len, NULL, NULL, output_data, NULL, error);
+	puts("done.\nBooting the kernel.\n");
 }
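For context, a sketch of the callback contract behind the decompress() call above, following the decompress_fn convention used by the shared lib/decompress_*.c helpers; treat the exact signature and parameter names as assumptions rather than a quote from this tree:

/* Sketch only -- the shape of the shared decompressor entry point that
 * misc.c now delegates to.  Passing fill == NULL and flush == NULL means
 * "the whole compressed image is already in memory, write the result
 * straight to 'output'", which is how decompress_kernel() calls it. */
typedef int (*decompress_fn)(unsigned char *inbuf, int len,
			     int (*fill)(void *buf, unsigned int size),
			     int (*flush)(void *buf, unsigned int size),
			     unsigned char *output,
			     int *posp,
			     void (*error)(char *msg));

Each included decompress_*.c file supplies one such function and aliases it to decompress, so only the BOOT_HEAP_SIZE value and the #include differ per compression choice, as the hunks above show.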
diff --git a/arch/m32r/include/asm/hardirq.h b/arch/m32r/include/asm/hardirq.h
index cb8aa762f235..4c31c0ae215e 100644
--- a/arch/m32r/include/asm/hardirq.h
+++ b/arch/m32r/include/asm/hardirq.h
@@ -2,14 +2,7 @@
 #ifndef __ASM_HARDIRQ_H
 #define __ASM_HARDIRQ_H
 
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-typedef struct {
-	unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
+#include <asm/irq.h>
 
 #if NR_IRQS > 256
 #define HARDIRQ_BITS	9
@@ -26,11 +19,7 @@ typedef struct {
 # error HARDIRQ_BITS is too low!
 #endif
 
-static inline void ack_bad_irq(int irq)
-{
-	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
-	BUG();
-}
+#include <asm-generic/hardirq.h>
 
 #endif /* __ASM_HARDIRQ_H */
 #endif /* __KERNEL__ */
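The per-arch irq_cpustat_t and ack_bad_irq() can be dropped because <asm-generic/hardirq.h> supplies equivalent defaults. Roughly what that generic header provided at the time (paraphrased from memory, not part of this commit):

typedef struct {
	unsigned int __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h>	/* standard irq_cpustat_t accessors */

#ifndef ack_bad_irq
static inline void ack_bad_irq(unsigned int irq)
{
	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
}
#endif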
diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h
index d06933bd6318..4010f1fc5b65 100644
--- a/arch/m32r/include/asm/io.h
+++ b/arch/m32r/include/asm/io.h
@@ -162,6 +162,13 @@ static inline void _writel(unsigned long l, unsigned long addr)
 #define __raw_writew writew
 #define __raw_writel writel
 
+#define ioread8 read
+#define ioread16 readw
+#define ioread32 readl
+#define iowrite8 writeb
+#define iowrite16 writew
+#define iowrite32 writel
+
 #define mmiowb()
 
 #define flush_write_buffers() do { } while (0)  /* M32R_FIXME */
diff --git a/arch/m32r/include/asm/mman.h b/arch/m32r/include/asm/mman.h
index 04a5f40aa401..8eebf89f5ab1 100644
--- a/arch/m32r/include/asm/mman.h
+++ b/arch/m32r/include/asm/mman.h
@@ -1,17 +1 @@
-#ifndef __M32R_MMAN_H__
-#define __M32R_MMAN_H__
-
-#include <asm-generic/mman-common.h>
-
-#define MAP_GROWSDOWN	0x0100		/* stack-like segment */
-#define MAP_DENYWRITE	0x0800		/* ETXTBSY */
-#define MAP_EXECUTABLE	0x1000		/* mark it as an executable */
-#define MAP_LOCKED	0x2000		/* pages are locked */
-#define MAP_NORESERVE	0x4000		/* don't check for reservations */
-#define MAP_POPULATE	0x8000		/* populate (prefault) pagetables */
-#define MAP_NONBLOCK	0x10000		/* do not block on IO */
-
-#define MCL_CURRENT	1		/* lock all current mappings */
-#define MCL_FUTURE	2		/* lock all future mappings */
-
-#endif /* __M32R_MMAN_H__ */
+#include <asm-generic/mman.h>
diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h
index 91909e5dd9d0..a70a3df33635 100644
--- a/arch/m32r/include/asm/mmu_context.h
+++ b/arch/m32r/include/asm/mmu_context.h
@@ -127,7 +127,7 @@ static inline void switch_mm(struct mm_struct *prev,
 
 	if (prev != next) {
 #ifdef CONFIG_SMP
-		cpu_set(cpu, next->cpu_vm_mask);
+		cpumask_set_cpu(cpu, mm_cpumask(next));
 #endif /* CONFIG_SMP */
 		/* Set MPTB = next->pgd */
 		*(volatile unsigned long *)MPTB = (unsigned long)next->pgd;
@@ -135,7 +135,7 @@ static inline void switch_mm(struct mm_struct *prev,
 	}
 #ifdef CONFIG_SMP
 	else
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
+		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)))
 			activate_context(next);
 #endif /* CONFIG_SMP */
 }
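The switch from open-coded cpu_* operations on mm->cpu_vm_mask to the cpumask_* API on mm_cpumask() is mechanical: mm_cpumask(mm) yields a struct cpumask pointer, and the newer helpers take pointers rather than cpumask_t values. A hypothetical illustration (these wrapper names are not in the tree):

static inline void mark_mm_on_cpu(struct mm_struct *mm, int cpu)
{
	/* old style: cpu_set(cpu, mm->cpu_vm_mask); */
	cpumask_set_cpu(cpu, mm_cpumask(mm));
}

static inline int mm_already_on_cpu(struct mm_struct *mm, int cpu)
{
	/* old style: cpu_test_and_set(cpu, mm->cpu_vm_mask); */
	return cpumask_test_and_set_cpu(cpu, mm_cpumask(mm));
}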
diff --git a/arch/m32r/include/asm/page.h b/arch/m32r/include/asm/page.h
index 11777f7a5628..725ede8f2889 100644
--- a/arch/m32r/include/asm/page.h
+++ b/arch/m32r/include/asm/page.h
@@ -1,9 +1,11 @@
 #ifndef _ASM_M32R_PAGE_H
 #define _ASM_M32R_PAGE_H
 
+#include <linux/const.h>
+
 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT	12
-#define PAGE_SIZE	(1UL << PAGE_SHIFT)
+#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 
 #ifndef __ASSEMBLY__
diff --git a/arch/m32r/include/asm/processor.h b/arch/m32r/include/asm/processor.h
index 1a997fc148a2..8397c249989b 100644
--- a/arch/m32r/include/asm/processor.h
+++ b/arch/m32r/include/asm/processor.h
@@ -140,8 +140,6 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_EIP(tsk)  ((tsk)->thread.lr)
 #define KSTK_ESP(tsk)  ((tsk)->thread.sp)
 
-#define THREAD_SIZE (2*PAGE_SIZE)
-
 #define cpu_relax()	barrier()
 
 #endif /* _ASM_M32R_PROCESSOR_H */
diff --git a/arch/m32r/include/asm/smp.h b/arch/m32r/include/asm/smp.h
index b96a6d2ffbc3..e67ded1aab91 100644
--- a/arch/m32r/include/asm/smp.h
+++ b/arch/m32r/include/asm/smp.h
@@ -88,7 +88,7 @@ extern void smp_send_timer(void);
 extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #endif	/* not __ASSEMBLY__ */
 
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index 71578151a403..ed240b6e8e77 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -55,6 +55,8 @@ struct thread_info {
 
 #define PREEMPT_ACTIVE		0x10000000
 
+#define THREAD_SIZE (PAGE_SIZE << 1)
+
 /*
  * macros/functions for gaining access to the thread information structure
  */
@@ -76,8 +78,6 @@ struct thread_info {
 #define init_thread_info	(init_thread_union.thread_info)
 #define init_stack		(init_thread_union.stack)
 
-#define THREAD_SIZE (2*PAGE_SIZE)
-
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
 {
@@ -125,17 +125,6 @@ static inline unsigned int get_thread_fault_code(void)
 	return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
 }
 
-#else /* !__ASSEMBLY__ */
-
-#define THREAD_SIZE	8192
-
-/* how to get the thread information struct from ASM */
-#define GET_THREAD_INFO(reg)	GET_THREAD_INFO reg
-	.macro GET_THREAD_INFO reg
-	ldi	\reg, #-THREAD_SIZE
-	and	\reg, sp
-	.endm
-
 #endif
 
 /*
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 612d35b082a6..403869833b98 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -118,6 +118,13 @@
 #define resume_kernel		restore_all
 #endif
 
+/* how to get the thread information struct from ASM */
+#define GET_THREAD_INFO(reg)	GET_THREAD_INFO reg
+	.macro GET_THREAD_INFO reg
+	ldi	\reg, #-THREAD_SIZE
+	and	\reg, sp
+	.endm
+
 ENTRY(ret_from_fork)
 	pop	r0
 	bl	schedule_tail
diff --git a/arch/m32r/kernel/head.S b/arch/m32r/kernel/head.S
index 0a7194439eb1..a46652dd83e6 100644
--- a/arch/m32r/kernel/head.S
+++ b/arch/m32r/kernel/head.S
@@ -268,13 +268,13 @@ ENTRY(empty_zero_page)
 /*------------------------------------------------------------------------
  * Stack area
  */
-	.section .spi
+	.section .init.data, "aw"
 	ALIGN
 	.global spi_stack_top
 	.zero	1024
 spi_stack_top:
 
-	.section .spu
+	.section .init.data, "aw"
 	ALIGN
 	.global spu_stack_top
 	.zero	1024
diff --git a/arch/m32r/kernel/init_task.c b/arch/m32r/kernel/init_task.c
index fce57e5d3f91..6c42d5f8df50 100644
--- a/arch/m32r/kernel/init_task.c
+++ b/arch/m32r/kernel/init_task.c
@@ -20,9 +20,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
  * way process stacks are handled. This is done by having a special
  * "init_task" linker map entry..
  */
-union thread_union init_thread_union
-	__attribute__((__section__(".data.init_task"))) =
-		{ INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+	{ INIT_THREAD_INFO(init_task) };
 
 /*
  * Initial task structure.
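__init_task_data is a section-attribute shorthand from <linux/init_task.h>; assuming its usual definition at the time, the new form expands to essentially the old one:

/* Assumed expansion, shown for illustration only: */
#define __init_task_data __attribute__((__section__(".data.init_task")))

union thread_union init_thread_union __init_task_data =
	{ INIT_THREAD_INFO(init_task) };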
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index 22624b51d4d3..700570747a90 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -23,12 +23,6 @@ EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(kernel_thread);
 
-/* Networking helper routines. */
-/* Delay loops */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__delay);
-EXPORT_SYMBOL(__const_udelay);
-
 EXPORT_SYMBOL(strncpy_from_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(clear_user);
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 98b8feb12ed8..98682bba0ed9 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -77,7 +77,7 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
 	struct user * dummy = NULL;
 #endif
 
-	if ((off & 3) || (off < 0) || (off > sizeof(struct user) - 3))
+	if ((off & 3) || off > sizeof(struct user) - 3)
 		return -EIO;
 
 	off >>= 2;
@@ -139,8 +139,7 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
 	struct user * dummy = NULL;
 #endif
 
-	if ((off & 3) || off < 0 ||
-	    off > sizeof(struct user) - 3)
+	if ((off & 3) || off > sizeof(struct user) - 3)
 		return -EIO;
 
 	off >>= 2;
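The dropped off < 0 tests were dead code: off is an unsigned long, so that comparison is always false, and a wrapped "negative" value is already rejected by the upper-bound check. A small stand-alone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned long off = (unsigned long)-4;	/* a "negative" offset wraps */

	/* Always prints 0: the comparison is tautologically false for an
	 * unsigned type (compilers typically warn about it). */
	printf("off < 0            : %d\n", off < 0);
	/* The wrapped value compares as a huge positive number, so the
	 * upper-bound check alone catches it. */
	printf("off > limit (100)  : %d\n", off > 100UL);
	return 0;
}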
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 929e5c9d3ad9..31cef20b2996 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -17,6 +17,7 @@
 
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
@@ -85,7 +86,7 @@ void smp_ipi_timer_interrupt(struct pt_regs *);
 void smp_local_timer_interrupt(void);
 
 static void send_IPI_allbutself(int, int);
-static void send_IPI_mask(cpumask_t, int, int);
+static void send_IPI_mask(const struct cpumask *, int, int);
 unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -113,7 +114,7 @@ unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 void smp_send_reschedule(int cpu_id)
 {
 	WARN_ON(cpu_is_offline(cpu_id));
-	send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
+	send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
 }
 
 /*==========================================================================*
@@ -168,7 +169,7 @@ void smp_flush_cache_all(void)
 	spin_lock(&flushcache_lock);
 	mask=cpus_addr(cpumask);
 	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
-	send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
+	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
 	_flush_cache_copyback_all();
 	while (flushcache_cpumask)
 		mb();
@@ -264,7 +265,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 	if (*mmc != NO_CONTEXT) {
@@ -273,7 +274,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 		if (mm == current->mm)
 			activate_context(mm);
 		else
-			cpu_clear(cpu_id, mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
 		local_irq_restore(flags);
 	}
 	if (!cpus_empty(cpu_mask))
@@ -334,7 +335,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 #ifdef DEBUG_SMP
@@ -424,7 +425,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
-	send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);
+	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
 
 	while (!cpus_empty(flush_cpumask)) {
 		/* nothing. lockup detection does not belong here */
@@ -469,7 +470,7 @@ void smp_invalidate_interrupt(void)
 		if (flush_mm == current->active_mm)
 			activate_context(flush_mm);
 		else
-			cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
 	} else {
 		unsigned long va = flush_va;
 
@@ -546,14 +547,14 @@ static void stop_this_cpu(void *dummy)
 	for ( ; ; );
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
+	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
 }
 
 /*==========================================================================*
@@ -729,7 +730,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
 	cpumask = cpu_online_map;
 	cpu_clear(smp_processor_id(), cpumask);
 
-	send_IPI_mask(cpumask, ipi_num, try);
+	send_IPI_mask(&cpumask, ipi_num, try);
 }
 
 /*==========================================================================*
@@ -752,7 +753,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
  * ---------- --- --------------------------------------------------------
  *
  *==========================================================================*/
-static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
+static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
 {
 	cpumask_t physid_mask, tmp;
 	int cpu_id, phys_id;
@@ -761,11 +762,11 @@ static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
 	if (num_cpus <= 1)	/* NO MP */
 		return;
 
-	cpus_and(tmp, cpumask, cpu_online_map);
-	BUG_ON(!cpus_equal(cpumask, tmp));
+	cpumask_and(&tmp, cpumask, cpu_online_mask);
+	BUG_ON(!cpumask_equal(cpumask, &tmp));
 
 	physid_mask = CPU_MASK_NONE;
-	for_each_cpu_mask(cpu_id, cpumask){
+	for_each_cpu(cpu_id, cpumask) {
 		if ((phys_id = cpu_to_physid(cpu_id)) != -1)
 			cpu_set(phys_id, physid_mask);
 	}
@@ -805,7 +806,7 @@ unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
 
 	if (mask & ~physids_coerce(phys_cpu_present_map))
 		BUG();
-	if (ipi_num >= NR_IPIS)
+	if (ipi_num >= NR_IPIS || ipi_num < 0)
 		BUG();
 
 	mask <<= IPI_SHIFT;
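With send_IPI_mask() now taking const struct cpumask *, callers pass cpumask_of(cpu) or the address of a local cpumask_t, and iteration uses for_each_cpu() on the pointer. Hypothetical caller shapes (illustration only, not from this commit):

static void kick_one_cpu(int cpu)
{
	/* cpumask_of() returns a const struct cpumask *, so no copy is made */
	send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI, 1);
}

static void kick_cpu_set(const struct cpumask *mask, int ipi_num)
{
	int cpu;

	for_each_cpu(cpu, mask)		/* iterates over a cpumask pointer */
		pr_debug("sending IPI %d to cpu %d\n", ipi_num, cpu);
	send_IPI_mask(mask, ipi_num, 0);
}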
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 2547d6c4a827..e034844cfc0d 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
 		physid_set(phys_id, phys_cpu_present_map);
 #ifndef CONFIG_HOTPLUG_CPU
-	cpu_present_map = cpu_possible_map;
+	init_cpu_present(&cpu_possible_map);
 #endif
 
 	show_mp_info(nr_cpu);
@@ -213,7 +213,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		if (!physid_isset(phys_id, phys_cpu_present_map))
 			continue;
 
-		if ((max_cpus >= 0) && (max_cpus <= cpucount + 1))
+		if (max_cpus <= cpucount + 1)
 			continue;
 
 		do_boot_cpu(phys_id);
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index cada3ba4b990..9cedcef11575 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -33,6 +33,15 @@
 
 #include <asm/hw_irq.h>
 
+#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)
+/* this needs a better home */
+DEFINE_SPINLOCK(rtc_lock);
+
+#ifdef CONFIG_RTC_DRV_CMOS_MODULE
+EXPORT_SYMBOL(rtc_lock);
+#endif
+#endif /* pc-style 'CMOS' RTC support */
+
 #ifdef CONFIG_SMP
 extern void smp_local_timer_interrupt(void);
 #endif
@@ -48,7 +57,7 @@ extern void smp_local_timer_interrupt(void);
 
 static unsigned long latch;
 
-static unsigned long do_gettimeoffset(void)
+u32 arch_gettimeoffset(void)
 {
 	unsigned long elapsed_time = 0;	/* [us] */
 
@@ -66,7 +75,7 @@ static unsigned long do_gettimeoffset(void)
 		count = 0;
 
 	count = (latch - count) * TICK_SIZE;
-	elapsed_time = (count + latch / 2) / latch;
+	elapsed_time = DIV_ROUND_CLOSEST(count, latch);
 	/* NOTE: LATCH is equal to the "interval" value (= reload count). */
 
 #else /* CONFIG_SMP */
@@ -84,7 +93,7 @@ static unsigned long do_gettimeoffset(void)
 		p_count = count;
 
 	count = (latch - count) * TICK_SIZE;
-	elapsed_time = (count + latch / 2) / latch;
+	elapsed_time = DIV_ROUND_CLOSEST(count, latch);
 	/* NOTE: LATCH is equal to the "interval" value (= reload count). */
 #endif /* CONFIG_SMP */
 #elif defined(CONFIG_CHIP_M32310)
@@ -93,78 +102,9 @@ static unsigned long do_gettimeoffset(void)
 #error no chip configuration
 #endif
 
-	return elapsed_time;
-}
-
-/*
- * This version of gettimeofday has near microsecond resolution.
- */
-void do_gettimeofday(struct timeval *tv)
-{
-	unsigned long seq;
-	unsigned long usec, sec;
-	unsigned long max_ntp_tick = tick_usec - tickadj;
-
-	do {
-		seq = read_seqbegin(&xtime_lock);
-
-		usec = do_gettimeoffset();
-
-		/*
-		 * If time_adjust is negative then NTP is slowing the clock
-		 * so make sure not to go into next possible interval.
-		 * Better to lose some accuracy than have time go backwards..
-		 */
-		if (unlikely(time_adjust < 0))
-			usec = min(usec, max_ntp_tick);
-
-		sec = xtime.tv_sec;
-		usec += (xtime.tv_nsec / 1000);
-	} while (read_seqretry(&xtime_lock, seq));
-
-	while (usec >= 1000000) {
-		usec -= 1000000;
-		sec++;
-	}
-
-	tv->tv_sec = sec;
-	tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
-	time_t wtm_sec, sec = tv->tv_sec;
-	long wtm_nsec, nsec = tv->tv_nsec;
-
-	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-		return -EINVAL;
-
-	write_seqlock_irq(&xtime_lock);
-	/*
-	 * This is revolting. We need to set "xtime" correctly. However, the
-	 * value in this location is the value at the most recent update of
-	 * wall time. Discover what correction gettimeofday() would have
-	 * made, and then undo it!
-	 */
-	nsec -= do_gettimeoffset() * NSEC_PER_USEC;
-
-	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
-	set_normalized_timespec(&xtime, sec, nsec);
-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
-	ntp_clear();
-	write_sequnlock_irq(&xtime_lock);
-	clock_was_set();
-
-	return 0;
+	return elapsed_time * 1000;
 }
 
-EXPORT_SYMBOL(do_settimeofday);
-
 /*
  * In order to set the CMOS clock precisely, set_rtc_mmss has to be
  * called 500 ms after the second nowtime has started, because when
@@ -192,6 +132,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 #ifndef CONFIG_SMP
 	profile_tick(CPU_PROFILING);
 #endif
+	/* XXX FIXME. Uh, the xtime_lock should be held here, no? */
 	do_timer(1);
 
 #ifndef CONFIG_SMP
@@ -270,7 +211,7 @@ void __init time_init(void)
 
 	bus_clock = boot_cpu_data.bus_clock;
 	divide = boot_cpu_data.timer_divide;
-	latch = (bus_clock/divide + HZ / 2) / HZ;
+	latch = DIV_ROUND_CLOSEST(bus_clock/divide, HZ);
 
 	printk("Timer start : latch = %ld\n", latch);
 
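The deleted do_gettimeofday()/do_settimeofday() pair is replaced by the GENERIC_TIME plus ARCH_USES_GETTIMEOFFSET combination selected in the Kconfig hunk above: the architecture only reports the time elapsed since the last tick via arch_gettimeoffset(), in nanoseconds (hence the * 1000 applied to a microsecond count), and the generic timekeeping core does the rest. A simplified sketch of that contract, under the assumption that the core adds the offset to the wall time recorded at the last tick:

u32 arch_gettimeoffset(void);	/* ns elapsed since the last timer tick */

/* Roughly what the generic code does with it (simplified sketch): */
static void sample_wall_time(struct timespec *ts)
{
	*ts = current_kernel_time();		/* wall time at the last tick */
	timespec_add_ns(ts, arch_gettimeoffset());
}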
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index 03b14e55cd89..fbd109031df3 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -104,8 +104,8 @@ static void set_eit_vector_entries(void)
 	eit_vector[186] = (unsigned long)smp_call_function_interrupt;
 	eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
 	eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
-	eit_vector[189] = (unsigned long)smp_call_function_single_interrupt;
-	eit_vector[190] = 0;
+	eit_vector[189] = 0;	/* CPU_BOOT_IPI */
+	eit_vector[190] = (unsigned long)smp_call_function_single_interrupt;
 	eit_vector[191] = 0;
 #endif
 	_flush_cache_copyback_all();
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index de5e21cca6a5..7da94eaa082b 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -4,6 +4,7 @@
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/addrspace.h>
 #include <asm/page.h>
+#include <asm/thread_info.h>
 
 OUTPUT_ARCH(m32r)
 #if defined(__LITTLE_ENDIAN__)
@@ -40,83 +41,24 @@ SECTIONS
 #endif
 	_etext = .;			/* End of text section */
 
-	. = ALIGN(16);			/* Exception table */
-	__start___ex_table = .;
-	__ex_table : { *(__ex_table) }
-	__stop___ex_table = .;
+	EXCEPTION_TABLE(16)
+	NOTES
 
 	RODATA
-
-	/* writeable */
-	.data : {			/* Data */
-		*(.spu)
-		*(.spi)
-		DATA_DATA
-		CONSTRUCTORS
-	}
-
-	. = ALIGN(4096);
-	__nosave_begin = .;
-	.data_nosave : { *(.data.nosave) }
-	. = ALIGN(4096);
-	__nosave_end = .;
-
-	. = ALIGN(32);
-	.data.cacheline_aligned : { *(.data.cacheline_aligned) }
-
+	RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
 	_edata = .;			/* End of data section */
 
-	. = ALIGN(8192);		/* init_task */
-	.data.init_task : { *(.data.init_task) }
-
 	/* will be freed after init */
-	. = ALIGN(4096);		/* Init code and data */
+	. = ALIGN(PAGE_SIZE);		/* Init code and data */
 	__init_begin = .;
-	.init.text : {
-		_sinittext = .;
-		INIT_TEXT
-		_einittext = .;
-	}
-	.init.data : { INIT_DATA }
-	. = ALIGN(16);
-	__setup_start = .;
-	.init.setup : { *(.init.setup) }
-	__setup_end = .;
-	__initcall_start = .;
-	.initcall.init : {
-		INITCALLS
-	}
-	__initcall_end = .;
-	__con_initcall_start = .;
-	.con_initcall.init : { *(.con_initcall.init) }
-	__con_initcall_end = .;
-	SECURITY_INIT
-	. = ALIGN(4);
-	__alt_instructions = .;
-	.altinstructions : { *(.altinstructions) }
-	__alt_instructions_end = .;
-	.altinstr_replacement : { *(.altinstr_replacement) }
-	/* .exit.text is discard at runtime, not link time, to deal with references
-	   from .altinstructions and .eh_frame */
-	.exit.text : { EXIT_TEXT }
-	.exit.data : { EXIT_DATA }
-
-#ifdef CONFIG_BLK_DEV_INITRD
-	. = ALIGN(4096);
-	__initramfs_start = .;
-	.init.ramfs : { *(.init.ramfs) }
-	__initramfs_end = .;
-#endif
-
-	PERCPU(4096)
-	. = ALIGN(4096);
+	INIT_TEXT_SECTION(PAGE_SIZE)
+	INIT_DATA_SECTION(16)
+	PERCPU(PAGE_SIZE)
+	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
 
-	__bss_start = .;		/* BSS */
-	.bss : { *(.bss) }
-	. = ALIGN(4);
-	__bss_stop = .;
+	BSS_SECTION(0, 0, 4)
 
 	_end = . ;
 
diff --git a/arch/m32r/lib/delay.c b/arch/m32r/lib/delay.c
index ced549be80f5..940f4837e42b 100644
--- a/arch/m32r/lib/delay.c
+++ b/arch/m32r/lib/delay.c
@@ -122,4 +122,8 @@ void __ndelay(unsigned long nsecs)
 {
 	__const_udelay(nsecs * 0x00005);  /* 2**32 / 1000000000 (rounded up) */
 }
+
+EXPORT_SYMBOL(__delay);
+EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(__udelay);
 EXPORT_SYMBOL(__ndelay);
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
index b7a78ad429b7..5d2858f6eede 100644
--- a/arch/m32r/mm/discontig.c
+++ b/arch/m32r/mm/discontig.c
@@ -32,6 +32,9 @@ typedef struct {
 } mem_prof_t;
 static mem_prof_t mem_prof[MAX_NUMNODES];
 
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+
 static void __init mem_prof_init(void)
 {
 	unsigned long start_pfn, holes, free_pfn;
@@ -42,7 +45,7 @@ static void __init mem_prof_init(void)
 	/* Node#0 SDRAM */
 	mp = &mem_prof[0];
 	mp->start_pfn = PFN_UP(CONFIG_MEMORY_START);
-	mp->pages = PFN_DOWN(CONFIG_MEMORY_SIZE);
+	mp->pages = PFN_DOWN(memory_end - memory_start);
 	mp->holes = 0;
 	mp->free_pfn = PFN_UP(__pa(_end));
 
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index 24d429f9358a..9f581df3952b 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -171,7 +171,7 @@ void __init mem_init(void)
 
 	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
 		"%dk reserved, %dk data, %dk init)\n",
-		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+		nr_free_pages() << (PAGE_SHIFT-10),
 		num_physpages << (PAGE_SHIFT-10),
 		codesize >> 10,
 		reservedpages << (PAGE_SHIFT-10),
diff --git a/arch/m32r/mm/mmu.S b/arch/m32r/mm/mmu.S
index 49a6d16a3d58..e9491a5ae827 100644
--- a/arch/m32r/mm/mmu.S
+++ b/arch/m32r/mm/mmu.S
@@ -150,9 +150,13 @@ ENTRY(tme_handler)
 
 	; pmd = pmd_offset(pgd, address);
 	ld	r3, @r3			; r3: pmd data
-	ldi	r2, #-4096
 	beqz	r3, 3f			; pmd_none(*pmd) ?
 
+	and3	r2, r3, #0xfff
+	add3	r2, r2, #-355		; _KERNPG_TABLE(=0x163)
+	bnez	r2, 3f			; pmd_bad(*pmd) ?
+	ldi	r2, #-4096
+
 	; pte = pte_offset(pmd, address);
 	and	r2, r3			; r2: pte base addr
 	srl3	r3, r0, #10
@@ -263,9 +267,9 @@ ENTRY(tme_handler)
 	ld	r1, @r3			; r1: pmd
 	beqz	r1, 3f			; pmd_none(*pmd) ?
 ;
-	and3	r1, r1, #0xeff
-	ldi	r4, #611		; _KERNPG_TABLE(=611)
-	bne	r1, r4, 3f		; !pmd_bad(*pmd) ?
+	and3	r1, r1, #0x3ff
+	ldi	r4, #0x163		; _KERNPG_TABLE(=0x163)
+	bne	r1, r4, 3f		; pmd_bad(*pmd) ?
 
 	.fillinsn
 4: