author		Frederic Weisbecker <fweisbec@gmail.com>	2009-10-17 19:09:09 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2009-10-17 19:12:33 -0400
commit		0f8f86c7bdd1c954fbe153af437a0d91a6c5721a (patch)
tree		94a8d419a470a4f9852ca397bb9bbe48db92ff5c /arch/m32r
parent		dca2d6ac09d9ef59ff46820d4f0c94b08a671202 (diff)
parent		f39cdf25bf77219676ec5360980ac40b1a7e144a (diff)
Merge commit 'perf/core' into perf/hw-breakpoint
Conflicts:
	kernel/Makefile
	kernel/trace/Makefile
	kernel/trace/trace.h
	samples/Makefile
Merge reason: We need to be uptodate with the perf events development
branch because we plan to rewrite the breakpoints API on top of
perf events.
Diffstat (limited to 'arch/m32r')
-rw-r--r--	arch/m32r/Kconfig	6
-rw-r--r--	arch/m32r/boot/compressed/install.sh	4
-rw-r--r--	arch/m32r/include/asm/hardirq.h	15
-rw-r--r--	arch/m32r/include/asm/io.h	7
-rw-r--r--	arch/m32r/include/asm/mman.h	18
-rw-r--r--	arch/m32r/include/asm/mmu_context.h	4
-rw-r--r--	arch/m32r/include/asm/page.h	4
-rw-r--r--	arch/m32r/include/asm/processor.h	2
-rw-r--r--	arch/m32r/include/asm/smp.h	2
-rw-r--r--	arch/m32r/include/asm/thread_info.h	15
-rw-r--r--	arch/m32r/kernel/entry.S	7
-rw-r--r--	arch/m32r/kernel/head.S	4
-rw-r--r--	arch/m32r/kernel/init_task.c	5
-rw-r--r--	arch/m32r/kernel/m32r_ksyms.c	6
-rw-r--r--	arch/m32r/kernel/ptrace.c	5
-rw-r--r--	arch/m32r/kernel/smp.c	31
-rw-r--r--	arch/m32r/kernel/smpboot.c	4
-rw-r--r--	arch/m32r/kernel/time.c	83
-rw-r--r--	arch/m32r/kernel/traps.c	4
-rw-r--r--	arch/m32r/kernel/vmlinux.lds.S	88
-rw-r--r--	arch/m32r/lib/delay.c	4
-rw-r--r--	arch/m32r/mm/discontig.c	5
-rw-r--r--	arch/m32r/mm/init.c	2
-rw-r--r--	arch/m32r/mm/mmu.S	12
24 files changed, 100 insertions, 237 deletions
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index cabba332cc48..c41234f1b825 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -41,6 +41,12 @@ config HZ
 	int
 	default 100
 
+config GENERIC_TIME
+	def_bool y
+
+config ARCH_USES_GETTIMEOFFSET
+	def_bool y
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
diff --git a/arch/m32r/boot/compressed/install.sh b/arch/m32r/boot/compressed/install.sh
index 6d72e9e72697..16e5a0a13437 100644
--- a/arch/m32r/boot/compressed/install.sh
+++ b/arch/m32r/boot/compressed/install.sh
@@ -24,8 +24,8 @@
 
 # User may have a custom install script
 
-if [ -x /sbin/installkernel ]; then
-  exec /sbin/installkernel "$@"
+if [ -x /sbin/${INSTALLKERNEL} ]; then
+  exec /sbin/${INSTALLKERNEL} "$@"
 fi
 
 if [ "$2" = "zImage" ]; then
diff --git a/arch/m32r/include/asm/hardirq.h b/arch/m32r/include/asm/hardirq.h
index cb8aa762f235..4c31c0ae215e 100644
--- a/arch/m32r/include/asm/hardirq.h
+++ b/arch/m32r/include/asm/hardirq.h
@@ -2,14 +2,7 @@
 #ifndef __ASM_HARDIRQ_H
 #define __ASM_HARDIRQ_H
 
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-typedef struct {
-	unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+#include <asm/irq.h>
 
 #if NR_IRQS > 256
 #define HARDIRQ_BITS 9
@@ -26,11 +19,7 @@ typedef struct {
 # error HARDIRQ_BITS is too low!
 #endif
 
-static inline void ack_bad_irq(int irq)
-{
-	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
-	BUG();
-}
+#include <asm-generic/hardirq.h>
 
 #endif /* __ASM_HARDIRQ_H */
 #endif /* __KERNEL__ */
diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h
index d06933bd6318..4010f1fc5b65 100644
--- a/arch/m32r/include/asm/io.h
+++ b/arch/m32r/include/asm/io.h
@@ -162,6 +162,13 @@ static inline void _writel(unsigned long l, unsigned long addr)
 #define __raw_writew writew
 #define __raw_writel writel
 
+#define ioread8 read
+#define ioread16 readw
+#define ioread32 readl
+#define iowrite8 writeb
+#define iowrite16 writew
+#define iowrite32 writel
+
 #define mmiowb()
 
 #define flush_write_buffers() do { } while (0) /* M32R_FIXME */
diff --git a/arch/m32r/include/asm/mman.h b/arch/m32r/include/asm/mman.h
index 04a5f40aa401..8eebf89f5ab1 100644
--- a/arch/m32r/include/asm/mman.h
+++ b/arch/m32r/include/asm/mman.h
@@ -1,17 +1 @@
-#ifndef __M32R_MMAN_H__
-#define __M32R_MMAN_H__
-
-#include <asm-generic/mman-common.h>
-
-#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define MAP_LOCKED 0x2000 /* pages are locked */
-#define MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-
-#define MCL_CURRENT 1 /* lock all current mappings */
-#define MCL_FUTURE 2 /* lock all future mappings */
-
-#endif /* __M32R_MMAN_H__ */
+#include <asm-generic/mman.h>
diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h
index 91909e5dd9d0..a70a3df33635 100644
--- a/arch/m32r/include/asm/mmu_context.h
+++ b/arch/m32r/include/asm/mmu_context.h
@@ -127,7 +127,7 @@ static inline void switch_mm(struct mm_struct *prev,
 
 	if (prev != next) {
 #ifdef CONFIG_SMP
-		cpu_set(cpu, next->cpu_vm_mask);
+		cpumask_set_cpu(cpu, mm_cpumask(next));
 #endif /* CONFIG_SMP */
 		/* Set MPTB = next->pgd */
 		*(volatile unsigned long *)MPTB = (unsigned long)next->pgd;
@@ -135,7 +135,7 @@ static inline void switch_mm(struct mm_struct *prev,
 	}
 #ifdef CONFIG_SMP
 	else
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
+		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)))
 			activate_context(next);
 #endif /* CONFIG_SMP */
 }
diff --git a/arch/m32r/include/asm/page.h b/arch/m32r/include/asm/page.h
index 11777f7a5628..725ede8f2889 100644
--- a/arch/m32r/include/asm/page.h
+++ b/arch/m32r/include/asm/page.h
@@ -1,9 +1,11 @@
 #ifndef _ASM_M32R_PAGE_H
 #define _ASM_M32R_PAGE_H
 
+#include <linux/const.h>
+
 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT 12
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK (~(PAGE_SIZE-1))
 
 #ifndef __ASSEMBLY__
diff --git a/arch/m32r/include/asm/processor.h b/arch/m32r/include/asm/processor.h
index 1a997fc148a2..8397c249989b 100644
--- a/arch/m32r/include/asm/processor.h
+++ b/arch/m32r/include/asm/processor.h
@@ -140,8 +140,6 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_EIP(tsk) ((tsk)->thread.lr)
 #define KSTK_ESP(tsk) ((tsk)->thread.sp)
 
-#define THREAD_SIZE (2*PAGE_SIZE)
-
 #define cpu_relax() barrier()
 
 #endif /* _ASM_M32R_PROCESSOR_H */
diff --git a/arch/m32r/include/asm/smp.h b/arch/m32r/include/asm/smp.h
index b96a6d2ffbc3..e67ded1aab91 100644
--- a/arch/m32r/include/asm/smp.h
+++ b/arch/m32r/include/asm/smp.h
@@ -88,7 +88,7 @@ extern void smp_send_timer(void);
 extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #endif /* not __ASSEMBLY__ */
 
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index 71578151a403..ed240b6e8e77 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -55,6 +55,8 @@ struct thread_info {
 
 #define PREEMPT_ACTIVE 0x10000000
 
+#define THREAD_SIZE (PAGE_SIZE << 1)
+
 /*
  * macros/functions for gaining access to the thread information structure
  */
@@ -76,8 +78,6 @@ struct thread_info {
 #define init_thread_info (init_thread_union.thread_info)
 #define init_stack (init_thread_union.stack)
 
-#define THREAD_SIZE (2*PAGE_SIZE)
-
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
 {
@@ -125,17 +125,6 @@ static inline unsigned int get_thread_fault_code(void)
 	return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
 }
 
-#else /* !__ASSEMBLY__ */
-
-#define THREAD_SIZE 8192
-
-/* how to get the thread information struct from ASM */
-#define GET_THREAD_INFO(reg) GET_THREAD_INFO reg
-	.macro GET_THREAD_INFO reg
-	ldi \reg, #-THREAD_SIZE
-	and \reg, sp
-	.endm
-
 #endif
 
 /*
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 612d35b082a6..403869833b98 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -118,6 +118,13 @@
 #define resume_kernel restore_all
 #endif
 
+/* how to get the thread information struct from ASM */
+#define GET_THREAD_INFO(reg) GET_THREAD_INFO reg
+	.macro GET_THREAD_INFO reg
+	ldi \reg, #-THREAD_SIZE
+	and \reg, sp
+	.endm
+
 ENTRY(ret_from_fork)
 	pop r0
 	bl schedule_tail
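
Note (editor's illustration, not code from this patch): the GET_THREAD_INFO macro added above works because THREAD_SIZE, now defined once in thread_info.h as (PAGE_SIZE << 1), is a power of two, so ANDing the stack pointer with -THREAD_SIZE rounds it down to the base of the kernel stack where thread_info lives. A minimal stand-alone C sketch of the same operation, with hypothetical stand-in constants:

#include <stdint.h>

#define PAGE_SIZE_SKETCH   4096UL                   /* hypothetical page size */
#define THREAD_SIZE_SKETCH (PAGE_SIZE_SKETCH << 1)  /* mirrors THREAD_SIZE (PAGE_SIZE << 1) */

/* Same effect as "ldi \reg, #-THREAD_SIZE; and \reg, sp" above. */
static inline uintptr_t thread_info_base(uintptr_t sp)
{
	return sp & ~(THREAD_SIZE_SKETCH - 1);
}
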
diff --git a/arch/m32r/kernel/head.S b/arch/m32r/kernel/head.S
index 0a7194439eb1..a46652dd83e6 100644
--- a/arch/m32r/kernel/head.S
+++ b/arch/m32r/kernel/head.S
@@ -268,13 +268,13 @@ ENTRY(empty_zero_page)
 /*------------------------------------------------------------------------
  * Stack area
  */
-	.section .spi
+	.section .init.data, "aw"
 	ALIGN
 	.global spi_stack_top
 	.zero 1024
 spi_stack_top:
 
-	.section .spu
+	.section .init.data, "aw"
 	ALIGN
 	.global spu_stack_top
 	.zero 1024
diff --git a/arch/m32r/kernel/init_task.c b/arch/m32r/kernel/init_task.c
index fce57e5d3f91..6c42d5f8df50 100644
--- a/arch/m32r/kernel/init_task.c
+++ b/arch/m32r/kernel/init_task.c
@@ -20,9 +20,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
  * way process stacks are handled. This is done by having a special
  * "init_task" linker map entry..
  */
-union thread_union init_thread_union
-	__attribute__((__section__(".data.init_task"))) =
-		{ INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+	{ INIT_THREAD_INFO(init_task) };
 
 /*
  * Initial task structure.
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index 22624b51d4d3..700570747a90 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -23,12 +23,6 @@ EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(kernel_thread);
 
-/* Networking helper routines. */
-/* Delay loops */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__delay);
-EXPORT_SYMBOL(__const_udelay);
-
 EXPORT_SYMBOL(strncpy_from_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(clear_user);
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 98b8feb12ed8..98682bba0ed9 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -77,7 +77,7 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
 	struct user * dummy = NULL;
 #endif
 
-	if ((off & 3) || (off < 0) || (off > sizeof(struct user) - 3))
+	if ((off & 3) || off > sizeof(struct user) - 3)
 		return -EIO;
 
 	off >>= 2;
@@ -139,8 +139,7 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
 	struct user * dummy = NULL;
 #endif
 
-	if ((off & 3) || off < 0 ||
-	    off > sizeof(struct user) - 3)
+	if ((off & 3) || off > sizeof(struct user) - 3)
 		return -EIO;
 
 	off >>= 2;
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 929e5c9d3ad9..8a88f1f0a3e2 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -17,6 +17,7 @@
 
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
@@ -85,7 +86,7 @@ void smp_ipi_timer_interrupt(struct pt_regs *);
 void smp_local_timer_interrupt(void);
 
 static void send_IPI_allbutself(int, int);
-static void send_IPI_mask(cpumask_t, int, int);
+static void send_IPI_mask(const struct cpumask *, int, int);
 unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -113,7 +114,7 @@ unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 void smp_send_reschedule(int cpu_id)
 {
 	WARN_ON(cpu_is_offline(cpu_id));
-	send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
+	send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
 }
 
 /*==========================================================================*
@@ -168,7 +169,7 @@ void smp_flush_cache_all(void)
 	spin_lock(&flushcache_lock);
 	mask=cpus_addr(cpumask);
 	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
-	send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
+	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
 	_flush_cache_copyback_all();
 	while (flushcache_cpumask)
 		mb();
@@ -264,7 +265,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 	if (*mmc != NO_CONTEXT) {
@@ -273,7 +274,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 		if (mm == current->mm)
 			activate_context(mm);
 		else
-			cpu_clear(cpu_id, mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
 		local_irq_restore(flags);
 	}
 	if (!cpus_empty(cpu_mask))
@@ -334,7 +335,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 #ifdef DEBUG_SMP
@@ -424,7 +425,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
-	send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);
+	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
 
 	while (!cpus_empty(flush_cpumask)) {
 		/* nothing. lockup detection does not belong here */
@@ -469,7 +470,7 @@ void smp_invalidate_interrupt(void)
 		if (flush_mm == current->active_mm)
 			activate_context(flush_mm);
 		else
-			cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
 	} else {
 		unsigned long va = flush_va;
 
@@ -546,14 +547,14 @@ static void stop_this_cpu(void *dummy)
 	for ( ; ; );
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
+	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
 }
 
 /*==========================================================================*
@@ -729,7 +730,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
 	cpumask = cpu_online_map;
 	cpu_clear(smp_processor_id(), cpumask);
 
-	send_IPI_mask(cpumask, ipi_num, try);
+	send_IPI_mask(&cpumask, ipi_num, try);
 }
 
 /*==========================================================================*
@@ -752,7 +753,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
-static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
+static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
 {
 	cpumask_t physid_mask, tmp;
 	int cpu_id, phys_id;
@@ -761,11 +762,11 @@ static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
 	if (num_cpus <= 1) /* NO MP */
 		return;
 
-	cpus_and(tmp, cpumask, cpu_online_map);
-	BUG_ON(!cpus_equal(cpumask, tmp));
+	cpumask_and(&tmp, cpumask, cpu_online_mask);
+	BUG_ON(!cpumask_equal(cpumask, &tmp));
 
 	physid_mask = CPU_MASK_NONE;
-	for_each_cpu_mask(cpu_id, cpumask){
+	for_each_cpu(cpu_id, cpumask) {
 		if ((phys_id = cpu_to_physid(cpu_id)) != -1)
 			cpu_set(phys_id, physid_mask);
 	}
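
Note (editor's sketch, not code from the patch): the smp.c hunks above are mostly a mechanical switch from the older helpers that take a cpumask_t by value (cpu_set, cpumask_of_cpu, cpus_and) to the pointer-based struct cpumask API (cpumask_set_cpu, cpumask_of, cpumask_and), which avoids copying a potentially large bitmap at every call. A hypothetical stand-alone miniature of the calling-convention change, with made-up names and sizes:

#include <stdio.h>

/* Hypothetical miniature of a cpumask; the real one lives in <linux/cpumask.h>. */
struct cpumask_sketch { unsigned long bits; };

/* Old style: the whole mask is copied onto the stack at every call. */
static void send_ipi_byvalue(struct cpumask_sketch mask, int ipi)
{
	printf("old: ipi %d to mask %#lx (copy)\n", ipi, mask.bits);
}

/* New style (what send_IPI_mask() becomes above): only a pointer is passed. */
static void send_ipi_bypointer(const struct cpumask_sketch *mask, int ipi)
{
	printf("new: ipi %d to mask %#lx (no copy)\n", ipi, mask->bits);
}

int main(void)
{
	struct cpumask_sketch mask = { .bits = 1UL << 2 };	/* CPU 2 set */

	send_ipi_byvalue(mask, 1);
	send_ipi_bypointer(&mask, 1);
	return 0;
}

The same shift is why cpumask_of_cpu() (which yields a value) gives way to cpumask_of() (which yields a const pointer) in smp_send_reschedule() and arch_send_call_function_single_ipi() above.
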
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 2547d6c4a827..e034844cfc0d 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
 		physid_set(phys_id, phys_cpu_present_map);
 #ifndef CONFIG_HOTPLUG_CPU
-	cpu_present_map = cpu_possible_map;
+	init_cpu_present(&cpu_possible_map);
 #endif
 
 	show_mp_info(nr_cpu);
@@ -213,7 +213,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		if (!physid_isset(phys_id, phys_cpu_present_map))
 			continue;
 
-		if ((max_cpus >= 0) && (max_cpus <= cpucount + 1))
+		if (max_cpus <= cpucount + 1)
 			continue;
 
 		do_boot_cpu(phys_id);
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index cada3ba4b990..e7fee0f198d5 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -33,6 +33,15 @@
 
 #include <asm/hw_irq.h>
 
+#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)
+/* this needs a better home */
+DEFINE_SPINLOCK(rtc_lock);
+
+#ifdef CONFIG_RTC_DRV_CMOS_MODULE
+EXPORT_SYMBOL(rtc_lock);
+#endif
+#endif /* pc-style 'CMOS' RTC support */
+
 #ifdef CONFIG_SMP
 extern void smp_local_timer_interrupt(void);
 #endif
@@ -48,7 +57,7 @@ extern void smp_local_timer_interrupt(void);
 
 static unsigned long latch;
 
-static unsigned long do_gettimeoffset(void)
+u32 arch_gettimeoffset(void)
 {
 	unsigned long elapsed_time = 0; /* [us] */
 
@@ -93,78 +102,9 @@ static unsigned long do_gettimeoffset(void)
 #error no chip configuration
 #endif
 
-	return elapsed_time;
-}
-
-/*
- * This version of gettimeofday has near microsecond resolution.
- */
-void do_gettimeofday(struct timeval *tv)
-{
-	unsigned long seq;
-	unsigned long usec, sec;
-	unsigned long max_ntp_tick = tick_usec - tickadj;
-
-	do {
-		seq = read_seqbegin(&xtime_lock);
-
-		usec = do_gettimeoffset();
-
-		/*
-		 * If time_adjust is negative then NTP is slowing the clock
-		 * so make sure not to go into next possible interval.
-		 * Better to lose some accuracy than have time go backwards..
-		 */
-		if (unlikely(time_adjust < 0))
-			usec = min(usec, max_ntp_tick);
-
-		sec = xtime.tv_sec;
-		usec += (xtime.tv_nsec / 1000);
-	} while (read_seqretry(&xtime_lock, seq));
-
-	while (usec >= 1000000) {
-		usec -= 1000000;
-		sec++;
-	}
-
-	tv->tv_sec = sec;
-	tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
-	time_t wtm_sec, sec = tv->tv_sec;
-	long wtm_nsec, nsec = tv->tv_nsec;
-
-	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-		return -EINVAL;
-
-	write_seqlock_irq(&xtime_lock);
-	/*
-	 * This is revolting. We need to set "xtime" correctly. However, the
-	 * value in this location is the value at the most recent update of
-	 * wall time. Discover what correction gettimeofday() would have
-	 * made, and then undo it!
-	 */
-	nsec -= do_gettimeoffset() * NSEC_PER_USEC;
-
-	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
-	set_normalized_timespec(&xtime, sec, nsec);
-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
-	ntp_clear();
-	write_sequnlock_irq(&xtime_lock);
-	clock_was_set();
-
-	return 0;
+	return elapsed_time * 1000;
 }
 
-EXPORT_SYMBOL(do_settimeofday);
-
 /*
  * In order to set the CMOS clock precisely, set_rtc_mmss has to be
  * called 500 ms after the second nowtime has started, because when
@@ -192,6 +132,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 #ifndef CONFIG_SMP
 	profile_tick(CPU_PROFILING);
 #endif
+	/* XXX FIXME. Uh, the xtime_lock should be held here, no? */
 	do_timer(1);
 
 #ifndef CONFIG_SMP
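
Note (editor's sketch, simplified and not taken from this patch): this hunk plugs m32r into the GENERIC_TIME / ARCH_USES_GETTIMEOFFSET path selected in the Kconfig hunk above. The arch no longer provides its own do_gettimeofday(); instead the generic timekeeping code adds arch_gettimeoffset() to the time recorded at the last tick. The old helper counted microseconds, and the generic code expects nanoseconds, hence the "* 1000". A hypothetical stand-alone miniature of that contract:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for kernel state: wall time as of the most recent timer tick. */
static struct { int64_t tv_sec; int64_t tv_nsec; } xtime_sketch = { 1255820949, 0 };

/* Models arch_gettimeoffset(): nanoseconds elapsed since that tick.
 * The patch derives the value from the timer latch in microseconds and
 * multiplies by 1000. */
static uint32_t arch_gettimeoffset_sketch(void)
{
	uint32_t elapsed_us = 250;	/* hypothetical timer read-back */
	return elapsed_us * 1000;
}

int main(void)
{
	/* Roughly what the generic gettimeofday path does with the offset. */
	int64_t ns = xtime_sketch.tv_nsec + arch_gettimeoffset_sketch();
	printf("%lld.%09lld\n",
	       (long long)(xtime_sketch.tv_sec + ns / 1000000000),
	       (long long)(ns % 1000000000));
	return 0;
}
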
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index 03b14e55cd89..fbd109031df3 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -104,8 +104,8 @@ static void set_eit_vector_entries(void)
 	eit_vector[186] = (unsigned long)smp_call_function_interrupt;
 	eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
 	eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
-	eit_vector[189] = (unsigned long)smp_call_function_single_interrupt;
-	eit_vector[190] = 0;
+	eit_vector[189] = 0; /* CPU_BOOT_IPI */
+	eit_vector[190] = (unsigned long)smp_call_function_single_interrupt;
 	eit_vector[191] = 0;
 #endif
 	_flush_cache_copyback_all();
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index 4179adf6c624..8ceb6181d805 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -4,6 +4,7 @@
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/addrspace.h>
 #include <asm/page.h>
+#include <asm/thread_info.h>
 
 OUTPUT_ARCH(m32r)
 #if defined(__LITTLE_ENDIAN__)
@@ -40,93 +41,25 @@ SECTIONS
 #endif
   _etext = .; /* End of text section */
 
-  . = ALIGN(16); /* Exception table */
-  __start___ex_table = .;
-  __ex_table : { *(__ex_table) }
-  __stop___ex_table = .;
-
+  EXCEPTION_TABLE(16)
   RODATA
-
-  /* writeable */
-  .data : { /* Data */
-	*(.spu)
-	*(.spi)
-	DATA_DATA
-	CONSTRUCTORS
-  }
-
-  . = ALIGN(4096);
-  __nosave_begin = .;
-  .data_nosave : { *(.data.nosave) }
-  . = ALIGN(4096);
-  __nosave_end = .;
-
-  . = ALIGN(32);
-  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
-
+  RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
   _edata = .; /* End of data section */
 
-  . = ALIGN(8192); /* init_task */
-  .data.init_task : { *(.data.init_task) }
-
   /* will be freed after init */
-  . = ALIGN(4096); /* Init code and data */
+  . = ALIGN(PAGE_SIZE); /* Init code and data */
   __init_begin = .;
-  .init.text : {
-	_sinittext = .;
-	INIT_TEXT
-	_einittext = .;
-  }
-  .init.data : { INIT_DATA }
-  . = ALIGN(16);
-  __setup_start = .;
-  .init.setup : { *(.init.setup) }
-  __setup_end = .;
-  __initcall_start = .;
-  .initcall.init : {
-	INITCALLS
-  }
-  __initcall_end = .;
-  __con_initcall_start = .;
-  .con_initcall.init : { *(.con_initcall.init) }
-  __con_initcall_end = .;
-  SECURITY_INIT
-  . = ALIGN(4);
-  __alt_instructions = .;
-  .altinstructions : { *(.altinstructions) }
-  __alt_instructions_end = .;
-  .altinstr_replacement : { *(.altinstr_replacement) }
-  /* .exit.text is discard at runtime, not link time, to deal with references
-     from .altinstructions and .eh_frame */
-  .exit.text : { EXIT_TEXT }
-  .exit.data : { EXIT_DATA }
-
-#ifdef CONFIG_BLK_DEV_INITRD
-  . = ALIGN(4096);
-  __initramfs_start = .;
-  .init.ramfs : { *(.init.ramfs) }
-  __initramfs_end = .;
-#endif
-
-  PERCPU(4096)
-  . = ALIGN(4096);
+  INIT_TEXT_SECTION(PAGE_SIZE)
+  INIT_DATA_SECTION(16)
+  PERCPU(PAGE_SIZE)
+  . = ALIGN(PAGE_SIZE);
   __init_end = .;
   /* freed after init ends here */
 
-  __bss_start = .; /* BSS */
-  .bss : { *(.bss) }
-  . = ALIGN(4);
-  __bss_stop = .;
-
+  BSS_SECTION(0, 0, 4)
 
   _end = . ;
 
-  /* Sections to be discarded */
-  /DISCARD/ : {
-	EXIT_TEXT
-	EXIT_DATA
-	*(.exitcall.exit)
-  }
-
   /* Stabs debugging sections. */
   .stab 0 : { *(.stab) }
   .stabstr 0 : { *(.stabstr) }
@@ -135,4 +68,7 @@ SECTIONS
   .stab.index 0 : { *(.stab.index) }
   .stab.indexstr 0 : { *(.stab.indexstr) }
   .comment 0 : { *(.comment) }
+
+  /* Sections to be discarded */
+  DISCARDS
 }
diff --git a/arch/m32r/lib/delay.c b/arch/m32r/lib/delay.c
index ced549be80f5..940f4837e42b 100644
--- a/arch/m32r/lib/delay.c
+++ b/arch/m32r/lib/delay.c
@@ -122,4 +122,8 @@ void __ndelay(unsigned long nsecs)
 {
 	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
 }
+
+EXPORT_SYMBOL(__delay);
+EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(__udelay);
 EXPORT_SYMBOL(__ndelay);
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
index b7a78ad429b7..5d2858f6eede 100644
--- a/arch/m32r/mm/discontig.c
+++ b/arch/m32r/mm/discontig.c
@@ -32,6 +32,9 @@ typedef struct {
 } mem_prof_t;
 static mem_prof_t mem_prof[MAX_NUMNODES];
 
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+
 static void __init mem_prof_init(void)
 {
 	unsigned long start_pfn, holes, free_pfn;
@@ -42,7 +45,7 @@ static void __init mem_prof_init(void)
 	/* Node#0 SDRAM */
 	mp = &mem_prof[0];
 	mp->start_pfn = PFN_UP(CONFIG_MEMORY_START);
-	mp->pages = PFN_DOWN(CONFIG_MEMORY_SIZE);
+	mp->pages = PFN_DOWN(memory_end - memory_start);
 	mp->holes = 0;
 	mp->free_pfn = PFN_UP(__pa(_end));
 
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index 24d429f9358a..9f581df3952b 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -171,7 +171,7 @@ void __init mem_init(void)
 
 	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
 		"%dk reserved, %dk data, %dk init)\n",
-		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+		nr_free_pages() << (PAGE_SHIFT-10),
 		num_physpages << (PAGE_SHIFT-10),
 		codesize >> 10,
 		reservedpages << (PAGE_SHIFT-10),
diff --git a/arch/m32r/mm/mmu.S b/arch/m32r/mm/mmu.S
index 49a6d16a3d58..e9491a5ae827 100644
--- a/arch/m32r/mm/mmu.S
+++ b/arch/m32r/mm/mmu.S
@@ -150,9 +150,13 @@ ENTRY(tme_handler)
 
 	; pmd = pmd_offset(pgd, address);
 	ld r3, @r3 ; r3: pmd data
-	ldi r2, #-4096
 	beqz r3, 3f ; pmd_none(*pmd) ?
 
+	and3 r2, r3, #0xfff
+	add3 r2, r2, #-355 ; _KERNPG_TABLE(=0x163)
+	bnez r2, 3f ; pmd_bad(*pmd) ?
+	ldi r2, #-4096
+
 	; pte = pte_offset(pmd, address);
 	and r2, r3 ; r2: pte base addr
 	srl3 r3, r0, #10
@@ -263,9 +267,9 @@ ENTRY(tme_handler)
 	ld r1, @r3 ; r1: pmd
 	beqz r1, 3f ; pmd_none(*pmd) ?
 	;
-	and3 r1, r1, #0xeff
-	ldi r4, #611 ; _KERNPG_TABLE(=611)
-	bne r1, r4, 3f ; !pmd_bad(*pmd) ?
+	and3 r1, r1, #0x3ff
+	ldi r4, #0x163 ; _KERNPG_TABLE(=0x163)
+	bne r1, r4, 3f ; pmd_bad(*pmd) ?
 
 	.fillinsn
 4: