path: root/arch/m32r
author     Michal Marek <mmarek@suse.cz>    2010-08-04 07:59:13 -0400
committer  Michal Marek <mmarek@suse.cz>    2010-08-04 07:59:13 -0400
commit     772320e84588dcbe1600ffb83e5f328f2209ac2a (patch)
tree       a7de21b79340aeaa17c58126f6b801b82c77b53a /arch/m32r
parent     1ce53adf13a54375d2a5c7cdbe341b2558389615 (diff)
parent     9fe6206f400646a2322096b56c59891d530e8d51 (diff)
Merge commit 'v2.6.35' into kbuild/kbuild
Conflicts: arch/powerpc/Makefile
Diffstat (limited to 'arch/m32r')
-rw-r--r--  arch/m32r/include/asm/atomic.h       |  2
-rw-r--r--  arch/m32r/include/asm/local.h        | 25
-rw-r--r--  arch/m32r/include/asm/ptrace.h       |  2
-rw-r--r--  arch/m32r/include/asm/scatterlist.h  | 15
-rw-r--r--  arch/m32r/include/asm/thread_info.h  |  2
-rw-r--r--  arch/m32r/include/asm/tlbflush.h     |  2
-rw-r--r--  arch/m32r/include/asm/unistd.h       |  1
-rw-r--r--  arch/m32r/kernel/process.c           |  2
-rw-r--r--  arch/m32r/kernel/ptrace.c            | 97
-rw-r--r--  arch/m32r/kernel/sys_m32r.c          | 92
-rw-r--r--  arch/m32r/kernel/time.c              | 47
-rw-r--r--  arch/m32r/mm/fault-nommu.c           |  2
-rw-r--r--  arch/m32r/mm/fault.c                 | 20
-rw-r--r--  arch/m32r/mm/init.c                  |  1
14 files changed, 53 insertions, 257 deletions
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 63f0cf0f50dd..d44a51e5271b 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -26,7 +26,7 @@
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)	((v)->counter)
+#define atomic_read(v)	(*(volatile int *)&(v)->counter)
 
 /**
  * atomic_set - set atomic variable
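
The atomic_read() hunk above reads the counter through a volatile-qualified pointer so the compiler must emit a fresh load on every use instead of reusing a register-cached value. A minimal stand-alone sketch of the same idiom (illustrative names, not kernel code):

/* demo_atomic_read() mirrors the new atomic_read() definition:
 * the volatile cast prevents the compiler from caching the value. */
#include <stdio.h>

typedef struct { int counter; } demo_atomic_t;

#define demo_atomic_read(v)	(*(volatile int *)&(v)->counter)

int main(void)
{
	demo_atomic_t a = { .counter = 42 };

	printf("%d\n", demo_atomic_read(&a));	/* re-reads memory each use */
	return 0;
}
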
diff --git a/arch/m32r/include/asm/local.h b/arch/m32r/include/asm/local.h
index 22256d138630..734bca87018a 100644
--- a/arch/m32r/include/asm/local.h
+++ b/arch/m32r/include/asm/local.h
@@ -338,29 +338,4 @@ static inline void local_set_mask(unsigned long mask, local_t *addr)
  * a variable, not an address.
  */
 
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non local way. */
-#define cpu_local_wrap_v(l)		\
-	({ local_t res__;		\
-	   preempt_disable();		\
-	   res__ = (l);			\
-	   preempt_enable();		\
-	   res__; })
-#define cpu_local_wrap(l)		\
-	({ preempt_disable();		\
-	   l;				\
-	   preempt_enable(); })		\
-
-#define cpu_local_read(l)	cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i)	cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l)	cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l)	cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l)	cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l)	cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l)	cpu_local_inc(l)
-#define __cpu_local_dec(l)	cpu_local_dec(l)
-#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
-
 #endif /* __M32R_LOCAL_H */
diff --git a/arch/m32r/include/asm/ptrace.h b/arch/m32r/include/asm/ptrace.h
index a0755b982028..840a1231edeb 100644
--- a/arch/m32r/include/asm/ptrace.h
+++ b/arch/m32r/include/asm/ptrace.h
@@ -120,6 +120,8 @@ struct pt_regs {
 
 #include <asm/m32r.h>		/* M32R_PSW_BSM, M32R_PSW_BPM */
 
+#define arch_has_single_step() (1)
+
 struct task_struct;
 extern void init_debug_traps(struct task_struct *);
 #define arch_ptrace_attach(child) \
diff --git a/arch/m32r/include/asm/scatterlist.h b/arch/m32r/include/asm/scatterlist.h
index 1ed372c73d0b..aeeddd8dac17 100644
--- a/arch/m32r/include/asm/scatterlist.h
+++ b/arch/m32r/include/asm/scatterlist.h
@@ -1,20 +1,7 @@
 #ifndef _ASM_M32R_SCATTERLIST_H
 #define _ASM_M32R_SCATTERLIST_H
 
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-    unsigned long sg_magic;
-#endif
-    char * address;	/* Location data is to be transferred to, NULL for
-			 * highmem page */
-    unsigned long page_link;
-    unsigned int offset;/* for highmem, page offset */
-
-    dma_addr_t dma_address;
-    unsigned int length;
-};
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (0x1fffffff)
 
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index ed240b6e8e77..71faff5bcc27 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -142,7 +142,7 @@ static inline unsigned int get_thread_fault_code(void)
 #define TIF_RESTORE_SIGMASK	8	/* restore signal mask in do_signal() */
 #define TIF_USEDFPU		16	/* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_MEMDIE		18	/* OOM killer killed process */
+#define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_FREEZE		19	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
diff --git a/arch/m32r/include/asm/tlbflush.h b/arch/m32r/include/asm/tlbflush.h
index 0ef95307784e..92614b0ccf17 100644
--- a/arch/m32r/include/asm/tlbflush.h
+++ b/arch/m32r/include/asm/tlbflush.h
@@ -92,6 +92,6 @@ static __inline__ void __flush_tlb_all(void)
 	);
 }
 
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 
 #endif /* _ASM_M32R_TLBFLUSH_H */
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h
index cf701c933249..76125777483c 100644
--- a/arch/m32r/include/asm/unistd.h
+++ b/arch/m32r/include/asm/unistd.h
@@ -339,6 +339,7 @@
 #define __ARCH_WANT_STAT64
 #define __ARCH_WANT_SYS_ALARM
 #define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_IPC
 #define __ARCH_WANT_SYS_PAUSE
 #define __ARCH_WANT_SYS_TIME
 #define __ARCH_WANT_SYS_UTIME
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index 67a01e1e4283..bc8c8c1511b2 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -21,10 +21,10 @@
  */
 
 #include <linux/fs.h>
+#include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
-#include <linux/slab.h>
 #include <linux/hardirq.h>
 
 #include <asm/io.h>
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 98682bba0ed9..e555091eb97c 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -580,6 +580,35 @@ init_debug_traps(struct task_struct *child)
 	}
 }
 
+void user_enable_single_step(struct task_struct *child)
+{
+	unsigned long next_pc;
+	unsigned long pc, insn;
+
+	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+
+	/* Compute next pc. */
+	pc = get_stack_long(child, PT_BPC);
+
+	if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
+	    != sizeof(insn))
+		break;
+
+	compute_next_pc(insn, pc, &next_pc, child);
+	if (next_pc & 0x80000000)
+		break;
+
+	if (embed_debug_trap(child, next_pc))
+		break;
+
+	invalidate_cache();
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+	unregister_all_debug_traps(child);
+	invalidate_cache();
+}
 
 /*
  * Called by kernel/ptrace.c when detaching..
@@ -630,74 +659,6 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		ret = ptrace_write_user(child, addr, data);
 		break;
 
-	/*
-	 * continue/restart and stop at next (return from) syscall
-	 */
-	case PTRACE_SYSCALL:
-	case PTRACE_CONT:
-		ret = -EIO;
-		if (!valid_signal(data))
-			break;
-		if (request == PTRACE_SYSCALL)
-			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		else
-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		child->exit_code = data;
-		wake_up_process(child);
-		ret = 0;
-		break;
-
-	/*
-	 * make the child exit. Best I can do is send it a sigkill.
-	 * perhaps it should be put in the status that it wants to
-	 * exit.
-	 */
-	case PTRACE_KILL: {
-		ret = 0;
-		unregister_all_debug_traps(child);
-		invalidate_cache();
-		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
-			break;
-		child->exit_code = SIGKILL;
-		wake_up_process(child);
-		break;
-	}
-
-	/*
-	 * execute single instruction.
-	 */
-	case PTRACE_SINGLESTEP: {
-		unsigned long next_pc;
-		unsigned long pc, insn;
-
-		ret = -EIO;
-		if (!valid_signal(data))
-			break;
-		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-
-		/* Compute next pc. */
-		pc = get_stack_long(child, PT_BPC);
-
-		if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
-		    != sizeof(insn))
-			break;
-
-		compute_next_pc(insn, pc, &next_pc, child);
-		if (next_pc & 0x80000000)
-			break;
-
-		if (embed_debug_trap(child, next_pc))
-			break;
-
-		invalidate_cache();
-		child->exit_code = data;
-
-		/* give it a chance to run. */
-		wake_up_process(child);
-		ret = 0;
-		break;
-	}
-
 	case PTRACE_GETREGS:
 		ret = ptrace_getregs(child, (void __user *)data);
 		break;
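
The two ptrace.c hunks above move m32r to the generic ptrace_resume() code: arch_ptrace() loses its private PTRACE_SYSCALL/PTRACE_CONT/PTRACE_KILL/PTRACE_SINGLESTEP cases, and single-stepping is instead exposed through user_enable_single_step()/user_disable_single_step() plus the arch_has_single_step() define in ptrace.h. The tracer-side interface is unchanged; a minimal illustration of a tracer driving single-step through that path (portable ptrace(2) usage, not code from this commit):

/* Minimal tracer: single-steps a child a few times, then kills it.
 * Standard ptrace(2) usage; illustrative only. */
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {				/* child: ask to be traced */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execl("/bin/true", "true", (char *)NULL);
		_exit(127);
	}

	waitpid(pid, NULL, 0);			/* child stopped at exec */

	for (int i = 0; i < 5; i++) {
		int status;

		/* Kernel side: ptrace_resume() -> user_enable_single_step() */
		if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) == -1)
			break;
		if (waitpid(pid, &status, 0) == -1 || WIFEXITED(status))
			break;
	}

	kill(pid, SIGKILL);
	waitpid(pid, NULL, 0);
	return 0;
}
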
diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c
index d3c865c5a6ba..0a00f467edfa 100644
--- a/arch/m32r/kernel/sys_m32r.c
+++ b/arch/m32r/kernel/sys_m32r.c
@@ -76,98 +76,6 @@ asmlinkage int sys_tas(int __user *addr)
 	return oldval;
 }
 
-/*
- * sys_ipc() is the de-multiplexer for the SysV IPC calls..
- *
- * This is really horribly ugly.
- */
-asmlinkage int sys_ipc(uint call, int first, int second,
-		       int third, void __user *ptr, long fifth)
-{
-	int version, ret;
-
-	version = call >> 16; /* hack for backward compatibility */
-	call &= 0xffff;
-
-	switch (call) {
-	case SEMOP:
-		return sys_semtimedop(first, (struct sembuf __user *)ptr,
-				      second, NULL);
-	case SEMTIMEDOP:
-		return sys_semtimedop(first, (struct sembuf __user *)ptr,
-				      second, (const struct timespec __user *)fifth);
-	case SEMGET:
-		return sys_semget (first, second, third);
-	case SEMCTL: {
-		union semun fourth;
-		if (!ptr)
-			return -EINVAL;
-		if (get_user(fourth.__pad, (void __user * __user *) ptr))
-			return -EFAULT;
-		return sys_semctl (first, second, third, fourth);
-	}
-
-	case MSGSND:
-		return sys_msgsnd (first, (struct msgbuf __user *) ptr,
-				   second, third);
-	case MSGRCV:
-		switch (version) {
-		case 0: {
-			struct ipc_kludge tmp;
-			if (!ptr)
-				return -EINVAL;
-
-			if (copy_from_user(&tmp,
-					   (struct ipc_kludge __user *) ptr,
-					   sizeof (tmp)))
-				return -EFAULT;
-			return sys_msgrcv (first, tmp.msgp, second,
-					   tmp.msgtyp, third);
-		}
-		default:
-			return sys_msgrcv (first,
-					   (struct msgbuf __user *) ptr,
-					   second, fifth, third);
-		}
-	case MSGGET:
-		return sys_msgget ((key_t) first, second);
-	case MSGCTL:
-		return sys_msgctl (first, second,
-				   (struct msqid_ds __user *) ptr);
-	case SHMAT: {
-		ulong raddr;
-
-		if (!access_ok(VERIFY_WRITE, (ulong __user *) third,
-			       sizeof(ulong)))
-			return -EFAULT;
-		ret = do_shmat (first, (char __user *) ptr, second, &raddr);
-		if (ret)
-			return ret;
-		return put_user (raddr, (ulong __user *) third);
-	}
-	case SHMDT:
-		return sys_shmdt ((char __user *)ptr);
-	case SHMGET:
-		return sys_shmget (first, second, third);
-	case SHMCTL:
-		return sys_shmctl (first, second,
-				   (struct shmid_ds __user *) ptr);
-	default:
-		return -ENOSYS;
-	}
-}
-
-asmlinkage int sys_uname(struct old_utsname __user * name)
-{
-	int err;
-	if (!name)
-		return -EFAULT;
-	down_read(&uts_sem);
-	err = copy_to_user(name, utsname(), sizeof (*name));
-	up_read(&uts_sem);
-	return err?-EFAULT:0;
-}
-
 asmlinkage int sys_cacheflush(void *addr, int bytes, int cache)
 {
 	/* This should flush more selectively ...  */
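
The private sys_ipc() demultiplexer and sys_uname() removed above are superseded by generic implementations; the __ARCH_WANT_SYS_IPC define added in unistd.h (earlier hunk) pulls in the kernel's common sys_ipc() multiplexer instead. User space is unaffected: on an ipc-multiplexed ABI such as this one, the C library still funnels the SysV IPC wrappers through the single ipc(2) entry point. A small generic example of the call path being served (standard SysV API, nothing m32r-specific):

/* SysV semaphore round-trip; on ipc-multiplexed ABIs the C library
 * routes these wrappers through the single sys_ipc() entry point. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	struct sembuf up = { .sem_num = 0, .sem_op = 1, .sem_flg = 0 };
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (id == -1) {
		perror("semget");
		return 1;
	}

	semop(id, &up, 1);		/* SEMOP  -> sys_semtimedop() */
	semctl(id, 0, IPC_RMID);	/* SEMCTL -> sys_semctl()     */
	return 0;
}
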
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index 9cedcef11575..bda86820bffd 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -106,24 +106,6 @@ u32 arch_gettimeoffset(void)
 }
 
 /*
- * In order to set the CMOS clock precisely, set_rtc_mmss has to be
- * called 500 ms after the second nowtime has started, because when
- * nowtime is written into the registers of the CMOS clock, it will
- * jump to the next second precisely 500 ms later. Check the Motorola
- * MC146818A or Dallas DS12887 data sheet for details.
- *
- * BUG: This routine does not handle hour overflow properly; it just
- *      sets the minutes. Usually you won't notice until after reboot!
- */
-static inline int set_rtc_mmss(unsigned long nowtime)
-{
-	return 0;
-}
-
-/* last time the cmos clock got updated */
-static long last_rtc_update = 0;
-
-/*
  * timer_interrupt() needs to keep up the real-time clock,
  * as well as call the "do_timer()" routine every clocktick
  */
@@ -138,23 +120,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 #ifndef CONFIG_SMP
 	update_process_times(user_mode(get_irq_regs()));
 #endif
-	/*
-	 * If we have an externally synchronized Linux clock, then update
-	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
-	 * called as close as possible to 500 ms before the new second starts.
-	 */
-	write_seqlock(&xtime_lock);
-	if (ntp_synced()
-		&& xtime.tv_sec > last_rtc_update + 660
-		&& (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2
-		&& (xtime.tv_nsec / 1000) <= 500000 + ((unsigned)TICK_SIZE) / 2)
-	{
-		if (set_rtc_mmss(xtime.tv_sec) == 0)
-			last_rtc_update = xtime.tv_sec;
-		else	/* do it again in 60 s */
-			last_rtc_update = xtime.tv_sec - 600;
-	}
-	write_sequnlock(&xtime_lock);
 	/* As we return to user mode fire off the other CPU schedulers..
 	   this is basically because we don't yet share IRQ's around.
 	   This message is rigged to be safe on the 386 - basically it's
@@ -174,7 +139,7 @@ static struct irqaction irq0 = {
 	.name = "MFT2",
 };
 
-void __init time_init(void)
+void read_persistent_clock(struct timespec *ts)
 {
 	unsigned int epoch, year, mon, day, hour, min, sec;
 
@@ -194,11 +159,13 @@ void __init time_init(void)
 		epoch = 1952;
 	year += epoch;
 
-	xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
-	xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
-	set_normalized_timespec(&wall_to_monotonic,
-		-xtime.tv_sec, -xtime.tv_nsec);
+	ts->tv_sec = mktime(year, mon, day, hour, min, sec);
+	ts->tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+}
 
+
+void __init time_init(void)
+{
 #if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \
 	|| defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \
 	|| defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
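
The time.c hunks retire the local RTC-sync helpers (set_rtc_mmss() and the ~11-minute CMOS update) and stop writing xtime/wall_to_monotonic directly; the board RTC is now reported through read_persistent_clock(), which the generic timekeeping core reads at boot to seed the wall clock. The conversion itself still uses mktime() on the RTC's broken-down date. For reference, a stand-alone restatement of that civil-time-to-epoch arithmetic (the same Gauss-style formula the kernel's mktime() helper uses, written out here rather than copied from this commit):

/* Convert a broken-down UTC date to seconds since 1970-01-01,
 * in the style of the kernel's mktime() helper. */
#include <stdio.h>

static unsigned long civil_to_epoch(unsigned int year, unsigned int mon,
				    unsigned int day, unsigned int hour,
				    unsigned int min, unsigned int sec)
{
	/* Shift Jan/Feb to the end of the previous year so the leap
	 * day lands at the end of the counting window. */
	if ((int)(mon -= 2) <= 0) {
		mon += 12;
		year -= 1;
	}

	return ((((unsigned long)
		  (year / 4 - year / 100 + year / 400 + 367 * mon / 12 + day) +
		  year * 365 - 719499) * 24 + hour) * 60 + min) * 60 + sec;
}

int main(void)
{
	/* 1970-01-01 00:00:00 UTC -> 0 */
	printf("%lu\n", civil_to_epoch(1970, 1, 1, 0, 0, 0));
	return 0;
}
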
diff --git a/arch/m32r/mm/fault-nommu.c b/arch/m32r/mm/fault-nommu.c
index 88469178ea6b..888aab1157ed 100644
--- a/arch/m32r/mm/fault-nommu.c
+++ b/arch/m32r/mm/fault-nommu.c
@@ -95,7 +95,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
  * update_mmu_cache()
  *======================================================================*/
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
-	pte_t pte)
+	pte_t *ptep)
 {
 	BUG();
 }
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 7274b47f4c22..b8ec002aef8e 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -188,7 +188,6 @@ good_area:
 	if ((error_code & ACE_INSTRUCTION) && !(vma->vm_flags & VM_EXEC))
 		goto bad_area;
 
-survive:
 	/*
 	 * If for any reason at all we couldn't handle the fault,
 	 * make sure we exit gracefully rather than endlessly redo
@@ -271,15 +270,10 @@ no_context:
  */
 out_of_memory:
 	up_read(&mm->mmap_sem);
-	if (is_global_init(tsk)) {
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
-	printk("VM: killing process %s\n", tsk->comm);
-	if (error_code & ACE_USERMODE)
-		do_group_exit(SIGKILL);
-	goto no_context;
+	if (!(error_code & ACE_USERMODE))
+		goto no_context;
+	pagefault_out_of_memory();
+	return;
 
 do_sigbus:
 	up_read(&mm->mmap_sem);
@@ -336,7 +330,7 @@ vmalloc_fault:
 
 		addr = (address & PAGE_MASK);
 		set_thread_fault_code(error_code);
-		update_mmu_cache(NULL, addr, *pte_k);
+		update_mmu_cache(NULL, addr, pte_k);
 		set_thread_fault_code(0);
 		return;
 	}
@@ -349,7 +343,7 @@ vmalloc_fault:
 #define ITLB_END	(unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8))
 #define DTLB_END	(unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8))
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
-	pte_t pte)
+	pte_t *ptep)
 {
 	volatile unsigned long *entry1, *entry2;
 	unsigned long pte_data, flags;
@@ -365,7 +359,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
 
 	vaddr = (vaddr & PAGE_MASK) | get_asid();
 
-	pte_data = pte_val(pte);
+	pte_data = pte_val(*ptep);
 
 #ifdef CONFIG_CHIP_OPSP
 	entry1 = (unsigned long *)ITLB_BASE;
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index 9f581df3952b..73e2205ebf5a 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -19,6 +19,7 @@
 #include <linux/bitops.h>
 #include <linux/nodemask.h>
 #include <linux/pfn.h>
+#include <linux/gfp.h>
 #include <asm/types.h>
 #include <asm/processor.h>
 #include <asm/page.h>
24#include <asm/page.h> 25#include <asm/page.h>