path: root/arch/m32r/kernel
Diffstat (limited to 'arch/m32r/kernel')
-rw-r--r--  arch/m32r/kernel/.gitignore |   1
-rw-r--r--  arch/m32r/kernel/entry.S    |   5
-rw-r--r--  arch/m32r/kernel/irq.c      |   2
-rw-r--r--  arch/m32r/kernel/process.c  |   7
-rw-r--r--  arch/m32r/kernel/ptrace.c   | 109
-rw-r--r--  arch/m32r/kernel/signal.c   | 103
-rw-r--r--  arch/m32r/kernel/sys_m32r.c |  96
-rw-r--r--  arch/m32r/kernel/time.c     |  47
8 files changed, 95 insertions(+), 275 deletions(-)
diff --git a/arch/m32r/kernel/.gitignore b/arch/m32r/kernel/.gitignore
new file mode 100644
index 000000000000..c5f676c3c224
--- /dev/null
+++ b/arch/m32r/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 403869833b98..225412bc227e 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -235,10 +235,9 @@ work_resched:
 work_notifysig:				; deal with pending signals and
 					; notify-resume requests
 	mv	r0, sp			; arg1 : struct pt_regs *regs
-	ldi	r1, #0			; arg2 : sigset_t *oldset
-	mv	r2, r9			; arg3 : __u32 thread_info_flags
+	mv	r1, r9			; arg2 : __u32 thread_info_flags
 	bl	do_notify_resume
-	bra	restore_all
+	bra	resume_userspace
 
 	; perform syscall exit tracing
 	ALIGN
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index 3c71f776872c..7db26f1f082d 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -51,7 +51,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-		seq_printf(p, " %14s", irq_desc[i].chip->typename);
+		seq_printf(p, " %14s", irq_desc[i].chip->name);
 		seq_printf(p, " %s", action->name);
 
 		for (action=action->next; action; action = action->next)
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index 67a01e1e4283..422bea9f1dbc 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -21,10 +21,10 @@
  */
 
 #include <linux/fs.h>
+#include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
-#include <linux/slab.h>
 #include <linux/hardirq.h>
 
 #include <asm/io.h>
@@ -288,8 +288,9 @@ asmlinkage int sys_vfork(unsigned long r0, unsigned long r1, unsigned long r2,
 /*
  * sys_execve() executes a new program.
  */
-asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
-			  char __user * __user *uenvp,
+asmlinkage int sys_execve(const char __user *ufilename,
+			  const char __user *const __user *uargv,
+			  const char __user *const __user *uenvp,
 			  unsigned long r3, unsigned long r4, unsigned long r5,
 			  unsigned long r6, struct pt_regs regs)
 {
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 98682bba0ed9..20743754f2b2 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -580,6 +580,36 @@ init_debug_traps(struct task_struct *child)
 	}
 }
 
+void user_enable_single_step(struct task_struct *child)
+{
+	unsigned long next_pc;
+	unsigned long pc, insn;
+
+	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+
+	/* Compute next pc. */
+	pc = get_stack_long(child, PT_BPC);
+
+	if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
+	    != sizeof(insn))
+		return -EIO;
+
+	compute_next_pc(insn, pc, &next_pc, child);
+	if (next_pc & 0x80000000)
+		return -EIO;
+
+	if (embed_debug_trap(child, next_pc))
+		return -EIO;
+
+	invalidate_cache();
+	return 0;
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+	unregister_all_debug_traps(child);
+	invalidate_cache();
+}
 
 /*
  * Called by kernel/ptrace.c when detaching..
@@ -592,9 +622,11 @@ void ptrace_disable(struct task_struct *child)
 }
 
 long
-arch_ptrace(struct task_struct *child, long request, long addr, long data)
+arch_ptrace(struct task_struct *child, long request,
+	    unsigned long addr, unsigned long data)
 {
 	int ret;
+	unsigned long __user *datap = (unsigned long __user *) data;
 
 	switch (request) {
 	/*
@@ -609,8 +641,7 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	 * read the word at location addr in the USER area.
 	 */
 	case PTRACE_PEEKUSR:
-		ret = ptrace_read_user(child, addr,
-				       (unsigned long __user *)data);
+		ret = ptrace_read_user(child, addr, datap);
 		break;
 
 	/*
@@ -630,80 +661,12 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		ret = ptrace_write_user(child, addr, data);
 		break;
 
-	/*
-	 * continue/restart and stop at next (return from) syscall
-	 */
-	case PTRACE_SYSCALL:
-	case PTRACE_CONT:
-		ret = -EIO;
-		if (!valid_signal(data))
-			break;
-		if (request == PTRACE_SYSCALL)
-			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		else
-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		child->exit_code = data;
-		wake_up_process(child);
-		ret = 0;
-		break;
-
-	/*
-	 * make the child exit. Best I can do is send it a sigkill.
-	 * perhaps it should be put in the status that it wants to
-	 * exit.
-	 */
-	case PTRACE_KILL: {
-		ret = 0;
-		unregister_all_debug_traps(child);
-		invalidate_cache();
-		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
-			break;
-		child->exit_code = SIGKILL;
-		wake_up_process(child);
-		break;
-	}
-
-	/*
-	 * execute single instruction.
-	 */
-	case PTRACE_SINGLESTEP: {
-		unsigned long next_pc;
-		unsigned long pc, insn;
-
-		ret = -EIO;
-		if (!valid_signal(data))
-			break;
-		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-
-		/* Compute next pc. */
-		pc = get_stack_long(child, PT_BPC);
-
-		if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
-		    != sizeof(insn))
-			break;
-
-		compute_next_pc(insn, pc, &next_pc, child);
-		if (next_pc & 0x80000000)
-			break;
-
-		if (embed_debug_trap(child, next_pc))
-			break;
-
-		invalidate_cache();
-		child->exit_code = data;
-
-		/* give it a chance to run. */
-		wake_up_process(child);
-		ret = 0;
-		break;
-	}
-
 	case PTRACE_GETREGS:
-		ret = ptrace_getregs(child, (void __user *)data);
+		ret = ptrace_getregs(child, datap);
 		break;
 
 	case PTRACE_SETREGS:
-		ret = ptrace_setregs(child, (void __user *)data);
+		ret = ptrace_setregs(child, datap);
 		break;
 
 	default:
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index 144b0f124fc7..a08697f0886d 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -30,35 +30,6 @@
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
-int do_signal(struct pt_regs *, sigset_t *);
-
-asmlinkage int
-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
-		  unsigned long r2, unsigned long r3, unsigned long r4,
-		  unsigned long r5, unsigned long r6, struct pt_regs *regs)
-{
-	sigset_t newset;
-
-	/* XXX: Don't preclude handling different sized sigset_t's. */
-	if (sigsetsize != sizeof(sigset_t))
-		return -EINVAL;
-
-	if (copy_from_user(&newset, unewset, sizeof(newset)))
-		return -EFAULT;
-	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
-
-	spin_lock_irq(&current->sighand->siglock);
-	current->saved_sigmask = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
-	current->state = TASK_INTERRUPTIBLE;
-	schedule();
-	set_thread_flag(TIF_RESTORE_SIGMASK);
-	return -ERESTARTNOHAND;
-}
-
 asmlinkage int
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
 		unsigned long r2, unsigned long r3, unsigned long r4,
@@ -218,7 +189,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
 	return (void __user *)((sp - frame_size) & -8ul);
 }
 
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 			   sigset_t *set, struct pt_regs *regs)
 {
 	struct rt_sigframe __user *frame;
@@ -275,22 +246,34 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		current->comm, current->pid, frame, regs->pc);
 #endif
 
-	return;
+	return 0;
 
 give_sigsegv:
 	force_sigsegv(sig, current);
+	return -EFAULT;
+}
+
+static int prev_insn(struct pt_regs *regs)
+{
+	u16 inst;
+	if (get_user(inst, (u16 __user *)(regs->bpc - 2)))
+		return -EFAULT;
+	if ((inst & 0xfff0) == 0x10f0)	/* trap ? */
+		regs->bpc -= 2;
+	else
+		regs->bpc -= 4;
+	regs->syscall_nr = -1;
+	return 0;
 }
 
 /*
  * OK, we're invoking a handler
  */
 
-static void
+static int
 handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
 	      sigset_t *oldset, struct pt_regs *regs)
 {
-	unsigned short inst;
-
 	/* Are we from a system call? */
 	if (regs->syscall_nr >= 0) {
 		/* If so, check system call restarting.. */
@@ -308,16 +291,14 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
 		/* fallthrough */
 		case -ERESTARTNOINTR:
 			regs->r0 = regs->orig_r0;
-			inst = *(unsigned short *)(regs->bpc - 2);
-			if ((inst & 0xfff0) == 0x10f0)	/* trap ? */
-				regs->bpc -= 2;
-			else
-				regs->bpc -= 4;
+			if (prev_insn(regs) < 0)
+				return -EFAULT;
 		}
 	}
 
 	/* Set up the stack frame */
-	setup_rt_frame(sig, ka, info, oldset, regs);
+	if (setup_rt_frame(sig, ka, info, oldset, regs))
+		return -EFAULT;
 
 	spin_lock_irq(&current->sighand->siglock);
 	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -325,6 +306,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
 		sigaddset(&current->blocked,sig);
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
+	return 0;
 }
 
 /*
@@ -332,12 +314,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-int do_signal(struct pt_regs *regs, sigset_t *oldset)
+static void do_signal(struct pt_regs *regs)
 {
 	siginfo_t info;
 	int signr;
 	struct k_sigaction ka;
-	unsigned short inst;
+	sigset_t *oldset;
 
 	/*
 	 * We want the common case to go fast, which
@@ -346,12 +328,14 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
 	 * if so.
 	 */
 	if (!user_mode(regs))
-		return 1;
+		return;
 
 	if (try_to_freeze())
 		goto no_signal;
 
-	if (!oldset)
+	if (test_thread_flag(TIF_RESTORE_SIGMASK))
+		oldset = &current->saved_sigmask;
+	else
 		oldset = &current->blocked;
 
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -363,8 +347,10 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
 		 */
 
 		/* Whee! Actually deliver the signal. */
-		handle_signal(signr, &ka, &info, oldset, regs);
-		return 1;
+		if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
+			clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+		return;
 	}
 
 no_signal:
@@ -375,31 +361,24 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
 		    regs->r0 == -ERESTARTSYS ||
 		    regs->r0 == -ERESTARTNOINTR) {
 			regs->r0 = regs->orig_r0;
-			inst = *(unsigned short *)(regs->bpc - 2);
-			if ((inst & 0xfff0) == 0x10f0)	/* trap ? */
-				regs->bpc -= 2;
-			else
-				regs->bpc -= 4;
-		}
-		if (regs->r0 == -ERESTART_RESTARTBLOCK){
+			prev_insn(regs);
+		} else if (regs->r0 == -ERESTART_RESTARTBLOCK){
 			regs->r0 = regs->orig_r0;
 			regs->r7 = __NR_restart_syscall;
-			inst = *(unsigned short *)(regs->bpc - 2);
-			if ((inst & 0xfff0) == 0x10f0)	/* trap ? */
-				regs->bpc -= 2;
-			else
-				regs->bpc -= 4;
+			prev_insn(regs);
 		}
 	}
-	return 0;
+	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+		clear_thread_flag(TIF_RESTORE_SIGMASK);
+		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+	}
 }
 
 /*
  * notification of userspace execution resumption
  * - triggered by current->work.notify_resume
  */
-void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
-		      __u32 thread_info_flags)
+void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
 {
 	/* Pending single-step? */
 	if (thread_info_flags & _TIF_SINGLESTEP)
@@ -407,7 +386,7 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
 
 	/* deal with pending signal delivery */
 	if (thread_info_flags & _TIF_SIGPENDING)
-		do_signal(regs,oldset);
+		do_signal(regs);
 
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c
index d3c865c5a6ba..d841fb6cc703 100644
--- a/arch/m32r/kernel/sys_m32r.c
+++ b/arch/m32r/kernel/sys_m32r.c
@@ -76,98 +76,6 @@ asmlinkage int sys_tas(int __user *addr)
 	return oldval;
 }
 
-/*
- * sys_ipc() is the de-multiplexer for the SysV IPC calls..
- *
- * This is really horribly ugly.
- */
-asmlinkage int sys_ipc(uint call, int first, int second,
-		       int third, void __user *ptr, long fifth)
-{
-	int version, ret;
-
-	version = call >> 16; /* hack for backward compatibility */
-	call &= 0xffff;
-
-	switch (call) {
-	case SEMOP:
-		return sys_semtimedop(first, (struct sembuf __user *)ptr,
-				      second, NULL);
-	case SEMTIMEDOP:
-		return sys_semtimedop(first, (struct sembuf __user *)ptr,
-				      second, (const struct timespec __user *)fifth);
-	case SEMGET:
-		return sys_semget (first, second, third);
-	case SEMCTL: {
-		union semun fourth;
-		if (!ptr)
-			return -EINVAL;
-		if (get_user(fourth.__pad, (void __user * __user *) ptr))
-			return -EFAULT;
-		return sys_semctl (first, second, third, fourth);
-	}
-
-	case MSGSND:
-		return sys_msgsnd (first, (struct msgbuf __user *) ptr,
-				   second, third);
-	case MSGRCV:
-		switch (version) {
-		case 0: {
-			struct ipc_kludge tmp;
-			if (!ptr)
-				return -EINVAL;
-
-			if (copy_from_user(&tmp,
-					   (struct ipc_kludge __user *) ptr,
-					   sizeof (tmp)))
-				return -EFAULT;
-			return sys_msgrcv (first, tmp.msgp, second,
-					   tmp.msgtyp, third);
-		}
-		default:
-			return sys_msgrcv (first,
-					   (struct msgbuf __user *) ptr,
-					   second, fifth, third);
-		}
-	case MSGGET:
-		return sys_msgget ((key_t) first, second);
-	case MSGCTL:
-		return sys_msgctl (first, second,
-				   (struct msqid_ds __user *) ptr);
-	case SHMAT: {
-		ulong raddr;
-
-		if (!access_ok(VERIFY_WRITE, (ulong __user *) third,
-			       sizeof(ulong)))
-			return -EFAULT;
-		ret = do_shmat (first, (char __user *) ptr, second, &raddr);
-		if (ret)
-			return ret;
-		return put_user (raddr, (ulong __user *) third);
-	}
-	case SHMDT:
-		return sys_shmdt ((char __user *)ptr);
-	case SHMGET:
-		return sys_shmget (first, second, third);
-	case SHMCTL:
-		return sys_shmctl (first, second,
-				   (struct shmid_ds __user *) ptr);
-	default:
-		return -ENOSYS;
-	}
-}
-
-asmlinkage int sys_uname(struct old_utsname __user * name)
-{
-	int err;
-	if (!name)
-		return -EFAULT;
-	down_read(&uts_sem);
-	err = copy_to_user(name, utsname(), sizeof (*name));
-	up_read(&uts_sem);
-	return err?-EFAULT:0;
-}
-
 asmlinkage int sys_cacheflush(void *addr, int bytes, int cache)
 {
 	/* This should flush more selectively ... */
@@ -185,7 +93,9 @@ asmlinkage int sys_cachectl(char *addr, int nbytes, int op)
  * Do a system call from kernel instead of calling sys_execve so we
  * end up with proper pt_regs.
  */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
 {
 	register long __scno __asm__ ("r7") = __NR_execve;
 	register long __arg3 __asm__ ("r2") = (long)(envp);
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index 9cedcef11575..bda86820bffd 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -106,24 +106,6 @@ u32 arch_gettimeoffset(void)
 }
 
 /*
- * In order to set the CMOS clock precisely, set_rtc_mmss has to be
- * called 500 ms after the second nowtime has started, because when
- * nowtime is written into the registers of the CMOS clock, it will
- * jump to the next second precisely 500 ms later. Check the Motorola
- * MC146818A or Dallas DS12887 data sheet for details.
- *
- * BUG: This routine does not handle hour overflow properly; it just
- *      sets the minutes. Usually you won't notice until after reboot!
- */
-static inline int set_rtc_mmss(unsigned long nowtime)
-{
-	return 0;
-}
-
-/* last time the cmos clock got updated */
-static long last_rtc_update = 0;
-
-/*
  * timer_interrupt() needs to keep up the real-time clock,
  * as well as call the "do_timer()" routine every clocktick
  */
@@ -138,23 +120,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 #ifndef CONFIG_SMP
 	update_process_times(user_mode(get_irq_regs()));
 #endif
-	/*
-	 * If we have an externally synchronized Linux clock, then update
-	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
-	 * called as close as possible to 500 ms before the new second starts.
-	 */
-	write_seqlock(&xtime_lock);
-	if (ntp_synced()
-	    && xtime.tv_sec > last_rtc_update + 660
-	    && (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2
-	    && (xtime.tv_nsec / 1000) <= 500000 + ((unsigned)TICK_SIZE) / 2)
-	{
-		if (set_rtc_mmss(xtime.tv_sec) == 0)
-			last_rtc_update = xtime.tv_sec;
-		else	/* do it again in 60 s */
-			last_rtc_update = xtime.tv_sec - 600;
-	}
-	write_sequnlock(&xtime_lock);
 	/* As we return to user mode fire off the other CPU schedulers..
 	   this is basically because we don't yet share IRQ's around.
 	   This message is rigged to be safe on the 386 - basically it's
@@ -174,7 +139,7 @@ static struct irqaction irq0 = {
 	.name = "MFT2",
 };
 
-void __init time_init(void)
+void read_persistent_clock(struct timespec *ts)
 {
 	unsigned int epoch, year, mon, day, hour, min, sec;
 
@@ -194,11 +159,13 @@ void __init time_init(void)
 		epoch = 1952;
 	year += epoch;
 
-	xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
-	xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
-	set_normalized_timespec(&wall_to_monotonic,
-		-xtime.tv_sec, -xtime.tv_nsec);
+	ts->tv_sec = mktime(year, mon, day, hour, min, sec);
+	ts->tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+}
 
+
+void __init time_init(void)
+{
 #if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \
 	|| defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \
 	|| defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)