Diffstat (limited to 'arch/m32r')
-rw-r--r--  arch/m32r/Kconfig                     |   6
-rw-r--r--  arch/m32r/boot/compressed/install.sh  |   4
-rw-r--r--  arch/m32r/include/asm/hardirq.h       |  15
-rw-r--r--  arch/m32r/include/asm/mman.h          |  18
-rw-r--r--  arch/m32r/include/asm/mmu_context.h   |   4
-rw-r--r--  arch/m32r/include/asm/smp.h           |   2
-rw-r--r--  arch/m32r/kernel/init_task.c          |   5
-rw-r--r--  arch/m32r/kernel/ptrace.c             |   5
-rw-r--r--  arch/m32r/kernel/smp.c                |  30
-rw-r--r--  arch/m32r/kernel/smpboot.c            |   4
-rw-r--r--  arch/m32r/kernel/time.c               |  74
-rw-r--r--  arch/m32r/mm/init.c                   |   2
12 files changed, 39 insertions(+), 130 deletions(-)
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index cabba332cc48..c41234f1b825 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -41,6 +41,12 @@ config HZ
 	int
 	default 100
 
+config GENERIC_TIME
+	def_bool y
+
+config ARCH_USES_GETTIMEOFFSET
+	def_bool y
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
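The two symbols added above opt m32r into the generic timekeeping code: GENERIC_TIME hands gettimeofday/settimeofday over to kernel/time/timekeeping.c, and ARCH_USES_GETTIMEOFFSET tells that code to call an architecture hook for sub-tick resolution. A minimal sketch of the resulting contract (the generic side is simplified here, not quoted from the kernel):

/* kernel context: u32/u64 come from <linux/types.h> */

/* The one hook the architecture still provides (see time.c below): */
u32 arch_gettimeoffset(void);	/* nanoseconds elapsed since the last tick */

/* Conceptually, the generic code then interpolates wall time as: */
static u64 wall_time_ns_sketch(u64 ns_at_last_tick)
{
	return ns_at_last_tick + arch_gettimeoffset();	/* sketch only */
}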
diff --git a/arch/m32r/boot/compressed/install.sh b/arch/m32r/boot/compressed/install.sh
index 6d72e9e72697..16e5a0a13437 100644
--- a/arch/m32r/boot/compressed/install.sh
+++ b/arch/m32r/boot/compressed/install.sh
@@ -24,8 +24,8 @@
 
 # User may have a custom install script
 
-if [ -x /sbin/installkernel ]; then
-  exec /sbin/installkernel "$@"
+if [ -x /sbin/${INSTALLKERNEL} ]; then
+  exec /sbin/${INSTALLKERNEL} "$@"
 fi
 
 if [ "$2" = "zImage" ]; then
diff --git a/arch/m32r/include/asm/hardirq.h b/arch/m32r/include/asm/hardirq.h
index cb8aa762f235..4c31c0ae215e 100644
--- a/arch/m32r/include/asm/hardirq.h
+++ b/arch/m32r/include/asm/hardirq.h
@@ -2,14 +2,7 @@
 #ifndef __ASM_HARDIRQ_H
 #define __ASM_HARDIRQ_H
 
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-typedef struct {
-	unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
+#include <asm/irq.h>
 
 #if NR_IRQS > 256
 #define HARDIRQ_BITS	9
@@ -26,11 +19,7 @@ typedef struct {
 # error HARDIRQ_BITS is too low!
 #endif
 
-static inline void ack_bad_irq(int irq)
-{
-	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
-	BUG();
-}
+#include <asm-generic/hardirq.h>
 
 #endif /* __ASM_HARDIRQ_H */
 #endif /* __KERNEL__ */
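The removed definitions are not lost; <asm-generic/hardirq.h> supplies them. Paraphrasing the 2.6.32-era generic header from memory (treat this as an approximation, not the authoritative source), it provides roughly:

/* Approximation of <asm-generic/hardirq.h>, circa 2.6.32: */
#include <linux/cache.h>
#include <linux/threads.h>

typedef struct {
	unsigned int __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h>	/* standard mappings for irq_cpustat_t */

static inline void ack_bad_irq(unsigned int irq)
{
	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
}

One behavioral nuance: the generic ack_bad_irq() only logs, whereas the deleted m32r version also called BUG().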
diff --git a/arch/m32r/include/asm/mman.h b/arch/m32r/include/asm/mman.h
index 04a5f40aa401..8eebf89f5ab1 100644
--- a/arch/m32r/include/asm/mman.h
+++ b/arch/m32r/include/asm/mman.h
@@ -1,17 +1 @@
-#ifndef __M32R_MMAN_H__
-#define __M32R_MMAN_H__
-
-#include <asm-generic/mman-common.h>
-
-#define MAP_GROWSDOWN	0x0100		/* stack-like segment */
-#define MAP_DENYWRITE	0x0800		/* ETXTBSY */
-#define MAP_EXECUTABLE	0x1000		/* mark it as an executable */
-#define MAP_LOCKED	0x2000		/* pages are locked */
-#define MAP_NORESERVE	0x4000		/* don't check for reservations */
-#define MAP_POPULATE	0x8000		/* populate (prefault) pagetables */
-#define MAP_NONBLOCK	0x10000		/* do not block on IO */
-
-#define MCL_CURRENT	1		/* lock all current mappings */
-#define MCL_FUTURE	2		/* lock all future mappings */
-
-#endif /* __M32R_MMAN_H__ */
+#include <asm-generic/mman.h>
diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h
index 91909e5dd9d0..a70a3df33635 100644
--- a/arch/m32r/include/asm/mmu_context.h
+++ b/arch/m32r/include/asm/mmu_context.h
@@ -127,7 +127,7 @@ static inline void switch_mm(struct mm_struct *prev,
 
 	if (prev != next) {
 #ifdef CONFIG_SMP
-		cpu_set(cpu, next->cpu_vm_mask);
+		cpumask_set_cpu(cpu, mm_cpumask(next));
 #endif /* CONFIG_SMP */
 		/* Set MPTB = next->pgd */
 		*(volatile unsigned long *)MPTB = (unsigned long)next->pgd;
@@ -135,7 +135,7 @@ static inline void switch_mm(struct mm_struct *prev,
 	}
 #ifdef CONFIG_SMP
 	else
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
+		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)))
 			activate_context(next);
 #endif /* CONFIG_SMP */
 }
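cpu_set() and the raw cpu_vm_mask field belong to the old by-value cpumask API; cpumask_set_cpu() and cpumask_test_and_set_cpu() take a struct cpumask pointer, and mm_cpumask() hides where the mm's mask actually lives. A minimal usage sketch (the helper name is hypothetical, standard kernel headers assumed):

#include <linux/cpumask.h>
#include <linux/mm_types.h>

/* mm_cpumask(mm) returns a struct cpumask * for the set of CPUs that
 * have seen this mm, so mm_struct can later grow an off-stack mask
 * without touching call sites such as switch_mm() above. */
static void mark_mm_active_on(struct mm_struct *mm, int cpu)
{
	cpumask_set_cpu(cpu, mm_cpumask(mm));
}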
diff --git a/arch/m32r/include/asm/smp.h b/arch/m32r/include/asm/smp.h
index b96a6d2ffbc3..e67ded1aab91 100644
--- a/arch/m32r/include/asm/smp.h
+++ b/arch/m32r/include/asm/smp.h
@@ -88,7 +88,7 @@ extern void smp_send_timer(void);
 extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #endif	/* not __ASSEMBLY__ */
 
diff --git a/arch/m32r/kernel/init_task.c b/arch/m32r/kernel/init_task.c
index fce57e5d3f91..6c42d5f8df50 100644
--- a/arch/m32r/kernel/init_task.c
+++ b/arch/m32r/kernel/init_task.c
@@ -20,9 +20,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
  * way process stacks are handled. This is done by having a special
  * "init_task" linker map entry..
  */
-union thread_union init_thread_union
-	__attribute__((__section__(".data.init_task"))) =
-		{ INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+	{ INIT_THREAD_INFO(init_task) };
 
 /*
  * Initial task structure.
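__init_task_data is a then-new shorthand for placing the initial task's stack in its dedicated section; to a close approximation (paraphrasing <linux/init_task.h> of the era) it expands to exactly the attribute the old code spelled out:

/* Approximate definition, for reference: */
#define __init_task_data __attribute__((__section__(".data.init_task")))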
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 98b8feb12ed8..98682bba0ed9 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -77,7 +77,7 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
 	struct user * dummy = NULL;
 #endif
 
-	if ((off & 3) || (off < 0) || (off > sizeof(struct user) - 3))
+	if ((off & 3) || off > sizeof(struct user) - 3)
 		return -EIO;
 
 	off >>= 2;
@@ -139,8 +139,7 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
 	struct user * dummy = NULL;
 #endif
 
-	if ((off & 3) || off < 0 ||
-	    off > sizeof(struct user) - 3)
+	if ((off & 3) || off > sizeof(struct user) - 3)
 		return -EIO;
 
 	off >>= 2;
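Both dropped comparisons were dead code: off is an unsigned long, so off < 0 is always false (gcc -Wextra reports "comparison of unsigned expression < 0 is always false" for it). The surviving test already rejects misaligned and out-of-range offsets; as a standalone sketch (check_user_off is a hypothetical name for illustration, kernel headers for struct user and EIO assumed):

/* Offset must be word-aligned and lie within struct user. */
static int check_user_off(unsigned long off)
{
	if ((off & 3) || off > sizeof(struct user) - 3)
		return -EIO;
	return 0;
}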
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 929e5c9d3ad9..1b7598e6f6e8 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -85,7 +85,7 @@ void smp_ipi_timer_interrupt(struct pt_regs *);
 void smp_local_timer_interrupt(void);
 
 static void send_IPI_allbutself(int, int);
-static void send_IPI_mask(cpumask_t, int, int);
+static void send_IPI_mask(const struct cpumask *, int, int);
 unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -113,7 +113,7 @@ unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 void smp_send_reschedule(int cpu_id)
 {
 	WARN_ON(cpu_is_offline(cpu_id));
-	send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
+	send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
 }
 
 /*==========================================================================*
@@ -168,7 +168,7 @@ void smp_flush_cache_all(void)
 	spin_lock(&flushcache_lock);
 	mask=cpus_addr(cpumask);
 	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
-	send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
+	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
 	_flush_cache_copyback_all();
 	while (flushcache_cpumask)
 		mb();
@@ -264,7 +264,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 	if (*mmc != NO_CONTEXT) {
@@ -273,7 +273,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 		if (mm == current->mm)
 			activate_context(mm);
 		else
-			cpu_clear(cpu_id, mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
 		local_irq_restore(flags);
 	}
 	if (!cpus_empty(cpu_mask))
@@ -334,7 +334,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 #ifdef DEBUG_SMP
@@ -424,7 +424,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
-	send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);
+	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
 
 	while (!cpus_empty(flush_cpumask)) {
 		/* nothing. lockup detection does not belong here */
@@ -469,7 +469,7 @@ void smp_invalidate_interrupt(void)
 		if (flush_mm == current->active_mm)
 			activate_context(flush_mm);
 		else
-			cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
 	} else {
 		unsigned long va = flush_va;
 
@@ -546,14 +546,14 @@ static void stop_this_cpu(void *dummy)
 	for ( ; ; );
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
+	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
 }
 
 /*==========================================================================*
@@ -729,7 +729,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
 	cpumask = cpu_online_map;
 	cpu_clear(smp_processor_id(), cpumask);
 
-	send_IPI_mask(cpumask, ipi_num, try);
+	send_IPI_mask(&cpumask, ipi_num, try);
 }
 
 /*==========================================================================*
@@ -752,7 +752,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
  * ---------- --- --------------------------------------------------------
  *
  *==========================================================================*/
-static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
+static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
 {
 	cpumask_t physid_mask, tmp;
 	int cpu_id, phys_id;
@@ -761,11 +761,11 @@ static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
 	if (num_cpus <= 1)	/* NO MP */
 		return;
 
-	cpus_and(tmp, cpumask, cpu_online_map);
-	BUG_ON(!cpus_equal(cpumask, tmp));
+	cpumask_and(&tmp, cpumask, cpu_online_mask);
+	BUG_ON(!cpumask_equal(cpumask, &tmp));
 
 	physid_mask = CPU_MASK_NONE;
-	for_each_cpu_mask(cpu_id, cpumask){
+	for_each_cpu(cpu_id, cpumask) {
 		if ((phys_id = cpu_to_physid(cpu_id)) != -1)
 			cpu_set(phys_id, physid_mask);
 	}
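All the smp.c hunks are one mechanical conversion: send_IPI_mask() now takes const struct cpumask * instead of a cpumask_t by value, so an NR_CPUS-bit mask is no longer copied through the stack on every IPI. Callers with a local mask pass its address, and single-CPU callers switch from cpumask_of_cpu() (which yields a mask) to cpumask_of() (which yields a const pointer). A usage sketch mirroring send_IPI_allbutself() above (hypothetical function name):

/* Sketch: build a local mask, then pass it by address. */
static void ipi_all_but_self_sketch(int ipi_num, int try)
{
	cpumask_t local = cpu_online_map;

	cpu_clear(smp_processor_id(), local);
	send_IPI_mask(&local, ipi_num, try);	/* was: send_IPI_mask(local, ...) */
}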
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 2547d6c4a827..e034844cfc0d 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
 		physid_set(phys_id, phys_cpu_present_map);
 #ifndef CONFIG_HOTPLUG_CPU
-	cpu_present_map = cpu_possible_map;
+	init_cpu_present(&cpu_possible_map);
 #endif
 
 	show_mp_info(nr_cpu);
@@ -213,7 +213,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		if (!physid_isset(phys_id, phys_cpu_present_map))
 			continue;
 
-		if ((max_cpus >= 0) && (max_cpus <= cpucount + 1))
+		if (max_cpus <= cpucount + 1)
 			continue;
 
 		do_boot_cpu(phys_id);
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index cada3ba4b990..ba61c4c73202 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -48,7 +48,7 @@ extern void smp_local_timer_interrupt(void);
 
 static unsigned long latch;
 
-static unsigned long do_gettimeoffset(void)
+u32 arch_gettimeoffset(void)
 {
 	unsigned long elapsed_time = 0;	/* [us] */
 
@@ -93,79 +93,10 @@ static unsigned long do_gettimeoffset(void)
 #error no chip configuration
 #endif
 
-	return elapsed_time;
+	return elapsed_time * 1000;
 }
 
 /*
- * This version of gettimeofday has near microsecond resolution.
- */
-void do_gettimeofday(struct timeval *tv)
-{
-	unsigned long seq;
-	unsigned long usec, sec;
-	unsigned long max_ntp_tick = tick_usec - tickadj;
-
-	do {
-		seq = read_seqbegin(&xtime_lock);
-
-		usec = do_gettimeoffset();
-
-		/*
-		 * If time_adjust is negative then NTP is slowing the clock
-		 * so make sure not to go into next possible interval.
-		 * Better to lose some accuracy than have time go backwards..
-		 */
-		if (unlikely(time_adjust < 0))
-			usec = min(usec, max_ntp_tick);
-
-		sec = xtime.tv_sec;
-		usec += (xtime.tv_nsec / 1000);
-	} while (read_seqretry(&xtime_lock, seq));
-
-	while (usec >= 1000000) {
-		usec -= 1000000;
-		sec++;
-	}
-
-	tv->tv_sec = sec;
-	tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
-	time_t wtm_sec, sec = tv->tv_sec;
-	long wtm_nsec, nsec = tv->tv_nsec;
-
-	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-		return -EINVAL;
-
-	write_seqlock_irq(&xtime_lock);
-	/*
-	 * This is revolting. We need to set "xtime" correctly. However, the
-	 * value in this location is the value at the most recent update of
-	 * wall time. Discover what correction gettimeofday() would have
-	 * made, and then undo it!
-	 */
-	nsec -= do_gettimeoffset() * NSEC_PER_USEC;
-
-	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
-	set_normalized_timespec(&xtime, sec, nsec);
-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
-	ntp_clear();
-	write_sequnlock_irq(&xtime_lock);
-	clock_was_set();
-
-	return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-
-/*
  * In order to set the CMOS clock precisely, set_rtc_mmss has to be
  * called 500 ms after the second nowtime has started, because when
  * nowtime is written into the registers of the CMOS clock, it will
@@ -192,6 +123,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 #ifndef CONFIG_SMP
 	profile_tick(CPU_PROFILING);
 #endif
+	/* XXX FIXME. Uh, the xtime_lock should be held here, no? */
 	do_timer(1);
 
 #ifndef CONFIG_SMP
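The roughly seventy deleted lines are the payoff of the Kconfig change at the top: do_gettimeofday() and do_settimeofday() now come from the generic timekeeping core, and the architecture keeps only the sub-tick hook, renamed from do_gettimeoffset() to arch_gettimeoffset() and rescaled from microseconds to nanoseconds (hence return elapsed_time * 1000). Conceptually, the generic path does what the deleted code did; a hedged sketch, not the real implementation:

#include <linux/time.h>

/* The generic core reads xtime under its own seqlock and folds in
 * arch_gettimeoffset(), preserving the near-microsecond resolution the
 * deleted m32r do_gettimeofday() advertised. */
static void gettimeofday_equivalent(struct timeval *tv)
{
	struct timespec ts;

	getnstimeofday(&ts);
	tv->tv_sec  = ts.tv_sec;
	tv->tv_usec = ts.tv_nsec / 1000;
}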
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index 24d429f9358a..9f581df3952b 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -171,7 +171,7 @@ void __init mem_init(void)
 
 	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
 		"%dk reserved, %dk data, %dk init)\n",
-		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+		nr_free_pages() << (PAGE_SHIFT-10),
 		num_physpages << (PAGE_SHIFT-10),
 		codesize >> 10,
 		reservedpages << (PAGE_SHIFT-10),