| field | value | date |
|---|---|---|
| author | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-30 19:16:45 -0400 |
| committer | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-30 19:16:45 -0400 |
| commit | ada47b5fe13d89735805b566185f4885f5a3f750 (patch) | |
| tree | 644b88f8a71896307d71438e9b3af49126ffb22b /arch/microblaze/kernel | |
| parent | 43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff) | |
| parent | 3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff) | |
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'arch/microblaze/kernel')
37 files changed, 1756 insertions, 1589 deletions
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index d487729683de..e51bc1520825 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -2,12 +2,22 @@
| 2 | # Makefile | 2 | # Makefile |
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | ifdef CONFIG_FUNCTION_TRACER | ||
| 6 | # Do not trace early boot code and low level code | ||
| 7 | CFLAGS_REMOVE_timer.o = -pg | ||
| 8 | CFLAGS_REMOVE_intc.o = -pg | ||
| 9 | CFLAGS_REMOVE_early_printk.o = -pg | ||
| 10 | CFLAGS_REMOVE_selfmod.o = -pg | ||
| 11 | CFLAGS_REMOVE_heartbeat.o = -pg | ||
| 12 | CFLAGS_REMOVE_ftrace.o = -pg | ||
| 13 | endif | ||
| 14 | |||
| 5 | extra-y := head.o vmlinux.lds | 15 | extra-y := head.o vmlinux.lds |
| 6 | 16 | ||
| 7 | obj-y += exceptions.o \ | 17 | obj-y += dma.o exceptions.o \ |
| 8 | hw_exception_handler.o init_task.o intc.o irq.o of_device.o \ | 18 | hw_exception_handler.o init_task.o intc.o irq.o of_device.o \ |
| 9 | of_platform.o process.o prom.o prom_parse.o ptrace.o \ | 19 | of_platform.o process.o prom.o prom_parse.o ptrace.o \ |
| 10 | setup.o signal.o sys_microblaze.o timer.o traps.o | 20 | setup.o signal.o sys_microblaze.o timer.o traps.o reset.o |
| 11 | 21 | ||
| 12 | obj-y += cpu/ | 22 | obj-y += cpu/ |
| 13 | 23 | ||
@@ -16,5 +26,7 @@ obj-$(CONFIG_SELFMOD) += selfmod.o
| 16 | obj-$(CONFIG_HEART_BEAT) += heartbeat.o | 26 | obj-$(CONFIG_HEART_BEAT) += heartbeat.o |
| 17 | obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o | 27 | obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o |
| 18 | obj-$(CONFIG_MMU) += misc.o | 28 | obj-$(CONFIG_MMU) += misc.o |
| 29 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | ||
| 30 | obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount.o | ||
| 19 | 31 | ||
| 20 | obj-y += entry$(MMU).o | 32 | obj-y += entry$(MMU).o |
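
The new `CFLAGS_REMOVE_<object>.o = -pg` lines keep the function tracer out of early-boot and low-level objects, while the `obj-y` additions pull in the new dma.o, reset.o, stacktrace.o, ftrace.o and mcount.o. As a rough illustration of what `-pg` instrumentation amounts to (and why code that runs before the tracer is initialised must not receive it), here is a user-space sketch; `trace_hook` and `traced_fn` are made-up names, not kernel symbols.

```c
#include <stdio.h>

/* Minimal user-space sketch (not from the patch) of what GCC's -pg option
 * amounts to: every instrumented function first calls a tracer hook
 * (_mcount in the kernel).  trace_hook and traced_fn are illustrative. */
static void trace_hook(const char *fn)
{
	printf("enter %s\n", fn);
}

static void traced_fn(void)
{
	trace_hook(__func__);	/* conceptually what the compiler inserts */
	/* ... real work ... */
}

int main(void)
{
	traced_fn();
	return 0;
}
```
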
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c
index 7bc7b68f97db..c1b459c97571 100644
--- a/arch/microblaze/kernel/asm-offsets.c
+++ b/arch/microblaze/kernel/asm-offsets.c
@@ -16,6 +16,7 @@
| 16 | #include <linux/hardirq.h> | 16 | #include <linux/hardirq.h> |
| 17 | #include <linux/thread_info.h> | 17 | #include <linux/thread_info.h> |
| 18 | #include <linux/kbuild.h> | 18 | #include <linux/kbuild.h> |
| 19 | #include <asm/cpuinfo.h> | ||
| 19 | 20 | ||
| 20 | int main(int argc, char *argv[]) | 21 | int main(int argc, char *argv[]) |
| 21 | { | 22 | { |
@@ -90,6 +91,7 @@ int main(int argc, char *argv[])
| 90 | DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); | 91 | DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); |
| 91 | DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); | 92 | DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); |
| 92 | DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context)); | 93 | DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context)); |
| 94 | DEFINE(TI_PREEMPT_COUNT, offsetof(struct thread_info, preempt_count)); | ||
| 93 | BLANK(); | 95 | BLANK(); |
| 94 | 96 | ||
| 95 | /* struct cpu_context */ | 97 | /* struct cpu_context */ |
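
The asm-offsets.c hunk exports one more constant, TI_PREEMPT_COUNT, so that assembly code can reach `thread_info->preempt_count`. A minimal sketch of the underlying `offsetof` mechanism, using a cut-down stand-in structure rather than the real microblaze `thread_info`:

```c
#include <stddef.h>
#include <stdio.h>

/* Sketch of the asm-offsets mechanism behind the new TI_PREEMPT_COUNT
 * constant: offsets of C struct fields are emitted so assembly (entry.S)
 * can address them.  struct thread_info_example is a cut-down stand-in. */
struct thread_info_example {
	unsigned long flags;
	unsigned long addr_limit;
	int preempt_count;
};

int main(void)
{
	printf("TI_PREEMPT_COUNT = %zu\n",
	       offsetof(struct thread_info_example, preempt_count));
	return 0;
}
```
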
diff --git a/arch/microblaze/kernel/cpu/Makefile b/arch/microblaze/kernel/cpu/Makefile
index 20646e549271..59cc7bceaf8c 100644
--- a/arch/microblaze/kernel/cpu/Makefile
+++ b/arch/microblaze/kernel/cpu/Makefile
@@ -2,6 +2,10 @@
| 2 | # Build the appropriate CPU version support | 2 | # Build the appropriate CPU version support |
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | ifdef CONFIG_FUNCTION_TRACER | ||
| 6 | CFLAGS_REMOVE_cache.o = -pg | ||
| 7 | endif | ||
| 8 | |||
| 5 | EXTRA_CFLAGS += -DCPU_MAJOR=$(CPU_MAJOR) -DCPU_MINOR=$(CPU_MINOR) \ | 9 | EXTRA_CFLAGS += -DCPU_MAJOR=$(CPU_MAJOR) -DCPU_MINOR=$(CPU_MINOR) \ |
| 6 | -DCPU_REV=$(CPU_REV) | 10 | -DCPU_REV=$(CPU_REV) |
| 7 | 11 | ||
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index af866a450125..109876e8d643 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -3,7 +3,7 @@
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> | 4 | * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> |
| 5 | * Copyright (C) 2007-2009 PetaLogix | 5 | * Copyright (C) 2007-2009 PetaLogix |
| 6 | * Copyright (C) 2007 John Williams <john.williams@petalogix.com> | 6 | * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com> |
| 7 | * | 7 | * |
| 8 | * This file is subject to the terms and conditions of the GNU General | 8 | * This file is subject to the terms and conditions of the GNU General |
| 9 | * Public License. See the file COPYING in the main directory of this | 9 | * Public License. See the file COPYING in the main directory of this |
@@ -13,243 +13,655 @@
| 13 | #include <asm/cacheflush.h> | 13 | #include <asm/cacheflush.h> |
| 14 | #include <linux/cache.h> | 14 | #include <linux/cache.h> |
| 15 | #include <asm/cpuinfo.h> | 15 | #include <asm/cpuinfo.h> |
| 16 | #include <asm/pvr.h> | ||
| 16 | 17 | ||
| 17 | /* Exported functions */ | 18 | static inline void __enable_icache_msr(void) |
| 19 | { | ||
| 20 | __asm__ __volatile__ (" msrset r0, %0; \ | ||
| 21 | nop; " \ | ||
| 22 | : : "i" (MSR_ICE) : "memory"); | ||
| 23 | } | ||
| 24 | |||
| 25 | static inline void __disable_icache_msr(void) | ||
| 26 | { | ||
| 27 | __asm__ __volatile__ (" msrclr r0, %0; \ | ||
| 28 | nop; " \ | ||
| 29 | : : "i" (MSR_ICE) : "memory"); | ||
| 30 | } | ||
| 18 | 31 | ||
| 19 | void _enable_icache(void) | 32 | static inline void __enable_dcache_msr(void) |
| 20 | { | 33 | { |
| 21 | if (cpuinfo.use_icache) { | 34 | __asm__ __volatile__ (" msrset r0, %0; \ |
| 22 | #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR | 35 | nop; " \ |
| 23 | __asm__ __volatile__ (" \ | 36 | : \ |
| 24 | msrset r0, %0; \ | 37 | : "i" (MSR_DCE) \ |
| 25 | nop; " \ | ||
| 26 | : \ | ||
| 27 | : "i" (MSR_ICE) \ | ||
| 28 | : "memory"); | 38 | : "memory"); |
| 29 | #else | ||
| 30 | __asm__ __volatile__ (" \ | ||
| 31 | mfs r12, rmsr; \ | ||
| 32 | nop; \ | ||
| 33 | ori r12, r12, %0; \ | ||
| 34 | mts rmsr, r12; \ | ||
| 35 | nop; " \ | ||
| 36 | : \ | ||
| 37 | : "i" (MSR_ICE) \ | ||
| 38 | : "memory", "r12"); | ||
| 39 | #endif | ||
| 40 | } | ||
| 41 | } | 39 | } |
| 42 | 40 | ||
| 43 | void _disable_icache(void) | 41 | static inline void __disable_dcache_msr(void) |
| 44 | { | 42 | { |
| 45 | if (cpuinfo.use_icache) { | 43 | __asm__ __volatile__ (" msrclr r0, %0; \ |
| 46 | #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR | 44 | nop; " \ |
| 47 | __asm__ __volatile__ (" \ | 45 | : \ |
| 48 | msrclr r0, %0; \ | 46 | : "i" (MSR_DCE) \ |
| 49 | nop; " \ | ||
| 50 | : \ | ||
| 51 | : "i" (MSR_ICE) \ | ||
| 52 | : "memory"); | 47 | : "memory"); |
| 53 | #else | 48 | } |
| 54 | __asm__ __volatile__ (" \ | 49 | |
| 55 | mfs r12, rmsr; \ | 50 | static inline void __enable_icache_nomsr(void) |
| 56 | nop; \ | 51 | { |
| 57 | andi r12, r12, ~%0; \ | 52 | __asm__ __volatile__ (" mfs r12, rmsr; \ |
| 58 | mts rmsr, r12; \ | 53 | nop; \ |
| 59 | nop; " \ | 54 | ori r12, r12, %0; \ |
| 60 | : \ | 55 | mts rmsr, r12; \ |
| 61 | : "i" (MSR_ICE) \ | 56 | nop; " \ |
| 57 | : \ | ||
| 58 | : "i" (MSR_ICE) \ | ||
| 62 | : "memory", "r12"); | 59 | : "memory", "r12"); |
| 63 | #endif | ||
| 64 | } | ||
| 65 | } | 60 | } |
| 66 | 61 | ||
| 67 | void _invalidate_icache(unsigned int addr) | 62 | static inline void __disable_icache_nomsr(void) |
| 68 | { | 63 | { |
| 69 | if (cpuinfo.use_icache) { | 64 | __asm__ __volatile__ (" mfs r12, rmsr; \ |
| 70 | __asm__ __volatile__ (" \ | 65 | nop; \ |
| 71 | wic %0, r0" \ | 66 | andi r12, r12, ~%0; \ |
| 72 | : \ | 67 | mts rmsr, r12; \ |
| 73 | : "r" (addr)); | 68 | nop; " \ |
| 74 | } | 69 | : \ |
| 70 | : "i" (MSR_ICE) \ | ||
| 71 | : "memory", "r12"); | ||
| 75 | } | 72 | } |
| 76 | 73 | ||
| 77 | void _enable_dcache(void) | 74 | static inline void __enable_dcache_nomsr(void) |
| 78 | { | 75 | { |
| 79 | if (cpuinfo.use_dcache) { | 76 | __asm__ __volatile__ (" mfs r12, rmsr; \ |
| 80 | #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR | 77 | nop; \ |
| 81 | __asm__ __volatile__ (" \ | 78 | ori r12, r12, %0; \ |
| 82 | msrset r0, %0; \ | 79 | mts rmsr, r12; \ |
| 83 | nop; " \ | 80 | nop; " \ |
| 84 | : \ | 81 | : \ |
| 85 | : "i" (MSR_DCE) \ | 82 | : "i" (MSR_DCE) \ |
| 86 | : "memory"); | 83 | : "memory", "r12"); |
| 87 | #else | 84 | } |
| 88 | __asm__ __volatile__ (" \ | 85 | |
| 89 | mfs r12, rmsr; \ | 86 | static inline void __disable_dcache_nomsr(void) |
| 90 | nop; \ | 87 | { |
| 91 | ori r12, r12, %0; \ | 88 | __asm__ __volatile__ (" mfs r12, rmsr; \ |
| 92 | mts rmsr, r12; \ | 89 | nop; \ |
| 93 | nop; " \ | 90 | andi r12, r12, ~%0; \ |
| 94 | : \ | 91 | mts rmsr, r12; \ |
| 95 | : "i" (MSR_DCE) \ | 92 | nop; " \ |
| 93 | : \ | ||
| 94 | : "i" (MSR_DCE) \ | ||
| 96 | : "memory", "r12"); | 95 | : "memory", "r12"); |
| 96 | } | ||
| 97 | |||
| 98 | |||
| 99 | /* Helper macro for computing the limits of cache range loops | ||
| 100 | * | ||
| 101 | * End address can be unaligned which is OK for C implementation. | ||
| 102 | * ASM implementation align it in ASM macros | ||
| 103 | */ | ||
| 104 | #define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \ | ||
| 105 | do { \ | ||
| 106 | int align = ~(cache_line_length - 1); \ | ||
| 107 | end = min(start + cache_size, end); \ | ||
| 108 | start &= align; \ | ||
| 109 | } while (0); | ||
| 110 | |||
| 111 | /* | ||
| 112 | * Helper macro to loop over the specified cache_size/line_length and | ||
| 113 | * execute 'op' on that cacheline | ||
| 114 | */ | ||
| 115 | #define CACHE_ALL_LOOP(cache_size, line_length, op) \ | ||
| 116 | do { \ | ||
| 117 | unsigned int len = cache_size - line_length; \ | ||
| 118 | int step = -line_length; \ | ||
| 119 | WARN_ON(step >= 0); \ | ||
| 120 | \ | ||
| 121 | __asm__ __volatile__ (" 1: " #op " %0, r0; \ | ||
| 122 | bgtid %0, 1b; \ | ||
| 123 | addk %0, %0, %1; \ | ||
| 124 | " : : "r" (len), "r" (step) \ | ||
| 125 | : "memory"); \ | ||
| 126 | } while (0); | ||
| 127 | |||
| 128 | /* Used for wdc.flush/clear which can use rB for offset which is not possible | ||
| 129 | * to use for simple wdc or wic. | ||
| 130 | * | ||
| 131 | * start address is cache aligned | ||
| 132 | * end address is not aligned, if end is aligned then I have to substract | ||
| 133 | * cacheline length because I can't flush/invalidate the next cacheline. | ||
| 134 | * If is not, I align it because I will flush/invalidate whole line. | ||
| 135 | */ | ||
| 136 | #define CACHE_RANGE_LOOP_2(start, end, line_length, op) \ | ||
| 137 | do { \ | ||
| 138 | int step = -line_length; \ | ||
| 139 | int align = ~(line_length - 1); \ | ||
| 140 | int count; \ | ||
| 141 | end = ((end & align) == end) ? end - line_length : end & align; \ | ||
| 142 | count = end - start; \ | ||
| 143 | WARN_ON(count < 0); \ | ||
| 144 | \ | ||
| 145 | __asm__ __volatile__ (" 1: " #op " %0, %1; \ | ||
| 146 | bgtid %1, 1b; \ | ||
| 147 | addk %1, %1, %2; \ | ||
| 148 | " : : "r" (start), "r" (count), \ | ||
| 149 | "r" (step) : "memory"); \ | ||
| 150 | } while (0); | ||
| 151 | |||
| 152 | /* It is used only first parameter for OP - for wic, wdc */ | ||
| 153 | #define CACHE_RANGE_LOOP_1(start, end, line_length, op) \ | ||
| 154 | do { \ | ||
| 155 | int volatile temp; \ | ||
| 156 | int align = ~(line_length - 1); \ | ||
| 157 | end = ((end & align) == end) ? end - line_length : end & align; \ | ||
| 158 | WARN_ON(end - start < 0); \ | ||
| 159 | \ | ||
| 160 | __asm__ __volatile__ (" 1: " #op " %1, r0; \ | ||
| 161 | cmpu %0, %1, %2; \ | ||
| 162 | bgtid %0, 1b; \ | ||
| 163 | addk %1, %1, %3; \ | ||
| 164 | " : : "r" (temp), "r" (start), "r" (end),\ | ||
| 165 | "r" (line_length) : "memory"); \ | ||
| 166 | } while (0); | ||
| 167 | |||
| 168 | #define ASM_LOOP | ||
| 169 | |||
| 170 | static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end) | ||
| 171 | { | ||
| 172 | unsigned long flags; | ||
| 173 | #ifndef ASM_LOOP | ||
| 174 | int i; | ||
| 97 | #endif | 175 | #endif |
| 98 | } | 176 | pr_debug("%s: start 0x%x, end 0x%x\n", __func__, |
| 177 | (unsigned int)start, (unsigned int) end); | ||
| 178 | |||
| 179 | CACHE_LOOP_LIMITS(start, end, | ||
| 180 | cpuinfo.icache_line_length, cpuinfo.icache_size); | ||
| 181 | |||
| 182 | local_irq_save(flags); | ||
| 183 | __disable_icache_msr(); | ||
| 184 | |||
| 185 | #ifdef ASM_LOOP | ||
| 186 | CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); | ||
| 187 | #else | ||
| 188 | for (i = start; i < end; i += cpuinfo.icache_line_length) | ||
| 189 | __asm__ __volatile__ ("wic %0, r0;" \ | ||
| 190 | : : "r" (i)); | ||
| 191 | #endif | ||
| 192 | __enable_icache_msr(); | ||
| 193 | local_irq_restore(flags); | ||
| 99 | } | 194 | } |
| 100 | 195 | ||
| 101 | void _disable_dcache(void) | 196 | static void __flush_icache_range_nomsr_irq(unsigned long start, |
| 197 | unsigned long end) | ||
| 102 | { | 198 | { |
| 103 | #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR | 199 | unsigned long flags; |
| 104 | __asm__ __volatile__ (" \ | 200 | #ifndef ASM_LOOP |
| 105 | msrclr r0, %0; \ | 201 | int i; |
| 106 | nop; " \ | 202 | #endif |
| 107 | : \ | 203 | pr_debug("%s: start 0x%x, end 0x%x\n", __func__, |
| 108 | : "i" (MSR_DCE) \ | 204 | (unsigned int)start, (unsigned int) end); |
| 109 | : "memory"); | 205 | |
| 206 | CACHE_LOOP_LIMITS(start, end, | ||
| 207 | cpuinfo.icache_line_length, cpuinfo.icache_size); | ||
| 208 | |||
| 209 | local_irq_save(flags); | ||
| 210 | __disable_icache_nomsr(); | ||
| 211 | |||
| 212 | #ifdef ASM_LOOP | ||
| 213 | CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); | ||
| 110 | #else | 214 | #else |
| 111 | __asm__ __volatile__ (" \ | 215 | for (i = start; i < end; i += cpuinfo.icache_line_length) |
| 112 | mfs r12, rmsr; \ | 216 | __asm__ __volatile__ ("wic %0, r0;" \ |
| 113 | nop; \ | 217 | : : "r" (i)); |
| 114 | andi r12, r12, ~%0; \ | ||
| 115 | mts rmsr, r12; \ | ||
| 116 | nop; " \ | ||
| 117 | : \ | ||
| 118 | : "i" (MSR_DCE) \ | ||
| 119 | : "memory", "r12"); | ||
| 120 | #endif | 218 | #endif |
| 219 | |||
| 220 | __enable_icache_nomsr(); | ||
| 221 | local_irq_restore(flags); | ||
| 121 | } | 222 | } |
| 122 | 223 | ||
| 123 | void _invalidate_dcache(unsigned int addr) | 224 | static void __flush_icache_range_noirq(unsigned long start, |
| 225 | unsigned long end) | ||
| 124 | { | 226 | { |
| 125 | __asm__ __volatile__ (" \ | 227 | #ifndef ASM_LOOP |
| 126 | wdc %0, r0" \ | 228 | int i; |
| 127 | : \ | 229 | #endif |
| 128 | : "r" (addr)); | 230 | pr_debug("%s: start 0x%x, end 0x%x\n", __func__, |
| 231 | (unsigned int)start, (unsigned int) end); | ||
| 232 | |||
| 233 | CACHE_LOOP_LIMITS(start, end, | ||
| 234 | cpuinfo.icache_line_length, cpuinfo.icache_size); | ||
| 235 | #ifdef ASM_LOOP | ||
| 236 | CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); | ||
| 237 | #else | ||
| 238 | for (i = start; i < end; i += cpuinfo.icache_line_length) | ||
| 239 | __asm__ __volatile__ ("wic %0, r0;" \ | ||
| 240 | : : "r" (i)); | ||
| 241 | #endif | ||
| 129 | } | 242 | } |
| 130 | 243 | ||
| 131 | void __invalidate_icache_all(void) | 244 | static void __flush_icache_all_msr_irq(void) |
| 132 | { | 245 | { |
| 133 | unsigned int i; | 246 | unsigned long flags; |
| 134 | unsigned flags; | 247 | #ifndef ASM_LOOP |
| 248 | int i; | ||
| 249 | #endif | ||
| 250 | pr_debug("%s\n", __func__); | ||
| 135 | 251 | ||
| 136 | if (cpuinfo.use_icache) { | 252 | local_irq_save(flags); |
| 137 | local_irq_save(flags); | 253 | __disable_icache_msr(); |
| 138 | __disable_icache(); | 254 | #ifdef ASM_LOOP |
| 255 | CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); | ||
| 256 | #else | ||
| 257 | for (i = 0; i < cpuinfo.icache_size; | ||
| 258 | i += cpuinfo.icache_line_length) | ||
| 259 | __asm__ __volatile__ ("wic %0, r0;" \ | ||
| 260 | : : "r" (i)); | ||
| 261 | #endif | ||
| 262 | __enable_icache_msr(); | ||
| 263 | local_irq_restore(flags); | ||
| 264 | } | ||
| 139 | 265 | ||
| 140 | /* Just loop through cache size and invalidate, no need to add | 266 | static void __flush_icache_all_nomsr_irq(void) |
| 141 | CACHE_BASE address */ | 267 | { |
| 142 | for (i = 0; i < cpuinfo.icache_size; | 268 | unsigned long flags; |
| 143 | i += cpuinfo.icache_line) | 269 | #ifndef ASM_LOOP |
| 144 | __invalidate_icache(i); | 270 | int i; |
| 271 | #endif | ||
| 272 | pr_debug("%s\n", __func__); | ||
| 145 | 273 | ||
| 146 | __enable_icache(); | 274 | local_irq_save(flags); |
| 147 | local_irq_restore(flags); | 275 | __disable_icache_nomsr(); |
| 148 | } | 276 | #ifdef ASM_LOOP |
| 277 | CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); | ||
| 278 | #else | ||
| 279 | for (i = 0; i < cpuinfo.icache_size; | ||
| 280 | i += cpuinfo.icache_line_length) | ||
| 281 | __asm__ __volatile__ ("wic %0, r0;" \ | ||
| 282 | : : "r" (i)); | ||
| 283 | #endif | ||
| 284 | __enable_icache_nomsr(); | ||
| 285 | local_irq_restore(flags); | ||
| 149 | } | 286 | } |
| 150 | 287 | ||
| 151 | void __invalidate_icache_range(unsigned long start, unsigned long end) | 288 | static void __flush_icache_all_noirq(void) |
| 152 | { | 289 | { |
| 153 | unsigned int i; | 290 | #ifndef ASM_LOOP |
| 154 | unsigned flags; | 291 | int i; |
| 155 | unsigned int align; | 292 | #endif |
| 293 | pr_debug("%s\n", __func__); | ||
| 294 | #ifdef ASM_LOOP | ||
| 295 | CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); | ||
| 296 | #else | ||
| 297 | for (i = 0; i < cpuinfo.icache_size; | ||
| 298 | i += cpuinfo.icache_line_length) | ||
| 299 | __asm__ __volatile__ ("wic %0, r0;" \ | ||
| 300 | : : "r" (i)); | ||
| 301 | #endif | ||
| 302 | } | ||
| 156 | 303 | ||
| 157 | if (cpuinfo.use_icache) { | 304 | static void __invalidate_dcache_all_msr_irq(void) |
| 158 | /* | 305 | { |
| 159 | * No need to cover entire cache range, | 306 | unsigned long flags; |
| 160 | * just cover cache footprint | 307 | #ifndef ASM_LOOP |
| 161 | */ | 308 | int i; |
| 162 | end = min(start + cpuinfo.icache_size, end); | 309 | #endif |
| 163 | align = ~(cpuinfo.icache_line - 1); | 310 | pr_debug("%s\n", __func__); |
| 164 | start &= align; /* Make sure we are aligned */ | 311 | |
| 165 | /* Push end up to the next cache line */ | 312 | local_irq_save(flags); |
| 166 | end = ((end & align) + cpuinfo.icache_line); | 313 | __disable_dcache_msr(); |
| 314 | #ifdef ASM_LOOP | ||
| 315 | CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); | ||
| 316 | #else | ||
| 317 | for (i = 0; i < cpuinfo.dcache_size; | ||
| 318 | i += cpuinfo.dcache_line_length) | ||
| 319 | __asm__ __volatile__ ("wdc %0, r0;" \ | ||
| 320 | : : "r" (i)); | ||
| 321 | #endif | ||
| 322 | __enable_dcache_msr(); | ||
| 323 | local_irq_restore(flags); | ||
| 324 | } | ||
| 167 | 325 | ||
| 168 | local_irq_save(flags); | 326 | static void __invalidate_dcache_all_nomsr_irq(void) |
| 169 | __disable_icache(); | 327 | { |
| 328 | unsigned long flags; | ||
| 329 | #ifndef ASM_LOOP | ||
| 330 | int i; | ||
| 331 | #endif | ||
| 332 | pr_debug("%s\n", __func__); | ||
| 170 | 333 | ||
| 171 | for (i = start; i < end; i += cpuinfo.icache_line) | 334 | local_irq_save(flags); |
| 172 | __invalidate_icache(i); | 335 | __disable_dcache_nomsr(); |
| 336 | #ifdef ASM_LOOP | ||
| 337 | CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); | ||
| 338 | #else | ||
| 339 | for (i = 0; i < cpuinfo.dcache_size; | ||
| 340 | i += cpuinfo.dcache_line_length) | ||
| 341 | __asm__ __volatile__ ("wdc %0, r0;" \ | ||
| 342 | : : "r" (i)); | ||
| 343 | #endif | ||
| 344 | __enable_dcache_nomsr(); | ||
| 345 | local_irq_restore(flags); | ||
| 346 | } | ||
| 173 | 347 | ||
| 174 | __enable_icache(); | 348 | static void __invalidate_dcache_all_noirq_wt(void) |
| 175 | local_irq_restore(flags); | 349 | { |
| 176 | } | 350 | #ifndef ASM_LOOP |
| 351 | int i; | ||
| 352 | #endif | ||
| 353 | pr_debug("%s\n", __func__); | ||
| 354 | #ifdef ASM_LOOP | ||
| 355 | CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc) | ||
| 356 | #else | ||
| 357 | for (i = 0; i < cpuinfo.dcache_size; | ||
| 358 | i += cpuinfo.dcache_line_length) | ||
| 359 | __asm__ __volatile__ ("wdc %0, r0;" \ | ||
| 360 | : : "r" (i)); | ||
| 361 | #endif | ||
| 177 | } | 362 | } |
| 178 | 363 | ||
| 179 | void __invalidate_icache_page(struct vm_area_struct *vma, struct page *page) | 364 | /* FIXME It is blindly invalidation as is expected |
| 365 | * but can't be called on noMMU in microblaze_cache_init below | ||
| 366 | * | ||
| 367 | * MS: noMMU kernel won't boot if simple wdc is used | ||
| 368 | * The reason should be that there are discared data which kernel needs | ||
| 369 | */ | ||
| 370 | static void __invalidate_dcache_all_wb(void) | ||
| 180 | { | 371 | { |
| 181 | __invalidate_icache_all(); | 372 | #ifndef ASM_LOOP |
| 373 | int i; | ||
| 374 | #endif | ||
| 375 | pr_debug("%s\n", __func__); | ||
| 376 | #ifdef ASM_LOOP | ||
| 377 | CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, | ||
| 378 | wdc) | ||
| 379 | #else | ||
| 380 | for (i = 0; i < cpuinfo.dcache_size; | ||
| 381 | i += cpuinfo.dcache_line_length) | ||
| 382 | __asm__ __volatile__ ("wdc %0, r0;" \ | ||
| 383 | : : "r" (i)); | ||
| 384 | #endif | ||
| 182 | } | 385 | } |
| 183 | 386 | ||
| 184 | void __invalidate_icache_user_range(struct vm_area_struct *vma, | 387 | static void __invalidate_dcache_range_wb(unsigned long start, |
| 185 | struct page *page, unsigned long adr, | 388 | unsigned long end) |
| 186 | int len) | ||
| 187 | { | 389 | { |
| 188 | __invalidate_icache_all(); | 390 | #ifndef ASM_LOOP |
| 391 | int i; | ||
| 392 | #endif | ||
| 393 | pr_debug("%s: start 0x%x, end 0x%x\n", __func__, | ||
| 394 | (unsigned int)start, (unsigned int) end); | ||
| 395 | |||
| 396 | CACHE_LOOP_LIMITS(start, end, | ||
| 397 | cpuinfo.dcache_line_length, cpuinfo.dcache_size); | ||
| 398 | #ifdef ASM_LOOP | ||
| 399 | CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear); | ||
| 400 | #else | ||
| 401 | for (i = start; i < end; i += cpuinfo.dcache_line_length) | ||
| 402 | __asm__ __volatile__ ("wdc.clear %0, r0;" \ | ||
| 403 | : : "r" (i)); | ||
| 404 | #endif | ||
| 189 | } | 405 | } |
| 190 | 406 | ||
| 191 | void __invalidate_cache_sigtramp(unsigned long addr) | 407 | static void __invalidate_dcache_range_nomsr_wt(unsigned long start, |
| 408 | unsigned long end) | ||
| 192 | { | 409 | { |
| 193 | __invalidate_icache_range(addr, addr + 8); | 410 | #ifndef ASM_LOOP |
| 411 | int i; | ||
| 412 | #endif | ||
| 413 | pr_debug("%s: start 0x%x, end 0x%x\n", __func__, | ||
| 414 | (unsigned int)start, (unsigned int) end); | ||
| 415 | CACHE_LOOP_LIMITS(start, end, | ||
| 416 | cpuinfo.dcache_line_length, cpuinfo.dcache_size); | ||
| 417 | |||
| 418 | #ifdef ASM_LOOP | ||
| 419 | CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); | ||
| 420 | #else | ||
| 421 | for (i = start; i < end; i += cpuinfo.dcache_line_length) | ||
| 422 | __asm__ __volatile__ ("wdc %0, r0;" \ | ||
| 423 | : : "r" (i)); | ||
| 424 | #endif | ||
| 194 | } | 425 | } |
| 195 | 426 | ||
| 196 | void __invalidate_dcache_all(void) | 427 | static void __invalidate_dcache_range_msr_irq_wt(unsigned long start, |
| 428 | unsigned long end) | ||
| 197 | { | 429 | { |
| 198 | unsigned int i; | 430 | unsigned long flags; |
| 199 | unsigned flags; | 431 | #ifndef ASM_LOOP |
| 432 | int i; | ||
| 433 | #endif | ||
| 434 | pr_debug("%s: start 0x%x, end 0x%x\n", __func__, | ||
| 435 | (unsigned int)start, (unsigned int) end); | ||
| 436 | CACHE_LOOP_LIMITS(start, end, | ||
| 437 | cpuinfo.dcache_line_length, cpuinfo.dcache_size); | ||
| 200 | 438 | ||
| 201 | if (cpuinfo.use_dcache) { | 439 | local_irq_save(flags); |
| 202 | local_irq_save(flags); | 440 | __disable_dcache_msr(); |
| 203 | __disable_dcache(); | ||
| 204 | 441 | ||
| 205 | /* | 442 | #ifdef ASM_LOOP |
| 206 | * Just loop through cache size and invalidate, | 443 | CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); |
| 207 | * no need to add CACHE_BASE address | 444 | #else |
| 208 | */ | 445 | for (i = start; i < end; i += cpuinfo.dcache_line_length) |
| 209 | for (i = 0; i < cpuinfo.dcache_size; | 446 | __asm__ __volatile__ ("wdc %0, r0;" \ |
| 210 | i += cpuinfo.dcache_line) | 447 | : : "r" (i)); |
| 211 | __invalidate_dcache(i); | 448 | #endif |
| 212 | 449 | ||
| 213 | __enable_dcache(); | 450 | __enable_dcache_msr(); |
| 214 | local_irq_restore(flags); | 451 | local_irq_restore(flags); |
| 215 | } | ||
| 216 | } | 452 | } |
| 217 | 453 | ||
| 218 | void __invalidate_dcache_range(unsigned long start, unsigned long end) | 454 | static void __invalidate_dcache_range_nomsr_irq(unsigned long start, |
| 455 | unsigned long end) | ||
| 219 | { | 456 | { |
| 220 | unsigned int i; | 457 | unsigned long flags; |
| 221 | unsigned flags; | 458 | #ifndef ASM_LOOP |
| 222 | unsigned int align; | 459 | int i; |
| 460 | #endif | ||
| 461 | pr_debug("%s: start 0x%x, end 0x%x\n", __func__, | ||
| 462 | (unsigned int)start, (unsigned int) end); | ||
| 223 | 463 | ||
| 224 | if (cpuinfo.use_dcache) { | 464 | CACHE_LOOP_LIMITS(start, end, |
| 225 | /* | 465 | cpuinfo.dcache_line_length, cpuinfo.dcache_size); |
| 226 | * No need to cover entire cache range, | ||
| 227 | * just cover cache footprint | ||
| 228 | */ | ||
| 229 | end = min(start + cpuinfo.dcache_size, end); | ||
| 230 | align = ~(cpuinfo.dcache_line - 1); | ||
| 231 | start &= align; /* Make sure we are aligned */ | ||
| 232 | /* Push end up to the next cache line */ | ||
| 233 | end = ((end & align) + cpuinfo.dcache_line); | ||
| 234 | local_irq_save(flags); | ||
| 235 | __disable_dcache(); | ||
| 236 | 466 | ||
| 237 | for (i = start; i < end; i += cpuinfo.dcache_line) | 467 | local_irq_save(flags); |
| 238 | __invalidate_dcache(i); | 468 | __disable_dcache_nomsr(); |
| 239 | 469 | ||
| 240 | __enable_dcache(); | 470 | #ifdef ASM_LOOP |
| 241 | local_irq_restore(flags); | 471 | CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); |
| 242 | } | 472 | #else |
| 473 | for (i = start; i < end; i += cpuinfo.dcache_line_length) | ||
| 474 | __asm__ __volatile__ ("wdc %0, r0;" \ | ||
| 475 | : : "r" (i)); | ||
| 476 | #endif | ||
| 477 | |||
| 478 | __enable_dcache_nomsr(); | ||
| 479 | local_irq_restore(flags); | ||
| 480 | } | ||
| 481 | |||
| 482 | static void __flush_dcache_all_wb(void) | ||
| 483 | { | ||
| 484 | #ifndef ASM_LOOP | ||
| 485 | int i; | ||
| 486 | #endif | ||
| 487 | pr_debug("%s\n", __func__); | ||
| 488 | #ifdef ASM_LOOP | ||
| 489 | CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, | ||
| 490 | wdc.flush); | ||
| 491 | #else | ||
| 492 | for (i = 0; i < cpuinfo.dcache_size; | ||
| 493 | i += cpuinfo.dcache_line_length) | ||
| 494 | __asm__ __volatile__ ("wdc.flush %0, r0;" \ | ||
| 495 | : : "r" (i)); | ||
| 496 | #endif | ||
| 243 | } | 497 | } |
| 244 | 498 | ||
| 245 | void __invalidate_dcache_page(struct vm_area_struct *vma, struct page *page) | 499 | static void __flush_dcache_range_wb(unsigned long start, unsigned long end) |
| 246 | { | 500 | { |
| 247 | __invalidate_dcache_all(); | 501 | #ifndef ASM_LOOP |
| 502 | int i; | ||
| 503 | #endif | ||
| 504 | pr_debug("%s: start 0x%x, end 0x%x\n", __func__, | ||
| 505 | (unsigned int)start, (unsigned int) end); | ||
| 506 | |||
| 507 | CACHE_LOOP_LIMITS(start, end, | ||
| 508 | cpuinfo.dcache_line_length, cpuinfo.dcache_size); | ||
| 509 | #ifdef ASM_LOOP | ||
| 510 | CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush); | ||
| 511 | #else | ||
| 512 | for (i = start; i < end; i += cpuinfo.dcache_line_length) | ||
| 513 | __asm__ __volatile__ ("wdc.flush %0, r0;" \ | ||
| 514 | : : "r" (i)); | ||
| 515 | #endif | ||
| 248 | } | 516 | } |
| 249 | 517 | ||
| 250 | void __invalidate_dcache_user_range(struct vm_area_struct *vma, | 518 | /* struct for wb caches and for wt caches */ |
| 251 | struct page *page, unsigned long adr, | 519 | struct scache *mbc; |
| 252 | int len) | 520 | |
| 521 | /* new wb cache model */ | ||
| 522 | const struct scache wb_msr = { | ||
| 523 | .ie = __enable_icache_msr, | ||
| 524 | .id = __disable_icache_msr, | ||
| 525 | .ifl = __flush_icache_all_noirq, | ||
| 526 | .iflr = __flush_icache_range_noirq, | ||
| 527 | .iin = __flush_icache_all_noirq, | ||
| 528 | .iinr = __flush_icache_range_noirq, | ||
| 529 | .de = __enable_dcache_msr, | ||
| 530 | .dd = __disable_dcache_msr, | ||
| 531 | .dfl = __flush_dcache_all_wb, | ||
| 532 | .dflr = __flush_dcache_range_wb, | ||
| 533 | .din = __invalidate_dcache_all_wb, | ||
| 534 | .dinr = __invalidate_dcache_range_wb, | ||
| 535 | }; | ||
| 536 | |||
| 537 | /* There is only difference in ie, id, de, dd functions */ | ||
| 538 | const struct scache wb_nomsr = { | ||
| 539 | .ie = __enable_icache_nomsr, | ||
| 540 | .id = __disable_icache_nomsr, | ||
| 541 | .ifl = __flush_icache_all_noirq, | ||
| 542 | .iflr = __flush_icache_range_noirq, | ||
| 543 | .iin = __flush_icache_all_noirq, | ||
| 544 | .iinr = __flush_icache_range_noirq, | ||
| 545 | .de = __enable_dcache_nomsr, | ||
| 546 | .dd = __disable_dcache_nomsr, | ||
| 547 | .dfl = __flush_dcache_all_wb, | ||
| 548 | .dflr = __flush_dcache_range_wb, | ||
| 549 | .din = __invalidate_dcache_all_wb, | ||
| 550 | .dinr = __invalidate_dcache_range_wb, | ||
| 551 | }; | ||
| 552 | |||
| 553 | /* Old wt cache model with disabling irq and turn off cache */ | ||
| 554 | const struct scache wt_msr = { | ||
| 555 | .ie = __enable_icache_msr, | ||
| 556 | .id = __disable_icache_msr, | ||
| 557 | .ifl = __flush_icache_all_msr_irq, | ||
| 558 | .iflr = __flush_icache_range_msr_irq, | ||
| 559 | .iin = __flush_icache_all_msr_irq, | ||
| 560 | .iinr = __flush_icache_range_msr_irq, | ||
| 561 | .de = __enable_dcache_msr, | ||
| 562 | .dd = __disable_dcache_msr, | ||
| 563 | .dfl = __invalidate_dcache_all_msr_irq, | ||
| 564 | .dflr = __invalidate_dcache_range_msr_irq_wt, | ||
| 565 | .din = __invalidate_dcache_all_msr_irq, | ||
| 566 | .dinr = __invalidate_dcache_range_msr_irq_wt, | ||
| 567 | }; | ||
| 568 | |||
| 569 | const struct scache wt_nomsr = { | ||
| 570 | .ie = __enable_icache_nomsr, | ||
| 571 | .id = __disable_icache_nomsr, | ||
| 572 | .ifl = __flush_icache_all_nomsr_irq, | ||
| 573 | .iflr = __flush_icache_range_nomsr_irq, | ||
| 574 | .iin = __flush_icache_all_nomsr_irq, | ||
| 575 | .iinr = __flush_icache_range_nomsr_irq, | ||
| 576 | .de = __enable_dcache_nomsr, | ||
| 577 | .dd = __disable_dcache_nomsr, | ||
| 578 | .dfl = __invalidate_dcache_all_nomsr_irq, | ||
| 579 | .dflr = __invalidate_dcache_range_nomsr_irq, | ||
| 580 | .din = __invalidate_dcache_all_nomsr_irq, | ||
| 581 | .dinr = __invalidate_dcache_range_nomsr_irq, | ||
| 582 | }; | ||
| 583 | |||
| 584 | /* New wt cache model for newer Microblaze versions */ | ||
| 585 | const struct scache wt_msr_noirq = { | ||
| 586 | .ie = __enable_icache_msr, | ||
| 587 | .id = __disable_icache_msr, | ||
| 588 | .ifl = __flush_icache_all_noirq, | ||
| 589 | .iflr = __flush_icache_range_noirq, | ||
| 590 | .iin = __flush_icache_all_noirq, | ||
| 591 | .iinr = __flush_icache_range_noirq, | ||
| 592 | .de = __enable_dcache_msr, | ||
| 593 | .dd = __disable_dcache_msr, | ||
| 594 | .dfl = __invalidate_dcache_all_noirq_wt, | ||
| 595 | .dflr = __invalidate_dcache_range_nomsr_wt, | ||
| 596 | .din = __invalidate_dcache_all_noirq_wt, | ||
| 597 | .dinr = __invalidate_dcache_range_nomsr_wt, | ||
| 598 | }; | ||
| 599 | |||
| 600 | const struct scache wt_nomsr_noirq = { | ||
| 601 | .ie = __enable_icache_nomsr, | ||
| 602 | .id = __disable_icache_nomsr, | ||
| 603 | .ifl = __flush_icache_all_noirq, | ||
| 604 | .iflr = __flush_icache_range_noirq, | ||
| 605 | .iin = __flush_icache_all_noirq, | ||
| 606 | .iinr = __flush_icache_range_noirq, | ||
| 607 | .de = __enable_dcache_nomsr, | ||
| 608 | .dd = __disable_dcache_nomsr, | ||
| 609 | .dfl = __invalidate_dcache_all_noirq_wt, | ||
| 610 | .dflr = __invalidate_dcache_range_nomsr_wt, | ||
| 611 | .din = __invalidate_dcache_all_noirq_wt, | ||
| 612 | .dinr = __invalidate_dcache_range_nomsr_wt, | ||
| 613 | }; | ||
| 614 | |||
| 615 | /* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */ | ||
| 616 | #define CPUVER_7_20_A 0x0c | ||
| 617 | #define CPUVER_7_20_D 0x0f | ||
| 618 | |||
| 619 | #define INFO(s) printk(KERN_INFO "cache: " s "\n"); | ||
| 620 | |||
| 621 | void microblaze_cache_init(void) | ||
| 253 | { | 622 | { |
| 254 | __invalidate_dcache_all(); | 623 | if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) { |
| 624 | if (cpuinfo.dcache_wb) { | ||
| 625 | INFO("wb_msr"); | ||
| 626 | mbc = (struct scache *)&wb_msr; | ||
| 627 | if (cpuinfo.ver_code < CPUVER_7_20_D) { | ||
| 628 | /* MS: problem with signal handling - hw bug */ | ||
| 629 | INFO("WB won't work properly"); | ||
| 630 | } | ||
| 631 | } else { | ||
| 632 | if (cpuinfo.ver_code >= CPUVER_7_20_A) { | ||
| 633 | INFO("wt_msr_noirq"); | ||
| 634 | mbc = (struct scache *)&wt_msr_noirq; | ||
| 635 | } else { | ||
| 636 | INFO("wt_msr"); | ||
| 637 | mbc = (struct scache *)&wt_msr; | ||
| 638 | } | ||
| 639 | } | ||
| 640 | } else { | ||
| 641 | if (cpuinfo.dcache_wb) { | ||
| 642 | INFO("wb_nomsr"); | ||
| 643 | mbc = (struct scache *)&wb_nomsr; | ||
| 644 | if (cpuinfo.ver_code < CPUVER_7_20_D) { | ||
| 645 | /* MS: problem with signal handling - hw bug */ | ||
| 646 | INFO("WB won't work properly"); | ||
| 647 | } | ||
| 648 | } else { | ||
| 649 | if (cpuinfo.ver_code >= CPUVER_7_20_A) { | ||
| 650 | INFO("wt_nomsr_noirq"); | ||
| 651 | mbc = (struct scache *)&wt_nomsr_noirq; | ||
| 652 | } else { | ||
| 653 | INFO("wt_nomsr"); | ||
| 654 | mbc = (struct scache *)&wt_nomsr; | ||
| 655 | } | ||
| 656 | } | ||
| 657 | } | ||
| 658 | /* FIXME Invalidation is done in U-BOOT | ||
| 659 | * WT cache: Data is already written to main memory | ||
| 660 | * WB cache: Discard data on noMMU which caused that kernel doesn't boot | ||
| 661 | */ | ||
| 662 | /* invalidate_dcache(); */ | ||
| 663 | enable_dcache(); | ||
| 664 | |||
| 665 | invalidate_icache(); | ||
| 666 | enable_icache(); | ||
| 255 | } | 667 | } |
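
Most of the rewritten cache.c funnels through the CACHE_LOOP_LIMITS, CACHE_ALL_LOOP and CACHE_RANGE_LOOP_* macros and then selects one of the scache operation tables (wb_msr, wt_nomsr_noirq, ...) in microblaze_cache_init(). Below is a stand-alone sketch of just the range clamping that CACHE_LOOP_LIMITS performs, with made-up cache parameters; it is not the kernel macro itself.

```c
#include <stdio.h>

/* Stand-alone sketch of the CACHE_LOOP_LIMITS clamping used throughout the
 * rewritten cache.c: the range is limited to one cache footprint and the
 * start is aligned down to a cache line.  The numbers below are made up. */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
do {									\
	unsigned long align = ~((unsigned long)(cache_line_length) - 1); \
	if ((end) > (start) + (cache_size))				\
		(end) = (start) + (cache_size);				\
	(start) &= align;						\
} while (0)

int main(void)
{
	unsigned long start = 0x1003, end = 0x9000;
	unsigned long line = 16, size = 0x2000;	/* 16 B lines, 8 KB cache */

	CACHE_LOOP_LIMITS(start, end, line, size);
	printf("start = 0x%lx, end = 0x%lx\n", start, end); /* 0x1000, 0x3003 */
	return 0;
}
```

The clamped, line-aligned range is then walked with wic/wdc instructions, either by the ASM_LOOP hardware loop or by the C fallback loops shown in the patch.
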
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
index c259786e7faa..f72dbd66c844 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
@@ -21,8 +21,14 @@
| 21 | */ | 21 | */ |
| 22 | 22 | ||
| 23 | #define CI(c, p) { ci->c = PVR_##p(pvr); } | 23 | #define CI(c, p) { ci->c = PVR_##p(pvr); } |
| 24 | |||
| 25 | #if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE) | ||
| 24 | #define err_printk(x) \ | 26 | #define err_printk(x) \ |
| 25 | early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n"); | 27 | early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n"); |
| 28 | #else | ||
| 29 | #define err_printk(x) \ | ||
| 30 | printk(KERN_INFO "ERROR: Microblaze " x "-different for PVR and DTS\n"); | ||
| 31 | #endif | ||
| 26 | 32 | ||
| 27 | void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu) | 33 | void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu) |
| 28 | { | 34 | { |
@@ -70,7 +76,7 @@ void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
| 70 | CI(use_icache, USE_ICACHE); | 76 | CI(use_icache, USE_ICACHE); |
| 71 | CI(icache_tagbits, ICACHE_ADDR_TAG_BITS); | 77 | CI(icache_tagbits, ICACHE_ADDR_TAG_BITS); |
| 72 | CI(icache_write, ICACHE_ALLOW_WR); | 78 | CI(icache_write, ICACHE_ALLOW_WR); |
| 73 | CI(icache_line, ICACHE_LINE_LEN); | 79 | ci->icache_line_length = PVR_ICACHE_LINE_LEN(pvr) << 2; |
| 74 | CI(icache_size, ICACHE_BYTE_SIZE); | 80 | CI(icache_size, ICACHE_BYTE_SIZE); |
| 75 | CI(icache_base, ICACHE_BASEADDR); | 81 | CI(icache_base, ICACHE_BASEADDR); |
| 76 | CI(icache_high, ICACHE_HIGHADDR); | 82 | CI(icache_high, ICACHE_HIGHADDR); |
@@ -78,11 +84,16 @@ void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
| 78 | CI(use_dcache, USE_DCACHE); | 84 | CI(use_dcache, USE_DCACHE); |
| 79 | CI(dcache_tagbits, DCACHE_ADDR_TAG_BITS); | 85 | CI(dcache_tagbits, DCACHE_ADDR_TAG_BITS); |
| 80 | CI(dcache_write, DCACHE_ALLOW_WR); | 86 | CI(dcache_write, DCACHE_ALLOW_WR); |
| 81 | CI(dcache_line, DCACHE_LINE_LEN); | 87 | ci->dcache_line_length = PVR_DCACHE_LINE_LEN(pvr) << 2; |
| 82 | CI(dcache_size, DCACHE_BYTE_SIZE); | 88 | CI(dcache_size, DCACHE_BYTE_SIZE); |
| 83 | CI(dcache_base, DCACHE_BASEADDR); | 89 | CI(dcache_base, DCACHE_BASEADDR); |
| 84 | CI(dcache_high, DCACHE_HIGHADDR); | 90 | CI(dcache_high, DCACHE_HIGHADDR); |
| 85 | 91 | ||
| 92 | temp = PVR_DCACHE_USE_WRITEBACK(pvr); | ||
| 93 | if (ci->dcache_wb != temp) | ||
| 94 | err_printk("DCACHE WB"); | ||
| 95 | ci->dcache_wb = temp; | ||
| 96 | |||
| 86 | CI(use_dopb, D_OPB); | 97 | CI(use_dopb, D_OPB); |
| 87 | CI(use_iopb, I_OPB); | 98 | CI(use_iopb, I_OPB); |
| 88 | CI(use_dlmb, D_LMB); | 99 | CI(use_dlmb, D_LMB); |
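
Besides switching to byte-based `*cache_line_length` fields (the PVR reports line length in words, hence the `<< 2`), the hunk cross-checks the write-back capability reported by the PVR against the device-tree value. A small sketch of that check; `resolve_dcache_wb` and its inputs are illustrative, not kernel API.

```c
#include <stdio.h>

/* Sketch of the new consistency check: the PVR-reported write-back setting
 * overrides the device-tree value and a mismatch is reported. */
static int resolve_dcache_wb(int dts_wb, int pvr_wb)
{
	if (dts_wb != pvr_wb)
		printf("ERROR: Microblaze DCACHE WB - different for PVR and DTS\n");
	return pvr_wb;	/* PVR value wins, as in set_cpuinfo_pvr_full() */
}

int main(void)
{
	printf("dcache_wb = %d\n", resolve_dcache_wb(0, 1));
	return 0;
}
```
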
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c
index adb448f93d5f..6095aa6b5c88 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-static.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c
@@ -72,12 +72,12 @@ void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
| 72 | ci->use_icache = fcpu(cpu, "xlnx,use-icache"); | 72 | ci->use_icache = fcpu(cpu, "xlnx,use-icache"); |
| 73 | ci->icache_tagbits = fcpu(cpu, "xlnx,addr-tag-bits"); | 73 | ci->icache_tagbits = fcpu(cpu, "xlnx,addr-tag-bits"); |
| 74 | ci->icache_write = fcpu(cpu, "xlnx,allow-icache-wr"); | 74 | ci->icache_write = fcpu(cpu, "xlnx,allow-icache-wr"); |
| 75 | ci->icache_line = fcpu(cpu, "xlnx,icache-line-len") << 2; | 75 | ci->icache_line_length = fcpu(cpu, "xlnx,icache-line-len") << 2; |
| 76 | if (!ci->icache_line) { | 76 | if (!ci->icache_line_length) { |
| 77 | if (fcpu(cpu, "xlnx,icache-use-fsl")) | 77 | if (fcpu(cpu, "xlnx,icache-use-fsl")) |
| 78 | ci->icache_line = 4 << 2; | 78 | ci->icache_line_length = 4 << 2; |
| 79 | else | 79 | else |
| 80 | ci->icache_line = 1 << 2; | 80 | ci->icache_line_length = 1 << 2; |
| 81 | } | 81 | } |
| 82 | ci->icache_size = fcpu(cpu, "i-cache-size"); | 82 | ci->icache_size = fcpu(cpu, "i-cache-size"); |
| 83 | ci->icache_base = fcpu(cpu, "i-cache-baseaddr"); | 83 | ci->icache_base = fcpu(cpu, "i-cache-baseaddr"); |
@@ -86,16 +86,17 @@ void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
| 86 | ci->use_dcache = fcpu(cpu, "xlnx,use-dcache"); | 86 | ci->use_dcache = fcpu(cpu, "xlnx,use-dcache"); |
| 87 | ci->dcache_tagbits = fcpu(cpu, "xlnx,dcache-addr-tag"); | 87 | ci->dcache_tagbits = fcpu(cpu, "xlnx,dcache-addr-tag"); |
| 88 | ci->dcache_write = fcpu(cpu, "xlnx,allow-dcache-wr"); | 88 | ci->dcache_write = fcpu(cpu, "xlnx,allow-dcache-wr"); |
| 89 | ci->dcache_line = fcpu(cpu, "xlnx,dcache-line-len") << 2; | 89 | ci->dcache_line_length = fcpu(cpu, "xlnx,dcache-line-len") << 2; |
| 90 | if (!ci->dcache_line) { | 90 | if (!ci->dcache_line_length) { |
| 91 | if (fcpu(cpu, "xlnx,dcache-use-fsl")) | 91 | if (fcpu(cpu, "xlnx,dcache-use-fsl")) |
| 92 | ci->dcache_line = 4 << 2; | 92 | ci->dcache_line_length = 4 << 2; |
| 93 | else | 93 | else |
| 94 | ci->dcache_line = 1 << 2; | 94 | ci->dcache_line_length = 1 << 2; |
| 95 | } | 95 | } |
| 96 | ci->dcache_size = fcpu(cpu, "d-cache-size"); | 96 | ci->dcache_size = fcpu(cpu, "d-cache-size"); |
| 97 | ci->dcache_base = fcpu(cpu, "d-cache-baseaddr"); | 97 | ci->dcache_base = fcpu(cpu, "d-cache-baseaddr"); |
| 98 | ci->dcache_high = fcpu(cpu, "d-cache-highaddr"); | 98 | ci->dcache_high = fcpu(cpu, "d-cache-highaddr"); |
| 99 | ci->dcache_wb = fcpu(cpu, "xlnx,dcache-use-writeback"); | ||
| 99 | 100 | ||
| 100 | ci->use_dopb = fcpu(cpu, "xlnx,d-opb"); | 101 | ci->use_dopb = fcpu(cpu, "xlnx,d-opb"); |
| 101 | ci->use_iopb = fcpu(cpu, "xlnx,i-opb"); | 102 | ci->use_iopb = fcpu(cpu, "xlnx,i-opb"); |
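
When the device tree does not provide a cache-line length, the static cpuinfo path now falls back to 4 words for FSL-based caches and 1 word otherwise, stored in bytes. A stand-alone sketch of that fallback, with `dt_line_words` and `uses_fsl` standing in for the `fcpu()` device-tree reads:

```c
#include <stdio.h>

/* Stand-alone version of the fallback in set_cpuinfo_static(): if the
 * device tree gives no cache-line length, assume 4 words when the cache
 * uses FSL links, otherwise 1 word, then convert words to bytes. */
static unsigned int line_length_bytes(unsigned int dt_line_words, int uses_fsl)
{
	unsigned int len = dt_line_words << 2;	/* words -> bytes */

	if (!len)				/* property missing or zero */
		len = (uses_fsl ? 4 : 1) << 2;
	return len;
}

int main(void)
{
	printf("%u %u %u\n",
	       line_length_bytes(8, 0),		/* 32 bytes from the device tree */
	       line_length_bytes(0, 1),		/* FSL fallback: 16 bytes */
	       line_length_bytes(0, 0));	/* default: 4 bytes */
	return 0;
}
```
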
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 3539babc1c18..255ef880351e 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -9,7 +9,6 @@
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
| 12 | #include <linux/slab.h> | ||
| 13 | #include <asm/cpuinfo.h> | 12 | #include <asm/cpuinfo.h> |
| 14 | #include <asm/pvr.h> | 13 | #include <asm/pvr.h> |
| 15 | 14 | ||
@@ -29,11 +28,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
| 29 | {"7.20.a", 0x0c}, | 28 | {"7.20.a", 0x0c}, |
| 30 | {"7.20.b", 0x0d}, | 29 | {"7.20.b", 0x0d}, |
| 31 | {"7.20.c", 0x0e}, | 30 | {"7.20.c", 0x0e}, |
| 32 | /* FIXME There is no keycode defined in MBV for these versions */ | 31 | {"7.20.d", 0x0f}, |
| 33 | {"2.10.a", 0x10}, | 32 | {"7.30.a", 0x10}, |
| 34 | {"3.00.a", 0x20}, | ||
| 35 | {"4.00.a", 0x30}, | ||
| 36 | {"4.00.b", 0x40}, | ||
| 37 | {NULL, 0}, | 33 | {NULL, 0}, |
| 38 | }; | 34 | }; |
| 39 | 35 | ||
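
The version table now ends at 7.20.d (0x0f) and 7.30.a (0x10); these codes are what the CPUVER_7_20_A/CPUVER_7_20_D checks in the new cache.c compare against. An illustrative lookup over a table of this shape (the three entries are copied from the hunk above, the helper function itself is not kernel code):

```c
#include <stdio.h>
#include <string.h>

/* Illustrative lookup over a {version string, code} table shaped like
 * cpu_ver_lookup; ver_code() is a local helper, not kernel code. */
struct cpu_ver_key { const char *s; unsigned int k; };

static const struct cpu_ver_key cpu_ver_lookup[] = {
	{"7.20.c", 0x0e},
	{"7.20.d", 0x0f},
	{"7.30.a", 0x10},
	{NULL, 0},
};

static unsigned int ver_code(const char *ver)
{
	const struct cpu_ver_key *e;

	for (e = cpu_ver_lookup; e->s; e++)
		if (!strcmp(e->s, ver))
			return e->k;
	return 0;
}

int main(void)
{
	printf("7.20.d -> 0x%02x\n", ver_code("7.20.d"));
	return 0;
}
```
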
diff --git a/arch/microblaze/kernel/cpu/mb.c b/arch/microblaze/kernel/cpu/mb.c
index 4dcfccdbc364..4216eb1eaa32 100644
--- a/arch/microblaze/kernel/cpu/mb.c
+++ b/arch/microblaze/kernel/cpu/mb.c
@@ -98,16 +98,22 @@ static int show_cpuinfo(struct seq_file *m, void *v)
| 98 | 98 | ||
| 99 | if (cpuinfo.use_icache) | 99 | if (cpuinfo.use_icache) |
| 100 | count += seq_printf(m, | 100 | count += seq_printf(m, |
| 101 | "Icache:\t\t%ukB\n", | 101 | "Icache:\t\t%ukB\tline length:\t%dB\n", |
| 102 | cpuinfo.icache_size >> 10); | 102 | cpuinfo.icache_size >> 10, |
| 103 | cpuinfo.icache_line_length); | ||
| 103 | else | 104 | else |
| 104 | count += seq_printf(m, "Icache:\t\tno\n"); | 105 | count += seq_printf(m, "Icache:\t\tno\n"); |
| 105 | 106 | ||
| 106 | if (cpuinfo.use_dcache) | 107 | if (cpuinfo.use_dcache) { |
| 107 | count += seq_printf(m, | 108 | count += seq_printf(m, |
| 108 | "Dcache:\t\t%ukB\n", | 109 | "Dcache:\t\t%ukB\tline length:\t%dB\n", |
| 109 | cpuinfo.dcache_size >> 10); | 110 | cpuinfo.dcache_size >> 10, |
| 110 | else | 111 | cpuinfo.dcache_line_length); |
| 112 | if (cpuinfo.dcache_wb) | ||
| 113 | count += seq_printf(m, "\t\twrite-back\n"); | ||
| 114 | else | ||
| 115 | count += seq_printf(m, "\t\twrite-through\n"); | ||
| 116 | } else | ||
| 111 | count += seq_printf(m, "Dcache:\t\tno\n"); | 117 | count += seq_printf(m, "Dcache:\t\tno\n"); |
| 112 | 118 | ||
| 113 | count += seq_printf(m, | 119 | count += seq_printf(m, |
diff --git a/arch/microblaze/kernel/cpu/pvr.c b/arch/microblaze/kernel/cpu/pvr.c
index c9a4340ddd53..9bee9382bf74 100644
--- a/arch/microblaze/kernel/cpu/pvr.c
+++ b/arch/microblaze/kernel/cpu/pvr.c
@@ -45,7 +45,7 @@
| 45 | 45 | ||
| 46 | int cpu_has_pvr(void) | 46 | int cpu_has_pvr(void) |
| 47 | { | 47 | { |
| 48 | unsigned flags; | 48 | unsigned long flags; |
| 49 | unsigned pvr0; | 49 | unsigned pvr0; |
| 50 | 50 | ||
| 51 | local_save_flags(flags); | 51 | local_save_flags(flags); |
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
new file mode 100644
index 000000000000..9dcd90b5df55
--- /dev/null
+++ b/arch/microblaze/kernel/dma.c
@@ -0,0 +1,157 @@
| 1 | /* | ||
| 2 | * Copyright (C) 2009-2010 PetaLogix | ||
| 3 | * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation | ||
| 4 | * | ||
| 5 | * Provide default implementations of the DMA mapping callbacks for | ||
| 6 | * directly mapped busses. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/device.h> | ||
| 10 | #include <linux/dma-mapping.h> | ||
| 11 | #include <linux/gfp.h> | ||
| 12 | #include <linux/dma-debug.h> | ||
| 13 | #include <asm/bug.h> | ||
| 14 | #include <asm/cacheflush.h> | ||
| 15 | |||
| 16 | /* | ||
| 17 | * Generic direct DMA implementation | ||
| 18 | * | ||
| 19 | * This implementation supports a per-device offset that can be applied if | ||
| 20 | * the address at which memory is visible to devices is not 0. Platform code | ||
| 21 | * can set archdata.dma_data to an unsigned long holding the offset. By | ||
| 22 | * default the offset is PCI_DRAM_OFFSET. | ||
| 23 | */ | ||
| 24 | static inline void __dma_sync_page(unsigned long paddr, unsigned long offset, | ||
| 25 | size_t size, enum dma_data_direction direction) | ||
| 26 | { | ||
| 27 | switch (direction) { | ||
| 28 | case DMA_TO_DEVICE: | ||
| 29 | flush_dcache_range(paddr + offset, paddr + offset + size); | ||
| 30 | break; | ||
| 31 | case DMA_FROM_DEVICE: | ||
| 32 | invalidate_dcache_range(paddr + offset, paddr + offset + size); | ||
| 33 | break; | ||
| 34 | default: | ||
| 35 | BUG(); | ||
| 36 | } | ||
| 37 | } | ||
| 38 | |||
| 39 | static unsigned long get_dma_direct_offset(struct device *dev) | ||
| 40 | { | ||
| 41 | if (likely(dev)) | ||
| 42 | return (unsigned long)dev->archdata.dma_data; | ||
| 43 | |||
| 44 | return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */ | ||
| 45 | } | ||
| 46 | |||
| 47 | #define NOT_COHERENT_CACHE | ||
| 48 | |||
| 49 | static void *dma_direct_alloc_coherent(struct device *dev, size_t size, | ||
| 50 | dma_addr_t *dma_handle, gfp_t flag) | ||
| 51 | { | ||
| 52 | #ifdef NOT_COHERENT_CACHE | ||
| 53 | return consistent_alloc(flag, size, dma_handle); | ||
| 54 | #else | ||
| 55 | void *ret; | ||
| 56 | struct page *page; | ||
| 57 | int node = dev_to_node(dev); | ||
| 58 | |||
| 59 | /* ignore region specifiers */ | ||
| 60 | flag &= ~(__GFP_HIGHMEM); | ||
| 61 | |||
| 62 | page = alloc_pages_node(node, flag, get_order(size)); | ||
| 63 | if (page == NULL) | ||
| 64 | return NULL; | ||
| 65 | ret = page_address(page); | ||
| 66 | memset(ret, 0, size); | ||
| 67 | *dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev); | ||
| 68 | |||
| 69 | return ret; | ||
| 70 | #endif | ||
| 71 | } | ||
| 72 | |||
| 73 | static void dma_direct_free_coherent(struct device *dev, size_t size, | ||
| 74 | void *vaddr, dma_addr_t dma_handle) | ||
| 75 | { | ||
| 76 | #ifdef NOT_COHERENT_CACHE | ||
| 77 | consistent_free(size, vaddr); | ||
| 78 | #else | ||
| 79 | free_pages((unsigned long)vaddr, get_order(size)); | ||
| 80 | #endif | ||
| 81 | } | ||
| 82 | |||
| 83 | static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, | ||
| 84 | int nents, enum dma_data_direction direction, | ||
| 85 | struct dma_attrs *attrs) | ||
| 86 | { | ||
| 87 | struct scatterlist *sg; | ||
| 88 | int i; | ||
| 89 | |||
| 90 | /* FIXME this part of code is untested */ | ||
| 91 | for_each_sg(sgl, sg, nents, i) { | ||
| 92 | sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev); | ||
| 93 | sg->dma_length = sg->length; | ||
| 94 | __dma_sync_page(page_to_phys(sg_page(sg)), sg->offset, | ||
| 95 | sg->length, direction); | ||
| 96 | } | ||
| 97 | |||
| 98 | return nents; | ||
| 99 | } | ||
| 100 | |||
| 101 | static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
| 102 | int nents, enum dma_data_direction direction, | ||
| 103 | struct dma_attrs *attrs) | ||
| 104 | { | ||
| 105 | } | ||
| 106 | |||
| 107 | static int dma_direct_dma_supported(struct device *dev, u64 mask) | ||
| 108 | { | ||
| 109 | return 1; | ||
| 110 | } | ||
| 111 | |||
| 112 | static inline dma_addr_t dma_direct_map_page(struct device *dev, | ||
| 113 | struct page *page, | ||
| 114 | unsigned long offset, | ||
| 115 | size_t size, | ||
| 116 | enum dma_data_direction direction, | ||
| 117 | struct dma_attrs *attrs) | ||
| 118 | { | ||
| 119 | __dma_sync_page(page_to_phys(page), offset, size, direction); | ||
| 120 | return page_to_phys(page) + offset + get_dma_direct_offset(dev); | ||
| 121 | } | ||
| 122 | |||
| 123 | static inline void dma_direct_unmap_page(struct device *dev, | ||
| 124 | dma_addr_t dma_address, | ||
| 125 | size_t size, | ||
| 126 | enum dma_data_direction direction, | ||
| 127 | struct dma_attrs *attrs) | ||
| 128 | { | ||
| 129 | /* There is not necessary to do cache cleanup | ||
| 130 | * | ||
| 131 | * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and | ||
| 132 | * dma_address is physical address | ||
| 133 | */ | ||
| 134 | __dma_sync_page(dma_address, 0 , size, direction); | ||
| 135 | } | ||
| 136 | |||
| 137 | struct dma_map_ops dma_direct_ops = { | ||
| 138 | .alloc_coherent = dma_direct_alloc_coherent, | ||
| 139 | .free_coherent = dma_direct_free_coherent, | ||
| 140 | .map_sg = dma_direct_map_sg, | ||
| 141 | .unmap_sg = dma_direct_unmap_sg, | ||
| 142 | .dma_supported = dma_direct_dma_supported, | ||
| 143 | .map_page = dma_direct_map_page, | ||
| 144 | .unmap_page = dma_direct_unmap_page, | ||
| 145 | }; | ||
| 146 | EXPORT_SYMBOL(dma_direct_ops); | ||
| 147 | |||
| 148 | /* Number of entries preallocated for DMA-API debugging */ | ||
| 149 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) | ||
| 150 | |||
| 151 | static int __init dma_init(void) | ||
| 152 | { | ||
| 153 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
| 154 | |||
| 155 | return 0; | ||
| 156 | } | ||
| 157 | fs_initcall(dma_init); | ||
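
The new dma.c implements direct (offset-based) DMA mapping and keeps the non-coherent caches in sync per transfer direction: buffers headed to the device are flushed, buffers coming from the device are invalidated. A user-space sketch of that direction handling; the enum and both helpers below are local stand-ins, not the kernel definitions.

```c
#include <stdio.h>

/* Sketch of the direction handling in the new __dma_sync_page(): a buffer
 * going to the device is flushed (written back), a buffer coming from the
 * device is invalidated. */
enum dma_data_direction { DMA_TO_DEVICE, DMA_FROM_DEVICE };

static void flush_dcache_range(unsigned long s, unsigned long e)
{
	printf("flush      0x%lx-0x%lx\n", s, e);
}

static void invalidate_dcache_range(unsigned long s, unsigned long e)
{
	printf("invalidate 0x%lx-0x%lx\n", s, e);
}

static void dma_sync_page(unsigned long paddr, unsigned long offset,
			  unsigned long size, enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE)
		flush_dcache_range(paddr + offset, paddr + offset + size);
	else
		invalidate_dcache_range(paddr + offset, paddr + offset + size);
}

int main(void)
{
	dma_sync_page(0x80000000UL, 0, 0x1000, DMA_TO_DEVICE);
	dma_sync_page(0x80000000UL, 0, 0x1000, DMA_FROM_DEVICE);
	return 0;
}
```
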
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
index 9083d85376a4..8cc18cd2cce6 100644
--- a/arch/microblaze/kernel/entry-nommu.S
+++ b/arch/microblaze/kernel/entry-nommu.S
@@ -122,7 +122,7 @@ ENTRY(_interrupt)
| 122 | 122 | ||
| 123 | ret_from_intr: | 123 | ret_from_intr: |
| 124 | lwi r11, r1, PT_MODE | 124 | lwi r11, r1, PT_MODE |
| 125 | bneid r11, 3f | 125 | bneid r11, no_intr_resched |
| 126 | 126 | ||
| 127 | lwi r6, r31, TS_THREAD_INFO /* get thread info */ | 127 | lwi r6, r31, TS_THREAD_INFO /* get thread info */ |
| 128 | lwi r19, r6, TI_FLAGS /* get flags in thread info */ | 128 | lwi r19, r6, TI_FLAGS /* get flags in thread info */ |
@@ -133,16 +133,18 @@ ret_from_intr:
| 133 | bralid r15, schedule | 133 | bralid r15, schedule |
| 134 | nop | 134 | nop |
| 135 | 1: andi r11, r19, _TIF_SIGPENDING | 135 | 1: andi r11, r19, _TIF_SIGPENDING |
| 136 | beqid r11, no_intr_reshed | 136 | beqid r11, no_intr_resched |
| 137 | addk r5, r1, r0 | 137 | addk r5, r1, r0 |
| 138 | addk r7, r0, r0 | 138 | addk r7, r0, r0 |
| 139 | bralid r15, do_signal | 139 | bralid r15, do_signal |
| 140 | addk r6, r0, r0 | 140 | addk r6, r0, r0 |
| 141 | 141 | ||
| 142 | no_intr_reshed: | 142 | no_intr_resched: |
| 143 | /* Disable interrupts, we are now committed to the state restore */ | ||
| 144 | disable_irq | ||
| 145 | |||
| 143 | /* save mode indicator */ | 146 | /* save mode indicator */ |
| 144 | lwi r11, r1, PT_MODE | 147 | lwi r11, r1, PT_MODE |
| 145 | 3: | ||
| 146 | swi r11, r0, PER_CPU(KM) | 148 | swi r11, r0, PER_CPU(KM) |
| 147 | 149 | ||
| 148 | /* save r31 */ | 150 | /* save r31 */ |
@@ -208,8 +210,6 @@ ENTRY(_user_exception)
| 208 | lwi r1, r1, TS_THREAD_INFO /* get the thread info */ | 210 | lwi r1, r1, TS_THREAD_INFO /* get the thread info */ |
| 209 | /* calculate kernel stack pointer */ | 211 | /* calculate kernel stack pointer */ |
| 210 | addik r1, r1, THREAD_SIZE - PT_SIZE | 212 | addik r1, r1, THREAD_SIZE - PT_SIZE |
| 211 | swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */ | ||
| 212 | lwi r11, r0, PER_CPU(KM) /* load mode indicator */ | ||
| 213 | 2: | 213 | 2: |
| 214 | swi r11, r1, PT_MODE /* store the mode */ | 214 | swi r11, r1, PT_MODE /* store the mode */ |
| 215 | lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */ | 215 | lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */ |
@@ -476,6 +476,8 @@ ENTRY(ret_from_fork)
| 476 | nop | 476 | nop |
| 477 | 477 | ||
| 478 | work_pending: | 478 | work_pending: |
| 479 | enable_irq | ||
| 480 | |||
| 479 | andi r11, r19, _TIF_NEED_RESCHED | 481 | andi r11, r19, _TIF_NEED_RESCHED |
| 480 | beqi r11, 1f | 482 | beqi r11, 1f |
| 481 | bralid r15, schedule | 483 | bralid r15, schedule |
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index e3ecb36dd554..c0ede25c5b99 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -31,6 +31,8 @@
| 31 | #include <linux/errno.h> | 31 | #include <linux/errno.h> |
| 32 | #include <asm/signal.h> | 32 | #include <asm/signal.h> |
| 33 | 33 | ||
| 34 | #undef DEBUG | ||
| 35 | |||
| 34 | /* The size of a state save frame. */ | 36 | /* The size of a state save frame. */ |
| 35 | #define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE) | 37 | #define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE) |
| 36 | 38 | ||
@@ -303,7 +305,7 @@ C_ENTRY(_user_exception):
| 303 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ | 305 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ |
| 304 | addi r11, r0, 1; | 306 | addi r11, r0, 1; |
| 305 | swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ | 307 | swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ |
| 306 | 2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ | 308 | 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
| 307 | /* Save away the syscall number. */ | 309 | /* Save away the syscall number. */ |
| 308 | swi r12, r1, PTO+PT_R0; | 310 | swi r12, r1, PTO+PT_R0; |
| 309 | tovirt(r1,r1) | 311 | tovirt(r1,r1) |
@@ -320,8 +322,7 @@ C_ENTRY(_user_exception):
| 320 | rtid r11, 0 | 322 | rtid r11, 0 |
| 321 | nop | 323 | nop |
| 322 | 3: | 324 | 3: |
| 323 | add r11, r0, CURRENT_TASK /* Get current task ptr into r11 */ | 325 | lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */ |
| 324 | lwi r11, r11, TS_THREAD_INFO /* get thread info */ | ||
| 325 | lwi r11, r11, TI_FLAGS /* get flags in thread info */ | 326 | lwi r11, r11, TI_FLAGS /* get flags in thread info */ |
| 326 | andi r11, r11, _TIF_WORK_SYSCALL_MASK | 327 | andi r11, r11, _TIF_WORK_SYSCALL_MASK |
| 327 | beqi r11, 4f | 328 | beqi r11, 4f |
@@ -352,10 +353,12 @@ C_ENTRY(_user_exception):
| 352 | add r12, r12, r12; /* convert num -> ptr */ | 353 | add r12, r12, r12; /* convert num -> ptr */ |
| 353 | add r12, r12, r12; | 354 | add r12, r12, r12; |
| 354 | 355 | ||
| 356 | #ifdef DEBUG | ||
| 355 | /* Trac syscalls and stored them to r0_ram */ | 357 | /* Trac syscalls and stored them to r0_ram */ |
| 356 | lwi r3, r12, 0x400 + r0_ram | 358 | lwi r3, r12, 0x400 + r0_ram |
| 357 | addi r3, r3, 1 | 359 | addi r3, r3, 1 |
| 358 | swi r3, r12, 0x400 + r0_ram | 360 | swi r3, r12, 0x400 + r0_ram |
| 361 | #endif | ||
| 359 | 362 | ||
| 360 | # Find and jump into the syscall handler. | 363 | # Find and jump into the syscall handler. |
| 361 | lwi r12, r12, sys_call_table | 364 | lwi r12, r12, sys_call_table |
@@ -378,60 +381,50 @@ C_ENTRY(ret_from_trap):
| 378 | /* See if returning to kernel mode, if so, skip resched &c. */ | 381 | /* See if returning to kernel mode, if so, skip resched &c. */ |
| 379 | bnei r11, 2f; | 382 | bnei r11, 2f; |
| 380 | 383 | ||
| 384 | swi r3, r1, PTO + PT_R3 | ||
| 385 | swi r4, r1, PTO + PT_R4 | ||
| 386 | |||
| 381 | /* We're returning to user mode, so check for various conditions that | 387 | /* We're returning to user mode, so check for various conditions that |
| 382 | * trigger rescheduling. */ | 388 | * trigger rescheduling. */ |
| 383 | # FIXME: Restructure all these flag checks. | 389 | /* FIXME: Restructure all these flag checks. */ |
| 384 | add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | 390 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ |
| 385 | lwi r11, r11, TS_THREAD_INFO; /* get thread info */ | ||
| 386 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ | 391 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ |
| 387 | andi r11, r11, _TIF_WORK_SYSCALL_MASK | 392 | andi r11, r11, _TIF_WORK_SYSCALL_MASK |
| 388 | beqi r11, 1f | 393 | beqi r11, 1f |
| 389 | 394 | ||
| 390 | swi r3, r1, PTO + PT_R3 | ||
| 391 | swi r4, r1, PTO + PT_R4 | ||
| 392 | brlid r15, do_syscall_trace_leave | 395 | brlid r15, do_syscall_trace_leave |
| 393 | addik r5, r1, PTO + PT_R0 | 396 | addik r5, r1, PTO + PT_R0 |
| 394 | lwi r3, r1, PTO + PT_R3 | ||
| 395 | lwi r4, r1, PTO + PT_R4 | ||
| 396 | 1: | 397 | 1: |
| 397 | |||
| 398 | /* We're returning to user mode, so check for various conditions that | 398 | /* We're returning to user mode, so check for various conditions that |
| 399 | * trigger rescheduling. */ | 399 | * trigger rescheduling. */ |
| 400 | /* Get current task ptr into r11 */ | 400 | /* get thread info from current task */ |
| 401 | add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | 401 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; |
| 402 | lwi r11, r11, TS_THREAD_INFO; /* get thread info */ | ||
| 403 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ | 402 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ |
| 404 | andi r11, r11, _TIF_NEED_RESCHED; | 403 | andi r11, r11, _TIF_NEED_RESCHED; |
| 405 | beqi r11, 5f; | 404 | beqi r11, 5f; |
| 406 | 405 | ||
| 407 | swi r3, r1, PTO + PT_R3; /* store syscall result */ | ||
| 408 | swi r4, r1, PTO + PT_R4; | ||
| 409 | bralid r15, schedule; /* Call scheduler */ | 406 | bralid r15, schedule; /* Call scheduler */ |
| 410 | nop; /* delay slot */ | 407 | nop; /* delay slot */ |
| 411 | lwi r3, r1, PTO + PT_R3; /* restore syscall result */ | ||
| 412 | lwi r4, r1, PTO + PT_R4; | ||
| 413 | 408 | ||
| 414 | /* Maybe handle a signal */ | 409 | /* Maybe handle a signal */ |
| 415 | 5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | 410 | 5: /* get thread info from current task */ |
| 416 | lwi r11, r11, TS_THREAD_INFO; /* get thread info */ | 411 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; |
| 417 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ | 412 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ |
| 418 | andi r11, r11, _TIF_SIGPENDING; | 413 | andi r11, r11, _TIF_SIGPENDING; |
| 419 | beqi r11, 1f; /* Signals to handle, handle them */ | 414 | beqi r11, 1f; /* Signals to handle, handle them */ |
| 420 | 415 | ||
| 421 | swi r3, r1, PTO + PT_R3; /* store syscall result */ | ||
| 422 | swi r4, r1, PTO + PT_R4; | ||
| 423 | la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ | 416 | la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ |
| 424 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ | ||
| 425 | addi r7, r0, 1; /* Arg 3: int in_syscall */ | 417 | addi r7, r0, 1; /* Arg 3: int in_syscall */ |
| 426 | bralid r15, do_signal; /* Handle any signals */ | 418 | bralid r15, do_signal; /* Handle any signals */ |
| 427 | nop; | 419 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ |
| 420 | |||
| 421 | /* Finally, return to user state. */ | ||
| 422 | 1: | ||
| 428 | lwi r3, r1, PTO + PT_R3; /* restore syscall result */ | 423 | lwi r3, r1, PTO + PT_R3; /* restore syscall result */ |
| 429 | lwi r4, r1, PTO + PT_R4; | 424 | lwi r4, r1, PTO + PT_R4; |
| 430 | 425 | ||
| 431 | /* Finally, return to user state. */ | 426 | swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ |
| 432 | 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ | 427 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ |
| 433 | add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | ||
| 434 | swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */ | ||
| 435 | VM_OFF; | 428 | VM_OFF; |
| 436 | tophys(r1,r1); | 429 | tophys(r1,r1); |
| 437 | RESTORE_REGS; | 430 | RESTORE_REGS; |
| @@ -496,17 +489,6 @@ C_ENTRY(sys_execve): | |||
| 496 | brid microblaze_execve; /* Do real work (tail-call).*/ | 489 | brid microblaze_execve; /* Do real work (tail-call).*/ |
| 497 | nop; | 490 | nop; |
| 498 | 491 | ||
| 499 | C_ENTRY(sys_rt_sigsuspend_wrapper): | ||
| 500 | swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | ||
| 501 | swi r4, r1, PTO+PT_R4; | ||
| 502 | la r7, r1, PTO; /* add user context as 3rd arg */ | ||
| 503 | brlid r15, sys_rt_sigsuspend; /* Do real work.*/ | ||
| 504 | nop; | ||
| 505 | lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | ||
| 506 | lwi r4, r1, PTO+PT_R4; | ||
| 507 | bri ret_from_trap /* fall through will not work here due to align */ | ||
| 508 | nop; | ||
| 509 | |||
| 510 | C_ENTRY(sys_rt_sigreturn_wrapper): | 492 | C_ENTRY(sys_rt_sigreturn_wrapper): |
| 511 | swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | 493 | swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ |
| 512 | swi r4, r1, PTO+PT_R4; | 494 | swi r4, r1, PTO+PT_R4; |
| @@ -572,7 +554,7 @@ C_ENTRY(sys_rt_sigreturn_wrapper): | |||
| 572 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ \ | 554 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ \ |
| 573 | addi r11, r0, 1; \ | 555 | addi r11, r0, 1; \ |
| 574 | swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\ | 556 | swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\ |
| 575 | 2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\ | 557 | 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \ |
| 576 | /* Save away the syscall number. */ \ | 558 | /* Save away the syscall number. */ \ |
| 577 | swi r0, r1, PTO+PT_R0; \ | 559 | swi r0, r1, PTO+PT_R0; \ |
| 578 | tovirt(r1,r1) | 560 | tovirt(r1,r1) |
| @@ -680,9 +662,7 @@ C_ENTRY(ret_from_exc): | |||
| 680 | 662 | ||
| 681 | /* We're returning to user mode, so check for various conditions that | 663 | /* We're returning to user mode, so check for various conditions that |
| 682 | trigger rescheduling. */ | 664 | trigger rescheduling. */ |
| 683 | /* Get current task ptr into r11 */ | 665 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ |
| 684 | add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | ||
| 685 | lwi r11, r11, TS_THREAD_INFO; /* get thread info */ | ||
| 686 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ | 666 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ |
| 687 | andi r11, r11, _TIF_NEED_RESCHED; | 667 | andi r11, r11, _TIF_NEED_RESCHED; |
| 688 | beqi r11, 5f; | 668 | beqi r11, 5f; |
| @@ -692,8 +672,7 @@ C_ENTRY(ret_from_exc): | |||
| 692 | nop; /* delay slot */ | 672 | nop; /* delay slot */ |
| 693 | 673 | ||
| 694 | /* Maybe handle a signal */ | 674 | /* Maybe handle a signal */ |
| 695 | 5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | 675 | 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ |
| 696 | lwi r11, r11, TS_THREAD_INFO; /* get thread info */ | ||
| 697 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ | 676 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ |
| 698 | andi r11, r11, _TIF_SIGPENDING; | 677 | andi r11, r11, _TIF_SIGPENDING; |
| 699 | beqi r11, 1f; /* Signals to handle, handle them */ | 678 | beqi r11, 1f; /* Signals to handle, handle them */ |
| @@ -711,20 +690,14 @@ C_ENTRY(ret_from_exc): | |||
| 711 | * (in a possibly modified form) after do_signal returns. | 690 | * (in a possibly modified form) after do_signal returns. |
| 712 | * store return registers separately because this macro is used | 691 | * store return registers separately because this macro is used |
| 713 | * for other exceptions */ | 692 | * for other exceptions */ |
| 714 | swi r3, r1, PTO + PT_R3; | ||
| 715 | swi r4, r1, PTO + PT_R4; | ||
| 716 | la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ | 693 | la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ |
| 717 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ | ||
| 718 | addi r7, r0, 0; /* Arg 3: int in_syscall */ | 694 | addi r7, r0, 0; /* Arg 3: int in_syscall */ |
| 719 | bralid r15, do_signal; /* Handle any signals */ | 695 | bralid r15, do_signal; /* Handle any signals */ |
| 720 | nop; | 696 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ |
| 721 | lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | ||
| 722 | lwi r4, r1, PTO+PT_R4; | ||
| 723 | 697 | ||
| 724 | /* Finally, return to user state. */ | 698 | /* Finally, return to user state. */ |
| 725 | 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ | 699 | 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ |
| 726 | add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | 700 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ |
| 727 | swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */ | ||
| 728 | VM_OFF; | 701 | VM_OFF; |
| 729 | tophys(r1,r1); | 702 | tophys(r1,r1); |
| 730 | 703 | ||
| @@ -813,7 +786,7 @@ C_ENTRY(_interrupt): | |||
| 813 | swi r11, r0, TOPHYS(PER_CPU(KM)); | 786 | swi r11, r0, TOPHYS(PER_CPU(KM)); |
| 814 | 787 | ||
| 815 | 2: | 788 | 2: |
| 816 | lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); | 789 | lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
| 817 | swi r0, r1, PTO + PT_R0; | 790 | swi r0, r1, PTO + PT_R0; |
| 818 | tovirt(r1,r1) | 791 | tovirt(r1,r1) |
| 819 | la r5, r1, PTO; | 792 | la r5, r1, PTO; |
| @@ -828,8 +801,7 @@ ret_from_irq: | |||
| 828 | lwi r11, r1, PTO + PT_MODE; | 801 | lwi r11, r1, PTO + PT_MODE; |
| 829 | bnei r11, 2f; | 802 | bnei r11, 2f; |
| 830 | 803 | ||
| 831 | add r11, r0, CURRENT_TASK; | 804 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; |
| 832 | lwi r11, r11, TS_THREAD_INFO; | ||
| 833 | lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */ | 805 | lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */ |
| 834 | andi r11, r11, _TIF_NEED_RESCHED; | 806 | andi r11, r11, _TIF_NEED_RESCHED; |
| 835 | beqi r11, 5f | 807 | beqi r11, 5f |
| @@ -837,8 +809,7 @@ ret_from_irq: | |||
| 837 | nop; /* delay slot */ | 809 | nop; /* delay slot */ |
| 838 | 810 | ||
| 839 | /* Maybe handle a signal */ | 811 | /* Maybe handle a signal */ |
| 840 | 5: add r11, r0, CURRENT_TASK; | 812 | 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */ |
| 841 | lwi r11, r11, TS_THREAD_INFO; /* MS: get thread info */ | ||
| 842 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ | 813 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ |
| 843 | andi r11, r11, _TIF_SIGPENDING; | 814 | andi r11, r11, _TIF_SIGPENDING; |
| 844 | beqid r11, no_intr_resched | 815 | beqid r11, no_intr_resched |
| @@ -853,8 +824,7 @@ no_intr_resched: | |||
| 853 | /* Disable interrupts, we are now committed to the state restore */ | 824 | /* Disable interrupts, we are now committed to the state restore */ |
| 854 | disable_irq | 825 | disable_irq |
| 855 | swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */ | 826 | swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */ |
| 856 | add r11, r0, CURRENT_TASK; | 827 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); |
| 857 | swi r11, r0, PER_CPU(CURRENT_SAVE); | ||
| 858 | VM_OFF; | 828 | VM_OFF; |
| 859 | tophys(r1,r1); | 829 | tophys(r1,r1); |
| 860 | lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ | 830 | lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ |
| @@ -864,7 +834,28 @@ no_intr_resched: | |||
| 864 | lwi r1, r1, PT_R1 - PT_SIZE; | 834 | lwi r1, r1, PT_R1 - PT_SIZE; |
| 865 | bri 6f; | 835 | bri 6f; |
| 866 | /* MS: Return to kernel state. */ | 836 | /* MS: Return to kernel state. */ |
| 867 | 2: VM_OFF /* MS: turn off MMU */ | 837 | 2: |
| 838 | #ifdef CONFIG_PREEMPT | ||
| 839 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; | ||
| 840 | /* MS: get preempt_count from thread info */ | ||
| 841 | lwi r5, r11, TI_PREEMPT_COUNT; | ||
| 842 | bgti r5, restore; | ||
| 843 | |||
| 844 | lwi r5, r11, TI_FLAGS; /* get flags in thread info */ | ||
| 845 | andi r5, r5, _TIF_NEED_RESCHED; | ||
| 846 | beqi r5, restore /* if zero jump over */ | ||
| 847 | |||
| 848 | preempt: | ||
| 849 | /* interrupts are off, that's why I am calling preempt_schedule_irq */ | ||
| 850 | bralid r15, preempt_schedule_irq | ||
| 851 | nop | ||
| 852 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ | ||
| 853 | lwi r5, r11, TI_FLAGS; /* get flags in thread info */ | ||
| 854 | andi r5, r5, _TIF_NEED_RESCHED; | ||
| 855 | bnei r5, preempt /* if non zero jump to resched */ | ||
| 856 | restore: | ||
| 857 | #endif | ||
| 858 | VM_OFF /* MS: turn off MMU */ | ||
| 868 | tophys(r1,r1) | 859 | tophys(r1,r1) |
| 869 | lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ | 860 | lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ |
| 870 | lwi r4, r1, PTO + PT_R4; | 861 | lwi r4, r1, PTO + PT_R4; |
| @@ -926,7 +917,7 @@ C_ENTRY(_debug_exception): | |||
| 926 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ | 917 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ |
| 927 | addi r11, r0, 1; | 918 | addi r11, r0, 1; |
| 928 | swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ | 919 | swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ |
| 929 | 2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ | 920 | 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
| 930 | /* Save away the syscall number. */ | 921 | /* Save away the syscall number. */ |
| 931 | swi r0, r1, PTO+PT_R0; | 922 | swi r0, r1, PTO+PT_R0; |
| 932 | tovirt(r1,r1) | 923 | tovirt(r1,r1) |
| @@ -946,8 +937,7 @@ dbtrap_call: rtbd r11, 0; | |||
| 946 | bnei r11, 2f; | 937 | bnei r11, 2f; |
| 947 | 938 | ||
| 948 | /* Get current task ptr into r11 */ | 939 | /* Get current task ptr into r11 */ |
| 949 | add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | 940 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ |
| 950 | lwi r11, r11, TS_THREAD_INFO; /* get thread info */ | ||
| 951 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ | 941 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ |
| 952 | andi r11, r11, _TIF_NEED_RESCHED; | 942 | andi r11, r11, _TIF_NEED_RESCHED; |
| 953 | beqi r11, 5f; | 943 | beqi r11, 5f; |
| @@ -960,8 +950,7 @@ dbtrap_call: rtbd r11, 0; | |||
| 960 | /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */ | 950 | /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */ |
| 961 | 951 | ||
| 962 | /* Maybe handle a signal */ | 952 | /* Maybe handle a signal */ |
| 963 | 5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | 953 | 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ |
| 964 | lwi r11, r11, TS_THREAD_INFO; /* get thread info */ | ||
| 965 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ | 954 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ |
| 966 | andi r11, r11, _TIF_SIGPENDING; | 955 | andi r11, r11, _TIF_SIGPENDING; |
| 967 | beqi r11, 1f; /* Signals to handle, handle them */ | 956 | beqi r11, 1f; /* Signals to handle, handle them */ |
| @@ -977,16 +966,14 @@ dbtrap_call: rtbd r11, 0; | |||
| 977 | (in a possibly modified form) after do_signal returns. */ | 966 | (in a possibly modified form) after do_signal returns. */ |
| 978 | 967 | ||
| 979 | la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ | 968 | la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ |
| 980 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ | ||
| 981 | addi r7, r0, 0; /* Arg 3: int in_syscall */ | 969 | addi r7, r0, 0; /* Arg 3: int in_syscall */ |
| 982 | bralid r15, do_signal; /* Handle any signals */ | 970 | bralid r15, do_signal; /* Handle any signals */ |
| 983 | nop; | 971 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ |
| 984 | 972 | ||
| 985 | 973 | ||
| 986 | /* Finally, return to user state. */ | 974 | /* Finally, return to user state. */ |
| 987 | 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ | 975 | 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ |
| 988 | add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | 976 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ |
| 989 | swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */ | ||
| 990 | VM_OFF; | 977 | VM_OFF; |
| 991 | tophys(r1,r1); | 978 | tophys(r1,r1); |
| 992 | 979 | ||
| @@ -1018,7 +1005,7 @@ DBTRAP_return: /* Make global symbol for debugging */ | |||
| 1018 | 1005 | ||
| 1019 | ENTRY(_switch_to) | 1006 | ENTRY(_switch_to) |
| 1020 | /* prepare return value */ | 1007 | /* prepare return value */ |
| 1021 | addk r3, r0, r31 | 1008 | addk r3, r0, CURRENT_TASK |
| 1022 | 1009 | ||
| 1023 | /* save registers in cpu_context */ | 1010 | /* save registers in cpu_context */ |
| 1024 | /* use r11 and r12, volatile registers, as temp register */ | 1011 | /* use r11 and r12, volatile registers, as temp register */ |
| @@ -1062,10 +1049,10 @@ ENTRY(_switch_to) | |||
| 1062 | nop | 1049 | nop |
| 1063 | swi r12, r11, CC_FSR | 1050 | swi r12, r11, CC_FSR |
| 1064 | 1051 | ||
| 1065 | /* update r31, the current */ | 1052 | /* update r31 (CURRENT_TASK) - get pointer to the task which will run next */ |
| 1066 | lwi r31, r6, TI_TASK/* give me pointer to task which will be next */ | 1053 | lwi CURRENT_TASK, r6, TI_TASK |
| 1067 | /* stored it to current_save too */ | 1054 | /* stored it to current_save too */ |
| 1068 | swi r31, r0, PER_CPU(CURRENT_SAVE) | 1055 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE) |
| 1069 | 1056 | ||
| 1070 | /* get new process' cpu context and restore */ | 1057 | /* get new process' cpu context and restore */ |
| 1071 | /* get the address where the next task's cpu context starts */ | 1058 | /* get the address where the next task's cpu context starts */ |
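
The new CONFIG_PREEMPT path added to ret_from_irq above only calls the scheduler when the preempt count is zero and TIF_NEED_RESCHED is set, and it loops until the flag is clear. Below is a minimal, self-contained C sketch of that decision logic; `struct thread_info_t`, `preempt_schedule_irq_stub()` and the TIF_NEED_RESCHED bit value are illustrative stand-ins, not the kernel's real definitions.

```c
/*
 * Hypothetical model of the CONFIG_PREEMPT check performed by the new
 * ret_from_irq code when returning to kernel mode.  All names here are
 * stand-ins for illustration only.
 */
#include <stdio.h>

struct thread_info_t {
	int preempt_count;              /* TI_PREEMPT_COUNT */
	int flags;                      /* TI_FLAGS         */
};

#define TIF_NEED_RESCHED 0x2            /* illustrative bit value only */

/* stand-in for preempt_schedule_irq(); clears the resched request here */
static void preempt_schedule_irq_stub(struct thread_info_t *ti)
{
	printf("scheduling from irq return\n");
	ti->flags &= ~TIF_NEED_RESCHED;
}

/* mirrors the assembly: skip if preempt_count > 0, otherwise loop while
 * TIF_NEED_RESCHED stays set, just like the preempt:/restore: labels */
static void irq_return_to_kernel(struct thread_info_t *ti)
{
	if (ti->preempt_count > 0)
		return;                             /* bgti r5, restore */
	while (ti->flags & TIF_NEED_RESCHED)        /* beqi/bnei on the flag */
		preempt_schedule_irq_stub(ti);
}

int main(void)
{
	struct thread_info_t ti = { .preempt_count = 0,
				    .flags = TIF_NEED_RESCHED };
	irq_return_to_kernel(&ti);
	return 0;
}
```

The re-check after the call mirrors the jump back to the preempt: label: a new reschedule request may have been raised while the previous one was being serviced.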
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c index d9f70f83097f..02cbdfe5aa8d 100644 --- a/arch/microblaze/kernel/exceptions.c +++ b/arch/microblaze/kernel/exceptions.c | |||
| @@ -121,7 +121,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, | |||
| 121 | } | 121 | } |
| 122 | printk(KERN_WARNING "Divide by zero exception " \ | 122 | printk(KERN_WARNING "Divide by zero exception " \ |
| 123 | "in kernel mode.\n"); | 123 | "in kernel mode.\n"); |
| 124 | die("Divide by exception", regs, SIGBUS); | 124 | die("Divide by zero exception", regs, SIGBUS); |
| 125 | break; | 125 | break; |
| 126 | case MICROBLAZE_FPU_EXCEPTION: | 126 | case MICROBLAZE_FPU_EXCEPTION: |
| 127 | pr_debug(KERN_WARNING "FPU exception\n"); | 127 | pr_debug(KERN_WARNING "FPU exception\n"); |
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c new file mode 100644 index 000000000000..515feb404555 --- /dev/null +++ b/arch/microblaze/kernel/ftrace.c | |||
| @@ -0,0 +1,231 @@ | |||
| 1 | /* | ||
| 2 | * Ftrace support for Microblaze. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> | ||
| 5 | * Copyright (C) 2009 PetaLogix | ||
| 6 | * | ||
| 7 | * Based on MIPS and PowerPC ftrace code | ||
| 8 | * | ||
| 9 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 10 | * License. See the file "COPYING" in the main directory of this archive | ||
| 11 | * for more details. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <asm/cacheflush.h> | ||
| 15 | #include <linux/ftrace.h> | ||
| 16 | |||
| 17 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
| 18 | /* | ||
| 19 | * Hook the return address and push it in the stack of return addrs | ||
| 20 | * in current thread info. | ||
| 21 | */ | ||
| 22 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | ||
| 23 | { | ||
| 24 | unsigned long old; | ||
| 25 | int faulted, err; | ||
| 26 | struct ftrace_graph_ent trace; | ||
| 27 | unsigned long return_hooker = (unsigned long) | ||
| 28 | &return_to_handler; | ||
| 29 | |||
| 30 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | ||
| 31 | return; | ||
| 32 | |||
| 33 | /* | ||
| 34 | * Protect against fault, even if it shouldn't | ||
| 35 | * happen. This tool is too intrusive to | ||
| 36 | * ignore such a protection. | ||
| 37 | */ | ||
| 38 | asm volatile(" 1: lwi %0, %2, 0; \ | ||
| 39 | 2: swi %3, %2, 0; \ | ||
| 40 | addik %1, r0, 0; \ | ||
| 41 | 3: \ | ||
| 42 | .section .fixup, \"ax\"; \ | ||
| 43 | 4: brid 3b; \ | ||
| 44 | addik %1, r0, 1; \ | ||
| 45 | .previous; \ | ||
| 46 | .section __ex_table,\"a\"; \ | ||
| 47 | .word 1b,4b; \ | ||
| 48 | .word 2b,4b; \ | ||
| 49 | .previous;" \ | ||
| 50 | : "=&r" (old), "=r" (faulted) | ||
| 51 | : "r" (parent), "r" (return_hooker) | ||
| 52 | ); | ||
| 53 | |||
| 54 | if (unlikely(faulted)) { | ||
| 55 | ftrace_graph_stop(); | ||
| 56 | WARN_ON(1); | ||
| 57 | return; | ||
| 58 | } | ||
| 59 | |||
| 60 | err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0); | ||
| 61 | if (err == -EBUSY) { | ||
| 62 | *parent = old; | ||
| 63 | return; | ||
| 64 | } | ||
| 65 | |||
| 66 | trace.func = self_addr; | ||
| 67 | /* Only trace if the calling function expects to */ | ||
| 68 | if (!ftrace_graph_entry(&trace)) { | ||
| 69 | current->curr_ret_stack--; | ||
| 70 | *parent = old; | ||
| 71 | } | ||
| 72 | } | ||
| 73 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
| 74 | |||
| 75 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
| 76 | /* save value to addr - it is safe to do it in asm */ | ||
| 77 | static int ftrace_modify_code(unsigned long addr, unsigned int value) | ||
| 78 | { | ||
| 79 | int faulted = 0; | ||
| 80 | |||
| 81 | __asm__ __volatile__(" 1: swi %2, %1, 0; \ | ||
| 82 | addik %0, r0, 0; \ | ||
| 83 | 2: \ | ||
| 84 | .section .fixup, \"ax\"; \ | ||
| 85 | 3: brid 2b; \ | ||
| 86 | addik %0, r0, 1; \ | ||
| 87 | .previous; \ | ||
| 88 | .section __ex_table,\"a\"; \ | ||
| 89 | .word 1b,3b; \ | ||
| 90 | .previous;" \ | ||
| 91 | : "=r" (faulted) | ||
| 92 | : "r" (addr), "r" (value) | ||
| 93 | ); | ||
| 94 | |||
| 95 | if (unlikely(faulted)) | ||
| 96 | return -EFAULT; | ||
| 97 | |||
| 98 | return 0; | ||
| 99 | } | ||
| 100 | |||
| 101 | #define MICROBLAZE_NOP 0x80000000 | ||
| 102 | #define MICROBLAZE_BRI 0xb800000C | ||
| 103 | |||
| 104 | static unsigned int recorded; /* whether the original instruction was saved */ | ||
| 105 | static unsigned int imm; /* saving whole imm instruction */ | ||
| 106 | |||
| 107 | /* There are two approaches how to implement the ftrace_make_nop function - see below */ | ||
| 108 | #undef USE_FTRACE_NOP | ||
| 109 | |||
| 110 | #ifdef USE_FTRACE_NOP | ||
| 111 | static unsigned int bralid; /* saving whole bralid instruction */ | ||
| 112 | #endif | ||
| 113 | |||
| 114 | int ftrace_make_nop(struct module *mod, | ||
| 115 | struct dyn_ftrace *rec, unsigned long addr) | ||
| 116 | { | ||
| 117 | /* This is the code sequence we are working with: | ||
| 118 | * b000c000 imm -16384 | ||
| 119 | * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount> | ||
| 120 | * 80000000 or r0, r0, r0 | ||
| 121 | * | ||
| 122 | * The first solution (!USE_FTRACE_NOP - could be called the branch solution) | ||
| 123 | * b800000c bri 12 (0xC - jump over the call to any other instruction) | ||
| 124 | * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount> | ||
| 125 | * 80000000 or r0, r0, r0 | ||
| 126 | * any other instruction | ||
| 127 | * | ||
| 128 | * The second solution (USE_FTRACE_NOP) - no jump, just nops | ||
| 129 | * 80000000 or r0, r0, r0 | ||
| 130 | * 80000000 or r0, r0, r0 | ||
| 131 | * 80000000 or r0, r0, r0 | ||
| 132 | */ | ||
| 133 | int ret = 0; | ||
| 134 | |||
| 135 | if (recorded == 0) { | ||
| 136 | recorded = 1; | ||
| 137 | imm = *(unsigned int *)rec->ip; | ||
| 138 | pr_debug("%s: imm:0x%x\n", __func__, imm); | ||
| 139 | #ifdef USE_FTRACE_NOP | ||
| 140 | bralid = *(unsigned int *)(rec->ip + 4); | ||
| 141 | pr_debug("%s: bralid 0x%x\n", __func__, bralid); | ||
| 142 | #endif /* USE_FTRACE_NOP */ | ||
| 143 | } | ||
| 144 | |||
| 145 | #ifdef USE_FTRACE_NOP | ||
| 146 | ret = ftrace_modify_code(rec->ip, MICROBLAZE_NOP); | ||
| 147 | ret += ftrace_modify_code(rec->ip + 4, MICROBLAZE_NOP); | ||
| 148 | #else /* USE_FTRACE_NOP */ | ||
| 149 | ret = ftrace_modify_code(rec->ip, MICROBLAZE_BRI); | ||
| 150 | #endif /* USE_FTRACE_NOP */ | ||
| 151 | return ret; | ||
| 152 | } | ||
| 153 | |||
| 154 | /* ftrace_make_nop is expected to be called first, before this function */ | ||
| 155 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | ||
| 156 | { | ||
| 157 | int ret; | ||
| 158 | pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n", | ||
| 159 | __func__, (unsigned int)addr, (unsigned int)rec->ip, imm); | ||
| 160 | ret = ftrace_modify_code(rec->ip, imm); | ||
| 161 | #ifdef USE_FTRACE_NOP | ||
| 162 | pr_debug("%s: bralid:0x%x\n", __func__, bralid); | ||
| 163 | ret += ftrace_modify_code(rec->ip + 4, bralid); | ||
| 164 | #endif /* USE_FTRACE_NOP */ | ||
| 165 | return ret; | ||
| 166 | } | ||
| 167 | |||
| 168 | int __init ftrace_dyn_arch_init(void *data) | ||
| 169 | { | ||
| 170 | /* The return code is returned via data */ | ||
| 171 | *(unsigned long *)data = 0; | ||
| 172 | |||
| 173 | return 0; | ||
| 174 | } | ||
| 175 | |||
| 176 | int ftrace_update_ftrace_func(ftrace_func_t func) | ||
| 177 | { | ||
| 178 | unsigned long ip = (unsigned long)(&ftrace_call); | ||
| 179 | unsigned int upper = (unsigned int)func; | ||
| 180 | unsigned int lower = (unsigned int)func; | ||
| 181 | int ret = 0; | ||
| 182 | |||
| 183 | /* create the instruction pair that stores func at the ftrace_call site */ | ||
| 184 | upper = 0xb0000000 + (upper >> 16); /* imm func_upper */ | ||
| 185 | lower = 0x32800000 + (lower & 0xFFFF); /* addik r20, r0, func_lower */ | ||
| 186 | |||
| 187 | pr_debug("%s: func=0x%x, ip=0x%x, upper=0x%x, lower=0x%x\n", | ||
| 188 | __func__, (unsigned int)func, (unsigned int)ip, upper, lower); | ||
| 189 | |||
| 190 | /* save upper and lower code */ | ||
| 191 | ret = ftrace_modify_code(ip, upper); | ||
| 192 | ret += ftrace_modify_code(ip + 4, lower); | ||
| 193 | |||
| 194 | /* We just need to replace the rtsd r15, 8 with NOP */ | ||
| 195 | ret += ftrace_modify_code((unsigned long)&ftrace_caller, | ||
| 196 | MICROBLAZE_NOP); | ||
| 197 | |||
| 198 | /* All changes are done - make the caches consistent */ | ||
| 199 | flush_icache(); | ||
| 200 | return ret; | ||
| 201 | } | ||
| 202 | |||
| 203 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
| 204 | unsigned int old_jump; /* saving place for jump instruction */ | ||
| 205 | |||
| 206 | int ftrace_enable_ftrace_graph_caller(void) | ||
| 207 | { | ||
| 208 | unsigned int ret; | ||
| 209 | unsigned long ip = (unsigned long)(&ftrace_call_graph); | ||
| 210 | |||
| 211 | old_jump = *(unsigned int *)ip; /* save jump over instruction */ | ||
| 212 | ret = ftrace_modify_code(ip, MICROBLAZE_NOP); | ||
| 213 | flush_icache(); | ||
| 214 | |||
| 215 | pr_debug("%s: Replace instruction: 0x%x\n", __func__, old_jump); | ||
| 216 | return ret; | ||
| 217 | } | ||
| 218 | |||
| 219 | int ftrace_disable_ftrace_graph_caller(void) | ||
| 220 | { | ||
| 221 | unsigned int ret; | ||
| 222 | unsigned long ip = (unsigned long)(&ftrace_call_graph); | ||
| 223 | |||
| 224 | ret = ftrace_modify_code(ip, old_jump); | ||
| 225 | flush_icache(); | ||
| 226 | |||
| 227 | pr_debug("%s\n", __func__); | ||
| 228 | return ret; | ||
| 229 | } | ||
| 230 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
| 231 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
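
ftrace_update_ftrace_func() above patches the ftrace_call site with an imm/addik pair that materializes the 32-bit handler address in r20 (which mcount.S later calls through). Here is a hedged, self-contained sketch of that encoding using the same constants as the diff; the sample handler address is made up.

```c
/*
 * Sketch of the "imm func_upper" / "addik r20, r0, func_lower" encoding used
 * by ftrace_update_ftrace_func().  The opcode constants come straight from
 * the diff; the example address is hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

static void encode_call_target(uint32_t func, uint32_t *upper, uint32_t *lower)
{
	*upper = 0xb0000000 + (func >> 16);     /* imm   func_upper           */
	*lower = 0x32800000 + (func & 0xFFFF);  /* addik r20, r0, func_lower  */
}

int main(void)
{
	uint32_t upper, lower;
	uint32_t func = 0xc0008e30;             /* hypothetical handler addr */

	encode_call_target(func, &upper, &lower);
	printf("imm   word: 0x%08x\n", (unsigned)upper);
	printf("addik word: 0x%08x\n", (unsigned)lower);
	return 0;
}
```

Running the sketch prints the two instruction words that would be written over the ftrace_call placeholder nops before the instruction cache is flushed.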
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index 697ce3007f30..1bf739888260 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S | |||
| @@ -28,10 +28,11 @@ | |||
| 28 | * for more details. | 28 | * for more details. |
| 29 | */ | 29 | */ |
| 30 | 30 | ||
| 31 | #include <linux/init.h> | ||
| 31 | #include <linux/linkage.h> | 32 | #include <linux/linkage.h> |
| 32 | #include <asm/thread_info.h> | 33 | #include <asm/thread_info.h> |
| 33 | #include <asm/page.h> | 34 | #include <asm/page.h> |
| 34 | #include <asm/prom.h> /* for OF_DT_HEADER */ | 35 | #include <linux/of_fdt.h> /* for OF_DT_HEADER */ |
| 35 | 36 | ||
| 36 | #ifdef CONFIG_MMU | 37 | #ifdef CONFIG_MMU |
| 37 | #include <asm/setup.h> /* COMMAND_LINE_SIZE */ | 38 | #include <asm/setup.h> /* COMMAND_LINE_SIZE */ |
| @@ -49,8 +50,14 @@ swapper_pg_dir: | |||
| 49 | 50 | ||
| 50 | #endif /* CONFIG_MMU */ | 51 | #endif /* CONFIG_MMU */ |
| 51 | 52 | ||
| 52 | .text | 53 | __HEAD |
| 53 | ENTRY(_start) | 54 | ENTRY(_start) |
| 55 | #if CONFIG_KERNEL_BASE_ADDR == 0 | ||
| 56 | brai TOPHYS(real_start) | ||
| 57 | .org 0x100 | ||
| 58 | real_start: | ||
| 59 | #endif | ||
| 60 | |||
| 54 | mfs r1, rmsr | 61 | mfs r1, rmsr |
| 55 | andi r1, r1, ~2 | 62 | andi r1, r1, ~2 |
| 56 | mts rmsr, r1 | 63 | mts rmsr, r1 |
| @@ -99,8 +106,8 @@ no_fdt_arg: | |||
| 99 | tophys(r4,r4) /* convert to phys address */ | 106 | tophys(r4,r4) /* convert to phys address */ |
| 100 | ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */ | 107 | ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */ |
| 101 | _copy_command_line: | 108 | _copy_command_line: |
| 102 | lbu r7, r5, r6 /* r7=r5+r6 - r5 contains pointer to command line */ | 109 | lbu r2, r5, r6 /* r2=r5+r6 - r5 contains pointer to command line */ |
| 103 | sb r7, r4, r6 /* addr[r4+r6]= r7*/ | 110 | sb r2, r4, r6 /* addr[r4+r6]= r2*/ |
| 104 | addik r6, r6, 1 /* increment counting */ | 111 | addik r6, r6, 1 /* increment counting */ |
| 105 | bgtid r3, _copy_command_line /* loop for all entries */ | 112 | bgtid r3, _copy_command_line /* loop for all entries */ |
| 106 | addik r3, r3, -1 /* decrement loop */ | 113 | addik r3, r3, -1 /* decrement loop */ |
| @@ -128,7 +135,7 @@ _copy_bram: | |||
| 128 | * virtual to physical. | 135 | * virtual to physical. |
| 129 | */ | 136 | */ |
| 130 | nop | 137 | nop |
| 131 | addik r3, r0, 63 /* Invalidate all TLB entries */ | 138 | addik r3, r0, MICROBLAZE_TLB_SIZE - 1 /* Invalidate all TLB entries */ |
| 132 | _invalidate: | 139 | _invalidate: |
| 133 | mts rtlbx, r3 | 140 | mts rtlbx, r3 |
| 134 | mts rtlbhi, r0 /* flush: ensure V is clear */ | 141 | mts rtlbhi, r0 /* flush: ensure V is clear */ |
| @@ -136,6 +143,11 @@ _invalidate: | |||
| 136 | addik r3, r3, -1 | 143 | addik r3, r3, -1 |
| 137 | /* sync */ | 144 | /* sync */ |
| 138 | 145 | ||
| 146 | /* Setup the kernel PID */ | ||
| 147 | mts rpid,r0 /* Load the kernel PID */ | ||
| 148 | nop | ||
| 149 | bri 4 | ||
| 150 | |||
| 139 | /* | 151 | /* |
| 140 | * We should still be executing code at physical address area | 152 | * We should still be executing code at physical address area |
| 141 | * RAM_BASEADDR at this point. However, kernel code is at | 153 | * RAM_BASEADDR at this point. However, kernel code is at |
| @@ -146,10 +158,6 @@ _invalidate: | |||
| 146 | addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */ | 158 | addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */ |
| 147 | tophys(r4,r3) /* Load the kernel physical address */ | 159 | tophys(r4,r3) /* Load the kernel physical address */ |
| 148 | 160 | ||
| 149 | mts rpid,r0 /* Load the kernel PID */ | ||
| 150 | nop | ||
| 151 | bri 4 | ||
| 152 | |||
| 153 | /* | 161 | /* |
| 154 | * Configure and load two entries into TLB slots 0 and 1. | 162 | * Configure and load two entries into TLB slots 0 and 1. |
| 155 | * In case we are pinning TLBs, these are reserved in by the | 163 | * In case we are pinning TLBs, these are reserved in by the |
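
The head.S changes above switch the early TLB wipe from a hard-coded 63 to MICROBLAZE_TLB_SIZE - 1 and move the kernel PID setup before the TLB pinning. A rough, hypothetical C rendering of that sequence follows; write_tlbx()/write_tlbhi()/write_pid() stand in for the mts instructions and only print what the real code writes, and MICROBLAZE_TLB_SIZE is assumed to be 64 (matching the old value of 63 used as the last entry index).

```c
/* Hypothetical model of the early-boot TLB invalidation and kernel PID setup. */
#include <stdio.h>

#define MICROBLAZE_TLB_SIZE 64          /* assumption for this sketch */

static void write_tlbx(int idx)  { printf("mts rtlbx,  %d\n", idx); }
static void write_tlbhi(int val) { printf("mts rtlbhi, %d\n", val); }
static void write_pid(int pid)   { printf("mts rpid,   %d\n", pid); }

static void early_mmu_setup(void)
{
	/* walk entries from MICROBLAZE_TLB_SIZE - 1 down to 0 */
	for (int i = MICROBLAZE_TLB_SIZE - 1; i >= 0; i--) {
		write_tlbx(i);          /* select TLB entry i                  */
		write_tlbhi(0);         /* clear TLBHI: V bit off, entry invalid */
	}
	write_pid(0);                   /* then load the kernel PID (0)        */
}

int main(void)
{
	early_mmu_setup();
	return 0;
}
```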
diff --git a/arch/microblaze/kernel/heartbeat.c b/arch/microblaze/kernel/heartbeat.c index 1bdf20222b92..522751737cfa 100644 --- a/arch/microblaze/kernel/heartbeat.c +++ b/arch/microblaze/kernel/heartbeat.c | |||
| @@ -45,6 +45,7 @@ void heartbeat(void) | |||
| 45 | void setup_heartbeat(void) | 45 | void setup_heartbeat(void) |
| 46 | { | 46 | { |
| 47 | struct device_node *gpio = NULL; | 47 | struct device_node *gpio = NULL; |
| 48 | int *prop; | ||
| 48 | int j; | 49 | int j; |
| 49 | char *gpio_list[] = { | 50 | char *gpio_list[] = { |
| 50 | "xlnx,xps-gpio-1.00.a", | 51 | "xlnx,xps-gpio-1.00.a", |
| @@ -58,10 +59,14 @@ void setup_heartbeat(void) | |||
| 58 | break; | 59 | break; |
| 59 | } | 60 | } |
| 60 | 61 | ||
| 61 | base_addr = *(int *) of_get_property(gpio, "reg", NULL); | 62 | if (gpio) { |
| 62 | base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE); | 63 | base_addr = *(int *) of_get_property(gpio, "reg", NULL); |
| 63 | printk(KERN_NOTICE "Heartbeat GPIO at 0x%x\n", base_addr); | 64 | base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE); |
| 65 | printk(KERN_NOTICE "Heartbeat GPIO at 0x%x\n", base_addr); | ||
| 64 | 66 | ||
| 65 | if (*(int *) of_get_property(gpio, "xlnx,is-bidir", NULL)) | 67 | /* GPIO is configured as output */ |
| 66 | out_be32(base_addr + 4, 0); /* GPIO is configured as output */ | 68 | prop = (int *) of_get_property(gpio, "xlnx,is-bidir", NULL); |
| 69 | if (prop) | ||
| 70 | out_be32(base_addr + 4, 0); | ||
| 71 | } | ||
| 67 | } | 72 | } |
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S index 2b86c03aa841..995a2123635b 100644 --- a/arch/microblaze/kernel/hw_exception_handler.S +++ b/arch/microblaze/kernel/hw_exception_handler.S | |||
| @@ -313,13 +313,13 @@ _hw_exception_handler: | |||
| 313 | mfs r5, rmsr; | 313 | mfs r5, rmsr; |
| 314 | nop | 314 | nop |
| 315 | swi r5, r1, 0; | 315 | swi r5, r1, 0; |
| 316 | mfs r3, resr | 316 | mfs r4, resr |
| 317 | nop | 317 | nop |
| 318 | mfs r4, rear; | 318 | mfs r3, rear; |
| 319 | nop | 319 | nop |
| 320 | 320 | ||
| 321 | #ifndef CONFIG_MMU | 321 | #ifndef CONFIG_MMU |
| 322 | andi r5, r3, 0x1000; /* Check ESR[DS] */ | 322 | andi r5, r4, 0x1000; /* Check ESR[DS] */ |
| 323 | beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */ | 323 | beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */ |
| 324 | mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ | 324 | mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ |
| 325 | nop | 325 | nop |
| @@ -327,13 +327,14 @@ not_in_delay_slot: | |||
| 327 | swi r17, r1, PT_R17 | 327 | swi r17, r1, PT_R17 |
| 328 | #endif | 328 | #endif |
| 329 | 329 | ||
| 330 | andi r5, r3, 0x1F; /* Extract ESR[EXC] */ | 330 | andi r5, r4, 0x1F; /* Extract ESR[EXC] */ |
| 331 | 331 | ||
| 332 | #ifdef CONFIG_MMU | 332 | #ifdef CONFIG_MMU |
| 333 | /* Calculate exception vector offset = r5 << 2 */ | 333 | /* Calculate exception vector offset = r5 << 2 */ |
| 334 | addk r6, r5, r5; /* << 1 */ | 334 | addk r6, r5, r5; /* << 1 */ |
| 335 | addk r6, r6, r6; /* << 2 */ | 335 | addk r6, r6, r6; /* << 2 */ |
| 336 | 336 | ||
| 337 | #ifdef DEBUG | ||
| 337 | /* counting which exception happen */ | 338 | /* counting which exception happen */ |
| 338 | lwi r5, r0, 0x200 + TOPHYS(r0_ram) | 339 | lwi r5, r0, 0x200 + TOPHYS(r0_ram) |
| 339 | addi r5, r5, 1 | 340 | addi r5, r5, 1 |
| @@ -341,6 +342,7 @@ not_in_delay_slot: | |||
| 341 | lwi r5, r6, 0x200 + TOPHYS(r0_ram) | 342 | lwi r5, r6, 0x200 + TOPHYS(r0_ram) |
| 342 | addi r5, r5, 1 | 343 | addi r5, r5, 1 |
| 343 | swi r5, r6, 0x200 + TOPHYS(r0_ram) | 344 | swi r5, r6, 0x200 + TOPHYS(r0_ram) |
| 345 | #endif | ||
| 344 | /* end */ | 346 | /* end */ |
| 345 | /* Load the HW Exception vector */ | 347 | /* Load the HW Exception vector */ |
| 346 | lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable) | 348 | lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable) |
| @@ -376,7 +378,7 @@ handle_other_ex: /* Handle Other exceptions here */ | |||
| 376 | swi r18, r1, PT_R18 | 378 | swi r18, r1, PT_R18 |
| 377 | 379 | ||
| 378 | or r5, r1, r0 | 380 | or r5, r1, r0 |
| 379 | andi r6, r3, 0x1F; /* Load ESR[EC] */ | 381 | andi r6, r4, 0x1F; /* Load ESR[EC] */ |
| 380 | lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */ | 382 | lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */ |
| 381 | swi r7, r1, PT_MODE | 383 | swi r7, r1, PT_MODE |
| 382 | mfs r7, rfsr | 384 | mfs r7, rfsr |
| @@ -426,11 +428,11 @@ handle_other_ex: /* Handle Other exceptions here */ | |||
| 426 | */ | 428 | */ |
| 427 | handle_unaligned_ex: | 429 | handle_unaligned_ex: |
| 428 | /* Working registers already saved: R3, R4, R5, R6 | 430 | /* Working registers already saved: R3, R4, R5, R6 |
| 429 | * R3 = ESR | 431 | * R4 = ESR |
| 430 | * R4 = EAR | 432 | * R3 = EAR |
| 431 | */ | 433 | */ |
| 432 | #ifdef CONFIG_MMU | 434 | #ifdef CONFIG_MMU |
| 433 | andi r6, r3, 0x1000 /* Check ESR[DS] */ | 435 | andi r6, r4, 0x1000 /* Check ESR[DS] */ |
| 434 | beqi r6, _no_delayslot /* Branch if ESR[DS] not set */ | 436 | beqi r6, _no_delayslot /* Branch if ESR[DS] not set */ |
| 435 | mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ | 437 | mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ |
| 436 | nop | 438 | nop |
| @@ -439,7 +441,7 @@ _no_delayslot: | |||
| 439 | RESTORE_STATE; | 441 | RESTORE_STATE; |
| 440 | bri unaligned_data_trap | 442 | bri unaligned_data_trap |
| 441 | #endif | 443 | #endif |
| 442 | andi r6, r3, 0x3E0; /* Mask and extract the register operand */ | 444 | andi r6, r4, 0x3E0; /* Mask and extract the register operand */ |
| 443 | srl r6, r6; /* r6 >> 5 */ | 445 | srl r6, r6; /* r6 >> 5 */ |
| 444 | srl r6, r6; | 446 | srl r6, r6; |
| 445 | srl r6, r6; | 447 | srl r6, r6; |
| @@ -448,33 +450,33 @@ _no_delayslot: | |||
| 448 | /* Store the register operand in a temporary location */ | 450 | /* Store the register operand in a temporary location */ |
| 449 | sbi r6, r0, TOPHYS(ex_reg_op); | 451 | sbi r6, r0, TOPHYS(ex_reg_op); |
| 450 | 452 | ||
| 451 | andi r6, r3, 0x400; /* Extract ESR[S] */ | 453 | andi r6, r4, 0x400; /* Extract ESR[S] */ |
| 452 | bnei r6, ex_sw; | 454 | bnei r6, ex_sw; |
| 453 | ex_lw: | 455 | ex_lw: |
| 454 | andi r6, r3, 0x800; /* Extract ESR[W] */ | 456 | andi r6, r4, 0x800; /* Extract ESR[W] */ |
| 455 | beqi r6, ex_lhw; | 457 | beqi r6, ex_lhw; |
| 456 | lbui r5, r4, 0; /* Exception address in r4 */ | 458 | lbui r5, r3, 0; /* Exception address in r3 */ |
| 457 | /* Load a word, byte-by-byte from destination address | 459 | /* Load a word, byte-by-byte from destination address |
| 458 | and save it in tmp space */ | 460 | and save it in tmp space */ |
| 459 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); | 461 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); |
| 460 | lbui r5, r4, 1; | 462 | lbui r5, r3, 1; |
| 461 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); | 463 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); |
| 462 | lbui r5, r4, 2; | 464 | lbui r5, r3, 2; |
| 463 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_2); | 465 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_2); |
| 464 | lbui r5, r4, 3; | 466 | lbui r5, r3, 3; |
| 465 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_3); | 467 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_3); |
| 466 | /* Get the destination register value into r3 */ | 468 | /* Get the destination register value into r4 */ |
| 467 | lwi r3, r0, TOPHYS(ex_tmp_data_loc_0); | 469 | lwi r4, r0, TOPHYS(ex_tmp_data_loc_0); |
| 468 | bri ex_lw_tail; | 470 | bri ex_lw_tail; |
| 469 | ex_lhw: | 471 | ex_lhw: |
| 470 | lbui r5, r4, 0; /* Exception address in r4 */ | 472 | lbui r5, r3, 0; /* Exception address in r3 */ |
| 471 | /* Load a half-word, byte-by-byte from destination | 473 | /* Load a half-word, byte-by-byte from destination |
| 472 | address and save it in tmp space */ | 474 | address and save it in tmp space */ |
| 473 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); | 475 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); |
| 474 | lbui r5, r4, 1; | 476 | lbui r5, r3, 1; |
| 475 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); | 477 | sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); |
| 476 | /* Get the destination register value into r3 */ | 478 | /* Get the destination register value into r4 */ |
| 477 | lhui r3, r0, TOPHYS(ex_tmp_data_loc_0); | 479 | lhui r4, r0, TOPHYS(ex_tmp_data_loc_0); |
| 478 | ex_lw_tail: | 480 | ex_lw_tail: |
| 479 | /* Get the destination register number into r5 */ | 481 | /* Get the destination register number into r5 */ |
| 480 | lbui r5, r0, TOPHYS(ex_reg_op); | 482 | lbui r5, r0, TOPHYS(ex_reg_op); |
| @@ -502,25 +504,25 @@ ex_sw_tail: | |||
| 502 | andi r6, r6, 0x800; /* Extract ESR[W] */ | 504 | andi r6, r6, 0x800; /* Extract ESR[W] */ |
| 503 | beqi r6, ex_shw; | 505 | beqi r6, ex_shw; |
| 504 | /* Get the word - delay slot */ | 506 | /* Get the word - delay slot */ |
| 505 | swi r3, r0, TOPHYS(ex_tmp_data_loc_0); | 507 | swi r4, r0, TOPHYS(ex_tmp_data_loc_0); |
| 506 | /* Store the word, byte-by-byte into destination address */ | 508 | /* Store the word, byte-by-byte into destination address */ |
| 507 | lbui r3, r0, TOPHYS(ex_tmp_data_loc_0); | 509 | lbui r4, r0, TOPHYS(ex_tmp_data_loc_0); |
| 508 | sbi r3, r4, 0; | 510 | sbi r4, r3, 0; |
| 509 | lbui r3, r0, TOPHYS(ex_tmp_data_loc_1); | 511 | lbui r4, r0, TOPHYS(ex_tmp_data_loc_1); |
| 510 | sbi r3, r4, 1; | 512 | sbi r4, r3, 1; |
| 511 | lbui r3, r0, TOPHYS(ex_tmp_data_loc_2); | 513 | lbui r4, r0, TOPHYS(ex_tmp_data_loc_2); |
| 512 | sbi r3, r4, 2; | 514 | sbi r4, r3, 2; |
| 513 | lbui r3, r0, TOPHYS(ex_tmp_data_loc_3); | 515 | lbui r4, r0, TOPHYS(ex_tmp_data_loc_3); |
| 514 | sbi r3, r4, 3; | 516 | sbi r4, r3, 3; |
| 515 | bri ex_handler_done; | 517 | bri ex_handler_done; |
| 516 | 518 | ||
| 517 | ex_shw: | 519 | ex_shw: |
| 518 | /* Store the lower half-word, byte-by-byte into destination address */ | 520 | /* Store the lower half-word, byte-by-byte into destination address */ |
| 519 | swi r3, r0, TOPHYS(ex_tmp_data_loc_0); | 521 | swi r4, r0, TOPHYS(ex_tmp_data_loc_0); |
| 520 | lbui r3, r0, TOPHYS(ex_tmp_data_loc_2); | 522 | lbui r4, r0, TOPHYS(ex_tmp_data_loc_2); |
| 521 | sbi r3, r4, 0; | 523 | sbi r4, r3, 0; |
| 522 | lbui r3, r0, TOPHYS(ex_tmp_data_loc_3); | 524 | lbui r4, r0, TOPHYS(ex_tmp_data_loc_3); |
| 523 | sbi r3, r4, 1; | 525 | sbi r4, r3, 1; |
| 524 | ex_sw_end: /* Exception handling of store word, ends. */ | 526 | ex_sw_end: /* Exception handling of store word, ends. */ |
| 525 | 527 | ||
| 526 | ex_handler_done: | 528 | ex_handler_done: |
| @@ -560,21 +562,16 @@ ex_handler_done: | |||
| 560 | */ | 562 | */ |
| 561 | mfs r11, rpid | 563 | mfs r11, rpid |
| 562 | nop | 564 | nop |
| 563 | bri 4 | ||
| 564 | mfs r3, rear /* Get faulting address */ | ||
| 565 | nop | ||
| 566 | /* If we are faulting a kernel address, we have to use the | 565 | /* If we are faulting a kernel address, we have to use the |
| 567 | * kernel page tables. | 566 | * kernel page tables. |
| 568 | */ | 567 | */ |
| 569 | ori r4, r0, CONFIG_KERNEL_START | 568 | ori r5, r0, CONFIG_KERNEL_START |
| 570 | cmpu r4, r3, r4 | 569 | cmpu r5, r3, r5 |
| 571 | bgti r4, ex3 | 570 | bgti r5, ex3 |
| 572 | /* First, check if it was a zone fault (which means a user | 571 | /* First, check if it was a zone fault (which means a user |
| 573 | * tried to access a kernel or read-protected page - always | 572 | * tried to access a kernel or read-protected page - always |
| 574 | * a SEGV). All other faults here must be stores, so no | 573 | * a SEGV). All other faults here must be stores, so no |
| 575 | * need to check ESR_S as well. */ | 574 | * need to check ESR_S as well. */ |
| 576 | mfs r4, resr | ||
| 577 | nop | ||
| 578 | andi r4, r4, 0x800 /* ESR_Z - zone protection */ | 575 | andi r4, r4, 0x800 /* ESR_Z - zone protection */ |
| 579 | bnei r4, ex2 | 576 | bnei r4, ex2 |
| 580 | 577 | ||
| @@ -589,8 +586,6 @@ ex_handler_done: | |||
| 589 | * tried to access a kernel or read-protected page - always | 586 | * tried to access a kernel or read-protected page - always |
| 590 | * a SEGV). All other faults here must be stores, so no | 587 | * a SEGV). All other faults here must be stores, so no |
| 591 | * need to check ESR_S as well. */ | 588 | * need to check ESR_S as well. */ |
| 592 | mfs r4, resr | ||
| 593 | nop | ||
| 594 | andi r4, r4, 0x800 /* ESR_Z */ | 589 | andi r4, r4, 0x800 /* ESR_Z */ |
| 595 | bnei r4, ex2 | 590 | bnei r4, ex2 |
| 596 | /* get current task address */ | 591 | /* get current task address */ |
| @@ -665,8 +660,6 @@ ex_handler_done: | |||
| 665 | * R3 = ESR | 660 | * R3 = ESR |
| 666 | */ | 661 | */ |
| 667 | 662 | ||
| 668 | mfs r3, rear /* Get faulting address */ | ||
| 669 | nop | ||
| 670 | RESTORE_STATE; | 663 | RESTORE_STATE; |
| 671 | bri page_fault_instr_trap | 664 | bri page_fault_instr_trap |
| 672 | 665 | ||
| @@ -677,18 +670,15 @@ ex_handler_done: | |||
| 677 | */ | 670 | */ |
| 678 | handle_data_tlb_miss_exception: | 671 | handle_data_tlb_miss_exception: |
| 679 | /* Working registers already saved: R3, R4, R5, R6 | 672 | /* Working registers already saved: R3, R4, R5, R6 |
| 680 | * R3 = ESR | 673 | * R3 = EAR, R4 = ESR |
| 681 | */ | 674 | */ |
| 682 | mfs r11, rpid | 675 | mfs r11, rpid |
| 683 | nop | 676 | nop |
| 684 | bri 4 | ||
| 685 | mfs r3, rear /* Get faulting address */ | ||
| 686 | nop | ||
| 687 | 677 | ||
| 688 | /* If we are faulting a kernel address, we have to use the | 678 | /* If we are faulting a kernel address, we have to use the |
| 689 | * kernel page tables. */ | 679 | * kernel page tables. */ |
| 690 | ori r4, r0, CONFIG_KERNEL_START | 680 | ori r6, r0, CONFIG_KERNEL_START |
| 691 | cmpu r4, r3, r4 | 681 | cmpu r4, r3, r6 |
| 692 | bgti r4, ex5 | 682 | bgti r4, ex5 |
| 693 | ori r4, r0, swapper_pg_dir | 683 | ori r4, r0, swapper_pg_dir |
| 694 | mts rpid, r0 /* TLB will have 0 TID */ | 684 | mts rpid, r0 /* TLB will have 0 TID */ |
| @@ -731,9 +721,8 @@ ex_handler_done: | |||
| 731 | * Many of these bits are software only. Bits we don't set | 721 | * Many of these bits are software only. Bits we don't set |
| 732 | * here we (properly should) assume have the appropriate value. | 722 | * here we (properly should) assume have the appropriate value. |
| 733 | */ | 723 | */ |
| 724 | brid finish_tlb_load | ||
| 734 | andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ | 725 | andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ |
| 735 | |||
| 736 | bri finish_tlb_load | ||
| 737 | ex7: | 726 | ex7: |
| 738 | /* The bailout. Restore registers to pre-exception conditions | 727 | /* The bailout. Restore registers to pre-exception conditions |
| 739 | * and call the heavyweights to help us out. | 728 | * and call the heavyweights to help us out. |
| @@ -754,9 +743,6 @@ ex_handler_done: | |||
| 754 | */ | 743 | */ |
| 755 | mfs r11, rpid | 744 | mfs r11, rpid |
| 756 | nop | 745 | nop |
| 757 | bri 4 | ||
| 758 | mfs r3, rear /* Get faulting address */ | ||
| 759 | nop | ||
| 760 | 746 | ||
| 761 | /* If we are faulting a kernel address, we have to use the | 747 | /* If we are faulting a kernel address, we have to use the |
| 762 | * kernel page tables. | 748 | * kernel page tables. |
| @@ -792,7 +778,7 @@ ex_handler_done: | |||
| 792 | lwi r4, r5, 0 /* Get Linux PTE */ | 778 | lwi r4, r5, 0 /* Get Linux PTE */ |
| 793 | 779 | ||
| 794 | andi r6, r4, _PAGE_PRESENT | 780 | andi r6, r4, _PAGE_PRESENT |
| 795 | beqi r6, ex7 | 781 | beqi r6, ex10 |
| 796 | 782 | ||
| 797 | ori r4, r4, _PAGE_ACCESSED | 783 | ori r4, r4, _PAGE_ACCESSED |
| 798 | swi r4, r5, 0 | 784 | swi r4, r5, 0 |
| @@ -805,9 +791,8 @@ ex_handler_done: | |||
| 805 | * Many of these bits are software only. Bits we don't set | 791 | * Many of these bits are software only. Bits we don't set |
| 806 | * here we (properly should) assume have the appropriate value. | 792 | * here we (properly should) assume have the appropriate value. |
| 807 | */ | 793 | */ |
| 794 | brid finish_tlb_load | ||
| 808 | andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ | 795 | andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ |
| 809 | |||
| 810 | bri finish_tlb_load | ||
| 811 | ex10: | 796 | ex10: |
| 812 | /* The bailout. Restore registers to pre-exception conditions | 797 | /* The bailout. Restore registers to pre-exception conditions |
| 813 | * and call the heavyweights to help us out. | 798 | * and call the heavyweights to help us out. |
| @@ -837,9 +822,9 @@ ex_handler_done: | |||
| 837 | andi r5, r5, (MICROBLAZE_TLB_SIZE-1) | 822 | andi r5, r5, (MICROBLAZE_TLB_SIZE-1) |
| 838 | ori r6, r0, 1 | 823 | ori r6, r0, 1 |
| 839 | cmp r31, r5, r6 | 824 | cmp r31, r5, r6 |
| 840 | blti r31, sem | 825 | blti r31, ex12 |
| 841 | addik r5, r6, 1 | 826 | addik r5, r6, 1 |
| 842 | sem: | 827 | ex12: |
| 843 | /* MS: save back current TLB index */ | 828 | /* MS: save back current TLB index */ |
| 844 | swi r5, r0, TOPHYS(tlb_index) | 829 | swi r5, r0, TOPHYS(tlb_index) |
| 845 | 830 | ||
| @@ -859,7 +844,6 @@ ex_handler_done: | |||
| 859 | nop | 844 | nop |
| 860 | 845 | ||
| 861 | /* Done...restore registers and get out of here. */ | 846 | /* Done...restore registers and get out of here. */ |
| 862 | ex12: | ||
| 863 | mts rpid, r11 | 847 | mts rpid, r11 |
| 864 | nop | 848 | nop |
| 865 | bri 4 | 849 | bri 4 |
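
The reworked unaligned-access handler above now keeps the EAR in r3 and the ESR in r4 and works entirely from ESR bit masks: 0x1000 (ESR[DS], delay slot), 0x3E0 shifted right by 5 (register operand), 0x400 (ESR[S], store), 0x800 (ESR[W], word) and 0x1F (exception cause). The short sketch below decodes the same fields in C; the struct and field names are made up for illustration and are not a complete MicroBlaze ESR definition.

```c
/* Illustrative decode of the ESR fields used by the unaligned-access path. */
#include <stdio.h>
#include <stdint.h>

struct esr_fields {
	unsigned delay_slot;    /* ESR[DS],  mask 0x1000 */
	unsigned is_store;      /* ESR[S],   mask 0x400  */
	unsigned is_word;       /* ESR[W],   mask 0x800  */
	unsigned reg_operand;   /* bits 0x3E0 >> 5       */
	unsigned exc_cause;     /* ESR[EXC], mask 0x1F   */
};

static struct esr_fields decode_esr(uint32_t esr)
{
	struct esr_fields f = {
		.delay_slot  = !!(esr & 0x1000),
		.is_store    = !!(esr & 0x400),
		.is_word     = !!(esr & 0x800),
		.reg_operand = (esr & 0x3E0) >> 5,
		.exc_cause   = esr & 0x1F,
	};
	return f;
}

int main(void)
{
	struct esr_fields f = decode_esr(0x0D61);       /* made-up example value */

	printf("ds=%u store=%u word=%u reg=r%u cause=%u\n",
	       f.delay_slot, f.is_store, f.is_word, f.reg_operand, f.exc_cause);
	return 0;
}
```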
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c index 6eea6f92b84e..03172c1da770 100644 --- a/arch/microblaze/kernel/intc.c +++ b/arch/microblaze/kernel/intc.c | |||
| @@ -42,8 +42,16 @@ unsigned int nr_irq; | |||
| 42 | 42 | ||
| 43 | static void intc_enable_or_unmask(unsigned int irq) | 43 | static void intc_enable_or_unmask(unsigned int irq) |
| 44 | { | 44 | { |
| 45 | unsigned long mask = 1 << irq; | ||
| 45 | pr_debug("enable_or_unmask: %d\n", irq); | 46 | pr_debug("enable_or_unmask: %d\n", irq); |
| 46 | out_be32(INTC_BASE + SIE, 1 << irq); | 47 | out_be32(INTC_BASE + SIE, mask); |
| 48 | |||
| 49 | /* ack level irqs here because they can't be acked in the | ||
| 50 | * ack function, since the handle_level_irq function | ||
| 51 | * acks the irq before calling the interrupt handler | ||
| 52 | */ | ||
| 53 | if (irq_desc[irq].status & IRQ_LEVEL) | ||
| 54 | out_be32(INTC_BASE + IAR, mask); | ||
| 47 | } | 55 | } |
| 48 | 56 | ||
| 49 | static void intc_disable_or_mask(unsigned int irq) | 57 | static void intc_disable_or_mask(unsigned int irq) |
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c index 7d5ddd62d4d2..8f120aca123d 100644 --- a/arch/microblaze/kernel/irq.c +++ b/arch/microblaze/kernel/irq.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
| 12 | #include <linux/ftrace.h> | ||
| 12 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
| 13 | #include <linux/hardirq.h> | 14 | #include <linux/hardirq.h> |
| 14 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
| @@ -32,7 +33,7 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map); | |||
| 32 | 33 | ||
| 33 | static u32 concurrent_irq; | 34 | static u32 concurrent_irq; |
| 34 | 35 | ||
| 35 | void do_IRQ(struct pt_regs *regs) | 36 | void __irq_entry do_IRQ(struct pt_regs *regs) |
| 36 | { | 37 | { |
| 37 | unsigned int irq; | 38 | unsigned int irq; |
| 38 | struct pt_regs *old_regs = set_irq_regs(regs); | 39 | struct pt_regs *old_regs = set_irq_regs(regs); |
| @@ -68,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 68 | } | 69 | } |
| 69 | 70 | ||
| 70 | if (i < nr_irq) { | 71 | if (i < nr_irq) { |
| 71 | spin_lock_irqsave(&irq_desc[i].lock, flags); | 72 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); |
| 72 | action = irq_desc[i].action; | 73 | action = irq_desc[i].action; |
| 73 | if (!action) | 74 | if (!action) |
| 74 | goto skip; | 75 | goto skip; |
| @@ -89,7 +90,22 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 89 | 90 | ||
| 90 | seq_putc(p, '\n'); | 91 | seq_putc(p, '\n'); |
| 91 | skip: | 92 | skip: |
| 92 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 93 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
| 93 | } | 94 | } |
| 94 | return 0; | 95 | return 0; |
| 95 | } | 96 | } |
| 97 | |||
| 98 | /* MS: There is no advanced mapping mechanism. We are using a simple 32-bit | ||
| 99 | intc without any cascades or connections, that's why the mapping is 1:1 */ | ||
| 100 | unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq) | ||
| 101 | { | ||
| 102 | return hwirq; | ||
| 103 | } | ||
| 104 | EXPORT_SYMBOL_GPL(irq_create_mapping); | ||
| 105 | |||
| 106 | unsigned int irq_create_of_mapping(struct device_node *controller, | ||
| 107 | u32 *intspec, unsigned int intsize) | ||
| 108 | { | ||
| 109 | return intspec[0]; | ||
| 110 | } | ||
| 111 | EXPORT_SYMBOL_GPL(irq_create_of_mapping); | ||
diff --git a/arch/microblaze/kernel/mcount.S b/arch/microblaze/kernel/mcount.S new file mode 100644 index 000000000000..e7eaa7a8cbd3 --- /dev/null +++ b/arch/microblaze/kernel/mcount.S | |||
| @@ -0,0 +1,170 @@ | |||
| 1 | /* | ||
| 2 | * Low-level ftrace handling | ||
| 3 | * | ||
| 4 | * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> | ||
| 5 | * Copyright (C) 2009 PetaLogix | ||
| 6 | * | ||
| 7 | * This file is subject to the terms and conditions of the GNU General | ||
| 8 | * Public License. See the file COPYING in the main directory of this | ||
| 9 | * archive for more details. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/linkage.h> | ||
| 13 | |||
| 14 | #define NOALIGN_ENTRY(name) .globl name; name: | ||
| 15 | |||
| 16 | /* FIXME MS: I think that I don't need to save all regs */ | ||
| 17 | #define SAVE_REGS \ | ||
| 18 | addik r1, r1, -120; \ | ||
| 19 | swi r2, r1, 4; \ | ||
| 20 | swi r3, r1, 8; \ | ||
| 21 | swi r4, r1, 12; \ | ||
| 22 | swi r5, r1, 116; \ | ||
| 23 | swi r6, r1, 16; \ | ||
| 24 | swi r7, r1, 20; \ | ||
| 25 | swi r8, r1, 24; \ | ||
| 26 | swi r9, r1, 28; \ | ||
| 27 | swi r10, r1, 32; \ | ||
| 28 | swi r11, r1, 36; \ | ||
| 29 | swi r12, r1, 40; \ | ||
| 30 | swi r13, r1, 44; \ | ||
| 31 | swi r14, r1, 48; \ | ||
| 32 | swi r16, r1, 52; \ | ||
| 33 | swi r17, r1, 56; \ | ||
| 34 | swi r18, r1, 60; \ | ||
| 35 | swi r19, r1, 64; \ | ||
| 36 | swi r20, r1, 68; \ | ||
| 37 | swi r21, r1, 72; \ | ||
| 38 | swi r22, r1, 76; \ | ||
| 39 | swi r23, r1, 80; \ | ||
| 40 | swi r24, r1, 84; \ | ||
| 41 | swi r25, r1, 88; \ | ||
| 42 | swi r26, r1, 92; \ | ||
| 43 | swi r27, r1, 96; \ | ||
| 44 | swi r28, r1, 100; \ | ||
| 45 | swi r29, r1, 104; \ | ||
| 46 | swi r30, r1, 108; \ | ||
| 47 | swi r31, r1, 112; | ||
| 48 | |||
| 49 | #define RESTORE_REGS \ | ||
| 50 | lwi r2, r1, 4; \ | ||
| 51 | lwi r3, r1, 8; \ | ||
| 52 | lwi r4, r1, 12; \ | ||
| 53 | lwi r5, r1, 116; \ | ||
| 54 | lwi r6, r1, 16; \ | ||
| 55 | lwi r7, r1, 20; \ | ||
| 56 | lwi r8, r1, 24; \ | ||
| 57 | lwi r9, r1, 28; \ | ||
| 58 | lwi r10, r1, 32; \ | ||
| 59 | lwi r11, r1, 36; \ | ||
| 60 | lwi r12, r1, 40; \ | ||
| 61 | lwi r13, r1, 44; \ | ||
| 62 | lwi r14, r1, 48; \ | ||
| 63 | lwi r16, r1, 52; \ | ||
| 64 | lwi r17, r1, 56; \ | ||
| 65 | lwi r18, r1, 60; \ | ||
| 66 | lwi r19, r1, 64; \ | ||
| 67 | lwi r20, r1, 68; \ | ||
| 68 | lwi r21, r1, 72; \ | ||
| 69 | lwi r22, r1, 76; \ | ||
| 70 | lwi r23, r1, 80; \ | ||
| 71 | lwi r24, r1, 84; \ | ||
| 72 | lwi r25, r1, 88; \ | ||
| 73 | lwi r26, r1, 92; \ | ||
| 74 | lwi r27, r1, 96; \ | ||
| 75 | lwi r28, r1, 100; \ | ||
| 76 | lwi r29, r1, 104; \ | ||
| 77 | lwi r30, r1, 108; \ | ||
| 78 | lwi r31, r1, 112; \ | ||
| 79 | addik r1, r1, 120; | ||
| 80 | |||
| 81 | ENTRY(ftrace_stub) | ||
| 82 | rtsd r15, 8; | ||
| 83 | nop; | ||
| 84 | |||
| 85 | ENTRY(_mcount) | ||
| 86 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
| 87 | ENTRY(ftrace_caller) | ||
| 88 | /* MS: It is just barrier which is removed from C code */ | ||
| 89 | rtsd r15, 8 | ||
| 90 | nop | ||
| 91 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
| 92 | SAVE_REGS | ||
| 93 | swi r15, r1, 0; | ||
| 94 | /* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST begin of checking */ | ||
| 95 | lwi r5, r0, function_trace_stop; | ||
| 96 | bneid r5, end; | ||
| 97 | nop; | ||
| 98 | /* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST end of checking */ | ||
| 99 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
| 100 | #ifndef CONFIG_DYNAMIC_FTRACE | ||
| 101 | lwi r5, r0, ftrace_graph_return; | ||
| 102 | addik r6, r0, ftrace_stub; /* asm implementation */ | ||
| 103 | cmpu r5, r5, r6; /* ftrace_graph_return != ftrace_stub */ | ||
| 104 | beqid r5, end_graph_tracer; | ||
| 105 | nop; | ||
| 106 | |||
| 107 | lwi r6, r0, ftrace_graph_entry; | ||
| 108 | addik r5, r0, ftrace_graph_entry_stub; /* implemented in C */ | ||
| 109 | cmpu r5, r5, r6; /* ftrace_graph_entry != ftrace_graph_entry_stub */ | ||
| 110 | beqid r5, end_graph_tracer; | ||
| 111 | nop; | ||
| 112 | #else /* CONFIG_DYNAMIC_FTRACE */ | ||
| 113 | NOALIGN_ENTRY(ftrace_call_graph) | ||
| 114 | /* MS: jump over graph function - replaced from C code */ | ||
| 115 | bri end_graph_tracer | ||
| 116 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
| 117 | addik r5, r1, 120; /* MS: load parent addr */ | ||
| 118 | addik r6, r15, 0; /* MS: load current function addr */ | ||
| 119 | bralid r15, prepare_ftrace_return; | ||
| 120 | nop; | ||
| 121 | /* MS: the graph path was taken - that's why we can jump over the function trace */ | ||
| 122 | brid end; | ||
| 123 | nop; | ||
| 124 | end_graph_tracer: | ||
| 125 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
| 126 | #ifndef CONFIG_DYNAMIC_FTRACE | ||
| 127 | /* MS: test function trace if is taken or not */ | ||
| 128 | lwi r20, r0, ftrace_trace_function; | ||
| 129 | addik r6, r0, ftrace_stub; | ||
| 130 | cmpu r5, r20, r6; /* ftrace_trace_function != ftrace_stub */ | ||
| 131 | beqid r5, end; /* MS: not taken -> jump over */ | ||
| 132 | nop; | ||
| 133 | #else /* CONFIG_DYNAMIC_FTRACE */ | ||
| 134 | NOALIGN_ENTRY(ftrace_call) | ||
| 135 | /* instruction for setup imm FUNC_part1, addik r20, r0, FUNC_part2 */ | ||
| 136 | nop | ||
| 137 | nop | ||
| 138 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
| 139 | /* static normal trace */ | ||
| 140 | lwi r6, r1, 120; /* MS: load parent addr */ | ||
| 141 | addik r5, r15, 0; /* MS: load current function addr */ | ||
| 142 | /* MS: here is dependency on previous code */ | ||
| 143 | brald r15, r20; /* MS: jump to ftrace handler */ | ||
| 144 | nop; | ||
| 145 | end: | ||
| 146 | lwi r15, r1, 0; | ||
| 147 | RESTORE_REGS | ||
| 148 | |||
| 149 | rtsd r15, 8; /* MS: jump back */ | ||
| 150 | nop; | ||
| 151 | |||
| 152 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
| 153 | ENTRY(return_to_handler) | ||
| 154 | nop; /* MS: just barrier for rtsd r15, 8 */ | ||
| 155 | nop; | ||
| 156 | SAVE_REGS | ||
| 157 | swi r15, r1, 0; | ||
| 158 | |||
| 159 | /* MS: find out returning address */ | ||
| 160 | bralid r15, ftrace_return_to_handler; | ||
| 161 | nop; | ||
| 162 | |||
| 163 | /* MS: the return value of ftrace_return_to_handler is the address to | ||
| 164 | * return to; do this before RESTORE_REGS because it restores r3 */ | ||
| 165 | addik r15, r3, 0; | ||
| 166 | RESTORE_REGS | ||
| 167 | |||
| 168 | rtsd r15, 8; /* MS: jump back */ | ||
| 169 | nop; | ||
| 170 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
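Note: the `_mcount` assembly above is easier to follow next to a C rendering of the same dispatch logic. The sketch below is not part of the patch; it is a hedged paraphrase of the static (non-`CONFIG_DYNAMIC_FTRACE`) path, and the extern declarations and the `ip`/`parent_ip` names are simplified for the sketch.

```c
/* Sketch only: what the static _mcount path above does, expressed in C. */
extern int function_trace_stop;                 /* HAVE_FUNCTION_TRACE_MCOUNT_TEST flag */
extern void ftrace_stub(unsigned long ip, unsigned long parent_ip);
extern void (*ftrace_trace_function)(unsigned long ip, unsigned long parent_ip);

static void mcount_sketch(unsigned long ip, unsigned long parent_ip)
{
	/* "lwi r5, r0, function_trace_stop; bneid r5, end" */
	if (function_trace_stop)
		return;

	/* "static normal trace": call out only if a real tracer is installed */
	if (ftrace_trace_function != ftrace_stub)
		ftrace_trace_function(ip, parent_ip);   /* "brald r15, r20" */
}
```

In the assembly, r5/r6 carry `ip` and the parent address, and r20 holds the loaded `ftrace_trace_function` pointer.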
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c index 59ff20e33e0c..ff85f7718035 100644 --- a/arch/microblaze/kernel/microblaze_ksyms.c +++ b/arch/microblaze/kernel/microblaze_ksyms.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
| 19 | #include <asm/page.h> | 19 | #include <asm/page.h> |
| 20 | #include <asm/system.h> | 20 | #include <asm/system.h> |
| 21 | #include <linux/ftrace.h> | ||
| 21 | #include <linux/uaccess.h> | 22 | #include <linux/uaccess.h> |
| 22 | 23 | ||
| 23 | /* | 24 | /* |
| @@ -47,3 +48,18 @@ extern void __umodsi3(void); | |||
| 47 | EXPORT_SYMBOL(__umodsi3); | 48 | EXPORT_SYMBOL(__umodsi3); |
| 48 | extern char *_ebss; | 49 | extern char *_ebss; |
| 49 | EXPORT_SYMBOL_GPL(_ebss); | 50 | EXPORT_SYMBOL_GPL(_ebss); |
| 51 | #ifdef CONFIG_FUNCTION_TRACER | ||
| 52 | extern void _mcount(void); | ||
| 53 | EXPORT_SYMBOL(_mcount); | ||
| 54 | #endif | ||
| 55 | |||
| 56 | /* | ||
| 57 | * Assembly functions that may be used (directly or indirectly) by modules | ||
| 58 | */ | ||
| 59 | EXPORT_SYMBOL(__copy_tofrom_user); | ||
| 60 | EXPORT_SYMBOL(__strncpy_user); | ||
| 61 | |||
| 62 | #ifdef CONFIG_OPT_LIB_ASM | ||
| 63 | EXPORT_SYMBOL(memcpy); | ||
| 64 | EXPORT_SYMBOL(memmove); | ||
| 65 | #endif | ||
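The new `EXPORT_SYMBOL(_mcount)` matters because, with `CONFIG_FUNCTION_TRACER=y`, modules are compiled with `-pg` and the compiler inserts a call to `_mcount` at the start of every function; no module source ever calls it explicitly. A purely illustrative module (hypothetical name, not part of this patch) that would fail to load without these exports:

```c
/* Illustrative sketch only. Built with CONFIG_FUNCTION_TRACER=y, every function
 * here gets a compiler-inserted call to _mcount; the memcpy below resolves
 * against the EXPORT_SYMBOL(memcpy) added above when CONFIG_OPT_LIB_ASM=y. */
#include <linux/module.h>
#include <linux/string.h>

static char buf[16];

static int __init export_demo_init(void)   /* hypothetical module init */
{
	memcpy(buf, "microblaze", 11);
	return 0;
}

static void __exit export_demo_exit(void)
{
}

module_init(export_demo_init);
module_exit(export_demo_exit);
MODULE_LICENSE("GPL");
```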
diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S index df16c6287a8e..0fb5fc6c1fc2 100644 --- a/arch/microblaze/kernel/misc.S +++ b/arch/microblaze/kernel/misc.S | |||
| @@ -26,9 +26,10 @@ | |||
| 26 | * We avoid flushing the pinned 0, 1 and possibly 2 entries. | 26 | * We avoid flushing the pinned 0, 1 and possibly 2 entries. |
| 27 | */ | 27 | */ |
| 28 | .globl _tlbia; | 28 | .globl _tlbia; |
| 29 | .type _tlbia, @function | ||
| 29 | .align 4; | 30 | .align 4; |
| 30 | _tlbia: | 31 | _tlbia: |
| 31 | addik r12, r0, 63 /* flush all entries (63 - 3) */ | 32 | addik r12, r0, MICROBLAZE_TLB_SIZE - 1 /* flush all entries (63 - 3) */ |
| 32 | /* isync */ | 33 | /* isync */ |
| 33 | _tlbia_1: | 34 | _tlbia_1: |
| 34 | mts rtlbx, r12 | 35 | mts rtlbx, r12 |
| @@ -41,11 +42,13 @@ _tlbia_1: | |||
| 41 | /* sync */ | 42 | /* sync */ |
| 42 | rtsd r15, 8 | 43 | rtsd r15, 8 |
| 43 | nop | 44 | nop |
| 45 | .size _tlbia, . - _tlbia | ||
| 44 | 46 | ||
| 45 | /* | 47 | /* |
| 46 | * Flush MMU TLB for a particular address (in r5) | 48 | * Flush MMU TLB for a particular address (in r5) |
| 47 | */ | 49 | */ |
| 48 | .globl _tlbie; | 50 | .globl _tlbie; |
| 51 | .type _tlbie, @function | ||
| 49 | .align 4; | 52 | .align 4; |
| 50 | _tlbie: | 53 | _tlbie: |
| 51 | mts rtlbsx, r5 /* look up the address in TLB */ | 54 | mts rtlbsx, r5 /* look up the address in TLB */ |
| @@ -59,17 +62,20 @@ _tlbie_1: | |||
| 59 | rtsd r15, 8 | 62 | rtsd r15, 8 |
| 60 | nop | 63 | nop |
| 61 | 64 | ||
| 65 | .size _tlbie, . - _tlbie | ||
| 66 | |||
| 62 | /* | 67 | /* |
| 63 | * Allocate TLB entry for early console | 68 | * Allocate TLB entry for early console |
| 64 | */ | 69 | */ |
| 65 | .globl early_console_reg_tlb_alloc; | 70 | .globl early_console_reg_tlb_alloc; |
| 71 | .type early_console_reg_tlb_alloc, @function | ||
| 66 | .align 4; | 72 | .align 4; |
| 67 | early_console_reg_tlb_alloc: | 73 | early_console_reg_tlb_alloc: |
| 68 | /* | 74 | /* |
| 69 | * Load a TLB entry for the UART, so that microblaze_progress() can use | 75 | * Load a TLB entry for the UART, so that microblaze_progress() can use |
| 70 | * the UARTs nice and early. We use a 4k real==virtual mapping. | 76 | * the UARTs nice and early. We use a 4k real==virtual mapping. |
| 71 | */ | 77 | */ |
| 72 | ori r4, r0, 63 | 78 | ori r4, r0, MICROBLAZE_TLB_SIZE - 1 |
| 73 | mts rtlbx, r4 /* TLB slot 2 */ | 79 | mts rtlbx, r4 /* TLB slot 2 */ |
| 74 | 80 | ||
| 75 | or r4,r5,r0 | 81 | or r4,r5,r0 |
| @@ -86,35 +92,4 @@ early_console_reg_tlb_alloc: | |||
| 86 | rtsd r15, 8 | 92 | rtsd r15, 8 |
| 87 | nop | 93 | nop |
| 88 | 94 | ||
| 89 | /* | 95 | .size early_console_reg_tlb_alloc, . - early_console_reg_tlb_alloc |
| 90 | * Copy a whole page (4096 bytes). | ||
| 91 | */ | ||
| 92 | #define COPY_16_BYTES \ | ||
| 93 | lwi r7, r6, 0; \ | ||
| 94 | lwi r8, r6, 4; \ | ||
| 95 | lwi r9, r6, 8; \ | ||
| 96 | lwi r10, r6, 12; \ | ||
| 97 | swi r7, r5, 0; \ | ||
| 98 | swi r8, r5, 4; \ | ||
| 99 | swi r9, r5, 8; \ | ||
| 100 | swi r10, r5, 12 | ||
| 101 | |||
| 102 | |||
| 103 | /* FIXME DCACHE_LINE_BYTES (CONFIG_XILINX_MICROBLAZE0_DCACHE_LINE_LEN * 4)*/ | ||
| 104 | #define DCACHE_LINE_BYTES (4 * 4) | ||
| 105 | |||
| 106 | .globl copy_page; | ||
| 107 | .align 4; | ||
| 108 | copy_page: | ||
| 109 | ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1 | ||
| 110 | _copy_page_loop: | ||
| 111 | COPY_16_BYTES | ||
| 112 | #if DCACHE_LINE_BYTES >= 32 | ||
| 113 | COPY_16_BYTES | ||
| 114 | #endif | ||
| 115 | addik r6, r6, DCACHE_LINE_BYTES | ||
| 116 | addik r5, r5, DCACHE_LINE_BYTES | ||
| 117 | bneid r11, _copy_page_loop | ||
| 118 | addik r11, r11, -1 | ||
| 119 | rtsd r15, 8 | ||
| 120 | nop | ||
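The `copy_page` routine and its `COPY_16_BYTES` helper are deleted from misc.S here (a replacement is not visible in this hunk). For reference, a rough C rendering of what the removed loop did:

```c
/* Rough C rendering of the removed copy_page loop (reference sketch only). */
#include <stdint.h>

#define SKETCH_PAGE_SIZE          4096
#define SKETCH_DCACHE_LINE_BYTES  (4 * 4)   /* hard-coded FIXME value in the old code */

static void copy_page_sketch(uint32_t *to, const uint32_t *from)
{
	int lines = SKETCH_PAGE_SIZE / SKETCH_DCACHE_LINE_BYTES;   /* 256 iterations */

	while (lines--) {
		/* COPY_16_BYTES: four word loads, then four word stores */
		to[0] = from[0];
		to[1] = from[1];
		to[2] = from[2];
		to[3] = from[3];
		to   += 4;
		from += 4;
	}
}
```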
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c index 5a45b1adfef1..0e73f6606547 100644 --- a/arch/microblaze/kernel/module.c +++ b/arch/microblaze/kernel/module.c | |||
| @@ -12,11 +12,11 @@ | |||
| 12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
| 13 | #include <linux/elf.h> | 13 | #include <linux/elf.h> |
| 14 | #include <linux/vmalloc.h> | 14 | #include <linux/vmalloc.h> |
| 15 | #include <linux/slab.h> | ||
| 16 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
| 17 | #include <linux/string.h> | 16 | #include <linux/string.h> |
| 18 | 17 | ||
| 19 | #include <asm/pgtable.h> | 18 | #include <asm/pgtable.h> |
| 19 | #include <asm/cacheflush.h> | ||
| 20 | 20 | ||
| 21 | void *module_alloc(unsigned long size) | 21 | void *module_alloc(unsigned long size) |
| 22 | { | 22 | { |
| @@ -152,6 +152,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, | |||
| 152 | int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, | 152 | int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, |
| 153 | struct module *module) | 153 | struct module *module) |
| 154 | { | 154 | { |
| 155 | flush_dcache(); | ||
| 155 | return 0; | 156 | return 0; |
| 156 | } | 157 | } |
| 157 | 158 | ||
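The only functional change to module.c is the `flush_dcache()` call in `module_finalize()`: relocations are patched through the data cache, and flushing it presumably keeps the instruction cache view of the freshly patched module text coherent. The function as it reads after this hunk (sketch, surrounding code unchanged):

```c
int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
		    struct module *module)
{
	/* relocations were written via cached stores; push them out so the
	 * patched module text is visible to instruction fetches */
	flush_dcache();
	return 0;
}
```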
diff --git a/arch/microblaze/kernel/of_platform.c b/arch/microblaze/kernel/of_platform.c index acf4574d0f18..0dc755286d38 100644 --- a/arch/microblaze/kernel/of_platform.c +++ b/arch/microblaze/kernel/of_platform.c | |||
| @@ -17,7 +17,6 @@ | |||
| 17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
| 18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
| 19 | #include <linux/mod_devicetable.h> | 19 | #include <linux/mod_devicetable.h> |
| 20 | #include <linux/slab.h> | ||
| 21 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
| 22 | #include <linux/of.h> | 21 | #include <linux/of.h> |
| 23 | #include <linux/of_device.h> | 22 | #include <linux/of_device.h> |
| @@ -185,7 +184,7 @@ EXPORT_SYMBOL(of_find_device_by_node); | |||
| 185 | static int of_dev_phandle_match(struct device *dev, void *data) | 184 | static int of_dev_phandle_match(struct device *dev, void *data) |
| 186 | { | 185 | { |
| 187 | phandle *ph = data; | 186 | phandle *ph = data; |
| 188 | return to_of_device(dev)->node->linux_phandle == *ph; | 187 | return to_of_device(dev)->node->phandle == *ph; |
| 189 | } | 188 | } |
| 190 | 189 | ||
| 191 | struct of_device *of_find_device_by_phandle(phandle ph) | 190 | struct of_device *of_find_device_by_phandle(phandle ph) |
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index c592d475b3d8..09bed44dfcd3 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c | |||
| @@ -15,6 +15,8 @@ | |||
| 15 | #include <linux/bitops.h> | 15 | #include <linux/bitops.h> |
| 16 | #include <asm/system.h> | 16 | #include <asm/system.h> |
| 17 | #include <asm/pgalloc.h> | 17 | #include <asm/pgalloc.h> |
| 18 | #include <asm/uaccess.h> /* for USER_DS macros */ | ||
| 19 | #include <asm/cacheflush.h> | ||
| 18 | 20 | ||
| 19 | void show_regs(struct pt_regs *regs) | 21 | void show_regs(struct pt_regs *regs) |
| 20 | { | 22 | { |
| @@ -73,7 +75,10 @@ __setup("hlt", hlt_setup); | |||
| 73 | 75 | ||
| 74 | void default_idle(void) | 76 | void default_idle(void) |
| 75 | { | 77 | { |
| 76 | if (!hlt_counter) { | 78 | if (likely(hlt_counter)) { |
| 79 | while (!need_resched()) | ||
| 80 | cpu_relax(); | ||
| 81 | } else { | ||
| 77 | clear_thread_flag(TIF_POLLING_NRFLAG); | 82 | clear_thread_flag(TIF_POLLING_NRFLAG); |
| 78 | smp_mb__after_clear_bit(); | 83 | smp_mb__after_clear_bit(); |
| 79 | local_irq_disable(); | 84 | local_irq_disable(); |
| @@ -81,9 +86,7 @@ void default_idle(void) | |||
| 81 | cpu_sleep(); | 86 | cpu_sleep(); |
| 82 | local_irq_enable(); | 87 | local_irq_enable(); |
| 83 | set_thread_flag(TIF_POLLING_NRFLAG); | 88 | set_thread_flag(TIF_POLLING_NRFLAG); |
| 84 | } else | 89 | } |
| 85 | while (!need_resched()) | ||
| 86 | cpu_relax(); | ||
| 87 | } | 90 | } |
| 88 | 91 | ||
| 89 | void cpu_idle(void) | 92 | void cpu_idle(void) |
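The two process.c hunks above restructure `default_idle()` so the `hlt_counter` test leads with the polling branch marked `likely()`. Read together (one unchanged context line between the hunks is not reproduced here), the function ends up shaped like this sketch:

```c
/* Sketch assembled from the two hunks above; not a verbatim copy. */
void default_idle(void)
{
	if (likely(hlt_counter)) {
		while (!need_resched())
			cpu_relax();
	} else {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();
		local_irq_disable();
		/* ... one context line elided between the hunks ... */
		cpu_sleep();
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);
	}
}
```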
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c index c005cc6f1aaf..a15ef6d67ca9 100644 --- a/arch/microblaze/kernel/prom.c +++ b/arch/microblaze/kernel/prom.c | |||
| @@ -42,697 +42,20 @@ | |||
| 42 | #include <asm/sections.h> | 42 | #include <asm/sections.h> |
| 43 | #include <asm/pci-bridge.h> | 43 | #include <asm/pci-bridge.h> |
| 44 | 44 | ||
| 45 | static int __initdata dt_root_addr_cells; | 45 | void __init early_init_dt_scan_chosen_arch(unsigned long node) |
| 46 | static int __initdata dt_root_size_cells; | ||
| 47 | |||
| 48 | typedef u32 cell_t; | ||
| 49 | |||
| 50 | static struct boot_param_header *initial_boot_params; | ||
| 51 | |||
| 52 | /* export that to outside world */ | ||
| 53 | struct device_node *of_chosen; | ||
| 54 | |||
| 55 | static inline char *find_flat_dt_string(u32 offset) | ||
| 56 | { | ||
| 57 | return ((char *)initial_boot_params) + | ||
| 58 | initial_boot_params->off_dt_strings + offset; | ||
| 59 | } | ||
| 60 | |||
| 61 | /** | ||
| 62 | * This function is used to scan the flattened device-tree, it is | ||
| 63 | * used to extract the memory informations at boot before we can | ||
| 64 | * unflatten the tree | ||
| 65 | */ | ||
| 66 | int __init of_scan_flat_dt(int (*it)(unsigned long node, | ||
| 67 | const char *uname, int depth, | ||
| 68 | void *data), | ||
| 69 | void *data) | ||
| 70 | { | ||
| 71 | unsigned long p = ((unsigned long)initial_boot_params) + | ||
| 72 | initial_boot_params->off_dt_struct; | ||
| 73 | int rc = 0; | ||
| 74 | int depth = -1; | ||
| 75 | |||
| 76 | do { | ||
| 77 | u32 tag = *((u32 *)p); | ||
| 78 | char *pathp; | ||
| 79 | |||
| 80 | p += 4; | ||
| 81 | if (tag == OF_DT_END_NODE) { | ||
| 82 | depth--; | ||
| 83 | continue; | ||
| 84 | } | ||
| 85 | if (tag == OF_DT_NOP) | ||
| 86 | continue; | ||
| 87 | if (tag == OF_DT_END) | ||
| 88 | break; | ||
| 89 | if (tag == OF_DT_PROP) { | ||
| 90 | u32 sz = *((u32 *)p); | ||
| 91 | p += 8; | ||
| 92 | if (initial_boot_params->version < 0x10) | ||
| 93 | p = _ALIGN(p, sz >= 8 ? 8 : 4); | ||
| 94 | p += sz; | ||
| 95 | p = _ALIGN(p, 4); | ||
| 96 | continue; | ||
| 97 | } | ||
| 98 | if (tag != OF_DT_BEGIN_NODE) { | ||
| 99 | printk(KERN_WARNING "Invalid tag %x scanning flattened" | ||
| 100 | " device tree !\n", tag); | ||
| 101 | return -EINVAL; | ||
| 102 | } | ||
| 103 | depth++; | ||
| 104 | pathp = (char *)p; | ||
| 105 | p = _ALIGN(p + strlen(pathp) + 1, 4); | ||
| 106 | if ((*pathp) == '/') { | ||
| 107 | char *lp, *np; | ||
| 108 | for (lp = NULL, np = pathp; *np; np++) | ||
| 109 | if ((*np) == '/') | ||
| 110 | lp = np+1; | ||
| 111 | if (lp != NULL) | ||
| 112 | pathp = lp; | ||
| 113 | } | ||
| 114 | rc = it(p, pathp, depth, data); | ||
| 115 | if (rc != 0) | ||
| 116 | break; | ||
| 117 | } while (1); | ||
| 118 | |||
| 119 | return rc; | ||
| 120 | } | ||
| 121 | |||
| 122 | unsigned long __init of_get_flat_dt_root(void) | ||
| 123 | { | ||
| 124 | unsigned long p = ((unsigned long)initial_boot_params) + | ||
| 125 | initial_boot_params->off_dt_struct; | ||
| 126 | |||
| 127 | while (*((u32 *)p) == OF_DT_NOP) | ||
| 128 | p += 4; | ||
| 129 | BUG_ON(*((u32 *)p) != OF_DT_BEGIN_NODE); | ||
| 130 | p += 4; | ||
| 131 | return _ALIGN(p + strlen((char *)p) + 1, 4); | ||
| 132 | } | ||
| 133 | |||
| 134 | /** | ||
| 135 | * This function can be used within scan_flattened_dt callback to get | ||
| 136 | * access to properties | ||
| 137 | */ | ||
| 138 | void *__init of_get_flat_dt_prop(unsigned long node, const char *name, | ||
| 139 | unsigned long *size) | ||
| 140 | { | ||
| 141 | unsigned long p = node; | ||
| 142 | |||
| 143 | do { | ||
| 144 | u32 tag = *((u32 *)p); | ||
| 145 | u32 sz, noff; | ||
| 146 | const char *nstr; | ||
| 147 | |||
| 148 | p += 4; | ||
| 149 | if (tag == OF_DT_NOP) | ||
| 150 | continue; | ||
| 151 | if (tag != OF_DT_PROP) | ||
| 152 | return NULL; | ||
| 153 | |||
| 154 | sz = *((u32 *)p); | ||
| 155 | noff = *((u32 *)(p + 4)); | ||
| 156 | p += 8; | ||
| 157 | if (initial_boot_params->version < 0x10) | ||
| 158 | p = _ALIGN(p, sz >= 8 ? 8 : 4); | ||
| 159 | |||
| 160 | nstr = find_flat_dt_string(noff); | ||
| 161 | if (nstr == NULL) { | ||
| 162 | printk(KERN_WARNING "Can't find property index" | ||
| 163 | " name !\n"); | ||
| 164 | return NULL; | ||
| 165 | } | ||
| 166 | if (strcmp(name, nstr) == 0) { | ||
| 167 | if (size) | ||
| 168 | *size = sz; | ||
| 169 | return (void *)p; | ||
| 170 | } | ||
| 171 | p += sz; | ||
| 172 | p = _ALIGN(p, 4); | ||
| 173 | } while (1); | ||
| 174 | } | ||
| 175 | |||
| 176 | int __init of_flat_dt_is_compatible(unsigned long node, const char *compat) | ||
| 177 | { | ||
| 178 | const char *cp; | ||
| 179 | unsigned long cplen, l; | ||
| 180 | |||
| 181 | cp = of_get_flat_dt_prop(node, "compatible", &cplen); | ||
| 182 | if (cp == NULL) | ||
| 183 | return 0; | ||
| 184 | while (cplen > 0) { | ||
| 185 | if (strncasecmp(cp, compat, strlen(compat)) == 0) | ||
| 186 | return 1; | ||
| 187 | l = strlen(cp) + 1; | ||
| 188 | cp += l; | ||
| 189 | cplen -= l; | ||
| 190 | } | ||
| 191 | |||
| 192 | return 0; | ||
| 193 | } | ||
| 194 | |||
| 195 | static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size, | ||
| 196 | unsigned long align) | ||
| 197 | { | ||
| 198 | void *res; | ||
| 199 | |||
| 200 | *mem = _ALIGN(*mem, align); | ||
| 201 | res = (void *)*mem; | ||
| 202 | *mem += size; | ||
| 203 | |||
| 204 | return res; | ||
| 205 | } | ||
| 206 | |||
| 207 | static unsigned long __init unflatten_dt_node(unsigned long mem, | ||
| 208 | unsigned long *p, | ||
| 209 | struct device_node *dad, | ||
| 210 | struct device_node ***allnextpp, | ||
| 211 | unsigned long fpsize) | ||
| 212 | { | ||
| 213 | struct device_node *np; | ||
| 214 | struct property *pp, **prev_pp = NULL; | ||
| 215 | char *pathp; | ||
| 216 | u32 tag; | ||
| 217 | unsigned int l, allocl; | ||
| 218 | int has_name = 0; | ||
| 219 | int new_format = 0; | ||
| 220 | |||
| 221 | tag = *((u32 *)(*p)); | ||
| 222 | if (tag != OF_DT_BEGIN_NODE) { | ||
| 223 | printk("Weird tag at start of node: %x\n", tag); | ||
| 224 | return mem; | ||
| 225 | } | ||
| 226 | *p += 4; | ||
| 227 | pathp = (char *)*p; | ||
| 228 | l = allocl = strlen(pathp) + 1; | ||
| 229 | *p = _ALIGN(*p + l, 4); | ||
| 230 | |||
| 231 | /* version 0x10 has a more compact unit name here instead of the full | ||
| 232 | * path. we accumulate the full path size using "fpsize", we'll rebuild | ||
| 233 | * it later. We detect this because the first character of the name is | ||
| 234 | * not '/'. | ||
| 235 | */ | ||
| 236 | if ((*pathp) != '/') { | ||
| 237 | new_format = 1; | ||
| 238 | if (fpsize == 0) { | ||
| 239 | /* root node: special case. fpsize accounts for path | ||
| 240 | * plus terminating zero. root node only has '/', so | ||
| 241 | * fpsize should be 2, but we want to avoid the first | ||
| 242 | * level nodes to have two '/' so we use fpsize 1 here | ||
| 243 | */ | ||
| 244 | fpsize = 1; | ||
| 245 | allocl = 2; | ||
| 246 | } else { | ||
| 247 | /* account for '/' and path size minus terminal 0 | ||
| 248 | * already in 'l' | ||
| 249 | */ | ||
| 250 | fpsize += l; | ||
| 251 | allocl = fpsize; | ||
| 252 | } | ||
| 253 | } | ||
| 254 | |||
| 255 | np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl, | ||
| 256 | __alignof__(struct device_node)); | ||
| 257 | if (allnextpp) { | ||
| 258 | memset(np, 0, sizeof(*np)); | ||
| 259 | np->full_name = ((char *)np) + sizeof(struct device_node); | ||
| 260 | if (new_format) { | ||
| 261 | char *p2 = np->full_name; | ||
| 262 | /* rebuild full path for new format */ | ||
| 263 | if (dad && dad->parent) { | ||
| 264 | strcpy(p2, dad->full_name); | ||
| 265 | #ifdef DEBUG | ||
| 266 | if ((strlen(p2) + l + 1) != allocl) { | ||
| 267 | pr_debug("%s: p: %d, l: %d, a: %d\n", | ||
| 268 | pathp, (int)strlen(p2), | ||
| 269 | l, allocl); | ||
| 270 | } | ||
| 271 | #endif | ||
| 272 | p2 += strlen(p2); | ||
| 273 | } | ||
| 274 | *(p2++) = '/'; | ||
| 275 | memcpy(p2, pathp, l); | ||
| 276 | } else | ||
| 277 | memcpy(np->full_name, pathp, l); | ||
| 278 | prev_pp = &np->properties; | ||
| 279 | **allnextpp = np; | ||
| 280 | *allnextpp = &np->allnext; | ||
| 281 | if (dad != NULL) { | ||
| 282 | np->parent = dad; | ||
| 283 | /* we temporarily use the next field as `last_child'*/ | ||
| 284 | if (dad->next == NULL) | ||
| 285 | dad->child = np; | ||
| 286 | else | ||
| 287 | dad->next->sibling = np; | ||
| 288 | dad->next = np; | ||
| 289 | } | ||
| 290 | kref_init(&np->kref); | ||
| 291 | } | ||
| 292 | while (1) { | ||
| 293 | u32 sz, noff; | ||
| 294 | char *pname; | ||
| 295 | |||
| 296 | tag = *((u32 *)(*p)); | ||
| 297 | if (tag == OF_DT_NOP) { | ||
| 298 | *p += 4; | ||
| 299 | continue; | ||
| 300 | } | ||
| 301 | if (tag != OF_DT_PROP) | ||
| 302 | break; | ||
| 303 | *p += 4; | ||
| 304 | sz = *((u32 *)(*p)); | ||
| 305 | noff = *((u32 *)((*p) + 4)); | ||
| 306 | *p += 8; | ||
| 307 | if (initial_boot_params->version < 0x10) | ||
| 308 | *p = _ALIGN(*p, sz >= 8 ? 8 : 4); | ||
| 309 | |||
| 310 | pname = find_flat_dt_string(noff); | ||
| 311 | if (pname == NULL) { | ||
| 312 | printk(KERN_INFO | ||
| 313 | "Can't find property name in list !\n"); | ||
| 314 | break; | ||
| 315 | } | ||
| 316 | if (strcmp(pname, "name") == 0) | ||
| 317 | has_name = 1; | ||
| 318 | l = strlen(pname) + 1; | ||
| 319 | pp = unflatten_dt_alloc(&mem, sizeof(struct property), | ||
| 320 | __alignof__(struct property)); | ||
| 321 | if (allnextpp) { | ||
| 322 | if (strcmp(pname, "linux,phandle") == 0) { | ||
| 323 | np->node = *((u32 *)*p); | ||
| 324 | if (np->linux_phandle == 0) | ||
| 325 | np->linux_phandle = np->node; | ||
| 326 | } | ||
| 327 | if (strcmp(pname, "ibm,phandle") == 0) | ||
| 328 | np->linux_phandle = *((u32 *)*p); | ||
| 329 | pp->name = pname; | ||
| 330 | pp->length = sz; | ||
| 331 | pp->value = (void *)*p; | ||
| 332 | *prev_pp = pp; | ||
| 333 | prev_pp = &pp->next; | ||
| 334 | } | ||
| 335 | *p = _ALIGN((*p) + sz, 4); | ||
| 336 | } | ||
| 337 | /* with version 0x10 we may not have the name property, recreate | ||
| 338 | * it here from the unit name if absent | ||
| 339 | */ | ||
| 340 | if (!has_name) { | ||
| 341 | char *p1 = pathp, *ps = pathp, *pa = NULL; | ||
| 342 | int sz; | ||
| 343 | |||
| 344 | while (*p1) { | ||
| 345 | if ((*p1) == '@') | ||
| 346 | pa = p1; | ||
| 347 | if ((*p1) == '/') | ||
| 348 | ps = p1 + 1; | ||
| 349 | p1++; | ||
| 350 | } | ||
| 351 | if (pa < ps) | ||
| 352 | pa = p1; | ||
| 353 | sz = (pa - ps) + 1; | ||
| 354 | pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz, | ||
| 355 | __alignof__(struct property)); | ||
| 356 | if (allnextpp) { | ||
| 357 | pp->name = "name"; | ||
| 358 | pp->length = sz; | ||
| 359 | pp->value = pp + 1; | ||
| 360 | *prev_pp = pp; | ||
| 361 | prev_pp = &pp->next; | ||
| 362 | memcpy(pp->value, ps, sz - 1); | ||
| 363 | ((char *)pp->value)[sz - 1] = 0; | ||
| 364 | pr_debug("fixed up name for %s -> %s\n", pathp, | ||
| 365 | (char *)pp->value); | ||
| 366 | } | ||
| 367 | } | ||
| 368 | if (allnextpp) { | ||
| 369 | *prev_pp = NULL; | ||
| 370 | np->name = of_get_property(np, "name", NULL); | ||
| 371 | np->type = of_get_property(np, "device_type", NULL); | ||
| 372 | |||
| 373 | if (!np->name) | ||
| 374 | np->name = "<NULL>"; | ||
| 375 | if (!np->type) | ||
| 376 | np->type = "<NULL>"; | ||
| 377 | } | ||
| 378 | while (tag == OF_DT_BEGIN_NODE) { | ||
| 379 | mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize); | ||
| 380 | tag = *((u32 *)(*p)); | ||
| 381 | } | ||
| 382 | if (tag != OF_DT_END_NODE) { | ||
| 383 | printk(KERN_INFO "Weird tag at end of node: %x\n", tag); | ||
| 384 | return mem; | ||
| 385 | } | ||
| 386 | *p += 4; | ||
| 387 | return mem; | ||
| 388 | } | ||
| 389 | |||
| 390 | /** | ||
| 391 | * unflattens the device-tree passed by the firmware, creating the | ||
| 392 | * tree of struct device_node. It also fills the "name" and "type" | ||
| 393 | * pointers of the nodes so the normal device-tree walking functions | ||
| 394 | * can be used (this used to be done by finish_device_tree) | ||
| 395 | */ | ||
| 396 | void __init unflatten_device_tree(void) | ||
| 397 | { | ||
| 398 | unsigned long start, mem, size; | ||
| 399 | struct device_node **allnextp = &allnodes; | ||
| 400 | |||
| 401 | pr_debug(" -> unflatten_device_tree()\n"); | ||
| 402 | |||
| 403 | /* First pass, scan for size */ | ||
| 404 | start = ((unsigned long)initial_boot_params) + | ||
| 405 | initial_boot_params->off_dt_struct; | ||
| 406 | size = unflatten_dt_node(0, &start, NULL, NULL, 0); | ||
| 407 | size = (size | 3) + 1; | ||
| 408 | |||
| 409 | pr_debug(" size is %lx, allocating...\n", size); | ||
| 410 | |||
| 411 | /* Allocate memory for the expanded device tree */ | ||
| 412 | mem = lmb_alloc(size + 4, __alignof__(struct device_node)); | ||
| 413 | mem = (unsigned long) __va(mem); | ||
| 414 | |||
| 415 | ((u32 *)mem)[size / 4] = 0xdeadbeef; | ||
| 416 | |||
| 417 | pr_debug(" unflattening %lx...\n", mem); | ||
| 418 | |||
| 419 | /* Second pass, do actual unflattening */ | ||
| 420 | start = ((unsigned long)initial_boot_params) + | ||
| 421 | initial_boot_params->off_dt_struct; | ||
| 422 | unflatten_dt_node(mem, &start, NULL, &allnextp, 0); | ||
| 423 | if (*((u32 *)start) != OF_DT_END) | ||
| 424 | printk(KERN_WARNING "Weird tag at end of tree: %08x\n", | ||
| 425 | *((u32 *)start)); | ||
| 426 | if (((u32 *)mem)[size / 4] != 0xdeadbeef) | ||
| 427 | printk(KERN_WARNING "End of tree marker overwritten: %08x\n", | ||
| 428 | ((u32 *)mem)[size / 4]); | ||
| 429 | *allnextp = NULL; | ||
| 430 | |||
| 431 | /* Get pointer to OF "/chosen" node for use everywhere */ | ||
| 432 | of_chosen = of_find_node_by_path("/chosen"); | ||
| 433 | if (of_chosen == NULL) | ||
| 434 | of_chosen = of_find_node_by_path("/chosen@0"); | ||
| 435 | |||
| 436 | pr_debug(" <- unflatten_device_tree()\n"); | ||
| 437 | } | ||
| 438 | |||
| 439 | #define early_init_dt_scan_drconf_memory(node) 0 | ||
| 440 | |||
| 441 | static int __init early_init_dt_scan_cpus(unsigned long node, | ||
| 442 | const char *uname, int depth, | ||
| 443 | void *data) | ||
| 444 | { | ||
| 445 | static int logical_cpuid; | ||
| 446 | char *type = of_get_flat_dt_prop(node, "device_type", NULL); | ||
| 447 | const u32 *intserv; | ||
| 448 | int i, nthreads; | ||
| 449 | int found = 0; | ||
| 450 | |||
| 451 | /* We are scanning "cpu" nodes only */ | ||
| 452 | if (type == NULL || strcmp(type, "cpu") != 0) | ||
| 453 | return 0; | ||
| 454 | |||
| 455 | /* Get physical cpuid */ | ||
| 456 | intserv = of_get_flat_dt_prop(node, "reg", NULL); | ||
| 457 | nthreads = 1; | ||
| 458 | |||
| 459 | /* | ||
| 460 | * Now see if any of these threads match our boot cpu. | ||
| 461 | * NOTE: This must match the parsing done in smp_setup_cpu_maps. | ||
| 462 | */ | ||
| 463 | for (i = 0; i < nthreads; i++) { | ||
| 464 | /* | ||
| 465 | * version 2 of the kexec param format adds the phys cpuid of | ||
| 466 | * booted proc. | ||
| 467 | */ | ||
| 468 | if (initial_boot_params && initial_boot_params->version >= 2) { | ||
| 469 | if (intserv[i] == | ||
| 470 | initial_boot_params->boot_cpuid_phys) { | ||
| 471 | found = 1; | ||
| 472 | break; | ||
| 473 | } | ||
| 474 | } else { | ||
| 475 | /* | ||
| 476 | * Check if it's the boot-cpu, set it's hw index now, | ||
| 477 | * unfortunately this format did not support booting | ||
| 478 | * off secondary threads. | ||
| 479 | */ | ||
| 480 | if (of_get_flat_dt_prop(node, | ||
| 481 | "linux,boot-cpu", NULL) != NULL) { | ||
| 482 | found = 1; | ||
| 483 | break; | ||
| 484 | } | ||
| 485 | } | ||
| 486 | |||
| 487 | #ifdef CONFIG_SMP | ||
| 488 | /* logical cpu id is always 0 on UP kernels */ | ||
| 489 | logical_cpuid++; | ||
| 490 | #endif | ||
| 491 | } | ||
| 492 | |||
| 493 | if (found) { | ||
| 494 | pr_debug("boot cpu: logical %d physical %d\n", logical_cpuid, | ||
| 495 | intserv[i]); | ||
| 496 | boot_cpuid = logical_cpuid; | ||
| 497 | } | ||
| 498 | |||
| 499 | return 0; | ||
| 500 | } | ||
| 501 | |||
| 502 | #ifdef CONFIG_BLK_DEV_INITRD | ||
| 503 | static void __init early_init_dt_check_for_initrd(unsigned long node) | ||
| 504 | { | ||
| 505 | unsigned long l; | ||
| 506 | u32 *prop; | ||
| 507 | |||
| 508 | pr_debug("Looking for initrd properties... "); | ||
| 509 | |||
| 510 | prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l); | ||
| 511 | if (prop) { | ||
| 512 | initrd_start = (unsigned long) | ||
| 513 | __va((u32)of_read_ulong(prop, l/4)); | ||
| 514 | |||
| 515 | prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l); | ||
| 516 | if (prop) { | ||
| 517 | initrd_end = (unsigned long) | ||
| 518 | __va((u32)of_read_ulong(prop, 1/4)); | ||
| 519 | initrd_below_start_ok = 1; | ||
| 520 | } else { | ||
| 521 | initrd_start = 0; | ||
| 522 | } | ||
| 523 | } | ||
| 524 | |||
| 525 | pr_debug("initrd_start=0x%lx initrd_end=0x%lx\n", | ||
| 526 | initrd_start, initrd_end); | ||
| 527 | } | ||
| 528 | #else | ||
| 529 | static inline void early_init_dt_check_for_initrd(unsigned long node) | ||
| 530 | { | ||
| 531 | } | ||
| 532 | #endif /* CONFIG_BLK_DEV_INITRD */ | ||
| 533 | |||
| 534 | static int __init early_init_dt_scan_chosen(unsigned long node, | ||
| 535 | const char *uname, int depth, void *data) | ||
| 536 | { | ||
| 537 | unsigned long l; | ||
| 538 | char *p; | ||
| 539 | |||
| 540 | pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname); | ||
| 541 | |||
| 542 | if (depth != 1 || | ||
| 543 | (strcmp(uname, "chosen") != 0 && | ||
| 544 | strcmp(uname, "chosen@0") != 0)) | ||
| 545 | return 0; | ||
| 546 | |||
| 547 | #ifdef CONFIG_KEXEC | ||
| 548 | lprop = (u64 *)of_get_flat_dt_prop(node, | ||
| 549 | "linux,crashkernel-base", NULL); | ||
| 550 | if (lprop) | ||
| 551 | crashk_res.start = *lprop; | ||
| 552 | |||
| 553 | lprop = (u64 *)of_get_flat_dt_prop(node, | ||
| 554 | "linux,crashkernel-size", NULL); | ||
| 555 | if (lprop) | ||
| 556 | crashk_res.end = crashk_res.start + *lprop - 1; | ||
| 557 | #endif | ||
| 558 | |||
| 559 | early_init_dt_check_for_initrd(node); | ||
| 560 | |||
| 561 | /* Retreive command line */ | ||
| 562 | p = of_get_flat_dt_prop(node, "bootargs", &l); | ||
| 563 | if (p != NULL && l > 0) | ||
| 564 | strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE)); | ||
| 565 | |||
| 566 | #ifdef CONFIG_CMDLINE | ||
| 567 | #ifndef CONFIG_CMDLINE_FORCE | ||
| 568 | if (p == NULL || l == 0 || (l == 1 && (*p) == 0)) | ||
| 569 | #endif | ||
| 570 | strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); | ||
| 571 | #endif /* CONFIG_CMDLINE */ | ||
| 572 | |||
| 573 | pr_debug("Command line is: %s\n", cmd_line); | ||
| 574 | |||
| 575 | /* break now */ | ||
| 576 | return 1; | ||
| 577 | } | ||
| 578 | |||
| 579 | static int __init early_init_dt_scan_root(unsigned long node, | ||
| 580 | const char *uname, int depth, void *data) | ||
| 581 | { | ||
| 582 | u32 *prop; | ||
| 583 | |||
| 584 | if (depth != 0) | ||
| 585 | return 0; | ||
| 586 | |||
| 587 | prop = of_get_flat_dt_prop(node, "#size-cells", NULL); | ||
| 588 | dt_root_size_cells = (prop == NULL) ? 1 : *prop; | ||
| 589 | pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells); | ||
| 590 | |||
| 591 | prop = of_get_flat_dt_prop(node, "#address-cells", NULL); | ||
| 592 | dt_root_addr_cells = (prop == NULL) ? 2 : *prop; | ||
| 593 | pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells); | ||
| 594 | |||
| 595 | /* break now */ | ||
| 596 | return 1; | ||
| 597 | } | ||
| 598 | |||
| 599 | static u64 __init dt_mem_next_cell(int s, cell_t **cellp) | ||
| 600 | { | ||
| 601 | cell_t *p = *cellp; | ||
| 602 | |||
| 603 | *cellp = p + s; | ||
| 604 | return of_read_number(p, s); | ||
| 605 | } | ||
| 606 | |||
| 607 | static int __init early_init_dt_scan_memory(unsigned long node, | ||
| 608 | const char *uname, int depth, void *data) | ||
| 609 | { | 46 | { |
| 610 | char *type = of_get_flat_dt_prop(node, "device_type", NULL); | 47 | /* No Microblaze specific code here */ |
| 611 | cell_t *reg, *endp; | ||
| 612 | unsigned long l; | ||
| 613 | |||
| 614 | /* Look for the ibm,dynamic-reconfiguration-memory node */ | ||
| 615 | /* if (depth == 1 && | ||
| 616 | strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0) | ||
| 617 | return early_init_dt_scan_drconf_memory(node); | ||
| 618 | */ | ||
| 619 | /* We are scanning "memory" nodes only */ | ||
| 620 | if (type == NULL) { | ||
| 621 | /* | ||
| 622 | * The longtrail doesn't have a device_type on the | ||
| 623 | * /memory node, so look for the node called /memory@0. | ||
| 624 | */ | ||
| 625 | if (depth != 1 || strcmp(uname, "memory@0") != 0) | ||
| 626 | return 0; | ||
| 627 | } else if (strcmp(type, "memory") != 0) | ||
| 628 | return 0; | ||
| 629 | |||
| 630 | reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l); | ||
| 631 | if (reg == NULL) | ||
| 632 | reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l); | ||
| 633 | if (reg == NULL) | ||
| 634 | return 0; | ||
| 635 | |||
| 636 | endp = reg + (l / sizeof(cell_t)); | ||
| 637 | |||
| 638 | pr_debug("memory scan node %s, reg size %ld, data: %x %x %x %x,\n", | ||
| 639 | uname, l, reg[0], reg[1], reg[2], reg[3]); | ||
| 640 | |||
| 641 | while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { | ||
| 642 | u64 base, size; | ||
| 643 | |||
| 644 | base = dt_mem_next_cell(dt_root_addr_cells, ®); | ||
| 645 | size = dt_mem_next_cell(dt_root_size_cells, ®); | ||
| 646 | |||
| 647 | if (size == 0) | ||
| 648 | continue; | ||
| 649 | pr_debug(" - %llx , %llx\n", (unsigned long long)base, | ||
| 650 | (unsigned long long)size); | ||
| 651 | |||
| 652 | lmb_add(base, size); | ||
| 653 | } | ||
| 654 | return 0; | ||
| 655 | } | 48 | } |
| 656 | 49 | ||
| 657 | #ifdef CONFIG_PHYP_DUMP | 50 | void __init early_init_dt_add_memory_arch(u64 base, u64 size) |
| 658 | /** | ||
| 659 | * phyp_dump_calculate_reserve_size() - reserve variable boot area 5% or arg | ||
| 660 | * | ||
| 661 | * Function to find the largest size we need to reserve | ||
| 662 | * during early boot process. | ||
| 663 | * | ||
| 664 | * It either looks for boot param and returns that OR | ||
| 665 | * returns larger of 256 or 5% rounded down to multiples of 256MB. | ||
| 666 | * | ||
| 667 | */ | ||
| 668 | static inline unsigned long phyp_dump_calculate_reserve_size(void) | ||
| 669 | { | 51 | { |
| 670 | unsigned long tmp; | 52 | lmb_add(base, size); |
| 671 | |||
| 672 | if (phyp_dump_info->reserve_bootvar) | ||
| 673 | return phyp_dump_info->reserve_bootvar; | ||
| 674 | |||
| 675 | /* divide by 20 to get 5% of value */ | ||
| 676 | tmp = lmb_end_of_DRAM(); | ||
| 677 | do_div(tmp, 20); | ||
| 678 | |||
| 679 | /* round it down in multiples of 256 */ | ||
| 680 | tmp = tmp & ~0x0FFFFFFFUL; | ||
| 681 | |||
| 682 | return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END); | ||
| 683 | } | 53 | } |
| 684 | 54 | ||
| 685 | /** | 55 | u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align) |
| 686 | * phyp_dump_reserve_mem() - reserve all not-yet-dumped mmemory | ||
| 687 | * | ||
| 688 | * This routine may reserve memory regions in the kernel only | ||
| 689 | * if the system is supported and a dump was taken in last | ||
| 690 | * boot instance or if the hardware is supported and the | ||
| 691 | * scratch area needs to be setup. In other instances it returns | ||
| 692 | * without reserving anything. The memory in case of dump being | ||
| 693 | * active is freed when the dump is collected (by userland tools). | ||
| 694 | */ | ||
| 695 | static void __init phyp_dump_reserve_mem(void) | ||
| 696 | { | 56 | { |
| 697 | unsigned long base, size; | 57 | return lmb_alloc(size, align); |
| 698 | unsigned long variable_reserve_size; | ||
| 699 | |||
| 700 | if (!phyp_dump_info->phyp_dump_configured) { | ||
| 701 | printk(KERN_ERR "Phyp-dump not supported on this hardware\n"); | ||
| 702 | return; | ||
| 703 | } | ||
| 704 | |||
| 705 | if (!phyp_dump_info->phyp_dump_at_boot) { | ||
| 706 | printk(KERN_INFO "Phyp-dump disabled at boot time\n"); | ||
| 707 | return; | ||
| 708 | } | ||
| 709 | |||
| 710 | variable_reserve_size = phyp_dump_calculate_reserve_size(); | ||
| 711 | |||
| 712 | if (phyp_dump_info->phyp_dump_is_active) { | ||
| 713 | /* Reserve *everything* above RMR.Area freed by userland tools*/ | ||
| 714 | base = variable_reserve_size; | ||
| 715 | size = lmb_end_of_DRAM() - base; | ||
| 716 | |||
| 717 | /* XXX crashed_ram_end is wrong, since it may be beyond | ||
| 718 | * the memory_limit, it will need to be adjusted. */ | ||
| 719 | lmb_reserve(base, size); | ||
| 720 | |||
| 721 | phyp_dump_info->init_reserve_start = base; | ||
| 722 | phyp_dump_info->init_reserve_size = size; | ||
| 723 | } else { | ||
| 724 | size = phyp_dump_info->cpu_state_size + | ||
| 725 | phyp_dump_info->hpte_region_size + | ||
| 726 | variable_reserve_size; | ||
| 727 | base = lmb_end_of_DRAM() - size; | ||
| 728 | lmb_reserve(base, size); | ||
| 729 | phyp_dump_info->init_reserve_start = base; | ||
| 730 | phyp_dump_info->init_reserve_size = size; | ||
| 731 | } | ||
| 732 | } | 58 | } |
| 733 | #else | ||
| 734 | static inline void __init phyp_dump_reserve_mem(void) {} | ||
| 735 | #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */ | ||
| 736 | 59 | ||
| 737 | #ifdef CONFIG_EARLY_PRINTK | 60 | #ifdef CONFIG_EARLY_PRINTK |
| 738 | /* MS: this is a Microblaze specific function */ | 61 | /* MS: this is a Microblaze specific function */ |
| @@ -775,11 +98,6 @@ void __init early_init_devtree(void *params) | |||
| 775 | /* Setup flat device-tree pointer */ | 98 | /* Setup flat device-tree pointer */ |
| 776 | initial_boot_params = params; | 99 | initial_boot_params = params; |
| 777 | 100 | ||
| 778 | #ifdef CONFIG_PHYP_DUMP | ||
| 779 | /* scan tree to see if dump occured during last boot */ | ||
| 780 | of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL); | ||
| 781 | #endif | ||
| 782 | |||
| 783 | /* Retrieve various informations from the /chosen node of the | 101 | /* Retrieve various informations from the /chosen node of the |
| 784 | * device-tree, including the platform type, initrd location and | 102 | * device-tree, including the platform type, initrd location and |
| 785 | * size, TCE reserve, and more ... | 103 | * size, TCE reserve, and more ... |
| @@ -799,33 +117,18 @@ void __init early_init_devtree(void *params) | |||
| 799 | 117 | ||
| 800 | pr_debug("Phys. mem: %lx\n", (unsigned long) lmb_phys_mem_size()); | 118 | pr_debug("Phys. mem: %lx\n", (unsigned long) lmb_phys_mem_size()); |
| 801 | 119 | ||
| 802 | pr_debug("Scanning CPUs ...\n"); | ||
| 803 | |||
| 804 | /* Retreive CPU related informations from the flat tree | ||
| 805 | * (altivec support, boot CPU ID, ...) | ||
| 806 | */ | ||
| 807 | of_scan_flat_dt(early_init_dt_scan_cpus, NULL); | ||
| 808 | |||
| 809 | pr_debug(" <- early_init_devtree()\n"); | 120 | pr_debug(" <- early_init_devtree()\n"); |
| 810 | } | 121 | } |
| 811 | 122 | ||
| 812 | /** | 123 | #ifdef CONFIG_BLK_DEV_INITRD |
| 813 | * Indicates whether the root node has a given value in its | 124 | void __init early_init_dt_setup_initrd_arch(unsigned long start, |
| 814 | * compatible property. | 125 | unsigned long end) |
| 815 | */ | ||
| 816 | int machine_is_compatible(const char *compat) | ||
| 817 | { | 126 | { |
| 818 | struct device_node *root; | 127 | initrd_start = (unsigned long)__va(start); |
| 819 | int rc = 0; | 128 | initrd_end = (unsigned long)__va(end); |
| 820 | 129 | initrd_below_start_ok = 1; | |
| 821 | root = of_find_node_by_path("/"); | ||
| 822 | if (root) { | ||
| 823 | rc = of_device_is_compatible(root, compat); | ||
| 824 | of_node_put(root); | ||
| 825 | } | ||
| 826 | return rc; | ||
| 827 | } | 130 | } |
| 828 | EXPORT_SYMBOL(machine_is_compatible); | 131 | #endif |
| 829 | 132 | ||
| 830 | /******* | 133 | /******* |
| 831 | * | 134 | * |
| @@ -838,296 +141,6 @@ EXPORT_SYMBOL(machine_is_compatible); | |||
| 838 | * | 141 | * |
| 839 | *******/ | 142 | *******/ |
| 840 | 143 | ||
| 841 | /** | ||
| 842 | * of_find_node_by_phandle - Find a node given a phandle | ||
| 843 | * @handle: phandle of the node to find | ||
| 844 | * | ||
| 845 | * Returns a node pointer with refcount incremented, use | ||
| 846 | * of_node_put() on it when done. | ||
| 847 | */ | ||
| 848 | struct device_node *of_find_node_by_phandle(phandle handle) | ||
| 849 | { | ||
| 850 | struct device_node *np; | ||
| 851 | |||
| 852 | read_lock(&devtree_lock); | ||
| 853 | for (np = allnodes; np != NULL; np = np->allnext) | ||
| 854 | if (np->linux_phandle == handle) | ||
| 855 | break; | ||
| 856 | of_node_get(np); | ||
| 857 | read_unlock(&devtree_lock); | ||
| 858 | return np; | ||
| 859 | } | ||
| 860 | EXPORT_SYMBOL(of_find_node_by_phandle); | ||
| 861 | |||
| 862 | /** | ||
| 863 | * of_find_all_nodes - Get next node in global list | ||
| 864 | * @prev: Previous node or NULL to start iteration | ||
| 865 | * of_node_put() will be called on it | ||
| 866 | * | ||
| 867 | * Returns a node pointer with refcount incremented, use | ||
| 868 | * of_node_put() on it when done. | ||
| 869 | */ | ||
| 870 | struct device_node *of_find_all_nodes(struct device_node *prev) | ||
| 871 | { | ||
| 872 | struct device_node *np; | ||
| 873 | |||
| 874 | read_lock(&devtree_lock); | ||
| 875 | np = prev ? prev->allnext : allnodes; | ||
| 876 | for (; np != NULL; np = np->allnext) | ||
| 877 | if (of_node_get(np)) | ||
| 878 | break; | ||
| 879 | of_node_put(prev); | ||
| 880 | read_unlock(&devtree_lock); | ||
| 881 | return np; | ||
| 882 | } | ||
| 883 | EXPORT_SYMBOL(of_find_all_nodes); | ||
| 884 | |||
| 885 | /** | ||
| 886 | * of_node_get - Increment refcount of a node | ||
| 887 | * @node: Node to inc refcount, NULL is supported to | ||
| 888 | * simplify writing of callers | ||
| 889 | * | ||
| 890 | * Returns node. | ||
| 891 | */ | ||
| 892 | struct device_node *of_node_get(struct device_node *node) | ||
| 893 | { | ||
| 894 | if (node) | ||
| 895 | kref_get(&node->kref); | ||
| 896 | return node; | ||
| 897 | } | ||
| 898 | EXPORT_SYMBOL(of_node_get); | ||
| 899 | |||
| 900 | static inline struct device_node *kref_to_device_node(struct kref *kref) | ||
| 901 | { | ||
| 902 | return container_of(kref, struct device_node, kref); | ||
| 903 | } | ||
| 904 | |||
| 905 | /** | ||
| 906 | * of_node_release - release a dynamically allocated node | ||
| 907 | * @kref: kref element of the node to be released | ||
| 908 | * | ||
| 909 | * In of_node_put() this function is passed to kref_put() | ||
| 910 | * as the destructor. | ||
| 911 | */ | ||
| 912 | static void of_node_release(struct kref *kref) | ||
| 913 | { | ||
| 914 | struct device_node *node = kref_to_device_node(kref); | ||
| 915 | struct property *prop = node->properties; | ||
| 916 | |||
| 917 | /* We should never be releasing nodes that haven't been detached. */ | ||
| 918 | if (!of_node_check_flag(node, OF_DETACHED)) { | ||
| 919 | printk(KERN_INFO "WARNING: Bad of_node_put() on %s\n", | ||
| 920 | node->full_name); | ||
| 921 | dump_stack(); | ||
| 922 | kref_init(&node->kref); | ||
| 923 | return; | ||
| 924 | } | ||
| 925 | |||
| 926 | if (!of_node_check_flag(node, OF_DYNAMIC)) | ||
| 927 | return; | ||
| 928 | |||
| 929 | while (prop) { | ||
| 930 | struct property *next = prop->next; | ||
| 931 | kfree(prop->name); | ||
| 932 | kfree(prop->value); | ||
| 933 | kfree(prop); | ||
| 934 | prop = next; | ||
| 935 | |||
| 936 | if (!prop) { | ||
| 937 | prop = node->deadprops; | ||
| 938 | node->deadprops = NULL; | ||
| 939 | } | ||
| 940 | } | ||
| 941 | kfree(node->full_name); | ||
| 942 | kfree(node->data); | ||
| 943 | kfree(node); | ||
| 944 | } | ||
| 945 | |||
| 946 | /** | ||
| 947 | * of_node_put - Decrement refcount of a node | ||
| 948 | * @node: Node to dec refcount, NULL is supported to | ||
| 949 | * simplify writing of callers | ||
| 950 | * | ||
| 951 | */ | ||
| 952 | void of_node_put(struct device_node *node) | ||
| 953 | { | ||
| 954 | if (node) | ||
| 955 | kref_put(&node->kref, of_node_release); | ||
| 956 | } | ||
| 957 | EXPORT_SYMBOL(of_node_put); | ||
| 958 | |||
| 959 | /* | ||
| 960 | * Plug a device node into the tree and global list. | ||
| 961 | */ | ||
| 962 | void of_attach_node(struct device_node *np) | ||
| 963 | { | ||
| 964 | unsigned long flags; | ||
| 965 | |||
| 966 | write_lock_irqsave(&devtree_lock, flags); | ||
| 967 | np->sibling = np->parent->child; | ||
| 968 | np->allnext = allnodes; | ||
| 969 | np->parent->child = np; | ||
| 970 | allnodes = np; | ||
| 971 | write_unlock_irqrestore(&devtree_lock, flags); | ||
| 972 | } | ||
| 973 | |||
| 974 | /* | ||
| 975 | * "Unplug" a node from the device tree. The caller must hold | ||
| 976 | * a reference to the node. The memory associated with the node | ||
| 977 | * is not freed until its refcount goes to zero. | ||
| 978 | */ | ||
| 979 | void of_detach_node(struct device_node *np) | ||
| 980 | { | ||
| 981 | struct device_node *parent; | ||
| 982 | unsigned long flags; | ||
| 983 | |||
| 984 | write_lock_irqsave(&devtree_lock, flags); | ||
| 985 | |||
| 986 | parent = np->parent; | ||
| 987 | if (!parent) | ||
| 988 | goto out_unlock; | ||
| 989 | |||
| 990 | if (allnodes == np) | ||
| 991 | allnodes = np->allnext; | ||
| 992 | else { | ||
| 993 | struct device_node *prev; | ||
| 994 | for (prev = allnodes; | ||
| 995 | prev->allnext != np; | ||
| 996 | prev = prev->allnext) | ||
| 997 | ; | ||
| 998 | prev->allnext = np->allnext; | ||
| 999 | } | ||
| 1000 | |||
| 1001 | if (parent->child == np) | ||
| 1002 | parent->child = np->sibling; | ||
| 1003 | else { | ||
| 1004 | struct device_node *prevsib; | ||
| 1005 | for (prevsib = np->parent->child; | ||
| 1006 | prevsib->sibling != np; | ||
| 1007 | prevsib = prevsib->sibling) | ||
| 1008 | ; | ||
| 1009 | prevsib->sibling = np->sibling; | ||
| 1010 | } | ||
| 1011 | |||
| 1012 | of_node_set_flag(np, OF_DETACHED); | ||
| 1013 | |||
| 1014 | out_unlock: | ||
| 1015 | write_unlock_irqrestore(&devtree_lock, flags); | ||
| 1016 | } | ||
| 1017 | |||
| 1018 | /* | ||
| 1019 | * Add a property to a node | ||
| 1020 | */ | ||
| 1021 | int prom_add_property(struct device_node *np, struct property *prop) | ||
| 1022 | { | ||
| 1023 | struct property **next; | ||
| 1024 | unsigned long flags; | ||
| 1025 | |||
| 1026 | prop->next = NULL; | ||
| 1027 | write_lock_irqsave(&devtree_lock, flags); | ||
| 1028 | next = &np->properties; | ||
| 1029 | while (*next) { | ||
| 1030 | if (strcmp(prop->name, (*next)->name) == 0) { | ||
| 1031 | /* duplicate ! don't insert it */ | ||
| 1032 | write_unlock_irqrestore(&devtree_lock, flags); | ||
| 1033 | return -1; | ||
| 1034 | } | ||
| 1035 | next = &(*next)->next; | ||
| 1036 | } | ||
| 1037 | *next = prop; | ||
| 1038 | write_unlock_irqrestore(&devtree_lock, flags); | ||
| 1039 | |||
| 1040 | #ifdef CONFIG_PROC_DEVICETREE | ||
| 1041 | /* try to add to proc as well if it was initialized */ | ||
| 1042 | if (np->pde) | ||
| 1043 | proc_device_tree_add_prop(np->pde, prop); | ||
| 1044 | #endif /* CONFIG_PROC_DEVICETREE */ | ||
| 1045 | |||
| 1046 | return 0; | ||
| 1047 | } | ||
| 1048 | |||
| 1049 | /* | ||
| 1050 | * Remove a property from a node. Note that we don't actually | ||
| 1051 | * remove it, since we have given out who-knows-how-many pointers | ||
| 1052 | * to the data using get-property. Instead we just move the property | ||
| 1053 | * to the "dead properties" list, so it won't be found any more. | ||
| 1054 | */ | ||
| 1055 | int prom_remove_property(struct device_node *np, struct property *prop) | ||
| 1056 | { | ||
| 1057 | struct property **next; | ||
| 1058 | unsigned long flags; | ||
| 1059 | int found = 0; | ||
| 1060 | |||
| 1061 | write_lock_irqsave(&devtree_lock, flags); | ||
| 1062 | next = &np->properties; | ||
| 1063 | while (*next) { | ||
| 1064 | if (*next == prop) { | ||
| 1065 | /* found the node */ | ||
| 1066 | *next = prop->next; | ||
| 1067 | prop->next = np->deadprops; | ||
| 1068 | np->deadprops = prop; | ||
| 1069 | found = 1; | ||
| 1070 | break; | ||
| 1071 | } | ||
| 1072 | next = &(*next)->next; | ||
| 1073 | } | ||
| 1074 | write_unlock_irqrestore(&devtree_lock, flags); | ||
| 1075 | |||
| 1076 | if (!found) | ||
| 1077 | return -ENODEV; | ||
| 1078 | |||
| 1079 | #ifdef CONFIG_PROC_DEVICETREE | ||
| 1080 | /* try to remove the proc node as well */ | ||
| 1081 | if (np->pde) | ||
| 1082 | proc_device_tree_remove_prop(np->pde, prop); | ||
| 1083 | #endif /* CONFIG_PROC_DEVICETREE */ | ||
| 1084 | |||
| 1085 | return 0; | ||
| 1086 | } | ||
| 1087 | |||
| 1088 | /* | ||
| 1089 | * Update a property in a node. Note that we don't actually | ||
| 1090 | * remove it, since we have given out who-knows-how-many pointers | ||
| 1091 | * to the data using get-property. Instead we just move the property | ||
| 1092 | * to the "dead properties" list, and add the new property to the | ||
| 1093 | * property list | ||
| 1094 | */ | ||
| 1095 | int prom_update_property(struct device_node *np, | ||
| 1096 | struct property *newprop, | ||
| 1097 | struct property *oldprop) | ||
| 1098 | { | ||
| 1099 | struct property **next; | ||
| 1100 | unsigned long flags; | ||
| 1101 | int found = 0; | ||
| 1102 | |||
| 1103 | write_lock_irqsave(&devtree_lock, flags); | ||
| 1104 | next = &np->properties; | ||
| 1105 | while (*next) { | ||
| 1106 | if (*next == oldprop) { | ||
| 1107 | /* found the node */ | ||
| 1108 | newprop->next = oldprop->next; | ||
| 1109 | *next = newprop; | ||
| 1110 | oldprop->next = np->deadprops; | ||
| 1111 | np->deadprops = oldprop; | ||
| 1112 | found = 1; | ||
| 1113 | break; | ||
| 1114 | } | ||
| 1115 | next = &(*next)->next; | ||
| 1116 | } | ||
| 1117 | write_unlock_irqrestore(&devtree_lock, flags); | ||
| 1118 | |||
| 1119 | if (!found) | ||
| 1120 | return -ENODEV; | ||
| 1121 | |||
| 1122 | #ifdef CONFIG_PROC_DEVICETREE | ||
| 1123 | /* try to add to proc as well if it was initialized */ | ||
| 1124 | if (np->pde) | ||
| 1125 | proc_device_tree_update_prop(np->pde, newprop, oldprop); | ||
| 1126 | #endif /* CONFIG_PROC_DEVICETREE */ | ||
| 1127 | |||
| 1128 | return 0; | ||
| 1129 | } | ||
| 1130 | |||
| 1131 | #if defined(CONFIG_DEBUG_FS) && defined(DEBUG) | 144 | #if defined(CONFIG_DEBUG_FS) && defined(DEBUG) |
| 1132 | static struct debugfs_blob_wrapper flat_dt_blob; | 145 | static struct debugfs_blob_wrapper flat_dt_blob; |
| 1133 | 146 | ||
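Most of prom.c disappears here: the flattened-device-tree scanning/unflattening helpers and the node/property manipulation routines are dropped, presumably in favour of the generic OF/FDT code, and the arch is left providing only a handful of small hooks. Collected from the added (right-hand) lines scattered through the hunk, the new arch-side surface is roughly:

```c
/* Assembled from the added lines in the hunk above (sketch). */
void __init early_init_dt_scan_chosen_arch(unsigned long node)
{
	/* No Microblaze specific code here */
}

void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
	lmb_add(base, size);
}

u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
	return lmb_alloc(size, align);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init early_init_dt_setup_initrd_arch(unsigned long start,
					    unsigned long end)
{
	initrd_start = (unsigned long)__va(start);
	initrd_end = (unsigned long)__va(end);
	initrd_below_start_ok = 1;
}
#endif
```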
diff --git a/arch/microblaze/kernel/prom_parse.c b/arch/microblaze/kernel/prom_parse.c index ae0352ecd5a9..bf7e6c27e318 100644 --- a/arch/microblaze/kernel/prom_parse.c +++ b/arch/microblaze/kernel/prom_parse.c | |||
| @@ -256,7 +256,7 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) | |||
| 256 | if (ppdev == NULL) { | 256 | if (ppdev == NULL) { |
| 257 | struct pci_controller *host; | 257 | struct pci_controller *host; |
| 258 | host = pci_bus_to_host(pdev->bus); | 258 | host = pci_bus_to_host(pdev->bus); |
| 259 | ppnode = host ? host->arch_data : NULL; | 259 | ppnode = host ? host->dn : NULL; |
| 260 | /* No node for host bridge ? give up */ | 260 | /* No node for host bridge ? give up */ |
| 261 | if (ppnode == NULL) | 261 | if (ppnode == NULL) |
| 262 | return -EINVAL; | 262 | return -EINVAL; |
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c index 4b3ac32754de..a4a7770c6140 100644 --- a/arch/microblaze/kernel/ptrace.c +++ b/arch/microblaze/kernel/ptrace.c | |||
| @@ -75,29 +75,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
| 75 | { | 75 | { |
| 76 | int rval; | 76 | int rval; |
| 77 | unsigned long val = 0; | 77 | unsigned long val = 0; |
| 78 | unsigned long copied; | ||
| 79 | 78 | ||
| 80 | switch (request) { | 79 | switch (request) { |
| 81 | case PTRACE_PEEKTEXT: /* read word at location addr. */ | ||
| 82 | case PTRACE_PEEKDATA: | ||
| 83 | pr_debug("PEEKTEXT/PEEKDATA at %08lX\n", addr); | ||
| 84 | copied = access_process_vm(child, addr, &val, sizeof(val), 0); | ||
| 85 | rval = -EIO; | ||
| 86 | if (copied != sizeof(val)) | ||
| 87 | break; | ||
| 88 | rval = put_user(val, (unsigned long *)data); | ||
| 89 | break; | ||
| 90 | |||
| 91 | case PTRACE_POKETEXT: /* write the word at location addr. */ | ||
| 92 | case PTRACE_POKEDATA: | ||
| 93 | pr_debug("POKETEXT/POKEDATA to %08lX\n", addr); | ||
| 94 | rval = 0; | ||
| 95 | if (access_process_vm(child, addr, &data, sizeof(data), 1) | ||
| 96 | == sizeof(data)) | ||
| 97 | break; | ||
| 98 | rval = -EIO; | ||
| 99 | break; | ||
| 100 | |||
| 101 | /* Read/write the word at location ADDR in the registers. */ | 80 | /* Read/write the word at location ADDR in the registers. */ |
| 102 | case PTRACE_PEEKUSR: | 81 | case PTRACE_PEEKUSR: |
| 103 | case PTRACE_POKEUSR: | 82 | case PTRACE_POKEUSR: |
| @@ -130,50 +109,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
| 130 | if (rval == 0 && request == PTRACE_PEEKUSR) | 109 | if (rval == 0 && request == PTRACE_PEEKUSR) |
| 131 | rval = put_user(val, (unsigned long *)data); | 110 | rval = put_user(val, (unsigned long *)data); |
| 132 | break; | 111 | break; |
| 133 | /* Continue and stop at next (return from) syscall */ | ||
| 134 | case PTRACE_SYSCALL: | ||
| 135 | pr_debug("PTRACE_SYSCALL\n"); | ||
| 136 | case PTRACE_SINGLESTEP: | ||
| 137 | pr_debug("PTRACE_SINGLESTEP\n"); | ||
| 138 | /* Restart after a signal. */ | ||
| 139 | case PTRACE_CONT: | ||
| 140 | pr_debug("PTRACE_CONT\n"); | ||
| 141 | rval = -EIO; | ||
| 142 | if (!valid_signal(data)) | ||
| 143 | break; | ||
| 144 | |||
| 145 | if (request == PTRACE_SYSCALL) | ||
| 146 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
| 147 | else | ||
| 148 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
| 149 | |||
| 150 | child->exit_code = data; | ||
| 151 | pr_debug("wakeup_process\n"); | ||
| 152 | wake_up_process(child); | ||
| 153 | rval = 0; | ||
| 154 | break; | ||
| 155 | |||
| 156 | /* | ||
| 157 | * make the child exit. Best I can do is send it a sigkill. | ||
| 158 | * perhaps it should be put in the status that it wants to | ||
| 159 | * exit. | ||
| 160 | */ | ||
| 161 | case PTRACE_KILL: | ||
| 162 | pr_debug("PTRACE_KILL\n"); | ||
| 163 | rval = 0; | ||
| 164 | if (child->exit_state == EXIT_ZOMBIE) /* already dead */ | ||
| 165 | break; | ||
| 166 | child->exit_code = SIGKILL; | ||
| 167 | wake_up_process(child); | ||
| 168 | break; | ||
| 169 | |||
| 170 | case PTRACE_DETACH: /* detach a process that was attached. */ | ||
| 171 | pr_debug("PTRACE_DETACH\n"); | ||
| 172 | rval = ptrace_detach(child, data); | ||
| 173 | break; | ||
| 174 | default: | 112 | default: |
| 175 | /* rval = ptrace_request(child, request, addr, data); noMMU */ | 113 | rval = ptrace_request(child, request, addr, data); |
| 176 | rval = -EIO; | ||
| 177 | } | 114 | } |
| 178 | return rval; | 115 | return rval; |
| 179 | } | 116 | } |
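The ptrace change removes the open-coded PEEKTEXT/POKETEXT, CONT, SYSCALL, SINGLESTEP, KILL and DETACH cases and routes the default case to the generic `ptrace_request()`, which already implements them. The resulting control flow, with the retained register-access body summarised as a hypothetical `reg_access()` placeholder, looks roughly like:

```c
/* Sketch of the resulting arch_ptrace(); reg_access() is a placeholder for the
 * unchanged PEEKUSR/POKEUSR body shown in the context lines above. */
static long reg_access(struct task_struct *child, long request,
		       long addr, long data);

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	long rval;

	switch (request) {
	case PTRACE_PEEKUSR:
	case PTRACE_POKEUSR:
		rval = reg_access(child, request, addr, data);
		break;
	default:
		/* everything the removed cases handled by hand now goes
		 * through the generic helper */
		rval = ptrace_request(child, request, addr, data);
	}
	return rval;
}
```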
diff --git a/arch/microblaze/kernel/reset.c b/arch/microblaze/kernel/reset.c new file mode 100644 index 000000000000..a1721a33042e --- /dev/null +++ b/arch/microblaze/kernel/reset.c | |||
| @@ -0,0 +1,140 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> | ||
| 3 | * Copyright (C) 2009 PetaLogix | ||
| 4 | * | ||
| 5 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 6 | * License. See the file "COPYING" in the main directory of this archive | ||
| 7 | * for more details. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <linux/init.h> | ||
| 11 | #include <linux/of_platform.h> | ||
| 12 | #include <asm/prom.h> | ||
| 13 | |||
| 14 | /* Trigger specific functions */ | ||
| 15 | #ifdef CONFIG_GPIOLIB | ||
| 16 | |||
| 17 | #include <linux/of_gpio.h> | ||
| 18 | |||
| 19 | static int handle; /* reset pin handle */ | ||
| 20 | static unsigned int reset_val; | ||
| 21 | |||
| 22 | static int of_reset_gpio_handle(void) | ||
| 23 | { | ||
| 24 | int ret; /* holds the reset gpio pin handle */ | ||
| 25 | struct device_node *root; /* root node */ | ||
| 26 | struct device_node *gpio; /* gpio node */ | ||
| 27 | struct of_gpio_chip *of_gc = NULL; | ||
| 28 | enum of_gpio_flags flags ; | ||
| 29 | const void *gpio_spec; | ||
| 30 | |||
| 31 | /* find out root node */ | ||
| 32 | root = of_find_node_by_path("/"); | ||
| 33 | |||
| 34 | /* get a handle to the gpio node so the reset pin can be allocated */ | ||
| 35 | ret = of_parse_phandles_with_args(root, "hard-reset-gpios", | ||
| 36 | "#gpio-cells", 0, &gpio, &gpio_spec); | ||
| 37 | if (ret) { | ||
| 38 | pr_debug("%s: can't parse gpios property\n", __func__); | ||
| 39 | goto err0; | ||
| 40 | } | ||
| 41 | |||
| 42 | of_gc = gpio->data; | ||
| 43 | if (!of_gc) { | ||
| 44 | pr_debug("%s: gpio controller %s isn't registered\n", | ||
| 45 | root->full_name, gpio->full_name); | ||
| 46 | ret = -ENODEV; | ||
| 47 | goto err1; | ||
| 48 | } | ||
| 49 | |||
| 50 | ret = of_gc->xlate(of_gc, root, gpio_spec, &flags); | ||
| 51 | if (ret < 0) | ||
| 52 | goto err1; | ||
| 53 | |||
| 54 | ret += of_gc->gc.base; | ||
| 55 | err1: | ||
| 56 | of_node_put(gpio); | ||
| 57 | err0: | ||
| 58 | pr_debug("%s exited with status %d\n", __func__, ret); | ||
| 59 | return ret; | ||
| 60 | } | ||
| 61 | |||
| 62 | void of_platform_reset_gpio_probe(void) | ||
| 63 | { | ||
| 64 | int ret; | ||
| 65 | handle = of_reset_gpio_handle(); | ||
| 66 | |||
| 67 | if (!gpio_is_valid(handle)) { | ||
| 68 | printk(KERN_INFO "Skipping unavailable RESET gpio %d (%s)\n", | ||
| 69 | handle, "reset"); | ||
| 70 | } | ||
| 71 | |||
| 72 | ret = gpio_request(handle, "reset"); | ||
| 73 | if (ret < 0) { | ||
| 74 | printk(KERN_INFO "GPIO pin is already allocated\n"); | ||
| 75 | return; | ||
| 76 | } | ||
| 77 | |||
| 78 | /* get current setup value */ | ||
| 79 | reset_val = gpio_get_value(handle); | ||
| 80 | /* FIXME maybe worth to perform any action */ | ||
| 81 | pr_debug("Reset: Gpio output state: 0x%x\n", reset_val); | ||
| 82 | |||
| 83 | /* Setup GPIO as output */ | ||
| 84 | ret = gpio_direction_output(handle, 0); | ||
| 85 | if (ret < 0) | ||
| 86 | goto err; | ||
| 87 | |||
| 88 | /* Setup output direction */ | ||
| 89 | gpio_set_value(handle, 0); | ||
| 90 | |||
| 91 | printk(KERN_INFO "RESET: Registered gpio device: %d, current val: %d\n", | ||
| 92 | handle, reset_val); | ||
| 93 | return; | ||
| 94 | err: | ||
| 95 | gpio_free(handle); | ||
| 96 | return; | ||
| 97 | } | ||
| 98 | |||
| 99 | |||
| 100 | static void gpio_system_reset(void) | ||
| 101 | { | ||
| 102 | gpio_set_value(handle, 1 - reset_val); | ||
| 103 | } | ||
| 104 | #else | ||
| 105 | #define gpio_system_reset() do {} while (0) | ||
| 106 | void of_platform_reset_gpio_probe(void) | ||
| 107 | { | ||
| 108 | return; | ||
| 109 | } | ||
| 110 | #endif | ||
| 111 | |||
| 112 | void machine_restart(char *cmd) | ||
| 113 | { | ||
| 114 | printk(KERN_NOTICE "Machine restart...\n"); | ||
| 115 | gpio_system_reset(); | ||
| 116 | dump_stack(); | ||
| 117 | while (1) | ||
| 118 | ; | ||
| 119 | } | ||
| 120 | |||
| 121 | void machine_shutdown(void) | ||
| 122 | { | ||
| 123 | printk(KERN_NOTICE "Machine shutdown...\n"); | ||
| 124 | while (1) | ||
| 125 | ; | ||
| 126 | } | ||
| 127 | |||
| 128 | void machine_halt(void) | ||
| 129 | { | ||
| 130 | printk(KERN_NOTICE "Machine halt...\n"); | ||
| 131 | while (1) | ||
| 132 | ; | ||
| 133 | } | ||
| 134 | |||
| 135 | void machine_power_off(void) | ||
| 136 | { | ||
| 137 | printk(KERN_NOTICE "Machine power off...\n"); | ||
| 138 | while (1) | ||
| 139 | ; | ||
| 140 | } | ||
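
The new reset.c above resets the board through a GPIO line described by the `hard-reset-gpios` property of the device-tree root node: the probe routine resolves the phandle to a pin number, requests it, and remembers its idle level so that `machine_restart()` can assert the opposite level. Stripped of the device-tree lookup, the gpiolib part reduces to the sketch below; `RESET_GPIO` is a placeholder pin number, not something defined by this patch.

```c
/*
 * Minimal sketch of driving a board reset line with gpiolib, assuming the
 * pin number (RESET_GPIO here) is already known. In reset.c it is resolved
 * at runtime from the "hard-reset-gpios" device-tree property instead.
 */
#include <linux/gpio.h>
#include <linux/kernel.h>

#define RESET_GPIO	0	/* placeholder pin number */

static unsigned int reset_idle_val;

static int example_reset_setup(void)
{
	int ret;

	ret = gpio_request(RESET_GPIO, "reset");
	if (ret < 0)
		return ret;

	/* remember the idle level so the reset pulse can invert it */
	reset_idle_val = gpio_get_value(RESET_GPIO);

	/* park the line in its inactive state */
	return gpio_direction_output(RESET_GPIO, reset_idle_val);
}

static void example_reset_trigger(void)
{
	/* drive the opposite level to assert reset */
	gpio_set_value(RESET_GPIO, 1 - reset_idle_val);
}
```
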
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index 8c1e0f4dcf18..17c98dbcec88 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c | |||
| @@ -22,7 +22,10 @@ | |||
| 22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
| 23 | #include <linux/bug.h> | 23 | #include <linux/bug.h> |
| 24 | #include <linux/param.h> | 24 | #include <linux/param.h> |
| 25 | #include <linux/pci.h> | ||
| 25 | #include <linux/cache.h> | 26 | #include <linux/cache.h> |
| 27 | #include <linux/of_platform.h> | ||
| 28 | #include <linux/dma-mapping.h> | ||
| 26 | #include <asm/cacheflush.h> | 29 | #include <asm/cacheflush.h> |
| 27 | #include <asm/entry.h> | 30 | #include <asm/entry.h> |
| 28 | #include <asm/cpuinfo.h> | 31 | #include <asm/cpuinfo.h> |
| @@ -52,16 +55,12 @@ void __init setup_arch(char **cmdline_p) | |||
| 52 | /* irq_early_init(); */ | 55 | /* irq_early_init(); */ |
| 53 | setup_cpuinfo(); | 56 | setup_cpuinfo(); |
| 54 | 57 | ||
| 55 | __invalidate_icache_all(); | 58 | microblaze_cache_init(); |
| 56 | __enable_icache(); | ||
| 57 | |||
| 58 | __invalidate_dcache_all(); | ||
| 59 | __enable_dcache(); | ||
| 60 | |||
| 61 | panic_timeout = 120; | ||
| 62 | 59 | ||
| 63 | setup_memory(); | 60 | setup_memory(); |
| 64 | 61 | ||
| 62 | xilinx_pci_init(); | ||
| 63 | |||
| 65 | #if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER) | 64 | #if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER) |
| 66 | printk(KERN_NOTICE "Self modified code enable\n"); | 65 | printk(KERN_NOTICE "Self modified code enable\n"); |
| 67 | #endif | 66 | #endif |
| @@ -93,6 +92,12 @@ inline unsigned get_romfs_len(unsigned *addr) | |||
| 93 | } | 92 | } |
| 94 | #endif /* CONFIG_MTD_UCLINUX_EBSS */ | 93 | #endif /* CONFIG_MTD_UCLINUX_EBSS */ |
| 95 | 94 | ||
| 95 | #if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE) | ||
| 96 | #define eprintk early_printk | ||
| 97 | #else | ||
| 98 | #define eprintk printk | ||
| 99 | #endif | ||
| 100 | |||
| 96 | void __init machine_early_init(const char *cmdline, unsigned int ram, | 101 | void __init machine_early_init(const char *cmdline, unsigned int ram, |
| 97 | unsigned int fdt, unsigned int msr) | 102 | unsigned int fdt, unsigned int msr) |
| 98 | { | 103 | { |
| @@ -131,6 +136,8 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, | |||
| 131 | strlcpy(cmd_line, cmdline, COMMAND_LINE_SIZE); | 136 | strlcpy(cmd_line, cmdline, COMMAND_LINE_SIZE); |
| 132 | #endif | 137 | #endif |
| 133 | 138 | ||
| 139 | lockdep_init(); | ||
| 140 | |||
| 134 | /* initialize device tree for usage in early_printk */ | 141 | /* initialize device tree for usage in early_printk */ |
| 135 | early_init_devtree((void *)_fdt_start); | 142 | early_init_devtree((void *)_fdt_start); |
| 136 | 143 | ||
| @@ -138,32 +145,32 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, | |||
| 138 | setup_early_printk(NULL); | 145 | setup_early_printk(NULL); |
| 139 | #endif | 146 | #endif |
| 140 | 147 | ||
| 141 | early_printk("Ramdisk addr 0x%08x, ", ram); | 148 | eprintk("Ramdisk addr 0x%08x, ", ram); |
| 142 | if (fdt) | 149 | if (fdt) |
| 143 | early_printk("FDT at 0x%08x\n", fdt); | 150 | eprintk("FDT at 0x%08x\n", fdt); |
| 144 | else | 151 | else |
| 145 | early_printk("Compiled-in FDT at 0x%08x\n", | 152 | eprintk("Compiled-in FDT at 0x%08x\n", |
| 146 | (unsigned int)_fdt_start); | 153 | (unsigned int)_fdt_start); |
| 147 | 154 | ||
| 148 | #ifdef CONFIG_MTD_UCLINUX | 155 | #ifdef CONFIG_MTD_UCLINUX |
| 149 | early_printk("Found romfs @ 0x%08x (0x%08x)\n", | 156 | eprintk("Found romfs @ 0x%08x (0x%08x)\n", |
| 150 | romfs_base, romfs_size); | 157 | romfs_base, romfs_size); |
| 151 | early_printk("#### klimit %p ####\n", old_klimit); | 158 | eprintk("#### klimit %p ####\n", old_klimit); |
| 152 | BUG_ON(romfs_size < 0); /* What else can we do? */ | 159 | BUG_ON(romfs_size < 0); /* What else can we do? */ |
| 153 | 160 | ||
| 154 | early_printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n", | 161 | eprintk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n", |
| 155 | romfs_size, romfs_base, (unsigned)&_ebss); | 162 | romfs_size, romfs_base, (unsigned)&_ebss); |
| 156 | 163 | ||
| 157 | early_printk("New klimit: 0x%08x\n", (unsigned)klimit); | 164 | eprintk("New klimit: 0x%08x\n", (unsigned)klimit); |
| 158 | #endif | 165 | #endif |
| 159 | 166 | ||
| 160 | #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR | 167 | #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR |
| 161 | if (msr) | 168 | if (msr) |
| 162 | early_printk("!!!Your kernel has setup MSR instruction but " | 169 | eprintk("!!!Your kernel has setup MSR instruction but " |
| 163 | "CPU don't have it %d\n", msr); | 170 | "CPU don't have it %d\n", msr); |
| 164 | #else | 171 | #else |
| 165 | if (!msr) | 172 | if (!msr) |
| 166 | early_printk("!!!Your kernel not setup MSR instruction but " | 173 | eprintk("!!!Your kernel not setup MSR instruction but " |
| 167 | "CPU have it %d\n", msr); | 174 | "CPU have it %d\n", msr); |
| 168 | #endif | 175 | #endif |
| 169 | 176 | ||
| @@ -187,31 +194,36 @@ static int microblaze_debugfs_init(void) | |||
| 187 | arch_initcall(microblaze_debugfs_init); | 194 | arch_initcall(microblaze_debugfs_init); |
| 188 | #endif | 195 | #endif |
| 189 | 196 | ||
| 190 | void machine_restart(char *cmd) | 197 | static int dflt_bus_notify(struct notifier_block *nb, |
| 198 | unsigned long action, void *data) | ||
| 191 | { | 199 | { |
| 192 | printk(KERN_NOTICE "Machine restart...\n"); | 200 | struct device *dev = data; |
| 193 | dump_stack(); | ||
| 194 | while (1) | ||
| 195 | ; | ||
| 196 | } | ||
| 197 | 201 | ||
| 198 | void machine_shutdown(void) | 202 | /* We are only interested in device addition */ |
| 199 | { | 203 | if (action != BUS_NOTIFY_ADD_DEVICE) |
| 200 | printk(KERN_NOTICE "Machine shutdown...\n"); | 204 | return 0; |
| 201 | while (1) | ||
| 202 | ; | ||
| 203 | } | ||
| 204 | 205 | ||
| 205 | void machine_halt(void) | 206 | set_dma_ops(dev, &dma_direct_ops); |
| 206 | { | 207 | |
| 207 | printk(KERN_NOTICE "Machine halt...\n"); | 208 | return NOTIFY_DONE; |
| 208 | while (1) | ||
| 209 | ; | ||
| 210 | } | 209 | } |
| 211 | 210 | ||
| 212 | void machine_power_off(void) | 211 | static struct notifier_block dflt_plat_bus_notifier = { |
| 212 | .notifier_call = dflt_bus_notify, | ||
| 213 | .priority = INT_MAX, | ||
| 214 | }; | ||
| 215 | |||
| 216 | static struct notifier_block dflt_of_bus_notifier = { | ||
| 217 | .notifier_call = dflt_bus_notify, | ||
| 218 | .priority = INT_MAX, | ||
| 219 | }; | ||
| 220 | |||
| 221 | static int __init setup_bus_notifier(void) | ||
| 213 | { | 222 | { |
| 214 | printk(KERN_NOTICE "Machine power off...\n"); | 223 | bus_register_notifier(&platform_bus_type, &dflt_plat_bus_notifier); |
| 215 | while (1) | 224 | bus_register_notifier(&of_platform_bus_type, &dflt_of_bus_notifier); |
| 216 | ; | 225 | |
| 226 | return 0; | ||
| 217 | } | 227 | } |
| 228 | |||
| 229 | arch_initcall(setup_bus_notifier); | ||
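
The setup.c hunk above does two things: the reset/halt/power-off stubs move out to reset.c, and a bus notifier is installed so that every device added to the platform and OF buses gets `dma_direct_ops` attached before any driver binds to it. The notifier pattern itself is generic; a sketch with illustrative names (`my_bus_notify`, `my_bus_nb`) follows. Registering with `.priority = INT_MAX` makes the callback run before lower-priority notifiers on the same bus.

```c
/*
 * Sketch of the bus-notifier pattern used above: hook device addition on a
 * bus and attach per-device data (here, DMA ops). Names are illustrative.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>

static int my_bus_notify(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct device *dev = data;

	/* only care about devices being added, not bound or removed */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return NOTIFY_DONE;

	set_dma_ops(dev, &dma_direct_ops);

	return NOTIFY_OK;
}

static struct notifier_block my_bus_nb = {
	.notifier_call = my_bus_notify,
	.priority = INT_MAX,	/* run before other notifiers */
};

static int __init my_notifier_init(void)
{
	return bus_register_notifier(&platform_bus_type, &my_bus_nb);
}
arch_initcall(my_notifier_init);
```
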
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c index 1c80e4fc40ce..d8d3bb396cd6 100644 --- a/arch/microblaze/kernel/signal.c +++ b/arch/microblaze/kernel/signal.c | |||
| @@ -44,7 +44,6 @@ | |||
| 44 | 44 | ||
| 45 | asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset, int in_sycall); | 45 | asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset, int in_sycall); |
| 46 | 46 | ||
| 47 | |||
| 48 | asmlinkage long | 47 | asmlinkage long |
| 49 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, | 48 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, |
| 50 | struct pt_regs *regs) | 49 | struct pt_regs *regs) |
| @@ -176,6 +175,11 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
| 176 | struct rt_sigframe __user *frame; | 175 | struct rt_sigframe __user *frame; |
| 177 | int err = 0; | 176 | int err = 0; |
| 178 | int signal; | 177 | int signal; |
| 178 | unsigned long address = 0; | ||
| 179 | #ifdef CONFIG_MMU | ||
| 180 | pmd_t *pmdp; | ||
| 181 | pte_t *ptep; | ||
| 182 | #endif | ||
| 179 | 183 | ||
| 180 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 184 | frame = get_sigframe(ka, regs, sizeof(*frame)); |
| 181 | 185 | ||
| @@ -216,8 +220,29 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
| 216 | Negative 8 offset because return is rtsd r15, 8 */ | 220 | Negative 8 offset because return is rtsd r15, 8 */ |
| 217 | regs->r15 = ((unsigned long)frame->tramp)-8; | 221 | regs->r15 = ((unsigned long)frame->tramp)-8; |
| 218 | 222 | ||
| 219 | __invalidate_cache_sigtramp((unsigned long)frame->tramp); | 223 | address = ((unsigned long)frame->tramp); |
| 220 | 224 | #ifdef CONFIG_MMU | |
| 225 | pmdp = pmd_offset(pud_offset( | ||
| 226 | pgd_offset(current->mm, address), | ||
| 227 | address), address); | ||
| 228 | |||
| 229 | preempt_disable(); | ||
| 230 | ptep = pte_offset_map(pmdp, address); | ||
| 231 | if (pte_present(*ptep)) { | ||
| 232 | address = (unsigned long) page_address(pte_page(*ptep)); | ||
| 233 | /* MS: add the offset within the page */ | ||
| 234 | address += ((unsigned long)frame->tramp) & ~PAGE_MASK; | ||
| 235 | /* MS: the address is virtual; convert it to physical */ | ||
| 236 | address = virt_to_phys(address); | ||
| 237 | invalidate_icache_range(address, address + 8); | ||
| 238 | flush_dcache_range(address, address + 8); | ||
| 239 | } | ||
| 240 | pte_unmap(ptep); | ||
| 241 | preempt_enable(); | ||
| 242 | #else | ||
| 243 | flush_icache_range(address, address + 8); | ||
| 244 | flush_dcache_range(address, address + 8); | ||
| 245 | #endif | ||
| 221 | if (err) | 246 | if (err) |
| 222 | goto give_sigsegv; | 247 | goto give_sigsegv; |
| 223 | 248 | ||
| @@ -233,6 +258,10 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
| 233 | 258 | ||
| 234 | set_fs(USER_DS); | 259 | set_fs(USER_DS); |
| 235 | 260 | ||
| 261 | /* the tracer may want to single-step inside the handler */ | ||
| 262 | if (test_thread_flag(TIF_SINGLESTEP)) | ||
| 263 | ptrace_notify(SIGTRAP); | ||
| 264 | |||
| 236 | #ifdef DEBUG_SIG | 265 | #ifdef DEBUG_SIG |
| 237 | printk(KERN_INFO "SIG deliver (%s:%d): sp=%p pc=%08lx\n", | 266 | printk(KERN_INFO "SIG deliver (%s:%d): sp=%p pc=%08lx\n", |
| 238 | current->comm, current->pid, frame, regs->pc); | 267 | current->comm, current->pid, frame, regs->pc); |
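
The signal.c change replaces `__invalidate_cache_sigtramp()` with explicit cache maintenance over the 8-byte sigreturn trampoline written onto the user stack: with an MMU the user virtual address is first walked down to a physical address, without one the ranges can be flushed directly. A condensed sketch of that noMMU path (the page-table walk elided, helper name invented):

```c
/*
 * Condensed sketch of the noMMU path above: after writing the two 32-bit
 * trampoline instructions to the signal frame, write the D-cache back and
 * discard stale I-cache lines before the CPU executes them.
 */
#include <asm/cacheflush.h>

static void sync_sigtramp(unsigned long tramp)
{
	/* the trampoline is two 32-bit instructions, i.e. 8 bytes */
	flush_dcache_range(tramp, tramp + 8);
	flush_icache_range(tramp, tramp + 8);
}
```
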
diff --git a/arch/microblaze/kernel/stacktrace.c b/arch/microblaze/kernel/stacktrace.c new file mode 100644 index 000000000000..123692f22647 --- /dev/null +++ b/arch/microblaze/kernel/stacktrace.c | |||
| @@ -0,0 +1,65 @@ | |||
| 1 | /* | ||
| 2 | * Stack trace support for Microblaze. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> | ||
| 5 | * Copyright (C) 2009 PetaLogix | ||
| 6 | * | ||
| 7 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 8 | * License. See the file "COPYING" in the main directory of this archive | ||
| 9 | * for more details. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/sched.h> | ||
| 13 | #include <linux/stacktrace.h> | ||
| 14 | #include <linux/thread_info.h> | ||
| 15 | #include <linux/ptrace.h> | ||
| 16 | #include <linux/module.h> | ||
| 17 | |||
| 18 | /* FIXME initial support */ | ||
| 19 | void save_stack_trace(struct stack_trace *trace) | ||
| 20 | { | ||
| 21 | unsigned long *sp; | ||
| 22 | unsigned long addr; | ||
| 23 | asm("addik %0, r1, 0" : "=r" (sp)); | ||
| 24 | |||
| 25 | while (!kstack_end(sp)) { | ||
| 26 | addr = *sp++; | ||
| 27 | if (__kernel_text_address(addr)) { | ||
| 28 | if (trace->skip > 0) | ||
| 29 | trace->skip--; | ||
| 30 | else | ||
| 31 | trace->entries[trace->nr_entries++] = addr; | ||
| 32 | |||
| 33 | if (trace->nr_entries >= trace->max_entries) | ||
| 34 | break; | ||
| 35 | } | ||
| 36 | } | ||
| 37 | } | ||
| 38 | EXPORT_SYMBOL_GPL(save_stack_trace); | ||
| 39 | |||
| 40 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | ||
| 41 | { | ||
| 42 | unsigned int *sp; | ||
| 43 | unsigned long addr; | ||
| 44 | |||
| 45 | struct thread_info *ti = task_thread_info(tsk); | ||
| 46 | |||
| 47 | if (tsk == current) | ||
| 48 | asm("addik %0, r1, 0" : "=r" (sp)); | ||
| 49 | else | ||
| 50 | sp = (unsigned int *)ti->cpu_context.r1; | ||
| 51 | |||
| 52 | while (!kstack_end(sp)) { | ||
| 53 | addr = *sp++; | ||
| 54 | if (__kernel_text_address(addr)) { | ||
| 55 | if (trace->skip > 0) | ||
| 56 | trace->skip--; | ||
| 57 | else | ||
| 58 | trace->entries[trace->nr_entries++] = addr; | ||
| 59 | |||
| 60 | if (trace->nr_entries >= trace->max_entries) | ||
| 61 | break; | ||
| 62 | } | ||
| 63 | } | ||
| 64 | } | ||
| 65 | EXPORT_SYMBOL_GPL(save_stack_trace_tsk); | ||
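
Both new stacktrace helpers scan the kernel stack word by word and record every value that looks like a kernel text address, honouring the caller-supplied `skip` and `max_entries` limits. A typical consumer fills in a `struct stack_trace` as in the sketch below; the array size and skip count are arbitrary example values, not taken from this patch.

```c
/*
 * Sketch of how save_stack_trace() is typically consumed; the entries
 * array size and the skip count are example values.
 */
#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void example_dump_trace(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 1,	/* drop this function itself */
	};
	unsigned int i;

	save_stack_trace(&trace);

	for (i = 0; i < trace.nr_entries; i++)
		printk(KERN_INFO "[<%08lx>] %pS\n",
		       entries[i], (void *)entries[i]);
}
```
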
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c index 07cabed4b947..f4e00b7f1259 100644 --- a/arch/microblaze/kernel/sys_microblaze.c +++ b/arch/microblaze/kernel/sys_microblaze.c | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | #include <linux/semaphore.h> | 30 | #include <linux/semaphore.h> |
| 31 | #include <linux/uaccess.h> | 31 | #include <linux/uaccess.h> |
| 32 | #include <linux/unistd.h> | 32 | #include <linux/unistd.h> |
| 33 | #include <linux/slab.h> | ||
| 33 | 34 | ||
| 34 | #include <asm/syscalls.h> | 35 | #include <asm/syscalls.h> |
| 35 | 36 | ||
| @@ -62,46 +63,14 @@ out: | |||
| 62 | return error; | 63 | return error; |
| 63 | } | 64 | } |
| 64 | 65 | ||
| 65 | asmlinkage long | ||
| 66 | sys_mmap2(unsigned long addr, unsigned long len, | ||
| 67 | unsigned long prot, unsigned long flags, | ||
| 68 | unsigned long fd, unsigned long pgoff) | ||
| 69 | { | ||
| 70 | struct file *file = NULL; | ||
| 71 | int ret = -EBADF; | ||
| 72 | |||
| 73 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
| 74 | if (!(flags & MAP_ANONYMOUS)) { | ||
| 75 | file = fget(fd); | ||
| 76 | if (!file) { | ||
| 77 | printk(KERN_INFO "no fd in mmap\r\n"); | ||
| 78 | goto out; | ||
| 79 | } | ||
| 80 | } | ||
| 81 | |||
| 82 | down_write(¤t->mm->mmap_sem); | ||
| 83 | ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
| 84 | up_write(¤t->mm->mmap_sem); | ||
| 85 | if (file) | ||
| 86 | fput(file); | ||
| 87 | out: | ||
| 88 | return ret; | ||
| 89 | } | ||
| 90 | |||
| 91 | asmlinkage long sys_mmap(unsigned long addr, unsigned long len, | 66 | asmlinkage long sys_mmap(unsigned long addr, unsigned long len, |
| 92 | unsigned long prot, unsigned long flags, | 67 | unsigned long prot, unsigned long flags, |
| 93 | unsigned long fd, off_t pgoff) | 68 | unsigned long fd, off_t pgoff) |
| 94 | { | 69 | { |
| 95 | int err = -EINVAL; | 70 | if (pgoff & ~PAGE_MASK) |
| 96 | 71 | return -EINVAL; | |
| 97 | if (pgoff & ~PAGE_MASK) { | ||
| 98 | printk(KERN_INFO "no pagemask in mmap\r\n"); | ||
| 99 | goto out; | ||
| 100 | } | ||
| 101 | 72 | ||
| 102 | err = sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); | 73 | return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); |
| 103 | out: | ||
| 104 | return err; | ||
| 105 | } | 74 | } |
| 106 | 75 | ||
| 107 | /* | 76 | /* |
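
In sys_microblaze.c the private `sys_mmap2` wrapper is deleted and the syscall table (next hunk) points mmap2 straight at the generic `sys_mmap_pgoff`; the remaining `sys_mmap` wrapper only rejects offsets that are not page aligned and shifts the byte offset into page units. Passing the offset in pages is what lets a 32-bit caller map far beyond 4 GiB into a file; a small, hedged illustration (userspace arithmetic only, assuming 4 KiB pages):

```c
/*
 * Illustration of why mmap2 takes its offset in pages: with a 32-bit page
 * offset and 4 KiB pages, file positions on the order of 2^44 bytes
 * (16 TiB) are reachable even where off_t is only 32 bits wide.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	uint32_t pgoff = 0xFFFFFFFFu;	/* largest page offset */
	uint64_t byte_off = (uint64_t)pgoff << PAGE_SHIFT;

	printf("max mappable offset: %llu bytes\n",
	       (unsigned long long)byte_off);
	return 0;
}
```
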
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S index ecec19155135..03376dc814c9 100644 --- a/arch/microblaze/kernel/syscall_table.S +++ b/arch/microblaze/kernel/syscall_table.S | |||
| @@ -183,7 +183,7 @@ ENTRY(sys_call_table) | |||
| 183 | .long sys_rt_sigpending | 183 | .long sys_rt_sigpending |
| 184 | .long sys_rt_sigtimedwait | 184 | .long sys_rt_sigtimedwait |
| 185 | .long sys_rt_sigqueueinfo | 185 | .long sys_rt_sigqueueinfo |
| 186 | .long sys_rt_sigsuspend_wrapper | 186 | .long sys_rt_sigsuspend |
| 187 | .long sys_pread64 /* 180 */ | 187 | .long sys_pread64 /* 180 */ |
| 188 | .long sys_pwrite64 | 188 | .long sys_pwrite64 |
| 189 | .long sys_chown | 189 | .long sys_chown |
| @@ -196,7 +196,7 @@ ENTRY(sys_call_table) | |||
| 196 | .long sys_ni_syscall /* reserved for streams2 */ | 196 | .long sys_ni_syscall /* reserved for streams2 */ |
| 197 | .long sys_vfork /* 190 */ | 197 | .long sys_vfork /* 190 */ |
| 198 | .long sys_getrlimit | 198 | .long sys_getrlimit |
| 199 | .long sys_mmap2 /* mmap2 */ | 199 | .long sys_mmap_pgoff /* mmap2 */ |
| 200 | .long sys_truncate64 | 200 | .long sys_truncate64 |
| 201 | .long sys_ftruncate64 | 201 | .long sys_ftruncate64 |
| 202 | .long sys_stat64 /* 195 */ | 202 | .long sys_stat64 /* 195 */ |
| @@ -303,7 +303,7 @@ ENTRY(sys_call_table) | |||
| 303 | .long sys_mkdirat | 303 | .long sys_mkdirat |
| 304 | .long sys_mknodat | 304 | .long sys_mknodat |
| 305 | .long sys_fchownat | 305 | .long sys_fchownat |
| 306 | .long sys_ni_syscall | 306 | .long sys_futimesat |
| 307 | .long sys_fstatat64 /* 300 */ | 307 | .long sys_fstatat64 /* 300 */ |
| 308 | .long sys_unlinkat | 308 | .long sys_unlinkat |
| 309 | .long sys_renameat | 309 | .long sys_renameat |
| @@ -366,8 +366,9 @@ ENTRY(sys_call_table) | |||
| 366 | .long sys_shutdown | 366 | .long sys_shutdown |
| 367 | .long sys_sendmsg /* 360 */ | 367 | .long sys_sendmsg /* 360 */ |
| 368 | .long sys_recvmsg | 368 | .long sys_recvmsg |
| 369 | .long sys_ni_syscall | 369 | .long sys_accept4 |
| 370 | .long sys_ni_syscall | 370 | .long sys_ni_syscall |
| 371 | .long sys_ni_syscall | 371 | .long sys_ni_syscall |
| 372 | .long sys_rt_tgsigqueueinfo /* 365 */ | 372 | .long sys_rt_tgsigqueueinfo /* 365 */ |
| 373 | .long sys_perf_event_open | 373 | .long sys_perf_event_open |
| 374 | .long sys_recvmmsg | ||
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c index 5499deae7fa6..ed61b2f17719 100644 --- a/arch/microblaze/kernel/timer.c +++ b/arch/microblaze/kernel/timer.c | |||
| @@ -183,6 +183,31 @@ static cycle_t microblaze_read(struct clocksource *cs) | |||
| 183 | return (cycle_t) (in_be32(TIMER_BASE + TCR1)); | 183 | return (cycle_t) (in_be32(TIMER_BASE + TCR1)); |
| 184 | } | 184 | } |
| 185 | 185 | ||
| 186 | static struct timecounter microblaze_tc = { | ||
| 187 | .cc = NULL, | ||
| 188 | }; | ||
| 189 | |||
| 190 | static cycle_t microblaze_cc_read(const struct cyclecounter *cc) | ||
| 191 | { | ||
| 192 | return microblaze_read(NULL); | ||
| 193 | } | ||
| 194 | |||
| 195 | static struct cyclecounter microblaze_cc = { | ||
| 196 | .read = microblaze_cc_read, | ||
| 197 | .mask = CLOCKSOURCE_MASK(32), | ||
| 198 | .shift = 24, | ||
| 199 | }; | ||
| 200 | |||
| 201 | int __init init_microblaze_timecounter(void) | ||
| 202 | { | ||
| 203 | microblaze_cc.mult = div_sc(cpuinfo.cpu_clock_freq, NSEC_PER_SEC, | ||
| 204 | microblaze_cc.shift); | ||
| 205 | |||
| 206 | timecounter_init(µblaze_tc, µblaze_cc, sched_clock()); | ||
| 207 | |||
| 208 | return 0; | ||
| 209 | } | ||
| 210 | |||
| 186 | static struct clocksource clocksource_microblaze = { | 211 | static struct clocksource clocksource_microblaze = { |
| 187 | .name = "microblaze_clocksource", | 212 | .name = "microblaze_clocksource", |
| 188 | .rating = 300, | 213 | .rating = 300, |
| @@ -204,6 +229,9 @@ static int __init microblaze_clocksource_init(void) | |||
| 204 | out_be32(TIMER_BASE + TCSR1, in_be32(TIMER_BASE + TCSR1) & ~TCSR_ENT); | 229 | out_be32(TIMER_BASE + TCSR1, in_be32(TIMER_BASE + TCSR1) & ~TCSR_ENT); |
| 205 | /* start timer1 - up counting without interrupt */ | 230 | /* start timer1 - up counting without interrupt */ |
| 206 | out_be32(TIMER_BASE + TCSR1, TCSR_TINT|TCSR_ENT|TCSR_ARHT); | 231 | out_be32(TIMER_BASE + TCSR1, TCSR_TINT|TCSR_ENT|TCSR_ARHT); |
| 232 | |||
| 233 | /* register timecounter - for ftrace support */ | ||
| 234 | init_microblaze_timecounter(); | ||
| 207 | return 0; | 235 | return 0; |
| 208 | } | 236 | } |
| 209 | 237 | ||
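
The timer.c addition wraps the free-running TCR1 count in a `cyclecounter`/`timecounter` pair so that ftrace can turn raw timer ticks into nanoseconds; the `mult`/`shift` values computed with `div_sc()` form the usual fixed-point scale, i.e. ns = (cycles * mult) >> shift. A sketch of reading such a timecounter back under the same API; the surrounding function is illustrative only.

```c
/*
 * Sketch of consuming the timecounter registered above. timecounter_read()
 * applies the cyc2ns fixed-point conversion to the elapsed cycle count.
 */
#include <linux/clocksource.h>
#include <linux/kernel.h>

static void example_read_ns(struct timecounter *tc)
{
	/* current nanosecond timestamp derived from the cycle counter */
	u64 ns = timecounter_read(tc);

	pr_info("timecounter: %llu ns\n", (unsigned long long)ns);
}
```
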
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c index eaaaf805f31b..75e49202a5ed 100644 --- a/arch/microblaze/kernel/traps.c +++ b/arch/microblaze/kernel/traps.c | |||
| @@ -22,13 +22,11 @@ void trap_init(void) | |||
| 22 | __enable_hw_exceptions(); | 22 | __enable_hw_exceptions(); |
| 23 | } | 23 | } |
| 24 | 24 | ||
| 25 | static int kstack_depth_to_print = 24; | 25 | static unsigned long kstack_depth_to_print = 24; |
| 26 | 26 | ||
| 27 | static int __init kstack_setup(char *s) | 27 | static int __init kstack_setup(char *s) |
| 28 | { | 28 | { |
| 29 | kstack_depth_to_print = strict_strtoul(s, 0, NULL); | 29 | return !strict_strtoul(s, 0, &kstack_depth_to_print); |
| 30 | |||
| 31 | return 1; | ||
| 32 | } | 30 | } |
| 33 | __setup("kstack=", kstack_setup); | 31 | __setup("kstack=", kstack_setup); |
| 34 | 32 | ||
| @@ -97,37 +95,3 @@ void dump_stack(void) | |||
| 97 | show_stack(NULL, NULL); | 95 | show_stack(NULL, NULL); |
| 98 | } | 96 | } |
| 99 | EXPORT_SYMBOL(dump_stack); | 97 | EXPORT_SYMBOL(dump_stack); |
| 100 | |||
| 101 | #ifdef CONFIG_MMU | ||
| 102 | void __bug(const char *file, int line, void *data) | ||
| 103 | { | ||
| 104 | if (data) | ||
| 105 | printk(KERN_CRIT "kernel BUG at %s:%d (data = %p)!\n", | ||
| 106 | file, line, data); | ||
| 107 | else | ||
| 108 | printk(KERN_CRIT "kernel BUG at %s:%d!\n", file, line); | ||
| 109 | |||
| 110 | machine_halt(); | ||
| 111 | } | ||
| 112 | |||
| 113 | int bad_trap(int trap_num, struct pt_regs *regs) | ||
| 114 | { | ||
| 115 | printk(KERN_CRIT | ||
| 116 | "unimplemented trap %d called at 0x%08lx, pid %d!\n", | ||
| 117 | trap_num, regs->pc, current->pid); | ||
| 118 | return -ENOSYS; | ||
| 119 | } | ||
| 120 | |||
| 121 | int debug_trap(struct pt_regs *regs) | ||
| 122 | { | ||
| 123 | int i; | ||
| 124 | printk(KERN_CRIT "debug trap\n"); | ||
| 125 | for (i = 0; i < 32; i++) { | ||
| 126 | /* printk("r%i:%08X\t",i,regs->gpr[i]); */ | ||
| 127 | if ((i % 4) == 3) | ||
| 128 | printk(KERN_CRIT "\n"); | ||
| 129 | } | ||
| 130 | printk(KERN_CRIT "pc:%08lX\tmsr:%08lX\n", regs->pc, regs->msr); | ||
| 131 | return -ENOSYS; | ||
| 132 | } | ||
| 133 | #endif | ||
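
The traps.c fix makes the `kstack=` boot parameter actually take effect: `strict_strtoul()` now stores the parsed value into `kstack_depth_to_print` and returns 0 on success, so negating it yields the non-zero "handled" return that `__setup()` expects. The same pattern works for any simple numeric boot option; a sketch with an invented `mydepth=` parameter:

```c
/*
 * Sketch of the __setup() pattern used above, for a hypothetical
 * "mydepth=" boot parameter. strict_strtoul() returns 0 on success, so
 * negating it yields the 1/0 "handled" convention __setup expects.
 */
#include <linux/init.h>
#include <linux/kernel.h>

static unsigned long mydepth = 24;	/* default value */

static int __init mydepth_setup(char *s)
{
	return !strict_strtoul(s, 0, &mydepth);
}
__setup("mydepth=", mydepth_setup);
```
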
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index e704188d7855..db72d7124602 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S | |||
| @@ -24,13 +24,15 @@ SECTIONS { | |||
| 24 | .text : AT(ADDR(.text) - LOAD_OFFSET) { | 24 | .text : AT(ADDR(.text) - LOAD_OFFSET) { |
| 25 | _text = . ; | 25 | _text = . ; |
| 26 | _stext = . ; | 26 | _stext = . ; |
| 27 | *(.text .text.*) | 27 | HEAD_TEXT |
| 28 | TEXT_TEXT | ||
| 28 | *(.fixup) | 29 | *(.fixup) |
| 29 | EXIT_TEXT | 30 | EXIT_TEXT |
| 30 | EXIT_CALL | 31 | EXIT_CALL |
| 31 | SCHED_TEXT | 32 | SCHED_TEXT |
| 32 | LOCK_TEXT | 33 | LOCK_TEXT |
| 33 | KPROBES_TEXT | 34 | KPROBES_TEXT |
| 35 | IRQENTRY_TEXT | ||
| 34 | . = ALIGN (4) ; | 36 | . = ALIGN (4) ; |
| 35 | _etext = . ; | 37 | _etext = . ; |
| 36 | } | 38 | } |
| @@ -86,6 +88,7 @@ SECTIONS { | |||
| 86 | _KERNEL_SDA_BASE_ = _ssro + (_ssro_size / 2) ; | 88 | _KERNEL_SDA_BASE_ = _ssro + (_ssro_size / 2) ; |
| 87 | } | 89 | } |
| 88 | 90 | ||
| 91 | . = ALIGN(PAGE_SIZE); | ||
| 89 | __init_begin = .; | 92 | __init_begin = .; |
| 90 | 93 | ||
| 91 | INIT_TEXT_SECTION(PAGE_SIZE) | 94 | INIT_TEXT_SECTION(PAGE_SIZE) |
