commit     bbc4fd12a635492ad9d12bb418124fa2d5f0d734
tree       fe245d350db180d499a9e9d6dc84bbb308f33dc2
parent     673b864fd76a29031aa0b4b08fc80886d527b3b7
parent     2d5973cb5ac5d04662f86e19a06a4c52fa4c4ae3
author     Linus Torvalds <torvalds@linux-foundation.org>  2010-08-05 11:59:22 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-08-05 11:59:22 -0400
Merge branch 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze
* 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze: (49 commits)
microblaze: Add KGDB support
microblaze: Support brki rX, 0x18 for user application debugging
microblaze: Remove nop after MSRCLR/SET, MTS, MFS instructions
microblaze: Simplify syscall routine
microblaze: Move PT_MODE saving to delay slot
microblaze: Fix _interrupt function
microblaze: Fix _user_exception function
microblaze: Put together addik instructions
microblaze: Use delay slot in syscall macros
microblaze: Save kernel mode in delay slot
microblaze: Do not mix register saving and mode setting
microblaze: Move SAVE_STATE upward
microblaze: entry.S: Macro optimization
microblaze: Optimize hw exception routine
microblaze: Implement clear_ums macro and fix SAVE_STATE macro
microblaze: Remove additional setup for kernel_mode
microblaze: Optimize SAVE_STATE macro
microblaze: Remove additional loading
microblaze: Completely remove working with R11 register
microblaze: Do not setup BIP in _debug_exception
...
34 files changed, 977 insertions, 544 deletions
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 505a08592423..be3855250db6 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
| @@ -14,6 +14,7 @@ config MICROBLAZE | |||
| 14 | select USB_ARCH_HAS_EHCI | 14 | select USB_ARCH_HAS_EHCI |
| 15 | select ARCH_WANT_OPTIONAL_GPIOLIB | 15 | select ARCH_WANT_OPTIONAL_GPIOLIB |
| 16 | select HAVE_OPROFILE | 16 | select HAVE_OPROFILE |
| 17 | select HAVE_ARCH_KGDB | ||
| 17 | select HAVE_DMA_ATTRS | 18 | select HAVE_DMA_ATTRS |
| 18 | select HAVE_DMA_API_DEBUG | 19 | select HAVE_DMA_API_DEBUG |
| 19 | select TRACING_SUPPORT | 20 | select TRACING_SUPPORT |
| @@ -223,6 +224,36 @@ config TASK_SIZE | |||
| 223 | hex "Size of user task space" if TASK_SIZE_BOOL | 224 | hex "Size of user task space" if TASK_SIZE_BOOL |
| 224 | default "0x80000000" | 225 | default "0x80000000" |
| 225 | 226 | ||
| 227 | choice | ||
| 228 | prompt "Page size" | ||
| 229 | default MICROBLAZE_4K_PAGES | ||
| 230 | depends on ADVANCED_OPTIONS && !MMU | ||
| 231 | help | ||
| 232 | Select the kernel logical page size. Increasing the page size | ||
| 233 | will reduce software overhead at each page boundary, allow | ||
| 234 | hardware prefetch mechanisms to be more effective, and allow | ||
| 235 | larger dma transfers increasing IO efficiency and reducing | ||
| 236 | overhead. However the utilization of memory will increase. | ||
| 237 | For example, each cached file will using a multiple of the | ||
| 238 | page size to hold its contents and the difference between the | ||
| 239 | end of file and the end of page is wasted. | ||
| 240 | |||
| 241 | If unsure, choose 4K_PAGES. | ||
| 242 | |||
| 243 | config MICROBLAZE_4K_PAGES | ||
| 244 | bool "4k page size" | ||
| 245 | |||
| 246 | config MICROBLAZE_8K_PAGES | ||
| 247 | bool "8k page size" | ||
| 248 | |||
| 249 | config MICROBLAZE_16K_PAGES | ||
| 250 | bool "16k page size" | ||
| 251 | |||
| 252 | config MICROBLAZE_32K_PAGES | ||
| 253 | bool "32k page size" | ||
| 254 | |||
| 255 | endchoice | ||
| 256 | |||
| 226 | endmenu | 257 | endmenu |
| 227 | 258 | ||
| 228 | source "mm/Kconfig" | 259 | source "mm/Kconfig" |
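The trade-off described in the new "Page size" help text is easy to quantify: every cached file is rounded up to a whole number of pages, so the tail of its last page is lost. A minimal userspace sketch (not part of the patch; the 5000-byte file size is only an illustration) that prints the per-file waste for the four selectable sizes:

```c
#include <stdio.h>

/* Bytes wasted when a file of size `len` is cached with `page`-sized pages:
 * the file occupies a whole number of pages, so the tail of the last page
 * is unused. */
static unsigned long waste(unsigned long len, unsigned long page)
{
	return (len + page - 1) / page * page - len;
}

int main(void)
{
	const unsigned long pages[] = { 4096, 8192, 16384, 32768 };
	const unsigned long len = 5000;	/* example: a 5000-byte file */
	int i;

	for (i = 0; i < 4; i++)
		printf("%luk pages: %lu bytes wasted\n",
		       pages[i] / 1024, waste(len, pages[i]));
	return 0;
}
```

With 4K or 8K pages the example file wastes about 3 KiB; with 32K pages it wastes about 27 KiB, which is the memory-utilization cost the help text warns about.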
diff --git a/arch/microblaze/Kconfig.debug b/arch/microblaze/Kconfig.debug
index 9dc708a7f700..e6e5e0da28c3 100644
--- a/arch/microblaze/Kconfig.debug
+++ b/arch/microblaze/Kconfig.debug
| @@ -10,6 +10,7 @@ source "lib/Kconfig.debug" | |||
| 10 | 10 | ||
| 11 | config EARLY_PRINTK | 11 | config EARLY_PRINTK |
| 12 | bool "Early printk function for kernel" | 12 | bool "Early printk function for kernel" |
| 13 | depends on SERIAL_UARTLITE_CONSOLE | ||
| 13 | default n | 14 | default n |
| 14 | help | 15 | help |
| 15 | This option turns on/off early printk messages to console. | 16 | This option turns on/off early printk messages to console. |
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 57f50c2371c6..be01d78750d9 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
| @@ -35,13 +35,14 @@ quiet_cmd_cp = CP $< $@$2 | |||
| 35 | cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false) | 35 | cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false) |
| 36 | 36 | ||
| 37 | quiet_cmd_strip = STRIP $@ | 37 | quiet_cmd_strip = STRIP $@ |
| 38 | cmd_strip = $(STRIP) -K _start -K _end -K __log_buf -K _fdt_start vmlinux -o $@ | 38 | cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \ |
| 39 | -K _fdt_start vmlinux -o $@ | ||
| 39 | 40 | ||
| 40 | quiet_cmd_uimage = UIMAGE $@.ub | 41 | quiet_cmd_uimage = UIMAGE $@.ub |
| 41 | cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A microblaze -O linux -T kernel \ | 42 | cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A microblaze -O linux -T kernel \ |
| 42 | -C none -n 'Linux-$(KERNELRELEASE)' \ | 43 | -C none -n 'Linux-$(KERNELRELEASE)' \ |
| 43 | -a $(CONFIG_KERNEL_BASE_ADDR) -e $(CONFIG_KERNEL_BASE_ADDR) \ | 44 | -a $(CONFIG_KERNEL_BASE_ADDR) -e $(CONFIG_KERNEL_BASE_ADDR) \ |
| 44 | -d $@ $@.ub | 45 | -d $@ $@.ub |
| 45 | 46 | ||
| 46 | $(obj)/simpleImage.%: vmlinux FORCE | 47 | $(obj)/simpleImage.%: vmlinux FORCE |
| 47 | $(call if_changed,cp,.unstrip) | 48 | $(call if_changed,cp,.unstrip) |
diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h
index a6edd356cd08..7ebd955460d9 100644
--- a/arch/microblaze/include/asm/cacheflush.h
+++ b/arch/microblaze/include/asm/cacheflush.h
| @@ -17,6 +17,7 @@ | |||
| 17 | 17 | ||
| 18 | /* Somebody depends on this; sigh... */ | 18 | /* Somebody depends on this; sigh... */ |
| 19 | #include <linux/mm.h> | 19 | #include <linux/mm.h> |
| 20 | #include <linux/io.h> | ||
| 20 | 21 | ||
| 21 | /* Look at Documentation/cachetlb.txt */ | 22 | /* Look at Documentation/cachetlb.txt */ |
| 22 | 23 | ||
| @@ -60,7 +61,6 @@ void microblaze_cache_init(void); | |||
| 60 | #define invalidate_icache() mbc->iin(); | 61 | #define invalidate_icache() mbc->iin(); |
| 61 | #define invalidate_icache_range(start, end) mbc->iinr(start, end); | 62 | #define invalidate_icache_range(start, end) mbc->iinr(start, end); |
| 62 | 63 | ||
| 63 | |||
| 64 | #define flush_icache_user_range(vma, pg, adr, len) flush_icache(); | 64 | #define flush_icache_user_range(vma, pg, adr, len) flush_icache(); |
| 65 | #define flush_icache_page(vma, pg) do { } while (0) | 65 | #define flush_icache_page(vma, pg) do { } while (0) |
| 66 | 66 | ||
| @@ -72,9 +72,15 @@ void microblaze_cache_init(void); | |||
| 72 | #define flush_dcache() mbc->dfl(); | 72 | #define flush_dcache() mbc->dfl(); |
| 73 | #define flush_dcache_range(start, end) mbc->dflr(start, end); | 73 | #define flush_dcache_range(start, end) mbc->dflr(start, end); |
| 74 | 74 | ||
| 75 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 | 75 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 |
| 76 | /* D-cache aliasing problem can't happen - cache is between MMU and ram */ | 76 | /* MS: We have to implement it because of rootfs-jffs2 issue on WB */ |
| 77 | #define flush_dcache_page(page) do { } while (0) | 77 | #define flush_dcache_page(page) \ |
| 78 | do { \ | ||
| 79 | unsigned long addr = (unsigned long) page_address(page); /* virtual */ \ | ||
| 80 | addr = (u32)virt_to_phys((void *)addr); \ | ||
| 81 | flush_dcache_range((unsigned) (addr), (unsigned) (addr) + PAGE_SIZE); \ | ||
| 82 | } while (0); | ||
| 83 | |||
| 78 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | 84 | #define flush_dcache_mmap_lock(mapping) do { } while (0) |
| 79 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | 85 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) |
| 80 | 86 | ||
| @@ -97,8 +103,10 @@ void microblaze_cache_init(void); | |||
| 97 | 103 | ||
| 98 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ | 104 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ |
| 99 | do { \ | 105 | do { \ |
| 106 | u32 addr = virt_to_phys(dst); \ | ||
| 107 | invalidate_icache_range((unsigned) (addr), (unsigned) (addr) + (len));\ | ||
| 100 | memcpy((dst), (src), (len)); \ | 108 | memcpy((dst), (src), (len)); \ |
| 101 | flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \ | 109 | flush_dcache_range((unsigned) (addr), (unsigned) (addr) + (len));\ |
| 102 | } while (0) | 110 | } while (0) |
| 103 | 111 | ||
| 104 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ | 112 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ |
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 18b3731c8509..507389580709 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
| @@ -79,12 +79,6 @@ static inline int dma_supported(struct device *dev, u64 mask) | |||
| 79 | return ops->dma_supported(dev, mask); | 79 | return ops->dma_supported(dev, mask); |
| 80 | } | 80 | } |
| 81 | 81 | ||
| 82 | #ifdef CONFIG_PCI | ||
| 83 | /* We have our own implementation of pci_set_dma_mask() */ | ||
| 84 | #define HAVE_ARCH_PCI_SET_DMA_MASK | ||
| 85 | |||
| 86 | #endif | ||
| 87 | |||
| 88 | static inline int dma_set_mask(struct device *dev, u64 dma_mask) | 82 | static inline int dma_set_mask(struct device *dev, u64 dma_mask) |
| 89 | { | 83 | { |
| 90 | struct dma_map_ops *ops = get_dma_ops(dev); | 84 | struct dma_map_ops *ops = get_dma_ops(dev); |
diff --git a/arch/microblaze/include/asm/elf.h b/arch/microblaze/include/asm/elf.h
index 7d4acf2b278e..732caf1be741 100644
--- a/arch/microblaze/include/asm/elf.h
+++ b/arch/microblaze/include/asm/elf.h
| @@ -77,7 +77,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; | |||
| 77 | #define ELF_DATA ELFDATA2MSB | 77 | #define ELF_DATA ELFDATA2MSB |
| 78 | #endif | 78 | #endif |
| 79 | 79 | ||
| 80 | #define ELF_EXEC_PAGESIZE 4096 | 80 | #define ELF_EXEC_PAGESIZE PAGE_SIZE |
| 81 | 81 | ||
| 82 | 82 | ||
| 83 | #define ELF_CORE_COPY_REGS(_dest, _regs) \ | 83 | #define ELF_CORE_COPY_REGS(_dest, _regs) \ |
diff --git a/arch/microblaze/include/asm/exceptions.h b/arch/microblaze/include/asm/exceptions.h
index 4c7b5d037c88..6479097b802b 100644
--- a/arch/microblaze/include/asm/exceptions.h
+++ b/arch/microblaze/include/asm/exceptions.h
| @@ -14,6 +14,11 @@ | |||
| 14 | #define _ASM_MICROBLAZE_EXCEPTIONS_H | 14 | #define _ASM_MICROBLAZE_EXCEPTIONS_H |
| 15 | 15 | ||
| 16 | #ifdef __KERNEL__ | 16 | #ifdef __KERNEL__ |
| 17 | |||
| 18 | #ifndef CONFIG_MMU | ||
| 19 | #define EX_HANDLER_STACK_SIZ (4*19) | ||
| 20 | #endif | ||
| 21 | |||
| 17 | #ifndef __ASSEMBLY__ | 22 | #ifndef __ASSEMBLY__ |
| 18 | 23 | ||
| 19 | /* Macros to enable and disable HW exceptions in the MSR */ | 24 | /* Macros to enable and disable HW exceptions in the MSR */ |
| @@ -64,22 +69,6 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, | |||
| 64 | void die(const char *str, struct pt_regs *fp, long err); | 69 | void die(const char *str, struct pt_regs *fp, long err); |
| 65 | void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr); | 70 | void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr); |
| 66 | 71 | ||
| 67 | #if defined(CONFIG_KGDB) | ||
| 68 | void (*debugger)(struct pt_regs *regs); | ||
| 69 | int (*debugger_bpt)(struct pt_regs *regs); | ||
| 70 | int (*debugger_sstep)(struct pt_regs *regs); | ||
| 71 | int (*debugger_iabr_match)(struct pt_regs *regs); | ||
| 72 | int (*debugger_dabr_match)(struct pt_regs *regs); | ||
| 73 | void (*debugger_fault_handler)(struct pt_regs *regs); | ||
| 74 | #else | ||
| 75 | #define debugger(regs) do { } while (0) | ||
| 76 | #define debugger_bpt(regs) 0 | ||
| 77 | #define debugger_sstep(regs) 0 | ||
| 78 | #define debugger_iabr_match(regs) 0 | ||
| 79 | #define debugger_dabr_match(regs) 0 | ||
| 80 | #define debugger_fault_handler ((void (*)(struct pt_regs *))0) | ||
| 81 | #endif | ||
| 82 | |||
| 83 | #endif /*__ASSEMBLY__ */ | 72 | #endif /*__ASSEMBLY__ */ |
| 84 | #endif /* __KERNEL__ */ | 73 | #endif /* __KERNEL__ */ |
| 85 | #endif /* _ASM_MICROBLAZE_EXCEPTIONS_H */ | 74 | #endif /* _ASM_MICROBLAZE_EXCEPTIONS_H */ |
diff --git a/arch/microblaze/include/asm/kgdb.h b/arch/microblaze/include/asm/kgdb.h
new file mode 100644
index 000000000000..78b17d40b235
--- /dev/null
+++ b/arch/microblaze/include/asm/kgdb.h
| @@ -0,0 +1,28 @@ | |||
| 1 | #ifdef __KERNEL__ | ||
| 2 | #ifndef __MICROBLAZE_KGDB_H__ | ||
| 3 | #define __MICROBLAZE_KGDB_H__ | ||
| 4 | |||
| 5 | #ifndef __ASSEMBLY__ | ||
| 6 | |||
| 7 | #define CACHE_FLUSH_IS_SAFE 1 | ||
| 8 | #define BUFMAX 2048 | ||
| 9 | |||
| 10 | /* | ||
| 11 | * 32 32-bit general purpose registers (r0-r31) | ||
| 12 | * 6 32-bit special registers (pc, msr, ear, esr, fsr, btr) | ||
| 13 | * 12 32-bit PVR | ||
| 14 | * 7 32-bit MMU Regs (redr, rpid, rzpr, rtlbx, rtlbsx, rtlblo, rtlbhi) | ||
| 15 | * ------ | ||
| 16 | * 57 registers | ||
| 17 | */ | ||
| 18 | #define NUMREGBYTES (57 * 4) | ||
| 19 | |||
| 20 | #define BREAK_INSTR_SIZE 4 | ||
| 21 | static inline void arch_kgdb_breakpoint(void) | ||
| 22 | { | ||
| 23 | __asm__ __volatile__("brki r16, 0x18;"); | ||
| 24 | } | ||
| 25 | |||
| 26 | #endif /* __ASSEMBLY__ */ | ||
| 27 | #endif /* __MICROBLAZE_KGDB_H__ */ | ||
| 28 | #endif /* __KERNEL__ */ | ||
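With HAVE_ARCH_KGDB selected and this header in place, kernel code can drop into the debugger through the generic kgdb_breakpoint() helper, which on MicroBlaze ends up at the `brki r16, 0x18` sequence defined above. A hedged sketch of a throw-away test module (not part of this series; the module name is made up) that stops in KGDB at load time:

```c
#include <linux/module.h>
#include <linux/kgdb.h>

/* Loading this module traps into the attached KGDB frontend; the brki
 * instruction emitted by arch_kgdb_breakpoint() is what the vector at
 * 0x18 recognises as a debugger breakpoint. */
static int __init kgdb_trip_init(void)
{
	pr_info("dropping into kgdb\n");
	kgdb_breakpoint();
	return 0;
}

static void __exit kgdb_trip_exit(void)
{
}

module_init(kgdb_trip_init);
module_exit(kgdb_trip_exit);
MODULE_LICENSE("GPL");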
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 464ff32bee3d..c12c6dfafd9f 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
| @@ -23,8 +23,16 @@ | |||
| 23 | #ifdef __KERNEL__ | 23 | #ifdef __KERNEL__ |
| 24 | 24 | ||
| 25 | /* PAGE_SHIFT determines the page size */ | 25 | /* PAGE_SHIFT determines the page size */ |
| 26 | #define PAGE_SHIFT (12) | 26 | #if defined(CONFIG_MICROBLAZE_32K_PAGES) |
| 27 | #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) | 27 | #define PAGE_SHIFT 15 |
| 28 | #elif defined(CONFIG_MICROBLAZE_16K_PAGES) | ||
| 29 | #define PAGE_SHIFT 14 | ||
| 30 | #elif defined(CONFIG_MICROBLAZE_8K_PAGES) | ||
| 31 | #define PAGE_SHIFT 13 | ||
| 32 | #else | ||
| 33 | #define PAGE_SHIFT 12 | ||
| 34 | #endif | ||
| 35 | #define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT) | ||
| 28 | #define PAGE_MASK (~(PAGE_SIZE-1)) | 36 | #define PAGE_MASK (~(PAGE_SIZE-1)) |
| 29 | 37 | ||
| 30 | #define LOAD_OFFSET ASM_CONST((CONFIG_KERNEL_START-CONFIG_KERNEL_BASE_ADDR)) | 38 | #define LOAD_OFFSET ASM_CONST((CONFIG_KERNEL_START-CONFIG_KERNEL_BASE_ADDR)) |
diff --git a/arch/microblaze/include/asm/pvr.h b/arch/microblaze/include/asm/pvr.h
index e38abc7714b6..9578666e98ba 100644
--- a/arch/microblaze/include/asm/pvr.h
+++ b/arch/microblaze/include/asm/pvr.h
| @@ -16,7 +16,7 @@ | |||
| 16 | #define PVR_MSR_BIT 0x400 | 16 | #define PVR_MSR_BIT 0x400 |
| 17 | 17 | ||
| 18 | struct pvr_s { | 18 | struct pvr_s { |
| 19 | unsigned pvr[16]; | 19 | unsigned pvr[12]; |
| 20 | }; | 20 | }; |
| 21 | 21 | ||
| 22 | /* The following taken from Xilinx's standalone BSP pvr.h */ | 22 | /* The following taken from Xilinx's standalone BSP pvr.h */ |
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index 7f31394985e0..782b5c89248e 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
| @@ -28,8 +28,6 @@ void disable_early_printk(void); | |||
| 28 | void heartbeat(void); | 28 | void heartbeat(void); |
| 29 | void setup_heartbeat(void); | 29 | void setup_heartbeat(void); |
| 30 | 30 | ||
| 31 | unsigned long long sched_clock(void); | ||
| 32 | |||
| 33 | # ifdef CONFIG_MMU | 31 | # ifdef CONFIG_MMU |
| 34 | extern void mmu_reset(void); | 32 | extern void mmu_reset(void); |
| 35 | extern void early_console_reg_tlb_alloc(unsigned int addr); | 33 | extern void early_console_reg_tlb_alloc(unsigned int addr); |
diff --git a/arch/microblaze/include/asm/system.h b/arch/microblaze/include/asm/system.h
index 81e1f7d5b4cb..e6a2284571dc 100644
--- a/arch/microblaze/include/asm/system.h
+++ b/arch/microblaze/include/asm/system.h
| @@ -45,7 +45,6 @@ extern struct task_struct *_switch_to(struct thread_info *prev, | |||
| 45 | #define smp_rmb() rmb() | 45 | #define smp_rmb() rmb() |
| 46 | #define smp_wmb() wmb() | 46 | #define smp_wmb() wmb() |
| 47 | 47 | ||
| 48 | void show_trace(struct task_struct *task, unsigned long *stack); | ||
| 49 | void __bad_xchg(volatile void *ptr, int size); | 48 | void __bad_xchg(volatile void *ptr, int size); |
| 50 | 49 | ||
| 51 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, | 50 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, |
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 26460d15b338..d840f4a2d3c9 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
| @@ -359,7 +359,7 @@ extern long __user_bad(void); | |||
| 359 | __copy_tofrom_user((__force void __user *)(to), \ | 359 | __copy_tofrom_user((__force void __user *)(to), \ |
| 360 | (void __user *)(from), (n)) | 360 | (void __user *)(from), (n)) |
| 361 | #define __copy_from_user_inatomic(to, from, n) \ | 361 | #define __copy_from_user_inatomic(to, from, n) \ |
| 362 | copy_from_user((to), (from), (n)) | 362 | __copy_from_user((to), (from), (n)) |
| 363 | 363 | ||
| 364 | static inline long copy_from_user(void *to, | 364 | static inline long copy_from_user(void *to, |
| 365 | const void __user *from, unsigned long n) | 365 | const void __user *from, unsigned long n) |
| @@ -373,7 +373,7 @@ static inline long copy_from_user(void *to, | |||
| 373 | #define __copy_to_user(to, from, n) \ | 373 | #define __copy_to_user(to, from, n) \ |
| 374 | __copy_tofrom_user((void __user *)(to), \ | 374 | __copy_tofrom_user((void __user *)(to), \ |
| 375 | (__force const void __user *)(from), (n)) | 375 | (__force const void __user *)(from), (n)) |
| 376 | #define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n)) | 376 | #define __copy_to_user_inatomic(to, from, n) __copy_to_user((to), (from), (n)) |
| 377 | 377 | ||
| 378 | static inline long copy_to_user(void __user *to, | 378 | static inline long copy_to_user(void __user *to, |
| 379 | const void *from, unsigned long n) | 379 | const void *from, unsigned long n) |
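The `_inatomic` helpers are meant to run with page faults disabled (for example under kmap_atomic()), so pointing them at the unchecked `__copy_*` variants rather than the full copy_*_user() path matches how other architectures define them. A hedged sketch of the usual calling pattern, assuming the caller has already validated the user pointer with access_ok() (the helper name here is made up):

```c
#include <linux/uaccess.h>

/* Copy from user space while page faults are disabled; if the source page
 * is not resident the copy returns the number of bytes left instead of
 * sleeping, and the caller falls back to a sleeping copy_from_user()
 * outside the atomic section. */
static long copy_in_atomic(void *dst, const void __user *src, unsigned long len)
{
	long left;

	pagefault_disable();
	left = __copy_from_user_inatomic(dst, src, len);
	pagefault_enable();

	return left;	/* 0 on success, non-zero means retry outside atomic */
}
```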
diff --git a/arch/microblaze/include/asm/unwind.h b/arch/microblaze/include/asm/unwind.h
new file mode 100644
index 000000000000..d248b7de4b13
--- /dev/null
+++ b/arch/microblaze/include/asm/unwind.h
| @@ -0,0 +1,29 @@ | |||
| 1 | /* | ||
| 2 | * Backtrace support for Microblaze | ||
| 3 | * | ||
| 4 | * Copyright (C) 2010 Digital Design Corporation | ||
| 5 | * | ||
| 6 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 7 | * License. See the file "COPYING" in the main directory of this archive | ||
| 8 | * for more details. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef __MICROBLAZE_UNWIND_H | ||
| 12 | #define __MICROBLAZE_UNWIND_H | ||
| 13 | |||
| 14 | struct stack_trace; | ||
| 15 | |||
| 16 | struct trap_handler_info { | ||
| 17 | unsigned long start_addr; | ||
| 18 | unsigned long end_addr; | ||
| 19 | const char *trap_name; | ||
| 20 | }; | ||
| 21 | extern struct trap_handler_info microblaze_trap_handlers; | ||
| 22 | |||
| 23 | extern const char _hw_exception_handler; | ||
| 24 | extern const char ex_handler_unhandled; | ||
| 25 | |||
| 26 | void microblaze_unwind(struct task_struct *task, struct stack_trace *trace); | ||
| 27 | |||
| 28 | #endif /* __MICROBLAZE_UNWIND_H */ | ||
| 29 | |||
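The new header only declares the interface; the stack-trace glue elsewhere in the series is expected to hand a struct stack_trace to microblaze_unwind(). A hedged sketch of how a caller such as save_stack_trace() could use it (the real stacktrace.c may differ, and the number of skipped frames is an assumption):

```c
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <asm/unwind.h>

/* Fill trace->entries[] with return addresses by walking the stack;
 * a NULL task means "unwind the caller's own stack". */
void save_stack_trace(struct stack_trace *trace)
{
	/* Skip the unwinder's own frames so the trace starts at the caller. */
	trace->skip += 2;
	microblaze_unwind(NULL, trace);
}
```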
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index e51bc1520825..5eecc9f1fbd9 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
| @@ -17,7 +17,7 @@ extra-y := head.o vmlinux.lds | |||
| 17 | obj-y += dma.o exceptions.o \ | 17 | obj-y += dma.o exceptions.o \ |
| 18 | hw_exception_handler.o init_task.o intc.o irq.o of_device.o \ | 18 | hw_exception_handler.o init_task.o intc.o irq.o of_device.o \ |
| 19 | of_platform.o process.o prom.o prom_parse.o ptrace.o \ | 19 | of_platform.o process.o prom.o prom_parse.o ptrace.o \ |
| 20 | setup.o signal.o sys_microblaze.o timer.o traps.o reset.o | 20 | reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o |
| 21 | 21 | ||
| 22 | obj-y += cpu/ | 22 | obj-y += cpu/ |
| 23 | 23 | ||
| @@ -28,5 +28,6 @@ obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o | |||
| 28 | obj-$(CONFIG_MMU) += misc.o | 28 | obj-$(CONFIG_MMU) += misc.o |
| 29 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 29 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
| 30 | obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount.o | 30 | obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount.o |
| 31 | obj-$(CONFIG_KGDB) += kgdb.o | ||
| 31 | 32 | ||
| 32 | obj-y += entry$(MMU).o | 33 | obj-y += entry$(MMU).o |
diff --git a/arch/microblaze/kernel/cpu/mb.c b/arch/microblaze/kernel/cpu/mb.c
index 4216eb1eaa32..7086e3564281 100644
--- a/arch/microblaze/kernel/cpu/mb.c
+++ b/arch/microblaze/kernel/cpu/mb.c
| @@ -126,6 +126,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
| 126 | cpuinfo.pvr_user1, | 126 | cpuinfo.pvr_user1, |
| 127 | cpuinfo.pvr_user2); | 127 | cpuinfo.pvr_user2); |
| 128 | 128 | ||
| 129 | count += seq_printf(m, "Page size:\t%lu\n", PAGE_SIZE); | ||
| 129 | return 0; | 130 | return 0; |
| 130 | } | 131 | } |
| 131 | 132 | ||
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
index 8cc18cd2cce6..ca84368570b6 100644
--- a/arch/microblaze/kernel/entry-nommu.S
+++ b/arch/microblaze/kernel/entry-nommu.S
| @@ -588,3 +588,31 @@ sys_rt_sigsuspend_wrapper: | |||
| 588 | #include "syscall_table.S" | 588 | #include "syscall_table.S" |
| 589 | 589 | ||
| 590 | syscall_table_size=(.-sys_call_table) | 590 | syscall_table_size=(.-sys_call_table) |
| 591 | |||
| 592 | type_SYSCALL: | ||
| 593 | .ascii "SYSCALL\0" | ||
| 594 | type_IRQ: | ||
| 595 | .ascii "IRQ\0" | ||
| 596 | type_IRQ_PREEMPT: | ||
| 597 | .ascii "IRQ (PREEMPTED)\0" | ||
| 598 | type_SYSCALL_PREEMPT: | ||
| 599 | .ascii " SYSCALL (PREEMPTED)\0" | ||
| 600 | |||
| 601 | /* | ||
| 602 | * Trap decoding for stack unwinder | ||
| 603 | * Tuples are (start addr, end addr, string) | ||
| 604 | * If return address lies on [start addr, end addr], | ||
| 605 | * unwinder displays 'string' | ||
| 606 | */ | ||
| 607 | |||
| 608 | .align 4 | ||
| 609 | .global microblaze_trap_handlers | ||
| 610 | microblaze_trap_handlers: | ||
| 611 | /* Exact matches come first */ | ||
| 612 | .word ret_to_user ; .word ret_to_user ; .word type_SYSCALL | ||
| 613 | .word ret_from_intr; .word ret_from_intr ; .word type_IRQ | ||
| 614 | /* Fuzzy matches go here */ | ||
| 615 | .word ret_from_intr; .word no_intr_resched; .word type_IRQ_PREEMPT | ||
| 616 | .word work_pending ; .word no_work_pending; .word type_SYSCALL_PREEMPT | ||
| 617 | /* End of table */ | ||
| 618 | .word 0 ; .word 0 ; .word 0 | ||
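The comment above spells out the table's contract: each entry is a (start, end, name) tuple of return-address bounds plus a label, and an all-zero entry terminates the list. A hedged sketch of the lookup the unwinder is expected to perform against microblaze_trap_handlers (the real microblaze_unwind() may differ; the function name here is illustrative):

```c
#include <asm/unwind.h>

/* Return the trap name whose [start_addr, end_addr] range contains `pc`,
 * or NULL if the address does not belong to any trap return path. */
static const char *lookup_trap_handler(unsigned long pc)
{
	const struct trap_handler_info *info = &microblaze_trap_handlers;

	/* The table is terminated by an all-zero entry. */
	for (; info->start_addr; info++)
		if (pc >= info->start_addr && pc <= info->end_addr)
			return info->trap_name;

	return NULL;
}
```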
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index c0ede25c5b99..304882e56459 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
| @@ -48,128 +48,107 @@ | |||
| 48 | */ | 48 | */ |
| 49 | #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR | 49 | #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR |
| 50 | .macro clear_bip | 50 | .macro clear_bip |
| 51 | msrclr r11, MSR_BIP | 51 | msrclr r0, MSR_BIP |
| 52 | nop | ||
| 53 | .endm | 52 | .endm |
| 54 | 53 | ||
| 55 | .macro set_bip | 54 | .macro set_bip |
| 56 | msrset r11, MSR_BIP | 55 | msrset r0, MSR_BIP |
| 57 | nop | ||
| 58 | .endm | 56 | .endm |
| 59 | 57 | ||
| 60 | .macro clear_eip | 58 | .macro clear_eip |
| 61 | msrclr r11, MSR_EIP | 59 | msrclr r0, MSR_EIP |
| 62 | nop | ||
| 63 | .endm | 60 | .endm |
| 64 | 61 | ||
| 65 | .macro set_ee | 62 | .macro set_ee |
| 66 | msrset r11, MSR_EE | 63 | msrset r0, MSR_EE |
| 67 | nop | ||
| 68 | .endm | 64 | .endm |
| 69 | 65 | ||
| 70 | .macro disable_irq | 66 | .macro disable_irq |
| 71 | msrclr r11, MSR_IE | 67 | msrclr r0, MSR_IE |
| 72 | nop | ||
| 73 | .endm | 68 | .endm |
| 74 | 69 | ||
| 75 | .macro enable_irq | 70 | .macro enable_irq |
| 76 | msrset r11, MSR_IE | 71 | msrset r0, MSR_IE |
| 77 | nop | ||
| 78 | .endm | 72 | .endm |
| 79 | 73 | ||
| 80 | .macro set_ums | 74 | .macro set_ums |
| 81 | msrset r11, MSR_UMS | 75 | msrset r0, MSR_UMS |
| 82 | nop | 76 | msrclr r0, MSR_VMS |
| 83 | msrclr r11, MSR_VMS | ||
| 84 | nop | ||
| 85 | .endm | 77 | .endm |
| 86 | 78 | ||
| 87 | .macro set_vms | 79 | .macro set_vms |
| 88 | msrclr r11, MSR_UMS | 80 | msrclr r0, MSR_UMS |
| 89 | nop | 81 | msrset r0, MSR_VMS |
| 90 | msrset r11, MSR_VMS | 82 | .endm |
| 91 | nop | 83 | |
| 84 | .macro clear_ums | ||
| 85 | msrclr r0, MSR_UMS | ||
| 92 | .endm | 86 | .endm |
| 93 | 87 | ||
| 94 | .macro clear_vms_ums | 88 | .macro clear_vms_ums |
| 95 | msrclr r11, MSR_VMS | 89 | msrclr r0, MSR_VMS | MSR_UMS |
| 96 | nop | ||
| 97 | msrclr r11, MSR_UMS | ||
| 98 | nop | ||
| 99 | .endm | 90 | .endm |
| 100 | #else | 91 | #else |
| 101 | .macro clear_bip | 92 | .macro clear_bip |
| 102 | mfs r11, rmsr | 93 | mfs r11, rmsr |
| 103 | nop | ||
| 104 | andi r11, r11, ~MSR_BIP | 94 | andi r11, r11, ~MSR_BIP |
| 105 | mts rmsr, r11 | 95 | mts rmsr, r11 |
| 106 | nop | ||
| 107 | .endm | 96 | .endm |
| 108 | 97 | ||
| 109 | .macro set_bip | 98 | .macro set_bip |
| 110 | mfs r11, rmsr | 99 | mfs r11, rmsr |
| 111 | nop | ||
| 112 | ori r11, r11, MSR_BIP | 100 | ori r11, r11, MSR_BIP |
| 113 | mts rmsr, r11 | 101 | mts rmsr, r11 |
| 114 | nop | ||
| 115 | .endm | 102 | .endm |
| 116 | 103 | ||
| 117 | .macro clear_eip | 104 | .macro clear_eip |
| 118 | mfs r11, rmsr | 105 | mfs r11, rmsr |
| 119 | nop | ||
| 120 | andi r11, r11, ~MSR_EIP | 106 | andi r11, r11, ~MSR_EIP |
| 121 | mts rmsr, r11 | 107 | mts rmsr, r11 |
| 122 | nop | ||
| 123 | .endm | 108 | .endm |
| 124 | 109 | ||
| 125 | .macro set_ee | 110 | .macro set_ee |
| 126 | mfs r11, rmsr | 111 | mfs r11, rmsr |
| 127 | nop | ||
| 128 | ori r11, r11, MSR_EE | 112 | ori r11, r11, MSR_EE |
| 129 | mts rmsr, r11 | 113 | mts rmsr, r11 |
| 130 | nop | ||
| 131 | .endm | 114 | .endm |
| 132 | 115 | ||
| 133 | .macro disable_irq | 116 | .macro disable_irq |
| 134 | mfs r11, rmsr | 117 | mfs r11, rmsr |
| 135 | nop | ||
| 136 | andi r11, r11, ~MSR_IE | 118 | andi r11, r11, ~MSR_IE |
| 137 | mts rmsr, r11 | 119 | mts rmsr, r11 |
| 138 | nop | ||
| 139 | .endm | 120 | .endm |
| 140 | 121 | ||
| 141 | .macro enable_irq | 122 | .macro enable_irq |
| 142 | mfs r11, rmsr | 123 | mfs r11, rmsr |
| 143 | nop | ||
| 144 | ori r11, r11, MSR_IE | 124 | ori r11, r11, MSR_IE |
| 145 | mts rmsr, r11 | 125 | mts rmsr, r11 |
| 146 | nop | ||
| 147 | .endm | 126 | .endm |
| 148 | 127 | ||
| 149 | .macro set_ums | 128 | .macro set_ums |
| 150 | mfs r11, rmsr | 129 | mfs r11, rmsr |
| 151 | nop | ||
| 152 | ori r11, r11, MSR_VMS | 130 | ori r11, r11, MSR_VMS |
| 153 | andni r11, r11, MSR_UMS | 131 | andni r11, r11, MSR_UMS |
| 154 | mts rmsr, r11 | 132 | mts rmsr, r11 |
| 155 | nop | ||
| 156 | .endm | 133 | .endm |
| 157 | 134 | ||
| 158 | .macro set_vms | 135 | .macro set_vms |
| 159 | mfs r11, rmsr | 136 | mfs r11, rmsr |
| 160 | nop | ||
| 161 | ori r11, r11, MSR_VMS | 137 | ori r11, r11, MSR_VMS |
| 162 | andni r11, r11, MSR_UMS | 138 | andni r11, r11, MSR_UMS |
| 163 | mts rmsr, r11 | 139 | mts rmsr, r11 |
| 164 | nop | 140 | .endm |
| 141 | |||
| 142 | .macro clear_ums | ||
| 143 | mfs r11, rmsr | ||
| 144 | andni r11, r11, MSR_UMS | ||
| 145 | mts rmsr,r11 | ||
| 165 | .endm | 146 | .endm |
| 166 | 147 | ||
| 167 | .macro clear_vms_ums | 148 | .macro clear_vms_ums |
| 168 | mfs r11, rmsr | 149 | mfs r11, rmsr |
| 169 | nop | ||
| 170 | andni r11, r11, (MSR_VMS|MSR_UMS) | 150 | andni r11, r11, (MSR_VMS|MSR_UMS) |
| 171 | mts rmsr,r11 | 151 | mts rmsr,r11 |
| 172 | nop | ||
| 173 | .endm | 152 | .endm |
| 174 | #endif | 153 | #endif |
| 175 | 154 | ||
| @@ -180,18 +159,22 @@ | |||
| 180 | 159 | ||
| 181 | /* turn on virtual protected mode save */ | 160 | /* turn on virtual protected mode save */ |
| 182 | #define VM_ON \ | 161 | #define VM_ON \ |
| 183 | set_ums; \ | 162 | set_ums; \ |
| 184 | rted r0, 2f; \ | 163 | rted r0, 2f; \ |
| 185 | 2: nop; | 164 | nop; \ |
| 165 | 2: | ||
| 186 | 166 | ||
| 187 | /* turn off virtual protected mode save and user mode save*/ | 167 | /* turn off virtual protected mode save and user mode save*/ |
| 188 | #define VM_OFF \ | 168 | #define VM_OFF \ |
| 189 | clear_vms_ums; \ | 169 | clear_vms_ums; \ |
| 190 | rted r0, TOPHYS(1f); \ | 170 | rted r0, TOPHYS(1f); \ |
| 191 | 1: nop; | 171 | nop; \ |
| 172 | 1: | ||
| 192 | 173 | ||
| 193 | #define SAVE_REGS \ | 174 | #define SAVE_REGS \ |
| 194 | swi r2, r1, PTO+PT_R2; /* Save SDA */ \ | 175 | swi r2, r1, PTO+PT_R2; /* Save SDA */ \ |
| 176 | swi r3, r1, PTO+PT_R3; \ | ||
| 177 | swi r4, r1, PTO+PT_R4; \ | ||
| 195 | swi r5, r1, PTO+PT_R5; \ | 178 | swi r5, r1, PTO+PT_R5; \ |
| 196 | swi r6, r1, PTO+PT_R6; \ | 179 | swi r6, r1, PTO+PT_R6; \ |
| 197 | swi r7, r1, PTO+PT_R7; \ | 180 | swi r7, r1, PTO+PT_R7; \ |
| @@ -218,14 +201,14 @@ | |||
| 218 | swi r30, r1, PTO+PT_R30; \ | 201 | swi r30, r1, PTO+PT_R30; \ |
| 219 | swi r31, r1, PTO+PT_R31; /* Save current task reg */ \ | 202 | swi r31, r1, PTO+PT_R31; /* Save current task reg */ \ |
| 220 | mfs r11, rmsr; /* save MSR */ \ | 203 | mfs r11, rmsr; /* save MSR */ \ |
| 221 | nop; \ | ||
| 222 | swi r11, r1, PTO+PT_MSR; | 204 | swi r11, r1, PTO+PT_MSR; |
| 223 | 205 | ||
| 224 | #define RESTORE_REGS \ | 206 | #define RESTORE_REGS \ |
| 225 | lwi r11, r1, PTO+PT_MSR; \ | 207 | lwi r11, r1, PTO+PT_MSR; \ |
| 226 | mts rmsr , r11; \ | 208 | mts rmsr , r11; \ |
| 227 | nop; \ | ||
| 228 | lwi r2, r1, PTO+PT_R2; /* restore SDA */ \ | 209 | lwi r2, r1, PTO+PT_R2; /* restore SDA */ \ |
| 210 | lwi r3, r1, PTO+PT_R3; \ | ||
| 211 | lwi r4, r1, PTO+PT_R4; \ | ||
| 229 | lwi r5, r1, PTO+PT_R5; \ | 212 | lwi r5, r1, PTO+PT_R5; \ |
| 230 | lwi r6, r1, PTO+PT_R6; \ | 213 | lwi r6, r1, PTO+PT_R6; \ |
| 231 | lwi r7, r1, PTO+PT_R7; \ | 214 | lwi r7, r1, PTO+PT_R7; \ |
| @@ -252,6 +235,39 @@ | |||
| 252 | lwi r30, r1, PTO+PT_R30; \ | 235 | lwi r30, r1, PTO+PT_R30; \ |
| 253 | lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */ | 236 | lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */ |
| 254 | 237 | ||
| 238 | #define SAVE_STATE \ | ||
| 239 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \ | ||
| 240 | /* See if already in kernel mode.*/ \ | ||
| 241 | mfs r1, rmsr; \ | ||
| 242 | andi r1, r1, MSR_UMS; \ | ||
| 243 | bnei r1, 1f; \ | ||
| 244 | /* Kernel-mode state save. */ \ | ||
| 245 | /* Reload kernel stack-ptr. */ \ | ||
| 246 | lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ | ||
| 247 | /* FIXME: I can add these two lines to one */ \ | ||
| 248 | /* tophys(r1,r1); */ \ | ||
| 249 | /* addik r1, r1, -STATE_SAVE_SIZE; */ \ | ||
| 250 | addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \ | ||
| 251 | SAVE_REGS \ | ||
| 252 | brid 2f; \ | ||
| 253 | swi r1, r1, PTO+PT_MODE; \ | ||
| 254 | 1: /* User-mode state save. */ \ | ||
| 255 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\ | ||
| 256 | tophys(r1,r1); \ | ||
| 257 | lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \ | ||
| 258 | /* MS these three instructions can be added to one */ \ | ||
| 259 | /* addik r1, r1, THREAD_SIZE; */ \ | ||
| 260 | /* tophys(r1,r1); */ \ | ||
| 261 | /* addik r1, r1, -STATE_SAVE_SIZE; */ \ | ||
| 262 | addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \ | ||
| 263 | SAVE_REGS \ | ||
| 264 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ | ||
| 265 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ \ | ||
| 266 | swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */ \ | ||
| 267 | /* MS: I am clearing UMS even in case when I come from kernel space */ \ | ||
| 268 | clear_ums; \ | ||
| 269 | 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); | ||
| 270 | |||
| 255 | .text | 271 | .text |
| 256 | 272 | ||
| 257 | /* | 273 | /* |
| @@ -267,45 +283,23 @@ | |||
| 267 | * are masked. This is nice, means we don't have to CLI before state save | 283 | * are masked. This is nice, means we don't have to CLI before state save |
| 268 | */ | 284 | */ |
| 269 | C_ENTRY(_user_exception): | 285 | C_ENTRY(_user_exception): |
| 270 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */ | ||
| 271 | addi r14, r14, 4 /* return address is 4 byte after call */ | 286 | addi r14, r14, 4 /* return address is 4 byte after call */ |
| 272 | swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */ | 287 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */ |
| 273 | |||
| 274 | lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/ | ||
| 275 | beqi r11, 1f; /* Jump ahead if coming from user */ | ||
| 276 | /* Kernel-mode state save. */ | ||
| 277 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/ | ||
| 278 | tophys(r1,r11); | ||
| 279 | swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */ | ||
| 280 | lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */ | ||
| 281 | |||
| 282 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ | ||
| 283 | SAVE_REGS | ||
| 284 | |||
| 285 | addi r11, r0, 1; /* Was in kernel-mode. */ | ||
| 286 | swi r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */ | ||
| 287 | brid 2f; | ||
| 288 | nop; /* Fill delay slot */ | ||
| 289 | 288 | ||
| 290 | /* User-mode state save. */ | ||
| 291 | 1: | ||
| 292 | lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */ | ||
| 293 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ | 289 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ |
| 294 | tophys(r1,r1); | 290 | tophys(r1,r1); |
| 295 | lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */ | 291 | lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */ |
| 296 | /* calculate kernel stack pointer from task struct 8k */ | 292 | /* MS these three instructions can be added to one */ |
| 297 | addik r1, r1, THREAD_SIZE; | 293 | /* addik r1, r1, THREAD_SIZE; */ |
| 298 | tophys(r1,r1); | 294 | /* tophys(r1,r1); */ |
| 299 | 295 | /* addik r1, r1, -STATE_SAVE_SIZE; */ | |
| 300 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ | 296 | addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; |
| 301 | SAVE_REGS | 297 | SAVE_REGS |
| 302 | 298 | ||
| 303 | swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */ | ||
| 304 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); | 299 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); |
| 305 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ | 300 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ |
| 306 | addi r11, r0, 1; | 301 | clear_ums; |
| 307 | swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ | 302 | lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
| 308 | 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); | ||
| 309 | /* Save away the syscall number. */ | 303 | /* Save away the syscall number. */ |
| 310 | swi r12, r1, PTO+PT_R0; | 304 | swi r12, r1, PTO+PT_R0; |
| 311 | tovirt(r1,r1) | 305 | tovirt(r1,r1) |
| @@ -316,10 +310,8 @@ C_ENTRY(_user_exception): | |||
| 316 | * register should point to the location where | 310 | * register should point to the location where |
| 317 | * the called function should return. [note that MAKE_SYS_CALL uses label 1] */ | 311 | * the called function should return. [note that MAKE_SYS_CALL uses label 1] */ |
| 318 | 312 | ||
| 319 | # Step into virtual mode. | 313 | /* Step into virtual mode */ |
| 320 | set_vms; | 314 | rtbd r0, 3f |
| 321 | addik r11, r0, 3f | ||
| 322 | rtid r11, 0 | ||
| 323 | nop | 315 | nop |
| 324 | 3: | 316 | 3: |
| 325 | lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */ | 317 | lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */ |
| @@ -363,24 +355,17 @@ C_ENTRY(_user_exception): | |||
| 363 | # Find and jump into the syscall handler. | 355 | # Find and jump into the syscall handler. |
| 364 | lwi r12, r12, sys_call_table | 356 | lwi r12, r12, sys_call_table |
| 365 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ | 357 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ |
| 366 | la r15, r0, ret_from_trap-8 | 358 | addi r15, r0, ret_from_trap-8 |
| 367 | bra r12 | 359 | bra r12 |
| 368 | 360 | ||
| 369 | /* The syscall number is invalid, return an error. */ | 361 | /* The syscall number is invalid, return an error. */ |
| 370 | 5: | 362 | 5: |
| 363 | rtsd r15, 8; /* looks like a normal subroutine return */ | ||
| 371 | addi r3, r0, -ENOSYS; | 364 | addi r3, r0, -ENOSYS; |
| 372 | rtsd r15,8; /* looks like a normal subroutine return */ | ||
| 373 | or r0, r0, r0 | ||
| 374 | |||
| 375 | 365 | ||
| 376 | /* Entry point used to return from a syscall/trap */ | 366 | /* Entry point used to return from a syscall/trap */ |
| 377 | /* We re-enable BIP bit before state restore */ | 367 | /* We re-enable BIP bit before state restore */ |
| 378 | C_ENTRY(ret_from_trap): | 368 | C_ENTRY(ret_from_trap): |
| 379 | set_bip; /* Ints masked for state restore*/ | ||
| 380 | lwi r11, r1, PTO+PT_MODE; | ||
| 381 | /* See if returning to kernel mode, if so, skip resched &c. */ | ||
| 382 | bnei r11, 2f; | ||
| 383 | |||
| 384 | swi r3, r1, PTO + PT_R3 | 369 | swi r3, r1, PTO + PT_R3 |
| 385 | swi r4, r1, PTO + PT_R4 | 370 | swi r4, r1, PTO + PT_R4 |
| 386 | 371 | ||
| @@ -413,32 +398,19 @@ C_ENTRY(ret_from_trap): | |||
| 413 | andi r11, r11, _TIF_SIGPENDING; | 398 | andi r11, r11, _TIF_SIGPENDING; |
| 414 | beqi r11, 1f; /* Signals to handle, handle them */ | 399 | beqi r11, 1f; /* Signals to handle, handle them */ |
| 415 | 400 | ||
| 416 | la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ | 401 | addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ |
| 417 | addi r7, r0, 1; /* Arg 3: int in_syscall */ | 402 | addi r7, r0, 1; /* Arg 3: int in_syscall */ |
| 418 | bralid r15, do_signal; /* Handle any signals */ | 403 | bralid r15, do_signal; /* Handle any signals */ |
| 419 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ | 404 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ |
| 420 | 405 | ||
| 421 | /* Finally, return to user state. */ | 406 | /* Finally, return to user state. */ |
| 422 | 1: | 407 | 1: set_bip; /* Ints masked for state restore */ |
| 423 | lwi r3, r1, PTO + PT_R3; /* restore syscall result */ | ||
| 424 | lwi r4, r1, PTO + PT_R4; | ||
| 425 | |||
| 426 | swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ | ||
| 427 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ | 408 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ |
| 428 | VM_OFF; | 409 | VM_OFF; |
| 429 | tophys(r1,r1); | 410 | tophys(r1,r1); |
| 430 | RESTORE_REGS; | 411 | RESTORE_REGS; |
| 431 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | 412 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ |
| 432 | lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */ | 413 | lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */ |
| 433 | bri 6f; | ||
| 434 | |||
| 435 | /* Return to kernel state. */ | ||
| 436 | 2: VM_OFF; | ||
| 437 | tophys(r1,r1); | ||
| 438 | RESTORE_REGS; | ||
| 439 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | ||
| 440 | tovirt(r1,r1); | ||
| 441 | 6: | ||
| 442 | TRAP_return: /* Make global symbol for debugging */ | 414 | TRAP_return: /* Make global symbol for debugging */ |
| 443 | rtbd r14, 0; /* Instructions to return from an IRQ */ | 415 | rtbd r14, 0; /* Instructions to return from an IRQ */ |
| 444 | nop; | 416 | nop; |
| @@ -450,12 +422,11 @@ TRAP_return: /* Make global symbol for debugging */ | |||
| 450 | C_ENTRY(sys_fork_wrapper): | 422 | C_ENTRY(sys_fork_wrapper): |
| 451 | addi r5, r0, SIGCHLD /* Arg 0: flags */ | 423 | addi r5, r0, SIGCHLD /* Arg 0: flags */ |
| 452 | lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */ | 424 | lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */ |
| 453 | la r7, r1, PTO /* Arg 2: parent context */ | 425 | addik r7, r1, PTO /* Arg 2: parent context */ |
| 454 | add r8. r0, r0 /* Arg 3: (unused) */ | 426 | add r8. r0, r0 /* Arg 3: (unused) */ |
| 455 | add r9, r0, r0; /* Arg 4: (unused) */ | 427 | add r9, r0, r0; /* Arg 4: (unused) */ |
| 456 | add r10, r0, r0; /* Arg 5: (unused) */ | ||
| 457 | brid do_fork /* Do real work (tail-call) */ | 428 | brid do_fork /* Do real work (tail-call) */ |
| 458 | nop; | 429 | add r10, r0, r0; /* Arg 5: (unused) */ |
| 459 | 430 | ||
| 460 | /* This the initial entry point for a new child thread, with an appropriate | 431 | /* This the initial entry point for a new child thread, with an appropriate |
| 461 | stack in place that makes it look the the child is in the middle of an | 432 | stack in place that makes it look the the child is in the middle of an |
| @@ -466,35 +437,31 @@ C_ENTRY(ret_from_fork): | |||
| 466 | bralid r15, schedule_tail; /* ...which is schedule_tail's arg */ | 437 | bralid r15, schedule_tail; /* ...which is schedule_tail's arg */ |
| 467 | add r3, r5, r0; /* switch_thread returns the prev task */ | 438 | add r3, r5, r0; /* switch_thread returns the prev task */ |
| 468 | /* ( in the delay slot ) */ | 439 | /* ( in the delay slot ) */ |
| 469 | add r3, r0, r0; /* Child's fork call should return 0. */ | ||
| 470 | brid ret_from_trap; /* Do normal trap return */ | 440 | brid ret_from_trap; /* Do normal trap return */ |
| 471 | nop; | 441 | add r3, r0, r0; /* Child's fork call should return 0. */ |
| 472 | 442 | ||
| 473 | C_ENTRY(sys_vfork): | 443 | C_ENTRY(sys_vfork): |
| 474 | brid microblaze_vfork /* Do real work (tail-call) */ | 444 | brid microblaze_vfork /* Do real work (tail-call) */ |
| 475 | la r5, r1, PTO | 445 | addik r5, r1, PTO |
| 476 | 446 | ||
| 477 | C_ENTRY(sys_clone): | 447 | C_ENTRY(sys_clone): |
| 478 | bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */ | 448 | bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */ |
| 479 | lwi r6, r1, PTO+PT_R1; /* If so, use paret's stack ptr */ | 449 | lwi r6, r1, PTO + PT_R1; /* If so, use paret's stack ptr */ |
| 480 | 1: la r7, r1, PTO; /* Arg 2: parent context */ | 450 | 1: addik r7, r1, PTO; /* Arg 2: parent context */ |
| 481 | add r8, r0, r0; /* Arg 3: (unused) */ | 451 | add r8, r0, r0; /* Arg 3: (unused) */ |
| 482 | add r9, r0, r0; /* Arg 4: (unused) */ | 452 | add r9, r0, r0; /* Arg 4: (unused) */ |
| 483 | add r10, r0, r0; /* Arg 5: (unused) */ | ||
| 484 | brid do_fork /* Do real work (tail-call) */ | 453 | brid do_fork /* Do real work (tail-call) */ |
| 485 | nop; | 454 | add r10, r0, r0; /* Arg 5: (unused) */ |
| 486 | 455 | ||
| 487 | C_ENTRY(sys_execve): | 456 | C_ENTRY(sys_execve): |
| 488 | la r8, r1, PTO; /* add user context as 4th arg */ | ||
| 489 | brid microblaze_execve; /* Do real work (tail-call).*/ | 457 | brid microblaze_execve; /* Do real work (tail-call).*/ |
| 490 | nop; | 458 | addik r8, r1, PTO; /* add user context as 4th arg */ |
| 491 | 459 | ||
| 492 | C_ENTRY(sys_rt_sigreturn_wrapper): | 460 | C_ENTRY(sys_rt_sigreturn_wrapper): |
| 493 | swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | 461 | swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ |
| 494 | swi r4, r1, PTO+PT_R4; | 462 | swi r4, r1, PTO+PT_R4; |
| 495 | la r5, r1, PTO; /* add user context as 1st arg */ | ||
| 496 | brlid r15, sys_rt_sigreturn /* Do real work */ | 463 | brlid r15, sys_rt_sigreturn /* Do real work */ |
| 497 | nop; | 464 | addik r5, r1, PTO; /* add user context as 1st arg */ |
| 498 | lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | 465 | lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ |
| 499 | lwi r4, r1, PTO+PT_R4; | 466 | lwi r4, r1, PTO+PT_R4; |
| 500 | bri ret_from_trap /* fall through will not work here due to align */ | 467 | bri ret_from_trap /* fall through will not work here due to align */ |
| @@ -503,83 +470,23 @@ C_ENTRY(sys_rt_sigreturn_wrapper): | |||
| 503 | /* | 470 | /* |
| 504 | * HW EXCEPTION rutine start | 471 | * HW EXCEPTION rutine start |
| 505 | */ | 472 | */ |
| 506 | |||
| 507 | #define SAVE_STATE \ | ||
| 508 | swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */ \ | ||
| 509 | set_bip; /*equalize initial state for all possible entries*/\ | ||
| 510 | clear_eip; \ | ||
| 511 | enable_irq; \ | ||
| 512 | set_ee; \ | ||
| 513 | /* See if already in kernel mode.*/ \ | ||
| 514 | lwi r11, r0, TOPHYS(PER_CPU(KM)); \ | ||
| 515 | beqi r11, 1f; /* Jump ahead if coming from user */\ | ||
| 516 | /* Kernel-mode state save. */ \ | ||
| 517 | /* Reload kernel stack-ptr. */ \ | ||
| 518 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ | ||
| 519 | tophys(r1,r11); \ | ||
| 520 | swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */ \ | ||
| 521 | lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\ | ||
| 522 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\ | ||
| 523 | /* store return registers separately because \ | ||
| 524 | * this macros is use for others exceptions */ \ | ||
| 525 | swi r3, r1, PTO + PT_R3; \ | ||
| 526 | swi r4, r1, PTO + PT_R4; \ | ||
| 527 | SAVE_REGS \ | ||
| 528 | /* PC, before IRQ/trap - this is one instruction above */ \ | ||
| 529 | swi r17, r1, PTO+PT_PC; \ | ||
| 530 | \ | ||
| 531 | addi r11, r0, 1; /* Was in kernel-mode. */ \ | ||
| 532 | swi r11, r1, PTO+PT_MODE; \ | ||
| 533 | brid 2f; \ | ||
| 534 | nop; /* Fill delay slot */ \ | ||
| 535 | 1: /* User-mode state save. */ \ | ||
| 536 | lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\ | ||
| 537 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\ | ||
| 538 | tophys(r1,r1); \ | ||
| 539 | lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \ | ||
| 540 | addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */\ | ||
| 541 | tophys(r1,r1); \ | ||
| 542 | \ | ||
| 543 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\ | ||
| 544 | /* store return registers separately because this macros \ | ||
| 545 | * is use for others exceptions */ \ | ||
| 546 | swi r3, r1, PTO + PT_R3; \ | ||
| 547 | swi r4, r1, PTO + PT_R4; \ | ||
| 548 | SAVE_REGS \ | ||
| 549 | /* PC, before IRQ/trap - this is one instruction above FIXME*/ \ | ||
| 550 | swi r17, r1, PTO+PT_PC; \ | ||
| 551 | \ | ||
| 552 | swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */ \ | ||
| 553 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ | ||
| 554 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ \ | ||
| 555 | addi r11, r0, 1; \ | ||
| 556 | swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\ | ||
| 557 | 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \ | ||
| 558 | /* Save away the syscall number. */ \ | ||
| 559 | swi r0, r1, PTO+PT_R0; \ | ||
| 560 | tovirt(r1,r1) | ||
| 561 | |||
| 562 | C_ENTRY(full_exception_trap): | 473 | C_ENTRY(full_exception_trap): |
| 563 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */ | ||
| 564 | /* adjust exception address for privileged instruction | 474 | /* adjust exception address for privileged instruction |
| 565 | * for finding where is it */ | 475 | * for finding where is it */ |
| 566 | addik r17, r17, -4 | 476 | addik r17, r17, -4 |
| 567 | SAVE_STATE /* Save registers */ | 477 | SAVE_STATE /* Save registers */ |
| 478 | /* PC, before IRQ/trap - this is one instruction above */ | ||
| 479 | swi r17, r1, PTO+PT_PC; | ||
| 480 | tovirt(r1,r1) | ||
| 568 | /* FIXME this can be store directly in PT_ESR reg. | 481 | /* FIXME this can be store directly in PT_ESR reg. |
| 569 | * I tested it but there is a fault */ | 482 | * I tested it but there is a fault */ |
| 570 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ | 483 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ |
| 571 | la r15, r0, ret_from_exc - 8 | 484 | addik r15, r0, ret_from_exc - 8 |
| 572 | la r5, r1, PTO /* parameter struct pt_regs * regs */ | ||
| 573 | mfs r6, resr | 485 | mfs r6, resr |
| 574 | nop | ||
| 575 | mfs r7, rfsr; /* save FSR */ | 486 | mfs r7, rfsr; /* save FSR */ |
| 576 | nop | ||
| 577 | mts rfsr, r0; /* Clear sticky fsr */ | 487 | mts rfsr, r0; /* Clear sticky fsr */ |
| 578 | nop | 488 | rted r0, full_exception |
| 579 | la r12, r0, full_exception | 489 | addik r5, r1, PTO /* parameter struct pt_regs * regs */ |
| 580 | set_vms; | ||
| 581 | rtbd r12, 0; | ||
| 582 | nop; | ||
| 583 | 490 | ||
| 584 | /* | 491 | /* |
| 585 | * Unaligned data trap. | 492 | * Unaligned data trap. |
| @@ -592,19 +499,27 @@ C_ENTRY(full_exception_trap): | |||
| 592 | * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S" | 499 | * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S" |
| 593 | */ | 500 | */ |
| 594 | C_ENTRY(unaligned_data_trap): | 501 | C_ENTRY(unaligned_data_trap): |
| 595 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */ | 502 | /* MS: I have to save r11 value and then restore it because |
| 503 | * set_bit, clear_eip, set_ee use r11 as temp register if MSR | ||
| 504 | * instructions are not used. We don't need to do if MSR instructions | ||
| 505 | * are used and they use r0 instead of r11. | ||
| 506 | * I am using ENTRY_SP which should be primary used only for stack | ||
| 507 | * pointer saving. */ | ||
| 508 | swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); | ||
| 509 | set_bip; /* equalize initial state for all possible entries */ | ||
| 510 | clear_eip; | ||
| 511 | set_ee; | ||
| 512 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); | ||
| 596 | SAVE_STATE /* Save registers.*/ | 513 | SAVE_STATE /* Save registers.*/ |
| 514 | /* PC, before IRQ/trap - this is one instruction above */ | ||
| 515 | swi r17, r1, PTO+PT_PC; | ||
| 516 | tovirt(r1,r1) | ||
| 597 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ | 517 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ |
| 598 | la r15, r0, ret_from_exc-8 | 518 | addik r15, r0, ret_from_exc-8 |
| 599 | mfs r3, resr /* ESR */ | 519 | mfs r3, resr /* ESR */ |
| 600 | nop | ||
| 601 | mfs r4, rear /* EAR */ | 520 | mfs r4, rear /* EAR */ |
| 602 | nop | 521 | rtbd r0, _unaligned_data_exception |
| 603 | la r7, r1, PTO /* parameter struct pt_regs * regs */ | 522 | addik r7, r1, PTO /* parameter struct pt_regs * regs */ |
| 604 | la r12, r0, _unaligned_data_exception | ||
| 605 | set_vms; | ||
| 606 | rtbd r12, 0; /* interrupts enabled */ | ||
| 607 | nop; | ||
| 608 | 523 | ||
| 609 | /* | 524 | /* |
| 610 | * Page fault traps. | 525 | * Page fault traps. |
| @@ -625,38 +540,32 @@ C_ENTRY(unaligned_data_trap): | |||
| 625 | */ | 540 | */ |
| 626 | /* data and intruction trap - which is choose is resolved int fault.c */ | 541 | /* data and intruction trap - which is choose is resolved int fault.c */ |
| 627 | C_ENTRY(page_fault_data_trap): | 542 | C_ENTRY(page_fault_data_trap): |
| 628 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */ | ||
| 629 | SAVE_STATE /* Save registers.*/ | 543 | SAVE_STATE /* Save registers.*/ |
| 544 | /* PC, before IRQ/trap - this is one instruction above */ | ||
| 545 | swi r17, r1, PTO+PT_PC; | ||
| 546 | tovirt(r1,r1) | ||
| 630 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ | 547 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ |
| 631 | la r15, r0, ret_from_exc-8 | 548 | addik r15, r0, ret_from_exc-8 |
| 632 | la r5, r1, PTO /* parameter struct pt_regs * regs */ | ||
| 633 | mfs r6, rear /* parameter unsigned long address */ | 549 | mfs r6, rear /* parameter unsigned long address */ |
| 634 | nop | ||
| 635 | mfs r7, resr /* parameter unsigned long error_code */ | 550 | mfs r7, resr /* parameter unsigned long error_code */ |
| 636 | nop | 551 | rted r0, do_page_fault |
| 637 | la r12, r0, do_page_fault | 552 | addik r5, r1, PTO /* parameter struct pt_regs * regs */ |
| 638 | set_vms; | ||
| 639 | rtbd r12, 0; /* interrupts enabled */ | ||
| 640 | nop; | ||
| 641 | 553 | ||
| 642 | C_ENTRY(page_fault_instr_trap): | 554 | C_ENTRY(page_fault_instr_trap): |
| 643 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */ | ||
| 644 | SAVE_STATE /* Save registers.*/ | 555 | SAVE_STATE /* Save registers.*/ |
| 556 | /* PC, before IRQ/trap - this is one instruction above */ | ||
| 557 | swi r17, r1, PTO+PT_PC; | ||
| 558 | tovirt(r1,r1) | ||
| 645 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ | 559 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ |
| 646 | la r15, r0, ret_from_exc-8 | 560 | addik r15, r0, ret_from_exc-8 |
| 647 | la r5, r1, PTO /* parameter struct pt_regs * regs */ | ||
| 648 | mfs r6, rear /* parameter unsigned long address */ | 561 | mfs r6, rear /* parameter unsigned long address */ |
| 649 | nop | ||
| 650 | ori r7, r0, 0 /* parameter unsigned long error_code */ | 562 | ori r7, r0, 0 /* parameter unsigned long error_code */ |
| 651 | la r12, r0, do_page_fault | 563 | rted r0, do_page_fault |
| 652 | set_vms; | 564 | addik r5, r1, PTO /* parameter struct pt_regs * regs */ |
| 653 | rtbd r12, 0; /* interrupts enabled */ | ||
| 654 | nop; | ||
| 655 | 565 | ||
| 656 | /* Entry point used to return from an exception. */ | 566 | /* Entry point used to return from an exception. */ |
| 657 | C_ENTRY(ret_from_exc): | 567 | C_ENTRY(ret_from_exc): |
| 658 | set_bip; /* Ints masked for state restore*/ | 568 | lwi r11, r1, PTO + PT_MODE; |
| 659 | lwi r11, r1, PTO+PT_MODE; | ||
| 660 | bnei r11, 2f; /* See if returning to kernel mode, */ | 569 | bnei r11, 2f; /* See if returning to kernel mode, */ |
| 661 | /* ... if so, skip resched &c. */ | 570 | /* ... if so, skip resched &c. */ |
| 662 | 571 | ||
| @@ -687,32 +596,27 @@ C_ENTRY(ret_from_exc): | |||
| 687 | * traps), but signal handlers may want to examine or change the | 596 | * traps), but signal handlers may want to examine or change the |
| 688 | * complete register state. Here we save anything not saved by | 597 | * complete register state. Here we save anything not saved by |
| 689 | * the normal entry sequence, so that it may be safely restored | 598 | * the normal entry sequence, so that it may be safely restored |
| 690 | * (in a possibly modified form) after do_signal returns. | 599 | * (in a possibly modified form) after do_signal returns. */ |
| 691 | * store return registers separately because this macros is use | 600 | addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ |
| 692 | * for others exceptions */ | ||
| 693 | la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ | ||
| 694 | addi r7, r0, 0; /* Arg 3: int in_syscall */ | 601 | addi r7, r0, 0; /* Arg 3: int in_syscall */ |
| 695 | bralid r15, do_signal; /* Handle any signals */ | 602 | bralid r15, do_signal; /* Handle any signals */ |
| 696 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ | 603 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ |
| 697 | 604 | ||
| 698 | /* Finally, return to user state. */ | 605 | /* Finally, return to user state. */ |
| 699 | 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ | 606 | 1: set_bip; /* Ints masked for state restore */ |
| 700 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ | 607 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ |
| 701 | VM_OFF; | 608 | VM_OFF; |
| 702 | tophys(r1,r1); | 609 | tophys(r1,r1); |
| 703 | 610 | ||
| 704 | lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | ||
| 705 | lwi r4, r1, PTO+PT_R4; | ||
| 706 | RESTORE_REGS; | 611 | RESTORE_REGS; |
| 707 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | 612 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ |
| 708 | 613 | ||
| 709 | lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */ | 614 | lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */ |
| 710 | bri 6f; | 615 | bri 6f; |
| 711 | /* Return to kernel state. */ | 616 | /* Return to kernel state. */ |
| 712 | 2: VM_OFF; | 617 | 2: set_bip; /* Ints masked for state restore */ |
| 618 | VM_OFF; | ||
| 713 | tophys(r1,r1); | 619 | tophys(r1,r1); |
| 714 | lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | ||
| 715 | lwi r4, r1, PTO+PT_R4; | ||
| 716 | RESTORE_REGS; | 620 | RESTORE_REGS; |
| 717 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | 621 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ |
| 718 | 622 | ||
| @@ -736,36 +640,23 @@ C_ENTRY(_interrupt): | |||
| 736 | /* MS: we are in physical address */ | 640 | /* MS: we are in physical address */ |
| 737 | /* Save registers, switch to proper stack, convert SP to virtual.*/ | 641 | /* Save registers, switch to proper stack, convert SP to virtual.*/ |
| 738 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) | 642 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) |
| 739 | swi r11, r0, TOPHYS(PER_CPU(R11_SAVE)); | ||
| 740 | /* MS: See if already in kernel mode. */ | 643 | /* MS: See if already in kernel mode. */ |
| 741 | lwi r11, r0, TOPHYS(PER_CPU(KM)); | 644 | mfs r1, rmsr |
| 742 | beqi r11, 1f; /* MS: Jump ahead if coming from user */ | 645 | nop |
| 646 | andi r1, r1, MSR_UMS | ||
| 647 | bnei r1, 1f | ||
| 743 | 648 | ||
| 744 | /* Kernel-mode state save. */ | 649 | /* Kernel-mode state save. */ |
| 745 | or r11, r1, r0 | 650 | lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) |
| 746 | tophys(r1,r11); /* MS: I have in r1 physical address where stack is */ | 651 | tophys(r1,r1); /* MS: I have in r1 physical address where stack is */ |
| 747 | /* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/ | ||
| 748 | swi r11, r1, (PT_R1 - PT_SIZE); | ||
| 749 | /* MS: restore r11 because of saving in SAVE_REGS */ | ||
| 750 | lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE)); | ||
| 751 | /* save registers */ | 652 | /* save registers */ |
| 752 | /* MS: Make room on the stack -> activation record */ | 653 | /* MS: Make room on the stack -> activation record */ |
| 753 | addik r1, r1, -STATE_SAVE_SIZE; | 654 | addik r1, r1, -STATE_SAVE_SIZE; |
| 754 | /* MS: store return registers separately because | ||
| 755 | * this macros is use for others exceptions */ | ||
| 756 | swi r3, r1, PTO + PT_R3; | ||
| 757 | swi r4, r1, PTO + PT_R4; | ||
| 758 | SAVE_REGS | 655 | SAVE_REGS |
| 759 | /* MS: store mode */ | ||
| 760 | addi r11, r0, 1; /* MS: Was in kernel-mode. */ | ||
| 761 | swi r11, r1, PTO + PT_MODE; /* MS: and save it */ | ||
| 762 | brid 2f; | 656 | brid 2f; |
| 763 | nop; /* MS: Fill delay slot */ | 657 | swi r1, r1, PTO + PT_MODE; /* 0 - user mode, 1 - kernel mode */ |
| 764 | |||
| 765 | 1: | 658 | 1: |
| 766 | /* User-mode state save. */ | 659 | /* User-mode state save. */ |
| 767 | /* MS: restore r11 -> FIXME move before SAVE_REG */ | ||
| 768 | lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE)); | ||
| 769 | /* MS: get the saved current */ | 660 | /* MS: get the saved current */ |
| 770 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); | 661 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
| 771 | tophys(r1,r1); | 662 | tophys(r1,r1); |
| @@ -774,27 +665,18 @@ C_ENTRY(_interrupt): | |||
| 774 | tophys(r1,r1); | 665 | tophys(r1,r1); |
| 775 | /* save registers */ | 666 | /* save registers */ |
| 776 | addik r1, r1, -STATE_SAVE_SIZE; | 667 | addik r1, r1, -STATE_SAVE_SIZE; |
| 777 | swi r3, r1, PTO+PT_R3; | ||
| 778 | swi r4, r1, PTO+PT_R4; | ||
| 779 | SAVE_REGS | 668 | SAVE_REGS |
| 780 | /* calculate mode */ | 669 | /* calculate mode */ |
| 781 | swi r0, r1, PTO + PT_MODE; | 670 | swi r0, r1, PTO + PT_MODE; |
| 782 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); | 671 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); |
| 783 | swi r11, r1, PTO+PT_R1; | 672 | swi r11, r1, PTO+PT_R1; |
| 784 | /* setup kernel mode to KM */ | 673 | clear_ums; |
| 785 | addi r11, r0, 1; | ||
| 786 | swi r11, r0, TOPHYS(PER_CPU(KM)); | ||
| 787 | |||
| 788 | 2: | 674 | 2: |
| 789 | lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); | 675 | lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
| 790 | swi r0, r1, PTO + PT_R0; | ||
| 791 | tovirt(r1,r1) | 676 | tovirt(r1,r1) |
| 792 | la r5, r1, PTO; | 677 | addik r15, r0, irq_call; |
| 793 | set_vms; | 678 | irq_call:rtbd r0, do_IRQ; |
| 794 | la r11, r0, do_IRQ; | 679 | addik r5, r1, PTO; |
| 795 | la r15, r0, irq_call; | ||
| 796 | irq_call:rtbd r11, 0; | ||
| 797 | nop; | ||
| 798 | 680 | ||
| 799 | /* MS: we are in virtual mode */ | 681 | /* MS: we are in virtual mode */ |
| 800 | ret_from_irq: | 682 | ret_from_irq: |
| @@ -815,7 +697,7 @@ ret_from_irq: | |||
| 815 | beqid r11, no_intr_resched | 697 | beqid r11, no_intr_resched |
| 816 | /* Handle a signal return; Pending signals should be in r18. */ | 698 | /* Handle a signal return; Pending signals should be in r18. */ |
| 817 | addi r7, r0, 0; /* Arg 3: int in_syscall */ | 699 | addi r7, r0, 0; /* Arg 3: int in_syscall */ |
| 818 | la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ | 700 | addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ |
| 819 | bralid r15, do_signal; /* Handle any signals */ | 701 | bralid r15, do_signal; /* Handle any signals */ |
| 820 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ | 702 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ |
| 821 | 703 | ||
| @@ -823,12 +705,9 @@ ret_from_irq: | |||
| 823 | no_intr_resched: | 705 | no_intr_resched: |
| 824 | /* Disable interrupts, we are now committed to the state restore */ | 706 | /* Disable interrupts, we are now committed to the state restore */ |
| 825 | disable_irq | 707 | disable_irq |
| 826 | swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */ | ||
| 827 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); | 708 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); |
| 828 | VM_OFF; | 709 | VM_OFF; |
| 829 | tophys(r1,r1); | 710 | tophys(r1,r1); |
| 830 | lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ | ||
| 831 | lwi r4, r1, PTO + PT_R4; | ||
| 832 | RESTORE_REGS | 711 | RESTORE_REGS |
| 833 | addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */ | 712 | addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */ |
| 834 | lwi r1, r1, PT_R1 - PT_SIZE; | 713 | lwi r1, r1, PT_R1 - PT_SIZE; |
| @@ -857,8 +736,6 @@ restore: | |||
| 857 | #endif | 736 | #endif |
| 858 | VM_OFF /* MS: turn off MMU */ | 737 | VM_OFF /* MS: turn off MMU */ |
| 859 | tophys(r1,r1) | 738 | tophys(r1,r1) |
| 860 | lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ | ||
| 861 | lwi r4, r1, PTO + PT_R4; | ||
| 862 | RESTORE_REGS | 739 | RESTORE_REGS |
| 863 | addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */ | 740 | addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */ |
| 864 | tovirt(r1,r1); | 741 | tovirt(r1,r1); |
| @@ -868,86 +745,91 @@ IRQ_return: /* MS: Make global symbol for debugging */ | |||
| 868 | nop | 745 | nop |
| 869 | 746 | ||
| 870 | /* | 747 | /* |
| 871 | * `Debug' trap | 748 | * Debug trap for KGDB. Enter to _debug_exception by brki r16, 0x18 |
| 872 | * We enter dbtrap in "BIP" (breakpoint) mode. | 749 | * and call handling function with saved pt_regs |
| 873 | * So we exit the breakpoint mode with an 'rtbd' and proceed with the | ||
| 874 | * original dbtrap. | ||
| 875 | * however, wait to save state first | ||
| 876 | */ | 750 | */ |
| 877 | C_ENTRY(_debug_exception): | 751 | C_ENTRY(_debug_exception): |
| 878 | /* BIP bit is set on entry, no interrupts can occur */ | 752 | /* BIP bit is set on entry, no interrupts can occur */ |
| 879 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) | 753 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) |
| 880 | 754 | ||
| 881 | swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */ | 755 | mfs r1, rmsr |
| 882 | set_bip; /*equalize initial state for all possible entries*/ | 756 | nop |
| 883 | clear_eip; | 757 | andi r1, r1, MSR_UMS |
| 884 | enable_irq; | 758 | bnei r1, 1f |
| 885 | lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/ | 759 | /* MS: Kernel-mode state save - kgdb */ |
| 886 | beqi r11, 1f; /* Jump ahead if coming from user */ | 760 | lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/ |
| 887 | /* Kernel-mode state save. */ | ||
| 888 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/ | ||
| 889 | tophys(r1,r11); | ||
| 890 | swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */ | ||
| 891 | lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */ | ||
| 892 | 761 | ||
| 893 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ | 762 | /* BIP bit is set on entry, no interrupts can occur */ |
| 894 | swi r3, r1, PTO + PT_R3; | 763 | addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; |
| 895 | swi r4, r1, PTO + PT_R4; | ||
| 896 | SAVE_REGS; | 764 | SAVE_REGS; |
| 765 | /* save all regs to pt_reg structure */ | ||
| 766 | swi r0, r1, PTO+PT_R0; /* R0 must be saved too */ | ||
| 767 | swi r14, r1, PTO+PT_R14 /* rewrite saved R14 value */ | ||
| 768 | swi r16, r1, PTO+PT_R16 | ||
| 769 | swi r16, r1, PTO+PT_PC; /* PC and r16 are the same */ | ||
| 770 | swi r17, r1, PTO+PT_R17 | ||
| 771 | /* save special purpose registers to pt_regs */ | ||
| 772 | mfs r11, rear; | ||
| 773 | swi r11, r1, PTO+PT_EAR; | ||
| 774 | mfs r11, resr; | ||
| 775 | swi r11, r1, PTO+PT_ESR; | ||
| 776 | mfs r11, rfsr; | ||
| 777 | swi r11, r1, PTO+PT_FSR; | ||
| 778 | |||
| 779 | /* stack pointer is a physical address and was decreased | ||
| 780 | * by STATE_SAVE_SIZE, but we need to save the correct R1 value */ | ||
| 781 | addik r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + STATE_SAVE_SIZE; | ||
| 782 | swi r11, r1, PTO+PT_R1 | ||
| 783 | /* MS: r31 - current pointer isn't changed */ | ||
| 784 | tovirt(r1,r1) | ||
| 785 | #ifdef CONFIG_KGDB | ||
| 786 | addi r5, r1, PTO /* pass pt_reg address as the first arg */ | ||
| 787 | la r15, r0, dbtrap_call; /* return address */ | ||
| 788 | rtbd r0, microblaze_kgdb_break | ||
| 789 | nop; | ||
| 790 | #endif | ||
| 791 | /* MS: Place handler for brki from kernel space if KGDB is OFF. | ||
| 792 | * It is very unlikely that another brki instruction is called. */ | ||
| 793 | bri 0 | ||
| 897 | 794 | ||
| 898 | addi r11, r0, 1; /* Was in kernel-mode. */ | 795 | /* MS: User-mode state save - gdb */ |
| 899 | swi r11, r1, PTO + PT_MODE; | 796 | 1: lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ |
| 900 | brid 2f; | ||
| 901 | nop; /* Fill delay slot */ | ||
| 902 | 1: /* User-mode state save. */ | ||
| 903 | lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */ | ||
| 904 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ | ||
| 905 | tophys(r1,r1); | 797 | tophys(r1,r1); |
| 906 | lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ | 798 | lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ |
| 907 | addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */ | 799 | addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */ |
| 908 | tophys(r1,r1); | 800 | tophys(r1,r1); |
| 909 | 801 | ||
| 910 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ | 802 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ |
| 911 | swi r3, r1, PTO + PT_R3; | ||
| 912 | swi r4, r1, PTO + PT_R4; | ||
| 913 | SAVE_REGS; | 803 | SAVE_REGS; |
| 914 | 804 | swi r17, r1, PTO+PT_R17; | |
| 915 | swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */ | 805 | swi r16, r1, PTO+PT_R16; |
| 806 | swi r16, r1, PTO+PT_PC; /* Save LP */ | ||
| 807 | swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */ | ||
| 916 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); | 808 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); |
| 917 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ | 809 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ |
| 918 | addi r11, r0, 1; | 810 | lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
| 919 | swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ | ||
| 920 | 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); | ||
| 921 | /* Save away the syscall number. */ | ||
| 922 | swi r0, r1, PTO+PT_R0; | ||
| 923 | tovirt(r1,r1) | 811 | tovirt(r1,r1) |
| 924 | |||
| 925 | addi r5, r0, SIGTRAP /* send the trap signal */ | ||
| 926 | add r6, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | ||
| 927 | addk r7, r0, r0 /* 3rd param zero */ | ||
| 928 | |||
| 929 | set_vms; | 812 | set_vms; |
| 930 | la r11, r0, send_sig; | 813 | addik r5, r1, PTO; |
| 931 | la r15, r0, dbtrap_call; | 814 | addik r15, r0, dbtrap_call; |
| 932 | dbtrap_call: rtbd r11, 0; | 815 | dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */ |
| 933 | nop; | 816 | rtbd r0, sw_exception |
| 817 | nop | ||
| 934 | 818 | ||
| 935 | set_bip; /* Ints masked for state restore*/ | 819 | /* MS: The first instruction for the second part of the gdb/kgdb */ |
| 936 | lwi r11, r1, PTO+PT_MODE; | 820 | set_bip; /* Ints masked for state restore */ |
| 821 | lwi r11, r1, PTO + PT_MODE; | ||
| 937 | bnei r11, 2f; | 822 | bnei r11, 2f; |
| 938 | 823 | /* MS: Return to user space - gdb */ | |
| 939 | /* Get current task ptr into r11 */ | 824 | /* Get current task ptr into r11 */ |
| 940 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ | 825 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ |
| 941 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ | 826 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ |
| 942 | andi r11, r11, _TIF_NEED_RESCHED; | 827 | andi r11, r11, _TIF_NEED_RESCHED; |
| 943 | beqi r11, 5f; | 828 | beqi r11, 5f; |
| 944 | 829 | ||
| 945 | /* Call the scheduler before returning from a syscall/trap. */ | 830 | /* Call the scheduler before returning from a syscall/trap. */ |
| 946 | |||
| 947 | bralid r15, schedule; /* Call scheduler */ | 831 | bralid r15, schedule; /* Call scheduler */ |
| 948 | nop; /* delay slot */ | 832 | nop; /* delay slot */ |
| 949 | /* XXX Is PT_DTRACE handling needed here? */ | ||
| 950 | /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */ | ||
| 951 | 833 | ||
| 952 | /* Maybe handle a signal */ | 834 | /* Maybe handle a signal */ |
| 953 | 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ | 835 | 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ |
| @@ -955,54 +837,40 @@ dbtrap_call: rtbd r11, 0; | |||
| 955 | andi r11, r11, _TIF_SIGPENDING; | 837 | andi r11, r11, _TIF_SIGPENDING; |
| 956 | beqi r11, 1f; /* Signals to handle, handle them */ | 838 | beqi r11, 1f; /* Signals to handle, handle them */ |
| 957 | 839 | ||
| 958 | /* Handle a signal return; Pending signals should be in r18. */ | 840 | addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ |
| 959 | /* Not all registers are saved by the normal trap/interrupt entry | ||
| 960 | points (for instance, call-saved registers (because the normal | ||
| 961 | C-compiler calling sequence in the kernel makes sure they're | ||
| 962 | preserved), and call-clobbered registers in the case of | ||
| 963 | traps), but signal handlers may want to examine or change the | ||
| 964 | complete register state. Here we save anything not saved by | ||
| 965 | the normal entry sequence, so that it may be safely restored | ||
| 966 | (in a possibly modified form) after do_signal returns. */ | ||
| 967 | |||
| 968 | la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ | ||
| 969 | addi r7, r0, 0; /* Arg 3: int in_syscall */ | 841 | addi r7, r0, 0; /* Arg 3: int in_syscall */ |
| 970 | bralid r15, do_signal; /* Handle any signals */ | 842 | bralid r15, do_signal; /* Handle any signals */ |
| 971 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ | 843 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ |
| 972 | 844 | ||
| 973 | |||
| 974 | /* Finally, return to user state. */ | 845 | /* Finally, return to user state. */ |
| 975 | 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ | 846 | 1: swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ |
| 976 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ | ||
| 977 | VM_OFF; | 847 | VM_OFF; |
| 978 | tophys(r1,r1); | 848 | tophys(r1,r1); |
| 979 | 849 | /* MS: Restore all regs */ | |
| 980 | lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | ||
| 981 | lwi r4, r1, PTO+PT_R4; | ||
| 982 | RESTORE_REGS | 850 | RESTORE_REGS |
| 983 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | 851 | lwi r17, r1, PTO+PT_R17; |
| 984 | 852 | lwi r16, r1, PTO+PT_R16; | |
| 985 | 853 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space */ | |
| 986 | lwi r1, r1, PT_R1 - PT_SIZE; | 854 | lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */ |
| 987 | /* Restore user stack pointer. */ | 855 | DBTRAP_return_user: /* MS: Make global symbol for debugging */ |
| 988 | bri 6f; | 856 | rtbd r16, 0; /* MS: Instructions to return from a debug trap */ |
| 857 | nop; | ||
| 989 | 858 | ||
| 990 | /* Return to kernel state. */ | 859 | /* MS: Return to kernel state - kgdb */ |
| 991 | 2: VM_OFF; | 860 | 2: VM_OFF; |
| 992 | tophys(r1,r1); | 861 | tophys(r1,r1); |
| 993 | lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | 862 | /* MS: Restore all regs */ |
| 994 | lwi r4, r1, PTO+PT_R4; | ||
| 995 | RESTORE_REGS | 863 | RESTORE_REGS |
| 996 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | 864 | lwi r14, r1, PTO+PT_R14; |
| 997 | 865 | lwi r16, r1, PTO+PT_PC; | |
| 866 | lwi r17, r1, PTO+PT_R17; | ||
| 867 | addik r1, r1, STATE_SAVE_SIZE; /* MS: Clean up stack space */ | ||
| 998 | tovirt(r1,r1); | 868 | tovirt(r1,r1); |
| 999 | 6: | 869 | DBTRAP_return_kernel: /* MS: Make global symbol for debugging */ |
| 1000 | DBTRAP_return: /* Make global symbol for debugging */ | 870 | rtbd r16, 0; /* MS: Instructions to return from a debug trap */ |
| 1001 | rtbd r14, 0; /* Instructions to return from an IRQ */ | ||
| 1002 | nop; | 871 | nop; |
| 1003 | 872 | ||
| 1004 | 873 | ||
| 1005 | |||
| 1006 | ENTRY(_switch_to) | 874 | ENTRY(_switch_to) |
| 1007 | /* prepare return value */ | 875 | /* prepare return value */ |
| 1008 | addk r3, r0, CURRENT_TASK | 876 | addk r3, r0, CURRENT_TASK |
| @@ -1037,16 +905,12 @@ ENTRY(_switch_to) | |||
| 1037 | swi r30, r11, CC_R30 | 905 | swi r30, r11, CC_R30 |
| 1038 | /* special purpose registers */ | 906 | /* special purpose registers */ |
| 1039 | mfs r12, rmsr | 907 | mfs r12, rmsr |
| 1040 | nop | ||
| 1041 | swi r12, r11, CC_MSR | 908 | swi r12, r11, CC_MSR |
| 1042 | mfs r12, rear | 909 | mfs r12, rear |
| 1043 | nop | ||
| 1044 | swi r12, r11, CC_EAR | 910 | swi r12, r11, CC_EAR |
| 1045 | mfs r12, resr | 911 | mfs r12, resr |
| 1046 | nop | ||
| 1047 | swi r12, r11, CC_ESR | 912 | swi r12, r11, CC_ESR |
| 1048 | mfs r12, rfsr | 913 | mfs r12, rfsr |
| 1049 | nop | ||
| 1050 | swi r12, r11, CC_FSR | 914 | swi r12, r11, CC_FSR |
| 1051 | 915 | ||
| 1052 | /* update r31, the current-give me pointer to task which will be next */ | 916 | /* update r31, the current-give me pointer to task which will be next */ |
| @@ -1085,10 +949,8 @@ ENTRY(_switch_to) | |||
| 1085 | /* special purpose registers */ | 949 | /* special purpose registers */ |
| 1086 | lwi r12, r11, CC_FSR | 950 | lwi r12, r11, CC_FSR |
| 1087 | mts rfsr, r12 | 951 | mts rfsr, r12 |
| 1088 | nop | ||
| 1089 | lwi r12, r11, CC_MSR | 952 | lwi r12, r11, CC_MSR |
| 1090 | mts rmsr, r12 | 953 | mts rmsr, r12 |
| 1091 | nop | ||
| 1092 | 954 | ||
| 1093 | rtsd r15, 8 | 955 | rtsd r15, 8 |
| 1094 | nop | 956 | nop |
| @@ -1096,15 +958,6 @@ ENTRY(_switch_to) | |||
| 1096 | ENTRY(_reset) | 958 | ENTRY(_reset) |
| 1097 | brai 0x70; /* Jump back to FS-boot */ | 959 | brai 0x70; /* Jump back to FS-boot */ |
| 1098 | 960 | ||
| 1099 | ENTRY(_break) | ||
| 1100 | mfs r5, rmsr | ||
| 1101 | nop | ||
| 1102 | swi r5, r0, 0x250 + TOPHYS(r0_ram) | ||
| 1103 | mfs r5, resr | ||
| 1104 | nop | ||
| 1105 | swi r5, r0, 0x254 + TOPHYS(r0_ram) | ||
| 1106 | bri 0 | ||
| 1107 | |||
| 1108 | /* These are compiled and loaded into high memory, then | 961 | /* These are compiled and loaded into high memory, then |
| 1109 | * copied into place in mach_early_setup */ | 962 | * copied into place in mach_early_setup */ |
| 1110 | .section .init.ivt, "ax" | 963 | .section .init.ivt, "ax" |
| @@ -1116,14 +969,38 @@ ENTRY(_break) | |||
| 1116 | nop | 969 | nop |
| 1117 | brai TOPHYS(_user_exception); /* syscall handler */ | 970 | brai TOPHYS(_user_exception); /* syscall handler */ |
| 1118 | brai TOPHYS(_interrupt); /* Interrupt handler */ | 971 | brai TOPHYS(_interrupt); /* Interrupt handler */ |
| 1119 | brai TOPHYS(_break); /* nmi trap handler */ | 972 | brai TOPHYS(_debug_exception); /* debug trap handler */ |
| 1120 | brai TOPHYS(_hw_exception_handler); /* HW exception handler */ | 973 | brai TOPHYS(_hw_exception_handler); /* HW exception handler */ |
| 1121 | 974 | ||
| 1122 | .org 0x60 | ||
| 1123 | brai TOPHYS(_debug_exception); /* debug trap handler*/ | ||
| 1124 | |||
| 1125 | .section .rodata,"a" | 975 | .section .rodata,"a" |
| 1126 | #include "syscall_table.S" | 976 | #include "syscall_table.S" |
| 1127 | 977 | ||
| 1128 | syscall_table_size=(.-sys_call_table) | 978 | syscall_table_size=(.-sys_call_table) |
| 1129 | 979 | ||
| 980 | type_SYSCALL: | ||
| 981 | .ascii "SYSCALL\0" | ||
| 982 | type_IRQ: | ||
| 983 | .ascii "IRQ\0" | ||
| 984 | type_IRQ_PREEMPT: | ||
| 985 | .ascii "IRQ (PREEMPTED)\0" | ||
| 986 | type_SYSCALL_PREEMPT: | ||
| 987 | .ascii " SYSCALL (PREEMPTED)\0" | ||
| 988 | |||
| 989 | /* | ||
| 990 | * Trap decoding for stack unwinder | ||
| 991 | * Tuples are (start addr, end addr, string) | ||
| 992 | * If return address lies on [start addr, end addr], | ||
| 993 | * unwinder displays 'string' | ||
| 994 | */ | ||
| 995 | |||
| 996 | .align 4 | ||
| 997 | .global microblaze_trap_handlers | ||
| 998 | microblaze_trap_handlers: | ||
| 999 | /* Exact matches come first */ | ||
| 1000 | .word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL | ||
| 1001 | .word ret_from_irq ; .word ret_from_irq ; .word type_IRQ | ||
| 1002 | /* Fuzzy matches go here */ | ||
| 1003 | .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT | ||
| 1004 | .word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT | ||
| 1005 | /* End of table */ | ||
| 1006 | .word 0 ; .word 0 ; .word 0 | ||
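
The `microblaze_trap_handlers` table added at the end of entry.S is consumed by the new stack unwinder (arch/microblaze/kernel/unwind.c, further down in this diff). Below is a C-side sketch of what each three-word tuple corresponds to; the authoritative `struct trap_handler_info` lives in the new asm/unwind.h header, which is not part of the hunks shown here, so the field names are inferred from how unwind.c dereferences them.

```c
/* Sketch only: C view of the microblaze_trap_handlers tuples emitted above.
 * The real definition is in the new <asm/unwind.h> (not part of the hunks
 * shown); field names are inferred from their use in unwind.c below. */
struct trap_handler_info {
	unsigned long start_addr;   /* first return address of the range */
	unsigned long end_addr;     /* last return address of the range */
	const char *trap_name;      /* string the unwinder prints */
};

/* The table ends with the all-zero entry (".word 0; .word 0; .word 0") */
extern struct trap_handler_info microblaze_trap_handlers;
```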
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c index 02cbdfe5aa8d..b98ee8d0c1cd 100644 --- a/arch/microblaze/kernel/exceptions.c +++ b/arch/microblaze/kernel/exceptions.c | |||
| @@ -48,12 +48,17 @@ void die(const char *str, struct pt_regs *fp, long err) | |||
| 48 | do_exit(err); | 48 | do_exit(err); |
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | /* for user application debugging */ | ||
| 52 | void sw_exception(struct pt_regs *regs) | ||
| 53 | { | ||
| 54 | _exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16); | ||
| 55 | } | ||
| 56 | |||
| 51 | void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) | 57 | void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) |
| 52 | { | 58 | { |
| 53 | siginfo_t info; | 59 | siginfo_t info; |
| 54 | 60 | ||
| 55 | if (kernel_mode(regs)) { | 61 | if (kernel_mode(regs)) { |
| 56 | debugger(regs); | ||
| 57 | die("Exception in kernel mode", regs, signr); | 62 | die("Exception in kernel mode", regs, signr); |
| 58 | } | 63 | } |
| 59 | info.si_signo = signr; | 64 | info.si_signo = signr; |
| @@ -143,7 +148,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, | |||
| 143 | #ifdef CONFIG_MMU | 148 | #ifdef CONFIG_MMU |
| 144 | case MICROBLAZE_PRIVILEGED_EXCEPTION: | 149 | case MICROBLAZE_PRIVILEGED_EXCEPTION: |
| 145 | pr_debug(KERN_WARNING "Privileged exception\n"); | 150 | pr_debug(KERN_WARNING "Privileged exception\n"); |
| 146 | /* "brk r0,r0" - used as debug breakpoint */ | 151 | /* "brk r0,r0" - used as debug breakpoint - old toolchain */ |
| 147 | if (get_user(code, (unsigned long *)regs->pc) == 0 | 152 | if (get_user(code, (unsigned long *)regs->pc) == 0 |
| 148 | && code == 0x980c0000) { | 153 | && code == 0x980c0000) { |
| 149 | _exception(SIGTRAP, regs, TRAP_BRKPT, addr); | 154 | _exception(SIGTRAP, regs, TRAP_BRKPT, addr); |
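
`sw_exception()` is the C landing point of the user-space half of the new `brki r16, 0x18` debug trap handled by `_debug_exception` above: it converts the trap into a SIGTRAP with si_code TRAP_BRKPT aimed at the trapping address saved in r16. The snippet below is a minimal user-space illustration, not part of the patch; since a real debugger removes the breakpoint instruction before resuming, the handler here simply exits instead of returning into the same trap.

```c
/* Illustration only (not from the patch): observing the SIGTRAP that
 * sw_exception() delivers when a user program executes the debug trap.
 * The handler exits instead of returning, because without a debugger
 * rewriting the instruction the trap would fire again on resume. */
#include <signal.h>
#include <unistd.h>

static void trap_handler(int sig, siginfo_t *info, void *ctx)
{
	static const char msg[] = "got SIGTRAP (TRAP_BRKPT)\n";
	write(STDOUT_FILENO, msg, sizeof(msg) - 1);
	_exit(0);
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction	= trap_handler,
		.sa_flags	= SA_SIGINFO,
	};

	sigaction(SIGTRAP, &sa, NULL);
	/* the software breakpoint instruction vectored to _debug_exception */
	__asm__ __volatile__("brki r16, 0x18;" "nop;");
	return 1;	/* not reached */
}
```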
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index 1bf739888260..42434008209e 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S | |||
| @@ -43,10 +43,10 @@ | |||
| 43 | .global empty_zero_page | 43 | .global empty_zero_page |
| 44 | .align 12 | 44 | .align 12 |
| 45 | empty_zero_page: | 45 | empty_zero_page: |
| 46 | .space 4096 | 46 | .space PAGE_SIZE |
| 47 | .global swapper_pg_dir | 47 | .global swapper_pg_dir |
| 48 | swapper_pg_dir: | 48 | swapper_pg_dir: |
| 49 | .space 4096 | 49 | .space PAGE_SIZE |
| 50 | 50 | ||
| 51 | #endif /* CONFIG_MMU */ | 51 | #endif /* CONFIG_MMU */ |
| 52 | 52 | ||
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S index 995a2123635b..781195438ee6 100644 --- a/arch/microblaze/kernel/hw_exception_handler.S +++ b/arch/microblaze/kernel/hw_exception_handler.S | |||
| @@ -78,9 +78,6 @@ | |||
| 78 | #include <asm/asm-offsets.h> | 78 | #include <asm/asm-offsets.h> |
| 79 | 79 | ||
| 80 | /* Helpful Macros */ | 80 | /* Helpful Macros */ |
| 81 | #ifndef CONFIG_MMU | ||
| 82 | #define EX_HANDLER_STACK_SIZ (4*19) | ||
| 83 | #endif | ||
| 84 | #define NUM_TO_REG(num) r ## num | 81 | #define NUM_TO_REG(num) r ## num |
| 85 | 82 | ||
| 86 | #ifdef CONFIG_MMU | 83 | #ifdef CONFIG_MMU |
| @@ -988,6 +985,7 @@ ex_unaligned_fixup: | |||
| 988 | .end _unaligned_data_exception | 985 | .end _unaligned_data_exception |
| 989 | #endif /* CONFIG_MMU */ | 986 | #endif /* CONFIG_MMU */ |
| 990 | 987 | ||
| 988 | .global ex_handler_unhandled | ||
| 991 | ex_handler_unhandled: | 989 | ex_handler_unhandled: |
| 992 | /* FIXME add handle function for unhandled exception - dump register */ | 990 | /* FIXME add handle function for unhandled exception - dump register */ |
| 993 | bri 0 | 991 | bri 0 |
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c index 8f120aca123d..598f1fd61c89 100644 --- a/arch/microblaze/kernel/irq.c +++ b/arch/microblaze/kernel/irq.c | |||
| @@ -37,6 +37,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs) | |||
| 37 | { | 37 | { |
| 38 | unsigned int irq; | 38 | unsigned int irq; |
| 39 | struct pt_regs *old_regs = set_irq_regs(regs); | 39 | struct pt_regs *old_regs = set_irq_regs(regs); |
| 40 | trace_hardirqs_off(); | ||
| 40 | 41 | ||
| 41 | irq_enter(); | 42 | irq_enter(); |
| 42 | irq = get_irq(regs); | 43 | irq = get_irq(regs); |
| @@ -53,6 +54,7 @@ next_irq: | |||
| 53 | 54 | ||
| 54 | irq_exit(); | 55 | irq_exit(); |
| 55 | set_irq_regs(old_regs); | 56 | set_irq_regs(old_regs); |
| 57 | trace_hardirqs_on(); | ||
| 56 | } | 58 | } |
| 57 | 59 | ||
| 58 | int show_interrupts(struct seq_file *p, void *v) | 60 | int show_interrupts(struct seq_file *p, void *v) |
diff --git a/arch/microblaze/kernel/kgdb.c b/arch/microblaze/kernel/kgdb.c new file mode 100644 index 000000000000..bfc006b7f2d8 --- /dev/null +++ b/arch/microblaze/kernel/kgdb.c | |||
| @@ -0,0 +1,147 @@ | |||
| 1 | /* | ||
| 2 | * Microblaze KGDB support | ||
| 3 | * | ||
| 4 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 5 | * License. See the file "COPYING" in the main directory of this archive | ||
| 6 | * for more details. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/kgdb.h> | ||
| 10 | #include <linux/kdebug.h> | ||
| 11 | #include <linux/irq.h> | ||
| 12 | #include <linux/io.h> | ||
| 13 | #include <asm/cacheflush.h> | ||
| 14 | #include <asm/asm-offsets.h> | ||
| 15 | #include <asm/pvr.h> | ||
| 16 | |||
| 17 | #define GDB_REG 0 | ||
| 18 | #define GDB_PC 32 | ||
| 19 | #define GDB_MSR 33 | ||
| 20 | #define GDB_EAR 34 | ||
| 21 | #define GDB_ESR 35 | ||
| 22 | #define GDB_FSR 36 | ||
| 23 | #define GDB_BTR 37 | ||
| 24 | #define GDB_PVR 38 | ||
| 25 | #define GDB_REDR 50 | ||
| 26 | #define GDB_RPID 51 | ||
| 27 | #define GDB_RZPR 52 | ||
| 28 | #define GDB_RTLBX 53 | ||
| 29 | #define GDB_RTLBSX 54 /* mfs can't read it */ | ||
| 30 | #define GDB_RTLBLO 55 | ||
| 31 | #define GDB_RTLBHI 56 | ||
| 32 | |||
| 33 | /* keep pvr separately because it is unchangeable */ | ||
| 34 | struct pvr_s pvr; | ||
| 35 | |||
| 36 | void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | ||
| 37 | { | ||
| 38 | int i; | ||
| 39 | unsigned long *pt_regb = (unsigned long *)regs; | ||
| 40 | int temp; | ||
| 41 | /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */ | ||
| 42 | for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++) | ||
| 43 | gdb_regs[i] = pt_regb[i]; | ||
| 44 | |||
| 45 | /* Branch target register can't be changed */ | ||
| 46 | __asm__ __volatile__ ("mfs %0, rbtr;" : "=r"(temp) : ); | ||
| 47 | gdb_regs[GDB_BTR] = temp; | ||
| 48 | |||
| 49 | /* pvr part - we have 11 pvr regs */ | ||
| 50 | for (i = 0; i < sizeof(struct pvr_s)/4; i++) | ||
| 51 | gdb_regs[GDB_PVR + i] = pvr.pvr[i]; | ||
| 52 | |||
| 53 | /* read special registers - can't be changed */ | ||
| 54 | __asm__ __volatile__ ("mfs %0, redr;" : "=r"(temp) : ); | ||
| 55 | gdb_regs[GDB_REDR] = temp; | ||
| 56 | __asm__ __volatile__ ("mfs %0, rpid;" : "=r"(temp) : ); | ||
| 57 | gdb_regs[GDB_RPID] = temp; | ||
| 58 | __asm__ __volatile__ ("mfs %0, rzpr;" : "=r"(temp) : ); | ||
| 59 | gdb_regs[GDB_RZPR] = temp; | ||
| 60 | __asm__ __volatile__ ("mfs %0, rtlbx;" : "=r"(temp) : ); | ||
| 61 | gdb_regs[GDB_RTLBX] = temp; | ||
| 62 | __asm__ __volatile__ ("mfs %0, rtlblo;" : "=r"(temp) : ); | ||
| 63 | gdb_regs[GDB_RTLBLO] = temp; | ||
| 64 | __asm__ __volatile__ ("mfs %0, rtlbhi;" : "=r"(temp) : ); | ||
| 65 | gdb_regs[GDB_RTLBHI] = temp; | ||
| 66 | } | ||
| 67 | |||
| 68 | void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) | ||
| 69 | { | ||
| 70 | int i; | ||
| 71 | unsigned long *pt_regb = (unsigned long *)regs; | ||
| 72 | |||
| 73 | /* pt_regs and gdb_regs have the same 37 values. | ||
| 74 | * The rest of gdb_regs are unused and can't be changed. | ||
| 75 | * r0 register value can't be changed too. */ | ||
| 76 | for (i = 1; i < (sizeof(struct pt_regs) / 4) - 1; i++) | ||
| 77 | pt_regb[i] = gdb_regs[i]; | ||
| 78 | } | ||
| 79 | |||
| 80 | void microblaze_kgdb_break(struct pt_regs *regs) | ||
| 81 | { | ||
| 82 | if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0) | ||
| 83 | return 0; | ||
| 84 | |||
| 85 | /* Jump over the first arch_kgdb_breakpoint, which is a barrier | ||
| 86 | * needed to get kgdb working. The same solution is used for powerpc */ | ||
| 87 | if (*(u32 *) (regs->pc) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr)) | ||
| 88 | regs->pc += BREAK_INSTR_SIZE; | ||
| 89 | } | ||
| 90 | |||
| 91 | /* untested */ | ||
| 92 | void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) | ||
| 93 | { | ||
| 94 | int i; | ||
| 95 | unsigned long *pt_regb = (unsigned long *)(p->thread.regs); | ||
| 96 | |||
| 97 | /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */ | ||
| 98 | for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++) | ||
| 99 | gdb_regs[i] = pt_regb[i]; | ||
| 100 | |||
| 101 | /* pvr part - we have 11 pvr regs */ | ||
| 102 | for (i = 0; i < sizeof(struct pvr_s)/4; i++) | ||
| 103 | gdb_regs[GDB_PVR + i] = pvr.pvr[i]; | ||
| 104 | } | ||
| 105 | |||
| 106 | void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) | ||
| 107 | { | ||
| 108 | regs->pc = ip; | ||
| 109 | } | ||
| 110 | |||
| 111 | int kgdb_arch_handle_exception(int vector, int signo, int err_code, | ||
| 112 | char *remcom_in_buffer, char *remcom_out_buffer, | ||
| 113 | struct pt_regs *regs) | ||
| 114 | { | ||
| 115 | char *ptr; | ||
| 116 | unsigned long address; | ||
| 117 | int cpu = smp_processor_id(); | ||
| 118 | |||
| 119 | switch (remcom_in_buffer[0]) { | ||
| 120 | case 'c': | ||
| 121 | /* handle the optional parameter */ | ||
| 122 | ptr = &remcom_in_buffer[1]; | ||
| 123 | if (kgdb_hex2long(&ptr, &address)) | ||
| 124 | regs->pc = address; | ||
| 125 | |||
| 126 | return 0; | ||
| 127 | } | ||
| 128 | return -1; /* this means that we do not want to exit from the handler */ | ||
| 129 | } | ||
| 130 | |||
| 131 | int kgdb_arch_init(void) | ||
| 132 | { | ||
| 133 | get_pvr(&pvr); /* Fill PVR structure */ | ||
| 134 | return 0; | ||
| 135 | } | ||
| 136 | |||
| 137 | void kgdb_arch_exit(void) | ||
| 138 | { | ||
| 139 | /* Nothing to do */ | ||
| 140 | } | ||
| 141 | |||
| 142 | /* | ||
| 143 | * Global data | ||
| 144 | */ | ||
| 145 | struct kgdb_arch arch_kgdb_ops = { | ||
| 146 | .gdb_bpt_instr = {0xba, 0x0c, 0x00, 0x18}, /* brki r16, 0x18 */ | ||
| 147 | }; | ||
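
The `gdb_bpt_instr` bytes `0xba 0x0c 0x00 0x18` in `arch_kgdb_ops` encode `brki r16, 0x18`, the same vector-0x18 trap installed in the `.init.ivt` table above, so kernel-side breakpoints funnel through `_debug_exception` into `microblaze_kgdb_break()`. Below is a hedged sketch of the kernel-side trigger; the real `arch_kgdb_breakpoint()` helper comes from the new asm/kgdb.h header, which is not shown in this diff.

```c
/* Sketch only: how a kernel-side KGDB breakpoint is expected to reach
 * microblaze_kgdb_break().  The inline asm mirrors the gdb_bpt_instr bytes
 * above; the real arch_kgdb_breakpoint() helper lives in <asm/kgdb.h>,
 * which is not part of the hunks shown here. */
static inline void kgdb_breakpoint_sketch(void)
{
	/* trap to vector 0x18: _debug_exception saves pt_regs and, with
	 * CONFIG_KGDB enabled, branches to microblaze_kgdb_break() */
	__asm__ __volatile__("brki r16, 0x18;" "nop;");
}
```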
diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S index 0fb5fc6c1fc2..206da3da361f 100644 --- a/arch/microblaze/kernel/misc.S +++ b/arch/microblaze/kernel/misc.S | |||
| @@ -76,7 +76,7 @@ early_console_reg_tlb_alloc: | |||
| 76 | * the UARTs nice and early. We use a 4k real==virtual mapping. | 76 | * the UARTs nice and early. We use a 4k real==virtual mapping. |
| 77 | */ | 77 | */ |
| 78 | ori r4, r0, MICROBLAZE_TLB_SIZE - 1 | 78 | ori r4, r0, MICROBLAZE_TLB_SIZE - 1 |
| 79 | mts rtlbx, r4 /* TLB slot 2 */ | 79 | mts rtlbx, r4 /* TLB slot 63 */ |
| 80 | 80 | ||
| 81 | or r4,r5,r0 | 81 | or r4,r5,r0 |
| 82 | andi r4,r4,0xfffff000 | 82 | andi r4,r4,0xfffff000 |
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index 09bed44dfcd3..ba7c4b16ed35 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c | |||
| @@ -76,8 +76,11 @@ __setup("hlt", hlt_setup); | |||
| 76 | void default_idle(void) | 76 | void default_idle(void) |
| 77 | { | 77 | { |
| 78 | if (likely(hlt_counter)) { | 78 | if (likely(hlt_counter)) { |
| 79 | while (!need_resched()) | 79 | local_irq_disable(); |
| 80 | cpu_relax(); | 80 | stop_critical_timings(); |
| 81 | cpu_relax(); | ||
| 82 | start_critical_timings(); | ||
| 83 | local_irq_enable(); | ||
| 81 | } else { | 84 | } else { |
| 82 | clear_thread_flag(TIF_POLLING_NRFLAG); | 85 | clear_thread_flag(TIF_POLLING_NRFLAG); |
| 83 | smp_mb__after_clear_bit(); | 86 | smp_mb__after_clear_bit(); |
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c index a4a7770c6140..dc03ffc8174a 100644 --- a/arch/microblaze/kernel/ptrace.c +++ b/arch/microblaze/kernel/ptrace.c | |||
| @@ -38,6 +38,8 @@ | |||
| 38 | #include <asm/processor.h> | 38 | #include <asm/processor.h> |
| 39 | #include <linux/uaccess.h> | 39 | #include <linux/uaccess.h> |
| 40 | #include <asm/asm-offsets.h> | 40 | #include <asm/asm-offsets.h> |
| 41 | #include <asm/cacheflush.h> | ||
| 42 | #include <asm/io.h> | ||
| 41 | 43 | ||
| 42 | /* Returns the address where the register at REG_OFFS in P is stashed away. */ | 44 | /* Returns the address where the register at REG_OFFS in P is stashed away. */ |
| 43 | static microblaze_reg_t *reg_save_addr(unsigned reg_offs, | 45 | static microblaze_reg_t *reg_save_addr(unsigned reg_offs, |
| @@ -101,8 +103,21 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
| 101 | microblaze_reg_t *reg_addr = reg_save_addr(addr, child); | 103 | microblaze_reg_t *reg_addr = reg_save_addr(addr, child); |
| 102 | if (request == PTRACE_PEEKUSR) | 104 | if (request == PTRACE_PEEKUSR) |
| 103 | val = *reg_addr; | 105 | val = *reg_addr; |
| 104 | else | 106 | else { |
| 107 | #if 1 | ||
| 105 | *reg_addr = data; | 108 | *reg_addr = data; |
| 109 | #else | ||
| 110 | /* MS potential problem on WB system | ||
| 111 | * Be aware that reg_addr is virtual address | ||
| 112 | * virt_to_phys conversion is necessary. | ||
| 113 | * This could be sensible solution. | ||
| 114 | */ | ||
| 115 | u32 paddr = virt_to_phys((u32)reg_addr); | ||
| 116 | invalidate_icache_range(paddr, paddr + 4); | ||
| 117 | *reg_addr = data; | ||
| 118 | flush_dcache_range(paddr, paddr + 4); | ||
| 119 | #endif | ||
| 120 | } | ||
| 106 | } else | 121 | } else |
| 107 | rval = -EIO; | 122 | rval = -EIO; |
| 108 | 123 | ||
diff --git a/arch/microblaze/kernel/stacktrace.c b/arch/microblaze/kernel/stacktrace.c index 123692f22647..84bc6686102c 100644 --- a/arch/microblaze/kernel/stacktrace.c +++ b/arch/microblaze/kernel/stacktrace.c | |||
| @@ -14,52 +14,18 @@ | |||
| 14 | #include <linux/thread_info.h> | 14 | #include <linux/thread_info.h> |
| 15 | #include <linux/ptrace.h> | 15 | #include <linux/ptrace.h> |
| 16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
| 17 | #include <asm/unwind.h> | ||
| 17 | 18 | ||
| 18 | /* FIXME initial support */ | ||
| 19 | void save_stack_trace(struct stack_trace *trace) | 19 | void save_stack_trace(struct stack_trace *trace) |
| 20 | { | 20 | { |
| 21 | unsigned long *sp; | 21 | /* Exclude our helper functions from the trace*/ |
| 22 | unsigned long addr; | 22 | trace->skip += 2; |
| 23 | asm("addik %0, r1, 0" : "=r" (sp)); | 23 | microblaze_unwind(NULL, trace); |
| 24 | |||
| 25 | while (!kstack_end(sp)) { | ||
| 26 | addr = *sp++; | ||
| 27 | if (__kernel_text_address(addr)) { | ||
| 28 | if (trace->skip > 0) | ||
| 29 | trace->skip--; | ||
| 30 | else | ||
| 31 | trace->entries[trace->nr_entries++] = addr; | ||
| 32 | |||
| 33 | if (trace->nr_entries >= trace->max_entries) | ||
| 34 | break; | ||
| 35 | } | ||
| 36 | } | ||
| 37 | } | 24 | } |
| 38 | EXPORT_SYMBOL_GPL(save_stack_trace); | 25 | EXPORT_SYMBOL_GPL(save_stack_trace); |
| 39 | 26 | ||
| 40 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | 27 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) |
| 41 | { | 28 | { |
| 42 | unsigned int *sp; | 29 | microblaze_unwind(tsk, trace); |
| 43 | unsigned long addr; | ||
| 44 | |||
| 45 | struct thread_info *ti = task_thread_info(tsk); | ||
| 46 | |||
| 47 | if (tsk == current) | ||
| 48 | asm("addik %0, r1, 0" : "=r" (sp)); | ||
| 49 | else | ||
| 50 | sp = (unsigned int *)ti->cpu_context.r1; | ||
| 51 | |||
| 52 | while (!kstack_end(sp)) { | ||
| 53 | addr = *sp++; | ||
| 54 | if (__kernel_text_address(addr)) { | ||
| 55 | if (trace->skip > 0) | ||
| 56 | trace->skip--; | ||
| 57 | else | ||
| 58 | trace->entries[trace->nr_entries++] = addr; | ||
| 59 | |||
| 60 | if (trace->nr_entries >= trace->max_entries) | ||
| 61 | break; | ||
| 62 | } | ||
| 63 | } | ||
| 64 | } | 30 | } |
| 65 | EXPORT_SYMBOL_GPL(save_stack_trace_tsk); | 31 | EXPORT_SYMBOL_GPL(save_stack_trace_tsk); |
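
Both stack-trace entry points are now thin wrappers around `microblaze_unwind()`; the `trace->skip += 2` hides `save_stack_trace()` itself and its immediate caller from the output. A usage sketch of the generic API these wrappers back (field names follow `<linux/stacktrace.h>` of this kernel generation):

```c
/* Usage sketch, not part of the patch: capture and print a trace of the
 * current task through the unwinder-backed save_stack_trace(). */
#include <linux/kernel.h>
#include <linux/stacktrace.h>

void dump_current_trace(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
		.skip		= 0,	/* arch code already skips two frames */
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);
}
```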
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c index ed61b2f17719..b1380ae93ae1 100644 --- a/arch/microblaze/kernel/timer.c +++ b/arch/microblaze/kernel/timer.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <asm/prom.h> | 28 | #include <asm/prom.h> |
| 29 | #include <asm/irq.h> | 29 | #include <asm/irq.h> |
| 30 | #include <asm/system.h> | 30 | #include <asm/system.h> |
| 31 | #include <linux/cnt32_to_63.h> | ||
| 31 | 32 | ||
| 32 | #ifdef CONFIG_SELFMOD_TIMER | 33 | #ifdef CONFIG_SELFMOD_TIMER |
| 33 | #include <asm/selfmod.h> | 34 | #include <asm/selfmod.h> |
| @@ -135,7 +136,7 @@ static void microblaze_timer_set_mode(enum clock_event_mode mode, | |||
| 135 | static struct clock_event_device clockevent_microblaze_timer = { | 136 | static struct clock_event_device clockevent_microblaze_timer = { |
| 136 | .name = "microblaze_clockevent", | 137 | .name = "microblaze_clockevent", |
| 137 | .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, | 138 | .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, |
| 138 | .shift = 24, | 139 | .shift = 8, |
| 139 | .rating = 300, | 140 | .rating = 300, |
| 140 | .set_next_event = microblaze_timer_set_next_event, | 141 | .set_next_event = microblaze_timer_set_next_event, |
| 141 | .set_mode = microblaze_timer_set_mode, | 142 | .set_mode = microblaze_timer_set_mode, |
| @@ -195,7 +196,7 @@ static cycle_t microblaze_cc_read(const struct cyclecounter *cc) | |||
| 195 | static struct cyclecounter microblaze_cc = { | 196 | static struct cyclecounter microblaze_cc = { |
| 196 | .read = microblaze_cc_read, | 197 | .read = microblaze_cc_read, |
| 197 | .mask = CLOCKSOURCE_MASK(32), | 198 | .mask = CLOCKSOURCE_MASK(32), |
| 198 | .shift = 24, | 199 | .shift = 8, |
| 199 | }; | 200 | }; |
| 200 | 201 | ||
| 201 | int __init init_microblaze_timecounter(void) | 202 | int __init init_microblaze_timecounter(void) |
| @@ -213,7 +214,7 @@ static struct clocksource clocksource_microblaze = { | |||
| 213 | .rating = 300, | 214 | .rating = 300, |
| 214 | .read = microblaze_read, | 215 | .read = microblaze_read, |
| 215 | .mask = CLOCKSOURCE_MASK(32), | 216 | .mask = CLOCKSOURCE_MASK(32), |
| 216 | .shift = 24, /* I can shift it */ | 217 | .shift = 8, /* I can shift it */ |
| 217 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 218 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
| 218 | }; | 219 | }; |
| 219 | 220 | ||
| @@ -235,6 +236,12 @@ static int __init microblaze_clocksource_init(void) | |||
| 235 | return 0; | 236 | return 0; |
| 236 | } | 237 | } |
| 237 | 238 | ||
| 239 | /* | ||
| 240 | * We have to protect accesses before timer initialization | ||
| 241 | * and return 0 for sched_clock function below. | ||
| 242 | */ | ||
| 243 | static int timer_initialized; | ||
| 244 | |||
| 238 | void __init time_init(void) | 245 | void __init time_init(void) |
| 239 | { | 246 | { |
| 240 | u32 irq, i = 0; | 247 | u32 irq, i = 0; |
| @@ -289,4 +296,15 @@ void __init time_init(void) | |||
| 289 | #endif | 296 | #endif |
| 290 | microblaze_clocksource_init(); | 297 | microblaze_clocksource_init(); |
| 291 | microblaze_clockevent_init(); | 298 | microblaze_clockevent_init(); |
| 299 | timer_initialized = 1; | ||
| 300 | } | ||
| 301 | |||
| 302 | unsigned long long notrace sched_clock(void) | ||
| 303 | { | ||
| 304 | if (timer_initialized) { | ||
| 305 | struct clocksource *cs = &clocksource_microblaze; | ||
| 306 | cycle_t cyc = cnt32_to_63(cs->read(NULL)); | ||
| 307 | return clocksource_cyc2ns(cyc, cs->mult, cs->shift); | ||
| 308 | } | ||
| 309 | return 0; | ||
| 292 | } | 310 | } |
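
The shift change from 24 to 8 matters because the new `sched_clock()` pushes `cnt32_to_63()` output, which can carry up to 63 significant bits, through `clocksource_cyc2ns(cyc, mult, shift) = (cyc * mult) >> shift`, and the 64-bit product must not overflow. As a rough worked example, assuming a 100 MHz timer clock purely for illustration: with shift 24 the mult is about 167,772,160 (roughly 2^27), so the multiply overflows after about 2^37 cycles, on the order of minutes; with shift 8 the mult is about 2,560 (roughly 2^11), pushing overflow out to about 2^53 cycles, i.e. years.

```c
/* Illustration only (assumes a 100 MHz timer clock, not from the patch):
 * compare the mult values produced for the two shifts discussed above. */
#include <linux/kernel.h>
#include <linux/clocksource.h>

void microblaze_shift_example(void)
{
	u32 mult24 = clocksource_hz2mult(100000000, 24);	/* ~167772160 */
	u32 mult8  = clocksource_hz2mult(100000000, 8);		/* ~2560 */

	pr_info("shift 24 -> mult %u, shift 8 -> mult %u\n", mult24, mult8);
}
```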
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c index 75e49202a5ed..ba034d421ec2 100644 --- a/arch/microblaze/kernel/traps.c +++ b/arch/microblaze/kernel/traps.c | |||
| @@ -16,13 +16,14 @@ | |||
| 16 | 16 | ||
| 17 | #include <asm/exceptions.h> | 17 | #include <asm/exceptions.h> |
| 18 | #include <asm/system.h> | 18 | #include <asm/system.h> |
| 19 | #include <asm/unwind.h> | ||
| 19 | 20 | ||
| 20 | void trap_init(void) | 21 | void trap_init(void) |
| 21 | { | 22 | { |
| 22 | __enable_hw_exceptions(); | 23 | __enable_hw_exceptions(); |
| 23 | } | 24 | } |
| 24 | 25 | ||
| 25 | static unsigned long kstack_depth_to_print = 24; | 26 | static unsigned long kstack_depth_to_print; /* 0 == entire stack */ |
| 26 | 27 | ||
| 27 | static int __init kstack_setup(char *s) | 28 | static int __init kstack_setup(char *s) |
| 28 | { | 29 | { |
| @@ -30,31 +31,47 @@ static int __init kstack_setup(char *s) | |||
| 30 | } | 31 | } |
| 31 | __setup("kstack=", kstack_setup); | 32 | __setup("kstack=", kstack_setup); |
| 32 | 33 | ||
| 33 | void show_trace(struct task_struct *task, unsigned long *stack) | 34 | void show_stack(struct task_struct *task, unsigned long *sp) |
| 34 | { | 35 | { |
| 35 | unsigned long addr; | 36 | unsigned long words_to_show; |
| 36 | 37 | u32 fp = (u32) sp; | |
| 37 | if (!stack) | 38 | |
| 38 | stack = (unsigned long *)&stack; | 39 | if (fp == 0) { |
| 40 | if (task) { | ||
| 41 | fp = ((struct thread_info *) | ||
| 42 | (task->stack))->cpu_context.r1; | ||
| 43 | } else { | ||
| 44 | /* Pick up caller of dump_stack() */ | ||
| 45 | fp = (u32)&sp - 8; | ||
| 46 | } | ||
| 47 | } | ||
| 39 | 48 | ||
| 40 | printk(KERN_NOTICE "Call Trace: "); | 49 | words_to_show = (THREAD_SIZE - (fp & (THREAD_SIZE - 1))) >> 2; |
| 41 | #ifdef CONFIG_KALLSYMS | 50 | if (kstack_depth_to_print && (words_to_show > kstack_depth_to_print)) |
| 42 | printk(KERN_NOTICE "\n"); | 51 | words_to_show = kstack_depth_to_print; |
| 43 | #endif | 52 | |
| 44 | while (!kstack_end(stack)) { | 53 | pr_info("Kernel Stack:\n"); |
| 45 | addr = *stack++; | 54 | |
| 46 | /* | 55 | /* |
| 47 | * If the address is either in the text segment of the | 56 | * Make the first line an 'odd' size if necessary to get |
| 48 | * kernel, or in the region which contains vmalloc'ed | 57 | * remaining lines to start at an address multiple of 0x10 |
| 49 | * memory, it *may* be the address of a calling | 58 | */ |
| 50 | * routine; if so, print it so that someone tracing | 59 | if (fp & 0xF) { |
| 51 | * down the cause of the crash will be able to figure | 60 | unsigned long line1_words = (0x10 - (fp & 0xF)) >> 2; |
| 52 | * out the call path that was taken. | 61 | if (line1_words < words_to_show) { |
| 53 | */ | 62 | print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32, |
| 54 | if (kernel_text_address(addr)) | 63 | 4, (void *)fp, line1_words << 2, 0); |
| 55 | print_ip_sym(addr); | 64 | fp += line1_words << 2; |
| 65 | words_to_show -= line1_words; | ||
| 66 | } | ||
| 56 | } | 67 | } |
| 57 | printk(KERN_NOTICE "\n"); | 68 | print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32, 4, (void *)fp, |
| 69 | words_to_show << 2, 0); | ||
| 70 | printk(KERN_INFO "\n\n"); | ||
| 71 | |||
| 72 | pr_info("Call Trace:\n"); | ||
| 73 | microblaze_unwind(task, NULL); | ||
| 74 | pr_info("\n"); | ||
| 58 | 75 | ||
| 59 | if (!task) | 76 | if (!task) |
| 60 | task = current; | 77 | task = current; |
| @@ -62,34 +79,6 @@ void show_trace(struct task_struct *task, unsigned long *stack) | |||
| 62 | debug_show_held_locks(task); | 79 | debug_show_held_locks(task); |
| 63 | } | 80 | } |
| 64 | 81 | ||
| 65 | void show_stack(struct task_struct *task, unsigned long *sp) | ||
| 66 | { | ||
| 67 | unsigned long *stack; | ||
| 68 | int i; | ||
| 69 | |||
| 70 | if (sp == NULL) { | ||
| 71 | if (task) | ||
| 72 | sp = (unsigned long *) ((struct thread_info *) | ||
| 73 | (task->stack))->cpu_context.r1; | ||
| 74 | else | ||
| 75 | sp = (unsigned long *)&sp; | ||
| 76 | } | ||
| 77 | |||
| 78 | stack = sp; | ||
| 79 | |||
| 80 | printk(KERN_INFO "\nStack:\n "); | ||
| 81 | |||
| 82 | for (i = 0; i < kstack_depth_to_print; i++) { | ||
| 83 | if (kstack_end(sp)) | ||
| 84 | break; | ||
| 85 | if (i && ((i % 8) == 0)) | ||
| 86 | printk("\n "); | ||
| 87 | printk("%08lx ", *sp++); | ||
| 88 | } | ||
| 89 | printk("\n"); | ||
| 90 | show_trace(task, stack); | ||
| 91 | } | ||
| 92 | |||
| 93 | void dump_stack(void) | 82 | void dump_stack(void) |
| 94 | { | 83 | { |
| 95 | show_stack(NULL, NULL); | 84 | show_stack(NULL, NULL); |
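
The rewritten `show_stack()` hex-dumps raw stack words up to the top of the THREAD_SIZE-aligned stack region (capped by the `kstack=` boot parameter when set) and then hands the call trace to `microblaze_unwind()`. A small standalone illustration of the layout arithmetic, using an assumed THREAD_SIZE of 8192 and an arbitrary stack pointer value:

```c
/* Standalone illustration (values assumed, not from the patch) of the
 * words_to_show / line1_words computation in the new show_stack(). */
#include <stdio.h>

int main(void)
{
	unsigned long thread_size = 8192;	/* assumed THREAD_SIZE */
	unsigned long fp = 0xc803fe4c;		/* arbitrary example SP */
	unsigned long words = (thread_size - (fp & (thread_size - 1))) >> 2;
	unsigned long line1 = (fp & 0xF) ? (0x10 - (fp & 0xF)) >> 2 : 0;

	/* prints "words_to_show=109 line1_words=1": one odd-sized first line,
	 * then every following dump line starts on a 0x10-aligned address */
	printf("words_to_show=%lu line1_words=%lu\n", words, line1);
	return 0;
}
```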
diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c new file mode 100644 index 000000000000..fefac5c33586 --- /dev/null +++ b/arch/microblaze/kernel/unwind.c | |||
| @@ -0,0 +1,318 @@ | |||
| 1 | /* | ||
| 2 | * Backtrace support for Microblaze | ||
| 3 | * | ||
| 4 | * Copyright (C) 2010 Digital Design Corporation | ||
| 5 | * | ||
| 6 | * Based on arch/sh/kernel/cpu/sh5/unwind.c code which is: | ||
| 7 | * Copyright (C) 2004 Paul Mundt | ||
| 8 | * Copyright (C) 2004 Richard Curnow | ||
| 9 | * | ||
| 10 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 11 | * License. See the file "COPYING" in the main directory of this archive | ||
| 12 | * for more details. | ||
| 13 | */ | ||
| 14 | |||
| 15 | /* #define DEBUG 1 */ | ||
| 16 | #include <linux/kallsyms.h> | ||
| 17 | #include <linux/kernel.h> | ||
| 18 | #include <linux/sched.h> | ||
| 19 | #include <linux/stacktrace.h> | ||
| 20 | #include <linux/types.h> | ||
| 21 | #include <linux/errno.h> | ||
| 22 | #include <linux/module.h> | ||
| 23 | #include <linux/io.h> | ||
| 24 | #include <asm/sections.h> | ||
| 25 | #include <asm/exceptions.h> | ||
| 26 | #include <asm/unwind.h> | ||
| 27 | |||
| 28 | struct stack_trace; | ||
| 29 | |||
| 30 | /* | ||
| 31 | * On Microblaze, finding the previous stack frame is a little tricky. | ||
| 32 | * At this writing (3/2010), Microblaze does not support CONFIG_FRAME_POINTERS, | ||
| 33 | * and even if it did, gcc (4.1.2) does not store the frame pointer at | ||
| 34 | * a consistent offset within each frame. To determine frame size, it is | ||
| 35 | * necessary to search for the assembly instruction that creates or reclaims | ||
| 36 | * the frame and extract the size from it. | ||
| 37 | * | ||
| 38 | * Microblaze stores the stack pointer in r1, and creates a frame via | ||
| 39 | * | ||
| 40 | * addik r1, r1, -FRAME_SIZE | ||
| 41 | * | ||
| 42 | * The frame is reclaimed via | ||
| 43 | * | ||
| 44 | * addik r1, r1, FRAME_SIZE | ||
| 45 | * | ||
| 46 | * Frame creation occurs at or near the top of a function. | ||
| 47 | * Depending on the compiler, reclaim may occur at the end, or before | ||
| 48 | * a mid-function return. | ||
| 49 | * | ||
| 50 | * A stack frame is usually not created in a leaf function. | ||
| 51 | * | ||
| 52 | */ | ||
| 53 | |||
| 54 | /** | ||
| 55 | * get_frame_size - Extract the stack adjustment from an | ||
| 56 | * "addik r1, r1, adjust" instruction | ||
| 57 | * @instr : Microblaze instruction | ||
| 58 | * | ||
| 59 | * Return - Number of stack bytes the instruction reserves or reclaims | ||
| 60 | */ | ||
| 61 | inline long get_frame_size(unsigned long instr) | ||
| 62 | { | ||
| 63 | return abs((s16)(instr & 0xFFFF)); | ||
| 64 | } | ||
| 65 | |||
| 66 | /** | ||
| 67 | * find_frame_creation - Search backward to find the instruction that creates | ||
| 68 | * the stack frame (hopefully, for the same function the | ||
| 69 | * initial PC is in). | ||
| 70 | * @pc : Program counter at which to begin the search | ||
| 71 | * | ||
| 72 | * Return - PC at which stack frame creation occurs | ||
| 73 | * NULL if this cannot be found, i.e. a leaf function | ||
| 74 | */ | ||
| 75 | static unsigned long *find_frame_creation(unsigned long *pc) | ||
| 76 | { | ||
| 77 | int i; | ||
| 78 | |||
| 79 | /* NOTE: Distance to search is arbitrary | ||
| 80 | * 250 works well for most things, | ||
| 81 | * 750 picks up things like tcp_recvmsg(), | ||
| 82 | * 1000 needed for fat_fill_super() | ||
| 83 | */ | ||
| 84 | for (i = 0; i < 1000; i++, pc--) { | ||
| 85 | unsigned long instr; | ||
| 86 | s16 frame_size; | ||
| 87 | |||
| 88 | if (!kernel_text_address((unsigned long) pc)) | ||
| 89 | return NULL; | ||
| 90 | |||
| 91 | instr = *pc; | ||
| 92 | |||
| 93 | /* addik r1, r1, foo ? */ | ||
| 94 | if ((instr & 0xFFFF0000) != 0x30210000) | ||
| 95 | continue; /* No */ | ||
| 96 | |||
| 97 | frame_size = get_frame_size(instr); | ||
| 98 | if ((frame_size < 8) || (frame_size & 3)) { | ||
| 99 | pr_debug(" Invalid frame size %d at 0x%p\n", | ||
| 100 | frame_size, pc); | ||
| 101 | return NULL; | ||
| 102 | } | ||
| 103 | |||
| 104 | pr_debug(" Found frame creation at 0x%p, size %d\n", pc, | ||
| 105 | frame_size); | ||
| 106 | return pc; | ||
| 107 | } | ||
| 108 | |||
| 109 | return NULL; | ||
| 110 | } | ||
| 111 | |||
| 112 | /** | ||
| 113 | * lookup_prev_stack_frame - Find the stack frame of the previous function. | ||
| 114 | * @fp : Frame (stack) pointer for current function | ||
| 115 | * @pc : Program counter within current function | ||
| 116 | * @leaf_return : r15 value within current function. If the current function | ||
| 117 | * is a leaf, this is the caller's return address. | ||
| 118 | * @pprev_fp : On exit, set to frame (stack) pointer for previous function | ||
| 119 | * @pprev_pc : On exit, set to current function caller's return address | ||
| 120 | * | ||
| 121 | * Return - 0 on success, -EINVAL if the previous frame cannot be found | ||
| 122 | */ | ||
| 123 | static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc, | ||
| 124 | unsigned long leaf_return, | ||
| 125 | unsigned long *pprev_fp, | ||
| 126 | unsigned long *pprev_pc) | ||
| 127 | { | ||
| 128 | unsigned long *prologue = NULL; | ||
| 129 | |||
| 130 | /* _switch_to is a special leaf function */ | ||
| 131 | if (pc != (unsigned long) &_switch_to) | ||
| 132 | prologue = find_frame_creation((unsigned long *)pc); | ||
| 133 | |||
| 134 | if (prologue) { | ||
| 135 | long frame_size = get_frame_size(*prologue); | ||
| 136 | |||
| 137 | *pprev_fp = fp + frame_size; | ||
| 138 | *pprev_pc = *(unsigned long *)fp; | ||
| 139 | } else { | ||
| 140 | if (!leaf_return) | ||
| 141 | return -EINVAL; | ||
| 142 | *pprev_pc = leaf_return; | ||
| 143 | *pprev_fp = fp; | ||
| 144 | } | ||
| 145 | |||
| 146 | /* NOTE: don't check kernel_text_address here, to allow display | ||
| 147 | * of userland return address | ||
| 148 | */ | ||
| 149 | return (!*pprev_pc || (*pprev_pc & 3)) ? -EINVAL : 0; | ||
| 150 | } | ||
| 151 | |||
| 152 | static void microblaze_unwind_inner(struct task_struct *task, | ||
| 153 | unsigned long pc, unsigned long fp, | ||
| 154 | unsigned long leaf_return, | ||
| 155 | struct stack_trace *trace); | ||
| 156 | |||
| 157 | /** | ||
| 158 | * unwind_trap - Unwind through a system trap, that stored previous state | ||
| 159 | * on the stack. | ||
| 160 | */ | ||
| 161 | #ifdef CONFIG_MMU | ||
| 162 | static inline void unwind_trap(struct task_struct *task, unsigned long pc, | ||
| 163 | unsigned long fp, struct stack_trace *trace) | ||
| 164 | { | ||
| 165 | /* To be implemented */ | ||
| 166 | } | ||
| 167 | #else | ||
| 168 | static inline void unwind_trap(struct task_struct *task, unsigned long pc, | ||
| 169 | unsigned long fp, struct stack_trace *trace) | ||
| 170 | { | ||
| 171 | const struct pt_regs *regs = (const struct pt_regs *) fp; | ||
| 172 | microblaze_unwind_inner(task, regs->pc, regs->r1, regs->r15, trace); | ||
| 173 | } | ||
| 174 | #endif | ||
| 175 | |||
| 176 | /** | ||
| 177 | * microblaze_unwind_inner - Unwind the stack from the specified point | ||
| 178 | * @task : Task whose stack we are to unwind (may be NULL) | ||
| 179 | * @pc : Program counter from which we start unwinding | ||
| 180 | * @fp : Frame (stack) pointer from which we start unwinding | ||
| 181 | * @leaf_return : Value of r15 at pc. If the function is a leaf, this is | ||
| 182 | * the caller's return address. | ||
| 183 | * @trace : Where to store stack backtrace (PC values). | ||
| 184 | * NULL == print backtrace to kernel log | ||
| 185 | */ | ||
| 186 | void microblaze_unwind_inner(struct task_struct *task, | ||
| 187 | unsigned long pc, unsigned long fp, | ||
| 188 | unsigned long leaf_return, | ||
| 189 | struct stack_trace *trace) | ||
| 190 | { | ||
| 191 | int ofs = 0; | ||
| 192 | |||
| 193 | pr_debug(" Unwinding with PC=%p, FP=%p\n", (void *)pc, (void *)fp); | ||
| 194 | if (!pc || !fp || (pc & 3) || (fp & 3)) { | ||
| 195 | pr_debug(" Invalid state for unwind, aborting\n"); | ||
| 196 | return; | ||
| 197 | } | ||
| 198 | for (; pc != 0;) { | ||
| 199 | unsigned long next_fp, next_pc = 0; | ||
| 200 | unsigned long return_to = pc + 2 * sizeof(unsigned long); | ||
| 201 | const struct trap_handler_info *handler = | ||
| 202 | &microblaze_trap_handlers; | ||
| 203 | |||
| 204 | /* Is previous function the HW exception handler? */ | ||
| 205 | if ((return_to >= (unsigned long)&_hw_exception_handler) | ||
| 206 | &&(return_to < (unsigned long)&ex_handler_unhandled)) { | ||
| 207 | /* | ||
| 208 | * HW exception handler doesn't save all registers, | ||
| 209 | * so we open-code a special case of unwind_trap() | ||
| 210 | */ | ||
| 211 | #ifndef CONFIG_MMU | ||
| 212 | const struct pt_regs *regs = | ||
| 213 | (const struct pt_regs *) fp; | ||
| 214 | #endif | ||
| 215 | pr_info("HW EXCEPTION\n"); | ||
| 216 | #ifndef CONFIG_MMU | ||
| 217 | microblaze_unwind_inner(task, regs->r17 - 4, | ||
| 218 | fp + EX_HANDLER_STACK_SIZ, | ||
| 219 | regs->r15, trace); | ||
| 220 | #endif | ||
| 221 | return; | ||
| 222 | } | ||
| 223 | |||
| 224 | /* Is previous function a trap handler? */ | ||
| 225 | for (; handler->start_addr; ++handler) { | ||
| 226 | if ((return_to >= handler->start_addr) | ||
| 227 | && (return_to <= handler->end_addr)) { | ||
| 228 | if (!trace) | ||
| 229 | pr_info("%s\n", handler->trap_name); | ||
| 230 | unwind_trap(task, pc, fp, trace); | ||
| 231 | return; | ||
| 232 | } | ||
| 233 | } | ||
| 234 | pc -= ofs; | ||
| 235 | |||
| 236 | if (trace) { | ||
| 237 | #ifdef CONFIG_STACKTRACE | ||
| 238 | if (trace->skip > 0) | ||
| 239 | trace->skip--; | ||
| 240 | else | ||
| 241 | trace->entries[trace->nr_entries++] = pc; | ||
| 242 | |||
| 243 | if (trace->nr_entries >= trace->max_entries) | ||
| 244 | break; | ||
| 245 | #endif | ||
| 246 | } else { | ||
| 247 | /* Have we reached userland? */ | ||
| 248 | if (unlikely(pc == task_pt_regs(task)->pc)) { | ||
| 249 | pr_info("[<%p>] PID %lu [%s]\n", | ||
| 250 | (void *) pc, | ||
| 251 | (unsigned long) task->pid, | ||
| 252 | task->comm); | ||
| 253 | break; | ||
| 254 | } else | ||
| 255 | print_ip_sym(pc); | ||
| 256 | } | ||
| 257 | |||
| 258 | /* Stop when we reach anything not part of the kernel */ | ||
| 259 | if (!kernel_text_address(pc)) | ||
| 260 | break; | ||
| 261 | |||
| 262 | if (lookup_prev_stack_frame(fp, pc, leaf_return, &next_fp, | ||
| 263 | &next_pc) == 0) { | ||
| 264 | ofs = sizeof(unsigned long); | ||
| 265 | pc = next_pc & ~3; | ||
| 266 | fp = next_fp; | ||
| 267 | leaf_return = 0; | ||
| 268 | } else { | ||
| 269 | pr_debug(" Failed to find previous stack frame\n"); | ||
| 270 | break; | ||
| 271 | } | ||
| 272 | |||
| 273 | pr_debug(" Next PC=%p, next FP=%p\n", | ||
| 274 | (void *)next_pc, (void *)next_fp); | ||
| 275 | } | ||
| 276 | } | ||
| 277 | |||
| 278 | /** | ||
| 279 | * microblaze_unwind - Stack unwinder for Microblaze (external entry point) | ||
| 280 | * @task : Task whose stack we are to unwind (NULL == current) | ||
| 281 | * @trace : Where to store stack backtrace (PC values). | ||
| 282 | * NULL == print backtrace to kernel log | ||
| 283 | */ | ||
| 284 | void microblaze_unwind(struct task_struct *task, struct stack_trace *trace) | ||
| 285 | { | ||
| 286 | if (task) { | ||
| 287 | if (task == current) { | ||
| 288 | const struct pt_regs *regs = task_pt_regs(task); | ||
| 289 | microblaze_unwind_inner(task, regs->pc, regs->r1, | ||
| 290 | regs->r15, trace); | ||
| 291 | } else { | ||
| 292 | struct thread_info *thread_info = | ||
| 293 | (struct thread_info *)(task->stack); | ||
| 294 | const struct cpu_context *cpu_context = | ||
| 295 | &thread_info->cpu_context; | ||
| 296 | |||
| 297 | microblaze_unwind_inner(task, | ||
| 298 | (unsigned long) &_switch_to, | ||
| 299 | cpu_context->r1, | ||
| 300 | cpu_context->r15, trace); | ||
| 301 | } | ||
| 302 | } else { | ||
| 303 | unsigned long pc, fp; | ||
| 304 | |||
| 305 | __asm__ __volatile__ ("or %0, r1, r0" : "=r" (fp)); | ||
| 306 | |||
| 307 | __asm__ __volatile__ ( | ||
| 308 | "brlid %0, 0f;" | ||
| 309 | "nop;" | ||
| 310 | "0:" | ||
| 311 | : "=r" (pc) | ||
| 312 | ); | ||
| 313 | |||
| 314 | /* Since we are not a leaf function, use leaf_return = 0 */ | ||
| 315 | microblaze_unwind_inner(current, pc, fp, 0, trace); | ||
| 316 | } | ||
| 317 | } | ||
| 318 | |||
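The two routines above form the unwinder's public surface: microblaze_unwind_inner() walks the frame chain from an explicit PC/FP pair, and microblaze_unwind() picks the starting point from a task (or from the live r1/PC when task is NULL), filling a struct stack_trace or printing to the kernel log when trace is NULL. A minimal caller-side sketch of the stack_trace path (the save_stack_trace wiring, header name, and skip count are illustrative assumptions, not taken from this diff):

```c
/*
 * Usage sketch (illustrative): hand the unwinder a struct stack_trace
 * to fill in, or pass trace == NULL to print to the kernel log.
 */
#include <linux/stacktrace.h>
#include <asm/unwind.h>		/* assumed to declare microblaze_unwind() */

void save_stack_trace(struct stack_trace *trace)
{
	trace->skip += 2;		/* drop the unwind helpers themselves (illustrative count) */
	microblaze_unwind(NULL, trace);	/* NULL task == unwind the current task */
}
```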
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index db72d7124602..a09f2962fbec 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S | |||
| @@ -10,7 +10,7 @@ | |||
| 10 | 10 | ||
| 11 | OUTPUT_FORMAT("elf32-microblaze", "elf32-microblaze", "elf32-microblaze") | 11 | OUTPUT_FORMAT("elf32-microblaze", "elf32-microblaze", "elf32-microblaze") |
| 12 | OUTPUT_ARCH(microblaze) | 12 | OUTPUT_ARCH(microblaze) |
| 13 | ENTRY(_start) | 13 | ENTRY(microblaze_start) |
| 14 | 14 | ||
| 15 | #include <asm/page.h> | 15 | #include <asm/page.h> |
| 16 | #include <asm-generic/vmlinux.lds.h> | 16 | #include <asm-generic/vmlinux.lds.h> |
| @@ -20,7 +20,7 @@ jiffies = jiffies_64 + 4; | |||
| 20 | 20 | ||
| 21 | SECTIONS { | 21 | SECTIONS { |
| 22 | . = CONFIG_KERNEL_START; | 22 | . = CONFIG_KERNEL_START; |
| 23 | _start = CONFIG_KERNEL_BASE_ADDR; | 23 | microblaze_start = CONFIG_KERNEL_BASE_ADDR; |
| 24 | .text : AT(ADDR(.text) - LOAD_OFFSET) { | 24 | .text : AT(ADDR(.text) - LOAD_OFFSET) { |
| 25 | _text = . ; | 25 | _text = . ; |
| 26 | _stext = . ; | 26 | _stext = . ; |
| @@ -55,7 +55,7 @@ SECTIONS { | |||
| 55 | */ | 55 | */ |
| 56 | .sdata2 : AT(ADDR(.sdata2) - LOAD_OFFSET) { | 56 | .sdata2 : AT(ADDR(.sdata2) - LOAD_OFFSET) { |
| 57 | _ssrw = .; | 57 | _ssrw = .; |
| 58 | . = ALIGN(4096); /* page aligned when MMU used - origin 0x8 */ | 58 | . = ALIGN(PAGE_SIZE); /* page aligned when MMU used */ |
| 59 | *(.sdata2) | 59 | *(.sdata2) |
| 60 | . = ALIGN(8); | 60 | . = ALIGN(8); |
| 61 | _essrw = .; | 61 | _essrw = .; |
| @@ -70,7 +70,7 @@ SECTIONS { | |||
| 70 | /* Reserve some low RAM for r0 based memory references */ | 70 | /* Reserve some low RAM for r0 based memory references */ |
| 71 | . = ALIGN(0x4) ; | 71 | . = ALIGN(0x4) ; |
| 72 | r0_ram = . ; | 72 | r0_ram = . ; |
| 73 | . = . + 4096; /* a page should be enough */ | 73 | . = . + PAGE_SIZE; /* a page should be enough */ |
| 74 | 74 | ||
| 75 | /* Under the microblaze ABI, .sdata and .sbss must be contiguous */ | 75 | /* Under the microblaze ABI, .sdata and .sbss must be contiguous */ |
| 76 | . = ALIGN(8); | 76 | . = ALIGN(8); |
| @@ -120,7 +120,7 @@ SECTIONS { | |||
| 120 | 120 | ||
| 121 | __init_end_before_initramfs = .; | 121 | __init_end_before_initramfs = .; |
| 122 | 122 | ||
| 123 | .init.ramfs ALIGN(4096) : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { | 123 | .init.ramfs ALIGN(PAGE_SIZE) : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { |
| 124 | __initramfs_start = .; | 124 | __initramfs_start = .; |
| 125 | *(.init.ramfs) | 125 | *(.init.ramfs) |
| 126 | __initramfs_end = .; | 126 | __initramfs_end = .; |
| @@ -132,11 +132,11 @@ SECTIONS { | |||
| 132 | * so that __init_end == __bss_start. This will make image.elf | 132 | * so that __init_end == __bss_start. This will make image.elf |
| 133 | * consistent with the image.bin | 133 | * consistent with the image.bin |
| 134 | */ | 134 | */ |
| 135 | /* . = ALIGN(4096); */ | 135 | /* . = ALIGN(PAGE_SIZE); */ |
| 136 | } | 136 | } |
| 137 | __init_end = .; | 137 | __init_end = .; |
| 138 | 138 | ||
| 139 | .bss ALIGN (4096) : AT(ADDR(.bss) - LOAD_OFFSET) { | 139 | .bss ALIGN (PAGE_SIZE) : AT(ADDR(.bss) - LOAD_OFFSET) { |
| 140 | /* page aligned when MMU used */ | 140 | /* page aligned when MMU used */ |
| 141 | __bss_start = . ; | 141 | __bss_start = . ; |
| 142 | *(.bss*) | 142 | *(.bss*) |
| @@ -145,7 +145,7 @@ SECTIONS { | |||
| 145 | __bss_stop = . ; | 145 | __bss_stop = . ; |
| 146 | _ebss = . ; | 146 | _ebss = . ; |
| 147 | } | 147 | } |
| 148 | . = ALIGN(4096); | 148 | . = ALIGN(PAGE_SIZE); |
| 149 | _end = .; | 149 | _end = .; |
| 150 | 150 | ||
| 151 | DISCARDS | 151 | DISCARDS |
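The linker-script hunks above swap hard-coded 4096 alignments for PAGE_SIZE, which vmlinux.lds.S already pulls in via <asm/page.h>, so section alignment tracks the configured page size instead of assuming 4 KiB. A rough sketch of the relationship (values illustrative, not quoted from the header):

```c
/*
 * Sketch only: PAGE_SIZE derives from PAGE_SHIFT, so every
 * ALIGN(PAGE_SIZE) in vmlinux.lds.S widens automatically when a larger
 * page size is configured (8K/16K/32K would mean PAGE_SHIFT 13/14/15).
 */
#define PAGE_SHIFT	12			/* 4 KiB default */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)	/* 4096 here */
```

In the real header the constant is typically wrapped so it stays usable from assembly and linker scripts, but the alignment effect is the same.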
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index bab922993185..57bd2a09610c 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c | |||
| @@ -37,10 +37,6 @@ | |||
| 37 | #include <linux/uaccess.h> | 37 | #include <linux/uaccess.h> |
| 38 | #include <asm/exceptions.h> | 38 | #include <asm/exceptions.h> |
| 39 | 39 | ||
| 40 | #if defined(CONFIG_KGDB) | ||
| 41 | int debugger_kernel_faults = 1; | ||
| 42 | #endif | ||
| 43 | |||
| 44 | static unsigned long pte_misses; /* updated by do_page_fault() */ | 40 | static unsigned long pte_misses; /* updated by do_page_fault() */ |
| 45 | static unsigned long pte_errors; /* updated by do_page_fault() */ | 41 | static unsigned long pte_errors; /* updated by do_page_fault() */ |
| 46 | 42 | ||
| @@ -81,10 +77,6 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) | |||
| 81 | } | 77 | } |
| 82 | 78 | ||
| 83 | /* kernel has accessed a bad area */ | 79 | /* kernel has accessed a bad area */ |
| 84 | #if defined(CONFIG_KGDB) | ||
| 85 | if (debugger_kernel_faults) | ||
| 86 | debugger(regs); | ||
| 87 | #endif | ||
| 88 | die("kernel access of bad area", regs, sig); | 80 | die("kernel access of bad area", regs, sig); |
| 89 | } | 81 | } |
| 90 | 82 | ||
| @@ -115,13 +107,6 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, | |||
| 115 | if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11) | 107 | if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11) |
| 116 | is_write = 0; | 108 | is_write = 0; |
| 117 | 109 | ||
| 118 | #if defined(CONFIG_KGDB) | ||
| 119 | if (debugger_fault_handler && regs->trap == 0x300) { | ||
| 120 | debugger_fault_handler(regs); | ||
| 121 | return; | ||
| 122 | } | ||
| 123 | #endif /* CONFIG_KGDB */ | ||
| 124 | |||
| 125 | if (unlikely(in_atomic() || !mm)) { | 110 | if (unlikely(in_atomic() || !mm)) { |
| 126 | if (kernel_mode(regs)) | 111 | if (kernel_mode(regs)) |
| 127 | goto bad_area_nosemaphore; | 112 | goto bad_area_nosemaphore; |
| @@ -226,7 +211,6 @@ good_area: | |||
| 226 | * make sure we exit gracefully rather than endlessly redo | 211 | * make sure we exit gracefully rather than endlessly redo |
| 227 | * the fault. | 212 | * the fault. |
| 228 | */ | 213 | */ |
| 229 | survive: | ||
| 230 | fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0); | 214 | fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0); |
| 231 | if (unlikely(fault & VM_FAULT_ERROR)) { | 215 | if (unlikely(fault & VM_FAULT_ERROR)) { |
| 232 | if (fault & VM_FAULT_OOM) | 216 | if (fault & VM_FAULT_OOM) |
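The fault.c hunks above drop the old open-coded debugger hooks (debugger_kernel_faults, debugger_fault_handler, debugger()) from the page-fault path. As an illustration only: fatal kernel faults remain observable through the generic die notifier API, though whether the microblaze die() path fires that chain is not shown in this diff.

```c
/*
 * Illustrative sketch, not part of this series: a passive observer of
 * fatal kernel faults using the generic die notifier API.
 */
#include <linux/init.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

static int fault_watch(struct notifier_block *nb, unsigned long cmd, void *ptr)
{
	struct die_args *args = ptr;

	pr_info("die notifier: %s (signal %d)\n", args->str, args->signr);
	return NOTIFY_DONE;	/* observe only; let the oops proceed */
}

static struct notifier_block fault_watch_nb = {
	.notifier_call = fault_watch,
};

static int __init fault_watch_init(void)
{
	return register_die_notifier(&fault_watch_nb);
}
```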
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index db5934989926..65eb00419d19 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c | |||
| @@ -134,13 +134,8 @@ void __init setup_memory(void) | |||
| 134 | * for 4GB of memory, using 4kB pages), plus 1 page | 134 | * for 4GB of memory, using 4kB pages), plus 1 page |
| 135 | * (in case the address isn't page-aligned). | 135 | * (in case the address isn't page-aligned). |
| 136 | */ | 136 | */ |
| 137 | #ifndef CONFIG_MMU | 137 | map_size = init_bootmem_node(NODE_DATA(0), |
| 138 | map_size = init_bootmem_node(NODE_DATA(0), PFN_UP(TOPHYS((u32)klimit)), | ||
| 139 | min_low_pfn, max_low_pfn); | ||
| 140 | #else | ||
| 141 | map_size = init_bootmem_node(&contig_page_data, | ||
| 142 | PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn); | 138 | PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn); |
| 143 | #endif | ||
| 144 | memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size); | 139 | memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size); |
| 145 | 140 | ||
| 146 | /* free bootmem is whole main memory */ | 141 | /* free bootmem is whole main memory */ |
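The setup_memory() hunk above removes the MMU/!MMU split around init_bootmem_node(): with a single flat memory node the two branches were equivalent, because the generic headers make NODE_DATA(0) and &contig_page_data refer to the same object. Sketch of the relevant flat-memory definitions, paraphrased rather than quoted:

```c
/*
 * Sketch of the generic flat-memory case: NODE_DATA(0) expands to
 * &contig_page_data, so init_bootmem_node(NODE_DATA(0), ...) and
 * init_bootmem_node(&contig_page_data, ...) act on the same pg_data_t.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#endif
```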
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index dfdc0347b05d..154ff43aaa81 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
| @@ -528,7 +528,7 @@ config LOCKDEP | |||
| 528 | bool | 528 | bool |
| 529 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT | 529 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT |
| 530 | select STACKTRACE | 530 | select STACKTRACE |
| 531 | select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 | 531 | select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE |
| 532 | select KALLSYMS | 532 | select KALLSYMS |
| 533 | select KALLSYMS_ALL | 533 | select KALLSYMS_ALL |
| 534 | 534 | ||
| @@ -958,13 +958,13 @@ config FAULT_INJECTION_STACKTRACE_FILTER | |||
| 958 | depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT | 958 | depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT |
| 959 | depends on !X86_64 | 959 | depends on !X86_64 |
| 960 | select STACKTRACE | 960 | select STACKTRACE |
| 961 | select FRAME_POINTER if !PPC && !S390 | 961 | select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE |
| 962 | help | 962 | help |
| 963 | Provide stacktrace filter for fault-injection capabilities | 963 | Provide stacktrace filter for fault-injection capabilities |
| 964 | 964 | ||
| 965 | config LATENCYTOP | 965 | config LATENCYTOP |
| 966 | bool "Latency measuring infrastructure" | 966 | bool "Latency measuring infrastructure" |
| 967 | select FRAME_POINTER if !MIPS && !PPC && !S390 | 967 | select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE |
| 968 | select KALLSYMS | 968 | select KALLSYMS |
| 969 | select KALLSYMS_ALL | 969 | select KALLSYMS_ALL |
| 970 | select STACKTRACE | 970 | select STACKTRACE |
