From f9e2bdfdbb4c9da13422b349227be8c7b41dbd44 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 9 Sep 2009 17:14:19 +0900 Subject: sh: Factor in cpu id for selection of cache colour fixmap. In the SMP VIPT case the page copy/clear ops still perform colouring, care needs to be taken that CPUs don't end up stepping on each other, so we give them a bit of room to work with. At the same time, we reduce the worst-case colouring given that these pages are always consumed. Signed-off-by: Paul Mundt --- arch/sh/include/asm/fixmap.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h index 721fcc4d5e98..76c5a3099cb8 100644 --- a/arch/sh/include/asm/fixmap.h +++ b/arch/sh/include/asm/fixmap.h @@ -14,9 +14,9 @@ #define _ASM_FIXMAP_H #include +#include #include #ifdef CONFIG_HIGHMEM -#include #include #endif @@ -46,9 +46,9 @@ * fix-mapped? */ enum fixed_addresses { -#define FIX_N_COLOURS 16 +#define FIX_N_COLOURS 8 FIX_CMAP_BEGIN, - FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS, + FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS), FIX_UNCACHED, #ifdef CONFIG_HIGHMEM FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ -- cgit v1.2.2 From 8bd642b17bea31f8361b61c16c8d154638414df4 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 6 Oct 2009 21:22:24 +0000 Subject: sh: Obliterate the P1 area macros Replace the use of PHYSADDR() with __pa(). PHYSADDR() is based on the idea that all addresses in P1SEG are untranslated, so we can access an address's physical page as an offset from P1SEG. This doesn't work for CONFIG_PMB/CONFIG_PMB_FIXED because pages in P1SEG and P2SEG are used for PMB mappings and so can be translated to any physical address. Likewise, replace a P1SEGADDR() use with virt_to_phys(). Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/addrspace.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h index 80d40813e057..ebd6e49ba39e 100644 --- a/arch/sh/include/asm/addrspace.h +++ b/arch/sh/include/asm/addrspace.h @@ -28,9 +28,6 @@ /* Returns the privileged segment base of a given address */ #define PXSEG(a) (((unsigned long)(a)) & 0xe0000000) -/* Returns the physical address of a PnSEG (n=1,2) address */ -#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff) - #if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED) /* * Map an address to a certain privileged segment -- cgit v1.2.2 From 1f69b6af9171f50135cce8023c84d82fbf42a8f5 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 6 Oct 2009 21:22:25 +0000 Subject: sh: Prepare for dynamic PMB support To allow the MMU to be switched between 29bit and 32bit mode at runtime some constants need to swapped for functions that return a runtime value. 
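The first such constant is the physical address mask, which now depends on whether the PMB has switched the MMU out of 29-bit mode. As a rough sketch (the real implementation lives outside these headers and may differ), the new __in_29bit_mode() helper can be derived from the PASCR.SE bit added below:

	/* Sketch only: 29-bit mode is active while the 32-bit space
	 * enable bit (PASCR.SE) is clear. */
	int __in_29bit_mode(void)
	{
		return (ctrl_inl(PMB_PASCR) & PASCR_SE) == 0;
	}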
Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/addrspace.h | 6 ++++++ arch/sh/include/asm/mmu.h | 3 ++- arch/sh/include/asm/pgtable.h | 26 ++++++++++++++++++++++---- arch/sh/include/asm/pgtable_32.h | 2 +- arch/sh/include/asm/scatterlist.h | 2 +- 5 files changed, 32 insertions(+), 7 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h index ebd6e49ba39e..99d6b3ecbe22 100644 --- a/arch/sh/include/asm/addrspace.h +++ b/arch/sh/include/asm/addrspace.h @@ -57,5 +57,11 @@ #define P3_ADDR_MAX P4SEG #endif +#ifndef __ASSEMBLY__ +#ifdef CONFIG_PMB +extern int __in_29bit_mode(void); +#endif /* CONFIG_PMB */ +#endif /* __ASSEMBLY__ */ + #endif /* __KERNEL__ */ #endif /* __ASM_SH_ADDRSPACE_H */ diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index f5963037c9d6..5025e12b7864 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -7,6 +7,8 @@ #define PMB_PASCR 0xff000070 #define PMB_IRMCR 0xff000078 +#define PASCR_SE 0x80000000 + #define PMB_ADDR 0xf6100000 #define PMB_DATA 0xf7100000 #define PMB_ENTRY_MAX 16 @@ -75,4 +77,3 @@ void pmb_unmap(unsigned long addr); #endif /* __ASSEMBLY__ */ #endif /* __MMU_H */ - diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index 4f3efa7d5a64..5dff5787dfeb 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -75,13 +75,31 @@ static inline unsigned long long neff_sign_extend(unsigned long val) #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) #define FIRST_USER_ADDRESS 0 -#ifdef CONFIG_32BIT -#define PHYS_ADDR_MASK 0xffffffff +#define PHYS_ADDR_MASK29 0x1fffffff +#define PHYS_ADDR_MASK32 0xffffffff + +#ifdef CONFIG_PMB +static inline unsigned long phys_addr_mask(void) +{ + /* Is the MMU in 29bit mode? 
*/ + if (__in_29bit_mode()) + return PHYS_ADDR_MASK29; + + return PHYS_ADDR_MASK32; +} +#elif CONFIG_32BIT +static inline unsigned long phys_addr_mask(void) +{ + return PHYS_ADDR_MASK32; +} #else -#define PHYS_ADDR_MASK 0x1fffffff +static inline unsigned long phys_addr_mask(void) +{ + return PHYS_ADDR_MASK29; +} #endif -#define PTE_PHYS_MASK (PHYS_ADDR_MASK & PAGE_MASK) +#define PTE_PHYS_MASK (phys_addr_mask() & PAGE_MASK) #define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT) #ifdef CONFIG_SUPERH32 diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h index c0d359ce337b..b35435516203 100644 --- a/arch/sh/include/asm/pgtable_32.h +++ b/arch/sh/include/asm/pgtable_32.h @@ -108,7 +108,7 @@ static inline unsigned long copy_ptea_attributes(unsigned long x) #define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE) #endif -#define _PAGE_FLAGS_HARDWARE_MASK (PHYS_ADDR_MASK & ~(_PAGE_CLEAR_FLAGS)) +#define _PAGE_FLAGS_HARDWARE_MASK (phys_addr_mask() & ~(_PAGE_CLEAR_FLAGS)) /* Hardware flags, page size encoding */ #if !defined(CONFIG_MMU) diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h index 327cc2e4c97b..e38d1d4c7f6f 100644 --- a/arch/sh/include/asm/scatterlist.h +++ b/arch/sh/include/asm/scatterlist.h @@ -1,7 +1,7 @@ #ifndef __ASM_SH_SCATTERLIST_H #define __ASM_SH_SCATTERLIST_H -#define ISA_DMA_THRESHOLD PHYS_ADDR_MASK +#define ISA_DMA_THRESHOLD phys_addr_mask() #include -- cgit v1.2.2 From 8386aebb9e15a94137693ea4f4df84207f71cc75 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 6 Oct 2009 21:22:28 +0000 Subject: sh: Make most PMB functions static There's no need to export the internal PMB functions for allocating, freeing and modifying PMB entries, etc. This way we can restrict the interface for PMB. Also remove the static from pmb_init() so that we have more freedom in setting up the initial PMB entries and turning on MMU 32bit mode. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/mmu.h | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index 5025e12b7864..9c84b4546c8d 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -64,16 +64,10 @@ struct pmb_entry { }; /* arch/sh/mm/pmb.c */ -int __set_pmb_entry(unsigned long vpn, unsigned long ppn, - unsigned long flags, int *entry); -int set_pmb_entry(struct pmb_entry *pmbe); -void clear_pmb_entry(struct pmb_entry *pmbe); -struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, - unsigned long flags); -void pmb_free(struct pmb_entry *pmbe); long pmb_remap(unsigned long virt, unsigned long phys, unsigned long size, unsigned long flags); void pmb_unmap(unsigned long addr); +int pmb_init(void); #endif /* __ASSEMBLY__ */ #endif /* __MMU_H */ -- cgit v1.2.2 From 3105121949b609964f370d42d1b90fe7fc01d6b1 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 6 Oct 2009 21:22:30 +0000 Subject: sh: Remap physical memory into P1 and P2 in pmb_init() Eventually we'll have complete control over what physical memory gets mapped where and we can probably do other interesting things. For now though, when the MMU is in 32-bit mode, we map physical memory into the P1 and P2 virtual address ranges with the same semantics as they have in 29-bit mode. 
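Concretely, pmb_init() can establish that window with two fixed mappings: a cached alias through P1 and an uncached alias through P2. The snippet below is only a sketch of the idea; the flag names and the 'size' variable are assumptions rather than part of this patch:

	/* Sketch: mirror the 29-bit semantics once the MMU runs in
	 * 32-bit mode.  'size' stands for the amount of RAM to cover. */
	pmb_remap(P1SEG, 0x00000000, size, PMB_C);		/* cached window   */
	pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);	/* uncached window */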
Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/io.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 5be45ea4dfec..0cf2a5708e26 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -246,7 +246,7 @@ void __iounmap(void __iomem *addr); static inline void __iomem * __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) { -#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) +#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB) unsigned long last_addr = offset + size - 1; #endif void __iomem *ret; @@ -255,7 +255,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) if (ret) return ret; -#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) +#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB) /* * For P1 and P2 space this is trivial, as everything is already * mapped. Uncached access for P1 addresses are done through P2. -- cgit v1.2.2 From 20b5014b3e5fe7b874a3f6a1dc03b0c21cb222cd Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 6 Oct 2009 21:22:33 +0000 Subject: sh: Fold fixed-PMB support into dynamic PMB support The initialisation process differs for CONFIG_PMB and for CONFIG_PMB_FIXED. For CONFIG_PMB_FIXED we need to register the PMB entries that were allocated by the bootloader. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/mmu.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index 9c84b4546c8d..c7426ad9926e 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -15,6 +15,8 @@ #define PMB_E_MASK 0x0000000f #define PMB_E_SHIFT 8 +#define PMB_PFN_MASK 0xff000000 + #define PMB_SZ_16M 0x00000000 #define PMB_SZ_64M 0x00000010 #define PMB_SZ_128M 0x00000080 -- cgit v1.2.2 From 2a8bc923455f320da6c460258c21d2235ab2edc8 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 10 Oct 2009 22:24:55 +0900 Subject: sh: Shut up CONFIG_32BIT=n compiler warnings. Signed-off-by: Paul Mundt --- arch/sh/include/asm/pgtable.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index 5dff5787dfeb..ba3046e4f06f 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -87,7 +87,7 @@ static inline unsigned long phys_addr_mask(void) return PHYS_ADDR_MASK32; } -#elif CONFIG_32BIT +#elif defined(CONFIG_32BIT) static inline unsigned long phys_addr_mask(void) { return PHYS_ADDR_MASK32; -- cgit v1.2.2 From a6a2f2ad67506090e332f440457553c0ec011d68 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Fri, 9 Oct 2009 23:20:54 +0100 Subject: sh: Teach the DWARF unwinder about modules Pass a module's .eh_frame section to the DWARF unwinder at module load time so that the section's FDEs and CIEs can be registered with the DWARF unwinder. This allows us to unwind the stack through module code when generating backtraces. 
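The expected call sites are the module loader hooks: hand the module's .eh_frame start/end to dwarf_parse_section() at load time and tear the entries down again with dwarf_module_unload(). A sketch of that wiring, with the section lookup abbreviated and not taken from this patch:

	int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
			    struct module *me)
	{
		const char *secstrs = (void *)hdr +
				      sechdrs[hdr->e_shstrndx].sh_offset;
		unsigned int i;

		/* Hand any .eh_frame section over to the DWARF unwinder. */
		for (i = 1; i < hdr->e_shnum; i++) {
			if (!strcmp(".eh_frame", secstrs + sechdrs[i].sh_name)) {
				char *start = (char *)sechdrs[i].sh_addr;

				return dwarf_parse_section(start,
						start + sechdrs[i].sh_size, me);
			}
		}

		return 0;
	}

	void module_arch_cleanup(struct module *mod)
	{
		/* Drop all CIEs/FDEs that were registered for this module. */
		dwarf_module_unload(mod);
	}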
Signed-off-by: Matt Fleming --- arch/sh/include/asm/dwarf.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h index c367ed3373c5..aacdc746d07c 100644 --- a/arch/sh/include/asm/dwarf.h +++ b/arch/sh/include/asm/dwarf.h @@ -241,6 +241,12 @@ struct dwarf_cie { unsigned long flags; #define DWARF_CIE_Z_AUGMENTATION (1 << 0) + + /* + * 'mod' will be non-NULL if this CIE came from a module's + * .eh_frame section. + */ + struct module *mod; }; /** @@ -255,6 +261,12 @@ struct dwarf_fde { unsigned char *instructions; unsigned char *end; struct list_head link; + + /* + * 'mod' will be non-NULL if this FDE came from a module's + * .eh_frame section. + */ + struct module *mod; }; /** @@ -364,6 +376,9 @@ static inline unsigned int DW_CFA_operand(unsigned long insn) extern struct dwarf_frame *dwarf_unwind_stack(unsigned long, struct dwarf_frame *); +extern int dwarf_parse_section(char *, char *, struct module *); +extern void dwarf_module_unload(struct module *); + #endif /* !__ASSEMBLY__ */ #define CFI_STARTPROC .cfi_startproc -- cgit v1.2.2 From ed4fe7f488008f38d5f423f0bcc736b1779d6ddc Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sat, 10 Oct 2009 16:03:11 +0100 Subject: sh: Fix memory leak in dwarf_unwind_stack() If we broke out of the while (1) loop because the return address of "frame" was zero, then "frame" needs to be free'd before we return. Signed-off-by: Matt Fleming --- arch/sh/include/asm/dwarf.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h index aacdc746d07c..eef87539963d 100644 --- a/arch/sh/include/asm/dwarf.h +++ b/arch/sh/include/asm/dwarf.h @@ -376,6 +376,7 @@ static inline unsigned int DW_CFA_operand(unsigned long insn) extern struct dwarf_frame *dwarf_unwind_stack(unsigned long, struct dwarf_frame *); +extern void dwarf_free_frame(struct dwarf_frame *); extern int dwarf_parse_section(char *, char *, struct module *); extern void dwarf_module_unload(struct module *); -- cgit v1.2.2 From d26cddbbd23b81eac4fcf340b633e97b40b8d3a1 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sun, 11 Oct 2009 17:56:17 +0100 Subject: sh: tracing: Use the DWARF unwinder for CALLER_ADDRx The major reason for implementing the DWARF unwinder in the first place was so that we could stop using __builtin_return_address(n), which doesn't work on SH for n > 0. Signed-off-by: Matt Fleming --- arch/sh/include/asm/ftrace.h | 47 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h index 12f3a31f20af..5ea9030725c0 100644 --- a/arch/sh/include/asm/ftrace.h +++ b/arch/sh/include/asm/ftrace.h @@ -32,6 +32,53 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) return addr; } + +#ifdef CONFIG_DWARF_UNWINDER +#include + +#define HAVE_ARCH_CALLER_ADDR + +static inline unsigned long dwarf_return_address(int depth) +{ + struct dwarf_frame *frame; + unsigned long ra; + int i; + + for (i = 0, frame = NULL, ra = 0; i <= depth; i++) { + struct dwarf_frame *tmp; + + tmp = dwarf_unwind_stack(ra, frame); + + if (frame) + dwarf_free_frame(frame); + + frame = tmp; + + if (!frame || !frame->return_addr) + break; + + ra = frame->return_addr; + } + + /* Failed to unwind the stack to the specified depth. 
*/ + WARN_ON(i != depth + 1); + + if (frame) + dwarf_free_frame(frame); + + return ra; +} + +#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) +#define CALLER_ADDR1 dwarf_return_address(1) +#define CALLER_ADDR2 dwarf_return_address(2) +#define CALLER_ADDR3 dwarf_return_address(3) +#define CALLER_ADDR4 dwarf_return_address(4) +#define CALLER_ADDR5 dwarf_return_address(5) +#define CALLER_ADDR6 dwarf_return_address(6) + +#endif /* CONFIG_DWARF_UNWINDER */ + #endif /* __ASSEMBLY__ */ #endif /* CONFIG_FUNCTION_TRACER */ -- cgit v1.2.2 From ac4fac8cb24ab209ae373a3e3e9995dff7d0c394 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 13 Oct 2009 13:10:14 +0900 Subject: sh: Generalize CALLER_ADDRx support. This splits out the unwinder implementation and adds a new return_address() abstraction modelled after the ARM code. The DWARF unwinder is tied in to this, returning NULL otherwise in the case of being unable to support arbitrary depths. This enables us to get correct behaviour with the unwinder enabled, as well as disabling the arbitrary depth support when frame pointers are enabled, as arbitrary depths with __builtin_return_address() are not supported regardless. With this abstraction it's also possible to layer on a simplified implementation with frame pointers in the event that the unwinder isn't enabled, although this is left as a future exercise. Signed-off-by: Paul Mundt --- arch/sh/include/asm/dwarf.h | 5 +++++ arch/sh/include/asm/ftrace.h | 50 +++++++------------------------------------- 2 files changed, 13 insertions(+), 42 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h index fc51e66f2380..d985148af19f 100644 --- a/arch/sh/include/asm/dwarf.h +++ b/arch/sh/include/asm/dwarf.h @@ -194,6 +194,11 @@ #define DWARF_ARCH_RA_REG 17 #ifndef __ASSEMBLY__ + +#include +#include +#include + /* * Read either the frame pointer (r14) or the stack pointer (r15). * NOTE: this MUST be inlined. diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h index 5ea9030725c0..28875a3e4116 100644 --- a/arch/sh/include/asm/ftrace.h +++ b/arch/sh/include/asm/ftrace.h @@ -32,52 +32,18 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) return addr; } - -#ifdef CONFIG_DWARF_UNWINDER -#include +/* arch/sh/kernel/return_address.c */ +extern void *return_address(unsigned int); #define HAVE_ARCH_CALLER_ADDR -static inline unsigned long dwarf_return_address(int depth) -{ - struct dwarf_frame *frame; - unsigned long ra; - int i; - - for (i = 0, frame = NULL, ra = 0; i <= depth; i++) { - struct dwarf_frame *tmp; - - tmp = dwarf_unwind_stack(ra, frame); - - if (frame) - dwarf_free_frame(frame); - - frame = tmp; - - if (!frame || !frame->return_addr) - break; - - ra = frame->return_addr; - } - - /* Failed to unwind the stack to the specified depth. 
*/ - WARN_ON(i != depth + 1); - - if (frame) - dwarf_free_frame(frame); - - return ra; -} - #define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) -#define CALLER_ADDR1 dwarf_return_address(1) -#define CALLER_ADDR2 dwarf_return_address(2) -#define CALLER_ADDR3 dwarf_return_address(3) -#define CALLER_ADDR4 dwarf_return_address(4) -#define CALLER_ADDR5 dwarf_return_address(5) -#define CALLER_ADDR6 dwarf_return_address(6) - -#endif /* CONFIG_DWARF_UNWINDER */ +#define CALLER_ADDR1 ((unsigned long)return_address(1)) +#define CALLER_ADDR2 ((unsigned long)return_address(2)) +#define CALLER_ADDR3 ((unsigned long)return_address(3)) +#define CALLER_ADDR4 ((unsigned long)return_address(4)) +#define CALLER_ADDR5 ((unsigned long)return_address(5)) +#define CALLER_ADDR6 ((unsigned long)return_address(6)) #endif /* __ASSEMBLY__ */ #endif /* CONFIG_FUNCTION_TRACER */ -- cgit v1.2.2 From 5a3abba77dc0eb0b00332c21899123cdfa3b19e5 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 13 Oct 2009 13:32:19 +0900 Subject: sh: Tidy up the dwarf module helpers. This enables us to build the dwarf unwinder both with modules enabled and disabled in addition to reducing code size in the latter case. The helpers are also consolidated, and modified to resemble the BUG module helpers. Signed-off-by: Paul Mundt --- arch/sh/include/asm/dwarf.h | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h index d985148af19f..bdccbbfdc0bd 100644 --- a/arch/sh/include/asm/dwarf.h +++ b/arch/sh/include/asm/dwarf.h @@ -198,6 +198,7 @@ #include #include #include +#include /* * Read either the frame pointer (r14) or the stack pointer (r15). @@ -382,8 +383,10 @@ static inline unsigned int DW_CFA_operand(unsigned long insn) extern struct dwarf_frame *dwarf_unwind_stack(unsigned long, struct dwarf_frame *); extern void dwarf_free_frame(struct dwarf_frame *); -extern int dwarf_parse_section(char *, char *, struct module *); -extern void dwarf_module_unload(struct module *); + +extern int module_dwarf_finalize(const Elf_Ehdr *, const Elf_Shdr *, + struct module *); +extern void module_dwarf_cleanup(struct module *); #endif /* !__ASSEMBLY__ */ @@ -412,6 +415,10 @@ extern void dwarf_module_unload(struct module *); static inline void dwarf_unwinder_init(void) { } + +#define module_dwarf_finalize(hdr, sechdrs, me) (0) +#define module_dwarf_cleanup(mod) do { } while (0) + #endif #endif /* CONFIG_DWARF_UNWINDER */ -- cgit v1.2.2 From 36c871992697eaaf88a3682c2c3003a41c54b8c0 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 14 Oct 2009 11:49:49 +0900 Subject: sh: Provide CALLER_ADDRx definitions even when ftrace is disabled. Despite being located in the ftrace header, the CALLER_ADDRx definitions are used by generic code. As such, we have to provide it generically, and given that there is no real dependence on ftrace in the first place, the definitions can just be moved out. 
Signed-off-by: Paul Mundt --- arch/sh/include/asm/ftrace.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h index 28875a3e4116..13e9966464c2 100644 --- a/arch/sh/include/asm/ftrace.h +++ b/arch/sh/include/asm/ftrace.h @@ -32,6 +32,11 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) return addr; } +#endif /* __ASSEMBLY__ */ +#endif /* CONFIG_FUNCTION_TRACER */ + +#ifndef __ASSEMBLY__ + /* arch/sh/kernel/return_address.c */ extern void *return_address(unsigned int); @@ -46,6 +51,5 @@ extern void *return_address(unsigned int); #define CALLER_ADDR6 ((unsigned long)return_address(6)) #endif /* __ASSEMBLY__ */ -#endif /* CONFIG_FUNCTION_TRACER */ #endif /* __ASM_SH_FTRACE_H */ -- cgit v1.2.2 From 56bfc42f6cba3e831094c01a23fbbb17a20bbdf8 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 14 Oct 2009 16:05:42 +0900 Subject: sh: TS_RESTORE_SIGMASK conversion. Replace TIF_RESTORE_SIGMASK with TS_RESTORE_SIGMASK and define our own set_restore_sigmask() function. This saves the costly SMP-safe set_bit operation, which we do not need for the sigmask flag since TIF_SIGPENDING always has to be set too. Based on the x86 and powerpc change. Signed-off-by: Paul Mundt --- arch/sh/include/asm/thread_info.h | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index bdeb9d46d17d..23eeed89467a 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h @@ -19,6 +19,7 @@ struct thread_info { struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ unsigned long flags; /* low level flags */ + __u32 status; /* thread synchronous flags */ __u32 cpu; int preempt_count; /* 0 => preemptable, <0 => BUG */ mm_segment_t addr_limit; /* thread address space */ @@ -111,7 +112,6 @@ extern void free_thread_info(struct thread_info *ti); #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ -#define TIF_RESTORE_SIGMASK 3 /* restore signal mask in do_signal() */ #define TIF_SINGLESTEP 4 /* singlestepping active */ #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ #define TIF_SECCOMP 6 /* secure computing */ @@ -125,7 +125,6 @@ extern void free_thread_info(struct thread_info *ti); #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) -#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) @@ -149,13 +148,32 @@ extern void free_thread_info(struct thread_info *ti); /* work to do on any return to u-space */ #define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \ _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \ - _TIF_SINGLESTEP | _TIF_RESTORE_SIGMASK | \ - _TIF_NOTIFY_RESUME | _TIF_SYSCALL_TRACEPOINT) + _TIF_SINGLESTEP | _TIF_NOTIFY_RESUME | \ + _TIF_SYSCALL_TRACEPOINT) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP)) +/* + * Thread-synchronous status. 
+ * + * This is different from the flags in that nobody else + * ever touches our thread-synchronous status, so we don't + * have to worry about atomic accesses. + */ +#define TS_RESTORE_SIGMASK 0x0001 /* restore signal mask in do_signal() */ + +#ifndef __ASSEMBLY__ +#define HAVE_SET_RESTORE_SIGMASK 1 +static inline void set_restore_sigmask(void) +{ + struct thread_info *ti = current_thread_info(); + ti->status |= TS_RESTORE_SIGMASK; + set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags); +} +#endif /* !__ASSEMBLY__ */ + #endif /* __KERNEL__ */ #endif /* __ASM_SH_THREAD_INFO_H */ -- cgit v1.2.2 From 731ba3301de41d2ffae9dd3e0f85f7361d8ad8f4 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 14 Oct 2009 16:42:28 +0900 Subject: sh: Count NMIs in irq_cpustat_t. This plugs in support for NMI counting per-CPU via irq_cpustat_t. Modelled after the x86 implementation. Signed-off-by: Paul Mundt --- arch/sh/include/asm/hardirq.h | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/hardirq.h b/arch/sh/include/asm/hardirq.h index a5be4afa790b..48b191313a99 100644 --- a/arch/sh/include/asm/hardirq.h +++ b/arch/sh/include/asm/hardirq.h @@ -1,9 +1,16 @@ #ifndef __ASM_SH_HARDIRQ_H #define __ASM_SH_HARDIRQ_H -extern void ack_bad_irq(unsigned int irq); -#define ack_bad_irq ack_bad_irq +#include +#include + +typedef struct { + unsigned int __softirq_pending; + unsigned int __nmi_count; /* arch dependent */ +} ____cacheline_aligned irq_cpustat_t; -#include +#include /* Standard mappings for irq_cpustat_t above */ + +extern void ack_bad_irq(unsigned int irq); #endif /* __ASM_SH_HARDIRQ_H */ -- cgit v1.2.2 From f533c3d340536198a4889a42a68d6c0d79a504e7 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 16 Oct 2009 17:20:58 +0900 Subject: sh: Idle loop chainsawing for SMP-based light sleep. This does a bit of chainsawing of the idle loop code to get light sleep working on SMP. Previously this was forcing secondary CPUs in to sleep mode with them not coming back if they didn't have their own local timers. Given that we use clockevents broadcasting by default, the CPU managing the clockevents can't have IRQs disabled before entering its sleep state. This unfortunately leaves us with the age-old need_resched() race in between local_irq_enable() and cpu_sleep(), but at present this is unavoidable. After some more experimentation it may be possible to layer on SR.BL bit manipulation over top of this scheme to inhibit the race condition, but given the current potential for missing wakeups, this is left as a future exercise. Signed-off-by: Paul Mundt --- arch/sh/include/asm/bugs.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h index 46260fcbdf4b..02a19a1c033a 100644 --- a/arch/sh/include/asm/bugs.h +++ b/arch/sh/include/asm/bugs.h @@ -14,11 +14,15 @@ #include +extern void select_idle_routine(void); + static void __init check_bugs(void) { extern unsigned long loops_per_jiffy; char *p = &init_utsname()->machine[2]; /* "sh" */ + select_idle_routine(); + current_cpu_data.loops_per_jiffy = loops_per_jiffy; switch (current_cpu_data.family) { -- cgit v1.2.2 From 896f0c0e8e4ee02ee72a203aef79f362d5f7b7cc Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 16 Oct 2009 18:00:02 +0900 Subject: sh: Support SCHED_MC for SH-X3 multi-cores. This enables SCHED_MC support for SH-X3 multi-cores. 
Presently this is just a simple wrapper around the possible map, but this allows for tying in support for some of the more exotic NUMA clusters where we can actually do something with the topology. Signed-off-by: Paul Mundt --- arch/sh/include/asm/topology.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h index 65e7bd2f2240..37cdadd975ac 100644 --- a/arch/sh/include/asm/topology.h +++ b/arch/sh/include/asm/topology.h @@ -40,6 +40,14 @@ #endif +#define mc_capable() (1) + +const struct cpumask *cpu_coregroup_mask(unsigned int cpu); + +extern cpumask_t cpu_core_map[NR_CPUS]; + +#define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) + #include #endif /* _ASM_SH_TOPOLOGY_H */ -- cgit v1.2.2 From cae19b5902d52ff059f5df98ea993a00e5686af1 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 16 Oct 2009 18:20:42 +0900 Subject: sh: Kill off legacy UBC wakeup cruft. This code was added for some ancient SH-4 solution engines with peculiar boot ROMs that did silly things to the UBC MSTP bits. None of these have been in the wild for years, and these days the clock framework wraps up the MSTP bits, meaning that the UBC code is one of the few interfaces that is stomping MSTP bits underneath the clock framework. At this point the risks far outweigh any benefit this code provided, so just kill it off. Signed-off-by: Paul Mundt --- arch/sh/include/asm/ubc.h | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/ubc.h b/arch/sh/include/asm/ubc.h index 4ca4b7717371..9bf961684431 100644 --- a/arch/sh/include/asm/ubc.h +++ b/arch/sh/include/asm/ubc.h @@ -60,16 +60,5 @@ #define BRCR_UBDE (1 << 0) #endif -#ifndef __ASSEMBLY__ -/* arch/sh/kernel/cpu/ubc.S */ -extern void ubc_sleep(void); - -#ifdef CONFIG_UBC_WAKEUP -extern void ubc_wakeup(void); -#else -#define ubc_wakeup() do { } while (0) -#endif -#endif - #endif /* __KERNEL__ */ #endif /* __ASM_SH_UBC_H */ -- cgit v1.2.2 From 03fdb708926d5df2d9b9e62222c1666e20caa9e3 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 17 Oct 2009 21:06:39 +0900 Subject: sh: Convert to asm-generic/irqflags.h. This simplifies the irqflags support by switching over to the asm-generic version. The necessary support functions are brought out-of-line for both SHcompact and SHmedia instruction sets. 
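With the generic header in place, the architecture only needs to supply the two remaining primitives out of line, assuming the generic header's expected entry points of __raw_local_save_flags() and raw_local_irq_restore(). For SHcompact these are essentially the inline assembly removed from irqflags_32.h below, moved into a C file; a sketch (SR.RB handling omitted, the exact kernel code may differ):

	unsigned long notrace __raw_local_save_flags(void)
	{
		unsigned long flags;

		__asm__ __volatile__ (
			"stc	sr, %0\n\t"
			"and	#0xf0, %0\n\t"
			: "=&z" (flags)
			: /* no inputs */
			: "memory"
		);

		return flags;
	}

	void notrace raw_local_irq_restore(unsigned long flags)
	{
		unsigned long __dummy0, __dummy1;

		if (flags == RAW_IRQ_DISABLED) {
			/* Mask all interrupt levels in SR.IMASK. */
			__asm__ __volatile__ (
				"stc	sr, %0\n\t"
				"or	#0xf0, %0\n\t"
				"ldc	%0, sr\n\t"
				: "=&z" (__dummy0)
				: /* no inputs */
				: "memory"
			);
		} else {
			/* Clear SR.IMASK, re-enabling all interrupt levels. */
			__asm__ __volatile__ (
				"stc	sr, %0\n\t"
				"and	%1, %0\n\t"
				"ldc	%0, sr\n\t"
				: "=&r" (__dummy0), "=r" (__dummy1)
				: "1" (~0x000000f0)
				: "memory"
			);
		}
	}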
Signed-off-by: Paul Mundt --- arch/sh/include/asm/irqflags.h | 31 ++---------- arch/sh/include/asm/irqflags_32.h | 99 --------------------------------------- arch/sh/include/asm/irqflags_64.h | 85 --------------------------------- arch/sh/include/asm/system_32.h | 29 ++++++++++++ arch/sh/include/asm/system_64.h | 26 ++++++++++ 5 files changed, 58 insertions(+), 212 deletions(-) delete mode 100644 arch/sh/include/asm/irqflags_32.h delete mode 100644 arch/sh/include/asm/irqflags_64.h (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/irqflags.h b/arch/sh/include/asm/irqflags.h index 46e71da5be6b..a741153b41c2 100644 --- a/arch/sh/include/asm/irqflags.h +++ b/arch/sh/include/asm/irqflags.h @@ -1,34 +1,9 @@ #ifndef __ASM_SH_IRQFLAGS_H #define __ASM_SH_IRQFLAGS_H -#ifdef CONFIG_SUPERH32 -#include "irqflags_32.h" -#else -#include "irqflags_64.h" -#endif +#define RAW_IRQ_DISABLED 0xf0 +#define RAW_IRQ_ENABLED 0x00 -#define raw_local_save_flags(flags) \ - do { (flags) = __raw_local_save_flags(); } while (0) - -static inline int raw_irqs_disabled_flags(unsigned long flags) -{ - return (flags != 0); -} - -static inline int raw_irqs_disabled(void) -{ - unsigned long flags = __raw_local_save_flags(); - - return raw_irqs_disabled_flags(flags); -} - -#define raw_local_irq_save(flags) \ - do { (flags) = __raw_local_irq_save(); } while (0) - -static inline void raw_local_irq_restore(unsigned long flags) -{ - if ((flags & 0xf0) != 0xf0) - raw_local_irq_enable(); -} +#include #endif /* __ASM_SH_IRQFLAGS_H */ diff --git a/arch/sh/include/asm/irqflags_32.h b/arch/sh/include/asm/irqflags_32.h deleted file mode 100644 index 60218f541340..000000000000 --- a/arch/sh/include/asm/irqflags_32.h +++ /dev/null @@ -1,99 +0,0 @@ -#ifndef __ASM_SH_IRQFLAGS_32_H -#define __ASM_SH_IRQFLAGS_32_H - -static inline void raw_local_irq_enable(void) -{ - unsigned long __dummy0, __dummy1; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "and %1, %0\n\t" -#ifdef CONFIG_CPU_HAS_SR_RB - "stc r6_bank, %1\n\t" - "or %1, %0\n\t" -#endif - "ldc %0, sr\n\t" - : "=&r" (__dummy0), "=r" (__dummy1) - : "1" (~0x000000f0) - : "memory" - ); -} - -static inline void raw_local_irq_disable(void) -{ - unsigned long flags; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "or #0xf0, %0\n\t" - "ldc %0, sr\n\t" - : "=&z" (flags) - : /* no inputs */ - : "memory" - ); -} - -static inline void set_bl_bit(void) -{ - unsigned long __dummy0, __dummy1; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "or %2, %0\n\t" - "and %3, %0\n\t" - "ldc %0, sr\n\t" - : "=&r" (__dummy0), "=r" (__dummy1) - : "r" (0x10000000), "r" (0xffffff0f) - : "memory" - ); -} - -static inline void clear_bl_bit(void) -{ - unsigned long __dummy0, __dummy1; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "and %2, %0\n\t" - "ldc %0, sr\n\t" - : "=&r" (__dummy0), "=r" (__dummy1) - : "1" (~0x10000000) - : "memory" - ); -} - -static inline unsigned long __raw_local_save_flags(void) -{ - unsigned long flags; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "and #0xf0, %0\n\t" - : "=&z" (flags) - : /* no inputs */ - : "memory" - ); - - return flags; -} - -static inline unsigned long __raw_local_irq_save(void) -{ - unsigned long flags, __dummy; - - __asm__ __volatile__ ( - "stc sr, %1\n\t" - "mov %1, %0\n\t" - "or #0xf0, %0\n\t" - "ldc %0, sr\n\t" - "mov %1, %0\n\t" - "and #0xf0, %0\n\t" - : "=&z" (flags), "=&r" (__dummy) - : /* no inputs */ - : "memory" - ); - - return flags; -} - -#endif /* __ASM_SH_IRQFLAGS_32_H */ diff --git a/arch/sh/include/asm/irqflags_64.h 
b/arch/sh/include/asm/irqflags_64.h deleted file mode 100644 index 88f65222c1d4..000000000000 --- a/arch/sh/include/asm/irqflags_64.h +++ /dev/null @@ -1,85 +0,0 @@ -#ifndef __ASM_SH_IRQFLAGS_64_H -#define __ASM_SH_IRQFLAGS_64_H - -#include - -#define SR_MASK_LL 0x00000000000000f0LL -#define SR_BL_LL 0x0000000010000000LL - -static inline void raw_local_irq_enable(void) -{ - unsigned long long __dummy0, __dummy1 = ~SR_MASK_LL; - - __asm__ __volatile__("getcon " __SR ", %0\n\t" - "and %0, %1, %0\n\t" - "putcon %0, " __SR "\n\t" - : "=&r" (__dummy0) - : "r" (__dummy1)); -} - -static inline void raw_local_irq_disable(void) -{ - unsigned long long __dummy0, __dummy1 = SR_MASK_LL; - - __asm__ __volatile__("getcon " __SR ", %0\n\t" - "or %0, %1, %0\n\t" - "putcon %0, " __SR "\n\t" - : "=&r" (__dummy0) - : "r" (__dummy1)); -} - -static inline void set_bl_bit(void) -{ - unsigned long long __dummy0, __dummy1 = SR_BL_LL; - - __asm__ __volatile__("getcon " __SR ", %0\n\t" - "or %0, %1, %0\n\t" - "putcon %0, " __SR "\n\t" - : "=&r" (__dummy0) - : "r" (__dummy1)); - -} - -static inline void clear_bl_bit(void) -{ - unsigned long long __dummy0, __dummy1 = ~SR_BL_LL; - - __asm__ __volatile__("getcon " __SR ", %0\n\t" - "and %0, %1, %0\n\t" - "putcon %0, " __SR "\n\t" - : "=&r" (__dummy0) - : "r" (__dummy1)); -} - -static inline unsigned long __raw_local_save_flags(void) -{ - unsigned long long __dummy = SR_MASK_LL; - unsigned long flags; - - __asm__ __volatile__ ( - "getcon " __SR ", %0\n\t" - "and %0, %1, %0" - : "=&r" (flags) - : "r" (__dummy)); - - return flags; -} - -static inline unsigned long __raw_local_irq_save(void) -{ - unsigned long long __dummy0, __dummy1 = SR_MASK_LL; - unsigned long flags; - - __asm__ __volatile__ ( - "getcon " __SR ", %1\n\t" - "or %1, r63, %0\n\t" - "or %1, %2, %1\n\t" - "putcon %1, " __SR "\n\t" - "and %0, %2, %0" - : "=&r" (flags), "=&r" (__dummy0) - : "r" (__dummy1)); - - return flags; -} - -#endif /* __ASM_SH_IRQFLAGS_64_H */ diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h index 607d413f6168..06814f5b59c7 100644 --- a/arch/sh/include/asm/system_32.h +++ b/arch/sh/include/asm/system_32.h @@ -232,4 +232,33 @@ asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, struct pt_regs __regs); +static inline void set_bl_bit(void) +{ + unsigned long __dummy0, __dummy1; + + __asm__ __volatile__ ( + "stc sr, %0\n\t" + "or %2, %0\n\t" + "and %3, %0\n\t" + "ldc %0, sr\n\t" + : "=&r" (__dummy0), "=r" (__dummy1) + : "r" (0x10000000), "r" (0xffffff0f) + : "memory" + ); +} + +static inline void clear_bl_bit(void) +{ + unsigned long __dummy0, __dummy1; + + __asm__ __volatile__ ( + "stc sr, %0\n\t" + "and %2, %0\n\t" + "ldc %0, sr\n\t" + : "=&r" (__dummy0), "=r" (__dummy1) + : "1" (~0x10000000) + : "memory" + ); +} + #endif /* __ASM_SH_SYSTEM_32_H */ diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h index 8e4a03e7966c..ab1dd917ea87 100644 --- a/arch/sh/include/asm/system_64.h +++ b/arch/sh/include/asm/system_64.h @@ -12,6 +12,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. 
*/ +#include #include /* @@ -47,4 +48,29 @@ static inline reg_size_t register_align(void *val) return (unsigned long long)(signed long long)(signed long)val; } +#define SR_BL_LL 0x0000000010000000LL + +static inline void set_bl_bit(void) +{ + unsigned long long __dummy0, __dummy1 = SR_BL_LL; + + __asm__ __volatile__("getcon " __SR ", %0\n\t" + "or %0, %1, %0\n\t" + "putcon %0, " __SR "\n\t" + : "=&r" (__dummy0) + : "r" (__dummy1)); + +} + +static inline void clear_bl_bit(void) +{ + unsigned long long __dummy0, __dummy1 = ~SR_BL_LL; + + __asm__ __volatile__("getcon " __SR ", %0\n\t" + "and %0, %1, %0\n\t" + "putcon %0, " __SR "\n\t" + : "=&r" (__dummy0) + : "r" (__dummy1)); +} + #endif /* __ASM_SH_SYSTEM_64_H */ -- cgit v1.2.2 From 1c8db713e21c82e14d0d1be14a09dae224472396 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 18 Oct 2009 15:36:02 +0900 Subject: sh: Fix up smp_mb__xxx() memory barriers for SH-4A SMP. In the past these were simply wrapping to barrier() which was sufficient on SH SMP platforms predating SH-4A. Unfortunately due to ll/sc semantics an explicit synco is needed in these cases, which is sorted for us by just switching these over to smp_mb(). smp_mb() also has the benefit of being wrapped to barrier() in the UP and non-SH4A cases, so old behaviour is maintained for those parts. Signed-off-by: Paul Mundt --- arch/sh/include/asm/atomic.h | 9 ++++----- arch/sh/include/asm/bitops.h | 4 ++-- 2 files changed, 6 insertions(+), 7 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index e8e78137c6f5..b16388d71954 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h @@ -78,11 +78,10 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) -/* Atomic operations are already serializing on SH */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() +#define smp_mb__before_atomic_dec() smp_mb() +#define smp_mb__after_atomic_dec() smp_mb() +#define smp_mb__before_atomic_inc() smp_mb() +#define smp_mb__after_atomic_inc() smp_mb() #include #include diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h index ebe595b7ab1f..98511e4d28cb 100644 --- a/arch/sh/include/asm/bitops.h +++ b/arch/sh/include/asm/bitops.h @@ -26,8 +26,8 @@ /* * clear_bit() doesn't provide any barrier for the compiler. */ -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() +#define smp_mb__before_clear_bit() smp_mb() +#define smp_mb__after_clear_bit() smp_mb() #ifdef CONFIG_SUPERH32 static inline unsigned long ffz(unsigned long word) -- cgit v1.2.2 From 73c926bee0e4b7739bbb992a0a3df561178dd522 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 20 Oct 2009 12:55:56 +0900 Subject: sh: Convert to asm-generic/dma-mapping-common.h This converts the old DMA mapping support to the new generic dma-mapping-common.h abstraction. 
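Platform code now simply points dma_ops at a struct dma_map_ops instance. A minimal sketch of what the default non-IOMMU ops might look like (the names are illustrative, not the actual arch/sh/kernel implementation):

	static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 struct dma_attrs *attrs)
	{
		/* Write back/invalidate before handing the buffer to the device. */
		dma_cache_sync(dev, page_address(page) + offset, size, dir);

		return page_to_phys(page) + offset;
	}

	static struct dma_map_ops nommu_dma_ops = {
		.map_page	= nommu_map_page,
		.is_phys	= 1,
	};

	struct dma_map_ops *dma_ops = &nommu_dma_ops;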
Signed-off-by: Paul Mundt --- arch/sh/include/asm/dma-mapping.h | 200 +++++--------------------------------- arch/sh/include/asm/pci.h | 10 +- 2 files changed, 27 insertions(+), 183 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 69d56dd4c968..b9a8f18f35a2 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -1,21 +1,32 @@ #ifndef __ASM_SH_DMA_MAPPING_H #define __ASM_SH_DMA_MAPPING_H -#include -#include -#include -#include -#include -#include +extern struct dma_map_ops *dma_ops; +extern void no_iommu_init(void); -extern struct bus_type pci_bus_type; +static inline struct dma_map_ops *get_dma_ops(struct device *dev) +{ + return dma_ops; +} + +static inline int dma_supported(struct device *dev, u64 mask) +{ + struct dma_map_ops *ops = get_dma_ops(dev); -#define dma_supported(dev, mask) (1) + if (ops->dma_supported) + return ops->dma_supported(dev, mask); + + return 1; +} static inline int dma_set_mask(struct device *dev, u64 mask) { + struct dma_map_ops *ops = get_dma_ops(dev); + if (!dev->dma_mask || !dma_supported(dev, mask)) return -EIO; + if (ops->set_dma_mask) + return ops->set_dma_mask(dev, mask); *dev->dma_mask = mask; @@ -35,160 +46,6 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) #define dma_is_consistent(d, h) (1) -static inline dma_addr_t dma_map_single(struct device *dev, - void *ptr, size_t size, - enum dma_data_direction dir) -{ - dma_addr_t addr = virt_to_phys(ptr); - -#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT) - if (dev->bus == &pci_bus_type) - return addr; -#endif - dma_cache_sync(dev, ptr, size, dir); - - debug_dma_map_page(dev, virt_to_page(ptr), - (unsigned long)ptr & ~PAGE_MASK, size, - dir, addr, true); - - return addr; -} - -static inline void dma_unmap_single(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir) -{ - debug_dma_unmap_page(dev, addr, size, dir, true); -} - -static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir) -{ - int i; - - for (i = 0; i < nents; i++) { -#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) - dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir); -#endif - sg[i].dma_address = sg_phys(&sg[i]); - sg[i].dma_length = sg[i].length; - } - - debug_dma_map_sg(dev, sg, nents, i, dir); - - return nents; -} - -static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir) -{ - debug_dma_unmap_sg(dev, sg, nents, dir); -} - -static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir) -{ - return dma_map_single(dev, page_address(page) + offset, size, dir); -} - -static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, - size_t size, enum dma_data_direction dir) -{ - dma_unmap_single(dev, dma_address, size, dir); -} - -static inline void __dma_sync_single(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction dir) -{ -#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT) - if (dev->bus == &pci_bus_type) - return; -#endif - dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir); -} - -static inline void dma_sync_single_range(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum 
dma_data_direction dir) -{ -#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT) - if (dev->bus == &pci_bus_type) - return; -#endif - dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir); -} - -static inline void __dma_sync_sg(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) -{ - int i; - - for (i = 0; i < nelems; i++) { -#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) - dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir); -#endif - sg[i].dma_address = sg_phys(&sg[i]); - sg[i].dma_length = sg[i].length; - } -} - -static inline void dma_sync_single_for_cpu(struct device *dev, - dma_addr_t dma_handle, size_t size, - enum dma_data_direction dir) -{ - __dma_sync_single(dev, dma_handle, size, dir); - debug_dma_sync_single_for_cpu(dev, dma_handle, size, dir); -} - -static inline void dma_sync_single_for_device(struct device *dev, - dma_addr_t dma_handle, - size_t size, - enum dma_data_direction dir) -{ - __dma_sync_single(dev, dma_handle, size, dir); - debug_dma_sync_single_for_device(dev, dma_handle, size, dir); -} - -static inline void dma_sync_single_range_for_cpu(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - enum dma_data_direction direction) -{ - dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction); - debug_dma_sync_single_range_for_cpu(dev, dma_handle, - offset, size, direction); -} - -static inline void dma_sync_single_range_for_device(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - enum dma_data_direction direction) -{ - dma_sync_single_for_device(dev, dma_handle+offset, size, direction); - debug_dma_sync_single_range_for_device(dev, dma_handle, - offset, size, direction); -} - - -static inline void dma_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sg, int nelems, - enum dma_data_direction dir) -{ - __dma_sync_sg(dev, sg, nelems, dir); - debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); -} - -static inline void dma_sync_sg_for_device(struct device *dev, - struct scatterlist *sg, int nelems, - enum dma_data_direction dir) -{ - __dma_sync_sg(dev, sg, nelems, dir); - debug_dma_sync_sg_for_device(dev, sg, nelems, dir); -} - static inline int dma_get_cache_alignment(void) { /* @@ -200,20 +57,15 @@ static inline int dma_get_cache_alignment(void) static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - return dma_addr == 0; -} + struct dma_map_ops *ops = get_dma_ops(dev); -#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY + if (ops->mapping_error) + return ops->mapping_error(dev, dma_addr); -extern int -dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, - dma_addr_t device_addr, size_t size, int flags); - -extern void -dma_release_declared_memory(struct device *dev); + return dma_addr == 0; +} -extern void * -dma_mark_declared_memory_occupied(struct device *dev, - dma_addr_t device_addr, size_t size); +#include +#include #endif /* __ASM_SH_DMA_MAPPING_H */ diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h index 4163950cd1c6..6bf276b4f85d 100644 --- a/arch/sh/include/asm/pci.h +++ b/arch/sh/include/asm/pci.h @@ -3,8 +3,6 @@ #ifdef __KERNEL__ -#include - /* Can be used to override the logic in pci_scan_bus for skipping already-configured bus numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the loader */ @@ -54,13 +52,7 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) * address space. 
The networking and block device layers use * this boolean for bounce buffer decisions. */ -#define PCI_DMA_BUS_IS_PHYS (1) - -#include -#include -#include -#include -#include +#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) /* pci_unmap_{single,page} being a nop depends upon the * configuration. -- cgit v1.2.2 From f32154c9b580f11017b01bf093514c900c09364e Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 26 Oct 2009 09:50:51 +0900 Subject: sh: Add dma-mapping support for dma_alloc/free_coherent() overrides. This moves the current dma_alloc/free_coherent() calls to a generic variant and plugs them in for the nommu default. Other variants can override the defaults in the dma mapping ops directly. Signed-off-by: Paul Mundt --- arch/sh/include/asm/dma-mapping.h | 48 ++++++++++++++++++++++++++++++++------- 1 file changed, 40 insertions(+), 8 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index b9a8f18f35a2..653076018df0 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -9,6 +9,9 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return dma_ops; } +#include +#include + static inline int dma_supported(struct device *dev, u64 mask) { struct dma_map_ops *ops = get_dma_ops(dev); @@ -33,12 +36,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask) return 0; } -void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag); - -void dma_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle); - void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction dir); @@ -65,7 +62,42 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return dma_addr == 0; } -#include -#include +static inline void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + void *memory; + + if (dma_alloc_from_coherent(dev, size, dma_handle, &memory)) + return memory; + if (!ops->alloc_coherent) + return NULL; + + memory = ops->alloc_coherent(dev, size, dma_handle, gfp); + debug_dma_alloc_coherent(dev, size, *dma_handle, memory); + + return memory; +} + +static inline void dma_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + WARN_ON(irqs_disabled()); /* for portability */ + + if (dma_release_from_coherent(dev, get_order(size), vaddr)) + return; + + debug_dma_free_coherent(dev, size, vaddr, dma_handle); + if (ops->free_coherent) + ops->free_coherent(dev, size, vaddr, dma_handle); +} + +/* arch/sh/mm/consistent.c */ +extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_addr, gfp_t flag); +extern void dma_generic_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle); #endif /* __ASM_SH_DMA_MAPPING_H */ -- cgit v1.2.2 From f72f7876ae0bc0f018fca140e66aa16fedb57d89 Mon Sep 17 00:00:00 2001 From: Valentin R Sitsikov Date: Fri, 16 Oct 2009 10:45:47 +0000 Subject: sh: fix watchdog timer for sh7780/sh7785 Signed-off-by: Valentin Sitdikov Signed-off-by: Paul Mundt --- arch/sh/include/asm/watchdog.h | 59 +++++++++++++++++++++++++++++++++- arch/sh/include/cpu-sh4/cpu/watchdog.h | 13 ++++++++ 2 files changed, 71 insertions(+), 1 deletion(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/watchdog.h 
b/arch/sh/include/asm/watchdog.h index 2fe7cee9e43a..19dfff5c8511 100644 --- a/arch/sh/include/asm/watchdog.h +++ b/arch/sh/include/asm/watchdog.h @@ -2,6 +2,8 @@ * include/asm-sh/watchdog.h * * Copyright (C) 2002, 2003 Paul Mundt + * Copyright (C) 2009 Siemens AG + * Copyright (C) 2009 Valentin Sitdikov * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -61,6 +63,61 @@ #define WTCSR_CKS_2048 0x06 #define WTCSR_CKS_4096 0x07 +#if defined(CONFIG_CPU_SUBTYPE_SH7785) || defined(CONFIG_CPU_SUBTYPE_SH7780) +/** + * sh_wdt_read_cnt - Read from Counter + * Reads back the WTCNT value. + */ +static inline __u32 sh_wdt_read_cnt(void) +{ + return ctrl_inl(WTCNT_R); +} + +/** + * sh_wdt_write_cnt - Write to Counter + * @val: Value to write + * + * Writes the given value @val to the lower byte of the timer counter. + * The upper byte is set manually on each write. + */ +static inline void sh_wdt_write_cnt(__u32 val) +{ + ctrl_outl((WTCNT_HIGH << 24) | (__u32)val, WTCNT); +} + +/** + * sh_wdt_write_bst - Write to Counter + * @val: Value to write + * + * Writes the given value @val to the lower byte of the timer counter. + * The upper byte is set manually on each write. + */ +static inline void sh_wdt_write_bst(__u32 val) +{ + ctrl_outl((WTBST_HIGH << 24) | (__u32)val, WTBST); +} +/** + * sh_wdt_read_csr - Read from Control/Status Register + * + * Reads back the WTCSR value. + */ +static inline __u32 sh_wdt_read_csr(void) +{ + return ctrl_inl(WTCSR_R); +} + +/** + * sh_wdt_write_csr - Write to Control/Status Register + * @val: Value to write + * + * Writes the given value @val to the lower byte of the control/status + * register. The upper byte is set manually on each write. + */ +static inline void sh_wdt_write_csr(__u32 val) +{ + ctrl_outl((WTCSR_HIGH << 24) | (__u32)val, WTCSR); +} +#else /** * sh_wdt_read_cnt - Read from Counter * Reads back the WTCNT value. @@ -103,6 +160,6 @@ static inline void sh_wdt_write_csr(__u8 val) { ctrl_outw((WTCSR_HIGH << 8) | (__u16)val, WTCSR); } - +#endif /* CONFIG_CPU_SUBTYPE_SH7785 || CONFIG_CPU_SUBTYPE_SH7780 */ #endif /* __KERNEL__ */ #endif /* __ASM_SH_WATCHDOG_H */ diff --git a/arch/sh/include/cpu-sh4/cpu/watchdog.h b/arch/sh/include/cpu-sh4/cpu/watchdog.h index 259f6a0ce23d..7672301d0c70 100644 --- a/arch/sh/include/cpu-sh4/cpu/watchdog.h +++ b/arch/sh/include/cpu-sh4/cpu/watchdog.h @@ -2,6 +2,8 @@ * include/asm-sh/cpu-sh4/watchdog.h * * Copyright (C) 2002, 2003 Paul Mundt + * Copyright (C) 2009 Siemens AG + * Copyright (C) 2009 Sitdikov Valentin * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive @@ -10,9 +12,20 @@ #ifndef __ASM_CPU_SH4_WATCHDOG_H #define __ASM_CPU_SH4_WATCHDOG_H +#if defined(CONFIG_CPU_SUBTYPE_SH7785) || defined(CONFIG_CPU_SUBTYPE_SH7780) +/* Prefix definition */ +#define WTBST_HIGH 0x55 +/* Register definitions */ +#define WTCNT_R 0xffcc0010 /*WDTCNT*/ +#define WTCSR 0xffcc0004 /*WDTCSR*/ +#define WTCNT 0xffcc0000 /*WDTST*/ +#define WTST WTCNT +#define WTBST 0xffcc0008 /*WDTBST*/ +#else /* Register definitions */ #define WTCNT 0xffc00008 #define WTCSR 0xffc0000c +#endif /* Bit definitions */ #define WTCSR_TME 0x80 -- cgit v1.2.2 From 01be5d63fd4645eab1d05a7caa04462c11c8b7a1 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 27 Oct 2009 10:35:02 +0900 Subject: sh: Revamp PCI DMA coherence Kconfig bits. 
Leaving this configurable caused more trouble than it was ever worth, so just make it explicit. Boards that are verified one way or the other can fix up their selects accordingly. We presently default to non-coherent for most platforms. Signed-off-by: Paul Mundt --- arch/sh/include/asm/pci.h | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h index 6bf276b4f85d..67f3999b544e 100644 --- a/arch/sh/include/asm/pci.h +++ b/arch/sh/include/asm/pci.h @@ -57,19 +57,13 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) /* pci_unmap_{single,page} being a nop depends upon the * configuration. */ -#ifdef CONFIG_SH_PCIDMA_NONCOHERENT -#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ - dma_addr_t ADDR_NAME; -#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ - __u32 LEN_NAME; -#define pci_unmap_addr(PTR, ADDR_NAME) \ - ((PTR)->ADDR_NAME) -#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ - (((PTR)->ADDR_NAME) = (VAL)) -#define pci_unmap_len(PTR, LEN_NAME) \ - ((PTR)->LEN_NAME) -#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ - (((PTR)->LEN_NAME) = (VAL)) +#ifdef CONFIG_DMA_NONCOHERENT +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME; +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME; +#define pci_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) #else #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) -- cgit v1.2.2 From 478fb158005b55c8484f23a6beb1b69f5a612162 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 27 Oct 2009 10:41:58 +0900 Subject: sh: Fix up dma_is_consistent(). This fixes up the dma_is_consistent() definition for the various coherence options. Signed-off-by: Paul Mundt --- arch/sh/include/asm/dma-mapping.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 653076018df0..87ced133a363 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -41,7 +41,12 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) + +#ifdef CONFIG_DMA_COHERENT #define dma_is_consistent(d, h) (1) +#else +#define dma_is_consistent(d, h) (0) +#endif static inline int dma_get_cache_alignment(void) { -- cgit v1.2.2 From ac44e6694755744fe96442919da1f2c7e87a2a61 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 28 Oct 2009 17:57:54 +0900 Subject: sh: perf events: Add preliminary support for SH-4A counters. This adds in preliminary support for the SH-4A performance counters. Presently only the first 2 counters are supported, as these are the ones of the most interest to the perf tool and end users. Counter chaining is not presently handled, so these are simply implemented as 32-bit counters. This also establishes a perf event support framework for other hardware counters, which the existing SH-4 oprofile code will migrate over to as the SH-4A support evolves. 
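A CPU family then plugs its counter callbacks into this common layer at boot time. The registration sketch below follows the new struct sh_pmu layout; the callback names are placeholders and their bodies are left out here:

	static struct sh_pmu sh4a_pmu = {
		.name		= "SH-4A",
		.num_events	= 2,
		.event_map	= sh4a_event_map,
		.enable		= sh4a_pmu_enable,
		.disable	= sh4a_pmu_disable,
		.read		= sh4a_pmu_read,
		.enable_all	= sh4a_pmu_enable_all,
		.disable_all	= sh4a_pmu_disable_all,
	};

	static int __init sh4a_pmu_init(void)
	{
		return register_sh_pmu(&sh4a_pmu);
	}
	arch_initcall(sh4a_pmu_init);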
Signed-off-by: Paul Mundt --- arch/sh/include/asm/perf_event.h | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/perf_event.h b/arch/sh/include/asm/perf_event.h index 11a302297ab7..3d0c9f36d150 100644 --- a/arch/sh/include/asm/perf_event.h +++ b/arch/sh/include/asm/perf_event.h @@ -1,8 +1,35 @@ #ifndef __ASM_SH_PERF_EVENT_H #define __ASM_SH_PERF_EVENT_H -/* SH only supports software events through this interface. */ -static inline void set_perf_event_pending(void) {} +struct hw_perf_event; + +#define MAX_HWEVENTS 2 + +struct sh_pmu { + const char *name; + unsigned int num_events; + void (*disable_all)(void); + void (*enable_all)(void); + void (*enable)(struct hw_perf_event *, int); + void (*disable)(struct hw_perf_event *, int); + u64 (*read)(int); + int (*event_map)(int); + unsigned int max_events; + unsigned long raw_event_mask; + const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; +}; + +/* arch/sh/kernel/perf_event.c */ +extern int register_sh_pmu(struct sh_pmu *); +extern int reserve_pmc_hardware(void); +extern void release_pmc_hardware(void); + +static inline void set_perf_event_pending(void) +{ + /* Nothing to see here, move along. */ +} #define PERF_EVENT_INDEX_OFFSET 0 -- cgit v1.2.2 From 49f42644fd01bc7bd9b6b0a080fee1a89dc66665 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 29 Oct 2009 10:51:48 +0000 Subject: sh: Add notifiers chains for cpu/board code This patch adds atomic notifier chains for pre/post sleep events. Useful for cpu code and boards that need to save and restore register state before and after entering a sleep mode. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/suspend.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h index 5c8ea28ff7a4..d1cc5221645d 100644 --- a/arch/sh/include/asm/suspend.h +++ b/arch/sh/include/asm/suspend.h @@ -2,6 +2,7 @@ #define _ASM_SH_SUSPEND_H #ifndef __ASSEMBLY__ +#include static inline int arch_prepare_suspend(void) { return 0; } #include @@ -19,6 +20,16 @@ void sh_mobile_setup_cpuidle(void); static inline void sh_mobile_setup_cpuidle(void) {} #endif +/* notifier chains for pre/post sleep hooks */ +extern struct atomic_notifier_head sh_mobile_pre_sleep_notifier_list; +extern struct atomic_notifier_head sh_mobile_post_sleep_notifier_list; + +/* priority levels for notifiers */ +#define SH_MOBILE_SLEEP_BOARD 0 +#define SH_MOBILE_SLEEP_CPU 1 +#define SH_MOBILE_PRE(x) (x) +#define SH_MOBILE_POST(x) (-(x)) + #endif /* flags passed to assembly suspend code */ -- cgit v1.2.2 From 159f8cd99ea0e3613cbb6aeea574af438f33d8d7 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 29 Oct 2009 10:52:06 +0000 Subject: sh: Allow boards to register memory pre/post sleep code Add code to allow boards registering self-contained functions for going to/from self-refresh. At this point the board code is unused. When all supported boards have been converted then the new sleep code will make use of these functions. 
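To make the counter framework in the perf_event.h change above more concrete, the following is a rough, hypothetical sketch of a CPU backend registering itself via register_sh_pmu(). The names, stub bodies and initcall level are assumptions, not the actual SH-4A implementation.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/perf_event.h>

static int example_event_map(int event)
{
	/* translate a generic perf event id into a hardware event code */
	return -EOPNOTSUPP;
}

static u64 example_read(int idx)
{
	/* read back the 32-bit hardware counter 'idx' */
	return 0;
}

static void example_enable(struct hw_perf_event *hwc, int idx) { }
static void example_disable(struct hw_perf_event *hwc, int idx) { }
static void example_enable_all(void) { }
static void example_disable_all(void) { }

static struct sh_pmu example_pmu = {
	.name		= "example",
	.num_events	= 2,		/* only two counters wired up */
	.event_map	= example_event_map,
	.read		= example_read,
	.enable		= example_enable,
	.disable	= example_disable,
	.enable_all	= example_enable_all,
	.disable_all	= example_disable_all,
};

static int __init example_pmu_init(void)
{
	return register_sh_pmu(&example_pmu);
}
early_initcall(example_pmu_init);
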
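Likewise, a minimal sketch of board code hooking the pre-sleep notifier chain declared above; the arguments passed to the notifier callback are assumptions, since the patch only introduces the chains and the priority macros.

#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/suspend.h>

static int example_board_pre_sleep(struct notifier_block *nb,
				   unsigned long mode, void *unused)
{
	/* save board-specific register state before the CPU sleeps */
	return NOTIFY_DONE;
}

static struct notifier_block example_board_pre_sleep_nb = {
	.notifier_call	= example_board_pre_sleep,
	.priority	= SH_MOBILE_PRE(SH_MOBILE_SLEEP_BOARD),
};

static int __init example_board_sleep_setup(void)
{
	atomic_notifier_chain_register(&sh_mobile_pre_sleep_notifier_list,
				       &example_board_pre_sleep_nb);
	return 0;
}
device_initcall(example_board_sleep_setup);
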
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/suspend.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h index d1cc5221645d..fab58cc2ecd9 100644 --- a/arch/sh/include/asm/suspend.h +++ b/arch/sh/include/asm/suspend.h @@ -30,6 +30,10 @@ extern struct atomic_notifier_head sh_mobile_post_sleep_notifier_list; #define SH_MOBILE_PRE(x) (x) #define SH_MOBILE_POST(x) (-(x)) +/* board code registration function for self-refresh assembly snippets */ +void sh_mobile_register_self_refresh(unsigned long flags, + void *pre_start, void *pre_end, + void *post_start, void *post_end); #endif /* flags passed to assembly suspend code */ -- cgit v1.2.2 From 323ef8dba67fb7b9c709457bd0374d88cfb8f25f Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 30 Oct 2009 04:24:07 +0000 Subject: sh: Rework SuperH Mobile sleep mode code Rework the SuperH Mobile sleep code from including board specific code to allowing each board to provide pre/post code snippets. These snippets should contain sdram management code to enter and leave self-refresh. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/suspend.h | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h index fab58cc2ecd9..8e2c55dc5fe6 100644 --- a/arch/sh/include/asm/suspend.h +++ b/arch/sh/include/asm/suspend.h @@ -34,6 +34,33 @@ extern struct atomic_notifier_head sh_mobile_post_sleep_notifier_list; void sh_mobile_register_self_refresh(unsigned long flags, void *pre_start, void *pre_end, void *post_start, void *post_end); + +/* register structure for address/data information */ +struct sh_sleep_regs { + unsigned long stbcr; +}; + +/* data area for low-level sleep code */ +struct sh_sleep_data { + /* current sleep mode (SUSP_SH_...) */ + unsigned long mode; + + /* addresses of board specific self-refresh snippets */ + unsigned long sf_pre; + unsigned long sf_post; + + /* register state saved and restored by the assembly code */ + unsigned long vbr; + unsigned long spc; + unsigned long sr; + + /* structure for keeping register addresses */ + struct sh_sleep_regs addr; + + /* structure for saving/restoring register state */ + struct sh_sleep_regs data; +}; + #endif /* flags passed to assembly suspend code */ -- cgit v1.2.2 From 02bf89347c7d6a6aeae64f02536dac038c402fce Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 30 Oct 2009 04:24:15 +0000 Subject: sh: Keep track of allowed sleep modes Add code to keep track of supported sleep modes. This to only export cpuidle modes that are backed by board support code. Also, do not allow suspend-to-ram if sdram board code is missing. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/suspend.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h index 8e2c55dc5fe6..8eddf236fb85 100644 --- a/arch/sh/include/asm/suspend.h +++ b/arch/sh/include/asm/suspend.h @@ -61,6 +61,9 @@ struct sh_sleep_data { struct sh_sleep_regs data; }; +/* a bitmap of supported sleep modes (SUSP_SH..) 
*/ +extern unsigned long sh_mobile_sleep_supported; + #endif /* flags passed to assembly suspend code */ -- cgit v1.2.2 From 99675a7a45ed3cec54d6e1d11f13bcaacaf0909b Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 30 Oct 2009 04:24:23 +0000 Subject: sh: Add MMU and Cache handling sleep mode code Add MMU and cache handling functionality to the SuperH Mobile sleep code. The MMU and cache registers are saved and restored. The MMU is disabled and the cache is flushed and disabled before entering sleep modes if the SUSP_SH_MMU flag is set. This flag should be set in the case of R-standby and most likely for future U-standby support as well. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/suspend.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h index 8eddf236fb85..702025d960a0 100644 --- a/arch/sh/include/asm/suspend.h +++ b/arch/sh/include/asm/suspend.h @@ -38,6 +38,20 @@ void sh_mobile_register_self_refresh(unsigned long flags, /* register structure for address/data information */ struct sh_sleep_regs { unsigned long stbcr; + + /* MMU */ + unsigned long pteh; + unsigned long ptel; + unsigned long ttb; + unsigned long tea; + unsigned long mmucr; + unsigned long ptea; + unsigned long pascr; + unsigned long irmcr; + + /* Cache */ + unsigned long ccr; + unsigned long ramcr; }; /* data area for low-level sleep code */ @@ -72,5 +86,6 @@ extern unsigned long sh_mobile_sleep_supported; #define SUSP_SH_RSTANDBY (1 << 2) /* SH-Mobile R-standby mode */ #define SUSP_SH_USTANDBY (1 << 3) /* SH-Mobile U-standby mode */ #define SUSP_SH_SF (1 << 4) /* Enable self-refresh */ +#define SUSP_SH_MMU (1 << 5) /* Save/restore MMU and cache */ #endif /* _ASM_SH_SUSPEND_H */ -- cgit v1.2.2 From bb3e0eed9dd51987c7462bae2880a3d4d750c55a Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 30 Oct 2009 04:24:40 +0000 Subject: sh: Add R-standby sleep mode support Add R-standby specific bits to the SuperH Mobile sleep code. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/suspend.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h index 702025d960a0..fe9c2a1ad047 100644 --- a/arch/sh/include/asm/suspend.h +++ b/arch/sh/include/asm/suspend.h @@ -38,6 +38,7 @@ void sh_mobile_register_self_refresh(unsigned long flags, /* register structure for address/data information */ struct sh_sleep_regs { unsigned long stbcr; + unsigned long bar; /* MMU */ unsigned long pteh; @@ -63,10 +64,14 @@ struct sh_sleep_data { unsigned long sf_pre; unsigned long sf_post; + /* address of resume code */ + unsigned long resume; + /* register state saved and restored by the assembly code */ unsigned long vbr; unsigned long spc; unsigned long sr; + unsigned long sp; /* structure for keeping register addresses */ struct sh_sleep_regs addr; -- cgit v1.2.2 From a37c6c7aec38a693f87ee5ccc6e60a5b3ee700f2 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 4 Nov 2009 11:44:21 +0900 Subject: sh: mach-se: Convert SE7722 FPGA to dynamic IRQ allocation. This gets rid of the arbitrary set of vectors used by the SE7722 FPGA interrupt controller and witches over to a completely dynamic set. No assumptions regarding a contiguous range are made, and the platform resources themselves need to be filled in lazily. 
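Tying the sleep-mode pieces above together, a hedged sketch of how generic code can consult the capability bitmap before attempting R-standby, which per the commit messages depends on both the board self-refresh snippets and MMU/cache save support; the helper name and the exact check are illustrative only.

#include <linux/errno.h>
#include <asm/suspend.h>

/* Hypothetical example: refuse R-standby unless every required piece
 * (self-refresh snippets, MMU/cache save support) has been registered. */
static int example_try_rstandby(void)
{
	unsigned long mode = SUSP_SH_RSTANDBY | SUSP_SH_SF | SUSP_SH_MMU;

	if ((sh_mobile_sleep_supported & mode) != mode)
		return -ENODEV;

	/* ... hand 'mode' down to the low-level sleep entry code ... */
	return 0;
}
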
Signed-off-by: Paul Mundt --- arch/sh/include/mach-se/mach/se7722.h | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/mach-se/mach/se7722.h b/arch/sh/include/mach-se/mach/se7722.h index e971d9a82f4a..16505bfb8a9e 100644 --- a/arch/sh/include/mach-se/mach/se7722.h +++ b/arch/sh/include/mach-se/mach/se7722.h @@ -92,18 +92,11 @@ #define SE7722_FPGA_IRQ_MRSHPC1 3 /* IRQ1 */ #define SE7722_FPGA_IRQ_MRSHPC2 4 /* IRQ1 */ #define SE7722_FPGA_IRQ_MRSHPC3 5 /* IRQ1 */ - #define SE7722_FPGA_IRQ_NR 6 -#define SE7722_FPGA_IRQ_BASE 110 - -#define MRSHPC_IRQ3 (SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_MRSHPC3) -#define MRSHPC_IRQ2 (SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_MRSHPC2) -#define MRSHPC_IRQ1 (SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_MRSHPC1) -#define MRSHPC_IRQ0 (SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_MRSHPC0) -#define SMC_IRQ (SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_SMC) -#define USB_IRQ (SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_USB) /* arch/sh/boards/se/7722/irq.c */ +extern unsigned int se7722_fpga_irq[]; + void init_se7722_IRQ(void); #define __IO_PREFIX se7722 -- cgit v1.2.2 From e9c58fc57b17bfa75c256fb4f45ce22de6626858 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 12 Nov 2009 16:36:26 +0900 Subject: sh: Use the generic I/O port base for slowdown. This fixes up the build and behaviour for various configurations. Namely the CONFIG_32BIT cases where legacy mappings do not exist, as well as the sh64 build. Signed-off-by: Paul Mundt --- arch/sh/include/asm/io.h | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 0cf2a5708e26..512cd3e9d0ca 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -90,15 +90,11 @@ #define ctrl_outl __raw_writel #define ctrl_outq __raw_writeq +extern unsigned long generic_io_base; + static inline void ctrl_delay(void) { -#ifdef CONFIG_CPU_SH4 - __raw_readw(CCN_PVR); -#elif defined(P2SEG) - __raw_readw(P2SEG); -#else -#error "Need a dummy address for delay" -#endif + __raw_readw(generic_io_base); } #define __BUILD_MEMORY_STRING(bwlq, type) \ @@ -186,8 +182,6 @@ __BUILD_MEMORY_STRING(q, u64) #define IO_SPACE_LIMIT 0xffffffff -extern unsigned long generic_io_base; - /* * This function provides a method for the generic case where a * board-specific ioport_map simply needs to return the port + some -- cgit v1.2.2 From 626ac8e1388ac128495a3b7188e9d86464de6c5b Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 12 Nov 2009 16:39:47 +0900 Subject: sh64: Fix up the CONFIG_GENERIC_BUG=n build. sh64 doesn't use GENERIC_BUG, which presently causes the handle_BUG() code to blow up. Fix up the dependencies and get it all building again. Signed-off-by: Paul Mundt --- arch/sh/include/asm/system.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h index b5c5acdc8c0e..c15415b4b169 100644 --- a/arch/sh/include/asm/system.h +++ b/arch/sh/include/asm/system.h @@ -171,10 +171,6 @@ BUILD_TRAP_HANDLER(fpu_error); BUILD_TRAP_HANDLER(fpu_state_restore); BUILD_TRAP_HANDLER(nmi); -#ifdef CONFIG_BUG -extern void handle_BUG(struct pt_regs *); -#endif - #define arch_align_stack(x) (x) struct mem_access { -- cgit v1.2.2
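Finally, to illustrate the lazy resource fill-in mentioned in the SE7722 change above: with the fixed SE7722_FPGA_IRQ_BASE gone, board code copies the dynamically allocated vectors out of se7722_fpga_irq[] at init time. This is a hypothetical sketch; the names, initcall level and included header path are assumptions, not the real board setup code.

#include <linux/init.h>
#include <linux/ioport.h>
#include <mach/se7722.h>	/* board header; include path assumed */

static struct resource example_smc_irq_resource = {
	.name	= "smc-irq",
	.flags	= IORESOURCE_IRQ,
};

static int __init example_se7722_irq_fixup(void)
{
	/* filled in lazily, once the FPGA vectors have been allocated */
	example_smc_irq_resource.start = se7722_fpga_irq[SE7722_FPGA_IRQ_SMC];
	example_smc_irq_resource.end   = example_smc_irq_resource.start;

	/* ... register the platform device that consumes this IRQ ... */
	return 0;
}
device_initcall(example_se7722_irq_fixup);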