From 3159e7d62ad13f71ef3fe029c145594d8caa580d Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Fri, 5 Sep 2008 15:39:12 +0900
Subject: sh: Add support for memory hot-remove.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/init.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)
(limited to 'arch/sh/mm/init.c')

diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index b75a7acd62fb..d4681a55c852 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -292,4 +292,21 @@ int memory_add_physaddr_to_nid(u64 addr)
 }
 EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 #endif
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+int remove_memory(u64 start, u64 size)
+{
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long end_pfn = start_pfn + (size >> PAGE_SHIFT);
+	int ret;
+
+	ret = offline_pages(start_pfn, end_pfn, 120 * HZ);
+	if (unlikely(ret))
+		printk("%s: Failed, offline_pages() == %d\n", __func__, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(remove_memory);
+#endif
+
 #endif /* CONFIG_MEMORY_HOTPLUG */
--
cgit v1.2.2


From c6feb6142cb85228e73497a309f475a0d7279318 Mon Sep 17 00:00:00 2001
From: Stuart Menefy
Date: Fri, 5 Sep 2008 16:06:42 +0900
Subject: sh: early cached_to_uncached initialization.

Statically initialise the cached_to_uncached offset, so that we can
use it immediately.

Signed-off-by: Stuart Menefy
Signed-off-by: Paul Mundt
---
 arch/sh/mm/init.c | 27 ++++++++++++++-------------
 1 file changed, 14 insertions(+), 13 deletions(-)
(limited to 'arch/sh/mm/init.c')

diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index d4681a55c852..f1a494283c4e 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -23,7 +23,19 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
-unsigned long cached_to_uncached = 0;
+
+#ifdef CONFIG_SUPERH32
+/*
+ * Handle trivial transitions between cached and uncached
+ * segments, making use of the 1:1 mapping relationship in
+ * 512MB lowmem.
+ *
+ * This is the offset of the uncached section from its cached alias.
+ * Default value only valid in 29 bit mode, in 32bit mode will be
+ * overridden in pmb_init.
+ */
+unsigned long cached_to_uncached = P2SEG - P1SEG;
+#endif
 
 #ifdef CONFIG_MMU
 static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 {
@@ -58,9 +70,7 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 	}
 
 	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
-
-	if (cached_to_uncached)
-		flush_tlb_one(get_asid(), addr);
+	flush_tlb_one(get_asid(), addr);
 }
 
 /*
@@ -165,15 +175,6 @@ void __init paging_init(void)
 
 #ifdef CONFIG_SUPERH32
 	/* Set up the uncached fixmap */
 	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
-
-#ifdef CONFIG_29BIT
-	/*
-	 * Handle trivial transitions between cached and uncached
-	 * segments, making use of the 1:1 mapping relationship in
-	 * 512MB lowmem.
-	 */
-	cached_to_uncached = P2SEG - P1SEG;
-#endif
 #endif
 }
--
cgit v1.2.2


From b6c20e4290a1ef92bcef5ec9dd8e5c7d036153aa Mon Sep 17 00:00:00 2001
From: Marek Skuczynski
Date: Fri, 5 Sep 2008 16:42:58 +0900
Subject: sh: remove unnecessary memset after alloc_bootmem_low_pages

Because the alloc_bootmem functions always return zeroed memory, an
additional memset of the allocated memory is unnecessary.

Signed-off-by: Marek Skuczynski
Signed-off-by: Carl Shaw
Signed-off-by: Paul Mundt
---
 arch/sh/mm/init.c | 1 -
 1 file changed, 1 deletion(-)
(limited to 'arch/sh/mm/init.c')

diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index f1a494283c4e..31211bfdc6d8 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -123,7 +123,6 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 		if (!pmd_present(*pmd)) {
 			pte_t *pte_table;
 
 			pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
-			memset(pte_table, 0, PAGE_SIZE);
 			pmd_populate_kernel(&init_mm, pmd, pte_table);
 		}
--
cgit v1.2.2


From c15c5f8c2bf0b00d036c5c6b67264764a6e5dffc Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Sat, 20 Sep 2008 20:21:33 +0900
Subject: sh: Support kernel stacks smaller than a page.

This follows the powerpc commit f6a616800e68b61807d0f7bb0d5dc70665ef8046
'[POWERPC] Fix kernel stack allocation alignment'.

SH has traditionally forced the thread order to be relative to the page
size, so there were never any situations where the same bug was triggered
by slub. Regardless, the usage of > 8kB stacks for the larger page sizes
is overkill, so we switch to using slab allocations there, as per the
powerpc change.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/init.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)
(limited to 'arch/sh/mm/init.c')

diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 31211bfdc6d8..2a53943924b2 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -265,6 +265,35 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
+#if THREAD_SHIFT < PAGE_SHIFT
+static struct kmem_cache *thread_info_cache;
+
+struct thread_info *alloc_thread_info(struct task_struct *tsk)
+{
+	struct thread_info *ti;
+
+	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
+	if (unlikely(ti == NULL))
+		return NULL;
+#ifdef CONFIG_DEBUG_STACK_USAGE
+	memset(ti, 0, THREAD_SIZE);
+#endif
+	return ti;
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+	kmem_cache_free(thread_info_cache, ti);
+}
+
+void thread_info_cache_init(void)
+{
+	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+					      THREAD_SIZE, 0, NULL);
+	BUG_ON(thread_info_cache == NULL);
+}
+#endif /* THREAD_SHIFT < PAGE_SHIFT */
+
 #ifdef CONFIG_MEMORY_HOTPLUG
 int arch_add_memory(int nid, u64 start, u64 size)
 {
--
cgit v1.2.2