From 74bf4312fff083ab25c3f357cc653ada7995e5f6 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:29:18 -0800 Subject: [SPARC64]: Move away from virtual page tables, part 1. We now use the TSB hardware assist features of the UltraSPARC MMUs. SMP is currently knowingly broken; we need to find another place to store the per-cpu base pointers. We hid them away in the TSB base register, and that obviously will not work any more :-) Another known broken case is non-8KB base page size. Also noticed that flush_tlb_all() is not referenced anywhere; only the internal __flush_tlb_all() (local cpu only) is used by the sparc64 port, so we can get rid of flush_tlb_all(). The kernel gets its own 8KB TSB (swapper_tsb) and each address space gets its own private 8K TSB. Later we can add code to dynamically increase the size of the per-process TSB as the RSS grows. An 8KB TSB is good enough for up to about a 4MB RSS, after which the TSB starts to incur many capacity and conflict misses. We even accumulate OBP translations into the kernel TSB. Another area for refinement is large page size support. We could use a secondary address space TSB to handle those. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 91 +++----------------------------------------------- 1 file changed, 5 insertions(+), 86 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 1e44ee26cee8..da068f6b2595 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -408,8 +408,7 @@ unsigned long prom_virt_to_phys(unsigned long promva, int *error) /* The obp translations are saved based on 8k pagesize, since obp can * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> - * HI_OBP_ADDRESS range are handled in ktlb.S and do not use the vpte - * scheme (also, see rant in inherit_locked_prom_mappings()). + * HI_OBP_ADDRESS range are handled in ktlb.S. */ static inline int in_obp_range(unsigned long vaddr) { @@ -539,75 +538,6 @@ static void __init inherit_prom_mappings(void) prom_printf("done.\n"); } -/* The OBP specifications for sun4u mark 0xfffffffc00000000 and - * upwards as reserved for use by the firmware (I wonder if this - * will be the same on Cheetah...). We use this virtual address - * range for the VPTE table mappings of the nucleus so we need - * to zap them when we enter the PROM. -DaveM - */ -static void __flush_nucleus_vptes(void) -{ - unsigned long prom_reserved_base = 0xfffffffc00000000UL; - int i; - - /* Only DTLB must be checked for VPTE entries. */ - if (tlb_type == spitfire) { - for (i = 0; i < 63; i++) { - unsigned long tag; - - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no cheetah+ - * page size encodings. 
- */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - tag = spitfire_get_dtlb_tag(i); - if (((tag & ~(PAGE_MASK)) == 0) && - ((tag & (PAGE_MASK)) >= prom_reserved_base)) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - spitfire_put_dtlb_data(i, 0x0UL); - } - } - } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { - for (i = 0; i < 512; i++) { - unsigned long tag = cheetah_get_dtlb_tag(i, 2); - - if ((tag & ~PAGE_MASK) == 0 && - (tag & PAGE_MASK) >= prom_reserved_base) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - cheetah_put_dtlb_data(i, 0x0UL, 2); - } - - if (tlb_type != cheetah_plus) - continue; - - tag = cheetah_get_dtlb_tag(i, 3); - - if ((tag & ~PAGE_MASK) == 0 && - (tag & PAGE_MASK) >= prom_reserved_base) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - cheetah_put_dtlb_data(i, 0x0UL, 3); - } - } - } else { - /* Implement me :-) */ - BUG(); - } -} - static int prom_ditlb_set; struct prom_tlb_entry { int tlb_ent; @@ -635,9 +565,6 @@ void prom_world(int enter) : "i" (PSTATE_IE)); if (enter) { - /* Kick out nucleus VPTEs. */ - __flush_nucleus_vptes(); - /* Install PROM world. */ for (i = 0; i < 16; i++) { if (prom_dtlb[i].tlb_ent != -1) { @@ -1039,18 +966,7 @@ out: struct pgtable_cache_struct pgt_quicklists; #endif -/* OK, we have to color these pages. The page tables are accessed - * by non-Dcache enabled mapping in the VPTE area by the dtlb_backend.S - * code, as well as by PAGE_OFFSET range direct-mapped addresses by - * other parts of the kernel. By coloring, we make sure that the tlbmiss - * fast handlers do not get data from old/garbage dcache lines that - * correspond to an old/stale virtual address (user/kernel) that - * previously mapped the pagetable page while accessing vpte range - * addresses. The idea is that if the vpte color and PAGE_OFFSET range - * color is the same, then when the kernel initializes the pagetable - * using the later address range, accesses with the first address - * range will see the newly initialized data rather than the garbage. - */ +/* XXX We don't need to color these things in the D-cache any longer. */ #ifdef DCACHE_ALIASING_POSSIBLE #define DC_ALIAS_SHIFT 1 #else @@ -1419,6 +1335,9 @@ void kernel_map_pages(struct page *page, int numpages, int enable) kernel_map_range(phys_start, phys_end, (enable ? PAGE_KERNEL : __pgprot(0))); + flush_tsb_kernel_range(PAGE_OFFSET + phys_start, + PAGE_OFFSET + phys_end); + /* we should perform an IPI and flush all tlbs, * but that can deadlock->flush only current cpu. */ -- cgit v1.2.2 From 05e28f9de65a38bb0c769080e91b6976e7e1e70c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:30:13 -0800 Subject: [SPARC64]: No need to D-cache color page tables any longer. Unlike the virtual page tables, the new TSB scheme does not require this ugly hack. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 71 +++++--------------------------------------------- 1 file changed, 6 insertions(+), 65 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index da068f6b2595..936ae1a594ac 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -145,6 +145,10 @@ int bigkernel = 0; #define PGT_CACHE_LOW 25 #define PGT_CACHE_HIGH 50 +#ifndef CONFIG_SMP +struct pgtable_cache_struct pgt_quicklists; +#endif + void check_pgt_cache(void) { preempt_disable(); @@ -152,10 +156,8 @@ void check_pgt_cache(void) do { if (pgd_quicklist) free_pgd_slow(get_pgd_fast()); - if (pte_quicklist[0]) - free_pte_slow(pte_alloc_one_fast(NULL, 0)); - if (pte_quicklist[1]) - free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10))); + if (pte_quicklist) + free_pte_slow(pte_alloc_one_fast()); } while (pgtable_cache_size > PGT_CACHE_LOW); } preempt_enable(); @@ -962,67 +964,6 @@ out: spin_unlock(&ctx_alloc_lock); } -#ifndef CONFIG_SMP -struct pgtable_cache_struct pgt_quicklists; -#endif - -/* XXX We don't need to color these things in the D-cache any longer. */ -#ifdef DCACHE_ALIASING_POSSIBLE -#define DC_ALIAS_SHIFT 1 -#else -#define DC_ALIAS_SHIFT 0 -#endif -pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) -{ - struct page *page; - unsigned long color; - - { - pte_t *ptep = pte_alloc_one_fast(mm, address); - - if (ptep) - return ptep; - } - - color = VPTE_COLOR(address); - page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT); - if (page) { - unsigned long *to_free; - unsigned long paddr; - pte_t *pte; - -#ifdef DCACHE_ALIASING_POSSIBLE - set_page_count(page, 1); - ClearPageCompound(page); - - set_page_count((page + 1), 1); - ClearPageCompound(page + 1); -#endif - paddr = (unsigned long) page_address(page); - memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT)); - - if (!color) { - pte = (pte_t *) paddr; - to_free = (unsigned long *) (paddr + PAGE_SIZE); - } else { - pte = (pte_t *) (paddr + PAGE_SIZE); - to_free = (unsigned long *) paddr; - } - -#ifdef DCACHE_ALIASING_POSSIBLE - /* Now free the other one up, adjust cache size. */ - preempt_disable(); - *to_free = (unsigned long) pte_quicklist[color ^ 0x1]; - pte_quicklist[color ^ 0x1] = to_free; - pgtable_cache_size++; - preempt_enable(); -#endif - - return pte; - } - return NULL; -} - void sparc_ultra_dump_itlb(void) { int slot; -- cgit v1.2.2 From 3c936465249f863f322154ff1aaa628b84ee5750 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:30:27 -0800 Subject: [SPARC64]: Kill pgtable quicklists and use SLAB. Taking a nod from the powerpc port. With the per-cpu caching of both the page allocator and SLAB, the pgtable quicklist scheme becomes relatively silly and primitive. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 936ae1a594ac..7c456afaa9a5 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -141,26 +141,25 @@ unsigned long sparc64_kern_sec_context __read_mostly; int bigkernel = 0; -/* XXX Tune this... 
*/ -#define PGT_CACHE_LOW 25 -#define PGT_CACHE_HIGH 50 +kmem_cache_t *pgtable_cache __read_mostly; -#ifndef CONFIG_SMP -struct pgtable_cache_struct pgt_quicklists; -#endif +static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) +{ + clear_page(addr); +} -void check_pgt_cache(void) +void pgtable_cache_init(void) { - preempt_disable(); - if (pgtable_cache_size > PGT_CACHE_HIGH) { - do { - if (pgd_quicklist) - free_pgd_slow(get_pgd_fast()); - if (pte_quicklist) - free_pte_slow(pte_alloc_one_fast()); - } while (pgtable_cache_size > PGT_CACHE_LOW); + pgtable_cache = kmem_cache_create("pgtable_cache", + PAGE_SIZE, PAGE_SIZE, + SLAB_HWCACHE_ALIGN | + SLAB_MUST_HWCACHE_ALIGN, + zero_ctor, + NULL); + if (!pgtable_cache) { + prom_printf("pgtable_cache_init(): Could not create!\n"); + prom_halt(); } - preempt_enable(); } #ifdef CONFIG_DEBUG_DCFLUSH @@ -340,7 +339,6 @@ void show_mem(void) nr_swap_pages << (PAGE_SHIFT-10)); printk("%ld pages of RAM\n", num_physpages); printk("%d free pages\n", nr_free_pages()); - printk("%d pages in page table cache\n",pgtable_cache_size); } void mmu_info(struct seq_file *m) -- cgit v1.2.2 From bd40791e1d289d807b8580abe1f117e9c62894e4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:31:38 -0800 Subject: [SPARC64]: Dynamically grow TSB in response to RSS growth. As the RSS grows, grow the TSB in order to reduce the likelihood of hash collisions and thus poor hit rates in the TSB. This definitely needs some serious tuning. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 7c456afaa9a5..a8119cb4fa32 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -246,9 +246,11 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { + struct mm_struct *mm; struct page *page; unsigned long pfn; unsigned long pg_flags; + unsigned long mm_rss; pfn = pte_pfn(pte); if (pfn_valid(pfn) && @@ -270,6 +272,11 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p put_cpu(); } + + mm = vma->vm_mm; + mm_rss = get_mm_rss(mm); + if (mm_rss >= mm->context.tsb_rss_limit) + tsb_grow(mm, mm_rss, GFP_ATOMIC); } void flush_dcache_page(struct page *page) -- cgit v1.2.2 From b70c0fa1613c4f69b4a340a0e2bee387560ebbb1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:32:04 -0800 Subject: [SPARC64]: Preload TSB entries from update_mmu_cache(). Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index a8119cb4fa32..1e8a5a33639d 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -277,6 +277,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p mm_rss = get_mm_rss(mm); if (mm_rss >= mm->context.tsb_rss_limit) tsb_grow(mm, mm_rss, GFP_ATOMIC); + + if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) { + struct tsb *tsb; + unsigned long tag; + + tsb = &mm->context.tsb[(address >> PAGE_SHIFT) & + (mm->context.tsb_nentries - 1UL)]; + tag = (address >> 22UL) | CTX_HWBITS(mm->context) << 48UL; + tsb_insert(tsb, tag, pte_val(pte)); + } } void flush_dcache_page(struct page *page) -- cgit v1.2.2 From 3487d1d4414fbfab5d98ec559e6f84f55520cb15 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:33:25 -0800 Subject: [SPARC64]: Kill PROM locked TLB entry preservation code. It is totally unnecessary complexity. After we take over the trap table, we handle all PROM tlb misses fully. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 295 ++----------------------------------------------- 1 file changed, 10 insertions(+), 285 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 1e8a5a33639d..f4d22ccb4cf0 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -555,294 +555,12 @@ static void __init inherit_prom_mappings(void) prom_printf("done.\n"); } -static int prom_ditlb_set; -struct prom_tlb_entry { - int tlb_ent; - unsigned long tlb_tag; - unsigned long tlb_data; -}; -struct prom_tlb_entry prom_itlb[16], prom_dtlb[16]; - void prom_world(int enter) { - unsigned long pstate; - int i; - if (!enter) set_fs((mm_segment_t) { get_thread_current_ds() }); - if (!prom_ditlb_set) - return; - - /* Make sure the following runs atomically. */ - __asm__ __volatile__("flushw\n\t" - "rdpr %%pstate, %0\n\t" - "wrpr %0, %1, %%pstate" - : "=r" (pstate) - : "i" (PSTATE_IE)); - - if (enter) { - /* Install PROM world. 
*/ - for (i = 0; i < 16; i++) { - if (prom_dtlb[i].tlb_ent != -1) { - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "membar #Sync" - : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS), - "i" (ASI_DMMU)); - if (tlb_type == spitfire) - spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, - prom_dtlb[i].tlb_data); - else if (tlb_type == cheetah || tlb_type == cheetah_plus) - cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, - prom_dtlb[i].tlb_data); - } - if (prom_itlb[i].tlb_ent != -1) { - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "membar #Sync" - : : "r" (prom_itlb[i].tlb_tag), - "r" (TLB_TAG_ACCESS), - "i" (ASI_IMMU)); - if (tlb_type == spitfire) - spitfire_put_itlb_data(prom_itlb[i].tlb_ent, - prom_itlb[i].tlb_data); - else if (tlb_type == cheetah || tlb_type == cheetah_plus) - cheetah_put_litlb_data(prom_itlb[i].tlb_ent, - prom_itlb[i].tlb_data); - } - } - } else { - for (i = 0; i < 16; i++) { - if (prom_dtlb[i].tlb_ent != -1) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - if (tlb_type == spitfire) - spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL); - else - cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL); - } - if (prom_itlb[i].tlb_ent != -1) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : : "r" (TLB_TAG_ACCESS), - "i" (ASI_IMMU)); - if (tlb_type == spitfire) - spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL); - else - cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL); - } - } - } - __asm__ __volatile__("wrpr %0, 0, %%pstate" - : : "r" (pstate)); -} - -void inherit_locked_prom_mappings(int save_p) -{ - int i; - int dtlb_seen = 0; - int itlb_seen = 0; - - /* Fucking losing PROM has more mappings in the TLB, but - * it (conveniently) fails to mention any of these in the - * translations property. The only ones that matter are - * the locked PROM tlb entries, so we impose the following - * irrecovable rule on the PROM, it is allowed 8 locked - * entries in the ITLB and 8 in the DTLB. - * - * Supposedly the upper 16GB of the address space is - * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED - * SOMEWHERE!!!!!!!!!!!!!!!!! Furthermore the entire interface - * used between the client program and the firmware on sun5 - * systems to coordinate mmu mappings is also COMPLETELY - * UNDOCUMENTED!!!!!! Thanks S(t)un! - */ - if (save_p) { - for (i = 0; i < 16; i++) { - prom_itlb[i].tlb_ent = -1; - prom_dtlb[i].tlb_ent = -1; - } - } - if (tlb_type == spitfire) { - int high = sparc64_highest_unlocked_tlb_ent; - for (i = 0; i <= high; i++) { - unsigned long data; - - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no cheetah+ - * page size encodings. - */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - data = spitfire_get_dtlb_data(i); - if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { - unsigned long tag; - - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no - * cheetah+ page size encodings. 
- */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - tag = spitfire_get_dtlb_tag(i); - if (save_p) { - prom_dtlb[dtlb_seen].tlb_ent = i; - prom_dtlb[dtlb_seen].tlb_tag = tag; - prom_dtlb[dtlb_seen].tlb_data = data; - } - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - spitfire_put_dtlb_data(i, 0x0UL); - - dtlb_seen++; - if (dtlb_seen > 15) - break; - } - } - - for (i = 0; i < high; i++) { - unsigned long data; - - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no - * cheetah+ page size encodings. - */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - data = spitfire_get_itlb_data(i); - if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { - unsigned long tag; - - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no - * cheetah+ page size encodings. - */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - tag = spitfire_get_itlb_tag(i); - if (save_p) { - prom_itlb[itlb_seen].tlb_ent = i; - prom_itlb[itlb_seen].tlb_tag = tag; - prom_itlb[itlb_seen].tlb_data = data; - } - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); - spitfire_put_itlb_data(i, 0x0UL); - - itlb_seen++; - if (itlb_seen > 15) - break; - } - } - } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { - int high = sparc64_highest_unlocked_tlb_ent; - - for (i = 0; i <= high; i++) { - unsigned long data; - - data = cheetah_get_ldtlb_data(i); - if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { - unsigned long tag; - - tag = cheetah_get_ldtlb_tag(i); - if (save_p) { - prom_dtlb[dtlb_seen].tlb_ent = i; - prom_dtlb[dtlb_seen].tlb_tag = tag; - prom_dtlb[dtlb_seen].tlb_data = data; - } - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - cheetah_put_ldtlb_data(i, 0x0UL); - - dtlb_seen++; - if (dtlb_seen > 15) - break; - } - } - - for (i = 0; i < high; i++) { - unsigned long data; - - data = cheetah_get_litlb_data(i); - if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { - unsigned long tag; - - tag = cheetah_get_litlb_tag(i); - if (save_p) { - prom_itlb[itlb_seen].tlb_ent = i; - prom_itlb[itlb_seen].tlb_tag = tag; - prom_itlb[itlb_seen].tlb_data = data; - } - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); - cheetah_put_litlb_data(i, 0x0UL); - - itlb_seen++; - if (itlb_seen > 15) - break; - } - } - } else { - /* Implement me :-) */ - BUG(); - } - if (save_p) - prom_ditlb_set = 1; -} - -/* Give PROM back his world, done during reboots... 
*/ -void prom_reload_locked(void) -{ - int i; - - for (i = 0; i < 16; i++) { - if (prom_dtlb[i].tlb_ent != -1) { - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "membar #Sync" - : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS), - "i" (ASI_DMMU)); - if (tlb_type == spitfire) - spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, - prom_dtlb[i].tlb_data); - else if (tlb_type == cheetah || tlb_type == cheetah_plus) - cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, - prom_dtlb[i].tlb_data); - } - - if (prom_itlb[i].tlb_ent != -1) { - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "membar #Sync" - : : "r" (prom_itlb[i].tlb_tag), - "r" (TLB_TAG_ACCESS), - "i" (ASI_IMMU)); - if (tlb_type == spitfire) - spitfire_put_itlb_data(prom_itlb[i].tlb_ent, - prom_itlb[i].tlb_data); - else - cheetah_put_litlb_data(prom_itlb[i].tlb_ent, - prom_itlb[i].tlb_data); - } - } + __asm__ __volatile__("flushw"); } #ifdef DCACHE_ALIASING_POSSIBLE @@ -1066,6 +784,15 @@ void sparc_ultra_dump_dtlb(void) } } +static inline void spitfire_errata32(void) +{ + __asm__ __volatile__("stxa %0, [%1] %2\n\t" + "flush %%g6" + : /* No outputs */ + : "r" (0), + "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); +} + extern unsigned long cmdline_memory_size; unsigned long __init bootmem_init(unsigned long *pages_avail) @@ -1375,8 +1102,6 @@ void __init paging_init(void) setup_tba(this_is_starfire); } - inherit_locked_prom_mappings(1); - __flush_tlb_all(); /* Setup bootmem... */ -- cgit v1.2.2 From a8b900d801697609d1b56cc9c110148c64678068 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:33:37 -0800 Subject: [SPARC64]: Kill sole argument passed to setup_tba(). No longer used, and move extern declaration to a header file. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index f4d22ccb4cf0..20e7af552ce4 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1092,15 +1092,8 @@ void __init paging_init(void) inherit_prom_mappings(); - /* Ok, we can use our TLB miss and window trap handlers safely. - * We need to do a quick peek here to see if we are on StarFire - * or not, so setup_tba can setup the IRQ globals correctly (it - * needs to get the hard smp processor id correctly). - */ - { - extern void setup_tba(int); - setup_tba(this_is_starfire); - } + /* Ok, we can use our TLB miss and window trap handlers safely. */ + setup_tba(); __flush_tlb_all(); -- cgit v1.2.2 From 9954863975910a1b9372b7d5006a6cba43bdd288 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:34:34 -0800 Subject: [SPARC64]: Kill swapper_pgd_zero, totally unused. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 20e7af552ce4..2c21d85de78f 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -388,7 +388,6 @@ struct linux_prom_translation { /* Exported for kernel TLB miss handling in ktlb.S */ struct linux_prom_translation prom_trans[512] __read_mostly; unsigned int prom_trans_ents __read_mostly; -unsigned int swapper_pgd_zero __read_mostly; extern unsigned long prom_boot_page; extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); @@ -1088,8 +1087,6 @@ void __init paging_init(void) pud_set(pud_offset(&swapper_pg_dir[0], 0), swapper_low_pmd_dir + (shift / sizeof(pgd_t))); - swapper_pgd_zero = pgd_val(swapper_pg_dir[0]); - inherit_prom_mappings(); /* Ok, we can use our TLB miss and window trap handlers safely. */ -- cgit v1.2.2 From 517af33237ecfc3c8a93b335365fa61e741ceca4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 1 Feb 2006 15:55:21 -0800 Subject: [SPARC64]: Access TSB with physical addresses when possible. This way we don't need to lock the TSB into the TLB. The trick is that every TSB load/store is registered into a special instruction patch section. The default uses virtual addresses, and the patch instructions use physical address load/stores. We can't do this on all chips because only cheetah+ and later have the physical variant of the atomic quad load. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 2c21d85de78f..4893f3e2c336 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -39,6 +39,7 @@ #include #include #include +#include extern void device_scan(void); @@ -244,6 +245,16 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c : "g1", "g7"); } +static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte) +{ + unsigned long tsb_addr = (unsigned long) ent; + + if (tlb_type == cheetah_plus) + tsb_addr = __pa(tsb_addr); + + __tsb_insert(tsb_addr, tag, pte); +} + void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { struct mm_struct *mm; @@ -1040,6 +1051,24 @@ unsigned long __init find_ecache_flush_span(unsigned long size) return ~0UL; } +static void __init tsb_phys_patch(void) +{ + struct tsb_phys_patch_entry *p; + + p = &__tsb_phys_patch; + while (p < &__tsb_phys_patch_end) { + unsigned long addr = p->addr; + + *(unsigned int *) addr = p->insn; + wmb(); + __asm__ __volatile__("flush %0" + : /* no outputs */ + : "r" (addr)); + + p++; + } +} + /* paging_init() sets up the page tables */ extern void cheetah_ecache_flush_init(void); @@ -1052,6 +1081,9 @@ void __init paging_init(void) unsigned long end_pfn, pages_avail, shift; unsigned long real_end, i; + if (tlb_type == cheetah_plus) + tsb_phys_patch(); + /* Find available physical memory... */ read_obp_memory("available", &pavail[0], &pavail_ents); -- cgit v1.2.2 From f4e841da30b4bcbb8f1cc20a01157a788ff58b21 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 2 Feb 2006 16:16:24 -0800 Subject: [SPARC64]: Turn off TSB growing for now. There are several tricky races involved with growing the TSB. So just use base-size TSBs for user contexts and we can revisit enabling this later. 
One part of the SMP problems is that tsb_context_switch() can see partially updated TSB configuration state if tsb_grow() is running in parallel. That's easily solved with a seqlock taken as a writer by tsb_grow() and taken as a reader to capture all the TSB config state in tsb_context_switch(). Then there is flush_tsb_user() running in parallel with a tsb_grow(). In theory we could take the seqlock as a reader there too, and just resample the TSB pointer and reflush but that looks really ugly. Lastly, I believe there is a case with threads that results in a TSB entry lock bit being set spuriously which will cause the next access to that TSB entry to wedge the cpu (since the TSB entry lock bit will never clear). It's either copy_tsb() or some bug elsewhere in the TSB assembly. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 4893f3e2c336..1af63307b24f 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -261,7 +261,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p struct page *page; unsigned long pfn; unsigned long pg_flags; - unsigned long mm_rss; pfn = pte_pfn(pte); if (pfn_valid(pfn) && @@ -285,10 +284,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p } mm = vma->vm_mm; - mm_rss = get_mm_rss(mm); - if (mm_rss >= mm->context.tsb_rss_limit) - tsb_grow(mm, mm_rss, GFP_ATOMIC); - if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) { struct tsb *tsb; unsigned long tag; -- cgit v1.2.2 From a43fe0e789f5445f5224511034f410adf11f153b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 4 Feb 2006 03:10:53 -0800 Subject: [SPARC64]: Add some hypervisor tlb_type checks. And more consistently check cheetah{,_plus} instead of assuming anything not spitfire is cheetah{,_plus}. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 1af63307b24f..ab50cd9618f3 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -335,7 +335,7 @@ out: void __kprobes flush_icache_range(unsigned long start, unsigned long end) { - /* Cheetah has coherent I-cache. */ + /* Cheetah and Hypervisor platform cpus have coherent I-cache. */ if (tlb_type == spitfire) { unsigned long kaddr; @@ -372,6 +372,8 @@ void mmu_info(struct seq_file *m) seq_printf(m, "MMU Type\t: Cheetah+\n"); else if (tlb_type == spitfire) seq_printf(m, "MMU Type\t: Spitfire\n"); + else if (tlb_type == hypervisor) + seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n"); else seq_printf(m, "MMU Type\t: ???\n"); @@ -581,7 +583,7 @@ void __flush_dcache_range(unsigned long start, unsigned long end) if (++n >= 512) break; } - } else { + } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { start = __pa(start); end = __pa(end); for (va = start; va < end; va += 32) -- cgit v1.2.2 From d257d5da39a78b32721ca84b2ba7f461f2f7ed7f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 6 Feb 2006 23:44:37 -0800 Subject: [SPARC64]: Initial sun4v TLB miss handling infrastructure. Things are a little tricky because, unlike sun4u, we have to: 1) do a hypervisor trap to do the TLB load. 2) do the TSB lookup calculations by hand Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index ab50cd9618f3..e9aac424877f 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1050,8 +1050,25 @@ unsigned long __init find_ecache_flush_span(unsigned long size) static void __init tsb_phys_patch(void) { + struct tsb_ldquad_phys_patch_entry *pquad; struct tsb_phys_patch_entry *p; + pquad = &__tsb_ldquad_phys_patch; + while (pquad < &__tsb_ldquad_phys_patch_end) { + unsigned long addr = pquad->addr; + + if (tlb_type == hypervisor) + *(unsigned int *) addr = pquad->sun4v_insn; + else + *(unsigned int *) addr = pquad->sun4u_insn; + wmb(); + __asm__ __volatile__("flush %0" + : /* no outputs */ + : "r" (addr)); + + pquad++; + } + p = &__tsb_phys_patch; while (p < &__tsb_phys_patch_end) { unsigned long addr = p->addr; @@ -1069,6 +1086,7 @@ static void __init tsb_phys_patch(void) /* paging_init() sets up the page tables */ extern void cheetah_ecache_flush_init(void); +extern void sun4v_patch_tlb_handlers(void); static unsigned long last_valid_pfn; pgd_t swapper_pg_dir[2048]; @@ -1078,9 +1096,13 @@ void __init paging_init(void) unsigned long end_pfn, pages_avail, shift; unsigned long real_end, i; - if (tlb_type == cheetah_plus) + if (tlb_type == cheetah_plus || + tlb_type == hypervisor) tsb_phys_patch(); + if (tlb_type == hypervisor) + sun4v_patch_tlb_handlers(); + /* Find available physical memory... */ read_obp_memory("available", &pavail[0], &pavail_ents); -- cgit v1.2.2 From 481295f982b21b1dbe71cbf41d3a93028fee30d1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 7 Feb 2006 21:51:08 -0800 Subject: [SPARC64]: Register per-cpu fault status area with sun4v hypervisor. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index e9aac424877f..4c95cf34075b 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -40,6 +40,7 @@ #include #include #include +#include extern void device_scan(void); @@ -1083,6 +1084,24 @@ static void __init tsb_phys_patch(void) } } +/* Register this cpu's fault status area with the hypervisor. */ +void __cpuinit sun4v_register_fault_status(void) +{ + register unsigned long arg0 asm("%o0"); + register unsigned long arg1 asm("%o1"); + int cpu = hard_smp_processor_id(); + struct trap_per_cpu *tb = &trap_block[cpu]; + unsigned long pa; + + pa = kern_base + ((unsigned long) tb - KERNBASE); + arg0 = HV_FAST_MMU_FAULT_AREA_CONF; + arg1 = pa; + __asm__ __volatile__("ta %4" + : "=&r" (arg0), "=&r" (arg1) + : "0" (arg0), "1" (arg1), + "i" (HV_FAST_TRAP)); +} + /* paging_init() sets up the page tables */ extern void cheetah_ecache_flush_init(void); @@ -1096,12 +1115,17 @@ void __init paging_init(void) unsigned long end_pfn, pages_avail, shift; unsigned long real_end, i; + kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; + kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; + if (tlb_type == cheetah_plus || tlb_type == hypervisor) tsb_phys_patch(); - if (tlb_type == hypervisor) + if (tlb_type == hypervisor) { sun4v_patch_tlb_handlers(); + sun4v_register_fault_status(); + } /* Find available physical memory... 
*/ read_obp_memory("available", &pavail[0], &pavail_ents); @@ -1112,9 +1136,6 @@ void __init paging_init(void) pfn_base = phys_base >> PAGE_SHIFT; - kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; - kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; - set_bit(0, mmu_context_bmap); shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); -- cgit v1.2.2 From 8b11bd12aff76e02cdc2cbc9e439bba88d281223 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 7 Feb 2006 22:13:05 -0800 Subject: [SPARC64]: Patch up mmu context register writes for sun4v. sun4v uses ASI_MMU instead of ASI_DMMU Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 9 --------- 1 file changed, 9 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 4c95cf34075b..6504d6eb5372 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -792,15 +792,6 @@ void sparc_ultra_dump_dtlb(void) } } -static inline void spitfire_errata32(void) -{ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); -} - extern unsigned long cmdline_memory_size; unsigned long __init bootmem_init(unsigned long *pages_avail) -- cgit v1.2.2 From d82ace7dc4073b090a55b9740700e32b9a9ae302 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 02:52:44 -0800 Subject: [SPARC64]: Detect sun4v early in boot process. We look for "SUNW,sun4v" in the 'compatible' property of the root OBP device tree node. Protect every %ver register access, to make sure it is not touched on sun4v, as %ver is hyperprivileged there. Lock kernel TLB entries using hypervisor calls instead of calls into OBP. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 58 +++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 46 insertions(+), 12 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 6504d6eb5372..e602b857071a 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -514,6 +514,29 @@ static void __init read_obp_translations(void) } } +static void __init hypervisor_tlb_lock(unsigned long vaddr, + unsigned long pte, + unsigned long mmu) +{ + register unsigned long func asm("%o0"); + register unsigned long arg0 asm("%o1"); + register unsigned long arg1 asm("%o2"); + register unsigned long arg2 asm("%o3"); + register unsigned long arg3 asm("%o4"); + + func = HV_FAST_MMU_MAP_PERM_ADDR; + arg0 = vaddr; + arg1 = 0; + arg2 = pte; + arg3 = mmu; + __asm__ __volatile__("ta 0x80" + : "=&r" (func), "=&r" (arg0), + "=&r" (arg1), "=&r" (arg2), + "=&r" (arg3) + : "0" (func), "1" (arg0), "2" (arg1), + "3" (arg2), "4" (arg3)); +} + static void __init remap_kernel(void) { unsigned long phys_page, tte_vaddr, tte_data; @@ -527,19 +550,30 @@ static void __init remap_kernel(void) kern_locked_tte_data = tte_data; - /* Now lock us into the TLBs via OBP. */ - prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); - prom_itlb_load(tlb_ent, tte_data, tte_vaddr); - if (bigkernel) { - tlb_ent -= 1; - prom_dtlb_load(tlb_ent, - tte_data + 0x400000, - tte_vaddr + 0x400000); - prom_itlb_load(tlb_ent, - tte_data + 0x400000, - tte_vaddr + 0x400000); + /* Now lock us into the TLBs via Hypervisor or OBP. 
*/ + if (tlb_type == hypervisor) { + hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); + hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); + if (bigkernel) { + tte_vaddr += 0x400000; + tte_data += 0x400000; + hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); + hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); + } + } else { + prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); + prom_itlb_load(tlb_ent, tte_data, tte_vaddr); + if (bigkernel) { + tlb_ent -= 1; + prom_dtlb_load(tlb_ent, + tte_data + 0x400000, + tte_vaddr + 0x400000); + prom_itlb_load(tlb_ent, + tte_data + 0x400000, + tte_vaddr + 0x400000); + } + sparc64_highest_unlocked_tlb_ent = tlb_ent - 1; } - sparc64_highest_unlocked_tlb_ent = tlb_ent - 1; if (tlb_type == cheetah_plus) { sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | CTX_CHEETAH_PLUS_NUC); -- cgit v1.2.2 From 164c220fa3947abbada65329d168f421b461a2a7 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 22:57:21 -0800 Subject: [SPARC64]: Fix hypervisor call arg passing. Function goes in %o5, args go in %o0 --> %o5. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index e602b857071a..7faba33202a9 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -518,11 +518,11 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr, unsigned long pte, unsigned long mmu) { - register unsigned long func asm("%o0"); - register unsigned long arg0 asm("%o1"); - register unsigned long arg1 asm("%o2"); - register unsigned long arg2 asm("%o3"); - register unsigned long arg3 asm("%o4"); + register unsigned long func asm("%o5"); + register unsigned long arg0 asm("%o0"); + register unsigned long arg1 asm("%o1"); + register unsigned long arg2 asm("%o2"); + register unsigned long arg3 asm("%o3"); func = HV_FAST_MMU_MAP_PERM_ADDR; arg0 = vaddr; @@ -1112,18 +1112,18 @@ static void __init tsb_phys_patch(void) /* Register this cpu's fault status area with the hypervisor. */ void __cpuinit sun4v_register_fault_status(void) { + register unsigned long func asm("%o5"); register unsigned long arg0 asm("%o0"); - register unsigned long arg1 asm("%o1"); int cpu = hard_smp_processor_id(); struct trap_per_cpu *tb = &trap_block[cpu]; unsigned long pa; pa = kern_base + ((unsigned long) tb - KERNBASE); - arg0 = HV_FAST_MMU_FAULT_AREA_CONF; - arg1 = pa; + func = HV_FAST_MMU_FAULT_AREA_CONF; + arg0 = pa; __asm__ __volatile__("ta %4" - : "=&r" (arg0), "=&r" (arg1) - : "0" (arg0), "1" (arg1), + : "=&r" (func), "=&r" (arg0) + : "0" (func), "1" (arg0), "i" (HV_FAST_TRAP)); } -- cgit v1.2.2 From 12eaa328f9fb2d3fcb5afb682c762690d05a3cd8 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 10 Feb 2006 15:39:51 -0800 Subject: [SPARC64]: Use ASI_SCRATCHPAD address 0x0 properly. This is where the virtual address of the fault status area belongs. To set it up we don't make a hypervisor call, instead we call OBP's SUNW,set-trap-table with the real address of the fault status area as the second argument. And right before that call we write the virtual address into ASI_SCRATCHPAD vaddr 0x0. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 7faba33202a9..88eb6f6be562 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1109,24 +1109,6 @@ static void __init tsb_phys_patch(void) } } -/* Register this cpu's fault status area with the hypervisor. */ -void __cpuinit sun4v_register_fault_status(void) -{ - register unsigned long func asm("%o5"); - register unsigned long arg0 asm("%o0"); - int cpu = hard_smp_processor_id(); - struct trap_per_cpu *tb = &trap_block[cpu]; - unsigned long pa; - - pa = kern_base + ((unsigned long) tb - KERNBASE); - func = HV_FAST_MMU_FAULT_AREA_CONF; - arg0 = pa; - __asm__ __volatile__("ta %4" - : "=&r" (func), "=&r" (arg0) - : "0" (func), "1" (arg0), - "i" (HV_FAST_TRAP)); -} - /* paging_init() sets up the page tables */ extern void cheetah_ecache_flush_init(void); @@ -1147,10 +1129,8 @@ void __init paging_init(void) tlb_type == hypervisor) tsb_phys_patch(); - if (tlb_type == hypervisor) { + if (tlb_type == hypervisor) sun4v_patch_tlb_handlers(); - sun4v_register_fault_status(); - } /* Find available physical memory... */ read_obp_memory("available", &pavail[0], &pavail_ents); -- cgit v1.2.2 From 490384e752a43aa281ed533e9de2da36df25c337 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 14:41:18 -0800 Subject: [SPARC64]: Register kernel TSB with hypervisor. We do this right after we take over the trap table from OBP. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 70 +++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 69 insertions(+), 1 deletion(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 88eb6f6be562..92756da273bd 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1109,6 +1109,69 @@ static void __init tsb_phys_patch(void) } } +/* Don't mark as init, we give this to the Hypervisor. */ +static struct hv_tsb_descr ktsb_descr[2]; +extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; + +static void __init sun4v_ktsb_init(void) +{ + unsigned long ktsb_pa; + + ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); + + switch (PAGE_SIZE) { + case 8 * 1024: + default: + ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K; + ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K; + break; + + case 64 * 1024: + ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K; + ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K; + break; + + case 512 * 1024: + ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K; + ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K; + break; + + case 4 * 1024 * 1024: + ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB; + ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB; + break; + }; + + ktsb_descr[0].assoc = 0; + ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; + ktsb_descr[0].ctx_idx = 0; + ktsb_descr[0].tsb_base = ktsb_pa; + ktsb_descr[0].resv = 0; + + /* XXX When we have a kernel large page size TSB, describe + * XXX it in ktsb_descr[1] here. 
+ */ +} + +void __cpuinit sun4v_ktsb_register(void) +{ + register unsigned long func asm("%o5"); + register unsigned long arg0 asm("%o0"); + register unsigned long arg1 asm("%o1"); + unsigned long pa; + + pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); + + func = HV_FAST_MMU_TSB_CTX0; + /* XXX set arg0 to 2 when we use ktsb_descr[1], see above XXX */ + arg0 = 1; + arg1 = pa; + __asm__ __volatile__("ta %6" + : "=&r" (func), "=&r" (arg0), "=&r" (arg1) + : "0" (func), "1" (arg0), "2" (arg1), + "i" (HV_FAST_TRAP)); +} + /* paging_init() sets up the page tables */ extern void cheetah_ecache_flush_init(void); @@ -1129,8 +1192,10 @@ void __init paging_init(void) tlb_type == hypervisor) tsb_phys_patch(); - if (tlb_type == hypervisor) + if (tlb_type == hypervisor) { sun4v_patch_tlb_handlers(); + sun4v_ktsb_init(); + } /* Find available physical memory... */ read_obp_memory("available", &pavail[0], &pavail_ents); @@ -1171,6 +1236,9 @@ void __init paging_init(void) __flush_tlb_all(); + if (tlb_type == hypervisor) + sun4v_ktsb_register(); + /* Setup bootmem... */ pages_avail = 0; last_valid_pfn = end_pfn = bootmem_init(&pages_avail); -- cgit v1.2.2 From c4bce90ea2069e5a87beac806de3090ab32128d5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 21:57:54 -0800 Subject: [SPARC64]: Deal with PTE layout differences in SUN4V. Yes, you heard it right, they changed the PTE layout for SUN4V. Ho hum... This is the simple and inefficient way to support this. It'll get optimized, don't worry. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 703 ++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 609 insertions(+), 94 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 92756da273bd..9c2fc239f3ee 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -6,6 +6,7 @@ */ #include +#include #include #include #include @@ -118,6 +119,7 @@ unsigned long phys_base __read_mostly; unsigned long kern_base __read_mostly; unsigned long kern_size __read_mostly; unsigned long pfn_base __read_mostly; +unsigned long kern_linear_pte_xor __read_mostly; /* get_new_mmu_context() uses "cache + 1". */ DEFINE_SPINLOCK(ctx_alloc_lock); @@ -256,6 +258,9 @@ static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long __tsb_insert(tsb_addr, tag, pte); } +unsigned long _PAGE_ALL_SZ_BITS __read_mostly; +unsigned long _PAGE_SZBITS __read_mostly; + void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { struct mm_struct *mm; @@ -398,39 +403,9 @@ struct linux_prom_translation { struct linux_prom_translation prom_trans[512] __read_mostly; unsigned int prom_trans_ents __read_mostly; -extern unsigned long prom_boot_page; -extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); -extern int prom_get_mmu_ihandle(void); -extern void register_prom_callbacks(void); - /* Exported for SMP bootup purposes. */ unsigned long kern_locked_tte_data; -/* - * Translate PROM's mapping we capture at boot time into physical address. - * The second parameter is only set from prom_callback() invocations. 
- */ -unsigned long prom_virt_to_phys(unsigned long promva, int *error) -{ - int i; - - for (i = 0; i < prom_trans_ents; i++) { - struct linux_prom_translation *p = &prom_trans[i]; - - if (promva >= p->virt && - promva < (p->virt + p->size)) { - unsigned long base = p->data & _PAGE_PADDR; - - if (error) - *error = 0; - return base + (promva & (8192 - 1)); - } - } - if (error) - *error = 1; - return 0UL; -} - /* The obp translations are saved based on 8k pagesize, since obp can * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> * HI_OBP_ADDRESS range are handled in ktlb.S. @@ -537,6 +512,8 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr, "3" (arg2), "4" (arg3)); } +static unsigned long kern_large_tte(unsigned long paddr); + static void __init remap_kernel(void) { unsigned long phys_page, tte_vaddr, tte_data; @@ -544,9 +521,7 @@ static void __init remap_kernel(void) tte_vaddr = (unsigned long) KERNBASE; phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; - tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB | - _PAGE_CP | _PAGE_CV | _PAGE_P | - _PAGE_L | _PAGE_W)); + tte_data = kern_large_tte(phys_page); kern_locked_tte_data = tte_data; @@ -591,10 +566,6 @@ static void __init inherit_prom_mappings(void) prom_printf("Remapping the kernel... "); remap_kernel(); prom_printf("done.\n"); - - prom_printf("Registering callbacks... "); - register_prom_callbacks(); - prom_printf("done.\n"); } void prom_world(int enter) @@ -631,63 +602,6 @@ void __flush_dcache_range(unsigned long start, unsigned long end) } #endif /* DCACHE_ALIASING_POSSIBLE */ -/* If not locked, zap it. */ -void __flush_tlb_all(void) -{ - unsigned long pstate; - int i; - - __asm__ __volatile__("flushw\n\t" - "rdpr %%pstate, %0\n\t" - "wrpr %0, %1, %%pstate" - : "=r" (pstate) - : "i" (PSTATE_IE)); - if (tlb_type == spitfire) { - for (i = 0; i < 64; i++) { - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no - * cheetah+ page size encodings. - */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - spitfire_put_dtlb_data(i, 0x0UL); - } - - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no - * cheetah+ page size encodings. - */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - if (!(spitfire_get_itlb_data(i) & _PAGE_L)) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); - spitfire_put_itlb_data(i, 0x0UL); - } - } - } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { - cheetah_flush_dtlb_all(); - cheetah_flush_itlb_all(); - } - __asm__ __volatile__("wrpr %0, 0, %%pstate" - : : "r" (pstate)); -} - /* Caller does TLB context flushing on local CPU if necessary. * The caller also ensures that CTX_VALID(mm->context) is false. 
* @@ -1180,6 +1094,9 @@ extern void sun4v_patch_tlb_handlers(void); static unsigned long last_valid_pfn; pgd_t swapper_pg_dir[2048]; +static void sun4u_pgprot_init(void); +static void sun4v_pgprot_init(void); + void __init paging_init(void) { unsigned long end_pfn, pages_avail, shift; @@ -1188,6 +1105,11 @@ void __init paging_init(void) kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; + if (tlb_type == hypervisor) + sun4v_pgprot_init(); + else + sun4u_pgprot_init(); + if (tlb_type == cheetah_plus || tlb_type == hypervisor) tsb_phys_patch(); @@ -1411,3 +1333,596 @@ void free_initrd_mem(unsigned long start, unsigned long end) } } #endif + +/* SUN4U pte bits... */ +#define _PAGE_SZ4MB_4U 0x6000000000000000 /* 4MB Page */ +#define _PAGE_SZ512K_4U 0x4000000000000000 /* 512K Page */ +#define _PAGE_SZ64K_4U 0x2000000000000000 /* 64K Page */ +#define _PAGE_SZ8K_4U 0x0000000000000000 /* 8K Page */ +#define _PAGE_NFO_4U 0x1000000000000000 /* No Fault Only */ +#define _PAGE_IE_4U 0x0800000000000000 /* Invert Endianness */ +#define _PAGE_SOFT2_4U 0x07FC000000000000 /* Software bits, set 2 */ +#define _PAGE_RES1_4U 0x0002000000000000 /* Reserved */ +#define _PAGE_SZ32MB_4U 0x0001000000000000 /* (Panther) 32MB page */ +#define _PAGE_SZ256MB_4U 0x2001000000000000 /* (Panther) 256MB page */ +#define _PAGE_SN_4U 0x0000800000000000 /* (Cheetah) Snoop */ +#define _PAGE_RES2_4U 0x0000780000000000 /* Reserved */ +#define _PAGE_PADDR_4U 0x000007FFFFFFE000 /* (Cheetah) paddr[42:13] */ +#define _PAGE_SOFT_4U 0x0000000000001F80 /* Software bits: */ +#define _PAGE_EXEC_4U 0x0000000000001000 /* Executable SW bit */ +#define _PAGE_MODIFIED_4U 0x0000000000000800 /* Modified (dirty) */ +#define _PAGE_FILE_4U 0x0000000000000800 /* Pagecache page */ +#define _PAGE_ACCESSED_4U 0x0000000000000400 /* Accessed (ref'd) */ +#define _PAGE_READ_4U 0x0000000000000200 /* Readable SW Bit */ +#define _PAGE_WRITE_4U 0x0000000000000100 /* Writable SW Bit */ +#define _PAGE_PRESENT_4U 0x0000000000000080 /* Present */ +#define _PAGE_L_4U 0x0000000000000040 /* Locked TTE */ +#define _PAGE_CP_4U 0x0000000000000020 /* Cacheable in P-Cache */ +#define _PAGE_CV_4U 0x0000000000000010 /* Cacheable in V-Cache */ +#define _PAGE_E_4U 0x0000000000000008 /* side-Effect */ +#define _PAGE_P_4U 0x0000000000000004 /* Privileged Page */ +#define _PAGE_W_4U 0x0000000000000002 /* Writable */ + +/* SUN4V pte bits... 
*/ +#define _PAGE_NFO_4V 0x4000000000000000 /* No Fault Only */ +#define _PAGE_SOFT2_4V 0x3F00000000000000 /* Software bits, set 2 */ +#define _PAGE_MODIFIED_4V 0x2000000000000000 /* Modified (dirty) */ +#define _PAGE_ACCESSED_4V 0x1000000000000000 /* Accessed (ref'd) */ +#define _PAGE_READ_4V 0x0800000000000000 /* Readable SW Bit */ +#define _PAGE_WRITE_4V 0x0400000000000000 /* Writable SW Bit */ +#define _PAGE_PADDR_4V 0x00FFFFFFFFFFE000 /* paddr[55:13] */ +#define _PAGE_IE_4V 0x0000000000001000 /* Invert Endianness */ +#define _PAGE_E_4V 0x0000000000000800 /* side-Effect */ +#define _PAGE_CP_4V 0x0000000000000400 /* Cacheable in P-Cache */ +#define _PAGE_CV_4V 0x0000000000000200 /* Cacheable in V-Cache */ +#define _PAGE_P_4V 0x0000000000000100 /* Privileged Page */ +#define _PAGE_EXEC_4V 0x0000000000000080 /* Executable Page */ +#define _PAGE_W_4V 0x0000000000000040 /* Writable */ +#define _PAGE_SOFT_4V 0x0000000000000030 /* Software bits */ +#define _PAGE_FILE_4V 0x0000000000000020 /* Pagecache page */ +#define _PAGE_PRESENT_4V 0x0000000000000010 /* Present */ +#define _PAGE_RESV_4V 0x0000000000000008 /* Reserved */ +#define _PAGE_SZ16GB_4V 0x0000000000000007 /* 16GB Page */ +#define _PAGE_SZ2GB_4V 0x0000000000000006 /* 2GB Page */ +#define _PAGE_SZ256MB_4V 0x0000000000000005 /* 256MB Page */ +#define _PAGE_SZ32MB_4V 0x0000000000000004 /* 32MB Page */ +#define _PAGE_SZ4MB_4V 0x0000000000000003 /* 4MB Page */ +#define _PAGE_SZ512K_4V 0x0000000000000002 /* 512K Page */ +#define _PAGE_SZ64K_4V 0x0000000000000001 /* 64K Page */ +#define _PAGE_SZ8K_4V 0x0000000000000000 /* 8K Page */ + +#if PAGE_SHIFT == 13 +#define _PAGE_SZBITS_4U _PAGE_SZ8K_4U +#define _PAGE_SZBITS_4V _PAGE_SZ8K_4V +#elif PAGE_SHIFT == 16 +#define _PAGE_SZBITS_4U _PAGE_SZ64K_4U +#define _PAGE_SZBITS_4V _PAGE_SZ64K_4V +#elif PAGE_SHIFT == 19 +#define _PAGE_SZBITS_4U _PAGE_SZ512K_4U +#define _PAGE_SZBITS_4V _PAGE_SZ512K_4V +#elif PAGE_SHIFT == 22 +#define _PAGE_SZBITS_4U _PAGE_SZ4MB_4U +#define _PAGE_SZBITS_4V _PAGE_SZ4MB_4V +#else +#error Wrong PAGE_SHIFT specified +#endif + +#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) +#define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U +#define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) +#define _PAGE_SZHUGE_4U _PAGE_SZ512K_4U +#define _PAGE_SZHUGE_4V _PAGE_SZ512K_4V +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) +#define _PAGE_SZHUGE_4U _PAGE_SZ64K_4U +#define _PAGE_SZHUGE_4V _PAGE_SZ64K_4V +#endif + +#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) +#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) +#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) +#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) +#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) +#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) + +pgprot_t PAGE_KERNEL __read_mostly; +EXPORT_SYMBOL(PAGE_KERNEL); + +pgprot_t PAGE_KERNEL_LOCKED __read_mostly; +pgprot_t PAGE_COPY __read_mostly; +pgprot_t PAGE_EXEC __read_mostly; +unsigned long pg_iobits __read_mostly; + +unsigned long _PAGE_IE __read_mostly; +unsigned long _PAGE_E __read_mostly; +unsigned long _PAGE_CACHE __read_mostly; + +static void prot_init_common(unsigned long page_none, + unsigned long page_shared, + unsigned long page_copy, + unsigned long page_readonly, + unsigned long page_exec_bit) +{ + PAGE_COPY = __pgprot(page_copy); + + protection_map[0x0] = __pgprot(page_none); + protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit); + protection_map[0x2] 
= __pgprot(page_copy & ~page_exec_bit); + protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit); + protection_map[0x4] = __pgprot(page_readonly); + protection_map[0x5] = __pgprot(page_readonly); + protection_map[0x6] = __pgprot(page_copy); + protection_map[0x7] = __pgprot(page_copy); + protection_map[0x8] = __pgprot(page_none); + protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit); + protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit); + protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit); + protection_map[0xc] = __pgprot(page_readonly); + protection_map[0xd] = __pgprot(page_readonly); + protection_map[0xe] = __pgprot(page_shared); + protection_map[0xf] = __pgprot(page_shared); +} + +static void __init sun4u_pgprot_init(void) +{ + unsigned long page_none, page_shared, page_copy, page_readonly; + unsigned long page_exec_bit; + + PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | + _PAGE_CACHE_4U | _PAGE_P_4U | + __ACCESS_BITS_4U | __DIRTY_BITS_4U | + _PAGE_EXEC_4U); + PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | + _PAGE_CACHE_4U | _PAGE_P_4U | + __ACCESS_BITS_4U | __DIRTY_BITS_4U | + _PAGE_EXEC_4U | _PAGE_L_4U); + PAGE_EXEC = __pgprot(_PAGE_EXEC_4U); + + _PAGE_IE = _PAGE_IE_4U; + _PAGE_E = _PAGE_E_4U; + _PAGE_CACHE = _PAGE_CACHE_4U; + + pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U | + __ACCESS_BITS_4U | _PAGE_E_4U); + + kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ + 0xfffff80000000000; + kern_linear_pte_xor |= (_PAGE_CP_4U | _PAGE_CV_4U | + _PAGE_P_4U | _PAGE_W_4U); + + _PAGE_SZBITS = _PAGE_SZBITS_4U; + _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U | + _PAGE_SZ64K_4U | _PAGE_SZ8K_4U | + _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U); + + + page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U; + page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | + __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U); + page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | + __ACCESS_BITS_4U | _PAGE_EXEC_4U); + page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | + __ACCESS_BITS_4U | _PAGE_EXEC_4U); + + page_exec_bit = _PAGE_EXEC_4U; + + prot_init_common(page_none, page_shared, page_copy, page_readonly, + page_exec_bit); +} + +static void __init sun4v_pgprot_init(void) +{ + unsigned long page_none, page_shared, page_copy, page_readonly; + unsigned long page_exec_bit; + + PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID | + _PAGE_CACHE_4V | _PAGE_P_4V | + __ACCESS_BITS_4V | __DIRTY_BITS_4V | + _PAGE_EXEC_4V); + PAGE_KERNEL_LOCKED = PAGE_KERNEL; + PAGE_EXEC = __pgprot(_PAGE_EXEC_4V); + + _PAGE_IE = _PAGE_IE_4V; + _PAGE_E = _PAGE_E_4V; + _PAGE_CACHE = _PAGE_CACHE_4V; + + kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ + 0xfffff80000000000; + kern_linear_pte_xor |= (_PAGE_CP_4V | _PAGE_CV_4V | + _PAGE_P_4V | _PAGE_W_4V); + + pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V | + __ACCESS_BITS_4V | _PAGE_E_4V); + + _PAGE_SZBITS = _PAGE_SZBITS_4V; + _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V | + _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V | + _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | + _PAGE_SZ64K_4V | _PAGE_SZ8K_4V); + + page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V; + page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | + __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V); + page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | + __ACCESS_BITS_4V | _PAGE_EXEC_4V); + page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | 
_PAGE_CACHE_4V | + __ACCESS_BITS_4V | _PAGE_EXEC_4V); + + page_exec_bit = _PAGE_EXEC_4V; + + prot_init_common(page_none, page_shared, page_copy, page_readonly, + page_exec_bit); +} + +unsigned long pte_sz_bits(unsigned long sz) +{ + if (tlb_type == hypervisor) { + switch (sz) { + case 8 * 1024: + default: + return _PAGE_SZ8K_4V; + case 64 * 1024: + return _PAGE_SZ64K_4V; + case 512 * 1024: + return _PAGE_SZ512K_4V; + case 4 * 1024 * 1024: + return _PAGE_SZ4MB_4V; + }; + } else { + switch (sz) { + case 8 * 1024: + default: + return _PAGE_SZ8K_4U; + case 64 * 1024: + return _PAGE_SZ64K_4U; + case 512 * 1024: + return _PAGE_SZ512K_4U; + case 4 * 1024 * 1024: + return _PAGE_SZ4MB_4U; + }; + } +} + +pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size) +{ + pte_t pte; + if (tlb_type == hypervisor) { + pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4V) & + ~(unsigned long)_PAGE_CACHE_4V); + } else { + pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4U) & + ~(unsigned long)_PAGE_CACHE_4U); + } + pte_val(pte) |= (((unsigned long)space) << 32); + pte_val(pte) |= pte_sz_bits(page_size); + return pte; +} + +unsigned long pte_present(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? + _PAGE_PRESENT_4V : _PAGE_PRESENT_4U)); +} + +unsigned long pte_file(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? + _PAGE_FILE_4V : _PAGE_FILE_4U)); +} + +unsigned long pte_read(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? + _PAGE_READ_4V : _PAGE_READ_4U)); +} + +unsigned long pte_exec(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? + _PAGE_EXEC_4V : _PAGE_EXEC_4U)); +} + +unsigned long pte_write(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? + _PAGE_WRITE_4V : _PAGE_WRITE_4U)); +} + +unsigned long pte_dirty(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? + _PAGE_MODIFIED_4V : _PAGE_MODIFIED_4U)); +} + +unsigned long pte_young(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? 
+ _PAGE_ACCESSED_4V : _PAGE_ACCESSED_4U)); +} + +pte_t pte_wrprotect(pte_t pte) +{ + unsigned long mask = _PAGE_WRITE_4U | _PAGE_W_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_WRITE_4V | _PAGE_W_4V; + + return __pte(pte_val(pte) & ~mask); +} + +pte_t pte_rdprotect(pte_t pte) +{ + unsigned long mask = _PAGE_R | _PAGE_READ_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_R | _PAGE_READ_4V; + + return __pte(pte_val(pte) & ~mask); +} + +pte_t pte_mkclean(pte_t pte) +{ + unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_MODIFIED_4V | _PAGE_W_4V; + + return __pte(pte_val(pte) & ~mask); +} + +pte_t pte_mkold(pte_t pte) +{ + unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_R | _PAGE_ACCESSED_4V; + + return __pte(pte_val(pte) & ~mask); +} + +pte_t pte_mkyoung(pte_t pte) +{ + unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_R | _PAGE_ACCESSED_4V; + + return __pte(pte_val(pte) | mask); +} + +pte_t pte_mkwrite(pte_t pte) +{ + unsigned long mask = _PAGE_WRITE_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_WRITE_4V; + + return __pte(pte_val(pte) | mask); +} + +pte_t pte_mkdirty(pte_t pte) +{ + unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_MODIFIED_4V | _PAGE_W_4V; + + return __pte(pte_val(pte) | mask); +} + +pte_t pte_mkhuge(pte_t pte) +{ + unsigned long mask = _PAGE_SZHUGE_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_SZHUGE_4V; + + return __pte(pte_val(pte) | mask); +} + +pte_t pgoff_to_pte(unsigned long off) +{ + unsigned long bit = _PAGE_FILE_4U; + + if (tlb_type == hypervisor) + bit = _PAGE_FILE_4V; + + return __pte((off << PAGE_SHIFT) | bit); +} + +pgprot_t pgprot_noncached(pgprot_t prot) +{ + unsigned long val = pgprot_val(prot); + unsigned long off = _PAGE_CP_4U | _PAGE_CV_4U; + unsigned long on = _PAGE_E_4U; + + if (tlb_type == hypervisor) { + off = _PAGE_CP_4V | _PAGE_CV_4V; + on = _PAGE_E_4V; + } + + return __pgprot((val & ~off) | on); +} + +pte_t pfn_pte(unsigned long pfn, pgprot_t prot) +{ + unsigned long sz_bits = _PAGE_SZBITS_4U; + + if (tlb_type == hypervisor) + sz_bits = _PAGE_SZBITS_4V; + + return __pte((pfn << PAGE_SHIFT) | pgprot_val(prot) | sz_bits); +} + +unsigned long pte_pfn(pte_t pte) +{ + unsigned long mask = _PAGE_PADDR_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_PADDR_4V; + + return (pte_val(pte) & mask) >> PAGE_SHIFT; +} + +pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) +{ + unsigned long preserve_mask; + unsigned long val; + + preserve_mask = (_PAGE_PADDR_4U | + _PAGE_MODIFIED_4U | + _PAGE_ACCESSED_4U | + _PAGE_CP_4U | + _PAGE_CV_4U | + _PAGE_E_4U | + _PAGE_PRESENT_4U | + _PAGE_SZBITS_4U); + if (tlb_type == hypervisor) + preserve_mask = (_PAGE_PADDR_4V | + _PAGE_MODIFIED_4V | + _PAGE_ACCESSED_4V | + _PAGE_CP_4V | + _PAGE_CV_4V | + _PAGE_E_4V | + _PAGE_PRESENT_4V | + _PAGE_SZBITS_4V); + + val = (pte_val(orig_pte) & preserve_mask); + + return __pte(val | (pgprot_val(new_prot) & ~preserve_mask)); +} + +static unsigned long kern_large_tte(unsigned long paddr) +{ + unsigned long val; + + val = (_PAGE_VALID | _PAGE_SZ4MB_4U | + _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | + _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U); + if (tlb_type == hypervisor) + val = (_PAGE_VALID | _PAGE_SZ4MB_4V | + _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | + _PAGE_EXEC_4V | _PAGE_W_4V); + + return val | paddr; +} + +/* + * Translate PROM's mapping we capture at boot time into physical 
address. + * The second parameter is only set from prom_callback() invocations. + */ +unsigned long prom_virt_to_phys(unsigned long promva, int *error) +{ + unsigned long mask; + int i; + + mask = _PAGE_PADDR_4U; + if (tlb_type == hypervisor) + mask = _PAGE_PADDR_4V; + + for (i = 0; i < prom_trans_ents; i++) { + struct linux_prom_translation *p = &prom_trans[i]; + + if (promva >= p->virt && + promva < (p->virt + p->size)) { + unsigned long base = p->data & mask; + + if (error) + *error = 0; + return base + (promva & (8192 - 1)); + } + } + if (error) + *error = 1; + return 0UL; +} + +/* XXX We should kill off this ugly thing at some point. XXX */ +unsigned long sun4u_get_pte(unsigned long addr) +{ + pgd_t *pgdp; + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + unsigned long mask = _PAGE_PADDR_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_PADDR_4V; + + if (addr >= PAGE_OFFSET) + return addr & mask; + + if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS)) + return prom_virt_to_phys(addr, NULL); + + pgdp = pgd_offset_k(addr); + pudp = pud_offset(pgdp, addr); + pmdp = pmd_offset(pudp, addr); + ptep = pte_offset_kernel(pmdp, addr); + + return pte_val(*ptep) & mask; +} + +/* If not locked, zap it. */ +void __flush_tlb_all(void) +{ + unsigned long pstate; + int i; + + __asm__ __volatile__("flushw\n\t" + "rdpr %%pstate, %0\n\t" + "wrpr %0, %1, %%pstate" + : "=r" (pstate) + : "i" (PSTATE_IE)); + if (tlb_type == spitfire) { + for (i = 0; i < 64; i++) { + /* Spitfire Errata #32 workaround */ + /* NOTE: Always runs on spitfire, so no + * cheetah+ page size encodings. + */ + __asm__ __volatile__("stxa %0, [%1] %2\n\t" + "flush %%g6" + : /* No outputs */ + : "r" (0), + "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); + + if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) { + __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); + spitfire_put_dtlb_data(i, 0x0UL); + } + + /* Spitfire Errata #32 workaround */ + /* NOTE: Always runs on spitfire, so no + * cheetah+ page size encodings. + */ + __asm__ __volatile__("stxa %0, [%1] %2\n\t" + "flush %%g6" + : /* No outputs */ + : "r" (0), + "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); + + if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) { + __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); + spitfire_put_itlb_data(i, 0x0UL); + } + } + } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { + cheetah_flush_dtlb_all(); + cheetah_flush_itlb_all(); + } + __asm__ __volatile__("wrpr %0, 0, %%pstate" + : : "r" (pstate)); +} -- cgit v1.2.2 From ff02e0d26f139ad95ec3a7e94f88faccaa180dff Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 12 Feb 2006 17:07:51 -0800 Subject: [SPARC64]: Move PTE field definitions back into asm/pgtable.h Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 84 -------------------------------------------------- 1 file changed, 84 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 9c2fc239f3ee..81f9f4bffaff 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1334,90 +1334,6 @@ void free_initrd_mem(unsigned long start, unsigned long end) } #endif -/* SUN4U pte bits...
*/ -#define _PAGE_SZ4MB_4U 0x6000000000000000 /* 4MB Page */ -#define _PAGE_SZ512K_4U 0x4000000000000000 /* 512K Page */ -#define _PAGE_SZ64K_4U 0x2000000000000000 /* 64K Page */ -#define _PAGE_SZ8K_4U 0x0000000000000000 /* 8K Page */ -#define _PAGE_NFO_4U 0x1000000000000000 /* No Fault Only */ -#define _PAGE_IE_4U 0x0800000000000000 /* Invert Endianness */ -#define _PAGE_SOFT2_4U 0x07FC000000000000 /* Software bits, set 2 */ -#define _PAGE_RES1_4U 0x0002000000000000 /* Reserved */ -#define _PAGE_SZ32MB_4U 0x0001000000000000 /* (Panther) 32MB page */ -#define _PAGE_SZ256MB_4U 0x2001000000000000 /* (Panther) 256MB page */ -#define _PAGE_SN_4U 0x0000800000000000 /* (Cheetah) Snoop */ -#define _PAGE_RES2_4U 0x0000780000000000 /* Reserved */ -#define _PAGE_PADDR_4U 0x000007FFFFFFE000 /* (Cheetah) paddr[42:13] */ -#define _PAGE_SOFT_4U 0x0000000000001F80 /* Software bits: */ -#define _PAGE_EXEC_4U 0x0000000000001000 /* Executable SW bit */ -#define _PAGE_MODIFIED_4U 0x0000000000000800 /* Modified (dirty) */ -#define _PAGE_FILE_4U 0x0000000000000800 /* Pagecache page */ -#define _PAGE_ACCESSED_4U 0x0000000000000400 /* Accessed (ref'd) */ -#define _PAGE_READ_4U 0x0000000000000200 /* Readable SW Bit */ -#define _PAGE_WRITE_4U 0x0000000000000100 /* Writable SW Bit */ -#define _PAGE_PRESENT_4U 0x0000000000000080 /* Present */ -#define _PAGE_L_4U 0x0000000000000040 /* Locked TTE */ -#define _PAGE_CP_4U 0x0000000000000020 /* Cacheable in P-Cache */ -#define _PAGE_CV_4U 0x0000000000000010 /* Cacheable in V-Cache */ -#define _PAGE_E_4U 0x0000000000000008 /* side-Effect */ -#define _PAGE_P_4U 0x0000000000000004 /* Privileged Page */ -#define _PAGE_W_4U 0x0000000000000002 /* Writable */ - -/* SUN4V pte bits... */ -#define _PAGE_NFO_4V 0x4000000000000000 /* No Fault Only */ -#define _PAGE_SOFT2_4V 0x3F00000000000000 /* Software bits, set 2 */ -#define _PAGE_MODIFIED_4V 0x2000000000000000 /* Modified (dirty) */ -#define _PAGE_ACCESSED_4V 0x1000000000000000 /* Accessed (ref'd) */ -#define _PAGE_READ_4V 0x0800000000000000 /* Readable SW Bit */ -#define _PAGE_WRITE_4V 0x0400000000000000 /* Writable SW Bit */ -#define _PAGE_PADDR_4V 0x00FFFFFFFFFFE000 /* paddr[55:13] */ -#define _PAGE_IE_4V 0x0000000000001000 /* Invert Endianness */ -#define _PAGE_E_4V 0x0000000000000800 /* side-Effect */ -#define _PAGE_CP_4V 0x0000000000000400 /* Cacheable in P-Cache */ -#define _PAGE_CV_4V 0x0000000000000200 /* Cacheable in V-Cache */ -#define _PAGE_P_4V 0x0000000000000100 /* Privileged Page */ -#define _PAGE_EXEC_4V 0x0000000000000080 /* Executable Page */ -#define _PAGE_W_4V 0x0000000000000040 /* Writable */ -#define _PAGE_SOFT_4V 0x0000000000000030 /* Software bits */ -#define _PAGE_FILE_4V 0x0000000000000020 /* Pagecache page */ -#define _PAGE_PRESENT_4V 0x0000000000000010 /* Present */ -#define _PAGE_RESV_4V 0x0000000000000008 /* Reserved */ -#define _PAGE_SZ16GB_4V 0x0000000000000007 /* 16GB Page */ -#define _PAGE_SZ2GB_4V 0x0000000000000006 /* 2GB Page */ -#define _PAGE_SZ256MB_4V 0x0000000000000005 /* 256MB Page */ -#define _PAGE_SZ32MB_4V 0x0000000000000004 /* 32MB Page */ -#define _PAGE_SZ4MB_4V 0x0000000000000003 /* 4MB Page */ -#define _PAGE_SZ512K_4V 0x0000000000000002 /* 512K Page */ -#define _PAGE_SZ64K_4V 0x0000000000000001 /* 64K Page */ -#define _PAGE_SZ8K_4V 0x0000000000000000 /* 8K Page */ - -#if PAGE_SHIFT == 13 -#define _PAGE_SZBITS_4U _PAGE_SZ8K_4U -#define _PAGE_SZBITS_4V _PAGE_SZ8K_4V -#elif PAGE_SHIFT == 16 -#define _PAGE_SZBITS_4U _PAGE_SZ64K_4U -#define _PAGE_SZBITS_4V _PAGE_SZ64K_4V -#elif 
PAGE_SHIFT == 19 -#define _PAGE_SZBITS_4U _PAGE_SZ512K_4U -#define _PAGE_SZBITS_4V _PAGE_SZ512K_4V -#elif PAGE_SHIFT == 22 -#define _PAGE_SZBITS_4U _PAGE_SZ4MB_4U -#define _PAGE_SZBITS_4V _PAGE_SZ4MB_4V -#else -#error Wrong PAGE_SHIFT specified -#endif - -#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) -#define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U -#define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) -#define _PAGE_SZHUGE_4U _PAGE_SZ512K_4U -#define _PAGE_SZHUGE_4V _PAGE_SZ512K_4V -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) -#define _PAGE_SZHUGE_4U _PAGE_SZ64K_4U -#define _PAGE_SZHUGE_4V _PAGE_SZ64K_4V -#endif - #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) -- cgit v1.2.2 From cf627156c450cd5a0741b31f55181db3400d4887 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 12 Feb 2006 21:10:07 -0800 Subject: [SPARC64]: Use inline patching for critical PTE operations. This handles the SUN4U vs SUN4V PTE layout differences with near zero performance cost. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 211 +------------------------------------------------ 1 file changed, 3 insertions(+), 208 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 81f9f4bffaff..6f860c39db82 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1502,217 +1502,12 @@ unsigned long pte_sz_bits(unsigned long sz) pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size) { pte_t pte; - if (tlb_type == hypervisor) { - pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4V) & - ~(unsigned long)_PAGE_CACHE_4V); - } else { - pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4U) & - ~(unsigned long)_PAGE_CACHE_4U); - } + + pte_val(pte) = page | pgprot_val(pgprot_noncached(prot)); pte_val(pte) |= (((unsigned long)space) << 32); pte_val(pte) |= pte_sz_bits(page_size); - return pte; -} - -unsigned long pte_present(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? - _PAGE_PRESENT_4V : _PAGE_PRESENT_4U)); -} - -unsigned long pte_file(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? - _PAGE_FILE_4V : _PAGE_FILE_4U)); -} - -unsigned long pte_read(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? - _PAGE_READ_4V : _PAGE_READ_4U)); -} - -unsigned long pte_exec(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? - _PAGE_EXEC_4V : _PAGE_EXEC_4U)); -} - -unsigned long pte_write(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? - _PAGE_WRITE_4V : _PAGE_WRITE_4U)); -} - -unsigned long pte_dirty(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? - _PAGE_MODIFIED_4V : _PAGE_MODIFIED_4U)); -} - -unsigned long pte_young(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? 
- _PAGE_ACCESSED_4V : _PAGE_ACCESSED_4U)); -} -pte_t pte_wrprotect(pte_t pte) -{ - unsigned long mask = _PAGE_WRITE_4U | _PAGE_W_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_WRITE_4V | _PAGE_W_4V; - - return __pte(pte_val(pte) & ~mask); -} - -pte_t pte_rdprotect(pte_t pte) -{ - unsigned long mask = _PAGE_R | _PAGE_READ_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_R | _PAGE_READ_4V; - - return __pte(pte_val(pte) & ~mask); -} - -pte_t pte_mkclean(pte_t pte) -{ - unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_MODIFIED_4V | _PAGE_W_4V; - - return __pte(pte_val(pte) & ~mask); -} - -pte_t pte_mkold(pte_t pte) -{ - unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_R | _PAGE_ACCESSED_4V; - - return __pte(pte_val(pte) & ~mask); -} - -pte_t pte_mkyoung(pte_t pte) -{ - unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_R | _PAGE_ACCESSED_4V; - - return __pte(pte_val(pte) | mask); -} - -pte_t pte_mkwrite(pte_t pte) -{ - unsigned long mask = _PAGE_WRITE_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_WRITE_4V; - - return __pte(pte_val(pte) | mask); -} - -pte_t pte_mkdirty(pte_t pte) -{ - unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_MODIFIED_4V | _PAGE_W_4V; - - return __pte(pte_val(pte) | mask); -} - -pte_t pte_mkhuge(pte_t pte) -{ - unsigned long mask = _PAGE_SZHUGE_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_SZHUGE_4V; - - return __pte(pte_val(pte) | mask); -} - -pte_t pgoff_to_pte(unsigned long off) -{ - unsigned long bit = _PAGE_FILE_4U; - - if (tlb_type == hypervisor) - bit = _PAGE_FILE_4V; - - return __pte((off << PAGE_SHIFT) | bit); -} - -pgprot_t pgprot_noncached(pgprot_t prot) -{ - unsigned long val = pgprot_val(prot); - unsigned long off = _PAGE_CP_4U | _PAGE_CV_4U; - unsigned long on = _PAGE_E_4U; - - if (tlb_type == hypervisor) { - off = _PAGE_CP_4V | _PAGE_CV_4V; - on = _PAGE_E_4V; - } - - return __pgprot((val & ~off) | on); -} - -pte_t pfn_pte(unsigned long pfn, pgprot_t prot) -{ - unsigned long sz_bits = _PAGE_SZBITS_4U; - - if (tlb_type == hypervisor) - sz_bits = _PAGE_SZBITS_4V; - - return __pte((pfn << PAGE_SHIFT) | pgprot_val(prot) | sz_bits); -} - -unsigned long pte_pfn(pte_t pte) -{ - unsigned long mask = _PAGE_PADDR_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_PADDR_4V; - - return (pte_val(pte) & mask) >> PAGE_SHIFT; -} - -pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) -{ - unsigned long preserve_mask; - unsigned long val; - - preserve_mask = (_PAGE_PADDR_4U | - _PAGE_MODIFIED_4U | - _PAGE_ACCESSED_4U | - _PAGE_CP_4U | - _PAGE_CV_4U | - _PAGE_E_4U | - _PAGE_PRESENT_4U | - _PAGE_SZBITS_4U); - if (tlb_type == hypervisor) - preserve_mask = (_PAGE_PADDR_4V | - _PAGE_MODIFIED_4V | - _PAGE_ACCESSED_4V | - _PAGE_CP_4V | - _PAGE_CV_4V | - _PAGE_E_4V | - _PAGE_PRESENT_4V | - _PAGE_SZBITS_4V); - - val = (pte_val(orig_pte) & preserve_mask); - - return __pte(val | (pgprot_val(new_prot) & ~preserve_mask)); + return pte; } static unsigned long kern_large_tte(unsigned long paddr) -- cgit v1.2.2 From 3b3ab2eb9cf07ef1bc7a676c19aab994adb41a87 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 09:54:42 -0800 Subject: [SPARC64]: Use phys tsb address in tsb_insert() in SUN4V. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 6f860c39db82..0137d3dc6aee 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -252,7 +252,7 @@ static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long { unsigned long tsb_addr = (unsigned long) ent; - if (tlb_type == cheetah_plus) + if (tlb_type == cheetah_plus || tlb_type == hypervisor) tsb_addr = __pa(tsb_addr); __tsb_insert(tsb_addr, tag, pte); -- cgit v1.2.2 From 3f19a84e39619053f117bd5bb9183c5bfea7db45 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 12:03:20 -0800 Subject: [SPARC64]: Set associativity of kernel TSB descriptor correctly. It should be 1, not 0. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 0137d3dc6aee..950d58082e28 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1056,7 +1056,7 @@ static void __init sun4v_ktsb_init(void) break; }; - ktsb_descr[0].assoc = 0; + ktsb_descr[0].assoc = 1; ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; ktsb_descr[0].ctx_idx = 0; ktsb_descr[0].tsb_base = ktsb_pa; -- cgit v1.2.2 From 12e126ad229abc718d05600027fcd5794c1e31e5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 14:40:30 -0800 Subject: [SPARC64]: Check for errors in hypervisor_tlb_lock(). Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 950d58082e28..bd9e3205674b 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -510,6 +510,11 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr, "=&r" (arg3) : "0" (func), "1" (arg0), "2" (arg1), "3" (arg2), "4" (arg3)); + if (arg0 != 0) { + prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: " + "errors with %lx\n", vaddr, 0, pte, mmu, arg0); + prom_halt(); + } } static unsigned long kern_large_tte(unsigned long paddr); -- cgit v1.2.2 From 8b234274418d6d79527c4ac3a72da446ca4cb35f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 18:01:02 -0800 Subject: [SPARC64]: More TLB/TSB handling fixes. The SUN4V convention with non-shared TSBs is that the context bit of the TAG is clear. So we have to choose an "invalid" bit and initialize new TSBs appropriately. Otherwise a zero TAG looks "valid". Make sure, for the window fixup cases, that we use the right global registers and that we don't potentially trample on the live global registers in etrap/rtrap handling (%g2 and %g6) and that we put the missing virtual address properly in %g5. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index bd9e3205674b..aa2aec6373c3 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -296,7 +296,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p tsb = &mm->context.tsb[(address >> PAGE_SHIFT) & (mm->context.tsb_nentries - 1UL)]; - tag = (address >> 22UL) | CTX_HWBITS(mm->context) << 48UL; + tag = (address >> 22UL); tsb_insert(tsb, tag, pte_val(pte)); } } @@ -1110,6 +1110,8 @@ void __init paging_init(void) kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; + memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); + if (tlb_type == hypervisor) sun4v_pgprot_init(); else -- cgit v1.2.2 From 0f15952ac8641bde1045162ffd4a7b474cc318b0 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 18 Feb 2006 12:43:16 -0800 Subject: [SPARC64]: Export a PAGE_SHARED symbol. For drivers/media/*, noticed by Fabbione. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index aa2aec6373c3..c7aa4404edca 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1353,6 +1353,10 @@ EXPORT_SYMBOL(PAGE_KERNEL); pgprot_t PAGE_KERNEL_LOCKED __read_mostly; pgprot_t PAGE_COPY __read_mostly; + +pgprot_t PAGE_SHARED __read_mostly; +EXPORT_SYMBOL(PAGE_SHARED); + pgprot_t PAGE_EXEC __read_mostly; unsigned long pg_iobits __read_mostly; @@ -1367,6 +1371,7 @@ static void prot_init_common(unsigned long page_none, unsigned long page_exec_bit) { PAGE_COPY = __pgprot(page_copy); + PAGE_SHARED = __pgprot(page_shared); protection_map[0x0] = __pgprot(page_none); protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit); -- cgit v1.2.2 From 9cc3a1ac9a819cadff05ca37bb7f208013a22035 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 21 Feb 2006 20:51:13 -0800 Subject: [SPARC64]: Make use of Niagara 256MB PTEs for kernel mappings. We use a bitmap, one bit for every 256MB of memory. If the bit is set we can use a 256MB PTE for linear mappings, else we have to use a 4MB PTE. SUN4V support is there, and we can very easily add support for Panther cpu 256MB PTEs in the future. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 82 ++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 69 insertions(+), 13 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index c7aa4404edca..b5869f00d2d1 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -45,6 +45,19 @@ extern void device_scan(void); +#define MAX_PHYS_ADDRESS (1UL << 42UL) +#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) +#define KPTE_BITMAP_BYTES \ + ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8) + +unsigned long kern_linear_pte_xor[2] __read_mostly; + +/* A bitmap, one bit for every 256MB of physical memory. If the bit + * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else + * if set we should use a 256MB page (via kern_linear_pte_xor[1]). 
+ */ +unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; + #define MAX_BANKS 32 static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; @@ -119,7 +132,6 @@ unsigned long phys_base __read_mostly; unsigned long kern_base __read_mostly; unsigned long kern_size __read_mostly; unsigned long pfn_base __read_mostly; -unsigned long kern_linear_pte_xor __read_mostly; /* get_new_mmu_context() uses "cache + 1". */ DEFINE_SPINLOCK(ctx_alloc_lock); @@ -878,6 +890,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) return end_pfn; } +static struct linux_prom64_registers pall[MAX_BANKS] __initdata; +static int pall_ents __initdata; + #ifdef CONFIG_DEBUG_PAGEALLOC static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot) { @@ -933,14 +948,41 @@ static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, return alloc_bytes; } -static struct linux_prom64_registers pall[MAX_BANKS] __initdata; -static int pall_ents __initdata; - extern unsigned int kvmap_linear_patch[1]; +#endif /* CONFIG_DEBUG_PAGEALLOC */ + +static void __init mark_kpte_bitmap(unsigned long start, unsigned long end) +{ + const unsigned long shift_256MB = 28; + const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL); + const unsigned long size_256MB = (1UL << shift_256MB); + + while (start < end) { + long remains; + + if (start & mask_256MB) { + start = (start + size_256MB) & ~mask_256MB; + continue; + } + + remains = end - start; + while (remains >= size_256MB) { + unsigned long index = start >> shift_256MB; + + __set_bit(index, kpte_linear_bitmap); + + start += size_256MB; + remains -= size_256MB; + } + } +} static void __init kernel_physical_mapping_init(void) { - unsigned long i, mem_alloced = 0UL; + unsigned long i; +#ifdef CONFIG_DEBUG_PAGEALLOC + unsigned long mem_alloced = 0UL; +#endif read_obp_memory("reg", &pall[0], &pall_ents); @@ -949,10 +991,16 @@ static void __init kernel_physical_mapping_init(void) phys_start = pall[i].phys_addr; phys_end = phys_start + pall[i].reg_size; + + mark_kpte_bitmap(phys_start, phys_end); + +#ifdef CONFIG_DEBUG_PAGEALLOC mem_alloced += kernel_map_range(phys_start, phys_end, PAGE_KERNEL); +#endif } +#ifdef CONFIG_DEBUG_PAGEALLOC printk("Allocated %ld bytes for kernel page tables.\n", mem_alloced); @@ -960,8 +1008,10 @@ static void __init kernel_physical_mapping_init(void) flushi(&kvmap_linear_patch[0]); __flush_tlb_all(); +#endif } +#ifdef CONFIG_DEBUG_PAGEALLOC void kernel_map_pages(struct page *page, int numpages, int enable) { unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; @@ -1172,9 +1222,7 @@ void __init paging_init(void) pages_avail = 0; last_valid_pfn = end_pfn = bootmem_init(&pages_avail); -#ifdef CONFIG_DEBUG_PAGEALLOC kernel_physical_mapping_init(); -#endif { unsigned long zones_size[MAX_NR_ZONES]; @@ -1413,10 +1461,13 @@ static void __init sun4u_pgprot_init(void) pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U | __ACCESS_BITS_4U | _PAGE_E_4U); - kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ + kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ 0xfffff80000000000; - kern_linear_pte_xor |= (_PAGE_CP_4U | _PAGE_CV_4U | - _PAGE_P_4U | _PAGE_W_4U); + kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U | + _PAGE_P_4U | _PAGE_W_4U); + + /* XXX Should use 256MB on Panther. 
XXX */ + kern_linear_pte_xor[1] = kern_linear_pte_xor[0]; _PAGE_SZBITS = _PAGE_SZBITS_4U; _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U | @@ -1454,10 +1505,15 @@ static void __init sun4v_pgprot_init(void) _PAGE_E = _PAGE_E_4V; _PAGE_CACHE = _PAGE_CACHE_4V; - kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ + kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ + 0xfffff80000000000; + kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V | + _PAGE_P_4V | _PAGE_W_4V); + + kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ 0xfffff80000000000; - kern_linear_pte_xor |= (_PAGE_CP_4V | _PAGE_CV_4V | - _PAGE_P_4V | _PAGE_W_4V); + kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V | + _PAGE_P_4V | _PAGE_W_4V); pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V | __ACCESS_BITS_4V | _PAGE_E_4V); -- cgit v1.2.2 From d7744a09504d5ae84edc8289a02254e1f2102410 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 21 Feb 2006 22:31:11 -0800 Subject: [SPARC64]: Create a seperate kernel TSB for 4MB/256MB mappings. It can map all of the linear kernel mappings with zero TSB hash conflicts for systems with 16GB or less ram. In such cases, on SUN4V, once we load up this TSB the first time with all the mappings, we never take a linear kernel mapping TLB miss ever again, the hypervisor handles them all. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index b5869f00d2d1..2a123135b042 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -58,6 +58,9 @@ unsigned long kern_linear_pte_xor[2] __read_mostly; */ unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; +/* A special kernel TSB for 4MB and 256MB linear mappings. */ +struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; + #define MAX_BANKS 32 static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; @@ -1086,6 +1089,7 @@ static void __init sun4v_ktsb_init(void) { unsigned long ktsb_pa; + /* First KTSB for PAGE_SIZE mappings. */ ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); switch (PAGE_SIZE) { @@ -1117,9 +1121,18 @@ static void __init sun4v_ktsb_init(void) ktsb_descr[0].tsb_base = ktsb_pa; ktsb_descr[0].resv = 0; - /* XXX When we have a kernel large page size TSB, describe - * XXX it in ktsb_descr[1] here. - */ + /* Second KTSB for 4MB/256MB mappings. */ + ktsb_pa = (kern_base + + ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); + + ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB; + ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB | + HV_PGSZ_MASK_256MB); + ktsb_descr[1].assoc = 1; + ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES; + ktsb_descr[1].ctx_idx = 0; + ktsb_descr[1].tsb_base = ktsb_pa; + ktsb_descr[1].resv = 0; } void __cpuinit sun4v_ktsb_register(void) @@ -1132,8 +1145,7 @@ void __cpuinit sun4v_ktsb_register(void) pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); func = HV_FAST_MMU_TSB_CTX0; - /* XXX set arg0 to 2 when we use ktsb_descr[1], see above XXX */ - arg0 = 1; + arg0 = 2; arg1 = pa; __asm__ __volatile__("ta %6" : "=&r" (func), "=&r" (arg0), "=&r" (arg1) @@ -1160,7 +1172,9 @@ void __init paging_init(void) kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; + /* Invalidate both kernel TSBs. 
*/ memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); + memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); if (tlb_type == hypervisor) sun4v_pgprot_init(); -- cgit v1.2.2 From b2bef4424cb4522f53e34d98d3deb0916478338b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 23 Feb 2006 01:55:55 -0800 Subject: [SPARC64]: Export _PAGE_E and _PAGE_CACHE to modules. SBUS flash driver needs it. Noticed by Fabbione. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 2a123135b042..16f0db38d932 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1423,8 +1423,12 @@ pgprot_t PAGE_EXEC __read_mostly; unsigned long pg_iobits __read_mostly; unsigned long _PAGE_IE __read_mostly; + unsigned long _PAGE_E __read_mostly; +EXPORT_SYMBOL(_PAGE_E); + unsigned long _PAGE_CACHE __read_mostly; +EXPORT_SYMBOL(_PAGE_CACHE); static void prot_init_common(unsigned long page_none, unsigned long page_shared, -- cgit v1.2.2 From a0663a79ad4faebe1db4a56e2e767b120b12333a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 23 Feb 2006 14:19:28 -0800 Subject: [SPARC64]: Fix TLB context allocation with SMT style shared TLBs. The context allocation scheme we use depends upon there being a 1<-->1 mapping from cpu to physical TLB for correctness. Chips like Niagara break this assumption. So what we do is notify all cpus with a cross call when the context version number changes, and if necessary this makes them allocate a valid context for the address space they are running at the time. Stress tested with make -j1024, make -j2048, and make -j4096 kernel builds on a 32-strand, 8 core, T2000 with 16GB of ram. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 16f0db38d932..ccf083aecb65 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -629,17 +629,20 @@ void __flush_dcache_range(unsigned long start, unsigned long end) * let the user have CTX 0 (nucleus) or we ever use a CTX * version of zero (and thus NO_CONTEXT would not be caught * by version mis-match tests in mmu_context.h). + * + * Always invoked with interrupts disabled. */ void get_new_mmu_context(struct mm_struct *mm) { unsigned long ctx, new_ctx; unsigned long orig_pgsz_bits; - + int new_version; spin_lock(&ctx_alloc_lock); orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); ctx = (tlb_context_cache + 1) & CTX_NR_MASK; new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); + new_version = 0; if (new_ctx >= (1 << CTX_NR_BITS)) { new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); if (new_ctx >= ctx) { @@ -662,6 +665,7 @@ void get_new_mmu_context(struct mm_struct *mm) mmu_context_bmap[i + 2] = 0; mmu_context_bmap[i + 3] = 0; } + new_version = 1; goto out; } } @@ -671,6 +675,9 @@ out: tlb_context_cache = new_ctx; mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; spin_unlock(&ctx_alloc_lock); + + if (unlikely(new_version)) + smp_new_mmu_context_version(); } void sparc_ultra_dump_itlb(void) -- cgit v1.2.2 From 7a591cfe4efef8a232e4938d44ae6693b319f6d7 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 26 Feb 2006 19:44:50 -0800 Subject: [SPARC64]: Avoid dcache-dirty page state management on sun4v. 
It is totally wasted work, since we have no D-cache aliasing issues on sun4v. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 50 ++++++++++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 22 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index ccf083aecb65..87d5d1af1adb 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -188,8 +188,9 @@ atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0); #endif #endif -__inline__ void flush_dcache_page_impl(struct page *page) +inline void flush_dcache_page_impl(struct page *page) { + BUG_ON(tlb_type == hypervisor); #ifdef CONFIG_DEBUG_DCFLUSH atomic_inc(&dcpage_flushes); #endif @@ -279,29 +280,31 @@ unsigned long _PAGE_SZBITS __read_mostly; void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { struct mm_struct *mm; - struct page *page; - unsigned long pfn; - unsigned long pg_flags; - - pfn = pte_pfn(pte); - if (pfn_valid(pfn) && - (page = pfn_to_page(pfn), page_mapping(page)) && - ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) { - int cpu = ((pg_flags >> PG_dcache_cpu_shift) & - PG_dcache_cpu_mask); - int this_cpu = get_cpu(); - - /* This is just to optimize away some function calls - * in the SMP case. - */ - if (cpu == this_cpu) - flush_dcache_page_impl(page); - else - smp_flush_dcache_page_impl(page, cpu); - clear_dcache_dirty_cpu(page, cpu); + if (tlb_type != hypervisor) { + unsigned long pfn = pte_pfn(pte); + unsigned long pg_flags; + struct page *page; + + if (pfn_valid(pfn) && + (page = pfn_to_page(pfn), page_mapping(page)) && + ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) { + int cpu = ((pg_flags >> PG_dcache_cpu_shift) & + PG_dcache_cpu_mask); + int this_cpu = get_cpu(); + + /* This is just to optimize away some function calls + * in the SMP case. + */ + if (cpu == this_cpu) + flush_dcache_page_impl(page); + else + smp_flush_dcache_page_impl(page, cpu); + + clear_dcache_dirty_cpu(page, cpu); - put_cpu(); + put_cpu(); + } } mm = vma->vm_mm; @@ -321,6 +324,9 @@ void flush_dcache_page(struct page *page) struct address_space *mapping; int this_cpu; + if (tlb_type == hypervisor) + return; + /* Do not bother with the expensive D-cache flush if it * is merely the zero page. The 'bigcore' testcase in GDB * causes this case to run millions of times. -- cgit v1.2.2 From 74ae998772041b62e9ad420d602e4f7dbb182cd6 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 5 Mar 2006 18:26:24 -0800 Subject: [SPARC64]: Simplify TSB insert checks. Don't try to avoid putting non-base page sized entries into the user TSB. It actually costs us more to check this than it helps. Eventually we'll have a multiple TSB scheme for user processes. Once a process starts using larger pages, we'll allocate and use such a TSB. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 87d5d1af1adb..5930e87dafbc 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -280,6 +280,8 @@ unsigned long _PAGE_SZBITS __read_mostly; void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { struct mm_struct *mm; + struct tsb *tsb; + unsigned long tag; if (tlb_type != hypervisor) { unsigned long pfn = pte_pfn(pte); @@ -308,15 +310,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p } mm = vma->vm_mm; - if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) { - struct tsb *tsb; - unsigned long tag; - - tsb = &mm->context.tsb[(address >> PAGE_SHIFT) & - (mm->context.tsb_nentries - 1UL)]; - tag = (address >> 22UL); - tsb_insert(tsb, tag, pte_val(pte)); - } + tsb = &mm->context.tsb[(address >> PAGE_SHIFT) & + (mm->context.tsb_nentries - 1UL)]; + tag = (address >> 22UL); + tsb_insert(tsb, tag, pte_val(pte)); } void flush_dcache_page(struct page *page) -- cgit v1.2.2 From f7c00338cfeef125032aa12aa8ebeacf9e117e81 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 5 Mar 2006 22:18:50 -0800 Subject: [SPARC64]: Fix loop termination in mark_kpte_bitmap() If we were aligned, but didn't have at least 256MB left to process, we would loop forever. Thanks to fabbione for the report and testing the fix. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 5930e87dafbc..9bbd0bf64af0 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -973,12 +973,15 @@ static void __init mark_kpte_bitmap(unsigned long start, unsigned long end) while (start < end) { long remains; + remains = end - start; + if (remains < size_256MB) + break; + if (start & mask_256MB) { start = (start + size_256MB) & ~mask_256MB; continue; } - remains = end - start; while (remains >= size_256MB) { unsigned long index = start >> shift_256MB; -- cgit v1.2.2 From a77754b4d0731321db266c6c60ffcd7c62757da5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 6 Mar 2006 19:59:50 -0800 Subject: [SPARC64]: Bulletproof MMU context locking. 1) Always spin_lock_init() in init_context(). The caller essentially clears it out, or copies the mm info from the parent. In both cases we need to explicitly initialize the spinlock. 2) Always do explicit IRQ disabling while taking mm->context.lock and ctx_alloc_lock. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 9bbd0bf64af0..a63939347b3d 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -639,9 +639,10 @@ void get_new_mmu_context(struct mm_struct *mm) { unsigned long ctx, new_ctx; unsigned long orig_pgsz_bits; + unsigned long flags; int new_version; - spin_lock(&ctx_alloc_lock); + spin_lock_irqsave(&ctx_alloc_lock, flags); orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); ctx = (tlb_context_cache + 1) & CTX_NR_MASK; new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); @@ -677,7 +678,7 @@ void get_new_mmu_context(struct mm_struct *mm) out: tlb_context_cache = new_ctx; mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; - spin_unlock(&ctx_alloc_lock); + spin_unlock_irqrestore(&ctx_alloc_lock, flags); if (unlikely(new_version)) smp_new_mmu_context_version(); -- cgit v1.2.2 From d1112018b4bc82adf5c8a9c15a08954328f023ae Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 8 Mar 2006 02:16:07 -0800 Subject: [SPARC64]: Move over to sparsemem. This has been pending for a long time, and the fact that we waste a ton of ram on some configurations kind of pushed things over the edge. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 134 ++++++++++++++++++++++++++++++++++++------------- 1 file changed, 98 insertions(+), 36 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index a63939347b3d..5f67b53b3a5b 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -130,11 +130,9 @@ static void __init read_obp_memory(const char *property, unsigned long *sparc64_valid_addr_bitmap __read_mostly; -/* Ugly, but necessary... -DaveM */ -unsigned long phys_base __read_mostly; +/* Kernel physical address base and size in bytes. */ unsigned long kern_base __read_mostly; unsigned long kern_size __read_mostly; -unsigned long pfn_base __read_mostly; /* get_new_mmu_context() uses "cache + 1". */ DEFINE_SPINLOCK(ctx_alloc_lock); @@ -368,16 +366,6 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end) } } -unsigned long page_to_pfn(struct page *page) -{ - return (unsigned long) ((page - mem_map) + pfn_base); -} - -struct page *pfn_to_page(unsigned long pfn) -{ - return (mem_map + (pfn - pfn_base)); -} - void show_mem(void) { printk("Mem-info:\n"); @@ -773,9 +761,78 @@ void sparc_ultra_dump_dtlb(void) extern unsigned long cmdline_memory_size; -unsigned long __init bootmem_init(unsigned long *pages_avail) +/* Find a free area for the bootmem map, avoiding the kernel image + * and the initial ramdisk. 
+ */ +static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn, + unsigned long end_pfn) { - unsigned long bootmap_size, start_pfn, end_pfn; + unsigned long avoid_start, avoid_end, bootmap_size; + int i; + + bootmap_size = ((end_pfn - start_pfn) + 7) / 8; + bootmap_size = ALIGN(bootmap_size, sizeof(long)); + + avoid_start = avoid_end = 0; +#ifdef CONFIG_BLK_DEV_INITRD + avoid_start = initrd_start; + avoid_end = PAGE_ALIGN(initrd_end); +#endif + +#ifdef CONFIG_DEBUG_BOOTMEM + prom_printf("choose_bootmap_pfn: kern[%lx:%lx] avoid[%lx:%lx]\n", + kern_base, PAGE_ALIGN(kern_base + kern_size), + avoid_start, avoid_end); +#endif + for (i = 0; i < pavail_ents; i++) { + unsigned long start, end; + + start = pavail[i].phys_addr; + end = start + pavail[i].reg_size; + + while (start < end) { + if (start >= kern_base && + start < PAGE_ALIGN(kern_base + kern_size)) { + start = PAGE_ALIGN(kern_base + kern_size); + continue; + } + if (start >= avoid_start && start < avoid_end) { + start = avoid_end; + continue; + } + + if ((end - start) < bootmap_size) + break; + + if (start < kern_base && + (start + bootmap_size) > kern_base) { + start = PAGE_ALIGN(kern_base + kern_size); + continue; + } + + if (start < avoid_start && + (start + bootmap_size) > avoid_start) { + start = avoid_end; + continue; + } + + /* OK, it doesn't overlap anything, use it. */ +#ifdef CONFIG_DEBUG_BOOTMEM + prom_printf("choose_bootmap_pfn: Using %lx [%lx]\n", + start >> PAGE_SHIFT, start); +#endif + return start >> PAGE_SHIFT; + } + } + + prom_printf("Cannot find free area for bootmap, aborting.\n"); + prom_halt(); +} + +static unsigned long __init bootmem_init(unsigned long *pages_avail, + unsigned long phys_base) +{ + unsigned long bootmap_size, end_pfn; unsigned long end_of_phys_memory = 0UL; unsigned long bootmap_pfn, bytes_avail, size; int i; @@ -813,14 +870,6 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) *pages_avail = bytes_avail >> PAGE_SHIFT; - /* Start with page aligned address of last symbol in kernel - * image. The kernel is hard mapped below PAGE_OFFSET in a - * 4MB locked TLB translation. - */ - start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT; - - bootmap_pfn = start_pfn; - end_pfn = end_of_phys_memory >> PAGE_SHIFT; #ifdef CONFIG_BLK_DEV_INITRD @@ -837,23 +886,23 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) "(0x%016lx > 0x%016lx)\ndisabling initrd\n", initrd_end, end_of_phys_memory); initrd_start = 0; - } - if (initrd_start) { - if (initrd_start >= (start_pfn << PAGE_SHIFT) && - initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE) - bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT; + initrd_end = 0; } } #endif /* Initialize the boot-time allocator. */ max_pfn = max_low_pfn = end_pfn; - min_low_pfn = pfn_base; + min_low_pfn = (phys_base >> PAGE_SHIFT); + + bootmap_pfn = choose_bootmap_pfn(min_low_pfn, end_pfn); #ifdef CONFIG_DEBUG_BOOTMEM prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n", min_low_pfn, bootmap_pfn, max_low_pfn); #endif - bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn); + bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, + (phys_base >> PAGE_SHIFT), + end_pfn); /* Now register the available physical memory with the * allocator. 
@@ -901,6 +950,20 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size); *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; + for (i = 0; i < pavail_ents; i++) { + unsigned long start_pfn, end_pfn; + + start_pfn = pavail[i].phys_addr >> PAGE_SHIFT; + end_pfn = (start_pfn + (pavail[i].reg_size >> PAGE_SHIFT)); +#ifdef CONFIG_DEBUG_BOOTMEM + prom_printf("memory_present(0, %lx, %lx)\n", + start_pfn, end_pfn); +#endif + memory_present(0, start_pfn, end_pfn); + } + + sparse_init(); + return end_pfn; } @@ -1180,7 +1243,7 @@ static void sun4v_pgprot_init(void); void __init paging_init(void) { - unsigned long end_pfn, pages_avail, shift; + unsigned long end_pfn, pages_avail, shift, phys_base; unsigned long real_end, i; kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; @@ -1211,8 +1274,6 @@ void __init paging_init(void) for (i = 0; i < pavail_ents; i++) phys_base = min(phys_base, pavail[i].phys_addr); - pfn_base = phys_base >> PAGE_SHIFT; - set_bit(0, mmu_context_bmap); shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); @@ -1248,7 +1309,9 @@ void __init paging_init(void) /* Setup bootmem... */ pages_avail = 0; - last_valid_pfn = end_pfn = bootmem_init(&pages_avail); + last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base); + + max_mapnr = last_valid_pfn - (phys_base >> PAGE_SHIFT); kernel_physical_mapping_init(); @@ -1261,7 +1324,7 @@ void __init paging_init(void) for (znum = 0; znum < MAX_NR_ZONES; znum++) zones_size[znum] = zholes_size[znum] = 0; - npages = end_pfn - pfn_base; + npages = end_pfn - (phys_base >> PAGE_SHIFT); zones_size[ZONE_DMA] = npages; zholes_size[ZONE_DMA] = npages - pages_avail; @@ -1336,7 +1399,6 @@ void __init mem_init(void) taint_real_pages(); - max_mapnr = last_valid_pfn - pfn_base; high_memory = __va(last_valid_pfn << PAGE_SHIFT); #ifdef CONFIG_DEBUG_BOOTMEM -- cgit v1.2.2 From 17b0e199a10184d8c5bbbd79a4cee993bb1fb257 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 8 Mar 2006 15:57:03 -0800 Subject: [SPARC64]: Fix 32-bit truncation which broke sparsemem. The page->flags manipulations done by the D-cache dirty state tracking was broken because the constants were not marked with "UL" to make them 64-bit, which means we were clobbering the upper 32-bits of page->flags all the time. This doesn't jive well with sparsemem which stores the section and indexing information in the top 32-bits of page->flags. This is yet another sparc64 bug which has been with us forever. While we're here, tidy up some things in bootmem_init() and paginig_init(): 1) Pass min_low_pfn to init_bootmem_node(), it's identical to (phys_base >> PAGE_SHIFT) but we should use consistent with the variable names we print in CONFIG_BOOTMEM_DEBUG 2) max_mapnr, although no longer used, was being set inaccurately, we shouldn't subtract pfn_base any more. 3) All the games with phys_base in the zones_*[] arrays we pass to free_area_init_node() are no longer necessary. Thanks to Josh Grebe and Fabbione for the bug reports and testing. Fix also verified locally on an SB2500 which had a memory layout that triggered the same problem. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 5f67b53b3a5b..b40f6477dea0 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -205,8 +205,8 @@ inline void flush_dcache_page_impl(struct page *page) } #define PG_dcache_dirty PG_arch_1 -#define PG_dcache_cpu_shift 24 -#define PG_dcache_cpu_mask (256 - 1) +#define PG_dcache_cpu_shift 24UL +#define PG_dcache_cpu_mask (256UL - 1UL) #if NR_CPUS > 256 #error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus @@ -901,8 +901,7 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail, min_low_pfn, bootmap_pfn, max_low_pfn); #endif bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, - (phys_base >> PAGE_SHIFT), - end_pfn); + min_low_pfn, end_pfn); /* Now register the available physical memory with the * allocator. @@ -1311,25 +1310,24 @@ void __init paging_init(void) pages_avail = 0; last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base); - max_mapnr = last_valid_pfn - (phys_base >> PAGE_SHIFT); + max_mapnr = last_valid_pfn; kernel_physical_mapping_init(); { unsigned long zones_size[MAX_NR_ZONES]; unsigned long zholes_size[MAX_NR_ZONES]; - unsigned long npages; int znum; for (znum = 0; znum < MAX_NR_ZONES; znum++) zones_size[znum] = zholes_size[znum] = 0; - npages = end_pfn - (phys_base >> PAGE_SHIFT); - zones_size[ZONE_DMA] = npages; - zholes_size[ZONE_DMA] = npages - pages_avail; + zones_size[ZONE_DMA] = end_pfn; + zholes_size[ZONE_DMA] = end_pfn - pages_avail; free_area_init_node(0, &contig_page_data, zones_size, - phys_base >> PAGE_SHIFT, zholes_size); + __pa(PAGE_OFFSET) >> PAGE_SHIFT, + zholes_size); } device_scan(); -- cgit v1.2.2 From 7a1ac5264108fc3ed22d17a3cdd76212ed1666d1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 16 Mar 2006 02:02:32 -0800 Subject: [SPARC64]: Fix and re-enable dynamic TSB sizing. This is good for up to %50 performance improvement of some test cases. The problem has been the race conditions, and hopefully I've plugged them all up here. 1) There was a serious race in switch_mm() wrt. lazy TLB switching to and from kernel threads. We could erroneously skip a tsb_context_switch() and thus use a stale TSB across a TSB grow event. There is a big comment now in that function describing exactly how it can happen. 2) All code paths that do something with the TSB need to be guarded with the mm->context.lock spinlock. This makes page table flushing paths properly synchronize with both TSB growing and TLB context changes. 3) TSB growing events are moved to the end of successful fault processing. Previously it was in update_mmu_cache() but that is deadlock prone. At the end of do_sparc64_fault() we hold no spinlocks that could deadlock the TSB grow sequence. We also have dropped the address space semaphore. While we're here, add prefetching to the copy_tsb() routine and put it in assembler into the tsb.S file. This piece of code is quite time critical. There are some small negative side effects to this code which can be improved upon. In particular we grab the mm->context.lock even for the tsb insert done by update_mmu_cache() now and that's a bit excessive. We can get rid of that locking, and the same lock taking in flush_tsb_user(), by disabling PSTATE_IE around the whole operation including the capturing of the tsb pointer and tsb_nentries value. 
That would work because anyone growing the TSB won't free up the old TSB until all cpus respond to the TSB change cross call. I'm not quite so confident in that optimization to put it in right now, but eventually we might be able to and the description is here for reference. This code seems very solid now. It passes several parallel GCC bootstrap builds, and our favorite "nut cruncher" stress test which is a full "make -j8192" build of a "make allmodconfig" kernel. That puts about 256 processes on each cpu's run queue, makes lots of process cpu migrations occur, causes lots of page table and TLB flushing activity, incurs many context version number changes, and it swaps the machine real far out to disk even though there is 16GB of ram on this test system. :-) Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index b40f6477dea0..d703b67bc7b9 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -279,7 +279,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p { struct mm_struct *mm; struct tsb *tsb; - unsigned long tag; + unsigned long tag, flags; if (tlb_type != hypervisor) { unsigned long pfn = pte_pfn(pte); @@ -308,10 +308,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p } mm = vma->vm_mm; + + spin_lock_irqsave(&mm->context.lock, flags); + tsb = &mm->context.tsb[(address >> PAGE_SHIFT) & (mm->context.tsb_nentries - 1UL)]; tag = (address >> 22UL); tsb_insert(tsb, tag, pte_val(pte)); + + spin_unlock_irqrestore(&mm->context.lock, flags); } void flush_dcache_page(struct page *page) -- cgit v1.2.2 From 9b4006dcf6a8c43bd482b9c1ec576f0ed270ef23 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 18 Mar 2006 18:12:42 -0800 Subject: [SPARC64]: Use SLAB caches for TSB tables. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index d703b67bc7b9..a1a364e537c7 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -165,6 +165,8 @@ static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) clear_page(addr); } +extern void tsb_cache_init(void); + void pgtable_cache_init(void) { pgtable_cache = kmem_cache_create("pgtable_cache", @@ -174,9 +176,10 @@ void pgtable_cache_init(void) zero_ctor, NULL); if (!pgtable_cache) { - prom_printf("pgtable_cache_init(): Could not create!\n"); + prom_printf("Could not create pgtable_cache\n"); prom_halt(); } + tsb_cache_init(); } #ifdef CONFIG_DEBUG_DCFLUSH -- cgit v1.2.2 From 88d7079458f87d6f2d2261b2f87b7b9416019f5e Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 18 Mar 2006 19:16:23 -0800 Subject: [SPARC64]: Allow CONFIG_MEMORY_HOTPLUG to build. online_page() is straightforward, and then add a dummy remove_memory() that returns -EINVAL just like i386. There is no point in implementing remove_memory() since __remove_pages() has no implementation either. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'arch/sparc64/mm/init.c') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index a1a364e537c7..c2b556106fc1 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1805,3 +1805,21 @@ void __flush_tlb_all(void) __asm__ __volatile__("wrpr %0, 0, %%pstate" : : "r" (pstate)); } + +#ifdef CONFIG_MEMORY_HOTPLUG + +void online_page(struct page *page) +{ + ClearPageReserved(page); + set_page_count(page, 0); + free_cold_page(page); + totalram_pages++; + num_physpages++; +} + +int remove_memory(u64 start, u64 size) +{ + return -EINVAL; +} + +#endif /* CONFIG_MEMORY_HOTPLUG */ -- cgit v1.2.2
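Editorial note: the 256MB linear-mapping bookkeeping introduced in the Niagara 256MB PTE patch above (one bit per 256MB of physical memory selecting between kern_linear_pte_xor[0] and kern_linear_pte_xor[1]) is compact enough to model outside the kernel. The sketch below is a minimal user-space C approximation of that bitmap, with the mark_kpte_bitmap() loop-termination fix folded in. It assumes a 64-bit host; the helper names (mark_256mb, linear_pte_bits, BITS_PER_LONG as defined here) and the stand-in template values are illustrative only, and in the real kernel the lookup happens in the assembler TLB-miss path patched via kvmap_linear_patch, not in C. One bit per 256MB keeps the whole table at 2KB for the full 42-bit (4TB) physical address space.

#include <stdio.h>

#define MAX_PHYS_ADDRESS	(1UL << 42)		/* matches the patch; assumes a 64-bit host */
#define KPTE_CHUNK_SHIFT	28			/* 256MB chunks */
#define KPTE_CHUNK_SZ		(1UL << KPTE_CHUNK_SHIFT)
#define KPTE_BITMAP_BYTES	((MAX_PHYS_ADDRESS / KPTE_CHUNK_SZ) / 8)
#define BITS_PER_LONG		(8 * sizeof(unsigned long))

static unsigned long kpte_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
static unsigned long pte_xor[2];	/* [0] = 4MB template, [1] = 256MB template */

/* Mark every fully aligned 256MB chunk of [start, end) as eligible for a
 * 256MB mapping, mirroring mark_kpte_bitmap() with the loop-termination fix
 * applied: bail out as soon as less than one whole chunk remains. */
static void mark_256mb(unsigned long start, unsigned long end)
{
	while (start < end) {
		long remains = end - start;

		if (remains < (long) KPTE_CHUNK_SZ)
			break;

		if (start & (KPTE_CHUNK_SZ - 1)) {
			start = (start + KPTE_CHUNK_SZ) & ~(KPTE_CHUNK_SZ - 1);
			continue;
		}

		while (remains >= (long) KPTE_CHUNK_SZ) {
			unsigned long idx = start >> KPTE_CHUNK_SHIFT;

			kpte_bitmap[idx / BITS_PER_LONG] |= 1UL << (idx % BITS_PER_LONG);
			start += KPTE_CHUNK_SZ;
			remains -= KPTE_CHUNK_SZ;
		}
	}
}

/* Pick the PTE template for a linear-mapping physical address: a set bit
 * means the surrounding 256MB chunk may be mapped with a 256MB page. */
static unsigned long linear_pte_bits(unsigned long paddr)
{
	unsigned long idx = paddr >> KPTE_CHUNK_SHIFT;
	int use_256mb = (kpte_bitmap[idx / BITS_PER_LONG] >> (idx % BITS_PER_LONG)) & 1;

	return pte_xor[use_256mb];
}

int main(void)
{
	pte_xor[0] = 0x4000;	/* stand-in values, not real TTE bits */
	pte_xor[1] = 0x8000;

	mark_256mb(0x10000000UL, 0x50000000UL);		/* one aligned 1GB bank */

	printf("%#lx -> %#lx\n", 0x20000000UL, linear_pte_bits(0x20000000UL));	/* inside the bank: 256MB template */
	printf("%#lx -> %#lx\n", 0x60000000UL, linear_pte_bits(0x60000000UL));	/* outside: falls back to 4MB template */
	return 0;
}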