author     Linus Torvalds <torvalds@linux-foundation.org>  2012-07-26 21:14:11 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-07-26 21:14:11 -0400
commit     6ee127b7dd63afe4d6d0a58293786bf4bf336850 (patch)
tree       3c4b9f2122c346758f5cf401979a76defbf89b02 /arch/sparc
parent     1e30c1b3866968ff584ae3cd3909ef93957bb215 (diff)
parent     427f23cb3abc21c3784df8ae907c4c3a31d885d1 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
Pull sparc updates from David Miller:
"Nothing much this merge window for sparc.
1) Fix FPU state management in sparc32, from Tkhai Kirill.
2) More sparc32 mm layer code cleanups, largely more side effects of
the sun4c support removal in the 3.5 merge window. From Sam Ravnborg.
3) Remove unused code in sparc64, from Bjorn Helgaas and Kirill Tkhai.
4) Some declaration and comment tidies in PROM support code, from
Geert Uytterhoeven."
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc: (24 commits)
sparc32, copy_thread: Clear TIF_USEDFPU flag of created task instead of current
sparc32: delete dead code in show_mem()
sparc32: move kmap_init() to highmem.c
sparc32: move probe_memory() to srmmu.c
sparc32: drop unused BAD_PAGE stuff
sparc32: centralize all mmu context handling in srmmu.c
sparc32: drop quicklist
sparc32: drop sparc model check in paging_init
sparc32: drop sparc_unmapped_base
sparc32,leon: drop leon_init()
sparc32: drop fixmap.h
sparc32: fixmap.h cleanup
sparc32: drop unused kmap_atomic_to_page
sparc32: drop swapper_pg_dir
sparc32: beautify srmmu_inherit_prom_mappings()
sparc32: use void * in nocache get/free
sparc32: fix coding-style in srmmu.c
sparc32: sort includes in srmmu.c
sparc32: define a few srmmu functions __init
sparc64: remove unused function straddles_64bit_va_hole()
...
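
For item 1 in the summary above (clearing TIF_USEDFPU on the created task instead of the current one), the change reduces to the copy_thread() branch shown in the arch/sparc/kernel/process_32.c hunk further down; a trimmed sketch of just that branch, not the whole function:

#ifdef CONFIG_SMP
	/* FPU must be disabled on SMP. */
	childregs->psr &= ~PSR_EF;
	/* Previously clear_thread_flag(TIF_USEDFPU) cleared the flag on the
	 * current (parent) task; it is now cleared on the child task 'p'. */
	clear_tsk_thread_flag(p, TIF_USEDFPU);
#endif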
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/include/asm/fixmap.h          | 110
-rw-r--r--  arch/sparc/include/asm/highmem.h         |   3
-rw-r--r--  arch/sparc/include/asm/leon.h            |   1
-rw-r--r--  arch/sparc/include/asm/mmu_context_32.h  |   8
-rw-r--r--  arch/sparc/include/asm/page_32.h         |   3
-rw-r--r--  arch/sparc/include/asm/pgalloc_32.h      |  29
-rw-r--r--  arch/sparc/include/asm/pgtable_32.h      |  44
-rw-r--r--  arch/sparc/include/asm/vaddrs.h          |  22
-rw-r--r--  arch/sparc/kernel/head_32.S              |   2
-rw-r--r--  arch/sparc/kernel/leon_kernel.c          |  16
-rw-r--r--  arch/sparc/kernel/process_32.c           |   4
-rw-r--r--  arch/sparc/kernel/setup_32.c             |   1
-rw-r--r--  arch/sparc/kernel/sys_sparc_64.c         |  17
-rw-r--r--  arch/sparc/lib/NG2memcpy.S               |  72
-rw-r--r--  arch/sparc/lib/U1memcpy.S                |   4
-rw-r--r--  arch/sparc/lib/copy_page.S               |  56
-rw-r--r--  arch/sparc/mm/fault_32.c                 |  18
-rw-r--r--  arch/sparc/mm/highmem.c                  |  42
-rw-r--r--  arch/sparc/mm/init_32.c                  |  58
-rw-r--r--  arch/sparc/mm/srmmu.c                    | 332
-rw-r--r--  arch/sparc/prom/init_32.c                |   7
-rw-r--r--  arch/sparc/prom/init_64.c                |   4
22 files changed, 331 insertions(+), 522 deletions(-)
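
A recurring change in the header hunks below ("sparc32: use void * in nocache get/free") is the nocache allocator moving from unsigned long to void *. For reference, the new prototypes and one updated caller, extracted from the pgalloc_32.h hunk; this is only an excerpt, not a complete header:

void *srmmu_get_nocache(int size, int align);
void srmmu_free_nocache(void *addr, int size);

/* Callers such as free_pgd_fast() drop their unsigned long casts: */
static inline void free_pgd_fast(pgd_t *pgd)
{
	srmmu_free_nocache(pgd, SRMMU_PGD_TABLE_SIZE);
}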
diff --git a/arch/sparc/include/asm/fixmap.h b/arch/sparc/include/asm/fixmap.h
deleted file mode 100644
index f18fc0755adf..000000000000
--- a/arch/sparc/include/asm/fixmap.h
+++ /dev/null
@@ -1,110 +0,0 @@ | |||
1 | /* | ||
2 | * fixmap.h: compile-time virtual memory allocation | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 1998 Ingo Molnar | ||
9 | * | ||
10 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 | ||
11 | */ | ||
12 | |||
13 | #ifndef _ASM_FIXMAP_H | ||
14 | #define _ASM_FIXMAP_H | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <asm/page.h> | ||
18 | #ifdef CONFIG_HIGHMEM | ||
19 | #include <linux/threads.h> | ||
20 | #include <asm/kmap_types.h> | ||
21 | #endif | ||
22 | |||
23 | /* | ||
24 | * Here we define all the compile-time 'special' virtual | ||
25 | * addresses. The point is to have a constant address at | ||
26 | * compile time, but to set the physical address only | ||
27 | * in the boot process. We allocate these special addresses | ||
28 | * from the top of unused virtual memory (0xfd000000 - 1 page) backwards. | ||
29 | * Also this lets us do fail-safe vmalloc(), we | ||
30 | * can guarantee that these special addresses and | ||
31 | * vmalloc()-ed addresses never overlap. | ||
32 | * | ||
33 | * these 'compile-time allocated' memory buffers are | ||
34 | * fixed-size 4k pages. (or larger if used with an increment | ||
35 | * highger than 1) use fixmap_set(idx,phys) to associate | ||
36 | * physical memory with fixmap indices. | ||
37 | * | ||
38 | * TLB entries of such buffers will not be flushed across | ||
39 | * task switches. | ||
40 | */ | ||
41 | |||
42 | /* | ||
43 | * on UP currently we will have no trace of the fixmap mechanism, | ||
44 | * no page table allocations, etc. This might change in the | ||
45 | * future, say framebuffers for the console driver(s) could be | ||
46 | * fix-mapped? | ||
47 | */ | ||
48 | enum fixed_addresses { | ||
49 | FIX_HOLE, | ||
50 | #ifdef CONFIG_HIGHMEM | ||
51 | FIX_KMAP_BEGIN, | ||
52 | FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, | ||
53 | #endif | ||
54 | __end_of_fixed_addresses | ||
55 | }; | ||
56 | |||
57 | extern void __set_fixmap (enum fixed_addresses idx, | ||
58 | unsigned long phys, pgprot_t flags); | ||
59 | |||
60 | #define set_fixmap(idx, phys) \ | ||
61 | __set_fixmap(idx, phys, PAGE_KERNEL) | ||
62 | /* | ||
63 | * Some hardware wants to get fixmapped without caching. | ||
64 | */ | ||
65 | #define set_fixmap_nocache(idx, phys) \ | ||
66 | __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) | ||
67 | /* | ||
68 | * used by vmalloc.c. | ||
69 | * | ||
70 | * Leave one empty page between IO pages at 0xfd000000 and | ||
71 | * the start of the fixmap. | ||
72 | */ | ||
73 | #define FIXADDR_TOP (0xfcfff000UL) | ||
74 | #define FIXADDR_SIZE ((__end_of_fixed_addresses) << PAGE_SHIFT) | ||
75 | #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) | ||
76 | |||
77 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | ||
78 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | ||
79 | |||
80 | extern void __this_fixmap_does_not_exist(void); | ||
81 | |||
82 | /* | ||
83 | * 'index to address' translation. If anyone tries to use the idx | ||
84 | * directly without tranlation, we catch the bug with a NULL-deference | ||
85 | * kernel oops. Illegal ranges of incoming indices are caught too. | ||
86 | */ | ||
87 | static inline unsigned long fix_to_virt(const unsigned int idx) | ||
88 | { | ||
89 | /* | ||
90 | * this branch gets completely eliminated after inlining, | ||
91 | * except when someone tries to use fixaddr indices in an | ||
92 | * illegal way. (such as mixing up address types or using | ||
93 | * out-of-range indices). | ||
94 | * | ||
95 | * If it doesn't get removed, the linker will complain | ||
96 | * loudly with a reasonably clear error message.. | ||
97 | */ | ||
98 | if (idx >= __end_of_fixed_addresses) | ||
99 | __this_fixmap_does_not_exist(); | ||
100 | |||
101 | return __fix_to_virt(idx); | ||
102 | } | ||
103 | |||
104 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | ||
105 | { | ||
106 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | ||
107 | return __virt_to_fix(vaddr); | ||
108 | } | ||
109 | |||
110 | #endif | ||
diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h
index 3b6e00dd96e5..4f9e15c757e2 100644
--- a/arch/sparc/include/asm/highmem.h
+++ b/arch/sparc/include/asm/highmem.h
@@ -21,7 +21,6 @@ | |||
21 | #ifdef __KERNEL__ | 21 | #ifdef __KERNEL__ |
22 | 22 | ||
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <asm/fixmap.h> | ||
25 | #include <asm/vaddrs.h> | 24 | #include <asm/vaddrs.h> |
26 | #include <asm/kmap_types.h> | 25 | #include <asm/kmap_types.h> |
27 | #include <asm/pgtable.h> | 26 | #include <asm/pgtable.h> |
@@ -29,7 +28,6 @@ | |||
29 | /* declarations for highmem.c */ | 28 | /* declarations for highmem.c */ |
30 | extern unsigned long highstart_pfn, highend_pfn; | 29 | extern unsigned long highstart_pfn, highend_pfn; |
31 | 30 | ||
32 | extern pte_t *kmap_pte; | ||
33 | extern pgprot_t kmap_prot; | 31 | extern pgprot_t kmap_prot; |
34 | extern pte_t *pkmap_page_table; | 32 | extern pte_t *pkmap_page_table; |
35 | 33 | ||
@@ -72,7 +70,6 @@ static inline void kunmap(struct page *page) | |||
72 | 70 | ||
73 | extern void *kmap_atomic(struct page *page); | 71 | extern void *kmap_atomic(struct page *page); |
74 | extern void __kunmap_atomic(void *kvaddr); | 72 | extern void __kunmap_atomic(void *kvaddr); |
75 | extern struct page *kmap_atomic_to_page(void *vaddr); | ||
76 | 73 | ||
77 | #define flush_cache_kmaps() flush_cache_all() | 74 | #define flush_cache_kmaps() flush_cache_all() |
78 | 75 | ||
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index 3375c6293893..15a716934e4d 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -82,7 +82,6 @@ static inline unsigned long leon_load_reg(unsigned long paddr) | |||
82 | #define LEON_BYPASS_LOAD_PA(x) leon_load_reg((unsigned long)(x)) | 82 | #define LEON_BYPASS_LOAD_PA(x) leon_load_reg((unsigned long)(x)) |
83 | #define LEON_BYPASS_STORE_PA(x, v) leon_store_reg((unsigned long)(x), (unsigned long)(v)) | 83 | #define LEON_BYPASS_STORE_PA(x, v) leon_store_reg((unsigned long)(x), (unsigned long)(v)) |
84 | 84 | ||
85 | extern void leon_init(void); | ||
86 | extern void leon_switch_mm(void); | 85 | extern void leon_switch_mm(void); |
87 | extern void leon_init_IRQ(void); | 86 | extern void leon_init_IRQ(void); |
88 | 87 | ||
diff --git a/arch/sparc/include/asm/mmu_context_32.h b/arch/sparc/include/asm/mmu_context_32.h
index 01456c900720..2df2a9be8f6d 100644
--- a/arch/sparc/include/asm/mmu_context_32.h
+++ b/arch/sparc/include/asm/mmu_context_32.h
@@ -9,14 +9,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | |||
9 | { | 9 | { |
10 | } | 10 | } |
11 | 11 | ||
12 | /* | 12 | /* Initialize a new mmu context. This is invoked when a new |
13 | * Initialize a new mmu context. This is invoked when a new | ||
14 | * address space instance (unique or shared) is instantiated. | 13 | * address space instance (unique or shared) is instantiated. |
15 | */ | 14 | */ |
16 | #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) | 15 | int init_new_context(struct task_struct *tsk, struct mm_struct *mm); |
17 | 16 | ||
18 | /* | 17 | /* Destroy a dead context. This occurs when mmput drops the |
19 | * Destroy a dead context. This occurs when mmput drops the | ||
20 | * mm_users count to zero, the mmaps have been released, and | 18 | * mm_users count to zero, the mmaps have been released, and |
21 | * all the page tables have been flushed. Our job is to destroy | 19 | * all the page tables have been flushed. Our job is to destroy |
22 | * any remaining processor-specific state. | 20 | * any remaining processor-specific state. |
diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
index fab78a308ebf..f82a1f36b655 100644
--- a/arch/sparc/include/asm/page_32.h
+++ b/arch/sparc/include/asm/page_32.h
@@ -107,8 +107,7 @@ typedef unsigned long iopgprot_t; | |||
107 | 107 | ||
108 | typedef struct page *pgtable_t; | 108 | typedef struct page *pgtable_t; |
109 | 109 | ||
110 | extern unsigned long sparc_unmapped_base; | 110 | #define TASK_UNMAPPED_BASE 0x50000000 |
111 | #define TASK_UNMAPPED_BASE sparc_unmapped_base | ||
112 | 111 | ||
113 | #else /* !(__ASSEMBLY__) */ | 112 | #else /* !(__ASSEMBLY__) */ |
114 | 113 | ||
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
index e5b169b46d21..9b1c36de0f18 100644
--- a/arch/sparc/include/asm/pgalloc_32.h
+++ b/arch/sparc/include/asm/pgalloc_32.h
@@ -11,28 +11,15 @@ | |||
11 | 11 | ||
12 | struct page; | 12 | struct page; |
13 | 13 | ||
14 | extern struct pgtable_cache_struct { | 14 | void *srmmu_get_nocache(int size, int align); |
15 | unsigned long *pgd_cache; | 15 | void srmmu_free_nocache(void *addr, int size); |
16 | unsigned long *pte_cache; | ||
17 | unsigned long pgtable_cache_sz; | ||
18 | unsigned long pgd_cache_sz; | ||
19 | } pgt_quicklists; | ||
20 | |||
21 | unsigned long srmmu_get_nocache(int size, int align); | ||
22 | void srmmu_free_nocache(unsigned long vaddr, int size); | ||
23 | |||
24 | #define pgd_quicklist (pgt_quicklists.pgd_cache) | ||
25 | #define pmd_quicklist ((unsigned long *)0) | ||
26 | #define pte_quicklist (pgt_quicklists.pte_cache) | ||
27 | #define pgtable_cache_size (pgt_quicklists.pgtable_cache_sz) | ||
28 | #define pgd_cache_size (pgt_quicklists.pgd_cache_sz) | ||
29 | 16 | ||
30 | #define check_pgt_cache() do { } while (0) | 17 | #define check_pgt_cache() do { } while (0) |
31 | 18 | ||
32 | pgd_t *get_pgd_fast(void); | 19 | pgd_t *get_pgd_fast(void); |
33 | static inline void free_pgd_fast(pgd_t *pgd) | 20 | static inline void free_pgd_fast(pgd_t *pgd) |
34 | { | 21 | { |
35 | srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE); | 22 | srmmu_free_nocache(pgd, SRMMU_PGD_TABLE_SIZE); |
36 | } | 23 | } |
37 | 24 | ||
38 | #define pgd_free(mm, pgd) free_pgd_fast(pgd) | 25 | #define pgd_free(mm, pgd) free_pgd_fast(pgd) |
@@ -50,13 +37,13 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp) | |||
50 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, | 37 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, |
51 | unsigned long address) | 38 | unsigned long address) |
52 | { | 39 | { |
53 | return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, | 40 | return srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, |
54 | SRMMU_PMD_TABLE_SIZE); | 41 | SRMMU_PMD_TABLE_SIZE); |
55 | } | 42 | } |
56 | 43 | ||
57 | static inline void free_pmd_fast(pmd_t * pmd) | 44 | static inline void free_pmd_fast(pmd_t * pmd) |
58 | { | 45 | { |
59 | srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE); | 46 | srmmu_free_nocache(pmd, SRMMU_PMD_TABLE_SIZE); |
60 | } | 47 | } |
61 | 48 | ||
62 | #define pmd_free(mm, pmd) free_pmd_fast(pmd) | 49 | #define pmd_free(mm, pmd) free_pmd_fast(pmd) |
@@ -73,13 +60,13 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address); | |||
73 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | 60 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
74 | unsigned long address) | 61 | unsigned long address) |
75 | { | 62 | { |
76 | return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE); | 63 | return srmmu_get_nocache(PTE_SIZE, PTE_SIZE); |
77 | } | 64 | } |
78 | 65 | ||
79 | 66 | ||
80 | static inline void free_pte_fast(pte_t *pte) | 67 | static inline void free_pte_fast(pte_t *pte) |
81 | { | 68 | { |
82 | srmmu_free_nocache((unsigned long)pte, PTE_SIZE); | 69 | srmmu_free_nocache(pte, PTE_SIZE); |
83 | } | 70 | } |
84 | 71 | ||
85 | #define pte_free_kernel(mm, pte) free_pte_fast(pte) | 72 | #define pte_free_kernel(mm, pte) free_pte_fast(pte) |
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index cbbbed5cb3aa..6fc13483f702 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -52,8 +52,9 @@ extern unsigned long calc_highpages(void); | |||
52 | #define PAGE_READONLY SRMMU_PAGE_RDONLY | 52 | #define PAGE_READONLY SRMMU_PAGE_RDONLY |
53 | #define PAGE_KERNEL SRMMU_PAGE_KERNEL | 53 | #define PAGE_KERNEL SRMMU_PAGE_KERNEL |
54 | 54 | ||
55 | /* Top-level page directory */ | 55 | /* Top-level page directory - dummy used by init-mm. |
56 | extern pgd_t swapper_pg_dir[1024]; | 56 | * srmmu.c will assign the real one (which is dynamically sized) */ |
57 | #define swapper_pg_dir NULL | ||
57 | 58 | ||
58 | extern void paging_init(void); | 59 | extern void paging_init(void); |
59 | 60 | ||
@@ -78,8 +79,6 @@ extern unsigned long ptr_in_current_pgd; | |||
78 | #define __S110 PAGE_SHARED | 79 | #define __S110 PAGE_SHARED |
79 | #define __S111 PAGE_SHARED | 80 | #define __S111 PAGE_SHARED |
80 | 81 | ||
81 | extern int num_contexts; | ||
82 | |||
83 | /* First physical page can be anywhere, the following is needed so that | 82 | /* First physical page can be anywhere, the following is needed so that |
84 | * va-->pa and vice versa conversions work properly without performance | 83 | * va-->pa and vice versa conversions work properly without performance |
85 | * hit for all __pa()/__va() operations. | 84 | * hit for all __pa()/__va() operations. |
@@ -88,18 +87,11 @@ extern unsigned long phys_base; | |||
88 | extern unsigned long pfn_base; | 87 | extern unsigned long pfn_base; |
89 | 88 | ||
90 | /* | 89 | /* |
91 | * BAD_PAGETABLE is used when we need a bogus page-table, while | ||
92 | * BAD_PAGE is used for a bogus page. | ||
93 | * | ||
94 | * ZERO_PAGE is a global shared page that is always zero: used | 90 | * ZERO_PAGE is a global shared page that is always zero: used |
95 | * for zero-mapped memory areas etc.. | 91 | * for zero-mapped memory areas etc.. |
96 | */ | 92 | */ |
97 | extern pte_t * __bad_pagetable(void); | ||
98 | extern pte_t __bad_page(void); | ||
99 | extern unsigned long empty_zero_page; | 93 | extern unsigned long empty_zero_page; |
100 | 94 | ||
101 | #define BAD_PAGETABLE __bad_pagetable() | ||
102 | #define BAD_PAGE __bad_page() | ||
103 | #define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page)) | 95 | #define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page)) |
104 | 96 | ||
105 | /* | 97 | /* |
@@ -398,36 +390,6 @@ static inline pte_t pgoff_to_pte(unsigned long pgoff) | |||
398 | */ | 390 | */ |
399 | #define PTE_FILE_MAX_BITS 24 | 391 | #define PTE_FILE_MAX_BITS 24 |
400 | 392 | ||
401 | /* | ||
402 | */ | ||
403 | struct ctx_list { | ||
404 | struct ctx_list *next; | ||
405 | struct ctx_list *prev; | ||
406 | unsigned int ctx_number; | ||
407 | struct mm_struct *ctx_mm; | ||
408 | }; | ||
409 | |||
410 | extern struct ctx_list *ctx_list_pool; /* Dynamically allocated */ | ||
411 | extern struct ctx_list ctx_free; /* Head of free list */ | ||
412 | extern struct ctx_list ctx_used; /* Head of used contexts list */ | ||
413 | |||
414 | #define NO_CONTEXT -1 | ||
415 | |||
416 | static inline void remove_from_ctx_list(struct ctx_list *entry) | ||
417 | { | ||
418 | entry->next->prev = entry->prev; | ||
419 | entry->prev->next = entry->next; | ||
420 | } | ||
421 | |||
422 | static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry) | ||
423 | { | ||
424 | entry->next = head; | ||
425 | (entry->prev = head->prev)->next = entry; | ||
426 | head->prev = entry; | ||
427 | } | ||
428 | #define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry) | ||
429 | #define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry) | ||
430 | |||
431 | static inline unsigned long | 393 | static inline unsigned long |
432 | __get_phys (unsigned long addr) | 394 | __get_phys (unsigned long addr) |
433 | { | 395 | { |
diff --git a/arch/sparc/include/asm/vaddrs.h b/arch/sparc/include/asm/vaddrs.h
index da6535d88a72..c3dbcf902034 100644
--- a/arch/sparc/include/asm/vaddrs.h
+++ b/arch/sparc/include/asm/vaddrs.h
@@ -30,6 +30,28 @@ | |||
30 | */ | 30 | */ |
31 | #define SRMMU_NOCACHE_ALCRATIO 64 /* 256 pages per 64MB of system RAM */ | 31 | #define SRMMU_NOCACHE_ALCRATIO 64 /* 256 pages per 64MB of system RAM */ |
32 | 32 | ||
33 | #ifndef __ASSEMBLY__ | ||
34 | #include <asm/kmap_types.h> | ||
35 | |||
36 | enum fixed_addresses { | ||
37 | FIX_HOLE, | ||
38 | #ifdef CONFIG_HIGHMEM | ||
39 | FIX_KMAP_BEGIN, | ||
40 | FIX_KMAP_END = (KM_TYPE_NR * NR_CPUS), | ||
41 | #endif | ||
42 | __end_of_fixed_addresses | ||
43 | }; | ||
44 | #endif | ||
45 | |||
46 | /* Leave one empty page between IO pages at 0xfd000000 and | ||
47 | * the top of the fixmap. | ||
48 | */ | ||
49 | #define FIXADDR_TOP (0xfcfff000UL) | ||
50 | #define FIXADDR_SIZE ((FIX_KMAP_END + 1) << PAGE_SHIFT) | ||
51 | #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) | ||
52 | |||
53 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | ||
54 | |||
33 | #define SUN4M_IOBASE_VADDR 0xfd000000 /* Base for mapping pages */ | 55 | #define SUN4M_IOBASE_VADDR 0xfd000000 /* Base for mapping pages */ |
34 | #define IOBASE_VADDR 0xfe000000 | 56 | #define IOBASE_VADDR 0xfe000000 |
35 | #define IOBASE_END 0xfe600000 | 57 | #define IOBASE_END 0xfe600000 |
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index afeb1d770303..3d92c0a8f6c4 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -58,8 +58,6 @@ sun4e_notsup: | |||
58 | /* This was the only reasonable way I could think of to properly align | 58 | /* This was the only reasonable way I could think of to properly align |
59 | * these page-table data structures. | 59 | * these page-table data structures. |
60 | */ | 60 | */ |
61 | .globl swapper_pg_dir | ||
62 | swapper_pg_dir: .skip PAGE_SIZE | ||
63 | .globl empty_zero_page | 61 | .globl empty_zero_page |
64 | empty_zero_page: .skip PAGE_SIZE | 62 | empty_zero_page: .skip PAGE_SIZE |
65 | 63 | ||
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index e34e2c40c060..f8b6eee40bde 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -486,17 +486,6 @@ void __init leon_trans_init(struct device_node *dp) | |||
486 | } | 486 | } |
487 | } | 487 | } |
488 | 488 | ||
489 | void __initdata (*prom_amba_init)(struct device_node *dp, struct device_node ***nextp) = 0; | ||
490 | |||
491 | void __init leon_node_init(struct device_node *dp, struct device_node ***nextp) | ||
492 | { | ||
493 | if (prom_amba_init && | ||
494 | strcmp(dp->type, "ambapp") == 0 && | ||
495 | strcmp(dp->name, "ambapp0") == 0) { | ||
496 | prom_amba_init(dp, nextp); | ||
497 | } | ||
498 | } | ||
499 | |||
500 | #ifdef CONFIG_SMP | 489 | #ifdef CONFIG_SMP |
501 | void leon_clear_profile_irq(int cpu) | 490 | void leon_clear_profile_irq(int cpu) |
502 | { | 491 | { |
@@ -522,8 +511,3 @@ void __init leon_init_IRQ(void) | |||
522 | sparc_config.clear_clock_irq = leon_clear_clock_irq; | 511 | sparc_config.clear_clock_irq = leon_clear_clock_irq; |
523 | sparc_config.load_profile_irq = leon_load_profile_irq; | 512 | sparc_config.load_profile_irq = leon_load_profile_irq; |
524 | } | 513 | } |
525 | |||
526 | void __init leon_init(void) | ||
527 | { | ||
528 | of_pdt_build_more = &leon_node_init; | ||
529 | } | ||
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index cb36e82dcd5d..14006d8aca28 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -333,9 +333,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, | |||
333 | put_psr(get_psr() | PSR_EF); | 333 | put_psr(get_psr() | PSR_EF); |
334 | fpsave(&p->thread.float_regs[0], &p->thread.fsr, | 334 | fpsave(&p->thread.float_regs[0], &p->thread.fsr, |
335 | &p->thread.fpqueue[0], &p->thread.fpqdepth); | 335 | &p->thread.fpqueue[0], &p->thread.fpqdepth); |
336 | #ifdef CONFIG_SMP | ||
337 | clear_thread_flag(TIF_USEDFPU); | ||
338 | #endif | ||
339 | } | 336 | } |
340 | 337 | ||
341 | /* | 338 | /* |
@@ -413,6 +410,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, | |||
413 | #ifdef CONFIG_SMP | 410 | #ifdef CONFIG_SMP |
414 | /* FPU must be disabled on SMP. */ | 411 | /* FPU must be disabled on SMP. */ |
415 | childregs->psr &= ~PSR_EF; | 412 | childregs->psr &= ~PSR_EF; |
413 | clear_tsk_thread_flag(p, TIF_USEDFPU); | ||
416 | #endif | 414 | #endif |
417 | 415 | ||
418 | /* Set the return value for the child. */ | 416 | /* Set the return value for the child. */ |
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index efe3e64bba38..38bf80a22f02 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -371,7 +371,6 @@ void __init setup_arch(char **cmdline_p) | |||
371 | (*(linux_dbvec->teach_debugger))(); | 371 | (*(linux_dbvec->teach_debugger))(); |
372 | } | 372 | } |
373 | 373 | ||
374 | init_mm.context = (unsigned long) NO_CONTEXT; | ||
375 | init_task.thread.kregs = &fake_swapper_regs; | 374 | init_task.thread.kregs = &fake_swapper_regs; |
376 | 375 | ||
377 | /* Run-time patch instructions to match the cpu model */ | 376 | /* Run-time patch instructions to match the cpu model */ |
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 275f74fd6f6a..c38e5aaae56f 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -66,23 +66,6 @@ static inline int invalid_64bit_range(unsigned long addr, unsigned long len) | |||
66 | return 0; | 66 | return 0; |
67 | } | 67 | } |
68 | 68 | ||
69 | /* Does start,end straddle the VA-space hole? */ | ||
70 | static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end) | ||
71 | { | ||
72 | unsigned long va_exclude_start, va_exclude_end; | ||
73 | |||
74 | va_exclude_start = VA_EXCLUDE_START; | ||
75 | va_exclude_end = VA_EXCLUDE_END; | ||
76 | |||
77 | if (likely(start < va_exclude_start && end < va_exclude_start)) | ||
78 | return 0; | ||
79 | |||
80 | if (likely(start >= va_exclude_end && end >= va_exclude_end)) | ||
81 | return 0; | ||
82 | |||
83 | return 1; | ||
84 | } | ||
85 | |||
86 | /* These functions differ from the default implementations in | 69 | /* These functions differ from the default implementations in |
87 | * mm/mmap.c in two ways: | 70 | * mm/mmap.c in two ways: |
88 | * | 71 | * |
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
index 0aed75653b50..03eadf66b0d3 100644
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -90,49 +90,49 @@ | |||
90 | faligndata %x7, %x8, %f14; | 90 | faligndata %x7, %x8, %f14; |
91 | 91 | ||
92 | #define FREG_MOVE_1(x0) \ | 92 | #define FREG_MOVE_1(x0) \ |
93 | fmovd %x0, %f0; | 93 | fsrc2 %x0, %f0; |
94 | #define FREG_MOVE_2(x0, x1) \ | 94 | #define FREG_MOVE_2(x0, x1) \ |
95 | fmovd %x0, %f0; \ | 95 | fsrc2 %x0, %f0; \ |
96 | fmovd %x1, %f2; | 96 | fsrc2 %x1, %f2; |
97 | #define FREG_MOVE_3(x0, x1, x2) \ | 97 | #define FREG_MOVE_3(x0, x1, x2) \ |
98 | fmovd %x0, %f0; \ | 98 | fsrc2 %x0, %f0; \ |
99 | fmovd %x1, %f2; \ | 99 | fsrc2 %x1, %f2; \ |
100 | fmovd %x2, %f4; | 100 | fsrc2 %x2, %f4; |
101 | #define FREG_MOVE_4(x0, x1, x2, x3) \ | 101 | #define FREG_MOVE_4(x0, x1, x2, x3) \ |
102 | fmovd %x0, %f0; \ | 102 | fsrc2 %x0, %f0; \ |
103 | fmovd %x1, %f2; \ | 103 | fsrc2 %x1, %f2; \ |
104 | fmovd %x2, %f4; \ | 104 | fsrc2 %x2, %f4; \ |
105 | fmovd %x3, %f6; | 105 | fsrc2 %x3, %f6; |
106 | #define FREG_MOVE_5(x0, x1, x2, x3, x4) \ | 106 | #define FREG_MOVE_5(x0, x1, x2, x3, x4) \ |
107 | fmovd %x0, %f0; \ | 107 | fsrc2 %x0, %f0; \ |
108 | fmovd %x1, %f2; \ | 108 | fsrc2 %x1, %f2; \ |
109 | fmovd %x2, %f4; \ | 109 | fsrc2 %x2, %f4; \ |
110 | fmovd %x3, %f6; \ | 110 | fsrc2 %x3, %f6; \ |
111 | fmovd %x4, %f8; | 111 | fsrc2 %x4, %f8; |
112 | #define FREG_MOVE_6(x0, x1, x2, x3, x4, x5) \ | 112 | #define FREG_MOVE_6(x0, x1, x2, x3, x4, x5) \ |
113 | fmovd %x0, %f0; \ | 113 | fsrc2 %x0, %f0; \ |
114 | fmovd %x1, %f2; \ | 114 | fsrc2 %x1, %f2; \ |
115 | fmovd %x2, %f4; \ | 115 | fsrc2 %x2, %f4; \ |
116 | fmovd %x3, %f6; \ | 116 | fsrc2 %x3, %f6; \ |
117 | fmovd %x4, %f8; \ | 117 | fsrc2 %x4, %f8; \ |
118 | fmovd %x5, %f10; | 118 | fsrc2 %x5, %f10; |
119 | #define FREG_MOVE_7(x0, x1, x2, x3, x4, x5, x6) \ | 119 | #define FREG_MOVE_7(x0, x1, x2, x3, x4, x5, x6) \ |
120 | fmovd %x0, %f0; \ | 120 | fsrc2 %x0, %f0; \ |
121 | fmovd %x1, %f2; \ | 121 | fsrc2 %x1, %f2; \ |
122 | fmovd %x2, %f4; \ | 122 | fsrc2 %x2, %f4; \ |
123 | fmovd %x3, %f6; \ | 123 | fsrc2 %x3, %f6; \ |
124 | fmovd %x4, %f8; \ | 124 | fsrc2 %x4, %f8; \ |
125 | fmovd %x5, %f10; \ | 125 | fsrc2 %x5, %f10; \ |
126 | fmovd %x6, %f12; | 126 | fsrc2 %x6, %f12; |
127 | #define FREG_MOVE_8(x0, x1, x2, x3, x4, x5, x6, x7) \ | 127 | #define FREG_MOVE_8(x0, x1, x2, x3, x4, x5, x6, x7) \ |
128 | fmovd %x0, %f0; \ | 128 | fsrc2 %x0, %f0; \ |
129 | fmovd %x1, %f2; \ | 129 | fsrc2 %x1, %f2; \ |
130 | fmovd %x2, %f4; \ | 130 | fsrc2 %x2, %f4; \ |
131 | fmovd %x3, %f6; \ | 131 | fsrc2 %x3, %f6; \ |
132 | fmovd %x4, %f8; \ | 132 | fsrc2 %x4, %f8; \ |
133 | fmovd %x5, %f10; \ | 133 | fsrc2 %x5, %f10; \ |
134 | fmovd %x6, %f12; \ | 134 | fsrc2 %x6, %f12; \ |
135 | fmovd %x7, %f14; | 135 | fsrc2 %x7, %f14; |
136 | #define FREG_LOAD_1(base, x0) \ | 136 | #define FREG_LOAD_1(base, x0) \ |
137 | EX_LD(LOAD(ldd, base + 0x00, %x0)) | 137 | EX_LD(LOAD(ldd, base + 0x00, %x0)) |
138 | #define FREG_LOAD_2(base, x0, x1) \ | 138 | #define FREG_LOAD_2(base, x0, x1) \ |
diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S
index bafd2fc07acb..b67142b7768e 100644
--- a/arch/sparc/lib/U1memcpy.S
+++ b/arch/sparc/lib/U1memcpy.S
@@ -109,7 +109,7 @@ | |||
109 | #define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \ | 109 | #define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \ |
110 | subcc %left, 8, %left; \ | 110 | subcc %left, 8, %left; \ |
111 | bl,pn %xcc, 95f; \ | 111 | bl,pn %xcc, 95f; \ |
112 | fsrc1 %f0, %f1; | 112 | fsrc2 %f0, %f1; |
113 | 113 | ||
114 | #define UNEVEN_VISCHUNK(dest, f0, f1, left) \ | 114 | #define UNEVEN_VISCHUNK(dest, f0, f1, left) \ |
115 | UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \ | 115 | UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \ |
@@ -201,7 +201,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
201 | andn %o1, (0x40 - 1), %o1 | 201 | andn %o1, (0x40 - 1), %o1 |
202 | and %g2, 7, %g2 | 202 | and %g2, 7, %g2 |
203 | andncc %g3, 0x7, %g3 | 203 | andncc %g3, 0x7, %g3 |
204 | fmovd %f0, %f2 | 204 | fsrc2 %f0, %f2 |
205 | sub %g3, 0x8, %g3 | 205 | sub %g3, 0x8, %g3 |
206 | sub %o2, %GLOBAL_SPARE, %o2 | 206 | sub %o2, %GLOBAL_SPARE, %o2 |
207 | 207 | ||
diff --git a/arch/sparc/lib/copy_page.S b/arch/sparc/lib/copy_page.S
index b243d3b606ba..4d2df328e514 100644
--- a/arch/sparc/lib/copy_page.S
+++ b/arch/sparc/lib/copy_page.S
@@ -34,10 +34,10 @@ | |||
34 | #endif | 34 | #endif |
35 | 35 | ||
36 | #define TOUCH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7) \ | 36 | #define TOUCH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7) \ |
37 | fmovd %reg0, %f48; fmovd %reg1, %f50; \ | 37 | fsrc2 %reg0, %f48; fsrc2 %reg1, %f50; \ |
38 | fmovd %reg2, %f52; fmovd %reg3, %f54; \ | 38 | fsrc2 %reg2, %f52; fsrc2 %reg3, %f54; \ |
39 | fmovd %reg4, %f56; fmovd %reg5, %f58; \ | 39 | fsrc2 %reg4, %f56; fsrc2 %reg5, %f58; \ |
40 | fmovd %reg6, %f60; fmovd %reg7, %f62; | 40 | fsrc2 %reg6, %f60; fsrc2 %reg7, %f62; |
41 | 41 | ||
42 | .text | 42 | .text |
43 | 43 | ||
@@ -104,60 +104,60 @@ cheetah_copy_page_insn: | |||
104 | prefetch [%o1 + 0x140], #one_read | 104 | prefetch [%o1 + 0x140], #one_read |
105 | ldd [%o1 + 0x010], %f4 | 105 | ldd [%o1 + 0x010], %f4 |
106 | prefetch [%o1 + 0x180], #one_read | 106 | prefetch [%o1 + 0x180], #one_read |
107 | fmovd %f0, %f16 | 107 | fsrc2 %f0, %f16 |
108 | ldd [%o1 + 0x018], %f6 | 108 | ldd [%o1 + 0x018], %f6 |
109 | fmovd %f2, %f18 | 109 | fsrc2 %f2, %f18 |
110 | ldd [%o1 + 0x020], %f8 | 110 | ldd [%o1 + 0x020], %f8 |
111 | fmovd %f4, %f20 | 111 | fsrc2 %f4, %f20 |
112 | ldd [%o1 + 0x028], %f10 | 112 | ldd [%o1 + 0x028], %f10 |
113 | fmovd %f6, %f22 | 113 | fsrc2 %f6, %f22 |
114 | ldd [%o1 + 0x030], %f12 | 114 | ldd [%o1 + 0x030], %f12 |
115 | fmovd %f8, %f24 | 115 | fsrc2 %f8, %f24 |
116 | ldd [%o1 + 0x038], %f14 | 116 | ldd [%o1 + 0x038], %f14 |
117 | fmovd %f10, %f26 | 117 | fsrc2 %f10, %f26 |
118 | ldd [%o1 + 0x040], %f0 | 118 | ldd [%o1 + 0x040], %f0 |
119 | 1: ldd [%o1 + 0x048], %f2 | 119 | 1: ldd [%o1 + 0x048], %f2 |
120 | fmovd %f12, %f28 | 120 | fsrc2 %f12, %f28 |
121 | ldd [%o1 + 0x050], %f4 | 121 | ldd [%o1 + 0x050], %f4 |
122 | fmovd %f14, %f30 | 122 | fsrc2 %f14, %f30 |
123 | stda %f16, [%o0] ASI_BLK_P | 123 | stda %f16, [%o0] ASI_BLK_P |
124 | ldd [%o1 + 0x058], %f6 | 124 | ldd [%o1 + 0x058], %f6 |
125 | fmovd %f0, %f16 | 125 | fsrc2 %f0, %f16 |
126 | ldd [%o1 + 0x060], %f8 | 126 | ldd [%o1 + 0x060], %f8 |
127 | fmovd %f2, %f18 | 127 | fsrc2 %f2, %f18 |
128 | ldd [%o1 + 0x068], %f10 | 128 | ldd [%o1 + 0x068], %f10 |
129 | fmovd %f4, %f20 | 129 | fsrc2 %f4, %f20 |
130 | ldd [%o1 + 0x070], %f12 | 130 | ldd [%o1 + 0x070], %f12 |
131 | fmovd %f6, %f22 | 131 | fsrc2 %f6, %f22 |
132 | ldd [%o1 + 0x078], %f14 | 132 | ldd [%o1 + 0x078], %f14 |
133 | fmovd %f8, %f24 | 133 | fsrc2 %f8, %f24 |
134 | ldd [%o1 + 0x080], %f0 | 134 | ldd [%o1 + 0x080], %f0 |
135 | prefetch [%o1 + 0x180], #one_read | 135 | prefetch [%o1 + 0x180], #one_read |
136 | fmovd %f10, %f26 | 136 | fsrc2 %f10, %f26 |
137 | subcc %o2, 1, %o2 | 137 | subcc %o2, 1, %o2 |
138 | add %o0, 0x40, %o0 | 138 | add %o0, 0x40, %o0 |
139 | bne,pt %xcc, 1b | 139 | bne,pt %xcc, 1b |
140 | add %o1, 0x40, %o1 | 140 | add %o1, 0x40, %o1 |
141 | 141 | ||
142 | ldd [%o1 + 0x048], %f2 | 142 | ldd [%o1 + 0x048], %f2 |
143 | fmovd %f12, %f28 | 143 | fsrc2 %f12, %f28 |
144 | ldd [%o1 + 0x050], %f4 | 144 | ldd [%o1 + 0x050], %f4 |
145 | fmovd %f14, %f30 | 145 | fsrc2 %f14, %f30 |
146 | stda %f16, [%o0] ASI_BLK_P | 146 | stda %f16, [%o0] ASI_BLK_P |
147 | ldd [%o1 + 0x058], %f6 | 147 | ldd [%o1 + 0x058], %f6 |
148 | fmovd %f0, %f16 | 148 | fsrc2 %f0, %f16 |
149 | ldd [%o1 + 0x060], %f8 | 149 | ldd [%o1 + 0x060], %f8 |
150 | fmovd %f2, %f18 | 150 | fsrc2 %f2, %f18 |
151 | ldd [%o1 + 0x068], %f10 | 151 | ldd [%o1 + 0x068], %f10 |
152 | fmovd %f4, %f20 | 152 | fsrc2 %f4, %f20 |
153 | ldd [%o1 + 0x070], %f12 | 153 | ldd [%o1 + 0x070], %f12 |
154 | fmovd %f6, %f22 | 154 | fsrc2 %f6, %f22 |
155 | add %o0, 0x40, %o0 | 155 | add %o0, 0x40, %o0 |
156 | ldd [%o1 + 0x078], %f14 | 156 | ldd [%o1 + 0x078], %f14 |
157 | fmovd %f8, %f24 | 157 | fsrc2 %f8, %f24 |
158 | fmovd %f10, %f26 | 158 | fsrc2 %f10, %f26 |
159 | fmovd %f12, %f28 | 159 | fsrc2 %f12, %f28 |
160 | fmovd %f14, %f30 | 160 | fsrc2 %f14, %f30 |
161 | stda %f16, [%o0] ASI_BLK_P | 161 | stda %f16, [%o0] ASI_BLK_P |
162 | membar #Sync | 162 | membar #Sync |
163 | VISExitHalf | 163 | VISExitHalf |
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index f46cf6be3370..77ac917be152 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -32,24 +32,6 @@ | |||
32 | 32 | ||
33 | int show_unhandled_signals = 1; | 33 | int show_unhandled_signals = 1; |
34 | 34 | ||
35 | /* At boot time we determine these two values necessary for setting | ||
36 | * up the segment maps and page table entries (pte's). | ||
37 | */ | ||
38 | |||
39 | int num_contexts; | ||
40 | |||
41 | /* Return how much physical memory we have. */ | ||
42 | unsigned long probe_memory(void) | ||
43 | { | ||
44 | unsigned long total = 0; | ||
45 | int i; | ||
46 | |||
47 | for (i = 0; sp_banks[i].num_bytes; i++) | ||
48 | total += sp_banks[i].num_bytes; | ||
49 | |||
50 | return total; | ||
51 | } | ||
52 | |||
53 | static void unhandled_fault(unsigned long, struct task_struct *, | 35 | static void unhandled_fault(unsigned long, struct task_struct *, |
54 | struct pt_regs *) __attribute__ ((noreturn)); | 36 | struct pt_regs *) __attribute__ ((noreturn)); |
55 | 37 | ||
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index 055c66cf1bf4..449f864f0cef 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -22,13 +22,31 @@ | |||
22 | * shared by CPUs, and so precious, and establishing them requires IPI. | 22 | * shared by CPUs, and so precious, and establishing them requires IPI. |
23 | * Atomic kmaps are lightweight and we may have NCPUS more of them. | 23 | * Atomic kmaps are lightweight and we may have NCPUS more of them. |
24 | */ | 24 | */ |
25 | #include <linux/mm.h> | ||
26 | #include <linux/highmem.h> | 25 | #include <linux/highmem.h> |
27 | #include <linux/export.h> | 26 | #include <linux/export.h> |
28 | #include <asm/pgalloc.h> | 27 | #include <linux/mm.h> |
28 | |||
29 | #include <asm/cacheflush.h> | 29 | #include <asm/cacheflush.h> |
30 | #include <asm/tlbflush.h> | 30 | #include <asm/tlbflush.h> |
31 | #include <asm/fixmap.h> | 31 | #include <asm/pgalloc.h> |
32 | #include <asm/vaddrs.h> | ||
33 | |||
34 | pgprot_t kmap_prot; | ||
35 | |||
36 | static pte_t *kmap_pte; | ||
37 | |||
38 | void __init kmap_init(void) | ||
39 | { | ||
40 | unsigned long address; | ||
41 | pmd_t *dir; | ||
42 | |||
43 | address = __fix_to_virt(FIX_KMAP_BEGIN); | ||
44 | dir = pmd_offset(pgd_offset_k(address), address); | ||
45 | |||
46 | /* cache the first kmap pte */ | ||
47 | kmap_pte = pte_offset_kernel(dir, address); | ||
48 | kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE); | ||
49 | } | ||
32 | 50 | ||
33 | void *kmap_atomic(struct page *page) | 51 | void *kmap_atomic(struct page *page) |
34 | { | 52 | { |
@@ -110,21 +128,3 @@ void __kunmap_atomic(void *kvaddr) | |||
110 | pagefault_enable(); | 128 | pagefault_enable(); |
111 | } | 129 | } |
112 | EXPORT_SYMBOL(__kunmap_atomic); | 130 | EXPORT_SYMBOL(__kunmap_atomic); |
113 | |||
114 | /* We may be fed a pagetable here by ptep_to_xxx and others. */ | ||
115 | struct page *kmap_atomic_to_page(void *ptr) | ||
116 | { | ||
117 | unsigned long idx, vaddr = (unsigned long)ptr; | ||
118 | pte_t *pte; | ||
119 | |||
120 | if (vaddr < SRMMU_NOCACHE_VADDR) | ||
121 | return virt_to_page(ptr); | ||
122 | if (vaddr < PKMAP_BASE) | ||
123 | return pfn_to_page(__nocache_pa(vaddr) >> PAGE_SHIFT); | ||
124 | BUG_ON(vaddr < FIXADDR_START); | ||
125 | BUG_ON(vaddr > FIXADDR_TOP); | ||
126 | |||
127 | idx = virt_to_fix(vaddr); | ||
128 | pte = kmap_pte - (idx - FIX_KMAP_BEGIN); | ||
129 | return pte_page(*pte); | ||
130 | } | ||
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index ef5c779ec855..dde85ef1c56d 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -45,9 +45,6 @@ unsigned long pfn_base; | |||
45 | EXPORT_SYMBOL(pfn_base); | 45 | EXPORT_SYMBOL(pfn_base); |
46 | 46 | ||
47 | struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1]; | 47 | struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1]; |
48 | unsigned long sparc_unmapped_base; | ||
49 | |||
50 | struct pgtable_cache_struct pgt_quicklists; | ||
51 | 48 | ||
52 | /* Initial ramdisk setup */ | 49 | /* Initial ramdisk setup */ |
53 | extern unsigned int sparc_ramdisk_image; | 50 | extern unsigned int sparc_ramdisk_image; |
@@ -55,19 +52,6 @@ extern unsigned int sparc_ramdisk_size; | |||
55 | 52 | ||
56 | unsigned long highstart_pfn, highend_pfn; | 53 | unsigned long highstart_pfn, highend_pfn; |
57 | 54 | ||
58 | pte_t *kmap_pte; | ||
59 | pgprot_t kmap_prot; | ||
60 | |||
61 | #define kmap_get_fixmap_pte(vaddr) \ | ||
62 | pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)) | ||
63 | |||
64 | void __init kmap_init(void) | ||
65 | { | ||
66 | /* cache the first kmap pte */ | ||
67 | kmap_pte = kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN)); | ||
68 | kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE); | ||
69 | } | ||
70 | |||
71 | void show_mem(unsigned int filter) | 55 | void show_mem(unsigned int filter) |
72 | { | 56 | { |
73 | printk("Mem-info:\n"); | 57 | printk("Mem-info:\n"); |
@@ -76,33 +60,8 @@ void show_mem(unsigned int filter) | |||
76 | nr_swap_pages << (PAGE_SHIFT-10)); | 60 | nr_swap_pages << (PAGE_SHIFT-10)); |
77 | printk("%ld pages of RAM\n", totalram_pages); | 61 | printk("%ld pages of RAM\n", totalram_pages); |
78 | printk("%ld free pages\n", nr_free_pages()); | 62 | printk("%ld free pages\n", nr_free_pages()); |
79 | #if 0 /* undefined pgtable_cache_size, pgd_cache_size */ | ||
80 | printk("%ld pages in page table cache\n",pgtable_cache_size); | ||
81 | #ifndef CONFIG_SMP | ||
82 | if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d) | ||
83 | printk("%ld entries in page dir cache\n",pgd_cache_size); | ||
84 | #endif | ||
85 | #endif | ||
86 | } | 63 | } |
87 | 64 | ||
88 | void __init sparc_context_init(int numctx) | ||
89 | { | ||
90 | int ctx; | ||
91 | |||
92 | ctx_list_pool = __alloc_bootmem(numctx * sizeof(struct ctx_list), SMP_CACHE_BYTES, 0UL); | ||
93 | |||
94 | for(ctx = 0; ctx < numctx; ctx++) { | ||
95 | struct ctx_list *clist; | ||
96 | |||
97 | clist = (ctx_list_pool + ctx); | ||
98 | clist->ctx_number = ctx; | ||
99 | clist->ctx_mm = NULL; | ||
100 | } | ||
101 | ctx_free.next = ctx_free.prev = &ctx_free; | ||
102 | ctx_used.next = ctx_used.prev = &ctx_used; | ||
103 | for(ctx = 0; ctx < numctx; ctx++) | ||
104 | add_to_free_ctxlist(ctx_list_pool + ctx); | ||
105 | } | ||
106 | 65 | ||
107 | extern unsigned long cmdline_memory_size; | 66 | extern unsigned long cmdline_memory_size; |
108 | unsigned long last_valid_pfn; | 67 | unsigned long last_valid_pfn; |
@@ -292,22 +251,7 @@ extern void device_scan(void); | |||
292 | 251 | ||
293 | void __init paging_init(void) | 252 | void __init paging_init(void) |
294 | { | 253 | { |
295 | switch(sparc_cpu_model) { | 254 | srmmu_paging_init(); |
296 | case sparc_leon: | ||
297 | leon_init(); | ||
298 | /* fall through */ | ||
299 | case sun4m: | ||
300 | case sun4d: | ||
301 | srmmu_paging_init(); | ||
302 | sparc_unmapped_base = 0x50000000; | ||
303 | break; | ||
304 | default: | ||
305 | prom_printf("paging_init: Cannot init paging on this Sparc\n"); | ||
306 | prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model); | ||
307 | prom_printf("paging_init: Halting...\n"); | ||
308 | prom_halt(); | ||
309 | } | ||
310 | |||
311 | prom_build_devicetree(); | 255 | prom_build_devicetree(); |
312 | of_fill_in_cpu_data(); | 256 | of_fill_in_cpu_data(); |
313 | device_scan(); | 257 | device_scan(); |
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 62e3f5773303..c38bb72e3e80 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -8,45 +8,45 @@ | |||
8 | * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org) | 8 | * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org) |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/kernel.h> | 11 | #include <linux/seq_file.h> |
12 | #include <linux/mm.h> | ||
13 | #include <linux/vmalloc.h> | ||
14 | #include <linux/pagemap.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/spinlock.h> | 12 | #include <linux/spinlock.h> |
17 | #include <linux/bootmem.h> | 13 | #include <linux/bootmem.h> |
18 | #include <linux/fs.h> | 14 | #include <linux/pagemap.h> |
19 | #include <linux/seq_file.h> | 15 | #include <linux/vmalloc.h> |
20 | #include <linux/kdebug.h> | 16 | #include <linux/kdebug.h> |
17 | #include <linux/kernel.h> | ||
18 | #include <linux/init.h> | ||
21 | #include <linux/log2.h> | 19 | #include <linux/log2.h> |
22 | #include <linux/gfp.h> | 20 | #include <linux/gfp.h> |
21 | #include <linux/fs.h> | ||
22 | #include <linux/mm.h> | ||
23 | 23 | ||
24 | #include <asm/bitext.h> | 24 | #include <asm/mmu_context.h> |
25 | #include <asm/page.h> | 25 | #include <asm/cacheflush.h> |
26 | #include <asm/tlbflush.h> | ||
27 | #include <asm/io-unit.h> | ||
26 | #include <asm/pgalloc.h> | 28 | #include <asm/pgalloc.h> |
27 | #include <asm/pgtable.h> | 29 | #include <asm/pgtable.h> |
28 | #include <asm/io.h> | 30 | #include <asm/bitext.h> |
29 | #include <asm/vaddrs.h> | 31 | #include <asm/vaddrs.h> |
30 | #include <asm/traps.h> | ||
31 | #include <asm/smp.h> | ||
32 | #include <asm/mbus.h> | ||
33 | #include <asm/cache.h> | 32 | #include <asm/cache.h> |
33 | #include <asm/traps.h> | ||
34 | #include <asm/oplib.h> | 34 | #include <asm/oplib.h> |
35 | #include <asm/mbus.h> | ||
36 | #include <asm/page.h> | ||
35 | #include <asm/asi.h> | 37 | #include <asm/asi.h> |
36 | #include <asm/msi.h> | 38 | #include <asm/msi.h> |
37 | #include <asm/mmu_context.h> | 39 | #include <asm/smp.h> |
38 | #include <asm/io-unit.h> | 40 | #include <asm/io.h> |
39 | #include <asm/cacheflush.h> | ||
40 | #include <asm/tlbflush.h> | ||
41 | 41 | ||
42 | /* Now the cpu specific definitions. */ | 42 | /* Now the cpu specific definitions. */ |
43 | #include <asm/viking.h> | 43 | #include <asm/turbosparc.h> |
44 | #include <asm/mxcc.h> | ||
45 | #include <asm/ross.h> | ||
46 | #include <asm/tsunami.h> | 44 | #include <asm/tsunami.h> |
45 | #include <asm/viking.h> | ||
47 | #include <asm/swift.h> | 46 | #include <asm/swift.h> |
48 | #include <asm/turbosparc.h> | ||
49 | #include <asm/leon.h> | 47 | #include <asm/leon.h> |
48 | #include <asm/mxcc.h> | ||
49 | #include <asm/ross.h> | ||
50 | 50 | ||
51 | #include "srmmu.h" | 51 | #include "srmmu.h" |
52 | 52 | ||
@@ -55,10 +55,6 @@ static unsigned int hwbug_bitmask; | |||
55 | int vac_cache_size; | 55 | int vac_cache_size; |
56 | int vac_line_size; | 56 | int vac_line_size; |
57 | 57 | ||
58 | struct ctx_list *ctx_list_pool; | ||
59 | struct ctx_list ctx_free; | ||
60 | struct ctx_list ctx_used; | ||
61 | |||
62 | extern struct resource sparc_iomap; | 58 | extern struct resource sparc_iomap; |
63 | 59 | ||
64 | extern unsigned long last_valid_pfn; | 60 | extern unsigned long last_valid_pfn; |
@@ -136,8 +132,8 @@ void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep) | |||
136 | } | 132 | } |
137 | } | 133 | } |
138 | 134 | ||
139 | /* Find an entry in the third-level page table.. */ | 135 | /* Find an entry in the third-level page table.. */ |
140 | pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address) | 136 | pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address) |
141 | { | 137 | { |
142 | void *pte; | 138 | void *pte; |
143 | 139 | ||
@@ -151,55 +147,61 @@ pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address) | |||
151 | * align: bytes, number to align at. | 147 | * align: bytes, number to align at. |
152 | * Returns the virtual address of the allocated area. | 148 | * Returns the virtual address of the allocated area. |
153 | */ | 149 | */ |
154 | static unsigned long __srmmu_get_nocache(int size, int align) | 150 | static void *__srmmu_get_nocache(int size, int align) |
155 | { | 151 | { |
156 | int offset; | 152 | int offset; |
153 | unsigned long addr; | ||
157 | 154 | ||
158 | if (size < SRMMU_NOCACHE_BITMAP_SHIFT) { | 155 | if (size < SRMMU_NOCACHE_BITMAP_SHIFT) { |
159 | printk("Size 0x%x too small for nocache request\n", size); | 156 | printk(KERN_ERR "Size 0x%x too small for nocache request\n", |
157 | size); | ||
160 | size = SRMMU_NOCACHE_BITMAP_SHIFT; | 158 | size = SRMMU_NOCACHE_BITMAP_SHIFT; |
161 | } | 159 | } |
162 | if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) { | 160 | if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) { |
163 | printk("Size 0x%x unaligned int nocache request\n", size); | 161 | printk(KERN_ERR "Size 0x%x unaligned int nocache request\n", |
164 | size += SRMMU_NOCACHE_BITMAP_SHIFT-1; | 162 | size); |
163 | size += SRMMU_NOCACHE_BITMAP_SHIFT - 1; | ||
165 | } | 164 | } |
166 | BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX); | 165 | BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX); |
167 | 166 | ||
168 | offset = bit_map_string_get(&srmmu_nocache_map, | 167 | offset = bit_map_string_get(&srmmu_nocache_map, |
169 | size >> SRMMU_NOCACHE_BITMAP_SHIFT, | 168 | size >> SRMMU_NOCACHE_BITMAP_SHIFT, |
170 | align >> SRMMU_NOCACHE_BITMAP_SHIFT); | 169 | align >> SRMMU_NOCACHE_BITMAP_SHIFT); |
171 | if (offset == -1) { | 170 | if (offset == -1) { |
172 | printk("srmmu: out of nocache %d: %d/%d\n", | 171 | printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n", |
173 | size, (int) srmmu_nocache_size, | 172 | size, (int) srmmu_nocache_size, |
174 | srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT); | 173 | srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT); |
175 | return 0; | 174 | return 0; |
176 | } | 175 | } |
177 | 176 | ||
178 | return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT)); | 177 | addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT); |
178 | return (void *)addr; | ||
179 | } | 179 | } |
180 | 180 | ||
181 | unsigned long srmmu_get_nocache(int size, int align) | 181 | void *srmmu_get_nocache(int size, int align) |
182 | { | 182 | { |
183 | unsigned long tmp; | 183 | void *tmp; |
184 | 184 | ||
185 | tmp = __srmmu_get_nocache(size, align); | 185 | tmp = __srmmu_get_nocache(size, align); |
186 | 186 | ||
187 | if (tmp) | 187 | if (tmp) |
188 | memset((void *)tmp, 0, size); | 188 | memset(tmp, 0, size); |
189 | 189 | ||
190 | return tmp; | 190 | return tmp; |
191 | } | 191 | } |
192 | 192 | ||
193 | void srmmu_free_nocache(unsigned long vaddr, int size) | 193 | void srmmu_free_nocache(void *addr, int size) |
194 | { | 194 | { |
195 | unsigned long vaddr; | ||
195 | int offset; | 196 | int offset; |
196 | 197 | ||
198 | vaddr = (unsigned long)addr; | ||
197 | if (vaddr < SRMMU_NOCACHE_VADDR) { | 199 | if (vaddr < SRMMU_NOCACHE_VADDR) { |
198 | printk("Vaddr %lx is smaller than nocache base 0x%lx\n", | 200 | printk("Vaddr %lx is smaller than nocache base 0x%lx\n", |
199 | vaddr, (unsigned long)SRMMU_NOCACHE_VADDR); | 201 | vaddr, (unsigned long)SRMMU_NOCACHE_VADDR); |
200 | BUG(); | 202 | BUG(); |
201 | } | 203 | } |
202 | if (vaddr+size > srmmu_nocache_end) { | 204 | if (vaddr + size > srmmu_nocache_end) { |
203 | printk("Vaddr %lx is bigger than nocache end 0x%lx\n", | 205 | printk("Vaddr %lx is bigger than nocache end 0x%lx\n", |
204 | vaddr, srmmu_nocache_end); | 206 | vaddr, srmmu_nocache_end); |
205 | BUG(); | 207 | BUG(); |
@@ -212,7 +214,7 @@ void srmmu_free_nocache(unsigned long vaddr, int size) | |||
212 | printk("Size 0x%x is too small\n", size); | 214 | printk("Size 0x%x is too small\n", size); |
213 | BUG(); | 215 | BUG(); |
214 | } | 216 | } |
215 | if (vaddr & (size-1)) { | 217 | if (vaddr & (size - 1)) { |
216 | printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size); | 218 | printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size); |
217 | BUG(); | 219 | BUG(); |
218 | } | 220 | } |
@@ -226,13 +228,23 @@ void srmmu_free_nocache(unsigned long vaddr, int size) | |||
226 | static void srmmu_early_allocate_ptable_skeleton(unsigned long start, | 228 | static void srmmu_early_allocate_ptable_skeleton(unsigned long start, |
227 | unsigned long end); | 229 | unsigned long end); |
228 | 230 | ||
229 | extern unsigned long probe_memory(void); /* in fault.c */ | 231 | /* Return how much physical memory we have. */ |
232 | static unsigned long __init probe_memory(void) | ||
233 | { | ||
234 | unsigned long total = 0; | ||
235 | int i; | ||
236 | |||
237 | for (i = 0; sp_banks[i].num_bytes; i++) | ||
238 | total += sp_banks[i].num_bytes; | ||
239 | |||
240 | return total; | ||
241 | } | ||
230 | 242 | ||
231 | /* | 243 | /* |
232 | * Reserve nocache dynamically proportionally to the amount of | 244 | * Reserve nocache dynamically proportionally to the amount of |
233 | * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002 | 245 | * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002 |
234 | */ | 246 | */ |
235 | static void srmmu_nocache_calcsize(void) | 247 | static void __init srmmu_nocache_calcsize(void) |
236 | { | 248 | { |
237 | unsigned long sysmemavail = probe_memory() / 1024; | 249 | unsigned long sysmemavail = probe_memory() / 1024; |
238 | int srmmu_nocache_npages; | 250 | int srmmu_nocache_npages; |
@@ -271,7 +283,7 @@ static void __init srmmu_nocache_init(void) | |||
271 | srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL); | 283 | srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL); |
272 | bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); | 284 | bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); |
273 | 285 | ||
274 | srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); | 286 | srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); |
275 | memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE); | 287 | memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE); |
276 | init_mm.pgd = srmmu_swapper_pg_dir; | 288 | init_mm.pgd = srmmu_swapper_pg_dir; |
277 | 289 | ||
@@ -304,7 +316,7 @@ pgd_t *get_pgd_fast(void) | |||
304 | { | 316 | { |
305 | pgd_t *pgd = NULL; | 317 | pgd_t *pgd = NULL; |
306 | 318 | ||
307 | pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); | 319 | pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); |
308 | if (pgd) { | 320 | if (pgd) { |
309 | pgd_t *init = pgd_offset_k(0); | 321 | pgd_t *init = pgd_offset_k(0); |
310 | memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); | 322 | memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); |
@@ -330,7 +342,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) | |||
330 | 342 | ||
331 | if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0) | 343 | if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0) |
332 | return NULL; | 344 | return NULL; |
333 | page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT ); | 345 | page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT); |
334 | pgtable_page_ctor(page); | 346 | pgtable_page_ctor(page); |
335 | return page; | 347 | return page; |
336 | } | 348 | } |
@@ -344,18 +356,50 @@ void pte_free(struct mm_struct *mm, pgtable_t pte) | |||
344 | if (p == 0) | 356 | if (p == 0) |
345 | BUG(); | 357 | BUG(); |
346 | p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */ | 358 | p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */ |
347 | p = (unsigned long) __nocache_va(p); /* Nocached virtual */ | 359 | |
348 | srmmu_free_nocache(p, PTE_SIZE); | 360 | /* free non cached virtual address*/ |
361 | srmmu_free_nocache(__nocache_va(p), PTE_SIZE); | ||
349 | } | 362 | } |
350 | 363 | ||
351 | /* | 364 | /* context handling - a dynamically sized pool is used */ |
352 | */ | 365 | #define NO_CONTEXT -1 |
366 | |||
367 | struct ctx_list { | ||
368 | struct ctx_list *next; | ||
369 | struct ctx_list *prev; | ||
370 | unsigned int ctx_number; | ||
371 | struct mm_struct *ctx_mm; | ||
372 | }; | ||
373 | |||
374 | static struct ctx_list *ctx_list_pool; | ||
375 | static struct ctx_list ctx_free; | ||
376 | static struct ctx_list ctx_used; | ||
377 | |||
378 | /* At boot time we determine the number of contexts */ | ||
379 | static int num_contexts; | ||
380 | |||
381 | static inline void remove_from_ctx_list(struct ctx_list *entry) | ||
382 | { | ||
383 | entry->next->prev = entry->prev; | ||
384 | entry->prev->next = entry->next; | ||
385 | } | ||
386 | |||
387 | static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry) | ||
388 | { | ||
389 | entry->next = head; | ||
390 | (entry->prev = head->prev)->next = entry; | ||
391 | head->prev = entry; | ||
392 | } | ||
393 | #define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry) | ||
394 | #define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry) | ||
395 | |||
396 | |||
353 | static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm) | 397 | static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm) |
354 | { | 398 | { |
355 | struct ctx_list *ctxp; | 399 | struct ctx_list *ctxp; |
356 | 400 | ||
357 | ctxp = ctx_free.next; | 401 | ctxp = ctx_free.next; |
358 | if(ctxp != &ctx_free) { | 402 | if (ctxp != &ctx_free) { |
359 | remove_from_ctx_list(ctxp); | 403 | remove_from_ctx_list(ctxp); |
360 | add_to_used_ctxlist(ctxp); | 404 | add_to_used_ctxlist(ctxp); |
361 | mm->context = ctxp->ctx_number; | 405 | mm->context = ctxp->ctx_number; |
@@ -363,9 +407,9 @@ static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm) | |||
363 | return; | 407 | return; |
364 | } | 408 | } |
365 | ctxp = ctx_used.next; | 409 | ctxp = ctx_used.next; |
366 | if(ctxp->ctx_mm == old_mm) | 410 | if (ctxp->ctx_mm == old_mm) |
367 | ctxp = ctxp->next; | 411 | ctxp = ctxp->next; |
368 | if(ctxp == &ctx_used) | 412 | if (ctxp == &ctx_used) |
369 | panic("out of mmu contexts"); | 413 | panic("out of mmu contexts"); |
370 | flush_cache_mm(ctxp->ctx_mm); | 414 | flush_cache_mm(ctxp->ctx_mm); |
371 | flush_tlb_mm(ctxp->ctx_mm); | 415 | flush_tlb_mm(ctxp->ctx_mm); |
@@ -385,11 +429,31 @@ static inline void free_context(int context) | |||
385 | add_to_free_ctxlist(ctx_old); | 429 | add_to_free_ctxlist(ctx_old); |
386 | } | 430 | } |
387 | 431 | ||
432 | static void __init sparc_context_init(int numctx) | ||
433 | { | ||
434 | int ctx; | ||
435 | unsigned long size; | ||
436 | |||
437 | size = numctx * sizeof(struct ctx_list); | ||
438 | ctx_list_pool = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL); | ||
439 | |||
440 | for (ctx = 0; ctx < numctx; ctx++) { | ||
441 | struct ctx_list *clist; | ||
442 | |||
443 | clist = (ctx_list_pool + ctx); | ||
444 | clist->ctx_number = ctx; | ||
445 | clist->ctx_mm = NULL; | ||
446 | } | ||
447 | ctx_free.next = ctx_free.prev = &ctx_free; | ||
448 | ctx_used.next = ctx_used.prev = &ctx_used; | ||
449 | for (ctx = 0; ctx < numctx; ctx++) | ||
450 | add_to_free_ctxlist(ctx_list_pool + ctx); | ||
451 | } | ||
388 | 452 | ||
389 | void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, | 453 | void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, |
390 | struct task_struct *tsk) | 454 | struct task_struct *tsk) |
391 | { | 455 | { |
392 | if(mm->context == NO_CONTEXT) { | 456 | if (mm->context == NO_CONTEXT) { |
393 | spin_lock(&srmmu_context_spinlock); | 457 | spin_lock(&srmmu_context_spinlock); |
394 | alloc_context(old_mm, mm); | 458 | alloc_context(old_mm, mm); |
395 | spin_unlock(&srmmu_context_spinlock); | 459 | spin_unlock(&srmmu_context_spinlock); |
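
Taken together, the ctx_list machinery, sparc_context_init() and alloc_context() above implement a small pool: every hardware context starts on a free list, and once the free list is exhausted the oldest entry on the used list is stolen (skipping the mm being switched away from). The following is a minimal user-space analog, not the kernel code: calloc() stands in for __alloc_bootmem(), struct mm is a one-field stand-in for mm_struct, and the srmmu_context_spinlock plus the cache/TLB flush of the victim mm are omitted.

#include <stdio.h>
#include <stdlib.h>

#define NO_CONTEXT -1

struct mm { int context; };             /* stand-in for struct mm_struct */

struct ctx_list {
        struct ctx_list *next, *prev;
        int ctx_number;
        struct mm *ctx_mm;
};

static struct ctx_list *ctx_list_pool;
static struct ctx_list ctx_free, ctx_used;

static void remove_from_ctx_list(struct ctx_list *e)
{
        e->next->prev = e->prev;
        e->prev->next = e->next;
}

/* Insert e just before head, i.e. at the tail of the circular list. */
static void add_to_ctx_list(struct ctx_list *head, struct ctx_list *e)
{
        e->next = head;
        (e->prev = head->prev)->next = e;
        head->prev = e;
}

/* Analog of sparc_context_init(): carve the pool (calloc() instead of
 * __alloc_bootmem()) and put every entry on the free list. */
static void context_init(int numctx)
{
        ctx_list_pool = calloc(numctx, sizeof(*ctx_list_pool));
        ctx_free.next = ctx_free.prev = &ctx_free;
        ctx_used.next = ctx_used.prev = &ctx_used;
        for (int i = 0; i < numctx; i++) {
                ctx_list_pool[i].ctx_number = i;
                add_to_ctx_list(&ctx_free, &ctx_list_pool[i]);
        }
}

/* Analog of alloc_context(): take a free context if one exists, otherwise steal
 * the oldest used one, skipping old_mm.  The kernel flushes the victim's cache
 * and TLB at the marked point; that is omitted here. */
static void context_alloc(struct mm *old_mm, struct mm *mm)
{
        struct ctx_list *ctxp = ctx_free.next;

        if (ctxp == &ctx_free) {
                ctxp = ctx_used.next;           /* oldest entry on the used list */
                if (ctxp->ctx_mm == old_mm)
                        ctxp = ctxp->next;
                if (ctxp == &ctx_used)
                        abort();                /* "out of mmu contexts" */
                /* flush_cache_mm() / flush_tlb_mm() of the victim would go here */
                ctxp->ctx_mm->context = NO_CONTEXT;
        }
        remove_from_ctx_list(ctxp);
        add_to_ctx_list(&ctx_used, ctxp);       /* most recently assigned goes last */
        mm->context = ctxp->ctx_number;
        ctxp->ctx_mm = mm;
}

int main(void)
{
        struct mm a = { NO_CONTEXT }, b = { NO_CONTEXT };

        context_init(1);                /* deliberately tiny pool to force stealing */
        context_alloc(NULL, &a);        /* a takes context 0 from the free list */
        context_alloc(NULL, &b);        /* free list empty: a's context is stolen */
        printf("a=%d b=%d\n", a.context, b.context);    /* prints a=-1 b=0 */
        return 0;
}
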
@@ -407,7 +471,7 @@ void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, | |||
407 | 471 | ||
408 | /* Low level IO area allocation on the SRMMU. */ | 472 | /* Low level IO area allocation on the SRMMU. */ |
409 | static inline void srmmu_mapioaddr(unsigned long physaddr, | 473 | static inline void srmmu_mapioaddr(unsigned long physaddr, |
410 | unsigned long virt_addr, int bus_type) | 474 | unsigned long virt_addr, int bus_type) |
411 | { | 475 | { |
412 | pgd_t *pgdp; | 476 | pgd_t *pgdp; |
413 | pmd_t *pmdp; | 477 | pmd_t *pmdp; |
@@ -420,8 +484,7 @@ static inline void srmmu_mapioaddr(unsigned long physaddr, | |||
420 | ptep = pte_offset_kernel(pmdp, virt_addr); | 484 | ptep = pte_offset_kernel(pmdp, virt_addr); |
421 | tmp = (physaddr >> 4) | SRMMU_ET_PTE; | 485 | tmp = (physaddr >> 4) | SRMMU_ET_PTE; |
422 | 486 | ||
423 | /* | 487 | /* I need to test whether this is consistent over all |
424 | * I need to test whether this is consistent over all | ||
425 | * sun4m's. The bus_type represents the upper 4 bits of | 488 | * sun4m's. The bus_type represents the upper 4 bits of |
426 | * 36-bit physical address on the I/O space lines... | 489 | * 36-bit physical address on the I/O space lines... |
427 | */ | 490 | */ |
@@ -591,10 +654,10 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, | |||
591 | pmd_t *pmdp; | 654 | pmd_t *pmdp; |
592 | pte_t *ptep; | 655 | pte_t *ptep; |
593 | 656 | ||
594 | while(start < end) { | 657 | while (start < end) { |
595 | pgdp = pgd_offset_k(start); | 658 | pgdp = pgd_offset_k(start); |
596 | if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) { | 659 | if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) { |
597 | pmdp = (pmd_t *) __srmmu_get_nocache( | 660 | pmdp = __srmmu_get_nocache( |
598 | SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); | 661 | SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); |
599 | if (pmdp == NULL) | 662 | if (pmdp == NULL) |
600 | early_pgtable_allocfail("pmd"); | 663 | early_pgtable_allocfail("pmd"); |
@@ -602,8 +665,8 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, | |||
602 | pgd_set(__nocache_fix(pgdp), pmdp); | 665 | pgd_set(__nocache_fix(pgdp), pmdp); |
603 | } | 666 | } |
604 | pmdp = pmd_offset(__nocache_fix(pgdp), start); | 667 | pmdp = pmd_offset(__nocache_fix(pgdp), start); |
605 | if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { | 668 | if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { |
606 | ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE); | 669 | ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE); |
607 | if (ptep == NULL) | 670 | if (ptep == NULL) |
608 | early_pgtable_allocfail("pte"); | 671 | early_pgtable_allocfail("pte"); |
609 | memset(__nocache_fix(ptep), 0, PTE_SIZE); | 672 | memset(__nocache_fix(ptep), 0, PTE_SIZE); |
@@ -622,18 +685,18 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start, | |||
622 | pmd_t *pmdp; | 685 | pmd_t *pmdp; |
623 | pte_t *ptep; | 686 | pte_t *ptep; |
624 | 687 | ||
625 | while(start < end) { | 688 | while (start < end) { |
626 | pgdp = pgd_offset_k(start); | 689 | pgdp = pgd_offset_k(start); |
627 | if (pgd_none(*pgdp)) { | 690 | if (pgd_none(*pgdp)) { |
628 | pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); | 691 | pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); |
629 | if (pmdp == NULL) | 692 | if (pmdp == NULL) |
630 | early_pgtable_allocfail("pmd"); | 693 | early_pgtable_allocfail("pmd"); |
631 | memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE); | 694 | memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE); |
632 | pgd_set(pgdp, pmdp); | 695 | pgd_set(pgdp, pmdp); |
633 | } | 696 | } |
634 | pmdp = pmd_offset(pgdp, start); | 697 | pmdp = pmd_offset(pgdp, start); |
635 | if(srmmu_pmd_none(*pmdp)) { | 698 | if (srmmu_pmd_none(*pmdp)) { |
636 | ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE, | 699 | ptep = __srmmu_get_nocache(PTE_SIZE, |
637 | PTE_SIZE); | 700 | PTE_SIZE); |
638 | if (ptep == NULL) | 701 | if (ptep == NULL) |
639 | early_pgtable_allocfail("pte"); | 702 | early_pgtable_allocfail("pte"); |
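Both srmmu_early_allocate_ptable_skeleton() and srmmu_allocate_ptable_skeleton() above follow the same on-demand pattern: walk the virtual range and, whenever an upper-level entry is still empty, allocate and zero the next-level table before moving on. Below is a minimal two-level sketch of that pattern, with calloc() in place of the nocache pool (it also supplies the zeroing that memset() does in the kernel) and purely illustrative table geometry:

#include <stdlib.h>

#define PAGE_SHIFT      12
#define PTRS_PER_PTE    64                      /* illustrative: 64 pages = 256 KB per slot */
#define PTRS_PER_PGD    256
#define PGDIR_SHIFT     (PAGE_SHIFT + 6)        /* 6 = log2(PTRS_PER_PTE) */

typedef unsigned long pte_t;
typedef pte_t *pgd_t;                           /* each pgd slot points at a pte table */

static pgd_t pgd_table[PTRS_PER_PGD];

/* Walk [start, end) page by page; the first time a pgd slot is touched,
 * allocate a zeroed pte table -- the analog of __srmmu_get_nocache() above. */
static void allocate_ptable_skeleton(unsigned long start, unsigned long end)
{
        while (start < end) {
                unsigned long pgd_idx = (start >> PGDIR_SHIFT) % PTRS_PER_PGD;

                if (pgd_table[pgd_idx] == NULL) {
                        pgd_table[pgd_idx] = calloc(PTRS_PER_PTE, sizeof(pte_t));
                        if (pgd_table[pgd_idx] == NULL)
                                abort();        /* early_pgtable_allocfail() in the kernel */
                }
                start += 1UL << PAGE_SHIFT;
        }
}

int main(void)
{
        allocate_ptable_skeleton(0xfc000000UL, 0xfc100000UL);   /* 1 MB -> four pte tables */
        return 0;
}
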
@@ -671,72 +734,76 @@ static inline unsigned long srmmu_probe(unsigned long vaddr) | |||
671 | static void __init srmmu_inherit_prom_mappings(unsigned long start, | 734 | static void __init srmmu_inherit_prom_mappings(unsigned long start, |
672 | unsigned long end) | 735 | unsigned long end) |
673 | { | 736 | { |
737 | unsigned long probed; | ||
738 | unsigned long addr; | ||
674 | pgd_t *pgdp; | 739 | pgd_t *pgdp; |
675 | pmd_t *pmdp; | 740 | pmd_t *pmdp; |
676 | pte_t *ptep; | 741 | pte_t *ptep; |
677 | int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */ | 742 | int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */ |
678 | unsigned long prompte; | ||
679 | 743 | ||
680 | while(start <= end) { | 744 | while (start <= end) { |
681 | if (start == 0) | 745 | if (start == 0) |
682 | break; /* probably wrap around */ | 746 | break; /* probably wrap around */ |
683 | if(start == 0xfef00000) | 747 | if (start == 0xfef00000) |
684 | start = KADB_DEBUGGER_BEGVM; | 748 | start = KADB_DEBUGGER_BEGVM; |
685 | if(!(prompte = srmmu_probe(start))) { | 749 | probed = srmmu_probe(start); |
750 | if (!probed) { | ||
751 | /* continue probing until we find an entry */ | ||
686 | start += PAGE_SIZE; | 752 | start += PAGE_SIZE; |
687 | continue; | 753 | continue; |
688 | } | 754 | } |
689 | 755 | ||
690 | /* A red snapper, see what it really is. */ | 756 | /* A red snapper, see what it really is. */ |
691 | what = 0; | 757 | what = 0; |
692 | 758 | addr = start - PAGE_SIZE; | |
693 | if(!(start & ~(SRMMU_REAL_PMD_MASK))) { | 759 | |
694 | if(srmmu_probe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte) | 760 | if (!(start & ~(SRMMU_REAL_PMD_MASK))) { |
761 | if (srmmu_probe(addr + SRMMU_REAL_PMD_SIZE) == probed) | ||
695 | what = 1; | 762 | what = 1; |
696 | } | 763 | } |
697 | 764 | ||
698 | if(!(start & ~(SRMMU_PGDIR_MASK))) { | 765 | if (!(start & ~(SRMMU_PGDIR_MASK))) { |
699 | if(srmmu_probe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) == | 766 | if (srmmu_probe(addr + SRMMU_PGDIR_SIZE) == probed) |
700 | prompte) | ||
701 | what = 2; | 767 | what = 2; |
702 | } | 768 | } |
703 | 769 | ||
704 | pgdp = pgd_offset_k(start); | 770 | pgdp = pgd_offset_k(start); |
705 | if(what == 2) { | 771 | if (what == 2) { |
706 | *(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte); | 772 | *(pgd_t *)__nocache_fix(pgdp) = __pgd(probed); |
707 | start += SRMMU_PGDIR_SIZE; | 773 | start += SRMMU_PGDIR_SIZE; |
708 | continue; | 774 | continue; |
709 | } | 775 | } |
710 | if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) { | 776 | if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) { |
711 | pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); | 777 | pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, |
778 | SRMMU_PMD_TABLE_SIZE); | ||
712 | if (pmdp == NULL) | 779 | if (pmdp == NULL) |
713 | early_pgtable_allocfail("pmd"); | 780 | early_pgtable_allocfail("pmd"); |
714 | memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); | 781 | memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); |
715 | pgd_set(__nocache_fix(pgdp), pmdp); | 782 | pgd_set(__nocache_fix(pgdp), pmdp); |
716 | } | 783 | } |
717 | pmdp = pmd_offset(__nocache_fix(pgdp), start); | 784 | pmdp = pmd_offset(__nocache_fix(pgdp), start); |
718 | if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { | 785 | if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { |
719 | ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE, | 786 | ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE); |
720 | PTE_SIZE); | ||
721 | if (ptep == NULL) | 787 | if (ptep == NULL) |
722 | early_pgtable_allocfail("pte"); | 788 | early_pgtable_allocfail("pte"); |
723 | memset(__nocache_fix(ptep), 0, PTE_SIZE); | 789 | memset(__nocache_fix(ptep), 0, PTE_SIZE); |
724 | pmd_set(__nocache_fix(pmdp), ptep); | 790 | pmd_set(__nocache_fix(pmdp), ptep); |
725 | } | 791 | } |
726 | if(what == 1) { | 792 | if (what == 1) { |
727 | /* | 793 | /* We bend the rule where all 16 PTPs in a pmd_t point |
728 | * We bend the rule where all 16 PTPs in a pmd_t point | ||
729 | * inside the same PTE page, and we leak a perfectly | 794 | * inside the same PTE page, and we leak a perfectly |
730 | * good hardware PTE piece. Alternatives seem worse. | 795 | * good hardware PTE piece. Alternatives seem worse. |
731 | */ | 796 | */ |
732 | unsigned int x; /* Index of HW PMD in soft cluster */ | 797 | unsigned int x; /* Index of HW PMD in soft cluster */ |
798 | unsigned long *val; | ||
733 | x = (start >> PMD_SHIFT) & 15; | 799 | x = (start >> PMD_SHIFT) & 15; |
734 | *(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte; | 800 | val = &pmdp->pmdv[x]; |
801 | *(unsigned long *)__nocache_fix(val) = probed; | ||
735 | start += SRMMU_REAL_PMD_SIZE; | 802 | start += SRMMU_REAL_PMD_SIZE; |
736 | continue; | 803 | continue; |
737 | } | 804 | } |
738 | ptep = pte_offset_kernel(__nocache_fix(pmdp), start); | 805 | ptep = pte_offset_kernel(__nocache_fix(pmdp), start); |
739 | *(pte_t *)__nocache_fix(ptep) = __pte(prompte); | 806 | *(pte_t *)__nocache_fix(ptep) = __pte(probed); |
740 | start += PAGE_SIZE; | 807 | start += PAGE_SIZE; |
741 | } | 808 | } |
742 | } | 809 | } |
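The subtle part of srmmu_inherit_prom_mappings() is deciding at which level the PROM installed a mapping: when start is aligned to a pmd- or pgd-sized region and probing the last page of that region returns the same descriptor, the whole region is served by one large PTE and can be copied at that level (what = 1 or 2). A standalone sketch of just that decision follows; probe() is a toy stand-in for srmmu_probe(), and the 256 KB / 16 MB region sizes are the usual SRMMU values, stated here as assumptions:

#include <stdio.h>

#define PAGE_SIZE               0x1000UL
#define SRMMU_REAL_PMD_SIZE     0x40000UL       /* 256 KB */
#define SRMMU_REAL_PMD_MASK     (~(SRMMU_REAL_PMD_SIZE - 1))
#define SRMMU_PGDIR_SIZE        0x1000000UL     /* 16 MB */
#define SRMMU_PGDIR_MASK        (~(SRMMU_PGDIR_SIZE - 1))

/* Toy stand-in for srmmu_probe(): pretend the PROM mapped the whole 16 MB
 * region at 0xfe000000 with a single pgd-level PTE whose value is 0x1234. */
static unsigned long probe(unsigned long vaddr)
{
        return (vaddr & SRMMU_PGDIR_MASK) == 0xfe000000UL ? 0x1234UL : 0;
}

/* 0 = page-level PTE, 1 = pmd-level (256 KB) PTE, 2 = pgd-level (16 MB) PTE */
static int prom_mapping_level(unsigned long start, unsigned long probed)
{
        unsigned long addr = start - PAGE_SIZE; /* addr + region size = last page of the region */
        int what = 0;

        if (!(start & ~SRMMU_REAL_PMD_MASK) &&
            probe(addr + SRMMU_REAL_PMD_SIZE) == probed)
                what = 1;       /* same descriptor at both ends of the pmd region */
        if (!(start & ~SRMMU_PGDIR_MASK) &&
            probe(addr + SRMMU_PGDIR_SIZE) == probed)
                what = 2;       /* same descriptor at both ends of the pgd region */
        return what;
}

int main(void)
{
        unsigned long start = 0xfe000000UL;

        printf("level = %d\n", prom_mapping_level(start, probe(start))); /* prints 2 */
        return 0;
}
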
@@ -765,18 +832,18 @@ static unsigned long __init map_spbank(unsigned long vbase, int sp_entry) | |||
765 | 832 | ||
766 | if (vstart < min_vaddr || vstart >= max_vaddr) | 833 | if (vstart < min_vaddr || vstart >= max_vaddr) |
767 | return vstart; | 834 | return vstart; |
768 | 835 | ||
769 | if (vend > max_vaddr || vend < min_vaddr) | 836 | if (vend > max_vaddr || vend < min_vaddr) |
770 | vend = max_vaddr; | 837 | vend = max_vaddr; |
771 | 838 | ||
772 | while(vstart < vend) { | 839 | while (vstart < vend) { |
773 | do_large_mapping(vstart, pstart); | 840 | do_large_mapping(vstart, pstart); |
774 | vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE; | 841 | vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE; |
775 | } | 842 | } |
776 | return vstart; | 843 | return vstart; |
777 | } | 844 | } |
778 | 845 | ||
779 | static inline void map_kernel(void) | 846 | static void __init map_kernel(void) |
780 | { | 847 | { |
781 | int i; | 848 | int i; |
782 | 849 | ||
@@ -789,9 +856,6 @@ static inline void map_kernel(void) | |||
789 | } | 856 | } |
790 | } | 857 | } |
791 | 858 | ||
792 | /* Paging initialization on the Sparc Reference MMU. */ | ||
793 | extern void sparc_context_init(int); | ||
794 | |||
795 | void (*poke_srmmu)(void) __cpuinitdata = NULL; | 859 | void (*poke_srmmu)(void) __cpuinitdata = NULL; |
796 | 860 | ||
797 | extern unsigned long bootmem_init(unsigned long *pages_avail); | 861 | extern unsigned long bootmem_init(unsigned long *pages_avail); |
@@ -806,6 +870,7 @@ void __init srmmu_paging_init(void) | |||
806 | pte_t *pte; | 870 | pte_t *pte; |
807 | unsigned long pages_avail; | 871 | unsigned long pages_avail; |
808 | 872 | ||
873 | init_mm.context = (unsigned long) NO_CONTEXT; | ||
809 | sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */ | 874 | sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */ |
810 | 875 | ||
811 | if (sparc_cpu_model == sun4d) | 876 | if (sparc_cpu_model == sun4d) |
@@ -814,9 +879,9 @@ void __init srmmu_paging_init(void) | |||
814 | /* Find the number of contexts on the srmmu. */ | 879 | /* Find the number of contexts on the srmmu. */ |
815 | cpunode = prom_getchild(prom_root_node); | 880 | cpunode = prom_getchild(prom_root_node); |
816 | num_contexts = 0; | 881 | num_contexts = 0; |
817 | while(cpunode != 0) { | 882 | while (cpunode != 0) { |
818 | prom_getstring(cpunode, "device_type", node_str, sizeof(node_str)); | 883 | prom_getstring(cpunode, "device_type", node_str, sizeof(node_str)); |
819 | if(!strcmp(node_str, "cpu")) { | 884 | if (!strcmp(node_str, "cpu")) { |
820 | num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8); | 885 | num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8); |
821 | break; | 886 | break; |
822 | } | 887 | } |
@@ -824,7 +889,7 @@ void __init srmmu_paging_init(void) | |||
824 | } | 889 | } |
825 | } | 890 | } |
826 | 891 | ||
827 | if(!num_contexts) { | 892 | if (!num_contexts) { |
828 | prom_printf("Something wrong, can't find cpu node in paging_init.\n"); | 893 | prom_printf("Something wrong, can't find cpu node in paging_init.\n"); |
829 | prom_halt(); | 894 | prom_halt(); |
830 | } | 895 | } |
@@ -834,14 +899,14 @@ void __init srmmu_paging_init(void) | |||
834 | 899 | ||
835 | srmmu_nocache_calcsize(); | 900 | srmmu_nocache_calcsize(); |
836 | srmmu_nocache_init(); | 901 | srmmu_nocache_init(); |
837 | srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE)); | 902 | srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE)); |
838 | map_kernel(); | 903 | map_kernel(); |
839 | 904 | ||
840 | /* ctx table has to be physically aligned to its size */ | 905 | /* ctx table has to be physically aligned to its size */ |
841 | srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t)); | 906 | srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t)); |
842 | srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table); | 907 | srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table); |
843 | 908 | ||
844 | for(i = 0; i < num_contexts; i++) | 909 | for (i = 0; i < num_contexts; i++) |
845 | srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir); | 910 | srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir); |
846 | 911 | ||
847 | flush_cache_all(); | 912 | flush_cache_all(); |
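The context-table allocation above passes the same value for size and alignment because, per the comment, the table has to be physically aligned to its own size before srmmu_ctx_table_phys is handed to the MMU. A user-space illustration of that constraint, with aligned_alloc() standing in for __srmmu_get_nocache() and ctxd_t assumed to be a 4-byte descriptor:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

typedef uint32_t ctxd_t;        /* one context descriptor (assumed: 4 bytes) */

int main(void)
{
        int num_contexts = 256;                 /* e.g. what "mmu-nctx" reported */
        size_t size = num_contexts * sizeof(ctxd_t);

        /* alignment == size, as in __srmmu_get_nocache(size, size) above */
        ctxd_t *ctx_table = aligned_alloc(size, size);

        if (ctx_table == NULL)
                return 1;
        assert(((uintptr_t)ctx_table & (size - 1)) == 0);
        /* the kernel then points every context at srmmu_swapper_pg_dir until claimed */
        free(ctx_table);
        return 0;
}
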
@@ -897,7 +962,7 @@ void __init srmmu_paging_init(void) | |||
897 | 962 | ||
898 | void mmu_info(struct seq_file *m) | 963 | void mmu_info(struct seq_file *m) |
899 | { | 964 | { |
900 | seq_printf(m, | 965 | seq_printf(m, |
901 | "MMU type\t: %s\n" | 966 | "MMU type\t: %s\n" |
902 | "contexts\t: %d\n" | 967 | "contexts\t: %d\n" |
903 | "nocache total\t: %ld\n" | 968 | "nocache total\t: %ld\n" |
@@ -908,10 +973,16 @@ void mmu_info(struct seq_file *m) | |||
908 | srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT); | 973 | srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT); |
909 | } | 974 | } |
910 | 975 | ||
976 | int init_new_context(struct task_struct *tsk, struct mm_struct *mm) | ||
977 | { | ||
978 | mm->context = NO_CONTEXT; | ||
979 | return 0; | ||
980 | } | ||
981 | |||
911 | void destroy_context(struct mm_struct *mm) | 982 | void destroy_context(struct mm_struct *mm) |
912 | { | 983 | { |
913 | 984 | ||
914 | if(mm->context != NO_CONTEXT) { | 985 | if (mm->context != NO_CONTEXT) { |
915 | flush_cache_mm(mm); | 986 | flush_cache_mm(mm); |
916 | srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir); | 987 | srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir); |
917 | flush_tlb_mm(mm); | 988 | flush_tlb_mm(mm); |
@@ -941,13 +1012,12 @@ static void __init init_vac_layout(void) | |||
941 | #endif | 1012 | #endif |
942 | 1013 | ||
943 | nd = prom_getchild(prom_root_node); | 1014 | nd = prom_getchild(prom_root_node); |
944 | while((nd = prom_getsibling(nd)) != 0) { | 1015 | while ((nd = prom_getsibling(nd)) != 0) { |
945 | prom_getstring(nd, "device_type", node_str, sizeof(node_str)); | 1016 | prom_getstring(nd, "device_type", node_str, sizeof(node_str)); |
946 | if(!strcmp(node_str, "cpu")) { | 1017 | if (!strcmp(node_str, "cpu")) { |
947 | vac_line_size = prom_getint(nd, "cache-line-size"); | 1018 | vac_line_size = prom_getint(nd, "cache-line-size"); |
948 | if (vac_line_size == -1) { | 1019 | if (vac_line_size == -1) { |
949 | prom_printf("can't determine cache-line-size, " | 1020 | prom_printf("can't determine cache-line-size, halting.\n"); |
950 | "halting.\n"); | ||
951 | prom_halt(); | 1021 | prom_halt(); |
952 | } | 1022 | } |
953 | cache_lines = prom_getint(nd, "cache-nlines"); | 1023 | cache_lines = prom_getint(nd, "cache-nlines"); |
@@ -958,9 +1028,9 @@ static void __init init_vac_layout(void) | |||
958 | 1028 | ||
959 | vac_cache_size = cache_lines * vac_line_size; | 1029 | vac_cache_size = cache_lines * vac_line_size; |
960 | #ifdef CONFIG_SMP | 1030 | #ifdef CONFIG_SMP |
961 | if(vac_cache_size > max_size) | 1031 | if (vac_cache_size > max_size) |
962 | max_size = vac_cache_size; | 1032 | max_size = vac_cache_size; |
963 | if(vac_line_size < min_line_size) | 1033 | if (vac_line_size < min_line_size) |
964 | min_line_size = vac_line_size; | 1034 | min_line_size = vac_line_size; |
965 | //FIXME: cpus not contiguous!! | 1035 | //FIXME: cpus not contiguous!! |
966 | cpu++; | 1036 | cpu++; |
@@ -971,7 +1041,7 @@ static void __init init_vac_layout(void) | |||
971 | #endif | 1041 | #endif |
972 | } | 1042 | } |
973 | } | 1043 | } |
974 | if(nd == 0) { | 1044 | if (nd == 0) { |
975 | prom_printf("No CPU nodes found, halting.\n"); | 1045 | prom_printf("No CPU nodes found, halting.\n"); |
976 | prom_halt(); | 1046 | prom_halt(); |
977 | } | 1047 | } |
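init_vac_layout() above derives the virtually-addressed cache geometry per CPU as cache-nlines × cache-line-size and, under CONFIG_SMP, keeps the largest cache size and the smallest line size it sees, so later flush code is conservative for every CPU. A tiny worked example of that reduction over hypothetical per-CPU PROM values:

#include <stdio.h>

struct cpu_cache { int lines; int line_size; }; /* "cache-nlines" / "cache-line-size" */

int main(void)
{
        /* hypothetical values for a two-CPU sun4m */
        struct cpu_cache cpus[] = { { 2048, 32 }, { 4096, 16 } };
        int max_size = 0, min_line_size = 1 << 30;

        for (unsigned i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++) {
                int size = cpus[i].lines * cpus[i].line_size;   /* vac_cache_size */

                if (size > max_size)
                        max_size = size;
                if (cpus[i].line_size < min_line_size)
                        min_line_size = cpus[i].line_size;
        }
        /* 2048*32 and 4096*16 are both 64 KB -> cache 65536, line 16 */
        printf("vac_cache_size=%d vac_line_size=%d\n", max_size, min_line_size);
        return 0;
}
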
@@ -1082,7 +1152,7 @@ static void __init init_swift(void) | |||
1082 | "=r" (swift_rev) : | 1152 | "=r" (swift_rev) : |
1083 | "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS)); | 1153 | "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS)); |
1084 | srmmu_name = "Fujitsu Swift"; | 1154 | srmmu_name = "Fujitsu Swift"; |
1085 | switch(swift_rev) { | 1155 | switch (swift_rev) { |
1086 | case 0x11: | 1156 | case 0x11: |
1087 | case 0x20: | 1157 | case 0x20: |
1088 | case 0x23: | 1158 | case 0x23: |
@@ -1222,10 +1292,11 @@ static void __cpuinit poke_turbosparc(void) | |||
1222 | 1292 | ||
1223 | /* Clear any crap from the cache or else... */ | 1293 | /* Clear any crap from the cache or else... */ |
1224 | turbosparc_flush_cache_all(); | 1294 | turbosparc_flush_cache_all(); |
1225 | mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */ | 1295 | /* Temporarily disable I & D caches */ |
1296 | mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); | ||
1226 | mreg &= ~(TURBOSPARC_PCENABLE); /* Don't check parity */ | 1297 | mreg &= ~(TURBOSPARC_PCENABLE); /* Don't check parity */ |
1227 | srmmu_set_mmureg(mreg); | 1298 | srmmu_set_mmureg(mreg); |
1228 | 1299 | ||
1229 | ccreg = turbosparc_get_ccreg(); | 1300 | ccreg = turbosparc_get_ccreg(); |
1230 | 1301 | ||
1231 | #ifdef TURBOSPARC_WRITEBACK | 1302 | #ifdef TURBOSPARC_WRITEBACK |
@@ -1248,7 +1319,7 @@ static void __cpuinit poke_turbosparc(void) | |||
1248 | default: | 1319 | default: |
1249 | ccreg |= (TURBOSPARC_SCENABLE); | 1320 | ccreg |= (TURBOSPARC_SCENABLE); |
1250 | } | 1321 | } |
1251 | turbosparc_set_ccreg (ccreg); | 1322 | turbosparc_set_ccreg(ccreg); |
1252 | 1323 | ||
1253 | mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */ | 1324 | mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */ |
1254 | mreg |= (TURBOSPARC_ICSNOOP); /* Icache snooping on */ | 1325 | mreg |= (TURBOSPARC_ICSNOOP); /* Icache snooping on */ |
@@ -1342,7 +1413,7 @@ static void __cpuinit poke_viking(void) | |||
1342 | unsigned long bpreg; | 1413 | unsigned long bpreg; |
1343 | 1414 | ||
1344 | mreg &= ~(VIKING_TCENABLE); | 1415 | mreg &= ~(VIKING_TCENABLE); |
1345 | if(smp_catch++) { | 1416 | if (smp_catch++) { |
1346 | /* Must disable mixed-cmd mode here for other cpu's. */ | 1417 | /* Must disable mixed-cmd mode here for other cpu's. */ |
1347 | bpreg = viking_get_bpreg(); | 1418 | bpreg = viking_get_bpreg(); |
1348 | bpreg &= ~(VIKING_ACTION_MIX); | 1419 | bpreg &= ~(VIKING_ACTION_MIX); |
@@ -1411,7 +1482,7 @@ static void __init init_viking(void) | |||
1411 | unsigned long mreg = srmmu_get_mmureg(); | 1482 | unsigned long mreg = srmmu_get_mmureg(); |
1412 | 1483 | ||
1413 | /* Ahhh, the viking. SRMMU VLSI abortion number two... */ | 1484 | /* Ahhh, the viking. SRMMU VLSI abortion number two... */ |
1414 | if(mreg & VIKING_MMODE) { | 1485 | if (mreg & VIKING_MMODE) { |
1415 | srmmu_name = "TI Viking"; | 1486 | srmmu_name = "TI Viking"; |
1416 | viking_mxcc_present = 0; | 1487 | viking_mxcc_present = 0; |
1417 | msi_set_sync(); | 1488 | msi_set_sync(); |
@@ -1467,8 +1538,8 @@ static void __init get_srmmu_type(void) | |||
1467 | } | 1538 | } |
1468 | 1539 | ||
1469 | /* Second, check for HyperSparc or Cypress. */ | 1540 | /* Second, check for HyperSparc or Cypress. */ |
1470 | if(mod_typ == 1) { | 1541 | if (mod_typ == 1) { |
1471 | switch(mod_rev) { | 1542 | switch (mod_rev) { |
1472 | case 7: | 1543 | case 7: |
1473 | /* UP or MP Hypersparc */ | 1544 | /* UP or MP Hypersparc */ |
1474 | init_hypersparc(); | 1545 | init_hypersparc(); |
@@ -1488,9 +1559,8 @@ static void __init get_srmmu_type(void) | |||
1488 | } | 1559 | } |
1489 | return; | 1560 | return; |
1490 | } | 1561 | } |
1491 | 1562 | ||
1492 | /* | 1563 | /* Now Fujitsu TurboSparc. It might happen that it is |
1493 | * Now Fujitsu TurboSparc. It might happen that it is | ||
1494 | * in Swift emulation mode, so we will check later... | 1564 | * in Swift emulation mode, so we will check later... |
1495 | */ | 1565 | */ |
1496 | if (psr_typ == 0 && psr_vers == 5) { | 1566 | if (psr_typ == 0 && psr_vers == 5) { |
@@ -1499,15 +1569,15 @@ static void __init get_srmmu_type(void) | |||
1499 | } | 1569 | } |
1500 | 1570 | ||
1501 | /* Next check for Fujitsu Swift. */ | 1571 | /* Next check for Fujitsu Swift. */ |
1502 | if(psr_typ == 0 && psr_vers == 4) { | 1572 | if (psr_typ == 0 && psr_vers == 4) { |
1503 | phandle cpunode; | 1573 | phandle cpunode; |
1504 | char node_str[128]; | 1574 | char node_str[128]; |
1505 | 1575 | ||
1506 | /* Look if it is not a TurboSparc emulating Swift... */ | 1576 | /* Look if it is not a TurboSparc emulating Swift... */ |
1507 | cpunode = prom_getchild(prom_root_node); | 1577 | cpunode = prom_getchild(prom_root_node); |
1508 | while((cpunode = prom_getsibling(cpunode)) != 0) { | 1578 | while ((cpunode = prom_getsibling(cpunode)) != 0) { |
1509 | prom_getstring(cpunode, "device_type", node_str, sizeof(node_str)); | 1579 | prom_getstring(cpunode, "device_type", node_str, sizeof(node_str)); |
1510 | if(!strcmp(node_str, "cpu")) { | 1580 | if (!strcmp(node_str, "cpu")) { |
1511 | if (!prom_getintdefault(cpunode, "psr-implementation", 1) && | 1581 | if (!prom_getintdefault(cpunode, "psr-implementation", 1) && |
1512 | prom_getintdefault(cpunode, "psr-version", 1) == 5) { | 1582 | prom_getintdefault(cpunode, "psr-version", 1) == 5) { |
1513 | init_turbosparc(); | 1583 | init_turbosparc(); |
@@ -1516,13 +1586,13 @@ static void __init get_srmmu_type(void) | |||
1516 | break; | 1586 | break; |
1517 | } | 1587 | } |
1518 | } | 1588 | } |
1519 | 1589 | ||
1520 | init_swift(); | 1590 | init_swift(); |
1521 | return; | 1591 | return; |
1522 | } | 1592 | } |
1523 | 1593 | ||
1524 | /* Now the Viking family of srmmu. */ | 1594 | /* Now the Viking family of srmmu. */ |
1525 | if(psr_typ == 4 && | 1595 | if (psr_typ == 4 && |
1526 | ((psr_vers == 0) || | 1596 | ((psr_vers == 0) || |
1527 | ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) { | 1597 | ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) { |
1528 | init_viking(); | 1598 | init_viking(); |
@@ -1530,7 +1600,7 @@ static void __init get_srmmu_type(void) | |||
1530 | } | 1600 | } |
1531 | 1601 | ||
1532 | /* Finally the Tsunami. */ | 1602 | /* Finally the Tsunami. */ |
1533 | if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) { | 1603 | if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) { |
1534 | init_tsunami(); | 1604 | init_tsunami(); |
1535 | return; | 1605 | return; |
1536 | } | 1606 | } |
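For orientation, get_srmmu_type() keys off the implementation/version fields of the PSR and the module type/revision from the MMU control register, in the order shown above. The sketch below condenses the checks visible in this diff into one function; it is a reader's summary, not kernel code, and branches that are not part of these hunks (the earlier checks and the mod_rev values other than 7) are left out:

#include <stdio.h>

/* First match wins, mirroring the order of the checks above. */
static const char *srmmu_type(int psr_typ, int psr_vers, int mod_typ, int mod_rev)
{
        if (mod_typ == 1 && mod_rev == 7)
                return "HyperSparc";
        if (psr_typ == 0 && psr_vers == 5)
                return "Fujitsu TurboSparc";    /* may be emulating Swift */
        if (psr_typ == 0 && psr_vers == 4)
                return "Fujitsu Swift";         /* unless the PROM says TurboSparc-in-Swift mode */
        if (psr_typ == 4 &&
            (psr_vers == 0 || (psr_vers == 1 && mod_typ == 0 && mod_rev == 0)))
                return "Viking";
        if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev))
                return "Tsunami";
        return "unknown";
}

int main(void)
{
        printf("%s\n", srmmu_type(0, 4, 0, 0)); /* prints "Fujitsu Swift" */
        return 0;
}
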
diff --git a/arch/sparc/prom/init_32.c b/arch/sparc/prom/init_32.c index 26c64cea3c9c..9ac30c2b7dba 100644 --- a/arch/sparc/prom/init_32.c +++ b/arch/sparc/prom/init_32.c | |||
@@ -27,13 +27,10 @@ EXPORT_SYMBOL(prom_root_node); | |||
27 | struct linux_nodeops *prom_nodeops; | 27 | struct linux_nodeops *prom_nodeops; |
28 | 28 | ||
29 | /* You must call prom_init() before you attempt to use any of the | 29 | /* You must call prom_init() before you attempt to use any of the |
30 | * routines in the prom library. It returns 0 on success, 1 on | 30 | * routines in the prom library. |
31 | * failure. It gets passed the pointer to the PROM vector. | 31 | * It gets passed the pointer to the PROM vector. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | extern void prom_meminit(void); | ||
35 | extern void prom_ranges_init(void); | ||
36 | |||
37 | void __init prom_init(struct linux_romvec *rp) | 34 | void __init prom_init(struct linux_romvec *rp) |
38 | { | 35 | { |
39 | romvec = rp; | 36 | romvec = rp; |
diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c index 5016c5e20575..d95db755828f 100644 --- a/arch/sparc/prom/init_64.c +++ b/arch/sparc/prom/init_64.c | |||
@@ -22,8 +22,8 @@ int prom_stdout; | |||
22 | phandle prom_chosen_node; | 22 | phandle prom_chosen_node; |
23 | 23 | ||
24 | /* You must call prom_init() before you attempt to use any of the | 24 | /* You must call prom_init() before you attempt to use any of the |
25 | * routines in the prom library. It returns 0 on success, 1 on | 25 | * routines in the prom library. |
26 | * failure. It gets passed the pointer to the PROM vector. | 26 | * It gets passed the pointer to the PROM vector. |
27 | */ | 27 | */ |
28 | 28 | ||
29 | extern void prom_cif_init(void *, void *); | 29 | extern void prom_cif_init(void *, void *); |