Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--  arch/sparc/mm/Makefile     |   3
-rw-r--r--  arch/sparc/mm/btfixup.c    |   3
-rw-r--r--  arch/sparc/mm/fault_32.c   |   8
-rw-r--r--  arch/sparc/mm/fault_64.c   |   8
-rw-r--r--  arch/sparc/mm/generic_32.c |  98
-rw-r--r--  arch/sparc/mm/generic_64.c | 164
-rw-r--r--  arch/sparc/mm/gup.c        | 183
-rw-r--r--  arch/sparc/mm/init_64.c    |  47
-rw-r--r--  arch/sparc/mm/leon_mm.c    |   2
-rw-r--r--  arch/sparc/mm/tsb.c        |  11
10 files changed, 249 insertions(+), 278 deletions(-)
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 79836a7dd00..301421c1129 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -4,11 +4,10 @@
 asflags-y := -ansi
 ccflags-y := -Werror
 
-obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
+obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
 obj-y += fault_$(BITS).o
 obj-y += init_$(BITS).o
 obj-$(CONFIG_SPARC32) += loadmmu.o
-obj-y += generic_$(BITS).o
 obj-$(CONFIG_SPARC32) += extable.o btfixup.o srmmu.o iommu.o io-unit.o
 obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o
 obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
index 5175ac2f482..8a7f81743c1 100644
--- a/arch/sparc/mm/btfixup.c
+++ b/arch/sparc/mm/btfixup.c
@@ -302,8 +302,7 @@ void __init btfixup(void)
 	case 'i':	/* INT */
 		if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
 			set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
-		else if ((insn & 0x80002000) == 0x80002000 &&
-			 (insn & 0x01800000) != 0x01800000) /* %LO */
+		else if ((insn & 0x80002000) == 0x80002000) /* %LO */
 			set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
 		else {
 			prom_printf(insn_i, p, addr, insn);
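
The %HI/%LO cases above rewrite the immediate fields of a sethi/or instruction pair; the change drops the extra op3 restriction, so any format-3 instruction with the immediate bit (0x2000) set is now accepted as a %LO site. As an illustration only (not part of the patch) of why %HI takes the value shifted right by 10 and %LO the value masked with 0x3ff:

/* Illustration only: sparc splits a 32-bit constant across a
 * sethi (%hi) / or (%lo) pair, matching the ">> 10" and "& 0x3ff"
 * manipulations above.
 */
static void split_hi_lo(unsigned int value,
			unsigned int *hi22, unsigned int *lo10)
{
	*hi22 = value >> 10;	/* upper 22 bits: the sethi immediate */
	*lo10 = value & 0x3ff;	/* lower 10 bits: the or/add immediate */
}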
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 7543ddbdadb..aa1c1b1ce5c 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -251,7 +251,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	if (in_atomic() || !mm)
 		goto no_context;
 
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 	down_read(&mm->mmap_sem);
 
@@ -301,12 +301,10 @@ good_area:
 	}
 	if (fault & VM_FAULT_MAJOR) {
 		current->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
-			      regs, address);
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
 	} else {
 		current->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
-			      regs, address);
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
 	}
 	up_read(&mm->mmap_sem);
 	return;
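
These call sites track the perf core change that removed the nmi flag from the software-event interface; fault_64.c below gets the identical conversion. The before and after prototypes, as assumed here:

/* Before: callers passed an explicit nmi flag (always 0 at these sites). */
void perf_sw_event(u32 event_id, u64 nr, int nmi,
		   struct pt_regs *regs, u64 addr);

/* After: the nmi parameter is gone, so the 0 argument is dropped. */
void perf_sw_event(u32 event_id, u64 nr,
		   struct pt_regs *regs, u64 addr);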
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index f92ce56a8b2..504c0622f72 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -325,7 +325,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	if (in_atomic() || !mm)
 		goto intr_or_no_mm;
 
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 	if (!down_read_trylock(&mm->mmap_sem)) {
 		if ((regs->tstate & TSTATE_PRIV) &&
@@ -433,12 +433,10 @@ good_area:
 	}
 	if (fault & VM_FAULT_MAJOR) {
 		current->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
-			      regs, address);
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
 	} else {
 		current->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
-			      regs, address);
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
 	}
 	up_read(&mm->mmap_sem);
 
diff --git a/arch/sparc/mm/generic_32.c b/arch/sparc/mm/generic_32.c
deleted file mode 100644
index e6067b75f11..00000000000
--- a/arch/sparc/mm/generic_32.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * generic.c: Generic Sparc mm routines that are not dependent upon
- *            MMU type but are Sparc specific.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/pagemap.h>
-
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-
-/* Remap IO memory, the same way as remap_pfn_range(), but use
- * the obio memory space.
- *
- * They use a pgprot that sets PAGE_IO and does not check the
- * mem_map table as this is independent of normal memory.
- */
-static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
-	unsigned long offset, pgprot_t prot, int space)
-{
-	unsigned long end;
-
-	address &= ~PMD_MASK;
-	end = address + size;
-	if (end > PMD_SIZE)
-		end = PMD_SIZE;
-	do {
-		set_pte_at(mm, address, pte, mk_pte_io(offset, prot, space));
-		address += PAGE_SIZE;
-		offset += PAGE_SIZE;
-		pte++;
-	} while (address < end);
-}
-
-static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
-	unsigned long offset, pgprot_t prot, int space)
-{
-	unsigned long end;
-
-	address &= ~PGDIR_MASK;
-	end = address + size;
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
-	offset -= address;
-	do {
-		pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
-		if (!pte)
-			return -ENOMEM;
-		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address < end);
-	return 0;
-}
-
-int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
-	unsigned long pfn, unsigned long size, pgprot_t prot)
-{
-	int error = 0;
-	pgd_t * dir;
-	unsigned long beg = from;
-	unsigned long end = from + size;
-	struct mm_struct *mm = vma->vm_mm;
-	int space = GET_IOSPACE(pfn);
-	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
-
-	/* See comment in mm/memory.c remap_pfn_range */
-	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
-	vma->vm_pgoff = (offset >> PAGE_SHIFT) |
-		((unsigned long)space << 28UL);
-
-	offset -= from;
-	dir = pgd_offset(mm, from);
-	flush_cache_range(vma, beg, end);
-
-	while (from < end) {
-		pmd_t *pmd = pmd_alloc(mm, dir, from);
-		error = -ENOMEM;
-		if (!pmd)
-			break;
-		error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
-		if (error)
-			break;
-		from = (from + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
-	}
-
-	flush_tlb_range(vma, beg, end);
-	return error;
-}
-EXPORT_SYMBOL(io_remap_pfn_range);
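
For reference, a sketch of how a driver's mmap handler would have reached the io_remap_pfn_range() implementation deleted here. The IO space number and physical offset are hypothetical, and MK_IOSPACE_PFN() is the sparc helper for packing a space ID into a pfn:

/* Hypothetical driver mmap handler; IO space 1 and the 0x20000000
 * physical offset are made-up values for illustration.
 */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = MK_IOSPACE_PFN(1, 0x20000000UL >> PAGE_SHIFT);

	return io_remap_pfn_range(vma, vma->vm_start, pfn,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}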
diff --git a/arch/sparc/mm/generic_64.c b/arch/sparc/mm/generic_64.c
deleted file mode 100644
index 3cb00dfd4bd..00000000000
--- a/arch/sparc/mm/generic_64.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * generic.c: Generic Sparc mm routines that are not dependent upon
- *            MMU type but are Sparc specific.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/pagemap.h>
-
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/tlbflush.h>
-
-/* Remap IO memory, the same way as remap_pfn_range(), but use
- * the obio memory space.
- *
- * They use a pgprot that sets PAGE_IO and does not check the
- * mem_map table as this is independent of normal memory.
- */
-static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
-				      unsigned long address,
-				      unsigned long size,
-				      unsigned long offset, pgprot_t prot,
-				      int space)
-{
-	unsigned long end;
-
-	/* clear hack bit that was used as a write_combine side-effect flag */
-	offset &= ~0x1UL;
-	address &= ~PMD_MASK;
-	end = address + size;
-	if (end > PMD_SIZE)
-		end = PMD_SIZE;
-	do {
-		pte_t entry;
-		unsigned long curend = address + PAGE_SIZE;
-
-		entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
-		if (!(address & 0xffff)) {
-			if (PAGE_SIZE < (4 * 1024 * 1024) &&
-			    !(address & 0x3fffff) &&
-			    !(offset & 0x3ffffe) &&
-			    end >= address + 0x400000) {
-				entry = mk_pte_io(offset, prot, space,
-						  4 * 1024 * 1024);
-				curend = address + 0x400000;
-				offset += 0x400000;
-			} else if (PAGE_SIZE < (512 * 1024) &&
-				   !(address & 0x7ffff) &&
-				   !(offset & 0x7fffe) &&
-				   end >= address + 0x80000) {
-				entry = mk_pte_io(offset, prot, space,
-						  512 * 1024 * 1024);
-				curend = address + 0x80000;
-				offset += 0x80000;
-			} else if (PAGE_SIZE < (64 * 1024) &&
-				   !(offset & 0xfffe) &&
-				   end >= address + 0x10000) {
-				entry = mk_pte_io(offset, prot, space,
-						  64 * 1024);
-				curend = address + 0x10000;
-				offset += 0x10000;
-			} else
-				offset += PAGE_SIZE;
-		} else
-			offset += PAGE_SIZE;
-
-		if (pte_write(entry))
-			entry = pte_mkdirty(entry);
-		do {
-			BUG_ON(!pte_none(*pte));
-			set_pte_at(mm, address, pte, entry);
-			address += PAGE_SIZE;
-			pte_val(entry) += PAGE_SIZE;
-			pte++;
-		} while (address < curend);
-	} while (address < end);
-}
-
-static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
-	unsigned long offset, pgprot_t prot, int space)
-{
-	unsigned long end;
-
-	address &= ~PGDIR_MASK;
-	end = address + size;
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
-	offset -= address;
-	do {
-		pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
-		if (!pte)
-			return -ENOMEM;
-		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
-		pte_unmap(pte);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address < end);
-	return 0;
-}
-
-static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
-	unsigned long offset, pgprot_t prot, int space)
-{
-	unsigned long end;
-
-	address &= ~PUD_MASK;
-	end = address + size;
-	if (end > PUD_SIZE)
-		end = PUD_SIZE;
-	offset -= address;
-	do {
-		pmd_t *pmd = pmd_alloc(mm, pud, address);
-		if (!pud)
-			return -ENOMEM;
-		io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
-		address = (address + PUD_SIZE) & PUD_MASK;
-		pud++;
-	} while (address < end);
-	return 0;
-}
-
-int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
-	unsigned long pfn, unsigned long size, pgprot_t prot)
-{
-	int error = 0;
-	pgd_t * dir;
-	unsigned long beg = from;
-	unsigned long end = from + size;
-	struct mm_struct *mm = vma->vm_mm;
-	int space = GET_IOSPACE(pfn);
-	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
-	unsigned long phys_base;
-
-	phys_base = offset | (((unsigned long) space) << 32UL);
-
-	/* See comment in mm/memory.c remap_pfn_range */
-	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
-	vma->vm_pgoff = phys_base >> PAGE_SHIFT;
-
-	offset -= from;
-	dir = pgd_offset(mm, from);
-	flush_cache_range(vma, beg, end);
-
-	while (from < end) {
-		pud_t *pud = pud_alloc(mm, dir, from);
-		error = -ENOMEM;
-		if (!pud)
-			break;
-		error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
-		if (error)
-			break;
-		from = (from + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
-	}
-
-	flush_tlb_range(vma, beg, end);
-	return error;
-}
-EXPORT_SYMBOL(io_remap_pfn_range);
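
Both deleted files recover the IO space and physical frame from the pfn argument via GET_IOSPACE()/GET_PFN(). For context, the packing those macros assume (reproduced here from asm/page_64.h as best recalled; verify against the tree) puts a 4-bit IO space identifier in the top bits of the pfn:

/* Assumed sparc64 pfn packing used by io_remap_pfn_range() callers. */
#define GET_PFN(pfn)		(pfn & 0x0fffffffffffffffUL)
#define GET_IOSPACE(pfn)	(pfn >> (BITS_PER_LONG - 4))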
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
new file mode 100644
index 00000000000..42c55df3aec
--- /dev/null
+++ b/arch/sparc/mm/gup.c
@@ -0,0 +1,183 @@
+/*
+ * Lockless get_user_pages_fast for sparc, cribbed from powerpc
+ *
+ * Copyright (C) 2008 Nick Piggin
+ * Copyright (C) 2008 Novell Inc.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmstat.h>
+#include <linux/pagemap.h>
+#include <linux/rwsem.h>
+#include <asm/pgtable.h>
+
+/*
+ * The performance critical leaf functions are made noinline otherwise gcc
+ * inlines everything into a single function which results in too much
+ * register pressure.
+ */
+static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
+		unsigned long end, int write, struct page **pages, int *nr)
+{
+	unsigned long mask, result;
+	pte_t *ptep;
+
+	if (tlb_type == hypervisor) {
+		result = _PAGE_PRESENT_4V|_PAGE_P_4V;
+		if (write)
+			result |= _PAGE_WRITE_4V;
+	} else {
+		result = _PAGE_PRESENT_4U|_PAGE_P_4U;
+		if (write)
+			result |= _PAGE_WRITE_4U;
+	}
+	mask = result | _PAGE_SPECIAL;
+
+	ptep = pte_offset_kernel(&pmd, addr);
+	do {
+		struct page *page, *head;
+		pte_t pte = *ptep;
+
+		if ((pte_val(pte) & mask) != result)
+			return 0;
+		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+		/* The hugepage case is simplified on sparc64 because
+		 * we encode the sub-page pfn offsets into the
+		 * hugepage PTEs. We could optimize this in the future to
+		 * use page_cache_add_speculative() for the hugepage case.
+		 */
+		page = pte_page(pte);
+		head = compound_head(page);
+		if (!page_cache_get_speculative(head))
+			return 0;
+		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+			put_page(head);
+			return 0;
+		}
+		if (head != page)
+			get_huge_page_tail(page);
+
+		pages[*nr] = page;
+		(*nr)++;
+	} while (ptep++, addr += PAGE_SIZE, addr != end);
+
+	return 1;
+}
+
+static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
+		int write, struct page **pages, int *nr)
+{
+	unsigned long next;
+	pmd_t *pmdp;
+
+	pmdp = pmd_offset(&pud, addr);
+	do {
+		pmd_t pmd = *pmdp;
+
+		next = pmd_addr_end(addr, end);
+		if (pmd_none(pmd))
+			return 0;
+		if (!gup_pte_range(pmd, addr, next, write, pages, nr))
+			return 0;
+	} while (pmdp++, addr = next, addr != end);
+
+	return 1;
+}
+
+static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+		int write, struct page **pages, int *nr)
+{
+	unsigned long next;
+	pud_t *pudp;
+
+	pudp = pud_offset(&pgd, addr);
+	do {
+		pud_t pud = *pudp;
+
+		next = pud_addr_end(addr, end);
+		if (pud_none(pud))
+			return 0;
+		if (!gup_pmd_range(pud, addr, next, write, pages, nr))
+			return 0;
+	} while (pudp++, addr = next, addr != end);
+
+	return 1;
+}
+
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			struct page **pages)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long addr, len, end;
+	unsigned long next;
+	pgd_t *pgdp;
+	int nr = 0;
+
+	start &= PAGE_MASK;
+	addr = start;
+	len = (unsigned long) nr_pages << PAGE_SHIFT;
+	end = start + len;
+
+	/*
+	 * XXX: batch / limit 'nr', to avoid large irq off latency
+	 * needs some instrumenting to determine the common sizes used by
+	 * important workloads (eg. DB2), and whether limiting the batch size
+	 * will decrease performance.
+	 *
+	 * It seems like we're in the clear for the moment. Direct-IO is
+	 * the main guy that batches up lots of get_user_pages, and even
+	 * they are limited to 64-at-a-time which is not so many.
+	 */
+	/*
+	 * This doesn't prevent pagetable teardown, but does prevent
+	 * the pagetables from being freed on sparc.
+	 *
+	 * So long as we atomically load page table pointers versus teardown,
+	 * we can follow the address down to the page and take a ref on it.
+	 */
+	local_irq_disable();
+
+	pgdp = pgd_offset(mm, addr);
+	do {
+		pgd_t pgd = *pgdp;
+
+		next = pgd_addr_end(addr, end);
+		if (pgd_none(pgd))
+			goto slow;
+		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+			goto slow;
+	} while (pgdp++, addr = next, addr != end);
+
+	local_irq_enable();
+
+	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
+	return nr;
+
+	{
+		int ret;
+
+slow:
+		local_irq_enable();
+
+		/* Try to get the remaining pages with get_user_pages */
+		start += nr << PAGE_SHIFT;
+		pages += nr;
+
+		down_read(&mm->mmap_sem);
+		ret = get_user_pages(current, mm, start,
+			(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
+		up_read(&mm->mmap_sem);
+
+		/* Have to be a bit careful with return values */
+		if (nr > 0) {
+			if (ret < 0)
+				ret = nr;
+			else
+				ret += nr;
+		}
+
+		return ret;
+	}
+}
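
A minimal sketch of a caller (names hypothetical), e.g. a driver pinning a user buffer for DMA. Note the convention the slow path above preserves: the return value is the number of pages actually pinned, which can be fewer than requested:

/* Hypothetical caller: pin nr_pages of user memory for writing,
 * dropping any partial pin on failure.
 */
static int pin_user_buffer(unsigned long uaddr, int nr_pages,
			   struct page **pages)
{
	int i, got;

	got = get_user_pages_fast(uaddr, nr_pages, 1, pages);
	if (got < 0)
		return got;
	if (got < nr_pages) {
		for (i = 0; i < got; i++)
			put_page(pages[i]);
		return -EFAULT;
	}
	return 0;
}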
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 3fd8e18bed8..8e073d80213 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -511,6 +511,11 @@ static void __init read_obp_translations(void)
 	for (i = 0; i < prom_trans_ents; i++)
 		prom_trans[i].data &= ~0x0003fe0000000000UL;
 	}
+
+	/* Force execute bit on.  */
+	for (i = 0; i < prom_trans_ents; i++)
+		prom_trans[i].data |= (tlb_type == hypervisor ?
+				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
 }
 
 static void __init hypervisor_tlb_lock(unsigned long vaddr,
@@ -1597,6 +1602,44 @@ static void __init tsb_phys_patch(void)
 static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
 
+static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
+{
+	pa >>= KTSB_PHYS_SHIFT;
+
+	while (start < end) {
+		unsigned int *ia = (unsigned int *)(unsigned long)*start;
+
+		ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
+		__asm__ __volatile__("flush	%0" : : "r" (ia));
+
+		ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
+		__asm__ __volatile__("flush	%0" : : "r" (ia + 1));
+
+		start++;
+	}
+}
+
+static void ktsb_phys_patch(void)
+{
+	extern unsigned int __swapper_tsb_phys_patch;
+	extern unsigned int __swapper_tsb_phys_patch_end;
+	unsigned long ktsb_pa;
+
+	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
+	patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
+			    &__swapper_tsb_phys_patch_end, ktsb_pa);
+#ifndef CONFIG_DEBUG_PAGEALLOC
+	{
+	extern unsigned int __swapper_4m_tsb_phys_patch;
+	extern unsigned int __swapper_4m_tsb_phys_patch_end;
+	ktsb_pa = (kern_base +
+		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
+	patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
+			    &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
+	}
+#endif
+}
+
 static void __init sun4v_ktsb_init(void)
 {
 	unsigned long ktsb_pa;
@@ -1716,8 +1759,10 @@ void __init paging_init(void)
 	sun4u_pgprot_init();
 
 	if (tlb_type == cheetah_plus ||
-	    tlb_type == hypervisor)
+	    tlb_type == hypervisor) {
 		tsb_phys_patch();
+		ktsb_phys_patch();
+	}
 
 	if (tlb_type == hypervisor) {
 		sun4v_patch_tlb_handlers();
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index e485a680499..13c2169822a 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -162,7 +162,7 @@ ready:
 	printk(KERN_INFO "swprobe: padde %x\n", paddr_calc);
 	if (paddr)
 		*paddr = paddr_calc;
-	return paddrbase;
+	return pte;
 }
 
 void leon_flush_icache_all(void)
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index a5f51b22fcb..536412d8f41 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -236,6 +236,8 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
 	}
 }
 
+struct kmem_cache *pgtable_cache __read_mostly;
+
 static struct kmem_cache *tsb_caches[8] __read_mostly;
 
 static const char *tsb_cache_names[8] = {
@@ -253,6 +255,15 @@ void __init pgtable_cache_init(void)
 {
 	unsigned long i;
 
+	pgtable_cache = kmem_cache_create("pgtable_cache",
+					  PAGE_SIZE, PAGE_SIZE,
+					  0,
+					  _clear_page);
+	if (!pgtable_cache) {
+		prom_printf("pgtable_cache_init(): Could not create!\n");
+		prom_halt();
+	}
+
 	for (i = 0; i < 8; i++) {
 		unsigned long size = 8192 << i;
 		const char *name = tsb_cache_names[i];
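
With pgtable_cache in place, sparc64 page-table pages can be carved from a dedicated slab whose _clear_page constructor zeroes each new slab object. A sketch of the allocation pattern this enables; the real helpers live in asm/pgalloc_64.h and may differ in detail:

/* Sketch only: allocate and free a page-table page from the new
 * cache; fresh slab objects are zeroed by the _clear_page ctor.
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(pgtable_cache, pgd);
}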