author    James Hogan <james.hogan@imgtec.com>  2012-10-09 05:54:17 -0400
committer James Hogan <james.hogan@imgtec.com>  2013-03-02 15:09:19 -0500
commit    f5df8e268f749987c32c7eee001f7623fd7be69c (patch)
tree      eb727e5917b4f4e4e0de0ec085fd36a11aa8d898 /arch
parent    99ef7c2ac1e3b01f532bfdebbe92e9960e95bebc (diff)
metag: Memory management
Add memory management files for metag.

Meta's 32bit virtual address space is split into two halves:
 - local (0x08000000-0x7fffffff): traditionally local to a hardware
   thread and incoherent between hardware threads. Each hardware thread
   has its own local MMU table. On Meta2 the local space can be globally
   coherent (GCOn) if the cache partitions coincide.
 - global (0x88000000-0xffff0000): coherent and traditionally global
   between hardware threads. On Meta2, each hardware thread has its own
   global MMU table.

The low 128MiB of each half is non-MMUable and maps directly to the
physical address space:
 - 0x00010000-0x07ffffff: contains Meta core registers and maps SoC bus
 - 0x80000000-0x87ffffff: contains low latency global core memories

Linux usually further splits the local virtual address space like this:
 - 0x08000000-0x3fffffff: user mappings
 - 0x40000000-0x7fffffff: kernel mappings

Signed-off-by: James Hogan <james.hogan@imgtec.com>
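
For orientation, the layout above reduces to four address ranges. Below is a minimal standalone C sketch of that split (not part of the patch): the boundary constants are copied from this commit message rather than from kernel headers, and the sample addresses are arbitrary.

#include <stdio.h>

/* Classify a Meta virtual address per the layout described in the commit
 * message above. Boundary values come from the message, not kernel headers. */
static const char *meta_vaddr_region(unsigned long addr)
{
	if (addr >= 0x00010000UL && addr <= 0x07ffffffUL)
		return "local, unmapped (core registers / SoC bus)";
	if (addr >= 0x08000000UL && addr <= 0x7fffffffUL)
		return "local, MMU-mapped";
	if (addr >= 0x80000000UL && addr <= 0x87ffffffUL)
		return "global, unmapped (core memories)";
	if (addr >= 0x88000000UL && addr <= 0xffff0000UL)
		return "global, MMU-mapped";
	return "invalid";
}

int main(void)
{
	unsigned long samples[] = { 0x00020000UL, 0x10000000UL, 0x40000000UL,
				    0x80100000UL, 0xc0000000UL, 0xffff8000UL };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%08lx -> %s\n", samples[i],
		       meta_vaddr_region(samples[i]));
	return 0;
}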
Diffstat (limited to 'arch')
-rw-r--r--  arch/metag/include/asm/mmu.h           77
-rw-r--r--  arch/metag/include/asm/mmu_context.h  113
-rw-r--r--  arch/metag/include/asm/page.h         128
-rw-r--r--  arch/metag/include/asm/pgalloc.h       79
-rw-r--r--  arch/metag/include/asm/pgtable.h      370
-rw-r--r--  arch/metag/mm/extable.c                15
-rw-r--r--  arch/metag/mm/fault.c                 239
-rw-r--r--  arch/metag/mm/init.c                  448
-rw-r--r--  arch/metag/mm/mmu-meta1.c             157
-rw-r--r--  arch/metag/mm/mmu-meta2.c             207
10 files changed, 1833 insertions, 0 deletions
diff --git a/arch/metag/include/asm/mmu.h b/arch/metag/include/asm/mmu.h
new file mode 100644
index 000000000000..9c321147c0b4
--- /dev/null
+++ b/arch/metag/include/asm/mmu.h
@@ -0,0 +1,77 @@
1#ifndef __MMU_H
2#define __MMU_H
3
4#ifdef CONFIG_METAG_USER_TCM
5#include <linux/list.h>
6#endif
7
8#ifdef CONFIG_HUGETLB_PAGE
9#include <asm/page.h>
10#endif
11
12typedef struct {
13 /* Software pgd base pointer used for Meta 1.x MMU. */
14 unsigned long pgd_base;
15#ifdef CONFIG_METAG_USER_TCM
16 struct list_head tcm;
17#endif
18#ifdef CONFIG_HUGETLB_PAGE
19#if HPAGE_SHIFT < HUGEPT_SHIFT
20 /* last partially filled huge page table address */
21 unsigned long part_huge;
22#endif
23#endif
24} mm_context_t;
25
26/* Given a virtual address, return the pte for the top level 4meg entry
27 * that maps that address.
28 * Returns 0 (an empty pte) if that range is not mapped.
29 */
30unsigned long mmu_read_first_level_page(unsigned long vaddr);
31
32/* Given a linear (virtual) address, return the second level 4k pte
33 * that maps that address. Returns 0 if the address is not mapped.
34 */
35unsigned long mmu_read_second_level_page(unsigned long vaddr);
36
37/* Get the virtual base address of the MMU */
38unsigned long mmu_get_base(void);
39
40/* Initialize the MMU. */
41void mmu_init(unsigned long mem_end);
42
43#ifdef CONFIG_METAG_META21_MMU
44/*
45 * For cpu "cpu" calculate and return the address of the
46 * MMCU_TnLOCAL_TABLE_PHYS0 if running in local-space or
47 * MMCU_TnGLOBAL_TABLE_PHYS0 if running in global-space.
48 */
49static inline unsigned long mmu_phys0_addr(unsigned int cpu)
50{
51 unsigned long phys0;
52
53 phys0 = (MMCU_T0LOCAL_TABLE_PHYS0 +
54 (MMCU_TnX_TABLE_PHYSX_STRIDE * cpu)) +
55 (MMCU_TXG_TABLE_PHYSX_OFFSET * is_global_space(PAGE_OFFSET));
56
57 return phys0;
58}
59
60/*
61 * For cpu "cpu" calculate and return the address of the
62 * MMCU_TnLOCAL_TABLE_PHYS1 if running in local-space or
63 * MMCU_TnGLOBAL_TABLE_PHYS1 if running in global-space.
64 */
65static inline unsigned long mmu_phys1_addr(unsigned int cpu)
66{
67 unsigned long phys1;
68
69 phys1 = (MMCU_T0LOCAL_TABLE_PHYS1 +
70 (MMCU_TnX_TABLE_PHYSX_STRIDE * cpu)) +
71 (MMCU_TXG_TABLE_PHYSX_OFFSET * is_global_space(PAGE_OFFSET));
72
73 return phys1;
74}
75#endif /* CONFIG_METAG_META21_MMU */
76
77#endif
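
The mmu_phys0_addr()/mmu_phys1_addr() helpers above are plain "base + stride * cpu + offset-if-global" arithmetic. A standalone sketch follows; the register constants below are placeholders, not the real MMCU_* values from the metag headers.

#include <stdio.h>

/* Placeholder register constants; the real MMCU_* definitions differ. */
#define T0LOCAL_TABLE_PHYS0_EX   0x04800000UL  /* example base address */
#define TnX_TABLE_PHYSX_STRIDE_EX      0x10UL  /* example per-thread stride */
#define TXG_TABLE_PHYSX_OFFSET_EX      0x80UL  /* example local->global offset */

static unsigned long phys0_addr(unsigned int cpu, int global_space)
{
	return T0LOCAL_TABLE_PHYS0_EX +
	       TnX_TABLE_PHYSX_STRIDE_EX * cpu +
	       TXG_TABLE_PHYSX_OFFSET_EX * (global_space ? 1 : 0);
}

int main(void)
{
	unsigned int cpu;

	for (cpu = 0; cpu < 4; cpu++)
		printf("T%u: local PHYS0 0x%08lx, global PHYS0 0x%08lx\n",
		       cpu, phys0_addr(cpu, 0), phys0_addr(cpu, 1));
	return 0;
}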
diff --git a/arch/metag/include/asm/mmu_context.h b/arch/metag/include/asm/mmu_context.h
new file mode 100644
index 000000000000..ae2a71b5e0be
--- /dev/null
+++ b/arch/metag/include/asm/mmu_context.h
@@ -0,0 +1,113 @@
1#ifndef __METAG_MMU_CONTEXT_H
2#define __METAG_MMU_CONTEXT_H
3
4#include <asm-generic/mm_hooks.h>
5
6#include <asm/page.h>
7#include <asm/mmu.h>
8#include <asm/tlbflush.h>
9#include <asm/cacheflush.h>
10
11#include <linux/io.h>
12
13static inline void enter_lazy_tlb(struct mm_struct *mm,
14 struct task_struct *tsk)
15{
16}
17
18static inline int init_new_context(struct task_struct *tsk,
19 struct mm_struct *mm)
20{
21#ifndef CONFIG_METAG_META21_MMU
22 /* We use context to store a pointer to the page holding the
23 * pgd of a process while it is running. While a process is not
24 * running the pgd and context fields should be equal.
25 */
26 mm->context.pgd_base = (unsigned long) mm->pgd;
27#endif
28#ifdef CONFIG_METAG_USER_TCM
29 INIT_LIST_HEAD(&mm->context.tcm);
30#endif
31 return 0;
32}
33
34#ifdef CONFIG_METAG_USER_TCM
35
36#include <linux/slab.h>
37#include <asm/tcm.h>
38
39static inline void destroy_context(struct mm_struct *mm)
40{
41 struct tcm_allocation *pos, *n;
42
43 list_for_each_entry_safe(pos, n, &mm->context.tcm, list) {
44 tcm_free(pos->tag, pos->addr, pos->size);
45 list_del(&pos->list);
46 kfree(pos);
47 }
48}
49#else
50#define destroy_context(mm) do { } while (0)
51#endif
52
53#ifdef CONFIG_METAG_META21_MMU
54static inline void load_pgd(pgd_t *pgd, int thread)
55{
56 unsigned long phys0 = mmu_phys0_addr(thread);
57 unsigned long phys1 = mmu_phys1_addr(thread);
58
59 /*
60 * 0x900 2Gb address space
61 * The permission bits apply to MMU table region which gives a 2MB
62 * window into physical memory. We especially don't want userland to be
63 * able to access this.
64 */
65 metag_out32(0x900 | _PAGE_CACHEABLE | _PAGE_PRIV | _PAGE_WRITE |
66 _PAGE_PRESENT, phys0);
67 /* Set new MMU base address */
68 metag_out32(__pa(pgd) & MMCU_TBLPHYS1_ADDR_BITS, phys1);
69}
70#endif
71
72static inline void switch_mmu(struct mm_struct *prev, struct mm_struct *next)
73{
74#ifdef CONFIG_METAG_META21_MMU
75 load_pgd(next->pgd, hard_processor_id());
76#else
77 unsigned int i;
78
79 /* prev->context == prev->pgd in the case where we are initially
80 switching from the init task to the first process. */
81 if (prev->context.pgd_base != (unsigned long) prev->pgd) {
82 for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++)
83 ((pgd_t *) prev->context.pgd_base)[i] = prev->pgd[i];
84 } else
85 prev->pgd = (pgd_t *)mmu_get_base();
86
87 next->pgd = prev->pgd;
88 prev->pgd = (pgd_t *) prev->context.pgd_base;
89
90 for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++)
91 next->pgd[i] = ((pgd_t *) next->context.pgd_base)[i];
92
93 flush_cache_all();
94#endif
95 flush_tlb_all();
96}
97
98static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
99 struct task_struct *tsk)
100{
101 if (prev != next)
102 switch_mmu(prev, next);
103}
104
105static inline void activate_mm(struct mm_struct *prev_mm,
106 struct mm_struct *next_mm)
107{
108 switch_mmu(prev_mm, next_mm);
109}
110
111#define deactivate_mm(tsk, mm) do { } while (0)
112
113#endif
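
On Meta 1 there is a single hardware MMU table per thread, so switch_mmu() above saves the outgoing mm's user pgd entries out of that table and loads the incoming mm's entries into it. A simplified standalone model of that scheme; the table size and index values are examples only, and the pointer-swapping details of the real code are elided.

#include <stdio.h>
#include <string.h>

#define PTRS_PER_PGD_EX   512
#define FIRST_USER_EX       4   /* example FIRST_USER_PGD_NR */
#define USER_PTRS_EX      256   /* example USER_PTRS_PER_PGD */

static unsigned long hw_pgd[PTRS_PER_PGD_EX]; /* models the table at mmu_get_base() */

struct mm { unsigned long soft_pgd[PTRS_PER_PGD_EX]; };

/* Save the outgoing mm's user entries from the hardware table, then load the
 * incoming mm's user entries into it (kernel entries are left untouched). */
static void model_switch_mmu(struct mm *prev, struct mm *next)
{
	size_t n = (USER_PTRS_EX - FIRST_USER_EX) * sizeof(unsigned long);

	memcpy(&prev->soft_pgd[FIRST_USER_EX], &hw_pgd[FIRST_USER_EX], n);
	memcpy(&hw_pgd[FIRST_USER_EX], &next->soft_pgd[FIRST_USER_EX], n);
}

int main(void)
{
	struct mm a = { { 0 } }, b = { { 0 } };

	a.soft_pgd[FIRST_USER_EX] = 0x1111;
	b.soft_pgd[FIRST_USER_EX] = 0x2222;
	memcpy(hw_pgd, b.soft_pgd, sizeof(hw_pgd));   /* pretend b is running */

	model_switch_mmu(&b, &a);
	printf("after switch to a: hw user entry = 0x%lx\n", hw_pgd[FIRST_USER_EX]);
	model_switch_mmu(&a, &b);
	printf("after switch to b: hw user entry = 0x%lx\n", hw_pgd[FIRST_USER_EX]);
	return 0;
}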
diff --git a/arch/metag/include/asm/page.h b/arch/metag/include/asm/page.h
new file mode 100644
index 000000000000..1e8e281b8bb7
--- /dev/null
+++ b/arch/metag/include/asm/page.h
@@ -0,0 +1,128 @@
1#ifndef _METAG_PAGE_H
2#define _METAG_PAGE_H
3
4#include <linux/const.h>
5
6#include <asm/metag_mem.h>
7
8/* PAGE_SHIFT determines the page size */
9#if defined(CONFIG_PAGE_SIZE_4K)
10#define PAGE_SHIFT 12
11#elif defined(CONFIG_PAGE_SIZE_8K)
12#define PAGE_SHIFT 13
13#elif defined(CONFIG_PAGE_SIZE_16K)
14#define PAGE_SHIFT 14
15#endif
16
17#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
18#define PAGE_MASK (~(PAGE_SIZE-1))
19
20#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
21# define HPAGE_SHIFT 13
22#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
23# define HPAGE_SHIFT 14
24#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
25# define HPAGE_SHIFT 15
26#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
27# define HPAGE_SHIFT 16
28#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
29# define HPAGE_SHIFT 17
30#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
31# define HPAGE_SHIFT 18
32#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
33# define HPAGE_SHIFT 19
34#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
35# define HPAGE_SHIFT 20
36#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
37# define HPAGE_SHIFT 21
38#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
39# define HPAGE_SHIFT 22
40#endif
41
42#ifdef CONFIG_HUGETLB_PAGE
43# define HPAGE_SIZE (1UL << HPAGE_SHIFT)
44# define HPAGE_MASK (~(HPAGE_SIZE-1))
45# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
46/*
47 * We define our own hugetlb_get_unmapped_area so we don't corrupt 2nd level
48 * page tables with normal pages in them.
49 */
50# define HUGEPT_SHIFT (22)
51# define HUGEPT_ALIGN (1 << HUGEPT_SHIFT)
52# define HUGEPT_MASK (HUGEPT_ALIGN - 1)
53# define ALIGN_HUGEPT(x) ALIGN(x, HUGEPT_ALIGN)
54# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
55#endif
56
57#ifndef __ASSEMBLY__
58
59/* On the Meta, we would like to know if the address (heap) we have is
60 * in local or global space.
61 */
62#define is_global_space(addr) ((addr) > 0x7fffffff)
63#define is_local_space(addr) (!is_global_space(addr))
64
65extern void clear_page(void *to);
66extern void copy_page(void *to, void *from);
67
68#define clear_user_page(page, vaddr, pg) clear_page(page)
69#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
70
71/*
72 * These are used to make use of C type-checking..
73 */
74typedef struct { unsigned long pte; } pte_t;
75typedef struct { unsigned long pgd; } pgd_t;
76typedef struct { unsigned long pgprot; } pgprot_t;
77typedef struct page *pgtable_t;
78
79#define pte_val(x) ((x).pte)
80#define pgd_val(x) ((x).pgd)
81#define pgprot_val(x) ((x).pgprot)
82
83#define __pte(x) ((pte_t) { (x) })
84#define __pgd(x) ((pgd_t) { (x) })
85#define __pgprot(x) ((pgprot_t) { (x) })
86
87/* The kernel must now ALWAYS live at either 0xC0000000 or 0x40000000 - that
88 * being either global or local space.
89 */
90#define PAGE_OFFSET (CONFIG_PAGE_OFFSET)
91
92#if PAGE_OFFSET >= LINGLOBAL_BASE
93#define META_MEMORY_BASE LINGLOBAL_BASE
94#define META_MEMORY_LIMIT LINGLOBAL_LIMIT
95#else
96#define META_MEMORY_BASE LINLOCAL_BASE
97#define META_MEMORY_LIMIT LINLOCAL_LIMIT
98#endif
99
100/* Offset between physical and virtual mapping of kernel memory. */
101extern unsigned int meta_memoffset;
102
103#define __pa(x) ((unsigned long)(((unsigned long)(x)) - meta_memoffset))
104#define __va(x) ((void *)((unsigned long)(((unsigned long)(x)) + meta_memoffset)))
105
106extern unsigned long pfn_base;
107#define ARCH_PFN_OFFSET (pfn_base)
108#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
109#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
110#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
111#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
112#ifdef CONFIG_FLATMEM
113extern unsigned long max_pfn;
114extern unsigned long min_low_pfn;
115#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_pfn)
116#endif
117
118#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
119
120#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
121 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
122
123#include <asm-generic/memory_model.h>
124#include <asm-generic/getorder.h>
125
126#endif /* __ASSEMBLY__ */
127
128#endif /* _METAG_PAGE_H */
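
The __pa()/__va() and pfn translations in page.h above reduce to a fixed offset between kernel virtual and physical addresses. A standalone sketch with example values for meta_memoffset and pfn_base (in the kernel both are set up at boot):

#include <stdio.h>

#define PAGE_SHIFT_EX  12
/* Example only: kernel at virtual 0x40000000 backed by RAM at physical
 * 0x20000000, so the virt->phys offset is 0x20000000. */
static unsigned long meta_memoffset = 0x20000000UL;
static unsigned long pfn_base = 0x20000000UL >> PAGE_SHIFT_EX;

static unsigned long pa(unsigned long vaddr) { return vaddr - meta_memoffset; }
static unsigned long va(unsigned long paddr) { return paddr + meta_memoffset; }

int main(void)
{
	unsigned long kaddr = 0x40123000UL;
	unsigned long phys = pa(kaddr);
	unsigned long pfn = phys >> PAGE_SHIFT_EX;

	printf("kaddr 0x%08lx -> phys 0x%08lx -> pfn 0x%lx (mem_map index %lu)\n",
	       kaddr, phys, pfn, pfn - pfn_base);
	printf("round trip via __va(): 0x%08lx\n", va(pfn << PAGE_SHIFT_EX));
	return 0;
}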
diff --git a/arch/metag/include/asm/pgalloc.h b/arch/metag/include/asm/pgalloc.h
new file mode 100644
index 000000000000..275d9285141c
--- /dev/null
+++ b/arch/metag/include/asm/pgalloc.h
@@ -0,0 +1,79 @@
1#ifndef _METAG_PGALLOC_H
2#define _METAG_PGALLOC_H
3
4#include <linux/threads.h>
5#include <linux/mm.h>
6
7#define pmd_populate_kernel(mm, pmd, pte) \
8 set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
9
10#define pmd_populate(mm, pmd, pte) \
11 set_pmd(pmd, __pmd(_PAGE_TABLE | page_to_phys(pte)))
12
13#define pmd_pgtable(pmd) pmd_page(pmd)
14
15/*
16 * Allocate and free page tables.
17 */
18#ifdef CONFIG_METAG_META21_MMU
19static inline void pgd_ctor(pgd_t *pgd)
20{
21 memcpy(pgd + USER_PTRS_PER_PGD,
22 swapper_pg_dir + USER_PTRS_PER_PGD,
23 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
24}
25#else
26#define pgd_ctor(x) do { } while (0)
27#endif
28
29static inline pgd_t *pgd_alloc(struct mm_struct *mm)
30{
31 pgd_t *pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
32 if (pgd)
33 pgd_ctor(pgd);
34 return pgd;
35}
36
37static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
38{
39 free_page((unsigned long)pgd);
40}
41
42static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
43 unsigned long address)
44{
45 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT |
46 __GFP_ZERO);
47 return pte;
48}
49
50static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
51 unsigned long address)
52{
53 struct page *pte;
54 pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
55 if (pte)
56 pgtable_page_ctor(pte);
57 return pte;
58}
59
60static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
61{
62 free_page((unsigned long)pte);
63}
64
65static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
66{
67 pgtable_page_dtor(pte);
68 __free_page(pte);
69}
70
71#define __pte_free_tlb(tlb, pte, addr) \
72 do { \
73 pgtable_page_dtor(pte); \
74 tlb_remove_page((tlb), (pte)); \
75 } while (0)
76
77#define check_pgt_cache() do { } while (0)
78
79#endif
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h
new file mode 100644
index 000000000000..1cd13d595198
--- /dev/null
+++ b/arch/metag/include/asm/pgtable.h
@@ -0,0 +1,370 @@
1/*
2 * Macros and functions to manipulate Meta page tables.
3 */
4
5#ifndef _METAG_PGTABLE_H
6#define _METAG_PGTABLE_H
7
8#include <asm-generic/pgtable-nopmd.h>
9
10/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
11#if PAGE_OFFSET >= LINGLOBAL_BASE
12#define CONSISTENT_START 0xF7000000
13#define CONSISTENT_END 0xF73FFFFF
14#define VMALLOC_START 0xF8000000
15#define VMALLOC_END 0xFFFEFFFF
16#else
17#define CONSISTENT_START 0x77000000
18#define CONSISTENT_END 0x773FFFFF
19#define VMALLOC_START 0x78000000
20#define VMALLOC_END 0x7FFFFFFF
21#endif
22
23/*
24 * Definitions for MMU descriptors
25 *
26 * These are the hardware bits in the MMCU pte entries.
27 * Derived from the Meta toolkit headers.
28 */
29#define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT
30#define _PAGE_WRITE MMCU_ENTRY_WR_BIT
31#define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT
32/* Write combine bit - this can cause writes to occur out of order */
33#define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT
34/* Sys coherent bit - this bit is never used by Linux */
35#define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT
36#define _PAGE_ALWAYS_ZERO_1 0x020
37#define _PAGE_CACHE_CTRL0 0x040
38#define _PAGE_CACHE_CTRL1 0x080
39#define _PAGE_ALWAYS_ZERO_2 0x100
40#define _PAGE_ALWAYS_ZERO_3 0x200
41#define _PAGE_ALWAYS_ZERO_4 0x400
42#define _PAGE_ALWAYS_ZERO_5 0x800
43
44/* These are software bits that we stuff into the gaps in the hardware
45 * pte entries that are not used. Note, these DO get stored in the actual
46 * hardware, but the hardware just does not use them.
47 */
48#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
49#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
50#define _PAGE_FILE _PAGE_ALWAYS_ZERO_3
51
52/* Pages owned, and protected by, the kernel. */
53#define _PAGE_KERNEL _PAGE_PRIV
54
55/* No cacheing of this page */
56#define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
57/* burst cacheing - good for data streaming */
58#define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
59/* One cache way per thread */
60#define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
61/* Full on cacheing */
62#define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
63
64#define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
65
66/* which bits are used for cache control ... */
67#define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
68 _PAGE_WR_COMBINE)
69
70/* This is a mask of the bits that pte_modify is allowed to change. */
71#define _PAGE_CHG_MASK (PAGE_MASK)
72
73#define _PAGE_SZ_SHIFT 1
74#define _PAGE_SZ_4K (0x0)
75#define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT)
76#define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT)
77#define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT)
78#define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT)
79#define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT)
80#define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT)
81#define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT)
82#define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT)
83#define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT)
84#define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT)
85#define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT)
86
87#if defined(CONFIG_PAGE_SIZE_4K)
88#define _PAGE_SZ (_PAGE_SZ_4K)
89#elif defined(CONFIG_PAGE_SIZE_8K)
90#define _PAGE_SZ (_PAGE_SZ_8K)
91#elif defined(CONFIG_PAGE_SIZE_16K)
92#define _PAGE_SZ (_PAGE_SZ_16K)
93#endif
94#define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT)
95
96#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
97# define _PAGE_SZHUGE (_PAGE_SZ_8K)
98#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
99# define _PAGE_SZHUGE (_PAGE_SZ_16K)
100#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
101# define _PAGE_SZHUGE (_PAGE_SZ_32K)
102#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
103# define _PAGE_SZHUGE (_PAGE_SZ_64K)
104#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
105# define _PAGE_SZHUGE (_PAGE_SZ_128K)
106#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
107# define _PAGE_SZHUGE (_PAGE_SZ_256K)
108#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
109# define _PAGE_SZHUGE (_PAGE_SZ_512K)
110#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
111# define _PAGE_SZHUGE (_PAGE_SZ_1M)
112#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
113# define _PAGE_SZHUGE (_PAGE_SZ_2M)
114#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
115# define _PAGE_SZHUGE (_PAGE_SZ_4M)
116#endif
117
118/*
119 * The Linux memory management assumes a three-level page table setup. On
120 * Meta, we use that, but "fold" the mid level into the top-level page
121 * table.
122 */
123
124/* PGDIR_SHIFT determines the size of the area a second-level page table can
125 * map. This is always 4MB.
126 */
127
128#define PGDIR_SHIFT 22
129#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
130#define PGDIR_MASK (~(PGDIR_SIZE-1))
131
132/*
133 * Entries per page directory level: we use a two-level, so
134 * we don't really have any PMD directory physically. First level tables
135 * always map 2Gb (local or global) at a granularity of 4MB, second-level
136 * tables map 4MB with a granularity between 4MB and 4kB (between 1 and
137 * 1024 entries).
138 */
139#define PTRS_PER_PTE (PGDIR_SIZE/PAGE_SIZE)
140#define HPTRS_PER_PTE (PGDIR_SIZE/HPAGE_SIZE)
141#define PTRS_PER_PGD 512
142
143#define USER_PTRS_PER_PGD 256
144#define FIRST_USER_ADDRESS META_MEMORY_BASE
145#define FIRST_USER_PGD_NR pgd_index(FIRST_USER_ADDRESS)
146
147#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
148 _PAGE_CACHEABLE)
149
150#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
151 _PAGE_ACCESSED | _PAGE_CACHEABLE)
152#define PAGE_SHARED_C PAGE_SHARED
153#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
154 _PAGE_CACHEABLE)
155#define PAGE_COPY_C PAGE_COPY
156
157#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
158 _PAGE_CACHEABLE)
159#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
160 _PAGE_ACCESSED | _PAGE_WRITE | \
161 _PAGE_CACHEABLE | _PAGE_KERNEL)
162
163#define __P000 PAGE_NONE
164#define __P001 PAGE_READONLY
165#define __P010 PAGE_COPY
166#define __P011 PAGE_COPY
167#define __P100 PAGE_READONLY
168#define __P101 PAGE_READONLY
169#define __P110 PAGE_COPY_C
170#define __P111 PAGE_COPY_C
171
172#define __S000 PAGE_NONE
173#define __S001 PAGE_READONLY
174#define __S010 PAGE_SHARED
175#define __S011 PAGE_SHARED
176#define __S100 PAGE_READONLY
177#define __S101 PAGE_READONLY
178#define __S110 PAGE_SHARED_C
179#define __S111 PAGE_SHARED_C
180
181#ifndef __ASSEMBLY__
182
183#include <asm/page.h>
184
185/* zero page used for uninitialized stuff */
186extern unsigned long empty_zero_page;
187#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
188
189/* Certain architectures need to do special things when pte's
190 * within a page table are directly modified. Thus, the following
191 * hook is made available.
192 */
193#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
194#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
195
196#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
197
198#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
199
200#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
201
202#define pte_none(x) (!pte_val(x))
203#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
204#define pte_clear(mm, addr, xp) do { pte_val(*(xp)) = 0; } while (0)
205
206#define pmd_none(x) (!pmd_val(x))
207#define pmd_bad(x) ((pmd_val(x) & ~(PAGE_MASK | _PAGE_SZ_MASK)) \
208 != (_PAGE_TABLE & ~_PAGE_SZ_MASK))
209#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
210#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
211
212#define pte_page(x) pfn_to_page(pte_pfn(x))
213
214/*
215 * The following only work if pte_present() is true.
216 * Undefined behaviour if not..
217 */
218
219static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
220static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
221static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
222static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
223static inline int pte_special(pte_t pte) { return 0; }
224
225static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= (~_PAGE_WRITE); return pte; }
226static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
227static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
228static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; }
229static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
230static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
231static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
232static inline pte_t pte_mkhuge(pte_t pte) { return pte; }
233
234/*
235 * Macro and implementation to make a page protection as uncacheable.
236 */
237#define pgprot_writecombine(prot) \
238 __pgprot(pgprot_val(prot) & ~(_PAGE_CACHE_CTRL1 | _PAGE_CACHE_CTRL0))
239
240#define pgprot_noncached(prot) \
241 __pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE)
242
243
244/*
245 * Conversion functions: convert a page and protection to a page entry,
246 * and a page entry and page directory to the page they refer to.
247 */
248
249#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
250
251static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
252{
253 pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
254 return pte;
255}
256
257static inline unsigned long pmd_page_vaddr(pmd_t pmd)
258{
259 unsigned long paddr = pmd_val(pmd) & PAGE_MASK;
260 if (!paddr)
261 return 0;
262 return (unsigned long)__va(paddr);
263}
264
265#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
266#define pmd_page_shift(pmd) (12 + ((pmd_val(pmd) & _PAGE_SZ_MASK) \
267 >> _PAGE_SZ_SHIFT))
268#define pmd_num_ptrs(pmd) (PGDIR_SIZE >> pmd_page_shift(pmd))
269
270/*
271 * Each pgd is only 2k, mapping 2Gb (local or global). If we're in global
272 * space drop the top bit before indexing the pgd.
273 */
274#if PAGE_OFFSET >= LINGLOBAL_BASE
275#define pgd_index(address) ((((address) & ~0x80000000) >> PGDIR_SHIFT) \
276 & (PTRS_PER_PGD-1))
277#else
278#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
279#endif
280
281#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
282
283#define pgd_offset_k(address) pgd_offset(&init_mm, address)
284
285#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
286
287/* Find an entry in the second-level page table.. */
288#if !defined(CONFIG_HUGETLB_PAGE)
289 /* all pages are of size (1 << PAGE_SHIFT), so no need to read 1st level pt */
290# define pte_index(pmd, address) \
291 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
292#else
293 /* some pages are huge, so read 1st level pt to find out */
294# define pte_index(pmd, address) \
295 (((address) >> pmd_page_shift(pmd)) & (pmd_num_ptrs(pmd) - 1))
296#endif
297#define pte_offset_kernel(dir, address) \
298 ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(*(dir), address))
299#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
300#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
301
302#define pte_unmap(pte) do { } while (0)
303#define pte_unmap_nested(pte) do { } while (0)
304
305#define pte_ERROR(e) \
306 pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
307#define pgd_ERROR(e) \
308 pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
309
310/*
311 * Meta doesn't have any external MMU info: the kernel page
312 * tables contain all the necessary information.
313 */
314static inline void update_mmu_cache(struct vm_area_struct *vma,
315 unsigned long address, pte_t *pte)
316{
317}
318
319/*
320 * Encode and decode a swap entry (must be !pte_none(e) && !pte_present(e))
321 * Since PAGE_PRESENT is bit 1, we can use the bits above that.
322 */
323#define __swp_type(x) (((x).val >> 1) & 0xff)
324#define __swp_offset(x) ((x).val >> 10)
325#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | \
326 ((offset) << 10) })
327#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
328#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
329
330#define PTE_FILE_MAX_BITS 22
331#define pte_to_pgoff(x) (pte_val(x) >> 10)
332#define pgoff_to_pte(x) __pte(((x) << 10) | _PAGE_FILE)
333
334#define kern_addr_valid(addr) (1)
335
336#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
337 remap_pfn_range(vma, vaddr, pfn, size, prot)
338
339/*
340 * No page table caches to initialise
341 */
342#define pgtable_cache_init() do { } while (0)
343
344extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
345void paging_init(unsigned long mem_end);
346
347#ifdef CONFIG_METAG_META12
348/* This is a workaround for an issue in Meta 1 cores. These cores cache
349 * invalid entries in the TLB so we always need to flush whenever we add
350 * a new pte. Unfortunately we can only flush the whole TLB not shoot down
351 * single entries so this is sub-optimal. This implementation ensures that
352 * we will get a flush at the second attempt, so we may still get repeated
353 * faults, we just don't overflow the kernel stack handling them.
354 */
355#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
356#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
357({ \
358 int __changed = !pte_same(*(__ptep), __entry); \
359 if (__changed) { \
360 set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
361 } \
362 flush_tlb_page(__vma, __address); \
363 __changed; \
364})
365#endif
366
367#include <asm-generic/pgtable.h>
368
369#endif /* __ASSEMBLY__ */
370#endif /* _METAG_PGTABLE_H */
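
The swap-entry encoding defined in pgtable.h above keeps the swap type in bits 1-8 and the offset from bit 10 upward, leaving the present bit (bit 0) clear. A quick standalone round-trip check of that layout:

#include <stdio.h>

static unsigned long swp_entry(unsigned long type, unsigned long offset)
{
	return (type << 1) | (offset << 10);
}

static unsigned long swp_type(unsigned long val)   { return (val >> 1) & 0xff; }
static unsigned long swp_offset(unsigned long val) { return val >> 10; }

int main(void)
{
	unsigned long e = swp_entry(3, 0x1234);

	/* Bit 0 stays clear, so pte_present() is false for a swap entry. */
	printf("entry 0x%08lx -> type %lu, offset 0x%lx\n",
	       e, swp_type(e), swp_offset(e));
	return 0;
}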
diff --git a/arch/metag/mm/extable.c b/arch/metag/mm/extable.c
new file mode 100644
index 000000000000..2a21eaebe84d
--- /dev/null
+++ b/arch/metag/mm/extable.c
@@ -0,0 +1,15 @@
1
2#include <linux/module.h>
3#include <linux/uaccess.h>
4
5int fixup_exception(struct pt_regs *regs)
6{
7 const struct exception_table_entry *fixup;
8 unsigned long pc = instruction_pointer(regs);
9
10 fixup = search_exception_tables(pc);
11 if (fixup)
12 regs->ctx.CurrPC = fixup->fixup;
13
14 return fixup != NULL;
15}
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
new file mode 100644
index 000000000000..2c75bf7357c5
--- /dev/null
+++ b/arch/metag/mm/fault.c
@@ -0,0 +1,239 @@
1/*
2 * Meta page fault handling.
3 *
4 * Copyright (C) 2005-2012 Imagination Technologies Ltd.
5 */
6
7#include <linux/mman.h>
8#include <linux/mm.h>
9#include <linux/kernel.h>
10#include <linux/ptrace.h>
11#include <linux/interrupt.h>
12#include <linux/uaccess.h>
13
14#include <asm/tlbflush.h>
15#include <asm/mmu.h>
16#include <asm/traps.h>
17
18/* Clear any pending catch buffer state. */
19static void clear_cbuf_entry(struct pt_regs *regs, unsigned long addr,
20 unsigned int trapno)
21{
22 PTBICTXEXTCB0 cbuf = regs->extcb0;
23
24 switch (trapno) {
25 /* Instruction fetch faults leave no catch buffer state. */
26 case TBIXXF_SIGNUM_IGF:
27 case TBIXXF_SIGNUM_IPF:
28 return;
29 default:
30 if (cbuf[0].CBAddr == addr) {
31 cbuf[0].CBAddr = 0;
32 cbuf[0].CBFlags &= ~TXCATCH0_FAULT_BITS;
33
34 /* And, as this is the ONLY catch entry, we
35 * need to clear the cbuf bit from the context!
36 */
37 regs->ctx.SaveMask &= ~(TBICTX_CBUF_BIT |
38 TBICTX_XCBF_BIT);
39
40 return;
41 }
42 pr_err("Failed to clear cbuf entry!\n");
43 }
44}
45
46int show_unhandled_signals = 1;
47
48int do_page_fault(struct pt_regs *regs, unsigned long address,
49 unsigned int write_access, unsigned int trapno)
50{
51 struct task_struct *tsk;
52 struct mm_struct *mm;
53 struct vm_area_struct *vma, *prev_vma;
54 siginfo_t info;
55 int fault;
56 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
57 (write_access ? FAULT_FLAG_WRITE : 0);
58
59 tsk = current;
60
61 if ((address >= VMALLOC_START) && (address < VMALLOC_END)) {
62 /*
63 * Synchronize this task's top level page-table
64 * with the 'reference' page table.
65 *
66 * Do _not_ use "tsk" here. We might be inside
67 * an interrupt in the middle of a task switch..
68 */
69 int offset = pgd_index(address);
70 pgd_t *pgd, *pgd_k;
71 pud_t *pud, *pud_k;
72 pmd_t *pmd, *pmd_k;
73 pte_t *pte_k;
74
75 pgd = ((pgd_t *)mmu_get_base()) + offset;
76 pgd_k = swapper_pg_dir + offset;
77
78 /* This will never happen with the folded page table. */
79 if (!pgd_present(*pgd)) {
80 if (!pgd_present(*pgd_k))
81 goto bad_area_nosemaphore;
82 set_pgd(pgd, *pgd_k);
83 return 0;
84 }
85
86 pud = pud_offset(pgd, address);
87 pud_k = pud_offset(pgd_k, address);
88 if (!pud_present(*pud_k))
89 goto bad_area_nosemaphore;
90 set_pud(pud, *pud_k);
91
92 pmd = pmd_offset(pud, address);
93 pmd_k = pmd_offset(pud_k, address);
94 if (!pmd_present(*pmd_k))
95 goto bad_area_nosemaphore;
96 set_pmd(pmd, *pmd_k);
97
98 pte_k = pte_offset_kernel(pmd_k, address);
99 if (!pte_present(*pte_k))
100 goto bad_area_nosemaphore;
101
102 /* May only be needed on Chorus2 */
103 flush_tlb_all();
104 return 0;
105 }
106
107 mm = tsk->mm;
108
109 if (in_atomic() || !mm)
110 goto no_context;
111
112retry:
113 down_read(&mm->mmap_sem);
114
115 vma = find_vma_prev(mm, address, &prev_vma);
116
117 if (!vma || address < vma->vm_start)
118 goto check_expansion;
119
120good_area:
121 if (write_access) {
122 if (!(vma->vm_flags & VM_WRITE))
123 goto bad_area;
124 } else {
125 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
126 goto bad_area;
127 }
128
129 /*
130 * If for any reason at all we couldn't handle the fault,
131 * make sure we exit gracefully rather than endlessly redo
132 * the fault.
133 */
134 fault = handle_mm_fault(mm, vma, address, flags);
135
136 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
137 return 0;
138
139 if (unlikely(fault & VM_FAULT_ERROR)) {
140 if (fault & VM_FAULT_OOM)
141 goto out_of_memory;
142 else if (fault & VM_FAULT_SIGBUS)
143 goto do_sigbus;
144 BUG();
145 }
146 if (flags & FAULT_FLAG_ALLOW_RETRY) {
147 if (fault & VM_FAULT_MAJOR)
148 tsk->maj_flt++;
149 else
150 tsk->min_flt++;
151 if (fault & VM_FAULT_RETRY) {
152 flags &= ~FAULT_FLAG_ALLOW_RETRY;
153 flags |= FAULT_FLAG_TRIED;
154
155 /*
156 * No need to up_read(&mm->mmap_sem) as we would
157 * have already released it in __lock_page_or_retry
158 * in mm/filemap.c.
159 */
160
161 goto retry;
162 }
163 }
164
165 up_read(&mm->mmap_sem);
166 return 0;
167
168check_expansion:
169 vma = prev_vma;
170 if (vma && (expand_stack(vma, address) == 0))
171 goto good_area;
172
173bad_area:
174 up_read(&mm->mmap_sem);
175
176bad_area_nosemaphore:
177 if (user_mode(regs)) {
178 info.si_signo = SIGSEGV;
179 info.si_errno = 0;
180 info.si_code = SEGV_MAPERR;
181 info.si_addr = (__force void __user *)address;
182 info.si_trapno = trapno;
183
184 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
185 printk_ratelimit()) {
186 pr_info("%s%s[%d]: segfault at %lx pc %08x sp %08x write %d trap %#x (%s)",
187 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
188 tsk->comm, task_pid_nr(tsk), address,
189 regs->ctx.CurrPC, regs->ctx.AX[0].U0,
190 write_access, trapno, trap_name(trapno));
191 print_vma_addr(" in ", regs->ctx.CurrPC);
192 print_vma_addr(" rtp in ", regs->ctx.DX[4].U1);
193 printk("\n");
194 show_regs(regs);
195 }
196 force_sig_info(SIGSEGV, &info, tsk);
197 return 1;
198 }
199 goto no_context;
200
201do_sigbus:
202 up_read(&mm->mmap_sem);
203
204 /*
205 * Send a sigbus, regardless of whether we were in kernel
206 * or user mode.
207 */
208 info.si_signo = SIGBUS;
209 info.si_errno = 0;
210 info.si_code = BUS_ADRERR;
211 info.si_addr = (__force void __user *)address;
212 info.si_trapno = trapno;
213 force_sig_info(SIGBUS, &info, tsk);
214
215 /* Kernel mode? Handle exceptions or die */
216 if (!user_mode(regs))
217 goto no_context;
218
219 return 1;
220
221 /*
222 * We ran out of memory, or some other thing happened to us that made
223 * us unable to handle the page fault gracefully.
224 */
225out_of_memory:
226 up_read(&mm->mmap_sem);
227 if (user_mode(regs))
228 do_group_exit(SIGKILL);
229
230no_context:
231 /* Are we prepared to handle this kernel fault? */
232 if (fixup_exception(regs)) {
233 clear_cbuf_entry(regs, address, trapno);
234 return 1;
235 }
236
237 die("Oops", regs, (write_access << 15) | trapno, address);
238 do_exit(SIGKILL);
239}
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c
new file mode 100644
index 000000000000..514376d90db4
--- /dev/null
+++ b/arch/metag/mm/init.c
@@ -0,0 +1,448 @@
1/*
2 * Copyright (C) 2005,2006,2007,2008,2009,2010 Imagination Technologies
3 *
4 */
5
6#include <linux/mm.h>
7#include <linux/swap.h>
8#include <linux/init.h>
9#include <linux/bootmem.h>
10#include <linux/pagemap.h>
11#include <linux/percpu.h>
12#include <linux/memblock.h>
13#include <linux/initrd.h>
14#include <linux/of_fdt.h>
15
16#include <asm/setup.h>
17#include <asm/page.h>
18#include <asm/pgalloc.h>
19#include <asm/mmu.h>
20#include <asm/mmu_context.h>
21#include <asm/sections.h>
22#include <asm/tlb.h>
23#include <asm/user_gateway.h>
24#include <asm/mmzone.h>
25#include <asm/fixmap.h>
26
27unsigned long pfn_base;
28
29pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data;
30
31unsigned long empty_zero_page;
32
33extern char __user_gateway_start;
34extern char __user_gateway_end;
35
36void *gateway_page;
37
38/*
39 * Insert the gateway page into a set of page tables, creating the
40 * page tables if necessary.
41 */
42static void insert_gateway_page(pgd_t *pgd, unsigned long address)
43{
44 pud_t *pud;
45 pmd_t *pmd;
46 pte_t *pte;
47
48 BUG_ON(!pgd_present(*pgd));
49
50 pud = pud_offset(pgd, address);
51 BUG_ON(!pud_present(*pud));
52
53 pmd = pmd_offset(pud, address);
54 if (!pmd_present(*pmd)) {
55 pte = alloc_bootmem_pages(PAGE_SIZE);
56 set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
57 }
58
59 pte = pte_offset_kernel(pmd, address);
60 set_pte(pte, pfn_pte(__pa(gateway_page) >> PAGE_SHIFT, PAGE_READONLY));
61}
62
63/* Alloc and map a page in a known location accessible to userspace. */
64static void __init user_gateway_init(void)
65{
66 unsigned long address = USER_GATEWAY_PAGE;
67 int offset = pgd_index(address);
68 pgd_t *pgd;
69
70 gateway_page = alloc_bootmem_pages(PAGE_SIZE);
71
72 pgd = swapper_pg_dir + offset;
73 insert_gateway_page(pgd, address);
74
75#ifdef CONFIG_METAG_META12
76 /*
77 * Insert the gateway page into our current page tables even
78 * though we've already inserted it into our reference page
79 * table (swapper_pg_dir). This is because with a META1 mmu we
80 * copy just the user address range and not the gateway page
81 * entry on context switch, see switch_mmu().
82 */
83 pgd = (pgd_t *)mmu_get_base() + offset;
84 insert_gateway_page(pgd, address);
85#endif /* CONFIG_METAG_META12 */
86
87 BUG_ON((&__user_gateway_end - &__user_gateway_start) > PAGE_SIZE);
88
89 gateway_page += (address & ~PAGE_MASK);
90
91 memcpy(gateway_page, &__user_gateway_start,
92 &__user_gateway_end - &__user_gateway_start);
93
94 /*
95 * We don't need to flush the TLB here, there should be no mapping
96 * present at boot for this address and only valid mappings are in
97 * the TLB (apart from on Meta 1.x, but those cached invalid
98 * mappings should be impossible to hit here).
99 *
100 * We don't flush the code cache here even though we have written
101 * code through the data cache and they may not be coherent. At
102 * this point we assume there is no stale data in the code cache
103 * for this address so there is no need to flush.
104 */
105}
106
107static void __init allocate_pgdat(unsigned int nid)
108{
109 unsigned long start_pfn, end_pfn;
110#ifdef CONFIG_NEED_MULTIPLE_NODES
111 unsigned long phys;
112#endif
113
114 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
115
116#ifdef CONFIG_NEED_MULTIPLE_NODES
117 phys = __memblock_alloc_base(sizeof(struct pglist_data),
118 SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
119 /* Retry with all of system memory */
120 if (!phys)
121 phys = __memblock_alloc_base(sizeof(struct pglist_data),
122 SMP_CACHE_BYTES,
123 memblock_end_of_DRAM());
124 if (!phys)
125 panic("Can't allocate pgdat for node %d\n", nid);
126
127 NODE_DATA(nid) = __va(phys);
128 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
129
130 NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
131#endif
132
133 NODE_DATA(nid)->node_start_pfn = start_pfn;
134 NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
135}
136
137static void __init bootmem_init_one_node(unsigned int nid)
138{
139 unsigned long total_pages, paddr;
140 unsigned long end_pfn;
141 struct pglist_data *p;
142
143 p = NODE_DATA(nid);
144
145 /* Nothing to do.. */
146 if (!p->node_spanned_pages)
147 return;
148
149 end_pfn = p->node_start_pfn + p->node_spanned_pages;
150#ifdef CONFIG_HIGHMEM
151 if (end_pfn > max_low_pfn)
152 end_pfn = max_low_pfn;
153#endif
154
155 total_pages = bootmem_bootmap_pages(end_pfn - p->node_start_pfn);
156
157 paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
158 if (!paddr)
159 panic("Can't allocate bootmap for nid[%d]\n", nid);
160
161 init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
162
163 free_bootmem_with_active_regions(nid, end_pfn);
164
165 /*
166 * XXX Handle initial reservations for the system memory node
167 * only for the moment, we'll refactor this later for handling
168 * reservations in other nodes.
169 */
170 if (nid == 0) {
171 struct memblock_region *reg;
172
173 /* Reserve the sections we're already using. */
174 for_each_memblock(reserved, reg) {
175 unsigned long size = reg->size;
176
177#ifdef CONFIG_HIGHMEM
178 /* ...but not highmem */
179 if (PFN_DOWN(reg->base) >= highstart_pfn)
180 continue;
181
182 if (PFN_UP(reg->base + size) > highstart_pfn)
183 size = (highstart_pfn - PFN_DOWN(reg->base))
184 << PAGE_SHIFT;
185#endif
186
187 reserve_bootmem(reg->base, size, BOOTMEM_DEFAULT);
188 }
189 }
190
191 sparse_memory_present_with_active_regions(nid);
192}
193
194static void __init do_init_bootmem(void)
195{
196 struct memblock_region *reg;
197 int i;
198
199 /* Add active regions with valid PFNs. */
200 for_each_memblock(memory, reg) {
201 unsigned long start_pfn, end_pfn;
202 start_pfn = memblock_region_memory_base_pfn(reg);
203 end_pfn = memblock_region_memory_end_pfn(reg);
204 memblock_set_node(PFN_PHYS(start_pfn),
205 PFN_PHYS(end_pfn - start_pfn), 0);
206 }
207
208 /* All of system RAM sits in node 0 for the non-NUMA case */
209 allocate_pgdat(0);
210 node_set_online(0);
211
212 soc_mem_setup();
213
214 for_each_online_node(i)
215 bootmem_init_one_node(i);
216
217 sparse_init();
218}
219
220extern char _heap_start[];
221
222static void __init init_and_reserve_mem(void)
223{
224 unsigned long start_pfn, heap_start;
225 u64 base = min_low_pfn << PAGE_SHIFT;
226 u64 size = (max_low_pfn << PAGE_SHIFT) - base;
227
228 heap_start = (unsigned long) &_heap_start;
229
230 memblock_add(base, size);
231
232 /*
233 * Partially used pages are not usable - thus
234 * we are rounding upwards:
235 */
236 start_pfn = PFN_UP(__pa(heap_start));
237
238 /*
239 * Reserve the kernel text.
240 */
241 memblock_reserve(base, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - base);
242
243#ifdef CONFIG_HIGHMEM
244 /*
245 * Add & reserve highmem, so page structures are initialised.
246 */
247 base = highstart_pfn << PAGE_SHIFT;
248 size = (highend_pfn << PAGE_SHIFT) - base;
249 if (size) {
250 memblock_add(base, size);
251 memblock_reserve(base, size);
252 }
253#endif
254}
255
256#ifdef CONFIG_HIGHMEM
257/*
258 * Ensure we have allocated page tables in swapper_pg_dir for the
259 * fixed mappings range from 'start' to 'end'.
260 */
261static void __init allocate_pgtables(unsigned long start, unsigned long end)
262{
263 pgd_t *pgd;
264 pmd_t *pmd;
265 pte_t *pte;
266 int i, j;
267 unsigned long vaddr;
268
269 vaddr = start;
270 i = pgd_index(vaddr);
271 j = pmd_index(vaddr);
272 pgd = swapper_pg_dir + i;
273
274 for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
275 pmd = (pmd_t *)pgd;
276 for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
277 vaddr += PMD_SIZE;
278
279 if (!pmd_none(*pmd))
280 continue;
281
282 pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
283 pmd_populate_kernel(&init_mm, pmd, pte);
284 }
285 j = 0;
286 }
287}
288
289static void __init fixedrange_init(void)
290{
291 unsigned long vaddr, end;
292 pgd_t *pgd;
293 pud_t *pud;
294 pmd_t *pmd;
295 pte_t *pte;
296
297 /*
298 * Fixed mappings:
299 */
300 vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
301 end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
302 allocate_pgtables(vaddr, end);
303
304 /*
305 * Permanent kmaps:
306 */
307 vaddr = PKMAP_BASE;
308 allocate_pgtables(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP);
309
310 pgd = swapper_pg_dir + pgd_index(vaddr);
311 pud = pud_offset(pgd, vaddr);
312 pmd = pmd_offset(pud, vaddr);
313 pte = pte_offset_kernel(pmd, vaddr);
314 pkmap_page_table = pte;
315}
316#endif /* CONFIG_HIGHMEM */
317
318/*
319 * paging_init() continues the virtual memory environment setup which
320 * was begun by the code in arch/metag/kernel/setup.c.
321 */
322void __init paging_init(unsigned long mem_end)
323{
324 unsigned long max_zone_pfns[MAX_NR_ZONES];
325 int nid;
326
327 init_and_reserve_mem();
328
329 memblock_allow_resize();
330
331 memblock_dump_all();
332
333 nodes_clear(node_online_map);
334
335 init_new_context(&init_task, &init_mm);
336
337 memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
338
339 do_init_bootmem();
340 mmu_init(mem_end);
341
342#ifdef CONFIG_HIGHMEM
343 fixedrange_init();
344 kmap_init();
345#endif
346
347 /* Initialize the zero page to a bootmem page, already zeroed. */
348 empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
349
350 user_gateway_init();
351
352 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
353
354 for_each_online_node(nid) {
355 pg_data_t *pgdat = NODE_DATA(nid);
356 unsigned long low, start_pfn;
357
358 start_pfn = pgdat->bdata->node_min_pfn;
359 low = pgdat->bdata->node_low_pfn;
360
361 if (max_zone_pfns[ZONE_NORMAL] < low)
362 max_zone_pfns[ZONE_NORMAL] = low;
363
364#ifdef CONFIG_HIGHMEM
365 max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
366#endif
367 pr_info("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
368 nid, start_pfn, low);
369 }
370
371 free_area_init_nodes(max_zone_pfns);
372}
373
374void __init mem_init(void)
375{
376 int nid;
377
378#ifdef CONFIG_HIGHMEM
379 unsigned long tmp;
380 for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
381 struct page *page = pfn_to_page(tmp);
382 ClearPageReserved(page);
383 init_page_count(page);
384 __free_page(page);
385 totalhigh_pages++;
386 }
387 totalram_pages += totalhigh_pages;
388 num_physpages += totalhigh_pages;
389#endif /* CONFIG_HIGHMEM */
390
391 for_each_online_node(nid) {
392 pg_data_t *pgdat = NODE_DATA(nid);
393 unsigned long node_pages = 0;
394
395 num_physpages += pgdat->node_present_pages;
396
397 if (pgdat->node_spanned_pages)
398 node_pages = free_all_bootmem_node(pgdat);
399
400 totalram_pages += node_pages;
401 }
402
403 pr_info("Memory: %luk/%luk available\n",
404 (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
405 num_physpages << (PAGE_SHIFT - 10));
406
407 show_mem(0);
408
409 return;
410}
411
412static void free_init_pages(char *what, unsigned long begin, unsigned long end)
413{
414 unsigned long addr;
415
416 for (addr = begin; addr < end; addr += PAGE_SIZE) {
417 ClearPageReserved(virt_to_page(addr));
418 init_page_count(virt_to_page(addr));
419 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
420 free_page(addr);
421 totalram_pages++;
422 }
423 pr_info("Freeing %s: %luk freed\n", what, (end - begin) >> 10);
424}
425
426void free_initmem(void)
427{
428 free_init_pages("unused kernel memory",
429 (unsigned long)(&__init_begin),
430 (unsigned long)(&__init_end));
431}
432
433#ifdef CONFIG_BLK_DEV_INITRD
434void free_initrd_mem(unsigned long start, unsigned long end)
435{
436 end = end & PAGE_MASK;
437 free_init_pages("initrd memory", start, end);
438}
439#endif
440
441#ifdef CONFIG_OF_FLATTREE
442void __init early_init_dt_setup_initrd_arch(unsigned long start,
443 unsigned long end)
444{
445 pr_err("%s(%lx, %lx)\n",
446 __func__, start, end);
447}
448#endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/metag/mm/mmu-meta1.c b/arch/metag/mm/mmu-meta1.c
new file mode 100644
index 000000000000..91f4255bcb5c
--- /dev/null
+++ b/arch/metag/mm/mmu-meta1.c
@@ -0,0 +1,157 @@
1/*
2 * Copyright (C) 2005,2006,2007,2008,2009 Imagination Technologies
3 *
4 * Meta 1 MMU handling code.
5 *
6 */
7
8#include <linux/sched.h>
9#include <linux/mm.h>
10#include <linux/io.h>
11
12#include <asm/mmu.h>
13
14#define DM3_BASE (LINSYSDIRECT_BASE + (MMCU_DIRECTMAPn_ADDR_SCALE * 3))
15
16/*
17 * This contains the physical address of the top level 2k pgd table.
18 */
19static unsigned long mmu_base_phys;
20
21/*
22 * Given a physical address, return a mapped virtual address that can be used
23 * to access that location.
24 * In practice, we use the DirectMap region to make this happen.
25 */
26static unsigned long map_addr(unsigned long phys)
27{
28 static unsigned long dm_base = 0xFFFFFFFF;
29 int offset;
30
31 offset = phys - dm_base;
32
33 /* Are we in the current map range ? */
34 if ((offset < 0) || (offset >= MMCU_DIRECTMAPn_ADDR_SCALE)) {
35 /* Calculate new DM area */
36 dm_base = phys & ~(MMCU_DIRECTMAPn_ADDR_SCALE - 1);
37
38 /* Actually map it in! */
39 metag_out32(dm_base, MMCU_DIRECTMAP3_ADDR);
40
41 /* And calculate how far into that area our reference is */
42 offset = phys - dm_base;
43 }
44
45 return DM3_BASE + offset;
46}
47
48/*
49 * Return the physical address of the base of our pgd table.
50 */
51static inline unsigned long __get_mmu_base(void)
52{
53 unsigned long base_phys;
54 unsigned int stride;
55
56 if (is_global_space(PAGE_OFFSET))
57 stride = 4;
58 else
59 stride = hard_processor_id(); /* [0..3] */
60
61 base_phys = metag_in32(MMCU_TABLE_PHYS_ADDR);
62 base_phys += (0x800 * stride);
63
64 return base_phys;
65}
66
67/* Given a virtual address, return the virtual address of the relevant pgd */
68static unsigned long pgd_entry_addr(unsigned long virt)
69{
70 unsigned long pgd_phys;
71 unsigned long pgd_virt;
72
73 if (!mmu_base_phys)
74 mmu_base_phys = __get_mmu_base();
75
76 /*
77 * Are we trying to map a global address. If so, then index
78 * the global pgd table instead of our local one.
79 */
80 if (is_global_space(virt)) {
81 /* Scale into 2gig map */
82 virt &= ~0x80000000;
83 }
84
85 /* Base of the pgd table plus our 4Meg entry, 4bytes each */
86 pgd_phys = mmu_base_phys + ((virt >> PGDIR_SHIFT) * 4);
87
88 pgd_virt = map_addr(pgd_phys);
89
90 return pgd_virt;
91}
92
93/* Given a virtual address, return the virtual address of the relevant pte */
94static unsigned long pgtable_entry_addr(unsigned long virt)
95{
96 unsigned long pgtable_phys;
97 unsigned long pgtable_virt, pte_virt;
98
99 /* Find the physical address of the 4MB page table*/
100 pgtable_phys = metag_in32(pgd_entry_addr(virt)) & MMCU_ENTRY_ADDR_BITS;
101
102 /* Map it to a virtual address */
103 pgtable_virt = map_addr(pgtable_phys);
104
105 /* And index into it for our pte */
106 pte_virt = pgtable_virt + ((virt >> PAGE_SHIFT) & 0x3FF) * 4;
107
108 return pte_virt;
109}
110
111unsigned long mmu_read_first_level_page(unsigned long vaddr)
112{
113 return metag_in32(pgd_entry_addr(vaddr));
114}
115
116unsigned long mmu_read_second_level_page(unsigned long vaddr)
117{
118 return metag_in32(pgtable_entry_addr(vaddr));
119}
120
121unsigned long mmu_get_base(void)
122{
123 static unsigned long __base;
124
125 /* Find the base of our MMU pgd table */
126 if (!__base)
127 __base = pgd_entry_addr(0);
128
129 return __base;
130}
131
132void __init mmu_init(unsigned long mem_end)
133{
134 unsigned long entry, addr;
135 pgd_t *p_swapper_pg_dir;
136
137 /*
138 * Now copy over any MMU pgd entries already in the mmu page tables
139 * over to our root init process (swapper_pg_dir) map. This map is
140 * then inherited by all other processes, which means all processes
141 * inherit a map of the kernel space.
142 */
143 addr = PAGE_OFFSET;
144 entry = pgd_index(PAGE_OFFSET);
145 p_swapper_pg_dir = pgd_offset_k(0) + entry;
146
147 while (addr <= META_MEMORY_LIMIT) {
148 unsigned long pgd_entry;
149 /* copy over the current MMU value */
150 pgd_entry = mmu_read_first_level_page(addr);
151 pgd_val(*p_swapper_pg_dir) = pgd_entry;
152
153 p_swapper_pg_dir++;
154 addr += PGDIR_SIZE;
155 entry++;
156 }
157}
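
map_addr() above works by pointing a movable DirectMap window at the aligned physical base and adding the remaining offset. A simplified standalone sketch; the window base and size are example values, and the dm_base caching and the register write are elided.

#include <stdio.h>

#define DM3_BASE_EX    0x06000000UL  /* example virtual base of window 3 */
#define DM_WINDOW_EX   0x01000000UL  /* example MMCU_DIRECTMAPn_ADDR_SCALE */

/* Map a physical address through a movable direct-map window: align the
 * window to the physical base, then add the offset within the window. */
static unsigned long model_map_addr(unsigned long phys)
{
	unsigned long dm_base = phys & ~(DM_WINDOW_EX - 1);
	/* the kernel version writes dm_base to MMCU_DIRECTMAP3_ADDR here */
	return DM3_BASE_EX + (phys - dm_base);
}

int main(void)
{
	printf("phys 0x%08lx -> virt 0x%08lx\n",
	       0x20123450UL, model_map_addr(0x20123450UL));
	return 0;
}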
diff --git a/arch/metag/mm/mmu-meta2.c b/arch/metag/mm/mmu-meta2.c
new file mode 100644
index 000000000000..81dcbb0bba34
--- /dev/null
+++ b/arch/metag/mm/mmu-meta2.c
@@ -0,0 +1,207 @@
1/*
2 * Copyright (C) 2008,2009,2010,2011 Imagination Technologies Ltd.
3 *
4 * Meta 2 enhanced mode MMU handling code.
5 *
6 */
7
8#include <linux/mm.h>
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/io.h>
12#include <linux/bootmem.h>
13#include <linux/syscore_ops.h>
14
15#include <asm/mmu.h>
16#include <asm/mmu_context.h>
17
18unsigned long mmu_read_first_level_page(unsigned long vaddr)
19{
20 unsigned int cpu = hard_processor_id();
21 unsigned long offset, linear_base, linear_limit;
22 unsigned int phys0;
23 pgd_t *pgd, entry;
24
25 if (is_global_space(vaddr))
26 vaddr &= ~0x80000000;
27
28 offset = vaddr >> PGDIR_SHIFT;
29
30 phys0 = metag_in32(mmu_phys0_addr(cpu));
31
32 /* Top bit of linear base is always zero. */
33 linear_base = (phys0 >> PGDIR_SHIFT) & 0x1ff;
34
35 /* Limit in the range 0 (4MB) to 9 (2GB). */
36 linear_limit = 1 << ((phys0 >> 8) & 0xf);
37 linear_limit += linear_base;
38
39 /*
40 * If offset is below linear base or above the limit then no
41 * mapping exists.
42 */
43 if (offset < linear_base || offset > linear_limit)
44 return 0;
45
46 offset -= linear_base;
47 pgd = (pgd_t *)mmu_get_base();
48 entry = pgd[offset];
49
50 return pgd_val(entry);
51}
52
53unsigned long mmu_read_second_level_page(unsigned long vaddr)
54{
55 return __builtin_meta2_cacherd((void *)(vaddr & PAGE_MASK));
56}
57
58unsigned long mmu_get_base(void)
59{
60 unsigned int cpu = hard_processor_id();
61 unsigned long stride;
62
63 stride = cpu * LINSYSMEMTnX_STRIDE;
64
65 /*
66 * Bits 18:2 of the MMCU_TnLocal_TABLE_PHYS1 register should be
67 * used as an offset to the start of the top-level pgd table.
68 */
69 stride += (metag_in32(mmu_phys1_addr(cpu)) & 0x7fffc);
70
71 if (is_global_space(PAGE_OFFSET))
72 stride += LINSYSMEMTXG_OFFSET;
73
74 return LINSYSMEMT0L_BASE + stride;
75}
76
77#define FIRST_LEVEL_MASK 0xffffffc0
78#define SECOND_LEVEL_MASK 0xfffff000
79#define SECOND_LEVEL_ALIGN 64
80
81static void repriv_mmu_tables(void)
82{
83 unsigned long phys0_addr;
84 unsigned int g;
85
86 /*
87 * Check that all the mmu table regions are priv protected, and if not
88 * fix them and emit a warning. If we left them without priv protection
89 * then userland processes would have access to a 2M window into
90 * physical memory near where the page tables are.
91 */
92 phys0_addr = MMCU_T0LOCAL_TABLE_PHYS0;
93 for (g = 0; g < 2; ++g) {
94 unsigned int t, phys0;
95 unsigned long flags;
96 for (t = 0; t < 4; ++t) {
97 __global_lock2(flags);
98 phys0 = metag_in32(phys0_addr);
99 if ((phys0 & _PAGE_PRESENT) && !(phys0 & _PAGE_PRIV)) {
100 pr_warn("Fixing priv protection on T%d %s MMU table region\n",
101 t,
102 g ? "global" : "local");
103 phys0 |= _PAGE_PRIV;
104 metag_out32(phys0, phys0_addr);
105 }
106 __global_unlock2(flags);
107
108 phys0_addr += MMCU_TnX_TABLE_PHYSX_STRIDE;
109 }
110
111 phys0_addr += MMCU_TXG_TABLE_PHYSX_OFFSET
112 - 4*MMCU_TnX_TABLE_PHYSX_STRIDE;
113 }
114}
115
116#ifdef CONFIG_METAG_SUSPEND_MEM
117static void mmu_resume(void)
118{
119 /*
120 * If a full suspend to RAM has happened then the original bad MMU table
121 * priv may have been restored, so repriv them again.
122 */
123 repriv_mmu_tables();
124}
125#else
126#define mmu_resume NULL
127#endif /* CONFIG_METAG_SUSPEND_MEM */
128
129static struct syscore_ops mmu_syscore_ops = {
130 .resume = mmu_resume,
131};
132
133void __init mmu_init(unsigned long mem_end)
134{
135 unsigned long entry, addr;
136 pgd_t *p_swapper_pg_dir;
137#ifdef CONFIG_KERNEL_4M_PAGES
138 unsigned long mem_size = mem_end - PAGE_OFFSET;
139 unsigned int pages = DIV_ROUND_UP(mem_size, 1 << 22);
140 unsigned int second_level_entry = 0;
141 unsigned long *second_level_table;
142#endif
143
144 /*
145 * Now copy over any MMU pgd entries already in the mmu page tables
146 * over to our root init process (swapper_pg_dir) map. This map is
147 * then inherited by all other processes, which means all processes
148 * inherit a map of the kernel space.
149 */
150 addr = META_MEMORY_BASE;
151 entry = pgd_index(META_MEMORY_BASE);
152 p_swapper_pg_dir = pgd_offset_k(0) + entry;
153
154 while (entry < (PTRS_PER_PGD - pgd_index(META_MEMORY_BASE))) {
155 unsigned long pgd_entry;
156 /* copy over the current MMU value */
157 pgd_entry = mmu_read_first_level_page(addr);
158 pgd_val(*p_swapper_pg_dir) = pgd_entry;
159
160 p_swapper_pg_dir++;
161 addr += PGDIR_SIZE;
162 entry++;
163 }
164
165#ifdef CONFIG_KERNEL_4M_PAGES
166 /*
167 * At this point we can also map the kernel with 4MB pages to
168 * reduce TLB pressure.
169 */
170 second_level_table = alloc_bootmem_pages(SECOND_LEVEL_ALIGN * pages);
171
172 addr = PAGE_OFFSET;
173 entry = pgd_index(PAGE_OFFSET);
174 p_swapper_pg_dir = pgd_offset_k(0) + entry;
175
176 while (pages > 0) {
177 unsigned long phys_addr, second_level_phys;
178 pte_t *pte = (pte_t *)&second_level_table[second_level_entry];
179
180 phys_addr = __pa(addr);
181
182 second_level_phys = __pa(pte);
183
184 pgd_val(*p_swapper_pg_dir) = ((second_level_phys &
185 FIRST_LEVEL_MASK) |
186 _PAGE_SZ_4M |
187 _PAGE_PRESENT);
188
189 pte_val(*pte) = ((phys_addr & SECOND_LEVEL_MASK) |
190 _PAGE_PRESENT | _PAGE_DIRTY |
191 _PAGE_ACCESSED | _PAGE_WRITE |
192 _PAGE_CACHEABLE | _PAGE_KERNEL);
193
194 p_swapper_pg_dir++;
195 addr += PGDIR_SIZE;
196 /* Second level pages must be 64byte aligned. */
197 second_level_entry += (SECOND_LEVEL_ALIGN /
198 sizeof(unsigned long));
199 pages--;
200 }
201 load_pgd(swapper_pg_dir, hard_processor_id());
202 flush_tlb_all();
203#endif
204
205 repriv_mmu_tables();
206 register_syscore_ops(&mmu_syscore_ops);
207}
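
mmu_read_first_level_page() above decodes the linear mapping range from the PHYS0 register: the 4MB-granular base sits in bits 30:22 and the size is 2^N 4MB entries with N in bits 11:8. A standalone decoding sketch, using an illustrative register value rather than a real dump:

#include <stdio.h>

#define PGDIR_SHIFT_EX 22

int main(void)
{
	/* Example: linear base 256 (in 4MB units), size exponent 4 (16 units). */
	unsigned int phys0 = (0x100u << PGDIR_SHIFT_EX) | (0x4u << 8);
	unsigned long linear_base = (phys0 >> PGDIR_SHIFT_EX) & 0x1ff;
	unsigned long linear_limit = (1u << ((phys0 >> 8) & 0xf)) + linear_base;

	printf("mapped pgd range: offsets %lu..%lu (4MB entries)\n",
	       linear_base, linear_limit);
	return 0;
}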