Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/Kconfig                                          |  48
-rw-r--r--  arch/sh/mm/Makefile                                         |  12
-rw-r--r--  arch/sh/mm/alignment.c                                      | 189
-rw-r--r--  arch/sh/mm/cache-debugfs.c                                  |   7
-rw-r--r--  arch/sh/mm/cache-sh2.c                                      |  12
-rw-r--r--  arch/sh/mm/cache-sh2a.c                                     |  20
-rw-r--r--  arch/sh/mm/cache-sh3.c                                      |   6
-rw-r--r--  arch/sh/mm/cache-sh4.c                                      |  27
-rw-r--r--  arch/sh/mm/cache-sh7705.c                                   |  12
-rw-r--r--  arch/sh/mm/cache.c                                          |  13
-rw-r--r--  arch/sh/mm/fault_32.c                                       |   5
-rw-r--r--  arch/sh/mm/init.c                                           | 166
-rw-r--r--  arch/sh/mm/ioremap.c (renamed from arch/sh/mm/ioremap_32.c) |  63
-rw-r--r--  arch/sh/mm/ioremap_64.c                                     | 326
-rw-r--r--  arch/sh/mm/ioremap_fixed.c                                  | 128
-rw-r--r--  arch/sh/mm/nommu.c                                          |   4
-rw-r--r--  arch/sh/mm/pgtable.c                                        |  56
-rw-r--r--  arch/sh/mm/pmb.c                                            | 586
-rw-r--r--  arch/sh/mm/tlb-pteaex.c                                     |   3
-rw-r--r--  arch/sh/mm/tlb-sh3.c                                        |   6
-rw-r--r--  arch/sh/mm/tlb-sh4.c                                        |  13
-rw-r--r--  arch/sh/mm/tlb-sh5.c                                        |  39
-rw-r--r--  arch/sh/mm/tlb-urb.c                                        |  81
-rw-r--r--  arch/sh/mm/tlbflush_32.c                                    |   4
-rw-r--r--  arch/sh/mm/tlbflush_64.c                                    |   2
-rw-r--r--  arch/sh/mm/uncached.c                                       |  34
26 files changed, 1190 insertions, 672 deletions
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 986a71b88ca3..1445ca6257df 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -75,52 +75,25 @@ config MEMORY_SIZE | |||
75 | config 29BIT | 75 | config 29BIT |
76 | def_bool !32BIT | 76 | def_bool !32BIT |
77 | depends on SUPERH32 | 77 | depends on SUPERH32 |
78 | select UNCACHED_MAPPING | ||
78 | 79 | ||
79 | config 32BIT | 80 | config 32BIT |
80 | bool | 81 | bool |
81 | default y if CPU_SH5 | 82 | default y if CPU_SH5 |
82 | 83 | ||
83 | config PMB_ENABLE | ||
84 | bool "Support 32-bit physical addressing through PMB" | ||
85 | depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP | ||
86 | help | ||
87 | If you say Y here, physical addressing will be extended to | ||
88 | 32-bits through the SH-4A PMB. If this is not set, legacy | ||
89 | 29-bit physical addressing will be used. | ||
90 | |||
91 | choice | ||
92 | prompt "PMB handling type" | ||
93 | depends on PMB_ENABLE | ||
94 | default PMB_FIXED | ||
95 | |||
96 | config PMB | 84 | config PMB |
97 | bool "PMB" | 85 | bool "Support 32-bit physical addressing through PMB" |
98 | depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP | 86 | depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP |
87 | select 32BIT | ||
88 | select UNCACHED_MAPPING | ||
99 | help | 89 | help |
100 | If you say Y here, physical addressing will be extended to | 90 | If you say Y here, physical addressing will be extended to |
101 | 32-bits through the SH-4A PMB. If this is not set, legacy | 91 | 32-bits through the SH-4A PMB. If this is not set, legacy |
102 | 29-bit physical addressing will be used. | 92 | 29-bit physical addressing will be used. |
103 | 93 | ||
104 | config PMB_FIXED | ||
105 | bool "fixed PMB" | ||
106 | depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP | ||
107 | select 32BIT | ||
108 | help | ||
109 | If this option is enabled, fixed PMB mappings are inherited | ||
110 | from the boot loader, and the kernel does not attempt dynamic | ||
111 | management. This is the closest to legacy 29-bit physical mode, | ||
112 | and allows systems to support up to 512MiB of system memory. | ||
113 | |||
114 | endchoice | ||
115 | |||
116 | config X2TLB | 94 | config X2TLB |
117 | bool "Enable extended TLB mode" | 95 | def_bool y |
118 | depends on (CPU_SHX2 || CPU_SHX3) && MMU && EXPERIMENTAL | 96 | depends on (CPU_SHX2 || CPU_SHX3) && MMU |
119 | help | ||
120 | Selecting this option will enable the extended mode of the SH-X2 | ||
121 | TLB. For legacy SH-X behaviour and interoperability, say N. For | ||
122 | all of the fun new features and a willingness to submit bug reports, | ||
123 | say Y. | ||
124 | 97 | ||
125 | config VSYSCALL | 98 | config VSYSCALL |
126 | bool "Support vsyscall page" | 99 | bool "Support vsyscall page" |
@@ -188,14 +161,19 @@ config ARCH_MEMORY_PROBE | |||
188 | def_bool y | 161 | def_bool y |
189 | depends on MEMORY_HOTPLUG | 162 | depends on MEMORY_HOTPLUG |
190 | 163 | ||
164 | config IOREMAP_FIXED | ||
165 | def_bool y | ||
166 | depends on X2TLB || SUPERH64 | ||
167 | |||
168 | config UNCACHED_MAPPING | ||
169 | bool | ||
170 | |||
191 | choice | 171 | choice |
192 | prompt "Kernel page size" | 172 | prompt "Kernel page size" |
193 | default PAGE_SIZE_8KB if X2TLB | ||
194 | default PAGE_SIZE_4KB | 173 | default PAGE_SIZE_4KB |
195 | 174 | ||
196 | config PAGE_SIZE_4KB | 175 | config PAGE_SIZE_4KB |
197 | bool "4kB" | 176 | bool "4kB" |
198 | depends on !MMU || !X2TLB | ||
199 | help | 177 | help |
200 | This is the default page size used by all SuperH CPUs. | 178 | This is the default page size used by all SuperH CPUs. |
201 | 179 | ||
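Note on the Kconfig consolidation above: the old PMB_ENABLE symbol and the PMB/PMB_FIXED choice collapse into a single CONFIG_PMB, which now selects 32BIT and UNCACHED_MAPPING itself. A minimal sketch of what guarded code sees after the change (illustrative only, not taken from the patch):

	#ifdef CONFIG_PMB
		/* 32-bit physical addressing; PMB mappings managed at runtime */
	#else
		/* legacy 29-bit physical addressing (CONFIG_29BIT) */
	#endif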
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 8a70535fa7ce..3dc8a8a63822 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the Linux SuperH-specific parts of the memory manager. | 2 | # Makefile for the Linux SuperH-specific parts of the memory manager. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := cache.o init.o consistent.o mmap.o | 5 | obj-y := alignment.o cache.o init.o consistent.o mmap.o |
6 | 6 | ||
7 | cacheops-$(CONFIG_CPU_SH2) := cache-sh2.o | 7 | cacheops-$(CONFIG_CPU_SH2) := cache-sh2.o |
8 | cacheops-$(CONFIG_CPU_SH2A) := cache-sh2a.o | 8 | cacheops-$(CONFIG_CPU_SH2A) := cache-sh2a.o |
@@ -15,7 +15,7 @@ obj-y += $(cacheops-y) | |||
15 | 15 | ||
16 | mmu-y := nommu.o extable_32.o | 16 | mmu-y := nommu.o extable_32.o |
17 | mmu-$(CONFIG_MMU) := extable_$(BITS).o fault_$(BITS).o \ | 17 | mmu-$(CONFIG_MMU) := extable_$(BITS).o fault_$(BITS).o \ |
18 | ioremap_$(BITS).o kmap.o tlbflush_$(BITS).o | 18 | ioremap.o kmap.o pgtable.o tlbflush_$(BITS).o |
19 | 19 | ||
20 | obj-y += $(mmu-y) | 20 | obj-y += $(mmu-y) |
21 | obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o | 21 | obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o |
@@ -26,15 +26,17 @@ endif | |||
26 | 26 | ||
27 | ifdef CONFIG_MMU | 27 | ifdef CONFIG_MMU |
28 | tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o | 28 | tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o |
29 | tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o | 29 | tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o tlb-urb.o |
30 | tlb-$(CONFIG_CPU_SH5) := tlb-sh5.o | 30 | tlb-$(CONFIG_CPU_SH5) := tlb-sh5.o |
31 | tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o | 31 | tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o tlb-urb.o |
32 | obj-y += $(tlb-y) | 32 | obj-y += $(tlb-y) |
33 | endif | 33 | endif |
34 | 34 | ||
35 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 35 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
36 | obj-$(CONFIG_PMB_ENABLE) += pmb.o | 36 | obj-$(CONFIG_PMB) += pmb.o |
37 | obj-$(CONFIG_NUMA) += numa.o | 37 | obj-$(CONFIG_NUMA) += numa.o |
38 | obj-$(CONFIG_IOREMAP_FIXED) += ioremap_fixed.o | ||
39 | obj-$(CONFIG_UNCACHED_MAPPING) += uncached.o | ||
38 | 40 | ||
39 | # Special flags for fault_64.o. This puts restrictions on the number of | 41 | # Special flags for fault_64.o. This puts restrictions on the number of |
40 | # caller-save registers that the compiler can target when building this file. | 42 | # caller-save registers that the compiler can target when building this file. |
diff --git a/arch/sh/mm/alignment.c b/arch/sh/mm/alignment.c
new file mode 100644
index 000000000000..b2595b8548ee
--- /dev/null
+++ b/arch/sh/mm/alignment.c
@@ -0,0 +1,189 @@ | |||
1 | /* | ||
2 | * Alignment access counters and corresponding user-space interfaces. | ||
3 | * | ||
4 | * Copyright (C) 2009 ST Microelectronics | ||
5 | * Copyright (C) 2009 - 2010 Paul Mundt | ||
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/seq_file.h> | ||
14 | #include <linux/proc_fs.h> | ||
15 | #include <linux/uaccess.h> | ||
16 | #include <asm/alignment.h> | ||
17 | #include <asm/processor.h> | ||
18 | |||
19 | static unsigned long se_user; | ||
20 | static unsigned long se_sys; | ||
21 | static unsigned long se_half; | ||
22 | static unsigned long se_word; | ||
23 | static unsigned long se_dword; | ||
24 | static unsigned long se_multi; | ||
25 | /* bitfield: 1: warn 2: fixup 4: signal -> combinations 2|4 && 1|2|4 are not | ||
26 | valid! */ | ||
27 | static int se_usermode = UM_WARN | UM_FIXUP; | ||
28 | /* 0: no warning 1: print a warning message, disabled by default */ | ||
29 | static int se_kernmode_warn; | ||
30 | |||
31 | core_param(alignment, se_usermode, int, 0600); | ||
32 | |||
33 | void inc_unaligned_byte_access(void) | ||
34 | { | ||
35 | se_half++; | ||
36 | } | ||
37 | |||
38 | void inc_unaligned_word_access(void) | ||
39 | { | ||
40 | se_word++; | ||
41 | } | ||
42 | |||
43 | void inc_unaligned_dword_access(void) | ||
44 | { | ||
45 | se_dword++; | ||
46 | } | ||
47 | |||
48 | void inc_unaligned_multi_access(void) | ||
49 | { | ||
50 | se_multi++; | ||
51 | } | ||
52 | |||
53 | void inc_unaligned_user_access(void) | ||
54 | { | ||
55 | se_user++; | ||
56 | } | ||
57 | |||
58 | void inc_unaligned_kernel_access(void) | ||
59 | { | ||
60 | se_sys++; | ||
61 | } | ||
62 | |||
63 | /* | ||
64 | * This defaults to the global policy which can be set from the command | ||
65 | * line, while processes can overload their preferences via prctl(). | ||
66 | */ | ||
67 | unsigned int unaligned_user_action(void) | ||
68 | { | ||
69 | unsigned int action = se_usermode; | ||
70 | |||
71 | if (current->thread.flags & SH_THREAD_UAC_SIGBUS) { | ||
72 | action &= ~UM_FIXUP; | ||
73 | action |= UM_SIGNAL; | ||
74 | } | ||
75 | |||
76 | if (current->thread.flags & SH_THREAD_UAC_NOPRINT) | ||
77 | action &= ~UM_WARN; | ||
78 | |||
79 | return action; | ||
80 | } | ||
81 | |||
82 | int get_unalign_ctl(struct task_struct *tsk, unsigned long addr) | ||
83 | { | ||
84 | return put_user(tsk->thread.flags & SH_THREAD_UAC_MASK, | ||
85 | (unsigned int __user *)addr); | ||
86 | } | ||
87 | |||
88 | int set_unalign_ctl(struct task_struct *tsk, unsigned int val) | ||
89 | { | ||
90 | tsk->thread.flags = (tsk->thread.flags & ~SH_THREAD_UAC_MASK) | | ||
91 | (val & SH_THREAD_UAC_MASK); | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | void unaligned_fixups_notify(struct task_struct *tsk, insn_size_t insn, | ||
96 | struct pt_regs *regs) | ||
97 | { | ||
98 | if (user_mode(regs) && (se_usermode & UM_WARN) && printk_ratelimit()) | ||
99 | pr_notice("Fixing up unaligned userspace access " | ||
100 | "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", | ||
101 | tsk->comm, task_pid_nr(tsk), | ||
102 | (void *)instruction_pointer(regs), insn); | ||
103 | else if (se_kernmode_warn && printk_ratelimit()) | ||
104 | pr_notice("Fixing up unaligned kernel access " | ||
105 | "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", | ||
106 | tsk->comm, task_pid_nr(tsk), | ||
107 | (void *)instruction_pointer(regs), insn); | ||
108 | } | ||
109 | |||
110 | static const char *se_usermode_action[] = { | ||
111 | "ignored", | ||
112 | "warn", | ||
113 | "fixup", | ||
114 | "fixup+warn", | ||
115 | "signal", | ||
116 | "signal+warn" | ||
117 | }; | ||
118 | |||
119 | static int alignment_proc_show(struct seq_file *m, void *v) | ||
120 | { | ||
121 | seq_printf(m, "User:\t\t%lu\n", se_user); | ||
122 | seq_printf(m, "System:\t\t%lu\n", se_sys); | ||
123 | seq_printf(m, "Half:\t\t%lu\n", se_half); | ||
124 | seq_printf(m, "Word:\t\t%lu\n", se_word); | ||
125 | seq_printf(m, "DWord:\t\t%lu\n", se_dword); | ||
126 | seq_printf(m, "Multi:\t\t%lu\n", se_multi); | ||
127 | seq_printf(m, "User faults:\t%i (%s)\n", se_usermode, | ||
128 | se_usermode_action[se_usermode]); | ||
129 | seq_printf(m, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn, | ||
130 | se_kernmode_warn ? "+warn" : ""); | ||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | static int alignment_proc_open(struct inode *inode, struct file *file) | ||
135 | { | ||
136 | return single_open(file, alignment_proc_show, NULL); | ||
137 | } | ||
138 | |||
139 | static ssize_t alignment_proc_write(struct file *file, | ||
140 | const char __user *buffer, size_t count, loff_t *pos) | ||
141 | { | ||
142 | int *data = PDE(file->f_path.dentry->d_inode)->data; | ||
143 | char mode; | ||
144 | |||
145 | if (count > 0) { | ||
146 | if (get_user(mode, buffer)) | ||
147 | return -EFAULT; | ||
148 | if (mode >= '0' && mode <= '5') | ||
149 | *data = mode - '0'; | ||
150 | } | ||
151 | return count; | ||
152 | } | ||
153 | |||
154 | static const struct file_operations alignment_proc_fops = { | ||
155 | .owner = THIS_MODULE, | ||
156 | .open = alignment_proc_open, | ||
157 | .read = seq_read, | ||
158 | .llseek = seq_lseek, | ||
159 | .release = single_release, | ||
160 | .write = alignment_proc_write, | ||
161 | }; | ||
162 | |||
163 | /* | ||
164 | * This needs to be done after sysctl_init, otherwise sys/ will be | ||
165 | * overwritten. Actually, this shouldn't be in sys/ at all since | ||
166 | * it isn't a sysctl, and it doesn't contain sysctl information. | ||
167 | * We now locate it in /proc/cpu/alignment instead. | ||
168 | */ | ||
169 | static int __init alignment_init(void) | ||
170 | { | ||
171 | struct proc_dir_entry *dir, *res; | ||
172 | |||
173 | dir = proc_mkdir("cpu", NULL); | ||
174 | if (!dir) | ||
175 | return -ENOMEM; | ||
176 | |||
177 | res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir, | ||
178 | &alignment_proc_fops, &se_usermode); | ||
179 | if (!res) | ||
180 | return -ENOMEM; | ||
181 | |||
182 | res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir, | ||
183 | &alignment_proc_fops, &se_kernmode_warn); | ||
184 | if (!res) | ||
185 | return -ENOMEM; | ||
186 | |||
187 | return 0; | ||
188 | } | ||
189 | fs_initcall(alignment_init); | ||
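The per-task flags consulted in unaligned_user_action() are the arch backend for the PR_GET_UNALIGN/PR_SET_UNALIGN prctls, via the get_unalign_ctl()/set_unalign_ctl() hooks above, while the global default can be changed at runtime by writing a mode digit ('0'-'5') to /proc/cpu/alignment, as parsed by alignment_proc_write(). A userspace sketch, assuming the standard prctl constants from <sys/prctl.h> (not part of the patch):

	/* Request SIGBUS instead of in-kernel fixup for this process's
	 * unaligned accesses; assumed to map onto SH_THREAD_UAC_SIGBUS
	 * in unaligned_user_action() above. */
	#include <stdio.h>
	#include <sys/prctl.h>

	int main(void)
	{
		if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS, 0, 0, 0) != 0)
			perror("prctl(PR_SET_UNALIGN)");
		return 0;
	}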
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 5ba067b26591..690ed010d002 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -22,8 +22,7 @@ enum cache_type { | |||
22 | CACHE_TYPE_UNIFIED, | 22 | CACHE_TYPE_UNIFIED, |
23 | }; | 23 | }; |
24 | 24 | ||
25 | static int __uses_jump_to_uncached cache_seq_show(struct seq_file *file, | 25 | static int cache_seq_show(struct seq_file *file, void *iter) |
26 | void *iter) | ||
27 | { | 26 | { |
28 | unsigned int cache_type = (unsigned int)file->private; | 27 | unsigned int cache_type = (unsigned int)file->private; |
29 | struct cache_info *cache; | 28 | struct cache_info *cache; |
@@ -37,7 +36,7 @@ static int __uses_jump_to_uncached cache_seq_show(struct seq_file *file, | |||
37 | */ | 36 | */ |
38 | jump_to_uncached(); | 37 | jump_to_uncached(); |
39 | 38 | ||
40 | ccr = ctrl_inl(CCR); | 39 | ccr = __raw_readl(CCR); |
41 | if ((ccr & CCR_CACHE_ENABLE) == 0) { | 40 | if ((ccr & CCR_CACHE_ENABLE) == 0) { |
42 | back_to_cached(); | 41 | back_to_cached(); |
43 | 42 | ||
@@ -90,7 +89,7 @@ static int __uses_jump_to_uncached cache_seq_show(struct seq_file *file, | |||
90 | for (addr = addrstart, line = 0; | 89 | for (addr = addrstart, line = 0; |
91 | addr < addrstart + waysize; | 90 | addr < addrstart + waysize; |
92 | addr += cache->linesz, line++) { | 91 | addr += cache->linesz, line++) { |
93 | unsigned long data = ctrl_inl(addr); | 92 | unsigned long data = __raw_readl(addr); |
94 | 93 | ||
95 | /* Check the V bit, ignore invalid cachelines */ | 94 | /* Check the V bit, ignore invalid cachelines */ |
96 | if ((data & 1) == 0) | 95 | if ((data & 1) == 0) |
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
index 699a71f46327..defcf719f2e8 100644
--- a/arch/sh/mm/cache-sh2.c
+++ b/arch/sh/mm/cache-sh2.c
@@ -28,10 +28,10 @@ static void sh2__flush_wback_region(void *start, int size) | |||
28 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0); | 28 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0); |
29 | int way; | 29 | int way; |
30 | for (way = 0; way < 4; way++) { | 30 | for (way = 0; way < 4; way++) { |
31 | unsigned long data = ctrl_inl(addr | (way << 12)); | 31 | unsigned long data = __raw_readl(addr | (way << 12)); |
32 | if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) { | 32 | if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) { |
33 | data &= ~SH_CACHE_UPDATED; | 33 | data &= ~SH_CACHE_UPDATED; |
34 | ctrl_outl(data, addr | (way << 12)); | 34 | __raw_writel(data, addr | (way << 12)); |
35 | } | 35 | } |
36 | } | 36 | } |
37 | } | 37 | } |
@@ -47,7 +47,7 @@ static void sh2__flush_purge_region(void *start, int size) | |||
47 | & ~(L1_CACHE_BYTES-1); | 47 | & ~(L1_CACHE_BYTES-1); |
48 | 48 | ||
49 | for (v = begin; v < end; v+=L1_CACHE_BYTES) | 49 | for (v = begin; v < end; v+=L1_CACHE_BYTES) |
50 | ctrl_outl((v & CACHE_PHYSADDR_MASK), | 50 | __raw_writel((v & CACHE_PHYSADDR_MASK), |
51 | CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008); | 51 | CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008); |
52 | } | 52 | } |
53 | 53 | ||
@@ -63,9 +63,9 @@ static void sh2__flush_invalidate_region(void *start, int size) | |||
63 | local_irq_save(flags); | 63 | local_irq_save(flags); |
64 | jump_to_uncached(); | 64 | jump_to_uncached(); |
65 | 65 | ||
66 | ccr = ctrl_inl(CCR); | 66 | ccr = __raw_readl(CCR); |
67 | ccr |= CCR_CACHE_INVALIDATE; | 67 | ccr |= CCR_CACHE_INVALIDATE; |
68 | ctrl_outl(ccr, CCR); | 68 | __raw_writel(ccr, CCR); |
69 | 69 | ||
70 | back_to_cached(); | 70 | back_to_cached(); |
71 | local_irq_restore(flags); | 71 | local_irq_restore(flags); |
@@ -78,7 +78,7 @@ static void sh2__flush_invalidate_region(void *start, int size) | |||
78 | & ~(L1_CACHE_BYTES-1); | 78 | & ~(L1_CACHE_BYTES-1); |
79 | 79 | ||
80 | for (v = begin; v < end; v+=L1_CACHE_BYTES) | 80 | for (v = begin; v < end; v+=L1_CACHE_BYTES) |
81 | ctrl_outl((v & CACHE_PHYSADDR_MASK), | 81 | __raw_writel((v & CACHE_PHYSADDR_MASK), |
82 | CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008); | 82 | CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008); |
83 | #endif | 83 | #endif |
84 | } | 84 | } |
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c
index 975899d83564..1f51225426a2 100644
--- a/arch/sh/mm/cache-sh2a.c
+++ b/arch/sh/mm/cache-sh2a.c
@@ -32,10 +32,10 @@ static void sh2a__flush_wback_region(void *start, int size) | |||
32 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0); | 32 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0); |
33 | int way; | 33 | int way; |
34 | for (way = 0; way < 4; way++) { | 34 | for (way = 0; way < 4; way++) { |
35 | unsigned long data = ctrl_inl(addr | (way << 11)); | 35 | unsigned long data = __raw_readl(addr | (way << 11)); |
36 | if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) { | 36 | if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) { |
37 | data &= ~SH_CACHE_UPDATED; | 37 | data &= ~SH_CACHE_UPDATED; |
38 | ctrl_outl(data, addr | (way << 11)); | 38 | __raw_writel(data, addr | (way << 11)); |
39 | } | 39 | } |
40 | } | 40 | } |
41 | } | 41 | } |
@@ -58,7 +58,7 @@ static void sh2a__flush_purge_region(void *start, int size) | |||
58 | jump_to_uncached(); | 58 | jump_to_uncached(); |
59 | 59 | ||
60 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | 60 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { |
61 | ctrl_outl((v & CACHE_PHYSADDR_MASK), | 61 | __raw_writel((v & CACHE_PHYSADDR_MASK), |
62 | CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); | 62 | CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); |
63 | } | 63 | } |
64 | back_to_cached(); | 64 | back_to_cached(); |
@@ -78,17 +78,17 @@ static void sh2a__flush_invalidate_region(void *start, int size) | |||
78 | jump_to_uncached(); | 78 | jump_to_uncached(); |
79 | 79 | ||
80 | #ifdef CONFIG_CACHE_WRITEBACK | 80 | #ifdef CONFIG_CACHE_WRITEBACK |
81 | ctrl_outl(ctrl_inl(CCR) | CCR_OCACHE_INVALIDATE, CCR); | 81 | __raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR); |
82 | /* I-cache invalidate */ | 82 | /* I-cache invalidate */ |
83 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | 83 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { |
84 | ctrl_outl((v & CACHE_PHYSADDR_MASK), | 84 | __raw_writel((v & CACHE_PHYSADDR_MASK), |
85 | CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); | 85 | CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); |
86 | } | 86 | } |
87 | #else | 87 | #else |
88 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | 88 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { |
89 | ctrl_outl((v & CACHE_PHYSADDR_MASK), | 89 | __raw_writel((v & CACHE_PHYSADDR_MASK), |
90 | CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); | 90 | CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); |
91 | ctrl_outl((v & CACHE_PHYSADDR_MASK), | 91 | __raw_writel((v & CACHE_PHYSADDR_MASK), |
92 | CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); | 92 | CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); |
93 | } | 93 | } |
94 | #endif | 94 | #endif |
@@ -115,14 +115,14 @@ static void sh2a_flush_icache_range(void *args) | |||
115 | int way; | 115 | int way; |
116 | /* O-Cache writeback */ | 116 | /* O-Cache writeback */ |
117 | for (way = 0; way < 4; way++) { | 117 | for (way = 0; way < 4; way++) { |
118 | unsigned long data = ctrl_inl(CACHE_OC_ADDRESS_ARRAY | addr | (way << 11)); | 118 | unsigned long data = __raw_readl(CACHE_OC_ADDRESS_ARRAY | addr | (way << 11)); |
119 | if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) { | 119 | if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) { |
120 | data &= ~SH_CACHE_UPDATED; | 120 | data &= ~SH_CACHE_UPDATED; |
121 | ctrl_outl(data, CACHE_OC_ADDRESS_ARRAY | addr | (way << 11)); | 121 | __raw_writel(data, CACHE_OC_ADDRESS_ARRAY | addr | (way << 11)); |
122 | } | 122 | } |
123 | } | 123 | } |
124 | /* I-Cache invalidate */ | 124 | /* I-Cache invalidate */ |
125 | ctrl_outl(addr, | 125 | __raw_writel(addr, |
126 | CACHE_IC_ADDRESS_ARRAY | addr | 0x00000008); | 126 | CACHE_IC_ADDRESS_ARRAY | addr | 0x00000008); |
127 | } | 127 | } |
128 | 128 | ||
diff --git a/arch/sh/mm/cache-sh3.c b/arch/sh/mm/cache-sh3.c
index faef80c98134..e37523f65195 100644
--- a/arch/sh/mm/cache-sh3.c
+++ b/arch/sh/mm/cache-sh3.c
@@ -50,12 +50,12 @@ static void sh3__flush_wback_region(void *start, int size) | |||
50 | p = __pa(v); | 50 | p = __pa(v); |
51 | addr = addrstart | (v & current_cpu_data.dcache.entry_mask); | 51 | addr = addrstart | (v & current_cpu_data.dcache.entry_mask); |
52 | local_irq_save(flags); | 52 | local_irq_save(flags); |
53 | data = ctrl_inl(addr); | 53 | data = __raw_readl(addr); |
54 | 54 | ||
55 | if ((data & CACHE_PHYSADDR_MASK) == | 55 | if ((data & CACHE_PHYSADDR_MASK) == |
56 | (p & CACHE_PHYSADDR_MASK)) { | 56 | (p & CACHE_PHYSADDR_MASK)) { |
57 | data &= ~SH_CACHE_UPDATED; | 57 | data &= ~SH_CACHE_UPDATED; |
58 | ctrl_outl(data, addr); | 58 | __raw_writel(data, addr); |
59 | local_irq_restore(flags); | 59 | local_irq_restore(flags); |
60 | break; | 60 | break; |
61 | } | 61 | } |
@@ -86,7 +86,7 @@ static void sh3__flush_purge_region(void *start, int size) | |||
86 | data = (v & 0xfffffc00); /* _Virtual_ address, ~U, ~V */ | 86 | data = (v & 0xfffffc00); /* _Virtual_ address, ~U, ~V */ |
87 | addr = CACHE_OC_ADDRESS_ARRAY | | 87 | addr = CACHE_OC_ADDRESS_ARRAY | |
88 | (v & current_cpu_data.dcache.entry_mask) | SH_CACHE_ASSOC; | 88 | (v & current_cpu_data.dcache.entry_mask) | SH_CACHE_ASSOC; |
89 | ctrl_outl(data, addr); | 89 | __raw_writel(data, addr); |
90 | } | 90 | } |
91 | } | 91 | } |
92 | 92 | ||
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 560ddb6bc8a7..2cfae81914aa 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -36,7 +36,7 @@ static void __flush_cache_one(unsigned long addr, unsigned long phys, | |||
36 | * Called from kernel/module.c:sys_init_module and routine for a.out format, | 36 | * Called from kernel/module.c:sys_init_module and routine for a.out format, |
37 | * signal handler code and kprobes code | 37 | * signal handler code and kprobes code |
38 | */ | 38 | */ |
39 | static void __uses_jump_to_uncached sh4_flush_icache_range(void *args) | 39 | static void sh4_flush_icache_range(void *args) |
40 | { | 40 | { |
41 | struct flusher_data *data = args; | 41 | struct flusher_data *data = args; |
42 | unsigned long start, end; | 42 | unsigned long start, end; |
@@ -109,6 +109,7 @@ static inline void flush_cache_one(unsigned long start, unsigned long phys) | |||
109 | static void sh4_flush_dcache_page(void *arg) | 109 | static void sh4_flush_dcache_page(void *arg) |
110 | { | 110 | { |
111 | struct page *page = arg; | 111 | struct page *page = arg; |
112 | unsigned long addr = (unsigned long)page_address(page); | ||
112 | #ifndef CONFIG_SMP | 113 | #ifndef CONFIG_SMP |
113 | struct address_space *mapping = page_mapping(page); | 114 | struct address_space *mapping = page_mapping(page); |
114 | 115 | ||
@@ -116,22 +117,14 @@ static void sh4_flush_dcache_page(void *arg) | |||
116 | set_bit(PG_dcache_dirty, &page->flags); | 117 | set_bit(PG_dcache_dirty, &page->flags); |
117 | else | 118 | else |
118 | #endif | 119 | #endif |
119 | { | 120 | flush_cache_one(CACHE_OC_ADDRESS_ARRAY | |
120 | unsigned long phys = page_to_phys(page); | 121 | (addr & shm_align_mask), page_to_phys(page)); |
121 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY; | ||
122 | int i, n; | ||
123 | |||
124 | /* Loop all the D-cache */ | ||
125 | n = boot_cpu_data.dcache.n_aliases; | ||
126 | for (i = 0; i < n; i++, addr += PAGE_SIZE) | ||
127 | flush_cache_one(addr, phys); | ||
128 | } | ||
129 | 122 | ||
130 | wmb(); | 123 | wmb(); |
131 | } | 124 | } |
132 | 125 | ||
133 | /* TODO: Selective icache invalidation through IC address array.. */ | 126 | /* TODO: Selective icache invalidation through IC address array.. */ |
134 | static void __uses_jump_to_uncached flush_icache_all(void) | 127 | static void flush_icache_all(void) |
135 | { | 128 | { |
136 | unsigned long flags, ccr; | 129 | unsigned long flags, ccr; |
137 | 130 | ||
@@ -139,9 +132,9 @@ static void __uses_jump_to_uncached flush_icache_all(void) | |||
139 | jump_to_uncached(); | 132 | jump_to_uncached(); |
140 | 133 | ||
141 | /* Flush I-cache */ | 134 | /* Flush I-cache */ |
142 | ccr = ctrl_inl(CCR); | 135 | ccr = __raw_readl(CCR); |
143 | ccr |= CCR_CACHE_ICI; | 136 | ccr |= CCR_CACHE_ICI; |
144 | ctrl_outl(ccr, CCR); | 137 | __raw_writel(ccr, CCR); |
145 | 138 | ||
146 | /* | 139 | /* |
147 | * back_to_cached() will take care of the barrier for us, don't add | 140 | * back_to_cached() will take care of the barrier for us, don't add |
@@ -384,9 +377,9 @@ extern void __weak sh4__flush_region_init(void); | |||
384 | void __init sh4_cache_init(void) | 377 | void __init sh4_cache_init(void) |
385 | { | 378 | { |
386 | printk("PVR=%08x CVR=%08x PRR=%08x\n", | 379 | printk("PVR=%08x CVR=%08x PRR=%08x\n", |
387 | ctrl_inl(CCN_PVR), | 380 | __raw_readl(CCN_PVR), |
388 | ctrl_inl(CCN_CVR), | 381 | __raw_readl(CCN_CVR), |
389 | ctrl_inl(CCN_PRR)); | 382 | __raw_readl(CCN_PRR)); |
390 | 383 | ||
391 | local_flush_icache_range = sh4_flush_icache_range; | 384 | local_flush_icache_range = sh4_flush_icache_range; |
392 | local_flush_dcache_page = sh4_flush_dcache_page; | 385 | local_flush_dcache_page = sh4_flush_dcache_page; |
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index f527fb70fce6..f498da1cce7a 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -48,10 +48,10 @@ static inline void cache_wback_all(void) | |||
48 | unsigned long data; | 48 | unsigned long data; |
49 | int v = SH_CACHE_UPDATED | SH_CACHE_VALID; | 49 | int v = SH_CACHE_UPDATED | SH_CACHE_VALID; |
50 | 50 | ||
51 | data = ctrl_inl(addr); | 51 | data = __raw_readl(addr); |
52 | 52 | ||
53 | if ((data & v) == v) | 53 | if ((data & v) == v) |
54 | ctrl_outl(data & ~v, addr); | 54 | __raw_writel(data & ~v, addr); |
55 | 55 | ||
56 | } | 56 | } |
57 | 57 | ||
@@ -78,7 +78,7 @@ static void sh7705_flush_icache_range(void *args) | |||
78 | /* | 78 | /* |
79 | * Writeback&Invalidate the D-cache of the page | 79 | * Writeback&Invalidate the D-cache of the page |
80 | */ | 80 | */ |
81 | static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys) | 81 | static void __flush_dcache_page(unsigned long phys) |
82 | { | 82 | { |
83 | unsigned long ways, waysize, addrstart; | 83 | unsigned long ways, waysize, addrstart; |
84 | unsigned long flags; | 84 | unsigned long flags; |
@@ -115,10 +115,10 @@ static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys) | |||
115 | addr += current_cpu_data.dcache.linesz) { | 115 | addr += current_cpu_data.dcache.linesz) { |
116 | unsigned long data; | 116 | unsigned long data; |
117 | 117 | ||
118 | data = ctrl_inl(addr) & (0x1ffffC00 | SH_CACHE_VALID); | 118 | data = __raw_readl(addr) & (0x1ffffC00 | SH_CACHE_VALID); |
119 | if (data == phys) { | 119 | if (data == phys) { |
120 | data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED); | 120 | data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED); |
121 | ctrl_outl(data, addr); | 121 | __raw_writel(data, addr); |
122 | } | 122 | } |
123 | } | 123 | } |
124 | 124 | ||
@@ -144,7 +144,7 @@ static void sh7705_flush_dcache_page(void *arg) | |||
144 | __flush_dcache_page(__pa(page_address(page))); | 144 | __flush_dcache_page(__pa(page_address(page))); |
145 | } | 145 | } |
146 | 146 | ||
147 | static void __uses_jump_to_uncached sh7705_flush_cache_all(void *args) | 147 | static void sh7705_flush_cache_all(void *args) |
148 | { | 148 | { |
149 | unsigned long flags; | 149 | unsigned long flags; |
150 | 150 | ||
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index b8607fa7ae12..0f4095d7ac8b 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -2,7 +2,7 @@ | |||
2 | * arch/sh/mm/cache.c | 2 | * arch/sh/mm/cache.c |
3 | * | 3 | * |
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | 4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka |
5 | * Copyright (C) 2002 - 2009 Paul Mundt | 5 | * Copyright (C) 2002 - 2010 Paul Mundt |
6 | * | 6 | * |
7 | * Released under the terms of the GNU GPL v2.0. | 7 | * Released under the terms of the GNU GPL v2.0. |
8 | */ | 8 | */ |
@@ -41,8 +41,17 @@ static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info, | |||
41 | int wait) | 41 | int wait) |
42 | { | 42 | { |
43 | preempt_disable(); | 43 | preempt_disable(); |
44 | smp_call_function(func, info, wait); | 44 | |
45 | /* | ||
46 | * It's possible that this gets called early on when IRQs are | ||
47 | * still disabled due to ioremapping by the boot CPU, so don't | ||
48 | * even attempt IPIs unless there are other CPUs online. | ||
49 | */ | ||
50 | if (num_online_cpus() > 1) | ||
51 | smp_call_function(func, info, wait); | ||
52 | |||
45 | func(info); | 53 | func(info); |
54 | |||
46 | preempt_enable(); | 55 | preempt_enable(); |
47 | } | 56 | } |
48 | 57 | ||
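The num_online_cpus() guard matters because, as the new comment notes, cache maintenance can be triggered by an early ioremap on the boot CPU while IRQs are still disabled, where issuing smp_call_function() IPIs would be illegal. For context, a sketch of how this helper is assumed to be consumed elsewhere in cache.c (paraphrased, not shown in this hunk):

	/* Broadcast the local D-cache flusher to every online CPU and
	 * run it on the current CPU as well. */
	void flush_dcache_page(struct page *page)
	{
		cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
	}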
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 47530104e0ad..8bf79e3b7bdd 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -53,6 +53,9 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) | |||
53 | if (!pud_present(*pud_k)) | 53 | if (!pud_present(*pud_k)) |
54 | return NULL; | 54 | return NULL; |
55 | 55 | ||
56 | if (!pud_present(*pud)) | ||
57 | set_pud(pud, *pud_k); | ||
58 | |||
56 | pmd = pmd_offset(pud, address); | 59 | pmd = pmd_offset(pud, address); |
57 | pmd_k = pmd_offset(pud_k, address); | 60 | pmd_k = pmd_offset(pud_k, address); |
58 | if (!pmd_present(*pmd_k)) | 61 | if (!pmd_present(*pmd_k)) |
@@ -371,7 +374,7 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess, | |||
371 | local_flush_tlb_one(get_asid(), address & PAGE_MASK); | 374 | local_flush_tlb_one(get_asid(), address & PAGE_MASK); |
372 | #endif | 375 | #endif |
373 | 376 | ||
374 | update_mmu_cache(NULL, address, entry); | 377 | update_mmu_cache(NULL, address, pte); |
375 | 378 | ||
376 | return 0; | 379 | return 0; |
377 | } | 380 | } |
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 432acd07e76a..68028e8f26ce 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -21,25 +21,13 @@ | |||
21 | #include <asm/cacheflush.h> | 21 | #include <asm/cacheflush.h> |
22 | #include <asm/sections.h> | 22 | #include <asm/sections.h> |
23 | #include <asm/cache.h> | 23 | #include <asm/cache.h> |
24 | #include <asm/sizes.h> | ||
24 | 25 | ||
25 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | 26 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); |
26 | pgd_t swapper_pg_dir[PTRS_PER_PGD]; | 27 | pgd_t swapper_pg_dir[PTRS_PER_PGD]; |
27 | 28 | ||
28 | #ifdef CONFIG_SUPERH32 | ||
29 | /* | ||
30 | * Handle trivial transitions between cached and uncached | ||
31 | * segments, making use of the 1:1 mapping relationship in | ||
32 | * 512MB lowmem. | ||
33 | * | ||
34 | * This is the offset of the uncached section from its cached alias. | ||
35 | * Default value only valid in 29 bit mode, in 32bit mode will be | ||
36 | * overridden in pmb_init. | ||
37 | */ | ||
38 | unsigned long cached_to_uncached = P2SEG - P1SEG; | ||
39 | #endif | ||
40 | |||
41 | #ifdef CONFIG_MMU | 29 | #ifdef CONFIG_MMU |
42 | static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) | 30 | static pte_t *__get_pte_phys(unsigned long addr) |
43 | { | 31 | { |
44 | pgd_t *pgd; | 32 | pgd_t *pgd; |
45 | pud_t *pud; | 33 | pud_t *pud; |
@@ -49,22 +37,30 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) | |||
49 | pgd = pgd_offset_k(addr); | 37 | pgd = pgd_offset_k(addr); |
50 | if (pgd_none(*pgd)) { | 38 | if (pgd_none(*pgd)) { |
51 | pgd_ERROR(*pgd); | 39 | pgd_ERROR(*pgd); |
52 | return; | 40 | return NULL; |
53 | } | 41 | } |
54 | 42 | ||
55 | pud = pud_alloc(NULL, pgd, addr); | 43 | pud = pud_alloc(NULL, pgd, addr); |
56 | if (unlikely(!pud)) { | 44 | if (unlikely(!pud)) { |
57 | pud_ERROR(*pud); | 45 | pud_ERROR(*pud); |
58 | return; | 46 | return NULL; |
59 | } | 47 | } |
60 | 48 | ||
61 | pmd = pmd_alloc(NULL, pud, addr); | 49 | pmd = pmd_alloc(NULL, pud, addr); |
62 | if (unlikely(!pmd)) { | 50 | if (unlikely(!pmd)) { |
63 | pmd_ERROR(*pmd); | 51 | pmd_ERROR(*pmd); |
64 | return; | 52 | return NULL; |
65 | } | 53 | } |
66 | 54 | ||
67 | pte = pte_offset_kernel(pmd, addr); | 55 | pte = pte_offset_kernel(pmd, addr); |
56 | return pte; | ||
57 | } | ||
58 | |||
59 | static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) | ||
60 | { | ||
61 | pte_t *pte; | ||
62 | |||
63 | pte = __get_pte_phys(addr); | ||
68 | if (!pte_none(*pte)) { | 64 | if (!pte_none(*pte)) { |
69 | pte_ERROR(*pte); | 65 | pte_ERROR(*pte); |
70 | return; | 66 | return; |
@@ -72,23 +68,24 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) | |||
72 | 68 | ||
73 | set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot)); | 69 | set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot)); |
74 | local_flush_tlb_one(get_asid(), addr); | 70 | local_flush_tlb_one(get_asid(), addr); |
71 | |||
72 | if (pgprot_val(prot) & _PAGE_WIRED) | ||
73 | tlb_wire_entry(NULL, addr, *pte); | ||
74 | } | ||
75 | |||
76 | static void clear_pte_phys(unsigned long addr, pgprot_t prot) | ||
77 | { | ||
78 | pte_t *pte; | ||
79 | |||
80 | pte = __get_pte_phys(addr); | ||
81 | |||
82 | if (pgprot_val(prot) & _PAGE_WIRED) | ||
83 | tlb_unwire_entry(); | ||
84 | |||
85 | set_pte(pte, pfn_pte(0, __pgprot(0))); | ||
86 | local_flush_tlb_one(get_asid(), addr); | ||
75 | } | 87 | } |
76 | 88 | ||
77 | /* | ||
78 | * As a performance optimization, other platforms preserve the fixmap mapping | ||
79 | * across a context switch, we don't presently do this, but this could be done | ||
80 | * in a similar fashion as to the wired TLB interface that sh64 uses (by way | ||
81 | * of the memory mapped UTLB configuration) -- this unfortunately forces us to | ||
82 | * give up a TLB entry for each mapping we want to preserve. While this may be | ||
83 | * viable for a small number of fixmaps, it's not particularly useful for | ||
84 | * everything and needs to be carefully evaluated. (ie, we may want this for | ||
85 | * the vsyscall page). | ||
86 | * | ||
87 | * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass | ||
88 | * in at __set_fixmap() time to determine the appropriate behavior to follow. | ||
89 | * | ||
90 | * -- PFM. | ||
91 | */ | ||
92 | void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) | 89 | void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) |
93 | { | 90 | { |
94 | unsigned long address = __fix_to_virt(idx); | 91 | unsigned long address = __fix_to_virt(idx); |
@@ -101,6 +98,18 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) | |||
101 | set_pte_phys(address, phys, prot); | 98 | set_pte_phys(address, phys, prot); |
102 | } | 99 | } |
103 | 100 | ||
101 | void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot) | ||
102 | { | ||
103 | unsigned long address = __fix_to_virt(idx); | ||
104 | |||
105 | if (idx >= __end_of_fixed_addresses) { | ||
106 | BUG(); | ||
107 | return; | ||
108 | } | ||
109 | |||
110 | clear_pte_phys(address, prot); | ||
111 | } | ||
112 | |||
104 | void __init page_table_range_init(unsigned long start, unsigned long end, | 113 | void __init page_table_range_init(unsigned long start, unsigned long end, |
105 | pgd_t *pgd_base) | 114 | pgd_t *pgd_base) |
106 | { | 115 | { |
@@ -120,7 +129,13 @@ void __init page_table_range_init(unsigned long start, unsigned long end, | |||
120 | for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { | 129 | for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { |
121 | pud = (pud_t *)pgd; | 130 | pud = (pud_t *)pgd; |
122 | for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { | 131 | for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { |
132 | #ifdef __PAGETABLE_PMD_FOLDED | ||
123 | pmd = (pmd_t *)pud; | 133 | pmd = (pmd_t *)pud; |
134 | #else | ||
135 | pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE); | ||
136 | pud_populate(&init_mm, pud, pmd); | ||
137 | pmd += k; | ||
138 | #endif | ||
124 | for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { | 139 | for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { |
125 | if (pmd_none(*pmd)) { | 140 | if (pmd_none(*pmd)) { |
126 | pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); | 141 | pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); |
@@ -182,9 +197,6 @@ void __init paging_init(void) | |||
182 | } | 197 | } |
183 | 198 | ||
184 | free_area_init_nodes(max_zone_pfns); | 199 | free_area_init_nodes(max_zone_pfns); |
185 | |||
186 | /* Set up the uncached fixmap */ | ||
187 | set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start)); | ||
188 | } | 200 | } |
189 | 201 | ||
190 | /* | 202 | /* |
@@ -195,6 +207,8 @@ static void __init iommu_init(void) | |||
195 | no_iommu_init(); | 207 | no_iommu_init(); |
196 | } | 208 | } |
197 | 209 | ||
210 | unsigned int mem_init_done = 0; | ||
211 | |||
198 | void __init mem_init(void) | 212 | void __init mem_init(void) |
199 | { | 213 | { |
200 | int codesize, datasize, initsize; | 214 | int codesize, datasize, initsize; |
@@ -231,6 +245,8 @@ void __init mem_init(void) | |||
231 | memset(empty_zero_page, 0, PAGE_SIZE); | 245 | memset(empty_zero_page, 0, PAGE_SIZE); |
232 | __flush_wback_region(empty_zero_page, PAGE_SIZE); | 246 | __flush_wback_region(empty_zero_page, PAGE_SIZE); |
233 | 247 | ||
248 | vsyscall_init(); | ||
249 | |||
234 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | 250 | codesize = (unsigned long) &_etext - (unsigned long) &_text; |
235 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | 251 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; |
236 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | 252 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; |
@@ -243,8 +259,48 @@ void __init mem_init(void) | |||
243 | datasize >> 10, | 259 | datasize >> 10, |
244 | initsize >> 10); | 260 | initsize >> 10); |
245 | 261 | ||
246 | /* Initialize the vDSO */ | 262 | printk(KERN_INFO "virtual kernel memory layout:\n" |
247 | vsyscall_init(); | 263 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" |
264 | #ifdef CONFIG_HIGHMEM | ||
265 | " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
266 | #endif | ||
267 | " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
268 | " lowmem : 0x%08lx - 0x%08lx (%4ld MB) (cached)\n" | ||
269 | #ifdef CONFIG_UNCACHED_MAPPING | ||
270 | " : 0x%08lx - 0x%08lx (%4ld MB) (uncached)\n" | ||
271 | #endif | ||
272 | " .init : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
273 | " .data : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
274 | " .text : 0x%08lx - 0x%08lx (%4ld kB)\n", | ||
275 | FIXADDR_START, FIXADDR_TOP, | ||
276 | (FIXADDR_TOP - FIXADDR_START) >> 10, | ||
277 | |||
278 | #ifdef CONFIG_HIGHMEM | ||
279 | PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, | ||
280 | (LAST_PKMAP*PAGE_SIZE) >> 10, | ||
281 | #endif | ||
282 | |||
283 | (unsigned long)VMALLOC_START, VMALLOC_END, | ||
284 | (VMALLOC_END - VMALLOC_START) >> 20, | ||
285 | |||
286 | (unsigned long)memory_start, (unsigned long)high_memory, | ||
287 | ((unsigned long)high_memory - (unsigned long)memory_start) >> 20, | ||
288 | |||
289 | #ifdef CONFIG_UNCACHED_MAPPING | ||
290 | uncached_start, uncached_end, uncached_size >> 20, | ||
291 | #endif | ||
292 | |||
293 | (unsigned long)&__init_begin, (unsigned long)&__init_end, | ||
294 | ((unsigned long)&__init_end - | ||
295 | (unsigned long)&__init_begin) >> 10, | ||
296 | |||
297 | (unsigned long)&_etext, (unsigned long)&_edata, | ||
298 | ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, | ||
299 | |||
300 | (unsigned long)&_text, (unsigned long)&_etext, | ||
301 | ((unsigned long)&_etext - (unsigned long)&_text) >> 10); | ||
302 | |||
303 | mem_init_done = 1; | ||
248 | } | 304 | } |
249 | 305 | ||
250 | void free_initmem(void) | 306 | void free_initmem(void) |
@@ -277,35 +333,6 @@ void free_initrd_mem(unsigned long start, unsigned long end) | |||
277 | } | 333 | } |
278 | #endif | 334 | #endif |
279 | 335 | ||
280 | #if THREAD_SHIFT < PAGE_SHIFT | ||
281 | static struct kmem_cache *thread_info_cache; | ||
282 | |||
283 | struct thread_info *alloc_thread_info(struct task_struct *tsk) | ||
284 | { | ||
285 | struct thread_info *ti; | ||
286 | |||
287 | ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL); | ||
288 | if (unlikely(ti == NULL)) | ||
289 | return NULL; | ||
290 | #ifdef CONFIG_DEBUG_STACK_USAGE | ||
291 | memset(ti, 0, THREAD_SIZE); | ||
292 | #endif | ||
293 | return ti; | ||
294 | } | ||
295 | |||
296 | void free_thread_info(struct thread_info *ti) | ||
297 | { | ||
298 | kmem_cache_free(thread_info_cache, ti); | ||
299 | } | ||
300 | |||
301 | void thread_info_cache_init(void) | ||
302 | { | ||
303 | thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, | ||
304 | THREAD_SIZE, 0, NULL); | ||
305 | BUG_ON(thread_info_cache == NULL); | ||
306 | } | ||
307 | #endif /* THREAD_SHIFT < PAGE_SHIFT */ | ||
308 | |||
309 | #ifdef CONFIG_MEMORY_HOTPLUG | 336 | #ifdef CONFIG_MEMORY_HOTPLUG |
310 | int arch_add_memory(int nid, u64 start, u64 size) | 337 | int arch_add_memory(int nid, u64 start, u64 size) |
311 | { | 338 | { |
@@ -336,10 +363,3 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); | |||
336 | #endif | 363 | #endif |
337 | 364 | ||
338 | #endif /* CONFIG_MEMORY_HOTPLUG */ | 365 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
339 | |||
340 | #ifdef CONFIG_PMB | ||
341 | int __in_29bit_mode(void) | ||
342 | { | ||
343 | return !(ctrl_inl(PMB_PASCR) & PASCR_SE); | ||
344 | } | ||
345 | #endif /* CONFIG_PMB */ | ||
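The _PAGE_WIRED handling added to set_pte_phys()/clear_pte_phys() pairs a fixmap entry with a wired TLB slot: tlb_wire_entry() pins the translation when the mapping is created, and tlb_unwire_entry() releases it on teardown. A hypothetical sketch of the intended usage (FIX_EXAMPLE is an invented fixmap index):

	pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | _PAGE_WIRED);

	__set_fixmap(FIX_EXAMPLE, phys_addr, prot);	/* maps and wires the TLB entry */
	/* ... access the device via fix_to_virt(FIX_EXAMPLE) ... */
	__clear_fixmap(FIX_EXAMPLE, prot);		/* unwires, then clears the PTE */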
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap.c
index 2141befb4f91..c68d2d7d00a9 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap.c
@@ -1,13 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * arch/sh/mm/ioremap.c | 2 | * arch/sh/mm/ioremap.c |
3 | * | 3 | * |
4 | * (C) Copyright 1995 1996 Linus Torvalds | ||
5 | * (C) Copyright 2005 - 2010 Paul Mundt | ||
6 | * | ||
4 | * Re-map IO memory to kernel address space so that we can access it. | 7 | * Re-map IO memory to kernel address space so that we can access it. |
5 | * This is needed for high PCI addresses that aren't mapped in the | 8 | * This is needed for high PCI addresses that aren't mapped in the |
6 | * 640k-1MB IO memory area on PC's | 9 | * 640k-1MB IO memory area on PC's |
7 | * | 10 | * |
8 | * (C) Copyright 1995 1996 Linus Torvalds | ||
9 | * (C) Copyright 2005, 2006 Paul Mundt | ||
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General | 11 | * This file is subject to the terms and conditions of the GNU General |
12 | * Public License. See the file "COPYING" in the main directory of this | 12 | * Public License. See the file "COPYING" in the main directory of this |
13 | * archive for more details. | 13 | * archive for more details. |
@@ -33,12 +33,12 @@ | |||
33 | * have to convert them into an offset in a page-aligned mapping, but the | 33 | * have to convert them into an offset in a page-aligned mapping, but the |
34 | * caller shouldn't need to know that small detail. | 34 | * caller shouldn't need to know that small detail. |
35 | */ | 35 | */ |
36 | void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size, | 36 | void __iomem * __init_refok |
37 | unsigned long flags, void *caller) | 37 | __ioremap_caller(unsigned long phys_addr, unsigned long size, |
38 | pgprot_t pgprot, void *caller) | ||
38 | { | 39 | { |
39 | struct vm_struct *area; | 40 | struct vm_struct *area; |
40 | unsigned long offset, last_addr, addr, orig_addr; | 41 | unsigned long offset, last_addr, addr, orig_addr; |
41 | pgprot_t pgprot; | ||
42 | 42 | ||
43 | /* Don't allow wraparound or zero size */ | 43 | /* Don't allow wraparound or zero size */ |
44 | last_addr = phys_addr + size - 1; | 44 | last_addr = phys_addr + size - 1; |
@@ -46,18 +46,6 @@ void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size, | |||
46 | return NULL; | 46 | return NULL; |
47 | 47 | ||
48 | /* | 48 | /* |
49 | * If we're in the fixed PCI memory range, mapping through page | ||
50 | * tables is not only pointless, but also fundamentally broken. | ||
51 | * Just return the physical address instead. | ||
52 | * | ||
53 | * For boards that map a small PCI memory aperture somewhere in | ||
54 | * P1/P2 space, ioremap() will already do the right thing, | ||
55 | * and we'll never get this far. | ||
56 | */ | ||
57 | if (is_pci_memory_fixed_range(phys_addr, size)) | ||
58 | return (void __iomem *)phys_addr; | ||
59 | |||
60 | /* | ||
61 | * Mappings have to be page-aligned | 49 | * Mappings have to be page-aligned |
62 | */ | 50 | */ |
63 | offset = phys_addr & ~PAGE_MASK; | 51 | offset = phys_addr & ~PAGE_MASK; |
@@ -65,6 +53,12 @@ void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size, | |||
65 | size = PAGE_ALIGN(last_addr+1) - phys_addr; | 53 | size = PAGE_ALIGN(last_addr+1) - phys_addr; |
66 | 54 | ||
67 | /* | 55 | /* |
56 | * If we can't yet use the regular approach, go the fixmap route. | ||
57 | */ | ||
58 | if (!mem_init_done) | ||
59 | return ioremap_fixed(phys_addr, offset, size, pgprot); | ||
60 | |||
61 | /* | ||
68 | * Ok, go for it.. | 62 | * Ok, go for it.. |
69 | */ | 63 | */ |
70 | area = get_vm_area_caller(size, VM_IOREMAP, caller); | 64 | area = get_vm_area_caller(size, VM_IOREMAP, caller); |
@@ -84,8 +78,9 @@ void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size, | |||
84 | * PMB entries are all pre-faulted. | 78 | * PMB entries are all pre-faulted. |
85 | */ | 79 | */ |
86 | if (unlikely(phys_addr >= P1SEG)) { | 80 | if (unlikely(phys_addr >= P1SEG)) { |
87 | unsigned long mapped = pmb_remap(addr, phys_addr, size, flags); | 81 | unsigned long mapped; |
88 | 82 | ||
83 | mapped = pmb_remap(addr, phys_addr, size, pgprot); | ||
89 | if (likely(mapped)) { | 84 | if (likely(mapped)) { |
90 | addr += mapped; | 85 | addr += mapped; |
91 | phys_addr += mapped; | 86 | phys_addr += mapped; |
@@ -94,7 +89,6 @@ void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size, | |||
94 | } | 89 | } |
95 | #endif | 90 | #endif |
96 | 91 | ||
97 | pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags); | ||
98 | if (likely(size)) | 92 | if (likely(size)) |
99 | if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { | 93 | if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { |
100 | vunmap((void *)orig_addr); | 94 | vunmap((void *)orig_addr); |
@@ -105,15 +99,38 @@ void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size, | |||
105 | } | 99 | } |
106 | EXPORT_SYMBOL(__ioremap_caller); | 100 | EXPORT_SYMBOL(__ioremap_caller); |
107 | 101 | ||
102 | /* | ||
103 | * Simple checks for non-translatable mappings. | ||
104 | */ | ||
105 | static inline int iomapping_nontranslatable(unsigned long offset) | ||
106 | { | ||
107 | #ifdef CONFIG_29BIT | ||
108 | /* | ||
109 | * In 29-bit mode this includes the fixed P1/P2 areas, as well as | ||
110 | * parts of P3. | ||
111 | */ | ||
112 | if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX) | ||
113 | return 1; | ||
114 | #endif | ||
115 | |||
116 | return 0; | ||
117 | } | ||
118 | |||
108 | void __iounmap(void __iomem *addr) | 119 | void __iounmap(void __iomem *addr) |
109 | { | 120 | { |
110 | unsigned long vaddr = (unsigned long __force)addr; | 121 | unsigned long vaddr = (unsigned long __force)addr; |
111 | unsigned long seg = PXSEG(vaddr); | ||
112 | struct vm_struct *p; | 122 | struct vm_struct *p; |
113 | 123 | ||
114 | if (seg < P3SEG || vaddr >= P3_ADDR_MAX) | 124 | /* |
125 | * Nothing to do if there is no translatable mapping. | ||
126 | */ | ||
127 | if (iomapping_nontranslatable(vaddr)) | ||
115 | return; | 128 | return; |
116 | if (is_pci_memory_fixed_range(vaddr, 0)) | 129 | |
130 | /* | ||
131 | * There's no VMA if it's from an early fixed mapping. | ||
132 | */ | ||
133 | if (iounmap_fixed(addr) == 0) | ||
117 | return; | 134 | return; |
118 | 135 | ||
119 | #ifdef CONFIG_PMB | 136 | #ifdef CONFIG_PMB |
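In 29-bit mode the new iomapping_nontranslatable() check reflects the fixed segment layout: P1 and P2 are identity windows onto physical memory (cached and uncached respectively), so mappings there never touch page tables and __iounmap() has nothing to free. A sketch of that arithmetic, assuming the P2SEGADDR() helper from <asm/addrspace.h>:

	/* Illustrative only (29-bit mode): "remapping" a physical address
	 * into P2 is pure arithmetic, yielding the uncached alias. */
	void __iomem *p2_uncached_alias(unsigned long phys)
	{
		return (void __iomem *)P2SEGADDR(phys);
	}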
diff --git a/arch/sh/mm/ioremap_64.c b/arch/sh/mm/ioremap_64.c
deleted file mode 100644
index ef434657d428..000000000000
--- a/arch/sh/mm/ioremap_64.c
+++ /dev/null
@@ -1,326 +0,0 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/ioremap_64.c | ||
3 | * | ||
4 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
5 | * Copyright (C) 2003 - 2007 Paul Mundt | ||
6 | * | ||
7 | * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly | ||
8 | * derived from arch/i386/mm/ioremap.c . | ||
9 | * | ||
10 | * (C) Copyright 1995 1996 Linus Torvalds | ||
11 | * | ||
12 | * This file is subject to the terms and conditions of the GNU General Public | ||
13 | * License. See the file "COPYING" in the main directory of this archive | ||
14 | * for more details. | ||
15 | */ | ||
16 | #include <linux/vmalloc.h> | ||
17 | #include <linux/ioport.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/bootmem.h> | ||
22 | #include <linux/proc_fs.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <asm/page.h> | ||
25 | #include <asm/pgalloc.h> | ||
26 | #include <asm/addrspace.h> | ||
27 | #include <asm/cacheflush.h> | ||
28 | #include <asm/tlbflush.h> | ||
29 | #include <asm/mmu.h> | ||
30 | |||
31 | static struct resource shmedia_iomap = { | ||
32 | .name = "shmedia_iomap", | ||
33 | .start = IOBASE_VADDR + PAGE_SIZE, | ||
34 | .end = IOBASE_END - 1, | ||
35 | }; | ||
36 | |||
37 | static void shmedia_mapioaddr(unsigned long pa, unsigned long va, | ||
38 | unsigned long flags); | ||
39 | static void shmedia_unmapioaddr(unsigned long vaddr); | ||
40 | static void __iomem *shmedia_ioremap(struct resource *res, u32 pa, | ||
41 | int sz, unsigned long flags); | ||
42 | |||
43 | /* | ||
44 | * We have the same problem as the SPARC, so lets have the same comment: | ||
45 | * Our mini-allocator... | ||
46 | * Boy this is gross! We need it because we must map I/O for | ||
47 | * timers and interrupt controller before the kmalloc is available. | ||
48 | */ | ||
49 | |||
50 | #define XNMLN 15 | ||
51 | #define XNRES 10 | ||
52 | |||
53 | struct xresource { | ||
54 | struct resource xres; /* Must be first */ | ||
55 | int xflag; /* 1 == used */ | ||
56 | char xname[XNMLN+1]; | ||
57 | }; | ||
58 | |||
59 | static struct xresource xresv[XNRES]; | ||
60 | |||
61 | static struct xresource *xres_alloc(void) | ||
62 | { | ||
63 | struct xresource *xrp; | ||
64 | int n; | ||
65 | |||
66 | xrp = xresv; | ||
67 | for (n = 0; n < XNRES; n++) { | ||
68 | if (xrp->xflag == 0) { | ||
69 | xrp->xflag = 1; | ||
70 | return xrp; | ||
71 | } | ||
72 | xrp++; | ||
73 | } | ||
74 | return NULL; | ||
75 | } | ||
76 | |||
77 | static void xres_free(struct xresource *xrp) | ||
78 | { | ||
79 | xrp->xflag = 0; | ||
80 | } | ||
81 | |||
82 | static struct resource *shmedia_find_resource(struct resource *root, | ||
83 | unsigned long vaddr) | ||
84 | { | ||
85 | struct resource *res; | ||
86 | |||
87 | for (res = root->child; res; res = res->sibling) | ||
88 | if (res->start <= vaddr && res->end >= vaddr) | ||
89 | return res; | ||
90 | |||
91 | return NULL; | ||
92 | } | ||
93 | |||
94 | static void __iomem *shmedia_alloc_io(unsigned long phys, unsigned long size, | ||
95 | const char *name, unsigned long flags) | ||
96 | { | ||
97 | struct xresource *xres; | ||
98 | struct resource *res; | ||
99 | char *tack; | ||
100 | int tlen; | ||
101 | |||
102 | if (name == NULL) | ||
103 | name = "???"; | ||
104 | |||
105 | xres = xres_alloc(); | ||
106 | if (xres != 0) { | ||
107 | tack = xres->xname; | ||
108 | res = &xres->xres; | ||
109 | } else { | ||
110 | printk_once(KERN_NOTICE "%s: done with statics, " | ||
111 | "switching to kmalloc\n", __func__); | ||
112 | tlen = strlen(name); | ||
113 | tack = kmalloc(sizeof(struct resource) + tlen + 1, GFP_KERNEL); | ||
114 | if (!tack) | ||
115 | return NULL; | ||
116 | memset(tack, 0, sizeof(struct resource)); | ||
117 | res = (struct resource *) tack; | ||
118 | tack += sizeof(struct resource); | ||
119 | } | ||
120 | |||
121 | strncpy(tack, name, XNMLN); | ||
122 | tack[XNMLN] = 0; | ||
123 | res->name = tack; | ||
124 | |||
125 | return shmedia_ioremap(res, phys, size, flags); | ||
126 | } | ||
127 | |||
128 | static void __iomem *shmedia_ioremap(struct resource *res, u32 pa, int sz, | ||
129 | unsigned long flags) | ||
130 | { | ||
131 | unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK); | ||
132 | unsigned long round_sz = (offset + sz + PAGE_SIZE-1) & PAGE_MASK; | ||
133 | unsigned long va; | ||
134 | unsigned int psz; | ||
135 | |||
136 | if (allocate_resource(&shmedia_iomap, res, round_sz, | ||
137 | shmedia_iomap.start, shmedia_iomap.end, | ||
138 | PAGE_SIZE, NULL, NULL) != 0) { | ||
139 | panic("alloc_io_res(%s): cannot occupy\n", | ||
140 | (res->name != NULL) ? res->name : "???"); | ||
141 | } | ||
142 | |||
143 | va = res->start; | ||
144 | pa &= PAGE_MASK; | ||
145 | |||
146 | psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE; | ||
147 | |||
148 | for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) { | ||
149 | shmedia_mapioaddr(pa, va, flags); | ||
150 | va += PAGE_SIZE; | ||
151 | pa += PAGE_SIZE; | ||
152 | } | ||
153 | |||
154 | return (void __iomem *)(unsigned long)(res->start + offset); | ||
155 | } | ||
156 | |||
157 | static void shmedia_free_io(struct resource *res) | ||
158 | { | ||
159 | unsigned long len = res->end - res->start + 1; | ||
160 | |||
161 | BUG_ON((len & (PAGE_SIZE - 1)) != 0); | ||
162 | |||
163 | while (len) { | ||
164 | len -= PAGE_SIZE; | ||
165 | shmedia_unmapioaddr(res->start + len); | ||
166 | } | ||
167 | |||
168 | release_resource(res); | ||
169 | } | ||
170 | |||
171 | static __init_refok void *sh64_get_page(void) | ||
172 | { | ||
173 | void *page; | ||
174 | |||
175 | if (slab_is_available()) | ||
176 | page = (void *)get_zeroed_page(GFP_KERNEL); | ||
177 | else | ||
178 | page = alloc_bootmem_pages(PAGE_SIZE); | ||
179 | |||
180 | if (!page || ((unsigned long)page & ~PAGE_MASK)) | ||
181 | panic("sh64_get_page: Out of memory already?\n"); | ||
182 | |||
183 | return page; | ||
184 | } | ||
185 | |||
186 | static void shmedia_mapioaddr(unsigned long pa, unsigned long va, | ||
187 | unsigned long flags) | ||
188 | { | ||
189 | pgd_t *pgdp; | ||
190 | pud_t *pudp; | ||
191 | pmd_t *pmdp; | ||
192 | pte_t *ptep, pte; | ||
193 | pgprot_t prot; | ||
194 | |||
195 | pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa, va); | ||
196 | |||
197 | if (!flags) | ||
198 | flags = 1; /* 1 = CB0-1 device */ | ||
199 | |||
200 | pgdp = pgd_offset_k(va); | ||
201 | if (pgd_none(*pgdp) || !pgd_present(*pgdp)) { | ||
202 | pudp = (pud_t *)sh64_get_page(); | ||
203 | set_pgd(pgdp, __pgd((unsigned long)pudp | _KERNPG_TABLE)); | ||
204 | } | ||
205 | |||
206 | pudp = pud_offset(pgdp, va); | ||
207 | if (pud_none(*pudp) || !pud_present(*pudp)) { | ||
208 | pmdp = (pmd_t *)sh64_get_page(); | ||
209 | set_pud(pudp, __pud((unsigned long)pmdp | _KERNPG_TABLE)); | ||
210 | } | ||
211 | |||
212 | pmdp = pmd_offset(pudp, va); | ||
213 | if (pmd_none(*pmdp) || !pmd_present(*pmdp)) { | ||
214 | ptep = (pte_t *)sh64_get_page(); | ||
215 | set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE)); | ||
216 | } | ||
217 | |||
218 | prot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | | ||
219 | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags); | ||
220 | |||
221 | pte = pfn_pte(pa >> PAGE_SHIFT, prot); | ||
222 | ptep = pte_offset_kernel(pmdp, va); | ||
223 | |||
224 | if (!pte_none(*ptep) && | ||
225 | pte_val(*ptep) != pte_val(pte)) | ||
226 | pte_ERROR(*ptep); | ||
227 | |||
228 | set_pte(ptep, pte); | ||
229 | |||
230 | flush_tlb_kernel_range(va, va + PAGE_SIZE); | ||
231 | } | ||
232 | |||
233 | static void shmedia_unmapioaddr(unsigned long vaddr) | ||
234 | { | ||
235 | pgd_t *pgdp; | ||
236 | pud_t *pudp; | ||
237 | pmd_t *pmdp; | ||
238 | pte_t *ptep; | ||
239 | |||
240 | pgdp = pgd_offset_k(vaddr); | ||
241 | if (pgd_none(*pgdp) || pgd_bad(*pgdp)) | ||
242 | return; | ||
243 | |||
244 | pudp = pud_offset(pgdp, vaddr); | ||
245 | if (pud_none(*pudp) || pud_bad(*pudp)) | ||
246 | return; | ||
247 | |||
248 | pmdp = pmd_offset(pudp, vaddr); | ||
249 | if (pmd_none(*pmdp) || pmd_bad(*pmdp)) | ||
250 | return; | ||
251 | |||
252 | ptep = pte_offset_kernel(pmdp, vaddr); | ||
253 | |||
254 | if (pte_none(*ptep) || !pte_present(*ptep)) | ||
255 | return; | ||
256 | |||
257 | clear_page((void *)ptep); | ||
258 | pte_clear(&init_mm, vaddr, ptep); | ||
259 | } | ||
260 | |||
261 | void __iomem *__ioremap_caller(unsigned long offset, unsigned long size, | ||
262 | unsigned long flags, void *caller) | ||
263 | { | ||
264 | char name[14]; | ||
265 | |||
266 | sprintf(name, "phys_%08x", (u32)offset); | ||
267 | return shmedia_alloc_io(offset, size, name, flags); | ||
268 | } | ||
269 | EXPORT_SYMBOL(__ioremap_caller); | ||
270 | |||
271 | void __iounmap(void __iomem *virtual) | ||
272 | { | ||
273 | unsigned long vaddr = (unsigned long)virtual & PAGE_MASK; | ||
274 | struct resource *res; | ||
275 | unsigned int psz; | ||
276 | |||
277 | res = shmedia_find_resource(&shmedia_iomap, vaddr); | ||
278 | if (!res) { | ||
279 | printk(KERN_ERR "%s: Failed to free 0x%08lx\n", | ||
280 | __func__, vaddr); | ||
281 | return; | ||
282 | } | ||
283 | |||
284 | psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE; | ||
285 | |||
286 | shmedia_free_io(res); | ||
287 | |||
288 | if ((char *)res >= (char *)xresv && | ||
289 | (char *)res < (char *)&xresv[XNRES]) { | ||
290 | xres_free((struct xresource *)res); | ||
291 | } else { | ||
292 | kfree(res); | ||
293 | } | ||
294 | } | ||
295 | EXPORT_SYMBOL(__iounmap); | ||
296 | |||
297 | static int | ||
298 | ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, | ||
299 | void *data) | ||
300 | { | ||
301 | char *p = buf, *e = buf + length; | ||
302 | struct resource *r; | ||
303 | const char *nm; | ||
304 | |||
305 | for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) { | ||
306 | if (p + 32 >= e) /* Better than nothing */ | ||
307 | break; | ||
308 | nm = r->name; | ||
309 | if (nm == NULL) | ||
310 | nm = "???"; | ||
311 | |||
312 | p += sprintf(p, "%08lx-%08lx: %s\n", | ||
313 | (unsigned long)r->start, | ||
314 | (unsigned long)r->end, nm); | ||
315 | } | ||
316 | |||
317 | return p-buf; | ||
318 | } | ||
319 | |||
320 | static int __init register_proc_onchip(void) | ||
321 | { | ||
322 | create_proc_read_entry("io_map", 0, 0, ioremap_proc_info, | ||
323 | &shmedia_iomap); | ||
324 | return 0; | ||
325 | } | ||
326 | late_initcall(register_proc_onchip); | ||
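
For reference, the shmedia_* machinery above is what a driver ultimately reaches through the generic ioremap()/iounmap() wrappers, which on this tree funnel into __ioremap_caller()/__iounmap(). A hedged usage sketch follows; DEV_PHYS_BASE and DEV_REG are hypothetical placeholders, not names from this code:

	/* Illustrative driver usage; DEV_PHYS_BASE/DEV_REG are made up. */
	void __iomem *regs;

	regs = ioremap(DEV_PHYS_BASE, SZ_4K);	/* lands in shmedia_alloc_io() */
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + DEV_REG);		/* poke a device register */

	iounmap(regs);				/* releases the resource and PTEs */
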
diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c new file mode 100644 index 000000000000..0b78b1e20ef1 --- /dev/null +++ b/arch/sh/mm/ioremap_fixed.c | |||
@@ -0,0 +1,128 @@ | |||
1 | /* | ||
2 | * Re-map IO memory to kernel address space so that we can access it. | ||
3 | * | ||
4 | * These functions should only be used when it is necessary to map a | ||
5 | * physical address space into the kernel address space before ioremap() | ||
6 | * can be used, e.g. early in boot before paging_init(). | ||
7 | * | ||
8 | * Copyright (C) 2009 Matt Fleming | ||
9 | */ | ||
10 | |||
11 | #include <linux/vmalloc.h> | ||
12 | #include <linux/ioport.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/io.h> | ||
16 | #include <linux/bootmem.h> | ||
17 | #include <linux/proc_fs.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <asm/fixmap.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <asm/pgalloc.h> | ||
22 | #include <asm/addrspace.h> | ||
23 | #include <asm/cacheflush.h> | ||
24 | #include <asm/tlbflush.h> | ||
25 | #include <asm/mmu.h> | ||
26 | #include <asm/mmu_context.h> | ||
27 | |||
28 | struct ioremap_map { | ||
29 | void __iomem *addr; | ||
30 | unsigned long size; | ||
31 | unsigned long fixmap_addr; | ||
32 | }; | ||
33 | |||
34 | static struct ioremap_map ioremap_maps[FIX_N_IOREMAPS]; | ||
35 | |||
36 | void __init ioremap_fixed_init(void) | ||
37 | { | ||
38 | struct ioremap_map *map; | ||
39 | int i; | ||
40 | |||
41 | for (i = 0; i < FIX_N_IOREMAPS; i++) { | ||
42 | map = &ioremap_maps[i]; | ||
43 | map->fixmap_addr = __fix_to_virt(FIX_IOREMAP_BEGIN + i); | ||
44 | } | ||
45 | } | ||
46 | |||
47 | void __init __iomem * | ||
48 | ioremap_fixed(resource_size_t phys_addr, unsigned long offset, | ||
49 | unsigned long size, pgprot_t prot) | ||
50 | { | ||
51 | enum fixed_addresses idx0, idx; | ||
52 | struct ioremap_map *map; | ||
53 | unsigned int nrpages; | ||
54 | int i, slot; | ||
55 | |||
56 | slot = -1; | ||
57 | for (i = 0; i < FIX_N_IOREMAPS; i++) { | ||
58 | map = &ioremap_maps[i]; | ||
59 | if (!map->addr) { | ||
60 | map->size = size; | ||
61 | slot = i; | ||
62 | break; | ||
63 | } | ||
64 | } | ||
65 | |||
66 | if (slot < 0) | ||
67 | return NULL; | ||
68 | |||
69 | /* | ||
70 | * Mappings have to fit in the FIX_IOREMAP area. | ||
71 | */ | ||
72 | nrpages = size >> PAGE_SHIFT; | ||
73 | if (nrpages > FIX_N_IOREMAPS) | ||
74 | return NULL; | ||
75 | |||
76 | /* | ||
77 | * Ok, go for it.. | ||
78 | */ | ||
79 | idx0 = FIX_IOREMAP_BEGIN + slot; | ||
80 | idx = idx0; | ||
81 | while (nrpages > 0) { | ||
82 | pgprot_val(prot) |= _PAGE_WIRED; | ||
83 | __set_fixmap(idx, phys_addr, prot); | ||
84 | phys_addr += PAGE_SIZE; | ||
85 | idx++; | ||
86 | --nrpages; | ||
87 | } | ||
88 | |||
89 | map->addr = (void __iomem *)(offset + map->fixmap_addr); | ||
90 | return map->addr; | ||
91 | } | ||
92 | |||
93 | int iounmap_fixed(void __iomem *addr) | ||
94 | { | ||
95 | enum fixed_addresses idx; | ||
96 | struct ioremap_map *map; | ||
97 | unsigned int nrpages; | ||
98 | int i, slot; | ||
99 | |||
100 | slot = -1; | ||
101 | for (i = 0; i < FIX_N_IOREMAPS; i++) { | ||
102 | map = &ioremap_maps[i]; | ||
103 | if (map->addr == addr) { | ||
104 | slot = i; | ||
105 | break; | ||
106 | } | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * If we don't match, it's not for us. | ||
111 | */ | ||
112 | if (slot < 0) | ||
113 | return -EINVAL; | ||
114 | |||
115 | nrpages = map->size >> PAGE_SHIFT; | ||
116 | |||
117 | idx = FIX_IOREMAP_BEGIN + slot + nrpages - 1; | ||
118 | while (nrpages > 0) { | ||
119 | __clear_fixmap(idx, __pgprot(_PAGE_WIRED)); | ||
120 | --idx; | ||
121 | --nrpages; | ||
122 | } | ||
123 | |||
124 | map->size = 0; | ||
125 | map->addr = NULL; | ||
126 | |||
127 | return 0; | ||
128 | } | ||
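
A minimal sketch of the intended early-boot usage, before paging_init() makes ioremap() proper viable; PLAT_PHYS_BASE is a hypothetical device address and the nocache pgprot is only one plausible choice:

	/* Early boot; ioremap() proper is not yet available. */
	void __iomem *early_regs;

	early_regs = ioremap_fixed(PLAT_PHYS_BASE & PAGE_MASK,	/* page base */
				   PLAT_PHYS_BASE & ~PAGE_MASK,	/* in-page offset */
				   PAGE_SIZE, PAGE_KERNEL_NOCACHE);
	if (!early_regs)
		return;		/* all FIX_N_IOREMAPS slots are busy */

	/* ... early register accesses ... */

	iounmap_fixed(early_regs);	/* slot becomes reusable */
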
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c index ac16c05917ef..7694f50c9034 100644 --- a/arch/sh/mm/nommu.c +++ b/arch/sh/mm/nommu.c | |||
@@ -94,3 +94,7 @@ void __init page_table_range_init(unsigned long start, unsigned long end, | |||
94 | void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) | 94 | void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) |
95 | { | 95 | { |
96 | } | 96 | } |
97 | |||
98 | void pgtable_cache_init(void) | ||
99 | { | ||
100 | } | ||
diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c new file mode 100644 index 000000000000..6f21fb1d8726 --- /dev/null +++ b/arch/sh/mm/pgtable.c | |||
@@ -0,0 +1,56 @@ | |||
1 | #include <linux/mm.h> | ||
2 | |||
3 | #define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO | ||
4 | |||
5 | static struct kmem_cache *pgd_cachep; | ||
6 | #if PAGETABLE_LEVELS > 2 | ||
7 | static struct kmem_cache *pmd_cachep; | ||
8 | #endif | ||
9 | |||
10 | void pgd_ctor(void *x) | ||
11 | { | ||
12 | pgd_t *pgd = x; | ||
13 | |||
14 | memcpy(pgd + USER_PTRS_PER_PGD, | ||
15 | swapper_pg_dir + USER_PTRS_PER_PGD, | ||
16 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); | ||
17 | } | ||
18 | |||
19 | void pgtable_cache_init(void) | ||
20 | { | ||
21 | pgd_cachep = kmem_cache_create("pgd_cache", | ||
22 | PTRS_PER_PGD * (1<<PTE_MAGNITUDE), | ||
23 | PAGE_SIZE, SLAB_PANIC, pgd_ctor); | ||
24 | #if PAGETABLE_LEVELS > 2 | ||
25 | pmd_cachep = kmem_cache_create("pmd_cache", | ||
26 | PTRS_PER_PMD * (1<<PTE_MAGNITUDE), | ||
27 | PAGE_SIZE, SLAB_PANIC, NULL); | ||
28 | #endif | ||
29 | } | ||
30 | |||
31 | pgd_t *pgd_alloc(struct mm_struct *mm) | ||
32 | { | ||
33 | return kmem_cache_alloc(pgd_cachep, PGALLOC_GFP); | ||
34 | } | ||
35 | |||
36 | void pgd_free(struct mm_struct *mm, pgd_t *pgd) | ||
37 | { | ||
38 | kmem_cache_free(pgd_cachep, pgd); | ||
39 | } | ||
40 | |||
41 | #if PAGETABLE_LEVELS > 2 | ||
42 | void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | ||
43 | { | ||
44 | set_pud(pud, __pud((unsigned long)pmd)); | ||
45 | } | ||
46 | |||
47 | pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) | ||
48 | { | ||
49 | return kmem_cache_alloc(pmd_cachep, PGALLOC_GFP); | ||
50 | } | ||
51 | |||
52 | void pmd_free(struct mm_struct *mm, pmd_t *pmd) | ||
53 | { | ||
54 | kmem_cache_free(pmd_cachep, pmd); | ||
55 | } | ||
56 | #endif /* PAGETABLE_LEVELS > 2 */ | ||
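
The pgd cache above leans on slab constructor semantics: pgd_ctor() runs when the allocator populates a slab page with objects, so every pgd returned by pgd_alloc() already carries the kernel mappings copied from swapper_pg_dir. A hedged sketch of that pattern in isolation, with made-up names:

	/* The "widget" cache and its 64-byte object size are illustrative. */
	static struct kmem_cache *widget_cachep;

	static void widget_ctor(void *x)
	{
		memset(x, 0, 64);	/* runs per object creation, not per alloc */
	}

	widget_cachep = kmem_cache_create("widget", 64, 0,
					  SLAB_PANIC, widget_ctor);
	/* Objects come back pre-constructed: */
	void *w = kmem_cache_alloc(widget_cachep, GFP_KERNEL);
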
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index 280f6a166035..198bcff5e96f 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c | |||
@@ -3,11 +3,8 @@ | |||
3 | * | 3 | * |
4 | * Privileged Space Mapping Buffer (PMB) Support. | 4 | * Privileged Space Mapping Buffer (PMB) Support. |
5 | * | 5 | * |
6 | * Copyright (C) 2005, 2006, 2007 Paul Mundt | 6 | * Copyright (C) 2005 - 2010 Paul Mundt |
7 | * | 7 | * Copyright (C) 2010 Matt Fleming |
8 | * P1/P2 Section mapping definitions from map32.h, which was: | ||
9 | * | ||
10 | * Copyright 2003 (c) Lineo Solutions,Inc. | ||
11 | * | 8 | * |
12 | * This file is subject to the terms and conditions of the GNU General Public | 9 | * This file is subject to the terms and conditions of the GNU General Public |
13 | * License. See the file "COPYING" in the main directory of this archive | 10 | * License. See the file "COPYING" in the main directory of this archive |
@@ -24,47 +21,67 @@ | |||
24 | #include <linux/fs.h> | 21 | #include <linux/fs.h> |
25 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
26 | #include <linux/err.h> | 23 | #include <linux/err.h> |
24 | #include <linux/io.h> | ||
25 | #include <linux/spinlock.h> | ||
26 | #include <linux/rwlock.h> | ||
27 | #include <asm/sizes.h> | ||
27 | #include <asm/system.h> | 28 | #include <asm/system.h> |
28 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
29 | #include <asm/pgtable.h> | 30 | #include <asm/pgtable.h> |
31 | #include <asm/page.h> | ||
30 | #include <asm/mmu.h> | 32 | #include <asm/mmu.h> |
31 | #include <asm/io.h> | ||
32 | #include <asm/mmu_context.h> | 33 | #include <asm/mmu_context.h> |
33 | 34 | ||
34 | #define NR_PMB_ENTRIES 16 | 35 | struct pmb_entry; |
36 | |||
37 | struct pmb_entry { | ||
38 | unsigned long vpn; | ||
39 | unsigned long ppn; | ||
40 | unsigned long flags; | ||
41 | unsigned long size; | ||
35 | 42 | ||
36 | static void __pmb_unmap(struct pmb_entry *); | 43 | spinlock_t lock; |
44 | |||
45 | /* | ||
46 | * 0 .. NR_PMB_ENTRIES for specific entry selection, or | ||
47 | * PMB_NO_ENTRY to search for a free one | ||
48 | */ | ||
49 | int entry; | ||
37 | 50 | ||
51 | /* Adjacent entry link for contiguous multi-entry mappings */ | ||
52 | struct pmb_entry *link; | ||
53 | }; | ||
54 | |||
55 | static void pmb_unmap_entry(struct pmb_entry *, int depth); | ||
56 | |||
57 | static DEFINE_RWLOCK(pmb_rwlock); | ||
38 | static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; | 58 | static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; |
39 | static unsigned long pmb_map; | 59 | static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES); |
40 | 60 | ||
41 | static inline unsigned long mk_pmb_entry(unsigned int entry) | 61 | static __always_inline unsigned long mk_pmb_entry(unsigned int entry) |
42 | { | 62 | { |
43 | return (entry & PMB_E_MASK) << PMB_E_SHIFT; | 63 | return (entry & PMB_E_MASK) << PMB_E_SHIFT; |
44 | } | 64 | } |
45 | 65 | ||
46 | static inline unsigned long mk_pmb_addr(unsigned int entry) | 66 | static __always_inline unsigned long mk_pmb_addr(unsigned int entry) |
47 | { | 67 | { |
48 | return mk_pmb_entry(entry) | PMB_ADDR; | 68 | return mk_pmb_entry(entry) | PMB_ADDR; |
49 | } | 69 | } |
50 | 70 | ||
51 | static inline unsigned long mk_pmb_data(unsigned int entry) | 71 | static __always_inline unsigned long mk_pmb_data(unsigned int entry) |
52 | { | 72 | { |
53 | return mk_pmb_entry(entry) | PMB_DATA; | 73 | return mk_pmb_entry(entry) | PMB_DATA; |
54 | } | 74 | } |
55 | 75 | ||
56 | static int pmb_alloc_entry(void) | 76 | static int pmb_alloc_entry(void) |
57 | { | 77 | { |
58 | unsigned int pos; | 78 | int pos; |
59 | |||
60 | repeat: | ||
61 | pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); | ||
62 | |||
63 | if (unlikely(pos > NR_PMB_ENTRIES)) | ||
64 | return -ENOSPC; | ||
65 | 79 | ||
66 | if (test_and_set_bit(pos, &pmb_map)) | 80 | pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES); |
67 | goto repeat; | 81 | if (pos >= 0 && pos < NR_PMB_ENTRIES) |
82 | __set_bit(pos, pmb_map); | ||
83 | else | ||
84 | pos = -ENOSPC; | ||
68 | 85 | ||
69 | return pos; | 86 | return pos; |
70 | } | 87 | } |
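
The rework also changes the allocator's locking contract: the old test_and_set_bit() retry loop is gone because callers now hold pmb_rwlock for writing, which is the only thing that makes the non-atomic __set_bit() safe. The same pattern in isolation, as a sketch (NR_SLOTS is illustrative):

	static DECLARE_BITMAP(slot_map, NR_SLOTS);

	/* Caller must hold the map's write lock. */
	static int slot_alloc(void)
	{
		int pos = find_first_zero_bit(slot_map, NR_SLOTS);

		if (pos >= NR_SLOTS)
			return -ENOSPC;

		__set_bit(pos, slot_map);	/* non-atomic, lock held */
		return pos;
	}
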
@@ -73,21 +90,34 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, | |||
73 | unsigned long flags, int entry) | 90 | unsigned long flags, int entry) |
74 | { | 91 | { |
75 | struct pmb_entry *pmbe; | 92 | struct pmb_entry *pmbe; |
93 | unsigned long irqflags; | ||
94 | void *ret = NULL; | ||
76 | int pos; | 95 | int pos; |
77 | 96 | ||
97 | write_lock_irqsave(&pmb_rwlock, irqflags); | ||
98 | |||
78 | if (entry == PMB_NO_ENTRY) { | 99 | if (entry == PMB_NO_ENTRY) { |
79 | pos = pmb_alloc_entry(); | 100 | pos = pmb_alloc_entry(); |
80 | if (pos < 0) | 101 | if (unlikely(pos < 0)) { |
81 | return ERR_PTR(pos); | 102 | ret = ERR_PTR(pos); |
103 | goto out; | ||
104 | } | ||
82 | } else { | 105 | } else { |
83 | if (test_bit(entry, &pmb_map)) | 106 | if (__test_and_set_bit(entry, pmb_map)) { |
84 | return ERR_PTR(-ENOSPC); | 107 | ret = ERR_PTR(-ENOSPC); |
108 | goto out; | ||
109 | } | ||
110 | |||
85 | pos = entry; | 111 | pos = entry; |
86 | } | 112 | } |
87 | 113 | ||
114 | write_unlock_irqrestore(&pmb_rwlock, irqflags); | ||
115 | |||
88 | pmbe = &pmb_entry_list[pos]; | 116 | pmbe = &pmb_entry_list[pos]; |
89 | if (!pmbe) | 117 | |
90 | return ERR_PTR(-ENOMEM); | 118 | memset(pmbe, 0, sizeof(struct pmb_entry)); |
119 | |||
120 | spin_lock_init(&pmbe->lock); | ||
91 | 121 | ||
92 | pmbe->vpn = vpn; | 122 | pmbe->vpn = vpn; |
93 | pmbe->ppn = ppn; | 123 | pmbe->ppn = ppn; |
@@ -95,101 +125,113 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, | |||
95 | pmbe->entry = pos; | 125 | pmbe->entry = pos; |
96 | 126 | ||
97 | return pmbe; | 127 | return pmbe; |
128 | |||
129 | out: | ||
130 | write_unlock_irqrestore(&pmb_rwlock, irqflags); | ||
131 | return ret; | ||
98 | } | 132 | } |
99 | 133 | ||
100 | static void pmb_free(struct pmb_entry *pmbe) | 134 | static void pmb_free(struct pmb_entry *pmbe) |
101 | { | 135 | { |
102 | int pos = pmbe->entry; | 136 | __clear_bit(pmbe->entry, pmb_map); |
103 | |||
104 | pmbe->vpn = 0; | ||
105 | pmbe->ppn = 0; | ||
106 | pmbe->flags = 0; | ||
107 | pmbe->entry = 0; | ||
108 | 137 | ||
109 | clear_bit(pos, &pmb_map); | 138 | pmbe->entry = PMB_NO_ENTRY; |
139 | pmbe->link = NULL; | ||
110 | } | 140 | } |
111 | 141 | ||
112 | /* | 142 | /* |
113 | * Must be in P2 for __set_pmb_entry() | 143 | * Ensure that the PMB entries match our cache configuration. |
144 | * | ||
145 | * When we are in 32-bit address extended mode, CCR.CB becomes | ||
146 | * invalid, so care must be taken to manually adjust cacheable | ||
147 | * translations. | ||
114 | */ | 148 | */ |
115 | static void __set_pmb_entry(unsigned long vpn, unsigned long ppn, | 149 | static __always_inline unsigned long pmb_cache_flags(void) |
116 | unsigned long flags, int pos) | ||
117 | { | 150 | { |
118 | ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos)); | 151 | unsigned long flags = 0; |
119 | 152 | ||
120 | #ifdef CONFIG_CACHE_WRITETHROUGH | 153 | #if defined(CONFIG_CACHE_WRITETHROUGH) |
121 | /* | 154 | flags |= PMB_C | PMB_WT | PMB_UB; |
122 | * When we are in 32-bit address extended mode, CCR.CB becomes | 155 | #elif defined(CONFIG_CACHE_WRITEBACK) |
123 | * invalid, so care must be taken to manually adjust cacheable | 156 | flags |= PMB_C; |
124 | * translations. | ||
125 | */ | ||
126 | if (likely(flags & PMB_C)) | ||
127 | flags |= PMB_WT; | ||
128 | #endif | 157 | #endif |
129 | 158 | ||
130 | ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos)); | 159 | return flags; |
131 | } | 160 | } |
132 | 161 | ||
133 | static void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe) | 162 | /* |
163 | * Must be run uncached. | ||
164 | */ | ||
165 | static void __set_pmb_entry(struct pmb_entry *pmbe) | ||
134 | { | 166 | { |
135 | jump_to_uncached(); | 167 | writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry)); |
136 | __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry); | 168 | writel_uncached(pmbe->ppn | pmbe->flags | PMB_V, |
137 | back_to_cached(); | 169 | mk_pmb_data(pmbe->entry)); |
138 | } | 170 | } |
139 | 171 | ||
140 | static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) | 172 | static void __clear_pmb_entry(struct pmb_entry *pmbe) |
141 | { | 173 | { |
142 | unsigned int entry = pmbe->entry; | 174 | unsigned long addr, data; |
143 | unsigned long addr; | 175 | unsigned long addr_val, data_val; |
144 | 176 | ||
145 | if (unlikely(entry >= NR_PMB_ENTRIES)) | 177 | addr = mk_pmb_addr(pmbe->entry); |
146 | return; | 178 | data = mk_pmb_data(pmbe->entry); |
147 | 179 | ||
148 | jump_to_uncached(); | 180 | addr_val = __raw_readl(addr); |
181 | data_val = __raw_readl(data); | ||
149 | 182 | ||
150 | /* Clear V-bit */ | 183 | /* Clear V-bit */ |
151 | addr = mk_pmb_addr(entry); | 184 | writel_uncached(addr_val & ~PMB_V, addr); |
152 | ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr); | 185 | writel_uncached(data_val & ~PMB_V, data); |
186 | } | ||
153 | 187 | ||
154 | addr = mk_pmb_data(entry); | 188 | static void set_pmb_entry(struct pmb_entry *pmbe) |
155 | ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr); | 189 | { |
190 | unsigned long flags; | ||
156 | 191 | ||
157 | back_to_cached(); | 192 | spin_lock_irqsave(&pmbe->lock, flags); |
193 | __set_pmb_entry(pmbe); | ||
194 | spin_unlock_irqrestore(&pmbe->lock, flags); | ||
158 | } | 195 | } |
159 | 196 | ||
160 | |||
161 | static struct { | 197 | static struct { |
162 | unsigned long size; | 198 | unsigned long size; |
163 | int flag; | 199 | int flag; |
164 | } pmb_sizes[] = { | 200 | } pmb_sizes[] = { |
165 | { .size = 0x20000000, .flag = PMB_SZ_512M, }, | 201 | { .size = SZ_512M, .flag = PMB_SZ_512M, }, |
166 | { .size = 0x08000000, .flag = PMB_SZ_128M, }, | 202 | { .size = SZ_128M, .flag = PMB_SZ_128M, }, |
167 | { .size = 0x04000000, .flag = PMB_SZ_64M, }, | 203 | { .size = SZ_64M, .flag = PMB_SZ_64M, }, |
168 | { .size = 0x01000000, .flag = PMB_SZ_16M, }, | 204 | { .size = SZ_16M, .flag = PMB_SZ_16M, }, |
169 | }; | 205 | }; |
170 | 206 | ||
171 | long pmb_remap(unsigned long vaddr, unsigned long phys, | 207 | long pmb_remap(unsigned long vaddr, unsigned long phys, |
172 | unsigned long size, unsigned long flags) | 208 | unsigned long size, pgprot_t prot) |
173 | { | 209 | { |
174 | struct pmb_entry *pmbp, *pmbe; | 210 | struct pmb_entry *pmbp, *pmbe; |
175 | unsigned long wanted; | 211 | unsigned long wanted; |
176 | int pmb_flags, i; | 212 | int pmb_flags, i; |
177 | long err; | 213 | long err; |
214 | u64 flags; | ||
215 | |||
216 | flags = pgprot_val(prot); | ||
217 | |||
218 | pmb_flags = PMB_WT | PMB_UB; | ||
178 | 219 | ||
179 | /* Convert typical pgprot value to the PMB equivalent */ | 220 | /* Convert typical pgprot value to the PMB equivalent */ |
180 | if (flags & _PAGE_CACHABLE) { | 221 | if (flags & _PAGE_CACHABLE) { |
181 | if (flags & _PAGE_WT) | 222 | pmb_flags |= PMB_C; |
182 | pmb_flags = PMB_WT; | 223 | |
183 | else | 224 | if ((flags & _PAGE_WT) == 0) |
184 | pmb_flags = PMB_C; | 225 | pmb_flags &= ~(PMB_WT | PMB_UB); |
185 | } else | 226 | } |
186 | pmb_flags = PMB_WT | PMB_UB; | ||
187 | 227 | ||
188 | pmbp = NULL; | 228 | pmbp = NULL; |
189 | wanted = size; | 229 | wanted = size; |
190 | 230 | ||
191 | again: | 231 | again: |
192 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { | 232 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { |
233 | unsigned long flags; | ||
234 | |||
193 | if (size < pmb_sizes[i].size) | 235 | if (size < pmb_sizes[i].size) |
194 | continue; | 236 | continue; |
195 | 237 | ||
@@ -200,18 +242,25 @@ again: | |||
200 | goto out; | 242 | goto out; |
201 | } | 243 | } |
202 | 244 | ||
203 | set_pmb_entry(pmbe); | 245 | spin_lock_irqsave(&pmbe->lock, flags); |
246 | |||
247 | __set_pmb_entry(pmbe); | ||
204 | 248 | ||
205 | phys += pmb_sizes[i].size; | 249 | phys += pmb_sizes[i].size; |
206 | vaddr += pmb_sizes[i].size; | 250 | vaddr += pmb_sizes[i].size; |
207 | size -= pmb_sizes[i].size; | 251 | size -= pmb_sizes[i].size; |
208 | 252 | ||
253 | pmbe->size = pmb_sizes[i].size; | ||
254 | |||
209 | /* | 255 | /* |
210 | * Link adjacent entries that span multiple PMB entries | 256 | * Link adjacent entries that span multiple PMB entries |
211 | * for easier tear-down. | 257 | * for easier tear-down. |
212 | */ | 258 | */ |
213 | if (likely(pmbp)) | 259 | if (likely(pmbp)) { |
260 | spin_lock(&pmbp->lock); | ||
214 | pmbp->link = pmbe; | 261 | pmbp->link = pmbe; |
262 | spin_unlock(&pmbp->lock); | ||
263 | } | ||
215 | 264 | ||
216 | pmbp = pmbe; | 265 | pmbp = pmbe; |
217 | 266 | ||
@@ -221,16 +270,17 @@ again: | |||
221 | * pmb_sizes[i].size again. | 270 | * pmb_sizes[i].size again. |
222 | */ | 271 | */ |
223 | i--; | 272 | i--; |
273 | |||
274 | spin_unlock_irqrestore(&pmbe->lock, flags); | ||
224 | } | 275 | } |
225 | 276 | ||
226 | if (size >= 0x1000000) | 277 | if (size >= SZ_16M) |
227 | goto again; | 278 | goto again; |
228 | 279 | ||
229 | return wanted - size; | 280 | return wanted - size; |
230 | 281 | ||
231 | out: | 282 | out: |
232 | if (pmbp) | 283 | pmb_unmap_entry(pmbp, NR_PMB_ENTRIES); |
233 | __pmb_unmap(pmbp); | ||
234 | 284 | ||
235 | return err; | 285 | return err; |
236 | } | 286 | } |
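
To make the size selection concrete: pmb_sizes[] is ordered largest first, and the i-- retry means each size is consumed repeatedly before falling through to the next, so a 160MB request becomes 128MB + 16MB + 16MB linked entries. An equivalent decomposition sketch with the hardware programming stripped out:

	/* Sketch: 160MB decomposes as 128M, then 16M twice. */
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		while (size >= pmb_sizes[i].size) {
			/* program one entry covering pmb_sizes[i].size */
			vaddr += pmb_sizes[i].size;
			phys  += pmb_sizes[i].size;
			size  -= pmb_sizes[i].size;
		}
	}
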
@@ -240,24 +290,52 @@ void pmb_unmap(unsigned long addr) | |||
240 | struct pmb_entry *pmbe = NULL; | 290 | struct pmb_entry *pmbe = NULL; |
241 | int i; | 291 | int i; |
242 | 292 | ||
293 | read_lock(&pmb_rwlock); | ||
294 | |||
243 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { | 295 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { |
244 | if (test_bit(i, &pmb_map)) { | 296 | if (test_bit(i, pmb_map)) { |
245 | pmbe = &pmb_entry_list[i]; | 297 | pmbe = &pmb_entry_list[i]; |
246 | if (pmbe->vpn == addr) | 298 | if (pmbe->vpn == addr) |
247 | break; | 299 | break; |
248 | } | 300 | } |
249 | } | 301 | } |
250 | 302 | ||
251 | if (unlikely(!pmbe)) | 303 | read_unlock(&pmb_rwlock); |
252 | return; | ||
253 | 304 | ||
254 | __pmb_unmap(pmbe); | 305 | pmb_unmap_entry(pmbe, NR_PMB_ENTRIES); |
255 | } | 306 | } |
256 | 307 | ||
257 | static void __pmb_unmap(struct pmb_entry *pmbe) | 308 | static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b) |
258 | { | 309 | { |
259 | BUG_ON(!test_bit(pmbe->entry, &pmb_map)); | 310 | return (b->vpn == (a->vpn + a->size)) && |
311 | (b->ppn == (a->ppn + a->size)) && | ||
312 | (b->flags == a->flags); | ||
313 | } | ||
260 | 314 | ||
315 | static bool pmb_size_valid(unsigned long size) | ||
316 | { | ||
317 | int i; | ||
318 | |||
319 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) | ||
320 | if (pmb_sizes[i].size == size) | ||
321 | return true; | ||
322 | |||
323 | return false; | ||
324 | } | ||
325 | |||
326 | static int pmb_size_to_flags(unsigned long size) | ||
327 | { | ||
328 | int i; | ||
329 | |||
330 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) | ||
331 | if (pmb_sizes[i].size == size) | ||
332 | return pmb_sizes[i].flag; | ||
333 | |||
334 | return 0; | ||
335 | } | ||
336 | |||
337 | static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth) | ||
338 | { | ||
261 | do { | 339 | do { |
262 | struct pmb_entry *pmblink = pmbe; | 340 | struct pmb_entry *pmblink = pmbe; |
263 | 341 | ||
@@ -268,102 +346,312 @@ static void __pmb_unmap(struct pmb_entry *pmbe) | |||
268 | * this entry in pmb_alloc() (even if we haven't filled | 346 | * this entry in pmb_alloc() (even if we haven't filled |
269 | * it yet). | 347 | * it yet). |
270 | * | 348 | * |
271 | * Therefore, calling clear_pmb_entry() is safe as no | 349 | * Therefore, calling __clear_pmb_entry() is safe as no |
272 | * other mapping can be using that slot. | 350 | * other mapping can be using that slot. |
273 | */ | 351 | */ |
274 | clear_pmb_entry(pmbe); | 352 | __clear_pmb_entry(pmbe); |
275 | 353 | ||
276 | pmbe = pmblink->link; | 354 | pmbe = pmblink->link; |
277 | 355 | ||
278 | pmb_free(pmblink); | 356 | pmb_free(pmblink); |
279 | } while (pmbe); | 357 | } while (pmbe && --depth); |
358 | } | ||
359 | |||
360 | static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth) | ||
361 | { | ||
362 | unsigned long flags; | ||
363 | |||
364 | if (unlikely(!pmbe)) | ||
365 | return; | ||
366 | |||
367 | write_lock_irqsave(&pmb_rwlock, flags); | ||
368 | __pmb_unmap_entry(pmbe, depth); | ||
369 | write_unlock_irqrestore(&pmb_rwlock, flags); | ||
370 | } | ||
371 | |||
372 | static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn) | ||
373 | { | ||
374 | return ppn >= __pa(memory_start) && ppn < __pa(memory_end); | ||
280 | } | 375 | } |
281 | 376 | ||
282 | #ifdef CONFIG_PMB | 377 | static void __init pmb_notify(void) |
283 | int __uses_jump_to_uncached pmb_init(void) | ||
284 | { | 378 | { |
285 | unsigned int i; | 379 | int i; |
286 | long size, ret; | ||
287 | 380 | ||
288 | jump_to_uncached(); | 381 | pr_info("PMB: boot mappings:\n"); |
382 | |||
383 | read_lock(&pmb_rwlock); | ||
384 | |||
385 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { | ||
386 | struct pmb_entry *pmbe; | ||
387 | |||
388 | if (!test_bit(i, pmb_map)) | ||
389 | continue; | ||
390 | |||
391 | pmbe = &pmb_entry_list[i]; | ||
392 | |||
393 | pr_info(" 0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n", | ||
394 | pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT, | ||
395 | pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un"); | ||
396 | } | ||
397 | |||
398 | read_unlock(&pmb_rwlock); | ||
399 | } | ||
400 | |||
401 | /* | ||
402 | * Sync our software copy of the PMB mappings with those in hardware. The | ||
403 | * mappings in the hardware PMB were either set up by the bootloader or | ||
404 | * very early on by the kernel. | ||
405 | */ | ||
406 | static void __init pmb_synchronize(void) | ||
407 | { | ||
408 | struct pmb_entry *pmbp = NULL; | ||
409 | int i, j; | ||
289 | 410 | ||
290 | /* | 411 | /* |
291 | * Insert PMB entries for the P1 and P2 areas so that, after | 412 | * Run through the initial boot mappings, log the established |
292 | * we've switched the MMU to 32-bit mode, the semantics of P1 | 413 | * ones, and blow away anything that falls outside of the valid |
293 | * and P2 are the same as in 29-bit mode, e.g. | 414 | * PPN range. Specifically, we only care about existing mappings |
415 | * that impact the cached/uncached sections. | ||
294 | * | 416 | * |
295 | * P1 - provides a cached window onto physical memory | 417 | * Note that touching these can be a bit of a minefield; the boot |
296 | * P2 - provides an uncached window onto physical memory | 418 | * loader can establish multi-page mappings with the same caching |
419 | * attributes, so we need to ensure that we aren't modifying a | ||
420 | * mapping that we're presently executing from, or may execute | ||
421 | * from in the case of straddling page boundaries. | ||
422 | * | ||
423 | * In the future we will have to tidy up after the boot loader by | ||
424 | * jumping between the cached and uncached mappings and tearing | ||
425 | * down alternating mappings while executing from the other. | ||
297 | */ | 426 | */ |
298 | size = __MEMORY_START + __MEMORY_SIZE; | 427 | for (i = 0; i < NR_PMB_ENTRIES; i++) { |
428 | unsigned long addr, data; | ||
429 | unsigned long addr_val, data_val; | ||
430 | unsigned long ppn, vpn, flags; | ||
431 | unsigned long irqflags; | ||
432 | unsigned int size; | ||
433 | struct pmb_entry *pmbe; | ||
299 | 434 | ||
300 | ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C); | 435 | addr = mk_pmb_addr(i); |
301 | BUG_ON(ret != size); | 436 | data = mk_pmb_data(i); |
302 | 437 | ||
303 | ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB); | 438 | addr_val = __raw_readl(addr); |
304 | BUG_ON(ret != size); | 439 | data_val = __raw_readl(data); |
305 | 440 | ||
306 | ctrl_outl(0, PMB_IRMCR); | 441 | /* |
442 | * Skip over any bogus entries | ||
443 | */ | ||
444 | if (!(data_val & PMB_V) || !(addr_val & PMB_V)) | ||
445 | continue; | ||
307 | 446 | ||
308 | /* PMB.SE and UB[7] */ | 447 | ppn = data_val & PMB_PFN_MASK; |
309 | ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR); | 448 | vpn = addr_val & PMB_PFN_MASK; |
310 | 449 | ||
311 | /* Flush out the TLB */ | 450 | /* |
312 | i = ctrl_inl(MMUCR); | 451 | * Only preserve in-range mappings. |
313 | i |= MMUCR_TI; | 452 | */ |
314 | ctrl_outl(i, MMUCR); | 453 | if (!pmb_ppn_in_range(ppn)) { |
454 | /* | ||
455 | * Invalidate anything out of bounds. | ||
456 | */ | ||
457 | writel_uncached(addr_val & ~PMB_V, addr); | ||
458 | writel_uncached(data_val & ~PMB_V, data); | ||
459 | continue; | ||
460 | } | ||
315 | 461 | ||
316 | back_to_cached(); | 462 | /* |
463 | * Update the caching attributes if necessary | ||
464 | */ | ||
465 | if (data_val & PMB_C) { | ||
466 | data_val &= ~PMB_CACHE_MASK; | ||
467 | data_val |= pmb_cache_flags(); | ||
317 | 468 | ||
318 | return 0; | 469 | writel_uncached(data_val, data); |
470 | } | ||
471 | |||
472 | size = data_val & PMB_SZ_MASK; | ||
473 | flags = size | (data_val & PMB_CACHE_MASK); | ||
474 | |||
475 | pmbe = pmb_alloc(vpn, ppn, flags, i); | ||
476 | if (IS_ERR(pmbe)) { | ||
477 | WARN_ON_ONCE(1); | ||
478 | continue; | ||
479 | } | ||
480 | |||
481 | spin_lock_irqsave(&pmbe->lock, irqflags); | ||
482 | |||
483 | for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++) | ||
484 | if (pmb_sizes[j].flag == size) | ||
485 | pmbe->size = pmb_sizes[j].size; | ||
486 | |||
487 | if (pmbp) { | ||
488 | spin_lock(&pmbp->lock); | ||
489 | |||
490 | /* | ||
491 | * Compare the previous entry against the current one to | ||
492 | * see if the entries span a contiguous mapping. If so, | ||
493 | * setup the entry links accordingly. Compound mappings | ||
494 | * are later coalesced. | ||
495 | */ | ||
496 | if (pmb_can_merge(pmbp, pmbe)) | ||
497 | pmbp->link = pmbe; | ||
498 | |||
499 | spin_unlock(&pmbp->lock); | ||
500 | } | ||
501 | |||
502 | pmbp = pmbe; | ||
503 | |||
504 | spin_unlock_irqrestore(&pmbe->lock, irqflags); | ||
505 | } | ||
506 | } | ||
507 | |||
508 | static void __init pmb_merge(struct pmb_entry *head) | ||
509 | { | ||
510 | unsigned long span, newsize; | ||
511 | struct pmb_entry *tail; | ||
512 | int i = 1, depth = 0; | ||
513 | |||
514 | span = newsize = head->size; | ||
515 | |||
516 | tail = head->link; | ||
517 | while (tail) { | ||
518 | span += tail->size; | ||
519 | |||
520 | if (pmb_size_valid(span)) { | ||
521 | newsize = span; | ||
522 | depth = i; | ||
523 | } | ||
524 | |||
525 | /* This is the end of the line.. */ | ||
526 | if (!tail->link) | ||
527 | break; | ||
528 | |||
529 | tail = tail->link; | ||
530 | i++; | ||
531 | } | ||
532 | |||
533 | /* | ||
534 | * The merged page size must be valid. | ||
535 | */ | ||
536 | if (!pmb_size_valid(newsize)) | ||
537 | return; | ||
538 | |||
539 | head->flags &= ~PMB_SZ_MASK; | ||
540 | head->flags |= pmb_size_to_flags(newsize); | ||
541 | |||
542 | head->size = newsize; | ||
543 | |||
544 | __pmb_unmap_entry(head->link, depth); | ||
545 | __set_pmb_entry(head); | ||
319 | } | 546 | } |
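
A worked example of the merge: a head entry of 16MB followed by three linked 16MB entries accumulates spans of 32MB, 48MB and 64MB; only 64MB is a valid PMB size, so newsize becomes SZ_64M with depth = 3, the head is reprogrammed as one 64MB entry, and the three tail entries are torn down. A fifth 16MB link would be left alone, since 80MB matches no pmb_sizes[] entry. Compressed, the scan is:

	/* Sketch of the span scan; 4 x 16M -> one 64M entry, depth 3. */
	span = head->size;
	for (tail = head->link, i = 1; tail; tail = tail->link, i++) {
		span += tail->size;
		if (pmb_size_valid(span)) {	/* 32M/48M fail, 64M passes */
			newsize = span;
			depth = i;
		}
	}
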
320 | #else | 547 | |
321 | int __uses_jump_to_uncached pmb_init(void) | 548 | static void __init pmb_coalesce(void) |
322 | { | 549 | { |
550 | unsigned long flags; | ||
323 | int i; | 551 | int i; |
324 | unsigned long addr, data; | ||
325 | 552 | ||
326 | jump_to_uncached(); | 553 | write_lock_irqsave(&pmb_rwlock, flags); |
327 | 554 | ||
328 | for (i = 0; i < PMB_ENTRY_MAX; i++) { | 555 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { |
329 | struct pmb_entry *pmbe; | 556 | struct pmb_entry *pmbe; |
330 | unsigned long vpn, ppn, flags; | ||
331 | 557 | ||
332 | addr = PMB_DATA + (i << PMB_E_SHIFT); | 558 | if (!test_bit(i, pmb_map)) |
333 | data = ctrl_inl(addr); | ||
334 | if (!(data & PMB_V)) | ||
335 | continue; | 559 | continue; |
336 | 560 | ||
337 | if (data & PMB_C) { | 561 | pmbe = &pmb_entry_list[i]; |
338 | #if defined(CONFIG_CACHE_WRITETHROUGH) | ||
339 | data |= PMB_WT; | ||
340 | #elif defined(CONFIG_CACHE_WRITEBACK) | ||
341 | data &= ~PMB_WT; | ||
342 | #else | ||
343 | data &= ~(PMB_C | PMB_WT); | ||
344 | #endif | ||
345 | } | ||
346 | ctrl_outl(data, addr); | ||
347 | 562 | ||
348 | ppn = data & PMB_PFN_MASK; | 563 | /* |
564 | * We're only interested in compound mappings | ||
565 | */ | ||
566 | if (!pmbe->link) | ||
567 | continue; | ||
349 | 568 | ||
350 | flags = data & (PMB_C | PMB_WT | PMB_UB); | 569 | /* |
351 | flags |= data & PMB_SZ_MASK; | 570 | * Nothing to do if it already uses the largest possible |
571 | * page size. | ||
572 | */ | ||
573 | if (pmbe->size == SZ_512M) | ||
574 | continue; | ||
352 | 575 | ||
353 | addr = PMB_ADDR + (i << PMB_E_SHIFT); | 576 | pmb_merge(pmbe); |
354 | data = ctrl_inl(addr); | 577 | } |
355 | 578 | ||
356 | vpn = data & PMB_PFN_MASK; | 579 | write_unlock_irqrestore(&pmb_rwlock, flags); |
580 | } | ||
357 | 581 | ||
358 | pmbe = pmb_alloc(vpn, ppn, flags, i); | 582 | #ifdef CONFIG_UNCACHED_MAPPING |
359 | WARN_ON(IS_ERR(pmbe)); | 583 | static void __init pmb_resize(void) |
584 | { | ||
585 | int i; | ||
586 | |||
587 | /* | ||
588 | * If the uncached mapping was constructed by the kernel, it will | ||
589 | * already be a reasonable size. | ||
590 | */ | ||
591 | if (uncached_size == SZ_16M) | ||
592 | return; | ||
593 | |||
594 | read_lock(&pmb_rwlock); | ||
595 | |||
596 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { | ||
597 | struct pmb_entry *pmbe; | ||
598 | unsigned long flags; | ||
599 | |||
600 | if (!test_bit(i, pmb_map)) | ||
601 | continue; | ||
602 | |||
603 | pmbe = &pmb_entry_list[i]; | ||
604 | |||
605 | if (pmbe->vpn != uncached_start) | ||
606 | continue; | ||
607 | |||
608 | /* | ||
609 | * Found it, now resize it. | ||
610 | */ | ||
611 | spin_lock_irqsave(&pmbe->lock, flags); | ||
612 | |||
613 | pmbe->size = SZ_16M; | ||
614 | pmbe->flags &= ~PMB_SZ_MASK; | ||
615 | pmbe->flags |= pmb_size_to_flags(pmbe->size); | ||
616 | |||
617 | uncached_resize(pmbe->size); | ||
618 | |||
619 | __set_pmb_entry(pmbe); | ||
620 | |||
621 | spin_unlock_irqrestore(&pmbe->lock, flags); | ||
360 | } | 622 | } |
361 | 623 | ||
362 | back_to_cached(); | 624 | read_unlock(&pmb_rwlock);
625 | } | ||
626 | #endif | ||
627 | |||
628 | void __init pmb_init(void) | ||
629 | { | ||
630 | /* Synchronize software state */ | ||
631 | pmb_synchronize(); | ||
363 | 632 | ||
364 | return 0; | 633 | /* Attempt to combine compound mappings */ |
634 | pmb_coalesce(); | ||
635 | |||
636 | #ifdef CONFIG_UNCACHED_MAPPING | ||
637 | /* Resize initial mappings, if necessary */ | ||
638 | pmb_resize(); | ||
639 | #endif | ||
640 | |||
641 | /* Log them */ | ||
642 | pmb_notify(); | ||
643 | |||
644 | writel_uncached(0, PMB_IRMCR); | ||
645 | |||
646 | /* Flush out the TLB */ | ||
647 | __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR); | ||
648 | ctrl_barrier(); | ||
649 | } | ||
650 | |||
651 | bool __in_29bit_mode(void) | ||
652 | { | ||
653 | return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0; | ||
365 | } | 654 | } |
366 | #endif /* CONFIG_PMB */ | ||
367 | 655 | ||
368 | static int pmb_seq_show(struct seq_file *file, void *iter) | 656 | static int pmb_seq_show(struct seq_file *file, void *iter) |
369 | { | 657 | { |
@@ -378,8 +666,8 @@ static int pmb_seq_show(struct seq_file *file, void *iter) | |||
378 | unsigned int size; | 666 | unsigned int size; |
379 | char *sz_str = NULL; | 667 | char *sz_str = NULL; |
380 | 668 | ||
381 | addr = ctrl_inl(mk_pmb_addr(i)); | 669 | addr = __raw_readl(mk_pmb_addr(i)); |
382 | data = ctrl_inl(mk_pmb_data(i)); | 670 | data = __raw_readl(mk_pmb_data(i)); |
383 | 671 | ||
384 | size = data & PMB_SZ_MASK; | 672 | size = data & PMB_SZ_MASK; |
385 | sz_str = (size == PMB_SZ_16M) ? " 16MB": | 673 | sz_str = (size == PMB_SZ_16M) ? " 16MB": |
@@ -437,14 +725,21 @@ static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) | |||
437 | if (state.event == PM_EVENT_ON && | 725 | if (state.event == PM_EVENT_ON && |
438 | prev_state.event == PM_EVENT_FREEZE) { | 726 | prev_state.event == PM_EVENT_FREEZE) { |
439 | struct pmb_entry *pmbe; | 727 | struct pmb_entry *pmbe; |
728 | |||
729 | read_lock(&pmb_rwlock); | ||
730 | |||
440 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { | 731 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { |
441 | if (test_bit(i, &pmb_map)) { | 732 | if (test_bit(i, pmb_map)) { |
442 | pmbe = &pmb_entry_list[i]; | 733 | pmbe = &pmb_entry_list[i]; |
443 | set_pmb_entry(pmbe); | 734 | set_pmb_entry(pmbe); |
444 | } | 735 | } |
445 | } | 736 | } |
737 | |||
738 | read_unlock(&pmb_rwlock); | ||
446 | } | 739 | } |
740 | |||
447 | prev_state = state; | 741 | prev_state = state; |
742 | |||
448 | return 0; | 743 | return 0; |
449 | } | 744 | } |
450 | 745 | ||
@@ -462,6 +757,5 @@ static int __init pmb_sysdev_init(void) | |||
462 | { | 757 | { |
463 | return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver); | 758 | return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver); |
464 | } | 759 | } |
465 | |||
466 | subsys_initcall(pmb_sysdev_init); | 760 | subsys_initcall(pmb_sysdev_init); |
467 | #endif | 761 | #endif |
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c index 409b7c2b4b9d..32dc674c550c 100644 --- a/arch/sh/mm/tlb-pteaex.c +++ b/arch/sh/mm/tlb-pteaex.c | |||
@@ -68,8 +68,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) | |||
68 | * in extended mode, the legacy 8-bit ASID field in address array 1 has | 68 | * in extended mode, the legacy 8-bit ASID field in address array 1 has |
69 | * undefined behaviour. | 69 | * undefined behaviour. |
70 | */ | 70 | */ |
71 | void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid, | 71 | void local_flush_tlb_one(unsigned long asid, unsigned long page) |
72 | unsigned long page) | ||
73 | { | 72 | { |
74 | jump_to_uncached(); | 73 | jump_to_uncached(); |
75 | __raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT); | 74 | __raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT); |
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c index ace8e6d2f59d..4f5f7cbdd508 100644 --- a/arch/sh/mm/tlb-sh3.c +++ b/arch/sh/mm/tlb-sh3.c | |||
@@ -41,14 +41,14 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) | |||
41 | 41 | ||
42 | /* Set PTEH register */ | 42 | /* Set PTEH register */ |
43 | vpn = (address & MMU_VPN_MASK) | get_asid(); | 43 | vpn = (address & MMU_VPN_MASK) | get_asid(); |
44 | ctrl_outl(vpn, MMU_PTEH); | 44 | __raw_writel(vpn, MMU_PTEH); |
45 | 45 | ||
46 | pteval = pte_val(pte); | 46 | pteval = pte_val(pte); |
47 | 47 | ||
48 | /* Set PTEL register */ | 48 | /* Set PTEL register */ |
49 | pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */ | 49 | pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */ |
50 | /* conveniently, we want all the software flags to be 0 anyway */ | 50 | /* conveniently, we want all the software flags to be 0 anyway */ |
51 | ctrl_outl(pteval, MMU_PTEL); | 51 | __raw_writel(pteval, MMU_PTEL); |
52 | 52 | ||
53 | /* Load the TLB */ | 53 | /* Load the TLB */ |
54 | asm volatile("ldtlb": /* no output */ : /* no input */ : "memory"); | 54 | asm volatile("ldtlb": /* no output */ : /* no input */ : "memory"); |
@@ -75,5 +75,5 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page) | |||
75 | } | 75 | } |
76 | 76 | ||
77 | for (i = 0; i < ways; i++) | 77 | for (i = 0; i < ways; i++) |
78 | ctrl_outl(data, addr + (i << 8)); | 78 | __raw_writel(data, addr + (i << 8)); |
79 | } | 79 | } |
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c index 8cf550e2570f..ccac77f504a8 100644 --- a/arch/sh/mm/tlb-sh4.c +++ b/arch/sh/mm/tlb-sh4.c | |||
@@ -29,7 +29,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) | |||
29 | 29 | ||
30 | /* Set PTEH register */ | 30 | /* Set PTEH register */ |
31 | vpn = (address & MMU_VPN_MASK) | get_asid(); | 31 | vpn = (address & MMU_VPN_MASK) | get_asid(); |
32 | ctrl_outl(vpn, MMU_PTEH); | 32 | __raw_writel(vpn, MMU_PTEH); |
33 | 33 | ||
34 | pteval = pte.pte_low; | 34 | pteval = pte.pte_low; |
35 | 35 | ||
@@ -41,13 +41,13 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) | |||
41 | * the protection bits (with the exception of the compat-mode SZ | 41 | * the protection bits (with the exception of the compat-mode SZ |
42 | * and PR bits, which are cleared) being written out in PTEL. | 42 | * and PR bits, which are cleared) being written out in PTEL. |
43 | */ | 43 | */ |
44 | ctrl_outl(pte.pte_high, MMU_PTEA); | 44 | __raw_writel(pte.pte_high, MMU_PTEA); |
45 | #else | 45 | #else |
46 | if (cpu_data->flags & CPU_HAS_PTEA) { | 46 | if (cpu_data->flags & CPU_HAS_PTEA) { |
47 | /* The last 3 bits and the first one of pteval contains | 47 | /* The last 3 bits and the first one of pteval contains |
48 | * the PTEA timing control and space attribute bits | 48 | * the PTEA timing control and space attribute bits |
49 | */ | 49 | */ |
50 | ctrl_outl(copy_ptea_attributes(pteval), MMU_PTEA); | 50 | __raw_writel(copy_ptea_attributes(pteval), MMU_PTEA); |
51 | } | 51 | } |
52 | #endif | 52 | #endif |
53 | 53 | ||
@@ -57,15 +57,14 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) | |||
57 | pteval |= _PAGE_WT; | 57 | pteval |= _PAGE_WT; |
58 | #endif | 58 | #endif |
59 | /* conveniently, we want all the software flags to be 0 anyway */ | 59 | /* conveniently, we want all the software flags to be 0 anyway */ |
60 | ctrl_outl(pteval, MMU_PTEL); | 60 | __raw_writel(pteval, MMU_PTEL); |
61 | 61 | ||
62 | /* Load the TLB */ | 62 | /* Load the TLB */ |
63 | asm volatile("ldtlb": /* no output */ : /* no input */ : "memory"); | 63 | asm volatile("ldtlb": /* no output */ : /* no input */ : "memory"); |
64 | local_irq_restore(flags); | 64 | local_irq_restore(flags); |
65 | } | 65 | } |
66 | 66 | ||
67 | void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid, | 67 | void local_flush_tlb_one(unsigned long asid, unsigned long page) |
68 | unsigned long page) | ||
69 | { | 68 | { |
70 | unsigned long addr, data; | 69 | unsigned long addr, data; |
71 | 70 | ||
@@ -78,6 +77,6 @@ void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid, | |||
78 | addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT; | 77 | addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT; |
79 | data = page | asid; /* VALID bit is off */ | 78 | data = page | asid; /* VALID bit is off */ |
80 | jump_to_uncached(); | 79 | jump_to_uncached(); |
81 | ctrl_outl(data, addr); | 80 | __raw_writel(data, addr); |
82 | back_to_cached(); | 81 | back_to_cached(); |
83 | } | 82 | } |
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c index fdb64e41ec50..f27dbe1c1599 100644 --- a/arch/sh/mm/tlb-sh5.c +++ b/arch/sh/mm/tlb-sh5.c | |||
@@ -143,3 +143,42 @@ void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr, | |||
143 | */ | 143 | */ |
144 | void sh64_teardown_tlb_slot(unsigned long long config_addr) | 144 | void sh64_teardown_tlb_slot(unsigned long long config_addr) |
145 | __attribute__ ((alias("__flush_tlb_slot"))); | 145 | __attribute__ ((alias("__flush_tlb_slot"))); |
146 | |||
147 | static int dtlb_entry; | ||
148 | static unsigned long long dtlb_entries[64]; | ||
149 | |||
150 | void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte) | ||
151 | { | ||
152 | unsigned long long entry; | ||
153 | unsigned long paddr, flags; | ||
154 | |||
155 | BUG_ON(dtlb_entry == ARRAY_SIZE(dtlb_entries)); | ||
156 | |||
157 | local_irq_save(flags); | ||
158 | |||
159 | entry = sh64_get_wired_dtlb_entry(); | ||
160 | dtlb_entries[dtlb_entry++] = entry; | ||
161 | |||
162 | paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK; | ||
163 | paddr &= ~PAGE_MASK; | ||
164 | |||
165 | sh64_setup_tlb_slot(entry, addr, get_asid(), paddr); | ||
166 | |||
167 | local_irq_restore(flags); | ||
168 | } | ||
169 | |||
170 | void tlb_unwire_entry(void) | ||
171 | { | ||
172 | unsigned long long entry; | ||
173 | unsigned long flags; | ||
174 | |||
175 | BUG_ON(!dtlb_entry); | ||
176 | |||
177 | local_irq_save(flags); | ||
178 | entry = dtlb_entries[--dtlb_entry]; | ||
179 | |||
180 | sh64_teardown_tlb_slot(entry); | ||
181 | sh64_put_wired_dtlb_entry(entry); | ||
182 | |||
183 | local_irq_restore(flags); | ||
184 | } | ||
diff --git a/arch/sh/mm/tlb-urb.c b/arch/sh/mm/tlb-urb.c new file mode 100644 index 000000000000..bb5b9098956d --- /dev/null +++ b/arch/sh/mm/tlb-urb.c | |||
@@ -0,0 +1,81 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/tlb-urb.c | ||
3 | * | ||
4 | * TLB entry wiring helpers for URB-equipped parts. | ||
5 | * | ||
6 | * Copyright (C) 2010 Matt Fleming | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | #include <linux/mm.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <asm/tlb.h> | ||
15 | #include <asm/mmu_context.h> | ||
16 | |||
17 | /* | ||
18 | * Load the entry for 'addr' into the TLB and wire the entry. | ||
19 | */ | ||
20 | void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte) | ||
21 | { | ||
22 | unsigned long status, flags; | ||
23 | int urb; | ||
24 | |||
25 | local_irq_save(flags); | ||
26 | |||
27 | /* Load the entry into the TLB */ | ||
28 | __update_tlb(vma, addr, pte); | ||
29 | |||
30 | /* ... and wire it up. */ | ||
31 | status = __raw_readl(MMUCR); | ||
32 | urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT; | ||
33 | status &= ~MMUCR_URB; | ||
34 | |||
35 | /* | ||
36 | * Make sure we're not trying to wire the last TLB entry slot. | ||
37 | */ | ||
38 | BUG_ON(!--urb); | ||
39 | |||
40 | urb = urb % MMUCR_URB_NENTRIES; | ||
41 | |||
42 | status |= (urb << MMUCR_URB_SHIFT); | ||
43 | __raw_writel(status, MMUCR); | ||
44 | ctrl_barrier(); | ||
45 | |||
46 | local_irq_restore(flags); | ||
47 | } | ||
48 | |||
49 | /* | ||
50 | * Unwire the last wired TLB entry. | ||
51 | * | ||
52 | * It should also be noted that it is not possible to wire and unwire | ||
53 | * TLB entries in an arbitrary order. If you wire TLB entry N, followed | ||
54 | * by entry N+1, you must unwire entry N+1 first, then entry N. In this | ||
55 | * respect, it works like a stack or LIFO queue. | ||
56 | */ | ||
57 | void tlb_unwire_entry(void) | ||
58 | { | ||
59 | unsigned long status, flags; | ||
60 | int urb; | ||
61 | |||
62 | local_irq_save(flags); | ||
63 | |||
64 | status = __raw_readl(MMUCR); | ||
65 | urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT; | ||
66 | status &= ~MMUCR_URB; | ||
67 | |||
68 | /* | ||
69 | * Make sure we're not trying to unwire a TLB entry when none | ||
70 | * have been wired. | ||
71 | */ | ||
72 | BUG_ON(urb++ == MMUCR_URB_NENTRIES); | ||
73 | |||
74 | urb = urb % MMUCR_URB_NENTRIES; | ||
75 | |||
76 | status |= (urb << MMUCR_URB_SHIFT); | ||
77 | __raw_writel(status, MMUCR); | ||
78 | ctrl_barrier(); | ||
79 | |||
80 | local_irq_restore(flags); | ||
81 | } | ||
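
Because wiring is strictly stack-ordered, callers have to unwire in exact reverse order of wiring. A hedged usage sketch; the vma/address/pte arguments stand in for whatever mappings the caller has just established:

	tlb_wire_entry(vma, addr_a, pte_a);	/* first wired entry */
	tlb_wire_entry(vma, addr_b, pte_b);	/* second wired entry */

	/* ... window where these translations must stay resident ... */

	tlb_unwire_entry();			/* releases addr_b's entry */
	tlb_unwire_entry();			/* releases addr_a's entry */
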
diff --git a/arch/sh/mm/tlbflush_32.c b/arch/sh/mm/tlbflush_32.c index 6f45c1f8a7fe..004bb3f25b5f 100644 --- a/arch/sh/mm/tlbflush_32.c +++ b/arch/sh/mm/tlbflush_32.c | |||
@@ -132,9 +132,9 @@ void local_flush_tlb_all(void) | |||
132 | * It's same position, bit #2. | 132 | * It's same position, bit #2. |
133 | */ | 133 | */ |
134 | local_irq_save(flags); | 134 | local_irq_save(flags); |
135 | status = ctrl_inl(MMUCR); | 135 | status = __raw_readl(MMUCR); |
136 | status |= 0x04; | 136 | status |= 0x04; |
137 | ctrl_outl(status, MMUCR); | 137 | __raw_writel(status, MMUCR); |
138 | ctrl_barrier(); | 138 | ctrl_barrier(); |
139 | local_irq_restore(flags); | 139 | local_irq_restore(flags); |
140 | } | 140 | } |
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c index de0b0e881823..706da1d3a67a 100644 --- a/arch/sh/mm/tlbflush_64.c +++ b/arch/sh/mm/tlbflush_64.c | |||
@@ -36,7 +36,7 @@ extern void die(const char *,struct pt_regs *,long); | |||
36 | 36 | ||
37 | static inline void print_prots(pgprot_t prot) | 37 | static inline void print_prots(pgprot_t prot) |
38 | { | 38 | { |
39 | printk("prot is 0x%08lx\n",pgprot_val(prot)); | 39 | printk("prot is 0x%016llx\n",pgprot_val(prot)); |
40 | 40 | ||
41 | printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ), | 41 | printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ), |
42 | PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER)); | 42 | PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER)); |
diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c new file mode 100644 index 000000000000..cf20a5c5136a --- /dev/null +++ b/arch/sh/mm/uncached.c | |||
@@ -0,0 +1,34 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <asm/sizes.h> | ||
3 | #include <asm/page.h> | ||
4 | |||
5 | /* | ||
6 | * This is the offset of the uncached section from its cached alias. | ||
7 | * | ||
8 | * Legacy platforms handle trivial transitions between cached and | ||
9 | * uncached segments by making use of the 1:1 mapping relationship in | ||
10 | * 512MB lowmem, while others go through a special uncached mapping. | ||
11 | * | ||
12 | * The default value is only valid in 29-bit mode; in 32-bit mode it is | ||
13 | * updated by the early PMB initialization code. | ||
14 | */ | ||
15 | unsigned long cached_to_uncached = SZ_512M; | ||
16 | unsigned long uncached_size = SZ_512M; | ||
17 | unsigned long uncached_start, uncached_end; | ||
18 | |||
19 | int virt_addr_uncached(unsigned long kaddr) | ||
20 | { | ||
21 | return (kaddr >= uncached_start) && (kaddr < uncached_end); | ||
22 | } | ||
23 | |||
24 | void __init uncached_init(void) | ||
25 | { | ||
26 | uncached_start = memory_end; | ||
27 | uncached_end = uncached_start + uncached_size; | ||
28 | } | ||
29 | |||
30 | void __init uncached_resize(unsigned long size) | ||
31 | { | ||
32 | uncached_size = size; | ||
33 | uncached_end = uncached_start + uncached_size; | ||
34 | } | ||
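
Tying the invariants together: a cached lowmem address and its uncached alias differ by exactly cached_to_uncached, and virt_addr_uncached() distinguishes the two ranges. A hedged sketch; to_uncached() is a hypothetical helper, and kaddr is assumed to be a valid cached lowmem address:

	static inline unsigned long to_uncached(unsigned long kaddr)
	{
		if (virt_addr_uncached(kaddr))
			return kaddr;		/* already in the window */

		return kaddr + cached_to_uncached;
	}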