diff options
Diffstat (limited to 'arch/mips/mm')
| -rw-r--r-- | arch/mips/mm/Makefile | 1 | ||||
| -rw-r--r-- | arch/mips/mm/c-octeon.c | 307 | ||||
| -rw-r--r-- | arch/mips/mm/cache.c | 6 | ||||
| -rw-r--r-- | arch/mips/mm/cex-oct.S | 70 | ||||
| -rw-r--r-- | arch/mips/mm/dma-default.c | 25 | ||||
| -rw-r--r-- | arch/mips/mm/tlb-r4k.c | 5 | ||||
| -rw-r--r-- | arch/mips/mm/tlbex.c | 1 |
7 files changed, 399 insertions, 16 deletions
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index 95ba32b5b720..d7ec95522292 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile | |||
| @@ -27,6 +27,7 @@ obj-$(CONFIG_CPU_SB1) += c-r4k.o cerr-sb1.o cex-sb1.o tlb-r4k.o | |||
| 27 | obj-$(CONFIG_CPU_TX39XX) += c-tx39.o tlb-r3k.o | 27 | obj-$(CONFIG_CPU_TX39XX) += c-tx39.o tlb-r3k.o |
| 28 | obj-$(CONFIG_CPU_TX49XX) += c-r4k.o cex-gen.o tlb-r4k.o | 28 | obj-$(CONFIG_CPU_TX49XX) += c-r4k.o cex-gen.o tlb-r4k.o |
| 29 | obj-$(CONFIG_CPU_VR41XX) += c-r4k.o cex-gen.o tlb-r4k.o | 29 | obj-$(CONFIG_CPU_VR41XX) += c-r4k.o cex-gen.o tlb-r4k.o |
| 30 | obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o | ||
| 30 | 31 | ||
| 31 | obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o | 32 | obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o |
| 32 | obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o | 33 | obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o |
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c new file mode 100644 index 000000000000..44d01a0a8490 --- /dev/null +++ b/arch/mips/mm/c-octeon.c | |||
| @@ -0,0 +1,307 @@ | |||
| 1 | /* | ||
| 2 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 3 | * License. See the file "COPYING" in the main directory of this archive | ||
| 4 | * for more details. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2005-2007 Cavium Networks | ||
| 7 | */ | ||
| 8 | #include <linux/init.h> | ||
| 9 | #include <linux/kernel.h> | ||
| 10 | #include <linux/sched.h> | ||
| 11 | #include <linux/mm.h> | ||
| 12 | #include <linux/bitops.h> | ||
| 13 | #include <linux/cpu.h> | ||
| 14 | #include <linux/io.h> | ||
| 15 | |||
| 16 | #include <asm/bcache.h> | ||
| 17 | #include <asm/bootinfo.h> | ||
| 18 | #include <asm/cacheops.h> | ||
| 19 | #include <asm/cpu-features.h> | ||
| 20 | #include <asm/page.h> | ||
| 21 | #include <asm/pgtable.h> | ||
| 22 | #include <asm/r4kcache.h> | ||
| 23 | #include <asm/system.h> | ||
| 24 | #include <asm/mmu_context.h> | ||
| 25 | #include <asm/war.h> | ||
| 26 | |||
| 27 | #include <asm/octeon/octeon.h> | ||
| 28 | |||
| 29 | unsigned long long cache_err_dcache[NR_CPUS]; | ||
| 30 | |||
| 31 | /** | ||
| 32 | * Octeon automatically flushes the dcache on tlb changes, so | ||
| 33 | * from Linux's viewpoint it acts much like a physically | ||
| 34 | * tagged cache. No flushing is needed | ||
| 35 | * | ||
| 36 | */ | ||
| 37 | static void octeon_flush_data_cache_page(unsigned long addr) | ||
| 38 | { | ||
| 39 | /* Nothing to do */ | ||
| 40 | } | ||
| 41 | |||
| 42 | static inline void octeon_local_flush_icache(void) | ||
| 43 | { | ||
| 44 | asm volatile ("synci 0($0)"); | ||
| 45 | } | ||
| 46 | |||
| 47 | /* | ||
| 48 | * Flush local I-cache for the specified range. | ||
| 49 | */ | ||
| 50 | static void local_octeon_flush_icache_range(unsigned long start, | ||
| 51 | unsigned long end) | ||
| 52 | { | ||
| 53 | octeon_local_flush_icache(); | ||
| 54 | } | ||
| 55 | |||
| 56 | /** | ||
| 57 | * Flush caches as necessary for all cores affected by a | ||
| 58 | * vma. If no vma is supplied, all cores are flushed. | ||
| 59 | * | ||
| 60 | * @vma: VMA to flush or NULL to flush all icaches. | ||
| 61 | */ | ||
| 62 | static void octeon_flush_icache_all_cores(struct vm_area_struct *vma) | ||
| 63 | { | ||
| 64 | extern void octeon_send_ipi_single(int cpu, unsigned int action); | ||
| 65 | #ifdef CONFIG_SMP | ||
| 66 | int cpu; | ||
| 67 | cpumask_t mask; | ||
| 68 | #endif | ||
| 69 | |||
| 70 | mb(); | ||
| 71 | octeon_local_flush_icache(); | ||
| 72 | #ifdef CONFIG_SMP | ||
| 73 | preempt_disable(); | ||
| 74 | cpu = smp_processor_id(); | ||
| 75 | |||
| 76 | /* | ||
| 77 | * If we have a vma structure, we only need to worry about | ||
| 78 | * cores it has been used on | ||
| 79 | */ | ||
| 80 | if (vma) | ||
| 81 | mask = vma->vm_mm->cpu_vm_mask; | ||
| 82 | else | ||
| 83 | mask = cpu_online_map; | ||
| 84 | cpu_clear(cpu, mask); | ||
| 85 | for_each_cpu_mask(cpu, mask) | ||
| 86 | octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH); | ||
| 87 | |||
| 88 | preempt_enable(); | ||
| 89 | #endif | ||
| 90 | } | ||
| 91 | |||
| 92 | |||
| 93 | /** | ||
| 94 | * Called to flush the icache on all cores | ||
| 95 | */ | ||
| 96 | static void octeon_flush_icache_all(void) | ||
| 97 | { | ||
| 98 | octeon_flush_icache_all_cores(NULL); | ||
| 99 | } | ||
| 100 | |||
| 101 | |||
| 102 | /** | ||
| 103 | * Called to flush all memory associated with a memory | ||
| 104 | * context. | ||
| 105 | * | ||
| 106 | * @mm: Memory context to flush | ||
| 107 | */ | ||
| 108 | static void octeon_flush_cache_mm(struct mm_struct *mm) | ||
| 109 | { | ||
| 110 | /* | ||
| 111 | * According to the R4K version of this file, CPUs without | ||
| 112 | * dcache aliases don't need to do anything here | ||
| 113 | */ | ||
| 114 | } | ||
| 115 | |||
| 116 | |||
| 117 | /** | ||
| 118 | * Flush a range of kernel addresses out of the icache | ||
| 119 | * | ||
| 120 | */ | ||
| 121 | static void octeon_flush_icache_range(unsigned long start, unsigned long end) | ||
| 122 | { | ||
| 123 | octeon_flush_icache_all_cores(NULL); | ||
| 124 | } | ||
| 125 | |||
| 126 | |||
| 127 | /** | ||
| 128 | * Flush the icache for a trampoline. These are used for interrupt | ||
| 129 | * and exception hooking. | ||
| 130 | * | ||
| 131 | * @addr: Address to flush | ||
| 132 | */ | ||
| 133 | static void octeon_flush_cache_sigtramp(unsigned long addr) | ||
| 134 | { | ||
| 135 | struct vm_area_struct *vma; | ||
| 136 | |||
| 137 | vma = find_vma(current->mm, addr); | ||
| 138 | octeon_flush_icache_all_cores(vma); | ||
| 139 | } | ||
| 140 | |||
| 141 | |||
| 142 | /** | ||
| 143 | * Flush a range out of a vma | ||
| 144 | * | ||
| 145 | * @vma: VMA to flush | ||
| 146 | * @start: | ||
| 147 | * @end: | ||
| 148 | */ | ||
| 149 | static void octeon_flush_cache_range(struct vm_area_struct *vma, | ||
| 150 | unsigned long start, unsigned long end) | ||
| 151 | { | ||
| 152 | if (vma->vm_flags & VM_EXEC) | ||
| 153 | octeon_flush_icache_all_cores(vma); | ||
| 154 | } | ||
| 155 | |||
| 156 | |||
| 157 | /** | ||
| 158 | * Flush a specific page of a vma | ||
| 159 | * | ||
| 160 | * @vma: VMA to flush page for | ||
| 161 | * @page: Page to flush | ||
| 162 | * @pfn: | ||
| 163 | */ | ||
| 164 | static void octeon_flush_cache_page(struct vm_area_struct *vma, | ||
| 165 | unsigned long page, unsigned long pfn) | ||
| 166 | { | ||
| 167 | if (vma->vm_flags & VM_EXEC) | ||
| 168 | octeon_flush_icache_all_cores(vma); | ||
| 169 | } | ||
| 170 | |||
| 171 | |||
| 172 | /** | ||
| 173 | * Probe Octeon's caches | ||
| 174 | * | ||
| 175 | */ | ||
| 176 | static void __devinit probe_octeon(void) | ||
| 177 | { | ||
| 178 | unsigned long icache_size; | ||
| 179 | unsigned long dcache_size; | ||
| 180 | unsigned int config1; | ||
| 181 | struct cpuinfo_mips *c = ¤t_cpu_data; | ||
| 182 | |||
| 183 | switch (c->cputype) { | ||
| 184 | case CPU_CAVIUM_OCTEON: | ||
| 185 | config1 = read_c0_config1(); | ||
| 186 | c->icache.linesz = 2 << ((config1 >> 19) & 7); | ||
| 187 | c->icache.sets = 64 << ((config1 >> 22) & 7); | ||
| 188 | c->icache.ways = 1 + ((config1 >> 16) & 7); | ||
| 189 | c->icache.flags |= MIPS_CACHE_VTAG; | ||
| 190 | icache_size = | ||
| 191 | c->icache.sets * c->icache.ways * c->icache.linesz; | ||
| 192 | c->icache.waybit = ffs(icache_size / c->icache.ways) - 1; | ||
| 193 | c->dcache.linesz = 128; | ||
| 194 | if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) | ||
| 195 | c->dcache.sets = 1; /* CN3XXX has one Dcache set */ | ||
| 196 | else | ||
| 197 | c->dcache.sets = 2; /* CN5XXX has two Dcache sets */ | ||
| 198 | c->dcache.ways = 64; | ||
| 199 | dcache_size = | ||
| 200 | c->dcache.sets * c->dcache.ways * c->dcache.linesz; | ||
| 201 | c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1; | ||
| 202 | c->options |= MIPS_CPU_PREFETCH; | ||
| 203 | break; | ||
| 204 | |||
| 205 | default: | ||
| 206 | panic("Unsupported Cavium Networks CPU type\n"); | ||
| 207 | break; | ||
| 208 | } | ||
| 209 | |||
| 210 | /* compute a couple of other cache variables */ | ||
| 211 | c->icache.waysize = icache_size / c->icache.ways; | ||
| 212 | c->dcache.waysize = dcache_size / c->dcache.ways; | ||
| 213 | |||
| 214 | c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways); | ||
| 215 | c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways); | ||
| 216 | |||
| 217 | if (smp_processor_id() == 0) { | ||
| 218 | pr_notice("Primary instruction cache %ldkB, %s, %d way, " | ||
| 219 | "%d sets, linesize %d bytes.\n", | ||
| 220 | icache_size >> 10, | ||
| 221 | cpu_has_vtag_icache ? | ||
| 222 | "virtually tagged" : "physically tagged", | ||
| 223 | c->icache.ways, c->icache.sets, c->icache.linesz); | ||
| 224 | |||
| 225 | pr_notice("Primary data cache %ldkB, %d-way, %d sets, " | ||
| 226 | "linesize %d bytes.\n", | ||
| 227 | dcache_size >> 10, c->dcache.ways, | ||
| 228 | c->dcache.sets, c->dcache.linesz); | ||
| 229 | } | ||
| 230 | } | ||
| 231 | |||
| 232 | |||
| 233 | /** | ||
| 234 | * Setup the Octeon cache flush routines | ||
| 235 | * | ||
| 236 | */ | ||
| 237 | void __devinit octeon_cache_init(void) | ||
| 238 | { | ||
| 239 | extern unsigned long ebase; | ||
| 240 | extern char except_vec2_octeon; | ||
| 241 | |||
| 242 | memcpy((void *)(ebase + 0x100), &except_vec2_octeon, 0x80); | ||
| 243 | octeon_flush_cache_sigtramp(ebase + 0x100); | ||
| 244 | |||
| 245 | probe_octeon(); | ||
| 246 | |||
| 247 | shm_align_mask = PAGE_SIZE - 1; | ||
| 248 | |||
| 249 | flush_cache_all = octeon_flush_icache_all; | ||
| 250 | __flush_cache_all = octeon_flush_icache_all; | ||
| 251 | flush_cache_mm = octeon_flush_cache_mm; | ||
| 252 | flush_cache_page = octeon_flush_cache_page; | ||
| 253 | flush_cache_range = octeon_flush_cache_range; | ||
| 254 | flush_cache_sigtramp = octeon_flush_cache_sigtramp; | ||
| 255 | flush_icache_all = octeon_flush_icache_all; | ||
| 256 | flush_data_cache_page = octeon_flush_data_cache_page; | ||
| 257 | flush_icache_range = octeon_flush_icache_range; | ||
| 258 | local_flush_icache_range = local_octeon_flush_icache_range; | ||
| 259 | |||
| 260 | build_clear_page(); | ||
| 261 | build_copy_page(); | ||
| 262 | } | ||
| 263 | |||
| 264 | /** | ||
| 265 | * Handle a cache error exception | ||
| 266 | */ | ||
| 267 | |||
| 268 | static void cache_parity_error_octeon(int non_recoverable) | ||
| 269 | { | ||
| 270 | unsigned long coreid = cvmx_get_core_num(); | ||
| 271 | uint64_t icache_err = read_octeon_c0_icacheerr(); | ||
| 272 | |||
| 273 | pr_err("Cache error exception:\n"); | ||
| 274 | pr_err("cp0_errorepc == %lx\n", read_c0_errorepc()); | ||
| 275 | if (icache_err & 1) { | ||
| 276 | pr_err("CacheErr (Icache) == %llx\n", | ||
| 277 | (unsigned long long)icache_err); | ||
| 278 | write_octeon_c0_icacheerr(0); | ||
| 279 | } | ||
| 280 | if (cache_err_dcache[coreid] & 1) { | ||
| 281 | pr_err("CacheErr (Dcache) == %llx\n", | ||
| 282 | (unsigned long long)cache_err_dcache[coreid]); | ||
| 283 | cache_err_dcache[coreid] = 0; | ||
| 284 | } | ||
| 285 | |||
| 286 | if (non_recoverable) | ||
| 287 | panic("Can't handle cache error: nested exception"); | ||
| 288 | } | ||
| 289 | |||
| 290 | /** | ||
| 291 | * Called when the exception is recoverable | ||
| 292 | */ | ||
| 293 | |||
| 294 | asmlinkage void cache_parity_error_octeon_recoverable(void) | ||
| 295 | { | ||
| 296 | cache_parity_error_octeon(0); | ||
| 297 | } | ||
| 298 | |||
| 299 | /** | ||
| 300 | * Called when the exception is not recoverable | ||
| 301 | */ | ||
| 302 | |||
| 303 | asmlinkage void cache_parity_error_octeon_non_recoverable(void) | ||
| 304 | { | ||
| 305 | cache_parity_error_octeon(1); | ||
| 306 | } | ||
| 307 | |||
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 1eb7c71e3d6a..98ad0a82c29e 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c | |||
| @@ -182,6 +182,12 @@ void __devinit cpu_cache_init(void) | |||
| 182 | tx39_cache_init(); | 182 | tx39_cache_init(); |
| 183 | } | 183 | } |
| 184 | 184 | ||
| 185 | if (cpu_has_octeon_cache) { | ||
| 186 | extern void __weak octeon_cache_init(void); | ||
| 187 | |||
| 188 | octeon_cache_init(); | ||
| 189 | } | ||
| 190 | |||
| 185 | setup_protection_map(); | 191 | setup_protection_map(); |
| 186 | } | 192 | } |
| 187 | 193 | ||
diff --git a/arch/mips/mm/cex-oct.S b/arch/mips/mm/cex-oct.S new file mode 100644 index 000000000000..3db8553fcd34 --- /dev/null +++ b/arch/mips/mm/cex-oct.S | |||
| @@ -0,0 +1,70 @@ | |||
| 1 | /* | ||
| 2 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 3 | * License. See the file "COPYING" in the main directory of this archive | ||
| 4 | * for more details. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2006 Cavium Networks | ||
| 7 | * Cache error handler | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <asm/asm.h> | ||
| 11 | #include <asm/regdef.h> | ||
| 12 | #include <asm/mipsregs.h> | ||
| 13 | #include <asm/stackframe.h> | ||
| 14 | |||
| 15 | /* | ||
| 16 | * Handle cache error. Indicate to the second level handler whether | ||
| 17 | * the exception is recoverable. | ||
| 18 | */ | ||
| 19 | LEAF(except_vec2_octeon) | ||
| 20 | |||
| 21 | .set push | ||
| 22 | .set mips64r2 | ||
| 23 | .set noreorder | ||
| 24 | .set noat | ||
| 25 | |||
| 26 | |||
| 27 | /* due to an errata we need to read the COP0 CacheErr (Dcache) | ||
| 28 | * before any cache/DRAM access */ | ||
| 29 | |||
| 30 | rdhwr k0, $0 /* get core_id */ | ||
| 31 | PTR_LA k1, cache_err_dcache | ||
| 32 | sll k0, k0, 3 | ||
| 33 | PTR_ADDU k1, k0, k1 /* k1 = &cache_err_dcache[core_id] */ | ||
| 34 | |||
| 35 | dmfc0 k0, CP0_CACHEERR, 1 | ||
| 36 | sd k0, (k1) | ||
| 37 | dmtc0 $0, CP0_CACHEERR, 1 | ||
| 38 | |||
| 39 | /* check whether this is a nested exception */ | ||
| 40 | mfc0 k1, CP0_STATUS | ||
| 41 | andi k1, k1, ST0_EXL | ||
| 42 | beqz k1, 1f | ||
| 43 | nop | ||
| 44 | j cache_parity_error_octeon_non_recoverable | ||
| 45 | nop | ||
| 46 | |||
| 47 | /* exception is recoverable */ | ||
| 48 | 1: j handle_cache_err | ||
| 49 | nop | ||
| 50 | |||
| 51 | .set pop | ||
| 52 | END(except_vec2_octeon) | ||
| 53 | |||
| 54 | /* We need to jump to handle_cache_err so that the previous handler | ||
| 55 | * can fit within 0x80 bytes. We also move from 0xFFFFFFFFAXXXXXXX | ||
| 56 | * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached). */ | ||
| 57 | LEAF(handle_cache_err) | ||
| 58 | .set push | ||
| 59 | .set noreorder | ||
| 60 | .set noat | ||
| 61 | |||
| 62 | SAVE_ALL | ||
| 63 | KMODE | ||
| 64 | jal cache_parity_error_octeon_recoverable | ||
| 65 | nop | ||
| 66 | j ret_from_exception | ||
| 67 | nop | ||
| 68 | |||
| 69 | .set pop | ||
| 70 | END(handle_cache_err) | ||
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index e6708b3ad343..546e6977d4ff 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c | |||
| @@ -111,7 +111,7 @@ EXPORT_SYMBOL(dma_alloc_coherent); | |||
| 111 | void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, | 111 | void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, |
| 112 | dma_addr_t dma_handle) | 112 | dma_addr_t dma_handle) |
| 113 | { | 113 | { |
| 114 | plat_unmap_dma_mem(dma_handle); | 114 | plat_unmap_dma_mem(dev, dma_handle); |
| 115 | free_pages((unsigned long) vaddr, get_order(size)); | 115 | free_pages((unsigned long) vaddr, get_order(size)); |
| 116 | } | 116 | } |
| 117 | 117 | ||
| @@ -122,7 +122,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr, | |||
| 122 | { | 122 | { |
| 123 | unsigned long addr = (unsigned long) vaddr; | 123 | unsigned long addr = (unsigned long) vaddr; |
| 124 | 124 | ||
| 125 | plat_unmap_dma_mem(dma_handle); | 125 | plat_unmap_dma_mem(dev, dma_handle); |
| 126 | 126 | ||
| 127 | if (!plat_device_is_coherent(dev)) | 127 | if (!plat_device_is_coherent(dev)) |
| 128 | addr = CAC_ADDR(addr); | 128 | addr = CAC_ADDR(addr); |
| @@ -173,7 +173,7 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | |||
| 173 | __dma_sync(dma_addr_to_virt(dma_addr), size, | 173 | __dma_sync(dma_addr_to_virt(dma_addr), size, |
| 174 | direction); | 174 | direction); |
| 175 | 175 | ||
| 176 | plat_unmap_dma_mem(dma_addr); | 176 | plat_unmap_dma_mem(dev, dma_addr); |
| 177 | } | 177 | } |
| 178 | 178 | ||
| 179 | EXPORT_SYMBOL(dma_unmap_single); | 179 | EXPORT_SYMBOL(dma_unmap_single); |
| @@ -229,7 +229,7 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | |||
| 229 | dma_cache_wback_inv(addr, size); | 229 | dma_cache_wback_inv(addr, size); |
| 230 | } | 230 | } |
| 231 | 231 | ||
| 232 | plat_unmap_dma_mem(dma_address); | 232 | plat_unmap_dma_mem(dev, dma_address); |
| 233 | } | 233 | } |
| 234 | 234 | ||
| 235 | EXPORT_SYMBOL(dma_unmap_page); | 235 | EXPORT_SYMBOL(dma_unmap_page); |
| @@ -249,7 +249,7 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | |||
| 249 | if (addr) | 249 | if (addr) |
| 250 | __dma_sync(addr, sg->length, direction); | 250 | __dma_sync(addr, sg->length, direction); |
| 251 | } | 251 | } |
| 252 | plat_unmap_dma_mem(sg->dma_address); | 252 | plat_unmap_dma_mem(dev, sg->dma_address); |
| 253 | } | 253 | } |
| 254 | } | 254 | } |
| 255 | 255 | ||
| @@ -275,6 +275,7 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | |||
| 275 | { | 275 | { |
| 276 | BUG_ON(direction == DMA_NONE); | 276 | BUG_ON(direction == DMA_NONE); |
| 277 | 277 | ||
| 278 | plat_extra_sync_for_device(dev); | ||
| 278 | if (!plat_device_is_coherent(dev)) { | 279 | if (!plat_device_is_coherent(dev)) { |
| 279 | unsigned long addr; | 280 | unsigned long addr; |
| 280 | 281 | ||
| @@ -305,6 +306,7 @@ void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | |||
| 305 | { | 306 | { |
| 306 | BUG_ON(direction == DMA_NONE); | 307 | BUG_ON(direction == DMA_NONE); |
| 307 | 308 | ||
| 309 | plat_extra_sync_for_device(dev); | ||
| 308 | if (!plat_device_is_coherent(dev)) { | 310 | if (!plat_device_is_coherent(dev)) { |
| 309 | unsigned long addr; | 311 | unsigned long addr; |
| 310 | 312 | ||
| @@ -351,22 +353,14 @@ EXPORT_SYMBOL(dma_sync_sg_for_device); | |||
| 351 | 353 | ||
| 352 | int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 354 | int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
| 353 | { | 355 | { |
| 354 | return 0; | 356 | return plat_dma_mapping_error(dev, dma_addr); |
| 355 | } | 357 | } |
| 356 | 358 | ||
| 357 | EXPORT_SYMBOL(dma_mapping_error); | 359 | EXPORT_SYMBOL(dma_mapping_error); |
| 358 | 360 | ||
| 359 | int dma_supported(struct device *dev, u64 mask) | 361 | int dma_supported(struct device *dev, u64 mask) |
| 360 | { | 362 | { |
| 361 | /* | 363 | return plat_dma_supported(dev, mask); |
| 362 | * we fall back to GFP_DMA when the mask isn't all 1s, | ||
| 363 | * so we can't guarantee allocations that must be | ||
| 364 | * within a tighter range than GFP_DMA.. | ||
| 365 | */ | ||
| 366 | if (mask < DMA_BIT_MASK(24)) | ||
| 367 | return 0; | ||
| 368 | |||
| 369 | return 1; | ||
| 370 | } | 364 | } |
| 371 | 365 | ||
| 372 | EXPORT_SYMBOL(dma_supported); | 366 | EXPORT_SYMBOL(dma_supported); |
| @@ -383,6 +377,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |||
| 383 | { | 377 | { |
| 384 | BUG_ON(direction == DMA_NONE); | 378 | BUG_ON(direction == DMA_NONE); |
| 385 | 379 | ||
| 380 | plat_extra_sync_for_device(dev); | ||
| 386 | if (!plat_device_is_coherent(dev)) | 381 | if (!plat_device_is_coherent(dev)) |
| 387 | __dma_sync((unsigned long)vaddr, size, direction); | 382 | __dma_sync((unsigned long)vaddr, size, direction); |
| 388 | } | 383 | } |
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 5ce2fa745626..9619f66e531e 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c | |||
| @@ -478,7 +478,10 @@ void __cpuinit tlb_init(void) | |||
| 478 | probe_tlb(config); | 478 | probe_tlb(config); |
| 479 | write_c0_pagemask(PM_DEFAULT_MASK); | 479 | write_c0_pagemask(PM_DEFAULT_MASK); |
| 480 | write_c0_wired(0); | 480 | write_c0_wired(0); |
| 481 | write_c0_framemask(0); | 481 | if (current_cpu_type() == CPU_R10000 || |
| 482 | current_cpu_type() == CPU_R12000 || | ||
| 483 | current_cpu_type() == CPU_R14000) | ||
| 484 | write_c0_framemask(0); | ||
| 482 | temp_tlb_entry = current_cpu_data.tlbsize - 1; | 485 | temp_tlb_entry = current_cpu_data.tlbsize - 1; |
| 483 | 486 | ||
| 484 | /* From this point on the ARC firmware is dead. */ | 487 | /* From this point on the ARC firmware is dead. */ |
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 979cf9197282..42942038d0fd 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c | |||
| @@ -317,6 +317,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, | |||
| 317 | case CPU_BCM3302: | 317 | case CPU_BCM3302: |
| 318 | case CPU_BCM4710: | 318 | case CPU_BCM4710: |
| 319 | case CPU_LOONGSON2: | 319 | case CPU_LOONGSON2: |
| 320 | case CPU_CAVIUM_OCTEON: | ||
| 320 | if (m4kc_tlbp_war()) | 321 | if (m4kc_tlbp_war()) |
| 321 | uasm_i_nop(p); | 322 | uasm_i_nop(p); |
| 322 | tlbw(p); | 323 | tlbw(p); |
