author    Paul Mundt <lethal@linux-sh.org>	2010-02-18 04:13:51 -0500
committer Paul Mundt <lethal@linux-sh.org>	2010-02-18 04:13:51 -0500
commit    d01447b3197c2c470a14666be2c640407bbbfec7 (patch)
tree      06d1b83868e4d3971b781b45607b124718ee2ec0 /arch
parent    2e450643d70b62e0192577681b227d7d5d2efa45 (diff)
sh: Merge legacy and dynamic PMB modes.
This implements a bit of rework for the PMB code, which permits us to
kill off the legacy PMB mode completely. Rather than trusting the boot
loader to do the right thing, we do a quick verification of the PMB
contents to determine whether to have the kernel set up the initial
mappings or whether it needs to mangle them later on instead.

If we're booting from legacy mappings, the kernel will now take control
of them and make them match the kernel's initial mapping configuration.
This is accomplished by breaking the initialization phase out into
multiple steps: synchronization, merging, and resizing.

With the recent rework, the synchronization code establishes page links
for compound mappings already, so we build on top of this for promoting
mappings and reclaiming unused slots.

At the same time, the changes introduced for the uncached helpers also
permit us to dynamically resize the uncached mapping without any
particular headaches. The smallest page size is more than sufficient for
mapping all of kernel text, and as we're careful not to jump to any far
off locations in the setup code, the mapping can safely be resized
regardless of whether we are executing from it or not.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
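The merging step is easiest to see in isolation: the patch walks a chain of contiguous mappings, accumulates the spanned size, and remembers the largest span that is itself a valid PMB page size, promoting the head entry and reclaiming the rest. Below is a minimal user-space sketch of that walk, assuming the SH-4A PMB page sizes of 16, 64, 128 and 512MB; the names mirror the patch, but the code is an illustration only, not the kernel implementation.

	#include <stdio.h>

	#define MB	(1024UL * 1024UL)

	/* The valid SH-4A PMB page sizes, per the patch's pmb_sizes[] table. */
	static const unsigned long pmb_sizes[] = { 16 * MB, 64 * MB, 128 * MB, 512 * MB };

	static int pmb_size_valid(unsigned long size)
	{
		unsigned int i;

		for (i = 0; i < sizeof(pmb_sizes) / sizeof(pmb_sizes[0]); i++)
			if (pmb_sizes[i] == size)
				return 1;
		return 0;
	}

	int main(void)
	{
		/* Four contiguous 16MB boot mappings, as a legacy loader might leave. */
		unsigned long chain[] = { 16 * MB, 16 * MB, 16 * MB, 16 * MB };
		unsigned long span = 0, newsize = 0;
		unsigned int i, depth = 0;

		/* Accumulate the span; remember the largest valid page size seen
		 * and how many entries beyond the head it would reclaim. */
		for (i = 0; i < sizeof(chain) / sizeof(chain[0]); i++) {
			span += chain[i];
			if (pmb_size_valid(span)) {
				newsize = span;
				depth = i;	/* entries beyond the head to reclaim */
			}
		}

		printf("span %luMB -> promote head to %luMB, reclaim %u slots\n",
		       span / MB, newsize / MB, depth);
		return 0;
	}

Run on the chain above, this reports a 64MB span promoted to a single 64MB page, reclaiming three slots, which is exactly what pmb_merge() in the patch below does to the hardware entries.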
Diffstat (limited to 'arch')
-rw-r--r--  arch/sh/boot/compressed/misc.c |   2
-rw-r--r--  arch/sh/include/asm/mmu.h      |  12
-rw-r--r--  arch/sh/include/asm/page.h     |  11
-rw-r--r--  arch/sh/include/asm/uncached.h |  18
-rw-r--r--  arch/sh/kernel/head_32.S       |  42
-rw-r--r--  arch/sh/kernel/setup.c         |   2
-rw-r--r--  arch/sh/mm/Kconfig             |  10
-rw-r--r--  arch/sh/mm/init.c              |   1
-rw-r--r--  arch/sh/mm/pmb.c               | 243
-rw-r--r--  arch/sh/mm/uncached.c          |   6
10 files changed, 276 insertions(+), 71 deletions(-)
diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
index 9ba07927d16a..27140a6b365d 100644
--- a/arch/sh/boot/compressed/misc.c
+++ b/arch/sh/boot/compressed/misc.c
@@ -117,7 +117,7 @@ void decompress_kernel(void)
 	output_addr = (CONFIG_MEMORY_START + 0x2000);
 #else
 	output_addr = __pa((unsigned long)&_text+PAGE_SIZE);
-#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_LEGACY)
+#if defined(CONFIG_29BIT)
 	output_addr |= P2SEG;
 #endif
 #endif
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index e42c4e2a41df..15a05b615ba7 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -58,7 +58,7 @@ typedef struct {
 long pmb_remap(unsigned long virt, unsigned long phys,
 	       unsigned long size, pgprot_t prot);
 void pmb_unmap(unsigned long addr);
-int pmb_init(void);
+void pmb_init(void);
 bool __in_29bit_mode(void);
 #else
 static inline long pmb_remap(unsigned long virt, unsigned long phys,
@@ -67,14 +67,8 @@ static inline long pmb_remap(unsigned long virt, unsigned long phys,
 	return -EINVAL;
 }
 
-static inline void pmb_unmap(unsigned long addr)
-{
-}
-
-static inline int pmb_init(void)
-{
-	return -ENODEV;
-}
+#define pmb_unmap(addr)		do { } while (0)
+#define pmb_init(addr)		do { } while (0)
 
 #ifdef CONFIG_29BIT
 #define __in_29bit_mode()	(1)
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 8237d9f53e56..d71feb359304 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -45,21 +45,12 @@
 #endif
 
 #ifndef __ASSEMBLY__
+#include <asm/uncached.h>
 
 extern unsigned long shm_align_mask;
 extern unsigned long max_low_pfn, min_low_pfn;
 extern unsigned long memory_start, memory_end;
 
-#ifdef CONFIG_UNCACHED_MAPPING
-extern unsigned long uncached_start, uncached_end;
-
-extern int virt_addr_uncached(unsigned long kaddr);
-extern void uncached_init(void);
-#else
-#define virt_addr_uncached(kaddr)	(0)
-#define uncached_init()			do { } while (0)
-#endif
-
 static inline unsigned long
 pages_do_alias(unsigned long addr1, unsigned long addr2)
 {
diff --git a/arch/sh/include/asm/uncached.h b/arch/sh/include/asm/uncached.h
new file mode 100644
index 000000000000..e3419f96626a
--- /dev/null
+++ b/arch/sh/include/asm/uncached.h
@@ -0,0 +1,18 @@
+#ifndef __ASM_SH_UNCACHED_H
+#define __ASM_SH_UNCACHED_H
+
+#include <linux/bug.h>
+
+#ifdef CONFIG_UNCACHED_MAPPING
+extern unsigned long uncached_start, uncached_end;
+
+extern int virt_addr_uncached(unsigned long kaddr);
+extern void uncached_init(void);
+extern void uncached_resize(unsigned long size);
+#else
+#define virt_addr_uncached(kaddr)	(0)
+#define uncached_init()			do { } while (0)
+#define uncached_resize(size)		BUG()
+#endif
+
+#endif /* __ASM_SH_UNCACHED_H */
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index 79ff39517f8e..fe0b743881b0 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -85,7 +85,7 @@ ENTRY(_stext)
 	ldc	r0, r7_bank	! ... and initial thread_info
 #endif
 
-#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
+#ifdef CONFIG_PMB
 /*
  * Reconfigure the initial PMB mappings setup by the hardware.
  *
@@ -139,7 +139,6 @@ ENTRY(_stext)
 	mov.l	r0, @r1
 
 	mov.l	.LMEMORY_SIZE, r5
-	mov	r5, r7
 
 	mov	#PMB_E_SHIFT, r0
 	mov	#0x1, r4
@@ -150,6 +149,40 @@ ENTRY(_stext)
 	mov.l	.LFIRST_ADDR_ENTRY, r2
 	mov.l	.LPMB_ADDR, r3
 
+	/*
+	 * First we need to walk the PMB and figure out if there are any
+	 * existing mappings that match the initial mappings VPN/PPN.
+	 * If these have already been established by the bootloader, we
+	 * don't bother setting up new entries here, and let the late PMB
+	 * initialization take care of things instead.
+	 *
+	 * Note that we may need to coalesce and merge entries in order
+	 * to reclaim more available PMB slots, which is much more than
+	 * we want to do at this early stage.
+	 */
+	mov	#0, r10
+	mov	#NR_PMB_ENTRIES, r9
+
+	mov	r1, r7		/* temporary PMB_DATA iter */
+
+.Lvalidate_existing_mappings:
+
+	mov.l	@r7, r8
+	and	r0, r8
+	cmp/eq	r0, r8		/* Check for valid __MEMORY_START mappings */
+	bt	.Lpmb_done
+
+	add	#1, r10		/* Increment the loop counter */
+	cmp/eq	r9, r10
+	bf/s	.Lvalidate_existing_mappings
+	 add	r4, r7		/* Increment to the next PMB_DATA entry */
+
+	/*
+	 * If we've fallen through, continue with setting up the initial
+	 * mappings.
+	 */
+
+	mov	r5, r7		/* cached_to_uncached */
 	mov	#0, r10
 
 #ifdef CONFIG_UNCACHED_MAPPING
@@ -252,7 +285,8 @@ ENTRY(_stext)
 	mov.l	6f, r0
 	icbi	@r0
 
-#endif /* !CONFIG_PMB_LEGACY */
+.Lpmb_done:
+#endif /* CONFIG_PMB */
 
 #ifndef CONFIG_SH_NO_BSS_INIT
 	/*
@@ -304,7 +338,7 @@ ENTRY(stack_start)
 6:	.long	sh_cpu_init
 7:	.long	init_thread_union
 
-#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
+#ifdef CONFIG_PMB
 .LPMB_ADDR:		.long	PMB_ADDR
 .LPMB_DATA:		.long	PMB_DATA
 .LFIRST_ADDR_ENTRY:	.long	PAGE_OFFSET | PMB_V
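In C terms, the validation loop added to head_32.S above does roughly the following. The array stands in for the NR_PMB_ENTRIES memory-mapped PMB_DATA slots the assembly walks via r7, the mask for the value the earlier setup code leaves in r0, and the bit value is illustrative; the function and its parameters are a reading aid, not kernel code.

	#include <stdio.h>

	#define NR_PMB_ENTRIES	16
	#define PMB_V		(1UL << 8)	/* illustrative valid-bit position */

	/* Rough C equivalent of .Lvalidate_existing_mappings: returns nonzero
	 * if any PMB data register already holds a valid mapping, in which
	 * case the assembly branches to .Lpmb_done and defers to late init. */
	static int bootloader_mappings_valid(const unsigned long *pmb_data,
					     unsigned long mask)
	{
		int i;

		for (i = 0; i < NR_PMB_ENTRIES; i++)	/* r10 counts the entries */
			if ((pmb_data[i] & mask) == mask)
				return 1;		/* bt .Lpmb_done */

		return 0;	/* fell through: set up the initial mappings */
	}

	int main(void)
	{
		unsigned long pmb_data[NR_PMB_ENTRIES] = { 0 };

		pmb_data[0] = 0x80000000UL | PMB_V;	/* pretend the loader mapped slot 0 */

		printf("defer to late init? %s\n",
		       bootloader_mappings_valid(pmb_data, PMB_V) ? "yes" : "no");
		return 0;
	}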
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index e187750dd319..3459e70eed72 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -421,6 +421,8 @@ void __init setup_arch(char **cmdline_p)
 
 	parse_early_param();
 
+	uncached_init();
+
 	plat_early_device_setup();
 
 	/* Let earlyprintk output early console messages */
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 65cb5b83e072..1445ca6257df 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -91,16 +91,6 @@ config PMB
 	  32-bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
 
-config PMB_LEGACY
-	bool "Support legacy boot mappings for PMB"
-	depends on PMB
-	select 32BIT
-	help
-	  If this option is enabled, fixed PMB mappings are inherited
-	  from the boot loader, and the kernel does not attempt dynamic
-	  management. This is the closest to legacy 29-bit physical mode,
-	  and allows systems to support up to 512MiB of system memory.
-
 config X2TLB
 	def_bool y
 	depends on (CPU_SHX2 || CPU_SHX3) && MMU
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 08e280d7cc7e..68028e8f26ce 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -245,7 +245,6 @@ void __init mem_init(void)
 	memset(empty_zero_page, 0, PAGE_SIZE);
 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
 
-	uncached_init();
 	vsyscall_init();
 
 	codesize = (unsigned long) &_etext - (unsigned long) &_text;
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index b9d5476e1284..198bcff5e96f 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -52,7 +52,7 @@ struct pmb_entry {
 	struct pmb_entry *link;
 };
 
-static void pmb_unmap_entry(struct pmb_entry *);
+static void pmb_unmap_entry(struct pmb_entry *, int depth);
 
 static DEFINE_RWLOCK(pmb_rwlock);
 static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
@@ -115,13 +115,14 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 
 	pmbe = &pmb_entry_list[pos];
 
+	memset(pmbe, 0, sizeof(struct pmb_entry));
+
 	spin_lock_init(&pmbe->lock);
 
 	pmbe->vpn	= vpn;
 	pmbe->ppn	= ppn;
 	pmbe->flags	= flags;
 	pmbe->entry	= pos;
-	pmbe->size	= 0;
 
 	return pmbe;
 
@@ -133,7 +134,9 @@ out:
 static void pmb_free(struct pmb_entry *pmbe)
 {
 	__clear_bit(pmbe->entry, pmb_map);
-	pmbe->entry = PMB_NO_ENTRY;
+
+	pmbe->entry	= PMB_NO_ENTRY;
+	pmbe->link	= NULL;
 }
 
 /*
@@ -161,9 +164,6 @@ static __always_inline unsigned long pmb_cache_flags(void)
  */
 static void __set_pmb_entry(struct pmb_entry *pmbe)
 {
-	pmbe->flags &= ~PMB_CACHE_MASK;
-	pmbe->flags |= pmb_cache_flags();
-
 	writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
 	writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
 			mk_pmb_data(pmbe->entry));
@@ -280,7 +280,7 @@ again:
 	return wanted - size;
 
 out:
-	pmb_unmap_entry(pmbp);
+	pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);
 
 	return err;
 }
@@ -302,18 +302,40 @@ void pmb_unmap(unsigned long addr)
 
 	read_unlock(&pmb_rwlock);
 
-	pmb_unmap_entry(pmbe);
+	pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
 }
 
-static void pmb_unmap_entry(struct pmb_entry *pmbe)
+static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
 {
-	unsigned long flags;
+	return (b->vpn == (a->vpn + a->size)) &&
+	       (b->ppn == (a->ppn + a->size)) &&
+	       (b->flags == a->flags);
+}
 
-	if (unlikely(!pmbe))
-		return;
+static bool pmb_size_valid(unsigned long size)
+{
+	int i;
 
-	write_lock_irqsave(&pmb_rwlock, flags);
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+		if (pmb_sizes[i].size == size)
+			return true;
+
+	return false;
+}
+
+static int pmb_size_to_flags(unsigned long size)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+		if (pmb_sizes[i].size == size)
+			return pmb_sizes[i].flag;
 
+	return 0;
+}
+
+static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
+{
 	do {
 		struct pmb_entry *pmblink = pmbe;
 
@@ -332,8 +354,18 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe)
 		pmbe = pmblink->link;
 
 		pmb_free(pmblink);
-	} while (pmbe);
+	} while (pmbe && --depth);
+}
+
+static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
+{
+	unsigned long flags;
 
+	if (unlikely(!pmbe))
+		return;
+
+	write_lock_irqsave(&pmb_rwlock, flags);
+	__pmb_unmap_entry(pmbe, depth);
 	write_unlock_irqrestore(&pmb_rwlock, flags);
 }
 
@@ -342,14 +374,40 @@ static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
 	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
 }
 
-static int pmb_synchronize_mappings(void)
+static void __init pmb_notify(void)
 {
-	unsigned int applied = 0;
-	struct pmb_entry *pmbp = NULL;
-	int i, j;
+	int i;
 
 	pr_info("PMB: boot mappings:\n");
 
+	read_lock(&pmb_rwlock);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		struct pmb_entry *pmbe;
+
+		if (!test_bit(i, pmb_map))
+			continue;
+
+		pmbe = &pmb_entry_list[i];
+
+		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
+			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
+			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
+	}
+
+	read_unlock(&pmb_rwlock);
+}
+
+/*
+ * Sync our software copy of the PMB mappings with those in hardware. The
+ * mappings in the hardware PMB were either set up by the bootloader or
+ * very early on by the kernel.
+ */
+static void __init pmb_synchronize(void)
+{
+	struct pmb_entry *pmbp = NULL;
+	int i, j;
+
 	/*
 	 * Run through the initial boot mappings, log the established
 	 * ones, and blow away anything that falls outside of the valid
@@ -432,10 +490,10 @@ static int pmb_synchronize_mappings(void)
 		/*
 		 * Compare the previous entry against the current one to
 		 * see if the entries span a contiguous mapping. If so,
-		 * setup the entry links accordingly.
+		 * setup the entry links accordingly. Compound mappings
+		 * are later coalesced.
 		 */
-		if ((pmbe->vpn == (pmbp->vpn + pmbp->size)) &&
-		    (pmbe->ppn == (pmbp->ppn + pmbp->size)))
+		if (pmb_can_merge(pmbp, pmbe))
 			pmbp->link = pmbe;
 
 		spin_unlock(&pmbp->lock);
@@ -444,37 +502,150 @@ static int pmb_synchronize_mappings(void)
 		pmbp = pmbe;
 
 		spin_unlock_irqrestore(&pmbe->lock, irqflags);
+	}
+}
 
-		pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n",
-			vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20,
-			(data_val & PMB_C) ? "" : "un");
+static void __init pmb_merge(struct pmb_entry *head)
+{
+	unsigned long span, newsize;
+	struct pmb_entry *tail;
+	int i = 1, depth = 0;
+
+	span = newsize = head->size;
 
-		applied++;
+	tail = head->link;
+	while (tail) {
+		span += tail->size;
+
+		if (pmb_size_valid(span)) {
+			newsize = span;
+			depth = i;
+		}
+
+		/* This is the end of the line.. */
+		if (!tail->link)
+			break;
+
+		tail = tail->link;
+		i++;
 	}
 
-	return (applied == 0);
+	/*
+	 * The merged page size must be valid.
+	 */
+	if (!pmb_size_valid(newsize))
+		return;
+
+	head->flags &= ~PMB_SZ_MASK;
+	head->flags |= pmb_size_to_flags(newsize);
+
+	head->size = newsize;
+
+	__pmb_unmap_entry(head->link, depth);
+	__set_pmb_entry(head);
 }
 
-int pmb_init(void)
+static void __init pmb_coalesce(void)
 {
-	int ret;
+	unsigned long flags;
+	int i;
+
+	write_lock_irqsave(&pmb_rwlock, flags);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		struct pmb_entry *pmbe;
+
+		if (!test_bit(i, pmb_map))
+			continue;
+
+		pmbe = &pmb_entry_list[i];
+
+		/*
+		 * We're only interested in compound mappings
+		 */
+		if (!pmbe->link)
+			continue;
+
+		/*
+		 * Nothing to do if it already uses the largest possible
+		 * page size.
+		 */
+		if (pmbe->size == SZ_512M)
+			continue;
+
+		pmb_merge(pmbe);
+	}
+
+	write_unlock_irqrestore(&pmb_rwlock, flags);
+}
+
+#ifdef CONFIG_UNCACHED_MAPPING
+static void __init pmb_resize(void)
+{
+	int i;
 
 	/*
-	 * Sync our software copy of the PMB mappings with those in
-	 * hardware. The mappings in the hardware PMB were either set up
-	 * by the bootloader or very early on by the kernel.
+	 * If the uncached mapping was constructed by the kernel, it will
+	 * already be a reasonable size.
 	 */
-	ret = pmb_synchronize_mappings();
-	if (unlikely(ret == 0))
-		return 0;
+	if (uncached_size == SZ_16M)
+		return;
+
+	read_lock(&pmb_rwlock);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		struct pmb_entry *pmbe;
+		unsigned long flags;
+
+		if (!test_bit(i, pmb_map))
+			continue;
+
+		pmbe = &pmb_entry_list[i];
+
+		if (pmbe->vpn != uncached_start)
+			continue;
+
+		/*
+		 * Found it, now resize it.
+		 */
+		spin_lock_irqsave(&pmbe->lock, flags);
+
+		pmbe->size	= SZ_16M;
+		pmbe->flags	&= ~PMB_SZ_MASK;
+		pmbe->flags	|= pmb_size_to_flags(pmbe->size);
+
+		uncached_resize(pmbe->size);
+
+		__set_pmb_entry(pmbe);
+
+		spin_unlock_irqrestore(&pmbe->lock, flags);
+	}
+
+	read_unlock(&pmb_rwlock);
+}
+#endif
+
+void __init pmb_init(void)
+{
+	/* Synchronize software state */
+	pmb_synchronize();
+
+	/* Attempt to combine compound mappings */
+	pmb_coalesce();
+
+#ifdef CONFIG_UNCACHED_MAPPING
+	/* Resize initial mappings, if necessary */
+	pmb_resize();
+#endif
+
+	/* Log them */
+	pmb_notify();
 
 	writel_uncached(0, PMB_IRMCR);
 
 	/* Flush out the TLB */
 	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
 	ctrl_barrier();
-
-	return 0;
 }
 
 bool __in_29bit_mode(void)
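To make the depth bookkeeping above concrete: given a 16MB head entry linked to three more 16MB entries, pmb_merge() sees spans of 32, 48 and 64MB, of which only 64MB is a valid PMB page size, so newsize becomes 64MB with depth = 3; the head is widened in place and __pmb_unmap_entry(head->link, 3) reclaims exactly the three now-redundant slots. A toy user-space model of that depth-limited teardown, with free() standing in for pmb_free()'s bitmap clear and all names illustrative:

	#include <stdio.h>
	#include <stdlib.h>

	struct entry {			/* toy stand-in for struct pmb_entry */
		int slot;
		struct entry *link;
	};

	/* Mirrors __pmb_unmap_entry(): walk the chain, reclaiming entries
	 * until it ends or depth is exhausted. */
	static void unmap_entry(struct entry *e, int depth)
	{
		do {
			struct entry *link = e->link;

			printf("reclaiming slot %d\n", e->slot);
			free(e);

			e = link;
		} while (e && --depth);
	}

	int main(void)
	{
		struct entry *head, *e;
		int i;

		/* Build a compound mapping: head (slot 0) linked to slots 1..3. */
		head = calloc(1, sizeof(*head));
		for (e = head, i = 1; i <= 3; i++, e = e->link) {
			e->link = calloc(1, sizeof(*e));
			e->link->slot = i;
		}

		/* After promoting the head to a 64MB page, reclaim the three
		 * now-redundant linked slots; the head itself stays in use. */
		unmap_entry(head->link, 3);
		free(head);
		return 0;
	}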
diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c
index 807906981d9d..cf20a5c5136a 100644
--- a/arch/sh/mm/uncached.c
+++ b/arch/sh/mm/uncached.c
@@ -26,3 +26,9 @@ void __init uncached_init(void)
 	uncached_start = memory_end;
 	uncached_end = uncached_start + uncached_size;
 }
+
+void __init uncached_resize(unsigned long size)
+{
+	uncached_size = size;
+	uncached_end = uncached_start + uncached_size;
+}
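The resize arithmetic itself is small enough to run stand-alone. Assuming, purely for illustration, an uncached area starting at 0x90000000 that a legacy loader left at 64MB, shrinking it to the smallest PMB page size pulls uncached_end in accordingly:

	#include <stdio.h>

	#define SZ_16M	(16UL << 20)

	/* Illustrative values; in the kernel these live in arch/sh/mm/uncached.c. */
	static unsigned long uncached_start = 0x90000000UL;
	static unsigned long uncached_size  = 64UL << 20;	/* legacy-loader sized */
	static unsigned long uncached_end;

	static void uncached_resize(unsigned long size)
	{
		uncached_size = size;
		uncached_end = uncached_start + uncached_size;
	}

	int main(void)
	{
		uncached_resize(SZ_16M);	/* as pmb_resize() does via the PMB entry */
		printf("uncached: 0x%08lx - 0x%08lx (%luMB)\n",
		       uncached_start, uncached_end, uncached_size >> 20);
		return 0;
	}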