author    Paul Mundt <lethal@linux-sh.org>    2006-12-24 20:19:56 -0500
committer Paul Mundt <lethal@linux-sh.org>    2007-02-12 20:54:45 -0500
commit    11c1965687b0a472add948d4240dfe65a2fcb298
tree      69a71a34591bbdc6339dbe72de36819479f96198 /arch/sh/mm
parent    aec5e0e1c179fac4bbca4007a3f0d3107275a73c

sh: Fixup cpu_data references for the non-boot CPUs.

There are a lot of bogus cpu_data-> references that only end up working
for the boot CPU; convert these to current_cpu_data to fix up SMP.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
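For readers unfamiliar with the distinction: the per-CPU cache and TLB parameters live in an array with one sh_cpuinfo entry per processor, and the old cpu_data-> accessors always resolve to the boot CPU's entry, whereas current_cpu_data indexes the array by the executing CPU. The standalone mock below sketches that difference; the struct layout, field names, and the fixed CPU number are illustrative assumptions, not the actual arch/sh definitions.

/*
 * Hypothetical, self-contained mock of the boot-CPU vs. current-CPU lookup.
 * Struct layout and helper names are assumptions for illustration only.
 */
#include <stdio.h>

#define NR_CPUS 2

struct sh_cpuinfo {
	struct { unsigned int ways, sets; } dcache;
};

/* One cpuinfo entry per processor, filled in at probe time. */
static struct sh_cpuinfo cpu_info[NR_CPUS] = {
	{ .dcache = { .ways = 1, .sets = 512 } },	/* boot CPU (CPU 0) */
	{ .dcache = { .ways = 4, .sets = 256 } },	/* secondary CPU    */
};

/* Stand-in for smp_processor_id(): pretend we are running on CPU 1. */
static int smp_processor_id(void) { return 1; }

#define boot_cpu_data		(&cpu_info[0])
#define current_cpu_data	cpu_info[smp_processor_id()]

int main(void)
{
	/* A reference pinned to the boot CPU reports CPU 0's geometry... */
	printf("boot CPU d-cache ways    = %u\n", boot_cpu_data->dcache.ways);

	/* ...while current_cpu_data follows whichever CPU executes the code. */
	printf("current CPU d-cache ways = %u\n", current_cpu_data.dcache.ways);
	return 0;
}

With that distinction in mind, every cpu_data->dcache/icache/flags read in the flush and TLB paths below becomes a current_cpu_data.* read, so secondary CPUs operate on their own cache geometry rather than CPU 0's.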
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--   arch/sh/mm/cache-debugfs.c |  4
-rw-r--r--   arch/sh/mm/cache-sh3.c     |  8
-rw-r--r--   arch/sh/mm/cache-sh4.c     | 65
-rw-r--r--   arch/sh/mm/cache-sh7705.c  | 20
-rw-r--r--   arch/sh/mm/pg-sh4.c        |  2
-rw-r--r--   arch/sh/mm/pg-sh7705.c     |  6
-rw-r--r--   arch/sh/mm/tlb-sh3.c       |  2
7 files changed, 54 insertions(+), 53 deletions(-)
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 909dcfa8c8c6..de6d2c9aa477 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -46,10 +46,10 @@ static int cache_seq_show(struct seq_file *file, void *iter)
 
 	if (cache_type == CACHE_TYPE_DCACHE) {
 		base = CACHE_OC_ADDRESS_ARRAY;
-		cache = &cpu_data->dcache;
+		cache = &current_cpu_data.dcache;
 	} else {
 		base = CACHE_IC_ADDRESS_ARRAY;
-		cache = &cpu_data->icache;
+		cache = &current_cpu_data.icache;
 	}
 
 	/*
diff --git a/arch/sh/mm/cache-sh3.c b/arch/sh/mm/cache-sh3.c
index 838731fc608d..6d1dbec08ad4 100644
--- a/arch/sh/mm/cache-sh3.c
+++ b/arch/sh/mm/cache-sh3.c
@@ -44,11 +44,11 @@ void __flush_wback_region(void *start, int size)
 
 	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
 		unsigned long addrstart = CACHE_OC_ADDRESS_ARRAY;
-		for (j = 0; j < cpu_data->dcache.ways; j++) {
+		for (j = 0; j < current_cpu_data.dcache.ways; j++) {
 			unsigned long data, addr, p;
 
 			p = __pa(v);
-			addr = addrstart | (v & cpu_data->dcache.entry_mask);
+			addr = addrstart | (v & current_cpu_data.dcache.entry_mask);
 			local_irq_save(flags);
 			data = ctrl_inl(addr);
 
@@ -60,7 +60,7 @@ void __flush_wback_region(void *start, int size)
 				break;
 			}
 			local_irq_restore(flags);
-			addrstart += cpu_data->dcache.way_incr;
+			addrstart += current_cpu_data.dcache.way_incr;
 		}
 	}
 }
@@ -85,7 +85,7 @@ void __flush_purge_region(void *start, int size)
 
 		data = (v & 0xfffffc00); /* _Virtual_ address, ~U, ~V */
 		addr = CACHE_OC_ADDRESS_ARRAY |
-			(v & cpu_data->dcache.entry_mask) | SH_CACHE_ASSOC;
+			(v & current_cpu_data.dcache.entry_mask) | SH_CACHE_ASSOC;
 		ctrl_outl(data, addr);
 	}
 }
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 72bb48773337..e0cd4b7f4aeb 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -54,21 +54,21 @@ static void __init emit_cache_params(void)
 	       ctrl_inl(CCN_CVR),
 	       ctrl_inl(CCN_PRR));
 	printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
-	       cpu_data->icache.ways,
-	       cpu_data->icache.sets,
-	       cpu_data->icache.way_incr);
+	       current_cpu_data.icache.ways,
+	       current_cpu_data.icache.sets,
+	       current_cpu_data.icache.way_incr);
 	printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
-	       cpu_data->icache.entry_mask,
-	       cpu_data->icache.alias_mask,
-	       cpu_data->icache.n_aliases);
+	       current_cpu_data.icache.entry_mask,
+	       current_cpu_data.icache.alias_mask,
+	       current_cpu_data.icache.n_aliases);
 	printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
-	       cpu_data->dcache.ways,
-	       cpu_data->dcache.sets,
-	       cpu_data->dcache.way_incr);
+	       current_cpu_data.dcache.ways,
+	       current_cpu_data.dcache.sets,
+	       current_cpu_data.dcache.way_incr);
 	printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
-	       cpu_data->dcache.entry_mask,
-	       cpu_data->dcache.alias_mask,
-	       cpu_data->dcache.n_aliases);
+	       current_cpu_data.dcache.entry_mask,
+	       current_cpu_data.dcache.alias_mask,
+	       current_cpu_data.dcache.n_aliases);
 
 	if (!__flush_dcache_segment_fn)
 		panic("unknown number of cache ways\n");
@@ -87,10 +87,10 @@ void __init p3_cache_init(void)
 {
 	int i;
 
-	compute_alias(&cpu_data->icache);
-	compute_alias(&cpu_data->dcache);
+	compute_alias(&current_cpu_data.icache);
+	compute_alias(&current_cpu_data.dcache);
 
-	switch (cpu_data->dcache.ways) {
+	switch (current_cpu_data.dcache.ways) {
 	case 1:
 		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
 		break;
@@ -110,7 +110,7 @@ void __init p3_cache_init(void)
 	if (ioremap_page_range(P3SEG, P3SEG + (PAGE_SIZE * 4), 0, PAGE_KERNEL))
 		panic("%s failed.", __FUNCTION__);
 
-	for (i = 0; i < cpu_data->dcache.n_aliases; i++)
+	for (i = 0; i < current_cpu_data.dcache.n_aliases; i++)
 		mutex_init(&p3map_mutex[i]);
 }
 
@@ -200,13 +200,14 @@ void flush_cache_sigtramp(unsigned long addr)
 		     : /* no output */
 		     : "m" (__m(v)));
 
-	index = CACHE_IC_ADDRESS_ARRAY | (v & cpu_data->icache.entry_mask);
+	index = CACHE_IC_ADDRESS_ARRAY |
+		(v & current_cpu_data.icache.entry_mask);
 
 	local_irq_save(flags);
 	jump_to_P2();
 
-	for (i = 0; i < cpu_data->icache.ways;
-	     i++, index += cpu_data->icache.way_incr)
+	for (i = 0; i < current_cpu_data.icache.ways;
+	     i++, index += current_cpu_data.icache.way_incr)
 		ctrl_outl(0, index);	/* Clear out Valid-bit */
 
 	back_to_P1();
@@ -223,7 +224,7 @@ static inline void flush_cache_4096(unsigned long start,
 	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
 	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
 	 */
-	if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) ||
+	if ((current_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
 	    (start < CACHE_OC_ADDRESS_ARRAY))
 		exec_offset = 0x20000000;
 
@@ -255,7 +256,7 @@ void flush_dcache_page(struct page *page)
 		int i, n;
 
 		/* Loop all the D-cache */
-		n = cpu_data->dcache.n_aliases;
+		n = current_cpu_data.dcache.n_aliases;
 		for (i = 0; i < n; i++, addr += 4096)
 			flush_cache_4096(addr, phys);
 	}
@@ -287,7 +288,7 @@ static inline void flush_icache_all(void)
 
 void flush_dcache_all(void)
 {
-	(*__flush_dcache_segment_fn)(0UL, cpu_data->dcache.way_size);
+	(*__flush_dcache_segment_fn)(0UL, current_cpu_data.dcache.way_size);
 	wmb();
 }
 
@@ -301,8 +302,8 @@ static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
 			    unsigned long end)
 {
 	unsigned long d = 0, p = start & PAGE_MASK;
-	unsigned long alias_mask = cpu_data->dcache.alias_mask;
-	unsigned long n_aliases = cpu_data->dcache.n_aliases;
+	unsigned long alias_mask = current_cpu_data.dcache.alias_mask;
+	unsigned long n_aliases = current_cpu_data.dcache.n_aliases;
 	unsigned long select_bit;
 	unsigned long all_aliases_mask;
 	unsigned long addr_offset;
@@ -389,7 +390,7 @@ void flush_cache_mm(struct mm_struct *mm)
 	 * If cache is only 4k-per-way, there are never any 'aliases'. Since
 	 * the cache is physically tagged, the data can just be left in there.
 	 */
-	if (cpu_data->dcache.n_aliases == 0)
+	if (current_cpu_data.dcache.n_aliases == 0)
 		return;
 
 	/*
@@ -426,7 +427,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
 	unsigned long phys = pfn << PAGE_SHIFT;
 	unsigned int alias_mask;
 
-	alias_mask = cpu_data->dcache.alias_mask;
+	alias_mask = current_cpu_data.dcache.alias_mask;
 
 	/* We only need to flush D-cache when we have alias */
 	if ((address^phys) & alias_mask) {
@@ -440,7 +441,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
 			    phys);
 	}
 
-	alias_mask = cpu_data->icache.alias_mask;
+	alias_mask = current_cpu_data.icache.alias_mask;
 	if (vma->vm_flags & VM_EXEC) {
 		/*
 		 * Evict entries from the portion of the cache from which code
@@ -472,7 +473,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 	 * If cache is only 4k-per-way, there are never any 'aliases'. Since
 	 * the cache is physically tagged, the data can just be left in there.
 	 */
-	if (cpu_data->dcache.n_aliases == 0)
+	if (current_cpu_data.dcache.n_aliases == 0)
 		return;
 
 	/*
@@ -533,7 +534,7 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
 	unsigned long a, ea, p;
 	unsigned long temp_pc;
 
-	dcache = &cpu_data->dcache;
+	dcache = &current_cpu_data.dcache;
 	/* Write this way for better assembly. */
 	way_count = dcache->ways;
 	way_incr = dcache->way_incr;
@@ -608,7 +609,7 @@ static void __flush_dcache_segment_1way(unsigned long start,
 	base_addr = ((base_addr >> 16) << 16);
 	base_addr |= start;
 
-	dcache = &cpu_data->dcache;
+	dcache = &current_cpu_data.dcache;
 	linesz = dcache->linesz;
 	way_incr = dcache->way_incr;
 	way_size = dcache->way_size;
@@ -650,7 +651,7 @@ static void __flush_dcache_segment_2way(unsigned long start,
 	base_addr = ((base_addr >> 16) << 16);
 	base_addr |= start;
 
-	dcache = &cpu_data->dcache;
+	dcache = &current_cpu_data.dcache;
 	linesz = dcache->linesz;
 	way_incr = dcache->way_incr;
 	way_size = dcache->way_size;
@@ -709,7 +710,7 @@ static void __flush_dcache_segment_4way(unsigned long start,
 	base_addr = ((base_addr >> 16) << 16);
 	base_addr |= start;
 
-	dcache = &cpu_data->dcache;
+	dcache = &current_cpu_data.dcache;
 	linesz = dcache->linesz;
 	way_incr = dcache->way_incr;
 	way_size = dcache->way_size;
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 2808b580d984..31f8deb7a158 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -32,9 +32,9 @@ static inline void cache_wback_all(void)
 {
 	unsigned long ways, waysize, addrstart;
 
-	ways = cpu_data->dcache.ways;
-	waysize = cpu_data->dcache.sets;
-	waysize <<= cpu_data->dcache.entry_shift;
+	ways = current_cpu_data.dcache.ways;
+	waysize = current_cpu_data.dcache.sets;
+	waysize <<= current_cpu_data.dcache.entry_shift;
 
 	addrstart = CACHE_OC_ADDRESS_ARRAY;
 
@@ -43,7 +43,7 @@ static inline void cache_wback_all(void)
 
 		for (addr = addrstart;
 		     addr < addrstart + waysize;
-		     addr += cpu_data->dcache.linesz) {
+		     addr += current_cpu_data.dcache.linesz) {
 			unsigned long data;
 			int v = SH_CACHE_UPDATED | SH_CACHE_VALID;
 
@@ -53,7 +53,7 @@ static inline void cache_wback_all(void)
 			ctrl_outl(data & ~v, addr);
 		}
 
-		addrstart += cpu_data->dcache.way_incr;
+		addrstart += current_cpu_data.dcache.way_incr;
 	} while (--ways);
 }
 
@@ -93,9 +93,9 @@ static void __flush_dcache_page(unsigned long phys)
 	local_irq_save(flags);
 	jump_to_P2();
 
-	ways = cpu_data->dcache.ways;
-	waysize = cpu_data->dcache.sets;
-	waysize <<= cpu_data->dcache.entry_shift;
+	ways = current_cpu_data.dcache.ways;
+	waysize = current_cpu_data.dcache.sets;
+	waysize <<= current_cpu_data.dcache.entry_shift;
 
 	addrstart = CACHE_OC_ADDRESS_ARRAY;
 
@@ -104,7 +104,7 @@ static void __flush_dcache_page(unsigned long phys)
 
 		for (addr = addrstart;
 		     addr < addrstart + waysize;
-		     addr += cpu_data->dcache.linesz) {
+		     addr += current_cpu_data.dcache.linesz) {
 			unsigned long data;
 
 			data = ctrl_inl(addr) & (0x1ffffC00 | SH_CACHE_VALID);
@@ -114,7 +114,7 @@ static void __flush_dcache_page(unsigned long phys)
 			}
 		}
 
-		addrstart += cpu_data->dcache.way_incr;
+		addrstart += current_cpu_data.dcache.way_incr;
 	} while (--ways);
 
 	back_to_P1();
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index cfc323551741..b529d809dd4b 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -13,7 +13,7 @@
 
 extern struct mutex p3map_mutex[];
 
-#define CACHE_ALIAS (cpu_data->dcache.alias_mask)
+#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
 
 /*
  * clear_user_page
diff --git a/arch/sh/mm/pg-sh7705.c b/arch/sh/mm/pg-sh7705.c
index b052d0fee827..887ab9d18ccd 100644
--- a/arch/sh/mm/pg-sh7705.c
+++ b/arch/sh/mm/pg-sh7705.c
@@ -43,13 +43,13 @@ static inline void __flush_purge_virtual_region(void *p1, void *virt, int size)
 
 		p = __pa(p1_begin);
 
-		ways = cpu_data->dcache.ways;
+		ways = current_cpu_data.dcache.ways;
 		addr = CACHE_OC_ADDRESS_ARRAY;
 
 		do {
 			unsigned long data;
 
-			addr |= (v & cpu_data->dcache.entry_mask);
+			addr |= (v & current_cpu_data.dcache.entry_mask);
 
 			data = ctrl_inl(addr);
 			if ((data & CACHE_PHYSADDR_MASK) ==
@@ -58,7 +58,7 @@ static inline void __flush_purge_virtual_region(void *p1, void *virt, int size)
 				ctrl_outl(data, addr);
 			}
 
-			addr += cpu_data->dcache.way_incr;
+			addr += current_cpu_data.dcache.way_incr;
 		} while (--ways);
 
 		p1_begin += L1_CACHE_BYTES;
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 16627069c536..598c998dba5c 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -26,7 +26,7 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
 	addr = MMU_TLB_ADDRESS_ARRAY | (page & 0x1F000);
 	data = (page & 0xfffe0000) | asid; /* VALID bit is off */
 
-	if ((cpu_data->flags & CPU_HAS_MMU_PAGE_ASSOC)) {
+	if ((current_cpu_data.flags & CPU_HAS_MMU_PAGE_ASSOC)) {
 		addr |= MMU_PAGE_ASSOC_BIT;
 		ways = 1; /* we already know the way .. */
 	}