author     David S. Miller <davem@davemloft.net>    2008-09-16 17:11:43 -0400
committer  David S. Miller <davem@davemloft.net>    2008-09-16 17:11:43 -0400
commit     2e57572a50a4de41c6cbc879a4866a312d4cd316 (patch)
tree       c4f58ec96c06642c4b415b881d3f0a3b673d5b44 /arch/powerpc/platforms
parent     9b2e43ae4e9609f80034dfe8de895045cac52d77 (diff)
parent     f948cc6ab9e61a8e88d70ee9aafc690e6d26f92c (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
Conflicts: arch/sparc64/kernel/pci_psycho.c
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c   29
-rw-r--r--  arch/powerpc/platforms/powermac/Makefile     2
-rw-r--r--  arch/powerpc/platforms/ps3/htab.c          269
-rw-r--r--  arch/powerpc/platforms/ps3/setup.c           1
-rw-r--r--  arch/powerpc/platforms/ps3/spu.c            18
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c       1
6 files changed, 130 insertions, 190 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 1c1b627ee843..67595bc380dc 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -643,9 +643,10 @@ static struct spu *find_victim(struct spu_context *ctx)
                             !(tmp->flags & SPU_CREATE_NOSCHED) &&
                             (!victim || tmp->prio > victim->prio)) {
                                victim = spu->ctx;
-                               get_spu_context(victim);
                        }
                }
+               if (victim)
+                       get_spu_context(victim);
                mutex_unlock(&cbe_spu_info[node].list_mutex);
 
                if (victim) {
@@ -727,17 +728,33 @@ static void spu_schedule(struct spu *spu, struct spu_context *ctx)
        /* not a candidate for interruptible because it's called either
           from the scheduler thread or from spu_deactivate */
        mutex_lock(&ctx->state_mutex);
-       __spu_schedule(spu, ctx);
+       if (ctx->state == SPU_STATE_SAVED)
+               __spu_schedule(spu, ctx);
        spu_release(ctx);
 }
 
-static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
+/**
+ * spu_unschedule - remove a context from a spu, and possibly release it.
+ * @spu:       The SPU to unschedule from
+ * @ctx:       The context currently scheduled on the SPU
+ * @free_spu   Whether to free the SPU for other contexts
+ *
+ * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
+ * SPU is made available for other contexts (ie, may be returned by
+ * spu_get_idle). If this is zero, the caller is expected to schedule another
+ * context to this spu.
+ *
+ * Should be called with ctx->state_mutex held.
+ */
+static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
+               int free_spu)
 {
        int node = spu->node;
 
        mutex_lock(&cbe_spu_info[node].list_mutex);
        cbe_spu_info[node].nr_active--;
-       spu->alloc_state = SPU_FREE;
+       if (free_spu)
+               spu->alloc_state = SPU_FREE;
        spu_unbind_context(spu, ctx);
        ctx->stats.invol_ctx_switch++;
        spu->stats.invol_ctx_switch++;
@@ -837,7 +854,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
        if (spu) {
                new = grab_runnable_context(max_prio, spu->node);
                if (new || force) {
-                       spu_unschedule(spu, ctx);
+                       spu_unschedule(spu, ctx, new == NULL);
                        if (new) {
                                if (new->flags & SPU_CREATE_NOSCHED)
                                        wake_up(&new->stop_wq);
@@ -910,7 +927,7 @@ static noinline void spusched_tick(struct spu_context *ctx)
 
        new = grab_runnable_context(ctx->prio + 1, spu->node);
        if (new) {
-               spu_unschedule(spu, ctx);
+               spu_unschedule(spu, ctx, 0);
                if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
                        spu_add_to_rq(ctx);
        } else {
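
The spu_unschedule() kernel-doc above spells out a caller contract: the SPU is only marked free when the caller has no successor context to put on it, which is what the two call sites do by passing new == NULL and 0. A minimal standalone C sketch of that pattern follows; the names (struct slot, model_unschedule) are hypothetical illustrations, not kernel code.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical model of the contract, not kernel code. */
    struct slot { bool free; int owner; };

    /* Unbind the owner; release the slot only when no successor follows. */
    static void model_unschedule(struct slot *s, bool free_slot)
    {
        s->owner = -1;
        if (free_slot)
            s->free = true;
    }

    int main(void)
    {
        struct slot s = { false, 7 };
        int successor = -1;                  /* no runnable successor here */

        model_unschedule(&s, successor < 0); /* free only if none queued */
        printf("free=%d owner=%d\n", s.free, s.owner);
        return 0;
    }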
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
index 58ecdd72630f..be60d64be7ad 100644
--- a/arch/powerpc/platforms/powermac/Makefile
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -2,7 +2,7 @@ CFLAGS_bootx_init.o += -fPIC
 
 ifdef CONFIG_FTRACE
 # Do not trace early boot code
-CFLAGS_REMOVE_bootx_init.o = -pg
+CFLAGS_REMOVE_bootx_init.o = -pg -mno-sched-epilog
 endif
 
 obj-y += pic.o setup.o time.o feature.o pci.o \
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index 1cf901fa9031..6eb1d4d182c9 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -29,138 +29,75 @@
 
 #include "platform.h"
 
-#if defined(DEBUG)
-#define DBG udbg_printf
-#else
-#define DBG pr_debug
-#endif
-
-static struct hash_pte *htab;
-static unsigned long htab_addr;
-static unsigned char *bolttab;
-static unsigned char *inusetab;
+/**
+ * enum lpar_vas_id - id of LPAR virtual address space.
+ * @lpar_vas_id_current: Current selected virtual address space
+ *
+ * Identify the target LPAR address space.
+ */
+
+enum ps3_lpar_vas_id {
+        PS3_LPAR_VAS_ID_CURRENT = 0,
+};
 
-static DEFINE_SPINLOCK(ps3_bolttab_lock);
-
-#define debug_dump_hpte(_a, _b, _c, _d, _e, _f, _g) \
-        _debug_dump_hpte(_a, _b, _c, _d, _e, _f, _g, __func__, __LINE__)
-static void _debug_dump_hpte(unsigned long pa, unsigned long va,
-        unsigned long group, unsigned long bitmap, struct hash_pte lhpte,
-        int psize, unsigned long slot, const char* func, int line)
-{
-        DBG("%s:%d: pa = %lxh\n", func, line, pa);
-        DBG("%s:%d: lpar = %lxh\n", func, line,
-                ps3_mm_phys_to_lpar(pa));
-        DBG("%s:%d: va = %lxh\n", func, line, va);
-        DBG("%s:%d: group = %lxh\n", func, line, group);
-        DBG("%s:%d: bitmap = %lxh\n", func, line, bitmap);
-        DBG("%s:%d: hpte.v = %lxh\n", func, line, lhpte.v);
-        DBG("%s:%d: hpte.r = %lxh\n", func, line, lhpte.r);
-        DBG("%s:%d: psize = %xh\n", func, line, psize);
-        DBG("%s:%d: slot = %lxh\n", func, line, slot);
-}
+
+static DEFINE_SPINLOCK(ps3_htab_lock);
 
 static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
         unsigned long pa, unsigned long rflags, unsigned long vflags,
         int psize, int ssize)
 {
-        unsigned long slot;
-        struct hash_pte lhpte;
-        int secondary = 0;
-        unsigned long result;
-        unsigned long bitmap;
+        int result;
+        u64 hpte_v, hpte_r;
+        u64 inserted_index;
+        u64 evicted_v, evicted_r;
+        u64 hpte_v_array[4], hpte_rs;
         unsigned long flags;
-        unsigned long p_pteg, s_pteg, b_index, b_mask, cb, ci;
-
-        vflags &= ~HPTE_V_SECONDARY; /* this bit is ignored */
-
-        lhpte.v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) |
-                vflags | HPTE_V_VALID;
-        lhpte.r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags;
-
-        p_pteg = hpte_group / HPTES_PER_GROUP;
-        s_pteg = ~p_pteg & htab_hash_mask;
-
-        spin_lock_irqsave(&ps3_bolttab_lock, flags);
-
-        BUG_ON(bolttab[p_pteg] == 0xff && bolttab[s_pteg] == 0xff);
+        long ret = -1;
 
-        bitmap = (inusetab[p_pteg] << 8) | inusetab[s_pteg];
+        /*
+         * lv1_insert_htab_entry() will search for victim
+         * entry in both primary and secondary pte group
+         */
+        vflags &= ~HPTE_V_SECONDARY;
 
-        if (bitmap == 0xffff) {
-                /*
-                 * PTEG is full. Search for victim.
-                 */
-                bitmap &= ~((bolttab[p_pteg] << 8) | bolttab[s_pteg]);
-                do {
-                        ci = mftb() & 15;
-                        cb = 0x8000UL >> ci;
-                } while ((cb & bitmap) == 0);
-        } else {
-                /*
-                 * search free slot in hardware order
-                 * [primary] 0, 2, 4, 6, 1, 3, 5, 7
-                 * [secondary] 0, 2, 4, 6, 1, 3, 5, 7
-                 */
-                for (ci = 0; ci < HPTES_PER_GROUP; ci += 2) {
-                        cb = 0x8000UL >> ci;
-                        if ((cb & bitmap) == 0)
-                                goto found;
-                }
-                for (ci = 1; ci < HPTES_PER_GROUP; ci += 2) {
-                        cb = 0x8000UL >> ci;
-                        if ((cb & bitmap) == 0)
-                                goto found;
-                }
-                for (ci = HPTES_PER_GROUP; ci < HPTES_PER_GROUP*2; ci += 2) {
-                        cb = 0x8000UL >> ci;
-                        if ((cb & bitmap) == 0)
-                                goto found;
-                }
-                for (ci = HPTES_PER_GROUP+1; ci < HPTES_PER_GROUP*2; ci += 2) {
-                        cb = 0x8000UL >> ci;
-                        if ((cb & bitmap) == 0)
-                                goto found;
-                }
-        }
+        hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
+        hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags;
 
-found:
-        if (ci < HPTES_PER_GROUP) {
-                slot = p_pteg * HPTES_PER_GROUP + ci;
-        } else {
-                slot = s_pteg * HPTES_PER_GROUP + (ci & 7);
-                /* lhpte.dw0.dw0.h = 1; */
-                vflags |= HPTE_V_SECONDARY;
-                lhpte.v |= HPTE_V_SECONDARY;
-        }
+        spin_lock_irqsave(&ps3_htab_lock, flags);
 
-        result = lv1_write_htab_entry(0, slot, lhpte.v, lhpte.r);
+        /* talk hvc to replace entries BOLTED == 0 */
+        result = lv1_insert_htab_entry(PS3_LPAR_VAS_ID_CURRENT, hpte_group,
+                        hpte_v, hpte_r,
+                        HPTE_V_BOLTED, 0,
+                        &inserted_index,
+                        &evicted_v, &evicted_r);
 
        if (result) {
-               debug_dump_hpte(pa, va, hpte_group, bitmap, lhpte, psize, slot);
+               /* all entries bolted !*/
+               pr_info("%s:result=%d va=%lx pa=%lx ix=%lx v=%lx r=%lx\n",
+                       __func__, result, va, pa, hpte_group, hpte_v, hpte_r);
                BUG();
        }
 
        /*
-        * If used slot is not in primary HPTE group,
-        * the slot should be in secondary HPTE group.
+        * see if the entry is inserted into secondary pteg
         */
+       result = lv1_read_htab_entries(PS3_LPAR_VAS_ID_CURRENT,
+                       inserted_index & ~0x3UL,
+                       &hpte_v_array[0], &hpte_v_array[1],
+                       &hpte_v_array[2], &hpte_v_array[3],
+                       &hpte_rs);
+       BUG_ON(result);
 
-       if ((hpte_group ^ slot) & ~(HPTES_PER_GROUP - 1)) {
-               secondary = 1;
-               b_index = s_pteg;
-       } else {
-               secondary = 0;
-               b_index = p_pteg;
-       }
+       if (hpte_v_array[inserted_index % 4] & HPTE_V_SECONDARY)
+               ret = (inserted_index & 7) | (1 << 3);
+       else
+               ret = inserted_index & 7;
 
-       b_mask = (lhpte.v & HPTE_V_BOLTED) ? 1 << 7 : 0 << 7;
-       bolttab[b_index] |= b_mask >> (slot & 7);
-       b_mask = 1 << 7;
-       inusetab[b_index] |= b_mask >> (slot & 7);
-       spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
+       spin_unlock_irqrestore(&ps3_htab_lock, flags);
 
-       return (slot & 7) | (secondary << 3);
+       return ret;
 }
 
 static long ps3_hpte_remove(unsigned long hpte_group)
@@ -172,39 +109,48 @@ static long ps3_hpte_remove(unsigned long hpte_group)
 static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
        unsigned long va, int psize, int ssize, int local)
 {
+       int result;
+       u64 hpte_v, want_v, hpte_rs;
+       u64 hpte_v_array[4];
        unsigned long flags;
-       unsigned long result;
-       unsigned long pteg, bit;
-       unsigned long hpte_v, want_v;
+       long ret;
 
-       want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
+       want_v = hpte_encode_v(va, psize, ssize);
 
-       spin_lock_irqsave(&ps3_bolttab_lock, flags);
+       spin_lock_irqsave(&ps3_htab_lock, flags);
 
-       hpte_v = htab[slot].v;
-       if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
-               spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
-
-               /* ps3_hpte_insert() will be used to update PTE */
-               return -1;
-       }
-
-       result = lv1_write_htab_entry(0, slot, 0, 0);
+       result = lv1_read_htab_entries(PS3_LPAR_VAS_ID_CURRENT, slot & ~0x3UL,
+                       &hpte_v_array[0], &hpte_v_array[1],
+                       &hpte_v_array[2], &hpte_v_array[3],
+                       &hpte_rs);
 
        if (result) {
-               DBG("%s: va=%lx slot=%lx psize=%d result = %ld (0x%lx)\n",
-                       __func__, va, slot, psize, result, result);
+               pr_info("%s: res=%d read va=%lx slot=%lx psize=%d\n",
+                       __func__, result, va, slot, psize);
                BUG();
        }
 
-       pteg = slot / HPTES_PER_GROUP;
-       bit = slot % HPTES_PER_GROUP;
-       inusetab[pteg] &= ~(0x80 >> bit);
+       hpte_v = hpte_v_array[slot % 4];
 
-       spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
+       /*
+        * As lv1_read_htab_entries() does not give us the RPN, we can
+        * not synthesize the new hpte_r value here, and therefore can
+        * not update the hpte with lv1_insert_htab_entry(), so we
+        * insted invalidate it and ask the caller to update it via
+        * ps3_hpte_insert() by returning a -1 value.
+        */
+       if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
+               /* not found */
+               ret = -1;
+       } else {
+               /* entry found, just invalidate it */
+               result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT,
+                               slot, 0, 0);
+               ret = -1;
+       }
 
-       /* ps3_hpte_insert() will be used to update PTE */
-       return -1;
+       spin_unlock_irqrestore(&ps3_htab_lock, flags);
+       return ret;
 }
 
 static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
@@ -217,45 +163,35 @@ static void ps3_hpte_invalidate(unsigned long slot, unsigned long va,
        int psize, int ssize, int local)
 {
        unsigned long flags;
-       unsigned long result;
-       unsigned long pteg, bit;
+       int result;
+
+       spin_lock_irqsave(&ps3_htab_lock, flags);
 
-       spin_lock_irqsave(&ps3_bolttab_lock, flags);
-       result = lv1_write_htab_entry(0, slot, 0, 0);
+       result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, slot, 0, 0);
 
        if (result) {
-               DBG("%s: va=%lx slot=%lx psize=%d result = %ld (0x%lx)\n",
-                       __func__, va, slot, psize, result, result);
+               pr_info("%s: res=%d va=%lx slot=%lx psize=%d\n",
+                       __func__, result, va, slot, psize);
                BUG();
        }
 
-       pteg = slot / HPTES_PER_GROUP;
-       bit = slot % HPTES_PER_GROUP;
-       inusetab[pteg] &= ~(0x80 >> bit);
-       spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
+       spin_unlock_irqrestore(&ps3_htab_lock, flags);
 }
 
 static void ps3_hpte_clear(void)
 {
-       int result;
-
-       DBG(" -> %s:%d\n", __func__, __LINE__);
+       unsigned long hpte_count = (1UL << ppc64_pft_size) >> 4;
+       u64 i;
 
-       result = lv1_unmap_htab(htab_addr);
-       BUG_ON(result);
+       for (i = 0; i < hpte_count; i++)
+               lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, i, 0, 0);
 
        ps3_mm_shutdown();
        ps3_mm_vas_destroy();
-
-       DBG(" <- %s:%d\n", __func__, __LINE__);
 }
 
 void __init ps3_hpte_init(unsigned long htab_size)
 {
-       long bitmap_size;
-
-       DBG(" -> %s:%d\n", __func__, __LINE__);
-
        ppc_md.hpte_invalidate = ps3_hpte_invalidate;
        ppc_md.hpte_updatepp = ps3_hpte_updatepp;
        ppc_md.hpte_updateboltedpp = ps3_hpte_updateboltedpp;
@@ -264,28 +200,5 @@ void __init ps3_hpte_init(unsigned long htab_size)
        ppc_md.hpte_clear_all = ps3_hpte_clear;
 
        ppc64_pft_size = __ilog2(htab_size);
-
-       bitmap_size = htab_size / sizeof(struct hash_pte) / 8;
-
-       bolttab = __va(lmb_alloc(bitmap_size, 1));
-       inusetab = __va(lmb_alloc(bitmap_size, 1));
-
-       memset(bolttab, 0, bitmap_size);
-       memset(inusetab, 0, bitmap_size);
-
-       DBG(" <- %s:%d\n", __func__, __LINE__);
 }
 
-void __init ps3_map_htab(void)
-{
-       long result;
-       unsigned long htab_size = (1UL << ppc64_pft_size);
-
-       result = lv1_map_htab(0, &htab_addr);
-
-       htab = (__force struct hash_pte *)ioremap_flags(htab_addr, htab_size,
-               pgprot_val(PAGE_READONLY_X));
-
-       DBG("%s:%d: lpar %016lxh, virt %016lxh\n", __func__, __LINE__,
-               htab_addr, (unsigned long)htab);
-}
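
For reference, the return value computed at the end of the new ps3_hpte_insert() above packs two pieces of information: the low three bits are the slot within the 8-entry PTE group, and bit 3 appears to flag that the entry landed in the secondary hash group (mirroring the old "(slot & 7) | (secondary << 3)"). A standalone sketch of that encoding, illustrative only and not kernel code:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative re-statement of the return encoding, not kernel code. */
    static long encode_slot(unsigned long inserted_index, bool secondary)
    {
        long ret = inserted_index & 7;   /* slot within the 8-entry PTE group */

        if (secondary)
            ret |= 1 << 3;               /* entry went to the secondary group */
        return ret;
    }

    int main(void)
    {
        /* e.g. a (hypothetical) global HPT index of 42 reported back */
        printf("%ld\n", encode_slot(42, false));  /* prints 2 */
        printf("%ld\n", encode_slot(42, true));   /* prints 10 */
        return 0;
    }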
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index a413abbd4123..77bc330263c4 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -201,7 +201,6 @@ static void __init ps3_setup_arch(void)
                ps3_firmware_version.rev);
 
        ps3_spu_set_platform();
-       ps3_map_htab();
 
 #ifdef CONFIG_SMP
        smp_init_ps3();
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index d135cef9ed6a..ccae3d446b98 100644
--- a/arch/powerpc/platforms/ps3/spu.c
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -186,14 +186,24 @@ static void spu_unmap(struct spu *spu)
        iounmap(spu_pdata(spu)->shadow);
 }
 
+/**
+ * setup_areas - Map the spu regions into the address space.
+ *
+ * The current HV requires the spu shadow regs to be mapped with the
+ * PTE page protection bits set as read-only (PP=3). This implementation
+ * uses the low level __ioremap() to bypass the page protection settings
+ * inforced by ioremap_flags() to get the needed PTE bits set for the
+ * shadow regs.
+ */
+
 static int __init setup_areas(struct spu *spu)
 {
        struct table {char* name; unsigned long addr; unsigned long size;};
+       static const unsigned long shadow_flags = _PAGE_NO_CACHE | 3;
 
-       spu_pdata(spu)->shadow = ioremap_flags(spu_pdata(spu)->shadow_addr,
-                       sizeof(struct spe_shadow),
-                       pgprot_val(PAGE_READONLY) |
-                       _PAGE_NO_CACHE);
+       spu_pdata(spu)->shadow = __ioremap(spu_pdata(spu)->shadow_addr,
+                       sizeof(struct spe_shadow),
+                       shadow_flags);
        if (!spu_pdata(spu)->shadow) {
                pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
                goto fail_ioremap;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 3ce8a139b85d..7b01d67b4e48 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -71,6 +71,7 @@
 int CMO_PrPSP = -1;
 int CMO_SecPSP = -1;
 unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT);
+EXPORT_SYMBOL(CMO_PageSize);
 
 int fwnmi_active; /* TRUE if an FWNMI handler is present */
 