path: root/arch/powerpc/mm
author     Brian King <brking@linux.vnet.ibm.com>            2009-08-28 08:06:29 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org> 2009-09-02 02:19:01 -0400
commit     46db2f86a3b2a94e0b33e0b4548fb7b7b6bdff66 (patch)
tree       6aef8ab146a54d04dd207b0f85f362a4aee3ef5d /arch/powerpc/mm
parent     b8e4a7dae53760b9791aca96e74366078692d90f (diff)
powerpc/pseries: Fix to handle slb resize across migration
The SLB can change sizes across a live migration, which was not being handled, resulting in possible machine crashes during migration if migrating to a machine which has a smaller max SLB size than the source machine. Fix this by first reducing the SLB size to the minimum possible value, which is 32, prior to migration. Then, during the device tree update which occurs after migration, we make the call to ensure the SLB gets updated. Also add the slb_size to the lparcfg output so that the migration tools can check to make sure the kernel has this capability before allowing migration in scenarios where the SLB size will change.

BenH: fixed #include <asm/mmu-hash64.h> -> <asm/mmu.h> to avoid breaking the ppc32 build.

Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
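For orientation, a minimal sketch of how the new slb_set_size() helper could be driven around a migration. The real call sites live under arch/powerpc/platforms/pseries and are outside this diffstat, so the hook names and the MIGRATION_SLB_MIN constant below are illustrative assumptions, not the committed code:

/* Illustrative sketch only: example_pre_migration(), example_post_migration()
 * and MIGRATION_SLB_MIN are hypothetical; the actual pseries migration code
 * is not part of this diff.
 */
#include <linux/types.h>
#include <asm/mmu.h>                    /* assumed to declare slb_set_size() */

#define MIGRATION_SLB_MIN       32      /* minimum SLB size per the commit message */

static void example_pre_migration(void)
{
        /* Shrink the SLB before suspending so a target machine with a
         * smaller maximum SLB size is never exceeded mid-migration. */
        slb_set_size(MIGRATION_SLB_MIN);
}

static void example_post_migration(u16 target_slb_size)
{
        /* During the post-migration device tree update, adopt whatever size
         * the target advertises; slb_set_size() returns early if the size
         * is unchanged. */
        slb_set_size(target_slb_size);
}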
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--    arch/powerpc/mm/slb.c    16
1 file changed, 12 insertions, 4 deletions
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 07961c5c169e..1d98ecc8eecd 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -249,14 +249,22 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 static inline void patch_slb_encoding(unsigned int *insn_addr,
                                        unsigned int immed)
 {
-        /* Assume the instruction had a "0" immediate value, just
-         * "or" in the new value
-         */
-        *insn_addr |= immed;
+        *insn_addr = (*insn_addr & 0xffff0000) | immed;
         flush_icache_range((unsigned long)insn_addr, 4+
                            (unsigned long)insn_addr);
 }
 
+void slb_set_size(u16 size)
+{
+        extern unsigned int *slb_compare_rr_to_size;
+
+        if (mmu_slb_size == size)
+                return;
+
+        mmu_slb_size = size;
+        patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
+}
+
 void slb_initialize(void)
 {
         unsigned long linear_llp, vmalloc_llp, io_llp;
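Aside on the patch_slb_encoding() change above: the old "or in the new value" approach only worked because the instruction started out with a zero immediate; once migration can re-patch it while a non-zero value is already in place, the low 16 bits have to be masked out first. A standalone sketch (plain C, made-up instruction word, not the kernel code) of the difference:

#include <assert.h>
#include <stdint.h>

/* Mirrors the new mask-and-insert logic; the instruction word below is a
 * made-up example value, not a real SLB comparison instruction. */
static uint32_t patch_imm(uint32_t insn, uint16_t immed)
{
        return (insn & 0xffff0000u) | immed;
}

int main(void)
{
        uint32_t insn = 0x38000100;     /* immediate currently 256 */

        insn = patch_imm(insn, 32);     /* shrink to the migration minimum */
        assert((insn & 0xffffu) == 32);

        insn = patch_imm(insn, 256);    /* restore after migration */
        assert((insn & 0xffffu) == 256);

        /* the old "insn |= immed" path would have left 0x120 (256 | 32) here */
        return 0;
}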