author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2007-04-11 02:13:19 -0400
committer	Paul Mackerras <paulus@samba.org>	2007-04-12 14:09:38 -0400
commit	e4ee3891db35aa9a069bb403c2a66a8fbfa274d6 (patch)
tree	c2c16d53c70e14e786b7d5a5722d38590fb4d7d2 /arch/powerpc/kernel/align.c
parent	e68c825bb016703eda94aac99be96de73b482d61 (diff)
[POWERPC] Alignment exception uses __get/put_user_inatomic

Make the alignment exception handler use the new _inatomic variants of
__get/put_user. This fixes erroneous warnings in the very rare cases where
we manage to have copy_tofrom_user_inatomic() trigger an alignment
exception.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/kernel/align.c')
-rw-r--r--	arch/powerpc/kernel/align.c	56
1 file changed, 31 insertions(+), 25 deletions(-)
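Context for readers outside the kernel tree: the regular __get_user()/__put_user() accessors may fault a page in, so they carry a might_sleep()-style debug check that fires when they are used somewhere sleeping is forbidden; the _inatomic variants skip that check and simply return -EFAULT if the access faults. Below is a minimal, standalone C sketch of that distinction, assuming nothing beyond libc. Every identifier in it (pagefaults_disabled, fake_get_user, fake_get_user_inatomic) is a made-up stand-in for illustration, not the real kernel API.

/*
 * Illustrative userspace sketch only -- not kernel code.  It models why
 * the alignment handler wants "_inatomic" accessors: the regular ones
 * complain when used while page faults are disabled.
 */
#include <stdio.h>

static int pagefaults_disabled;	/* models the atomic-context state */

/* Models __get_user(): may fault pages in, so the caller must be able to sleep. */
static int fake_get_user(unsigned int *dst, const unsigned int *src)
{
	if (pagefaults_disabled)
		fprintf(stderr, "BUG: sleeping accessor in atomic context\n");
	*dst = *src;
	return 0;	/* the real macro returns -EFAULT on failure */
}

/* Models __get_user_inatomic(): no sleep check; it just fails on a fault. */
static int fake_get_user_inatomic(unsigned int *dst, const unsigned int *src)
{
	*dst = *src;
	return 0;	/* likewise -EFAULT on failure, never a warning */
}

int main(void)
{
	unsigned int instr, word = 0x7c0802a6;

	/* copy_tofrom_user_inatomic() runs with page faults disabled... */
	pagefaults_disabled = 1;

	/* ...so an alignment exception taken inside it must not __get_user(): */
	fake_get_user(&instr, &word);		/* warns: the old behaviour */
	fake_get_user_inatomic(&instr, &word);	/* silent: the fixed path   */

	pagefaults_disabled = 0;
	printf("fetched instruction: 0x%08x\n", instr);
	return 0;
}

Built with any C compiler, the first call prints the mock warning while the second stays silent, which is the behavioural difference the patch relies on.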
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 4734b5de599d..5c9ff7f5c44e 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -241,7 +241,7 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
 	if (user_mode(regs) && !access_ok(VERIFY_WRITE, p, size))
 		return -EFAULT;
 	for (i = 0; i < size / sizeof(long); ++i)
-		if (__put_user(0, p+i))
+		if (__put_user_inatomic(0, p+i))
 			return -EFAULT;
 	return 1;
 }
@@ -288,7 +288,8 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
 	} else {
 		unsigned long pc = regs->nip ^ (swiz & 4);
 
-		if (__get_user(instr, (unsigned int __user *)pc))
+		if (__get_user_inatomic(instr,
+					(unsigned int __user *)pc))
 			return -EFAULT;
 		if (swiz == 0 && (flags & SW))
 			instr = cpu_to_le32(instr);
@@ -324,27 +325,31 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
 		       ((nb0 + 3) / 4) * sizeof(unsigned long));
 
 		for (i = 0; i < nb; ++i, ++p)
-			if (__get_user(REG_BYTE(rptr, i ^ bswiz), SWIZ_PTR(p)))
+			if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
+						SWIZ_PTR(p)))
 				return -EFAULT;
 		if (nb0 > 0) {
 			rptr = &regs->gpr[0];
 			addr += nb;
 			for (i = 0; i < nb0; ++i, ++p)
-				if (__get_user(REG_BYTE(rptr, i ^ bswiz),
-					       SWIZ_PTR(p)))
+				if (__get_user_inatomic(REG_BYTE(rptr,
+								 i ^ bswiz),
+							SWIZ_PTR(p)))
 					return -EFAULT;
 		}
 
 	} else {
 		for (i = 0; i < nb; ++i, ++p)
-			if (__put_user(REG_BYTE(rptr, i ^ bswiz), SWIZ_PTR(p)))
+			if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
+						SWIZ_PTR(p)))
 				return -EFAULT;
 		if (nb0 > 0) {
 			rptr = &regs->gpr[0];
 			addr += nb;
 			for (i = 0; i < nb0; ++i, ++p)
-				if (__put_user(REG_BYTE(rptr, i ^ bswiz),
-					       SWIZ_PTR(p)))
+				if (__put_user_inatomic(REG_BYTE(rptr,
+								 i ^ bswiz),
+							SWIZ_PTR(p)))
 					return -EFAULT;
 		}
 	}
@@ -398,7 +403,8 @@ int fix_alignment(struct pt_regs *regs)
 
 	if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE))
 		pc ^= 4;
-	if (unlikely(__get_user(instr, (unsigned int __user *)pc)))
+	if (unlikely(__get_user_inatomic(instr,
+					 (unsigned int __user *)pc)))
 		return -EFAULT;
 	if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE))
 		instr = cpu_to_le32(instr);
@@ -474,16 +480,16 @@ int fix_alignment(struct pt_regs *regs)
 		p = (unsigned long) addr;
 		switch (nb) {
 		case 8:
-			ret |= __get_user(data.v[0], SWIZ_PTR(p++));
-			ret |= __get_user(data.v[1], SWIZ_PTR(p++));
-			ret |= __get_user(data.v[2], SWIZ_PTR(p++));
-			ret |= __get_user(data.v[3], SWIZ_PTR(p++));
+			ret |= __get_user_inatomic(data.v[0], SWIZ_PTR(p++));
+			ret |= __get_user_inatomic(data.v[1], SWIZ_PTR(p++));
+			ret |= __get_user_inatomic(data.v[2], SWIZ_PTR(p++));
+			ret |= __get_user_inatomic(data.v[3], SWIZ_PTR(p++));
 		case 4:
-			ret |= __get_user(data.v[4], SWIZ_PTR(p++));
-			ret |= __get_user(data.v[5], SWIZ_PTR(p++));
+			ret |= __get_user_inatomic(data.v[4], SWIZ_PTR(p++));
+			ret |= __get_user_inatomic(data.v[5], SWIZ_PTR(p++));
 		case 2:
-			ret |= __get_user(data.v[6], SWIZ_PTR(p++));
-			ret |= __get_user(data.v[7], SWIZ_PTR(p++));
+			ret |= __get_user_inatomic(data.v[6], SWIZ_PTR(p++));
+			ret |= __get_user_inatomic(data.v[7], SWIZ_PTR(p++));
 			if (unlikely(ret))
 				return -EFAULT;
 		}
@@ -551,16 +557,16 @@ int fix_alignment(struct pt_regs *regs)
 		p = (unsigned long) addr;
 		switch (nb) {
 		case 8:
-			ret |= __put_user(data.v[0], SWIZ_PTR(p++));
-			ret |= __put_user(data.v[1], SWIZ_PTR(p++));
-			ret |= __put_user(data.v[2], SWIZ_PTR(p++));
-			ret |= __put_user(data.v[3], SWIZ_PTR(p++));
+			ret |= __put_user_inatomic(data.v[0], SWIZ_PTR(p++));
+			ret |= __put_user_inatomic(data.v[1], SWIZ_PTR(p++));
+			ret |= __put_user_inatomic(data.v[2], SWIZ_PTR(p++));
+			ret |= __put_user_inatomic(data.v[3], SWIZ_PTR(p++));
 		case 4:
-			ret |= __put_user(data.v[4], SWIZ_PTR(p++));
-			ret |= __put_user(data.v[5], SWIZ_PTR(p++));
+			ret |= __put_user_inatomic(data.v[4], SWIZ_PTR(p++));
+			ret |= __put_user_inatomic(data.v[5], SWIZ_PTR(p++));
 		case 2:
-			ret |= __put_user(data.v[6], SWIZ_PTR(p++));
-			ret |= __put_user(data.v[7], SWIZ_PTR(p++));
+			ret |= __put_user_inatomic(data.v[6], SWIZ_PTR(p++));
+			ret |= __put_user_inatomic(data.v[7], SWIZ_PTR(p++));
 		}
 		if (unlikely(ret))
 			return -EFAULT;
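One design note on the switch statements in the last two hunks: the case labels fall through deliberately, so an 8-byte access performs all eight byte transfers, a 4-byte access the last four, and a 2-byte access the last two, leaving the value right-aligned in the 8-byte data.v buffer (presumably so it reads back naturally on this big-endian machine); each return code is OR-ed into ret so a single -EFAULT check covers every transfer. A standalone sketch of the same pattern follows; get_byte() and read_right_aligned() are hypothetical names, not the kernel's.

#include <stdio.h>
#include <string.h>

/* Stand-in for __get_user_inatomic(): copy one byte, 0 on success. */
static int get_byte(unsigned char *dst, const unsigned char *src)
{
	*dst = *src;
	return 0;	/* the real accessor would return -EFAULT on a fault */
}

/*
 * Read the nb-byte value at src into the *last* nb bytes of v[8],
 * mirroring the fall-through layout used above.
 */
static int read_right_aligned(unsigned char v[8], const unsigned char *src,
			      int nb)
{
	int ret = 0;

	memset(v, 0, 8);
	switch (nb) {
	case 8:
		ret |= get_byte(&v[0], src++);
		ret |= get_byte(&v[1], src++);
		ret |= get_byte(&v[2], src++);
		ret |= get_byte(&v[3], src++);
		/* fall through */
	case 4:
		ret |= get_byte(&v[4], src++);
		ret |= get_byte(&v[5], src++);
		/* fall through */
	case 2:
		ret |= get_byte(&v[6], src++);
		ret |= get_byte(&v[7], src++);
	}
	return ret;	/* one check covers every byte transfer */
}

int main(void)
{
	const unsigned char mem[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	unsigned char v[8];

	if (read_right_aligned(v, mem, 4) == 0)	/* a 4-byte access */
		printf("v = %02x %02x %02x %02x %02x %02x %02x %02x\n",
		       v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);
	return 0;
}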