author	Paul Mackerras <paulus@samba.org>	2006-06-07 02:14:40 -0400
committer	Paul Mackerras <paulus@samba.org>	2006-06-09 07:24:15 -0400
commit	fab5db97e44f76461f76b24adfa8ccb14d4df498
tree	123026a1a6f1702468220189b7410077479ae8a2
parent	651d765d0b2c72d33430487c8b6ef64c60cd2134
[PATCH] powerpc: Implement support for setting little-endian mode via prctl
This adds the PowerPC part of the code to allow processes to change
their endian mode via prctl.

This also extends the alignment exception handler to be able to fix up
alignment exceptions that occur in little-endian mode, both for
"PowerPC" little-endian and true little-endian.

We always enter signal handlers in big-endian mode -- the support for
little-endian mode does not amount to the creation of a little-endian
user/kernel ABI.  If the signal handler returns, the endian mode is
restored to what it was when the signal was delivered.

We have two new kernel CPU feature bits, one for PPC little-endian and
one for true little-endian.  Most of the classic 32-bit processors
support PPC little-endian, and this is reflected in the CPU feature
table.  There are two corresponding feature bits reported to userland
in the AT_HWCAP aux vector entry.

This is based on an earlier patch by Anton Blanchard.

Signed-off-by: Paul Mackerras <paulus@samba.org>
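As a rough illustration of the intended userland usage (not part of this
patch -- PR_GET_ENDIAN/PR_SET_ENDIAN and the PR_ENDIAN_* constants come
from the accompanying generic prctl patch), a process could query its
current endian mode like this:

	#include <stdio.h>
	#include <sys/prctl.h>

	int main(void)
	{
		unsigned int mode;

		/* PR_GET_ENDIAN stores PR_ENDIAN_BIG, PR_ENDIAN_LITTLE or
		 * PR_ENDIAN_PPC_LITTLE through the pointer in arg2. */
		if (prctl(PR_GET_ENDIAN, (unsigned long)&mode, 0, 0, 0) != 0) {
			perror("PR_GET_ENDIAN");
			return 1;
		}
		printf("endian mode: %u\n", mode);
		return 0;
	}

Note that a successful PR_SET_ENDIAN takes effect as soon as the prctl
returns to userspace, so the instruction stream at the return address
must already be encoded for the new mode; a plain big-endian binary
cannot simply switch itself and keep running.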
-rw-r--r--	arch/powerpc/kernel/align.c	189
-rw-r--r--	arch/powerpc/kernel/cputable.c	84
-rw-r--r--	arch/powerpc/kernel/process.c	44
-rw-r--r--	arch/powerpc/kernel/signal_32.c	15
-rw-r--r--	arch/powerpc/kernel/signal_64.c	12
-rw-r--r--	arch/powerpc/kernel/traps.c	2
-rw-r--r--	include/asm-powerpc/cputable.h	52
-rw-r--r--	include/asm-powerpc/processor.h	6
8 files changed, 267 insertions(+), 137 deletions(-)
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index faaec9c6f78f..4734b5de599d 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -35,17 +35,19 @@ struct aligninfo {
 
 #define INVALID	{ 0, 0 }
 
-#define LD	1	/* load */
-#define ST	2	/* store */
-#define SE	4	/* sign-extend value */
-#define F	8	/* to/from fp regs */
-#define U	0x10	/* update index register */
-#define M	0x20	/* multiple load/store */
-#define SW	0x40	/* byte swap int or ... */
-#define S	0x40	/* ... single-precision fp */
-#define SX	0x40	/* byte count in XER */
+/* Bits in the flags field */
+#define LD	0	/* load */
+#define ST	1	/* store */
+#define SE	2	/* sign-extend value */
+#define F	4	/* to/from fp regs */
+#define U	8	/* update index register */
+#define M	0x10	/* multiple load/store */
+#define SW	0x20	/* byte swap */
+#define S	0x40	/* single-precision fp or... */
+#define SX	0x40	/* ... byte count in XER */
 #define HARD	0x80	/* string, stwcx. */
 
+/* DSISR bits reported for a DCBZ instruction: */
 #define DCBZ	0x5f	/* 8xx/82xx dcbz faults when cache not enabled */
 
 #define SWAP(a, b)	(t = (a), (a) = (b), (b) = t)
@@ -256,12 +258,16 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
 #define REG_BYTE(rp, i)		*((u8 *)(rp) + (i))
 #endif
 
+#define SWIZ_PTR(p)		((unsigned char __user *)((p) ^ swiz))
+
 static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
 			unsigned int reg, unsigned int nb,
-			unsigned int flags, unsigned int instr)
+			unsigned int flags, unsigned int instr,
+			unsigned long swiz)
 {
 	unsigned long *rptr;
-	unsigned int nb0, i;
+	unsigned int nb0, i, bswiz;
+	unsigned long p;
 
 	/*
 	 * We do not try to emulate 8 bytes multiple as they aren't really
@@ -280,9 +286,12 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
 		if (nb == 0)
 			return 1;
 	} else {
-		if (__get_user(instr,
-			       (unsigned int __user *)regs->nip))
+		unsigned long pc = regs->nip ^ (swiz & 4);
+
+		if (__get_user(instr, (unsigned int __user *)pc))
 			return -EFAULT;
+		if (swiz == 0 && (flags & SW))
+			instr = cpu_to_le32(instr);
 		nb = (instr >> 11) & 0x1f;
 		if (nb == 0)
 			nb = 32;
@@ -300,7 +309,10 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
 		return -EFAULT;	/* bad address */
 
 	rptr = &regs->gpr[reg];
-	if (flags & LD) {
+	p = (unsigned long) addr;
+	bswiz = (flags & SW)? 3: 0;
+
+	if (!(flags & ST)) {
 		/*
 		 * This zeroes the top 4 bytes of the affected registers
 		 * in 64-bit mode, and also zeroes out any remaining
@@ -311,26 +323,28 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
 		memset(&regs->gpr[0], 0,
 		       ((nb0 + 3) / 4) * sizeof(unsigned long));
 
-		for (i = 0; i < nb; ++i)
-			if (__get_user(REG_BYTE(rptr, i), addr + i))
+		for (i = 0; i < nb; ++i, ++p)
+			if (__get_user(REG_BYTE(rptr, i ^ bswiz), SWIZ_PTR(p)))
 				return -EFAULT;
 		if (nb0 > 0) {
 			rptr = &regs->gpr[0];
 			addr += nb;
-			for (i = 0; i < nb0; ++i)
-				if (__get_user(REG_BYTE(rptr, i), addr + i))
+			for (i = 0; i < nb0; ++i, ++p)
+				if (__get_user(REG_BYTE(rptr, i ^ bswiz),
+					       SWIZ_PTR(p)))
 					return -EFAULT;
 		}
 
 	} else {
-		for (i = 0; i < nb; ++i)
-			if (__put_user(REG_BYTE(rptr, i), addr + i))
+		for (i = 0; i < nb; ++i, ++p)
+			if (__put_user(REG_BYTE(rptr, i ^ bswiz), SWIZ_PTR(p)))
 				return -EFAULT;
 		if (nb0 > 0) {
 			rptr = &regs->gpr[0];
 			addr += nb;
-			for (i = 0; i < nb0; ++i)
-				if (__put_user(REG_BYTE(rptr, i), addr + i))
+			for (i = 0; i < nb0; ++i, ++p)
+				if (__put_user(REG_BYTE(rptr, i ^ bswiz),
+					       SWIZ_PTR(p)))
 					return -EFAULT;
 		}
 	}
@@ -352,7 +366,7 @@ int fix_alignment(struct pt_regs *regs)
 	unsigned int reg, areg;
 	unsigned int dsisr;
 	unsigned char __user *addr;
-	unsigned char __user *p;
+	unsigned long p, swiz;
 	int ret, t;
 	union {
 		u64 ll;
@@ -380,11 +394,15 @@ int fix_alignment(struct pt_regs *regs)
 	 * let's make one up from the instruction
 	 */
 	if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) {
-		unsigned int real_instr;
-		if (unlikely(__get_user(real_instr,
-					(unsigned int __user *)regs->nip)))
+		unsigned long pc = regs->nip;
+
+		if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE))
+			pc ^= 4;
+		if (unlikely(__get_user(instr, (unsigned int __user *)pc)))
 			return -EFAULT;
-		dsisr = make_dsisr(real_instr);
+		if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE))
+			instr = cpu_to_le32(instr);
+		dsisr = make_dsisr(instr);
 	}
 
 	/* extract the operation and registers from the dsisr */
@@ -397,6 +415,24 @@ int fix_alignment(struct pt_regs *regs)
 	nb = aligninfo[instr].len;
 	flags = aligninfo[instr].flags;
 
+	/* Byteswap little endian loads and stores */
+	swiz = 0;
+	if (regs->msr & MSR_LE) {
+		flags ^= SW;
+		/*
+		 * So-called "PowerPC little endian" mode works by
+		 * swizzling addresses rather than by actually doing
+		 * any byte-swapping.  To emulate this, we XOR each
+		 * byte address with 7.  We also byte-swap, because
+		 * the processor's address swizzling depends on the
+		 * operand size (it xors the address with 7 for bytes,
+		 * 6 for halfwords, 4 for words, 0 for doublewords) but
+		 * we will xor with 7 and load/store each byte separately.
+		 */
+		if (cpu_has_feature(CPU_FTR_PPC_LE))
+			swiz = 7;
+	}
+
 	/* DAR has the operand effective address */
 	addr = (unsigned char __user *)regs->dar;
 
@@ -412,7 +448,8 @@ int fix_alignment(struct pt_regs *regs)
 	 * function
 	 */
 	if (flags & M)
-		return emulate_multiple(regs, addr, reg, nb, flags, instr);
+		return emulate_multiple(regs, addr, reg, nb,
+					flags, instr, swiz);
 
 	/* Verify the address of the operand */
 	if (unlikely(user_mode(regs) &&
@@ -431,51 +468,71 @@ int fix_alignment(struct pt_regs *regs)
 	/* If we are loading, get the data from user space, else
 	 * get it from register values
 	 */
-	if (flags & LD) {
+	if (!(flags & ST)) {
 		data.ll = 0;
 		ret = 0;
-		p = addr;
+		p = (unsigned long) addr;
 		switch (nb) {
 		case 8:
-			ret |= __get_user(data.v[0], p++);
-			ret |= __get_user(data.v[1], p++);
-			ret |= __get_user(data.v[2], p++);
-			ret |= __get_user(data.v[3], p++);
+			ret |= __get_user(data.v[0], SWIZ_PTR(p++));
+			ret |= __get_user(data.v[1], SWIZ_PTR(p++));
+			ret |= __get_user(data.v[2], SWIZ_PTR(p++));
+			ret |= __get_user(data.v[3], SWIZ_PTR(p++));
 		case 4:
-			ret |= __get_user(data.v[4], p++);
-			ret |= __get_user(data.v[5], p++);
+			ret |= __get_user(data.v[4], SWIZ_PTR(p++));
+			ret |= __get_user(data.v[5], SWIZ_PTR(p++));
 		case 2:
-			ret |= __get_user(data.v[6], p++);
-			ret |= __get_user(data.v[7], p++);
+			ret |= __get_user(data.v[6], SWIZ_PTR(p++));
+			ret |= __get_user(data.v[7], SWIZ_PTR(p++));
 			if (unlikely(ret))
 				return -EFAULT;
 		}
-	} else if (flags & F)
+	} else if (flags & F) {
 		data.dd = current->thread.fpr[reg];
-	else
+		if (flags & S) {
+			/* Single-precision FP store requires conversion... */
+#ifdef CONFIG_PPC_FPU
+			preempt_disable();
+			enable_kernel_fp();
+			cvt_df(&data.dd, (float *)&data.v[4], &current->thread);
+			preempt_enable();
+#else
+			return 0;
+#endif
+		}
+	} else
 		data.ll = regs->gpr[reg];
 
-	/* Perform other misc operations like sign extension, byteswap,
+	if (flags & SW) {
+		switch (nb) {
+		case 8:
+			SWAP(data.v[0], data.v[7]);
+			SWAP(data.v[1], data.v[6]);
+			SWAP(data.v[2], data.v[5]);
+			SWAP(data.v[3], data.v[4]);
+			break;
+		case 4:
+			SWAP(data.v[4], data.v[7]);
+			SWAP(data.v[5], data.v[6]);
+			break;
+		case 2:
+			SWAP(data.v[6], data.v[7]);
+			break;
+		}
+	}
+
+	/* Perform other misc operations like sign extension
 	 * or floating point single precision conversion
 	 */
-	switch (flags & ~U) {
+	switch (flags & ~(U|SW)) {
 	case LD+SE:	/* sign extend */
 		if ( nb == 2 )
 			data.ll = data.x16.low16;
 		else	/* nb must be 4 */
 			data.ll = data.x32.low32;
 		break;
-	case LD+S:	/* byte-swap */
-	case ST+S:
-		if (nb == 2) {
-			SWAP(data.v[6], data.v[7]);
-		} else {
-			SWAP(data.v[4], data.v[7]);
-			SWAP(data.v[5], data.v[6]);
-		}
-		break;
 
-	/* Single-precision FP load and store require conversions... */
+	/* Single-precision FP load requires conversion... */
 	case LD+F+S:
 #ifdef CONFIG_PPC_FPU
 		preempt_disable();
@@ -486,34 +543,24 @@ int fix_alignment(struct pt_regs *regs)
 		return 0;
 #endif
 		break;
-	case ST+F+S:
-#ifdef CONFIG_PPC_FPU
-		preempt_disable();
-		enable_kernel_fp();
-		cvt_df(&data.dd, (float *)&data.v[4], &current->thread);
-		preempt_enable();
-#else
-		return 0;
-#endif
-		break;
 	}
 
 	/* Store result to memory or update registers */
 	if (flags & ST) {
 		ret = 0;
-		p = addr;
+		p = (unsigned long) addr;
 		switch (nb) {
 		case 8:
-			ret |= __put_user(data.v[0], p++);
-			ret |= __put_user(data.v[1], p++);
-			ret |= __put_user(data.v[2], p++);
-			ret |= __put_user(data.v[3], p++);
+			ret |= __put_user(data.v[0], SWIZ_PTR(p++));
+			ret |= __put_user(data.v[1], SWIZ_PTR(p++));
+			ret |= __put_user(data.v[2], SWIZ_PTR(p++));
+			ret |= __put_user(data.v[3], SWIZ_PTR(p++));
 		case 4:
-			ret |= __put_user(data.v[4], p++);
-			ret |= __put_user(data.v[5], p++);
+			ret |= __put_user(data.v[4], SWIZ_PTR(p++));
+			ret |= __put_user(data.v[5], SWIZ_PTR(p++));
 		case 2:
-			ret |= __put_user(data.v[6], p++);
-			ret |= __put_user(data.v[7], p++);
+			ret |= __put_user(data.v[6], SWIZ_PTR(p++));
+			ret |= __put_user(data.v[7], SWIZ_PTR(p++));
 		}
 		if (unlikely(ret))
 			return -EFAULT;
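To make the address-swizzling comment in fix_alignment() above a bit
more concrete, here is a stand-alone user-space sketch (not kernel
code; the helper names are made up for illustration).  It checks that
reading each byte through an address XORed with 7 and then
byte-swapping the result gives the same word as the hardware's
size-dependent swizzle (XOR with 4 for an aligned word):

	#include <stdio.h>

	static unsigned int load32_hw(const unsigned char *dw, unsigned int off)
	{
		/* Aligned 4-byte access in PPC little-endian mode: the
		 * hardware XORs the address with 4 and reads big-endian. */
		const unsigned char *p = dw + (off ^ 4);
		return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
	}

	static unsigned int load32_emul(const unsigned char *dw, unsigned int off)
	{
		/* What the alignment handler does: fetch each byte through
		 * the (addr ^ 7) swizzle, then byte-swap the assembled word. */
		unsigned char v[4];
		unsigned int i;

		for (i = 0; i < 4; i++)
			v[i] = dw[(off + i) ^ 7];
		return (v[3] << 24) | (v[2] << 16) | (v[1] << 8) | v[0];
	}

	int main(void)
	{
		unsigned char dw[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

		/* Both print 04050607 for offset 0 and 00010203 for offset 4. */
		printf("%08x %08x\n", load32_hw(dw, 0), load32_emul(dw, 0));
		printf("%08x %08x\n", load32_hw(dw, 4), load32_emul(dw, 4));
		return 0;
	}

The per-byte form has the advantage that it also covers unaligned
operands, which are exactly the cases that reach the alignment handler.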
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 83f9ab139d4c..dfe2fcfb20a0 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -54,7 +54,8 @@ extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
 #define COMMON_USER_POWER5_PLUS	(COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS|\
 					 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
 #define COMMON_USER_POWER6	(COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_05 |\
-					 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
+					 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
+					 PPC_FEATURE_TRUE_LE)
 #define COMMON_USER_BOOKE	(PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
 				 PPC_FEATURE_BOOKE)
 
@@ -74,7 +75,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x00400000,
 		.cpu_name		= "POWER3 (630)",
 		.cpu_features		= CPU_FTRS_POWER3,
-		.cpu_user_features	= COMMON_USER_PPC64,
+		.cpu_user_features	= COMMON_USER_PPC64|PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.num_pmcs		= 8,
@@ -87,7 +88,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x00410000,
 		.cpu_name		= "POWER3 (630+)",
 		.cpu_features		= CPU_FTRS_POWER3,
-		.cpu_user_features	= COMMON_USER_PPC64,
+		.cpu_user_features	= COMMON_USER_PPC64|PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.num_pmcs		= 8,
@@ -318,7 +319,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x00030000,
 		.cpu_name		= "603",
 		.cpu_features		= CPU_FTRS_603,
-		.cpu_user_features	= COMMON_USER,
+		.cpu_user_features	= COMMON_USER | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.cpu_setup		= __setup_cpu_603,
@@ -329,7 +330,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x00060000,
 		.cpu_name		= "603e",
 		.cpu_features		= CPU_FTRS_603,
-		.cpu_user_features	= COMMON_USER,
+		.cpu_user_features	= COMMON_USER | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.cpu_setup		= __setup_cpu_603,
@@ -340,7 +341,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x00070000,
 		.cpu_name		= "603ev",
 		.cpu_features		= CPU_FTRS_603,
-		.cpu_user_features	= COMMON_USER,
+		.cpu_user_features	= COMMON_USER | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.cpu_setup		= __setup_cpu_603,
@@ -351,7 +352,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x00040000,
 		.cpu_name		= "604",
 		.cpu_features		= CPU_FTRS_604,
-		.cpu_user_features	= COMMON_USER,
+		.cpu_user_features	= COMMON_USER | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 2,
@@ -363,7 +364,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x00090000,
 		.cpu_name		= "604e",
 		.cpu_features		= CPU_FTRS_604,
-		.cpu_user_features	= COMMON_USER,
+		.cpu_user_features	= COMMON_USER | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -375,7 +376,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x00090000,
 		.cpu_name		= "604r",
 		.cpu_features		= CPU_FTRS_604,
-		.cpu_user_features	= COMMON_USER,
+		.cpu_user_features	= COMMON_USER | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -387,7 +388,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x000a0000,
 		.cpu_name		= "604ev",
 		.cpu_features		= CPU_FTRS_604,
-		.cpu_user_features	= COMMON_USER,
+		.cpu_user_features	= COMMON_USER | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -399,7 +400,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x00084202,
 		.cpu_name		= "740/750",
 		.cpu_features		= CPU_FTRS_740_NOTAU,
-		.cpu_user_features	= COMMON_USER,
+		.cpu_user_features	= COMMON_USER | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -411,7 +412,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x00080100,
 		.cpu_name		= "750CX",
 		.cpu_features		= CPU_FTRS_750,
-		.cpu_user_features	= COMMON_USER,
+		.cpu_user_features	= COMMON_USER | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -423,7 +424,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x00082200,
 		.cpu_name		= "750CX",
 		.cpu_features		= CPU_FTRS_750,
-		.cpu_user_features	= COMMON_USER,
+		.cpu_user_features	= COMMON_USER | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -435,7 +436,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x00082210,
 		.cpu_name		= "750CXe",
 		.cpu_features		= CPU_FTRS_750,
-		.cpu_user_features	= COMMON_USER,
+		.cpu_user_features	= COMMON_USER | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -447,7 +448,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x00083214,
 		.cpu_name		= "750CXe",
 		.cpu_features		= CPU_FTRS_750,
-		.cpu_user_features	= COMMON_USER,
+		.cpu_user_features	= COMMON_USER | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -459,7 +460,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x00083000,
 		.cpu_name		= "745/755",
 		.cpu_features		= CPU_FTRS_750,
-		.cpu_user_features	= COMMON_USER,
+		.cpu_user_features	= COMMON_USER | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -471,7 +472,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x70000100,
 		.cpu_name		= "750FX",
 		.cpu_features		= CPU_FTRS_750FX1,
-		.cpu_user_features	= COMMON_USER,
+		.cpu_user_features	= COMMON_USER | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -483,7 +484,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x70000200,
 		.cpu_name		= "750FX",
 		.cpu_features		= CPU_FTRS_750FX2,
-		.cpu_user_features	= COMMON_USER,
+		.cpu_user_features	= COMMON_USER | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -495,7 +496,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x70000000,
 		.cpu_name		= "750FX",
 		.cpu_features		= CPU_FTRS_750FX,
-		.cpu_user_features	= COMMON_USER,
+		.cpu_user_features	= COMMON_USER | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -507,7 +508,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x70020000,
 		.cpu_name		= "750GX",
 		.cpu_features		= CPU_FTRS_750GX,
-		.cpu_user_features	= COMMON_USER,
+		.cpu_user_features	= COMMON_USER | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -519,7 +520,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x00080000,
 		.cpu_name		= "740/750",
 		.cpu_features		= CPU_FTRS_740,
-		.cpu_user_features	= COMMON_USER,
+		.cpu_user_features	= COMMON_USER | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -531,7 +532,8 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x000c1101,
 		.cpu_name		= "7400 (1.1)",
 		.cpu_features		= CPU_FTRS_7400_NOTAU,
-		.cpu_user_features	= COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+		.cpu_user_features	= COMMON_USER |
+			PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -543,7 +545,8 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x000c0000,
 		.cpu_name		= "7400",
 		.cpu_features		= CPU_FTRS_7400,
-		.cpu_user_features	= COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+		.cpu_user_features	= COMMON_USER |
+			PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -555,7 +558,8 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x800c0000,
 		.cpu_name		= "7410",
 		.cpu_features		= CPU_FTRS_7400,
-		.cpu_user_features	= COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+		.cpu_user_features	= COMMON_USER |
+			PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -567,7 +571,8 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x80000200,
 		.cpu_name		= "7450",
 		.cpu_features		= CPU_FTRS_7450_20,
-		.cpu_user_features	= COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+		.cpu_user_features	= COMMON_USER |
+			PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -581,7 +586,8 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x80000201,
 		.cpu_name		= "7450",
 		.cpu_features		= CPU_FTRS_7450_21,
-		.cpu_user_features	= COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+		.cpu_user_features	= COMMON_USER |
+			PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -595,7 +601,8 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x80000000,
 		.cpu_name		= "7450",
 		.cpu_features		= CPU_FTRS_7450_23,
-		.cpu_user_features	= COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+		.cpu_user_features	= COMMON_USER |
+			PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -609,7 +616,8 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x80010100,
 		.cpu_name		= "7455",
 		.cpu_features		= CPU_FTRS_7455_1,
-		.cpu_user_features	= COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+		.cpu_user_features	= COMMON_USER |
+			PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -623,7 +631,8 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x80010200,
 		.cpu_name		= "7455",
 		.cpu_features		= CPU_FTRS_7455_20,
-		.cpu_user_features	= COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+		.cpu_user_features	= COMMON_USER |
+			PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -637,7 +646,8 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x80010000,
 		.cpu_name		= "7455",
 		.cpu_features		= CPU_FTRS_7455,
-		.cpu_user_features	= COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+		.cpu_user_features	= COMMON_USER |
+			PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -651,7 +661,8 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x80020100,
 		.cpu_name		= "7447/7457",
 		.cpu_features		= CPU_FTRS_7447_10,
-		.cpu_user_features	= COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+		.cpu_user_features	= COMMON_USER |
+			PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -665,7 +676,8 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x80020101,
 		.cpu_name		= "7447/7457",
 		.cpu_features		= CPU_FTRS_7447_10,
-		.cpu_user_features	= COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+		.cpu_user_features	= COMMON_USER |
+			PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -679,7 +691,7 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x80020000,
 		.cpu_name		= "7447/7457",
 		.cpu_features		= CPU_FTRS_7447,
-		.cpu_user_features	= COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+		.cpu_user_features	= COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -693,7 +705,8 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x80030000,
 		.cpu_name		= "7447A",
 		.cpu_features		= CPU_FTRS_7447A,
-		.cpu_user_features	= COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+		.cpu_user_features	= COMMON_USER |
+			PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -707,7 +720,8 @@ struct cpu_spec cpu_specs[] = {
 		.pvr_value		= 0x80040000,
 		.cpu_name		= "7448",
 		.cpu_features		= CPU_FTRS_7447A,
-		.cpu_user_features	= COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
+		.cpu_user_features	= COMMON_USER |
+			PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 2dd47d2dd998..2d35d83961b2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -708,6 +708,50 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
 	return put_user(val, (unsigned int __user *) adr);
 }
 
+int set_endian(struct task_struct *tsk, unsigned int val)
+{
+	struct pt_regs *regs = tsk->thread.regs;
+
+	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
+	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
+		return -EINVAL;
+
+	if (regs == NULL)
+		return -EINVAL;
+
+	if (val == PR_ENDIAN_BIG)
+		regs->msr &= ~MSR_LE;
+	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
+		regs->msr |= MSR_LE;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+int get_endian(struct task_struct *tsk, unsigned long adr)
+{
+	struct pt_regs *regs = tsk->thread.regs;
+	unsigned int val;
+
+	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
+	    !cpu_has_feature(CPU_FTR_REAL_LE))
+		return -EINVAL;
+
+	if (regs == NULL)
+		return -EINVAL;
+
+	if (regs->msr & MSR_LE) {
+		if (cpu_has_feature(CPU_FTR_REAL_LE))
+			val = PR_ENDIAN_LITTLE;
+		else
+			val = PR_ENDIAN_PPC_LITTLE;
+	} else
+		val = PR_ENDIAN_BIG;
+
+	return put_user(val, (unsigned int __user *)adr);
+}
+
 #define TRUNC_PTR(x)	((typeof(x))(((unsigned long)(x)) & 0xffffffff))
 
 int sys_clone(unsigned long clone_flags, unsigned long usp,
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 22f078984845..237faeec2ec2 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -419,9 +419,7 @@ static long restore_user_regs(struct pt_regs *regs,
 {
 	long err;
 	unsigned int save_r2 = 0;
-#if defined(CONFIG_ALTIVEC) || defined(CONFIG_SPE)
 	unsigned long msr;
-#endif
 
 	/*
 	 * restore general registers but not including MSR or SOFTE. Also
@@ -430,11 +428,16 @@ static long restore_user_regs(struct pt_regs *regs,
 	if (!sig)
 		save_r2 = (unsigned int)regs->gpr[2];
 	err = restore_general_regs(regs, sr);
+	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
 	if (!sig)
 		regs->gpr[2] = (unsigned long) save_r2;
 	if (err)
 		return 1;
 
+	/* if doing signal return, restore the previous little-endian mode */
+	if (sig)
+		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+
 	/*
 	 * Do this before updating the thread state in
 	 * current->thread.fpr/vr/evr. That way, if we get preempted
@@ -455,7 +458,7 @@ static long restore_user_regs(struct pt_regs *regs,
 	/* force the process to reload the altivec registers from
 	   current->thread when it next does altivec instructions */
 	regs->msr &= ~MSR_VEC;
-	if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_VEC) != 0) {
+	if (msr & MSR_VEC) {
 		/* restore altivec registers from the stack */
 		if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
 				     sizeof(sr->mc_vregs)))
@@ -472,7 +475,7 @@ static long restore_user_regs(struct pt_regs *regs,
 	/* force the process to reload the spe registers from
 	   current->thread when it next does spe instructions */
 	regs->msr &= ~MSR_SPE;
-	if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_SPE) != 0) {
+	if (msr & MSR_SPE) {
 		/* restore spe registers from the stack */
 		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
 				     ELF_NEVRREG * sizeof(u32)))
@@ -777,6 +780,8 @@ static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
 	regs->gpr[5] = (unsigned long) &rt_sf->uc;
 	regs->gpr[6] = (unsigned long) rt_sf;
 	regs->nip = (unsigned long) ka->sa.sa_handler;
+	/* enter the signal handler in big-endian mode */
+	regs->msr &= ~MSR_LE;
 	regs->trap = 0;
 	return 1;
 
@@ -1047,6 +1052,8 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
 	regs->gpr[3] = sig;
 	regs->gpr[4] = (unsigned long) sc;
 	regs->nip = (unsigned long) ka->sa.sa_handler;
+	/* enter the signal handler in big-endian mode */
+	regs->msr &= ~MSR_LE;
 	regs->trap = 0;
 
 	return 1;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 23ba69c26913..66a5fbe31989 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -141,9 +141,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 	unsigned long err = 0;
 	unsigned long save_r13 = 0;
 	elf_greg_t *gregs = (elf_greg_t *)regs;
-#ifdef CONFIG_ALTIVEC
 	unsigned long msr;
-#endif
 	int i;
 
 	/* If this is not a signal return, we preserve the TLS in r13 */
@@ -154,7 +152,12 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 	err |= __copy_from_user(regs, &sc->gp_regs,
 				PT_MSR*sizeof(unsigned long));
 
-	/* skip MSR and SOFTE */
+	/* get MSR separately, transfer the LE bit if doing signal return */
+	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
+	if (sig)
+		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+
+	/* skip SOFTE */
 	for (i = PT_MSR+1; i <= PT_RESULT; i++) {
 		if (i == PT_SOFTE)
 			continue;
@@ -179,7 +182,6 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 
 #ifdef CONFIG_ALTIVEC
 	err |= __get_user(v_regs, &sc->v_regs);
-	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
 	if (err)
 		return err;
 	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
@@ -410,6 +412,8 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
 
 	/* Set up "regs" so we "return" to the signal handler.  */
 	err |= get_user(regs->nip, &funct_desc_ptr->entry);
+	/* enter the signal handler in big-endian mode */
+	regs->msr &= ~MSR_LE;
 	regs->gpr[1] = newsp;
 	err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
 	regs->gpr[3] = signr;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 064a52564692..03def5715494 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -658,7 +658,7 @@ static int emulate_instruction(struct pt_regs *regs)
 	u32 instword;
 	u32 rd;
 
-	if (!user_mode(regs))
+	if (!user_mode(regs) || (regs->msr & MSR_LE))
 		return -EINVAL;
 	CHECK_FULL_REGS(regs);
 
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index defc166379d2..69f2c242797f 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -24,6 +24,9 @@
 #define PPC_FEATURE_ICACHE_SNOOP	0x00002000
 #define PPC_FEATURE_ARCH_2_05		0x00001000
 
+#define PPC_FEATURE_TRUE_LE		0x00000002
+#define PPC_FEATURE_PPC_LE		0x00000001
+
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
@@ -111,6 +114,8 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
 #define CPU_FTR_NO_BTIC			ASM_CONST(0x0000000000040000)
 #define CPU_FTR_BIG_PHYS		ASM_CONST(0x0000000000080000)
 #define CPU_FTR_NODSISRALIGN		ASM_CONST(0x0000000000100000)
+#define CPU_FTR_PPC_LE			ASM_CONST(0x0000000000200000)
+#define CPU_FTR_REAL_LE			ASM_CONST(0x0000000000400000)
 
 #ifdef __powerpc64__
 /* Add the 64b processor unique features in the top half of the word */
@@ -197,92 +202,95 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
 #define CPU_FTRS_PPC601	(CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE)
 #define CPU_FTRS_603	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
-	    CPU_FTR_MAYBE_CAN_NAP)
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
 #define CPU_FTRS_604	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
-	    CPU_FTR_USE_TB | CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE)
+	    CPU_FTR_USE_TB | CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE | \
+	    CPU_FTR_PPC_LE)
 #define CPU_FTRS_740_NOTAU	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP)
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
 #define CPU_FTRS_740	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
-	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP)
+	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
+	    CPU_FTR_PPC_LE)
 #define CPU_FTRS_750	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
-	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP)
+	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
+	    CPU_FTR_PPC_LE)
 #define CPU_FTRS_750FX1	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
 	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
-	    CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM)
+	    CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM | CPU_FTR_PPC_LE)
 #define CPU_FTRS_750FX2	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
 	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
-	    CPU_FTR_NO_DPM)
+	    CPU_FTR_NO_DPM | CPU_FTR_PPC_LE)
 #define CPU_FTRS_750FX	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
 	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
-	    CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS)
+	    CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE)
 #define CPU_FTRS_750GX	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \
 	    CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
-	    CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS)
+	    CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7400_NOTAU	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \
-	    CPU_FTR_MAYBE_CAN_NAP)
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7400	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
 	    CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \
-	    CPU_FTR_MAYBE_CAN_NAP)
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7450_20	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
 	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
-	    CPU_FTR_NEED_COHERENT)
+	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7450_21	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
 	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
 	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
-	    CPU_FTR_NEED_COHERENT)
+	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7450_23	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
 	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
-	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT)
+	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7455_1	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS | \
-	    CPU_FTR_NEED_COHERENT)
+	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7455_20	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
 	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
 	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
-	    CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS)
+	    CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7455	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
 	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
 	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
-	    CPU_FTR_NEED_COHERENT)
+	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7447_10	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
 	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
 	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
-	    CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC)
+	    CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7447	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
 	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
 	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
-	    CPU_FTR_NEED_COHERENT)
+	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7447A	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
 	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
-	    CPU_FTR_NEED_COHERENT)
+	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
 #define CPU_FTRS_82XX	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB)
 #define CPU_FTRS_G2_LE	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \
@@ -312,7 +320,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
 #define CPU_FTRS_GENERIC_32	(CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
 #ifdef __powerpc64__
 #define CPU_FTRS_POWER3	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_IABR)
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE)
 #define CPU_FTRS_RS64	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \
 	    CPU_FTR_MMCRA | CPU_FTR_CTRL)
@@ -330,7 +338,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
-	    CPU_FTR_PURR | CPU_FTR_CI_LARGE_PAGE)
+	    CPU_FTR_PURR | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_REAL_LE)
 #define CPU_FTRS_CELL	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
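Since the new PPC_FEATURE_* bits are exported through AT_HWCAP (as noted
in the commit message), userland can test for them before requesting a
mode switch.  A hedged sketch using glibc's getauxval(), which is a
later convenience and not part of this patch; the bit values are the
ones defined in the cputable.h hunk above:

	#include <stdio.h>
	#include <sys/auxv.h>

	#define PPC_FEATURE_TRUE_LE	0x00000002
	#define PPC_FEATURE_PPC_LE	0x00000001

	int main(void)
	{
		unsigned long hwcap = getauxval(AT_HWCAP);

		if (hwcap & PPC_FEATURE_TRUE_LE)
			printf("CPU supports true little-endian mode\n");
		if (hwcap & PPC_FEATURE_PPC_LE)
			printf("CPU supports PPC (address-swizzled) little-endian\n");
		return 0;
	}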
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index d5c7ef1cca26..fa6163268153 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -211,6 +211,12 @@ unsigned long get_wchan(struct task_struct *p);
 extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
 extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
 
+#define GET_ENDIAN(tsk, adr) get_endian((tsk), (adr))
+#define SET_ENDIAN(tsk, val) set_endian((tsk), (val))
+
+extern int get_endian(struct task_struct *tsk, unsigned long adr);
+extern int set_endian(struct task_struct *tsk, unsigned int val);
+
 static inline unsigned int __unpack_fe01(unsigned long msr_bits)
 {
 	return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);