author		Marcelo Tosatti <mtosatti@redhat.com>	2013-03-21 10:11:52 -0400
committer	Marcelo Tosatti <mtosatti@redhat.com>	2013-03-21 10:11:52 -0400
commit		2ae33b389601b86a3d0cfe2d09f5e3189d5322fd (patch)
tree		53e658f7ef511b5afb7d2e88753e1aaaed3de5ba /arch/powerpc
parent		04b66839d312d3bdaff77f265eb7305347fa1fb7 (diff)
parent		2ffdd7e23cde5a8b94d41ec0adfdd58cffe67f3a (diff)
Merge remote-tracking branch 'upstream/master' into queue
Merge reason:
From: Alexander Graf <agraf@suse.de>
"Just recently this really important patch got pulled into Linus' tree for 3.9:
commit 1674400aaee5b466c595a8fc310488263ce888c7
Author: Anton Blanchard <anton@samba.org>
Date: Tue Mar 12 01:51:51 2013 +0000
Without that commit, I can not boot my G5, thus I can't run automated tests on it against my queue.
Could you please merge kvm/next against linus/master, so that I can base my trees against that?"
* upstream/master: (653 commits)
PCI: Use ROM images from firmware only if no other ROM source available
sparc: remove unused "config BITS"
sparc: delete "if !ULTRA_HAS_POPULATION_COUNT"
KVM: Fix bounds checking in ioapic indirect register reads (CVE-2013-1798)
KVM: x86: Convert MSR_KVM_SYSTEM_TIME to use gfn_to_hva_cache functions (CVE-2013-1797)
KVM: x86: fix for buffer overflow in handling of MSR_KVM_SYSTEM_TIME (CVE-2013-1796)
arm64: Kconfig.debug: Remove unused CONFIG_DEBUG_ERRORS
arm64: Do not select GENERIC_HARDIRQS_NO_DEPRECATED
inet: limit length of fragment queue hash table bucket lists
qeth: Fix scatter-gather regression
qeth: Fix invalid router settings handling
qeth: delay feature trace
sgy-cts1000: Remove __dev* attributes
KVM: x86: fix deadlock in clock-in-progress request handling
KVM: allow host header to be included even for !CONFIG_KVM
hwmon: (lm75) Fix tcn75 prefix
hwmon: (lm75.h) Update header inclusion
MAINTAINERS: Remove Mark M. Hoffman
xfs: ensure we capture IO errors correctly
xfs: fix xfs_iomap_eof_prealloc_initial_size type
...
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/powerpc')
23 files changed, 170 insertions(+), 143 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b89d7eb730a2..ea5bb045983a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -90,6 +90,7 @@ config GENERIC_GPIO
 config PPC
 	bool
 	default y
+	select BINFMT_ELF
 	select OF
 	select OF_EARLY_FLATTREE
 	select HAVE_FTRACE_MCOUNT_RECORD
@@ -98,7 +99,7 @@ config PPC
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select SYSCTL_EXCEPTION_TRACE
 	select ARCH_WANT_OPTIONAL_GPIOLIB
-	select HAVE_VIRT_TO_BUS if !PPC64
+	select VIRT_TO_BUS if !PPC64
 	select HAVE_IDE
 	select HAVE_IOREMAP_PROT
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
diff --git a/arch/powerpc/crypto/sha1-powerpc-asm.S b/arch/powerpc/crypto/sha1-powerpc-asm.S
index a5f8264d2d3c..125e16520061 100644
--- a/arch/powerpc/crypto/sha1-powerpc-asm.S
+++ b/arch/powerpc/crypto/sha1-powerpc-asm.S
@@ -113,7 +113,7 @@
 	STEPUP4((t)+16, fn)
 
 _GLOBAL(powerpc_sha_transform)
-	PPC_STLU r1,-STACKFRAMESIZE(r1)
+	PPC_STLU r1,-INT_FRAME_SIZE(r1)
 	SAVE_8GPRS(14, r1)
 	SAVE_10GPRS(22, r1)
 
@@ -175,5 +175,5 @@ _GLOBAL(powerpc_sha_transform)
 
 	REST_8GPRS(14, r1)
 	REST_10GPRS(22, r1)
-	addi	r1,r1,STACKFRAMESIZE
+	addi	r1,r1,INT_FRAME_SIZE
 	blr
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index ef918a2328bb..08bd299c75b1 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -52,8 +52,6 @@
 #define smp_mb__before_clear_bit()	smp_mb()
 #define smp_mb__after_clear_bit()	smp_mb()
 
-#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
-
 /* Macro for generating the ***_bits() functions */
 #define DEFINE_BITOP(fn, op, prefix, postfix)	\
 static __inline__ void fn(unsigned long mask,	\
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 2fdb47a19efd..b59e06f507ea 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -343,17 +343,16 @@ extern void slb_set_size(u16 size);
 /*
  * VSID allocation (256MB segment)
  *
- * We first generate a 38-bit "proto-VSID". For kernel addresses this
- * is equal to the ESID | 1 << 37, for user addresses it is:
- *	(context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1)
+ * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
+ * from mmu context id and effective segment id of the address.
  *
- * This splits the proto-VSID into the below range
- *  0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range
- *  2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range
- *
- * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1
- * That is, we assign half of the space to user processes and half
- * to the kernel.
+ * For user processes max context id is limited to ((1ul << 19) - 5)
+ * for kernel space, we use the top 4 context ids to map address as below
+ * NOTE: each context only support 64TB now.
+ * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
+ * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
+ * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
+ * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
  *
  * The proto-VSIDs are then scrambled into real VSIDs with the
  * multiplicative hash:
@@ -363,41 +362,49 @@ extern void slb_set_size(u16 size);
  * VSID_MULTIPLIER is prime, so in particular it is
  * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
  * Because the modulus is 2^n-1 we can compute it efficiently without
- * a divide or extra multiply (see below).
- *
- * This scheme has several advantages over older methods:
- *
- *	- We have VSIDs allocated for every kernel address
- * (i.e. everything above 0xC000000000000000), except the very top
- * segment, which simplifies several things.
+ * a divide or extra multiply (see below). The scramble function gives
+ * robust scattering in the hash table (at least based on some initial
+ * results).
  *
- *	- We allow for USER_ESID_BITS significant bits of ESID and
- * CONTEXT_BITS bits of context for user addresses.
- * i.e. 64T (46 bits) of address space for up to half a million contexts.
+ * We also consider VSID 0 special. We use VSID 0 for slb entries mapping
+ * bad address. This enables us to consolidate bad address handling in
+ * hash_page.
  *
- *	- The scramble function gives robust scattering in the hash
- * table (at least based on some initial results). The previous
- * method was more susceptible to pathological cases giving excessive
- * hash collisions.
+ * We also need to avoid the last segment of the last context, because that
+ * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
+ * because of the modulo operation in vsid scramble. But the vmemmap
+ * (which is what uses region 0xf) will never be close to 64TB in size
+ * (it's 56 bytes per page of system memory).
  */
 
+#define CONTEXT_BITS		19
+#define ESID_BITS		18
+#define ESID_BITS_1T		6
+
+/*
+ * 256MB segment
+ * The proto-VSID space has 2^(CONTEX_BITS + ESID_BITS) - 1 segments
+ * available for user + kernel mapping. The top 4 contexts are used for
+ * kernel mapping. Each segment contains 2^28 bytes. Each
+ * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
+ * (19 == 37 + 28 - 46).
+ */
+#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 5)
+
 /*
  * This should be computed such that protovosid * vsid_mulitplier
  * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
  */
 #define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
-#define VSID_BITS_256M		38
+#define VSID_BITS_256M		(CONTEXT_BITS + ESID_BITS)
 #define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)
 
 #define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
-#define VSID_BITS_1T		26
+#define VSID_BITS_1T		(CONTEXT_BITS + ESID_BITS_1T)
 #define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)
 
-#define CONTEXT_BITS		19
-#define USER_ESID_BITS		18
-#define USER_ESID_BITS_1T	6
 
-#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))
+#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))
 
 /*
  * This macro generates asm code to compute the VSID scramble
@@ -421,7 +428,8 @@ extern void slb_set_size(u16 size);
 	srdi	rx,rt,VSID_BITS_##size;					\
 	clrldi	rt,rt,(64-VSID_BITS_##size);				\
 	add	rt,rt,rx;		/* add high and low bits */	\
-	/* Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
+	/* NOTE: explanation based on VSID_BITS_##size = 36		\
+	 * Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
 	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
 	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
 	 * the bit clear, r3 already has the answer we want, if it	\
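The reduction this macro performs is easier to follow with the arithmetic written out in C. A minimal sketch (not part of the patch), assuming VSID_BITS = 36 as the comment does: because the modulus is 2^36 - 1, x mod (2^36 - 1) can be computed by adding the high and low 36-bit halves of x and applying one carry correction.

    #include <stdint.h>

    #define VSID_BITS       36
    #define VSID_MODULUS    ((1UL << VSID_BITS) - 1)
    #define VSID_MULTIPLIER 12538073UL      /* 24-bit prime */

    static uint64_t vsid_scramble(uint64_t proto_vsid)
    {
            uint64_t x = proto_vsid * VSID_MULTIPLIER;

            /* Fold the high bits into the low bits: x mod (2^36 - 1). */
            x = (x >> VSID_BITS) + (x & VSID_MODULUS);
            /* If x >= 2^36 - 1, then x + 1 carries into bit 36; adding
             * that carry back in and masking completes the reduction. */
            x += (x + 1) >> VSID_BITS;
            return x & VSID_MODULUS;
    }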
@@ -513,34 +521,6 @@ typedef struct {
 	})
 #endif /* 1 */
 
-/*
- * This is only valid for addresses >= PAGE_OFFSET
- * The proto-VSID space is divided into two class
- * User:   0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1
- * kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1
- *
- * With KERNEL_START at 0xc000000000000000, the proto vsid for
- * the kernel ends up with 0xc00000000 (36 bits). With 64TB
- * support we need to have kernel proto-VSID in the
- * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS.
- */
-static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
-{
-	unsigned long proto_vsid;
-	/*
-	 * We need to make sure proto_vsid for the kernel is
-	 * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
-	 */
-	if (ssize == MMU_SEGSIZE_256M) {
-		proto_vsid = ea >> SID_SHIFT;
-		proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
-		return vsid_scramble(proto_vsid, 256M);
-	}
-	proto_vsid = ea >> SID_SHIFT_1T;
-	proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
-	return vsid_scramble(proto_vsid, 1T);
-}
-
 /* Returns the segment size indicator for a user address */
 static inline int user_segment_size(unsigned long addr)
 {
@@ -550,17 +530,41 @@ static inline int user_segment_size(unsigned long addr)
 	return MMU_SEGSIZE_256M;
 }
 
-/* This is only valid for user addresses (which are below 2^44) */
 static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
 				     int ssize)
 {
+	/*
+	 * Bad address. We return VSID 0 for that
+	 */
+	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
+		return 0;
+
 	if (ssize == MMU_SEGSIZE_256M)
-		return vsid_scramble((context << USER_ESID_BITS)
+		return vsid_scramble((context << ESID_BITS)
 				     | (ea >> SID_SHIFT), 256M);
-	return vsid_scramble((context << USER_ESID_BITS_1T)
+	return vsid_scramble((context << ESID_BITS_1T)
 			     | (ea >> SID_SHIFT_1T), 1T);
 }
 
+/*
+ * This is only valid for addresses >= PAGE_OFFSET
+ *
+ * For kernel space, we use the top 4 context ids to map address as below
+ * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
+ * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
+ * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
+ * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
+ */
+static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
+{
+	unsigned long context;
+
+	/*
+	 * kernel take the top 4 context from the available range
+	 */
+	context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
+	return get_vsid(context, ea, ssize);
+}
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_MMU_HASH64_H_ */
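The new kernel-context arithmetic is easy to check numerically. A small sketch (an illustration, not part of the patch), assuming CONTEXT_BITS = 19 as defined above, so MAX_USER_CONTEXT = 2^19 - 5 = 0x7fffb and the four kernel regions land on the top four context ids:

    #define CONTEXT_BITS        19
    #define MAX_USER_CONTEXT    ((1UL << CONTEXT_BITS) - 5)    /* 0x7fffb */

    /* context = MAX_USER_CONTEXT + ((ea >> 60) - 0xc) + 1, giving:
     *   ea = 0xc... -> context 0x7fffc   (linear mapping)
     *   ea = 0xd... -> context 0x7fffd   (vmalloc)
     *   ea = 0xe... -> context 0x7fffe   (I/O)
     *   ea = 0xf... -> context 0x7ffff   (vmemmap)
     */
    static unsigned long kernel_context(unsigned long ea)
    {
            return MAX_USER_CONTEXT + ((ea >> 60) - 0xc) + 1;
    }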
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index e66586122030..c9c67fc888c9 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -266,7 +266,8 @@
 #define SPRN_HSRR0	0x13A	/* Hypervisor Save/Restore 0 */
 #define SPRN_HSRR1	0x13B	/* Hypervisor Save/Restore 1 */
 #define SPRN_FSCR	0x099	/* Facility Status & Control Register */
-#define   FSCR_TAR	(1<<8)	/* Enable Target Adress Register */
+#define   FSCR_TAR	(1 << (63-55)) /* Enable Target Address Register */
+#define   FSCR_DSCR	(1 << (63-61)) /* Enable Data Stream Control Register */
 #define SPRN_TAR	0x32f	/* Target Address Register */
 #define SPRN_LPCR	0x13E	/* LPAR Control Register */
 #define   LPCR_VPM0	(1ul << (63-0))
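The rewritten FSCR masks use IBM (MSB-0) bit numbering, in which bit 0 is the most significant bit of the 64-bit register; the old literal 1<<8 and the new 1 << (63-55) are the same value, but the new form matches the architecture's naming of FSCR bit 55. A sketch of the convention (the PPC_BIT helper name is illustrative, not introduced by this patch):

    /* IBM bit b of a 64-bit register is the mask 1UL << (63 - b). */
    #define PPC_BIT(b)      (1UL << (63 - (b)))

    #define FSCR_TAR        PPC_BIT(55)     /* == 1 << 8 */
    #define FSCR_DSCR       PPC_BIT(61)     /* == 1 << 2 */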
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 535b6d8a41cc..ebbec52d21bd 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -358,3 +358,4 @@ SYSCALL_SPU(setns)
 COMPAT_SYS(process_vm_readv)
 COMPAT_SYS(process_vm_writev)
 SYSCALL(finit_module)
+SYSCALL(ni_syscall) /* sys_kcmp */
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index f25b5c45c435..1487f0f12293 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls		354
+#define __NR_syscalls		355
 
 #define __NR__exit __NR_exit
 #define NR_syscalls	__NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 8c478c6c6b1e..74cb4d72d673 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -376,6 +376,7 @@
 #define __NR_process_vm_readv	351
 #define __NR_process_vm_writev	352
 #define __NR_finit_module	353
+#define __NR_kcmp		354
 
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
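Reserving a syscall number on powerpc keeps three places in sync: the uapi number above (__NR_kcmp = 354), the count (__NR_syscalls, bumped to 355 in the previous hunk), and the dispatch slot in systbl.h. That slot points at ni_syscall, so number 354 is reserved but fails cleanly until a real sys_kcmp is wired up; a sketch of what the stub does:

    #include <errno.h>

    /* Any syscall routed to an ni_syscall slot reports "not implemented". */
    long sys_ni_syscall(void)
    {
            return -ENOSYS;
    }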
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index d29facbf9a28..ea847abb0d0a 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -48,6 +48,7 @@ _GLOBAL(__restore_cpu_power7)
 
 _GLOBAL(__setup_cpu_power8)
 	mflr	r11
+	bl	__init_FSCR
 	bl	__init_hvmode_206
 	mtlr	r11
 	beqlr
@@ -56,13 +57,13 @@ _GLOBAL(__setup_cpu_power8)
 	mfspr	r3,SPRN_LPCR
 	oris	r3, r3, LPCR_AIL_3@h
 	bl	__init_LPCR
-	bl	__init_FSCR
 	bl	__init_TLB
 	mtlr	r11
 	blr
 
 _GLOBAL(__restore_cpu_power8)
 	mflr	r11
+	bl	__init_FSCR
 	mfmsr	r3
 	rldicl.	r0,r3,4,63
 	beqlr
@@ -115,7 +116,7 @@ __init_LPCR:
 
 __init_FSCR:
 	mfspr	r3,SPRN_FSCR
-	ori	r3,r3,FSCR_TAR
+	ori	r3,r3,FSCR_TAR|FSCR_DSCR
 	mtspr	SPRN_FSCR,r3
 	blr
 
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 75a3d71b895d..19599ef352bc 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -275,7 +275,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_features		= CPU_FTRS_PPC970,
 		.cpu_user_features	= COMMON_USER_POWER4 |
 			PPC_FEATURE_HAS_ALTIVEC_COMP,
-		.mmu_features		= MMU_FTR_HPTE_TABLE,
+		.mmu_features		= MMU_FTRS_PPC970,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.num_pmcs		= 8,
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a8a5361fb70c..200afa5bcfb7 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -74,13 +74,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
 	mflr	r10 ;						\
 	ld	r12,PACAKBASE(r13) ;				\
 	LOAD_HANDLER(r12, system_call_entry_direct) ;		\
-	mtlr	r12 ;						\
+	mtctr	r12 ;						\
 	mfspr	r12,SPRN_SRR1 ;					\
 	/* Re-use of r13... No spare regs to do this */		\
 	li	r13,MSR_RI ;					\
 	mtmsrd	r13,1 ;						\
 	GET_PACA(r13) ;	/* get r13 back */			\
-	blr ;
+	bctr ;
 #else
 	/* We can branch directly */
 #define SYSCALL_PSERIES_2_DIRECT				\
@@ -1452,20 +1452,36 @@ do_ste_alloc:
 _GLOBAL(do_stab_bolted)
 	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
 	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
+	mfspr	r11,SPRN_DAR			/* ea */
 
+	/*
+	 * check for bad kernel/user address
+	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
+	 */
+	rldicr. r9,r11,4,(63 - 46 - 4)
+	li	r9,0	/* VSID = 0 for bad address */
+	bne-	0f
+
+	/*
+	 * Calculate VSID:
+	 * This is the kernel vsid, we take the top for context from
+	 * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+	 * Here we know that (ea >> 60) == 0xc
+	 */
+	lis	r9,(MAX_USER_CONTEXT + 1)@ha
+	addi	r9,r9,(MAX_USER_CONTEXT + 1)@l
+
+	srdi	r10,r11,SID_SHIFT
+	rldimi	r10,r9,ESID_BITS,0 /* proto vsid */
+	ASM_VSID_SCRAMBLE(r10, r9, 256M)
+	rldic	r9,r10,12,16	/* r9 = vsid << 12 */
+
+0:
 	/* Hash to the primary group */
 	ld	r10,PACASTABVIRT(r13)
-	mfspr	r11,SPRN_DAR
-	srdi	r11,r11,28
+	srdi	r11,r11,SID_SHIFT
 	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
 
-	/* Calculate VSID */
-	/* This is a kernel address, so protovsid = ESID | 1 << 37 */
-	li	r9,0x1
-	rldimi	r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
-	ASM_VSID_SCRAMBLE(r11, r9, 256M)
-	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
-
 	/* Search the primary group for a free entry */
 1:	ld	r11,0(r10)	/* Test valid bit of the current ste */
 	andi.	r11,r11,0x80
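The rldicr. test above is the assembly form of the bad-address check that get_vsid() now performs in C: rotating the address left by 4 moves the region nibble out of the way, and the mask keeps exactly address bits 46-59, which must all be zero for the segment offset to fall inside the 64TB (2^46) range. A hedged C sketch of the same predicate, assuming the 64TB PGTABLE_RANGE:

    #define REGION_MASK     (0xfUL << 60)   /* top nibble = region id */
    #define PGTABLE_RANGE   (1UL << 46)     /* 64TB per region */

    /* True exactly when any of address bits 46..59 is set, i.e. the
     * region-local offset is out of range. */
    static int bad_address(unsigned long ea)
    {
            return (ea & ~REGION_MASK) >= PGTABLE_RANGE;
    }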
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 7f7fb7fd991b..13f8d168b3f1 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2832,11 +2832,13 @@ static void unreloc_toc(void)
 {
 }
 #else
-static void __reloc_toc(void *tocstart, unsigned long offset,
-			unsigned long nr_entries)
+static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
 {
 	unsigned long i;
-	unsigned long *toc_entry = (unsigned long *)tocstart;
+	unsigned long *toc_entry;
+
+	/* Get the start of the TOC by using r2 directly. */
+	asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
 
 	for (i = 0; i < nr_entries; i++) {
 		*toc_entry = *toc_entry + offset;
@@ -2850,8 +2852,7 @@ static void reloc_toc(void)
 	unsigned long nr_entries =
 		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
 
-	/* Need to add offset to get at __prom_init_toc_start */
-	__reloc_toc(__prom_init_toc_start + offset, offset, nr_entries);
+	__reloc_toc(offset, nr_entries);
 
 	mb();
 }
@@ -2864,8 +2865,7 @@ static void unreloc_toc(void)
 
 	mb();
 
-	/* __prom_init_toc_start has been relocated, no need to add offset */
-	__reloc_toc(__prom_init_toc_start, -offset, nr_entries);
+	__reloc_toc(-offset, nr_entries);
 }
 #endif
 #endif
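The inline asm above relies on the 64-bit PowerPC ABI convention that r2 holds the TOC pointer biased 0x8000 bytes past the start of the TOC (so signed 16-bit displacements can reach the whole 64KB region). Reading r2 directly recovers the TOC start without going through a symbol reference that may itself not have been relocated yet. A minimal sketch of that computation, assuming the convention holds:

    /* r2 = TOC start + 0x8000; subtract the bias to get the start. */
    static unsigned long *toc_start(void)
    {
            unsigned long *toc;

            asm volatile("addi %0,2,-0x8000" : "=b" (toc));
            return toc;
    }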
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 245c1b6a0858..f9b30c68ba47 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1428,6 +1428,7 @@ static long ppc_set_hwdebug(struct task_struct *child,
 
 	brk.address = bp_info->addr & ~7UL;
 	brk.type = HW_BRK_TYPE_TRANSLATE;
+	brk.len = 8;
 	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
 		brk.type |= HW_BRK_TYPE_READ;
 	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index ead58e317294..5d7d29a313eb 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -326,8 +326,8 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 	vcpu3s->context_id[0] = err;
 
 	vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
-				  << USER_ESID_BITS) - 1;
-	vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
+				  << ESID_BITS) - 1;
+	vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS;
 	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
 
 	kvmppc_mmu_hpte_init(vcpu);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 1b6e1271719f..f410c3e12c1e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -195,6 +195,11 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		unsigned long vpn  = hpt_vpn(vaddr, vsid, ssize);
 		unsigned long tprot = prot;
 
+		/*
+		 * If we hit a bad address return error.
+		 */
+		if (!vsid)
+			return -1;
 		/* Make kernel text executable */
 		if (overlaps_kernel_text(vaddr, vaddr + step))
 			tprot &= ~HPTE_R_N;
@@ -759,6 +764,8 @@ void __init early_init_mmu(void)
 	/* Initialize stab / SLB management */
 	if (mmu_has_feature(MMU_FTR_SLB))
 		slb_initialize();
+	else
+		stab_initialize(get_paca()->stab_real);
 }
 
 #ifdef CONFIG_SMP
@@ -922,11 +929,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
 		ea, access, trap);
 
-	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
-		DBG_LOW(" out of pgtable range !\n");
-		return 1;
-	}
-
 	/* Get region & vsid */
 	switch (REGION_ID(ea)) {
 	case USER_REGION_ID:
@@ -957,6 +959,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	}
 	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
 
+	/* Bad address. */
+	if (!vsid) {
+		DBG_LOW("Bad address!\n");
+		return 1;
+	}
 	/* Get pgdir */
 	pgdir = mm->pgd;
 	if (pgdir == NULL)
@@ -1126,6 +1133,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	/* Get VSID */
 	ssize = user_segment_size(ea);
 	vsid = get_vsid(mm->context.id, ea, ssize);
+	if (!vsid)
+		return;
 
 	/* Hash doesn't like irqs */
 	local_irq_save(flags);
@@ -1233,6 +1242,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
 	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
+	/* Don't create HPTE entries for bad address */
+	if (!vsid)
+		return;
 	ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
 				 mode, HPTE_V_BOLTED,
 				 mmu_linear_psize, mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 40bc5b0ace54..d1d1b92c5b99 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -29,15 +29,6 @@
 static DEFINE_SPINLOCK(mmu_context_lock);
 static DEFINE_IDA(mmu_context_ida);
 
-/*
- * 256MB segment
- * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments
- * available for user mappings. Each segment contains 2^28 bytes. Each
- * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
- * (19 == 37 + 28 - 46).
- */
-#define MAX_CONTEXT	((1UL << CONTEXT_BITS) - 1)
-
 int __init_new_context(void)
 {
 	int index;
@@ -56,7 +47,7 @@ again:
 	else if (err)
 		return err;
 
-	if (index > MAX_CONTEXT) {
+	if (index > MAX_USER_CONTEXT) {
 		spin_lock(&mmu_context_lock);
 		ida_remove(&mmu_context_ida, index);
 		spin_unlock(&mmu_context_lock);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index e212a271c7a4..654258f165ae 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -61,7 +61,7 @@
 #endif
 
 #ifdef CONFIG_PPC_STD_MMU_64
-#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
 #error TASK_SIZE_USER64 exceeds user VSID range
 #endif
 #endif
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 1a16ca227757..17aa6dfceb34 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -31,10 +31,15 @@
 * No other registers are examined or changed.
 */
 _GLOBAL(slb_allocate_realmode)
-	/* r3 = faulting address */
+	/*
+	 * check for bad kernel/user address
+	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
+	 */
+	rldicr. r9,r3,4,(63 - 46 - 4)
+	bne-	8f
 
 	srdi	r9,r3,60		/* get region */
-	srdi	r10,r3,28		/* get esid */
+	srdi	r10,r3,SID_SHIFT	/* get esid */
 	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */
 
 	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
@@ -56,12 +61,14 @@ _GLOBAL(slb_allocate_realmode)
 */
 _GLOBAL(slb_miss_kernel_load_linear)
 	li	r11,0
-	li	r9,0x1
 	/*
-	 * for 1T we shift 12 bits more.  slb_finish_load_1T will do
-	 * the necessary adjustment
+	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+	 * r9 = region id.
 	 */
-	rldimi	r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
+	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
+	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
+
+
 BEGIN_FTR_SECTION
 	b	slb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -91,24 +98,19 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
 _GLOBAL(slb_miss_kernel_load_io)
 	li	r11,0
 6:
-	li	r9,0x1
 	/*
-	 * for 1T we shift 12 bits more.  slb_finish_load_1T will do
-	 * the necessary adjustment
+	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+	 * r9 = region id.
 	 */
-	rldimi	r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
+	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
+	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
+
 BEGIN_FTR_SECTION
 	b	slb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load_1T
 
-0:	/* user address: proto-VSID = context << 15 | ESID. First check
-	 * if the address is within the boundaries of the user region
-	 */
-	srdi.	r9,r10,USER_ESID_BITS
-	bne-	8f			/* invalid ea bits set */
-
-
+0:
 	/* when using slices, we extract the psize off the slice bitmaps
 	 * and then we need to get the sllp encoding off the mmu_psize_defs
 	 * array.
@@ -164,15 +166,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	ld	r9,PACACONTEXTID(r13)
 BEGIN_FTR_SECTION
 	cmpldi	r10,0x1000
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
-	rldimi	r10,r9,USER_ESID_BITS,0
-BEGIN_FTR_SECTION
 	bge	slb_finish_load_1T
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load
 
 8:	/* invalid EA */
 	li	r10,0			/* BAD_VSID */
+	li	r9,0			/* BAD_VSID */
 	li	r11,SLB_VSID_USER	/* flags don't much matter */
 	b	slb_finish_load
 
@@ -221,8 +221,6 @@ _GLOBAL(slb_allocate_user)
 
 	/* get context to calculate proto-VSID */
 	ld	r9,PACACONTEXTID(r13)
-	rldimi	r10,r9,USER_ESID_BITS,0
-
 	/* fall through slb_finish_load */
 
 #endif /* __DISABLED__ */
@@ -231,9 +229,10 @@ _GLOBAL(slb_allocate_user)
 /*
 * Finish loading of an SLB entry and return
 *
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
+ * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
 slb_finish_load:
+	rldimi	r10,r9,ESID_BITS,0
 	ASM_VSID_SCRAMBLE(r10,r9,256M)
 	/*
 	 * bits above VSID_BITS_256M need to be ignored from r10
@@ -298,10 +297,11 @@ _GLOBAL(slb_compare_rr_to_size)
 /*
 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
 *
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
+ * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
 */
 slb_finish_load_1T:
-	srdi	r10,r10,40-28		/* get 1T ESID */
+	srdi	r10,r10,(SID_SHIFT_1T - SID_SHIFT)	/* get 1T ESID */
+	rldimi	r10,r9,ESID_BITS_1T,0
 	ASM_VSID_SCRAMBLE(r10,r9,1T)
 	/*
 	 * bits above VSID_BITS_1T need to be ignored from r10
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 0d82ef50dc3f..023ec8a13f38 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	if (!is_kernel_addr(addr)) {
 		ssize = user_segment_size(addr);
 		vsid = get_vsid(mm->context.id, addr, ssize);
-		WARN_ON(vsid == 0);
 	} else {
 		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
 		ssize = mmu_kernel_ssize;
 	}
+	WARN_ON(vsid == 0);
 	vpn = hpt_vpn(addr, vsid, ssize);
 	rpte = __real_pte(__pte(pte), ptep);
 
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index 611e92f291c4..7179726ba5c5 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -69,7 +69,7 @@ static irqreturn_t gpio_halt_irq(int irq, void *__data)
 	return IRQ_HANDLED;
 };
 
-static int __devinit gpio_halt_probe(struct platform_device *pdev)
+static int gpio_halt_probe(struct platform_device *pdev)
 {
 	enum of_gpio_flags flags;
 	struct device_node *node = pdev->dev.of_node;
@@ -128,7 +128,7 @@ static int __devinit gpio_halt_probe(struct platform_device *pdev)
 	return 0;
 }
 
-static int __devexit gpio_halt_remove(struct platform_device *pdev)
+static int gpio_halt_remove(struct platform_device *pdev)
 {
 	if (halt_node) {
 		int gpio = of_get_gpio(halt_node, 0);
@@ -165,7 +165,7 @@ static struct platform_driver gpio_halt_driver = {
 		.of_match_table = gpio_halt_match,
 	},
 	.probe		= gpio_halt_probe,
-	.remove		= __devexit_p(gpio_halt_remove),
+	.remove		= gpio_halt_remove,
 };
 
 module_platform_driver(gpio_halt_driver);
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index cea2f09c4241..18e3b76c78d7 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -124,9 +124,8 @@ config 6xx
 	select PPC_HAVE_PMU_SUPPORT
 
 config POWER3
-	bool
 	depends on PPC64 && PPC_BOOK3S
-	default y if !POWER4_ONLY
+	def_bool y
 
 config POWER4
 	depends on PPC64 && PPC_BOOK3S
@@ -145,8 +144,7 @@ config TUNE_CELL
 	  but somewhat slower on other machines. This option only changes
 	  the scheduling of instructions, not the selection of instructions
 	  itself, so the resulting kernel will keep running on all other
-	  machines. When building a kernel that is supposed to run only
-	  on Cell, you should also select the POWER4_ONLY option.
+	  machines.
 
 # this is temp to handle compat with arch=ppc
 config 8xx
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 863184b182f4..3f3bb4cdbbec 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -749,6 +749,7 @@ static struct file_system_type spufs_type = {
 	.mount = spufs_mount,
 	.kill_sb = kill_litter_super,
 };
+MODULE_ALIAS_FS("spufs");
 
 static int __init spufs_init(void)
 {
diff --git a/arch/powerpc/platforms/pseries/hvcserver.c b/arch/powerpc/platforms/pseries/hvcserver.c
index fcf4b4cbeaf3..4557e91626c4 100644
--- a/arch/powerpc/platforms/pseries/hvcserver.c
+++ b/arch/powerpc/platforms/pseries/hvcserver.c
@@ -23,6 +23,7 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 
 #include <asm/hvcall.h>
 #include <asm/hvcserver.h>
@@ -188,9 +189,9 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
 			= (unsigned int)last_p_partition_ID;
 
 		/* copy the Null-term char too */
-		strncpy(&next_partner_info->location_code[0],
+		strlcpy(&next_partner_info->location_code[0],
 			(char *)&pi_buff[2],
-			strlen((char *)&pi_buff[2]) + 1);
+			sizeof(next_partner_info->location_code));
 
 		list_add_tail(&(next_partner_info->node), head);
 		next_partner_info = NULL;
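The strncpy -> strlcpy change bounds the copy by the destination buffer rather than by the source string, so an oversized firmware-supplied location code is truncated instead of overflowing, and the result is always NUL-terminated (strlcpy is the kernel's/BSD's bounded copy, hence the added <linux/string.h> include; it is not in standard C). A sketch of the difference, with a hypothetical caller:

    #include <string.h>

    static void copy_example(char *dst, size_t dst_len, const char *src)
    {
            /* Old pattern: the bound comes from the source, so a long
             * source overruns dst. */
            strncpy(dst, src, strlen(src) + 1);

            /* New pattern: the bound is the destination size; the copy
             * is truncated if necessary and always NUL-terminated. */
            strlcpy(dst, src, dst_len);
    }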