author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-11 16:22:43 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-11 16:22:43 -0400
commit | 4f0ac854167846bd55cd81dbc9a36e03708aa01c (patch)
tree | 0eb34d18a667f8e68ad9255f791560b028fed2a6 /arch/powerpc/include
parent | b9356c53ba2f593081e5aa45eb67adcce243d1c0 (diff)
parent | 6b58e7f146f8d79c08f62087f928e1f01747de71 (diff)
Merge branch 'perfcounters-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perfcounters-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (60 commits)
perf tools: Avoid unnecessary work in directory lookups
perf stat: Clean up statistics calculations a bit more
perf stat: More advanced variance computation
perf stat: Use stddev_mean in stead of stddev
perf stat: Remove the limit on repeat
perf stat: Change noise calculation to use stddev
x86, perf_counter, bts: Do not allow kernel BTS tracing for now
x86, perf_counter, bts: Correct pointer-to-u64 casts
x86, perf_counter, bts: Fail if BTS is not available
perf_counter: Fix output-sharing error path
perf trace: Fix read_string()
perf trace: Print out in nanoseconds
perf tools: Seek to the end of the header area
perf trace: Fix parsing of perf.data
perf trace: Sample timestamps as well
perf_counter: Introduce new (non-)paranoia level to allow raw tracepoint access
perf trace: Sample the CPU too
perf tools: Work around strict aliasing related warnings
perf tools: Clean up warnings list in the Makefile
perf tools: Complete support for dynamic strings
...
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r-- | arch/powerpc/include/asm/pgtable.h | 6
1 file changed, 3 insertions, 3 deletions
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index eb17da781128..2a5da069714e 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -104,8 +104,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	else
 		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
 
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
-	/* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+	/* Second case is 32-bit with 64-bit PTE. In this case, we
 	 * can just store as long as we do the two halves in the right order
 	 * with a barrier in between. This is possible because we take care,
 	 * in the hash code, to pre-invalidate if the PTE was already hashed,
@@ -140,7 +140,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #else
 	/* Anything else just stores the PTE normally. That covers all 64-bit
-	 * cases, and 32-bit non-hash with 64-bit PTEs in UP mode
+	 * cases, and 32-bit non-hash with 32-bit PTEs.
 	 */
 	*ptep = pte;
 #endif
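The comment kept by the first hunk rests on a store-ordering argument: a 32-bit CPU cannot write a 64-bit PTE atomically, so the two halves have to be stored in a fixed order with a barrier between them, and the hash code pre-invalidates any PTE that was already hashed. Below is a minimal C sketch of that idea only; the pte64_t layout, the assumption that the low word carries the valid bit, and the compiler-barrier wmb() are illustrative assumptions, not the kernel's actual __set_pte_at() code.

/* Illustrative sketch, NOT the kernel's __set_pte_at(): shows the
 * "two halves plus a barrier" store order the comment above relies on.
 * The union layout, the placement of the valid bit in the low word,
 * and the wmb() definition are simplifying assumptions.
 */
#include <stdint.h>

typedef union {
	uint64_t whole;
	struct {
		uint32_t lo;	/* assumed: holds the valid/present bits */
		uint32_t hi;	/* assumed: remaining PTE bits */
	} half;
} pte64_t;

/* stand-in barrier: only orders the compiler, not the hardware */
#define wmb()	__asm__ __volatile__("" ::: "memory")

static void set_pte_two_halves(volatile pte64_t *ptep, uint64_t val)
{
	pte64_t tmp = { .whole = val };

	/* Store the half without the valid bit first, then barrier, then
	 * the half that makes the entry visible: a concurrent walker that
	 * observes the entry as valid is guaranteed to see both halves.
	 */
	ptep->half.hi = tmp.half.hi;
	wmb();
	ptep->half.lo = tmp.half.lo;
}

On a real SMP system the store between the two halves needs a hardware write barrier, not just the compiler barrier used in this sketch; the point of the example is only the ordering of the two 32-bit stores around it.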