Diffstat (limited to 'include/asm-ppc64')
-rw-r--r--   include/asm-ppc64/a.out.h      2
-rw-r--r--   include/asm-ppc64/bug.h        7
-rw-r--r--   include/asm-ppc64/elf.h        8
-rw-r--r--   include/asm-ppc64/page.h      15
-rw-r--r--   include/asm-ppc64/pgalloc.h    2
-rw-r--r--   include/asm-ppc64/pgtable.h   41
-rw-r--r--   include/asm-ppc64/signal.h    13
-rw-r--r--   include/asm-ppc64/spinlock.h   8
8 files changed, 51 insertions(+), 45 deletions(-)
diff --git a/include/asm-ppc64/a.out.h b/include/asm-ppc64/a.out.h
index 802338efcb19..3871e252a6f1 100644
--- a/include/asm-ppc64/a.out.h
+++ b/include/asm-ppc64/a.out.h
@@ -1,8 +1,6 @@
 #ifndef __PPC64_A_OUT_H__
 #define __PPC64_A_OUT_H__
 
-#include <asm/ppcdebug.h>
-
 /*
  * c 2001 PPC 64 Team, IBM Corp
  *
diff --git a/include/asm-ppc64/bug.h b/include/asm-ppc64/bug.h
index db31dd22233c..169868fa307d 100644
--- a/include/asm-ppc64/bug.h
+++ b/include/asm-ppc64/bug.h
@@ -26,6 +26,8 @@ struct bug_entry *find_bug(unsigned long bugaddr);
  */
 #define BUG_WARNING_TRAP	0x1000000
 
+#ifdef CONFIG_BUG
+
 #define BUG() do { \
 	__asm__ __volatile__( \
 		"1:	twi 31,0,0\n" \
@@ -55,11 +57,12 @@ struct bug_entry *find_bug(unsigned long bugaddr);
55 "i" (__FILE__), "i" (__FUNCTION__)); \ 57 "i" (__FILE__), "i" (__FUNCTION__)); \
56} while (0) 58} while (0)
57 59
58#endif
59
60#define HAVE_ARCH_BUG 60#define HAVE_ARCH_BUG
61#define HAVE_ARCH_BUG_ON 61#define HAVE_ARCH_BUG_ON
62#define HAVE_ARCH_WARN_ON 62#define HAVE_ARCH_WARN_ON
63#endif
64#endif
65
63#include <asm-generic/bug.h> 66#include <asm-generic/bug.h>
64 67
65#endif 68#endif
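The bug.h change wraps the trap-based BUG()/WARN_ON() macros in #ifdef CONFIG_BUG and only advertises HAVE_ARCH_BUG, HAVE_ARCH_BUG_ON and HAVE_ARCH_WARN_ON inside that block, so a CONFIG_BUG=n kernel falls back to the stubs in asm-generic/bug.h. A simplified sketch of that fallback logic (illustrative only, not the verbatim generic header):

	#ifdef CONFIG_BUG
	# ifndef HAVE_ARCH_BUG		/* used only if the arch did not provide its own */
	#  define BUG() do { \
		printk("BUG at %s:%d!\n", __FILE__, __LINE__); \
		panic("BUG!"); \
	} while (0)
	# endif
	#else
	/* CONFIG_BUG=n: everything compiles away */
	# ifndef HAVE_ARCH_BUG
	#  define BUG()
	# endif
	# ifndef HAVE_ARCH_BUG_ON
	#  define BUG_ON(condition)	do { if (condition) ; } while (0)
	# endif
	#endif
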
diff --git a/include/asm-ppc64/elf.h b/include/asm-ppc64/elf.h
index 8457d90244fb..6c42d61bedd1 100644
--- a/include/asm-ppc64/elf.h
+++ b/include/asm-ppc64/elf.h
@@ -229,9 +229,13 @@ do { \
 
 /*
  * An executable for which elf_read_implies_exec() returns TRUE will
- * have the READ_IMPLIES_EXEC personality flag set automatically.
+ * have the READ_IMPLIES_EXEC personality flag set automatically. This
+ * is only required to work around bugs in old 32bit toolchains. Since
+ * the 64bit ABI has never had these issues dont enable the workaround
+ * even if we have an executable stack.
  */
-#define elf_read_implies_exec(ex, exec_stk) (exec_stk != EXSTACK_DISABLE_X)
+#define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \
+		(exec_stk != EXSTACK_DISABLE_X) : 0)
 
 #endif
 
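The elf.h hunk restricts the READ_IMPLIES_EXEC workaround to 32-bit tasks: only with TIF_32BIT set does an executable-stack marking (anything other than EXSTACK_DISABLE_X) set the personality flag, while 64-bit binaries never get it. The macro is consumed by the ELF loader roughly as below; the sketch follows the generic fs/binfmt_elf.c pattern of that era, and loc->elf_ex / executable_stack are assumed names, not part of this patch.

	/* sketch: how load_elf_binary() uses the per-arch macro */
	int executable_stack = EXSTACK_DEFAULT;	/* overridden by PT_GNU_STACK, if present */

	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;
	/* with READ_IMPLIES_EXEC set, PROT_READ mappings are later given PROT_EXEC too */
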
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h
index 20e0f19324e8..86219574c1a5 100644
--- a/include/asm-ppc64/page.h
+++ b/include/asm-ppc64/page.h
@@ -252,10 +252,19 @@ extern u64 ppc64_pft_size; /* Log 2 of page table size */
 
 /*
  * This is the default if a program doesn't have a PT_GNU_STACK
- * program header entry.
+ * program header entry. The PPC64 ELF ABI has a non executable stack
+ * stack by default, so in the absense of a PT_GNU_STACK program header
+ * we turn execute permission off.
  */
-#define VM_STACK_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
-				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_STACK_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
+					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define VM_STACK_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
+					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define VM_STACK_DEFAULT_FLAGS \
+	(test_thread_flag(TIF_32BIT) ? \
+	 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
 
 #endif /* __KERNEL__ */
 #endif /* _PPC64_PAGE_H */
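Together with the elf.h change, this gives 64-bit processes a non-executable stack by default while 32-bit processes keep VM_EXEC for old-toolchain compatibility. VM_STACK_DEFAULT_FLAGS ends up on the stack VMA created during exec; a rough sketch of the generic plumbing (VM_STACK_FLAGS and the setup_arg_pages() assignment are taken from the generic mm code of that period, not from this patch):

	/* sketch: how the per-arch default reaches the new process stack */
	#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

	/* in setup_arg_pages(), the freshly built stack vma inherits these flags */
	mpnt->vm_flags = VM_STACK_FLAGS;
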
diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-ppc64/pgalloc.h
index 16232d740173..4fc4b739b380 100644
--- a/include/asm-ppc64/pgalloc.h
+++ b/include/asm-ppc64/pgalloc.h
@@ -27,7 +27,7 @@ pgd_free(pgd_t *pgd)
 	kmem_cache_free(zero_cache, pgd);
 }
 
-#define pgd_populate(MM, PGD, PMD)	pgd_set(PGD, PMD)
+#define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
 
 static inline pmd_t *
 pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
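With the pud level folded into the pgd (see the pgtable.h change below), generic mm code now plugs a freshly allocated pmd page into the level above it via pud_populate() rather than pgd_populate(), so the ppc64 helper is renamed accordingly. A rough sketch of the caller, paraphrasing the generic __pmd_alloc() pattern (the pud/new variables are assumed names):

	/* sketch: generic pmd allocation reaching the arch macro */
	pmd_t *new = pmd_alloc_one(mm, address);
	if (new) {
		if (pud_present(*pud))			/* lost a race: someone else populated it */
			pmd_free(new);
		else
			pud_populate(mm, pud, new);	/* was pgd_populate()/pgd_set() before */
	}
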
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
index a26120517c54..b984e2747e0c 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-ppc64/pgtable.h
@@ -1,8 +1,6 @@
 #ifndef _PPC64_PGTABLE_H
 #define _PPC64_PGTABLE_H
 
-#include <asm-generic/4level-fixup.h>
-
 /*
  * This file contains the functions and defines necessary to modify and use
  * the ppc64 hashed page table.
@@ -17,6 +15,8 @@
 #include <asm/tlbflush.h>
 #endif /* __ASSEMBLY__ */
 
+#include <asm-generic/pgtable-nopud.h>
+
 /* PMD_SHIFT determines what a second-level page table entry can map */
 #define PMD_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
 #define PMD_SIZE	(1UL << PMD_SHIFT)
@@ -228,12 +228,13 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #define pmd_page_kernel(pmd)	\
 	(__bpn_to_ba(pmd_val(pmd) >> PMD_TO_PTEPAGE_SHIFT))
 #define pmd_page(pmd)		virt_to_page(pmd_page_kernel(pmd))
-#define pgd_set(pgdp, pmdp)	(pgd_val(*(pgdp)) = (__ba_to_bpn(pmdp)))
-#define pgd_none(pgd)		(!pgd_val(pgd))
-#define pgd_bad(pgd)		((pgd_val(pgd)) == 0)
-#define pgd_present(pgd)	(pgd_val(pgd) != 0UL)
-#define pgd_clear(pgdp)		(pgd_val(*(pgdp)) = 0UL)
-#define pgd_page(pgd)		(__bpn_to_ba(pgd_val(pgd)))
+
+#define pud_set(pudp, pmdp)	(pud_val(*(pudp)) = (__ba_to_bpn(pmdp)))
+#define pud_none(pud)		(!pud_val(pud))
+#define pud_bad(pud)		((pud_val(pud)) == 0UL)
+#define pud_present(pud)	(pud_val(pud) != 0UL)
+#define pud_clear(pudp)		(pud_val(*(pudp)) = 0UL)
+#define pud_page(pud)		(__bpn_to_ba(pud_val(pud)))
 
 /*
  * Find an entry in a page-table-directory. We combine the address region
@@ -245,12 +246,13 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
 
 /* Find an entry in the second-level page table.. */
-#define pmd_offset(dir,addr) \
-  ((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+#define pmd_offset(pudp,addr) \
+  ((pmd_t *) pud_page(*(pudp)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
 
 /* Find an entry in the third-level page table.. */
 #define pte_offset_kernel(dir,addr) \
-  ((pte_t *) pmd_page_kernel(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+  ((pte_t *) pmd_page_kernel(*(dir)) \
+   + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
 
 #define pte_offset_map(dir,addr)		pte_offset_kernel((dir), (addr))
 #define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
@@ -582,19 +584,22 @@ extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
 {
 	pgd_t *pg;
+	pud_t *pu;
 	pmd_t *pm;
 	pte_t *pt = NULL;
 	pte_t pte;
 
 	pg = pgdir + pgd_index(ea);
 	if (!pgd_none(*pg)) {
-
-		pm = pmd_offset(pg, ea);
-		if (pmd_present(*pm)) {
-			pt = pte_offset_kernel(pm, ea);
-			pte = *pt;
-			if (!pte_present(pte))
-				pt = NULL;
+		pu = pud_offset(pg, ea);
+		if (!pud_none(*pu)) {
+			pm = pmd_offset(pu, ea);
+			if (pmd_present(*pm)) {
+				pt = pte_offset_kernel(pm, ea);
+				pte = *pt;
+				if (!pte_present(pte))
+					pt = NULL;
+			}
 		}
 	}
 
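These pgtable.h hunks switch ppc64 from the old asm-generic/4level-fixup.h compatibility scheme to asm-generic/pgtable-nopud.h: the pud level is folded into the pgd, the old pgd_set/pgd_none/pgd_bad/pgd_present/pgd_clear/pgd_page accessors become their pud_* equivalents, pmd_offset() now takes a pud pointer, and walkers such as find_linux_pte() gain an explicit pud step. The nopud header achieves the folding by treating a pud as a thin wrapper around a pgd entry, approximately like this (a sketch of the idea, not the verbatim generic header):

	/* sketch: how asm-generic/pgtable-nopud.h folds the pud into the pgd */
	typedef struct { pgd_t pgd; } pud_t;

	#define PTRS_PER_PUD	1
	#define pud_val(x)	(pgd_val((x).pgd))

	static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
	{
		return (pud_t *)pgd;	/* the "pud" is just the pgd entry itself */
	}

	/* pgd_none()/pgd_bad()/pgd_present() become trivial constants, so the
	 * pgd -> pud -> pmd -> pte walk costs nothing extra on ppc64. */
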
diff --git a/include/asm-ppc64/signal.h b/include/asm-ppc64/signal.h
index fe5401adb41b..a2d7bbb4befd 100644
--- a/include/asm-ppc64/signal.h
+++ b/include/asm-ppc64/signal.h
@@ -96,19 +96,6 @@ typedef struct {
 
 #define MINSIGSTKSZ	2048
 #define SIGSTKSZ	8192
-#ifdef __KERNEL__
-
-/*
- * These values of sa_flags are used only by the kernel as part of the
- * irq handling routines.
- *
- * SA_INTERRUPT is also used by the irq handling routines.
- * SA_SHIRQ is for shared interrupt support on PCI and EISA.
- */
-#define SA_PROBE		SA_ONESHOT
-#define SA_SAMPLE_RANDOM	SA_RESTART
-#define SA_SHIRQ		0x04000000
-#endif
 
 #define SIG_BLOCK          0	/* for blocking signals */
 #define SIG_UNBLOCK        1	/* for unblocking signals */
diff --git a/include/asm-ppc64/spinlock.h b/include/asm-ppc64/spinlock.h
index a9b2a1162cf7..acd11564dd75 100644
--- a/include/asm-ppc64/spinlock.h
+++ b/include/asm-ppc64/spinlock.h
@@ -110,7 +110,7 @@ static void __inline__ _raw_spin_lock(spinlock_t *lock)
 			HMT_low();
 			if (SHARED_PROCESSOR)
 				__spin_yield(lock);
-		} while (likely(lock->lock != 0));
+		} while (unlikely(lock->lock != 0));
 		HMT_medium();
 	}
 }
@@ -128,7 +128,7 @@ static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flag
 			HMT_low();
 			if (SHARED_PROCESSOR)
 				__spin_yield(lock);
-		} while (likely(lock->lock != 0));
+		} while (unlikely(lock->lock != 0));
 		HMT_medium();
 		local_irq_restore(flags_dis);
 	}
@@ -194,7 +194,7 @@ static void __inline__ _raw_read_lock(rwlock_t *rw)
 			HMT_low();
 			if (SHARED_PROCESSOR)
 				__rw_yield(rw);
-		} while (likely(rw->lock < 0));
+		} while (unlikely(rw->lock < 0));
 		HMT_medium();
 	}
 }
@@ -251,7 +251,7 @@ static void __inline__ _raw_write_lock(rwlock_t *rw)
 			HMT_low();
 			if (SHARED_PROCESSOR)
 				__rw_yield(rw);
-		} while (likely(rw->lock != 0));
+		} while (unlikely(rw->lock != 0));
 		HMT_medium();
 	}
 }
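The four spinlock.h hunks only flip the branch-prediction hint on the busy-wait loops from likely() to unlikely(); the locking logic is unchanged. The hint macros come from include/linux/compiler.h and map onto GCC's __builtin_expect(), so marking the loop-back condition unlikely() makes the compiler lay out the code for the case where the lock has been observed free and the loop falls through to HMT_medium() and another trylock attempt:

	/* the hint macros, as defined in include/linux/compiler.h */
	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)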