Diffstat (limited to 'include/asm-powerpc/pgtable-ppc32.h')
-rw-r--r--  include/asm-powerpc/pgtable-ppc32.h  67
1 file changed, 0 insertions(+), 67 deletions(-)
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index 7fb730c62f83..86a54a4a8a2a 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -6,11 +6,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/sched.h>
 #include <linux/threads.h>
-#include <asm/processor.h> /* For TASK_SIZE */
-#include <asm/mmu.h>
-#include <asm/page.h>
 #include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */
-struct mm_struct;
 
 extern unsigned long va_to_phys(unsigned long address);
 extern pte_t *va_to_pte(unsigned long address);
@@ -488,14 +484,6 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
 #define pfn_pte(pfn, prot) __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\
                                  pgprot_val(prot))
 #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
 #endif /* __ASSEMBLY__ */
 
 #define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
@@ -512,9 +500,7 @@ extern unsigned long empty_zero_page[1024];
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
  */
-static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
 static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
-static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
@@ -522,21 +508,13 @@ static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
 static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
 static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
 
-static inline pte_t pte_rdprotect(pte_t pte) {
-        pte_val(pte) &= ~_PAGE_USER; return pte; }
 static inline pte_t pte_wrprotect(pte_t pte) {
         pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
-static inline pte_t pte_exprotect(pte_t pte) {
-        pte_val(pte) &= ~_PAGE_EXEC; return pte; }
 static inline pte_t pte_mkclean(pte_t pte) {
         pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
 static inline pte_t pte_mkold(pte_t pte) {
         pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
 
-static inline pte_t pte_mkread(pte_t pte) {
-        pte_val(pte) |= _PAGE_USER; return pte; }
-static inline pte_t pte_mkexec(pte_t pte) {
-        pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
 static inline pte_t pte_mkwrite(pte_t pte) {
         pte_val(pte) |= _PAGE_RW; return pte; }
 static inline pte_t pte_mkdirty(pte_t pte) {
@@ -643,13 +621,6 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon
 #define ptep_test_and_clear_young(__vma, __addr, __ptep) \
         __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
 
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,
-                                            unsigned long addr, pte_t *ptep)
-{
-        return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
-}
-
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep)
@@ -734,10 +705,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
 #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
 
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
-extern void paging_init(void);
-
 /*
  * Encode and decode a swap entry.
  * Note that the bits we use in a PTE for representing a swap entry
@@ -755,40 +722,6 @@ extern void paging_init(void);
 #define pte_to_pgoff(pte) (pte_val(pte) >> 3)
 #define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE })
 
-/* CONFIG_APUS */
-/* For virtual address to physical address conversion */
-extern void cache_clear(__u32 addr, int length);
-extern void cache_push(__u32 addr, int length);
-extern int mm_end_of_chunk (unsigned long addr, int len);
-extern unsigned long iopa(unsigned long addr);
-extern unsigned long mm_ptov(unsigned long addr) __attribute_const__;
-
-/* Values for nocacheflag and cmode */
-/* These are not used by the APUS kernel_map, but prevents
-   compilation errors. */
-#define KERNELMAP_FULL_CACHING 0
-#define KERNELMAP_NOCACHE_SER 1
-#define KERNELMAP_NOCACHE_NONSER 2
-#define KERNELMAP_NO_COPYBACK 3
-
-/*
- * Map some physical address range into the kernel address space.
- */
-extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
-                                int nocacheflag, unsigned long *memavailp );
-
-/*
- * Set cache mode of (kernel space) address range.
- */
-extern void kernel_set_cachemode (unsigned long address, unsigned long size,
-                                  unsigned int cmode);
-
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-#define kern_addr_valid(addr) (1)
-
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-                remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 /*
  * No page table caches to initialise
  */