Diffstat (limited to 'include/asm-sh/page.h')
-rw-r--r--  include/asm-sh/page.h  117
1 file changed, 67 insertions(+), 50 deletions(-)
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index cb3d46c59eab..e0fe02950f52 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -5,15 +5,7 @@
  * Copyright (C) 1999 Niibe Yutaka
  */
 
-/*
- [ P0/U0 (virtual) ]            0x00000000  <------ User space
- [ P1 (fixed)   cached ]        0x80000000  <------ Kernel space
- [ P2 (fixed)   non-cachable ]  0xA0000000  <------ Physical access
- [ P3 (virtual) cached ]        0xC0000000  <------ vmalloced area
- [ P4 control ]                 0xE0000000
- */
-
-#ifdef __KERNEL__
+#include <linux/const.h>
 
 /* PAGE_SHIFT determines the page size */
 #if defined(CONFIG_PAGE_SIZE_4KB)
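The block deleted above was the classic SH 29-bit address-map comment: P0/U0 is user space, P1 and P2 are fixed cached/uncached windows onto physical memory, P3 is the kernel's virtually mapped (vmalloc) area, and P4 is control space. A minimal userspace sketch of how those identity segments relate to physical addresses, assuming that layout; P1SEG, P2SEG and seg_to_phys() are illustrative names, not definitions from this header:

#include <stdint.h>
#include <stdio.h>

#define P1SEG 0x80000000u	/* fixed, cached   */
#define P2SEG 0xa0000000u	/* fixed, uncached */

/* P1/P2 are identity maps of the low 29 bits, so masking off the
 * segment bits recovers the physical address. */
static uint32_t seg_to_phys(uint32_t vaddr)
{
	return vaddr & 0x1fffffffu;
}

int main(void)
{
	/* the same physical byte, seen cached via P1 and uncached via P2 */
	printf("%#x\n", seg_to_phys(P1SEG + 0x08000000u));	/* 0x8000000 */
	printf("%#x\n", seg_to_phys(P2SEG + 0x08000000u));	/* 0x8000000 */
	return 0;
}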
@@ -26,15 +18,13 @@
 # error "Bogus kernel page size?"
 #endif
 
-#ifdef __ASSEMBLY__
-#define PAGE_SIZE	(1 << PAGE_SHIFT)
-#else
-#define PAGE_SIZE	(1UL << PAGE_SHIFT)
-#endif
-
+#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 #define PTE_MASK	PAGE_MASK
 
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
 #if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
 #define HPAGE_SHIFT	16
 #elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
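Folding the two PAGE_SIZE definitions into one works because _AC() from the newly included <linux/const.h> emits a bare constant under __ASSEMBLY__ and token-pastes the UL suffix on for C. Paraphrased from include/linux/const.h of this era:

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* asm: PAGE_SIZE -> (1 << PAGE_SHIFT)   */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C:   PAGE_SIZE -> (1UL << PAGE_SHIFT) */
#endif

The relocated PAGE_ALIGN() rounds up to the next page boundary: with 4 KiB pages, PAGE_ALIGN(0x1234) is 0x2000, and an already aligned 0x2000 stays 0x2000.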
@@ -45,6 +35,8 @@
 #define HPAGE_SHIFT	22
 #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
 #define HPAGE_SHIFT	26
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
+#define HPAGE_SHIFT	29
 #endif
 
 #ifdef CONFIG_HUGETLB_PAGE
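Each HPAGE_SHIFT is log2 of the corresponding Kconfig huge page size, so the new 512 MB entry gets shift 29. A quick compile-time check of the arithmetic (C11 _Static_assert, illustrative only):

_Static_assert((1ull << 16) == 64ull << 10,  "HPAGE_SHIFT 16 is 64K");
_Static_assert((1ull << 26) == 64ull << 20,  "HPAGE_SHIFT 26 is 64MB");
_Static_assert((1ull << 29) == 512ull << 20, "HPAGE_SHIFT 29 is 512MB");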
@@ -55,28 +47,23 @@
 
 #ifndef __ASSEMBLY__
 
-extern void (*clear_page)(void *to);
-extern void (*copy_page)(void *to, void *from);
-
 extern unsigned long shm_align_mask;
 extern unsigned long max_low_pfn, min_low_pfn;
 extern unsigned long memory_start, memory_end;
 
-#ifdef CONFIG_MMU
-extern void clear_page_slow(void *to);
-extern void copy_page_slow(void *to, void *from);
-#else
-extern void clear_page_nommu(void *to);
-extern void copy_page_nommu(void *to, void *from);
-#endif
+extern void clear_page(void *to);
+extern void copy_page(void *to, void *from);
 
 #if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
 	(defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
 struct page;
-extern void clear_user_page(void *to, unsigned long address, struct page *pg);
-extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
-extern void __clear_user_page(void *to, void *orig_to);
-extern void __copy_user_page(void *to, void *from, void *orig_to);
+struct vm_area_struct;
+extern void clear_user_page(void *to, unsigned long address, struct page *page);
+#ifdef CONFIG_CPU_SH4
+extern void copy_user_highpage(struct page *to, struct page *from,
+			       unsigned long vaddr, struct vm_area_struct *vma);
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+#endif
 #else
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
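Defining __HAVE_ARCH_COPY_USER_HIGHPAGE suppresses the generic copy_user_highpage() in <linux/highmem.h>, so the SH-4 version can flush the aliases of its virtually indexed cache, keyed on the user-space vaddr. For comparison, the generic fallback of this era looks roughly like this (paraphrased from include/linux/highmem.h, kmap slots and all):

static inline void copy_user_highpage(struct page *to, struct page *from,
				      unsigned long vaddr,
				      struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);	/* no alias handling beyond this */
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}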
@@ -93,12 +80,18 @@ typedef struct { unsigned long long pgd; } pgd_t;
 	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
 #define __pte(x) \
 	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
-#else
+#elif defined(CONFIG_SUPERH32)
 typedef struct { unsigned long pte_low; } pte_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 typedef struct { unsigned long pgd; } pgd_t;
 #define pte_val(x)	((x).pte_low)
 #define __pte(x)	((pte_t) { (x) } )
+#else
+typedef struct { unsigned long long pte_low; } pte_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct { unsigned long pgd; } pgd_t;
+#define pte_val(x)	((x).pte_low)
+#define __pte(x)	((pte_t) { (x) } )
 #endif
 
 #define pgd_val(x)	((x).pgd)
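With X2TLB (the branch kept as context above) a PTE is 64 bits stored as two 32-bit words, and pte_val()/__pte() split and reassemble it; the new three-way split gives SUPERH32 the plain 32-bit pte and everything else (sh64) a 64-bit pte_low instead. A hypothetical userspace round-trip check of the X2TLB encoding, with uint32_t standing in for SH's 32-bit unsigned long (the ({ ... }) statement expression is a GCC extension, as in the header itself):

#include <assert.h>
#include <stdint.h>

typedef struct { uint32_t pte_low; uint32_t pte_high; } pte_t;

#define pte_val(x) \
	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) \
	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })

int main(void)
{
	unsigned long long v = 0x123456789abcdef0ull;

	/* low word, high word, and back again */
	assert(pte_val(__pte(v)) == v);
	return 0;
}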
@@ -109,29 +102,44 @@ typedef struct { unsigned long pgd; } pgd_t;
 
 #endif /* !__ASSEMBLY__ */
 
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
-
 /*
- * IF YOU CHANGE THIS, PLEASE ALSO CHANGE
- *
- * arch/sh/kernel/vmlinux.lds.S
- *
- * which has the same constant encoded..
+ * __MEMORY_START and SIZE are the physical addresses and size of RAM.
  */
-
 #define __MEMORY_START	CONFIG_MEMORY_START
 #define __MEMORY_SIZE	CONFIG_MEMORY_SIZE
 
+/*
+ * PAGE_OFFSET is the virtual address of the start of kernel address
+ * space.
+ */
 #define PAGE_OFFSET	CONFIG_PAGE_OFFSET
-#define __pa(x)	((unsigned long)(x)-PAGE_OFFSET)
-#define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET))
-#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 
-#define phys_to_page(phys)	(pfn_to_page(phys >> PAGE_SHIFT))
+/*
+ * Virtual to physical RAM address translation.
+ *
+ * In 29 bit mode, the physical offset of RAM from address 0 is visible in
+ * the kernel virtual address space, and thus we don't have to take
+ * this into account when translating. However in 32 bit mode this offset
+ * is not visible (it is part of the PMB mapping) and so needs to be
+ * added or subtracted as required.
+ */
+#ifdef CONFIG_32BIT
+#define __pa(x)	((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
+#define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
+#else
+#define __pa(x)	((unsigned long)(x)-PAGE_OFFSET)
+#define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET))
+#endif
+
+#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
 
-/* PFN start number, because of __MEMORY_START */
+/*
+ * PFN = physical frame number (ie PFN 0 == physical address 0)
+ * PFN_START is the PFN of the first page of RAM. By defining this we
+ * don't have struct page entries for the portion of address space
+ * between physical address 0 and the start of RAM.
+ */
 #define PFN_START		(__MEMORY_START >> PAGE_SHIFT)
 #define ARCH_PFN_OFFSET		(PFN_START)
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
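Worked numbers for the two __pa() flavours, assuming the common CONFIG_PAGE_OFFSET=0x80000000 and CONFIG_MEMORY_START=0x08000000 (both are board/Kconfig choices, so treat these values as an assumption): in 29-bit mode RAM is already visible at PAGE_OFFSET + 0x08000000, so plain subtraction is enough, while in 32-bit mode the PMB maps PAGE_OFFSET directly onto __MEMORY_START and the offset must be applied by hand. PFN_START then comes out as 0x08000000 >> 12 = 0x8000. The __pa_29bit/__pa_32bit names below are illustrative; the header defines a single __pa() chosen by CONFIG_32BIT:

#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET	0x80000000u	/* assumed CONFIG_PAGE_OFFSET  */
#define __MEMORY_START	0x08000000u	/* assumed CONFIG_MEMORY_START */

/* 29-bit: RAM's offset from physical 0 is visible in the P1 window */
static uint32_t __pa_29bit(uint32_t v) { return v - PAGE_OFFSET; }

/* 32-bit: the PMB maps PAGE_OFFSET straight onto __MEMORY_START */
static uint32_t __pa_32bit(uint32_t v) { return v - PAGE_OFFSET + __MEMORY_START; }

int main(void)
{
	/* the first byte of RAM resolves to the same physical address */
	printf("%#x\n", __pa_29bit(PAGE_OFFSET + __MEMORY_START));	/* 0x8000000 */
	printf("%#x\n", __pa_32bit(PAGE_OFFSET));			/* 0x8000000 */
	return 0;
}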
@@ -152,11 +160,20 @@ typedef struct { unsigned long pgd; } pgd_t;
 #endif
 
 /*
- * Slub defaults to 8-byte alignment, we're only interested in 4.
- * Slab defaults to BYTES_PER_WORD, which ends up being the same anyways.
+ * Some drivers need to perform DMA into kmalloc'ed buffers
+ * and so we have to increase the kmalloc minalign for this.
  */
-#define ARCH_KMALLOC_MINALIGN	4
-#define ARCH_SLAB_MINALIGN	4
+#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+
+#ifdef CONFIG_SUPERH64
+/*
+ * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
+ * happily generate {ld/st}.q pairs, requiring us to have 8-byte
+ * alignment to avoid traps. The kmalloc alignment is guaranteed by
+ * virtue of L1_CACHE_BYTES, requiring this to only be special cased
+ * for slab caches.
+ */
+#define ARCH_SLAB_MINALIGN	8
+#endif
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_PAGE_H */
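The kmalloc change is about non-coherent DMA: if a buffer handed to a device shares a cache line with unrelated data, the cache invalidate done around the transfer can destroy a neighbour's dirty bytes, so the allocator's minimum alignment is raised to a full cache line. A toy illustration of the line-sharing arithmetic, assuming a 32-byte SH-4 style line (the L1_CACHE_BYTES value here is an assumption):

#include <stdint.h>
#include <stdio.h>

#define L1_CACHE_BYTES 32u	/* assumed SH-4 cache line size */

static uint32_t cache_line(uint32_t addr)
{
	return addr / L1_CACHE_BYTES;
}

int main(void)
{
	/* 4-byte minalign: two small objects can land in one line */
	printf("%d\n", cache_line(0x1000) == cache_line(0x1008));	/* 1 */
	/* L1_CACHE_BYTES minalign: they can never share a line */
	printf("%d\n", cache_line(0x1000) == cache_line(0x1020));	/* 0 */
	return 0;
}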