Diffstat (limited to 'include/asm-sh/page.h')
-rw-r--r--  include/asm-sh/page.h  114
1 file changed, 67 insertions(+), 47 deletions(-)
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index cb3d46c59eab..002e64a4f049 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -5,13 +5,7 @@
  * Copyright (C) 1999 Niibe Yutaka
  */
 
-/*
- [ P0/U0 (virtual) ] 0x00000000 <------ User space
- [ P1 (fixed) cached ] 0x80000000 <------ Kernel space
- [ P2 (fixed) non-cachable] 0xA0000000 <------ Physical access
- [ P3 (virtual) cached] 0xC0000000 <------ vmalloced area
- [ P4 control ] 0xE0000000
- */
+#include <linux/const.h>
 
 #ifdef __KERNEL__
 
@@ -26,15 +20,13 @@
 # error "Bogus kernel page size?"
 #endif
 
-#ifdef __ASSEMBLY__
-#define PAGE_SIZE (1 << PAGE_SHIFT)
-#else
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
-#endif
-
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK (~(PAGE_SIZE-1))
 #define PTE_MASK PAGE_MASK
 
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
 #if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
 #define HPAGE_SHIFT 16
 #elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
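
The _AC() macro from <linux/const.h> (pulled in by the first hunk) is what makes the old #ifdef __ASSEMBLY__ split unnecessary: it expands to a bare 1 in assembly and to 1UL in C, so a single PAGE_SIZE definition now serves both. The PAGE_ALIGN() arithmetic moved up here rounds an address up to the next page boundary. A minimal standalone sketch of that arithmetic (userspace C; the 4 KiB page size is assumed purely for illustration):

    #include <stdio.h>

    #define PAGE_SHIFT 12                    /* assumes CONFIG_PAGE_SIZE_4KB */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE-1))
    #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)

    int main(void)
    {
            /* 0x1001 rounds up to the next boundary; 0x2000 stays put */
            printf("%#lx -> %#lx\n", 0x1001UL, PAGE_ALIGN(0x1001UL)); /* 0x2000 */
            printf("%#lx -> %#lx\n", 0x2000UL, PAGE_ALIGN(0x2000UL)); /* 0x2000 */
            return 0;
    }
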
@@ -45,6 +37,8 @@
 #define HPAGE_SHIFT 22
 #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
 #define HPAGE_SHIFT 26
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
+#define HPAGE_SHIFT 29
 #endif
 
 #ifdef CONFIG_HUGETLB_PAGE
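
Each CONFIG_HUGETLB_PAGE_SIZE_* option selects a power-of-two huge page; the size is derived in the usual way as 1UL << HPAGE_SHIFT inside the CONFIG_HUGETLB_PAGE block that follows this hunk's context, so the new case gives 1 << 29 = 512 MiB. A quick compile-time sanity check of the shift-to-size mapping (standalone C11, not taken from this header):

    #include <assert.h>     /* static_assert (C11) */

    static_assert((1UL << 16) ==  64UL * 1024,        "64K huge pages");
    static_assert((1UL << 22) ==   4UL * 1024 * 1024, "4MB huge pages");
    static_assert((1UL << 26) ==  64UL * 1024 * 1024, "64MB huge pages");
    static_assert((1UL << 29) == 512UL * 1024 * 1024, "512MB huge pages");
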
@@ -55,28 +49,23 @@
 
 #ifndef __ASSEMBLY__
 
-extern void (*clear_page)(void *to);
-extern void (*copy_page)(void *to, void *from);
-
 extern unsigned long shm_align_mask;
 extern unsigned long max_low_pfn, min_low_pfn;
 extern unsigned long memory_start, memory_end;
 
-#ifdef CONFIG_MMU
-extern void clear_page_slow(void *to);
-extern void copy_page_slow(void *to, void *from);
-#else
-extern void clear_page_nommu(void *to);
-extern void copy_page_nommu(void *to, void *from);
-#endif
+extern void clear_page(void *to);
+extern void copy_page(void *to, void *from);
 
 #if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
     (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
 struct page;
-extern void clear_user_page(void *to, unsigned long address, struct page *pg);
-extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
-extern void __clear_user_page(void *to, void *orig_to);
-extern void __copy_user_page(void *to, void *from, void *orig_to);
+struct vm_area_struct;
+extern void clear_user_page(void *to, unsigned long address, struct page *page);
+#ifdef CONFIG_CPU_SH4
+extern void copy_user_highpage(struct page *to, struct page *from,
+                               unsigned long vaddr, struct vm_area_struct *vma);
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+#endif
 #else
 #define clear_user_page(page, vaddr, pg) clear_page(page)
 #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
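
Background for this hunk: the SH-4 data cache is virtually indexed, so one physical page can sit under several cache indexes depending on which virtual address it was touched through. That is why the kernel cannot simply copy user pages through an arbitrary mapping here and instead provides clear_user_page()/copy_user_highpage(), and why shm_align_mask (declared above) exists: mappings that agree in those low bits land on the same cache index and cannot alias. An illustrative alias test, not kernel code; the mask value is CPU-dependent:

    #include <stdbool.h>

    /* Two virtual mappings of the same physical page can alias in a
     * virtually indexed cache when they disagree in the index bits
     * above PAGE_SHIFT. 'mask' plays the role of shm_align_mask; its
     * real value depends on the cache geometry of the CPU. */
    static bool may_alias(unsigned long va1, unsigned long va2,
                          unsigned long mask)
    {
            return ((va1 ^ va2) & mask) != 0;
    }
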
@@ -93,12 +82,18 @@ typedef struct { unsigned long long pgd; } pgd_t;
     ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
 #define __pte(x) \
     ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
-#else
+#elif defined(CONFIG_SUPERH32)
 typedef struct { unsigned long pte_low; } pte_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 typedef struct { unsigned long pgd; } pgd_t;
 #define pte_val(x) ((x).pte_low)
 #define __pte(x) ((pte_t) { (x) } )
+#else
+typedef struct { unsigned long long pte_low; } pte_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct { unsigned long pgd; } pgd_t;
+#define pte_val(x) ((x).pte_low)
+#define __pte(x) ((pte_t) { (x) } )
 #endif
 
 #define pgd_val(x) ((x).pgd)
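
The three-way split above distinguishes the 64-bit PTE case carried as a pte_low/pte_high pair (the context at the top of the hunk), plain 32-bit SuperH, and the remaining case with a single 64-bit pte_low. In the first case __pte() splits a 64-bit value and pte_val() reassembles it. A standalone round-trip sketch; uint32_t models the 32-bit 'unsigned long' of the SuperH ABI, and the ({ ... }) statement expression is the same GCC extension the header itself uses:

    #include <assert.h>
    #include <stdint.h>

    /* uint32_t stands in for the 32-bit 'unsigned long' of the SH ABI
     * so this behaves identically on any host. */
    typedef struct { uint32_t pte_low, pte_high; } pte_t;

    #define pte_val(x) \
            ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
    #define __pte(x) \
            ({ pte_t __pte = {(uint32_t)(x), ((unsigned long long)(x)) >> 32}; __pte; })

    int main(void)
    {
            unsigned long long v = 0x0123456789abcdefULL;
            pte_t pte = __pte(v);           /* split into low/high halves */
            assert(pte_val(pte) == v);      /* reassembly round-trips */
            return 0;
    }
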
@@ -109,29 +104,44 @@ typedef struct { unsigned long pgd; } pgd_t;
 
 #endif /* !__ASSEMBLY__ */
 
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
-
 /*
- * IF YOU CHANGE THIS, PLEASE ALSO CHANGE
- *
- * arch/sh/kernel/vmlinux.lds.S
- *
- * which has the same constant encoded..
+ * __MEMORY_START and SIZE are the physical addresses and size of RAM.
  */
-
 #define __MEMORY_START CONFIG_MEMORY_START
 #define __MEMORY_SIZE CONFIG_MEMORY_SIZE
 
+/*
+ * PAGE_OFFSET is the virtual address of the start of kernel address
+ * space.
+ */
 #define PAGE_OFFSET CONFIG_PAGE_OFFSET
-#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
-#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
-#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
 
-#define phys_to_page(phys) (pfn_to_page(phys >> PAGE_SHIFT))
+/*
+ * Virtual to physical RAM address translation.
+ *
+ * In 29 bit mode, the physical offset of RAM from address 0 is visible in
+ * the kernel virtual address space, and thus we don't have to take
+ * this into account when translating. However in 32 bit mode this offset
+ * is not visible (it is part of the PMB mapping) and so needs to be
+ * added or subtracted as required.
+ */
+#ifdef CONFIG_32BIT
+#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
+#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
+#else
+#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
+#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+#endif
+
+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
 
-/* PFN start number, because of __MEMORY_START */
+/*
+ * PFN = physical frame number (ie PFN 0 == physical address 0)
+ * PFN_START is the PFN of the first page of RAM. By defining this we
+ * don't have struct page entries for the portion of address space
+ * between physical address 0 and the start of RAM.
+ */
 #define PFN_START (__MEMORY_START >> PAGE_SHIFT)
 #define ARCH_PFN_OFFSET (PFN_START)
 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
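
Concrete numbers make the 29-bit/32-bit distinction in this hunk easier to see. Taking PAGE_OFFSET = 0x80000000 and RAM at physical 0x08000000 (typical values, used here only for illustration): in 29-bit mode physical address 0 appears at virtual 0x80000000, so the start of RAM sits at virtual 0x88000000; in 32-bit (PMB) mode PAGE_OFFSET maps the start of RAM itself, so __MEMORY_START must be added back when translating. A sketch:

    #include <stdio.h>

    /* Illustrative values only: both really come from Kconfig and
     * vary by board. */
    #define PAGE_OFFSET    0x80000000UL
    #define __MEMORY_START 0x08000000UL

    /* 29-bit mode: physical address 0 is visible at PAGE_OFFSET */
    #define pa_29bit(x) ((x) - PAGE_OFFSET)
    /* 32-bit (PMB) mode: PAGE_OFFSET maps the first byte of RAM */
    #define pa_32bit(x) ((x) - PAGE_OFFSET + __MEMORY_START)

    int main(void)
    {
            /* both print 0x8000000, the physical start of RAM */
            printf("29-bit: __pa(0x88000000) = %#lx\n", pa_29bit(0x88000000UL));
            printf("32-bit: __pa(0x80000000) = %#lx\n", pa_32bit(0x80000000UL));
            return 0;
    }
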
@@ -152,11 +162,21 @@ typedef struct { unsigned long pgd; } pgd_t;
 #endif
 
 /*
- * Slub defaults to 8-byte alignment, we're only interested in 4.
- * Slab defaults to BYTES_PER_WORD, which ends up being the same anyways.
+ * Some drivers need to perform DMA into kmalloc'ed buffers
+ * and so we have to increase the kmalloc minalign for this.
  */
-#define ARCH_KMALLOC_MINALIGN 4
-#define ARCH_SLAB_MINALIGN 4
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
+#ifdef CONFIG_SUPERH64
+/*
+ * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
+ * happily generate {ld/st}.q pairs, requiring us to have 8-byte
+ * alignment to avoid traps. The kmalloc alignment is guaranteed by
+ * virtue of L1_CACHE_BYTES, requiring this to only be special cased
+ * for slab caches.
+ */
+#define ARCH_SLAB_MINALIGN 8
+#endif
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_PAGE_H */
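
On the minalign change: with the old 4-byte minimum, two kmalloc objects could share one cache line, and on a CPU without DMA-coherent caches a writeback of a line the CPU dirtied can clobber bytes a device just DMA'd into the neighbouring object. Aligning every object to L1_CACHE_BYTES gives each its own line. A toy model of the hazard condition (illustrative line size; the real value comes from the CPU's cache geometry):

    #include <stdio.h>

    #define L1_CACHE_BYTES 32   /* illustrative; CPU-dependent in reality */

    /* Returns 1 if two allocations would share a cache line, which is
     * unsafe when one of them is a DMA target. */
    static int share_cache_line(unsigned long a, unsigned long b)
    {
            return (a / L1_CACHE_BYTES) == (b / L1_CACHE_BYTES);
    }

    int main(void)
    {
            unsigned long obj = 0x1000;

            /* 4-byte minalign: the next object can start 4 bytes in */
            printf("4-byte align shares line: %d\n",
                   share_cache_line(obj, obj + 4));               /* 1 */
            /* line-sized minalign: neighbours start on a new line */
            printf("line-size align shares line: %d\n",
                   share_cache_line(obj, obj + L1_CACHE_BYTES));  /* 0 */
            return 0;
    }
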