Diffstat (limited to 'include/asm-sh/page.h')
-rw-r--r--  include/asm-sh/page.h | 102
1 file changed, 60 insertions(+), 42 deletions(-)
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index d00a8fde7c7f..002e64a4f049 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -5,13 +5,7 @@
  * Copyright (C) 1999 Niibe Yutaka
  */
 
-/*
- [ P0/U0 (virtual) ]         0x00000000  <------ User space
- [ P1 (fixed)   cached ]     0x80000000  <------ Kernel space
- [ P2 (fixed)  non-cachable] 0xA0000000  <------ Physical access
- [ P3 (virtual) cached]      0xC0000000  <------ vmalloced area
- [ P4 control  ]             0xE0000000
- */
+#include <linux/const.h>
 
 #ifdef __KERNEL__
 
@@ -26,15 +20,13 @@
 # error "Bogus kernel page size?"
 #endif
 
-#ifdef __ASSEMBLY__
-#define PAGE_SIZE (1 << PAGE_SHIFT)
-#else
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
-#endif
-
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK (~(PAGE_SIZE-1))
 #define PTE_MASK PAGE_MASK
 
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
 #if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
 #define HPAGE_SHIFT 16
 #elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
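
Note: the single PAGE_SIZE definition above can serve both C and assembly
because of the _AC() macro pulled in by the new <linux/const.h> include.
A minimal sketch of that mechanism, assuming the standard linux/const.h
definitions (which this patch does not show):

    #ifdef __ASSEMBLY__
    #define _AC(X, Y)   X            /* assembler: no integer suffixes */
    #else
    #define __AC(X, Y)  (X##Y)       /* C: paste the UL suffix on */
    #define _AC(X, Y)   __AC(X, Y)
    #endif

So _AC(1, UL) << PAGE_SHIFT expands to 1 << PAGE_SHIFT in .S files and to
1UL << PAGE_SHIFT in C, which is what made the removed #ifdef __ASSEMBLY__
branches redundant. With PAGE_SHIFT == 12, for instance, the PAGE_ALIGN
macro added above gives PAGE_ALIGN(0x1234) == 0x2000.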
@@ -45,6 +37,8 @@
 #define HPAGE_SHIFT 22
 #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
 #define HPAGE_SHIFT 26
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
+#define HPAGE_SHIFT 29
 #endif
 
 #ifdef CONFIG_HUGETLB_PAGE
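
As a sanity check on the table above, each HPAGE_SHIFT is log2 of the
configured huge page size: 1UL << 16 == 64 KiB, and for the newly added
entry, 1UL << 29 == 0x20000000 == 512 MiB.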
@@ -55,20 +49,12 @@
 
 #ifndef __ASSEMBLY__
 
-extern void (*clear_page)(void *to);
-extern void (*copy_page)(void *to, void *from);
-
 extern unsigned long shm_align_mask;
 extern unsigned long max_low_pfn, min_low_pfn;
 extern unsigned long memory_start, memory_end;
 
-#ifdef CONFIG_MMU
-extern void clear_page_slow(void *to);
-extern void copy_page_slow(void *to, void *from);
-#else
-extern void clear_page_nommu(void *to);
-extern void copy_page_nommu(void *to, void *from);
-#endif
+extern void clear_page(void *to);
+extern void copy_page(void *to, void *from);
 
 #if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
 	(defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
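
With the function-pointer indirection gone, clear_page()/copy_page() are
ordinary direct calls on both MMU and nommu configurations. A hypothetical
call site, unaffected by the change (wipe_page() is illustrative only):

    #include <linux/mm.h>
    #include <asm/page.h>

    static void wipe_page(struct page *page)
    {
        /* direct call now, rather than a load through a pointer */
        clear_page(page_address(page));
    }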
@@ -96,12 +82,18 @@ typedef struct { unsigned long long pgd; } pgd_t;
 	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
 #define __pte(x) \
 	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
-#else
+#elif defined(CONFIG_SUPERH32)
 typedef struct { unsigned long pte_low; } pte_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 typedef struct { unsigned long pgd; } pgd_t;
 #define pte_val(x) ((x).pte_low)
 #define __pte(x) ((pte_t) { (x) } )
+#else
+typedef struct { unsigned long long pte_low; } pte_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct { unsigned long pgd; } pgd_t;
+#define pte_val(x) ((x).pte_low)
+#define __pte(x) ((pte_t) { (x) } )
 #endif
 
 #define pgd_val(x) ((x).pgd)
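
A hypothetical round trip through the 64-bit PTE accessors of the X2TLB
branch above (the value is illustrative only):

    pte_t pte = __pte(0x123456789ULL);
    /* pte.pte_low == 0x23456789, pte.pte_high == 0x00000001 */
    unsigned long long val = pte_val(pte);   /* 0x123456789 again */

In the other two branches, pte_val()/__pte() are trivial wrappers around
the single pte_low field.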
@@ -112,28 +104,44 @@ typedef struct { unsigned long pgd; } pgd_t;
 
 #endif /* !__ASSEMBLY__ */
 
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
-
 /*
- * IF YOU CHANGE THIS, PLEASE ALSO CHANGE
- *
- *	arch/sh/kernel/vmlinux.lds.S
- *
- * which has the same constant encoded..
+ * __MEMORY_START and SIZE are the physical addresses and size of RAM.
  */
-
 #define __MEMORY_START CONFIG_MEMORY_START
 #define __MEMORY_SIZE CONFIG_MEMORY_SIZE
 
+/*
+ * PAGE_OFFSET is the virtual address of the start of kernel address
+ * space.
+ */
 #define PAGE_OFFSET CONFIG_PAGE_OFFSET
-#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
-#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
-#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
 
+/*
+ * Virtual to physical RAM address translation.
+ *
+ * In 29 bit mode, the physical offset of RAM from address 0 is visible in
+ * the kernel virtual address space, and thus we don't have to take
+ * this into account when translating. However in 32 bit mode this offset
+ * is not visible (it is part of the PMB mapping) and so needs to be
+ * added or subtracted as required.
+ */
+#ifdef CONFIG_32BIT
+#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
+#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
+#else
+#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
+#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+#endif
+
+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
 
-/* PFN start number, because of __MEMORY_START */
+/*
+ * PFN = physical frame number (ie PFN 0 == physical address 0)
+ * PFN_START is the PFN of the first page of RAM. By defining this we
+ * don't have struct page entries for the portion of address space
+ * between physical address 0 and the start of RAM.
+ */
 #define PFN_START (__MEMORY_START >> PAGE_SHIFT)
 #define ARCH_PFN_OFFSET (PFN_START)
 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
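
A worked example of the translation above, assuming the typical (but
board- and config-dependent) values PAGE_OFFSET == 0x80000000 and
__MEMORY_START == 0x0c000000:

    /* 29-bit mode: RAM's physical offset is visible in the kernel
     * virtual address space, so the first RAM page lives at
     * 0x80000000 + 0x0c000000 == 0x8c000000, and
     * __pa(0x8c001000) == 0x8c001000 - 0x80000000 == 0x0c001000. */

    /* 32-bit (PMB) mode: RAM is remapped to start at PAGE_OFFSET
     * itself, so __MEMORY_START must be re-added:
     * __pa(0x80001000) == 0x80001000 - 0x80000000 + 0x0c000000
     *                  == 0x0c001000. */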
@@ -154,11 +162,21 @@ typedef struct { unsigned long pgd; } pgd_t;
 #endif
 
 /*
- * Slub defaults to 8-byte alignment, we're only interested in 4.
- * Slab defaults to BYTES_PER_WORD, which ends up being the same anyways.
+ * Some drivers need to perform DMA into kmalloc'ed buffers
+ * and so we have to increase the kmalloc minalign for this.
  */
-#define ARCH_KMALLOC_MINALIGN 4
-#define ARCH_SLAB_MINALIGN 4
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
+#ifdef CONFIG_SUPERH64
+/*
+ * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
+ * happily generate {ld/st}.q pairs, requiring us to have 8-byte
+ * alignment to avoid traps. The kmalloc alignment is guaranteed by
+ * virtue of L1_CACHE_BYTES, requiring this to only be special cased
+ * for slab caches.
+ */
+#define ARCH_SLAB_MINALIGN 8
+#endif
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_PAGE_H */
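
The kmalloc minalign bump above matters because of cache maintenance
around DMA. A hypothetical scenario (the function and buffer names are
illustrative only):

    #include <linux/slab.h>

    /* With a 4-byte minalign, these two objects could share an L1
     * cache line; invalidating that line around a DMA transfer into
     * dma_buf would also discard dirty CPU data in cpu_buf. Aligning
     * every kmalloc object to L1_CACHE_BYTES avoids the sharing. */
    static int minalign_example(void)
    {
        char *dma_buf = kmalloc(32, GFP_KERNEL);
        char *cpu_buf = kmalloc(32, GFP_KERNEL);

        if (!dma_buf || !cpu_buf)
            goto out;
        /* ... DMA into dma_buf while the CPU writes cpu_buf ... */
    out:
        kfree(cpu_buf);
        kfree(dma_buf);
        return 0;
    }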