path: root/include
author		Helge Deller <deller@gmx.de>	2007-01-28 08:52:57 -0500
committer	Kyle McMartin <kyle@athena.road.mcmartin.ca>	2007-02-17 01:16:26 -0500
commit		0b3d643f9ead9b5141dedbb2d1b06ce15469fc4a (patch)
tree		3be51559fb366dea87dc0eacfea2f94c15190875 /include
parent		8e9e9844b44dd9f855d824d035b3097b199e44ed (diff)
[PARISC] add ASM_EXCEPTIONTABLE_ENTRY() macro
- this macro unifies the code to add exception table entries
- additionally use ENTRY()/ENDPROC() at more places

Signed-off-by: Helge Deller <deller@gmx.de>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
Diffstat (limited to 'include')
-rw-r--r--	include/asm-parisc/assembly.h	|  8
-rw-r--r--	include/asm-parisc/bug.h	| 12
-rw-r--r--	include/asm-parisc/uaccess.h	| 53
3 files changed, 35 insertions, 38 deletions
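
For context, here is a minimal standalone sketch (not part of the patch) of what the new ASM_EXCEPTIONTABLE_ENTRY() macro expands to: the # operator stringizes its two arguments, and the resulting string literals are pasted between the .section/.previous directives, so each use appends one (fault address, fixup address) pair to the __ex_table section. The macro body is copied from the uaccess.h hunk below; the ".word\t" definition of ASM_WORD_INSN mirrors the 32-bit branch of the bug.h hunk. The small program just prints the assembler text a single use would inject into an inline asm statement.

#include <stdio.h>

/* 32-bit flavour, per the bug.h hunk; a 64-bit build uses ".dword\t". */
#define ASM_WORD_INSN ".word\t"

/* Macro body as added to include/asm-parisc/uaccess.h by this patch. */
#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n" \
	ASM_WORD_INSN #fault_addr ", " #except_addr "\n\t" \
	".previous\n"

int main(void)
{
	/* Print the directives one macro use injects into inline asm. */
	fputs(ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1), stdout);
	return 0;
}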
diff --git a/include/asm-parisc/assembly.h b/include/asm-parisc/assembly.h
index 5a1e0e8b1c32..7e26fcbe56d6 100644
--- a/include/asm-parisc/assembly.h
+++ b/include/asm-parisc/assembly.h
@@ -31,9 +31,13 @@
 #define STREGM	std,ma
 #define SHRREG	shrd
 #define SHLREG	shld
+#define ADDIB	addib,*
+#define CMPB	cmpb,*
+#define ANDCM	andcm,*
 #define RP_OFFSET	16
 #define FRAME_SIZE	128
 #define CALLEE_REG_FRAME_SIZE	144
+#define ASM_ULONG_INSN	.dword
 #else	/* CONFIG_64BIT */
 #define LDREG	ldw
 #define STREG	stw
@@ -42,9 +46,13 @@
 #define STREGM	stwm
 #define SHRREG	shr
 #define SHLREG	shlw
+#define ADDIB	addib,
+#define CMPB	cmpb,
+#define ANDCM	andcm
 #define RP_OFFSET	20
 #define FRAME_SIZE	64
 #define CALLEE_REG_FRAME_SIZE	128
+#define ASM_ULONG_INSN	.word
 #endif
 
 #define CALLEE_SAVE_FRAME_SIZE (CALLEE_REG_FRAME_SIZE + CALLEE_FLOAT_FRAME_SIZE)
diff --git a/include/asm-parisc/bug.h b/include/asm-parisc/bug.h
index cfe39a2547aa..83ba510ed5d8 100644
--- a/include/asm-parisc/bug.h
+++ b/include/asm-parisc/bug.h
@@ -14,10 +14,10 @@
 #define	PARISC_BUG_BREAK_ASM	"break 0x1f, 0x1fff"
 #define	PARISC_BUG_BREAK_INSN	0x03ffe01f	/* PARISC_BUG_BREAK_ASM */
 
-#ifdef CONFIG_64BIT
-#define ASM_ULONG_INSN		".dword"
+#if defined(CONFIG_64BIT)
+#define ASM_WORD_INSN		".dword\t"
 #else
-#define ASM_ULONG_INSN		".word"
+#define ASM_WORD_INSN		".word\t"
 #endif
 
 #ifdef CONFIG_DEBUG_BUGVERBOSE
@@ -26,7 +26,7 @@
 		asm volatile("\n"				\
 			"1:\t" PARISC_BUG_BREAK_ASM "\n"	\
 			"\t.pushsection __bug_table,\"a\"\n"	\
-			"2:\t" ASM_ULONG_INSN " 1b, %c0\n"	\
+			"2:\t" ASM_WORD_INSN "1b, %c0\n"	\
 			"\t.short %c1, %c2\n"			\
 			"\t.org 2b+%c3\n"			\
 			"\t.popsection"				\
@@ -49,7 +49,7 @@
 		asm volatile("\n"				\
 			"1:\t" PARISC_BUG_BREAK_ASM "\n"	\
 			"\t.pushsection __bug_table,\"a\"\n"	\
-			"2:\t" ASM_ULONG_INSN " 1b, %c0\n"	\
+			"2:\t" ASM_WORD_INSN "1b, %c0\n"	\
 			"\t.short %c1, %c2\n"			\
 			"\t.org 2b+%c3\n"			\
 			"\t.popsection"				\
@@ -63,7 +63,7 @@
 		asm volatile("\n"				\
 			"1:\t" PARISC_BUG_BREAK_ASM "\n"	\
 			"\t.pushsection __bug_table,\"a\"\n"	\
-			"2:\t" ASM_ULONG_INSN " 1b\n"		\
+			"2:\t" ASM_WORD_INSN "1b\n"		\
 			"\t.short %c0\n"			\
 			"\t.org 2b+%c1\n"			\
 			"\t.popsection"				\
diff --git a/include/asm-parisc/uaccess.h b/include/asm-parisc/uaccess.h
index d5d831ea7bc6..4878b9501f24 100644
--- a/include/asm-parisc/uaccess.h
+++ b/include/asm-parisc/uaccess.h
@@ -67,6 +67,11 @@ struct exception_table_entry {
 	long fixup;	/* fixup routine */
 };
 
+#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
+	".section __ex_table,\"aw\"\n"			   \
+	ASM_WORD_INSN #fault_addr ", " #except_addr "\n\t" \
+	".previous\n"
+
 /*
  * The page fault handler stores, in a per-cpu area, the following information
  * if a fixup routine is available.
@@ -106,21 +111,15 @@ struct exception_data {
 })
 
 #define __get_kernel_asm(ldx,ptr)			\
-	__asm__("\n1:\t" ldx "\t0(%2),%0\n"		\
-		"\t.section __ex_table,\"aw\"\n"	\
-		"\t" ASM_WORD_INSN			\
-			"1b,fixup_get_user_skip_1\n"	\
-		"\t.previous"				\
+	__asm__("\n1:\t" ldx "\t0(%2),%0\n\t"		\
+		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
 		: "=r"(__gu_val), "=r"(__gu_err)	\
 		: "r"(ptr), "1"(__gu_err)		\
 		: "r1");
 
 #define __get_user_asm(ldx,ptr)				\
-	__asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n"	\
-		"\t.section __ex_table,\"aw\"\n"	\
-		"\t" ASM_WORD_INSN			\
-			"1b,fixup_get_user_skip_1\n"	\
-		"\t.previous"				\
+	__asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n\t"	\
+		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_get_user_skip_1)\
 		: "=r"(__gu_val), "=r"(__gu_err)	\
 		: "r"(ptr), "1"(__gu_err)		\
 		: "r1");
@@ -164,22 +163,16 @@ struct exception_data {
 
 #define __put_kernel_asm(stx,x,ptr)			\
 	__asm__ __volatile__ (				\
-		"\n1:\t" stx "\t%2,0(%1)\n"		\
-		"\t.section __ex_table,\"aw\"\n"	\
-		"\t" ASM_WORD_INSN			\
-			"1b,fixup_put_user_skip_1\n"	\
-		"\t.previous"				\
+		"\n1:\t" stx "\t%2,0(%1)\n\t"		\
+		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\
 		: "=r"(__pu_err)			\
 		: "r"(ptr), "r"(x), "0"(__pu_err)	\
 		: "r1")
 
 #define __put_user_asm(stx,x,ptr)			\
 	__asm__ __volatile__ (				\
-		"\n1:\t" stx "\t%2,0(%%sr3,%1)\n"	\
-		"\t.section __ex_table,\"aw\"\n"	\
-		"\t" ASM_WORD_INSN			\
-			"1b,fixup_put_user_skip_1\n"	\
-		"\t.previous"				\
+		"\n1:\t" stx "\t%2,0(%%sr3,%1)\n\t"	\
+		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\
 		: "=r"(__pu_err)			\
 		: "r"(ptr), "r"(x), "0"(__pu_err)	\
 		: "r1")
@@ -192,12 +185,10 @@ struct exception_data {
 	u32 hi = (__val64) >> 32;			\
 	u32 lo = (__val64) & 0xffffffff;		\
 	__asm__ __volatile__ (				\
-		"\n1:\tstw %2,0(%1)\n"			\
-		"\n2:\tstw %3,4(%1)\n"			\
-		"\t.section __ex_table,\"aw\"\n"	\
-		"\t.word\t1b,fixup_put_user_skip_2\n"	\
-		"\t.word\t2b,fixup_put_user_skip_1\n"	\
-		"\t.previous"				\
+		"\n1:\tstw %2,0(%1)"			\
+		"\n2:\tstw %3,4(%1)\n\t"		\
+		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
+		ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
 		: "=r"(__pu_err)			\
 		: "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
 		: "r1"); \
@@ -208,12 +199,10 @@ struct exception_data {
 	u32 hi = (__val64) >> 32;			\
 	u32 lo = (__val64) & 0xffffffff;		\
 	__asm__ __volatile__ (				\
-		"\n1:\tstw %2,0(%%sr3,%1)\n"		\
-		"\n2:\tstw %3,4(%%sr3,%1)\n"		\
-		"\t.section __ex_table,\"aw\"\n"	\
-		"\t.word\t1b,fixup_put_user_skip_2\n"	\
-		"\t.word\t2b,fixup_put_user_skip_1\n"	\
-		"\t.previous"				\
+		"\n1:\tstw %2,0(%%sr3,%1)"		\
+		"\n2:\tstw %3,4(%%sr3,%1)\n\t"		\
+		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
+		ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
 		: "=r"(__pu_err)			\
 		: "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
 		: "r1"); \
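
The __ex_table pairs these macros emit are consumed by the page fault path. The following rough illustration is not code from this patch: the helper name search_exception_table is hypothetical, the linear scan is a simplification (the kernel keeps the table sorted and binary-searches it), and the insn field is the conventional counterpart to the fixup field shown in the uaccess.h context above. On a match, the fault handler resumes execution at the recorded fixup routine (such as fixup_get_user_skip_1) instead of oopsing.

#include <stddef.h>

/* One pair per instruction that is allowed to fault. */
struct exception_table_entry {
	unsigned long insn;	/* address of the faulting instruction */
	long fixup;		/* fixup routine */
};

/* Hypothetical helper: find the fixup for a faulting address, if any. */
static const struct exception_table_entry *
search_exception_table(const struct exception_table_entry *table,
		       size_t entries, unsigned long fault_addr)
{
	for (size_t i = 0; i < entries; i++)
		if (table[i].insn == fault_addr)
			return &table[i];
	return NULL;	/* no fixup registered; a real kernel fault would oops */
}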
="hl kwc">inline int is_vmalloc_addr(const void *x) { #ifdef CONFIG_MMU unsigned long addr = (unsigned long)x; return addr >= VMALLOC_START && addr < VMALLOC_END; #else return 0; #endif } static inline struct page *compound_head(struct page *page) { if (unlikely(PageTail(page))) return page->first_page; return page; } static inline int page_count(struct page *page) { return atomic_read(&compound_head(page)->_count); } static inline void get_page(struct page *page) { page = compound_head(page); VM_BUG_ON(atomic_read(&page->_count) == 0); atomic_inc(&page->_count); } static inline struct page *virt_to_head_page(const void *x) { struct page *page = virt_to_page(x); return compound_head(page); } /* * Setup the page count before being freed into the page allocator for * the first time (boot or memory hotplug) */ static inline void init_page_count(struct page *page) { atomic_set(&page->_count, 1); } void put_page(struct page *page); void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); /* * Compound pages have a destructor function. Provide a * prototype for that function and accessor functions. * These are _only_ valid on the head of a PG_compound page. */ typedef void compound_page_dtor(struct page *); static inline void set_compound_page_dtor(struct page *page, compound_page_dtor *dtor) { page[1].lru.next = (void *)dtor; } static inline compound_page_dtor *get_compound_page_dtor(struct page *page) { return (compound_page_dtor *)page[1].lru.next; } static inline int compound_order(struct page *page) { if (!PageHead(page)) return 0; return (unsigned long)page[1].lru.prev; } static inline void set_compound_order(struct page *page, unsigned long order) { page[1].lru.prev = (void *)order; } /* * Multiple processes may "see" the same page. E.g. for untouched * mappings of /dev/null, all processes see the same page full of * zeroes, and text pages of executables and shared libraries have * only one copy in memory, at most, normally. * * For the non-reserved pages, page_count(page) denotes a reference count. * page_count() == 0 means the page is free. page->lru is then used for * freelist management in the buddy allocator. * page_count() > 0 means the page has been allocated. * * Pages are allocated by the slab allocator in order to provide memory * to kmalloc and kmem_cache_alloc. In this case, the management of the * page, and the fields in 'struct page' are the responsibility of mm/slab.c * unless a particular usage is carefully commented. (the responsibility of * freeing the kmalloc memory is the caller's, of course). * * A page may be used by anyone else who does a __get_free_page(). * In this case, page_count still tracks the references, and should only * be used through the normal accessor functions. The top bits of page->flags * and page->virtual store page management information, but all other fields * are unused and could be used privately, carefully. The management of this * page is the responsibility of the one who allocated it, and those who have * subsequently been given references to it. * * The other pages (we may call them "pagecache pages") are completely * managed by the Linux memory manager: I/O, buffers, swapping etc. * The following discussion applies only to them. * * A pagecache page contains an opaque `private' member, which belongs to the * page's address_space. Usually, this is the address of a circular list of * the page's disk buffers. 
PG_private must be set to tell the VM to call * into the filesystem to release these pages. * * A page may belong to an inode's memory mapping. In this case, page->mapping * is the pointer to the inode, and page->index is the file offset of the page, * in units of PAGE_CACHE_SIZE. * * If pagecache pages are not associated with an inode, they are said to be * anonymous pages. These may become associated with the swapcache, and in that * case PG_swapcache is set, and page->private is an offset into the swapcache. * * In either case (swapcache or inode backed), the pagecache itself holds one * reference to the page. Setting PG_private should also increment the * refcount. The each user mapping also has a reference to the page. * * The pagecache pages are stored in a per-mapping radix tree, which is * rooted at mapping->page_tree, and indexed by offset. * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space * lists, we instead now tag pages as dirty/writeback in the radix tree. * * All pagecache pages may be subject to I/O: * - inode pages may need to be read from disk, * - inode pages which have been modified and are MAP_SHARED may need * to be written back to the inode on disk, * - anonymous pages (including MAP_PRIVATE file mappings) which have been * modified may need to be swapped out to swap space and (later) to be read * back into memory. */ /* * The zone field is never updated after free_area_init_core() * sets it, so none of the operations on it need to be atomic. */ /* * page->flags layout: * * There are three possibilities for how page->flags get * laid out. The first is for the normal case, without * sparsemem. The second is for sparsemem when there is * plenty of space for node and section. The last is when * we have run out of space and have to fall back to an * alternate (slower) way of determining the node. * * No sparsemem or sparsemem vmemmap: | NODE | ZONE | ... | FLAGS | * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS | * classic sparse no space for node: | SECTION | ZONE | ... | FLAGS | */ #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) #define SECTIONS_WIDTH SECTIONS_SHIFT #else #define SECTIONS_WIDTH 0 #endif #define ZONES_WIDTH ZONES_SHIFT #if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS #define NODES_WIDTH NODES_SHIFT #else #ifdef CONFIG_SPARSEMEM_VMEMMAP #error "Vmemmap: No space for nodes field in page flags" #endif #define NODES_WIDTH 0 #endif /* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */ #define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH) #define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) #define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) /* * We are going to use the flags for the page to node mapping if its in * there. This includes the case where there is no node, so it is implicit. */ #if !(NODES_WIDTH > 0 || NODES_SHIFT == 0) #define NODE_NOT_IN_PAGE_FLAGS #endif #ifndef PFN_SECTION_SHIFT #define PFN_SECTION_SHIFT 0 #endif /* * Define the bit shifts to access each section. For non-existant * sections we define the shift as 0; that plus a 0 mask ensures * the compiler will optimise away reference to them. 
*/ #define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0)) #define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) #define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allcator */ #ifdef NODE_NOT_IN_PAGEFLAGS #define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) #define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \ SECTIONS_PGOFF : ZONES_PGOFF) #else #define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT) #define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \ NODES_PGOFF : ZONES_PGOFF) #endif #define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0)) #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS #error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS #endif #define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) #define NODES_MASK ((1UL << NODES_WIDTH) - 1) #define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) #define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) static inline enum zone_type page_zonenum(struct page *page) { return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; } /* * The identification function is only used by the buddy allocator for * determining if two pages could be buddies. We are not really * identifying a zone since we could be using a the section number * id if we have not node id available in page flags. * We guarantee only that it will return the same value for two * combinable pages in a zone. */ static inline int page_zone_id(struct page *page) { return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK; } static inline int zone_to_nid(struct zone *zone) { #ifdef CONFIG_NUMA return zone->node; #else return 0; #endif } #ifdef NODE_NOT_IN_PAGE_FLAGS extern int page_to_nid(struct page *page); #else static inline int page_to_nid(struct page *page) { return (page->flags >> NODES_PGSHIFT) & NODES_MASK; } #endif static inline struct zone *page_zone(struct page *page) { return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; } #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) static inline unsigned long page_to_section(struct page *page) { return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; } #endif static inline void set_page_zone(struct page *page, enum zone_type zone) { page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT; } static inline void set_page_node(struct page *page, unsigned long node) { page->flags &= ~(NODES_MASK << NODES_PGSHIFT); page->flags |= (node & NODES_MASK) << NODES_PGSHIFT; } static inline void set_page_section(struct page *page, unsigned long section) { page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT); page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT; } static inline void set_page_links(struct page *page, enum zone_type zone, unsigned long node, unsigned long pfn) { set_page_zone(page, zone); set_page_node(page, node); set_page_section(page, pfn_to_section_nr(pfn)); } /* * If a hint addr is less than mmap_min_addr change hint to be as * low as possible but still greater than mmap_min_addr */ static inline unsigned long round_hint_to_min(unsigned long hint) { #ifdef CONFIG_SECURITY hint &= PAGE_MASK; if (((void *)hint != NULL) && (hint < mmap_min_addr)) return PAGE_ALIGN(mmap_min_addr); #endif return hint; } /* * Some inline functions in vmstat.h depend on page_zone() */ #include <linux/vmstat.h> static __always_inline void *lowmem_page_address(struct page *page) { return __va(page_to_pfn(page) << PAGE_SHIFT); } #if defined(CONFIG_HIGHMEM) && 
!defined(WANT_PAGE_VIRTUAL) #define HASHED_PAGE_VIRTUAL #endif #if defined(WANT_PAGE_VIRTUAL) #define page_address(page) ((page)->virtual) #define set_page_address(page, address) \ do { \ (page)->virtual = (address); \ } while(0) #define page_address_init() do { } while(0) #endif #if defined(HASHED_PAGE_VIRTUAL) void *page_address(struct page *page); void set_page_address(struct page *page, void *virtual); void page_address_init(void); #endif #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL) #define page_address(page) lowmem_page_address(page) #define set_page_address(page, address) do { } while(0) #define page_address_init() do { } while(0) #endif /* * On an anonymous page mapped into a user virtual memory area, * page->mapping points to its anon_vma, not to a struct address_space; * with the PAGE_MAPPING_ANON bit set to distinguish it. * * Please note that, confusingly, "page_mapping" refers to the inode * address_space which maps the page from disk; whereas "page_mapped" * refers to user virtual address space into which the page is mapped. */ #define PAGE_MAPPING_ANON 1 extern struct address_space swapper_space; static inline struct address_space *page_mapping(struct page *page) { struct address_space *mapping = page->mapping; VM_BUG_ON(PageSlab(page)); #ifdef CONFIG_SWAP if (unlikely(PageSwapCache(page))) mapping = &swapper_space; else #endif if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON)) mapping = NULL; return mapping; } static inline int PageAnon(struct page *page) { return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; } /* * Return the pagecache index of the passed page. Regular pagecache pages * use ->index whereas swapcache pages use ->private */ static inline pgoff_t page_index(struct page *page) { if (unlikely(PageSwapCache(page))) return page_private(page); return page->index; } /* * The atomic page->_mapcount, like _count, starts from -1: * so that transitions both from it and to it can be tracked, * using atomic_inc_and_test and atomic_add_negative(-1). */ static inline void reset_page_mapcount(struct page *page) { atomic_set(&(page)->_mapcount, -1); } static inline int page_mapcount(struct page *page) { return atomic_read(&(page)->_mapcount) + 1; } /* * Return true if this page is mapped into pagetables. */ static inline int page_mapped(struct page *page) { return atomic_read(&(page)->_mapcount) >= 0; } /* * Error return values for the *_nopfn functions */ #define NOPFN_SIGBUS ((unsigned long) -1) #define NOPFN_OOM ((unsigned long) -2) #define NOPFN_REFAULT ((unsigned long) -3) /* * Different kinds of faults, as returned by handle_mm_fault(). * Used to decide whether a process gets delivered SIGBUS or * just gets major/minor fault counters bumped up. */ #define VM_FAULT_MINOR 0 /* For backwards compat. Remove me quickly. 
*/ #define VM_FAULT_OOM 0x0001 #define VM_FAULT_SIGBUS 0x0002 #define VM_FAULT_MAJOR 0x0004 #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS) #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) extern void show_free_areas(void); #ifdef CONFIG_SHMEM int shmem_lock(struct file *file, int lock, struct user_struct *user); #else static inline int shmem_lock(struct file *file, int lock, struct user_struct *user) { return 0; } #endif struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags); int shmem_zero_setup(struct vm_area_struct *); #ifndef CONFIG_MMU extern unsigned long shmem_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); #endif extern int can_do_mlock(void); extern int user_shm_lock(size_t, struct user_struct *); extern void user_shm_unlock(size_t, struct user_struct *); /* * Parameter block passed down to zap_pte_range in exceptional cases. */ struct zap_details { struct vm_area_struct *nonlinear_vma; /* Check page->index if set */ struct address_space *check_mapping; /* Check page->mapping if set */ pgoff_t first_index; /* Lowest page->index to unmap */ pgoff_t last_index; /* Highest page->index to unmap */ spinlock_t *i_mmap_lock; /* For unmap_mapping_range: */ unsigned long truncate_count; /* Compare vm_truncate_count */ }; struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte); unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *); unsigned long unmap_vmas(struct mmu_gather **tlb, struct vm_area_struct *start_vma, unsigned long start_addr, unsigned long end_addr, unsigned long *nr_accounted, struct zap_details *); /** * mm_walk - callbacks for walk_page_range * @pgd_entry: if set, called for each non-empty PGD (top-level) entry * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry * @pte_entry: if set, called for each non-empty PTE (4th-level) entry * @pte_hole: if set, called for each hole at all levels * * (see walk_page_range for more details) */ struct mm_walk { int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *); int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *); int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *); int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *); int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *); struct mm_struct *mm; void *private; }; int walk_page_range(unsigned long addr, unsigned long end, struct mm_walk *walk); void free_pgd_range(struct mmu_gather **tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma, unsigned long floor, unsigned long ceiling); int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma); void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows); static inline void unmap_shared_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen) { unmap_mapping_range(mapping, holebegin, holelen, 0); } extern 
int vmtruncate(struct inode * inode, loff_t offset); extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end); #ifdef CONFIG_MMU extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access); #else static inline int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access) { /* should never happen if there's no MMU */ BUG(); return VM_FAULT_SIGBUS; } #endif extern int make_pages_present(unsigned long addr, unsigned long end); extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int len, int write, int force, struct page **pages, struct vm_area_struct **vmas); void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long); extern int try_to_release_page(struct page * page, gfp_t gfp_mask); extern void do_invalidatepage(struct page *page, unsigned long offset); int __set_page_dirty_nobuffers(struct page *page); int __set_page_dirty_no_writeback(struct page *page); int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page); int set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); int clear_page_dirty_for_io(struct page *page); extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len); extern unsigned long do_mremap(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr); extern int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags); /* * A callback you can register to apply pressure to ageable caches. * * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should * look through the least-recently-used 'nr_to_scan' entries and * attempt to free them up. It should return the number of objects * which remain in the cache. If it returns -1, it means it cannot do * any scanning at this time (eg. there is a risk of deadlock). * * The 'gfpmask' refers to the allocation we are currently trying to * fulfil. * * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is * querying the cache size, so a fastpath for that case is appropriate. */ struct shrinker { int (*shrink)(int nr_to_scan, gfp_t gfp_mask); int seeks; /* seeks to recreate an obj */ /* These are for internal use */ struct list_head list; long nr; /* objs pending delete */ }; #define DEFAULT_SEEKS 2 /* A good number if you don't know better. 
*/ extern void register_shrinker(struct shrinker *); extern void unregister_shrinker(struct shrinker *); int vma_wants_writenotify(struct vm_area_struct *vma); extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl); #ifdef __PAGETABLE_PUD_FOLDED static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) { return 0; } #else int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); #endif #ifdef __PAGETABLE_PMD_FOLDED static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) { return 0; } #else int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); #endif int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address); int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); /* * The following ifdef needed to get the 4level-fixup.h header to work. * Remove it when 4level-fixup.h has been removed. */ #if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK) static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) { return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))? NULL: pud_offset(pgd, address); } static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) { return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))? NULL: pmd_offset(pud, address); } #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS /* * We tuck a spinlock to guard each pagetable page into its struct page, * at page->private, with BUILD_BUG_ON to make sure that this will not * overflow into the next struct page (as it might with DEBUG_SPINLOCK). * When freeing, reset page->mapping so free_pages_check won't complain. */ #define __pte_lockptr(page) &((page)->ptl) #define pte_lock_init(_page) do { \ spin_lock_init(__pte_lockptr(_page)); \ } while (0) #define pte_lock_deinit(page) ((page)->mapping = NULL) #define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));}) #else /* * We use mm->page_table_lock to guard all pagetable pages of the mm. */ #define pte_lock_init(page) do {} while (0) #define pte_lock_deinit(page) do {} while (0) #define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;}) #endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */ static inline void pgtable_page_ctor(struct page *page) { pte_lock_init(page); inc_zone_page_state(page, NR_PAGETABLE); } static inline void pgtable_page_dtor(struct page *page) { pte_lock_deinit(page); dec_zone_page_state(page, NR_PAGETABLE); } #define pte_offset_map_lock(mm, pmd, address, ptlp) \ ({ \ spinlock_t *__ptl = pte_lockptr(mm, pmd); \ pte_t *__pte = pte_offset_map(pmd, address); \ *(ptlp) = __ptl; \ spin_lock(__ptl); \ __pte; \ }) #define pte_unmap_unlock(pte, ptl) do { \ spin_unlock(ptl); \ pte_unmap(pte); \ } while (0) #define pte_alloc_map(mm, pmd, address) \ ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \ NULL: pte_offset_map(pmd, address)) #define pte_alloc_map_lock(mm, pmd, address, ptlp) \ ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \ NULL: pte_offset_map_lock(mm, pmd, address, ptlp)) #define pte_alloc_kernel(pmd, address) \ ((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? 
\ NULL: pte_offset_kernel(pmd, address)) extern void free_area_init(unsigned long * zones_size); extern void free_area_init_node(int nid, pg_data_t *pgdat, unsigned long * zones_size, unsigned long zone_start_pfn, unsigned long *zholes_size); #ifdef CONFIG_ARCH_POPULATES_NODE_MAP /* * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its * zones, allocate the backing mem_map and account for memory holes in a more * architecture independent manner. This is a substitute for creating the * zone_sizes[] and zholes_size[] arrays and passing them to * free_area_init_node() * * An architecture is expected to register range of page frames backed by * physical memory with add_active_range() before calling * free_area_init_nodes() passing in the PFN each zone ends at. At a basic * usage, an architecture is expected to do something like * * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn, * max_highmem_pfn}; * for_each_valid_physical_page_range() * add_active_range(node_id, start_pfn, end_pfn) * free_area_init_nodes(max_zone_pfns); * * If the architecture guarantees that there are no holes in the ranges * registered with add_active_range(), free_bootmem_active_regions() * will call free_bootmem_node() for each registered physical page range. * Similarly sparse_memory_present_with_active_regions() calls * memory_present() for each range when SPARSEMEM is enabled. * * See mm/page_alloc.c for more information on each function exposed by * CONFIG_ARCH_POPULATES_NODE_MAP */ extern void free_area_init_nodes(unsigned long *max_zone_pfn); extern void add_active_range(unsigned int nid, unsigned long start_pfn, unsigned long end_pfn); extern void shrink_active_range(unsigned int nid, unsigned long old_end_pfn, unsigned long new_end_pfn); extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn, unsigned long end_pfn); extern void remove_all_active_ranges(void); extern unsigned long absent_pages_in_range(unsigned long start_pfn, unsigned long end_pfn); extern void get_pfn_range_for_nid(unsigned int nid, unsigned long *start_pfn, unsigned long *end_pfn); extern unsigned long find_min_pfn_with_active_regions(void); extern unsigned long find_max_pfn_with_active_regions(void); extern void free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn); extern void sparse_memory_present_with_active_regions(int nid); #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID extern int early_pfn_to_nid(unsigned long pfn); #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ extern void set_dma_reserve(unsigned long new_dma_reserve); extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long, enum memmap_context); extern void setup_per_zone_pages_min(void); extern void mem_init(void); extern void show_mem(void); extern void si_meminfo(struct sysinfo * val); extern void si_meminfo_node(struct sysinfo *val, int nid); #ifdef CONFIG_NUMA extern void setup_per_cpu_pageset(void); #else static inline void setup_per_cpu_pageset(void) {} #endif /* prio_tree.c */ void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *); void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *); struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma, struct prio_tree_iter *iter); #define vma_prio_tree_foreach(vma, iter, root, begin, end) \ for (prio_tree_iter_init(iter, root, begin, end), vma = NULL; \ (vma = 
vma_prio_tree_next(vma, iter)); ) static inline void vma_nonlinear_insert(struct vm_area_struct *vma, struct list_head *list) { vma->shared.vm_set.parent = NULL; list_add_tail(&vma->shared.vm_set.list, list); } /* mmap.c */ extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin); extern void vma_adjust(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert); extern struct vm_area_struct *vma_merge(struct mm_struct *, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, struct mempolicy *); extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); extern int split_vma(struct mm_struct *, struct vm_area_struct *, unsigned long addr, int new_below); extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, struct rb_node **, struct rb_node *); extern void unlink_file_vma(struct vm_area_struct *); extern struct vm_area_struct *copy_vma(struct vm_area_struct **, unsigned long addr, unsigned long len, pgoff_t pgoff); extern void exit_mmap(struct mm_struct *); #ifdef CONFIG_PROC_FS /* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */ extern void added_exe_file_vma(struct mm_struct *mm); extern void removed_exe_file_vma(struct mm_struct *mm); #else static inline void added_exe_file_vma(struct mm_struct *mm) {} static inline void removed_exe_file_vma(struct mm_struct *mm) {} #endif /* CONFIG_PROC_FS */ extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); extern int install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long flags, struct page **pages); extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long pgoff); extern unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, unsigned long flags, unsigned int vm_flags, unsigned long pgoff, int accountable); static inline unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long offset) { unsigned long ret = -EINVAL; if ((offset + PAGE_ALIGN(len)) < offset) goto out; if (!(offset & ~PAGE_MASK)) ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT); out: return ret; } extern int do_munmap(struct mm_struct *, unsigned long, size_t); extern unsigned long do_brk(unsigned long, unsigned long); /* filemap.c */ extern unsigned long page_unuse(struct page *); extern void truncate_inode_pages(struct address_space *, loff_t); extern void truncate_inode_pages_range(struct address_space *, loff_t lstart, loff_t lend); /* generic vm_area_ops exported for stackable file systems */ extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); /* mm/page-writeback.c */ int write_one_page(struct page *page, int wait); /* readahead.c */ #define VM_MAX_READAHEAD 128 /* kbytes */ #define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */ int do_page_cache_readahead(struct address_space *mapping, struct file *filp,