-rw-r--r--   include/linux/mm.h         62
-rw-r--r--   include/linux/mm_types.h   67
-rw-r--r--   include/linux/mmzone.h      5
3 files changed, 73 insertions, 61 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9d046db31e76..7477fb59c4f2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -16,6 +16,7 @@
 #include <linux/mutex.h>
 #include <linux/debug_locks.h>
 #include <linux/backing-dev.h>
+#include <linux/mm_types.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -215,62 +216,6 @@ struct vm_operations_struct {
 struct mmu_gather;
 struct inode;
 
-/*
- * Each physical page in the system has a struct page associated with
- * it to keep track of whatever it is we are using the page for at the
- * moment. Note that we have no way to track which tasks are using
- * a page, though if it is a pagecache page, rmap structures can tell us
- * who is mapping it.
- */
-struct page {
-	unsigned long flags;		/* Atomic flags, some possibly
-					 * updated asynchronously */
-	atomic_t _count;		/* Usage count, see below. */
-	atomic_t _mapcount;		/* Count of ptes mapped in mms,
-					 * to show when page is mapped
-					 * & limit reverse map searches.
-					 */
-	union {
-	    struct {
-		unsigned long private;		/* Mapping-private opaque data:
-						 * usually used for buffer_heads
-						 * if PagePrivate set; used for
-						 * swp_entry_t if PageSwapCache;
-						 * indicates order in the buddy
-						 * system if PG_buddy is set.
-						 */
-		struct address_space *mapping;	/* If low bit clear, points to
-						 * inode address_space, or NULL.
-						 * If page mapped as anonymous
-						 * memory, low bit is set, and
-						 * it points to anon_vma object:
-						 * see PAGE_MAPPING_ANON below.
-						 */
-	    };
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
-	    spinlock_t ptl;
-#endif
-	};
-	pgoff_t index;			/* Our offset within mapping. */
-	struct list_head lru;		/* Pageout list, eg. active_list
-					 * protected by zone->lru_lock !
-					 */
-	/*
-	 * On machines where all RAM is mapped into kernel address space,
-	 * we can simply calculate the virtual address. On machines with
-	 * highmem some memory is mapped into kernel virtual memory
-	 * dynamically, so we need a place to store that address.
-	 * Note that this field could be 16 bits on x86 ... ;)
-	 *
-	 * Architectures with slow multiplication can define
-	 * WANT_PAGE_VIRTUAL in asm/page.h
-	 */
-#if defined(WANT_PAGE_VIRTUAL)
-	void *virtual;			/* Kernel virtual address (NULL if
-					   not kmapped, ie. highmem) */
-#endif /* WANT_PAGE_VIRTUAL */
-};
-
 #define page_private(page)		((page)->private)
 #define set_page_private(page, v)	((page)->private = (v))
 
@@ -546,11 +491,6 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
  */
 #include <linux/vmstat.h>
 
-#ifndef CONFIG_DISCONTIGMEM
-/* The array of struct pages - for discontigmem use pgdat->lmem_map */
-extern struct page *mem_map;
-#endif
-
 static __always_inline void *lowmem_page_address(struct page *page)
 {
 	return __va(page_to_pfn(page) << PAGE_SHIFT);
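
As the retained lowmem_page_address() above shows, a lowmem page's kernel virtual address is derived purely from its pfn, which is why struct page only carries a 'virtual' field when WANT_PAGE_VIRTUAL is defined. A minimal sketch of that round trip, assuming a !CONFIG_HIGHMEM configuration; the helper names are hypothetical and not part of this patch:

#include <linux/mm.h>

/* Sketch only: derive the kernel virtual address of a lowmem page,
 * using the same arithmetic as lowmem_page_address() above. */
static inline void *page_to_lowmem_addr(struct page *page)
{
	return __va(page_to_pfn(page) << PAGE_SHIFT);
}

/* Sketch only: the inverse direction, lowmem virtual address back to
 * its struct page via the physical address. */
static inline struct page *lowmem_addr_to_page(void *addr)
{
	return pfn_to_page(__pa(addr) >> PAGE_SHIFT);
}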
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
new file mode 100644
index 000000000000..c3852fd4a1cc
--- /dev/null
+++ b/include/linux/mm_types.h
@@ -0,0 +1,67 @@
+#ifndef _LINUX_MM_TYPES_H
+#define _LINUX_MM_TYPES_H
+
+#include <linux/types.h>
+#include <linux/threads.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+struct address_space;
+
+/*
+ * Each physical page in the system has a struct page associated with
+ * it to keep track of whatever it is we are using the page for at the
+ * moment. Note that we have no way to track which tasks are using
+ * a page, though if it is a pagecache page, rmap structures can tell us
+ * who is mapping it.
+ */
+struct page {
+	unsigned long flags;		/* Atomic flags, some possibly
+					 * updated asynchronously */
+	atomic_t _count;		/* Usage count, see below. */
+	atomic_t _mapcount;		/* Count of ptes mapped in mms,
+					 * to show when page is mapped
+					 * & limit reverse map searches.
+					 */
+	union {
+	    struct {
+		unsigned long private;		/* Mapping-private opaque data:
+						 * usually used for buffer_heads
+						 * if PagePrivate set; used for
+						 * swp_entry_t if PageSwapCache;
+						 * indicates order in the buddy
+						 * system if PG_buddy is set.
+						 */
+		struct address_space *mapping;	/* If low bit clear, points to
+						 * inode address_space, or NULL.
+						 * If page mapped as anonymous
+						 * memory, low bit is set, and
+						 * it points to anon_vma object:
+						 * see PAGE_MAPPING_ANON below.
+						 */
+	    };
+#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+	    spinlock_t ptl;
+#endif
+	};
+	pgoff_t index;			/* Our offset within mapping. */
+	struct list_head lru;		/* Pageout list, eg. active_list
+					 * protected by zone->lru_lock !
+					 */
+	/*
+	 * On machines where all RAM is mapped into kernel address space,
+	 * we can simply calculate the virtual address. On machines with
+	 * highmem some memory is mapped into kernel virtual memory
+	 * dynamically, so we need a place to store that address.
+	 * Note that this field could be 16 bits on x86 ... ;)
+	 *
+	 * Architectures with slow multiplication can define
+	 * WANT_PAGE_VIRTUAL in asm/page.h
+	 */
+#if defined(WANT_PAGE_VIRTUAL)
+	void *virtual;			/* Kernel virtual address (NULL if
+					   not kmapped, ie. highmem) */
+#endif /* WANT_PAGE_VIRTUAL */
+};
+
+#endif /* _LINUX_MM_TYPES_H */
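
The union above lets page->mapping carry two different things: a pointer to an inode's address_space, or, with the low bit set, a pointer to the page's anon_vma (PAGE_MAPPING_ANON, defined in mm.h). A minimal sketch of how that low-bit encoding is decoded, assuming PAGE_MAPPING_ANON == 1; the names carrying a _sketch suffix are hypothetical and not part of this patch:

#include <linux/mm_types.h>

struct anon_vma;

#define SKETCH_PAGE_MAPPING_ANON	1	/* same value as PAGE_MAPPING_ANON in mm.h */

/* Sketch only: true if the page is mapped anonymously (mapping's low bit set). */
static inline int page_is_anon_sketch(struct page *page)
{
	return ((unsigned long)page->mapping & SKETCH_PAGE_MAPPING_ANON) != 0;
}

/* Sketch only: recover the anon_vma pointer by masking off the flag bit. */
static inline struct anon_vma *page_anon_vma_sketch(struct page *page)
{
	if (!page_is_anon_sketch(page))
		return NULL;	/* pagecache page: mapping is an address_space (or NULL) */
	return (struct anon_vma *)((unsigned long)page->mapping &
				   ~SKETCH_PAGE_MAPPING_ANON);
}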
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 1b0680cd84d2..562cf7a8f3ee 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -314,6 +314,11 @@ struct node_active_region {
 };
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
+#ifndef CONFIG_DISCONTIGMEM
+/* The array of struct pages - for discontigmem use pgdat->lmem_map */
+extern struct page *mem_map;
+#endif
+
 /*
  * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
  * (mostly NUMA machines?) to denote a higher-level memory zone than the
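
With the mem_map declaration now sitting next to the zone/node definitions, it is worth recalling what it is for: under a flat memory model, looking up a page by pfn is essentially an index into this one global array, while discontigmem goes through the per-node map reached from pg_data_t instead. A minimal sketch, assuming the lowest valid pfn is zero and ignoring any architecture-specific pfn offset the real pfn_to_page() may apply; the helper name is hypothetical and not part of this patch:

#include <linux/mm.h>

/* Sketch only: flat-memory pfn lookup is a plain array index into mem_map. */
static inline struct page *flat_pfn_to_page(unsigned long pfn)
{
	return mem_map + pfn;
}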