author     Jeff Garzik <jeff@garzik.org>  2006-09-26 13:13:19 -0400
committer  Jeff Garzik <jeff@garzik.org>  2006-09-26 13:13:19 -0400
commit     c226951b93f7cd7c3a10b17384535b617bd43fd0 (patch)
tree       07b8796a5c99fbbf587b8d0dbcbc173cfe5e381e /include/linux
parent     b0df3bd1e553e901ec7297267611a5db88240b38 (diff)
parent     e8216dee838c09776680a6f1a2e54d81f3cdfa14 (diff)
Merge branch 'master' into upstream
Diffstat (limited to 'include/linux')

 include/linux/bootmem.h        | 100
 include/linux/console.h        |   5
 include/linux/cpu.h            |   8
 include/linux/dccp.h           |  14
 include/linux/elf-em.h         |   1
 include/linux/elfnote.h        |  90
 include/linux/gfp.h            |  36
 include/linux/highmem.h        |   5
 include/linux/irq.h            |   6
 include/linux/kernel.h         |   1
 include/linux/mempolicy.h      |   4
 include/linux/mm.h             | 128
 include/linux/mmzone.h         | 120
 include/linux/netfilter/Kbuild |   2
 include/linux/page-flags.h     |  35
 include/linux/pagemap.h        |  15
 include/linux/percpu.h         |  89
 include/linux/resume-trace.h   |  24
 include/linux/rmap.h           |  14
 include/linux/selinux.h        |  29
 include/linux/slab.h           |  29
 include/linux/smp.h            |   3
 include/linux/suspend.h        |  32
 include/linux/swap.h           |  12
 include/linux/sysctl.h         |   1
 include/linux/vmalloc.h        |   2
 include/linux/vmstat.h         |  18
 include/linux/writeback.h      |   1

 28 files changed, 571 insertions, 253 deletions
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index e319c649e4fd..31e9abb6d977 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -4,11 +4,8 @@
 #ifndef _LINUX_BOOTMEM_H
 #define _LINUX_BOOTMEM_H
 
-#include <asm/pgtable.h>
-#include <asm/dma.h>
-#include <linux/cache.h>
-#include <linux/init.h>
 #include <linux/mmzone.h>
+#include <asm/dma.h>
 
 /*
  * simple boot-time physical memory area allocator.
@@ -41,45 +38,64 @@ typedef struct bootmem_data {
 	struct list_head list;
 } bootmem_data_t;
 
-extern unsigned long __init bootmem_bootmap_pages (unsigned long);
-extern unsigned long __init init_bootmem (unsigned long addr, unsigned long memend);
-extern void __init free_bootmem (unsigned long addr, unsigned long size);
-extern void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal);
-extern void * __init __alloc_bootmem_nopanic (unsigned long size, unsigned long align, unsigned long goal);
-extern void * __init __alloc_bootmem_low(unsigned long size,
-					 unsigned long align,
-					 unsigned long goal);
-extern void * __init __alloc_bootmem_low_node(pg_data_t *pgdat,
-					      unsigned long size,
-					      unsigned long align,
-					      unsigned long goal);
-extern void * __init __alloc_bootmem_core(struct bootmem_data *bdata,
-		unsigned long size, unsigned long align, unsigned long goal,
-		unsigned long limit);
+extern unsigned long bootmem_bootmap_pages(unsigned long);
+extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
+extern void free_bootmem(unsigned long addr, unsigned long size);
+extern void *__alloc_bootmem(unsigned long size,
+			     unsigned long align,
+			     unsigned long goal);
+extern void *__alloc_bootmem_nopanic(unsigned long size,
+				     unsigned long align,
+				     unsigned long goal);
+extern void *__alloc_bootmem_low(unsigned long size,
+				 unsigned long align,
+				 unsigned long goal);
+extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
+				      unsigned long size,
+				      unsigned long align,
+				      unsigned long goal);
+extern void *__alloc_bootmem_core(struct bootmem_data *bdata,
+				  unsigned long size,
+				  unsigned long align,
+				  unsigned long goal,
+				  unsigned long limit);
+
 #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-extern void __init reserve_bootmem (unsigned long addr, unsigned long size);
+extern void reserve_bootmem(unsigned long addr, unsigned long size);
 #define alloc_bootmem(x) \
-	__alloc_bootmem((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+	__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_low(x) \
-	__alloc_bootmem_low((x), SMP_CACHE_BYTES, 0)
+	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
 #define alloc_bootmem_pages(x) \
-	__alloc_bootmem((x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+	__alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_low_pages(x) \
-	__alloc_bootmem_low((x), PAGE_SIZE, 0)
+	__alloc_bootmem_low(x, PAGE_SIZE, 0)
 #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
-extern unsigned long __init free_all_bootmem (void);
-extern void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal);
-extern unsigned long __init init_bootmem_node (pg_data_t *pgdat, unsigned long freepfn, unsigned long startpfn, unsigned long endpfn);
-extern void __init reserve_bootmem_node (pg_data_t *pgdat, unsigned long physaddr, unsigned long size);
-extern void __init free_bootmem_node (pg_data_t *pgdat, unsigned long addr, unsigned long size);
-extern unsigned long __init free_all_bootmem_node (pg_data_t *pgdat);
+
+extern unsigned long free_all_bootmem(void);
+extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
+extern void *__alloc_bootmem_node(pg_data_t *pgdat,
+				  unsigned long size,
+				  unsigned long align,
+				  unsigned long goal);
+extern unsigned long init_bootmem_node(pg_data_t *pgdat,
+				       unsigned long freepfn,
+				       unsigned long startpfn,
+				       unsigned long endpfn);
+extern void reserve_bootmem_node(pg_data_t *pgdat,
+				 unsigned long physaddr,
+				 unsigned long size);
+extern void free_bootmem_node(pg_data_t *pgdat,
+			      unsigned long addr,
+			      unsigned long size);
+
 #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
 #define alloc_bootmem_node(pgdat, x) \
-	__alloc_bootmem_node((pgdat), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+	__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_node(pgdat, x) \
-	__alloc_bootmem_node((pgdat), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+	__alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_low_pages_node(pgdat, x) \
-	__alloc_bootmem_low_node((pgdat), (x), PAGE_SIZE, 0)
+	__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
 #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 
 #ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
@@ -89,19 +105,19 @@ static inline void *alloc_remap(int nid, unsigned long size)
 {
 	return NULL;
 }
-#endif
+#endif /* CONFIG_HAVE_ARCH_ALLOC_REMAP */
 
 extern unsigned long __meminitdata nr_kernel_pages;
 extern unsigned long nr_all_pages;
 
-extern void *__init alloc_large_system_hash(const char *tablename,
-					    unsigned long bucketsize,
-					    unsigned long numentries,
-					    int scale,
-					    int flags,
-					    unsigned int *_hash_shift,
-					    unsigned int *_hash_mask,
-					    unsigned long limit);
+extern void *alloc_large_system_hash(const char *tablename,
+				     unsigned long bucketsize,
+				     unsigned long numentries,
+				     int scale,
+				     int flags,
+				     unsigned int *_hash_shift,
+				     unsigned int *_hash_mask,
+				     unsigned long limit);
 
 #define HASH_HIGHMEM	0x00000001	/* Consider highmem? */
 #define HASH_EARLY	0x00000002	/* Allocating during early boot? */
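
An aside for readers of this hunk (not part of the patch): the alloc_bootmem* macros only pin down the alignment and the allocation "goal"; all the work happens in __alloc_bootmem(). A minimal userspace sketch of that layering, with a stub standing in for the real allocator (DMA_GOAL and the stub body are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

#define SMP_CACHE_BYTES	64		/* stand-in for the arch value */
#define PAGE_SIZE	4096
#define DMA_GOAL	(16UL << 20)	/* stand-in for __pa(MAX_DMA_ADDRESS) */

/* Stub for the real allocator: just reports the effective parameters. */
static void *__alloc_bootmem(unsigned long size, unsigned long align,
			     unsigned long goal)
{
	printf("alloc %lu bytes, align %lu, prefer addresses >= %#lx\n",
	       size, align, goal);
	return aligned_alloc(align, size);
}

/* Same layering as the header: the macros only choose align/goal. */
#define alloc_bootmem(x) \
	__alloc_bootmem(x, SMP_CACHE_BYTES, DMA_GOAL)
#define alloc_bootmem_pages(x) \
	__alloc_bootmem(x, PAGE_SIZE, DMA_GOAL)

int main(void)
{
	void *p = alloc_bootmem(128);
	void *q = alloc_bootmem_pages(2 * PAGE_SIZE);

	free(p);
	free(q);
	return 0;
}
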
diff --git a/include/linux/console.h b/include/linux/console.h
index 3bdf2155e565..76a1807726eb 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -120,9 +120,14 @@ extern void console_stop(struct console *);
 extern void console_start(struct console *);
 extern int is_console_locked(void);
 
+#ifndef CONFIG_DISABLE_CONSOLE_SUSPEND
 /* Suspend and resume console messages over PM events */
 extern void suspend_console(void);
 extern void resume_console(void);
+#else
+static inline void suspend_console(void) {}
+static inline void resume_console(void) {}
+#endif /* CONFIG_DISABLE_CONSOLE_SUSPEND */
 
 /* Some debug stub to catch some of the obvious races in the VT code */
 #if 1
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 8fb344a9abd8..3fef7d67aedc 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -89,4 +89,12 @@ int cpu_down(unsigned int cpu);
 static inline int cpu_is_offline(int cpu) { return 0; }
 #endif
 
+#ifdef CONFIG_SUSPEND_SMP
+extern int disable_nonboot_cpus(void);
+extern void enable_nonboot_cpus(void);
+#else
+static inline int disable_nonboot_cpus(void) { return 0; }
+static inline void enable_nonboot_cpus(void) {}
+#endif
+
 #endif /* _LINUX_CPU_H_ */
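
The point of the stubbed variants (here and in the console.h hunk above) is that callers stay free of #ifdefs. A rough userspace sketch of the intended shape — the caller enter_suspend() is hypothetical, and we pretend CONFIG_SUSPEND_SMP is not set so the stubs compile to nothing:

#include <stdio.h>

/* Pretend CONFIG_SUSPEND_SMP is not set: the stubs are no-ops. */
static inline int disable_nonboot_cpus(void) { return 0; }
static inline void enable_nonboot_cpus(void) { }

static int enter_suspend(void)
{
	int error = disable_nonboot_cpus();	/* no-op on this config */

	if (error)
		return error;
	printf("system suspended\n");		/* stand-in for the real work */
	enable_nonboot_cpus();			/* no-op again */
	return 0;
}

int main(void)
{
	return enter_suspend();
}
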
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 2d7671c92c0b..d6f4ec467a4b 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -169,6 +169,12 @@ enum {
 	DCCPO_MAX_CCID_SPECIFIC = 255,
 };
 
+/* DCCP CCIDS */
+enum {
+	DCCPC_CCID2 = 2,
+	DCCPC_CCID3 = 3,
+};
+
 /* DCCP features */
 enum {
 	DCCPF_RESERVED = 0,
@@ -320,7 +326,7 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
 /* initial values for each feature */
 #define DCCPF_INITIAL_SEQUENCE_WINDOW		100
 #define DCCPF_INITIAL_ACK_RATIO			2
-#define DCCPF_INITIAL_CCID			2
+#define DCCPF_INITIAL_CCID			DCCPC_CCID2
 #define DCCPF_INITIAL_SEND_ACK_VECTOR		1
 /* FIXME: for now we're default to 1 but it should really be 0 */
 #define DCCPF_INITIAL_SEND_NDP_COUNT		1
@@ -404,6 +410,7 @@ struct dccp_service_list {
 };
 
 #define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
+#define DCCP_SERVICE_CODE_IS_ABSENT	0
 
 static inline int dccp_list_has_service(const struct dccp_service_list *sl,
 					const __be32 service)
@@ -484,11 +491,6 @@ static inline struct dccp_minisock *dccp_msk(const struct sock *sk)
 	return (struct dccp_minisock *)&dccp_sk(sk)->dccps_minisock;
 }
 
-static inline int dccp_service_not_initialized(const struct sock *sk)
-{
-	return dccp_sk(sk)->dccps_service == DCCP_SERVICE_INVALID_VALUE;
-}
-
 static inline const char *dccp_role(const struct sock *sk)
 {
 	switch (dccp_sk(sk)->dccps_role) {
diff --git a/include/linux/elf-em.h b/include/linux/elf-em.h
index 6a5796c81c90..666e0a5f00fc 100644
--- a/include/linux/elf-em.h
+++ b/include/linux/elf-em.h
@@ -31,6 +31,7 @@
 #define EM_M32R		88	/* Renesas M32R */
 #define EM_H8_300	46	/* Renesas H8/300,300H,H8S */
 #define EM_FRV		0x5441	/* Fujitsu FR-V */
+#define EM_AVR32	0x18ad	/* Atmel AVR32 */
 
 /*
  * This is an interim value that we will use until the committee comes
diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h
new file mode 100644
index 000000000000..67396db141e8
--- /dev/null
+++ b/include/linux/elfnote.h
@@ -0,0 +1,90 @@
+#ifndef _LINUX_ELFNOTE_H
+#define _LINUX_ELFNOTE_H
+/*
+ * Helper macros to generate ELF Note structures, which are put into a
+ * PT_NOTE segment of the final vmlinux image.  These are useful for
+ * including name-value pairs of metadata into the kernel binary (or
+ * modules?) for use by external programs.
+ *
+ * Each note has three parts: a name, a type and a desc.  The name is
+ * intended to distinguish the note's originator, so it would be a
+ * company, project, subsystem, etc; it must be in a suitable form for
+ * use in a section name.  The type is an integer which is used to tag
+ * the data, and is considered to be within the "name" namespace (so
+ * "FooCo"'s type 42 is distinct from "BarProj"'s type 42).  The
+ * "desc" field is the actual data.  There are no constraints on the
+ * desc field's contents, though typically they're fairly small.
+ *
+ * All notes from a given NAME are put into a section named
+ * .note.NAME.  When the kernel image is finally linked, all the notes
+ * are packed into a single .notes section, which is mapped into the
+ * PT_NOTE segment.  Because notes for a given name are grouped into
+ * the same section, they'll all be adjacent in the output file.
+ *
+ * This file defines macros for both C and assembler use.  Their
+ * syntax is slightly different, but they're semantically similar.
+ *
+ * See the ELF specification for more detail about ELF notes.
+ */
+
+#ifdef __ASSEMBLER__
+/*
+ * Generate a structure with the same shape as Elf{32,64}_Nhdr (which
+ * turn out to be the same size and shape), followed by the name and
+ * desc data with appropriate padding.  The 'desctype' argument is the
+ * assembler pseudo op defining the type of the data e.g. .asciz while
+ * 'descdata' is the data itself e.g.  "hello, world".
+ *
+ * e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two")
+ *      ELFNOTE(XYZCo, 12, .long, 0xdeadbeef)
+ */
+#define ELFNOTE(name, type, desctype, descdata)	\
+.pushsection .note.name			;	\
+  .align 4				;	\
+  .long 2f - 1f		/* namesz */	;	\
+  .long 4f - 3f		/* descsz */	;	\
+  .long type				;	\
+1:.asciz "name"				;	\
+2:.align 4				;	\
+3:desctype descdata			;	\
+4:.align 4				;	\
+.popsection				;
+#else	/* !__ASSEMBLER__ */
+#include <linux/elf.h>
+/*
+ * Use an anonymous structure which matches the shape of
+ * Elf{32,64}_Nhdr, but includes the name and desc data.  The size and
+ * type of name and desc depend on the macro arguments.  "name" must
+ * be a literal string, and "desc" must be passed by value.  You may
+ * only define one note per line, since __LINE__ is used to generate
+ * unique symbols.
+ */
+#define _ELFNOTE_PASTE(a,b)	a##b
+#define _ELFNOTE(size, name, unique, type, desc)			\
+	static const struct {						\
+		struct elf##size##_note _nhdr;				\
+		unsigned char _name[sizeof(name)]			\
+		__attribute__((aligned(sizeof(Elf##size##_Word))));	\
+		typeof(desc) _desc					\
+			     __attribute__((aligned(sizeof(Elf##size##_Word)))); \
+	} _ELFNOTE_PASTE(_note_, unique)				\
+		__attribute_used__					\
+		__attribute__((section(".note." name),			\
+			       aligned(sizeof(Elf##size##_Word)),	\
+			       unused)) = {				\
+		{							\
+			sizeof(name),					\
+			sizeof(desc),					\
+			type,						\
+		},							\
+		name,							\
+		desc							\
+	}
+#define ELFNOTE(size, name, type, desc)		\
+	_ELFNOTE(size, name, __LINE__, type, desc)
+
+#define ELFNOTE32(name, type, desc) ELFNOTE(32, name, type, desc)
+#define ELFNOTE64(name, type, desc) ELFNOTE(64, name, type, desc)
+#endif	/* __ASSEMBLER__ */
+
+#endif /* _LINUX_ELFNOTE_H */
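
For illustration only (a hypothetical userspace analogue, not kernel code): the same section trick the C-side ELFNOTE() macro uses works in any ELF object, and the resulting note can be inspected with readelf -n. All names below are invented for the demo:

/* Userspace analogue of the ELFNOTE() C macro above: emit an ELF note
 * into a .note.XYZCo section of this object file.
 * Build and inspect with:  cc -c note-demo.c && readelf -n note-demo.o
 */
#include <stdint.h>

struct note {
	uint32_t namesz;	/* the Elf{32,64}_Nhdr fields */
	uint32_t descsz;
	uint32_t type;
	char name[8];		/* "XYZCo" padded to a 4-byte boundary */
	uint32_t desc;		/* the payload */
};

__attribute__((section(".note.XYZCo"), aligned(4), used))
static const struct note demo_note = {
	.namesz	= sizeof("XYZCo"),	/* includes the trailing NUL */
	.descsz	= sizeof(uint32_t),
	.type	= 42,			/* type is private to "XYZCo" */
	.name	= "XYZCo",
	.desc	= 0xdeadbeef,
};
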
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index cc9e60844484..8b34aabfe4c6 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -9,17 +9,16 @@ struct vm_area_struct;
 
 /*
  * GFP bitmasks..
+ *
+ * Zone modifiers (see linux/mmzone.h - low three bits)
+ *
+ * Do not put any conditional on these. If necessary modify the definitions
+ * without the underscores and use them consistently. The definitions here may
+ * be used in bit comparisons.
  */
-/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low three bits) */
 #define __GFP_DMA	((__force gfp_t)0x01u)
 #define __GFP_HIGHMEM	((__force gfp_t)0x02u)
-#ifdef CONFIG_DMA_IS_DMA32
-#define __GFP_DMA32	((__force gfp_t)0x01)	/* ZONE_DMA is ZONE_DMA32 */
-#elif BITS_PER_LONG < 64
-#define __GFP_DMA32	((__force gfp_t)0x00)	/* ZONE_NORMAL is ZONE_DMA32 */
-#else
-#define __GFP_DMA32	((__force gfp_t)0x04)	/* Has own ZONE_DMA32 */
-#endif
+#define __GFP_DMA32	((__force gfp_t)0x04u)
 
 /*
  * Action modifiers - doesn't change the zoning
@@ -46,6 +45,7 @@ struct vm_area_struct;
 #define __GFP_ZERO	((__force gfp_t)0x8000u)/* Return zeroed page on success */
 #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
 #define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
+#define __GFP_THISNODE	((__force gfp_t)0x40000u)/* No fallback, no policies */
 
 #define __GFP_BITS_SHIFT 20	/* Room for 20 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
@@ -54,7 +54,7 @@ struct vm_area_struct;
 #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
 			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
 			__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
-			__GFP_NOMEMALLOC|__GFP_HARDWALL)
+			__GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE)
 
 /* This equals 0, but use constants in case they ever change */
 #define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
@@ -67,6 +67,8 @@ struct vm_area_struct;
 #define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
 			 __GFP_HIGHMEM)
 
+#define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
+
 /* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
    platforms, used as appropriate on others */
 
@@ -76,11 +78,19 @@ struct vm_area_struct;
 #define GFP_DMA32	__GFP_DMA32
 
 
-static inline int gfp_zone(gfp_t gfp)
+static inline enum zone_type gfp_zone(gfp_t flags)
 {
-	int zone = GFP_ZONEMASK & (__force int) gfp;
-	BUG_ON(zone >= GFP_ZONETYPES);
-	return zone;
+	if (flags & __GFP_DMA)
+		return ZONE_DMA;
+#ifdef CONFIG_ZONE_DMA32
+	if (flags & __GFP_DMA32)
+		return ZONE_DMA32;
+#endif
+#ifdef CONFIG_HIGHMEM
+	if (flags & __GFP_HIGHMEM)
+		return ZONE_HIGHMEM;
+#endif
+	return ZONE_NORMAL;
 }
 
 /*
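
A compile-and-run sketch (userspace mock; the zone and flag names are reused purely for illustration) of the precedence the new gfp_zone() encodes: DMA beats DMA32 beats HIGHMEM, with NORMAL as the fallback when no zone modifier is set:

#include <stdio.h>

enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM };

#define __GFP_DMA	0x01u
#define __GFP_HIGHMEM	0x02u
#define __GFP_DMA32	0x04u

/* Mirror of the new gfp_zone(): explicit tests, no zone-mask arithmetic. */
static enum zone_type gfp_zone(unsigned int flags)
{
	if (flags & __GFP_DMA)
		return ZONE_DMA;
	if (flags & __GFP_DMA32)
		return ZONE_DMA32;
	if (flags & __GFP_HIGHMEM)
		return ZONE_HIGHMEM;
	return ZONE_NORMAL;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       gfp_zone(0),			/* ZONE_NORMAL */
	       gfp_zone(__GFP_DMA),		/* ZONE_DMA */
	       gfp_zone(__GFP_DMA32),		/* ZONE_DMA32 */
	       gfp_zone(__GFP_HIGHMEM));	/* ZONE_HIGHMEM */
	return 0;
}
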
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 85ce7ef9a512..fd7d12daa94f 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -24,11 +24,15 @@ static inline void flush_kernel_dcache_page(struct page *page)
 
 /* declarations for linux/mm/highmem.c */
 unsigned int nr_free_highpages(void);
+extern unsigned long totalhigh_pages;
 
 #else /* CONFIG_HIGHMEM */
 
 static inline unsigned int nr_free_highpages(void) { return 0; }
 
+#define totalhigh_pages 0
+
+#ifndef ARCH_HAS_KMAP
 static inline void *kmap(struct page *page)
 {
 	might_sleep();
@@ -41,6 +45,7 @@ static inline void *kmap(struct page *page)
 #define kunmap_atomic(addr, idx)	do { } while (0)
 #define kmap_atomic_pfn(pfn, idx)	page_address(pfn_to_page(pfn))
 #define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
+#endif
 
 #endif /* CONFIG_HIGHMEM */
 
diff --git a/include/linux/irq.h b/include/linux/irq.h
index fbf6d901e9c2..48d3cb3b6a47 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -320,7 +320,9 @@ handle_irq_name(void fastcall (*handle)(unsigned int, struct irq_desc *,
  * Monolithic do_IRQ implementation.
  * (is an explicit fastcall, because i386 4KSTACKS calls it from assembly)
  */
+#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
 extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
+#endif
 
 /*
  * Architectures call this to let the generic IRQ layer
@@ -332,10 +334,14 @@ static inline void generic_handle_irq(unsigned int irq, struct pt_regs *regs)
 {
 	struct irq_desc *desc = irq_desc + irq;
 
+#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
+	desc->handle_irq(irq, desc, regs);
+#else
 	if (likely(desc->handle_irq))
 		desc->handle_irq(irq, desc, regs);
 	else
 		__do_IRQ(irq, regs);
+#endif
 }
 
 /* Handling of unhandled and spurious interrupts: */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 2b2ae4fdce8b..e44a37e2c71c 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -33,6 +33,7 @@ extern const char linux_banner[];
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 #define ALIGN(x,a) (((x)+(a)-1UL)&~((a)-1UL))
 #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
 #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
 
 #define	KERN_EMERG	"<0>"	/* system is unusable */
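
The new macro rounds an integer division up rather than truncating; a quick userspace check, for illustration:

#include <stdio.h>

#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* 4096 bytes fit in exactly 4 blocks of 1024; 4097 need a 5th. */
	printf("%d\n", DIV_ROUND_UP(4096, 1024));	/* prints 4 */
	printf("%d\n", DIV_ROUND_UP(4097, 1024));	/* prints 5 */
	return 0;
}
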
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 72440f0a443d..09f0f575ddff 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -162,9 +162,9 @@ extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
 		unsigned long addr);
 extern unsigned slab_node(struct mempolicy *policy);
 
-extern int policy_zone;
+extern enum zone_type policy_zone;
 
-static inline void check_highest_zone(int k)
+static inline void check_highest_zone(enum zone_type k)
 {
 	if (k > policy_zone)
 		policy_zone = k;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 224178a000d2..856f0ee7e84a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -15,6 +15,7 @@
 #include <linux/fs.h>
 #include <linux/mutex.h>
 #include <linux/debug_locks.h>
+#include <linux/backing-dev.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -218,7 +219,8 @@ struct inode;
  * Each physical page in the system has a struct page associated with
  * it to keep track of whatever it is we are using the page for at the
  * moment. Note that we have no way to track which tasks are using
- * a page.
+ * a page, though if it is a pagecache page, rmap structures can tell us
+ * who is mapping it.
  */
 struct page {
 	unsigned long flags;		/* Atomic flags, some possibly
@@ -278,6 +280,12 @@ struct page {
  */
 #include <linux/page-flags.h>
 
+#ifdef CONFIG_DEBUG_VM
+#define VM_BUG_ON(cond) BUG_ON(cond)
+#else
+#define VM_BUG_ON(condition) do { } while(0)
+#endif
+
 /*
  * Methods to modify the page usage count.
  *
@@ -292,12 +300,11 @@
  */
 
 /*
- * Drop a ref, return true if the logical refcount fell to zero (the page has
- * no users)
+ * Drop a ref, return true if the refcount fell to zero (the page has no users)
  */
 static inline int put_page_testzero(struct page *page)
 {
-	BUG_ON(atomic_read(&page->_count) == 0);
+	VM_BUG_ON(atomic_read(&page->_count) == 0);
 	return atomic_dec_and_test(&page->_count);
 }
 
@@ -307,11 +314,10 @@ static inline int put_page_testzero(struct page *page)
  */
 static inline int get_page_unless_zero(struct page *page)
 {
+	VM_BUG_ON(PageCompound(page));
 	return atomic_inc_not_zero(&page->_count);
 }
 
-extern void FASTCALL(__page_cache_release(struct page *));
-
 static inline int page_count(struct page *page)
 {
 	if (unlikely(PageCompound(page)))
@@ -323,6 +329,7 @@ static inline void get_page(struct page *page)
 {
 	if (unlikely(PageCompound(page)))
 		page = (struct page *)page_private(page);
+	VM_BUG_ON(atomic_read(&page->_count) == 0);
 	atomic_inc(&page->_count);
 }
 
@@ -349,43 +356,55 @@ void split_page(struct page *page, unsigned int order);
 * For the non-reserved pages, page_count(page) denotes a reference count.
 * page_count() == 0 means the page is free. page->lru is then used for
 * freelist management in the buddy allocator.
- * page_count() == 1 means the page is used for exactly one purpose
- * (e.g. a private data page of one process).
+ * page_count() > 0  means the page has been allocated.
+ *
+ * Pages are allocated by the slab allocator in order to provide memory
+ * to kmalloc and kmem_cache_alloc. In this case, the management of the
+ * page, and the fields in 'struct page' are the responsibility of mm/slab.c
+ * unless a particular usage is carefully commented. (the responsibility of
+ * freeing the kmalloc memory is the caller's, of course).
 *
- * A page may be used for kmalloc() or anyone else who does a
- * __get_free_page(). In this case the page_count() is at least 1, and
- * all other fields are unused but should be 0 or NULL. The
- * management of this page is the responsibility of the one who uses
- * it.
+ * A page may be used by anyone else who does a __get_free_page().
+ * In this case, page_count still tracks the references, and should only
+ * be used through the normal accessor functions. The top bits of page->flags
+ * and page->virtual store page management information, but all other fields
+ * are unused and could be used privately, carefully. The management of this
+ * page is the responsibility of the one who allocated it, and those who have
+ * subsequently been given references to it.
 *
- * The other pages (we may call them "process pages") are completely
+ * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
- * A page may belong to an inode's memory mapping. In this case,
- * page->mapping is the pointer to the inode, and page->index is the
- * file offset of the page, in units of PAGE_CACHE_SIZE.
+ * A pagecache page contains an opaque `private' member, which belongs to the
+ * page's address_space. Usually, this is the address of a circular list of
+ * the page's disk buffers. PG_private must be set to tell the VM to call
+ * into the filesystem to release these pages.
 *
- * A page contains an opaque `private' member, which belongs to the
- * page's address_space. Usually, this is the address of a circular
- * list of the page's disk buffers.
+ * A page may belong to an inode's memory mapping. In this case, page->mapping
+ * is the pointer to the inode, and page->index is the file offset of the page,
+ * in units of PAGE_CACHE_SIZE.
 *
- * For pages belonging to inodes, the page_count() is the number of
- * attaches, plus 1 if `private' contains something, plus one for
- * the page cache itself.
+ * If pagecache pages are not associated with an inode, they are said to be
+ * anonymous pages. These may become associated with the swapcache, and in that
+ * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
- * Instead of keeping dirty/clean pages in per address-space lists, we instead
- * now tag pages as dirty/under writeback in the radix tree.
+ * In either case (swapcache or inode backed), the pagecache itself holds one
+ * reference to the page. Setting PG_private should also increment the
+ * refcount. Each user mapping also has a reference to the page.
 *
- * There is also a per-mapping radix tree mapping index to the page
- * in memory if present. The tree is rooted at mapping->root.
+ * The pagecache pages are stored in a per-mapping radix tree, which is
+ * rooted at mapping->page_tree, and indexed by offset.
+ * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
+ * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
- * All process pages can do I/O:
+ * All pagecache pages may be subject to I/O:
 *  - inode pages may need to be read from disk,
 *  - inode pages which have been modified and are MAP_SHARED may need
- *    to be written to disk,
- *  - private pages which have been modified may need to be swapped out
- *    to swap space and (later) to be read back into memory.
+ *    to be written back to the inode on disk,
+ *  - anonymous pages (including MAP_PRIVATE file mappings) which have been
+ *    modified may need to be swapped out to swap space and (later) to be read
+ *    back into memory.
 */
 
 /*
@@ -463,7 +482,7 @@ void split_page(struct page *page, unsigned int order);
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
 #define ZONETABLE_MASK		((1UL << ZONETABLE_SHIFT) - 1)
 
-static inline unsigned long page_zonenum(struct page *page)
+static inline enum zone_type page_zonenum(struct page *page)
 {
 	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 }
@@ -480,23 +499,29 @@ static inline struct zone *page_zone(struct page *page)
 	return zone_table[page_zone_id(page)];
 }
 
+static inline unsigned long zone_to_nid(struct zone *zone)
+{
+	return zone->zone_pgdat->node_id;
+}
+
 static inline unsigned long page_to_nid(struct page *page)
 {
 	if (FLAGS_HAS_NODE)
 		return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
 	else
-		return page_zone(page)->zone_pgdat->node_id;
+		return zone_to_nid(page_zone(page));
 }
 static inline unsigned long page_to_section(struct page *page)
 {
 	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
 }
 
-static inline void set_page_zone(struct page *page, unsigned long zone)
+static inline void set_page_zone(struct page *page, enum zone_type zone)
 {
 	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
 	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
 }
+
 static inline void set_page_node(struct page *page, unsigned long node)
 {
 	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
@@ -508,7 +533,7 @@ static inline void set_page_section(struct page *page, unsigned long section)
 	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
 }
 
-static inline void set_page_links(struct page *page, unsigned long zone,
+static inline void set_page_links(struct page *page, enum zone_type zone,
 	unsigned long node, unsigned long pfn)
 {
 	set_page_zone(page, zone);
@@ -802,6 +827,39 @@ struct shrinker;
 extern struct shrinker *set_shrinker(int, shrinker_t);
 extern void remove_shrinker(struct shrinker *shrinker);
 
+/*
+ * Some shared mappings will want the pages marked read-only
+ * to track write events. If so, we'll downgrade vm_page_prot
+ * to the private version (using protection_map[] without the
+ * VM_SHARED bit).
+ */
+static inline int vma_wants_writenotify(struct vm_area_struct *vma)
+{
+	unsigned int vm_flags = vma->vm_flags;
+
+	/* If it was private or non-writable, the write bit is already clear */
+	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
+		return 0;
+
+	/* The backer wishes to know when pages are first written to? */
+	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+		return 1;
+
+	/* The open routine did something to the protections already? */
+	if (pgprot_val(vma->vm_page_prot) !=
+	    pgprot_val(protection_map[vm_flags &
+		    (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
+		return 0;
+
+	/* Specialty mapping? */
+	if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
+		return 0;
+
+	/* Can the mapping track the dirty pages? */
+	return vma->vm_file && vma->vm_file->f_mapping &&
+		mapping_cap_account_dirty(vma->vm_file->f_mapping);
+}
+
 extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl));
 
 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
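
The VM_BUG_ON() introduced above compiles away entirely unless CONFIG_DEBUG_VM is set, so the sanity checks sprinkled through the refcounting helpers cost nothing on production builds. A userspace sketch of the same pattern (all names here are illustrative, not the kernel's):

#include <assert.h>
#include <stdio.h>

/* Flip this to 0 to simulate a non-debug (CONFIG_DEBUG_VM=n) build. */
#define DEBUG_VM 1

#if DEBUG_VM
#define VM_BUG_ON(cond) assert(!(cond))
#else
#define VM_BUG_ON(cond) do { } while (0)	/* zero cost when disabled */
#endif

static int refcount = 1;

static int put_ref(void)
{
	VM_BUG_ON(refcount == 0);	/* checked only on debug builds */
	return --refcount == 0;
}

int main(void)
{
	if (put_ref())
		printf("last reference dropped\n");
	return 0;
}
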
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f45163c528e8..3693f1a52788 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -51,7 +51,8 @@ enum zone_stat_item {
 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
 			   only modified from process context */
 	NR_FILE_PAGES,
-	NR_SLAB,	/* Pages used by slab allocator */
+	NR_SLAB_RECLAIMABLE,
+	NR_SLAB_UNRECLAIMABLE,
 	NR_PAGETABLE,	/* used for pagetables */
 	NR_FILE_DIRTY,
 	NR_WRITEBACK,
@@ -88,53 +89,68 @@ struct per_cpu_pageset {
 #define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
 #endif
 
-#define ZONE_DMA		0
-#define ZONE_DMA32		1
-#define ZONE_NORMAL		2
-#define ZONE_HIGHMEM		3
-
-#define MAX_NR_ZONES		4	/* Sync this with ZONES_SHIFT */
-#define ZONES_SHIFT		2	/* ceil(log2(MAX_NR_ZONES)) */
-
+enum zone_type {
+	/*
+	 * ZONE_DMA is used when there are devices that are not able
+	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
+	 * carve out the portion of memory that is needed for these devices.
+	 * The range is arch specific.
+	 *
+	 * Some examples
+	 *
+	 * Architecture		Limit
+	 * ---------------------------
+	 * parisc, ia64, sparc	<4G
+	 * s390			<2G
+	 * arm26		<48M
+	 * arm			Various
+	 * alpha		Unlimited or 0-16MB.
+	 *
+	 * i386, x86_64 and multiple other arches
+	 * 			<16M.
+	 */
+	ZONE_DMA,
+#ifdef CONFIG_ZONE_DMA32
+	/*
+	 * x86_64 needs two ZONE_DMAs because it supports devices that are
+	 * only able to do DMA to the lower 16M but also 32 bit devices that
+	 * can only do DMA areas below 4G.
+	 */
+	ZONE_DMA32,
+#endif
+	/*
+	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
+	 * performed on pages in ZONE_NORMAL if the DMA devices support
+	 * transfers to all addressable memory.
+	 */
+	ZONE_NORMAL,
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * A memory area that is only addressable by the kernel through
+	 * mapping portions into its own address space. This is for example
+	 * used by i386 to allow the kernel to address the memory beyond
+	 * 900MB. The kernel will set up special mappings (page
+	 * table entries on i386) for each page that the kernel needs to
+	 * access.
+	 */
+	ZONE_HIGHMEM,
+#endif
+	MAX_NR_ZONES
+};
 
 /*
  * When a memory allocation must conform to specific limitations (such
  * as being suitable for DMA) the caller will pass in hints to the
  * allocator in the gfp_mask, in the zone modifier bits.  These bits
  * are used to select a priority ordered list of memory zones which
- * match the requested limits. GFP_ZONEMASK defines which bits within
- * the gfp_mask should be considered as zone modifiers.  Each valid
- * combination of the zone modifier bits has a corresponding list
- * of zones (in node_zonelists).  Thus for two zone modifiers there
- * will be a maximum of 4 (2 ** 2) zonelists, for 3 modifiers there will
- * be 8 (2 ** 3) zonelists.  GFP_ZONETYPES defines the number of possible
- * combinations of zone modifiers in "zone modifier space".
- *
- * As an optimisation any zone modifier bits which are only valid when
- * no other zone modifier bits are set (loners) should be placed in
- * the highest order bits of this field.  This allows us to reduce the
- * extent of the zonelists thus saving space.  For example in the case
- * of three zone modifier bits, we could require up to eight zonelists.
- * If the left most zone modifier is a "loner" then the highest valid
- * zonelist would be four allowing us to allocate only five zonelists.
- * Use the first form for GFP_ZONETYPES when the left most bit is not
- * a "loner", otherwise use the second.
- *
- * NOTE! Make sure this matches the zones in <linux/gfp.h>
+ * match the requested limits. See gfp_zone() in include/linux/gfp.h
  */
-#define GFP_ZONEMASK	0x07
-/* #define GFP_ZONETYPES	(GFP_ZONEMASK + 1) */		/* Non-loner */
-#define GFP_ZONETYPES  ((GFP_ZONEMASK + 1) / 2 + 1)		/* Loner */
 
-/*
- * On machines where it is needed (eg PCs) we divide physical memory
- * into multiple physical zones. On a 32bit PC we have 4 zones:
- *
- * ZONE_DMA	  < 16 MB	ISA DMA capable memory
- * ZONE_DMA32	     0 MB	Empty
- * ZONE_NORMAL	16-896 MB	direct mapped by the kernel
- * ZONE_HIGHMEM	 > 896 MB	only page cache and user processes
- */
+#if !defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_HIGHMEM)
+#define ZONES_SHIFT 1
+#else
+#define ZONES_SHIFT 2
+#endif
 
 struct zone {
 	/* Fields commonly accessed by the page allocator */
@@ -154,7 +170,8 @@ struct zone {
 	/*
 	 * zone reclaim becomes active if more unmapped pages exist.
 	 */
-	unsigned long		min_unmapped_ratio;
+	unsigned long		min_unmapped_pages;
+	unsigned long		min_slab_pages;
 	struct per_cpu_pageset	*pageset[NR_CPUS];
 #else
 	struct per_cpu_pageset	pageset[NR_CPUS];
@@ -266,7 +283,6 @@ struct zone {
 	char			*name;
 } ____cacheline_internodealigned_in_smp;
 
-
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
@@ -304,7 +320,7 @@ struct zonelist {
 struct bootmem_data;
 typedef struct pglist_data {
 	struct zone node_zones[MAX_NR_ZONES];
-	struct zonelist node_zonelists[GFP_ZONETYPES];
+	struct zonelist node_zonelists[MAX_NR_ZONES];
 	int nr_zones;
 #ifdef CONFIG_FLAT_NODE_MEM_MAP
 	struct page *node_mem_map;
@@ -373,12 +389,16 @@ static inline int populated_zone(struct zone *zone)
 	return (!!zone->present_pages);
 }
 
-static inline int is_highmem_idx(int idx)
+static inline int is_highmem_idx(enum zone_type idx)
 {
+#ifdef CONFIG_HIGHMEM
 	return (idx == ZONE_HIGHMEM);
+#else
+	return 0;
+#endif
 }
 
-static inline int is_normal_idx(int idx)
+static inline int is_normal_idx(enum zone_type idx)
 {
 	return (idx == ZONE_NORMAL);
 }
@@ -391,7 +411,11 @@ static inline int is_normal_idx(int idx)
  */
 static inline int is_highmem(struct zone *zone)
 {
+#ifdef CONFIG_HIGHMEM
 	return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
+#else
+	return 0;
+#endif
 }
 
 static inline int is_normal(struct zone *zone)
@@ -401,7 +425,11 @@ static inline int is_normal(struct zone *zone)
 
 static inline int is_dma32(struct zone *zone)
 {
+#ifdef CONFIG_ZONE_DMA32
 	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
+#else
+	return 0;
+#endif
 }
 
 static inline int is_dma(struct zone *zone)
@@ -421,6 +449,8 @@ int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file
 					void __user *, size_t *, loff_t *);
 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
 			struct file *, void __user *, size_t *, loff_t *);
+int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
+			struct file *, void __user *, size_t *, loff_t *);
 
 #include <linux/topology.h>
 /* Returns the number of the current Node. */
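
One consequence of the new enum, shown below with a standalone userspace mock: MAX_NR_ZONES is no longer hard-coded to 4 but is whatever the configured members add up to, so arrays sized by it (like node_zonelists above) shrink automatically on configurations without DMA32 or HIGHMEM:

#include <stdio.h>

/* Comment these out to mimic configs without the optional zones. */
#define CONFIG_ZONE_DMA32
#define CONFIG_HIGHMEM

enum zone_type {
	ZONE_DMA,
#ifdef CONFIG_ZONE_DMA32
	ZONE_DMA32,
#endif
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	ZONE_HIGHMEM,
#endif
	MAX_NR_ZONES		/* tracks the configured members */
};

int main(void)
{
	int counts[MAX_NR_ZONES] = { 0 };	/* sized by configuration */

	printf("MAX_NR_ZONES = %d\n", MAX_NR_ZONES);
	printf("per-node array uses %zu bytes\n", sizeof(counts));
	return 0;
}
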
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index 9a285cecf249..312bd2ffee33 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -10,6 +10,8 @@ header-y += xt_connmark.h
 header-y += xt_CONNMARK.h
 header-y += xt_conntrack.h
 header-y += xt_dccp.h
+header-y += xt_dscp.h
+header-y += xt_DSCP.h
 header-y += xt_esp.h
 header-y += xt_helper.h
 header-y += xt_length.h
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 5748642e9f36..9d7921dd50f0 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
@@ -13,24 +13,25 @@ | |||
13 | * PG_reserved is set for special pages, which can never be swapped out. Some | 13 | * PG_reserved is set for special pages, which can never be swapped out. Some |
14 | * of them might not even exist (eg empty_bad_page)... | 14 | * of them might not even exist (eg empty_bad_page)... |
15 | * | 15 | * |
16 | * The PG_private bitflag is set if page->private contains a valid value. | 16 | * The PG_private bitflag is set on pagecache pages if they contain filesystem |
17 | * specific data (which is normally at page->private). It can be used by | ||
18 | * private allocations for its own usage. | ||
17 | * | 19 | * |
18 | * During disk I/O, PG_locked is used. This bit is set before I/O and | 20 | * During initiation of disk I/O, PG_locked is set. This bit is set before I/O |
19 | * reset when I/O completes. page_waitqueue(page) is a wait queue of all tasks | 21 | * and cleared when writeback _starts_ or when read _completes_. PG_writeback |
20 | * waiting for the I/O on this page to complete. | 22 | * is set before writeback starts and cleared when it finishes. |
23 | * | ||
24 | * PG_locked also pins a page in pagecache, and blocks truncation of the file | ||
25 | * while it is held. | ||
26 | * | ||
27 | * page_waitqueue(page) is a wait queue of all tasks waiting for the page | ||
28 | * to become unlocked. | ||
21 | * | 29 | * |
22 | * PG_uptodate tells whether the page's contents is valid. When a read | 30 | * PG_uptodate tells whether the page's contents is valid. When a read |
23 | * completes, the page becomes uptodate, unless a disk I/O error happened. | 31 | * completes, the page becomes uptodate, unless a disk I/O error happened. |
24 | * | 32 | * |
25 | * For choosing which pages to swap out, inode pages carry a PG_referenced bit, | 33 | * PG_referenced, PG_reclaim are used for page reclaim for anonymous and |
26 | * which is set any time the system accesses that page through the (mapping, | 34 | * file-backed pagecache (see mm/vmscan.c). |
27 | * index) hash table. This referenced bit, together with the referenced bit | ||
28 | * in the page tables, is used to manipulate page->age and move the page across | ||
29 | * the active, inactive_dirty and inactive_clean lists. | ||
30 | * | ||
31 | * Note that the referenced bit, the page->lru list_head and the active, | ||
32 | * inactive_dirty and inactive_clean lists are protected by the | ||
33 | * zone->lru_lock, and *NOT* by the usual PG_locked bit! | ||
34 | * | 35 | * |
35 | * PG_error is set to indicate that an I/O error occurred on this page. | 36 | * PG_error is set to indicate that an I/O error occurred on this page. |
36 | * | 37 | * |
@@ -42,6 +43,10 @@ | |||
42 | * space, they need to be kmapped separately for doing IO on the pages. The | 43 | * space, they need to be kmapped separately for doing IO on the pages. The |
43 | * struct page (which holds these flag bits) is always mapped into kernel | 44 | * struct page (which holds these flag bits) is always mapped into kernel |
44 | * address space... | 45 | * address space... |
46 | * | ||
47 | * PG_buddy is set to indicate that the page is free and in the buddy system | ||
48 | * (see mm/page_alloc.c). | ||
49 | * | ||
45 | */ | 50 | */ |
46 | 51 | ||
47 | /* | 52 | /* |
@@ -74,7 +79,7 @@ | |||
74 | #define PG_checked 8 /* kill me in 2.5.<early>. */ | 79 | #define PG_checked 8 /* kill me in 2.5.<early>. */ |
75 | #define PG_arch_1 9 | 80 | #define PG_arch_1 9 |
76 | #define PG_reserved 10 | 81 | #define PG_reserved 10 |
77 | #define PG_private 11 /* Has something at ->private */ | 82 | #define PG_private 11 /* If pagecache, has fs-private data */ |
78 | 83 | ||
79 | #define PG_writeback 12 /* Page is under writeback */ | 84 | #define PG_writeback 12 /* Page is under writeback */ |
80 | #define PG_nosave 13 /* Used for system suspend/resume */ | 85 | #define PG_nosave 13 /* Used for system suspend/resume */ |
@@ -83,7 +88,7 @@ | |||
83 | 88 | ||
84 | #define PG_mappedtodisk 16 /* Has blocks allocated on-disk */ | 89 | #define PG_mappedtodisk 16 /* Has blocks allocated on-disk */ |
85 | #define PG_reclaim 17 /* To be reclaimed asap */ | 90 | #define PG_reclaim 17 /* To be reclaimed asap */ |
86 | #define PG_nosave_free 18 /* Free, should not be written */ | 91 | #define PG_nosave_free 18 /* Used for system suspend/resume */ |
87 | #define PG_buddy 19 /* Page is free, on buddy lists */ | 92 | #define PG_buddy 19 /* Page is free, on buddy lists */ |
88 | 93 | ||
89 | 94 | ||
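For orientation, these PG_* constants are consumed through test/set/clear wrapper macros built on the atomic bitops; a minimal sketch of that pattern, reduced to a single flag rather than the full set the header really defines:

        /* Illustrative reduction of the page-flags accessor pattern. */
        #define PagePrivate(page)       test_bit(PG_private, &(page)->flags)
        #define SetPagePrivate(page)    set_bit(PG_private, &(page)->flags)
        #define ClearPagePrivate(page)  clear_bit(PG_private, &(page)->flags)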
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 0a2f5d27f60e..64f950925151 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
@@ -130,14 +130,29 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma, | |||
130 | } | 130 | } |
131 | 131 | ||
132 | extern void FASTCALL(__lock_page(struct page *page)); | 132 | extern void FASTCALL(__lock_page(struct page *page)); |
133 | extern void FASTCALL(__lock_page_nosync(struct page *page)); | ||
133 | extern void FASTCALL(unlock_page(struct page *page)); | 134 | extern void FASTCALL(unlock_page(struct page *page)); |
134 | 135 | ||
136 | /* | ||
137 | * lock_page may only be called if we have the page's inode pinned. | ||
138 | */ | ||
135 | static inline void lock_page(struct page *page) | 139 | static inline void lock_page(struct page *page) |
136 | { | 140 | { |
137 | might_sleep(); | 141 | might_sleep(); |
138 | if (TestSetPageLocked(page)) | 142 | if (TestSetPageLocked(page)) |
139 | __lock_page(page); | 143 | __lock_page(page); |
140 | } | 144 | } |
145 | |||
146 | /* | ||
147 | * lock_page_nosync should only be used if we can't pin the page's inode. | ||
148 | * Doesn't play quite so well with block device plugging. | ||
149 | */ | ||
150 | static inline void lock_page_nosync(struct page *page) | ||
151 | { | ||
152 | might_sleep(); | ||
153 | if (TestSetPageLocked(page)) | ||
154 | __lock_page_nosync(page); | ||
155 | } | ||
141 | 156 | ||
142 | /* | 157 | /* |
143 | * This is exported only for wait_on_page_locked/wait_on_page_writeback. | 158 | * This is exported only for wait_on_page_locked/wait_on_page_writeback. |
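As a usage sketch (the caller below is hypothetical): per the comments above, lock_page() is the normal choice when the page's inode is pinned, while lock_page_nosync() is reserved for callers that cannot guarantee that:

        /* Hypothetical caller holding a reference that pins the inode. */
        static void update_page(struct page *page)
        {
                lock_page(page);        /* may sleep; also blocks truncation */
                /* ... modify the page while it cannot go away ... */
                unlock_page(page);
        }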
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index cb9039a21f2a..3835a9642f13 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -1,9 +1,12 @@ | |||
1 | #ifndef __LINUX_PERCPU_H | 1 | #ifndef __LINUX_PERCPU_H |
2 | #define __LINUX_PERCPU_H | 2 | #define __LINUX_PERCPU_H |
3 | |||
3 | #include <linux/spinlock.h> /* For preempt_disable() */ | 4 | #include <linux/spinlock.h> /* For preempt_disable() */ |
4 | #include <linux/slab.h> /* For kmalloc() */ | 5 | #include <linux/slab.h> /* For kmalloc() */ |
5 | #include <linux/smp.h> | 6 | #include <linux/smp.h> |
6 | #include <linux/string.h> /* For memset() */ | 7 | #include <linux/string.h> /* For memset() */ |
8 | #include <linux/cpumask.h> | ||
9 | |||
7 | #include <asm/percpu.h> | 10 | #include <asm/percpu.h> |
8 | 11 | ||
9 | /* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */ | 12 | /* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */ |
@@ -11,8 +14,14 @@ | |||
11 | #define PERCPU_ENOUGH_ROOM 32768 | 14 | #define PERCPU_ENOUGH_ROOM 32768 |
12 | #endif | 15 | #endif |
13 | 16 | ||
14 | /* Must be an lvalue. */ | 17 | /* |
15 | #define get_cpu_var(var) (*({ preempt_disable(); &__get_cpu_var(var); })) | 18 | * Must be an lvalue. Since @var must be a simple identifier, |
19 | * we force a syntax error here if it isn't. | ||
20 | */ | ||
21 | #define get_cpu_var(var) (*({ \ | ||
22 | extern int simple_indentifier_##var(void); \ | ||
23 | preempt_disable(); \ | ||
24 | &__get_cpu_var(var); })) | ||
16 | #define put_cpu_var(var) preempt_enable() | 25 | #define put_cpu_var(var) preempt_enable() |
17 | 26 | ||
18 | #ifdef CONFIG_SMP | 27 | #ifdef CONFIG_SMP |
@@ -21,39 +30,77 @@ struct percpu_data { | |||
21 | void *ptrs[NR_CPUS]; | 30 | void *ptrs[NR_CPUS]; |
22 | }; | 31 | }; |
23 | 32 | ||
33 | #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) | ||
24 | /* | 34 | /* |
25 | * Use this to get to a cpu's version of the per-cpu object allocated using | 35 | * Use this to get to a cpu's version of the per-cpu object dynamically |
26 | * alloc_percpu. Non-atomic access to the current CPU's version should | 36 | * allocated. Non-atomic access to the current CPU's version should |
27 | * probably be combined with get_cpu()/put_cpu(). | 37 | * probably be combined with get_cpu()/put_cpu(). |
28 | */ | 38 | */ |
29 | #define per_cpu_ptr(ptr, cpu) \ | 39 | #define percpu_ptr(ptr, cpu) \ |
30 | ({ \ | 40 | ({ \ |
31 | struct percpu_data *__p = (struct percpu_data *)~(unsigned long)(ptr); \ | 41 | struct percpu_data *__p = __percpu_disguise(ptr); \ |
32 | (__typeof__(ptr))__p->ptrs[(cpu)]; \ | 42 | (__typeof__(ptr))__p->ptrs[(cpu)]; \ |
33 | }) | 43 | }) |
34 | 44 | ||
35 | extern void *__alloc_percpu(size_t size); | 45 | extern void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu); |
36 | extern void free_percpu(const void *); | 46 | extern void percpu_depopulate(void *__pdata, int cpu); |
47 | extern int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, | ||
48 | cpumask_t *mask); | ||
49 | extern void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask); | ||
50 | extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask); | ||
51 | extern void percpu_free(void *__pdata); | ||
37 | 52 | ||
38 | #else /* CONFIG_SMP */ | 53 | #else /* CONFIG_SMP */ |
39 | 54 | ||
40 | #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) | 55 | #define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) |
56 | |||
57 | static inline void percpu_depopulate(void *__pdata, int cpu) | ||
58 | { | ||
59 | } | ||
60 | |||
61 | static inline void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask) | ||
62 | { | ||
63 | } | ||
41 | 64 | ||
42 | static inline void *__alloc_percpu(size_t size) | 65 | static inline void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, |
66 | int cpu) | ||
43 | { | 67 | { |
44 | void *ret = kmalloc(size, GFP_KERNEL); | 68 | return percpu_ptr(__pdata, cpu); |
45 | if (ret) | ||
46 | memset(ret, 0, size); | ||
47 | return ret; | ||
48 | } | 69 | } |
49 | static inline void free_percpu(const void *ptr) | 70 | |
50 | { | 71 | static inline int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, |
51 | kfree(ptr); | 72 | cpumask_t *mask) |
73 | { | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) | ||
78 | { | ||
79 | return kzalloc(size, gfp); | ||
80 | } | ||
81 | |||
82 | static inline void percpu_free(void *__pdata) | ||
83 | { | ||
84 | kfree(__pdata); | ||
52 | } | 85 | } |
53 | 86 | ||
54 | #endif /* CONFIG_SMP */ | 87 | #endif /* CONFIG_SMP */ |
55 | 88 | ||
56 | /* Simple wrapper for the common case: zeros memory. */ | 89 | #define percpu_populate_mask(__pdata, size, gfp, mask) \ |
57 | #define alloc_percpu(type) ((type *)(__alloc_percpu(sizeof(type)))) | 90 | __percpu_populate_mask((__pdata), (size), (gfp), &(mask)) |
91 | #define percpu_depopulate_mask(__pdata, mask) \ | ||
92 | __percpu_depopulate_mask((__pdata), &(mask)) | ||
93 | #define percpu_alloc_mask(size, gfp, mask) \ | ||
94 | __percpu_alloc_mask((size), (gfp), &(mask)) | ||
95 | |||
96 | #define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map) | ||
97 | |||
98 | /* (legacy) interface for use without CPU hotplug handling */ | ||
99 | |||
100 | #define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \ | ||
101 | cpu_possible_map) | ||
102 | #define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type)) | ||
103 | #define free_percpu(ptr) percpu_free((ptr)) | ||
104 | #define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu)) | ||
58 | 105 | ||
59 | #endif /* __LINUX_PERCPU_H */ | 106 | #endif /* __LINUX_PERCPU_H */ |
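To make the renamed interfaces concrete, a small usage sketch (variable names are hypothetical) combining the static DEFINE_PER_CPU path with dynamic per-cpu data, using the get_cpu()/put_cpu() pairing the comment above recommends:

        /* Hypothetical per-cpu users. */
        DEFINE_PER_CPU(unsigned long, hit_count);

        static void note_hit(void)
        {
                get_cpu_var(hit_count)++;       /* disables preemption */
                put_cpu_var(hit_count);         /* re-enables it */
        }

        static void note_dynamic_hit(unsigned long *counters)
        {
                int cpu = get_cpu();            /* pin to this cpu */
                (*per_cpu_ptr(counters, cpu))++;
                put_cpu();
        }

Here counters would come from alloc_percpu(unsigned long) and be released with free_percpu().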
diff --git a/include/linux/resume-trace.h b/include/linux/resume-trace.h index a376bd4ade39..81e9299ca148 100644 --- a/include/linux/resume-trace.h +++ b/include/linux/resume-trace.h | |||
@@ -3,21 +3,25 @@ | |||
3 | 3 | ||
4 | #ifdef CONFIG_PM_TRACE | 4 | #ifdef CONFIG_PM_TRACE |
5 | 5 | ||
6 | extern int pm_trace_enabled; | ||
7 | |||
6 | struct device; | 8 | struct device; |
7 | extern void set_trace_device(struct device *); | 9 | extern void set_trace_device(struct device *); |
8 | extern void generate_resume_trace(void *tracedata, unsigned int user); | 10 | extern void generate_resume_trace(void *tracedata, unsigned int user); |
9 | 11 | ||
10 | #define TRACE_DEVICE(dev) set_trace_device(dev) | 12 | #define TRACE_DEVICE(dev) set_trace_device(dev) |
11 | #define TRACE_RESUME(user) do { \ | 13 | #define TRACE_RESUME(user) do { \ |
12 | void *tracedata; \ | 14 | if (pm_trace_enabled) { \ |
13 | asm volatile("movl $1f,%0\n" \ | 15 | void *tracedata; \ |
14 | ".section .tracedata,\"a\"\n" \ | 16 | asm volatile("movl $1f,%0\n" \ |
15 | "1:\t.word %c1\n" \ | 17 | ".section .tracedata,\"a\"\n" \ |
16 | "\t.long %c2\n" \ | 18 | "1:\t.word %c1\n" \ |
17 | ".previous" \ | 19 | "\t.long %c2\n" \ |
18 | :"=r" (tracedata) \ | 20 | ".previous" \ |
19 | : "i" (__LINE__), "i" (__FILE__)); \ | 21 | :"=r" (tracedata) \ |
20 | generate_resume_trace(tracedata, user); \ | 22 | : "i" (__LINE__), "i" (__FILE__)); \ |
23 | generate_resume_trace(tracedata, user); \ | ||
24 | } \ | ||
21 | } while (0) | 25 | } while (0) |
22 | 26 | ||
23 | #else | 27 | #else |
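A hedged sketch of the intended call pattern (the function below is hypothetical): now that the hooks check pm_trace_enabled at run time, they can stay compiled into a resume path unconditionally:

        /* Hypothetical resume step instrumented with the PM trace hooks. */
        static int resume_one(struct device *dev)
        {
                TRACE_DEVICE(dev);      /* record which device is resuming */
                TRACE_RESUME(0);        /* stamp this file/line into .tracedata */
                /* ... perform the actual resume work ... */
                return 0;
        }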
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index bf97b0900014..db2c1df4fef9 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h | |||
@@ -103,6 +103,14 @@ pte_t *page_check_address(struct page *, struct mm_struct *, | |||
103 | */ | 103 | */ |
104 | unsigned long page_address_in_vma(struct page *, struct vm_area_struct *); | 104 | unsigned long page_address_in_vma(struct page *, struct vm_area_struct *); |
105 | 105 | ||
106 | /* | ||
107 | * Cleans the PTEs of shared mappings. | ||
108 | * (and since clean PTEs should also be readonly, write protects them too) | ||
109 | * | ||
110 | * returns the number of cleaned PTEs. | ||
111 | */ | ||
112 | int page_mkclean(struct page *); | ||
113 | |||
106 | #else /* !CONFIG_MMU */ | 114 | #else /* !CONFIG_MMU */ |
107 | 115 | ||
108 | #define anon_vma_init() do {} while (0) | 116 | #define anon_vma_init() do {} while (0) |
@@ -112,6 +120,12 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *); | |||
112 | #define page_referenced(page,l) TestClearPageReferenced(page) | 120 | #define page_referenced(page,l) TestClearPageReferenced(page) |
113 | #define try_to_unmap(page, refs) SWAP_FAIL | 121 | #define try_to_unmap(page, refs) SWAP_FAIL |
114 | 122 | ||
123 | static inline int page_mkclean(struct page *page) | ||
124 | { | ||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | |||
115 | #endif /* CONFIG_MMU */ | 129 | #endif /* CONFIG_MMU */ |
116 | 130 | ||
117 | /* | 131 | /* |
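A usage sketch grounded in the comment above (the caller is hypothetical): the return value tells the caller whether any writable PTEs were actually cleaned and write-protected, i.e. whether the page data still needs writing:

        /* Hypothetical caller: clean PTEs before writing the page back. */
        static void prepare_writeback(struct page *page)
        {
                if (page_mkclean(page))
                        /* dirty PTEs existed, so the page must be written */
                        set_page_dirty(page);
        }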
diff --git a/include/linux/selinux.h b/include/linux/selinux.h index aad4e390d6a5..d1b7ca6c1c57 100644 --- a/include/linux/selinux.h +++ b/include/linux/selinux.h | |||
@@ -46,7 +46,7 @@ void selinux_audit_rule_free(struct selinux_audit_rule *rule); | |||
46 | 46 | ||
47 | /** | 47 | /** |
48 | * selinux_audit_rule_match - determine if a context ID matches a rule. | 48 | * selinux_audit_rule_match - determine if a context ID matches a rule. |
49 | * @ctxid: the context ID to check | 49 | * @sid: the context ID to check |
50 | * @field: the field this rule refers to | 50 | * @field: the field this rule refers to |
51 | * @op: the operator the rule uses | 51 | * @op: the operator the rule uses |
52 | * @rule: pointer to the audit rule to check against | 52 | * @rule: pointer to the audit rule to check against |
@@ -55,7 +55,7 @@ void selinux_audit_rule_free(struct selinux_audit_rule *rule); | |||
55 | * Returns 1 if the context id matches the rule, 0 if it does not, and | 55 | * Returns 1 if the context id matches the rule, 0 if it does not, and |
56 | * -errno on failure. | 56 | * -errno on failure. |
57 | */ | 57 | */ |
58 | int selinux_audit_rule_match(u32 ctxid, u32 field, u32 op, | 58 | int selinux_audit_rule_match(u32 sid, u32 field, u32 op, |
59 | struct selinux_audit_rule *rule, | 59 | struct selinux_audit_rule *rule, |
60 | struct audit_context *actx); | 60 | struct audit_context *actx); |
61 | 61 | ||
@@ -70,18 +70,8 @@ int selinux_audit_rule_match(u32 ctxid, u32 field, u32 op, | |||
70 | void selinux_audit_set_callback(int (*callback)(void)); | 70 | void selinux_audit_set_callback(int (*callback)(void)); |
71 | 71 | ||
72 | /** | 72 | /** |
73 | * selinux_task_ctxid - determine a context ID for a process. | 73 | * selinux_sid_to_string - map a security context ID to a string |
74 | * @tsk: the task object | 74 | * @sid: security context ID to be converted. |
75 | * @ctxid: ID value returned via this | ||
76 | * | ||
77 | * On return, ctxid will contain an ID for the context. This value | ||
78 | * should only be used opaquely. | ||
79 | */ | ||
80 | void selinux_task_ctxid(struct task_struct *tsk, u32 *ctxid); | ||
81 | |||
82 | /** | ||
83 | * selinux_ctxid_to_string - map a security context ID to a string | ||
84 | * @ctxid: security context ID to be converted. | ||
85 | * @ctx: address of context string to be returned | 75 | * @ctx: address of context string to be returned |
86 | * @ctxlen: length of returned context string. | 76 | * @ctxlen: length of returned context string. |
87 | * | 77 | * |
@@ -89,7 +79,7 @@ void selinux_task_ctxid(struct task_struct *tsk, u32 *ctxid); | |||
89 | * string will be allocated internally, and the caller must call | 79 | * string will be allocated internally, and the caller must call |
90 | * kfree() on it after use. | 80 | * kfree() on it after use. |
91 | */ | 81 | */ |
92 | int selinux_ctxid_to_string(u32 ctxid, char **ctx, u32 *ctxlen); | 82 | int selinux_sid_to_string(u32 sid, char **ctx, u32 *ctxlen); |
93 | 83 | ||
94 | /** | 84 | /** |
95 | * selinux_get_inode_sid - get the inode's security context ID | 85 | * selinux_get_inode_sid - get the inode's security context ID |
@@ -154,7 +144,7 @@ static inline void selinux_audit_rule_free(struct selinux_audit_rule *rule) | |||
154 | return; | 144 | return; |
155 | } | 145 | } |
156 | 146 | ||
157 | static inline int selinux_audit_rule_match(u32 ctxid, u32 field, u32 op, | 147 | static inline int selinux_audit_rule_match(u32 sid, u32 field, u32 op, |
158 | struct selinux_audit_rule *rule, | 148 | struct selinux_audit_rule *rule, |
159 | struct audit_context *actx) | 149 | struct audit_context *actx) |
160 | { | 150 | { |
@@ -166,12 +156,7 @@ static inline void selinux_audit_set_callback(int (*callback)(void)) | |||
166 | return; | 156 | return; |
167 | } | 157 | } |
168 | 158 | ||
169 | static inline void selinux_task_ctxid(struct task_struct *tsk, u32 *ctxid) | 159 | static inline int selinux_sid_to_string(u32 sid, char **ctx, u32 *ctxlen) |
170 | { | ||
171 | *ctxid = 0; | ||
172 | } | ||
173 | |||
174 | static inline int selinux_ctxid_to_string(u32 ctxid, char **ctx, u32 *ctxlen) | ||
175 | { | 160 | { |
176 | *ctx = NULL; | 161 | *ctx = NULL; |
177 | *ctxlen = 0; | 162 | *ctxlen = 0; |
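A short usage sketch of the renamed helper (the caller is hypothetical); as the kerneldoc above notes, the returned string is allocated internally and the caller must kfree() it:

        /* Hypothetical consumer of selinux_sid_to_string(). */
        static void log_sid(u32 sid)
        {
                char *ctx;
                u32 len;

                if (selinux_sid_to_string(sid, &ctx, &len) == 0) {
                        printk(KERN_INFO "scontext=%s\n", ctx);
                        kfree(ctx);     /* caller owns the returned string */
                }
        }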
diff --git a/include/linux/slab.h b/include/linux/slab.h index 45ad55b70d1c..66d6eb78d1c6 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -67,7 +67,6 @@ extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t); | |||
67 | extern void kmem_cache_free(kmem_cache_t *, void *); | 67 | extern void kmem_cache_free(kmem_cache_t *, void *); |
68 | extern unsigned int kmem_cache_size(kmem_cache_t *); | 68 | extern unsigned int kmem_cache_size(kmem_cache_t *); |
69 | extern const char *kmem_cache_name(kmem_cache_t *); | 69 | extern const char *kmem_cache_name(kmem_cache_t *); |
70 | extern kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags); | ||
71 | 70 | ||
72 | /* Size description struct for general caches. */ | 71 | /* Size description struct for general caches. */ |
73 | struct cache_sizes { | 72 | struct cache_sizes { |
@@ -203,7 +202,30 @@ extern int slab_is_available(void); | |||
203 | 202 | ||
204 | #ifdef CONFIG_NUMA | 203 | #ifdef CONFIG_NUMA |
205 | extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node); | 204 | extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node); |
206 | extern void *kmalloc_node(size_t size, gfp_t flags, int node); | 205 | extern void *__kmalloc_node(size_t size, gfp_t flags, int node); |
206 | |||
207 | static inline void *kmalloc_node(size_t size, gfp_t flags, int node) | ||
208 | { | ||
209 | if (__builtin_constant_p(size)) { | ||
210 | int i = 0; | ||
211 | #define CACHE(x) \ | ||
212 | if (size <= x) \ | ||
213 | goto found; \ | ||
214 | else \ | ||
215 | i++; | ||
216 | #include "kmalloc_sizes.h" | ||
217 | #undef CACHE | ||
218 | { | ||
219 | extern void __you_cannot_kmalloc_that_much(void); | ||
220 | __you_cannot_kmalloc_that_much(); | ||
221 | } | ||
222 | found: | ||
223 | return kmem_cache_alloc_node((flags & GFP_DMA) ? | ||
224 | malloc_sizes[i].cs_dmacachep : | ||
225 | malloc_sizes[i].cs_cachep, flags, node); | ||
226 | } | ||
227 | return __kmalloc_node(size, flags, node); | ||
228 | } | ||
207 | #else | 229 | #else |
208 | static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node) | 230 | static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node) |
209 | { | 231 | { |
@@ -223,7 +245,6 @@ extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)); | |||
223 | /* SLOB allocator routines */ | 245 | /* SLOB allocator routines */ |
224 | 246 | ||
225 | void kmem_cache_init(void); | 247 | void kmem_cache_init(void); |
226 | struct kmem_cache *kmem_find_general_cachep(size_t, gfp_t gfpflags); | ||
227 | struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t, | 248 | struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t, |
228 | unsigned long, | 249 | unsigned long, |
229 | void (*)(void *, struct kmem_cache *, unsigned long), | 250 | void (*)(void *, struct kmem_cache *, unsigned long), |
@@ -263,8 +284,6 @@ extern kmem_cache_t *fs_cachep; | |||
263 | extern kmem_cache_t *sighand_cachep; | 284 | extern kmem_cache_t *sighand_cachep; |
264 | extern kmem_cache_t *bio_cachep; | 285 | extern kmem_cache_t *bio_cachep; |
265 | 286 | ||
266 | extern atomic_t slab_reclaim_pages; | ||
267 | |||
268 | #endif /* __KERNEL__ */ | 287 | #endif /* __KERNEL__ */ |
269 | 288 | ||
270 | #endif /* _LINUX_SLAB_H */ | 289 | #endif /* _LINUX_SLAB_H */ |
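To illustrate what the new inline buys (the struct name is hypothetical): when the size is a compile-time constant, the CACHE(x) scan above is folded away by the compiler and the call becomes a direct kmem_cache_alloc_node() on the matching general cache:

        struct my_obj {
                int id;
                char name[56];
        };

        static struct my_obj *alloc_obj_on(int node)
        {
                /* sizeof() is constant, so no runtime size lookup happens */
                return kmalloc_node(sizeof(struct my_obj), GFP_KERNEL, node);
        }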
diff --git a/include/linux/smp.h b/include/linux/smp.h index 837e8bce1349..51649987f691 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
@@ -53,6 +53,9 @@ extern void smp_cpus_done(unsigned int max_cpus); | |||
53 | */ | 53 | */ |
54 | int smp_call_function(void(*func)(void *info), void *info, int retry, int wait); | 54 | int smp_call_function(void(*func)(void *info), void *info, int retry, int wait); |
55 | 55 | ||
56 | int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, | ||
57 | int retry, int wait); | ||
58 | |||
56 | /* | 59 | /* |
57 | * Call a function on all processors | 60 | * Call a function on all processors |
58 | */ | 61 | */ |
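A hedged example of the newly declared single-CPU cross call (handler and payload are hypothetical); the trailing arguments follow smp_call_function(): retry, then wait-for-completion:

        /* Hypothetical cross-call: run a handler on one specific cpu. */
        static void fetch_value(void *info)
        {
                unsigned long *val = info;
                /* ... read some per-cpu state into *val ... */
        }

        static unsigned long value_on(int cpu)
        {
                unsigned long val = 0;

                /* retry=0, wait=1: return only after the handler ran on 'cpu' */
                smp_call_function_single(cpu, fetch_value, &val, 0, 1);
                return val;
        }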
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 96e31aa64cc7..b1237f16ecde 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -10,29 +10,11 @@ | |||
10 | #include <linux/pm.h> | 10 | #include <linux/pm.h> |
11 | 11 | ||
12 | /* page backup entry */ | 12 | /* page backup entry */ |
13 | typedef struct pbe { | 13 | struct pbe { |
14 | unsigned long address; /* address of the copy */ | 14 | unsigned long address; /* address of the copy */ |
15 | unsigned long orig_address; /* original address of page */ | 15 | unsigned long orig_address; /* original address of page */ |
16 | struct pbe *next; | 16 | struct pbe *next; |
17 | } suspend_pagedir_t; | 17 | }; |
18 | |||
19 | #define for_each_pbe(pbe, pblist) \ | ||
20 | for (pbe = pblist ; pbe ; pbe = pbe->next) | ||
21 | |||
22 | #define PBES_PER_PAGE (PAGE_SIZE/sizeof(struct pbe)) | ||
23 | #define PB_PAGE_SKIP (PBES_PER_PAGE-1) | ||
24 | |||
25 | #define for_each_pb_page(pbe, pblist) \ | ||
26 | for (pbe = pblist ; pbe ; pbe = (pbe+PB_PAGE_SKIP)->next) | ||
27 | |||
28 | |||
29 | #define SWAP_FILENAME_MAXLENGTH 32 | ||
30 | |||
31 | |||
32 | extern dev_t swsusp_resume_device; | ||
33 | |||
34 | /* mm/vmscan.c */ | ||
35 | extern int shrink_mem(void); | ||
36 | 18 | ||
37 | /* mm/page_alloc.c */ | 19 | /* mm/page_alloc.c */ |
38 | extern void drain_local_pages(void); | 20 | extern void drain_local_pages(void); |
@@ -53,18 +35,10 @@ static inline void pm_restore_console(void) {} | |||
53 | static inline int software_suspend(void) | 35 | static inline int software_suspend(void) |
54 | { | 36 | { |
55 | printk("Warning: fake suspend called\n"); | 37 | printk("Warning: fake suspend called\n"); |
56 | return -EPERM; | 38 | return -ENOSYS; |
57 | } | 39 | } |
58 | #endif /* CONFIG_PM */ | 40 | #endif /* CONFIG_PM */ |
59 | 41 | ||
60 | #ifdef CONFIG_SUSPEND_SMP | ||
61 | extern void disable_nonboot_cpus(void); | ||
62 | extern void enable_nonboot_cpus(void); | ||
63 | #else | ||
64 | static inline void disable_nonboot_cpus(void) {} | ||
65 | static inline void enable_nonboot_cpus(void) {} | ||
66 | #endif | ||
67 | |||
68 | void save_processor_state(void); | 42 | void save_processor_state(void); |
69 | void restore_processor_state(void); | 43 | void restore_processor_state(void); |
70 | struct saved_context; | 44 | struct saved_context; |
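With the suspend_pagedir_t typedef gone, a minimal sketch of walking the plain struct pbe chain by hand (the counting function is hypothetical):

        /* Hypothetical: count entries in a page-backup-entry chain. */
        static unsigned int count_pbes(struct pbe *pblist)
        {
                struct pbe *p;
                unsigned int n = 0;

                for (p = pblist; p; p = p->next)
                        n++;
                return n;
        }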
diff --git a/include/linux/swap.h b/include/linux/swap.h index 5e59184c9096..e7c36ba2a2db 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -10,6 +10,10 @@ | |||
10 | #include <asm/atomic.h> | 10 | #include <asm/atomic.h> |
11 | #include <asm/page.h> | 11 | #include <asm/page.h> |
12 | 12 | ||
13 | struct notifier_block; | ||
14 | |||
15 | struct bio; | ||
16 | |||
13 | #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ | 17 | #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ |
14 | #define SWAP_FLAG_PRIO_MASK 0x7fff | 18 | #define SWAP_FLAG_PRIO_MASK 0x7fff |
15 | #define SWAP_FLAG_PRIO_SHIFT 0 | 19 | #define SWAP_FLAG_PRIO_SHIFT 0 |
@@ -156,13 +160,14 @@ struct swap_list_t { | |||
156 | 160 | ||
157 | /* linux/mm/oom_kill.c */ | 161 | /* linux/mm/oom_kill.c */ |
158 | extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order); | 162 | extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order); |
163 | extern int register_oom_notifier(struct notifier_block *nb); | ||
164 | extern int unregister_oom_notifier(struct notifier_block *nb); | ||
159 | 165 | ||
160 | /* linux/mm/memory.c */ | 166 | /* linux/mm/memory.c */ |
161 | extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *); | 167 | extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *); |
162 | 168 | ||
163 | /* linux/mm/page_alloc.c */ | 169 | /* linux/mm/page_alloc.c */ |
164 | extern unsigned long totalram_pages; | 170 | extern unsigned long totalram_pages; |
165 | extern unsigned long totalhigh_pages; | ||
166 | extern unsigned long totalreserve_pages; | 171 | extern unsigned long totalreserve_pages; |
167 | extern long nr_swap_pages; | 172 | extern long nr_swap_pages; |
168 | extern unsigned int nr_free_pages(void); | 173 | extern unsigned int nr_free_pages(void); |
@@ -190,6 +195,7 @@ extern long vm_total_pages; | |||
190 | #ifdef CONFIG_NUMA | 195 | #ifdef CONFIG_NUMA |
191 | extern int zone_reclaim_mode; | 196 | extern int zone_reclaim_mode; |
192 | extern int sysctl_min_unmapped_ratio; | 197 | extern int sysctl_min_unmapped_ratio; |
198 | extern int sysctl_min_slab_ratio; | ||
193 | extern int zone_reclaim(struct zone *, gfp_t, unsigned int); | 199 | extern int zone_reclaim(struct zone *, gfp_t, unsigned int); |
194 | #else | 200 | #else |
195 | #define zone_reclaim_mode 0 | 201 | #define zone_reclaim_mode 0 |
@@ -212,7 +218,9 @@ extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *); | |||
212 | /* linux/mm/page_io.c */ | 218 | /* linux/mm/page_io.c */ |
213 | extern int swap_readpage(struct file *, struct page *); | 219 | extern int swap_readpage(struct file *, struct page *); |
214 | extern int swap_writepage(struct page *page, struct writeback_control *wbc); | 220 | extern int swap_writepage(struct page *page, struct writeback_control *wbc); |
215 | extern int rw_swap_page_sync(int, swp_entry_t, struct page *); | 221 | extern int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page, |
222 | struct bio **bio_chain); | ||
223 | extern int end_swap_bio_read(struct bio *bio, unsigned int bytes_done, int err); | ||
216 | 224 | ||
217 | /* linux/mm/swap_state.c */ | 225 | /* linux/mm/swap_state.c */ |
218 | extern struct address_space swapper_space; | 226 | extern struct address_space swapper_space; |
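A hedged sketch of the widened rw_swap_page_sync() signature (obtaining the swap entry is elided; that a NULL bio_chain means "complete the I/O before returning" is an assumption drawn from the parameter's name, not stated in this header):

        /* Hypothetical: synchronously write one page to its swap slot. */
        static int write_swap_page(struct page *page, swp_entry_t entry)
        {
                /* NULL bio_chain: assumed fully synchronous behaviour */
                return rw_swap_page_sync(WRITE, entry, page, NULL);
        }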
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 736ed917a4f8..eca555781d05 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h | |||
@@ -191,6 +191,7 @@ enum | |||
191 | VM_MIN_UNMAPPED=32, /* Set min percent of unmapped pages */ | 191 | VM_MIN_UNMAPPED=32, /* Set min percent of unmapped pages */ |
192 | VM_PANIC_ON_OOM=33, /* panic at out-of-memory */ | 192 | VM_PANIC_ON_OOM=33, /* panic at out-of-memory */ |
193 | VM_VDSO_ENABLED=34, /* map VDSO into new processes? */ | 193 | VM_VDSO_ENABLED=34, /* map VDSO into new processes? */ |
194 | VM_MIN_SLAB=35, /* Percent pages ignored by zone reclaim */ | ||
194 | }; | 195 | }; |
195 | 196 | ||
196 | 197 | ||
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 71b6363caaaf..dee88c6b6fa7 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h | |||
@@ -44,8 +44,6 @@ extern void *vmalloc_32_user(unsigned long size); | |||
44 | extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); | 44 | extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); |
45 | extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, | 45 | extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, |
46 | pgprot_t prot); | 46 | pgprot_t prot); |
47 | extern void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, | ||
48 | pgprot_t prot, int node); | ||
49 | extern void vfree(void *addr); | 47 | extern void vfree(void *addr); |
50 | 48 | ||
51 | extern void *vmap(struct page **pages, unsigned int count, | 49 | extern void *vmap(struct page **pages, unsigned int count, |
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 2d9b1b60798a..176c7f797339 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
@@ -18,7 +18,19 @@ | |||
18 | * generated will simply be the increment of a global address. | 18 | * generated will simply be the increment of a global address. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #define FOR_ALL_ZONES(x) x##_DMA, x##_DMA32, x##_NORMAL, x##_HIGH | 21 | #ifdef CONFIG_ZONE_DMA32 |
22 | #define DMA32_ZONE(xx) xx##_DMA32, | ||
23 | #else | ||
24 | #define DMA32_ZONE(xx) | ||
25 | #endif | ||
26 | |||
27 | #ifdef CONFIG_HIGHMEM | ||
28 | #define HIGHMEM_ZONE(xx) , xx##_HIGH | ||
29 | #else | ||
30 | #define HIGHMEM_ZONE(xx) | ||
31 | #endif | ||
32 | |||
33 | #define FOR_ALL_ZONES(xx) xx##_DMA, DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) | ||
22 | 34 | ||
23 | enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | 35 | enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, |
24 | FOR_ALL_ZONES(PGALLOC), | 36 | FOR_ALL_ZONES(PGALLOC), |
@@ -124,12 +136,10 @@ static inline unsigned long node_page_state(int node, | |||
124 | struct zone *zones = NODE_DATA(node)->node_zones; | 136 | struct zone *zones = NODE_DATA(node)->node_zones; |
125 | 137 | ||
126 | return | 138 | return |
127 | #ifndef CONFIG_DMA_IS_NORMAL | 139 | #ifdef CONFIG_ZONE_DMA32 |
128 | #if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64 | ||
129 | zone_page_state(&zones[ZONE_DMA32], item) + | 140 | zone_page_state(&zones[ZONE_DMA32], item) + |
130 | #endif | 141 | #endif |
131 | zone_page_state(&zones[ZONE_NORMAL], item) + | 142 | zone_page_state(&zones[ZONE_NORMAL], item) + |
132 | #endif | ||
133 | #ifdef CONFIG_HIGHMEM | 143 | #ifdef CONFIG_HIGHMEM |
134 | zone_page_state(&zones[ZONE_HIGHMEM], item) + | 144 | zone_page_state(&zones[ZONE_HIGHMEM], item) + |
135 | #endif | 145 | #endif |
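To check the macro plumbing, the expansion of the enum fragment under the two extreme configurations:

        /*
         * With CONFIG_ZONE_DMA32 and CONFIG_HIGHMEM both set:
         *      FOR_ALL_ZONES(PGALLOC)
         *              => PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH
         *
         * With neither set:
         *      FOR_ALL_ZONES(PGALLOC)
         *              => PGALLOC_DMA, PGALLOC_NORMAL
         */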
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 0422036af4eb..56a23a0e7f2e 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
@@ -116,6 +116,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping, | |||
116 | loff_t pos, loff_t count); | 116 | loff_t pos, loff_t count); |
117 | int sync_page_range_nolock(struct inode *inode, struct address_space *mapping, | 117 | int sync_page_range_nolock(struct inode *inode, struct address_space *mapping, |
118 | loff_t pos, loff_t count); | 118 | loff_t pos, loff_t count); |
119 | void set_page_dirty_balance(struct page *page); | ||
119 | 120 | ||
120 | /* pdflush.c */ | 121 | /* pdflush.c */ |
121 | extern int nr_pdflush_threads; /* Global so it can be exported to sysctl | 122 | extern int nr_pdflush_threads; /* Global so it can be exported to sysctl |