Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/cpufreq.h      |   6
-rw-r--r--  include/linux/kobject.h      |   2
-rw-r--r--  include/linux/mm.h           |   6
-rw-r--r--  include/linux/mmzone.h       |  38
-rw-r--r--  include/linux/page-flags.h   | 149
-rw-r--r--  include/linux/pagemap.h      |  45
-rw-r--r--  include/linux/pci_ids.h      |   1
-rw-r--r--  include/linux/rcupdate.h     |  24
-rw-r--r--  include/linux/rtc.h          |   4
-rw-r--r--  include/linux/sched.h        |   2
-rw-r--r--  include/linux/security.h     |  38
-rw-r--r--  include/linux/smp.h          |   2
-rw-r--r--  include/linux/sunrpc/svc.h   |   4
-rw-r--r--  include/linux/swap.h         |   1
-rw-r--r--  include/linux/vmstat.h       | 215
15 files changed, 326 insertions, 211 deletions
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 466fbe9e4899..35e137636b0b 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -100,8 +100,10 @@ struct cpufreq_policy {
 #define CPUFREQ_INCOMPATIBLE		(1)
 #define CPUFREQ_NOTIFY			(2)
 
-#define CPUFREQ_SHARED_TYPE_ALL	(0) /* All dependent CPUs should set freq */
-#define CPUFREQ_SHARED_TYPE_ANY	(1) /* Freq can be set from any dependent CPU */
+#define CPUFREQ_SHARED_TYPE_NONE	(0) /* None */
+#define CPUFREQ_SHARED_TYPE_HW		(1) /* HW does needed coordination */
+#define CPUFREQ_SHARED_TYPE_ALL		(2) /* All dependent CPUs should set freq */
+#define CPUFREQ_SHARED_TYPE_ANY		(3) /* Freq can be set from any dependent CPU*/
 
 /******************** cpufreq transition notifiers *******************/
 
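
The new CPUFREQ_SHARED_TYPE_* values let a driver describe how the CPUs in a
frequency domain coordinate. A hypothetical driver-init sketch (not part of
this patch; it assumes the shared_type member that this ACPI coordination
series adds to struct cpufreq_policy):

/* Sketch: firmware says any CPU in the domain may set the frequency. */
static int example_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;	/* assumed member */
	return 0;
}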
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 2d229327959e..0503b2ed8bae 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -46,6 +46,8 @@ enum kobject_action {
 	KOBJ_UMOUNT	= (__force kobject_action_t) 0x05,	/* umount event for block devices (broken) */
 	KOBJ_OFFLINE	= (__force kobject_action_t) 0x06,	/* device offline */
 	KOBJ_ONLINE	= (__force kobject_action_t) 0x07,	/* device online */
+	KOBJ_UNDOCK	= (__force kobject_action_t) 0x08,	/* undocking */
+	KOBJ_DOCK	= (__force kobject_action_t) 0x09,	/* dock */
 };
 
 struct kobject {
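
KOBJ_DOCK and KOBJ_UNDOCK give dock-station drivers an event to raise toward
userspace. A minimal sketch, assuming the driver already owns a registered
kobject:

/* Sketch: tell userspace that the laptop was just docked. */
static void example_notify_dock(struct kobject *kobj)
{
	kobject_uevent(kobj, KOBJ_DOCK);
}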
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c41a1299b8cf..75179529e399 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -36,7 +36,6 @@ extern int sysctl_legacy_va_layout;
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
-#include <asm/atomic.h>
 
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
@@ -515,6 +514,11 @@ static inline void set_page_links(struct page *page, unsigned long zone,
 	set_page_section(page, pfn_to_section_nr(pfn));
 }
 
+/*
+ * Some inline functions in vmstat.h depend on page_zone()
+ */
+#include <linux/vmstat.h>
+
 #ifndef CONFIG_DISCONTIGMEM
 /* The array of struct pages - for discontigmem use pgdat->lmem_map */
 extern struct page *mem_map;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d6120fa69116..27e748eb72b0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -46,6 +46,27 @@ struct zone_padding {
 #define ZONE_PADDING(name)
 #endif
 
+enum zone_stat_item {
+	NR_ANON_PAGES,	/* Mapped anonymous pages */
+	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
+			   only modified from process context */
+	NR_FILE_PAGES,
+	NR_SLAB,	/* Pages used by slab allocator */
+	NR_PAGETABLE,	/* used for pagetables */
+	NR_FILE_DIRTY,
+	NR_WRITEBACK,
+	NR_UNSTABLE_NFS,	/* NFS unstable pages */
+	NR_BOUNCE,
+#ifdef CONFIG_NUMA
+	NUMA_HIT,		/* allocated in intended node */
+	NUMA_MISS,		/* allocated in non intended node */
+	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
+	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
+	NUMA_LOCAL,		/* allocation from local node */
+	NUMA_OTHER,		/* allocation from other node */
+#endif
+	NR_VM_ZONE_STAT_ITEMS };
+
 struct per_cpu_pages {
 	int count;		/* number of pages in the list */
 	int high;		/* high watermark, emptying needed */
@@ -55,13 +76,8 @@ struct per_cpu_pages {
 
 struct per_cpu_pageset {
 	struct per_cpu_pages pcp[2];	/* 0: hot.  1: cold */
-#ifdef CONFIG_NUMA
-	unsigned long numa_hit;		/* allocated in intended node */
-	unsigned long numa_miss;	/* allocated in non intended node */
-	unsigned long numa_foreign;	/* was intended here, hit elsewhere */
-	unsigned long interleave_hit; 	/* interleaver prefered this zone */
-	unsigned long local_node;	/* allocation from local node */
-	unsigned long other_node;	/* allocation from other node */
+#ifdef CONFIG_SMP
+	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
 #endif
 } ____cacheline_aligned_in_smp;
 
@@ -165,12 +181,8 @@ struct zone {
 	/* A count of how many reclaimers are scanning this zone */
 	atomic_t		reclaim_in_progress;
 
-	/*
-	 * timestamp (in jiffies) of the last zone reclaim that did not
-	 * result in freeing of pages. This is used to avoid repeated scans
-	 * if all memory in the zone is in use.
-	 */
-	unsigned long		last_unsuccessful_zone_reclaim;
+	/* Zone statistics */
+	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
 
 	/*
 	 * prev_priority holds the scanning priority for this zone.  It is
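
Taken together, these mmzone.h hunks replace the ad-hoc per-cpu NUMA counters
with a generic scheme: each zone keeps atomic_long_t totals in vm_stat[],
while every CPU accumulates small signed deltas in vm_stat_diff[] that are
folded back periodically. A simplified sketch of the fold step (the real
logic lives in mm/vmstat.c in this series):

/* Sketch: fold one CPU's differentials into the zone's atomic counters. */
static void example_fold_diffs(struct zone *zone, struct per_cpu_pageset *p)
{
	int i;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (p->vm_stat_diff[i]) {
			atomic_long_add(p->vm_stat_diff[i], &zone->vm_stat[i]);
			p->vm_stat_diff[i] = 0;
		}
}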
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 0c076d58c676..5748642e9f36 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -5,12 +5,8 @@
 #ifndef PAGE_FLAGS_H
 #define PAGE_FLAGS_H
 
-#include <linux/percpu.h>
-#include <linux/cache.h>
 #include <linux/types.h>
 
-#include <asm/pgtable.h>
-
 /*
  * Various page->flags bits:
  *
@@ -103,134 +99,6 @@
 #endif
 
 /*
- * Global page accounting.  One instance per CPU.  Only unsigned longs are
- * allowed.
- *
- * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
- *   any time safely (which protects the instance from modification by
- *   interrupt.
- * - The __xxx_page_state variants can be used safely when interrupts are
- *   disabled.
- * - The __xxx_page_state variants can be used if the field is only
- *   modified from process context and protected from preemption, or only
- *   modified from interrupt context. In this case, the field should be
- *   commented here.
- */
-struct page_state {
-	unsigned long nr_dirty;		/* Dirty writeable pages */
-	unsigned long nr_writeback;	/* Pages under writeback */
-	unsigned long nr_unstable;	/* NFS unstable pages */
-	unsigned long nr_page_table_pages;/* Pages used for pagetables */
-	unsigned long nr_mapped;	/* mapped into pagetables.
-					 * only modified from process context */
-	unsigned long nr_slab;		/* In slab */
-#define GET_PAGE_STATE_LAST nr_slab
-
-	/*
-	 * The below are zeroed by get_page_state().  Use get_full_page_state()
-	 * to add up all these.
-	 */
-	unsigned long pgpgin;		/* Disk reads */
-	unsigned long pgpgout;		/* Disk writes */
-	unsigned long pswpin;		/* swap reads */
-	unsigned long pswpout;		/* swap writes */
-
-	unsigned long pgalloc_high;	/* page allocations */
-	unsigned long pgalloc_normal;
-	unsigned long pgalloc_dma32;
-	unsigned long pgalloc_dma;
-
-	unsigned long pgfree;		/* page freeings */
-	unsigned long pgactivate;	/* pages moved inactive->active */
-	unsigned long pgdeactivate;	/* pages moved active->inactive */
-
-	unsigned long pgfault;		/* faults (major+minor) */
-	unsigned long pgmajfault;	/* faults (major only) */
-
-	unsigned long pgrefill_high;	/* inspected in refill_inactive_zone */
-	unsigned long pgrefill_normal;
-	unsigned long pgrefill_dma32;
-	unsigned long pgrefill_dma;
-
-	unsigned long pgsteal_high;	/* total highmem pages reclaimed */
-	unsigned long pgsteal_normal;
-	unsigned long pgsteal_dma32;
-	unsigned long pgsteal_dma;
-
-	unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
-	unsigned long pgscan_kswapd_normal;
-	unsigned long pgscan_kswapd_dma32;
-	unsigned long pgscan_kswapd_dma;
-
-	unsigned long pgscan_direct_high;/* total highmem pages scanned */
-	unsigned long pgscan_direct_normal;
-	unsigned long pgscan_direct_dma32;
-	unsigned long pgscan_direct_dma;
-
-	unsigned long pginodesteal;	/* pages reclaimed via inode freeing */
-	unsigned long slabs_scanned;	/* slab objects scanned */
-	unsigned long kswapd_steal;	/* pages reclaimed by kswapd */
-	unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
-	unsigned long pageoutrun;	/* kswapd's calls to page reclaim */
-	unsigned long allocstall;	/* direct reclaim calls */
-
-	unsigned long pgrotated;	/* pages rotated to tail of the LRU */
-	unsigned long nr_bounce;	/* pages for bounce buffers */
-};
-
-extern void get_page_state(struct page_state *ret);
-extern void get_page_state_node(struct page_state *ret, int node);
-extern void get_full_page_state(struct page_state *ret);
-extern unsigned long read_page_state_offset(unsigned long offset);
-extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
-extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
-
-#define read_page_state(member) \
-	read_page_state_offset(offsetof(struct page_state, member))
-
-#define mod_page_state(member, delta)	\
-	mod_page_state_offset(offsetof(struct page_state, member), (delta))
-
-#define __mod_page_state(member, delta)	\
-	__mod_page_state_offset(offsetof(struct page_state, member), (delta))
-
-#define inc_page_state(member)		mod_page_state(member, 1UL)
-#define dec_page_state(member)		mod_page_state(member, 0UL - 1)
-#define add_page_state(member,delta)	mod_page_state(member, (delta))
-#define sub_page_state(member,delta)	mod_page_state(member, 0UL - (delta))
-
-#define __inc_page_state(member)	__mod_page_state(member, 1UL)
-#define __dec_page_state(member)	__mod_page_state(member, 0UL - 1)
-#define __add_page_state(member,delta)	__mod_page_state(member, (delta))
-#define __sub_page_state(member,delta)	__mod_page_state(member, 0UL - (delta))
-
-#define page_state(member) (*__page_state(offsetof(struct page_state, member)))
-
-#define state_zone_offset(zone, member)					\
-({									\
-	unsigned offset;						\
-	if (is_highmem(zone))						\
-		offset = offsetof(struct page_state, member##_high);	\
-	else if (is_normal(zone))					\
-		offset = offsetof(struct page_state, member##_normal);	\
-	else if (is_dma32(zone))					\
-		offset = offsetof(struct page_state, member##_dma32);	\
-	else								\
-		offset = offsetof(struct page_state, member##_dma);	\
-	offset;								\
-})
-
-#define __mod_page_state_zone(zone, member, delta)			\
-	do {								\
-		__mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
-	} while (0)
-
-#define mod_page_state_zone(zone, member, delta)			\
-	do {								\
-		mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
-	} while (0)
-
-/*
  * Manipulation of page state flags
  */
 #define PageLocked(page)		\
@@ -254,7 +122,14 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
 #define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
 
 #define PageUptodate(page)	test_bit(PG_uptodate, &(page)->flags)
-#ifndef SetPageUptodate
+#ifdef CONFIG_S390
+#define SetPageUptodate(_page) \
+	do {								      \
+		struct page *__page = (_page);				      \
+		if (!test_and_set_bit(PG_uptodate, &__page->flags))	      \
+			page_test_and_clear_dirty(_page);		      \
+	} while (0)
+#else
 #define SetPageUptodate(page)	set_bit(PG_uptodate, &(page)->flags)
 #endif
 #define ClearPageUptodate(page)	clear_bit(PG_uptodate, &(page)->flags)
@@ -306,7 +181,7 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
 	do {								\
 		if (!test_and_set_bit(PG_writeback,			\
 				&(page)->flags))			\
-			inc_page_state(nr_writeback);			\
+			inc_zone_page_state(page, NR_WRITEBACK);	\
 	} while (0)
 #define TestSetPageWriteback(page)					\
 	({								\
@@ -314,14 +189,14 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
 		ret = test_and_set_bit(PG_writeback,			\
 					&(page)->flags);		\
 		if (!ret)						\
-			inc_page_state(nr_writeback);			\
+			inc_zone_page_state(page, NR_WRITEBACK);	\
 		ret;							\
 	})
 #define ClearPageWriteback(page)					\
 	do {								\
 		if (test_and_clear_bit(PG_writeback,			\
 				&(page)->flags))			\
-			dec_page_state(nr_writeback);			\
+			dec_zone_page_state(page, NR_WRITEBACK);	\
 	} while (0)
 #define TestClearPageWriteback(page)					\
 	({								\
@@ -329,7 +204,7 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
 		ret = test_and_clear_bit(PG_writeback,			\
 				&(page)->flags);			\
 		if (ret)						\
-			dec_page_state(nr_writeback);			\
+			dec_zone_page_state(page, NR_WRITEBACK);	\
 		ret;							\
 	})
 
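
With these macros the PG_writeback bit and the NR_WRITEBACK zone counter now
move together, so callers need no separate accounting. An illustrative
sketch, loosely modeled on end_page_writeback():

static void example_end_writeback(struct page *page)
{
	/* TestClearPageWriteback() also decrements NR_WRITEBACK */
	if (!TestClearPageWriteback(page))
		BUG();
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_writeback);
}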
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1245df7141aa..0a2f5d27f60e 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -113,51 +113,6 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 extern void remove_from_page_cache(struct page *page);
 extern void __remove_from_page_cache(struct page *page);
 
-extern atomic_t nr_pagecache;
-
-#ifdef CONFIG_SMP
-
-#define PAGECACHE_ACCT_THRESHOLD	max(16, NR_CPUS * 2)
-DECLARE_PER_CPU(long, nr_pagecache_local);
-
-/*
- * pagecache_acct implements approximate accounting for pagecache.
- * vm_enough_memory() do not need high accuracy. Writers will keep
- * an offset in their per-cpu arena and will spill that into the
- * global count whenever the absolute value of the local count
- * exceeds the counter's threshold.
- *
- * MUST be protected from preemption.
- * current protection is mapping->page_lock.
- */
-static inline void pagecache_acct(int count)
-{
-	long *local;
-
-	local = &__get_cpu_var(nr_pagecache_local);
-	*local += count;
-	if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
-		atomic_add(*local, &nr_pagecache);
-		*local = 0;
-	}
-}
-
-#else
-
-static inline void pagecache_acct(int count)
-{
-	atomic_add(count, &nr_pagecache);
-}
-#endif
-
-static inline unsigned long get_page_cache_size(void)
-{
-	int ret = atomic_read(&nr_pagecache);
-	if (unlikely(ret < 0))
-		ret = 0;
-	return ret;
-}
-
 /*
  * Return byte-offset into filesystem object for page.
  */
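
The removed nr_pagecache/pagecache_acct() machinery is superseded by the
zoned NR_FILE_PAGES counter; the threshold-and-spill trick now lives behind
the generic per-cpu differentials in vmstat.h. Callers of the old
get_page_cache_size() are converted in this series to read the global
counter, roughly:

static inline unsigned long example_page_cache_size(void)
{
	return global_page_state(NR_FILE_PAGES);
}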
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 9ae6b1a75366..b093479a531d 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -729,6 +729,7 @@
 #define PCI_DEVICE_ID_TI_4450		0x8011
 #define PCI_DEVICE_ID_TI_XX21_XX11	0x8031
 #define PCI_DEVICE_ID_TI_X515		0x8036
+#define PCI_DEVICE_ID_TI_XX12		0x8039
 #define PCI_DEVICE_ID_TI_1130		0xac12
 #define PCI_DEVICE_ID_TI_1031		0xac13
 #define PCI_DEVICE_ID_TI_1131		0xac15
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 48dfe00070c7..b4ca73d65891 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -163,14 +163,22 @@ extern int rcu_needs_cpu(int cpu);
  *
  * It is illegal to block while in an RCU read-side critical section.
  */
-#define rcu_read_lock()		preempt_disable()
+#define rcu_read_lock() \
+	do { \
+		preempt_disable(); \
+		__acquire(RCU); \
+	} while(0)
 
 /**
  * rcu_read_unlock - marks the end of an RCU read-side critical section.
  *
  * See rcu_read_lock() for more information.
  */
-#define rcu_read_unlock()	preempt_enable()
+#define rcu_read_unlock() \
+	do { \
+		__release(RCU); \
+		preempt_enable(); \
+	} while(0)
 
 /*
  * So where is rcu_write_lock()?  It does not exist, as there is no
@@ -193,14 +201,22 @@ extern int rcu_needs_cpu(int cpu);
  * can use just rcu_read_lock().
  *
  */
-#define rcu_read_lock_bh()	local_bh_disable()
+#define rcu_read_lock_bh() \
+	do { \
+		local_bh_disable(); \
+		__acquire(RCU_BH); \
+	} while(0)
 
 /*
  * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
  *
  * See rcu_read_lock_bh() for more information.
  */
-#define rcu_read_unlock_bh()	local_bh_enable()
+#define rcu_read_unlock_bh() \
+	do { \
+		__release(RCU_BH); \
+		local_bh_enable(); \
+	} while(0)
 
 /**
  * rcu_dereference - fetch an RCU-protected pointer in an
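
The do/while bodies are unchanged at runtime: __acquire()/__release() are
sparse context annotations (no-ops in a normal build) that let "make C=1"
flag unbalanced critical sections. A made-up example sparse would now warn
about (struct foo and foo_table exist only for illustration):

struct foo { int key; };
static struct foo *foo_table[16];

static struct foo *buggy_lookup(int key)
{
	struct foo *f;

	rcu_read_lock();
	f = rcu_dereference(foo_table[key & 15]);
	return f;	/* missing rcu_read_unlock(): sparse reports a context imbalance */
}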
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 36e2bf4b4315..5371e4e74595 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -34,8 +34,8 @@ struct rtc_time {
  * alarm API.
  */
 struct rtc_wkalrm {
-	unsigned char enabled;	/* 0 = alarm disable, 1 = alarm disabled */
-	unsigned char pending;  /* 0 = alarm pending, 1 = alarm not pending */
+	unsigned char enabled;	/* 0 = alarm disabled, 1 = alarm enabled */
+	unsigned char pending;  /* 0 = alarm not pending, 1 = alarm pending */
 	struct rtc_time time;	/* time the alarm is set to */
 };
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 821f0481ebe1..aaf723308ed4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1153,7 +1153,7 @@ extern int force_sig_info(int, struct siginfo *, struct task_struct *);
 extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
 extern int kill_pg_info(int, struct siginfo *, pid_t);
 extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t);
+extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t, u32);
 extern void do_notify_parent(struct task_struct *, int);
 extern void force_sig(int, struct task_struct *);
 extern void force_sig_specific(int, struct task_struct *);
diff --git a/include/linux/security.h b/include/linux/security.h
index c7ea15716dce..f75303831d09 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -567,6 +567,9 @@ struct swap_info_struct;
  *	@p.
  *	@p contains the task_struct for the process.
  *	Return 0 if permission is granted.
+ * @task_getsecid:
+ *	Retrieve the security identifier of the process @p.
+ *	@p contains the task_struct for the process and place is into @secid.
  * @task_setgroups:
  *	Check permission before setting the supplementary group set of the
  *	current process.
@@ -582,6 +585,10 @@ struct swap_info_struct;
  *	@p contains the task_struct of process.
  *	@ioprio contains the new ioprio value
  *	Return 0 if permission is granted.
+ * @task_getioprio:
+ *	Check permission before getting the ioprio value of @p.
+ *	@p contains the task_struct of process.
+ *	Return 0 if permission is granted.
  * @task_setrlimit:
  *	Check permission before setting the resource limits of the current
  *	process for @resource to @new_rlim.  The old resource limit values can
@@ -615,6 +622,7 @@ struct swap_info_struct;
  *	@p contains the task_struct for process.
  *	@info contains the signal information.
  *	@sig contains the signal value.
+ *	@secid contains the sid of the process where the signal originated
  *	Return 0 if permission is granted.
  * @task_wait:
  *	Check permission before allowing a process to reap a child process @p
@@ -1219,16 +1227,18 @@ struct security_operations {
 	int (*task_setpgid) (struct task_struct * p, pid_t pgid);
 	int (*task_getpgid) (struct task_struct * p);
 	int (*task_getsid) (struct task_struct * p);
+	void (*task_getsecid) (struct task_struct * p, u32 * secid);
 	int (*task_setgroups) (struct group_info *group_info);
 	int (*task_setnice) (struct task_struct * p, int nice);
 	int (*task_setioprio) (struct task_struct * p, int ioprio);
+	int (*task_getioprio) (struct task_struct * p);
 	int (*task_setrlimit) (unsigned int resource, struct rlimit * new_rlim);
 	int (*task_setscheduler) (struct task_struct * p, int policy,
 				  struct sched_param * lp);
 	int (*task_getscheduler) (struct task_struct * p);
 	int (*task_movememory) (struct task_struct * p);
 	int (*task_kill) (struct task_struct * p,
-			  struct siginfo * info, int sig);
+			  struct siginfo * info, int sig, u32 secid);
 	int (*task_wait) (struct task_struct * p);
 	int (*task_prctl) (int option, unsigned long arg2,
 			   unsigned long arg3, unsigned long arg4,
@@ -1839,6 +1849,11 @@ static inline int security_task_getsid (struct task_struct *p)
 	return security_ops->task_getsid (p);
 }
 
+static inline void security_task_getsecid (struct task_struct *p, u32 *secid)
+{
+	security_ops->task_getsecid (p, secid);
+}
+
 static inline int security_task_setgroups (struct group_info *group_info)
 {
 	return security_ops->task_setgroups (group_info);
@@ -1854,6 +1869,11 @@ static inline int security_task_setioprio (struct task_struct *p, int ioprio)
 	return security_ops->task_setioprio (p, ioprio);
 }
 
+static inline int security_task_getioprio (struct task_struct *p)
+{
+	return security_ops->task_getioprio (p);
+}
+
 static inline int security_task_setrlimit (unsigned int resource,
 					   struct rlimit *new_rlim)
 {
@@ -1878,9 +1898,10 @@ static inline int security_task_movememory (struct task_struct *p)
 }
 
 static inline int security_task_kill (struct task_struct *p,
-				      struct siginfo *info, int sig)
+				      struct siginfo *info, int sig,
+				      u32 secid)
 {
-	return security_ops->task_kill (p, info, sig);
+	return security_ops->task_kill (p, info, sig, secid);
 }
 
 static inline int security_task_wait (struct task_struct *p)
@@ -2491,6 +2512,9 @@ static inline int security_task_getsid (struct task_struct *p)
 	return 0;
 }
 
+static inline void security_task_getsecid (struct task_struct *p, u32 *secid)
+{ }
+
 static inline int security_task_setgroups (struct group_info *group_info)
 {
 	return 0;
@@ -2506,6 +2530,11 @@ static inline int security_task_setioprio (struct task_struct *p, int ioprio)
 	return 0;
 }
 
+static inline int security_task_getioprio (struct task_struct *p)
+{
+	return 0;
+}
+
 static inline int security_task_setrlimit (unsigned int resource,
 					   struct rlimit *new_rlim)
 {
@@ -2530,7 +2559,8 @@ static inline int security_task_movememory (struct task_struct *p)
 }
 
 static inline int security_task_kill (struct task_struct *p,
-				      struct siginfo *info, int sig)
+				      struct siginfo *info, int sig,
+				      u32 secid)
 {
 	return 0;
 }
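
For modules that register a security_operations table, the new and widened
task hooks look as below. A hypothetical minimal implementation (illustration
only; a real LSM would map @p to its own label and consult @secid when
deciding on the signal):

static void example_task_getsecid(struct task_struct *p, u32 *secid)
{
	*secid = 0;			/* unlabeled: report the null SID */
}

static int example_task_getioprio(struct task_struct *p)
{
	return 0;			/* permit */
}

static int example_task_kill(struct task_struct *p, struct siginfo *info,
			     int sig, u32 secid)
{
	return 0;			/* permit; secid names the signal's origin */
}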
diff --git a/include/linux/smp.h b/include/linux/smp.h
index c93c3fe4308c..837e8bce1349 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -125,4 +125,6 @@ static inline void smp_send_reschedule(int cpu) { }
 #define put_cpu()		preempt_enable()
 #define put_cpu_no_resched()	preempt_enable_no_resched()
 
+void smp_setup_processor_id(void);
+
 #endif /* __LINUX_SMP_H */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 503564384545..7b27c09b5604 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -159,7 +159,9 @@ struct svc_rqst {
 					 * determine what device number
 					 * to report (real or virtual)
 					 */
-
+	int		rq_sendfile_ok; /* turned off in gss privacy
+					 * to prevent encrypting page
+					 * cache pages */
 	wait_queue_head_t	rq_wait;	/* synchronization */
 };
 
diff --git a/include/linux/swap.h b/include/linux/swap.h
index c41e2d6d1acc..cf6ca6e377bd 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -189,7 +189,6 @@ extern long vm_total_pages;
 
 #ifdef CONFIG_NUMA
 extern int zone_reclaim_mode;
-extern int zone_reclaim_interval;
 extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
 #else
 #define zone_reclaim_mode 0
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
new file mode 100644
index 000000000000..3e0daf54133e
--- /dev/null
+++ b/include/linux/vmstat.h
@@ -0,0 +1,215 @@
+#ifndef _LINUX_VMSTAT_H
+#define _LINUX_VMSTAT_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <linux/config.h>
+#include <linux/mmzone.h>
+#include <asm/atomic.h>
+
+#ifdef CONFIG_VM_EVENT_COUNTERS
+/*
+ * Light weight per cpu counter implementation.
+ *
+ * Counters should only be incremented and no critical kernel component
+ * should rely on the counter values.
+ *
+ * Counters are handled completely inline. On many platforms the code
+ * generated will simply be the increment of a global address.
+ */
+
+#define FOR_ALL_ZONES(x) x##_DMA, x##_DMA32, x##_NORMAL, x##_HIGH
+
+enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
+		FOR_ALL_ZONES(PGALLOC),
+		PGFREE, PGACTIVATE, PGDEACTIVATE,
+		PGFAULT, PGMAJFAULT,
+		FOR_ALL_ZONES(PGREFILL),
+		FOR_ALL_ZONES(PGSTEAL),
+		FOR_ALL_ZONES(PGSCAN_KSWAPD),
+		FOR_ALL_ZONES(PGSCAN_DIRECT),
+		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
+		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+		NR_VM_EVENT_ITEMS
+};
+
+struct vm_event_state {
+	unsigned long event[NR_VM_EVENT_ITEMS];
+};
+
+DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
+
+static inline void __count_vm_event(enum vm_event_item item)
+{
+	__get_cpu_var(vm_event_states.event[item])++;
+}
+
+static inline void count_vm_event(enum vm_event_item item)
+{
+	get_cpu_var(vm_event_states.event[item])++;
+	put_cpu();
+}
+
+static inline void __count_vm_events(enum vm_event_item item, long delta)
+{
+	__get_cpu_var(vm_event_states.event[item]) += delta;
+}
+
+static inline void count_vm_events(enum vm_event_item item, long delta)
+{
+	get_cpu_var(vm_event_states.event[item]) += delta;
+	put_cpu();
+}
+
+extern void all_vm_events(unsigned long *);
+extern void vm_events_fold_cpu(int cpu);
+
+#else
+
+/* Disable counters */
+#define get_cpu_vm_events(e)	0L
+#define count_vm_event(e)	do { } while (0)
+#define count_vm_events(e,d)	do { } while (0)
+#define __count_vm_event(e)	do { } while (0)
+#define __count_vm_events(e,d)	do { } while (0)
+#define vm_events_fold_cpu(x)	do { } while (0)
+
+#endif /* CONFIG_VM_EVENT_COUNTERS */
+
+#define __count_zone_vm_events(item, zone, delta) \
+		__count_vm_events(item##_DMA + zone_idx(zone), delta)
+
+/*
+ * Zone based page accounting with per cpu differentials.
+ */
+extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+
+static inline void zone_page_state_add(long x, struct zone *zone,
+				 enum zone_stat_item item)
+{
+	atomic_long_add(x, &zone->vm_stat[item]);
+	atomic_long_add(x, &vm_stat[item]);
+}
+
+static inline unsigned long global_page_state(enum zone_stat_item item)
+{
+	long x = atomic_long_read(&vm_stat[item]);
+#ifdef CONFIG_SMP
+	if (x < 0)
+		x = 0;
+#endif
+	return x;
+}
+
+static inline unsigned long zone_page_state(struct zone *zone,
+					enum zone_stat_item item)
+{
+	long x = atomic_long_read(&zone->vm_stat[item]);
+#ifdef CONFIG_SMP
+	if (x < 0)
+		x = 0;
+#endif
+	return x;
+}
+
+#ifdef CONFIG_NUMA
+/*
+ * Determine the per node value of a stat item. This function
+ * is called frequently in a NUMA machine, so try to be as
+ * frugal as possible.
+ */
+static inline unsigned long node_page_state(int node,
+				 enum zone_stat_item item)
+{
+	struct zone *zones = NODE_DATA(node)->node_zones;
+
+	return
+#ifndef CONFIG_DMA_IS_NORMAL
+#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
+		zone_page_state(&zones[ZONE_DMA32], item) +
+#endif
+		zone_page_state(&zones[ZONE_NORMAL], item) +
+#endif
+#ifdef CONFIG_HIGHMEM
+		zone_page_state(&zones[ZONE_HIGHMEM], item) +
+#endif
+		zone_page_state(&zones[ZONE_DMA], item);
+}
+
+extern void zone_statistics(struct zonelist *, struct zone *);
+
+#else
+
+#define node_page_state(node, item) global_page_state(item)
+#define zone_statistics(_zl,_z) do { } while (0)
+
+#endif /* CONFIG_NUMA */
+
+#define __add_zone_page_state(__z, __i, __d)	\
+		__mod_zone_page_state(__z, __i, __d)
+#define __sub_zone_page_state(__z, __i, __d)	\
+		__mod_zone_page_state(__z, __i,-(__d))
+
+#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
+#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
+
+static inline void zap_zone_vm_stats(struct zone *zone)
+{
+	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
+}
+
+extern void inc_zone_state(struct zone *, enum zone_stat_item);
+
+#ifdef CONFIG_SMP
+void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
+void __inc_zone_page_state(struct page *, enum zone_stat_item);
+void __dec_zone_page_state(struct page *, enum zone_stat_item);
+
+void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
+void inc_zone_page_state(struct page *, enum zone_stat_item);
+void dec_zone_page_state(struct page *, enum zone_stat_item);
+
+extern void inc_zone_state(struct zone *, enum zone_stat_item);
+
+void refresh_cpu_vm_stats(int);
+void refresh_vm_stats(void);
+
+#else /* CONFIG_SMP */
+
+/*
+ * We do not maintain differentials in a single processor configuration.
+ * The functions directly modify the zone and global counters.
+ */
+static inline void __mod_zone_page_state(struct zone *zone,
+			enum zone_stat_item item, int delta)
+{
+	zone_page_state_add(delta, zone, item);
+}
+
+static inline void __inc_zone_page_state(struct page *page,
+			enum zone_stat_item item)
+{
+	atomic_long_inc(&page_zone(page)->vm_stat[item]);
+	atomic_long_inc(&vm_stat[item]);
+}
+
+static inline void __dec_zone_page_state(struct page *page,
+			enum zone_stat_item item)
+{
+	atomic_long_dec(&page_zone(page)->vm_stat[item]);
+	atomic_long_dec(&vm_stat[item]);
+}
+
+/*
+ * We only use atomic operations to update counters. So there is no need to
+ * disable interrupts.
+ */
+#define inc_zone_page_state __inc_zone_page_state
+#define dec_zone_page_state __dec_zone_page_state
+#define mod_zone_page_state __mod_zone_page_state
+
+static inline void refresh_cpu_vm_stats(int cpu) { }
+static inline void refresh_vm_stats(void) { }
+#endif
+
+#endif /* _LINUX_VMSTAT_H */
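
A quick usage sketch of the two counter families this header introduces:
event counters are keyed by item alone, zone page-state counters by the page
whose zone is charged (the call sites below are illustrative, not from this
patch):

static void example_accounting(struct page *page)
{
	count_vm_event(PGFAULT);		/* per-cpu VM event counter */
	__count_vm_events(PGPGOUT, 8);		/* caller is already non-preemptible */
	inc_zone_page_state(page, NR_FILE_PAGES); /* zoned page-state counter */
}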