author		Linus Torvalds <torvalds@linux-foundation.org>	2010-10-26 20:15:20 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-26 20:15:20 -0400
commit		31453a9764f7e2a72a6e2c502ace586e2663a68c (patch)
tree		5d4db63de5b4b85d1ffdab4e95a75175a784a10a /include
parent		f9ba5375a8aae4aeea6be15df77e24707a429812 (diff)
parent		93ed0e2d07b25aff4db1d61bfbcd1e82074c0ad5 (diff)
Merge branch 'akpm-incoming-1'
* akpm-incoming-1: (176 commits)
  scripts/checkpatch.pl: add check for declaration of pci_device_id
  scripts/checkpatch.pl: add warnings for static char that could be static const char
  checkpatch: version 0.31
  checkpatch: statement/block context analyser should look at sanitised lines
  checkpatch: handle EXPORT_SYMBOL for DEVICE_ATTR and similar
  checkpatch: clean up structure definition macro handline
  checkpatch: update copyright dates
  checkpatch: Add additional attribute #defines
  checkpatch: check for incorrect permissions
  checkpatch: ensure kconfig help checks only apply when we are adding help
  checkpatch: simplify and consolidate "missing space after" checks
  checkpatch: add check for space after struct, union, and enum
  checkpatch: returning errno typically should be negative
  checkpatch: handle casts better fixing false categorisation of : as binary
  checkpatch: ensure we do not collapse bracketed sections into constants
  checkpatch: suggest cleanpatch and cleanfile when appropriate
  checkpatch: types may sit on a line on their own
  checkpatch: fix regressions in "fix handling of leading spaces"
  div64_u64(): improve precision on 32bit platforms
  lib/parser: cleanup match_number()
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/vmlinux.lds.h  |    3
-rw-r--r--  include/linux/backing-dev.h        |    3
-rw-r--r--  include/linux/fs.h                 |    8
-rw-r--r--  include/linux/gfp.h                |  105
-rw-r--r--  include/linux/highmem.h            |   65
-rw-r--r--  include/linux/i2c/apds990x.h       |   79
-rw-r--r--  include/linux/i2c/bh1770glc.h      |   53
-rw-r--r--  include/linux/io-mapping.h         |   14
-rw-r--r--  include/linux/kernel.h             |   36
-rw-r--r--  include/linux/kfifo.h              |    7
-rw-r--r--  include/linux/math64.h             |   12
-rw-r--r--  include/linux/memory_hotplug.h     |    4
-rw-r--r--  include/linux/mm.h                 |   17
-rw-r--r--  include/linux/mm_types.h           |    2
-rw-r--r--  include/linux/mmzone.h             |   10
-rw-r--r--  include/linux/moduleparam.h        |    4
-rw-r--r--  include/linux/pageblock-flags.h    |    5
-rw-r--r--  include/linux/pagemap.h            |   13
-rw-r--r--  include/linux/ratelimit.h          |    2
-rw-r--r--  include/linux/rmap.h               |   30
-rw-r--r--  include/linux/sched.h              |    1
-rw-r--r--  include/linux/swap.h               |   10
-rw-r--r--  include/linux/types.h              |   20
-rw-r--r--  include/linux/vmalloc.h            |    2
-rw-r--r--  include/linux/workqueue.h          |    6
-rw-r--r--  include/linux/writeback.h          |    2
-rw-r--r--  include/trace/events/ext4.h        |    8
-rw-r--r--  include/trace/events/vmscan.h      |   44
-rw-r--r--  include/trace/events/writeback.h   |   37
29 files changed, 483 insertions, 119 deletions
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index f4229fb315e1..2c0fc10956ba 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -150,6 +150,7 @@
 #define DATA_DATA \
 	*(.data) \
 	*(.ref.data) \
+	*(.data..shared_aligned) /* percpu related */ \
 	DEV_KEEP(init.data) \
 	DEV_KEEP(exit.data) \
 	CPU_KEEP(init.data) \
@@ -636,7 +637,7 @@
 
 #ifdef CONFIG_BLK_DEV_INITRD
 #define INIT_RAM_FS \
-	. = ALIGN(PAGE_SIZE); \
+	. = ALIGN(4); \
 	VMLINUX_SYMBOL(__initramfs_start) = .; \
 	*(.init.ramfs) \
 	VMLINUX_SYMBOL(__initramfs_end) = .;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 35b00746c712..4ce34fa937d4 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -111,6 +111,7 @@ void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
 
 extern spinlock_t bdi_lock;
 extern struct list_head bdi_list;
+extern struct list_head bdi_pending_list;
 
 static inline int wb_has_dirty_io(struct bdi_writeback *wb)
 {
@@ -285,7 +286,7 @@ enum {
 void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
 void set_bdi_congested(struct backing_dev_info *bdi, int sync);
 long congestion_wait(int sync, long timeout);
-
+long wait_iff_congested(struct zone *zone, int sync, long timeout);
 
 static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
 {
diff --git a/include/linux/fs.h b/include/linux/fs.h
index bb20373d0b46..4658777b41cc 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -34,9 +34,9 @@
 
 /* And dynamically-tunable limits and defaults: */
 struct files_stat_struct {
-	int nr_files;		/* read only */
-	int nr_free_files;	/* read only */
-	int max_files;		/* tunable */
+	unsigned long nr_files;		/* read only */
+	unsigned long nr_free_files;	/* read only */
+	unsigned long max_files;	/* tunable */
 };
 
 struct inodes_stat_t {
@@ -402,7 +402,7 @@ extern void __init inode_init_early(void);
 extern void __init files_init(unsigned long);
 
 extern struct files_stat_struct files_stat;
-extern int get_max_files(void);
+extern unsigned long get_max_files(void);
 extern int sysctl_nr_open;
 extern struct inodes_stat_t inodes_stat;
 extern int leases_enable, lease_break_time;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 975609cb8548..e8713d55360a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -9,6 +9,32 @@
 
 struct vm_area_struct;
 
+/* Plain integer GFP bitmasks. Do not use this directly. */
+#define ___GFP_DMA		0x01u
+#define ___GFP_HIGHMEM		0x02u
+#define ___GFP_DMA32		0x04u
+#define ___GFP_MOVABLE		0x08u
+#define ___GFP_WAIT		0x10u
+#define ___GFP_HIGH		0x20u
+#define ___GFP_IO		0x40u
+#define ___GFP_FS		0x80u
+#define ___GFP_COLD		0x100u
+#define ___GFP_NOWARN		0x200u
+#define ___GFP_REPEAT		0x400u
+#define ___GFP_NOFAIL		0x800u
+#define ___GFP_NORETRY		0x1000u
+#define ___GFP_COMP		0x4000u
+#define ___GFP_ZERO		0x8000u
+#define ___GFP_NOMEMALLOC	0x10000u
+#define ___GFP_HARDWALL		0x20000u
+#define ___GFP_THISNODE		0x40000u
+#define ___GFP_RECLAIMABLE	0x80000u
+#ifdef CONFIG_KMEMCHECK
+#define ___GFP_NOTRACK		0x200000u
+#else
+#define ___GFP_NOTRACK		0
+#endif
+
 /*
  * GFP bitmasks..
  *
@@ -18,10 +44,10 @@ struct vm_area_struct;
  * without the underscores and use them consistently. The definitions here may
  * be used in bit comparisons.
  */
-#define __GFP_DMA	((__force gfp_t)0x01u)
-#define __GFP_HIGHMEM	((__force gfp_t)0x02u)
-#define __GFP_DMA32	((__force gfp_t)0x04u)
-#define __GFP_MOVABLE	((__force gfp_t)0x08u)	/* Page is movable */
+#define __GFP_DMA	((__force gfp_t)___GFP_DMA)
+#define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
+#define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
+#define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)	/* Page is movable */
 #define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
 /*
  * Action modifiers - doesn't change the zoning
@@ -38,27 +64,22 @@ struct vm_area_struct;
  * __GFP_MOVABLE: Flag that this page will be movable by the page migration
  * mechanism or reclaimed
  */
-#define __GFP_WAIT	((__force gfp_t)0x10u)	/* Can wait and reschedule? */
-#define __GFP_HIGH	((__force gfp_t)0x20u)	/* Should access emergency pools? */
-#define __GFP_IO	((__force gfp_t)0x40u)	/* Can start physical IO? */
-#define __GFP_FS	((__force gfp_t)0x80u)	/* Can call down to low-level FS? */
-#define __GFP_COLD	((__force gfp_t)0x100u)	/* Cache-cold page required */
-#define __GFP_NOWARN	((__force gfp_t)0x200u)	/* Suppress page allocation failure warning */
-#define __GFP_REPEAT	((__force gfp_t)0x400u)	/* See above */
-#define __GFP_NOFAIL	((__force gfp_t)0x800u)	/* See above */
-#define __GFP_NORETRY	((__force gfp_t)0x1000u)/* See above */
-#define __GFP_COMP	((__force gfp_t)0x4000u)/* Add compound page metadata */
-#define __GFP_ZERO	((__force gfp_t)0x8000u)/* Return zeroed page on success */
-#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
-#define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
-#define __GFP_THISNODE	((__force gfp_t)0x40000u)/* No fallback, no policies */
-#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
-
-#ifdef CONFIG_KMEMCHECK
-#define __GFP_NOTRACK	((__force gfp_t)0x200000u)  /* Don't track with kmemcheck */
-#else
-#define __GFP_NOTRACK	((__force gfp_t)0)
-#endif
+#define __GFP_WAIT	((__force gfp_t)___GFP_WAIT)	/* Can wait and reschedule? */
+#define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)	/* Should access emergency pools? */
+#define __GFP_IO	((__force gfp_t)___GFP_IO)	/* Can start physical IO? */
+#define __GFP_FS	((__force gfp_t)___GFP_FS)	/* Can call down to low-level FS? */
+#define __GFP_COLD	((__force gfp_t)___GFP_COLD)	/* Cache-cold page required */
+#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)	/* Suppress page allocation failure warning */
+#define __GFP_REPEAT	((__force gfp_t)___GFP_REPEAT)	/* See above */
+#define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)	/* See above */
+#define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY)	/* See above */
+#define __GFP_COMP	((__force gfp_t)___GFP_COMP)	/* Add compound page metadata */
+#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)	/* Return zeroed page on success */
+#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves */
+#define __GFP_HARDWALL   ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
+#define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
+#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
+#define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */
 
 /*
  * This may seem redundant, but it's a way of annotating false positives vs.
@@ -186,14 +207,14 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
 #endif
 
 #define GFP_ZONE_TABLE ( \
 	(ZONE_NORMAL << 0 * ZONES_SHIFT) \
-	| (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT) \
-	| (OPT_ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT) \
-	| (OPT_ZONE_DMA32 << __GFP_DMA32 * ZONES_SHIFT) \
-	| (ZONE_NORMAL << __GFP_MOVABLE * ZONES_SHIFT) \
-	| (OPT_ZONE_DMA << (__GFP_MOVABLE | __GFP_DMA) * ZONES_SHIFT) \
-	| (ZONE_MOVABLE << (__GFP_MOVABLE | __GFP_HIGHMEM) * ZONES_SHIFT)\
-	| (OPT_ZONE_DMA32 << (__GFP_MOVABLE | __GFP_DMA32) * ZONES_SHIFT)\
+	| (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT) \
+	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT) \
+	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT) \
+	| (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT) \
+	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT) \
+	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT) \
+	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT) \
 )
 
 /*
@@ -203,20 +224,20 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
  * allowed.
  */
 #define GFP_ZONE_BAD ( \
-	1 << (__GFP_DMA | __GFP_HIGHMEM) \
-	| 1 << (__GFP_DMA | __GFP_DMA32) \
-	| 1 << (__GFP_DMA32 | __GFP_HIGHMEM) \
-	| 1 << (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM) \
-	| 1 << (__GFP_MOVABLE | __GFP_HIGHMEM | __GFP_DMA) \
-	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA) \
-	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_HIGHMEM) \
-	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA | __GFP_HIGHMEM)\
+	1 << (___GFP_DMA | ___GFP_HIGHMEM) \
+	| 1 << (___GFP_DMA | ___GFP_DMA32) \
+	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \
+	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \
+	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \
+	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \
+	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \
+	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \
 )
 
 static inline enum zone_type gfp_zone(gfp_t flags)
 {
 	enum zone_type z;
-	int bit = flags & GFP_ZONEMASK;
+	int bit = (__force int) (flags & GFP_ZONEMASK);
 
 	z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
 					 ((1 << ZONES_SHIFT) - 1);
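
Note: the ___GFP_* values above are kept as plain integers precisely so they can be used in constant arithmetic such as GFP_ZONE_TABLE and GFP_ZONE_BAD without __force casts. Below is a minimal standalone sketch (plain userspace C, not kernel code) of the table-lookup idea, assuming a hypothetical zone numbering and ZONES_SHIFT of 3; the real kernel derives OPT_ZONE_* and ZONES_SHIFT from its configuration.

#include <stdio.h>

#define ___GFP_DMA	0x01u
#define ___GFP_HIGHMEM	0x02u
#define ___GFP_DMA32	0x04u
#define ___GFP_MOVABLE	0x08u
#define GFP_ZONEMASK	(___GFP_DMA | ___GFP_HIGHMEM | ___GFP_DMA32 | ___GFP_MOVABLE)

/* Illustration only: assumed zone numbering for this sketch. */
enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM, ZONE_MOVABLE };
#define ZONES_SHIFT 3	/* 3 bits per table slot in this sketch */

/* Each combination of zone modifiers selects a 3-bit slot holding its zone. */
#define GFP_ZONE_TABLE ( \
	(1ULL * ZONE_NORMAL  << 0 * ZONES_SHIFT) | \
	(1ULL * ZONE_DMA     << ___GFP_DMA * ZONES_SHIFT) | \
	(1ULL * ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT) | \
	(1ULL * ZONE_DMA32   << ___GFP_DMA32 * ZONES_SHIFT) | \
	(1ULL * ZONE_NORMAL  << ___GFP_MOVABLE * ZONES_SHIFT) | \
	(1ULL * ZONE_DMA     << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT) | \
	(1ULL * ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT) | \
	(1ULL * ZONE_DMA32   << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT))

static int gfp_zone(unsigned int flags)
{
	int bit = flags & GFP_ZONEMASK;

	return (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) & ((1 << ZONES_SHIFT) - 1);
}

int main(void)
{
	printf("%d %d %d\n",
	       gfp_zone(0),				/* 2 == ZONE_NORMAL */
	       gfp_zone(___GFP_HIGHMEM),		/* 3 == ZONE_HIGHMEM */
	       gfp_zone(___GFP_MOVABLE | ___GFP_HIGHMEM)); /* 4 == ZONE_MOVABLE */
	return 0;
}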
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index e3060ef85b6d..8a85ec109a3a 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -28,18 +28,6 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
 
 #include <asm/kmap_types.h>
 
-#ifdef CONFIG_DEBUG_HIGHMEM
-
-void debug_kmap_atomic(enum km_type type);
-
-#else
-
-static inline void debug_kmap_atomic(enum km_type type)
-{
-}
-
-#endif
-
 #ifdef CONFIG_HIGHMEM
 #include <asm/highmem.h>
 
@@ -49,6 +37,27 @@ extern unsigned long totalhigh_pages;
 
 void kmap_flush_unused(void);
 
+DECLARE_PER_CPU(int, __kmap_atomic_idx);
+
+static inline int kmap_atomic_idx_push(void)
+{
+	int idx = __get_cpu_var(__kmap_atomic_idx)++;
+#ifdef CONFIG_DEBUG_HIGHMEM
+	WARN_ON_ONCE(in_irq() && !irqs_disabled());
+	BUG_ON(idx > KM_TYPE_NR);
+#endif
+	return idx;
+}
+
+static inline int kmap_atomic_idx_pop(void)
+{
+	int idx = --__get_cpu_var(__kmap_atomic_idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+	BUG_ON(idx < 0);
+#endif
+	return idx;
+}
+
 #else /* CONFIG_HIGHMEM */
 
 static inline unsigned int nr_free_highpages(void) { return 0; }
@@ -66,19 +75,19 @@ static inline void kunmap(struct page *page)
 {
 }
 
-static inline void *kmap_atomic(struct page *page, enum km_type idx)
+static inline void *__kmap_atomic(struct page *page)
 {
 	pagefault_disable();
 	return page_address(page);
 }
-#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)
+#define kmap_atomic_prot(page, prot)	__kmap_atomic(page)
 
-static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
+static inline void __kunmap_atomic(void *addr)
 {
 	pagefault_enable();
 }
 
-#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
+#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
 #define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
 
 #define kmap_flush_unused()	do {} while(0)
@@ -86,12 +95,20 @@ static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
 
 #endif /* CONFIG_HIGHMEM */
 
-/* Prevent people trying to call kunmap_atomic() as if it were kunmap() */
-/* kunmap_atomic() should get the return value of kmap_atomic, not the page. */
-#define kunmap_atomic(addr, idx) do { \
-		BUILD_BUG_ON(__same_type((addr), struct page *)); \
-		kunmap_atomic_notypecheck((addr), (idx)); \
-	} while (0)
+/*
+ * Make both: kmap_atomic(page, idx) and kmap_atomic(page) work.
+ */
+#define kmap_atomic(page, args...) __kmap_atomic(page)
+
+/*
+ * Prevent people trying to call kunmap_atomic() as if it were kunmap()
+ * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+ */
+#define kunmap_atomic(addr, args...) \
+do { \
+	BUILD_BUG_ON(__same_type((addr), struct page *)); \
+	__kunmap_atomic(addr); \
+} while (0)
 
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 #ifndef clear_user_highpage
@@ -201,8 +218,8 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
 	vfrom = kmap_atomic(from, KM_USER0);
 	vto = kmap_atomic(to, KM_USER1);
 	copy_user_page(vto, vfrom, vaddr, to);
-	kunmap_atomic(vfrom, KM_USER0);
 	kunmap_atomic(vto, KM_USER1);
+	kunmap_atomic(vfrom, KM_USER0);
 }
 
 #endif
@@ -214,8 +231,8 @@ static inline void copy_highpage(struct page *to, struct page *from)
 	vfrom = kmap_atomic(from, KM_USER0);
 	vto = kmap_atomic(to, KM_USER1);
 	copy_page(vto, vfrom);
-	kunmap_atomic(vfrom, KM_USER0);
 	kunmap_atomic(vto, KM_USER1);
+	kunmap_atomic(vfrom, KM_USER0);
 }
 
 #endif /* _LINUX_HIGHMEM_H */
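
Note: after this rework kmap_atomic() tracks its own slot through the per-CPU kmap_atomic_idx_push()/pop() stack, so the km_type argument is accepted but ignored, and kunmap_atomic() now expects the mapped address (and unmaps must nest, last-mapped first). A hedged kernel-context sketch of the calling pattern; the helper name is made up for illustration.

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical helper: zero one page using the slot-less API. */
static void zero_one_page(struct page *page)
{
	void *vaddr = kmap_atomic(page);	/* legacy callers may still pass KM_USER0 */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);			/* takes the mapped address, not the page */
}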
diff --git a/include/linux/i2c/apds990x.h b/include/linux/i2c/apds990x.h
new file mode 100644
index 000000000000..d186fcc5d257
--- /dev/null
+++ b/include/linux/i2c/apds990x.h
@@ -0,0 +1,79 @@
+/*
+ * This file is part of the APDS990x sensor driver.
+ * Chip is combined proximity and ambient light sensor.
+ *
+ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __APDS990X_H__
+#define __APDS990X_H__
+
+
+#define APDS_IRLED_CURR_12mA	0x3
+#define APDS_IRLED_CURR_25mA	0x2
+#define APDS_IRLED_CURR_50mA	0x1
+#define APDS_IRLED_CURR_100mA	0x0
+
+/**
+ * struct apds990x_chip_factors - defines effect of the cover window
+ * @ga: Total glass attenuation
+ * @cf1: clear channel factor 1 for raw to lux conversion
+ * @irf1: IR channel factor 1 for raw to lux conversion
+ * @cf2: clear channel factor 2 for raw to lux conversion
+ * @irf2: IR channel factor 2 for raw to lux conversion
+ * @df: device factor for conversion formulas
+ *
+ * Structure for tuning ALS calculation to match with environment.
+ * Values depend on the material above the sensor and the sensor
+ * itself. If the GA is zero, driver will use uncovered sensor default values
+ * format: decimal value * APDS_PARAM_SCALE except df which is plain integer.
+ */
+#define APDS_PARAM_SCALE 4096
+struct apds990x_chip_factors {
+	int ga;
+	int cf1;
+	int irf1;
+	int cf2;
+	int irf2;
+	int df;
+};
+
+/**
+ * struct apds990x_platform_data - platform data for apsd990x.c driver
+ * @cf: chip factor data
+ * @pddrive: IR-led driving current
+ * @ppcount: number of IR pulses used for proximity estimation
+ * @setup_resources: interrupt line setup call back function
+ * @release_resources: interrupt line release call back function
+ *
+ * Proximity detection result depends heavily on correct ppcount, pdrive
+ * and cover window.
+ *
+ */
+
+struct apds990x_platform_data {
+	struct apds990x_chip_factors	cf;
+	u8	pdrive;
+	u8	ppcount;
+	int	(*setup_resources)(void);
+	int	(*release_resources)(void);
+};
+
+#endif
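
Note: the chip factors are fixed-point values scaled by APDS_PARAM_SCALE (4096), except df which stays a plain integer. A hedged sketch of what a board file might pass; every number below is a made-up placeholder for illustration, not a value recommended by the driver.

#include <linux/i2c/apds990x.h>

/* Hypothetical board data: a glass attenuation of 0.48 becomes 0.48 * 4096 ~= 1966. */
static struct apds990x_platform_data board_apds990x_data = {
	.cf = {
		.ga	= 1966,		/* 0.48 * APDS_PARAM_SCALE */
		.cf1	= 4096,		/* 1.00 * APDS_PARAM_SCALE */
		.irf1	= 9134,		/* 2.23 * APDS_PARAM_SCALE */
		.cf2	= 2867,		/* 0.70 * APDS_PARAM_SCALE */
		.irf2	= 5816,		/* 1.42 * APDS_PARAM_SCALE */
		.df	= 52,		/* plain integer device factor */
	},
	.pdrive		= APDS_IRLED_CURR_50mA,
	.ppcount	= 4,
};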
diff --git a/include/linux/i2c/bh1770glc.h b/include/linux/i2c/bh1770glc.h
new file mode 100644
index 000000000000..8b5e2df36c72
--- /dev/null
+++ b/include/linux/i2c/bh1770glc.h
@@ -0,0 +1,53 @@
+/*
+ * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver.
+ * Chip is combined proximity and ambient light sensor.
+ *
+ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __BH1770_H__
+#define __BH1770_H__
+
+/**
+ * struct bh1770_platform_data - platform data for bh1770glc driver
+ * @led_def_curr: IR led driving current.
+ * @glass_attenuation: Attenuation factor for covering window.
+ * @setup_resources: Call back for interrupt line setup function
+ * @release_resources: Call back for interrupte line release function
+ *
+ * Example of glass attenuation: 16384 * 385 / 100 means attenuation factor
+ * of 3.85. i.e. light_above_sensor = light_above_cover_window / 3.85
+ */
+
+struct bh1770_platform_data {
+#define BH1770_LED_5mA		0
+#define BH1770_LED_10mA		1
+#define BH1770_LED_20mA		2
+#define BH1770_LED_50mA		3
+#define BH1770_LED_100mA	4
+#define BH1770_LED_150mA	5
+#define BH1770_LED_200mA	6
+	__u8 led_def_curr;
+#define BH1770_NEUTRAL_GA 16384 /* 16384 / 16384 = 1 */
+	__u32 glass_attenuation;
+	int (*setup_resources)(void);
+	int (*release_resources)(void);
+};
+#endif
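
Note: glass_attenuation is a fixed-point factor relative to BH1770_NEUTRAL_GA (16384). Per the comment above, an attenuation factor of 3.85 is encoded as 16384 * 385 / 100, which is 63078 after integer division. A hedged board-file sketch with made-up values:

#include <linux/i2c/bh1770glc.h>

/* Hypothetical board data: cover window attenuates light by a factor of 3.85. */
static struct bh1770_platform_data board_bh1770_data = {
	.led_def_curr		= BH1770_LED_50mA,
	.glass_attenuation	= 16384 * 385 / 100,	/* == 63078, i.e. 3.85 */
};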
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 7fb592793738..8cdcc2a199ad 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -81,8 +81,7 @@ io_mapping_free(struct io_mapping *mapping)
 /* Atomic map/unmap */
 static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
-			 unsigned long offset,
-			 int slot)
+			 unsigned long offset)
 {
 	resource_size_t phys_addr;
 	unsigned long pfn;
@@ -90,13 +89,13 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
 	BUG_ON(offset >= mapping->size);
 	phys_addr = mapping->base + offset;
 	pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
-	return iomap_atomic_prot_pfn(pfn, slot, mapping->prot);
+	return iomap_atomic_prot_pfn(pfn, mapping->prot);
 }
 
 static inline void
-io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr)
 {
-	iounmap_atomic(vaddr, slot);
+	iounmap_atomic(vaddr);
 }
 
 static inline void __iomem *
@@ -137,14 +136,13 @@ io_mapping_free(struct io_mapping *mapping)
 /* Atomic map/unmap */
 static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
-			 unsigned long offset,
-			 int slot)
+			 unsigned long offset)
 {
 	return ((char __force __iomem *) mapping) + offset;
 }
 
 static inline void
-io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr)
 {
 }
 
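
Note: io_mapping_map_atomic_wc()/io_mapping_unmap_atomic() lose their KM slot argument as part of the kmap_atomic rework. A hedged sketch of a caller after this change (kernel context; the helper name is illustrative and the offset is assumed to lie within the mapping):

#include <linux/io-mapping.h>
#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical: write one 32-bit value at 'offset' inside an io_mapping. */
static void poke_mapping(struct io_mapping *mapping, unsigned long offset, u32 val)
{
	void __iomem *vaddr = io_mapping_map_atomic_wc(mapping, offset);

	iowrite32(val, vaddr);
	io_mapping_unmap_atomic(vaddr);		/* no slot to pass any more */
}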
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index edef168a0406..450092c1e35f 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -173,6 +173,11 @@ extern int _cond_resched(void);
 	(__x < 0) ? -__x : __x; \
 	})
 
+#define abs64(x) ({ \
+		s64 __x = (x); \
+		(__x < 0) ? -__x : __x; \
+	})
+
 #ifdef CONFIG_PROVE_LOCKING
 void might_fault(void);
 #else
@@ -203,10 +208,10 @@ extern unsigned long simple_strtoul(const char *,char **,unsigned int);
 extern long simple_strtol(const char *,char **,unsigned int);
 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
 extern long long simple_strtoll(const char *,char **,unsigned int);
-extern int strict_strtoul(const char *, unsigned int, unsigned long *);
-extern int strict_strtol(const char *, unsigned int, long *);
-extern int strict_strtoull(const char *, unsigned int, unsigned long long *);
-extern int strict_strtoll(const char *, unsigned int, long long *);
+extern int __must_check strict_strtoul(const char *, unsigned int, unsigned long *);
+extern int __must_check strict_strtol(const char *, unsigned int, long *);
+extern int __must_check strict_strtoull(const char *, unsigned int, unsigned long long *);
+extern int __must_check strict_strtoll(const char *, unsigned int, long long *);
 extern int sprintf(char * buf, const char * fmt, ...)
 	__attribute__ ((format (printf, 2, 3)));
 extern int vsprintf(char *buf, const char *, va_list)
@@ -277,6 +282,11 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 asmlinkage int printk(const char * fmt, ...)
 	__attribute__ ((format (printf, 1, 2))) __cold;
 
+/*
+ * Please don't use printk_ratelimit(), because it shares ratelimiting state
+ * with all other unrelated printk_ratelimit() callsites. Instead use
+ * printk_ratelimited() or plain old __ratelimit().
+ */
 extern int __printk_ratelimit(const char *func);
 #define printk_ratelimit() __printk_ratelimit(__func__)
 extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
@@ -651,6 +661,24 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 	(void) (&_max1 == &_max2); \
 	_max1 > _max2 ? _max1 : _max2; })
 
+#define min3(x, y, z) ({ \
+	typeof(x) _min1 = (x); \
+	typeof(y) _min2 = (y); \
+	typeof(z) _min3 = (z); \
+	(void) (&_min1 == &_min2); \
+	(void) (&_min1 == &_min3); \
+	_min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \
+		(_min2 < _min3 ? _min2 : _min3); })
+
+#define max3(x, y, z) ({ \
+	typeof(x) _max1 = (x); \
+	typeof(y) _max2 = (y); \
+	typeof(z) _max3 = (z); \
+	(void) (&_max1 == &_max2); \
+	(void) (&_max1 == &_max3); \
+	_max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \
+		(_max2 > _max3 ? _max2 : _max3); })
+
 /**
  * min_not_zero - return the minimum that is _not_ zero, unless both are zero
  * @x: value1
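
Note: min3()/max3() evaluate each argument exactly once, and the (void) pointer comparisons warn when the three types differ; abs64() is abs() for s64 values. A small standalone demo of the semantics (GCC statement expressions, with a local stand-in for the kernel's s64):

#include <stdio.h>

typedef long long s64;	/* stand-in for the kernel's s64 */

#define abs64(x) ({ \
		s64 __x = (x); \
		(__x < 0) ? -__x : __x; \
	})

#define min3(x, y, z) ({ \
	typeof(x) _min1 = (x); \
	typeof(y) _min2 = (y); \
	typeof(z) _min3 = (z); \
	(void) (&_min1 == &_min2); \
	(void) (&_min1 == &_min3); \
	_min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \
		(_min2 < _min3 ? _min2 : _min3); })

int main(void)
{
	int a = 7, b = 3, c = 5;

	/* Prints "3 1234567890123": the smallest of the three ints and the
	 * absolute value of a negative 64-bit quantity. */
	printf("%d %lld\n", min3(a, b, c), abs64(-1234567890123LL));
	return 0;
}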
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 62dbee554f60..c238ad2f82ea 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -171,11 +171,8 @@ struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void);
 }
 
 
-static inline unsigned int __must_check
-__kfifo_must_check_helper(unsigned int val)
-{
-	return val;
-}
+/* __kfifo_must_check_helper() is temporarily disabled because it was faulty */
+#define __kfifo_must_check_helper(x) (x)
 
 /**
  * kfifo_initialized - Check if the fifo is initialized
diff --git a/include/linux/math64.h b/include/linux/math64.h
index c87f1528703a..23fcdfcba81b 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -35,6 +35,14 @@ static inline u64 div64_u64(u64 dividend, u64 divisor)
 	return dividend / divisor;
 }
 
+/**
+ * div64_s64 - signed 64bit divide with 64bit divisor
+ */
+static inline s64 div64_s64(s64 dividend, s64 divisor)
+{
+	return dividend / divisor;
+}
+
 #elif BITS_PER_LONG == 32
 
 #ifndef div_u64_rem
@@ -53,6 +61,10 @@ extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
 extern u64 div64_u64(u64 dividend, u64 divisor);
 #endif
 
+#ifndef div64_s64
+extern s64 div64_s64(s64 dividend, s64 divisor);
+#endif
+
 #endif /* BITS_PER_LONG */
 
 /**
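
Note: div64_s64() rounds out the family: a signed 64-by-64 divide that is a plain '/' on 64-bit kernels and an out-of-line library routine on 32-bit ones. A standalone sketch of the semantics the 64-bit inline provides:

#include <stdio.h>

typedef long long s64;		/* stand-in for the kernel's s64 */

/* On BITS_PER_LONG == 64 this is exactly what the header now provides inline. */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

int main(void)
{
	printf("%lld\n", div64_s64(-10000000000LL, 3LL));	/* -3333333333 */
	return 0;
}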
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 864035fb8f8a..4307231bd22f 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -70,6 +70,10 @@ extern void online_page(struct page *page);
 extern int online_pages(unsigned long, unsigned long);
 extern void __offline_isolated_pages(unsigned long, unsigned long);
 
+#ifdef CONFIG_MEMORY_HOTREMOVE
+extern bool is_pageblock_removable_nolock(struct page *page);
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+
 /* reasonably generic interface to expand the physical pages in a zone */
 extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
 	unsigned long nr_pages);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a4c66846fb8f..721f451c3029 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -144,6 +144,7 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
 #define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
 #define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
+#define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */
 
 /*
  * This interface is used by x86 PAT code to identify a pfn mapping that is
@@ -497,8 +498,8 @@ static inline void set_compound_order(struct page *page, unsigned long order)
 #define NODES_PGSHIFT	(NODES_PGOFF * (NODES_WIDTH != 0))
 #define ZONES_PGSHIFT	(ZONES_PGOFF * (ZONES_WIDTH != 0))
 
-/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allcator */
-#ifdef NODE_NOT_IN_PAGEFLAGS
+/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
 #define ZONEID_SHIFT	(SECTIONS_SHIFT + ZONES_SHIFT)
 #define ZONEID_PGOFF	((SECTIONS_PGOFF < ZONES_PGOFF)? \
 			SECTIONS_PGOFF : ZONES_PGOFF)
@@ -723,6 +724,7 @@ static inline int page_mapped(struct page *page)
 
 #define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
 #define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
+#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
 
 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
 
@@ -868,6 +870,7 @@ int __set_page_dirty_no_writeback(struct page *page);
 int redirty_page_for_writepage(struct writeback_control *wbc,
 			struct page *page);
 void account_page_dirtied(struct page *page, struct address_space *mapping);
+void account_page_writeback(struct page *page);
 int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
@@ -1031,7 +1034,15 @@ extern void unregister_shrinker(struct shrinker *);
 
 int vma_wants_writenotify(struct vm_area_struct *vma);
 
-extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
+extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
+			       spinlock_t **ptl);
+static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+				    spinlock_t **ptl)
+{
+	pte_t *ptep;
+	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
+	return ptep;
+}
 
 #ifdef __PAGETABLE_PUD_FOLDED
 static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index cb57d657ce4d..bb7288a782fd 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -310,6 +310,8 @@ struct mm_struct {
 #ifdef CONFIG_MMU_NOTIFIER
 	struct mmu_notifier_mm *mmu_notifier_mm;
 #endif
+	/* How many tasks sharing this mm are OOM_DISABLE */
+	atomic_t oom_disable_count;
 };
 
 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3984c4eb41fd..39c24ebe9cfd 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -104,6 +104,8 @@ enum zone_stat_item {
 	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
 	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
+	NR_DIRTIED,		/* page dirtyings since bootup */
+	NR_WRITTEN,		/* page writings since bootup */
 #ifdef CONFIG_NUMA
 	NUMA_HIT,		/* allocated in intended node */
 	NUMA_MISS,		/* allocated in non intended node */
@@ -421,6 +423,9 @@ struct zone {
 typedef enum {
 	ZONE_RECLAIM_LOCKED,	/* prevents concurrent reclaim */
 	ZONE_OOM_LOCKED,	/* zone is in OOM killer zonelist */
+	ZONE_CONGESTED,		/* zone has many dirty pages backed by
+				 * a congested BDI
+				 */
 } zone_flags_t;
 
 static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
@@ -438,6 +443,11 @@ static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
 	clear_bit(flag, &zone->flags);
 }
 
+static inline int zone_is_reclaim_congested(const struct zone *zone)
+{
+	return test_bit(ZONE_CONGESTED, &zone->flags);
+}
+
 static inline int zone_is_reclaim_locked(const struct zone *zone)
 {
 	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 9d2f1837b3d8..112adf8bd47d 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -21,8 +21,8 @@
 #define __module_cat(a,b) ___module_cat(a,b)
 #define __MODULE_INFO(tag, name, info) \
 static const char __module_cat(name,__LINE__)[] \
-  __used \
-  __attribute__((section(".modinfo"),unused)) = __stringify(tag) "=" info
+  __used __attribute__((section(".modinfo"), unused, aligned(1))) \
+  = __stringify(tag) "=" info
 #else  /* !MODULE */
 #define __MODULE_INFO(tag, name, info)
 #endif
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index e8c06122be36..19ef95d293ae 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -67,7 +67,8 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
 
 #define get_pageblock_flags(page) \
 			get_pageblock_flags_group(page, 0, NR_PAGEBLOCK_BITS-1)
-#define set_pageblock_flags(page) \
-			set_pageblock_flags_group(page, 0, NR_PAGEBLOCK_BITS-1)
+#define set_pageblock_flags(page, flags) \
+			set_pageblock_flags_group(page, flags, \
+						  0, NR_PAGEBLOCK_BITS-1)
 
 #endif	/* PAGEBLOCK_FLAGS_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e12cdc6d79ee..2d1ffe3cf1ee 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -299,6 +299,8 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 extern void __lock_page(struct page *page);
 extern int __lock_page_killable(struct page *page);
 extern void __lock_page_nosync(struct page *page);
+extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
+				unsigned int flags);
 extern void unlock_page(struct page *page);
 
 static inline void __set_page_locked(struct page *page)
@@ -351,6 +353,17 @@ static inline void lock_page_nosync(struct page *page)
 }
 
 /*
+ * lock_page_or_retry - Lock the page, unless this would block and the
+ * caller indicated that it can handle a retry.
+ */
+static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
+				     unsigned int flags)
+{
+	might_sleep();
+	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
+}
+
+/*
  * This is exported only for wait_on_page_locked/wait_on_page_writeback.
  * Never use this directly!
  */
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 8f69d09a41a5..03ff67b0cdf5 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -36,6 +36,8 @@ static inline void ratelimit_state_init(struct ratelimit_state *rs,
 	rs->begin	= 0;
 }
 
+extern struct ratelimit_state printk_ratelimit_state;
+
 extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
 #define __ratelimit(state) ___ratelimit(state, __func__)
 
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 31b2fd75dcba..bb83c0da2071 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -25,8 +25,8 @@
  * pointing to this anon_vma once its vma list is empty.
  */
 struct anon_vma {
-	spinlock_t lock;	/* Serialize access to vma list */
 	struct anon_vma *root;	/* Root of this anon_vma tree */
+	spinlock_t lock;	/* Serialize access to vma list */
 #if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
 
 	/*
@@ -205,9 +205,20 @@ int try_to_unmap_one(struct page *, struct vm_area_struct *,
 /*
  * Called from mm/filemap_xip.c to unmap empty zero page
  */
-pte_t *page_check_address(struct page *, struct mm_struct *,
+pte_t *__page_check_address(struct page *, struct mm_struct *,
 			  unsigned long, spinlock_t **, int);
 
+static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
+					unsigned long address,
+					spinlock_t **ptlp, int sync)
+{
+	pte_t *ptep;
+
+	__cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
+						       ptlp, sync));
+	return ptep;
+}
+
 /*
  * Used by swapoff to help locate where page is expected in vma.
  */
@@ -230,7 +241,20 @@ int try_to_munlock(struct page *);
 /*
  * Called by memory-failure.c to kill processes.
  */
-struct anon_vma *page_lock_anon_vma(struct page *page);
+struct anon_vma *__page_lock_anon_vma(struct page *page);
+
+static inline struct anon_vma *page_lock_anon_vma(struct page *page)
+{
+	struct anon_vma *anon_vma;
+
+	__cond_lock(RCU, anon_vma = __page_lock_anon_vma(page));
+
+	/* (void) is needed to make gcc happy */
+	(void) __cond_lock(&anon_vma->root->lock, anon_vma);
+
+	return anon_vma;
+}
+
 void page_unlock_anon_vma(struct anon_vma *anon_vma);
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 56154bbb8da9..393ce94e54b7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1706,7 +1706,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_DUMPCORE	0x00000200	/* dumped core */
 #define PF_SIGNALED	0x00000400	/* killed by a signal */
 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
-#define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
 #define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 7cdd63366f88..eba53e71d2cc 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -271,8 +271,18 @@ extern void scan_mapping_unevictable_pages(struct address_space *);
 extern unsigned long scan_unevictable_pages;
 extern int scan_unevictable_handler(struct ctl_table *, int,
 					void __user *, size_t *, loff_t *);
+#ifdef CONFIG_NUMA
 extern int scan_unevictable_register_node(struct node *node);
 extern void scan_unevictable_unregister_node(struct node *node);
+#else
+static inline int scan_unevictable_register_node(struct node *node)
+{
+	return 0;
+}
+static inline void scan_unevictable_unregister_node(struct node *node)
+{
+}
+#endif
 
 extern int kswapd_run(int nid);
 extern void kswapd_stop(int nid);
diff --git a/include/linux/types.h b/include/linux/types.h
index 357dbc19606f..c2a9eb44f2fa 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -121,15 +121,7 @@ typedef __u64 u_int64_t;
 typedef __s64 int64_t;
 #endif
 
-/*
- * aligned_u64 should be used in defining kernel<->userspace ABIs to avoid
- * common 32/64-bit compat problems.
- * 64-bit values align to 4-byte boundaries on x86_32 (and possibly other
- * architectures) and to 8-byte boundaries on 64-bit architetures. The new
- * aligned_64 type enforces 8-byte alignment so that structs containing
- * aligned_64 values have the same alignment on 32-bit and 64-bit architectures.
- * No conversions are necessary between 32-bit user-space and a 64-bit kernel.
- */
+/* this is a special 64bit data type that is 8-byte aligned */
 #define aligned_u64 __u64 __attribute__((aligned(8)))
 #define aligned_be64 __be64 __attribute__((aligned(8)))
 #define aligned_le64 __le64 __attribute__((aligned(8)))
@@ -186,7 +178,15 @@ typedef __u64 __bitwise __be64;
 typedef __u16 __bitwise __sum16;
 typedef __u32 __bitwise __wsum;
 
-/* this is a special 64bit data type that is 8-byte aligned */
+/*
+ * aligned_u64 should be used in defining kernel<->userspace ABIs to avoid
+ * common 32/64-bit compat problems.
+ * 64-bit values align to 4-byte boundaries on x86_32 (and possibly other
+ * architectures) and to 8-byte boundaries on 64-bit architetures. The new
+ * aligned_64 type enforces 8-byte alignment so that structs containing
+ * aligned_64 values have the same alignment on 32-bit and 64-bit architectures.
+ * No conversions are necessary between 32-bit user-space and a 64-bit kernel.
+ */
 #define __aligned_u64 __u64 __attribute__((aligned(8)))
 #define __aligned_be64 __be64 __attribute__((aligned(8)))
 #define __aligned_le64 __le64 __attribute__((aligned(8)))
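
Note: the explanatory comment moves next to __aligned_u64, the variant intended for new kernel<->userspace ABIs. A standalone illustration of the padding difference the attribute papers over: compiled for x86_32 the plain struct is typically 12 bytes, while the aligned one is 16 bytes on both 32- and 64-bit builds.

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the kernel's __u32/__u64 and the __aligned_u64 definition. */
typedef uint32_t __u32;
typedef uint64_t __u64;
#define __aligned_u64 __u64 __attribute__((aligned(8)))

struct abi_plain   { __u32 flags; __u64 value; };		/* layout differs between x86_32 and x86_64 */
struct abi_aligned { __u32 flags; __aligned_u64 value; };	/* same 16-byte layout everywhere */

int main(void)
{
	printf("plain=%zu aligned=%zu\n",
	       sizeof(struct abi_plain), sizeof(struct abi_aligned));
	return 0;
}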
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 63a4fe6d51bd..a03dcf62ca9d 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -53,8 +53,10 @@ static inline void vmalloc_init(void)
 #endif
 
 extern void *vmalloc(unsigned long size);
+extern void *vzalloc(unsigned long size);
 extern void *vmalloc_user(unsigned long size);
 extern void *vmalloc_node(unsigned long size, int node);
+extern void *vzalloc_node(unsigned long size, int node);
 extern void *vmalloc_exec(unsigned long size);
 extern void *vmalloc_32(unsigned long size);
 extern void *vmalloc_32_user(unsigned long size);
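
Note: vzalloc()/vzalloc_node() are the vmalloc counterparts of kzalloc(): virtually contiguous memory returned pre-zeroed. A hedged kernel-context sketch of the open-coded pattern they replace (the helper name is illustrative):

#include <linux/vmalloc.h>

/* Hypothetical example: before this series the caller zeroed by hand. */
static void *alloc_big_table(unsigned long size)
{
	void *table = vzalloc(size);	/* was: vmalloc(size) followed by memset(table, 0, size) */

	return table;			/* NULL on failure, zeroed memory otherwise */
}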
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 070bb7a88936..0c0771f06bfa 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -190,7 +190,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 		__INIT_WORK((_work), (_func), 0); \
 	} while (0)
 
-#define INIT_WORK_ON_STACK(_work, _func) \
+#define INIT_WORK_ONSTACK(_work, _func) \
 	do { \
 		__INIT_WORK((_work), (_func), 1); \
 	} while (0)
@@ -201,9 +201,9 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 		init_timer(&(_work)->timer); \
 	} while (0)
 
-#define INIT_DELAYED_WORK_ON_STACK(_work, _func) \
+#define INIT_DELAYED_WORK_ONSTACK(_work, _func) \
 	do { \
-		INIT_WORK_ON_STACK(&(_work)->work, (_func)); \
+		INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
 		init_timer_on_stack(&(_work)->timer); \
 	} while (0)
 
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 72a5d647a5f2..c7299d2ace6b 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -149,6 +149,8 @@ int write_cache_pages(struct address_space *mapping,
 int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
 void set_page_dirty_balance(struct page *page, int page_mkwrite);
 void writeback_set_ratelimit(void);
+void tag_pages_for_writeback(struct address_space *mapping,
+			     pgoff_t start, pgoff_t end);
 
 /* pdflush.c */
 extern int nr_pdflush_threads;	/* Global so it can be exported to sysctl
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 01e9e0076a92..6bcb00645de4 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -242,18 +242,20 @@ TRACE_EVENT(ext4_da_writepages,
 		__entry->pages_skipped	= wbc->pages_skipped;
 		__entry->range_start	= wbc->range_start;
 		__entry->range_end	= wbc->range_end;
-		__entry->nonblocking	= wbc->nonblocking;
 		__entry->for_kupdate	= wbc->for_kupdate;
 		__entry->for_reclaim	= wbc->for_reclaim;
 		__entry->range_cyclic	= wbc->range_cyclic;
 		__entry->writeback_index = inode->i_mapping->writeback_index;
 	),
 
-	TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d writeback_index %lu",
+	TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld "
+		  "range_start %llu range_end %llu "
+		  "for_kupdate %d for_reclaim %d "
+		  "range_cyclic %d writeback_index %lu",
 		  jbd2_dev_to_name(__entry->dev),
 		  (unsigned long) __entry->ino, __entry->nr_to_write,
 		  __entry->pages_skipped, __entry->range_start,
-		  __entry->range_end, __entry->nonblocking,
+		  __entry->range_end,
 		  __entry->for_kupdate, __entry->for_reclaim,
 		  __entry->range_cyclic,
 		  (unsigned long) __entry->writeback_index)
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 370aa5a87322..c255fcc587bf 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -10,6 +10,7 @@
 
 #define RECLAIM_WB_ANON		0x0001u
 #define RECLAIM_WB_FILE		0x0002u
+#define RECLAIM_WB_MIXED	0x0010u
 #define RECLAIM_WB_SYNC		0x0004u
 #define RECLAIM_WB_ASYNC	0x0008u
 
@@ -17,13 +18,20 @@
 	(flags) ? __print_flags(flags, "|", \
 		{RECLAIM_WB_ANON,	"RECLAIM_WB_ANON"}, \
 		{RECLAIM_WB_FILE,	"RECLAIM_WB_FILE"}, \
+		{RECLAIM_WB_MIXED,	"RECLAIM_WB_MIXED"}, \
 		{RECLAIM_WB_SYNC,	"RECLAIM_WB_SYNC"}, \
 		{RECLAIM_WB_ASYNC,	"RECLAIM_WB_ASYNC"} \
 		) : "RECLAIM_WB_NONE"
 
 #define trace_reclaim_flags(page, sync) ( \
 	(page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
-	(sync == PAGEOUT_IO_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+	(sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+	)
+
+#define trace_shrink_flags(file, sync) ( \
+	(sync == LUMPY_MODE_SYNC ? RECLAIM_WB_MIXED : \
+			(file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) | \
+	(sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
 	)
 
 TRACE_EVENT(mm_vmscan_kswapd_sleep,
@@ -269,6 +277,40 @@ TRACE_EVENT(mm_vmscan_writepage,
 		show_reclaim_flags(__entry->reclaim_flags))
 );
 
+TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
+
+	TP_PROTO(int nid, int zid,
+			unsigned long nr_scanned, unsigned long nr_reclaimed,
+			int priority, int reclaim_flags),
+
+	TP_ARGS(nid, zid, nr_scanned, nr_reclaimed, priority, reclaim_flags),
+
+	TP_STRUCT__entry(
+		__field(int, nid)
+		__field(int, zid)
+		__field(unsigned long, nr_scanned)
+		__field(unsigned long, nr_reclaimed)
+		__field(int, priority)
+		__field(int, reclaim_flags)
+	),
+
+	TP_fast_assign(
+		__entry->nid = nid;
+		__entry->zid = zid;
+		__entry->nr_scanned = nr_scanned;
+		__entry->nr_reclaimed = nr_reclaimed;
+		__entry->priority = priority;
+		__entry->reclaim_flags = reclaim_flags;
+	),
+
+	TP_printk("nid=%d zid=%d nr_scanned=%ld nr_reclaimed=%ld priority=%d flags=%s",
+		__entry->nid, __entry->zid,
+		__entry->nr_scanned, __entry->nr_reclaimed,
+		__entry->priority,
+		show_reclaim_flags(__entry->reclaim_flags))
+);
+
+
 #endif /* _TRACE_VMSCAN_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index f345f66ae9d1..89a2b2db4375 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -96,8 +96,6 @@ DECLARE_EVENT_CLASS(wbc_class,
 		__field(long, nr_to_write)
 		__field(long, pages_skipped)
 		__field(int, sync_mode)
-		__field(int, nonblocking)
-		__field(int, encountered_congestion)
 		__field(int, for_kupdate)
 		__field(int, for_background)
 		__field(int, for_reclaim)
@@ -153,6 +151,41 @@ DEFINE_WBC_EVENT(wbc_balance_dirty_written);
 DEFINE_WBC_EVENT(wbc_balance_dirty_wait);
 DEFINE_WBC_EVENT(wbc_writepage);
 
+DECLARE_EVENT_CLASS(writeback_congest_waited_template,
+
+	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
+
+	TP_ARGS(usec_timeout, usec_delayed),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, usec_timeout)
+		__field(unsigned int, usec_delayed)
+	),
+
+	TP_fast_assign(
+		__entry->usec_timeout	= usec_timeout;
+		__entry->usec_delayed	= usec_delayed;
+	),
+
+	TP_printk("usec_timeout=%u usec_delayed=%u",
+			__entry->usec_timeout,
+			__entry->usec_delayed)
+);
+
+DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,
+
+	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
+
+	TP_ARGS(usec_timeout, usec_delayed)
+);
+
+DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,
+
+	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
+
+	TP_ARGS(usec_timeout, usec_delayed)
+);
+
 #endif /* _TRACE_WRITEBACK_H */
 
 /* This part must be outside protection */