summary refs log tree commit diff stats
path: root/include/linux
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/gfp.h    46
-rw-r--r--  include/linux/skbuff.h  6
2 files changed, 36 insertions(+), 16 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 440fca3e7e5d..b56e811b6f7c 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -29,12 +29,13 @@ struct vm_area_struct;
29#define ___GFP_NOMEMALLOC 0x10000u 29#define ___GFP_NOMEMALLOC 0x10000u
30#define ___GFP_HARDWALL 0x20000u 30#define ___GFP_HARDWALL 0x20000u
31#define ___GFP_THISNODE 0x40000u 31#define ___GFP_THISNODE 0x40000u
32#define ___GFP_WAIT 0x80000u 32#define ___GFP_ATOMIC 0x80000u
33#define ___GFP_NOACCOUNT 0x100000u 33#define ___GFP_NOACCOUNT 0x100000u
34#define ___GFP_NOTRACK 0x200000u 34#define ___GFP_NOTRACK 0x200000u
35#define ___GFP_NO_KSWAPD 0x400000u 35#define ___GFP_DIRECT_RECLAIM 0x400000u
36#define ___GFP_OTHER_NODE 0x800000u 36#define ___GFP_OTHER_NODE 0x800000u
37#define ___GFP_WRITE 0x1000000u 37#define ___GFP_WRITE 0x1000000u
38#define ___GFP_KSWAPD_RECLAIM 0x2000000u
38/* If the above are modified, __GFP_BITS_SHIFT may need updating */ 39/* If the above are modified, __GFP_BITS_SHIFT may need updating */
39 40
40/* 41/*
@@ -71,7 +72,7 @@ struct vm_area_struct;
71 * __GFP_MOVABLE: Flag that this page will be movable by the page migration 72 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
72 * mechanism or reclaimed 73 * mechanism or reclaimed
73 */ 74 */
74#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */ 75#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC) /* Caller cannot wait or reschedule */
75#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */ 76#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */
76#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */ 77#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */
77#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */ 78#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */
@@ -94,23 +95,37 @@ struct vm_area_struct;
94#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */ 95#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */
95#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */ 96#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
96 97
97#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
98#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */ 98#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
99#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */ 99#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
100 100
101/* 101/*
102 * A caller that is willing to wait may enter direct reclaim and will
103 * wake kswapd to reclaim pages in the background until the high
104 * watermark is met. A caller may wish to clear __GFP_DIRECT_RECLAIM to
105 * avoid unnecessary delays when a fallback option is available but
106 * still allow kswapd to reclaim in the background. The kswapd flag
107 * can be cleared when the reclaiming of pages would cause unnecessary
108 * disruption.
109 */
110#define __GFP_WAIT ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
111#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
112#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
113
114/*
102 * This may seem redundant, but it's a way of annotating false positives vs. 115 * This may seem redundant, but it's a way of annotating false positives vs.
103 * allocations that simply cannot be supported (e.g. page tables). 116 * allocations that simply cannot be supported (e.g. page tables).
104 */ 117 */
105#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) 118#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
106 119
107#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */ 120#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
108#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) 121#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
109 122
110/* This equals 0, but use constants in case they ever change */ 123/*
111#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH) 124 * GFP_ATOMIC callers can not sleep, need the allocation to succeed.
112/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */ 125 * A lower watermark is applied to allow access to "atomic reserves"
113#define GFP_ATOMIC (__GFP_HIGH) 126 */
127#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
128#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
114#define GFP_NOIO (__GFP_WAIT) 129#define GFP_NOIO (__GFP_WAIT)
115#define GFP_NOFS (__GFP_WAIT | __GFP_IO) 130#define GFP_NOFS (__GFP_WAIT | __GFP_IO)
116#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS) 131#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
@@ -119,10 +134,10 @@ struct vm_area_struct;
119#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) 134#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
120#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) 135#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
121#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) 136#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
122#define GFP_IOFS (__GFP_IO | __GFP_FS) 137#define GFP_IOFS (__GFP_IO | __GFP_FS | __GFP_KSWAPD_RECLAIM)
123#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ 138#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
124 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ 139 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \
125 __GFP_NO_KSWAPD) 140 ~__GFP_KSWAPD_RECLAIM)
126 141
127/* This mask makes up all the page movable related flags */ 142/* This mask makes up all the page movable related flags */
128#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) 143#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
@@ -164,6 +179,11 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
164 return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT; 179 return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
165} 180}
166 181
182static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
183{
184 return gfp_flags & __GFP_DIRECT_RECLAIM;
185}
186
167#ifdef CONFIG_HIGHMEM 187#ifdef CONFIG_HIGHMEM
168#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM 188#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
169#else 189#else
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 24f4dfd94c51..4355129fff91 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1224,7 +1224,7 @@ static inline int skb_cloned(const struct sk_buff *skb)
1224 1224
1225static inline int skb_unclone(struct sk_buff *skb, gfp_t pri) 1225static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
1226{ 1226{
1227 might_sleep_if(pri & __GFP_WAIT); 1227 might_sleep_if(gfpflags_allow_blocking(pri));
1228 1228
1229 if (skb_cloned(skb)) 1229 if (skb_cloned(skb))
1230 return pskb_expand_head(skb, 0, 0, pri); 1230 return pskb_expand_head(skb, 0, 0, pri);
@@ -1308,7 +1308,7 @@ static inline int skb_shared(const struct sk_buff *skb)
1308 */ 1308 */
1309static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri) 1309static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
1310{ 1310{
1311 might_sleep_if(pri & __GFP_WAIT); 1311 might_sleep_if(gfpflags_allow_blocking(pri));
1312 if (skb_shared(skb)) { 1312 if (skb_shared(skb)) {
1313 struct sk_buff *nskb = skb_clone(skb, pri); 1313 struct sk_buff *nskb = skb_clone(skb, pri);
1314 1314
@@ -1344,7 +1344,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
1344static inline struct sk_buff *skb_unshare(struct sk_buff *skb, 1344static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
1345 gfp_t pri) 1345 gfp_t pri)
1346{ 1346{
1347 might_sleep_if(pri & __GFP_WAIT); 1347 might_sleep_if(gfpflags_allow_blocking(pri));
1348 if (skb_cloned(skb)) { 1348 if (skb_cloned(skb)) {
1349 struct sk_buff *nskb = skb_copy(skb, pri); 1349 struct sk_buff *nskb = skb_copy(skb, pri);
1350 1350