 include/linux/skbuff.h |  6 +++---
 net/core/skbuff.c      | 12 +++++++-----
 2 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a05a5f7c0b73..1d649f3eb006 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -332,17 +332,17 @@ struct sk_buff {
 extern void kfree_skb(struct sk_buff *skb);
 extern void __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *__alloc_skb(unsigned int size,
-				   gfp_t priority, int fclone);
+				   gfp_t priority, int fclone, int node);
 static inline struct sk_buff *alloc_skb(unsigned int size,
 					gfp_t priority)
 {
-	return __alloc_skb(size, priority, 0);
+	return __alloc_skb(size, priority, 0, -1);
 }
 
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 					       gfp_t priority)
 {
-	return __alloc_skb(size, priority, 1);
+	return __alloc_skb(size, priority, 1, -1);
 }
 
 extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
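In the header, __alloc_skb() gains a node argument while the alloc_skb() and alloc_skb_fclone() wrappers pass -1, meaning no NUMA preference, so existing callers keep their behaviour. As a sketch only (alloc_skb_node() is not part of this patch), a caller that wants skb memory on a specific node could wrap the new interface like this:

	/* Hypothetical helper, not in this patch: allocate an skb with a
	 * preferred NUMA node.  node == -1 means "any node", matching the
	 * behaviour of plain alloc_skb().
	 */
	static inline struct sk_buff *alloc_skb_node(unsigned int size,
						     gfp_t priority, int node)
	{
		return __alloc_skb(size, priority, 0, node);
	}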
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8e1c385e5ba9..7217fb8928f2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -132,6 +132,7 @@ EXPORT_SYMBOL(skb_truesize_bug);
  *	@gfp_mask: allocation mask
  *	@fclone: allocate from fclone cache instead of head cache
  *		and allocate a cloned (child) skb
+ *	@node: numa node to allocate memory on
  *
  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
  *	tail room of size bytes. The object has a reference count of one.
@@ -141,7 +142,7 @@ EXPORT_SYMBOL(skb_truesize_bug);
  *	%GFP_ATOMIC.
  */
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
-			    int fclone)
+			    int fclone, int node)
 {
 	kmem_cache_t *cache;
 	struct skb_shared_info *shinfo;
@@ -151,14 +152,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
 
 	/* Get the HEAD */
-	skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
+	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
 	if (!skb)
 		goto out;
 
 	/* Get the DATA. Size must match skb_add_mtu(). */
 	size = SKB_DATA_ALIGN(size);
-	data = kmalloc_track_caller(size + sizeof(struct skb_shared_info),
-			gfp_mask);
+	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
+			gfp_mask, node);
 	if (!data)
 		goto nodata;
 
@@ -267,9 +268,10 @@ nodata:
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 		unsigned int length, gfp_t gfp_mask)
 {
+	int node = dev->class_dev.dev ? dev_to_node(dev->class_dev.dev) : -1;
 	struct sk_buff *skb;
 
-	skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
+	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
 	if (likely(skb)) {
 		skb_reserve(skb, NET_SKB_PAD);
 		skb->dev = dev;
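The __netdev_alloc_skb() hunk asks the driver model for the device's home node (dev->class_dev.dev is the underlying struct device, dev_to_node() maps it to a NUMA node, and -1 is used when no device is attached), so receive buffers are allocated from memory local to the NIC. Drivers that already use netdev_alloc_skb() pick this up without changes; the refill loop below is only an illustrative sketch (my_rx_refill and rx_ring are made-up names):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Illustrative RX refill loop: netdev_alloc_skb() ends up in
	 * __netdev_alloc_skb(), which now allocates on the NIC's NUMA
	 * node when the underlying struct device is known.
	 */
	static int my_rx_refill(struct net_device *dev, struct sk_buff **rx_ring,
				int count, unsigned int buf_len)
	{
		int i;

		for (i = 0; i < count; i++) {
			struct sk_buff *skb = netdev_alloc_skb(dev, buf_len);

			if (!skb)
				return -ENOMEM;	/* caller retries later */
			rx_ring[i] = skb;
		}
		return 0;
	}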