Diffstat (limited to 'net/core')
 net/core/dev.c            | 79
 net/core/dev_addr_lists.c |  3
 net/core/drop_monitor.c   | 89
 net/core/filter.c         |  9
 net/core/net_namespace.c  | 33
 net/core/skbuff.c         |  8
 6 files changed, 131 insertions(+), 90 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 5d59155adf2a..9bb8f87c4cda 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1409,14 +1409,34 @@ EXPORT_SYMBOL(register_netdevice_notifier);
  * register_netdevice_notifier(). The notifier is unlinked into the
  * kernel structures and may then be reused. A negative errno code
  * is returned on a failure.
+ *
+ * After unregistering unregister and down device events are synthesized
+ * for all devices on the device list to the removed notifier to remove
+ * the need for special case cleanup code.
  */
 
 int unregister_netdevice_notifier(struct notifier_block *nb)
 {
+        struct net_device *dev;
+        struct net *net;
         int err;
 
         rtnl_lock();
         err = raw_notifier_chain_unregister(&netdev_chain, nb);
+        if (err)
+                goto unlock;
+
+        for_each_net(net) {
+                for_each_netdev(net, dev) {
+                        if (dev->flags & IFF_UP) {
+                                nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
+                                nb->notifier_call(nb, NETDEV_DOWN, dev);
+                        }
+                        nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
+                        nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
+                }
+        }
+unlock:
         rtnl_unlock();
         return err;
 }
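
For context, a minimal sketch of a subscriber that benefits from the synthesized events; the example_* names are illustrative and not part of this patch. Because NETDEV_DOWN and NETDEV_UNREGISTER are now replayed for every remaining device, such a notifier can be torn down with a single unregister_netdevice_notifier() call instead of walking the device list itself:

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;   /* calling convention used by this hunk */

        switch (event) {
        case NETDEV_DOWN:
                /* stop using dev for tx/rx */
                break;
        case NETDEV_UNREGISTER:
                /* free any per-device state kept for dev */
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
        .notifier_call = example_netdev_event,
};

static void example_cleanup(void)
{
        /* DOWN/UNREGISTER are synthesized here for all remaining devices */
        unregister_netdevice_notifier(&example_nb);
}
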
@@ -1596,6 +1616,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
                 kfree_skb(skb);
                 return NET_RX_DROP;
         }
+        skb->skb_iif = 0;
         skb_set_dev(skb, dev);
         skb->tstamp.tv64 = 0;
         skb->pkt_type = PACKET_HOST;
@@ -4027,54 +4048,41 @@ static int dev_ifconf(struct net *net, char __user *arg)
 
 #ifdef CONFIG_PROC_FS
 
-#define BUCKET_SPACE (32 - NETDEV_HASHBITS)
-
-struct dev_iter_state {
-        struct seq_net_private p;
-        unsigned int pos; /* bucket << BUCKET_SPACE + offset */
-};
+#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
 
 #define get_bucket(x) ((x) >> BUCKET_SPACE)
 #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
 
-static inline struct net_device *dev_from_same_bucket(struct seq_file *seq)
+static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
 {
-        struct dev_iter_state *state = seq->private;
         struct net *net = seq_file_net(seq);
         struct net_device *dev;
         struct hlist_node *p;
         struct hlist_head *h;
-        unsigned int count, bucket, offset;
+        unsigned int count = 0, offset = get_offset(*pos);
 
-        bucket = get_bucket(state->pos);
-        offset = get_offset(state->pos);
-        h = &net->dev_name_head[bucket];
-        count = 0;
+        h = &net->dev_name_head[get_bucket(*pos)];
         hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
-                if (count++ == offset) {
-                        state->pos = set_bucket_offset(bucket, count);
+                if (++count == offset)
                         return dev;
-                }
         }
 
         return NULL;
 }
 
-static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
+static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
 {
-        struct dev_iter_state *state = seq->private;
         struct net_device *dev;
         unsigned int bucket;
 
-        bucket = get_bucket(state->pos);
         do {
-                dev = dev_from_same_bucket(seq);
+                dev = dev_from_same_bucket(seq, pos);
                 if (dev)
                         return dev;
 
-                bucket++;
-                state->pos = set_bucket_offset(bucket, 0);
+                bucket = get_bucket(*pos) + 1;
+                *pos = set_bucket_offset(bucket, 1);
         } while (bucket < NETDEV_HASHENTRIES);
 
         return NULL;
@@ -4087,33 +4095,20 @@ static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
         __acquires(RCU)
 {
-        struct dev_iter_state *state = seq->private;
-
         rcu_read_lock();
         if (!*pos)
                 return SEQ_START_TOKEN;
 
-        /* check for end of the hash */
-        if (state->pos == 0 && *pos > 1)
+        if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
                 return NULL;
 
-        return dev_from_new_bucket(seq);
+        return dev_from_bucket(seq, pos);
 }
 
 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-        struct net_device *dev;
-
         ++*pos;
-
-        if (v == SEQ_START_TOKEN)
-                return dev_from_new_bucket(seq);
-
-        dev = dev_from_same_bucket(seq);
-        if (dev)
-                return dev;
-
-        return dev_from_new_bucket(seq);
+        return dev_from_bucket(seq, pos);
 }
 
 void dev_seq_stop(struct seq_file *seq, void *v)
@@ -4212,13 +4207,7 @@ static const struct seq_operations dev_seq_ops = {
 static int dev_seq_open(struct inode *inode, struct file *file)
 {
         return seq_open_net(inode, file, &dev_seq_ops,
-                            sizeof(struct dev_iter_state));
-}
-
-int dev_seq_open_ops(struct inode *inode, struct file *file,
-                     const struct seq_operations *ops)
-{
-        return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
+                            sizeof(struct seq_net_private));
 }
 
 static const struct file_operations dev_seq_fops = {
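
The reworked /proc/net/dev iterator encodes its whole position in *pos instead of keeping it in a private dev_iter_state. A small userspace sketch of that encoding, assuming NETDEV_HASHBITS is 8 as in net/core/dev.c; the program itself is illustrative and not part of the patch:

#include <stdio.h>

#define NETDEV_HASHBITS    8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

int main(void)
{
        /* 3rd device in hash bucket 5; offsets are 1-based so a packed
         * position is never 0, leaving *pos == 0 free for SEQ_START_TOKEN
         * in dev_seq_start(). */
        unsigned int pos = set_bucket_offset(5, 3);

        printf("pos=%u bucket=%u offset=%u\n",
               pos, get_bucket(pos), get_offset(pos));
        return 0;
}

Seeding each new bucket with offset 1 in dev_from_bucket() is what preserves the non-zero invariant.
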
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 29c07fef9228..626698f0db8b 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -696,7 +696,8 @@ static const struct seq_operations dev_mc_seq_ops = {
 
 static int dev_mc_seq_open(struct inode *inode, struct file *file)
 {
-        return dev_seq_open_ops(inode, file, &dev_mc_seq_ops);
+        return seq_open_net(inode, file, &dev_mc_seq_ops,
+                            sizeof(struct seq_net_private));
 }
 
 static const struct file_operations dev_mc_seq_fops = {
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 7f36b38e060f..a7cad741df01 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -42,13 +42,14 @@ static void send_dm_alert(struct work_struct *unused);
  * netlink alerts
  */
 static int trace_state = TRACE_OFF;
-static DEFINE_SPINLOCK(trace_state_lock);
+static DEFINE_MUTEX(trace_state_mutex);
 
 struct per_cpu_dm_data {
         struct work_struct dm_alert_work;
-        struct sk_buff *skb;
+        struct sk_buff __rcu *skb;
         atomic_t dm_hit_count;
         struct timer_list send_timer;
+        int cpu;
 };
 
 struct dm_hw_stat_delta {
@@ -79,29 +80,53 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data)
         size_t al;
         struct net_dm_alert_msg *msg;
         struct nlattr *nla;
+        struct sk_buff *skb;
+        struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
 
         al = sizeof(struct net_dm_alert_msg);
         al += dm_hit_limit * sizeof(struct net_dm_drop_point);
         al += sizeof(struct nlattr);
 
-        data->skb = genlmsg_new(al, GFP_KERNEL);
-        genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family,
-                    0, NET_DM_CMD_ALERT);
-        nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg));
-        msg = nla_data(nla);
-        memset(msg, 0, al);
-        atomic_set(&data->dm_hit_count, dm_hit_limit);
+        skb = genlmsg_new(al, GFP_KERNEL);
+
+        if (skb) {
+                genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
+                            0, NET_DM_CMD_ALERT);
+                nla = nla_reserve(skb, NLA_UNSPEC,
+                                  sizeof(struct net_dm_alert_msg));
+                msg = nla_data(nla);
+                memset(msg, 0, al);
+        } else
+                schedule_work_on(data->cpu, &data->dm_alert_work);
+
+        /*
+         * Don't need to lock this, since we are guaranteed to only
+         * run this on a single cpu at a time.
+         * Note also that we only update data->skb if the old and new skb
+         * pointers don't match. This ensures that we don't continually call
+         * synchornize_rcu if we repeatedly fail to alloc a new netlink message.
+         */
+        if (skb != oskb) {
+                rcu_assign_pointer(data->skb, skb);
+
+                synchronize_rcu();
+
+                atomic_set(&data->dm_hit_count, dm_hit_limit);
+        }
+
 }
 
 static void send_dm_alert(struct work_struct *unused)
 {
         struct sk_buff *skb;
-        struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+        struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+        WARN_ON_ONCE(data->cpu != smp_processor_id());
 
         /*
          * Grab the skb we're about to send
          */
-        skb = data->skb;
+        skb = rcu_dereference_protected(data->skb, 1);
 
         /*
          * Replace it with a new one
@@ -111,8 +136,10 @@ static void send_dm_alert(struct work_struct *unused)
         /*
          * Ship it!
          */
-        genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+        if (skb)
+                genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
 
+        put_cpu_var(dm_cpu_data);
 }
 
 /*
@@ -123,9 +150,11 @@ static void send_dm_alert(struct work_struct *unused)
  */
 static void sched_send_work(unsigned long unused)
 {
-        struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+        struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+        schedule_work_on(smp_processor_id(), &data->dm_alert_work);
 
-        schedule_work(&data->dm_alert_work);
+        put_cpu_var(dm_cpu_data);
 }
 
 static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -134,8 +163,15 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
         struct nlmsghdr *nlh;
         struct nlattr *nla;
         int i;
-        struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+        struct sk_buff *dskb;
+        struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+
+        rcu_read_lock();
+        dskb = rcu_dereference(data->skb);
 
+        if (!dskb)
+                goto out;
 
         if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
                 /*
@@ -144,12 +180,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
                 goto out;
         }
 
-        nlh = (struct nlmsghdr *)data->skb->data;
+        nlh = (struct nlmsghdr *)dskb->data;
         nla = genlmsg_data(nlmsg_data(nlh));
         msg = nla_data(nla);
         for (i = 0; i < msg->entries; i++) {
                 if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
                         msg->points[i].count++;
+                        atomic_inc(&data->dm_hit_count);
                         goto out;
                 }
         }
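
A short sketch of the accounting change in this hunk (example_account_hit is an illustrative helper, not code from the patch): every trace event still reserves one slot from dm_hit_count up front, but the slot is handed back when the drop point is already present in the pending message, so the per-alert budget tracks slots in the preallocated message rather than raw hits on the same location:

static bool example_account_hit(struct per_cpu_dm_data *data,
                                bool already_present)
{
        /* take one slot; fails only when the budget is already zero */
        if (!atomic_add_unless(&data->dm_hit_count, -1, 0))
                return false;

        /* an existing entry only bumps its counter, so return the slot */
        if (already_present)
                atomic_inc(&data->dm_hit_count);

        return true;
}
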
@@ -157,7 +194,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
         /*
          * We need to create a new entry
          */
-        __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point));
+        __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
         nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
         memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
         msg->points[msg->entries].count = 1;
@@ -169,6 +206,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
         }
 
 out:
+        rcu_read_unlock();
+        put_cpu_var(dm_cpu_data);
         return;
 }
 
@@ -213,7 +252,7 @@ static int set_all_monitor_traces(int state)
         struct dm_hw_stat_delta *new_stat = NULL;
         struct dm_hw_stat_delta *temp;
 
-        spin_lock(&trace_state_lock);
+        mutex_lock(&trace_state_mutex);
 
         if (state == trace_state) {
                 rc = -EAGAIN;
@@ -252,7 +291,7 @@ static int set_all_monitor_traces(int state)
         rc = -EINPROGRESS;
 
 out_unlock:
-        spin_unlock(&trace_state_lock);
+        mutex_unlock(&trace_state_mutex);
 
         return rc;
 }
@@ -295,12 +334,12 @@ static int dropmon_net_event(struct notifier_block *ev_block,
 
                 new_stat->dev = dev;
                 new_stat->last_rx = jiffies;
-                spin_lock(&trace_state_lock);
+                mutex_lock(&trace_state_mutex);
                 list_add_rcu(&new_stat->list, &hw_stats_list);
-                spin_unlock(&trace_state_lock);
+                mutex_unlock(&trace_state_mutex);
                 break;
         case NETDEV_UNREGISTER:
-                spin_lock(&trace_state_lock);
+                mutex_lock(&trace_state_mutex);
                 list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
                         if (new_stat->dev == dev) {
                                 new_stat->dev = NULL;
@@ -311,7 +350,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
                                 }
                         }
                 }
-                spin_unlock(&trace_state_lock);
+                mutex_unlock(&trace_state_mutex);
                 break;
         }
 out:
@@ -367,13 +406,15 @@ static int __init init_net_drop_monitor(void)
 
         for_each_present_cpu(cpu) {
                 data = &per_cpu(dm_cpu_data, cpu);
-                reset_per_cpu_data(data);
+                data->cpu = cpu;
                 INIT_WORK(&data->dm_alert_work, send_dm_alert);
                 init_timer(&data->send_timer);
                 data->send_timer.data = cpu;
                 data->send_timer.function = sched_send_work;
+                reset_per_cpu_data(data);
         }
 
+
         goto out;
 
 out_unreg:
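
Taken together, the drop_monitor changes follow one pattern: per-CPU state is pinned with get_cpu_var()/put_cpu_var() instead of the preemption-unsafe __get_cpu_var(), and the per-CPU skb is read through RCU so reset_per_cpu_data() can swap it and wait readers out with synchronize_rcu(). A condensed sketch of a reader written against that pattern (example_touch_percpu_skb is illustrative only):

static void example_touch_percpu_skb(void)
{
        struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
        struct sk_buff *skb;

        rcu_read_lock();
        skb = rcu_dereference(data->skb);
        if (skb) {
                /* read or extend the pending alert message here */
        }
        rcu_read_unlock();

        put_cpu_var(dm_cpu_data);       /* drops the preemption pin */
}
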
diff --git a/net/core/filter.c b/net/core/filter.c
index cf4989ac503b..6f755cca4520 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -39,8 +39,11 @@
 #include <linux/reciprocal_div.h>
 #include <linux/ratelimit.h>
 
-/* No hurry in this branch */
-static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
+/* No hurry in this branch
+ *
+ * Exported for the bpf jit load helper.
+ */
+void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
 {
         u8 *ptr = NULL;
 
@@ -59,7 +62,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
 {
         if (k >= 0)
                 return skb_header_pointer(skb, k, size, buffer);
-        return __load_pointer(skb, k, size);
+        return bpf_internal_load_pointer_neg_helper(skb, k, size);
 }
 
 /**
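
Exporting the negative-offset loader lets a BPF JIT fall back to the same C helper the interpreter uses for the classic-BPF ancillary address spaces (SKF_NET_OFF/SKF_LL_OFF from <linux/filter.h>). A sketch of such a fallback; jit_load_byte_neg is a made-up name, not an interface added by this patch, and the prototype is declared locally here only for the sake of the example:

#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/skbuff.h>

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
                                           int k, unsigned int size);

static int jit_load_byte_neg(const struct sk_buff *skb, int k, u8 *val)
{
        u8 *ptr;

        if (k >= 0)
                return -EINVAL; /* positive offsets use skb_header_pointer() */

        ptr = bpf_internal_load_pointer_neg_helper(skb, k, 1);
        if (!ptr)
                return -EFAULT; /* out of range: the filter would return 0 */

        *val = *ptr;
        return 0;
}
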
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 0e950fda9a0a..31a5ae51a45c 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -83,21 +83,29 @@ assign:
 
 static int ops_init(const struct pernet_operations *ops, struct net *net)
 {
-        int err;
+        int err = -ENOMEM;
+        void *data = NULL;
+
         if (ops->id && ops->size) {
-                void *data = kzalloc(ops->size, GFP_KERNEL);
+                data = kzalloc(ops->size, GFP_KERNEL);
                 if (!data)
-                        return -ENOMEM;
+                        goto out;
 
                 err = net_assign_generic(net, *ops->id, data);
-                if (err) {
-                        kfree(data);
-                        return err;
-                }
+                if (err)
+                        goto cleanup;
         }
+        err = 0;
         if (ops->init)
-                return ops->init(net);
-        return 0;
+                err = ops->init(net);
+        if (!err)
+                return 0;
+
+cleanup:
+        kfree(data);
+
+out:
+        return err;
 }
 
 static void ops_free(const struct pernet_operations *ops, struct net *net)
@@ -448,12 +456,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
 static int __register_pernet_operations(struct list_head *list,
                                         struct pernet_operations *ops)
 {
-        int err = 0;
-        err = ops_init(ops, &init_net);
-        if (err)
-                ops_free(ops, &init_net);
-        return err;
-
+        return ops_init(ops, &init_net);
 }
 
 static void __unregister_pernet_operations(struct pernet_operations *ops)
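
With ops_init() now owning the error path, a per-net subsystem only has to return an errno from its ->init(): the data allocated for ops->id/ops->size is kfree'd on that path, and __register_pernet_operations() no longer pairs ops_init() with ops_free(). A minimal sketch of such a subsystem, with illustrative example_net_* names:

#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct example_net {
        int configured;
};

static int example_net_id __read_mostly;

static int example_net_init(struct net *net)
{
        struct example_net *en = net_generic(net, example_net_id);

        en->configured = 1;
        return 0;       /* a negative errno now reaches the cleanup: label */
}

static void example_net_exit(struct net *net)
{
        /* the id/size data itself is released by the pernet core */
}

static struct pernet_operations example_net_ops = {
        .init = example_net_init,
        .exit = example_net_exit,
        .id   = &example_net_id,
        .size = sizeof(struct example_net),
};

static int __init example_module_init(void)
{
        return register_pernet_subsys(&example_net_ops);
}

static void __exit example_module_exit(void)
{
        unregister_pernet_subsys(&example_net_ops);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");
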
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f223cdc75da6..e59840010d45 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -952,9 +952,11 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
                 goto adjust_others;
         }
 
-        data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+        data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+                       gfp_mask);
         if (!data)
                 goto nodata;
+        size = SKB_WITH_OVERHEAD(ksize(data));
 
         /* Copy only real data... and, alas, header. This should be
          * optimized for the cases when header is void.
@@ -3161,6 +3163,8 @@ static void sock_rmem_free(struct sk_buff *skb)
  */
 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 {
+        int len = skb->len;
+
         if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
             (unsigned)sk->sk_rcvbuf)
                 return -ENOMEM;
@@ -3175,7 +3179,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 
         skb_queue_tail(&sk->sk_error_queue, skb);
         if (!sock_flag(sk, SOCK_DEAD))
-                sk->sk_data_ready(sk, skb->len);
+                sk->sk_data_ready(sk, len);
         return 0;
 }
 EXPORT_SYMBOL(sock_queue_err_skb);
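
The pskb_expand_head() hunk recomputes the usable size from ksize(), so the slack kmalloc rounds up to is handed back to the skb instead of being wasted. A userspace sketch of the arithmetic; the 64-byte cache line, 320-byte skb_shared_info and 2048-byte slab object below are assumed round numbers, not values taken from this tree:

#include <stdio.h>

#define SMP_CACHE_BYTES 64u
#define SHINFO_SIZE     320u    /* stand-in for sizeof(struct skb_shared_info) */

#define ALIGN_UP(x, a)       (((x) + (a) - 1) & ~((a) - 1))
#define SKB_DATA_ALIGN(x)    ALIGN_UP((x), SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(x) ((x) - SKB_DATA_ALIGN(SHINFO_SIZE))

int main(void)
{
        unsigned int want  = 1200;      /* head room + data + tail room */
        unsigned int asked = want + SKB_DATA_ALIGN(SHINFO_SIZE);
        unsigned int got   = 2048;      /* what ksize() reports for the object */

        printf("asked kmalloc for %u bytes, usable head space becomes %u\n",
               asked, SKB_WITH_OVERHEAD(got));
        return 0;
}

The sock_queue_err_skb() hunks are a separate fix: skb->len is latched before the skb is queued, because once it sits on sk_error_queue a concurrent reader may dequeue and free it before sk_data_ready() runs.
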