author	Jeremy Erickson <jerickso@cs.unc.edu>	2014-04-11 13:24:45 -0400
committer	Jeremy Erickson <jerickso@cs.unc.edu>	2014-04-11 13:24:45 -0400
commit	438145c7ef5c9445f25bb8fc4d52e2c9d11fdc7c (patch)
tree	76941991e36f4a32bf1be0db3854959053f24619 /net/core
parent	9ddd1b8ad8abd321964b8add5581910de6d67c2a (diff)

Update from 2.6.36 to 2.6.36.4 (wip-dissipation-jerickso)
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c           16
-rw-r--r--  net/core/dst.c            1
-rw-r--r--  net/core/filter.c        83
-rw-r--r--  net/core/iovec.c         20
-rw-r--r--  net/core/rtnetlink.c      3
-rw-r--r--  net/core/timestamping.c   6
6 files changed, 71 insertions(+), 58 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 660dd41aaaa6..1dad6c0926f2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1648,10 +1648,10 @@ EXPORT_SYMBOL(netif_device_attach);
 
 static bool can_checksum_protocol(unsigned long features, __be16 protocol)
 {
-	return ((features & NETIF_F_GEN_CSUM) ||
-		((features & NETIF_F_IP_CSUM) &&
+	return ((features & NETIF_F_NO_CSUM) ||
+		((features & NETIF_F_V4_CSUM) &&
 		 protocol == htons(ETH_P_IP)) ||
-		((features & NETIF_F_IPV6_CSUM) &&
+		((features & NETIF_F_V6_CSUM) &&
 		 protocol == htons(ETH_P_IPV6)) ||
 		((features & NETIF_F_FCOE_CRC) &&
 		 protocol == htons(ETH_P_FCOE)));
@@ -2891,6 +2891,15 @@ static int __netif_receive_skb(struct sk_buff *skb)
 ncls:
 #endif
 
+	/* If we got this far with a hardware accelerated VLAN tag, it means
+	 * that we were put in promiscuous mode but nobody is interested in
+	 * this vid. Drop the packet now to prevent it from getting propagated
+	 * to other parts of the stack that won't know how to deal with packets
+	 * tagged in this manner.
+	 */
+	if (unlikely(vlan_tx_tag_present(skb)))
+		goto bypass;
+
 	/* Handle special case of bridge or macvlan */
 	rx_handler = rcu_dereference(skb->dev->rx_handler);
 	if (rx_handler) {
@@ -2927,6 +2936,7 @@ ncls:
 	}
 	}
 
+bypass:
 	if (pt_prev) {
 		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 	} else {
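
Note: the dev.c change drops hardware-accelerated VLAN-tagged packets that reach __netif_receive_skb() without any configured VLAN device claiming their vid. As a rough sketch of what the new check tests (illustrative, not part of the patch: on 2.6.36-era kernels the accelerated tag travels out of band in skb->vlan_tci rather than in the packet data):

	#include <linux/skbuff.h>

	/* Sketch, assuming the 2.6.36-era <linux/if_vlan.h> convention:
	 * drivers set a "tag present" bit in skb->vlan_tci on RX, so the
	 * tag never appears in the frame payload the stack parses.
	 */
	#define SKETCH_VLAN_TAG_PRESENT	0x1000

	static inline int sketch_vlan_tx_tag_present(const struct sk_buff *skb)
	{
		return skb->vlan_tci & SKETCH_VLAN_TAG_PRESENT;
	}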
diff --git a/net/core/dst.c b/net/core/dst.c
index 6c41b1fac3db..2844639dfb79 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -343,6 +343,7 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
 
 static struct notifier_block dst_dev_notifier = {
 	.notifier_call = dst_dev_event,
+	.priority = -10, /* must be called after other network notifiers */
 };
 
 void __init dst_init(void)
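
Note: the one-line dst.c fix orders dst_dev_event() after the other netdevice notifiers; notifier chains invoke callbacks in descending priority order, and registrations that never set .priority get 0. A minimal sketch of the ordering (names here are illustrative, not from the kernel):

	#include <linux/notifier.h>

	/* With .priority left at the default of 0 versus set to -10, the
	 * chain calls runs_first() before runs_last() for the same event.
	 */
	static int runs_first(struct notifier_block *nb, unsigned long event,
			      void *ptr)
	{
		return NOTIFY_DONE;
	}

	static int runs_last(struct notifier_block *nb, unsigned long event,
			     void *ptr)
	{
		return NOTIFY_DONE;
	}

	static struct notifier_block first_nb = {
		.notifier_call = runs_first,	/* .priority defaults to 0 */
	};

	static struct notifier_block last_nb = {
		.notifier_call = runs_last,
		.priority = -10,	/* sorted after the priority-0 entries */
	};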
diff --git a/net/core/filter.c b/net/core/filter.c
index 52b051f82a01..71a433cdf7d3 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -112,39 +112,41 @@ EXPORT_SYMBOL(sk_filter);
  */
 unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
 {
-	struct sock_filter *fentry;	/* We walk down these */
 	void *ptr;
 	u32 A = 0;			/* Accumulator */
 	u32 X = 0;			/* Index Register */
 	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
+	unsigned long memvalid = 0;
 	u32 tmp;
 	int k;
 	int pc;
 
+	BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
 	/*
 	 * Process array of filter instructions.
 	 */
 	for (pc = 0; pc < flen; pc++) {
-		fentry = &filter[pc];
+		const struct sock_filter *fentry = &filter[pc];
+		u32 f_k = fentry->k;
 
 		switch (fentry->code) {
 		case BPF_S_ALU_ADD_X:
 			A += X;
 			continue;
 		case BPF_S_ALU_ADD_K:
-			A += fentry->k;
+			A += f_k;
 			continue;
 		case BPF_S_ALU_SUB_X:
 			A -= X;
 			continue;
 		case BPF_S_ALU_SUB_K:
-			A -= fentry->k;
+			A -= f_k;
 			continue;
 		case BPF_S_ALU_MUL_X:
 			A *= X;
 			continue;
 		case BPF_S_ALU_MUL_K:
-			A *= fentry->k;
+			A *= f_k;
 			continue;
 		case BPF_S_ALU_DIV_X:
 			if (X == 0)
@@ -152,49 +154,49 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
 			A /= X;
 			continue;
 		case BPF_S_ALU_DIV_K:
-			A /= fentry->k;
+			A /= f_k;
 			continue;
 		case BPF_S_ALU_AND_X:
 			A &= X;
 			continue;
 		case BPF_S_ALU_AND_K:
-			A &= fentry->k;
+			A &= f_k;
 			continue;
 		case BPF_S_ALU_OR_X:
 			A |= X;
 			continue;
 		case BPF_S_ALU_OR_K:
-			A |= fentry->k;
+			A |= f_k;
 			continue;
 		case BPF_S_ALU_LSH_X:
 			A <<= X;
 			continue;
 		case BPF_S_ALU_LSH_K:
-			A <<= fentry->k;
+			A <<= f_k;
 			continue;
 		case BPF_S_ALU_RSH_X:
 			A >>= X;
 			continue;
 		case BPF_S_ALU_RSH_K:
-			A >>= fentry->k;
+			A >>= f_k;
 			continue;
 		case BPF_S_ALU_NEG:
 			A = -A;
 			continue;
 		case BPF_S_JMP_JA:
-			pc += fentry->k;
+			pc += f_k;
 			continue;
 		case BPF_S_JMP_JGT_K:
-			pc += (A > fentry->k) ? fentry->jt : fentry->jf;
+			pc += (A > f_k) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JGE_K:
-			pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
+			pc += (A >= f_k) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JEQ_K:
-			pc += (A == fentry->k) ? fentry->jt : fentry->jf;
+			pc += (A == f_k) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JSET_K:
-			pc += (A & fentry->k) ? fentry->jt : fentry->jf;
+			pc += (A & f_k) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JGT_X:
 			pc += (A > X) ? fentry->jt : fentry->jf;
@@ -209,7 +211,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
 			pc += (A & X) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_LD_W_ABS:
-			k = fentry->k;
+			k = f_k;
 load_w:
 			ptr = load_pointer(skb, k, 4, &tmp);
 			if (ptr != NULL) {
@@ -218,7 +220,7 @@ load_w:
 			}
 			break;
 		case BPF_S_LD_H_ABS:
-			k = fentry->k;
+			k = f_k;
load_h:
 			ptr = load_pointer(skb, k, 2, &tmp);
 			if (ptr != NULL) {
@@ -227,7 +229,7 @@ load_h:
 			}
 			break;
 		case BPF_S_LD_B_ABS:
-			k = fentry->k;
+			k = f_k;
load_b:
 			ptr = load_pointer(skb, k, 1, &tmp);
 			if (ptr != NULL) {
@@ -242,32 +244,34 @@ load_b:
 			X = skb->len;
 			continue;
 		case BPF_S_LD_W_IND:
-			k = X + fentry->k;
+			k = X + f_k;
 			goto load_w;
 		case BPF_S_LD_H_IND:
-			k = X + fentry->k;
+			k = X + f_k;
 			goto load_h;
 		case BPF_S_LD_B_IND:
-			k = X + fentry->k;
+			k = X + f_k;
 			goto load_b;
 		case BPF_S_LDX_B_MSH:
-			ptr = load_pointer(skb, fentry->k, 1, &tmp);
+			ptr = load_pointer(skb, f_k, 1, &tmp);
 			if (ptr != NULL) {
 				X = (*(u8 *)ptr & 0xf) << 2;
 				continue;
 			}
 			return 0;
 		case BPF_S_LD_IMM:
-			A = fentry->k;
+			A = f_k;
 			continue;
 		case BPF_S_LDX_IMM:
-			X = fentry->k;
+			X = f_k;
 			continue;
 		case BPF_S_LD_MEM:
-			A = mem[fentry->k];
+			A = (memvalid & (1UL << f_k)) ?
+				mem[f_k] : 0;
 			continue;
 		case BPF_S_LDX_MEM:
-			X = mem[fentry->k];
+			X = (memvalid & (1UL << f_k)) ?
+				mem[f_k] : 0;
 			continue;
 		case BPF_S_MISC_TAX:
 			X = A;
@@ -276,14 +280,16 @@ load_b:
 			A = X;
 			continue;
 		case BPF_S_RET_K:
-			return fentry->k;
+			return f_k;
 		case BPF_S_RET_A:
 			return A;
 		case BPF_S_ST:
-			mem[fentry->k] = A;
+			memvalid |= 1UL << f_k;
+			mem[f_k] = A;
 			continue;
 		case BPF_S_STX:
-			mem[fentry->k] = X;
+			memvalid |= 1UL << f_k;
+			mem[f_k] = X;
 			continue;
 		default:
 			WARN_ON(1);
@@ -583,23 +589,16 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
 EXPORT_SYMBOL(sk_chk_filter);
 
 /**
- *	sk_filter_rcu_release: Release a socket filter by rcu_head
+ *	sk_filter_release_rcu - Release a socket filter by rcu_head
  *	@rcu: rcu_head that contains the sk_filter to free
  */
-static void sk_filter_rcu_release(struct rcu_head *rcu)
+void sk_filter_release_rcu(struct rcu_head *rcu)
 {
 	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
 
-	sk_filter_release(fp);
-}
-
-static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp)
-{
-	unsigned int size = sk_filter_len(fp);
-
-	atomic_sub(size, &sk->sk_omem_alloc);
-	call_rcu_bh(&fp->rcu, sk_filter_rcu_release);
+	kfree(fp);
 }
+EXPORT_SYMBOL(sk_filter_release_rcu);
 
 /**
  *	sk_attach_filter - attach a socket filter
@@ -644,7 +643,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 	rcu_read_unlock_bh();
 
 	if (old_fp)
-		sk_filter_delayed_uncharge(sk, old_fp);
+		sk_filter_uncharge(sk, old_fp);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(sk_attach_filter);
@@ -658,7 +657,7 @@ int sk_detach_filter(struct sock *sk)
 	filter = rcu_dereference_bh(sk->sk_filter);
 	if (filter) {
 		rcu_assign_pointer(sk->sk_filter, NULL);
-		sk_filter_delayed_uncharge(sk, filter);
+		sk_filter_uncharge(sk, filter);
 		ret = 0;
 	}
 	rcu_read_unlock_bh();
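
Note: most of the filter.c churn just caches fentry->k in f_k, but the security-relevant part is memvalid: sk_chk_filter() never required a BPF_S_ST/BPF_S_STX before a BPF_S_LD_MEM/BPF_S_LDX_MEM, so a filter could read uninitialized kernel stack through mem[]. With the bitmap, loads from never-written slots now yield 0. A filter of the kind being defended against might look like this (illustrative classic-BPF program, not from the patch):

	#include <linux/filter.h>

	/* Loads scratch slot 0 without ever storing to it, then returns
	 * it. Before this patch the return value could be whatever was
	 * on the kernel stack; afterwards it is 0.
	 */
	static struct sock_filter leak_attempt[] = {
		BPF_STMT(BPF_LD | BPF_MEM, 0),	/* A = mem[0] (never written) */
		BPF_STMT(BPF_RET | BPF_A, 0),	/* return A */
	};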
diff --git a/net/core/iovec.c b/net/core/iovec.c
index e6b133b77ccb..58eb9999f89d 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -35,10 +35,9 @@
  *	in any case.
  */
 
-long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
+int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
 {
-	int size, ct;
-	long err;
+	int size, ct, err;
 
 	if (m->msg_namelen) {
 		if (mode == VERIFY_READ) {
@@ -60,14 +59,13 @@ long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
 	err = 0;
 
 	for (ct = 0; ct < m->msg_iovlen; ct++) {
-		err += iov[ct].iov_len;
-		/*
-		 * Goal is not to verify user data, but to prevent returning
-		 * negative value, which is interpreted as errno.
-		 * Overflow is still possible, but it is harmless.
-		 */
-		if (err < 0)
-			return -EMSGSIZE;
+		size_t len = iov[ct].iov_len;
+
+		if (len > INT_MAX - err) {
+			len = INT_MAX - err;
+			iov[ct].iov_len = len;
+		}
+		err += len;
 	}
 
 	return err;
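
Note: the iovec.c rewrite replaces the old signed-overflow check, which was itself overflowable (summing enough large iov_len values could wrap past negative and back to a small positive int), with an explicit clamp so the running total never exceeds INT_MAX. The clamping logic in isolation, as a standalone sketch:

	#include <limits.h>
	#include <stddef.h>

	/* Sum iovec-style lengths, truncating so an int total cannot
	 * overflow; mirrors the clamp the patch applies in verify_iovec().
	 */
	static int sum_lengths_clamped(size_t *lens, int n)
	{
		int total = 0;
		int i;

		for (i = 0; i < n; i++) {
			size_t len = lens[i];

			if (len > (size_t)(INT_MAX - total)) {
				len = INT_MAX - total;
				lens[i] = len;	/* truncate, as the patch does */
			}
			total += len;
		}
		return total;
	}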
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f78d821bd935..29d7bce933f2 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1546,6 +1546,9 @@ replay:
 		snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
 
 	dest_net = rtnl_link_get_net(net, tb);
+	if (IS_ERR(dest_net))
+		return PTR_ERR(dest_net);
+
 	dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
 
 	if (IS_ERR(dev))
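
Note: the rtnetlink.c fix handles the case where rtnl_link_get_net() fails, for example when the requested network namespace does not exist: it returns an ERR_PTR-encoded pointer, which previously flowed unchecked into rtnl_create_link(). The kernel's ERR_PTR convention, in a minimal sketch (find_thing()/use_thing() are made-up names):

	#include <linux/err.h>

	/* ERR_PTR packs a small negative errno into a pointer value;
	 * IS_ERR() detects such a pointer and PTR_ERR() recovers the errno.
	 */
	static struct net *find_thing(int id)
	{
		if (id < 0)
			return ERR_PTR(-EINVAL);  /* errno travels as a pointer */
		return NULL;			  /* placeholder for a real lookup */
	}

	static int use_thing(int id)
	{
		struct net *net = find_thing(id);

		if (IS_ERR(net))
			return PTR_ERR(net);	  /* unpack and propagate errno */
		/* ... safe to use net here ... */
		return 0;
	}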
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index 0ae6c22da85b..c19bb4ee405e 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -96,11 +96,13 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb)
 	struct phy_device *phydev;
 	unsigned int type;
 
-	skb_push(skb, ETH_HLEN);
+	if (skb_headroom(skb) < ETH_HLEN)
+		return false;
+	__skb_push(skb, ETH_HLEN);
 
 	type = classify(skb);
 
-	skb_pull(skb, ETH_HLEN);
+	__skb_pull(skb, ETH_HLEN);
 
 	switch (type) {
 	case PTP_CLASS_V1_IPV4:
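
Note: the timestamping.c fix guards the temporary push with a headroom check: skb_push() panics when fewer than ETH_HLEN bytes sit before skb->data. Since classify() only needs to peek at the Ethernet header, the double-underscore variants that skip the sanity checks are safe once the headroom has been verified. The pattern in isolation, as a sketch:

	#include <linux/skbuff.h>
	#include <linux/if_ether.h>

	/* Temporarily expose the link-layer header for inspection, then
	 * restore skb->data; returns false instead of crashing when the
	 * header does not fit within the buffer's headroom.
	 */
	static bool sketch_peek_eth_header(struct sk_buff *skb)
	{
		if (skb_headroom(skb) < ETH_HLEN)
			return false;		/* would underflow the buffer */

		__skb_push(skb, ETH_HLEN);	/* unchecked: headroom verified */
		/* ... inspect the Ethernet header at skb->data ... */
		__skb_pull(skb, ETH_HLEN);	/* restore original data pointer */
		return true;
	}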