author     Chris Wilson <chris@chris-wilson.co.uk>    2010-11-15 01:33:11 -0500
committer  Chris Wilson <chris@chris-wilson.co.uk>    2010-11-15 01:33:11 -0500
commit     1bb95834bbcdc969e477a9284cf96c17a4c2616f (patch)
tree       9cf66b22a611bb6bc78778c05dac72263bb45a23 /net/core
parent     85345517fe6d4de27b0d6ca19fef9d28ac947c4a (diff)
parent     a41c73e04673b47730df682446f0d52f95e32a5b (diff)
Merge remote branch 'airlied/drm-fixes' into drm-intel-fixes
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c              |  40
-rw-r--r--  net/core/dst.c              |   1
-rw-r--r--  net/core/fib_rules.c        |  21
-rw-r--r--  net/core/filter.c           |  68
-rw-r--r--  net/core/iovec.c            |  20
-rw-r--r--  net/core/net-sysfs.c        |  20
-rw-r--r--  net/core/net_namespace.c    |   4
-rw-r--r--  net/core/pktgen.c           |  41
-rw-r--r--  net/core/rtnetlink.c        |   9
-rw-r--r--  net/core/sock.c             |  16
-rw-r--r--  net/core/sysctl_net_core.c  |   3
11 files changed, 128 insertions(+), 115 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 78b5a89b0f40..0dd54a69dace 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1685,10 +1685,10 @@ EXPORT_SYMBOL(netif_device_attach);
 
 static bool can_checksum_protocol(unsigned long features, __be16 protocol)
 {
-	return ((features & NETIF_F_GEN_CSUM) ||
-		((features & NETIF_F_IP_CSUM) &&
+	return ((features & NETIF_F_NO_CSUM) ||
+		((features & NETIF_F_V4_CSUM) &&
 		 protocol == htons(ETH_P_IP)) ||
-		((features & NETIF_F_IPV6_CSUM) &&
+		((features & NETIF_F_V6_CSUM) &&
 		 protocol == htons(ETH_P_IPV6)) ||
 		((features & NETIF_F_FCOE_CRC) &&
 		 protocol == htons(ETH_P_FCOE)));
@@ -1696,22 +1696,18 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
 
 static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
 {
+	__be16 protocol = skb->protocol;
 	int features = dev->features;
 
-	if (vlan_tx_tag_present(skb))
+	if (vlan_tx_tag_present(skb)) {
 		features &= dev->vlan_features;
-
-	if (can_checksum_protocol(features, skb->protocol))
-		return true;
-
-	if (skb->protocol == htons(ETH_P_8021Q)) {
+	} else if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
-		if (can_checksum_protocol(dev->features & dev->vlan_features,
-					  veh->h_vlan_encapsulated_proto))
-			return true;
+		protocol = veh->h_vlan_encapsulated_proto;
+		features &= dev->vlan_features;
 	}
 
-	return false;
+	return can_checksum_protocol(features, protocol);
 }
 
 /**
@@ -2135,7 +2131,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 	} else {
 		struct sock *sk = skb->sk;
 		queue_index = sk_tx_queue_get(sk);
-		if (queue_index < 0) {
+		if (queue_index < 0 || queue_index >= dev->real_num_tx_queues) {
 
 			queue_index = 0;
 			if (dev->real_num_tx_queues > 1)
@@ -2213,7 +2209,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 }
 
 static DEFINE_PER_CPU(int, xmit_recursion);
-#define RECURSION_LIMIT 3
+#define RECURSION_LIMIT 10
 
 /**
  *	dev_queue_xmit - transmit a buffer
@@ -2413,7 +2409,7 @@ EXPORT_SYMBOL(__skb_get_rxhash);
 #ifdef CONFIG_RPS
 
 /* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
+struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
 /*
@@ -2425,7 +2421,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		       struct rps_dev_flow **rflowp)
 {
 	struct netdev_rx_queue *rxqueue;
-	struct rps_map *map = NULL;
+	struct rps_map *map;
 	struct rps_dev_flow_table *flow_table;
 	struct rps_sock_flow_table *sock_flow_table;
 	int cpu = -1;
@@ -2444,15 +2440,15 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	} else
 		rxqueue = dev->_rx;
 
-	if (rxqueue->rps_map) {
-		map = rcu_dereference(rxqueue->rps_map);
-		if (map && map->len == 1) {
+	map = rcu_dereference(rxqueue->rps_map);
+	if (map) {
+		if (map->len == 1) {
 			tcpu = map->cpus[0];
 			if (cpu_online(tcpu))
 				cpu = tcpu;
 			goto done;
 		}
-	} else if (!rxqueue->rps_flow_table) {
+	} else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
 		goto done;
 	}
 
@@ -5416,7 +5412,7 @@ void netdev_run_todo(void)
 		/* paranoia */
 		BUG_ON(netdev_refcnt_read(dev));
 		WARN_ON(rcu_dereference_raw(dev->ip_ptr));
-		WARN_ON(dev->ip6_ptr);
+		WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
 		WARN_ON(dev->dn_ptr);
 
 		if (dev->destructor)
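
The dev_can_checksum() rework above is the interesting piece: instead of testing the outer protocol and then special-casing 802.1Q, it resolves the effective protocol first (unwrapping one VLAN tag still present in the payload) and makes a single feature check. A minimal userspace sketch of that control flow — the hardware-accelerated-tag branch is omitted, and the flag values are illustrative, not the kernel's NETIF_F_* bits:

	#include <stdbool.h>
	#include <stdint.h>

	#define F_NO_CSUM 0x1	/* device can checksum anything */
	#define F_V4_CSUM 0x2	/* device can checksum IPv4 */
	#define F_V6_CSUM 0x4	/* device can checksum IPv6 */

	#define P_IP	0x0800
	#define P_IPV6	0x86DD
	#define P_8021Q	0x8100

	static bool can_checksum(unsigned long features, uint16_t proto)
	{
		return (features & F_NO_CSUM) ||
		       ((features & F_V4_CSUM) && proto == P_IP) ||
		       ((features & F_V6_CSUM) && proto == P_IPV6);
	}

	static bool dev_can_checksum(unsigned long features,
				     unsigned long vlan_features,
				     uint16_t proto, uint16_t encap_proto)
	{
		if (proto == P_8021Q) {		/* VLAN tag still in payload */
			proto = encap_proto;	/* h_vlan_encapsulated_proto */
			features &= vlan_features;
		}
		return can_checksum(features, proto);
	}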
diff --git a/net/core/dst.c b/net/core/dst.c
index 8abe628b79f1..b99c7c7ffce2 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -370,6 +370,7 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
 
 static struct notifier_block dst_dev_notifier = {
 	.notifier_call = dst_dev_event,
+	.priority = -10, /* must be called after other network notifiers */
 };
 
 void __init dst_init(void)
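
For context on the one-line dst.c change: notifier chains invoke callbacks in descending priority order (the default is 0), so .priority = -10 guarantees dst's handler runs after the other netdev notifiers it depends on. A small, runnable userspace model of that ordering rule, with names made up for the example:

	#include <stdio.h>

	struct notifier {
		const char *name;
		int priority;		/* higher runs first; default is 0 */
		struct notifier *next;
	};

	/* Insert keeping the chain sorted by descending priority, the way
	 * the kernel's notifier registration does. */
	static void chain_register(struct notifier **head, struct notifier *n)
	{
		while (*head && (*head)->priority >= n->priority)
			head = &(*head)->next;
		n->next = *head;
		*head = n;
	}

	int main(void)
	{
		struct notifier dev = { .name = "netdev", .priority = 0 };
		struct notifier dst = { .name = "dst", .priority = -10 };
		struct notifier *chain = NULL, *n;

		chain_register(&chain, &dst);	/* registration order is irrelevant */
		chain_register(&chain, &dev);
		for (n = chain; n; n = n->next)
			printf("%s\n", n->name);	/* netdev, then dst */
		return 0;
	}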
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 1bc3f253ba6c..82a4369ae150 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -351,12 +351,12 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 
 		list_for_each_entry(r, &ops->rules_list, list) {
 			if (r->pref == rule->target) {
-				rule->ctarget = r;
+				RCU_INIT_POINTER(rule->ctarget, r);
 				break;
 			}
 		}
 
-		if (rule->ctarget == NULL)
+		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
 			unresolved = 1;
 	} else if (rule->action == FR_ACT_GOTO)
 		goto errout_free;
@@ -373,6 +373,11 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 
 	fib_rule_get(rule);
 
+	if (last)
+		list_add_rcu(&rule->list, &last->list);
+	else
+		list_add_rcu(&rule->list, &ops->rules_list);
+
 	if (ops->unresolved_rules) {
 		/*
 		 * There are unresolved goto rules in the list, check if
@@ -381,7 +386,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 		list_for_each_entry(r, &ops->rules_list, list) {
 			if (r->action == FR_ACT_GOTO &&
 			    r->target == rule->pref) {
-				BUG_ON(r->ctarget != NULL);
+				BUG_ON(rtnl_dereference(r->ctarget) != NULL);
 				rcu_assign_pointer(r->ctarget, rule);
 				if (--ops->unresolved_rules == 0)
 					break;
@@ -395,11 +400,6 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	if (unresolved)
 		ops->unresolved_rules++;
 
-	if (last)
-		list_add_rcu(&rule->list, &last->list);
-	else
-		list_add_rcu(&rule->list, &ops->rules_list);
-
 	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
 	flush_route_cache(ops);
 	rules_ops_put(ops);
@@ -487,7 +487,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 		 */
 		if (ops->nr_goto_rules > 0) {
 			list_for_each_entry(tmp, &ops->rules_list, list) {
-				if (tmp->ctarget == rule) {
+				if (rtnl_dereference(tmp->ctarget) == rule) {
 					rcu_assign_pointer(tmp->ctarget, NULL);
 					ops->unresolved_rules++;
 				}
@@ -545,7 +545,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
 	frh->action = rule->action;
 	frh->flags = rule->flags;
 
-	if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL)
+	if (rule->action == FR_ACT_GOTO &&
+	    rcu_dereference_raw(rule->ctarget) == NULL)
 		frh->flags |= FIB_RULE_UNRESOLVED;
 
 	if (rule->iifname[0]) {
diff --git a/net/core/filter.c b/net/core/filter.c
index 7adf50352918..23e9b2a6b4c8 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -89,8 +89,8 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
 	rcu_read_lock_bh();
 	filter = rcu_dereference_bh(sk->sk_filter);
 	if (filter) {
-		unsigned int pkt_len = sk_run_filter(skb, filter->insns,
-				filter->len);
+		unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len);
+
 		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
 	}
 	rcu_read_unlock_bh();
@@ -112,39 +112,41 @@ EXPORT_SYMBOL(sk_filter);
  */
 unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
 {
-	struct sock_filter *fentry;	/* We walk down these */
 	void *ptr;
 	u32 A = 0;			/* Accumulator */
 	u32 X = 0;			/* Index Register */
 	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
+	unsigned long memvalid = 0;
 	u32 tmp;
 	int k;
 	int pc;
 
+	BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
 	/*
 	 * Process array of filter instructions.
 	 */
 	for (pc = 0; pc < flen; pc++) {
-		fentry = &filter[pc];
+		const struct sock_filter *fentry = &filter[pc];
+		u32 f_k = fentry->k;
 
 		switch (fentry->code) {
 		case BPF_S_ALU_ADD_X:
 			A += X;
 			continue;
 		case BPF_S_ALU_ADD_K:
-			A += fentry->k;
+			A += f_k;
 			continue;
 		case BPF_S_ALU_SUB_X:
 			A -= X;
 			continue;
 		case BPF_S_ALU_SUB_K:
-			A -= fentry->k;
+			A -= f_k;
 			continue;
 		case BPF_S_ALU_MUL_X:
 			A *= X;
 			continue;
 		case BPF_S_ALU_MUL_K:
-			A *= fentry->k;
+			A *= f_k;
 			continue;
 		case BPF_S_ALU_DIV_X:
 			if (X == 0)
@@ -152,49 +154,49 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
 			A /= X;
 			continue;
 		case BPF_S_ALU_DIV_K:
-			A /= fentry->k;
+			A /= f_k;
 			continue;
 		case BPF_S_ALU_AND_X:
 			A &= X;
 			continue;
 		case BPF_S_ALU_AND_K:
-			A &= fentry->k;
+			A &= f_k;
 			continue;
 		case BPF_S_ALU_OR_X:
 			A |= X;
 			continue;
 		case BPF_S_ALU_OR_K:
-			A |= fentry->k;
+			A |= f_k;
 			continue;
 		case BPF_S_ALU_LSH_X:
 			A <<= X;
 			continue;
 		case BPF_S_ALU_LSH_K:
-			A <<= fentry->k;
+			A <<= f_k;
 			continue;
 		case BPF_S_ALU_RSH_X:
 			A >>= X;
 			continue;
 		case BPF_S_ALU_RSH_K:
-			A >>= fentry->k;
+			A >>= f_k;
 			continue;
 		case BPF_S_ALU_NEG:
 			A = -A;
 			continue;
 		case BPF_S_JMP_JA:
-			pc += fentry->k;
+			pc += f_k;
 			continue;
 		case BPF_S_JMP_JGT_K:
-			pc += (A > fentry->k) ? fentry->jt : fentry->jf;
+			pc += (A > f_k) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JGE_K:
-			pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
+			pc += (A >= f_k) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JEQ_K:
-			pc += (A == fentry->k) ? fentry->jt : fentry->jf;
+			pc += (A == f_k) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JSET_K:
-			pc += (A & fentry->k) ? fentry->jt : fentry->jf;
+			pc += (A & f_k) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JGT_X:
 			pc += (A > X) ? fentry->jt : fentry->jf;
@@ -209,7 +211,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
 			pc += (A & X) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_LD_W_ABS:
-			k = fentry->k;
+			k = f_k;
 load_w:
 			ptr = load_pointer(skb, k, 4, &tmp);
 			if (ptr != NULL) {
@@ -218,7 +220,7 @@ load_w:
 			}
 			break;
 		case BPF_S_LD_H_ABS:
-			k = fentry->k;
+			k = f_k;
load_h:
 			ptr = load_pointer(skb, k, 2, &tmp);
 			if (ptr != NULL) {
@@ -227,7 +229,7 @@ load_h:
 			}
 			break;
 		case BPF_S_LD_B_ABS:
-			k = fentry->k;
+			k = f_k;
load_b:
 			ptr = load_pointer(skb, k, 1, &tmp);
 			if (ptr != NULL) {
@@ -242,32 +244,34 @@ load_b:
 			X = skb->len;
 			continue;
 		case BPF_S_LD_W_IND:
-			k = X + fentry->k;
+			k = X + f_k;
 			goto load_w;
 		case BPF_S_LD_H_IND:
-			k = X + fentry->k;
+			k = X + f_k;
 			goto load_h;
 		case BPF_S_LD_B_IND:
-			k = X + fentry->k;
+			k = X + f_k;
 			goto load_b;
 		case BPF_S_LDX_B_MSH:
-			ptr = load_pointer(skb, fentry->k, 1, &tmp);
+			ptr = load_pointer(skb, f_k, 1, &tmp);
 			if (ptr != NULL) {
 				X = (*(u8 *)ptr & 0xf) << 2;
 				continue;
 			}
 			return 0;
 		case BPF_S_LD_IMM:
-			A = fentry->k;
+			A = f_k;
 			continue;
 		case BPF_S_LDX_IMM:
-			X = fentry->k;
+			X = f_k;
 			continue;
 		case BPF_S_LD_MEM:
-			A = mem[fentry->k];
+			A = (memvalid & (1UL << f_k)) ?
+				mem[f_k] : 0;
 			continue;
 		case BPF_S_LDX_MEM:
-			X = mem[fentry->k];
+			X = (memvalid & (1UL << f_k)) ?
+				mem[f_k] : 0;
 			continue;
 		case BPF_S_MISC_TAX:
 			X = A;
@@ -276,14 +280,16 @@ load_b:
 			A = X;
 			continue;
 		case BPF_S_RET_K:
-			return fentry->k;
+			return f_k;
 		case BPF_S_RET_A:
 			return A;
 		case BPF_S_ST:
-			mem[fentry->k] = A;
+			memvalid |= 1UL << f_k;
+			mem[f_k] = A;
 			continue;
 		case BPF_S_STX:
-			mem[fentry->k] = X;
+			memvalid |= 1UL << f_k;
+			mem[f_k] = X;
 			continue;
 		default:
 			WARN_ON(1);
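
The memvalid bookkeeping added above closes an information leak: a crafted filter could execute a scratch-memory load (BPF_S_LD_MEM / BPF_S_LDX_MEM) on a slot that was never stored and read uninitialized kernel stack. One bit per slot records stores, loads from unwritten slots now yield 0, and the BUILD_BUG_ON guarantees the slot count fits in the mask. A standalone, runnable sketch of the guard (the slot count here is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define MEMWORDS 16	/* must not exceed the bits in unsigned long */

	static uint32_t mem[MEMWORDS];
	static unsigned long memvalid;	/* bit n set <=> mem[n] was stored */

	static void store(unsigned int k, uint32_t v)
	{
		memvalid |= 1UL << k;
		mem[k] = v;
	}

	static uint32_t load(unsigned int k)
	{
		return (memvalid & (1UL << k)) ? mem[k] : 0;
	}

	int main(void)
	{
		store(3, 0xdeadbeef);
		/* prints "deadbeef 0": slot 7 was never written */
		printf("%x %x\n", load(3), load(7));
		return 0;
	}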
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 72aceb1fe4fa..c40f27e7d208 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -35,10 +35,9 @@
  *	in any case.
  */
 
-long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
+int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
 {
-	int size, ct;
-	long err;
+	int size, ct, err;
 
 	if (m->msg_namelen) {
 		if (mode == VERIFY_READ) {
@@ -62,14 +61,13 @@ long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
 	err = 0;
 
 	for (ct = 0; ct < m->msg_iovlen; ct++) {
-		err += iov[ct].iov_len;
-		/*
-		 * Goal is not to verify user data, but to prevent returning
-		 * negative value, which is interpreted as errno.
-		 * Overflow is still possible, but it is harmless.
-		 */
-		if (err < 0)
-			return -EMSGSIZE;
+		size_t len = iov[ct].iov_len;
+
+		if (len > INT_MAX - err) {
+			len = INT_MAX - err;
+			iov[ct].iov_len = len;
+		}
+		err += len;
 	}
 
 	return err;
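
The verify_iovec() rewrite swaps an after-the-fact sign check for up-front saturation: the total is accumulated in an int, so each segment is trimmed in place so that the running sum can never exceed INT_MAX — and therefore never turns negative, which callers would misread as -errno. A userspace sketch of the same clamping:

	#include <limits.h>
	#include <stddef.h>
	#include <sys/uio.h>

	static int total_iov_len(struct iovec *iov, int iovlen)
	{
		int err = 0;

		for (int ct = 0; ct < iovlen; ct++) {
			size_t len = iov[ct].iov_len;

			if (len > (size_t)(INT_MAX - err)) {
				len = INT_MAX - err;
				iov[ct].iov_len = len;	/* trim the segment too */
			}
			err += len;	/* can never exceed INT_MAX now */
		}
		return err;
	}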
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index b143173e3eb2..a5ff5a89f376 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -598,7 +598,8 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 	}
 
 	spin_lock(&rps_map_lock);
-	old_map = queue->rps_map;
+	old_map = rcu_dereference_protected(queue->rps_map,
+					    lockdep_is_held(&rps_map_lock));
 	rcu_assign_pointer(queue->rps_map, map);
 	spin_unlock(&rps_map_lock);
 
@@ -677,7 +678,8 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
 		table = NULL;
 
 	spin_lock(&rps_dev_flow_lock);
-	old_table = queue->rps_flow_table;
+	old_table = rcu_dereference_protected(queue->rps_flow_table,
+					      lockdep_is_held(&rps_dev_flow_lock));
 	rcu_assign_pointer(queue->rps_flow_table, table);
 	spin_unlock(&rps_dev_flow_lock);
 
@@ -705,13 +707,17 @@ static void rx_queue_release(struct kobject *kobj)
 {
 	struct netdev_rx_queue *queue = to_rx_queue(kobj);
 	struct netdev_rx_queue *first = queue->first;
+	struct rps_map *map;
+	struct rps_dev_flow_table *flow_table;
 
-	if (queue->rps_map)
-		call_rcu(&queue->rps_map->rcu, rps_map_release);
 
-	if (queue->rps_flow_table)
-		call_rcu(&queue->rps_flow_table->rcu,
-		    rps_dev_flow_table_release);
+	map = rcu_dereference_raw(queue->rps_map);
+	if (map)
+		call_rcu(&map->rcu, rps_map_release);
+
+	flow_table = rcu_dereference_raw(queue->rps_flow_table);
+	if (flow_table)
+		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
 
 	if (atomic_dec_and_test(&first->count))
 		kfree(first);
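
The two store_* hunks are the canonical update side of the __rcu annotation pattern this merge applies throughout net/core: readers use rcu_dereference() under rcu_read_lock(), while a writer already holding the serializing lock fetches the old pointer with rcu_dereference_protected() plus a lockdep expression — no read-side barrier, and the locking assumption is both documented and machine-checked. A kernel-style schematic of the whole lifecycle; the struct fields and map_release() name are hypothetical:

	struct map {
		struct rcu_head rcu;
		/* ... payload ... */
	};

	static struct map __rcu *active_map;	/* sparse checks __rcu uses */
	static DEFINE_SPINLOCK(map_lock);

	static void map_release(struct rcu_head *head)
	{
		kfree(container_of(head, struct map, rcu));
	}

	static void replace_map(struct map *new_map)
	{
		struct map *old;

		spin_lock(&map_lock);
		old = rcu_dereference_protected(active_map,
						lockdep_is_held(&map_lock));
		rcu_assign_pointer(active_map, new_map);	/* publish */
		spin_unlock(&map_lock);

		if (old)
			call_rcu(&old->rcu, map_release);	/* free after grace period */
	}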
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index c988e685433a..3f860261c5ee 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -42,7 +42,9 @@ static int net_assign_generic(struct net *net, int id, void *data)
 	BUG_ON(!mutex_is_locked(&net_mutex));
 	BUG_ON(id == 0);
 
-	ng = old_ng = net->gen;
+	old_ng = rcu_dereference_protected(net->gen,
+					   lockdep_is_held(&net_mutex));
+	ng = old_ng;
 	if (old_ng->len >= id)
 		goto assign;
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2c0df0f95b3d..33bc3823ac6f 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -771,10 +771,10 @@ done:
 static unsigned long num_arg(const char __user * user_buffer,
 			     unsigned long maxlen, unsigned long *num)
 {
-	int i = 0;
+	int i;
 	*num = 0;
 
-	for (; i < maxlen; i++) {
+	for (i = 0; i < maxlen; i++) {
 		char c;
 		if (get_user(c, &user_buffer[i]))
 			return -EFAULT;
@@ -789,9 +789,9 @@ static unsigned long num_arg(const char __user * user_buffer,
 
 static int strn_len(const char __user * user_buffer, unsigned int maxlen)
 {
-	int i = 0;
+	int i;
 
-	for (; i < maxlen; i++) {
+	for (i = 0; i < maxlen; i++) {
 		char c;
 		if (get_user(c, &user_buffer[i]))
 			return -EFAULT;
@@ -846,7 +846,7 @@ static ssize_t pktgen_if_write(struct file *file,
 {
 	struct seq_file *seq = file->private_data;
 	struct pktgen_dev *pkt_dev = seq->private;
-	int i = 0, max, len;
+	int i, max, len;
 	char name[16], valstr[32];
 	unsigned long value = 0;
 	char *pg_result = NULL;
@@ -860,13 +860,13 @@ static ssize_t pktgen_if_write(struct file *file,
 		return -EINVAL;
 	}
 
-	max = count - i;
-	tmp = count_trail_chars(&user_buffer[i], max);
+	max = count;
+	tmp = count_trail_chars(user_buffer, max);
 	if (tmp < 0) {
 		pr_warning("illegal format\n");
 		return tmp;
 	}
-	i += tmp;
+	i = tmp;
 
 	/* Read variable name */
 
@@ -887,10 +887,11 @@ static ssize_t pktgen_if_write(struct file *file,
 	i += len;
 
 	if (debug) {
-		char tb[count + 1];
-		if (copy_from_user(tb, user_buffer, count))
+		size_t copy = min_t(size_t, count, 1023);
+		char tb[copy + 1];
+		if (copy_from_user(tb, user_buffer, copy))
 			return -EFAULT;
-		tb[count] = 0;
+		tb[copy] = 0;
 		printk(KERN_DEBUG "pktgen: %s,%lu buffer -:%s:-\n", name,
 		       (unsigned long)count, tb);
 	}
@@ -1764,7 +1765,7 @@ static ssize_t pktgen_thread_write(struct file *file,
 {
 	struct seq_file *seq = file->private_data;
 	struct pktgen_thread *t = seq->private;
-	int i = 0, max, len, ret;
+	int i, max, len, ret;
 	char name[40];
 	char *pg_result;
 
@@ -1773,12 +1774,12 @@ static ssize_t pktgen_thread_write(struct file *file,
 		return -EINVAL;
 	}
 
-	max = count - i;
-	len = count_trail_chars(&user_buffer[i], max);
+	max = count;
+	len = count_trail_chars(user_buffer, max);
 	if (len < 0)
 		return len;
 
-	i += len;
+	i = len;
 
 	/* Read variable name */
 
@@ -1975,7 +1976,7 @@ static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev,
 						  const char *ifname)
 {
 	char b[IFNAMSIZ+5];
-	int i = 0;
+	int i;
 
 	for (i = 0; ifname[i] != '@'; i++) {
 		if (i == IFNAMSIZ)
@@ -2519,8 +2520,8 @@ static void free_SAs(struct pktgen_dev *pkt_dev)
 {
 	if (pkt_dev->cflows) {
 		/* let go of the SAs if we have them */
-		int i = 0;
-		for (; i < pkt_dev->cflows; i++) {
+		int i;
+		for (i = 0; i < pkt_dev->cflows; i++) {
 			struct xfrm_state *x = pkt_dev->flows[i].x;
 			if (x) {
 				xfrm_state_put(x);
@@ -2611,8 +2612,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	/* Update any of the values, used when we're incrementing various
 	 * fields.
 	 */
-	queue_map = pkt_dev->cur_queue_map;
 	mod_cur_headers(pkt_dev);
+	queue_map = pkt_dev->cur_queue_map;
 
 	datalen = (odev->hard_header_len + 16) & ~0xf;
 
@@ -2975,8 +2976,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	/* Update any of the values, used when we're incrementing various
	 * fields.
 	 */
-	queue_map = pkt_dev->cur_queue_map;
 	mod_cur_headers(pkt_dev);
+	queue_map = pkt_dev->cur_queue_map;
 
 	skb = __netdev_alloc_skb(odev,
 			pkt_dev->cur_pkt_size + 64
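
Most pktgen hunks are loop-initialization cleanups plus the reordering that reads cur_queue_map only after mod_cur_headers() has updated it, but the debug-path change is a real hardening: the variable-length array was sized straight from the user-controlled write length, which can exhaust the small fixed kernel stack. Clamping the copy first bounds the array (1023 is the cap used in the hunk). A userspace sketch of the bounded-VLA idea:

	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	#define DEBUG_COPY_MAX 1023

	static void debug_dump(const char *user_buffer, size_t count)
	{
		size_t copy = count < DEBUG_COPY_MAX ? count : DEBUG_COPY_MAX;
		char tb[copy + 1];	/* bounded VLA: at most 1024 bytes */

		memcpy(tb, user_buffer, copy);	/* kernel uses copy_from_user() */
		tb[copy] = '\0';
		printf("pktgen: %zu byte buffer -:%s:-\n", count, tb);
	}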
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 8121268ddbdd..841c287ef40a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -347,16 +347,17 @@ static size_t rtnl_link_get_size(const struct net_device *dev)
 	if (!ops)
 		return 0;
 
-	size = nlmsg_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
-	       nlmsg_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */
+	size = nla_total_size(sizeof(struct nlattr)) +	 /* IFLA_LINKINFO */
+	       nla_total_size(strlen(ops->kind) + 1);	 /* IFLA_INFO_KIND */
 
 	if (ops->get_size)
 		/* IFLA_INFO_DATA + nested data */
-		size += nlmsg_total_size(sizeof(struct nlattr)) +
+		size += nla_total_size(sizeof(struct nlattr)) +
 			ops->get_size(dev);
 
 	if (ops->get_xstats_size)
-		size += ops->get_xstats_size(dev); /* IFLA_INFO_XSTATS */
+		/* IFLA_INFO_XSTATS */
+		size += nla_total_size(ops->get_xstats_size(dev));
 
 	return size;
 }
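
This sizing fix matters because a netlink attribute carries a 4-byte struct nlattr header aligned to 4 bytes, while nlmsg_total_size() accounts for the 16-byte message header — so the old code overcharged every attribute and never charged a header for IFLA_INFO_XSTATS at all. A worked example of the attribute arithmetic; the constants mirror include/net/netlink.h of this era:

	#include <stdio.h>

	#define NLA_ALIGNTO	4
	#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
	#define NLA_HDRLEN	4	/* NLA_ALIGN(sizeof(struct nlattr)) */

	static int nla_total_size(int payload)
	{
		return NLA_ALIGN(NLA_HDRLEN + payload);
	}

	int main(void)
	{
		/* e.g. IFLA_INFO_KIND carrying "vlan\0" (5 bytes):
		 * 4 + 5 = 9, aligned up to 12 -- versus 24 if the 16-byte
		 * message header were (wrongly) used via nlmsg_total_size(). */
		printf("%d\n", nla_total_size(5));
		return 0;
	}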
diff --git a/net/core/sock.c b/net/core/sock.c
index 11db43632df8..fb6080111461 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1225,7 +1225,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 		sock_reset_flag(newsk, SOCK_DONE);
 		skb_queue_head_init(&newsk->sk_error_queue);
 
-		filter = newsk->sk_filter;
+		filter = rcu_dereference_protected(newsk->sk_filter, 1);
 		if (filter != NULL)
 			sk_filter_charge(newsk, filter);
 
@@ -1653,10 +1653,10 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
 {
 	struct proto *prot = sk->sk_prot;
 	int amt = sk_mem_pages(size);
-	int allocated;
+	long allocated;
 
 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
-	allocated = atomic_add_return(amt, prot->memory_allocated);
+	allocated = atomic_long_add_return(amt, prot->memory_allocated);
 
 	/* Under limit. */
 	if (allocated <= prot->sysctl_mem[0]) {
@@ -1714,7 +1714,7 @@ suppress_allocation:
 
 	/* Alas. Undo changes. */
 	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
-	atomic_sub(amt, prot->memory_allocated);
+	atomic_long_sub(amt, prot->memory_allocated);
 	return 0;
 }
 EXPORT_SYMBOL(__sk_mem_schedule);
@@ -1727,12 +1727,12 @@ void __sk_mem_reclaim(struct sock *sk)
 {
 	struct proto *prot = sk->sk_prot;
 
-	atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
+	atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
 		   prot->memory_allocated);
 	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
 
 	if (prot->memory_pressure && *prot->memory_pressure &&
-	    (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
+	    (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
 		*prot->memory_pressure = 0;
 }
 EXPORT_SYMBOL(__sk_mem_reclaim);
@@ -2452,12 +2452,12 @@ static char proto_method_implemented(const void *method)
 
 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
 {
-	seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
+	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
 			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
 		   proto->name,
 		   proto->obj_size,
 		   sock_prot_inuse_get(seq_file_net(seq), proto),
-		   proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
+		   proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L,
 		   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
 		   proto->max_header,
 		   proto->slab == NULL ? "no" : "yes",
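
The sock.c conversion widens per-protocol memory accounting from atomic_t to atomic_long_t: prot->memory_allocated counts SK_MEM_QUANTUM units shared across every socket of a protocol, and on 64-bit machines a 32-bit counter can overflow. The /proc format strings change in step (%6d to %6ld, -1 to -1L). A userspace analogue of the charge/uncharge pair using C11 atomics:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_long memory_allocated;	/* quanta, shared by all sockets */

	/* Mirror of atomic_long_add_return(): charge and report new total. */
	static long mem_schedule(long amt)
	{
		return atomic_fetch_add(&memory_allocated, amt) + amt;
	}

	static void mem_reclaim(long amt)
	{
		atomic_fetch_sub(&memory_allocated, amt);
	}

	int main(void)
	{
		printf("%ld\n", mem_schedule(256));	/* total after charging */
		mem_reclaim(256);
		return 0;
	}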
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 01eee5d984be..385b6095fdc4 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -34,7 +34,8 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
 
 	mutex_lock(&sock_flow_mutex);
 
-	orig_sock_table = rps_sock_flow_table;
+	orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
+						    lockdep_is_held(&sock_flow_mutex));
 	size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
 
 	ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);