author      Jiri Kosina <jkosina@suse.cz>   2010-12-22 12:57:02 -0500
committer   Jiri Kosina <jkosina@suse.cz>   2010-12-22 12:57:02 -0500
commit      4b7bd364700d9ac8372eff48832062b936d0793b (patch)
tree        0dbf78c95456a0b02d07fcd473281f04a87e266d /net/core
parent      c0d8768af260e2cbb4bf659ae6094a262c86b085 (diff)
parent      90a8a73c06cc32b609a880d48449d7083327e11a (diff)
Merge branch 'master' into for-next
Conflicts:
MAINTAINERS
arch/arm/mach-omap2/pm24xx.c
drivers/scsi/bfa/bfa_fcpim.c
Needed to update to apply fixes for which the old branch was too
outdated.
Diffstat (limited to 'net/core')
 net/core/dev.c          |  2
 net/core/dst.c          |  1
 net/core/filter.c       | 83
 net/core/net-sysfs.c    | 10
 net/core/pktgen.c       |  6
 net/core/request_sock.c |  4
 net/core/rtnetlink.c    |  9
 net/core/sock.c         | 14
 net/core/timestamping.c |  6
 9 files changed, 71 insertions(+), 64 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 89204e8c0e14..126694116852 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2131,7 +2131,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
         } else {
                 struct sock *sk = skb->sk;
                 queue_index = sk_tx_queue_get(sk);
-                if (queue_index < 0) {
+                if (queue_index < 0 || queue_index >= dev->real_num_tx_queues) {
 
                         queue_index = 0;
                         if (dev->real_num_tx_queues > 1)
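The dev.c change guards against a cached TX queue index that is no longer valid after the device's real_num_tx_queues has been reduced (for example by a driver reconfiguration); a stale cached index would otherwise select a queue that no longer exists. A minimal userspace sketch of the same revalidation pattern (illustrative names, not the kernel API):

```c
#include <stdio.h>

/* Reuse the cached queue index only if it is still in range,
 * otherwise fall back to recomputing (here: queue 0). */
static int pick_tx_queue(int cached_index, int real_num_tx_queues)
{
    if (cached_index < 0 || cached_index >= real_num_tx_queues)
        return 0;               /* stale or unset cache: recompute */
    return cached_index;        /* cache still valid */
}

int main(void)
{
    /* A socket cached queue 7 while the device had 8 queues ... */
    printf("%d\n", pick_tx_queue(7, 8));  /* 7: still valid */
    /* ... but the driver later shrank the device to 4 queues. */
    printf("%d\n", pick_tx_queue(7, 4));  /* 0: cached index rejected */
    return 0;
}
```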
diff --git a/net/core/dst.c b/net/core/dst.c
index 8abe628b79f1..b99c7c7ffce2 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -370,6 +370,7 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
 
 static struct notifier_block dst_dev_notifier = {
         .notifier_call = dst_dev_event,
+        .priority = -10, /* must be called after other network notifiers */
 };
 
 void __init dst_init(void)
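The new `.priority = -10` relies on notifier chains calling entries in descending priority order, so a negative priority runs after the default-priority (0) network notifiers, as the added comment says. A rough userspace illustration of that ordering rule (plain C, not the kernel notifier API):

```c
#include <stdio.h>
#include <stdlib.h>

struct notifier { int priority; const char *name; };

static int by_descending_priority(const void *a, const void *b)
{
    const struct notifier *x = a, *y = b;
    return y->priority - x->priority;   /* higher priority runs first */
}

int main(void)
{
    struct notifier chain[] = {
        {   0, "some_netdev_notifier" },    /* default priority */
        { -10, "dst_dev_notifier" },        /* wants to run last */
        {   0, "another_netdev_notifier" },
    };
    size_t n = sizeof(chain) / sizeof(chain[0]);
    size_t i;

    qsort(chain, n, sizeof(chain[0]), by_descending_priority);
    for (i = 0; i < n; i++)
        printf("%zu: %s (priority %d)\n", i, chain[i].name, chain[i].priority);
    return 0;
}
```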
diff --git a/net/core/filter.c b/net/core/filter.c
index 7beaec36b541..ae21a0d3c4a2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -112,39 +112,41 @@ EXPORT_SYMBOL(sk_filter);
  */
 unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
 {
-        struct sock_filter *fentry;     /* We walk down these */
         void *ptr;
         u32 A = 0;                      /* Accumulator */
         u32 X = 0;                      /* Index Register */
         u32 mem[BPF_MEMWORDS];          /* Scratch Memory Store */
+        unsigned long memvalid = 0;
         u32 tmp;
         int k;
         int pc;
 
+        BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
         /*
          * Process array of filter instructions.
          */
         for (pc = 0; pc < flen; pc++) {
-                fentry = &filter[pc];
+                const struct sock_filter *fentry = &filter[pc];
+                u32 f_k = fentry->k;
 
                 switch (fentry->code) {
                 case BPF_S_ALU_ADD_X:
                         A += X;
                         continue;
                 case BPF_S_ALU_ADD_K:
-                        A += fentry->k;
+                        A += f_k;
                         continue;
                 case BPF_S_ALU_SUB_X:
                         A -= X;
                         continue;
                 case BPF_S_ALU_SUB_K:
-                        A -= fentry->k;
+                        A -= f_k;
                         continue;
                 case BPF_S_ALU_MUL_X:
                         A *= X;
                         continue;
                 case BPF_S_ALU_MUL_K:
-                        A *= fentry->k;
+                        A *= f_k;
                         continue;
                 case BPF_S_ALU_DIV_X:
                         if (X == 0)
@@ -152,49 +154,49 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
                         A /= X;
                         continue;
                 case BPF_S_ALU_DIV_K:
-                        A /= fentry->k;
+                        A /= f_k;
                         continue;
                 case BPF_S_ALU_AND_X:
                         A &= X;
                         continue;
                 case BPF_S_ALU_AND_K:
-                        A &= fentry->k;
+                        A &= f_k;
                         continue;
                 case BPF_S_ALU_OR_X:
                         A |= X;
                         continue;
                 case BPF_S_ALU_OR_K:
-                        A |= fentry->k;
+                        A |= f_k;
                         continue;
                 case BPF_S_ALU_LSH_X:
                         A <<= X;
                         continue;
                 case BPF_S_ALU_LSH_K:
-                        A <<= fentry->k;
+                        A <<= f_k;
                         continue;
                 case BPF_S_ALU_RSH_X:
                         A >>= X;
                         continue;
                 case BPF_S_ALU_RSH_K:
-                        A >>= fentry->k;
+                        A >>= f_k;
                         continue;
                 case BPF_S_ALU_NEG:
                         A = -A;
                         continue;
                 case BPF_S_JMP_JA:
-                        pc += fentry->k;
+                        pc += f_k;
                         continue;
                 case BPF_S_JMP_JGT_K:
-                        pc += (A > fentry->k) ? fentry->jt : fentry->jf;
+                        pc += (A > f_k) ? fentry->jt : fentry->jf;
                         continue;
                 case BPF_S_JMP_JGE_K:
-                        pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
+                        pc += (A >= f_k) ? fentry->jt : fentry->jf;
                         continue;
                 case BPF_S_JMP_JEQ_K:
-                        pc += (A == fentry->k) ? fentry->jt : fentry->jf;
+                        pc += (A == f_k) ? fentry->jt : fentry->jf;
                         continue;
                 case BPF_S_JMP_JSET_K:
-                        pc += (A & fentry->k) ? fentry->jt : fentry->jf;
+                        pc += (A & f_k) ? fentry->jt : fentry->jf;
                         continue;
                 case BPF_S_JMP_JGT_X:
                         pc += (A > X) ? fentry->jt : fentry->jf;
@@ -209,7 +211,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
                         pc += (A & X) ? fentry->jt : fentry->jf;
                         continue;
                 case BPF_S_LD_W_ABS:
-                        k = fentry->k;
+                        k = f_k;
 load_w:
                         ptr = load_pointer(skb, k, 4, &tmp);
                         if (ptr != NULL) {
@@ -218,7 +220,7 @@ load_w:
                         }
                         break;
                 case BPF_S_LD_H_ABS:
-                        k = fentry->k;
+                        k = f_k;
 load_h:
                         ptr = load_pointer(skb, k, 2, &tmp);
                         if (ptr != NULL) {
@@ -227,7 +229,7 @@ load_h:
                         }
                         break;
                 case BPF_S_LD_B_ABS:
-                        k = fentry->k;
+                        k = f_k;
 load_b:
                         ptr = load_pointer(skb, k, 1, &tmp);
                         if (ptr != NULL) {
@@ -242,32 +244,34 @@ load_b:
                         X = skb->len;
                         continue;
                 case BPF_S_LD_W_IND:
-                        k = X + fentry->k;
+                        k = X + f_k;
                         goto load_w;
                 case BPF_S_LD_H_IND:
-                        k = X + fentry->k;
+                        k = X + f_k;
                         goto load_h;
                 case BPF_S_LD_B_IND:
-                        k = X + fentry->k;
+                        k = X + f_k;
                         goto load_b;
                 case BPF_S_LDX_B_MSH:
-                        ptr = load_pointer(skb, fentry->k, 1, &tmp);
+                        ptr = load_pointer(skb, f_k, 1, &tmp);
                         if (ptr != NULL) {
                                 X = (*(u8 *)ptr & 0xf) << 2;
                                 continue;
                         }
                         return 0;
                 case BPF_S_LD_IMM:
-                        A = fentry->k;
+                        A = f_k;
                         continue;
                 case BPF_S_LDX_IMM:
-                        X = fentry->k;
+                        X = f_k;
                         continue;
                 case BPF_S_LD_MEM:
-                        A = mem[fentry->k];
+                        A = (memvalid & (1UL << f_k)) ?
+                                mem[f_k] : 0;
                         continue;
                 case BPF_S_LDX_MEM:
-                        X = mem[fentry->k];
+                        X = (memvalid & (1UL << f_k)) ?
+                                mem[f_k] : 0;
                         continue;
                 case BPF_S_MISC_TAX:
                         X = A;
@@ -276,14 +280,16 @@ load_b:
                         A = X;
                         continue;
                 case BPF_S_RET_K:
-                        return fentry->k;
+                        return f_k;
                 case BPF_S_RET_A:
                         return A;
                 case BPF_S_ST:
-                        mem[fentry->k] = A;
+                        memvalid |= 1UL << f_k;
+                        mem[f_k] = A;
                         continue;
                 case BPF_S_STX:
-                        mem[fentry->k] = X;
+                        memvalid |= 1UL << f_k;
+                        mem[f_k] = X;
                         continue;
                 default:
                         WARN_ON(1);
@@ -583,23 +589,16 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
 EXPORT_SYMBOL(sk_chk_filter);
 
 /**
- * sk_filter_rcu_release: Release a socket filter by rcu_head
+ * sk_filter_release_rcu - Release a socket filter by rcu_head
  * @rcu: rcu_head that contains the sk_filter to free
  */
-static void sk_filter_rcu_release(struct rcu_head *rcu)
+void sk_filter_release_rcu(struct rcu_head *rcu)
 {
         struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
 
-        sk_filter_release(fp);
-}
-
-static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp)
-{
-        unsigned int size = sk_filter_len(fp);
-
-        atomic_sub(size, &sk->sk_omem_alloc);
-        call_rcu_bh(&fp->rcu, sk_filter_rcu_release);
+        kfree(fp);
 }
+EXPORT_SYMBOL(sk_filter_release_rcu);
 
 /**
  * sk_attach_filter - attach a socket filter
@@ -643,7 +642,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
         rcu_assign_pointer(sk->sk_filter, fp);
 
         if (old_fp)
-                sk_filter_delayed_uncharge(sk, old_fp);
+                sk_filter_uncharge(sk, old_fp);
         return 0;
 }
 EXPORT_SYMBOL_GPL(sk_attach_filter);
@@ -657,7 +656,7 @@ int sk_detach_filter(struct sock *sk)
                            sock_owned_by_user(sk));
         if (filter) {
                 rcu_assign_pointer(sk->sk_filter, NULL);
-                sk_filter_delayed_uncharge(sk, filter);
+                sk_filter_uncharge(sk, filter);
                 ret = 0;
         }
         return ret;
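Besides replacing the repeated fentry->k loads with a local f_k, the filter.c hunks close a hole where BPF_S_LD_MEM/BPF_S_LDX_MEM could read scratch slots that were never written, leaking uninitialized stack memory to the filter. The new memvalid bitmask records which slots a store has populated (the BUILD_BUG_ON ensures every slot has a bit), and loads from unmarked slots return 0. A standalone sketch of that bitmask guard (simplified, not the kernel interpreter):

```c
#include <stdio.h>

#define MEMWORDS 16   /* scratch store size; must fit in the bitmask */

struct scratch {
    unsigned int  mem[MEMWORDS];  /* deliberately left uninitialized */
    unsigned long valid;          /* bit i set => mem[i] was written */
};

static void store(struct scratch *s, unsigned int slot, unsigned int val)
{
    s->valid |= 1UL << slot;
    s->mem[slot] = val;
}

static unsigned int load(const struct scratch *s, unsigned int slot)
{
    /* Reads of never-written slots yield 0 instead of stale memory. */
    return (s->valid & (1UL << slot)) ? s->mem[slot] : 0;
}

int main(void)
{
    struct scratch s;
    s.valid = 0;                      /* mem[] intentionally not cleared */

    store(&s, 3, 0xdeadbeef);
    printf("slot 3 = %#x\n", load(&s, 3));  /* written: 0xdeadbeef */
    printf("slot 7 = %#x\n", load(&s, 7));  /* never written: 0 */
    return 0;
}
```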
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index a5ff5a89f376..7f902cad10f8 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -712,15 +712,21 @@ static void rx_queue_release(struct kobject *kobj)
 
 
         map = rcu_dereference_raw(queue->rps_map);
-        if (map)
+        if (map) {
+                RCU_INIT_POINTER(queue->rps_map, NULL);
                 call_rcu(&map->rcu, rps_map_release);
+        }
 
         flow_table = rcu_dereference_raw(queue->rps_flow_table);
-        if (flow_table)
+        if (flow_table) {
+                RCU_INIT_POINTER(queue->rps_flow_table, NULL);
                 call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
+        }
 
         if (atomic_dec_and_test(&first->count))
                 kfree(first);
+        else
+                memset(kobj, 0, sizeof(*kobj));
 }
 
 static struct kobj_type rx_queue_ktype = {
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index fbce4b05a53e..33bc3823ac6f 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -887,7 +887,7 @@ static ssize_t pktgen_if_write(struct file *file,
         i += len;
 
         if (debug) {
-                size_t copy = min(count, 1023);
+                size_t copy = min_t(size_t, count, 1023);
                 char tb[copy + 1];
                 if (copy_from_user(tb, user_buffer, copy))
                         return -EFAULT;
@@ -2612,8 +2612,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
         /* Update any of the values, used when we're incrementing various
          * fields.
          */
-        queue_map = pkt_dev->cur_queue_map;
         mod_cur_headers(pkt_dev);
+        queue_map = pkt_dev->cur_queue_map;
 
         datalen = (odev->hard_header_len + 16) & ~0xf;
 
@@ -2976,8 +2976,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
         /* Update any of the values, used when we're incrementing various
          * fields.
          */
-        queue_map = pkt_dev->cur_queue_map;
         mod_cur_headers(pkt_dev);
+        queue_map = pkt_dev->cur_queue_map;
 
         skb = __netdev_alloc_skb(odev,
                                  pkt_dev->cur_pkt_size + 64
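The pktgen hunks do two things: min() becomes min_t() because the kernel's type-checked min() refuses to compare a size_t count with the int constant 1023, and queue_map is read after mod_cur_headers() so it reflects the queue that the header-update step just selected rather than the previous packet's value. A hedged userspace approximation of the type-checked min() versus min_t() (rough imitation of the kernel macros, compiles with GCC/Clang statement expressions):

```c
#include <stdio.h>
#include <stddef.h>

/* min() insists both operands share a type; the pointer comparison below
 * produces a "comparison of distinct pointer types" diagnostic otherwise. */
#define min(x, y) ({                                  \
    __typeof__(x) _x = (x);                           \
    __typeof__(y) _y = (y);                           \
    (void)(&_x == &_y);                               \
    _x < _y ? _x : _y; })

/* min_t() casts both operands to the requested type first. */
#define min_t(type, x, y) ({                          \
    type _tx = (x);                                   \
    type _ty = (y);                                   \
    _tx < _ty ? _tx : _ty; })

int main(void)
{
    size_t count = 4096;

    /* size_t copy = min(count, 1023);   -- size_t vs int mismatch,
     * which a -Werror kernel build treats as a hard error.          */
    size_t copy = min_t(size_t, count, 1023);

    printf("%zu\n", copy);   /* 1023 */
    return 0;
}
```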
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 7552495aff7a..fceeb37d7161 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -45,9 +45,7 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
         nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
         lopt_size += nr_table_entries * sizeof(struct request_sock *);
         if (lopt_size > PAGE_SIZE)
-                lopt = __vmalloc(lopt_size,
-                                 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
-                                 PAGE_KERNEL);
+                lopt = vzalloc(lopt_size);
         else
                 lopt = kzalloc(lopt_size, GFP_KERNEL);
         if (lopt == NULL)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 8121268ddbdd..841c287ef40a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -347,16 +347,17 @@ static size_t rtnl_link_get_size(const struct net_device *dev)
         if (!ops)
                 return 0;
 
-        size = nlmsg_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
-               nlmsg_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
+        size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
+               nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
 
         if (ops->get_size)
                 /* IFLA_INFO_DATA + nested data */
-                size += nlmsg_total_size(sizeof(struct nlattr)) +
+                size += nla_total_size(sizeof(struct nlattr)) +
                         ops->get_size(dev);
 
         if (ops->get_xstats_size)
-                size += ops->get_xstats_size(dev); /* IFLA_INFO_XSTATS */
+                /* IFLA_INFO_XSTATS */
+                size += nla_total_size(ops->get_xstats_size(dev));
 
         return size;
 }
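The rtnetlink fix sizes the nested link info with nla_total_size() (attribute header plus padding) instead of nlmsg_total_size() (which adds the much larger netlink *message* header), and wraps the xstats payload in nla_total_size() so the IFLA_INFO_XSTATS attribute header is counted at all. A small sketch of the two calculations, assuming the usual 4-byte struct nlattr, 16-byte struct nlmsghdr, and 4-byte alignment:

```c
#include <stdio.h>

#define ALIGN4(len)   (((len) + 3) & ~3)
#define NLA_HDRLEN    4    /* sizeof(struct nlattr), already 4-byte aligned   */
#define NLMSG_HDRLEN  16   /* sizeof(struct nlmsghdr), already 4-byte aligned */

/* Space one netlink attribute occupies: header + payload, padded. */
static int nla_total_size(int payload)
{
    return ALIGN4(NLA_HDRLEN + payload);
}

/* Space a whole netlink message occupies: much bigger header. */
static int nlmsg_total_size(int payload)
{
    return ALIGN4(NLMSG_HDRLEN + payload);
}

int main(void)
{
    int xstats_payload = 24;  /* whatever ops->get_xstats_size() reports */

    printf("attribute: %d bytes\n", nla_total_size(xstats_payload));    /* 28 */
    printf("message:   %d bytes\n", nlmsg_total_size(xstats_payload));  /* 40 */
    return 0;
}
```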
diff --git a/net/core/sock.c b/net/core/sock.c
index 3eed5424e659..fb6080111461 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1653,10 +1653,10 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
 {
         struct proto *prot = sk->sk_prot;
         int amt = sk_mem_pages(size);
-        int allocated;
+        long allocated;
 
         sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
-        allocated = atomic_add_return(amt, prot->memory_allocated);
+        allocated = atomic_long_add_return(amt, prot->memory_allocated);
 
         /* Under limit. */
         if (allocated <= prot->sysctl_mem[0]) {
@@ -1714,7 +1714,7 @@ suppress_allocation:
 
         /* Alas. Undo changes. */
         sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
-        atomic_sub(amt, prot->memory_allocated);
+        atomic_long_sub(amt, prot->memory_allocated);
         return 0;
 }
 EXPORT_SYMBOL(__sk_mem_schedule);
@@ -1727,12 +1727,12 @@ void __sk_mem_reclaim(struct sock *sk)
 {
         struct proto *prot = sk->sk_prot;
 
-        atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
+        atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
                    prot->memory_allocated);
         sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
 
         if (prot->memory_pressure && *prot->memory_pressure &&
-            (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
+            (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
                 *prot->memory_pressure = 0;
 }
 EXPORT_SYMBOL(__sk_mem_reclaim);
@@ -2452,12 +2452,12 @@ static char proto_method_implemented(const void *method)
 
 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
 {
-        seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
+        seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
                    "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
                    proto->name,
                    proto->obj_size,
                    sock_prot_inuse_get(seq_file_net(seq), proto),
-                   proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
+                   proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L,
                    proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
                    proto->max_header,
                    proto->slab == NULL ? "no" : "yes",
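The sock.c hunks widen the per-protocol memory accounting from atomic_t to atomic_long_t: the counter is kept in SK_MEM_QUANTUM (page-sized) units, and on machines with very large memory the totals and the sysctl limits derived from them can exceed what 32 bits hold, so the local variable, the atomic operations, and the /proc/net/protocols format string (%6d to %6ld, -1 to -1L) all change together. A trivial demonstration of why the counter width matters (plain C, not the kernel atomics):

```c
#include <stdio.h>

int main(void)
{
    /* A page count beyond 2^31, as can arise on terabyte-scale machines. */
    long pages = 3000000000L;

    int  as_int  = (int)pages;   /* out of range: wraps (implementation-defined) */
    long as_long = pages;        /* fits in long on 64-bit kernels */

    printf("int:  %d\n", as_int);
    printf("long: %ld\n", as_long);
    return 0;
}
```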
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index 0ae6c22da85b..c19bb4ee405e 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -96,11 +96,13 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb)
         struct phy_device *phydev;
         unsigned int type;
 
-        skb_push(skb, ETH_HLEN);
+        if (skb_headroom(skb) < ETH_HLEN)
+                return false;
+        __skb_push(skb, ETH_HLEN);
 
         type = classify(skb);
 
-        skb_pull(skb, ETH_HLEN);
+        __skb_pull(skb, ETH_HLEN);
 
         switch (type) {
         case PTP_CLASS_V1_IPV4:
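Calling skb_push() on an skb without ETH_HLEN bytes of headroom trips the kernel's under-run check and panics, so the code now bails out when headroom is short and only then uses the unchecked __skb_push()/__skb_pull() pair. A minimal sketch of the same "validate before walking the data pointer back" pattern on a plain buffer (hypothetical struct, not the real sk_buff):

```c
#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN 14

/* Toy packet buffer: data points somewhere inside buf, and the bytes in
 * front of data are the available headroom. */
struct pkt {
    unsigned char buf[128];
    unsigned char *data;
    unsigned int len;
};

static unsigned int headroom(const struct pkt *p)
{
    return (unsigned int)(p->data - p->buf);
}

static bool push_eth_header(struct pkt *p)
{
    if (headroom(p) < ETH_HLEN)   /* would underflow the buffer: refuse */
        return false;
    p->data -= ETH_HLEN;          /* unchecked move, now known to be safe */
    p->len  += ETH_HLEN;
    return true;
}

int main(void)
{
    struct pkt p;
    p.data = p.buf + 2;           /* only 2 bytes of headroom */
    p.len  = 60;

    printf(push_eth_header(&p) ? "pushed\n" : "no headroom, skipped\n");
    return 0;
}
```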