Diffstat (limited to 'net/core')
-rw-r--r--  net/core/flow.c      | 12 ++++++++++++
-rw-r--r--  net/core/net-sysfs.c |  7 +++++--
-rw-r--r--  net/core/sock.c      |  6 +-----
3 files changed, 18 insertions(+), 7 deletions(-)
diff --git a/net/core/flow.c b/net/core/flow.c
index 8ae42de9c79e..e318c7e98042 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -358,6 +358,18 @@ void flow_cache_flush(void)
 	put_online_cpus();
 }
 
+static void flow_cache_flush_task(struct work_struct *work)
+{
+	flow_cache_flush();
+}
+
+static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
+
+void flow_cache_flush_deferred(void)
+{
+	schedule_work(&flow_cache_flush_work);
+}
+
 static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
 {
 	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
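flow_cache_flush() can sleep (note the put_online_cpus() in the context above), so the new flow_cache_flush_deferred() lets callers that must not sleep hand the flush off to the system workqueue instead of running it inline. Below is a minimal, hypothetical module sketch of the same DECLARE_WORK()/schedule_work() pattern; the demo_* names are invented and this is not part of the patch.

/* Hypothetical sketch of deferring a sleeping operation to the system
 * workqueue, mirroring the DECLARE_WORK()/schedule_work() pattern above.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static void demo_flush_task(struct work_struct *work)
{
	/* Runs later on a kworker, in process context, where sleeping is OK. */
	pr_info("demo: deferred flush running in process context\n");
}

static DECLARE_WORK(demo_flush_work, demo_flush_task);

static int __init demo_init(void)
{
	/* Callers that must not sleep just queue the work and return. */
	schedule_work(&demo_flush_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Make sure any queued run has finished before unloading. */
	flush_work(&demo_flush_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

schedule_work() only queues the item; the flush itself runs later on a workqueue thread, which is presumably why the patch adds a separate deferred entry point rather than changing flow_cache_flush() itself.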
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index c71c434a4c05..385aefe53648 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -665,11 +665,14 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
 	if (count) {
 		int i;
 
-		if (count > 1<<30) {
+		if (count > INT_MAX)
+			return -EINVAL;
+		count = roundup_pow_of_two(count);
+		if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table))
+				/ sizeof(struct rps_dev_flow)) {
 			/* Enforce a limit to prevent overflow */
 			return -EINVAL;
 		}
-		count = roundup_pow_of_two(count);
 		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
 		if (!table)
 			return -ENOMEM;
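The rewritten check splits the overflow guard in two: cap count before roundup_pow_of_two() can wrap it, then verify that the rounded count still fits once multiplied by the per-flow entry size. A small, hypothetical userspace sketch of the same arithmetic follows; the demo_* structs and demo_check() are invented stand-ins, not the kernel types.

/* Hypothetical userspace sketch of the two-stage bounds check added above. */
#include <limits.h>
#include <stdio.h>

struct demo_flow { unsigned short cpu; };        /* stand-in for rps_dev_flow */
struct demo_flow_table { unsigned int mask; };   /* stand-in for rps_dev_flow_table */

static int demo_check(unsigned long count)
{
	unsigned long rounded = 1;

	/* Stage 1: cap count so the round-up below cannot wrap. */
	if (count > INT_MAX)
		return -1;
	while (rounded < count)          /* roundup_pow_of_two() equivalent */
		rounded <<= 1;
	/* Stage 2: reject sizes whose header + entries overflow unsigned long. */
	if (rounded > (ULONG_MAX - sizeof(struct demo_flow_table))
			/ sizeof(struct demo_flow))
		return -1;
	return 0;
}

int main(void)
{
	unsigned long samples[] = { 4096, (unsigned long)INT_MAX + 1 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("count %lu -> %s\n", samples[i],
		       demo_check(samples[i]) ? "rejected" : "ok");
	return 0;
}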
diff --git a/net/core/sock.c b/net/core/sock.c
index 4ed7b1d12f5e..b23f174ab84c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -288,11 +288,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 
-	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
-	   number of warnings when compiling with -W --ANK
-	 */
-	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf) {
+	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
 		atomic_inc(&sk->sk_drops);
 		trace_sock_rcvqueue_full(sk, skb);
 		return -ENOMEM;
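The old test added skb->truesize to sk_rmem_alloc before comparing against sk_rcvbuf, so a single packet larger than the receive buffer could be dropped even into an empty queue; the relaxed test only starts dropping once the queue already accounts for at least sk_rcvbuf bytes. A hypothetical userspace sketch contrasting the two predicates (names invented, not kernel code):

/* Hypothetical sketch of the old vs. new admission check in
 * sock_queue_rcv_skb(); a return value of 1 means the packet is dropped.
 */
#include <stdio.h>

static int old_drop(int rmem_alloc, unsigned int truesize, int rcvbuf)
{
	/* old code: truesize counted up front, unsigned comparison */
	return (unsigned int)rmem_alloc + truesize >= (unsigned int)rcvbuf;
}

static int new_drop(int rmem_alloc, int rcvbuf)
{
	/* new code: plain compare, no truesize term */
	return rmem_alloc >= rcvbuf;
}

int main(void)
{
	int rcvbuf = 2048;              /* pretend SO_RCVBUF-derived limit */
	unsigned int big_skb = 4096;    /* truesize larger than rcvbuf */

	/* Empty queue, oversized skb: old check drops it, new check queues it. */
	printf("empty queue, big skb: old=%s new=%s\n",
	       old_drop(0, big_skb, rcvbuf) ? "drop" : "queue",
	       new_drop(0, rcvbuf) ? "drop" : "queue");

	/* Queue already at the limit: both checks drop. */
	printf("full queue:           old=%s new=%s\n",
	       old_drop(rcvbuf, big_skb, rcvbuf) ? "drop" : "queue",
	       new_drop(rcvbuf, rcvbuf) ? "drop" : "queue");
	return 0;
}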