diff options
Diffstat (limited to 'net')
| -rw-r--r-- | net/core/net-sysfs.c | 12 | ||||
| -rw-r--r-- | net/core/netpoll.c | 2 | ||||
| -rw-r--r-- | net/ipv4/fib_trie.c | 13 | ||||
| -rw-r--r-- | net/ipv4/inet_fragment.c | 1 | ||||
| -rw-r--r-- | net/ipv4/tcp_metrics.c | 15 | ||||
| -rw-r--r-- | net/tipc/link.c | 11 |
6 files changed, 22 insertions(+), 32 deletions(-)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 7427ab5e27d8..981fed397d1d 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
| @@ -606,21 +606,11 @@ static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, | |||
| 606 | return sprintf(buf, "%lu\n", val); | 606 | return sprintf(buf, "%lu\n", val); |
| 607 | } | 607 | } |
| 608 | 608 | ||
| 609 | static void rps_dev_flow_table_release_work(struct work_struct *work) | ||
| 610 | { | ||
| 611 | struct rps_dev_flow_table *table = container_of(work, | ||
| 612 | struct rps_dev_flow_table, free_work); | ||
| 613 | |||
| 614 | vfree(table); | ||
| 615 | } | ||
| 616 | |||
| 617 | static void rps_dev_flow_table_release(struct rcu_head *rcu) | 609 | static void rps_dev_flow_table_release(struct rcu_head *rcu) |
| 618 | { | 610 | { |
| 619 | struct rps_dev_flow_table *table = container_of(rcu, | 611 | struct rps_dev_flow_table *table = container_of(rcu, |
| 620 | struct rps_dev_flow_table, rcu); | 612 | struct rps_dev_flow_table, rcu); |
| 621 | 613 | vfree(table); | |
| 622 | INIT_WORK(&table->free_work, rps_dev_flow_table_release_work); | ||
| 623 | schedule_work(&table->free_work); | ||
| 624 | } | 614 | } |
| 625 | 615 | ||
| 626 | static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, | 616 | static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index a5802a8b652f..cec074be8c43 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
| @@ -206,7 +206,7 @@ static void netpoll_poll_dev(struct net_device *dev) | |||
| 206 | * the dev_open/close paths use this to block netpoll activity | 206 | * the dev_open/close paths use this to block netpoll activity |
| 207 | * while changing device state | 207 | * while changing device state |
| 208 | */ | 208 | */ |
| 209 | if (!down_trylock(&ni->dev_lock)) | 209 | if (down_trylock(&ni->dev_lock)) |
| 210 | return; | 210 | return; |
| 211 | 211 | ||
| 212 | if (!netif_running(dev)) { | 212 | if (!netif_running(dev)) { |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index ff06b7543d9f..49616fed9340 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
| @@ -125,7 +125,6 @@ struct tnode { | |||
| 125 | unsigned int empty_children; /* KEYLENGTH bits needed */ | 125 | unsigned int empty_children; /* KEYLENGTH bits needed */ |
| 126 | union { | 126 | union { |
| 127 | struct rcu_head rcu; | 127 | struct rcu_head rcu; |
| 128 | struct work_struct work; | ||
| 129 | struct tnode *tnode_free; | 128 | struct tnode *tnode_free; |
| 130 | }; | 129 | }; |
| 131 | struct rt_trie_node __rcu *child[0]; | 130 | struct rt_trie_node __rcu *child[0]; |
| @@ -383,12 +382,6 @@ static struct tnode *tnode_alloc(size_t size) | |||
| 383 | return vzalloc(size); | 382 | return vzalloc(size); |
| 384 | } | 383 | } |
| 385 | 384 | ||
| 386 | static void __tnode_vfree(struct work_struct *arg) | ||
| 387 | { | ||
| 388 | struct tnode *tn = container_of(arg, struct tnode, work); | ||
| 389 | vfree(tn); | ||
| 390 | } | ||
| 391 | |||
| 392 | static void __tnode_free_rcu(struct rcu_head *head) | 385 | static void __tnode_free_rcu(struct rcu_head *head) |
| 393 | { | 386 | { |
| 394 | struct tnode *tn = container_of(head, struct tnode, rcu); | 387 | struct tnode *tn = container_of(head, struct tnode, rcu); |
| @@ -397,10 +390,8 @@ static void __tnode_free_rcu(struct rcu_head *head) | |||
| 397 | 390 | ||
| 398 | if (size <= PAGE_SIZE) | 391 | if (size <= PAGE_SIZE) |
| 399 | kfree(tn); | 392 | kfree(tn); |
| 400 | else { | 393 | else |
| 401 | INIT_WORK(&tn->work, __tnode_vfree); | 394 | vfree(tn); |
| 402 | schedule_work(&tn->work); | ||
| 403 | } | ||
| 404 | } | 395 | } |
| 405 | 396 | ||
| 406 | static inline void tnode_free(struct tnode *tn) | 397 | static inline void tnode_free(struct tnode *tn) |
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index e97d66a1fdde..7e06641e36ae 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c | |||
| @@ -305,6 +305,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf, | |||
| 305 | setup_timer(&q->timer, f->frag_expire, (unsigned long)q); | 305 | setup_timer(&q->timer, f->frag_expire, (unsigned long)q); |
| 306 | spin_lock_init(&q->lock); | 306 | spin_lock_init(&q->lock); |
| 307 | atomic_set(&q->refcnt, 1); | 307 | atomic_set(&q->refcnt, 1); |
| 308 | INIT_LIST_HEAD(&q->lru_list); | ||
| 308 | 309 | ||
| 309 | return q; | 310 | return q; |
| 310 | } | 311 | } |
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index f696d7c2e9fa..f6a005c485a9 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c | |||
| @@ -96,7 +96,8 @@ struct tcpm_hash_bucket { | |||
| 96 | 96 | ||
| 97 | static DEFINE_SPINLOCK(tcp_metrics_lock); | 97 | static DEFINE_SPINLOCK(tcp_metrics_lock); |
| 98 | 98 | ||
| 99 | static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst) | 99 | static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst, |
| 100 | bool fastopen_clear) | ||
| 100 | { | 101 | { |
| 101 | u32 val; | 102 | u32 val; |
| 102 | 103 | ||
| @@ -122,9 +123,11 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst) | |||
| 122 | tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING); | 123 | tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING); |
| 123 | tm->tcpm_ts = 0; | 124 | tm->tcpm_ts = 0; |
| 124 | tm->tcpm_ts_stamp = 0; | 125 | tm->tcpm_ts_stamp = 0; |
| 125 | tm->tcpm_fastopen.mss = 0; | 126 | if (fastopen_clear) { |
| 126 | tm->tcpm_fastopen.syn_loss = 0; | 127 | tm->tcpm_fastopen.mss = 0; |
| 127 | tm->tcpm_fastopen.cookie.len = 0; | 128 | tm->tcpm_fastopen.syn_loss = 0; |
| 129 | tm->tcpm_fastopen.cookie.len = 0; | ||
| 130 | } | ||
| 128 | } | 131 | } |
| 129 | 132 | ||
| 130 | static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst, | 133 | static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst, |
| @@ -154,7 +157,7 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst, | |||
| 154 | } | 157 | } |
| 155 | tm->tcpm_addr = *addr; | 158 | tm->tcpm_addr = *addr; |
| 156 | 159 | ||
| 157 | tcpm_suck_dst(tm, dst); | 160 | tcpm_suck_dst(tm, dst, true); |
| 158 | 161 | ||
| 159 | if (likely(!reclaim)) { | 162 | if (likely(!reclaim)) { |
| 160 | tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain; | 163 | tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain; |
| @@ -171,7 +174,7 @@ out_unlock: | |||
| 171 | static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst) | 174 | static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst) |
| 172 | { | 175 | { |
| 173 | if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT))) | 176 | if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT))) |
| 174 | tcpm_suck_dst(tm, dst); | 177 | tcpm_suck_dst(tm, dst, false); |
| 175 | } | 178 | } |
| 176 | 179 | ||
| 177 | #define TCP_METRICS_RECLAIM_DEPTH 5 | 180 | #define TCP_METRICS_RECLAIM_DEPTH 5 |
diff --git a/net/tipc/link.c b/net/tipc/link.c index daa6080a2a0c..a80feee5197a 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
| @@ -2306,8 +2306,11 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr, | |||
| 2306 | struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf); | 2306 | struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf); |
| 2307 | u32 msg_typ = msg_type(tunnel_msg); | 2307 | u32 msg_typ = msg_type(tunnel_msg); |
| 2308 | u32 msg_count = msg_msgcnt(tunnel_msg); | 2308 | u32 msg_count = msg_msgcnt(tunnel_msg); |
| 2309 | u32 bearer_id = msg_bearer_id(tunnel_msg); | ||
| 2309 | 2310 | ||
| 2310 | dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)]; | 2311 | if (bearer_id >= MAX_BEARERS) |
| 2312 | goto exit; | ||
| 2313 | dest_link = (*l_ptr)->owner->links[bearer_id]; | ||
| 2311 | if (!dest_link) | 2314 | if (!dest_link) |
| 2312 | goto exit; | 2315 | goto exit; |
| 2313 | if (dest_link == *l_ptr) { | 2316 | if (dest_link == *l_ptr) { |
| @@ -2521,14 +2524,16 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, | |||
| 2521 | struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm); | 2524 | struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm); |
| 2522 | u32 msg_sz = msg_size(imsg); | 2525 | u32 msg_sz = msg_size(imsg); |
| 2523 | u32 fragm_sz = msg_data_sz(fragm); | 2526 | u32 fragm_sz = msg_data_sz(fragm); |
| 2524 | u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz); | 2527 | u32 exp_fragm_cnt; |
| 2525 | u32 max = TIPC_MAX_USER_MSG_SIZE + NAMED_H_SIZE; | 2528 | u32 max = TIPC_MAX_USER_MSG_SIZE + NAMED_H_SIZE; |
| 2529 | |||
| 2526 | if (msg_type(imsg) == TIPC_MCAST_MSG) | 2530 | if (msg_type(imsg) == TIPC_MCAST_MSG) |
| 2527 | max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE; | 2531 | max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE; |
| 2528 | if (msg_size(imsg) > max) { | 2532 | if (fragm_sz == 0 || msg_size(imsg) > max) { |
| 2529 | kfree_skb(fbuf); | 2533 | kfree_skb(fbuf); |
| 2530 | return 0; | 2534 | return 0; |
| 2531 | } | 2535 | } |
| 2536 | exp_fragm_cnt = msg_sz / fragm_sz + !!(msg_sz % fragm_sz); | ||
| 2532 | pbuf = tipc_buf_acquire(msg_size(imsg)); | 2537 | pbuf = tipc_buf_acquire(msg_size(imsg)); |
| 2533 | if (pbuf != NULL) { | 2538 | if (pbuf != NULL) { |
| 2534 | pbuf->next = *pending; | 2539 | pbuf->next = *pending; |
