about | summary | refs | log | tree | commit | diff | stats
path: root/net/netlink
diff options
context:
space:
mode:
author    Pravin B Shelar <pshelar@nicira.com>    2013-08-15 18:31:06 -0400
committer David S. Miller <davem@davemloft.net>   2013-08-15 18:51:20 -0400
commit    16b304f3404f8e0243d5ee2b70b68767b7b59b2b (patch)
tree      b18d6ba0555291573c237dea733a39f03b6b5192 /net/netlink
parent    de98ac5eee037a7484ee9b87eae2e0fb67bdd3ec (diff)
netlink: Eliminate kmalloc in netlink dump operation.

Following patch stores struct netlink_callback in netlink_sock to avoid
allocating and freeing it on every netlink dump msg. Only one dump
operation is allowed for a given socket at a time, therefore we can
safely convert the cb pointer to a cb struct inside netlink_sock.

Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/netlink')
-rw-r--r--  net/netlink/af_netlink.c  | 97
-rw-r--r--  net/netlink/af_netlink.h  |  3
2 files changed, 44 insertions(+), 56 deletions(-)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 6273772aa30c..a17dda1bbee0 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -595,7 +595,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
595 * for dumps is performed here. A dump is allowed to continue 595 * for dumps is performed here. A dump is allowed to continue
596 * if at least half the ring is unused. 596 * if at least half the ring is unused.
597 */ 597 */
598 while (nlk->cb != NULL && netlink_dump_space(nlk)) { 598 while (nlk->cb_running && netlink_dump_space(nlk)) {
599 err = netlink_dump(sk); 599 err = netlink_dump(sk);
600 if (err < 0) { 600 if (err < 0) {
601 sk->sk_err = err; 601 sk->sk_err = err;
@@ -802,18 +802,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
802#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb) 0 802#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb) 0
803#endif /* CONFIG_NETLINK_MMAP */ 803#endif /* CONFIG_NETLINK_MMAP */
804 804
805static void netlink_destroy_callback(struct netlink_callback *cb)
806{
807 kfree_skb(cb->skb);
808 kfree(cb);
809}
810
811static void netlink_consume_callback(struct netlink_callback *cb)
812{
813 consume_skb(cb->skb);
814 kfree(cb);
815}
816
817static void netlink_skb_destructor(struct sk_buff *skb) 805static void netlink_skb_destructor(struct sk_buff *skb)
818{ 806{
819#ifdef CONFIG_NETLINK_MMAP 807#ifdef CONFIG_NETLINK_MMAP
@@ -872,12 +860,12 @@ static void netlink_sock_destruct(struct sock *sk)
872{ 860{
873 struct netlink_sock *nlk = nlk_sk(sk); 861 struct netlink_sock *nlk = nlk_sk(sk);
874 862
875 if (nlk->cb) { 863 if (nlk->cb_running) {
876 if (nlk->cb->done) 864 if (nlk->cb.done)
877 nlk->cb->done(nlk->cb); 865 nlk->cb.done(&nlk->cb);
878 866
879 module_put(nlk->cb->module); 867 module_put(nlk->cb.module);
880 netlink_destroy_callback(nlk->cb); 868 kfree_skb(nlk->cb.skb);
881 } 869 }
882 870
883 skb_queue_purge(&sk->sk_receive_queue); 871 skb_queue_purge(&sk->sk_receive_queue);
@@ -2350,7 +2338,8 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
2350 2338
2351 skb_free_datagram(sk, skb); 2339 skb_free_datagram(sk, skb);
2352 2340
2353 if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { 2341 if (nlk->cb_running &&
2342 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
2354 ret = netlink_dump(sk); 2343 ret = netlink_dump(sk);
2355 if (ret) { 2344 if (ret) {
2356 sk->sk_err = ret; 2345 sk->sk_err = ret;
@@ -2566,13 +2555,12 @@ static int netlink_dump(struct sock *sk)
2566 int alloc_size; 2555 int alloc_size;
2567 2556
2568 mutex_lock(nlk->cb_mutex); 2557 mutex_lock(nlk->cb_mutex);
2569 2558 if (!nlk->cb_running) {
2570 cb = nlk->cb;
2571 if (cb == NULL) {
2572 err = -EINVAL; 2559 err = -EINVAL;
2573 goto errout_skb; 2560 goto errout_skb;
2574 } 2561 }
2575 2562
2563 cb = &nlk->cb;
2576 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE); 2564 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2577 2565
2578 if (!netlink_rx_is_mmaped(sk) && 2566 if (!netlink_rx_is_mmaped(sk) &&
@@ -2610,11 +2598,11 @@ static int netlink_dump(struct sock *sk)
2610 2598
2611 if (cb->done) 2599 if (cb->done)
2612 cb->done(cb); 2600 cb->done(cb);
2613 nlk->cb = NULL;
2614 mutex_unlock(nlk->cb_mutex);
2615 2601
2602 nlk->cb_running = false;
2603 mutex_unlock(nlk->cb_mutex);
2616 module_put(cb->module); 2604 module_put(cb->module);
2617 netlink_consume_callback(cb); 2605 consume_skb(cb->skb);
2618 return 0; 2606 return 0;
2619 2607
2620errout_skb: 2608errout_skb:
@@ -2632,59 +2620,51 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2632 struct netlink_sock *nlk; 2620 struct netlink_sock *nlk;
2633 int ret; 2621 int ret;
2634 2622
2635 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
2636 if (cb == NULL)
2637 return -ENOBUFS;
2638
2639 /* Memory mapped dump requests need to be copied to avoid looping 2623 /* Memory mapped dump requests need to be copied to avoid looping
2640 * on the pending state in netlink_mmap_sendmsg() while the CB hold 2624 * on the pending state in netlink_mmap_sendmsg() while the CB hold
2641 * a reference to the skb. 2625 * a reference to the skb.
2642 */ 2626 */
2643 if (netlink_skb_is_mmaped(skb)) { 2627 if (netlink_skb_is_mmaped(skb)) {
2644 skb = skb_copy(skb, GFP_KERNEL); 2628 skb = skb_copy(skb, GFP_KERNEL);
2645 if (skb == NULL) { 2629 if (skb == NULL)
2646 kfree(cb);
2647 return -ENOBUFS; 2630 return -ENOBUFS;
2648 }
2649 } else 2631 } else
2650 atomic_inc(&skb->users); 2632 atomic_inc(&skb->users);
2651 2633
2652 cb->dump = control->dump;
2653 cb->done = control->done;
2654 cb->nlh = nlh;
2655 cb->data = control->data;
2656 cb->module = control->module;
2657 cb->min_dump_alloc = control->min_dump_alloc;
2658 cb->skb = skb;
2659
2660 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid); 2634 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2661 if (sk == NULL) { 2635 if (sk == NULL) {
2662 netlink_destroy_callback(cb); 2636 ret = -ECONNREFUSED;
2663 return -ECONNREFUSED; 2637 goto error_free;
2664 } 2638 }
2665 nlk = nlk_sk(sk);
2666 2639
2640 nlk = nlk_sk(sk);
2667 mutex_lock(nlk->cb_mutex); 2641 mutex_lock(nlk->cb_mutex);
2668 /* A dump is in progress... */ 2642 /* A dump is in progress... */
2669 if (nlk->cb) { 2643 if (nlk->cb_running) {
2670 mutex_unlock(nlk->cb_mutex);
2671 netlink_destroy_callback(cb);
2672 ret = -EBUSY; 2644 ret = -EBUSY;
2673 goto out; 2645 goto error_unlock;
2674 } 2646 }
2675 /* add reference of module which cb->dump belongs to */ 2647 /* add reference of module which cb->dump belongs to */
2676 if (!try_module_get(cb->module)) { 2648 if (!try_module_get(control->module)) {
2677 mutex_unlock(nlk->cb_mutex);
2678 netlink_destroy_callback(cb);
2679 ret = -EPROTONOSUPPORT; 2649 ret = -EPROTONOSUPPORT;
2680 goto out; 2650 goto error_unlock;
2681 } 2651 }
2682 2652
2683 nlk->cb = cb; 2653 cb = &nlk->cb;
2654 memset(cb, 0, sizeof(*cb));
2655 cb->dump = control->dump;
2656 cb->done = control->done;
2657 cb->nlh = nlh;
2658 cb->data = control->data;
2659 cb->module = control->module;
2660 cb->min_dump_alloc = control->min_dump_alloc;
2661 cb->skb = skb;
2662
2663 nlk->cb_running = true;
2664
2684 mutex_unlock(nlk->cb_mutex); 2665 mutex_unlock(nlk->cb_mutex);
2685 2666
2686 ret = netlink_dump(sk); 2667 ret = netlink_dump(sk);
2687out:
2688 sock_put(sk); 2668 sock_put(sk);
2689 2669
2690 if (ret) 2670 if (ret)
@@ -2694,6 +2674,13 @@ out:
2694 * signal not to send ACK even if it was requested. 2674 * signal not to send ACK even if it was requested.
2695 */ 2675 */
2696 return -EINTR; 2676 return -EINTR;
2677
2678error_unlock:
2679 sock_put(sk);
2680 mutex_unlock(nlk->cb_mutex);
2681error_free:
2682 kfree_skb(skb);
2683 return ret;
2697} 2684}
2698EXPORT_SYMBOL(__netlink_dump_start); 2685EXPORT_SYMBOL(__netlink_dump_start);
2699 2686
@@ -2916,14 +2903,14 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
2916 struct sock *s = v; 2903 struct sock *s = v;
2917 struct netlink_sock *nlk = nlk_sk(s); 2904 struct netlink_sock *nlk = nlk_sk(s);
2918 2905
2919 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %pK %-8d %-8d %-8lu\n", 2906 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
2920 s, 2907 s,
2921 s->sk_protocol, 2908 s->sk_protocol,
2922 nlk->portid, 2909 nlk->portid,
2923 nlk->groups ? (u32)nlk->groups[0] : 0, 2910 nlk->groups ? (u32)nlk->groups[0] : 0,
2924 sk_rmem_alloc_get(s), 2911 sk_rmem_alloc_get(s),
2925 sk_wmem_alloc_get(s), 2912 sk_wmem_alloc_get(s),
2926 nlk->cb, 2913 nlk->cb_running,
2927 atomic_read(&s->sk_refcnt), 2914 atomic_read(&s->sk_refcnt),
2928 atomic_read(&s->sk_drops), 2915 atomic_read(&s->sk_drops),
2929 sock_i_ino(s) 2916 sock_i_ino(s)
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index eaa88d187cdc..acbd774eeb7c 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -32,7 +32,8 @@ struct netlink_sock {
32 unsigned long *groups; 32 unsigned long *groups;
33 unsigned long state; 33 unsigned long state;
34 wait_queue_head_t wait; 34 wait_queue_head_t wait;
35 struct netlink_callback *cb; 35 bool cb_running;
36 struct netlink_callback cb;
36 struct mutex *cb_mutex; 37 struct mutex *cb_mutex;
37 struct mutex cb_def_mutex; 38 struct mutex cb_def_mutex;
38 void (*netlink_rcv)(struct sk_buff *skb); 39 void (*netlink_rcv)(struct sk_buff *skb);