author		Patrick McHardy <kaber@trash.net>	2013-04-17 02:47:05 -0400
committer	David S. Miller <davem@davemloft.net>	2013-04-19 14:57:58 -0400
commit		cd1df525da59c64244d27b4548ff5d132489488a (patch)
tree		13cdf72588bd1124daf8bb74f8d1d27ab060ca2b /net/netlink/af_netlink.c
parent		f9c2288837ba072b21dba955f04a4c97eaa77b1e (diff)
netlink: add flow control for memory mapped I/O
Add flow control for memory mapped RX. Since user-space usually doesn't
invoke recvmsg() when using memory mapped I/O, flow control is performed
in netlink_poll(). Dumps are allowed to continue if at least half of the
ring frames are unused.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
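To see why the hook lands in netlink_poll(): a memory mapped reader consumes
frames directly from the ring and, on the fast path, its only trip into the
kernel is poll(). Below is a minimal user-space sketch of such a reader,
assuming the NETLINK_RX_RING interface added earlier in this series
(CONFIG_NETLINK_MMAP, per Documentation/networking/netlink_mmap.txt; the
interface was removed from later mainline kernels). The ring geometry and the
NETLINK_ROUTE protocol are arbitrary example choices, and NL_MMAP_STATUS_COPY
and error handling are omitted:

/* Sketch only: requires a kernel with CONFIG_NETLINK_MMAP from this era. */
#include <linux/netlink.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <poll.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	unsigned int block_size = 16 * getpagesize();
	struct nl_mmap_req req = {
		.nm_block_size	= block_size,
		.nm_block_nr	= 64,
		.nm_frame_size	= 16384,
		.nm_frame_nr	= 64 * block_size / 16384,
	};
	unsigned int ring_size = req.nm_block_nr * req.nm_block_size;
	unsigned int frame_offset = 0;
	unsigned char *rx_ring;
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		exit(1);
	if (setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req)) < 0)
		exit(1);
	rx_ring = mmap(NULL, ring_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (rx_ring == MAP_FAILED)
		exit(1);

	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLERR };

		/* The only syscall on the receive path; with this patch,
		 * netlink_poll() also uses it to advance pending dumps. */
		if (poll(&pfd, 1, -1) < 0)
			exit(1);

		/* Drain valid frames, releasing each back to the kernel so
		 * the "half the ring unused" test can succeed again. */
		for (;;) {
			struct nl_mmap_hdr *hdr =
				(struct nl_mmap_hdr *)(rx_ring + frame_offset);

			if (hdr->nm_status != NL_MMAP_STATUS_VALID)
				break;	/* COPY frames (recvmsg() path) omitted */
			/* Payload at (unsigned char *)hdr + NL_MMAP_HDRLEN,
			 * length hdr->nm_len; processing omitted. */
			hdr->nm_status = NL_MMAP_STATUS_UNUSED;
			frame_offset = (frame_offset + req.nm_frame_size) % ring_size;
		}
	}
}

Each frame the reader flips back to NL_MMAP_STATUS_UNUSED is what
netlink_dump_space() in the patch below probes for before letting a dump
continue.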
Diffstat (limited to 'net/netlink/af_netlink.c')
-rw-r--r--	net/netlink/af_netlink.c	88
1 file changed, 61 insertions(+), 27 deletions(-)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index d120b5d4d86a..2a3e9ba814c4 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -3,6 +3,7 @@
  *
  * Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
  * 		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+ * 		Patrick McHardy <kaber@trash.net>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -110,6 +111,29 @@ static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u
 	return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
 }
 
+static void netlink_overrun(struct sock *sk)
+{
+	struct netlink_sock *nlk = nlk_sk(sk);
+
+	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
+		if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
+			sk->sk_err = ENOBUFS;
+			sk->sk_error_report(sk);
+		}
+	}
+	atomic_inc(&sk->sk_drops);
+}
+
+static void netlink_rcv_wake(struct sock *sk)
+{
+	struct netlink_sock *nlk = nlk_sk(sk);
+
+	if (skb_queue_empty(&sk->sk_receive_queue))
+		clear_bit(NETLINK_CONGESTED, &nlk->state);
+	if (!test_bit(NETLINK_CONGESTED, &nlk->state))
+		wake_up_interruptible(&nlk->wait);
+}
+
 #ifdef CONFIG_NETLINK_MMAP
 static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
 {
@@ -441,15 +465,48 @@ static void netlink_forward_ring(struct netlink_ring *ring)
 	} while (ring->head != head);
 }
 
+static bool netlink_dump_space(struct netlink_sock *nlk)
+{
+	struct netlink_ring *ring = &nlk->rx_ring;
+	struct nl_mmap_hdr *hdr;
+	unsigned int n;
+
+	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
+	if (hdr == NULL)
+		return false;
+
+	n = ring->head + ring->frame_max / 2;
+	if (n > ring->frame_max)
+		n -= ring->frame_max;
+
+	hdr = __netlink_lookup_frame(ring, n);
+
+	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
+}
+
 static unsigned int netlink_poll(struct file *file, struct socket *sock,
 				 poll_table *wait)
 {
 	struct sock *sk = sock->sk;
 	struct netlink_sock *nlk = nlk_sk(sk);
 	unsigned int mask;
+	int err;
 
-	if (nlk->cb != NULL && nlk->rx_ring.pg_vec != NULL)
-		netlink_dump(sk);
+	if (nlk->rx_ring.pg_vec != NULL) {
+		/* Memory mapped sockets don't call recvmsg(), so flow control
+		 * for dumps is performed here. A dump is allowed to continue
+		 * if at least half the ring is unused.
+		 */
+		while (nlk->cb != NULL && netlink_dump_space(nlk)) {
+			err = netlink_dump(sk);
+			if (err < 0) {
+				sk->sk_err = err;
+				sk->sk_error_report(sk);
+				break;
+			}
+		}
+		netlink_rcv_wake(sk);
+	}
 
 	mask = datagram_poll(file, sock, wait);
 
@@ -623,8 +680,7 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
 	if (hdr == NULL) {
 		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		kfree_skb(skb);
-		sk->sk_err = ENOBUFS;
-		sk->sk_error_report(sk);
+		netlink_overrun(sk);
 		return;
 	}
 	netlink_increment_head(ring);
@@ -1329,19 +1385,6 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
 	return 0;
 }
 
-static void netlink_overrun(struct sock *sk)
-{
-	struct netlink_sock *nlk = nlk_sk(sk);
-
-	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
-		if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
-			sk->sk_err = ENOBUFS;
-			sk->sk_error_report(sk);
-		}
-	}
-	atomic_inc(&sk->sk_drops);
-}
-
 static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
 {
 	struct sock *sock;
@@ -1484,16 +1527,6 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
 	return skb;
 }
 
-static void netlink_rcv_wake(struct sock *sk)
-{
-	struct netlink_sock *nlk = nlk_sk(sk);
-
-	if (skb_queue_empty(&sk->sk_receive_queue))
-		clear_bit(NETLINK_CONGESTED, &nlk->state);
-	if (!test_bit(NETLINK_CONGESTED, &nlk->state))
-		wake_up_interruptible(&nlk->wait);
-}
-
 static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
 				  struct sock *ssk)
 {
@@ -1597,6 +1630,7 @@ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
 err2:
 	kfree_skb(skb);
 	spin_unlock_bh(&sk->sk_receive_queue.lock);
+	netlink_overrun(sk);
 err1:
 	sock_put(sk);
 	return NULL;