Diffstat (limited to 'net')
-rw-r--r--  net/appletalk/atalk_proc.c               |    4
-rw-r--r--  net/appletalk/ddp.c                      |    9
-rw-r--r--  net/atm/common.c                         |   12
-rw-r--r--  net/atm/ioctl.c                          |    3
-rw-r--r--  net/atm/proc.c                           |    4
-rw-r--r--  net/atm/raw.c                            |    2
-rw-r--r--  net/ax25/af_ax25.c                       |   14
-rw-r--r--  net/bluetooth/af_bluetooth.c             |    2
-rw-r--r--  net/core/dev.c                           |  130
-rw-r--r--  net/core/skbuff.c                        |   13
-rw-r--r--  net/decnet/af_decnet.c                   |    2
-rw-r--r--  net/econet/af_econet.c                   |    6
-rw-r--r--  net/ieee802154/af_ieee802154.c           |   12
-rw-r--r--  net/ieee802154/dgram.c                   |    3
-rw-r--r--  net/ipv4/fib_trie.c                      |   24
-rw-r--r--  net/ipv4/inet_diag.c                     |    4
-rw-r--r--  net/ipv4/raw.c                           |    7
-rw-r--r--  net/ipv4/route.c                         |   14
-rw-r--r--  net/ipv4/udp.c                           |    7
-rw-r--r--  net/ipv6/raw.c                           |    7
-rw-r--r--  net/ipv6/udp.c                           |    4
-rw-r--r--  net/ipx/af_ipx.c                         |    2
-rw-r--r--  net/ipx/ipx_proc.c                       |    4
-rw-r--r--  net/irda/af_irda.c                       |    3
-rw-r--r--  net/iucv/af_iucv.c                       |  297
-rw-r--r--  net/key/af_key.c                         |    4
-rw-r--r--  net/llc/llc_proc.c                       |    4
-rw-r--r--  net/mac80211/debugfs.c                   |   25
-rw-r--r--  net/mac80211/ieee80211_i.h               |    2
-rw-r--r--  net/mac80211/mlme.c                      |   38
-rw-r--r--  net/mac80211/util.c                      |   25
-rw-r--r--  net/mac80211/wext.c                      |   31
-rw-r--r--  net/netlink/af_netlink.c                 |    4
-rw-r--r--  net/netrom/af_netrom.c                   |    9
-rw-r--r--  net/packet/af_packet.c                   |    3
-rw-r--r--  net/rfkill/core.c                        |   56
-rw-r--r--  net/rose/af_rose.c                       |   10
-rw-r--r--  net/sched/act_police.c                   |    4
-rw-r--r--  net/sched/em_meta.c                      |    4
-rw-r--r--  net/sctp/socket.c                        |    4
-rw-r--r--  net/sunrpc/Makefile                      |    1
-rw-r--r--  net/sunrpc/backchannel_rqst.c            |  281
-rw-r--r--  net/sunrpc/bc_svc.c                      |   81
-rw-r--r--  net/sunrpc/cache.c                       |    2
-rw-r--r--  net/sunrpc/clnt.c                        |  143
-rw-r--r--  net/sunrpc/sched.c                       |    2
-rw-r--r--  net/sunrpc/stats.c                       |    8
-rw-r--r--  net/sunrpc/sunrpc.h                      |   37
-rw-r--r--  net/sunrpc/svc.c                         |  134
-rw-r--r--  net/sunrpc/svc_xprt.c                    |   57
-rw-r--r--  net/sunrpc/svcsock.c                     |  161
-rw-r--r--  net/sunrpc/xprt.c                        |   60
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c  |    8
-rw-r--r--  net/sunrpc/xprtsock.c                    |  217
-rw-r--r--  net/unix/af_unix.c                       |    2
-rw-r--r--  net/wireless/nl80211.c                   |   95
-rw-r--r--  net/x25/af_x25.c                         |   30
-rw-r--r--  net/x25/x25_proc.c                       |    4
-rw-r--r--  net/x25/x25_timer.c                      |    2
59 files changed, 1629 insertions(+), 508 deletions(-)
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index fd8e0847b254..80caad1a31a5 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -204,8 +204,8 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v)
204 "%02X %d\n", 204 "%02X %d\n",
205 s->sk_type, ntohs(at->src_net), at->src_node, at->src_port, 205 s->sk_type, ntohs(at->src_net), at->src_node, at->src_port,
206 ntohs(at->dest_net), at->dest_node, at->dest_port, 206 ntohs(at->dest_net), at->dest_node, at->dest_port,
207 atomic_read(&s->sk_wmem_alloc), 207 sk_wmem_alloc_get(s),
208 atomic_read(&s->sk_rmem_alloc), 208 sk_rmem_alloc_get(s),
209 s->sk_state, SOCK_INODE(s->sk_socket)->i_uid); 209 s->sk_state, SOCK_INODE(s->sk_socket)->i_uid);
210out: 210out:
211 return 0; 211 return 0;
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index b603cbacdc58..590b83963622 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -162,8 +162,7 @@ static void atalk_destroy_timer(unsigned long data)
 {
 	struct sock *sk = (struct sock *)data;
 
-	if (atomic_read(&sk->sk_wmem_alloc) ||
-	    atomic_read(&sk->sk_rmem_alloc)) {
+	if (sk_has_allocations(sk)) {
 		sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;
 		add_timer(&sk->sk_timer);
 	} else
@@ -175,8 +174,7 @@ static inline void atalk_destroy_socket(struct sock *sk)
 	atalk_remove_socket(sk);
 	skb_queue_purge(&sk->sk_receive_queue);
 
-	if (atomic_read(&sk->sk_wmem_alloc) ||
-	    atomic_read(&sk->sk_rmem_alloc)) {
+	if (sk_has_allocations(sk)) {
 		setup_timer(&sk->sk_timer, atalk_destroy_timer,
 			    (unsigned long)sk);
 		sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;
@@ -1750,8 +1748,7 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 	switch (cmd) {
 	/* Protocol layer */
 	case TIOCOUTQ: {
-		long amount = sk->sk_sndbuf -
-			      atomic_read(&sk->sk_wmem_alloc);
+		long amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
 
 		if (amount < 0)
 			amount = 0;
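
For orientation: the sk_wmem_alloc_get()/sk_rmem_alloc_get()/sk_has_allocations() calls that these hunks convert to are small include/net/sock.h helpers added by this series. A sketch matching the helpers as first merged (later kernels subtract a one-reference bias from sk_wmem_alloc, so treat the exact bodies as approximate):

	static inline int sk_wmem_alloc_get(const struct sock *sk)
	{
		/* bytes of skb data queued for transmit on this socket */
		return atomic_read(&sk->sk_wmem_alloc);
	}

	static inline int sk_rmem_alloc_get(const struct sock *sk)
	{
		/* bytes of skb data sitting in the receive queue */
		return atomic_read(&sk->sk_rmem_alloc);
	}

	static inline int sk_has_allocations(const struct sock *sk)
	{
		return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
	}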
diff --git a/net/atm/common.c b/net/atm/common.c
index d34edbe754c8..c1c97936192c 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -62,15 +62,15 @@ static struct sk_buff *alloc_tx(struct atm_vcc *vcc,unsigned int size)
 	struct sk_buff *skb;
 	struct sock *sk = sk_atm(vcc);
 
-	if (atomic_read(&sk->sk_wmem_alloc) && !atm_may_send(vcc, size)) {
+	if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) {
 		pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n",
-			 atomic_read(&sk->sk_wmem_alloc), size,
+			 sk_wmem_alloc_get(sk), size,
 			 sk->sk_sndbuf);
 		return NULL;
 	}
-	while (!(skb = alloc_skb(size,GFP_KERNEL))) schedule();
-	pr_debug("AlTx %d += %d\n", atomic_read(&sk->sk_wmem_alloc),
-		 skb->truesize);
+	while (!(skb = alloc_skb(size, GFP_KERNEL)))
+		schedule();
+	pr_debug("AlTx %d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
 	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
 	return skb;
 }
@@ -145,7 +145,7 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
 	memset(&vcc->local,0,sizeof(struct sockaddr_atmsvc));
 	memset(&vcc->remote,0,sizeof(struct sockaddr_atmsvc));
 	vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
-	atomic_set(&sk->sk_wmem_alloc, 0);
+	atomic_set(&sk->sk_wmem_alloc, 1);
 	atomic_set(&sk->sk_rmem_alloc, 0);
 	vcc->push = NULL;
 	vcc->pop = NULL;
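
Note on the atomic_set(&sk->sk_wmem_alloc, 1) change above: seeding the write-memory counter with one byte lets it double as a transmit reference, so the socket cannot be freed while skbs it sent are still in flight. A rough sketch of the matching release path (an illustration of the idea, not the exact sock_wfree() from the tree):

	static void example_wfree(struct sk_buff *skb)
	{
		struct sock *sk = skb->sk;

		/* drop this skb's share of sk_wmem_alloc; the final
		 * decrement clears the implicit reference and frees sk */
		if (atomic_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
			__sk_free(sk);
	}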
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index 76ed3c8d26e6..4da8892ced5f 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -63,8 +63,7 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg
 			error = -EINVAL;
 			goto done;
 		}
-		error = put_user(sk->sk_sndbuf -
-				 atomic_read(&sk->sk_wmem_alloc),
+		error = put_user(sk->sk_sndbuf - sk_wmem_alloc_get(sk),
 				 (int __user *) argp) ? -EFAULT : 0;
 		goto done;
 	case SIOCINQ:
diff --git a/net/atm/proc.c b/net/atm/proc.c
index e7b3b273907d..38de5ff61ecd 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -204,8 +204,8 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
204 seq_printf(seq, "%3d", sk->sk_family); 204 seq_printf(seq, "%3d", sk->sk_family);
205 } 205 }
206 seq_printf(seq, " %04lx %5d %7d/%7d %7d/%7d [%d]\n", vcc->flags, sk->sk_err, 206 seq_printf(seq, " %04lx %5d %7d/%7d %7d/%7d [%d]\n", vcc->flags, sk->sk_err,
207 atomic_read(&sk->sk_wmem_alloc), sk->sk_sndbuf, 207 sk_wmem_alloc_get(sk), sk->sk_sndbuf,
208 atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf, 208 sk_rmem_alloc_get(sk), sk->sk_rcvbuf,
209 atomic_read(&sk->sk_refcnt)); 209 atomic_read(&sk->sk_refcnt));
210} 210}
211 211
diff --git a/net/atm/raw.c b/net/atm/raw.c
index b0a2d8cb6744..cbfcc71a17b1 100644
--- a/net/atm/raw.c
+++ b/net/atm/raw.c
@@ -33,7 +33,7 @@ static void atm_pop_raw(struct atm_vcc *vcc,struct sk_buff *skb)
 	struct sock *sk = sk_atm(vcc);
 
 	pr_debug("APopR (%d) %d -= %d\n", vcc->vci,
-		 atomic_read(&sk->sk_wmem_alloc), skb->truesize);
+		 sk_wmem_alloc_get(sk), skb->truesize);
 	atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
 	dev_kfree_skb_any(skb);
 	sk->sk_write_space(sk);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index fd9d06f291dc..da0f64f82b57 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -330,8 +330,7 @@ void ax25_destroy_socket(ax25_cb *ax25)
 	}
 
 	if (ax25->sk != NULL) {
-		if (atomic_read(&ax25->sk->sk_wmem_alloc) ||
-		    atomic_read(&ax25->sk->sk_rmem_alloc)) {
+		if (sk_has_allocations(ax25->sk)) {
 			/* Defer: outstanding buffers */
 			setup_timer(&ax25->dtimer, ax25_destroy_timer,
 					(unsigned long)ax25);
@@ -1691,7 +1690,8 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 	switch (cmd) {
 	case TIOCOUTQ: {
 		long amount;
-		amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
+
+		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
 		if (amount < 0)
 			amount = 0;
 		res = put_user(amount, (int __user *)argp);
@@ -1781,8 +1781,8 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 		ax25_info.idletimer = ax25_display_timer(&ax25->idletimer) / (60 * HZ);
 		ax25_info.n2count   = ax25->n2count;
 		ax25_info.state     = ax25->state;
-		ax25_info.rcv_q     = atomic_read(&sk->sk_rmem_alloc);
-		ax25_info.snd_q     = atomic_read(&sk->sk_wmem_alloc);
+		ax25_info.rcv_q     = sk_wmem_alloc_get(sk);
+		ax25_info.snd_q     = sk_rmem_alloc_get(sk);
 		ax25_info.vs        = ax25->vs;
 		ax25_info.vr        = ax25->vr;
 		ax25_info.va        = ax25->va;
@@ -1922,8 +1922,8 @@ static int ax25_info_show(struct seq_file *seq, void *v)
 
 	if (ax25->sk != NULL) {
 		seq_printf(seq, " %d %d %lu\n",
-			   atomic_read(&ax25->sk->sk_wmem_alloc),
-			   atomic_read(&ax25->sk->sk_rmem_alloc),
+			   sk_wmem_alloc_get(ax25->sk),
+			   sk_rmem_alloc_get(ax25->sk),
 			   sock_i_ino(ax25->sk));
 	} else {
 		seq_puts(seq, " * * *\n");
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 02b9baa1930b..0250e0600150 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -337,7 +337,7 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 		if (sk->sk_state == BT_LISTEN)
 			return -EINVAL;
 
-		amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
+		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
 		if (amount < 0)
 			amount = 0;
 		err = put_user(amount, (int __user *) arg);
diff --git a/net/core/dev.c b/net/core/dev.c
index 576a61574a93..baf2dc13a34a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3461,10 +3461,10 @@ void __dev_set_rx_mode(struct net_device *dev)
 	/* Unicast addresses changes may only happen under the rtnl,
 	 * therefore calling __dev_set_promiscuity here is safe.
 	 */
-	if (dev->uc_count > 0 && !dev->uc_promisc) {
+	if (dev->uc.count > 0 && !dev->uc_promisc) {
 		__dev_set_promiscuity(dev, 1);
 		dev->uc_promisc = 1;
-	} else if (dev->uc_count == 0 && dev->uc_promisc) {
+	} else if (dev->uc.count == 0 && dev->uc_promisc) {
 		__dev_set_promiscuity(dev, -1);
 		dev->uc_promisc = 0;
 	}
@@ -3483,9 +3483,8 @@ void dev_set_rx_mode(struct net_device *dev)
 
 /* hw addresses list handling functions */
 
-static int __hw_addr_add(struct list_head *list, int *delta,
-			 unsigned char *addr, int addr_len,
-			 unsigned char addr_type)
+static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
+			 int addr_len, unsigned char addr_type)
 {
 	struct netdev_hw_addr *ha;
 	int alloc_size;
@@ -3493,7 +3492,7 @@ static int __hw_addr_add(struct list_head *list, int *delta,
 	if (addr_len > MAX_ADDR_LEN)
 		return -EINVAL;
 
-	list_for_each_entry(ha, list, list) {
+	list_for_each_entry(ha, &list->list, list) {
 		if (!memcmp(ha->addr, addr, addr_len) &&
 		    ha->type == addr_type) {
 			ha->refcount++;
@@ -3512,9 +3511,8 @@ static int __hw_addr_add(struct list_head *list, int *delta,
 	ha->type = addr_type;
 	ha->refcount = 1;
 	ha->synced = false;
-	list_add_tail_rcu(&ha->list, list);
-	if (delta)
-		(*delta)++;
+	list_add_tail_rcu(&ha->list, &list->list);
+	list->count++;
 	return 0;
 }
 
3520 3518
@@ -3526,120 +3524,121 @@ static void ha_rcu_free(struct rcu_head *head)
 	kfree(ha);
 }
 
-static int __hw_addr_del(struct list_head *list, int *delta,
-			 unsigned char *addr, int addr_len,
-			 unsigned char addr_type)
+static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
+			 int addr_len, unsigned char addr_type)
 {
 	struct netdev_hw_addr *ha;
 
-	list_for_each_entry(ha, list, list) {
+	list_for_each_entry(ha, &list->list, list) {
 		if (!memcmp(ha->addr, addr, addr_len) &&
 		    (ha->type == addr_type || !addr_type)) {
 			if (--ha->refcount)
 				return 0;
 			list_del_rcu(&ha->list);
 			call_rcu(&ha->rcu_head, ha_rcu_free);
-			if (delta)
-				(*delta)--;
+			list->count--;
 			return 0;
 		}
 	}
 	return -ENOENT;
 }
 
-static int __hw_addr_add_multiple(struct list_head *to_list, int *to_delta,
-				  struct list_head *from_list, int addr_len,
+static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
+				  struct netdev_hw_addr_list *from_list,
+				  int addr_len,
 				  unsigned char addr_type)
 {
 	int err;
 	struct netdev_hw_addr *ha, *ha2;
 	unsigned char type;
 
-	list_for_each_entry(ha, from_list, list) {
+	list_for_each_entry(ha, &from_list->list, list) {
 		type = addr_type ? addr_type : ha->type;
-		err = __hw_addr_add(to_list, to_delta, ha->addr,
-				    addr_len, type);
+		err = __hw_addr_add(to_list, ha->addr, addr_len, type);
 		if (err)
 			goto unroll;
 	}
 	return 0;
 
 unroll:
-	list_for_each_entry(ha2, from_list, list) {
+	list_for_each_entry(ha2, &from_list->list, list) {
 		if (ha2 == ha)
 			break;
 		type = addr_type ? addr_type : ha2->type;
-		__hw_addr_del(to_list, to_delta, ha2->addr,
-			      addr_len, type);
+		__hw_addr_del(to_list, ha2->addr, addr_len, type);
 	}
 	return err;
 }
 
-static void __hw_addr_del_multiple(struct list_head *to_list, int *to_delta,
-				   struct list_head *from_list, int addr_len,
+static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
+				   struct netdev_hw_addr_list *from_list,
+				   int addr_len,
 				   unsigned char addr_type)
 {
 	struct netdev_hw_addr *ha;
 	unsigned char type;
 
-	list_for_each_entry(ha, from_list, list) {
+	list_for_each_entry(ha, &from_list->list, list) {
 		type = addr_type ? addr_type : ha->type;
-		__hw_addr_del(to_list, to_delta, ha->addr,
-			      addr_len, addr_type);
+		__hw_addr_del(to_list, ha->addr, addr_len, addr_type);
 	}
 }
 
-static int __hw_addr_sync(struct list_head *to_list, int *to_delta,
-			  struct list_head *from_list, int *from_delta,
+static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+			  struct netdev_hw_addr_list *from_list,
 			  int addr_len)
 {
 	int err = 0;
 	struct netdev_hw_addr *ha, *tmp;
 
-	list_for_each_entry_safe(ha, tmp, from_list, list) {
+	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
 		if (!ha->synced) {
-			err = __hw_addr_add(to_list, to_delta, ha->addr,
+			err = __hw_addr_add(to_list, ha->addr,
 					    addr_len, ha->type);
 			if (err)
 				break;
 			ha->synced = true;
 			ha->refcount++;
 		} else if (ha->refcount == 1) {
-			__hw_addr_del(to_list, to_delta, ha->addr,
-				      addr_len, ha->type);
-			__hw_addr_del(from_list, from_delta, ha->addr,
-				      addr_len, ha->type);
+			__hw_addr_del(to_list, ha->addr, addr_len, ha->type);
+			__hw_addr_del(from_list, ha->addr, addr_len, ha->type);
 		}
 	}
 	return err;
 }
 
-static void __hw_addr_unsync(struct list_head *to_list, int *to_delta,
-			     struct list_head *from_list, int *from_delta,
+static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+			     struct netdev_hw_addr_list *from_list,
 			     int addr_len)
 {
 	struct netdev_hw_addr *ha, *tmp;
 
-	list_for_each_entry_safe(ha, tmp, from_list, list) {
+	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
 		if (ha->synced) {
-			__hw_addr_del(to_list, to_delta, ha->addr,
+			__hw_addr_del(to_list, ha->addr,
 				      addr_len, ha->type);
 			ha->synced = false;
-			__hw_addr_del(from_list, from_delta, ha->addr,
+			__hw_addr_del(from_list, ha->addr,
 				      addr_len, ha->type);
 		}
 	}
 }
 
-
-static void __hw_addr_flush(struct list_head *list)
+static void __hw_addr_flush(struct netdev_hw_addr_list *list)
 {
 	struct netdev_hw_addr *ha, *tmp;
 
-	list_for_each_entry_safe(ha, tmp, list, list) {
+	list_for_each_entry_safe(ha, tmp, &list->list, list) {
 		list_del_rcu(&ha->list);
 		call_rcu(&ha->rcu_head, ha_rcu_free);
 	}
+	list->count = 0;
+}
+
+static void __hw_addr_init(struct netdev_hw_addr_list *list)
+{
+	INIT_LIST_HEAD(&list->list);
+	list->count = 0;
 }
3644 3643
3645/* Device addresses handling functions */ 3644/* Device addresses handling functions */
@@ -3648,7 +3647,7 @@ static void dev_addr_flush(struct net_device *dev)
 {
 	/* rtnl_mutex must be held here */
 
-	__hw_addr_flush(&dev->dev_addr_list);
+	__hw_addr_flush(&dev->dev_addrs);
 	dev->dev_addr = NULL;
 }
 
@@ -3660,16 +3659,16 @@ static int dev_addr_init(struct net_device *dev)
 
 	/* rtnl_mutex must be held here */
 
-	INIT_LIST_HEAD(&dev->dev_addr_list);
+	__hw_addr_init(&dev->dev_addrs);
 	memset(addr, 0, sizeof(addr));
-	err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, sizeof(addr),
+	err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
 			    NETDEV_HW_ADDR_T_LAN);
 	if (!err) {
 		/*
 		 * Get the first (previously created) address from the list
 		 * and set dev_addr pointer to this location.
 		 */
-		ha = list_first_entry(&dev->dev_addr_list,
+		ha = list_first_entry(&dev->dev_addrs.list,
 				      struct netdev_hw_addr, list);
 		dev->dev_addr = ha->addr;
 	}
@@ -3694,8 +3693,7 @@ int dev_addr_add(struct net_device *dev, unsigned char *addr,
 
 	ASSERT_RTNL();
 
-	err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, dev->addr_len,
-			    addr_type);
+	err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
 	if (!err)
 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
 	return err;
@@ -3725,11 +3723,12 @@ int dev_addr_del(struct net_device *dev, unsigned char *addr,
 	 * We can not remove the first address from the list because
 	 * dev->dev_addr points to that.
 	 */
-	ha = list_first_entry(&dev->dev_addr_list, struct netdev_hw_addr, list);
+	ha = list_first_entry(&dev->dev_addrs.list,
+			      struct netdev_hw_addr, list);
 	if (ha->addr == dev->dev_addr && ha->refcount == 1)
 		return -ENOENT;
 
-	err = __hw_addr_del(&dev->dev_addr_list, NULL, addr, dev->addr_len,
+	err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
 			    addr_type);
 	if (!err)
 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
@@ -3757,8 +3756,7 @@ int dev_addr_add_multiple(struct net_device *to_dev,
 
 	if (from_dev->addr_len != to_dev->addr_len)
 		return -EINVAL;
-	err = __hw_addr_add_multiple(&to_dev->dev_addr_list, NULL,
-				     &from_dev->dev_addr_list,
+	err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
 				     to_dev->addr_len, addr_type);
 	if (!err)
 		call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
@@ -3784,15 +3782,14 @@ int dev_addr_del_multiple(struct net_device *to_dev,
 
 	if (from_dev->addr_len != to_dev->addr_len)
 		return -EINVAL;
-	__hw_addr_del_multiple(&to_dev->dev_addr_list, NULL,
-			       &from_dev->dev_addr_list,
+	__hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
 			       to_dev->addr_len, addr_type);
 	call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
 	return 0;
 }
 EXPORT_SYMBOL(dev_addr_del_multiple);
 
-/* unicast and multicast addresses handling functions */
+/* multicast addresses handling functions */
 
 int __dev_addr_delete(struct dev_addr_list **list, int *count,
 		      void *addr, int alen, int glbl)
@@ -3868,8 +3865,8 @@ int dev_unicast_delete(struct net_device *dev, void *addr)
 
 	ASSERT_RTNL();
 
-	err = __hw_addr_del(&dev->uc_list, &dev->uc_count, addr,
-			    dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
+	err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
+			    NETDEV_HW_ADDR_T_UNICAST);
 	if (!err)
 		__dev_set_rx_mode(dev);
 	return err;
@@ -3892,8 +3889,8 @@ int dev_unicast_add(struct net_device *dev, void *addr)
 
 	ASSERT_RTNL();
 
-	err = __hw_addr_add(&dev->uc_list, &dev->uc_count, addr,
-			    dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
+	err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
+			    NETDEV_HW_ADDR_T_UNICAST);
 	if (!err)
 		__dev_set_rx_mode(dev);
 	return err;
@@ -3966,8 +3963,7 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from)
 	if (to->addr_len != from->addr_len)
 		return -EINVAL;
 
-	err = __hw_addr_sync(&to->uc_list, &to->uc_count,
-			     &from->uc_list, &from->uc_count, to->addr_len);
+	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);
 	return err;
@@ -3990,8 +3986,7 @@ void dev_unicast_unsync(struct net_device *to, struct net_device *from)
 	if (to->addr_len != from->addr_len)
 		return;
 
-	__hw_addr_unsync(&to->uc_list, &to->uc_count,
-			 &from->uc_list, &from->uc_count, to->addr_len);
+	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
 	__dev_set_rx_mode(to);
 }
 EXPORT_SYMBOL(dev_unicast_unsync);
@@ -4000,15 +3995,14 @@ static void dev_unicast_flush(struct net_device *dev)
 {
 	/* rtnl_mutex must be held here */
 
-	__hw_addr_flush(&dev->uc_list);
-	dev->uc_count = 0;
+	__hw_addr_flush(&dev->uc);
 }
 
 static void dev_unicast_init(struct net_device *dev)
 {
 	/* rtnl_mutex must be held here */
 
-	INIT_LIST_HEAD(&dev->uc_list);
+	__hw_addr_init(&dev->uc);
 }
 
 
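
The netdev_hw_addr_list type that replaces the bare list_head/int pairs above bundles the element count with the list head, which is what lets __hw_addr_add()/__hw_addr_del() drop their *delta argument. From this series' include/linux/netdevice.h change:

	struct netdev_hw_addr_list {
		struct list_head list;	/* of struct netdev_hw_addr */
		int count;		/* kept in step by add/del */
	};

With dev->uc and dev->dev_addrs both of this type, the old dev->uc_count reads become dev->uc.count, as seen in __dev_set_rx_mode() above.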
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5c93435b0347..9e0597d189b0 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -204,6 +204,10 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	skb->end = skb->tail + size;
 	kmemcheck_annotate_bitfield(skb, flags1);
 	kmemcheck_annotate_bitfield(skb, flags2);
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+	skb->mac_header = ~0U;
+#endif
+
 	/* make sure we initialize shinfo sequentially */
 	shinfo = skb_shinfo(skb);
 	atomic_set(&shinfo->dataref, 1);
@@ -665,7 +669,8 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	/* {transport,network,mac}_header are relative to skb->head */
 	new->transport_header += offset;
 	new->network_header   += offset;
-	new->mac_header	      += offset;
+	if (skb_mac_header_was_set(new))
+		new->mac_header	      += offset;
 #endif
 	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
@@ -847,7 +852,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	skb->tail	      += off;
 	skb->transport_header += off;
 	skb->network_header   += off;
-	skb->mac_header	      += off;
+	if (skb_mac_header_was_set(skb))
+		skb->mac_header	      += off;
 	skb->csum_start       += nhead;
 	skb->cloned   = 0;
 	skb->hdr_len  = 0;
@@ -939,7 +945,8 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 	n->transport_header += off;
 	n->network_header   += off;
-	n->mac_header	    += off;
+	if (skb_mac_header_was_set(skb))
+		n->mac_header	    += off;
 #endif
 
 	return n;
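
These hunks rely on skb_mac_header_was_set(): __alloc_skb() now primes the offset-based mac_header with ~0U, so "never set" is distinguishable from a legitimate offset of 0, and the copy/expand paths only relocate a header that actually exists. The include/linux/skbuff.h test is essentially:

	#ifdef NET_SKBUFF_DATA_USES_OFFSET
	static inline int skb_mac_header_was_set(const struct sk_buff *skb)
	{
		/* ~0U is the "unset" sentinel written in __alloc_skb() */
		return skb->mac_header != ~0U;
	}
	#endif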
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index a5e3a593e472..d351b8db0df5 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1240,7 +1240,7 @@ static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 		return val;
 
 	case TIOCOUTQ:
-		amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
+		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
 		if (amount < 0)
 			amount = 0;
 		err = put_user(amount, (int __user *)arg);
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 8121bf0029e3..2e1f836d4240 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -540,8 +540,7 @@ static void econet_destroy_timer(unsigned long data)
 {
 	struct sock *sk=(struct sock *)data;
 
-	if (!atomic_read(&sk->sk_wmem_alloc) &&
-	    !atomic_read(&sk->sk_rmem_alloc)) {
+	if (!sk_has_allocations(sk)) {
 		sk_free(sk);
 		return;
 	}
@@ -579,8 +578,7 @@ static int econet_release(struct socket *sock)
 
 	skb_queue_purge(&sk->sk_receive_queue);
 
-	if (atomic_read(&sk->sk_rmem_alloc) ||
-	    atomic_read(&sk->sk_wmem_alloc)) {
+	if (sk_has_allocations(sk)) {
 		sk->sk_timer.data     = (unsigned long)sk;
 		sk->sk_timer.expires  = jiffies + HZ;
 		sk->sk_timer.function = econet_destroy_timer;
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index 882a927cefae..3bb6bdb1dac1 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -39,14 +39,6 @@
 
 #include "af802154.h"
 
-#define DBG_DUMP(data, len) { \
-	int i; \
-	pr_debug("function: %s: data: len %d:\n", __func__, len); \
-	for (i = 0; i < len; i++) {\
-		pr_debug("%02x: %02x\n", i, (data)[i]); \
-	} \
-}
-
 /*
  * Utility function for families
  */
@@ -302,10 +294,12 @@ static struct net_proto_family ieee802154_family_ops = {
 static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
 	struct packet_type *pt, struct net_device *orig_dev)
 {
-	DBG_DUMP(skb->data, skb->len);
 	if (!netif_running(dev))
 		return -ENODEV;
 	pr_debug("got frame, type %d, dev %p\n", dev->type, dev);
+#ifdef DEBUG
+	print_hex_dump_bytes("ieee802154_rcv ", DUMP_PREFIX_NONE, skb->data, skb->len);
+#endif
 
 	if (!net_eq(dev_net(dev), &init_net))
 		goto drop;
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 1779677aed46..14d39840dd62 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -126,7 +126,8 @@ static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg)
 	switch (cmd) {
 	case SIOCOUTQ:
 	{
-		int amount = atomic_read(&sk->sk_wmem_alloc);
+		int amount = sk_wmem_alloc_get(sk);
+
 		return put_user(amount, (int __user *)arg);
 	}
 
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index d1a39b1277d6..012cf5a68581 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -391,13 +391,8 @@ static inline void tnode_free(struct tnode *tn)
 static void tnode_free_safe(struct tnode *tn)
 {
 	BUG_ON(IS_LEAF(tn));
-
-	if (node_parent((struct node *) tn)) {
-		tn->tnode_free = tnode_free_head;
-		tnode_free_head = tn;
-	} else {
-		tnode_free(tn);
-	}
+	tn->tnode_free = tnode_free_head;
+	tnode_free_head = tn;
 }
 
 static void tnode_free_flush(void)
@@ -1009,7 +1004,7 @@ fib_find_node(struct trie *t, u32 key)
 	return NULL;
 }
 
-static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
+static void trie_rebalance(struct trie *t, struct tnode *tn)
 {
 	int wasfull;
 	t_key cindex, key;
@@ -1033,12 +1028,13 @@ static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
 	}
 
 	/* Handle last (top) tnode */
-	if (IS_TNODE(tn)) {
+	if (IS_TNODE(tn))
 		tn = (struct tnode *)resize(t, (struct tnode *)tn);
-		tnode_free_flush();
-	}
 
-	return (struct node *)tn;
+	rcu_assign_pointer(t->trie, (struct node *)tn);
+	tnode_free_flush();
+
+	return;
 }
1043 1039
1044/* only used from updater-side */ 1040/* only used from updater-side */
@@ -1186,7 +1182,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
 
 	/* Rebalance the trie */
 
-	rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
+	trie_rebalance(t, tp);
 done:
 	return fa_head;
 }
@@ -1605,7 +1601,7 @@ static void trie_leaf_remove(struct trie *t, struct leaf *l)
 	if (tp) {
 		t_key cindex = tkey_extract_bits(l->key, tp->pos, tp->bits);
 		put_child(t, (struct tnode *)tp, cindex, NULL);
-		rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
+		trie_rebalance(t, tp);
 	} else
 		rcu_assign_pointer(t->trie, NULL);
 
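
For context, a sketch of the deferred-free machinery that tnode_free_safe() now feeds unconditionally (reconstructed from the callers above, so treat the exact body as an assumption): trie_rebalance() publishes the new root with rcu_assign_pointer() first and only then drains the pending tnodes, so concurrent RCU readers never walk a freed node:

	static void tnode_free_flush(void)
	{
		struct tnode *tn;

		/* drain the singly-linked list built by tnode_free_safe() */
		while ((tn = tnode_free_head)) {
			tnode_free_head = tn->tnode_free;
			tn->tnode_free = NULL;
			tnode_free(tn);
		}
	}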
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index b0b273503e2a..a706a47f4dbb 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -156,10 +156,10 @@ static int inet_csk_diag_fill(struct sock *sk,
 	r->idiag_inode = sock_i_ino(sk);
 
 	if (minfo) {
-		minfo->idiag_rmem = atomic_read(&sk->sk_rmem_alloc);
+		minfo->idiag_rmem = sk_rmem_alloc_get(sk);
 		minfo->idiag_wmem = sk->sk_wmem_queued;
 		minfo->idiag_fmem = sk->sk_forward_alloc;
-		minfo->idiag_tmem = atomic_read(&sk->sk_wmem_alloc);
+		minfo->idiag_tmem = sk_wmem_alloc_get(sk);
 	}
 
 	handler->idiag_get_info(sk, r, info);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 3dc9171a272f..2979f14bb188 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -799,7 +799,8 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
 	switch (cmd) {
 	case SIOCOUTQ: {
-		int amount = atomic_read(&sk->sk_wmem_alloc);
+		int amount = sk_wmem_alloc_get(sk);
+
 		return put_user(amount, (int __user *)arg);
 	}
 	case SIOCINQ: {
@@ -935,8 +936,8 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
 	seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
 		i, src, srcp, dest, destp, sp->sk_state,
-		atomic_read(&sp->sk_wmem_alloc),
-		atomic_read(&sp->sk_rmem_alloc),
+		sk_wmem_alloc_get(sp),
+		sk_rmem_alloc_get(sp),
 		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
 		atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
 }
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cd76b3cb7092..65b3a8b11a6c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1085,8 +1085,16 @@ restart:
 	now = jiffies;
 
 	if (!rt_caching(dev_net(rt->u.dst.dev))) {
-		rt_drop(rt);
-		return 0;
+		/*
+		 * If we're not caching, just tell the caller we
+		 * were successful and don't touch the route.  The
+		 * caller hold the sole reference to the cache entry, and
+		 * it will be released when the caller is done with it.
+		 * If we drop it here, the callers have no way to resolve routes
+		 * when we're not caching.  Instead, just point *rp at rt, so
+		 * the caller gets a single use out of the route
+		 */
+		goto report_and_exit;
 	}
 
 	rthp = &rt_hash_table[hash].chain;
@@ -1217,6 +1225,8 @@ restart:
 	rcu_assign_pointer(rt_hash_table[hash].chain, rt);
 
 	spin_unlock_bh(rt_hash_lock_addr(hash));
+
+report_and_exit:
 	if (rp)
 		*rp = rt;
 	else
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8f4158d7c9a6..80e3812837ad 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -840,7 +840,8 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 	switch (cmd) {
 	case SIOCOUTQ:
 	{
-		int amount = atomic_read(&sk->sk_wmem_alloc);
+		int amount = sk_wmem_alloc_get(sk);
+
 		return put_user(amount, (int __user *)arg);
 	}
 
@@ -1721,8 +1722,8 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n",
 		bucket, src, srcp, dest, destp, sp->sk_state,
-		atomic_read(&sp->sk_wmem_alloc),
-		atomic_read(&sp->sk_rmem_alloc),
+		sk_wmem_alloc_get(sp),
+		sk_rmem_alloc_get(sp),
 		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
 		atomic_read(&sp->sk_refcnt), sp,
 		atomic_read(&sp->sk_drops), len);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 36a090d87a3d..8b0b6f948063 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1130,7 +1130,8 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
 	switch(cmd) {
 	case SIOCOUTQ:
 	{
-		int amount = atomic_read(&sk->sk_wmem_alloc);
+		int amount = sk_wmem_alloc_get(sk);
+
 		return put_user(amount, (int __user *)arg);
 	}
 	case SIOCINQ:
@@ -1236,8 +1237,8 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
 		   dest->s6_addr32[0], dest->s6_addr32[1],
 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
 		   sp->sk_state,
-		   atomic_read(&sp->sk_wmem_alloc),
-		   atomic_read(&sp->sk_rmem_alloc),
+		   sk_wmem_alloc_get(sp),
+		   sk_rmem_alloc_get(sp),
 		   0, 0L, 0,
 		   sock_i_uid(sp), 0,
 		   sock_i_ino(sp),
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index fc333d854728..023beda6b224 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1061,8 +1061,8 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
 		   dest->s6_addr32[0], dest->s6_addr32[1],
 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
 		   sp->sk_state,
-		   atomic_read(&sp->sk_wmem_alloc),
-		   atomic_read(&sp->sk_rmem_alloc),
+		   sk_wmem_alloc_get(sp),
+		   sk_rmem_alloc_get(sp),
 		   0, 0L, 0,
 		   sock_i_uid(sp), 0,
 		   sock_i_ino(sp),
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 1627050e29fd..417b0e309495 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1835,7 +1835,7 @@ static int ipx_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 
 	switch (cmd) {
 	case TIOCOUTQ:
-		amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
+		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
 		if (amount < 0)
 			amount = 0;
 		rc = put_user(amount, (int __user *)argp);
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index 5ed97ad0e2e3..576178482f89 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -280,8 +280,8 @@ static int ipx_seq_socket_show(struct seq_file *seq, void *v)
 	}
 
 	seq_printf(seq, "%08X %08X %02X %03d\n",
-		   atomic_read(&s->sk_wmem_alloc),
-		   atomic_read(&s->sk_rmem_alloc),
+		   sk_wmem_alloc_get(s),
+		   sk_rmem_alloc_get(s),
 		   s->sk_state, SOCK_INODE(s->sk_socket)->i_uid);
 out:
 	return 0;
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 3eb5bcc75f99..5922febe25c4 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1762,7 +1762,8 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 	switch (cmd) {
 	case TIOCOUTQ: {
 		long amount;
-		amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
+
+		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
 		if (amount < 0)
 			amount = 0;
 		if (put_user(amount, (unsigned int __user *)arg))
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 656cbd195825..6be5f92d1094 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -54,6 +54,38 @@ static const u8 iprm_shutdown[8] =
54#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */ 54#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
55#define CB_TRGCLS_LEN (TRGCLS_SIZE) 55#define CB_TRGCLS_LEN (TRGCLS_SIZE)
56 56
57#define __iucv_sock_wait(sk, condition, timeo, ret) \
58do { \
59 DEFINE_WAIT(__wait); \
60 long __timeo = timeo; \
61 ret = 0; \
62 while (!(condition)) { \
63 prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \
64 if (!__timeo) { \
65 ret = -EAGAIN; \
66 break; \
67 } \
68 if (signal_pending(current)) { \
69 ret = sock_intr_errno(__timeo); \
70 break; \
71 } \
72 release_sock(sk); \
73 __timeo = schedule_timeout(__timeo); \
74 lock_sock(sk); \
75 ret = sock_error(sk); \
76 if (ret) \
77 break; \
78 } \
79 finish_wait(sk->sk_sleep, &__wait); \
80} while (0)
81
82#define iucv_sock_wait(sk, condition, timeo) \
83({ \
84 int __ret = 0; \
85 if (!(condition)) \
86 __iucv_sock_wait(sk, condition, timeo, __ret); \
87 __ret; \
88})
57 89
58static void iucv_sock_kill(struct sock *sk); 90static void iucv_sock_kill(struct sock *sk);
59static void iucv_sock_close(struct sock *sk); 91static void iucv_sock_close(struct sock *sk);
@@ -238,6 +270,48 @@ static inline size_t iucv_msg_length(struct iucv_message *msg)
 	return msg->length;
 }
 
+/**
+ * iucv_sock_in_state() - check for specific states
+ * @sk:		sock structure
+ * @state:	first iucv sk state
+ * @state2:	second iucv sk state
+ *
+ * Returns true if the socket is in either the first or the second state.
+ */
+static int iucv_sock_in_state(struct sock *sk, int state, int state2)
+{
+	return (sk->sk_state == state || sk->sk_state == state2);
+}
+
+/**
+ * iucv_below_msglim() - function to check if messages can be sent
+ * @sk:		sock structure
+ *
+ * Returns true if the send queue length is lower than the message limit.
+ * Always returns true if the socket is not connected (no iucv path for
+ * checking the message limit).
+ */
+static inline int iucv_below_msglim(struct sock *sk)
+{
+	struct iucv_sock *iucv = iucv_sk(sk);
+
+	if (sk->sk_state != IUCV_CONNECTED)
+		return 1;
+	return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+}
+
+/**
+ * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
+ */
+static void iucv_sock_wake_msglim(struct sock *sk)
+{
+	read_lock(&sk->sk_callback_lock);
+	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+		wake_up_interruptible_all(sk->sk_sleep);
+	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+	read_unlock(&sk->sk_callback_lock);
+}
+
 /* Timers */
 static void iucv_sock_timeout(unsigned long arg)
 {
242static void iucv_sock_timeout(unsigned long arg) 316static void iucv_sock_timeout(unsigned long arg)
243{ 317{
@@ -329,7 +403,9 @@ static void iucv_sock_close(struct sock *sk)
 			timeo = sk->sk_lingertime;
 		else
 			timeo = IUCV_DISCONN_TIMEOUT;
-		err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
+		err = iucv_sock_wait(sk,
+				     iucv_sock_in_state(sk, IUCV_CLOSED, 0),
+				     timeo);
 	}
 
 	case IUCV_CLOSING:   /* fall through */
@@ -510,39 +586,6 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
 	return NULL;
 }
 
-int iucv_sock_wait_state(struct sock *sk, int state, int state2,
-			 unsigned long timeo)
-{
-	DECLARE_WAITQUEUE(wait, current);
-	int err = 0;
-
-	add_wait_queue(sk->sk_sleep, &wait);
-	while (sk->sk_state != state && sk->sk_state != state2) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
-		if (!timeo) {
-			err = -EAGAIN;
-			break;
-		}
-
-		if (signal_pending(current)) {
-			err = sock_intr_errno(timeo);
-			break;
-		}
-
-		release_sock(sk);
-		timeo = schedule_timeout(timeo);
-		lock_sock(sk);
-
-		err = sock_error(sk);
-		if (err)
-			break;
-	}
-	set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
-	return err;
-}
-
 /* Bind an unbound socket */
 static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
 			  int addr_len)
@@ -687,8 +730,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
 	}
 
 	if (sk->sk_state != IUCV_CONNECTED) {
-		err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
-				sock_sndtimeo(sk, flags & O_NONBLOCK));
+		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
+							    IUCV_DISCONN),
+				     sock_sndtimeo(sk, flags & O_NONBLOCK));
 	}
 
 	if (sk->sk_state == IUCV_DISCONN) {
@@ -842,9 +886,11 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 	struct iucv_message txmsg;
 	struct cmsghdr *cmsg;
 	int cmsg_done;
+	long timeo;
 	char user_id[9];
 	char appl_id[9];
 	int err;
+	int noblock = msg->msg_flags & MSG_DONTWAIT;
 
 	err = sock_error(sk);
 	if (err)
@@ -864,108 +910,119 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 		goto out;
 	}
 
-	if (sk->sk_state == IUCV_CONNECTED) {
-		/* initialize defaults */
-		cmsg_done   = 0;	/* check for duplicate headers */
-		txmsg.class = 0;
-
-		/* iterate over control messages */
-		for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
-			cmsg = CMSG_NXTHDR(msg, cmsg)) {
-
-			if (!CMSG_OK(msg, cmsg)) {
-				err = -EINVAL;
-				goto out;
-			}
-
-			if (cmsg->cmsg_level != SOL_IUCV)
-				continue;
-
-			if (cmsg->cmsg_type & cmsg_done) {
-				err = -EINVAL;
-				goto out;
-			}
-			cmsg_done |= cmsg->cmsg_type;
-
-			switch (cmsg->cmsg_type) {
-			case SCM_IUCV_TRGCLS:
-				if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
-					err = -EINVAL;
-					goto out;
-				}
-
-				/* set iucv message target class */
-				memcpy(&txmsg.class,
-					(void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
-
-				break;
-
-			default:
-				err = -EINVAL;
-				goto out;
-				break;
-			}
-		}
-
-		/* allocate one skb for each iucv message:
-		 * this is fine for SOCK_SEQPACKET (unless we want to support
-		 * segmented records using the MSG_EOR flag), but
-		 * for SOCK_STREAM we might want to improve it in future */
-		if (!(skb = sock_alloc_send_skb(sk, len,
-						msg->msg_flags & MSG_DONTWAIT,
-						&err)))
-			goto out;
-
-		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
-			err = -EFAULT;
-			goto fail;
-		}
-
-		/* increment and save iucv message tag for msg_completion cbk */
-		txmsg.tag = iucv->send_tag++;
-		memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
-		skb_queue_tail(&iucv->send_skb_q, skb);
-
-		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
-		    && skb->len <= 7) {
-			err = iucv_send_iprm(iucv->path, &txmsg, skb);
-
-			/* on success: there is no message_complete callback
-			 * for an IPRMDATA msg; remove skb from send queue */
-			if (err == 0) {
-				skb_unlink(skb, &iucv->send_skb_q);
-				kfree_skb(skb);
-			}
-
-			/* this error should never happen since the
-			 * IUCV_IPRMDATA path flag is set... sever path */
-			if (err == 0x15) {
-				iucv_path_sever(iucv->path, NULL);
-				skb_unlink(skb, &iucv->send_skb_q);
-				err = -EPIPE;
-				goto fail;
-			}
-		} else
-			err = iucv_message_send(iucv->path, &txmsg, 0, 0,
-						(void *) skb->data, skb->len);
-		if (err) {
-			if (err == 3) {
-				user_id[8] = 0;
-				memcpy(user_id, iucv->dst_user_id, 8);
-				appl_id[8] = 0;
-				memcpy(appl_id, iucv->dst_name, 8);
-				pr_err("Application %s on z/VM guest %s"
-				       " exceeds message limit\n",
-				       user_id, appl_id);
-			}
-			skb_unlink(skb, &iucv->send_skb_q);
-			err = -EPIPE;
-			goto fail;
-		}
-
-	} else {
-		err = -ENOTCONN;
-		goto out;
+	/* Return if the socket is not in connected state */
+	if (sk->sk_state != IUCV_CONNECTED) {
+		err = -ENOTCONN;
+		goto out;
+	}
+
+	/* initialize defaults */
+	cmsg_done   = 0;	/* check for duplicate headers */
+	txmsg.class = 0;
+
+	/* iterate over control messages */
+	for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
+		cmsg = CMSG_NXTHDR(msg, cmsg)) {
+
+		if (!CMSG_OK(msg, cmsg)) {
+			err = -EINVAL;
+			goto out;
+		}
+
+		if (cmsg->cmsg_level != SOL_IUCV)
+			continue;
+
+		if (cmsg->cmsg_type & cmsg_done) {
+			err = -EINVAL;
+			goto out;
+		}
+		cmsg_done |= cmsg->cmsg_type;
+
+		switch (cmsg->cmsg_type) {
+		case SCM_IUCV_TRGCLS:
+			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
+				err = -EINVAL;
+				goto out;
+			}
+
+			/* set iucv message target class */
+			memcpy(&txmsg.class,
+				(void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
+
+			break;
+
+		default:
+			err = -EINVAL;
+			goto out;
+			break;
+		}
+	}
+
+	/* allocate one skb for each iucv message:
+	 * this is fine for SOCK_SEQPACKET (unless we want to support
+	 * segmented records using the MSG_EOR flag), but
+	 * for SOCK_STREAM we might want to improve it in future */
+	skb = sock_alloc_send_skb(sk, len, noblock, &err);
+	if (!skb)
+		goto out;
+	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+		err = -EFAULT;
+		goto fail;
+	}
+
+	/* wait if outstanding messages for iucv path has reached */
+	timeo = sock_sndtimeo(sk, noblock);
+	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
+	if (err)
+		goto fail;
+
+	/* return -ECONNRESET if the socket is no longer connected */
+	if (sk->sk_state != IUCV_CONNECTED) {
+		err = -ECONNRESET;
+		goto fail;
+	}
+
+	/* increment and save iucv message tag for msg_completion cbk */
+	txmsg.tag = iucv->send_tag++;
+	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+	skb_queue_tail(&iucv->send_skb_q, skb);
+
+	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
+	    && skb->len <= 7) {
+		err = iucv_send_iprm(iucv->path, &txmsg, skb);
+
+		/* on success: there is no message_complete callback
+		 * for an IPRMDATA msg; remove skb from send queue */
+		if (err == 0) {
+			skb_unlink(skb, &iucv->send_skb_q);
+			kfree_skb(skb);
+		}
+
+		/* this error should never happen since the
+		 * IUCV_IPRMDATA path flag is set... sever path */
+		if (err == 0x15) {
+			iucv_path_sever(iucv->path, NULL);
+			skb_unlink(skb, &iucv->send_skb_q);
+			err = -EPIPE;
+			goto fail;
+		}
+	} else
+		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
+					(void *) skb->data, skb->len);
+	if (err) {
+		if (err == 3) {
+			user_id[8] = 0;
+			memcpy(user_id, iucv->dst_user_id, 8);
+			appl_id[8] = 0;
+			memcpy(appl_id, iucv->dst_name, 8);
+			pr_err("Application %s on z/VM guest %s"
+			       " exceeds message limit\n",
+			       appl_id, user_id);
+			err = -EAGAIN;
+		} else
+			err = -EPIPE;
+		skb_unlink(skb, &iucv->send_skb_q);
+		goto fail;
 	}
 
 	release_sock(sk);
@@ -1581,7 +1638,11 @@ static void iucv_callback_txdone(struct iucv_path *path,
 
 	spin_unlock_irqrestore(&list->lock, flags);
 
-	kfree_skb(this);
+	if (this) {
+		kfree_skb(this);
+		/* wake up any process waiting for sending */
+		iucv_sock_wake_msglim(sk);
+	}
 	}
 	BUG_ON(!this);
 
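
Taken together, the af_iucv changes implement send-side flow control: iucv_sock_sendmsg() blocks in iucv_sock_wait() until iucv_below_msglim() holds, and the transmit-completion callback above releases waiters. The pairing, reduced to its core (illustrative fragment, not a complete function):

	/* sender: sleep until the IUCV path can take another message */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);

	/* tx-done callback: a slot freed up, wake any blocked sender */
	iucv_sock_wake_msglim(sk);

Because iucv_sock_wait() re-evaluates its condition expression on every wakeup, spurious wakeups are harmless, and a non-blocking sender gets -EAGAIN instead of sleeping.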
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 643c1be2d02e..dba9abd27f90 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3662,8 +3662,8 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
3662 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n", 3662 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
3663 s, 3663 s,
3664 atomic_read(&s->sk_refcnt), 3664 atomic_read(&s->sk_refcnt),
3665 atomic_read(&s->sk_rmem_alloc), 3665 sk_rmem_alloc_get(s),
3666 atomic_read(&s->sk_wmem_alloc), 3666 sk_wmem_alloc_get(s),
3667 sock_i_uid(s), 3667 sock_i_uid(s),
3668 sock_i_ino(s) 3668 sock_i_ino(s)
3669 ); 3669 );
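The recurring substitution in this series, here and in the other proc/seq_file hunks, replaces raw atomic_read() on sk_wmem_alloc/sk_rmem_alloc with accessor helpers. A sketch of their likely shape, assuming thin wrappers in include/net/sock.h (the real sk_wmem_alloc_get() may additionally subtract the transmit-side refcount bias introduced alongside these changes):

	/* Sketch only; the real helpers live in include/net/sock.h. */
	static inline int sk_wmem_alloc_get(const struct sock *sk)
	{
		return atomic_read(&sk->sk_wmem_alloc);
	}

	static inline int sk_rmem_alloc_get(const struct sock *sk)
	{
		return atomic_read(&sk->sk_rmem_alloc);
	}

	static inline int sk_has_allocations(const struct sock *sk)
	{
		return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
	}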
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index d208b3396d94..f97be471fe2e 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -134,8 +134,8 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v)
134 seq_printf(seq, "@%02X ", llc->sap->laddr.lsap); 134 seq_printf(seq, "@%02X ", llc->sap->laddr.lsap);
135 llc_ui_format_mac(seq, llc->daddr.mac); 135 llc_ui_format_mac(seq, llc->daddr.mac);
136 seq_printf(seq, "@%02X %8d %8d %2d %3d %4d\n", llc->daddr.lsap, 136 seq_printf(seq, "@%02X %8d %8d %2d %3d %4d\n", llc->daddr.lsap,
137 atomic_read(&sk->sk_wmem_alloc), 137 sk_wmem_alloc_get(sk),
138 atomic_read(&sk->sk_rmem_alloc) - llc->copied_seq, 138 sk_rmem_alloc_get(sk) - llc->copied_seq,
139 sk->sk_state, 139 sk->sk_state,
140 sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : -1, 140 sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : -1,
141 llc->link); 141 llc->link);
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 11c72311f35b..6c439cd5ccea 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -163,6 +163,29 @@ static const struct file_operations noack_ops = {
163 .open = mac80211_open_file_generic 163 .open = mac80211_open_file_generic
164}; 164};
165 165
166static ssize_t queues_read(struct file *file, char __user *user_buf,
167 size_t count, loff_t *ppos)
168{
169 struct ieee80211_local *local = file->private_data;
170 unsigned long flags;
171 char buf[IEEE80211_MAX_QUEUES * 20];
172 int q, res = 0;
173
174 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
175 for (q = 0; q < local->hw.queues; q++)
176 res += sprintf(buf + res, "%02d: %#.8lx/%d\n", q,
177 local->queue_stop_reasons[q],
178 __netif_subqueue_stopped(local->mdev, q));
179 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
180
181 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
182}
183
184static const struct file_operations queues_ops = {
185 .read = queues_read,
186 .open = mac80211_open_file_generic
187};
188
166/* statistics stuff */ 189/* statistics stuff */
167 190
168#define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \ 191#define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \
@@ -298,6 +321,7 @@ void debugfs_hw_add(struct ieee80211_local *local)
298 DEBUGFS_ADD(total_ps_buffered); 321 DEBUGFS_ADD(total_ps_buffered);
299 DEBUGFS_ADD(wep_iv); 322 DEBUGFS_ADD(wep_iv);
300 DEBUGFS_ADD(tsf); 323 DEBUGFS_ADD(tsf);
324 DEBUGFS_ADD(queues);
301 DEBUGFS_ADD_MODE(reset, 0200); 325 DEBUGFS_ADD_MODE(reset, 0200);
302 DEBUGFS_ADD(noack); 326 DEBUGFS_ADD(noack);
303 327
@@ -350,6 +374,7 @@ void debugfs_hw_del(struct ieee80211_local *local)
350 DEBUGFS_DEL(total_ps_buffered); 374 DEBUGFS_DEL(total_ps_buffered);
351 DEBUGFS_DEL(wep_iv); 375 DEBUGFS_DEL(wep_iv);
352 DEBUGFS_DEL(tsf); 376 DEBUGFS_DEL(tsf);
377 DEBUGFS_DEL(queues);
353 DEBUGFS_DEL(reset); 378 DEBUGFS_DEL(reset);
354 DEBUGFS_DEL(noack); 379 DEBUGFS_DEL(noack);
355 380
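Reading the new "queues" debugfs file yields one line per hardware queue. Illustrative output and the buffer-size reasoning, with hypothetical values:

	/*
	 * $ cat ieee80211/phy0/queues     (path under debugfs, illustrative)
	 * 00: 0x00000000/0
	 * 01: 0x00000002/1
	 *
	 * Format is "<queue>: <stop-reason bitmask>/<subqueue stopped?>".
	 * Each line needs at most ~20 bytes, hence the on-stack buffer of
	 * IEEE80211_MAX_QUEUES * 20 bytes.
	 */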
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 4dbc28964196..68eb5052179a 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -783,6 +783,7 @@ struct ieee80211_local {
783 struct dentry *total_ps_buffered; 783 struct dentry *total_ps_buffered;
784 struct dentry *wep_iv; 784 struct dentry *wep_iv;
785 struct dentry *tsf; 785 struct dentry *tsf;
786 struct dentry *queues;
786 struct dentry *reset; 787 struct dentry *reset;
787 struct dentry *noack; 788 struct dentry *noack;
788 struct dentry *statistics; 789 struct dentry *statistics;
@@ -1100,7 +1101,6 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
1100u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, 1101u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
1101 struct ieee802_11_elems *elems, 1102 struct ieee802_11_elems *elems,
1102 u64 filter, u32 crc); 1103 u64 filter, u32 crc);
1103int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freq);
1104u32 ieee80211_mandatory_rates(struct ieee80211_local *local, 1104u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
1105 enum ieee80211_band band); 1105 enum ieee80211_band band);
1106 1106
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index d779c57a8220..aca22b00b6a3 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1102,14 +1102,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1102 struct sta_info *sta; 1102 struct sta_info *sta;
1103 u32 changed = 0, config_changed = 0; 1103 u32 changed = 0, config_changed = 0;
1104 1104
1105 rcu_read_lock();
1106
1107 sta = sta_info_get(local, ifmgd->bssid);
1108 if (!sta) {
1109 rcu_read_unlock();
1110 return;
1111 }
1112
1113 if (deauth) { 1105 if (deauth) {
1114 ifmgd->direct_probe_tries = 0; 1106 ifmgd->direct_probe_tries = 0;
1115 ifmgd->auth_tries = 0; 1107 ifmgd->auth_tries = 0;
@@ -1120,7 +1112,11 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1120 netif_tx_stop_all_queues(sdata->dev); 1112 netif_tx_stop_all_queues(sdata->dev);
1121 netif_carrier_off(sdata->dev); 1113 netif_carrier_off(sdata->dev);
1122 1114
1123 ieee80211_sta_tear_down_BA_sessions(sta); 1115 rcu_read_lock();
1116 sta = sta_info_get(local, ifmgd->bssid);
1117 if (sta)
1118 ieee80211_sta_tear_down_BA_sessions(sta);
1119 rcu_read_unlock();
1124 1120
1125 bss = ieee80211_rx_bss_get(local, ifmgd->bssid, 1121 bss = ieee80211_rx_bss_get(local, ifmgd->bssid,
1126 conf->channel->center_freq, 1122 conf->channel->center_freq,
@@ -1156,8 +1152,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1156 ifmgd->ssid, ifmgd->ssid_len); 1152 ifmgd->ssid, ifmgd->ssid_len);
1157 } 1153 }
1158 1154
1159 rcu_read_unlock();
1160
1161 ieee80211_set_wmm_default(sdata); 1155 ieee80211_set_wmm_default(sdata);
1162 1156
1163 ieee80211_recalc_idle(local); 1157 ieee80211_recalc_idle(local);
@@ -2223,7 +2217,10 @@ static int ieee80211_sta_config_auth(struct ieee80211_sub_if_data *sdata)
2223 capa_mask, capa_val); 2217 capa_mask, capa_val);
2224 2218
2225 if (bss) { 2219 if (bss) {
2226 ieee80211_set_freq(sdata, bss->cbss.channel->center_freq); 2220 local->oper_channel = bss->cbss.channel;
2221 local->oper_channel_type = NL80211_CHAN_NO_HT;
2222 ieee80211_hw_config(local, 0);
2223
2227 if (!(ifmgd->flags & IEEE80211_STA_SSID_SET)) 2224 if (!(ifmgd->flags & IEEE80211_STA_SSID_SET))
2228 ieee80211_sta_set_ssid(sdata, bss->ssid, 2225 ieee80211_sta_set_ssid(sdata, bss->ssid,
2229 bss->ssid_len); 2226 bss->ssid_len);
@@ -2445,6 +2442,14 @@ void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata)
2445 ieee80211_set_disassoc(sdata, true, true, 2442 ieee80211_set_disassoc(sdata, true, true,
2446 WLAN_REASON_DEAUTH_LEAVING); 2443 WLAN_REASON_DEAUTH_LEAVING);
2447 2444
2445 if (ifmgd->ssid_len == 0) {
2446 /*
2447 * Only allow association to be started if a valid SSID
2448 * is configured.
2449 */
2450 return;
2451 }
2452
2448 if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) || 2453 if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) ||
2449 ifmgd->state != IEEE80211_STA_MLME_ASSOCIATE) 2454 ifmgd->state != IEEE80211_STA_MLME_ASSOCIATE)
2450 set_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request); 2455 set_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request);
@@ -2476,6 +2481,10 @@ int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size
2476 ifmgd = &sdata->u.mgd; 2481 ifmgd = &sdata->u.mgd;
2477 2482
2478 if (ifmgd->ssid_len != len || memcmp(ifmgd->ssid, ssid, len) != 0) { 2483 if (ifmgd->ssid_len != len || memcmp(ifmgd->ssid, ssid, len) != 0) {
2484 if (ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED)
2485 ieee80211_set_disassoc(sdata, true, true,
2486 WLAN_REASON_DEAUTH_LEAVING);
2487
2479 /* 2488 /*
2480 * Do not use reassociation if SSID is changed (different ESS). 2489 * Do not use reassociation if SSID is changed (different ESS).
2481 */ 2490 */
@@ -2500,6 +2509,11 @@ int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid)
2500{ 2509{
2501 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2510 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2502 2511
2512 if (compare_ether_addr(bssid, ifmgd->bssid) != 0 &&
2513 ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED)
2514 ieee80211_set_disassoc(sdata, true, true,
2515 WLAN_REASON_DEAUTH_LEAVING);
2516
2503 if (is_valid_ether_addr(bssid)) { 2517 if (is_valid_ether_addr(bssid)) {
2504 memcpy(ifmgd->bssid, bssid, ETH_ALEN); 2518 memcpy(ifmgd->bssid, bssid, ETH_ALEN);
2505 ifmgd->flags |= IEEE80211_STA_BSSID_SET; 2519 ifmgd->flags |= IEEE80211_STA_BSSID_SET;
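The ieee80211_set_disassoc() change above is more than a cleanup: previously the function returned early when sta_info_get() found no station, skipping the queue stop, BSS release, and WMM/idle recalculation that follow. The RCU section is now scoped to the single use of the pointer, and a missing station is no longer fatal:

	/* Narrowed pattern from the hunk above: hold rcu_read_lock() only
	 * around the lookup plus the one call that needs the pointer. */
	rcu_read_lock();
	sta = sta_info_get(local, ifmgd->bssid);
	if (sta)
		ieee80211_sta_tear_down_BA_sessions(sta);
	rcu_read_unlock();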
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 66ce96a69f31..915e77769312 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -774,31 +774,6 @@ void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
774 dev_queue_xmit(skb); 774 dev_queue_xmit(skb);
775} 775}
776 776
777int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freqMHz)
778{
779 int ret = -EINVAL;
780 struct ieee80211_channel *chan;
781 struct ieee80211_local *local = sdata->local;
782
783 chan = ieee80211_get_channel(local->hw.wiphy, freqMHz);
784
785 if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
786 if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
787 chan->flags & IEEE80211_CHAN_NO_IBSS)
788 return ret;
789 local->oper_channel = chan;
790 local->oper_channel_type = NL80211_CHAN_NO_HT;
791
792 if (local->sw_scanning || local->hw_scanning)
793 ret = 0;
794 else
795 ret = ieee80211_hw_config(
796 local, IEEE80211_CONF_CHANGE_CHANNEL);
797 }
798
799 return ret;
800}
801
802u32 ieee80211_mandatory_rates(struct ieee80211_local *local, 777u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
803 enum ieee80211_band band) 778 enum ieee80211_band band)
804{ 779{
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index d2d81b103341..1da81f456744 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -55,6 +55,8 @@ static int ieee80211_ioctl_siwfreq(struct net_device *dev,
55 struct iw_freq *freq, char *extra) 55 struct iw_freq *freq, char *extra)
56{ 56{
57 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 57 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
58 struct ieee80211_local *local = sdata->local;
59 struct ieee80211_channel *chan;
58 60
59 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 61 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
60 return cfg80211_ibss_wext_siwfreq(dev, info, freq, extra); 62 return cfg80211_ibss_wext_siwfreq(dev, info, freq, extra);
@@ -69,17 +71,38 @@ static int ieee80211_ioctl_siwfreq(struct net_device *dev,
69 IEEE80211_STA_AUTO_CHANNEL_SEL; 71 IEEE80211_STA_AUTO_CHANNEL_SEL;
70 return 0; 72 return 0;
71 } else 73 } else
72 return ieee80211_set_freq(sdata, 74 chan = ieee80211_get_channel(local->hw.wiphy,
73 ieee80211_channel_to_frequency(freq->m)); 75 ieee80211_channel_to_frequency(freq->m));
74 } else { 76 } else {
75 int i, div = 1000000; 77 int i, div = 1000000;
76 for (i = 0; i < freq->e; i++) 78 for (i = 0; i < freq->e; i++)
77 div /= 10; 79 div /= 10;
78 if (div > 0) 80 if (div <= 0)
79 return ieee80211_set_freq(sdata, freq->m / div);
80 else
81 return -EINVAL; 81 return -EINVAL;
82 chan = ieee80211_get_channel(local->hw.wiphy, freq->m / div);
82 } 83 }
84
85 if (!chan)
86 return -EINVAL;
87
88 if (chan->flags & IEEE80211_CHAN_DISABLED)
89 return -EINVAL;
90
91 /*
92 * no change except maybe auto -> fixed, ignore the HT
93 * setting so you can fix a channel you're on already
94 */
95 if (local->oper_channel == chan)
96 return 0;
97
98 if (sdata->vif.type == NL80211_IFTYPE_STATION)
99 ieee80211_sta_req_auth(sdata);
100
101 local->oper_channel = chan;
102 local->oper_channel_type = NL80211_CHAN_NO_HT;
103 ieee80211_hw_config(local, 0);
104
105 return 0;
83} 106}
84 107
85 108
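A worked example of the divisor arithmetic in the hunk above: wireless extensions encode a frequency as m x 10^e Hz, so the target in MHz is m / 10^(6-e), i.e. m / div (values below are illustrative):

	/*
	 *	m = 2412,    e = 6  ->  div = 1     ->  2412 MHz (2.4 GHz ch 1)
	 *	m = 2437000, e = 3  ->  div = 1000  ->  2437 MHz (2.4 GHz ch 6)
	 *	m = 5,       e = 8  ->  div = 0     ->  -EINVAL (guard trips)
	 */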
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 8b6bbb3032b0..2936fa3b6dc8 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1914,8 +1914,8 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
1914 s->sk_protocol, 1914 s->sk_protocol,
1915 nlk->pid, 1915 nlk->pid,
1916 nlk->groups ? (u32)nlk->groups[0] : 0, 1916 nlk->groups ? (u32)nlk->groups[0] : 0,
1917 atomic_read(&s->sk_rmem_alloc), 1917 sk_rmem_alloc_get(s),
1918 atomic_read(&s->sk_wmem_alloc), 1918 sk_wmem_alloc_get(s),
1919 nlk->cb, 1919 nlk->cb,
1920 atomic_read(&s->sk_refcnt), 1920 atomic_read(&s->sk_refcnt),
1921 atomic_read(&s->sk_drops) 1921 atomic_read(&s->sk_drops)
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 3be0e016ab7d..ce51ce012cda 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -286,8 +286,7 @@ void nr_destroy_socket(struct sock *sk)
286 kfree_skb(skb); 286 kfree_skb(skb);
287 } 287 }
288 288
289 if (atomic_read(&sk->sk_wmem_alloc) || 289 if (sk_has_allocations(sk)) {
290 atomic_read(&sk->sk_rmem_alloc)) {
291 /* Defer: outstanding buffers */ 290 /* Defer: outstanding buffers */
292 sk->sk_timer.function = nr_destroy_timer; 291 sk->sk_timer.function = nr_destroy_timer;
293 sk->sk_timer.expires = jiffies + 2 * HZ; 292 sk->sk_timer.expires = jiffies + 2 * HZ;
@@ -1206,7 +1205,7 @@ static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1206 long amount; 1205 long amount;
1207 1206
1208 lock_sock(sk); 1207 lock_sock(sk);
1209 amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); 1208 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1210 if (amount < 0) 1209 if (amount < 0)
1211 amount = 0; 1210 amount = 0;
1212 release_sock(sk); 1211 release_sock(sk);
@@ -1342,8 +1341,8 @@ static int nr_info_show(struct seq_file *seq, void *v)
1342 nr->n2count, 1341 nr->n2count,
1343 nr->n2, 1342 nr->n2,
1344 nr->window, 1343 nr->window,
1345 atomic_read(&s->sk_wmem_alloc), 1344 sk_wmem_alloc_get(s),
1346 atomic_read(&s->sk_rmem_alloc), 1345 sk_rmem_alloc_get(s),
1347 s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L); 1346 s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
1348 1347
1349 bh_unlock_sock(s); 1348 bh_unlock_sock(s);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4f76e5552d8e..ebe5718baa31 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1987,7 +1987,8 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
1987 switch (cmd) { 1987 switch (cmd) {
1988 case SIOCOUTQ: 1988 case SIOCOUTQ:
1989 { 1989 {
1990 int amount = atomic_read(&sk->sk_wmem_alloc); 1990 int amount = sk_wmem_alloc_get(sk);
1991
1991 return put_user(amount, (int __user *)arg); 1992 return put_user(amount, (int __user *)arg);
1992 } 1993 }
1993 case SIOCINQ: 1994 case SIOCINQ:
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 4e68ab439d5d..79693fe2001e 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -56,7 +56,6 @@ struct rfkill {
56 u32 idx; 56 u32 idx;
57 57
58 bool registered; 58 bool registered;
59 bool suspended;
60 bool persistent; 59 bool persistent;
61 60
62 const struct rfkill_ops *ops; 61 const struct rfkill_ops *ops;
@@ -224,7 +223,7 @@ static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
224 223
225static void rfkill_event(struct rfkill *rfkill) 224static void rfkill_event(struct rfkill *rfkill)
226{ 225{
227 if (!rfkill->registered || rfkill->suspended) 226 if (!rfkill->registered)
228 return; 227 return;
229 228
230 kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE); 229 kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
@@ -270,6 +269,9 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
270 unsigned long flags; 269 unsigned long flags;
271 int err; 270 int err;
272 271
272 if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
273 return;
274
273 /* 275 /*
274 * Some platforms (...!) generate input events which affect the 276 * Some platforms (...!) generate input events which affect the
275 * _hard_ kill state -- whenever something tries to change the 277 * _hard_ kill state -- whenever something tries to change the
@@ -292,9 +294,6 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
292 rfkill->state |= RFKILL_BLOCK_SW_SETCALL; 294 rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
293 spin_unlock_irqrestore(&rfkill->lock, flags); 295 spin_unlock_irqrestore(&rfkill->lock, flags);
294 296
295 if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
296 return;
297
298 err = rfkill->ops->set_block(rfkill->data, blocked); 297 err = rfkill->ops->set_block(rfkill->data, blocked);
299 298
300 spin_lock_irqsave(&rfkill->lock, flags); 299 spin_lock_irqsave(&rfkill->lock, flags);
@@ -508,19 +507,32 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
508 blocked = blocked || hwblock; 507 blocked = blocked || hwblock;
509 spin_unlock_irqrestore(&rfkill->lock, flags); 508 spin_unlock_irqrestore(&rfkill->lock, flags);
510 509
511 if (!rfkill->registered) { 510 if (!rfkill->registered)
512 rfkill->persistent = true; 511 return blocked;
513 } else {
514 if (prev != blocked && !hwblock)
515 schedule_work(&rfkill->uevent_work);
516 512
517 rfkill_led_trigger_event(rfkill); 513 if (prev != blocked && !hwblock)
518 } 514 schedule_work(&rfkill->uevent_work);
515
516 rfkill_led_trigger_event(rfkill);
519 517
520 return blocked; 518 return blocked;
521} 519}
522EXPORT_SYMBOL(rfkill_set_sw_state); 520EXPORT_SYMBOL(rfkill_set_sw_state);
523 521
522void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
523{
524 unsigned long flags;
525
526 BUG_ON(!rfkill);
527 BUG_ON(rfkill->registered);
528
529 spin_lock_irqsave(&rfkill->lock, flags);
530 __rfkill_set_sw_state(rfkill, blocked);
531 rfkill->persistent = true;
532 spin_unlock_irqrestore(&rfkill->lock, flags);
533}
534EXPORT_SYMBOL(rfkill_init_sw_state);
535
524void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) 536void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
525{ 537{
526 unsigned long flags; 538 unsigned long flags;
@@ -598,6 +610,15 @@ static ssize_t rfkill_idx_show(struct device *dev,
598 return sprintf(buf, "%d\n", rfkill->idx); 610 return sprintf(buf, "%d\n", rfkill->idx);
599} 611}
600 612
613static ssize_t rfkill_persistent_show(struct device *dev,
614 struct device_attribute *attr,
615 char *buf)
616{
617 struct rfkill *rfkill = to_rfkill(dev);
618
619 return sprintf(buf, "%d\n", rfkill->persistent);
620}
621
601static u8 user_state_from_blocked(unsigned long state) 622static u8 user_state_from_blocked(unsigned long state)
602{ 623{
603 if (state & RFKILL_BLOCK_HW) 624 if (state & RFKILL_BLOCK_HW)
@@ -656,6 +677,7 @@ static struct device_attribute rfkill_dev_attrs[] = {
656 __ATTR(name, S_IRUGO, rfkill_name_show, NULL), 677 __ATTR(name, S_IRUGO, rfkill_name_show, NULL),
657 __ATTR(type, S_IRUGO, rfkill_type_show, NULL), 678 __ATTR(type, S_IRUGO, rfkill_type_show, NULL),
658 __ATTR(index, S_IRUGO, rfkill_idx_show, NULL), 679 __ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
680 __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
659 __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store), 681 __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
660 __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), 682 __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
661 __ATTR_NULL 683 __ATTR_NULL
@@ -718,8 +740,6 @@ static int rfkill_suspend(struct device *dev, pm_message_t state)
718 740
719 rfkill_pause_polling(rfkill); 741 rfkill_pause_polling(rfkill);
720 742
721 rfkill->suspended = true;
722
723 return 0; 743 return 0;
724} 744}
725 745
@@ -728,10 +748,10 @@ static int rfkill_resume(struct device *dev)
728 struct rfkill *rfkill = to_rfkill(dev); 748 struct rfkill *rfkill = to_rfkill(dev);
729 bool cur; 749 bool cur;
730 750
731 cur = !!(rfkill->state & RFKILL_BLOCK_SW); 751 if (!rfkill->persistent) {
732 rfkill_set_block(rfkill, cur); 752 cur = !!(rfkill->state & RFKILL_BLOCK_SW);
733 753 rfkill_set_block(rfkill, cur);
734 rfkill->suspended = false; 754 }
735 755
736 rfkill_resume_polling(rfkill); 756 rfkill_resume_polling(rfkill);
737 757
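The new rfkill_init_sw_state() replaces the implicit "call rfkill_set_sw_state() before registration" convention removed above. A hedged usage sketch for a driver whose firmware persists the radio state; the example_* names are hypothetical:

	static int example_wlan_probe(struct device *dev,
				      struct example_priv *priv)
	{
		struct rfkill *rk;
		int err;

		rk = rfkill_alloc("example-wlan", dev, RFKILL_TYPE_WLAN,
				  &example_rfkill_ops, priv);
		if (!rk)
			return -ENOMEM;

		/* Declare the initial soft-block state before registration;
		 * this also marks the state persistent, so rfkill_resume()
		 * will not reassert it over whatever the hardware did while
		 * suspended. */
		rfkill_init_sw_state(rk, example_fw_reports_blocked(priv));

		err = rfkill_register(rk);
		if (err) {
			rfkill_destroy(rk);
			return err;
		}
		priv->rfkill = rk;
		return 0;
	}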
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 877a7f65f707..6bd8e93869ed 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -356,8 +356,7 @@ void rose_destroy_socket(struct sock *sk)
356 kfree_skb(skb); 356 kfree_skb(skb);
357 } 357 }
358 358
359 if (atomic_read(&sk->sk_wmem_alloc) || 359 if (sk_has_allocations(sk)) {
360 atomic_read(&sk->sk_rmem_alloc)) {
361 /* Defer: outstanding buffers */ 360 /* Defer: outstanding buffers */
362 setup_timer(&sk->sk_timer, rose_destroy_timer, 361 setup_timer(&sk->sk_timer, rose_destroy_timer,
363 (unsigned long)sk); 362 (unsigned long)sk);
@@ -1310,7 +1309,8 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1310 switch (cmd) { 1309 switch (cmd) {
1311 case TIOCOUTQ: { 1310 case TIOCOUTQ: {
1312 long amount; 1311 long amount;
1313 amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); 1312
1313 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1314 if (amount < 0) 1314 if (amount < 0)
1315 amount = 0; 1315 amount = 0;
1316 return put_user(amount, (unsigned int __user *) argp); 1316 return put_user(amount, (unsigned int __user *) argp);
@@ -1481,8 +1481,8 @@ static int rose_info_show(struct seq_file *seq, void *v)
1481 rose->hb / HZ, 1481 rose->hb / HZ,
1482 ax25_display_timer(&rose->idletimer) / (60 * HZ), 1482 ax25_display_timer(&rose->idletimer) / (60 * HZ),
1483 rose->idle / (60 * HZ), 1483 rose->idle / (60 * HZ),
1484 atomic_read(&s->sk_wmem_alloc), 1484 sk_wmem_alloc_get(s),
1485 atomic_read(&s->sk_rmem_alloc), 1485 sk_rmem_alloc_get(s),
1486 s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L); 1486 s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
1487 } 1487 }
1488 1488
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index f8f047b61245..723964c3ee4f 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -294,6 +294,8 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
294 if (police->tcfp_ewma_rate && 294 if (police->tcfp_ewma_rate &&
295 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { 295 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
296 police->tcf_qstats.overlimits++; 296 police->tcf_qstats.overlimits++;
297 if (police->tcf_action == TC_ACT_SHOT)
298 police->tcf_qstats.drops++;
297 spin_unlock(&police->tcf_lock); 299 spin_unlock(&police->tcf_lock);
298 return police->tcf_action; 300 return police->tcf_action;
299 } 301 }
@@ -327,6 +329,8 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
327 } 329 }
328 330
329 police->tcf_qstats.overlimits++; 331 police->tcf_qstats.overlimits++;
332 if (police->tcf_action == TC_ACT_SHOT)
333 police->tcf_qstats.drops++;
330 spin_unlock(&police->tcf_lock); 334 spin_unlock(&police->tcf_lock);
331 return police->tcf_action; 335 return police->tcf_action;
332} 336}
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 266151ae85a3..18d85d259104 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -349,13 +349,13 @@ META_COLLECTOR(int_sk_type)
349META_COLLECTOR(int_sk_rmem_alloc) 349META_COLLECTOR(int_sk_rmem_alloc)
350{ 350{
351 SKIP_NONLOCAL(skb); 351 SKIP_NONLOCAL(skb);
352 dst->value = atomic_read(&skb->sk->sk_rmem_alloc); 352 dst->value = sk_rmem_alloc_get(skb->sk);
353} 353}
354 354
355META_COLLECTOR(int_sk_wmem_alloc) 355META_COLLECTOR(int_sk_wmem_alloc)
356{ 356{
357 SKIP_NONLOCAL(skb); 357 SKIP_NONLOCAL(skb);
358 dst->value = atomic_read(&skb->sk->sk_wmem_alloc); 358 dst->value = sk_wmem_alloc_get(skb->sk);
359} 359}
360 360
361META_COLLECTOR(int_sk_omem_alloc) 361META_COLLECTOR(int_sk_omem_alloc)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 0f01e5d8a24f..35ba035970a2 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -130,7 +130,7 @@ static inline int sctp_wspace(struct sctp_association *asoc)
130 if (asoc->ep->sndbuf_policy) 130 if (asoc->ep->sndbuf_policy)
131 amt = asoc->sndbuf_used; 131 amt = asoc->sndbuf_used;
132 else 132 else
133 amt = atomic_read(&asoc->base.sk->sk_wmem_alloc); 133 amt = sk_wmem_alloc_get(asoc->base.sk);
134 134
135 if (amt >= asoc->base.sk->sk_sndbuf) { 135 if (amt >= asoc->base.sk->sk_sndbuf) {
136 if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK) 136 if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
@@ -6523,7 +6523,7 @@ static int sctp_writeable(struct sock *sk)
6523{ 6523{
6524 int amt = 0; 6524 int amt = 0;
6525 6525
6526 amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); 6526 amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
6527 if (amt < 0) 6527 if (amt < 0)
6528 amt = 0; 6528 amt = 0;
6529 return amt; 6529 return amt;
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile
index 5369aa369b35..db73fd2a3f0e 100644
--- a/net/sunrpc/Makefile
+++ b/net/sunrpc/Makefile
@@ -13,5 +13,6 @@ sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
13 rpcb_clnt.o timer.o xdr.o \ 13 rpcb_clnt.o timer.o xdr.o \
14 sunrpc_syms.o cache.o rpc_pipe.o \ 14 sunrpc_syms.o cache.o rpc_pipe.o \
15 svc_xprt.o 15 svc_xprt.o
16sunrpc-$(CONFIG_NFS_V4_1) += backchannel_rqst.o bc_svc.o
16sunrpc-$(CONFIG_PROC_FS) += stats.o 17sunrpc-$(CONFIG_PROC_FS) += stats.o
17sunrpc-$(CONFIG_SYSCTL) += sysctl.o 18sunrpc-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
new file mode 100644
index 000000000000..553621fb2c41
--- /dev/null
+++ b/net/sunrpc/backchannel_rqst.c
@@ -0,0 +1,281 @@
1/******************************************************************************
2
3(c) 2007 Network Appliance, Inc. All Rights Reserved.
4(c) 2009 NetApp. All Rights Reserved.
5
6NetApp provides this source code under the GPL v2 License.
7The GPL v2 license is available at
8http://opensource.org/licenses/gpl-license.php.
9
10THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
11"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
12LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
13A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
14CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
15EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
16PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
17PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
18LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
19NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
20SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
21
22******************************************************************************/
23
24#include <linux/tcp.h>
25#include <linux/sunrpc/xprt.h>
26
27#ifdef RPC_DEBUG
28#define RPCDBG_FACILITY RPCDBG_TRANS
29#endif
30
31#if defined(CONFIG_NFS_V4_1)
32
33/*
34 * Helper routines that track the number of preallocation elements
35 * on the transport.
36 */
37static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
38{
39 return xprt->bc_alloc_count > 0;
40}
41
42static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
43{
44 xprt->bc_alloc_count += n;
45}
46
47static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
48{
49 return xprt->bc_alloc_count -= n;
50}
51
52/*
53 * Free the preallocated rpc_rqst structure and the memory
54 * buffers hanging off of it.
55 */
56static void xprt_free_allocation(struct rpc_rqst *req)
57{
58 struct xdr_buf *xbufp;
59
60 dprintk("RPC: free allocations for req= %p\n", req);
61 BUG_ON(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
62 xbufp = &req->rq_private_buf;
63 free_page((unsigned long)xbufp->head[0].iov_base);
64 xbufp = &req->rq_snd_buf;
65 free_page((unsigned long)xbufp->head[0].iov_base);
66 list_del(&req->rq_bc_pa_list);
67 kfree(req);
68}
69
70/*
71 * Preallocate up to min_reqs structures and related buffers for use
72 * by the backchannel. This function can be called multiple times
73 * when creating new sessions that use the same rpc_xprt. The
74 * preallocated buffers are added to the pool of resources used by
 75 * the rpc_xprt. Any one of these resources may be used by an
76 * incoming callback request. It's up to the higher levels in the
77 * stack to enforce that the maximum number of session slots is not
78 * being exceeded.
79 *
80 * Some callback arguments can be large. For example, a pNFS server
 81 * using multiple deviceIDs. The list can be unbounded, but the client
82 * has the ability to tell the server the maximum size of the callback
83 * requests. Each deviceID is 16 bytes, so allocate one page
84 * for the arguments to have enough room to receive a number of these
85 * deviceIDs. The NFS client indicates to the pNFS server that its
86 * callback requests can be up to 4096 bytes in size.
87 */
88int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
89{
90 struct page *page_rcv = NULL, *page_snd = NULL;
91 struct xdr_buf *xbufp = NULL;
92 struct rpc_rqst *req, *tmp;
93 struct list_head tmp_list;
94 int i;
95
96 dprintk("RPC: setup backchannel transport\n");
97
98 /*
99 * We use a temporary list to keep track of the preallocated
100 * buffers. Once we're done building the list we splice it
101 * into the backchannel preallocation list off of the rpc_xprt
102 * struct. This helps minimize the amount of time the list
103 * lock is held on the rpc_xprt struct. It also makes cleanup
104 * easier in case of memory allocation errors.
105 */
106 INIT_LIST_HEAD(&tmp_list);
107 for (i = 0; i < min_reqs; i++) {
108 /* Pre-allocate one backchannel rpc_rqst */
109 req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
110 if (req == NULL) {
111 printk(KERN_ERR "Failed to create bc rpc_rqst\n");
112 goto out_free;
113 }
114
115 /* Add the allocated buffer to the tmp list */
116 dprintk("RPC: adding req= %p\n", req);
117 list_add(&req->rq_bc_pa_list, &tmp_list);
118
119 req->rq_xprt = xprt;
120 INIT_LIST_HEAD(&req->rq_list);
121 INIT_LIST_HEAD(&req->rq_bc_list);
122
123 /* Preallocate one XDR receive buffer */
124 page_rcv = alloc_page(GFP_KERNEL);
125 if (page_rcv == NULL) {
126 printk(KERN_ERR "Failed to create bc receive xbuf\n");
127 goto out_free;
128 }
129 xbufp = &req->rq_rcv_buf;
130 xbufp->head[0].iov_base = page_address(page_rcv);
131 xbufp->head[0].iov_len = PAGE_SIZE;
132 xbufp->tail[0].iov_base = NULL;
133 xbufp->tail[0].iov_len = 0;
134 xbufp->page_len = 0;
135 xbufp->len = PAGE_SIZE;
136 xbufp->buflen = PAGE_SIZE;
137
138 /* Preallocate one XDR send buffer */
139 page_snd = alloc_page(GFP_KERNEL);
140 if (page_snd == NULL) {
141 printk(KERN_ERR "Failed to create bc snd xbuf\n");
142 goto out_free;
143 }
144
145 xbufp = &req->rq_snd_buf;
146 xbufp->head[0].iov_base = page_address(page_snd);
147 xbufp->head[0].iov_len = 0;
148 xbufp->tail[0].iov_base = NULL;
149 xbufp->tail[0].iov_len = 0;
150 xbufp->page_len = 0;
151 xbufp->len = 0;
152 xbufp->buflen = PAGE_SIZE;
153 }
154
155 /*
156 * Add the temporary list to the backchannel preallocation list
157 */
158 spin_lock_bh(&xprt->bc_pa_lock);
159 list_splice(&tmp_list, &xprt->bc_pa_list);
160 xprt_inc_alloc_count(xprt, min_reqs);
161 spin_unlock_bh(&xprt->bc_pa_lock);
162
163 dprintk("RPC: setup backchannel transport done\n");
164 return 0;
165
166out_free:
167 /*
168 * Memory allocation failed, free the temporary list
169 */
170 list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
171 xprt_free_allocation(req);
172
173 dprintk("RPC: setup backchannel transport failed\n");
174 return -ENOMEM;
175}
176EXPORT_SYMBOL(xprt_setup_backchannel);
177
178/*
179 * Destroys the backchannel preallocated structures.
180 * Since these structures may have been allocated by multiple calls
181 * to xprt_setup_backchannel, we only destroy up to the maximum number
182 * of reqs specified by the caller.
183 * @xprt: the transport holding the preallocated structures
184 * @max_reqs: the maximum number of preallocated structures to destroy
185 */
186void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
187{
188 struct rpc_rqst *req = NULL, *tmp = NULL;
189
190 dprintk("RPC: destroy backchannel transport\n");
191
192 BUG_ON(max_reqs == 0);
193 spin_lock_bh(&xprt->bc_pa_lock);
194 xprt_dec_alloc_count(xprt, max_reqs);
195 list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
196 dprintk("RPC: req=%p\n", req);
197 xprt_free_allocation(req);
198 if (--max_reqs == 0)
199 break;
200 }
201 spin_unlock_bh(&xprt->bc_pa_lock);
202
203 dprintk("RPC: backchannel list empty= %s\n",
204 list_empty(&xprt->bc_pa_list) ? "true" : "false");
205}
206EXPORT_SYMBOL(xprt_destroy_backchannel);
207
208/*
209 * One or more rpc_rqst structures have been preallocated during the
210 * backchannel setup. Buffer space for the send and private XDR buffers
211 * has been preallocated as well. Use xprt_alloc_bc_request to obtain
212 * one of these requests; use xprt_free_bc_request to return it.
213 *
214 * We know that we're called in soft interrupt context, so the plain
215 * spin_lock suffices; there is no need for the bottom-half variant.
216 *
217 * Returns an available rpc_rqst, or NULL if none are available.
218 */
219struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
220{
221 struct rpc_rqst *req;
222
223 dprintk("RPC: allocate a backchannel request\n");
224 spin_lock(&xprt->bc_pa_lock);
225 if (!list_empty(&xprt->bc_pa_list)) {
226 req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
227 rq_bc_pa_list);
228 list_del(&req->rq_bc_pa_list);
229 } else {
230 req = NULL;
231 }
232 spin_unlock(&xprt->bc_pa_lock);
233
234 if (req != NULL) {
235 set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
236 req->rq_reply_bytes_recvd = 0;
237 req->rq_bytes_sent = 0;
238 memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
239 sizeof(req->rq_private_buf));
240 }
241 dprintk("RPC: backchannel req=%p\n", req);
242 return req;
243}
244
245/*
246 * Return the preallocated rpc_rqst structure and XDR buffers
247 * associated with this rpc_task.
248 */
249void xprt_free_bc_request(struct rpc_rqst *req)
250{
251 struct rpc_xprt *xprt = req->rq_xprt;
252
253 dprintk("RPC: free backchannel req=%p\n", req);
254
255 smp_mb__before_clear_bit();
256 BUG_ON(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
257 clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
258 smp_mb__after_clear_bit();
259
260 if (!xprt_need_to_requeue(xprt)) {
261 /*
262 * The last remaining session was destroyed while this
263 * entry was in use. Free the entry and don't attempt
264 * to add back to the list because there is no need to
265 * have any more preallocated entries.
266 */
267 dprintk("RPC: Last session removed req=%p\n", req);
268 xprt_free_allocation(req);
269 return;
270 }
271
272 /*
273 * Return it to the list of preallocations so that it
274 * may be reused by a new callback request.
275 */
276 spin_lock_bh(&xprt->bc_pa_lock);
277 list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list);
278 spin_unlock_bh(&xprt->bc_pa_lock);
279}
280
281#endif /* CONFIG_NFS_V4_1 */
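Taken together, the four entry points above define the lifecycle of the preallocated backchannel resources. An illustrative sequence, assuming a session with a single callback slot and eliding error handling:

	err = xprt_setup_backchannel(xprt, 1);	/* at session creation */

	/* ... in the transport receive path, when a callback call arrives: */
	req = xprt_alloc_bc_request(xprt);
	if (req != NULL) {
		/* decode the call, build the reply in req->rq_snd_buf ... */
		xprt_free_bc_request(req);	/* return it to the pool */
	}

	/* ... at session teardown: */
	xprt_destroy_backchannel(xprt, 1);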
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
new file mode 100644
index 000000000000..13f214f53120
--- /dev/null
+++ b/net/sunrpc/bc_svc.c
@@ -0,0 +1,81 @@
1/******************************************************************************
2
3(c) 2007 Network Appliance, Inc. All Rights Reserved.
4(c) 2009 NetApp. All Rights Reserved.
5
6NetApp provides this source code under the GPL v2 License.
7The GPL v2 license is available at
8http://opensource.org/licenses/gpl-license.php.
9
10THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
11"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
12LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
13A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
14CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
15EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
16PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
17PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
18LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
19NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
20SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
21
22******************************************************************************/
23
24/*
25 * The NFSv4.1 callback service helper routines.
26 * They implement the transport level processing required to send the
27 * reply over an existing open connection previously established by the client.
28 */
29
30#if defined(CONFIG_NFS_V4_1)
31
32#include <linux/module.h>
33
34#include <linux/sunrpc/xprt.h>
35#include <linux/sunrpc/sched.h>
36#include <linux/sunrpc/bc_xprt.h>
37
38#define RPCDBG_FACILITY RPCDBG_SVCDSP
39
40void bc_release_request(struct rpc_task *task)
41{
42 struct rpc_rqst *req = task->tk_rqstp;
43
44 dprintk("RPC: bc_release_request: task= %p\n", task);
45
46 /*
47 * Release this request only if it's a backchannel
48 * preallocated request
49 */
50 if (!bc_prealloc(req))
51 return;
52 xprt_free_bc_request(req);
53}
54
55/* Empty callback ops */
56static const struct rpc_call_ops nfs41_callback_ops = {
57};
58
59
60/*
61 * Send the callback reply
62 */
63int bc_send(struct rpc_rqst *req)
64{
65 struct rpc_task *task;
66 int ret;
67
68 dprintk("RPC: bc_send req= %p\n", req);
69 task = rpc_run_bc_task(req, &nfs41_callback_ops);
70 if (IS_ERR(task))
71 ret = PTR_ERR(task);
72 else {
73 BUG_ON(atomic_read(&task->tk_count) != 1);
74 ret = task->tk_status;
75 rpc_put_task(task);
76 }
 77 dprintk("RPC: bc_send ret= %d\n", ret);
 78 return ret;
79}
80
81#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 20029a79a5de..ff0c23053d2f 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -488,7 +488,7 @@ static void do_cache_clean(struct work_struct *work)
488{ 488{
489 int delay = 5; 489 int delay = 5;
490 if (cache_clean() == -1) 490 if (cache_clean() == -1)
491 delay = 30*HZ; 491 delay = round_jiffies_relative(30*HZ);
492 492
493 if (list_empty(&cache_list)) 493 if (list_empty(&cache_list))
494 delay = 0; 494 delay = 0;
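round_jiffies_relative() nudges the 30-second retry so the timer fires on a whole-second boundary, letting the kernel batch wakeups; the short 5-jiffy fast path is deliberately left unrounded. The computed delay is consumed a few lines below the hunk (not shown here), roughly:

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);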
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 5abab094441f..5bc2f45bddf0 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -36,7 +36,9 @@
36#include <linux/sunrpc/clnt.h> 36#include <linux/sunrpc/clnt.h>
37#include <linux/sunrpc/rpc_pipe_fs.h> 37#include <linux/sunrpc/rpc_pipe_fs.h>
38#include <linux/sunrpc/metrics.h> 38#include <linux/sunrpc/metrics.h>
39#include <linux/sunrpc/bc_xprt.h>
39 40
41#include "sunrpc.h"
40 42
41#ifdef RPC_DEBUG 43#ifdef RPC_DEBUG
42# define RPCDBG_FACILITY RPCDBG_CALL 44# define RPCDBG_FACILITY RPCDBG_CALL
@@ -63,6 +65,9 @@ static void call_decode(struct rpc_task *task);
63static void call_bind(struct rpc_task *task); 65static void call_bind(struct rpc_task *task);
64static void call_bind_status(struct rpc_task *task); 66static void call_bind_status(struct rpc_task *task);
65static void call_transmit(struct rpc_task *task); 67static void call_transmit(struct rpc_task *task);
68#if defined(CONFIG_NFS_V4_1)
69static void call_bc_transmit(struct rpc_task *task);
70#endif /* CONFIG_NFS_V4_1 */
66static void call_status(struct rpc_task *task); 71static void call_status(struct rpc_task *task);
67static void call_transmit_status(struct rpc_task *task); 72static void call_transmit_status(struct rpc_task *task);
68static void call_refresh(struct rpc_task *task); 73static void call_refresh(struct rpc_task *task);
@@ -613,6 +618,50 @@ rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
613} 618}
614EXPORT_SYMBOL_GPL(rpc_call_async); 619EXPORT_SYMBOL_GPL(rpc_call_async);
615 620
621#if defined(CONFIG_NFS_V4_1)
622/**
623 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
624 * rpc_execute against it
625 * @tk_ops: RPC call ops
626 */
627struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
628 const struct rpc_call_ops *tk_ops)
629{
630 struct rpc_task *task;
631 struct xdr_buf *xbufp = &req->rq_snd_buf;
632 struct rpc_task_setup task_setup_data = {
633 .callback_ops = tk_ops,
634 };
635
636 dprintk("RPC: rpc_run_bc_task req= %p\n", req);
637 /*
638 * Create an rpc_task to send the data
639 */
640 task = rpc_new_task(&task_setup_data);
641 if (!task) {
642 xprt_free_bc_request(req);
643 goto out;
644 }
645 task->tk_rqstp = req;
646
647 /*
648 * Set up the xdr_buf length.
649 * This also indicates that the buffer is XDR encoded already.
650 */
651 xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
652 xbufp->tail[0].iov_len;
653
654 task->tk_action = call_bc_transmit;
655 atomic_inc(&task->tk_count);
656 BUG_ON(atomic_read(&task->tk_count) != 2);
657 rpc_execute(task);
658
659out:
660 dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
661 return task;
662}
663#endif /* CONFIG_NFS_V4_1 */
664
616void 665void
617rpc_call_start(struct rpc_task *task) 666rpc_call_start(struct rpc_task *task)
618{ 667{
@@ -695,6 +744,19 @@ void rpc_force_rebind(struct rpc_clnt *clnt)
695EXPORT_SYMBOL_GPL(rpc_force_rebind); 744EXPORT_SYMBOL_GPL(rpc_force_rebind);
696 745
697/* 746/*
747 * Restart an (async) RPC call from the call_prepare state.
748 * Usually called from within the exit handler.
749 */
750void
751rpc_restart_call_prepare(struct rpc_task *task)
752{
753 if (RPC_ASSASSINATED(task))
754 return;
755 task->tk_action = rpc_prepare_task;
756}
757EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
758
759/*
698 * Restart an (async) RPC call. Usually called from within the 760 * Restart an (async) RPC call. Usually called from within the
699 * exit handler. 761 * exit handler.
700 */ 762 */
@@ -1085,7 +1147,7 @@ call_transmit(struct rpc_task *task)
1085 * in order to allow access to the socket to other RPC requests. 1147 * in order to allow access to the socket to other RPC requests.
1086 */ 1148 */
1087 call_transmit_status(task); 1149 call_transmit_status(task);
1088 if (task->tk_msg.rpc_proc->p_decode != NULL) 1150 if (rpc_reply_expected(task))
1089 return; 1151 return;
1090 task->tk_action = rpc_exit_task; 1152 task->tk_action = rpc_exit_task;
1091 rpc_wake_up_queued_task(&task->tk_xprt->pending, task); 1153 rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
@@ -1120,6 +1182,72 @@ call_transmit_status(struct rpc_task *task)
1120 } 1182 }
1121} 1183}
1122 1184
1185#if defined(CONFIG_NFS_V4_1)
1186/*
1187 * 5b. Send the backchannel RPC reply. On error, drop the reply. In
1188 * addition, disconnect on connectivity errors.
1189 */
1190static void
1191call_bc_transmit(struct rpc_task *task)
1192{
1193 struct rpc_rqst *req = task->tk_rqstp;
1194
1195 BUG_ON(task->tk_status != 0);
1196 task->tk_status = xprt_prepare_transmit(task);
1197 if (task->tk_status == -EAGAIN) {
1198 /*
1199 * Could not reserve the transport. Try again after the
1200 * transport is released.
1201 */
1202 task->tk_status = 0;
1203 task->tk_action = call_bc_transmit;
1204 return;
1205 }
1206
1207 task->tk_action = rpc_exit_task;
1208 if (task->tk_status < 0) {
1209 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1210 "error: %d\n", task->tk_status);
1211 return;
1212 }
1213
1214 xprt_transmit(task);
1215 xprt_end_transmit(task);
1216 dprint_status(task);
1217 switch (task->tk_status) {
1218 case 0:
1219 /* Success */
1220 break;
1221 case -EHOSTDOWN:
1222 case -EHOSTUNREACH:
1223 case -ENETUNREACH:
1224 case -ETIMEDOUT:
1225 /*
1226 * Problem reaching the server. Disconnect and let the
1227 * forechannel reestablish the connection. The server will
1228 * have to retransmit the backchannel request and we'll
1229 * reprocess it. Since these ops are idempotent, there's no
1230 * need to cache our reply at this time.
1231 */
1232 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1233 "error: %d\n", task->tk_status);
1234 xprt_conditional_disconnect(task->tk_xprt,
1235 req->rq_connect_cookie);
1236 break;
1237 default:
1238 /*
1239 * We were unable to reply and will have to drop the
1240 * request. The server should reconnect and retransmit.
1241 */
1242 BUG_ON(task->tk_status == -EAGAIN);
1243 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1244 "error: %d\n", task->tk_status);
1245 break;
1246 }
1247 rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
1248}
1249#endif /* CONFIG_NFS_V4_1 */
1250
1123/* 1251/*
1124 * 6. Sort out the RPC call status 1252 * 6. Sort out the RPC call status
1125 */ 1253 */
@@ -1130,8 +1258,8 @@ call_status(struct rpc_task *task)
1130 struct rpc_rqst *req = task->tk_rqstp; 1258 struct rpc_rqst *req = task->tk_rqstp;
1131 int status; 1259 int status;
1132 1260
1133 if (req->rq_received > 0 && !req->rq_bytes_sent) 1261 if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
1134 task->tk_status = req->rq_received; 1262 task->tk_status = req->rq_reply_bytes_recvd;
1135 1263
1136 dprint_status(task); 1264 dprint_status(task);
1137 1265
@@ -1248,7 +1376,7 @@ call_decode(struct rpc_task *task)
1248 1376
1249 /* 1377 /*
1250 * Ensure that we see all writes made by xprt_complete_rqst() 1378 * Ensure that we see all writes made by xprt_complete_rqst()
1251 * before it changed req->rq_received. 1379 * before it changed req->rq_reply_bytes_recvd.
1252 */ 1380 */
1253 smp_rmb(); 1381 smp_rmb();
1254 req->rq_rcv_buf.len = req->rq_private_buf.len; 1382 req->rq_rcv_buf.len = req->rq_private_buf.len;
@@ -1289,7 +1417,7 @@ out_retry:
1289 task->tk_status = 0; 1417 task->tk_status = 0;
1290 /* Note: rpc_verify_header() may have freed the RPC slot */ 1418 /* Note: rpc_verify_header() may have freed the RPC slot */
1291 if (task->tk_rqstp == req) { 1419 if (task->tk_rqstp == req) {
1292 req->rq_received = req->rq_rcv_buf.len = 0; 1420 req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
1293 if (task->tk_client->cl_discrtry) 1421 if (task->tk_client->cl_discrtry)
1294 xprt_conditional_disconnect(task->tk_xprt, 1422 xprt_conditional_disconnect(task->tk_xprt,
1295 req->rq_connect_cookie); 1423 req->rq_connect_cookie);
@@ -1377,13 +1505,14 @@ rpc_verify_header(struct rpc_task *task)
1377 } 1505 }
1378 if ((len -= 3) < 0) 1506 if ((len -= 3) < 0)
1379 goto out_overflow; 1507 goto out_overflow;
1380 p += 1; /* skip XID */
1381 1508
1509 p += 1; /* skip XID */
1382 if ((n = ntohl(*p++)) != RPC_REPLY) { 1510 if ((n = ntohl(*p++)) != RPC_REPLY) {
1383 dprintk("RPC: %5u %s: not an RPC reply: %x\n", 1511 dprintk("RPC: %5u %s: not an RPC reply: %x\n",
1384 task->tk_pid, __func__, n); 1512 task->tk_pid, __func__, n);
1385 goto out_garbage; 1513 goto out_garbage;
1386 } 1514 }
1515
1387 if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { 1516 if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
1388 if (--len < 0) 1517 if (--len < 0)
1389 goto out_overflow; 1518 goto out_overflow;
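rpc_restart_call_prepare() exists so async callers can bounce a task back to its rpc_call_prepare step from the exit handler (which is why sched.c exports rpc_prepare_task below). A hypothetical exit handler using it; example_call_done is illustrative, not an existing kernel symbol:

	static void example_call_done(struct rpc_task *task, void *calldata)
	{
		if (task->tk_status == -EAGAIN) {
			/* e.g. no session slot available: retry from prepare */
			rpc_restart_call_prepare(task);
			return;
		}
		/* normal completion handling ... */
	}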
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index ff50a0546865..1102ce1251f7 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -569,7 +569,7 @@ EXPORT_SYMBOL_GPL(rpc_delay);
569/* 569/*
570 * Helper to call task->tk_ops->rpc_call_prepare 570 * Helper to call task->tk_ops->rpc_call_prepare
571 */ 571 */
572static void rpc_prepare_task(struct rpc_task *task) 572void rpc_prepare_task(struct rpc_task *task)
573{ 573{
574 task->tk_ops->rpc_call_prepare(task, task->tk_calldata); 574 task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
575} 575}
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 1ef6e46d9da2..1b4e6791ecf3 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -141,12 +141,14 @@ EXPORT_SYMBOL_GPL(rpc_free_iostats);
141void rpc_count_iostats(struct rpc_task *task) 141void rpc_count_iostats(struct rpc_task *task)
142{ 142{
143 struct rpc_rqst *req = task->tk_rqstp; 143 struct rpc_rqst *req = task->tk_rqstp;
144 struct rpc_iostats *stats = task->tk_client->cl_metrics; 144 struct rpc_iostats *stats;
145 struct rpc_iostats *op_metrics; 145 struct rpc_iostats *op_metrics;
146 long rtt, execute, queue; 146 long rtt, execute, queue;
147 147
148 if (!stats || !req) 148 if (!task->tk_client || !task->tk_client->cl_metrics || !req)
149 return; 149 return;
150
151 stats = task->tk_client->cl_metrics;
150 op_metrics = &stats[task->tk_msg.rpc_proc->p_statidx]; 152 op_metrics = &stats[task->tk_msg.rpc_proc->p_statidx];
151 153
152 op_metrics->om_ops++; 154 op_metrics->om_ops++;
@@ -154,7 +156,7 @@ void rpc_count_iostats(struct rpc_task *task)
154 op_metrics->om_timeouts += task->tk_timeouts; 156 op_metrics->om_timeouts += task->tk_timeouts;
155 157
156 op_metrics->om_bytes_sent += task->tk_bytes_sent; 158 op_metrics->om_bytes_sent += task->tk_bytes_sent;
157 op_metrics->om_bytes_recv += req->rq_received; 159 op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd;
158 160
159 queue = (long)req->rq_xtime - task->tk_start; 161 queue = (long)req->rq_xtime - task->tk_start;
160 if (queue < 0) 162 if (queue < 0)
diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h
new file mode 100644
index 000000000000..5d9dd742264b
--- /dev/null
+++ b/net/sunrpc/sunrpc.h
@@ -0,0 +1,37 @@
1/******************************************************************************
2
3(c) 2008 NetApp. All Rights Reserved.
4
5NetApp provides this source code under the GPL v2 License.
6The GPL v2 license is available at
7http://opensource.org/licenses/gpl-license.php.
8
9THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
10"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
11LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
12A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
13CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
14EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
15PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
16PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
17LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
18NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
19SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
20
21******************************************************************************/
22
23/*
24 * Functions and macros used internally by RPC
25 */
26
27#ifndef _NET_SUNRPC_SUNRPC_H
28#define _NET_SUNRPC_SUNRPC_H
29
30static inline int rpc_reply_expected(struct rpc_task *task)
31{
32 return (task->tk_msg.rpc_proc != NULL) &&
33 (task->tk_msg.rpc_proc->p_decode != NULL);
34}
35
36#endif /* _NET_SUNRPC_SUNRPC_H */
37
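rpc_reply_expected() is the predicate call_transmit() now uses in clnt.c above. The extra rpc_proc NULL test matters because backchannel reply tasks carry no procedure info; condensed from the call site:

	/* After transmitting: only wait when a reply is expected. Backchannel
	 * replies and procedures without a decode routine exit immediately. */
	if (rpc_reply_expected(task))
		return;		/* xprt_complete_rqst() will wake the task */
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(&task->tk_xprt->pending, task);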
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 5ed8931dfe98..952f206ff307 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -25,6 +25,7 @@
25#include <linux/sunrpc/stats.h> 25#include <linux/sunrpc/stats.h>
26#include <linux/sunrpc/svcsock.h> 26#include <linux/sunrpc/svcsock.h>
27#include <linux/sunrpc/clnt.h> 27#include <linux/sunrpc/clnt.h>
28#include <linux/sunrpc/bc_xprt.h>
28 29
29#define RPCDBG_FACILITY RPCDBG_SVCDSP 30#define RPCDBG_FACILITY RPCDBG_SVCDSP
30 31
@@ -486,6 +487,10 @@ svc_destroy(struct svc_serv *serv)
486 if (svc_serv_is_pooled(serv)) 487 if (svc_serv_is_pooled(serv))
487 svc_pool_map_put(); 488 svc_pool_map_put();
488 489
490#if defined(CONFIG_NFS_V4_1)
491 svc_sock_destroy(serv->bc_xprt);
492#endif /* CONFIG_NFS_V4_1 */
493
489 svc_unregister(serv); 494 svc_unregister(serv);
490 kfree(serv->sv_pools); 495 kfree(serv->sv_pools);
491 kfree(serv); 496 kfree(serv);
@@ -970,20 +975,18 @@ svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
970} 975}
971 976
972/* 977/*
973 * Process the RPC request. 978 * Common routine for processing the RPC request.
974 */ 979 */
975int 980static int
976svc_process(struct svc_rqst *rqstp) 981svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
977{ 982{
978 struct svc_program *progp; 983 struct svc_program *progp;
979 struct svc_version *versp = NULL; /* compiler food */ 984 struct svc_version *versp = NULL; /* compiler food */
980 struct svc_procedure *procp = NULL; 985 struct svc_procedure *procp = NULL;
981 struct kvec * argv = &rqstp->rq_arg.head[0];
982 struct kvec * resv = &rqstp->rq_res.head[0];
983 struct svc_serv *serv = rqstp->rq_server; 986 struct svc_serv *serv = rqstp->rq_server;
984 kxdrproc_t xdr; 987 kxdrproc_t xdr;
985 __be32 *statp; 988 __be32 *statp;
986 u32 dir, prog, vers, proc; 989 u32 prog, vers, proc;
987 __be32 auth_stat, rpc_stat; 990 __be32 auth_stat, rpc_stat;
988 int auth_res; 991 int auth_res;
989 __be32 *reply_statp; 992 __be32 *reply_statp;
@@ -993,19 +996,6 @@ svc_process(struct svc_rqst *rqstp)
993 if (argv->iov_len < 6*4) 996 if (argv->iov_len < 6*4)
994 goto err_short_len; 997 goto err_short_len;
995 998
996 /* setup response xdr_buf.
997 * Initially it has just one page
998 */
999 rqstp->rq_resused = 1;
1000 resv->iov_base = page_address(rqstp->rq_respages[0]);
1001 resv->iov_len = 0;
1002 rqstp->rq_res.pages = rqstp->rq_respages + 1;
1003 rqstp->rq_res.len = 0;
1004 rqstp->rq_res.page_base = 0;
1005 rqstp->rq_res.page_len = 0;
1006 rqstp->rq_res.buflen = PAGE_SIZE;
1007 rqstp->rq_res.tail[0].iov_base = NULL;
1008 rqstp->rq_res.tail[0].iov_len = 0;
1009 /* Will be turned off only in gss privacy case: */ 999 /* Will be turned off only in gss privacy case: */
1010 rqstp->rq_splice_ok = 1; 1000 rqstp->rq_splice_ok = 1;
1011 /* Will be turned off only when NFSv4 Sessions are used */ 1001 /* Will be turned off only when NFSv4 Sessions are used */
@@ -1014,17 +1004,13 @@ svc_process(struct svc_rqst *rqstp)
1014 /* Setup reply header */ 1004 /* Setup reply header */
1015 rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp); 1005 rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
1016 1006
1017 rqstp->rq_xid = svc_getu32(argv);
1018 svc_putu32(resv, rqstp->rq_xid); 1007 svc_putu32(resv, rqstp->rq_xid);
1019 1008
1020 dir = svc_getnl(argv);
1021 vers = svc_getnl(argv); 1009 vers = svc_getnl(argv);
1022 1010
1023 /* First words of reply: */ 1011 /* First words of reply: */
1024 svc_putnl(resv, 1); /* REPLY */ 1012 svc_putnl(resv, 1); /* REPLY */
1025 1013
1026 if (dir != 0) /* direction != CALL */
1027 goto err_bad_dir;
1028 if (vers != 2) /* RPC version number */ 1014 if (vers != 2) /* RPC version number */
1029 goto err_bad_rpc; 1015 goto err_bad_rpc;
1030 1016
@@ -1147,7 +1133,7 @@ svc_process(struct svc_rqst *rqstp)
1147 sendit: 1133 sendit:
1148 if (svc_authorise(rqstp)) 1134 if (svc_authorise(rqstp))
1149 goto dropit; 1135 goto dropit;
1150 return svc_send(rqstp); 1136 return 1; /* Caller can now send it */
1151 1137
1152 dropit: 1138 dropit:
1153 svc_authorise(rqstp); /* doesn't hurt to call this twice */ 1139 svc_authorise(rqstp); /* doesn't hurt to call this twice */
@@ -1161,12 +1147,6 @@ err_short_len:
1161 1147
1162 goto dropit; /* drop request */ 1148 goto dropit; /* drop request */
1163 1149
1164err_bad_dir:
1165 svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
1166
1167 serv->sv_stats->rpcbadfmt++;
1168 goto dropit; /* drop request */
1169
1170err_bad_rpc: 1150err_bad_rpc:
1171 serv->sv_stats->rpcbadfmt++; 1151 serv->sv_stats->rpcbadfmt++;
1172 svc_putnl(resv, 1); /* REJECT */ 1152 svc_putnl(resv, 1); /* REJECT */
@@ -1220,6 +1200,100 @@ err_bad:
1220EXPORT_SYMBOL_GPL(svc_process); 1200EXPORT_SYMBOL_GPL(svc_process);
1221 1201
1222/* 1202/*
1203 * Process the RPC request.
1204 */
1205int
1206svc_process(struct svc_rqst *rqstp)
1207{
1208 struct kvec *argv = &rqstp->rq_arg.head[0];
1209 struct kvec *resv = &rqstp->rq_res.head[0];
1210 struct svc_serv *serv = rqstp->rq_server;
1211 u32 dir;
1212 int error;
1213
1214 /*
1215 * Setup response xdr_buf.
1216 * Initially it has just one page
1217 */
1218 rqstp->rq_resused = 1;
1219 resv->iov_base = page_address(rqstp->rq_respages[0]);
1220 resv->iov_len = 0;
1221 rqstp->rq_res.pages = rqstp->rq_respages + 1;
1222 rqstp->rq_res.len = 0;
1223 rqstp->rq_res.page_base = 0;
1224 rqstp->rq_res.page_len = 0;
1225 rqstp->rq_res.buflen = PAGE_SIZE;
1226 rqstp->rq_res.tail[0].iov_base = NULL;
1227 rqstp->rq_res.tail[0].iov_len = 0;
1228
1229 rqstp->rq_xid = svc_getu32(argv);
1230
1231 dir = svc_getnl(argv);
1232 if (dir != 0) {
1233 /* direction != CALL */
1234 svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
1235 serv->sv_stats->rpcbadfmt++;
1236 svc_drop(rqstp);
1237 return 0;
1238 }
1239
1240 error = svc_process_common(rqstp, argv, resv);
1241 if (error <= 0)
1242 return error;
1243
1244 return svc_send(rqstp);
1245}
1246
1247#if defined(CONFIG_NFS_V4_1)
1248/*
1249 * Process a backchannel RPC request that arrived over an existing
1250 * outbound connection
1251 */
1252int
1253bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
1254 struct svc_rqst *rqstp)
1255{
1256 struct kvec *argv = &rqstp->rq_arg.head[0];
1257 struct kvec *resv = &rqstp->rq_res.head[0];
1258 int error;
1259
1260 /* Build the svc_rqst used by the common processing routine */
1261 rqstp->rq_xprt = serv->bc_xprt;
1262 rqstp->rq_xid = req->rq_xid;
1263 rqstp->rq_prot = req->rq_xprt->prot;
1264 rqstp->rq_server = serv;
1265
1266 rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
1267 memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
1268 memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
1269 memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
1270
1271 /* reset result send buffer "put" position */
1272 resv->iov_len = 0;
1273
1274 if (rqstp->rq_prot != IPPROTO_TCP) {
 1275 printk(KERN_ERR "No support for non-TCP transports!\n");
1276 BUG();
1277 }
1278
1279 /*
1280 * Skip the next two words because they've already been
 1281 * processed in the transport
1282 */
1283 svc_getu32(argv); /* XID */
1284 svc_getnl(argv); /* CALLDIR */
1285
1286 error = svc_process_common(rqstp, argv, resv);
1287 if (error <= 0)
1288 return error;
1289
1290 memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
1291 return bc_send(req);
1292}
1293EXPORT_SYMBOL(bc_svc_process);
1294#endif /* CONFIG_NFS_V4_1 */
1295
1296/*
1223 * Return (transport-specific) limit on the rpc payload. 1297 * Return (transport-specific) limit on the rpc payload.
1224 */ 1298 */
1225u32 svc_max_payload(const struct svc_rqst *rqstp) 1299u32 svc_max_payload(const struct svc_rqst *rqstp)
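
The hunks above split the old monolithic svc_process() into a transport-neutral core, svc_process_common(), plus two thin entry points: svc_process() for ordinary server-side requests, which now reads the XID and direction word itself and drops anything that is not a CALL, and bc_svc_process() for NFSv4.1 backchannel requests arriving on an existing client connection, where the transport has already consumed those two words. A minimal userspace sketch of that shape follows; every name in it is illustrative, not the kernel API.

/*
 * Minimal userspace sketch of the split above: one transport-neutral
 * core, two entry points. All names here are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static int process_common(uint32_t xid, const char *payload)
{
	/* work shared by the forward and backchannel paths */
	printf("processing xid %08x: %s\n", (unsigned)xid, payload);
	return 1;			/* >0: caller may now send the reply */
}

/* forward path: parse XID and direction ourselves, drop non-CALLs */
static int process_forward(const uint32_t *words, const char *payload)
{
	uint32_t xid = words[0];
	uint32_t dir = words[1];

	if (dir != 0) {			/* direction != CALL */
		fprintf(stderr, "bad direction %u, dropping request\n",
			(unsigned)dir);
		return 0;
	}
	return process_common(xid, payload);
}

/* backchannel path: transport already consumed XID and CALLDIR */
static int process_backchannel(uint32_t xid, const char *payload)
{
	return process_common(xid, payload);
}

int main(void)
{
	uint32_t call[2] = { 0xdeadbeef, 0 };

	process_forward(call, "forward request");
	process_backchannel(0xfeedface, "backchannel request");
	return 0;
}
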
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index c200d92e57e4..6f33d33cc064 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -11,6 +11,7 @@
11#include <net/sock.h> 11#include <net/sock.h>
12#include <linux/sunrpc/stats.h> 12#include <linux/sunrpc/stats.h>
13#include <linux/sunrpc/svc_xprt.h> 13#include <linux/sunrpc/svc_xprt.h>
14#include <linux/sunrpc/svcsock.h>
14 15
15#define RPCDBG_FACILITY RPCDBG_SVCXPRT 16#define RPCDBG_FACILITY RPCDBG_SVCXPRT
16 17
@@ -1097,36 +1098,58 @@ struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
1097} 1098}
1098EXPORT_SYMBOL_GPL(svc_find_xprt); 1099EXPORT_SYMBOL_GPL(svc_find_xprt);
1099 1100
1100/* 1101static int svc_one_xprt_name(const struct svc_xprt *xprt,
1101 * Format a buffer with a list of the active transports. A zero for 1102 char *pos, int remaining)
1102 * the buflen parameter disables target buffer overflow checking. 1103{
1104 int len;
1105
1106 len = snprintf(pos, remaining, "%s %u\n",
1107 xprt->xpt_class->xcl_name,
1108 svc_xprt_local_port(xprt));
1109 if (len >= remaining)
1110 return -ENAMETOOLONG;
1111 return len;
1112}
1113
1114/**
1115 * svc_xprt_names - format a buffer with a list of transport names
1116 * @serv: pointer to an RPC service
1117 * @buf: pointer to a buffer to be filled in
1118 * @buflen: length of buffer to be filled in
1119 *
1120 * Fills in @buf with a string containing a list of transport names,
1121 * each name terminated with '\n'.
1122 *
 1123 * Returns positive length of the filled-in string on success, or a
 1124 * negative errno value on error.
1103 */ 1125 */
1104int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen) 1126int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
1105{ 1127{
1106 struct svc_xprt *xprt; 1128 struct svc_xprt *xprt;
1107 char xprt_str[64]; 1129 int len, totlen;
1108 int totlen = 0; 1130 char *pos;
1109 int len;
1110 1131
1111 /* Sanity check args */ 1132 /* Sanity check args */
1112 if (!serv) 1133 if (!serv)
1113 return 0; 1134 return 0;
1114 1135
1115 spin_lock_bh(&serv->sv_lock); 1136 spin_lock_bh(&serv->sv_lock);
1137
1138 pos = buf;
1139 totlen = 0;
1116 list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) { 1140 list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
1117 len = snprintf(xprt_str, sizeof(xprt_str), 1141 len = svc_one_xprt_name(xprt, pos, buflen - totlen);
1118 "%s %d\n", xprt->xpt_class->xcl_name, 1142 if (len < 0) {
1119 svc_xprt_local_port(xprt)); 1143 *buf = '\0';
1120 /* If the string was truncated, replace with error string */ 1144 totlen = len;
1121 if (len >= sizeof(xprt_str)) 1145 }
1122 strcpy(xprt_str, "name-too-long\n"); 1146 if (len <= 0)
1123 /* Don't overflow buffer */
1124 len = strlen(xprt_str);
1125 if (buflen && (len + totlen >= buflen))
1126 break; 1147 break;
1127 strcpy(buf+totlen, xprt_str); 1148
1149 pos += len;
1128 totlen += len; 1150 totlen += len;
1129 } 1151 }
1152
1130 spin_unlock_bh(&serv->sv_lock); 1153 spin_unlock_bh(&serv->sv_lock);
1131 return totlen; 1154 return totlen;
1132} 1155}
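
svc_xprt_names() now leans on the standard snprintf contract instead of a fixed 64-byte scratch buffer: snprintf reports the length the entry would have had, so len >= remaining means the entry did not fit and the helper can return -ENAMETOOLONG rather than silently clipping or substituting a placeholder string. A self-contained sketch of the idiom, with illustrative names only:

/*
 * Sketch of the snprintf truncation idiom used by svc_one_xprt_name().
 */
#include <errno.h>
#include <stdio.h>

static int one_name(char *pos, int remaining, const char *name, unsigned port)
{
	int len = snprintf(pos, remaining, "%s %u\n", name, port);

	if (len >= remaining) {
		*pos = '\0';		/* discard the partial entry */
		return -ENAMETOOLONG;
	}
	return len;
}

int main(void)
{
	char buf[12];
	char *pos = buf;
	int remaining = sizeof(buf);
	int len;

	len = one_name(pos, remaining, "tcp", 2049);	/* fits: 9 bytes */
	pos += len;
	remaining -= len;

	len = one_name(pos, remaining, "udp", 2049);	/* does not fit */
	printf("second entry: %d\nbuffer so far: %s", len, buf);
	return 0;
}
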
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 9d504234af4a..23128ee191ae 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -240,42 +240,76 @@ out:
240/* 240/*
241 * Report socket names for nfsdfs 241 * Report socket names for nfsdfs
242 */ 242 */
243static int one_sock_name(char *buf, struct svc_sock *svsk) 243static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining)
244{ 244{
245 const struct sock *sk = svsk->sk_sk;
246 const char *proto_name = sk->sk_protocol == IPPROTO_UDP ?
247 "udp" : "tcp";
245 int len; 248 int len;
246 249
247 switch(svsk->sk_sk->sk_family) { 250 switch (sk->sk_family) {
248 case AF_INET: 251 case PF_INET:
249 len = sprintf(buf, "ipv4 %s %pI4 %d\n", 252 len = snprintf(buf, remaining, "ipv4 %s %pI4 %d\n",
250 svsk->sk_sk->sk_protocol == IPPROTO_UDP ? 253 proto_name,
251 "udp" : "tcp", 254 &inet_sk(sk)->rcv_saddr,
252 &inet_sk(svsk->sk_sk)->rcv_saddr, 255 inet_sk(sk)->num);
253 inet_sk(svsk->sk_sk)->num); 256 break;
257 case PF_INET6:
258 len = snprintf(buf, remaining, "ipv6 %s %pI6 %d\n",
259 proto_name,
260 &inet6_sk(sk)->rcv_saddr,
261 inet_sk(sk)->num);
254 break; 262 break;
255 default: 263 default:
256 len = sprintf(buf, "*unknown-%d*\n", 264 len = snprintf(buf, remaining, "*unknown-%d*\n",
257 svsk->sk_sk->sk_family); 265 sk->sk_family);
266 }
267
268 if (len >= remaining) {
269 *buf = '\0';
270 return -ENAMETOOLONG;
258 } 271 }
259 return len; 272 return len;
260} 273}
261 274
262int 275/**
263svc_sock_names(char *buf, struct svc_serv *serv, char *toclose) 276 * svc_sock_names - construct a list of listener names in a string
277 * @serv: pointer to RPC service
278 * @buf: pointer to a buffer to fill in with socket names
279 * @buflen: size of the buffer to be filled
280 * @toclose: pointer to '\0'-terminated C string containing the name
281 * of a listener to be closed
282 *
283 * Fills in @buf with a '\n'-separated list of names of listener
284 * sockets. If @toclose is not NULL, the socket named by @toclose
285 * is closed, and is not included in the output list.
286 *
287 * Returns positive length of the socket name string, or a negative
288 * errno value on error.
289 */
290int svc_sock_names(struct svc_serv *serv, char *buf, const size_t buflen,
291 const char *toclose)
264{ 292{
265 struct svc_sock *svsk, *closesk = NULL; 293 struct svc_sock *svsk, *closesk = NULL;
266 int len = 0; 294 int len = 0;
267 295
268 if (!serv) 296 if (!serv)
269 return 0; 297 return 0;
298
270 spin_lock_bh(&serv->sv_lock); 299 spin_lock_bh(&serv->sv_lock);
271 list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list) { 300 list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list) {
272 int onelen = one_sock_name(buf+len, svsk); 301 int onelen = svc_one_sock_name(svsk, buf + len, buflen - len);
273 if (toclose && strcmp(toclose, buf+len) == 0) 302 if (onelen < 0) {
303 len = onelen;
304 break;
305 }
306 if (toclose && strcmp(toclose, buf + len) == 0)
274 closesk = svsk; 307 closesk = svsk;
275 else 308 else
276 len += onelen; 309 len += onelen;
277 } 310 }
278 spin_unlock_bh(&serv->sv_lock); 311 spin_unlock_bh(&serv->sv_lock);
312
279 if (closesk) 313 if (closesk)
280 /* Should unregister with portmap, but you cannot 314 /* Should unregister with portmap, but you cannot
281 * unregister just one protocol... 315 * unregister just one protocol...
@@ -346,6 +380,7 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
346 sock->sk->sk_sndbuf = snd * 2; 380 sock->sk->sk_sndbuf = snd * 2;
347 sock->sk->sk_rcvbuf = rcv * 2; 381 sock->sk->sk_rcvbuf = rcv * 2;
348 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK; 382 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
383 sock->sk->sk_write_space(sock->sk);
349 release_sock(sock->sk); 384 release_sock(sock->sk);
350#endif 385#endif
351} 386}
@@ -387,6 +422,15 @@ static void svc_write_space(struct sock *sk)
387 } 422 }
388} 423}
389 424
425static void svc_tcp_write_space(struct sock *sk)
426{
427 struct socket *sock = sk->sk_socket;
428
429 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock)
430 clear_bit(SOCK_NOSPACE, &sock->flags);
431 svc_write_space(sk);
432}
433
390/* 434/*
391 * Copy the UDP datagram's destination address to the rqstp structure. 435 * Copy the UDP datagram's destination address to the rqstp structure.
392 * The 'destination' address in this case is the address to which the 436 * The 'destination' address in this case is the address to which the
@@ -427,13 +471,14 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
427 long all[SVC_PKTINFO_SPACE / sizeof(long)]; 471 long all[SVC_PKTINFO_SPACE / sizeof(long)];
428 } buffer; 472 } buffer;
429 struct cmsghdr *cmh = &buffer.hdr; 473 struct cmsghdr *cmh = &buffer.hdr;
430 int err, len;
431 struct msghdr msg = { 474 struct msghdr msg = {
432 .msg_name = svc_addr(rqstp), 475 .msg_name = svc_addr(rqstp),
433 .msg_control = cmh, 476 .msg_control = cmh,
434 .msg_controllen = sizeof(buffer), 477 .msg_controllen = sizeof(buffer),
435 .msg_flags = MSG_DONTWAIT, 478 .msg_flags = MSG_DONTWAIT,
436 }; 479 };
480 size_t len;
481 int err;
437 482
438 if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags)) 483 if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
439 /* udp sockets need large rcvbuf as all pending 484 /* udp sockets need large rcvbuf as all pending
@@ -465,8 +510,8 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
465 return -EAGAIN; 510 return -EAGAIN;
466 } 511 }
467 len = svc_addr_len(svc_addr(rqstp)); 512 len = svc_addr_len(svc_addr(rqstp));
468 if (len < 0) 513 if (len == 0)
469 return len; 514 return -EAFNOSUPPORT;
470 rqstp->rq_addrlen = len; 515 rqstp->rq_addrlen = len;
471 if (skb->tstamp.tv64 == 0) { 516 if (skb->tstamp.tv64 == 0) {
472 skb->tstamp = ktime_get_real(); 517 skb->tstamp = ktime_get_real();
@@ -980,25 +1025,16 @@ static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
980static int svc_tcp_has_wspace(struct svc_xprt *xprt) 1025static int svc_tcp_has_wspace(struct svc_xprt *xprt)
981{ 1026{
982 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); 1027 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
983 struct svc_serv *serv = svsk->sk_xprt.xpt_server; 1028 struct svc_serv *serv = svsk->sk_xprt.xpt_server;
984 int required; 1029 int required;
985 int wspace;
986 1030
987 /* 1031 if (test_bit(XPT_LISTENER, &xprt->xpt_flags))
988 * Set the SOCK_NOSPACE flag before checking the available 1032 return 1;
989 * sock space. 1033 required = atomic_read(&xprt->xpt_reserved) + serv->sv_max_mesg;
990 */ 1034 if (sk_stream_wspace(svsk->sk_sk) >= required)
1035 return 1;
991 set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); 1036 set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
992 required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg; 1037 return 0;
993 wspace = sk_stream_wspace(svsk->sk_sk);
994
995 if (wspace < sk_stream_min_wspace(svsk->sk_sk))
996 return 0;
997 if (required * 2 > wspace)
998 return 0;
999
1000 clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
1001 return 1;
1002} 1038}
1003 1039
1004static struct svc_xprt *svc_tcp_create(struct svc_serv *serv, 1040static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
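
The two svcsock.c hunks above form a pair: svc_tcp_has_wspace() reports space unconditionally for listeners, otherwise compares the reserved space plus one maximum message against the stream's write space, setting SOCK_NOSPACE only on failure; svc_tcp_write_space() clears the flag once enough transmit-queue space has come back and then runs the generic wakeup. A userspace model of that handshake, with stand-in names and numbers:

/*
 * Model of the SOCK_NOSPACE handshake between svc_tcp_has_wspace()
 * and svc_tcp_write_space(). Names and numbers are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

static bool nospace;		/* stands in for SOCK_NOSPACE */
static int wspace;		/* stands in for sk_stream_wspace() */

static bool has_wspace(int reserved, int max_mesg, bool listener)
{
	if (listener)		/* listeners never write replies */
		return true;
	if (wspace >= reserved + max_mesg)
		return true;
	nospace = true;		/* ask the write-space callback to re-arm us */
	return false;
}

static void write_space(void)
{
	if (nospace && wspace >= 4096)	/* enough room came back */
		nospace = false;
	printf("running generic wakeup, nospace=%d\n", nospace);
}

int main(void)
{
	wspace = 1024;
	printf("has space: %d\n", has_wspace(0, 4096, false));
	wspace = 65536;		/* ACKs freed transmit-queue space */
	write_space();
	return 0;
}
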
@@ -1054,7 +1090,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
1054 dprintk("setting up TCP socket for reading\n"); 1090 dprintk("setting up TCP socket for reading\n");
1055 sk->sk_state_change = svc_tcp_state_change; 1091 sk->sk_state_change = svc_tcp_state_change;
1056 sk->sk_data_ready = svc_tcp_data_ready; 1092 sk->sk_data_ready = svc_tcp_data_ready;
1057 sk->sk_write_space = svc_write_space; 1093 sk->sk_write_space = svc_tcp_write_space;
1058 1094
1059 svsk->sk_reclen = 0; 1095 svsk->sk_reclen = 0;
1060 svsk->sk_tcplen = 0; 1096 svsk->sk_tcplen = 0;
@@ -1148,9 +1184,19 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
1148 return svsk; 1184 return svsk;
1149} 1185}
1150 1186
1151int svc_addsock(struct svc_serv *serv, 1187/**
1152 int fd, 1188 * svc_addsock - add a listener socket to an RPC service
1153 char *name_return) 1189 * @serv: pointer to RPC service to which to add a new listener
1190 * @fd: file descriptor of the new listener
1191 * @name_return: pointer to buffer to fill in with name of listener
1192 * @len: size of the buffer
1193 *
1194 * Fills in socket name and returns positive length of name if successful.
1195 * Name is terminated with '\n'. On error, returns a negative errno
1196 * value.
1197 */
1198int svc_addsock(struct svc_serv *serv, const int fd, char *name_return,
1199 const size_t len)
1154{ 1200{
1155 int err = 0; 1201 int err = 0;
1156 struct socket *so = sockfd_lookup(fd, &err); 1202 struct socket *so = sockfd_lookup(fd, &err);
@@ -1190,7 +1236,7 @@ int svc_addsock(struct svc_serv *serv,
1190 sockfd_put(so); 1236 sockfd_put(so);
1191 return err; 1237 return err;
1192 } 1238 }
1193 return one_sock_name(name_return, svsk); 1239 return svc_one_sock_name(svsk, name_return, len);
1194} 1240}
1195EXPORT_SYMBOL_GPL(svc_addsock); 1241EXPORT_SYMBOL_GPL(svc_addsock);
1196 1242
@@ -1327,3 +1373,42 @@ static void svc_sock_free(struct svc_xprt *xprt)
1327 sock_release(svsk->sk_sock); 1373 sock_release(svsk->sk_sock);
1328 kfree(svsk); 1374 kfree(svsk);
1329} 1375}
1376
1377/*
1378 * Create a svc_xprt.
1379 *
1380 * For internal use only (e.g. nfsv4.1 backchannel).
1381 * Callers should typically use the xpo_create() method.
1382 */
1383struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot)
1384{
1385 struct svc_sock *svsk;
1386 struct svc_xprt *xprt = NULL;
1387
1388 dprintk("svc: %s\n", __func__);
1389 svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
1390 if (!svsk)
1391 goto out;
1392
1393 xprt = &svsk->sk_xprt;
1394 if (prot == IPPROTO_TCP)
1395 svc_xprt_init(&svc_tcp_class, xprt, serv);
1396 else if (prot == IPPROTO_UDP)
1397 svc_xprt_init(&svc_udp_class, xprt, serv);
1398 else
1399 BUG();
1400out:
1401 dprintk("svc: %s return %p\n", __func__, xprt);
1402 return xprt;
1403}
1404EXPORT_SYMBOL_GPL(svc_sock_create);
1405
1406/*
1407 * Destroy a svc_sock.
1408 */
1409void svc_sock_destroy(struct svc_xprt *xprt)
1410{
1411 if (xprt)
1412 kfree(container_of(xprt, struct svc_sock, sk_xprt));
1413}
1414EXPORT_SYMBOL_GPL(svc_sock_destroy);
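
svc_sock_create() and svc_sock_destroy() exist so the NFSv4.1 backchannel can own a svc_xprt that is not backed by a listening socket: only the embedded xprt is initialized and handed out, and teardown recovers the containing svc_sock with container_of(). A small userspace analogue of that allocate/embed/free pattern, using illustrative types rather than the kernel's:

/*
 * Allocate a container, hand out only the embedded generic object,
 * recover the container on teardown.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct xprt {
	const char *class_name;
};

struct sock_wrapper {
	int private_state;		/* transport-private fields */
	struct xprt xprt;		/* embedded generic transport */
};

static struct xprt *wrapper_create(const char *class_name)
{
	struct sock_wrapper *w = calloc(1, sizeof(*w));

	if (!w)
		return NULL;
	w->xprt.class_name = class_name;
	return &w->xprt;		/* callers see only the xprt */
}

static void wrapper_destroy(struct xprt *xprt)
{
	if (xprt)
		free(container_of(xprt, struct sock_wrapper, xprt));
}

int main(void)
{
	struct xprt *xprt = wrapper_create("tcp");

	if (!xprt)
		return 1;
	printf("created %s transport\n", xprt->class_name);
	wrapper_destroy(xprt);
	return 0;
}
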
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 06ca058572f2..f412a852bc73 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -12,8 +12,9 @@
12 * - Next, the caller puts together the RPC message, stuffs it into 12 * - Next, the caller puts together the RPC message, stuffs it into
13 * the request struct, and calls xprt_transmit(). 13 * the request struct, and calls xprt_transmit().
14 * - xprt_transmit sends the message and installs the caller on the 14 * - xprt_transmit sends the message and installs the caller on the
15 * transport's wait list. At the same time, it installs a timer that 15 * transport's wait list. At the same time, if a reply is expected,
16 * is run after the packet's timeout has expired. 16 * it installs a timer that is run after the packet's timeout has
17 * expired.
17 * - When a packet arrives, the data_ready handler walks the list of 18 * - When a packet arrives, the data_ready handler walks the list of
18 * pending requests for that transport. If a matching XID is found, the 19 * pending requests for that transport. If a matching XID is found, the
19 * caller is woken up, and the timer removed. 20 * caller is woken up, and the timer removed.
@@ -46,6 +47,8 @@
46#include <linux/sunrpc/clnt.h> 47#include <linux/sunrpc/clnt.h>
47#include <linux/sunrpc/metrics.h> 48#include <linux/sunrpc/metrics.h>
48 49
50#include "sunrpc.h"
51
49/* 52/*
50 * Local variables 53 * Local variables
51 */ 54 */
@@ -192,8 +195,8 @@ EXPORT_SYMBOL_GPL(xprt_load_transport);
192 */ 195 */
193int xprt_reserve_xprt(struct rpc_task *task) 196int xprt_reserve_xprt(struct rpc_task *task)
194{ 197{
195 struct rpc_xprt *xprt = task->tk_xprt;
196 struct rpc_rqst *req = task->tk_rqstp; 198 struct rpc_rqst *req = task->tk_rqstp;
199 struct rpc_xprt *xprt = req->rq_xprt;
197 200
198 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { 201 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
199 if (task == xprt->snd_task) 202 if (task == xprt->snd_task)
@@ -803,9 +806,10 @@ void xprt_complete_rqst(struct rpc_task *task, int copied)
803 806
804 list_del_init(&req->rq_list); 807 list_del_init(&req->rq_list);
805 req->rq_private_buf.len = copied; 808 req->rq_private_buf.len = copied;
806 /* Ensure all writes are done before we update req->rq_received */ 809 /* Ensure all writes are done before we update */
810 /* req->rq_reply_bytes_recvd */
807 smp_wmb(); 811 smp_wmb();
808 req->rq_received = copied; 812 req->rq_reply_bytes_recvd = copied;
809 rpc_wake_up_queued_task(&xprt->pending, task); 813 rpc_wake_up_queued_task(&xprt->pending, task);
810} 814}
811EXPORT_SYMBOL_GPL(xprt_complete_rqst); 815EXPORT_SYMBOL_GPL(xprt_complete_rqst);
@@ -820,7 +824,7 @@ static void xprt_timer(struct rpc_task *task)
820 dprintk("RPC: %5u xprt_timer\n", task->tk_pid); 824 dprintk("RPC: %5u xprt_timer\n", task->tk_pid);
821 825
822 spin_lock_bh(&xprt->transport_lock); 826 spin_lock_bh(&xprt->transport_lock);
823 if (!req->rq_received) { 827 if (!req->rq_reply_bytes_recvd) {
824 if (xprt->ops->timer) 828 if (xprt->ops->timer)
825 xprt->ops->timer(task); 829 xprt->ops->timer(task);
826 } else 830 } else
@@ -842,8 +846,8 @@ int xprt_prepare_transmit(struct rpc_task *task)
842 dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid); 846 dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);
843 847
844 spin_lock_bh(&xprt->transport_lock); 848 spin_lock_bh(&xprt->transport_lock);
845 if (req->rq_received && !req->rq_bytes_sent) { 849 if (req->rq_reply_bytes_recvd && !req->rq_bytes_sent) {
846 err = req->rq_received; 850 err = req->rq_reply_bytes_recvd;
847 goto out_unlock; 851 goto out_unlock;
848 } 852 }
849 if (!xprt->ops->reserve_xprt(task)) 853 if (!xprt->ops->reserve_xprt(task))
@@ -855,7 +859,7 @@ out_unlock:
855 859
856void xprt_end_transmit(struct rpc_task *task) 860void xprt_end_transmit(struct rpc_task *task)
857{ 861{
858 xprt_release_write(task->tk_xprt, task); 862 xprt_release_write(task->tk_rqstp->rq_xprt, task);
859} 863}
860 864
861/** 865/**
@@ -872,8 +876,11 @@ void xprt_transmit(struct rpc_task *task)
872 876
873 dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); 877 dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
874 878
875 if (!req->rq_received) { 879 if (!req->rq_reply_bytes_recvd) {
876 if (list_empty(&req->rq_list)) { 880 if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
881 /*
882 * Add to the list only if we're expecting a reply
883 */
877 spin_lock_bh(&xprt->transport_lock); 884 spin_lock_bh(&xprt->transport_lock);
878 /* Update the softirq receive buffer */ 885 /* Update the softirq receive buffer */
879 memcpy(&req->rq_private_buf, &req->rq_rcv_buf, 886 memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
@@ -908,8 +915,13 @@ void xprt_transmit(struct rpc_task *task)
908 /* Don't race with disconnect */ 915 /* Don't race with disconnect */
909 if (!xprt_connected(xprt)) 916 if (!xprt_connected(xprt))
910 task->tk_status = -ENOTCONN; 917 task->tk_status = -ENOTCONN;
911 else if (!req->rq_received) 918 else if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) {
919 /*
920 * Sleep on the pending queue since
921 * we're expecting a reply.
922 */
912 rpc_sleep_on(&xprt->pending, task, xprt_timer); 923 rpc_sleep_on(&xprt->pending, task, xprt_timer);
924 }
913 spin_unlock_bh(&xprt->transport_lock); 925 spin_unlock_bh(&xprt->transport_lock);
914} 926}
915 927
@@ -982,11 +994,17 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
982 */ 994 */
983void xprt_release(struct rpc_task *task) 995void xprt_release(struct rpc_task *task)
984{ 996{
985 struct rpc_xprt *xprt = task->tk_xprt; 997 struct rpc_xprt *xprt;
986 struct rpc_rqst *req; 998 struct rpc_rqst *req;
999 int is_bc_request;
987 1000
988 if (!(req = task->tk_rqstp)) 1001 if (!(req = task->tk_rqstp))
989 return; 1002 return;
1003
1004 /* Preallocated backchannel request? */
1005 is_bc_request = bc_prealloc(req);
1006
1007 xprt = req->rq_xprt;
990 rpc_count_iostats(task); 1008 rpc_count_iostats(task);
991 spin_lock_bh(&xprt->transport_lock); 1009 spin_lock_bh(&xprt->transport_lock);
992 xprt->ops->release_xprt(xprt, task); 1010 xprt->ops->release_xprt(xprt, task);
@@ -999,10 +1017,19 @@ void xprt_release(struct rpc_task *task)
999 mod_timer(&xprt->timer, 1017 mod_timer(&xprt->timer,
1000 xprt->last_used + xprt->idle_timeout); 1018 xprt->last_used + xprt->idle_timeout);
1001 spin_unlock_bh(&xprt->transport_lock); 1019 spin_unlock_bh(&xprt->transport_lock);
1002 xprt->ops->buf_free(req->rq_buffer); 1020 if (!bc_prealloc(req))
1021 xprt->ops->buf_free(req->rq_buffer);
1003 task->tk_rqstp = NULL; 1022 task->tk_rqstp = NULL;
1004 if (req->rq_release_snd_buf) 1023 if (req->rq_release_snd_buf)
1005 req->rq_release_snd_buf(req); 1024 req->rq_release_snd_buf(req);
1025
1026 /*
1027 * Early exit if this is a backchannel preallocated request.
1028 * There is no need to have it added to the RPC slot list.
1029 */
1030 if (is_bc_request)
1031 return;
1032
1006 memset(req, 0, sizeof(*req)); /* mark unused */ 1033 memset(req, 0, sizeof(*req)); /* mark unused */
1007 1034
1008 dprintk("RPC: %5u release request %p\n", task->tk_pid, req); 1035 dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
@@ -1049,6 +1076,11 @@ found:
1049 1076
1050 INIT_LIST_HEAD(&xprt->free); 1077 INIT_LIST_HEAD(&xprt->free);
1051 INIT_LIST_HEAD(&xprt->recv); 1078 INIT_LIST_HEAD(&xprt->recv);
1079#if defined(CONFIG_NFS_V4_1)
1080 spin_lock_init(&xprt->bc_pa_lock);
1081 INIT_LIST_HEAD(&xprt->bc_pa_list);
1082#endif /* CONFIG_NFS_V4_1 */
1083
1052 INIT_WORK(&xprt->task_cleanup, xprt_autoclose); 1084 INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
1053 setup_timer(&xprt->timer, xprt_init_autodisconnect, 1085 setup_timer(&xprt->timer, xprt_init_autodisconnect,
1054 (unsigned long)xprt); 1086 (unsigned long)xprt);
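
The xprt.c changes rename rq_received to rq_reply_bytes_recvd and make the wait-for-reply machinery conditional: a request is queued on the receive list and put to sleep on the pending queue only when rpc_reply_expected() says a reply will come, which is not the case for backchannel replies sent from the client side. A minimal model of that branch, with illustrative names:

/*
 * Only arm the reply path when a reply is expected.
 */
#include <stdbool.h>
#include <stdio.h>

struct request {
	bool reply_expected;
	int reply_bytes_recvd;	/* was rq_received before this series */
};

static void transmit(struct request *req)
{
	printf("message sent\n");
	if (req->reply_bytes_recvd == 0 && req->reply_expected)
		printf("queued on pending list, reply timer armed\n");
	else
		printf("fire and forget: no timer, no pending entry\n");
}

int main(void)
{
	struct request call = { .reply_expected = true };
	struct request bc_reply = { .reply_expected = false };

	transmit(&call);
	transmit(&bc_reply);
	return 0;
}
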
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 42a6f9f20285..9e884383134f 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -397,14 +397,14 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
397 if (!ch) 397 if (!ch)
398 return 0; 398 return 0;
399 399
400 /* Allocate temporary reply and chunk maps */
401 rpl_map = svc_rdma_get_req_map();
402 chl_map = svc_rdma_get_req_map();
403
404 svc_rdma_rcl_chunk_counts(ch, &ch_count, &byte_count); 400 svc_rdma_rcl_chunk_counts(ch, &ch_count, &byte_count);
405 if (ch_count > RPCSVC_MAXPAGES) 401 if (ch_count > RPCSVC_MAXPAGES)
406 return -EINVAL; 402 return -EINVAL;
407 403
404 /* Allocate temporary reply and chunk maps */
405 rpl_map = svc_rdma_get_req_map();
406 chl_map = svc_rdma_get_req_map();
407
408 if (!xprt->sc_frmr_pg_list_len) 408 if (!xprt->sc_frmr_pg_list_len)
409 sge_count = map_read_chunks(xprt, rqstp, hdr_ctxt, rmsgp, 409 sge_count = map_read_chunks(xprt, rqstp, hdr_ctxt, rmsgp,
410 rpl_map, chl_map, ch_count, 410 rpl_map, chl_map, ch_count,
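
The svc_rdma_recvfrom.c reorder is the classic validate-before-allocate fix: previously the ch_count check could return -EINVAL after svc_rdma_get_req_map() had already been called twice, leaking both maps. The corrected shape, reduced to plain C with an illustrative allocator and limit:

/*
 * Cheap validation first, allocation second, so the error path has
 * nothing to unwind.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAXPAGES 256

static int handle(int ch_count)
{
	char *rpl_map, *chl_map;

	if (ch_count > MAXPAGES)	/* validate first... */
		return -EINVAL;		/* ...so this return leaks nothing */

	rpl_map = malloc(64);		/* ...then allocate */
	chl_map = malloc(64);
	if (!rpl_map || !chl_map) {
		free(rpl_map);
		free(chl_map);
		return -ENOMEM;
	}
	/* ... use the maps ... */
	free(rpl_map);
	free(chl_map);
	return 0;
}

int main(void)
{
	printf("oversized chunk list: %d\n", handle(MAXPAGES + 1));
	printf("normal chunk list: %d\n", handle(4));
	return 0;
}
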
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 6c2d61586551..83c73c4d017a 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -34,6 +34,9 @@
34#include <linux/sunrpc/sched.h> 34#include <linux/sunrpc/sched.h>
35#include <linux/sunrpc/xprtsock.h> 35#include <linux/sunrpc/xprtsock.h>
36#include <linux/file.h> 36#include <linux/file.h>
37#ifdef CONFIG_NFS_V4_1
38#include <linux/sunrpc/bc_xprt.h>
39#endif
37 40
38#include <net/sock.h> 41#include <net/sock.h>
39#include <net/checksum.h> 42#include <net/checksum.h>
@@ -270,6 +273,13 @@ struct sock_xprt {
270#define TCP_RCV_COPY_FRAGHDR (1UL << 1) 273#define TCP_RCV_COPY_FRAGHDR (1UL << 1)
271#define TCP_RCV_COPY_XID (1UL << 2) 274#define TCP_RCV_COPY_XID (1UL << 2)
272#define TCP_RCV_COPY_DATA (1UL << 3) 275#define TCP_RCV_COPY_DATA (1UL << 3)
276#define TCP_RCV_READ_CALLDIR (1UL << 4)
277#define TCP_RCV_COPY_CALLDIR (1UL << 5)
278
279/*
280 * TCP RPC flags
281 */
282#define TCP_RPC_REPLY (1UL << 6)
273 283
274static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt) 284static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
275{ 285{
@@ -956,7 +966,7 @@ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_rea
956 transport->tcp_offset = 0; 966 transport->tcp_offset = 0;
957 967
958 /* Sanity check of the record length */ 968 /* Sanity check of the record length */
959 if (unlikely(transport->tcp_reclen < 4)) { 969 if (unlikely(transport->tcp_reclen < 8)) {
960 dprintk("RPC: invalid TCP record fragment length\n"); 970 dprintk("RPC: invalid TCP record fragment length\n");
961 xprt_force_disconnect(xprt); 971 xprt_force_disconnect(xprt);
962 return; 972 return;
@@ -991,33 +1001,77 @@ static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_r
991 if (used != len) 1001 if (used != len)
992 return; 1002 return;
993 transport->tcp_flags &= ~TCP_RCV_COPY_XID; 1003 transport->tcp_flags &= ~TCP_RCV_COPY_XID;
994 transport->tcp_flags |= TCP_RCV_COPY_DATA; 1004 transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
995 transport->tcp_copied = 4; 1005 transport->tcp_copied = 4;
996 dprintk("RPC: reading reply for XID %08x\n", 1006 dprintk("RPC: reading %s XID %08x\n",
1007 (transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
1008 : "request with",
997 ntohl(transport->tcp_xid)); 1009 ntohl(transport->tcp_xid));
998 xs_tcp_check_fraghdr(transport); 1010 xs_tcp_check_fraghdr(transport);
999} 1011}
1000 1012
1001static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_reader *desc) 1013static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
1014 struct xdr_skb_reader *desc)
1002{ 1015{
1003 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1016 size_t len, used;
1004 struct rpc_rqst *req; 1017 u32 offset;
1018 __be32 calldir;
1019
1020 /*
1021 * We want transport->tcp_offset to be 8 at the end of this routine
1022 * (4 bytes for the xid and 4 bytes for the call/reply flag).
1023 * When this function is called for the first time,
1024 * transport->tcp_offset is 4 (after having already read the xid).
1025 */
1026 offset = transport->tcp_offset - sizeof(transport->tcp_xid);
1027 len = sizeof(calldir) - offset;
1028 dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len);
1029 used = xdr_skb_read_bits(desc, &calldir, len);
1030 transport->tcp_offset += used;
1031 if (used != len)
1032 return;
1033 transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
1034 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
1035 transport->tcp_flags |= TCP_RCV_COPY_DATA;
1036 /*
1037 * We don't yet have the XDR buffer, so we will write the calldir
1038 * out after we get the buffer from the 'struct rpc_rqst'
1039 */
1040 if (ntohl(calldir) == RPC_REPLY)
1041 transport->tcp_flags |= TCP_RPC_REPLY;
1042 else
1043 transport->tcp_flags &= ~TCP_RPC_REPLY;
1044 dprintk("RPC: reading %s CALL/REPLY flag %08x\n",
1045 (transport->tcp_flags & TCP_RPC_REPLY) ?
1046 "reply for" : "request with", calldir);
1047 xs_tcp_check_fraghdr(transport);
1048}
1049
1050static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
1051 struct xdr_skb_reader *desc,
1052 struct rpc_rqst *req)
1053{
1054 struct sock_xprt *transport =
1055 container_of(xprt, struct sock_xprt, xprt);
1005 struct xdr_buf *rcvbuf; 1056 struct xdr_buf *rcvbuf;
1006 size_t len; 1057 size_t len;
1007 ssize_t r; 1058 ssize_t r;
1008 1059
1009 /* Find and lock the request corresponding to this xid */ 1060 rcvbuf = &req->rq_private_buf;
1010 spin_lock(&xprt->transport_lock); 1061
1011 req = xprt_lookup_rqst(xprt, transport->tcp_xid); 1062 if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
1012 if (!req) { 1063 /*
1013 transport->tcp_flags &= ~TCP_RCV_COPY_DATA; 1064 * Save the RPC direction in the XDR buffer
1014 dprintk("RPC: XID %08x request not found!\n", 1065 */
1015 ntohl(transport->tcp_xid)); 1066 __be32 calldir = transport->tcp_flags & TCP_RPC_REPLY ?
1016 spin_unlock(&xprt->transport_lock); 1067 htonl(RPC_REPLY) : 0;
1017 return; 1068
1069 memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
1070 &calldir, sizeof(calldir));
1071 transport->tcp_copied += sizeof(calldir);
1072 transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
1018 } 1073 }
1019 1074
1020 rcvbuf = &req->rq_private_buf;
1021 len = desc->count; 1075 len = desc->count;
1022 if (len > transport->tcp_reclen - transport->tcp_offset) { 1076 if (len > transport->tcp_reclen - transport->tcp_offset) {
1023 struct xdr_skb_reader my_desc; 1077 struct xdr_skb_reader my_desc;
@@ -1054,7 +1108,7 @@ static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_rea
1054 "tcp_offset = %u, tcp_reclen = %u\n", 1108 "tcp_offset = %u, tcp_reclen = %u\n",
1055 xprt, transport->tcp_copied, 1109 xprt, transport->tcp_copied,
1056 transport->tcp_offset, transport->tcp_reclen); 1110 transport->tcp_offset, transport->tcp_reclen);
1057 goto out; 1111 return;
1058 } 1112 }
1059 1113
1060 dprintk("RPC: XID %08x read %Zd bytes\n", 1114 dprintk("RPC: XID %08x read %Zd bytes\n",
@@ -1070,11 +1124,125 @@ static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_rea
1070 transport->tcp_flags &= ~TCP_RCV_COPY_DATA; 1124 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1071 } 1125 }
1072 1126
1073out: 1127 return;
1128}
1129
1130/*
1131 * Finds the request corresponding to the RPC xid and invokes the common
1132 * tcp read code to read the data.
1133 */
1134static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
1135 struct xdr_skb_reader *desc)
1136{
1137 struct sock_xprt *transport =
1138 container_of(xprt, struct sock_xprt, xprt);
1139 struct rpc_rqst *req;
1140
1141 dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid));
1142
1143 /* Find and lock the request corresponding to this xid */
1144 spin_lock(&xprt->transport_lock);
1145 req = xprt_lookup_rqst(xprt, transport->tcp_xid);
1146 if (!req) {
1147 dprintk("RPC: XID %08x request not found!\n",
1148 ntohl(transport->tcp_xid));
1149 spin_unlock(&xprt->transport_lock);
1150 return -1;
1151 }
1152
1153 xs_tcp_read_common(xprt, desc, req);
1154
1074 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) 1155 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
1075 xprt_complete_rqst(req->rq_task, transport->tcp_copied); 1156 xprt_complete_rqst(req->rq_task, transport->tcp_copied);
1157
1076 spin_unlock(&xprt->transport_lock); 1158 spin_unlock(&xprt->transport_lock);
1077 xs_tcp_check_fraghdr(transport); 1159 return 0;
1160}
1161
1162#if defined(CONFIG_NFS_V4_1)
1163/*
1164 * Obtains an rpc_rqst previously allocated and invokes the common
1165 * tcp read code to read the data. The result is placed in the callback
1166 * queue.
1167 * If we're unable to obtain the rpc_rqst we schedule the closing of the
1168 * connection and return -1.
1169 */
1170static inline int xs_tcp_read_callback(struct rpc_xprt *xprt,
1171 struct xdr_skb_reader *desc)
1172{
1173 struct sock_xprt *transport =
1174 container_of(xprt, struct sock_xprt, xprt);
1175 struct rpc_rqst *req;
1176
1177 req = xprt_alloc_bc_request(xprt);
1178 if (req == NULL) {
1179 printk(KERN_WARNING "Callback slot table overflowed\n");
1180 xprt_force_disconnect(xprt);
1181 return -1;
1182 }
1183
1184 req->rq_xid = transport->tcp_xid;
1185 dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid));
1186 xs_tcp_read_common(xprt, desc, req);
1187
1188 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) {
1189 struct svc_serv *bc_serv = xprt->bc_serv;
1190
1191 /*
1192 * Add callback request to callback list. The callback
1193 * service sleeps on the sv_cb_waitq waiting for new
 1194 * requests. Wake it up after enqueuing the
1195 * request.
1196 */
1197 dprintk("RPC: add callback request to list\n");
1198 spin_lock(&bc_serv->sv_cb_lock);
1199 list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
1200 spin_unlock(&bc_serv->sv_cb_lock);
1201 wake_up(&bc_serv->sv_cb_waitq);
1202 }
1203
1204 req->rq_private_buf.len = transport->tcp_copied;
1205
1206 return 0;
1207}
1208
1209static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
1210 struct xdr_skb_reader *desc)
1211{
1212 struct sock_xprt *transport =
1213 container_of(xprt, struct sock_xprt, xprt);
1214
1215 return (transport->tcp_flags & TCP_RPC_REPLY) ?
1216 xs_tcp_read_reply(xprt, desc) :
1217 xs_tcp_read_callback(xprt, desc);
1218}
1219#else
1220static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
1221 struct xdr_skb_reader *desc)
1222{
1223 return xs_tcp_read_reply(xprt, desc);
1224}
1225#endif /* CONFIG_NFS_V4_1 */
1226
1227/*
1228 * Read data off the transport. This can be either an RPC_CALL or an
1229 * RPC_REPLY. Relay the processing to helper functions.
1230 */
1231static void xs_tcp_read_data(struct rpc_xprt *xprt,
1232 struct xdr_skb_reader *desc)
1233{
1234 struct sock_xprt *transport =
1235 container_of(xprt, struct sock_xprt, xprt);
1236
1237 if (_xs_tcp_read_data(xprt, desc) == 0)
1238 xs_tcp_check_fraghdr(transport);
1239 else {
1240 /*
1241 * The transport_lock protects the request handling.
1242 * There's no need to hold it to update the tcp_flags.
1243 */
1244 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1245 }
1078} 1246}
1079 1247
1080static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc) 1248static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
@@ -1114,9 +1282,14 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns
1114 xs_tcp_read_xid(transport, &desc); 1282 xs_tcp_read_xid(transport, &desc);
1115 continue; 1283 continue;
1116 } 1284 }
1285 /* Read in the call/reply flag */
1286 if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
1287 xs_tcp_read_calldir(transport, &desc);
1288 continue;
1289 }
1117 /* Read in the request data */ 1290 /* Read in the request data */
1118 if (transport->tcp_flags & TCP_RCV_COPY_DATA) { 1291 if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
1119 xs_tcp_read_request(xprt, &desc); 1292 xs_tcp_read_data(xprt, &desc);
1120 continue; 1293 continue;
1121 } 1294 }
1122 /* Skip over any trailing bytes on short reads */ 1295 /* Skip over any trailing bytes on short reads */
@@ -1792,6 +1965,7 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
1792 */ 1965 */
1793 set_bit(XPRT_CONNECTION_CLOSE, &xprt->state); 1966 set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
1794 xprt_force_disconnect(xprt); 1967 xprt_force_disconnect(xprt);
1968 break;
1795 case -ECONNREFUSED: 1969 case -ECONNREFUSED:
1796 case -ECONNRESET: 1970 case -ECONNRESET:
1797 case -ENETUNREACH: 1971 case -ENETUNREACH:
@@ -2010,6 +2184,9 @@ static struct rpc_xprt_ops xs_tcp_ops = {
2010 .buf_free = rpc_free, 2184 .buf_free = rpc_free,
2011 .send_request = xs_tcp_send_request, 2185 .send_request = xs_tcp_send_request,
2012 .set_retrans_timeout = xprt_set_retrans_timeout_def, 2186 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2187#if defined(CONFIG_NFS_V4_1)
2188 .release_request = bc_release_request,
2189#endif /* CONFIG_NFS_V4_1 */
2013 .close = xs_tcp_close, 2190 .close = xs_tcp_close,
2014 .destroy = xs_destroy, 2191 .destroy = xs_destroy,
2015 .print_stats = xs_tcp_print_stats, 2192 .print_stats = xs_tcp_print_stats,
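
The xprtsock.c hunks insert a new parsing state between the XID and the payload: TCP_RCV_READ_CALLDIR reads the four-byte call/reply word, which may itself arrive split across segments (hence the offset/used bookkeeping), TCP_RPC_REPLY records the direction, and xs_tcp_read_data() then routes the record either to xs_tcp_read_reply() (XID lookup in the slot table) or to xs_tcp_read_callback() (a preallocated backchannel rqst queued for the callback service). A userspace model of the incremental calldir read, with illustrative names:

/*
 * The 4-byte direction word can arrive split across TCP segments, so
 * the parser keeps an offset and commits the direction flag only once
 * all four bytes are in.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RPC_REPLY 1

struct parser {
	uint32_t offset;		/* bytes consumed after the XID */
	uint8_t calldir[4];
	int is_reply;			/* stands in for TCP_RPC_REPLY */
};

/* feed up to len bytes; returns 1 once the calldir is complete */
static int read_calldir(struct parser *p, const uint8_t *data, size_t len)
{
	size_t want = sizeof(p->calldir) - p->offset;
	size_t used = len < want ? len : want;

	memcpy(p->calldir + p->offset, data, used);
	p->offset += used;
	if (p->offset < sizeof(p->calldir))
		return 0;		/* wait for the next segment */

	/* big-endian wire word, like ntohl() on the real transport */
	p->is_reply = p->calldir[3] == RPC_REPLY;
	return 1;
}

int main(void)
{
	struct parser p = { 0 };
	uint8_t wire[4] = { 0, 0, 0, RPC_REPLY };

	read_calldir(&p, wire, 2);		/* first segment: 2 bytes */
	if (read_calldir(&p, wire + 2, 2))	/* rest arrives later */
		printf("routing to %s path\n",
		       p.is_reply ? "reply" : "callback");
	return 0;
}
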
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 9dcc6e7f96ec..36d4e44d6233 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1946,7 +1946,7 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1946 1946
1947 switch (cmd) { 1947 switch (cmd) {
1948 case SIOCOUTQ: 1948 case SIOCOUTQ:
1949 amount = atomic_read(&sk->sk_wmem_alloc); 1949 amount = sk_wmem_alloc_get(sk);
1950 err = put_user(amount, (int __user *)arg); 1950 err = put_user(amount, (int __user *)arg);
1951 break; 1951 break;
1952 case SIOCINQ: 1952 case SIOCINQ:
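
The SIOCOUTQ hunk above is one of many in this series replacing open-coded atomic_read(&sk->sk_wmem_alloc) with the sk_wmem_alloc_get()/sk_rmem_alloc_get() accessors, so callers stop depending on how the counters are represented. A tiny analogue of the accessor pattern, with stand-in types rather than the kernel's struct sock:

/*
 * The representation of the counters is visible in exactly one place.
 */
#include <stdatomic.h>
#include <stdio.h>

struct sock_model {
	atomic_int wmem_alloc;
	atomic_int rmem_alloc;
};

static int wmem_alloc_get(struct sock_model *sk)
{
	return atomic_load(&sk->wmem_alloc);	/* representation hidden here */
}

static int rmem_alloc_get(struct sock_model *sk)
{
	return atomic_load(&sk->rmem_alloc);
}

static int has_allocations(struct sock_model *sk)
{
	return wmem_alloc_get(sk) || rmem_alloc_get(sk);
}

int main(void)
{
	struct sock_model sk = { .wmem_alloc = 3, .rmem_alloc = 0 };

	printf("outq bytes: %d, busy: %d\n",
	       wmem_alloc_get(&sk), has_allocations(&sk));
	return 0;
}
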
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 24168560ebae..241bddd0b4f1 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1687,13 +1687,52 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
1687 if (err) 1687 if (err)
1688 goto out_rtnl; 1688 goto out_rtnl;
1689 1689
1690 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && 1690 err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
1691 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) { 1691 if (err)
1692 err = -EINVAL;
1693 goto out; 1692 goto out;
1693
1694 /* validate settings */
1695 err = 0;
1696
1697 switch (dev->ieee80211_ptr->iftype) {
1698 case NL80211_IFTYPE_AP:
1699 case NL80211_IFTYPE_AP_VLAN:
1700 /* disallow mesh-specific things */
1701 if (params.plink_action)
1702 err = -EINVAL;
1703 break;
1704 case NL80211_IFTYPE_STATION:
1705 /* disallow everything but AUTHORIZED flag */
1706 if (params.plink_action)
1707 err = -EINVAL;
1708 if (params.vlan)
1709 err = -EINVAL;
1710 if (params.supported_rates)
1711 err = -EINVAL;
1712 if (params.ht_capa)
1713 err = -EINVAL;
1714 if (params.listen_interval >= 0)
1715 err = -EINVAL;
1716 if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
1717 err = -EINVAL;
1718 break;
1719 case NL80211_IFTYPE_MESH_POINT:
1720 /* disallow things mesh doesn't support */
1721 if (params.vlan)
1722 err = -EINVAL;
1723 if (params.ht_capa)
1724 err = -EINVAL;
1725 if (params.listen_interval >= 0)
1726 err = -EINVAL;
1727 if (params.supported_rates)
1728 err = -EINVAL;
1729 if (params.sta_flags_mask)
1730 err = -EINVAL;
1731 break;
1732 default:
1733 err = -EINVAL;
1694 } 1734 }
1695 1735
1696 err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
1697 if (err) 1736 if (err)
1698 goto out; 1737 goto out;
1699 1738
@@ -1728,9 +1767,6 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
1728 if (!info->attrs[NL80211_ATTR_MAC]) 1767 if (!info->attrs[NL80211_ATTR_MAC])
1729 return -EINVAL; 1768 return -EINVAL;
1730 1769
1731 if (!info->attrs[NL80211_ATTR_STA_AID])
1732 return -EINVAL;
1733
1734 if (!info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]) 1770 if (!info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL])
1735 return -EINVAL; 1771 return -EINVAL;
1736 1772
@@ -1745,9 +1781,11 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
1745 params.listen_interval = 1781 params.listen_interval =
1746 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); 1782 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
1747 1783
1748 params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); 1784 if (info->attrs[NL80211_ATTR_STA_AID]) {
1749 if (!params.aid || params.aid > IEEE80211_MAX_AID) 1785 params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
1750 return -EINVAL; 1786 if (!params.aid || params.aid > IEEE80211_MAX_AID)
1787 return -EINVAL;
1788 }
1751 1789
1752 if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) 1790 if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
1753 params.ht_capa = 1791 params.ht_capa =
@@ -1762,13 +1800,39 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
1762 if (err) 1800 if (err)
1763 goto out_rtnl; 1801 goto out_rtnl;
1764 1802
1765 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && 1803 err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
1766 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) { 1804 if (err)
1767 err = -EINVAL;
1768 goto out; 1805 goto out;
1806
1807 /* validate settings */
1808 err = 0;
1809
1810 switch (dev->ieee80211_ptr->iftype) {
1811 case NL80211_IFTYPE_AP:
1812 case NL80211_IFTYPE_AP_VLAN:
1813 /* all ok but must have AID */
1814 if (!params.aid)
1815 err = -EINVAL;
1816 break;
1817 case NL80211_IFTYPE_MESH_POINT:
1818 /* disallow things mesh doesn't support */
1819 if (params.vlan)
1820 err = -EINVAL;
1821 if (params.aid)
1822 err = -EINVAL;
1823 if (params.ht_capa)
1824 err = -EINVAL;
1825 if (params.listen_interval >= 0)
1826 err = -EINVAL;
1827 if (params.supported_rates)
1828 err = -EINVAL;
1829 if (params.sta_flags_mask)
1830 err = -EINVAL;
1831 break;
1832 default:
1833 err = -EINVAL;
1769 } 1834 }
1770 1835
1771 err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
1772 if (err) 1836 if (err)
1773 goto out; 1837 goto out;
1774 1838
@@ -1812,7 +1876,8 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
1812 goto out_rtnl; 1876 goto out_rtnl;
1813 1877
1814 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && 1878 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
1815 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) { 1879 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
1880 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
1816 err = -EINVAL; 1881 err = -EINVAL;
1817 goto out; 1882 goto out;
1818 } 1883 }
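
nl80211_set_station() and nl80211_new_station() now accumulate a verdict across a per-interface-type switch instead of rejecting everything that is not an AP, giving managed and mesh interfaces their own allow-lists. The shape, reduced to a self-contained sketch with illustrative fields and enum values:

/*
 * Collect every violation into err and check once at the end.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum iftype { IF_AP, IF_STATION, IF_MESH };

struct params {
	bool vlan, ht_capa, supported_rates, plink_action;
	int listen_interval;		/* -1 means "not set" */
};

static int validate(enum iftype type, const struct params *p)
{
	int err = 0;

	switch (type) {
	case IF_AP:			/* disallow mesh-specific things */
		if (p->plink_action)
			err = -EINVAL;
		break;
	case IF_STATION:		/* almost nothing may be changed */
		if (p->plink_action || p->vlan || p->supported_rates ||
		    p->ht_capa || p->listen_interval >= 0)
			err = -EINVAL;
		break;
	case IF_MESH:			/* disallow things mesh can't do */
		if (p->vlan || p->ht_capa || p->supported_rates ||
		    p->listen_interval >= 0)
			err = -EINVAL;
		break;
	default:
		err = -EINVAL;
	}
	return err;
}

int main(void)
{
	struct params p = { .vlan = true, .listen_interval = -1 };

	printf("station with vlan: %d\n", validate(IF_STATION, &p));
	return 0;
}
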
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index ed80af8ca5fb..21cdc872004e 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -332,14 +332,14 @@ static unsigned int x25_new_lci(struct x25_neigh *nb)
332/* 332/*
333 * Deferred destroy. 333 * Deferred destroy.
334 */ 334 */
335void x25_destroy_socket(struct sock *); 335static void __x25_destroy_socket(struct sock *);
336 336
337/* 337/*
338 * handler for deferred kills. 338 * handler for deferred kills.
339 */ 339 */
340static void x25_destroy_timer(unsigned long data) 340static void x25_destroy_timer(unsigned long data)
341{ 341{
342 x25_destroy_socket((struct sock *)data); 342 x25_destroy_socket_from_timer((struct sock *)data);
343} 343}
344 344
345/* 345/*
@@ -349,12 +349,10 @@ static void x25_destroy_timer(unsigned long data)
349 * will touch it and we are (fairly 8-) ) safe. 349 * will touch it and we are (fairly 8-) ) safe.
350 * Not static as it's used by the timer 350 * Not static as it's used by the timer
351 */ 351 */
352void x25_destroy_socket(struct sock *sk) 352static void __x25_destroy_socket(struct sock *sk)
353{ 353{
354 struct sk_buff *skb; 354 struct sk_buff *skb;
355 355
356 sock_hold(sk);
357 lock_sock(sk);
358 x25_stop_heartbeat(sk); 356 x25_stop_heartbeat(sk);
359 x25_stop_timer(sk); 357 x25_stop_timer(sk);
360 358
@@ -374,8 +372,7 @@ void x25_destroy_socket(struct sock *sk)
374 kfree_skb(skb); 372 kfree_skb(skb);
375 } 373 }
376 374
377 if (atomic_read(&sk->sk_wmem_alloc) || 375 if (sk_has_allocations(sk)) {
378 atomic_read(&sk->sk_rmem_alloc)) {
379 /* Defer: outstanding buffers */ 376 /* Defer: outstanding buffers */
380 sk->sk_timer.expires = jiffies + 10 * HZ; 377 sk->sk_timer.expires = jiffies + 10 * HZ;
381 sk->sk_timer.function = x25_destroy_timer; 378 sk->sk_timer.function = x25_destroy_timer;
@@ -385,7 +382,22 @@ void x25_destroy_socket(struct sock *sk)
385 /* drop last reference so sock_put will free */ 382 /* drop last reference so sock_put will free */
386 __sock_put(sk); 383 __sock_put(sk);
387 } 384 }
385}
386
387void x25_destroy_socket_from_timer(struct sock *sk)
388{
389 sock_hold(sk);
390 bh_lock_sock(sk);
391 __x25_destroy_socket(sk);
392 bh_unlock_sock(sk);
393 sock_put(sk);
394}
388 395
396static void x25_destroy_socket(struct sock *sk)
397{
398 sock_hold(sk);
399 lock_sock(sk);
400 __x25_destroy_socket(sk);
389 release_sock(sk); 401 release_sock(sk);
390 sock_put(sk); 402 sock_put(sk);
391} 403}
@@ -1259,8 +1271,8 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1259 1271
1260 switch (cmd) { 1272 switch (cmd) {
1261 case TIOCOUTQ: { 1273 case TIOCOUTQ: {
1262 int amount = sk->sk_sndbuf - 1274 int amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1263 atomic_read(&sk->sk_wmem_alloc); 1275
1264 if (amount < 0) 1276 if (amount < 0)
1265 amount = 0; 1277 amount = 0;
1266 rc = put_user(amount, (unsigned int __user *)argp); 1278 rc = put_user(amount, (unsigned int __user *)argp);
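
The af_x25.c refactor splits the teardown into a core that assumes the socket is already locked, plus one wrapper per calling context: x25_destroy_socket_from_timer() uses bh_lock_sock(), the spinlock form that is safe in softirq (timer) context, while the process-context x25_destroy_socket() keeps lock_sock()/release_sock(), which may sleep. A sketch of the wrapper split; a single pthread mutex stands in for both kernel primitives here, so only the shape carries over:

/*
 * One core function that assumes the lock is held, one wrapper per
 * calling context.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

static void __destroy_socket(void)	/* caller must hold sock_lock */
{
	printf("tearing down socket state\n");
}

/* timer (softirq-like) context: the kernel uses bh_lock_sock() here */
static void destroy_socket_from_timer(void)
{
	pthread_mutex_lock(&sock_lock);
	__destroy_socket();
	pthread_mutex_unlock(&sock_lock);
}

/* process context: the kernel uses lock_sock(), which may sleep */
static void destroy_socket(void)
{
	pthread_mutex_lock(&sock_lock);
	__destroy_socket();
	pthread_mutex_unlock(&sock_lock);
}

int main(void)
{
	destroy_socket_from_timer();
	destroy_socket();
	return 0;
}
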
diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c
index 1afa44d25beb..0a04e62e0e18 100644
--- a/net/x25/x25_proc.c
+++ b/net/x25/x25_proc.c
@@ -163,8 +163,8 @@ static int x25_seq_socket_show(struct seq_file *seq, void *v)
163 devname, x25->lci & 0x0FFF, x25->state, x25->vs, x25->vr, 163 devname, x25->lci & 0x0FFF, x25->state, x25->vs, x25->vr,
164 x25->va, x25_display_timer(s) / HZ, x25->t2 / HZ, 164 x25->va, x25_display_timer(s) / HZ, x25->t2 / HZ,
165 x25->t21 / HZ, x25->t22 / HZ, x25->t23 / HZ, 165 x25->t21 / HZ, x25->t22 / HZ, x25->t23 / HZ,
166 atomic_read(&s->sk_wmem_alloc), 166 sk_wmem_alloc_get(s),
167 atomic_read(&s->sk_rmem_alloc), 167 sk_rmem_alloc_get(s),
168 s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L); 168 s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
169out: 169out:
170 return 0; 170 return 0;
diff --git a/net/x25/x25_timer.c b/net/x25/x25_timer.c
index d3e3e54db936..5c5db1a36399 100644
--- a/net/x25/x25_timer.c
+++ b/net/x25/x25_timer.c
@@ -113,7 +113,7 @@ static void x25_heartbeat_expiry(unsigned long param)
113 (sk->sk_state == TCP_LISTEN && 113 (sk->sk_state == TCP_LISTEN &&
114 sock_flag(sk, SOCK_DEAD))) { 114 sock_flag(sk, SOCK_DEAD))) {
115 bh_unlock_sock(sk); 115 bh_unlock_sock(sk);
116 x25_destroy_socket(sk); 116 x25_destroy_socket_from_timer(sk);
117 return; 117 return;
118 } 118 }
119 break; 119 break;