Diffstat (limited to 'net')
-rw-r--r--  net/atm/clip.c                                |  2
-rw-r--r--  net/atm/lec.c                                 | 10
-rw-r--r--  net/atm/mpc.c                                 |  6
-rw-r--r--  net/atm/raw.c                                 |  2
-rw-r--r--  net/atm/signaling.c                           |  2
-rw-r--r--  net/ax25/ax25_in.c                            |  2
-rw-r--r--  net/bluetooth/l2cap_sock.c                    |  6
-rw-r--r--  net/bluetooth/rfcomm/core.c                   |  4
-rw-r--r--  net/bluetooth/rfcomm/sock.c                   |  4
-rw-r--r--  net/bluetooth/sco.c                           |  2
-rw-r--r--  net/bridge/br_input.c                         |  2
-rw-r--r--  net/bridge/br_vlan.c                          |  7
-rw-r--r--  net/caif/caif_socket.c                        |  4
-rw-r--r--  net/ceph/messenger.c                          |  2
-rw-r--r--  net/core/pktgen.c                             |  8
-rw-r--r--  net/core/skbuff.c                             | 16
-rw-r--r--  net/core/sock.c                               |  4
-rw-r--r--  net/dccp/input.c                              |  2
-rw-r--r--  net/dccp/minisocks.c                          |  2
-rw-r--r--  net/decnet/dn_nsp_in.c                        |  4
-rw-r--r--  net/ipv4/ip_gre.c                             |  2
-rw-r--r--  net/ipv4/ip_vti.c                             |  2
-rw-r--r--  net/ipv4/tcp_input.c                          | 10
-rw-r--r--  net/ipv4/tcp_ipv4.c                           |  2
-rw-r--r--  net/ipv4/tcp_minisocks.c                      |  2
-rw-r--r--  net/ipv6/tcp_ipv6.c                           |  2
-rw-r--r--  net/iucv/af_iucv.c                            |  4
-rw-r--r--  net/key/af_key.c                              |  2
-rw-r--r--  net/l2tp/l2tp_ppp.c                           |  4
-rw-r--r--  net/netlink/af_netlink.c                      |  4
-rw-r--r--  net/netrom/af_netrom.c                        |  2
-rw-r--r--  net/nfc/llcp_core.c                           |  2
-rw-r--r--  net/packet/af_packet.c                        |  6
-rw-r--r--  net/phonet/pep-gprs.c                         |  4
-rw-r--r--  net/phonet/pep.c                              |  8
-rw-r--r--  net/rds/tcp.h                                 |  4
-rw-r--r--  net/rds/tcp_listen.c                          |  6
-rw-r--r--  net/rds/tcp_recv.c                            |  8
-rw-r--r--  net/rose/af_rose.c                            |  2
-rw-r--r--  net/rxrpc/ar-input.c                          |  6
-rw-r--r--  net/rxrpc/ar-internal.h                       |  2
-rw-r--r--  net/sctp/socket.c                             |  8
-rw-r--r--  net/sctp/ulpqueue.c                           |  4
-rw-r--r--  net/sunrpc/svcsock.c                          | 12
-rw-r--r--  net/sunrpc/xprtsock.c                         |  8
-rw-r--r--  net/tipc/server.c                             |  4
-rw-r--r--  net/tipc/socket.c                             |  6
-rw-r--r--  net/unix/af_unix.c                            |  6
-rw-r--r--  net/vmw_vsock/vmci_transport_notify.c         |  2
-rw-r--r--  net/vmw_vsock/vmci_transport_notify_qstate.c  |  4
-rw-r--r--  net/x25/af_x25.c                              |  2
-rw-r--r--  net/x25/x25_in.c                              |  2
52 files changed, 119 insertions(+), 114 deletions(-)
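
Most of the hunks below apply one mechanical change: the length argument is removed from the sk_data_ready socket callback, so data-ready wakeups become sk->sk_data_ready(sk). A minimal sketch of the before/after prototypes, for orientation only (proto_data_ready is a hypothetical name, not a function in this diff):

#include <net/sock.h>	/* struct sock, sk_receive_queue, sk_data_ready */

/* Old style: the callback took a byte count, typically the skb->len of
 * the packet just queued.  Reading skb->len after queueing is racy,
 * since a reader may already have pulled and freed the skb (see the
 * comment retained in the net/caif/caif_socket.c hunk below).
 */
static void proto_data_ready_old(struct sock *sk, int bytes)
{
	/* wake up readers; 'bytes' goes away */
}

/* New style: no length argument.  Callers simply invoke
 * sk->sk_data_ready(sk); a consumer that needs a length inspects the
 * receive queue itself (e.g. skb_peek(&sk->sk_receive_queue)) under
 * the queue lock.
 */
static void proto_data_ready(struct sock *sk)
{
	/* wake up readers */
}

The per-protocol conversions (rds_tcp_data_ready, svc_udp_data_ready, sock_def_readable and the rest) follow this pattern; the remaining hunks are separate networking fixes unaffected by the callback change.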
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 8215f7cb170b..ba291ce4bdff 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -68,7 +68,7 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
68 68
69 sk = sk_atm(atmarpd); 69 sk = sk_atm(atmarpd);
70 skb_queue_tail(&sk->sk_receive_queue, skb); 70 skb_queue_tail(&sk->sk_receive_queue, skb);
71 sk->sk_data_ready(sk, skb->len); 71 sk->sk_data_ready(sk);
72 return 0; 72 return 0;
73} 73}
74 74
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5a2f602d07e1..4c5b8ba0f84f 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -152,7 +152,7 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
152 atm_force_charge(priv->lecd, skb2->truesize); 152 atm_force_charge(priv->lecd, skb2->truesize);
153 sk = sk_atm(priv->lecd); 153 sk = sk_atm(priv->lecd);
154 skb_queue_tail(&sk->sk_receive_queue, skb2); 154 skb_queue_tail(&sk->sk_receive_queue, skb2);
155 sk->sk_data_ready(sk, skb2->len); 155 sk->sk_data_ready(sk);
156 } 156 }
157} 157}
158#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ 158#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
@@ -447,7 +447,7 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
447 atm_force_charge(priv->lecd, skb2->truesize); 447 atm_force_charge(priv->lecd, skb2->truesize);
448 sk = sk_atm(priv->lecd); 448 sk = sk_atm(priv->lecd);
449 skb_queue_tail(&sk->sk_receive_queue, skb2); 449 skb_queue_tail(&sk->sk_receive_queue, skb2);
450 sk->sk_data_ready(sk, skb2->len); 450 sk->sk_data_ready(sk);
451 } 451 }
452 } 452 }
453#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ 453#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
@@ -530,13 +530,13 @@ send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
530 atm_force_charge(priv->lecd, skb->truesize); 530 atm_force_charge(priv->lecd, skb->truesize);
531 sk = sk_atm(priv->lecd); 531 sk = sk_atm(priv->lecd);
532 skb_queue_tail(&sk->sk_receive_queue, skb); 532 skb_queue_tail(&sk->sk_receive_queue, skb);
533 sk->sk_data_ready(sk, skb->len); 533 sk->sk_data_ready(sk);
534 534
535 if (data != NULL) { 535 if (data != NULL) {
536 pr_debug("about to send %d bytes of data\n", data->len); 536 pr_debug("about to send %d bytes of data\n", data->len);
537 atm_force_charge(priv->lecd, data->truesize); 537 atm_force_charge(priv->lecd, data->truesize);
538 skb_queue_tail(&sk->sk_receive_queue, data); 538 skb_queue_tail(&sk->sk_receive_queue, data);
539 sk->sk_data_ready(sk, skb->len); 539 sk->sk_data_ready(sk);
540 } 540 }
541 541
542 return 0; 542 return 0;
@@ -616,7 +616,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
616 616
617 pr_debug("%s: To daemon\n", dev->name); 617 pr_debug("%s: To daemon\n", dev->name);
618 skb_queue_tail(&sk->sk_receive_queue, skb); 618 skb_queue_tail(&sk->sk_receive_queue, skb);
619 sk->sk_data_ready(sk, skb->len); 619 sk->sk_data_ready(sk);
620 } else { /* Data frame, queue to protocol handlers */ 620 } else { /* Data frame, queue to protocol handlers */
621 struct lec_arp_table *entry; 621 struct lec_arp_table *entry;
622 unsigned char *src, *dst; 622 unsigned char *src, *dst;
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 91dc58f1124d..e8e0e7a8a23d 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -706,7 +706,7 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
706 dprintk("(%s) control packet arrived\n", dev->name); 706 dprintk("(%s) control packet arrived\n", dev->name);
707 /* Pass control packets to daemon */ 707 /* Pass control packets to daemon */
708 skb_queue_tail(&sk->sk_receive_queue, skb); 708 skb_queue_tail(&sk->sk_receive_queue, skb);
709 sk->sk_data_ready(sk, skb->len); 709 sk->sk_data_ready(sk);
710 return; 710 return;
711 } 711 }
712 712
@@ -992,7 +992,7 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc)
992 992
993 sk = sk_atm(mpc->mpoad_vcc); 993 sk = sk_atm(mpc->mpoad_vcc);
994 skb_queue_tail(&sk->sk_receive_queue, skb); 994 skb_queue_tail(&sk->sk_receive_queue, skb);
995 sk->sk_data_ready(sk, skb->len); 995 sk->sk_data_ready(sk);
996 996
997 return 0; 997 return 0;
998} 998}
@@ -1273,7 +1273,7 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
1273 1273
1274 sk = sk_atm(vcc); 1274 sk = sk_atm(vcc);
1275 skb_queue_tail(&sk->sk_receive_queue, skb); 1275 skb_queue_tail(&sk->sk_receive_queue, skb);
1276 sk->sk_data_ready(sk, skb->len); 1276 sk->sk_data_ready(sk);
1277 dprintk("exiting\n"); 1277 dprintk("exiting\n");
1278} 1278}
1279 1279
diff --git a/net/atm/raw.c b/net/atm/raw.c
index b4f7b9ff3c74..2e17e97a7a8b 100644
--- a/net/atm/raw.c
+++ b/net/atm/raw.c
@@ -25,7 +25,7 @@ static void atm_push_raw(struct atm_vcc *vcc, struct sk_buff *skb)
25 struct sock *sk = sk_atm(vcc); 25 struct sock *sk = sk_atm(vcc);
26 26
27 skb_queue_tail(&sk->sk_receive_queue, skb); 27 skb_queue_tail(&sk->sk_receive_queue, skb);
28 sk->sk_data_ready(sk, skb->len); 28 sk->sk_data_ready(sk);
29 } 29 }
30} 30}
31 31
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 4176887e72eb..523bce72f698 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -51,7 +51,7 @@ static void sigd_put_skb(struct sk_buff *skb)
51#endif 51#endif
52 atm_force_charge(sigd, skb->truesize); 52 atm_force_charge(sigd, skb->truesize);
53 skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb); 53 skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb);
54 sk_atm(sigd)->sk_data_ready(sk_atm(sigd), skb->len); 54 sk_atm(sigd)->sk_data_ready(sk_atm(sigd));
55} 55}
56 56
57static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg) 57static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg)
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index 96f4cab3a2f9..7ed8ab724819 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
@@ -422,7 +422,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
422 422
423 if (sk) { 423 if (sk) {
424 if (!sock_flag(sk, SOCK_DEAD)) 424 if (!sock_flag(sk, SOCK_DEAD))
425 sk->sk_data_ready(sk, skb->len); 425 sk->sk_data_ready(sk);
426 sock_put(sk); 426 sock_put(sk);
427 } else { 427 } else {
428free: 428free:
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index f59e00c2daa9..ef5e5b04f34f 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1271,7 +1271,7 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
1271 1271
1272 if (parent) { 1272 if (parent) {
1273 bt_accept_unlink(sk); 1273 bt_accept_unlink(sk);
1274 parent->sk_data_ready(parent, 0); 1274 parent->sk_data_ready(parent);
1275 } else { 1275 } else {
1276 sk->sk_state_change(sk); 1276 sk->sk_state_change(sk);
1277 } 1277 }
@@ -1327,7 +1327,7 @@ static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
1327 sk->sk_state_change(sk); 1327 sk->sk_state_change(sk);
1328 1328
1329 if (parent) 1329 if (parent)
1330 parent->sk_data_ready(parent, 0); 1330 parent->sk_data_ready(parent);
1331 1331
1332 release_sock(sk); 1332 release_sock(sk);
1333} 1333}
@@ -1340,7 +1340,7 @@ static void l2cap_sock_defer_cb(struct l2cap_chan *chan)
1340 1340
1341 parent = bt_sk(sk)->parent; 1341 parent = bt_sk(sk)->parent;
1342 if (parent) 1342 if (parent)
1343 parent->sk_data_ready(parent, 0); 1343 parent->sk_data_ready(parent);
1344 1344
1345 release_sock(sk); 1345 release_sock(sk);
1346} 1346}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 633cceeb943e..cf620260affa 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -186,9 +186,9 @@ static void rfcomm_l2state_change(struct sock *sk)
186 rfcomm_schedule(); 186 rfcomm_schedule();
187} 187}
188 188
189static void rfcomm_l2data_ready(struct sock *sk, int bytes) 189static void rfcomm_l2data_ready(struct sock *sk)
190{ 190{
191 BT_DBG("%p bytes %d", sk, bytes); 191 BT_DBG("%p", sk);
192 rfcomm_schedule(); 192 rfcomm_schedule();
193} 193}
194 194
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index eabd25ab5ad9..c603a5eb4720 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -54,7 +54,7 @@ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
54 54
55 atomic_add(skb->len, &sk->sk_rmem_alloc); 55 atomic_add(skb->len, &sk->sk_rmem_alloc);
56 skb_queue_tail(&sk->sk_receive_queue, skb); 56 skb_queue_tail(&sk->sk_receive_queue, skb);
57 sk->sk_data_ready(sk, skb->len); 57 sk->sk_data_ready(sk);
58 58
59 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 59 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
60 rfcomm_dlc_throttle(d); 60 rfcomm_dlc_throttle(d);
@@ -84,7 +84,7 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
84 sock_set_flag(sk, SOCK_ZAPPED); 84 sock_set_flag(sk, SOCK_ZAPPED);
85 bt_accept_unlink(sk); 85 bt_accept_unlink(sk);
86 } 86 }
87 parent->sk_data_ready(parent, 0); 87 parent->sk_data_ready(parent);
88 } else { 88 } else {
89 if (d->state == BT_CONNECTED) 89 if (d->state == BT_CONNECTED)
90 rfcomm_session_getaddr(d->session, 90 rfcomm_session_getaddr(d->session,
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index ab1e6fcca4c5..c06dbd3938e8 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -1024,7 +1024,7 @@ static void sco_conn_ready(struct sco_conn *conn)
1024 sk->sk_state = BT_CONNECTED; 1024 sk->sk_state = BT_CONNECTED;
1025 1025
1026 /* Wake up parent */ 1026 /* Wake up parent */
1027 parent->sk_data_ready(parent, 1); 1027 parent->sk_data_ready(parent);
1028 1028
1029 bh_unlock_sock(parent); 1029 bh_unlock_sock(parent);
1030 1030
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index d0cca3c65f01..7985deaff52f 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -73,7 +73,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
73 goto drop; 73 goto drop;
74 74
75 if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid)) 75 if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid))
76 goto drop; 76 goto out;
77 77
78 /* insert into forwarding database after filtering to avoid spoofing */ 78 /* insert into forwarding database after filtering to avoid spoofing */
79 br = p->br; 79 br = p->br;
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 91510712c7a7..4a3716102789 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -170,7 +170,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
170 * rejected. 170 * rejected.
171 */ 171 */
172 if (!v) 172 if (!v)
173 return false; 173 goto drop;
174 174
175 /* If vlan tx offload is disabled on bridge device and frame was 175 /* If vlan tx offload is disabled on bridge device and frame was
176 * sent from vlan device on the bridge device, it does not have 176 * sent from vlan device on the bridge device, it does not have
@@ -193,7 +193,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
193 * vlan untagged or priority-tagged traffic belongs to. 193 * vlan untagged or priority-tagged traffic belongs to.
194 */ 194 */
195 if (pvid == VLAN_N_VID) 195 if (pvid == VLAN_N_VID)
196 return false; 196 goto drop;
197 197
198 /* PVID is set on this port. Any untagged or priority-tagged 198 /* PVID is set on this port. Any untagged or priority-tagged
199 * ingress frame is considered to belong to this vlan. 199 * ingress frame is considered to belong to this vlan.
@@ -216,7 +216,8 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
216 /* Frame had a valid vlan tag. See if vlan is allowed */ 216 /* Frame had a valid vlan tag. See if vlan is allowed */
217 if (test_bit(*vid, v->vlan_bitmap)) 217 if (test_bit(*vid, v->vlan_bitmap))
218 return true; 218 return true;
219 219drop:
220 kfree_skb(skb);
220 return false; 221 return false;
221} 222}
222 223
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index d6be3edb7a43..e8437094d15f 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -124,7 +124,6 @@ static void caif_flow_ctrl(struct sock *sk, int mode)
124static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 124static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
125{ 125{
126 int err; 126 int err;
127 int skb_len;
128 unsigned long flags; 127 unsigned long flags;
129 struct sk_buff_head *list = &sk->sk_receive_queue; 128 struct sk_buff_head *list = &sk->sk_receive_queue;
130 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 129 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
@@ -153,14 +152,13 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
153 * may be freed by other threads of control pulling packets 152 * may be freed by other threads of control pulling packets
154 * from the queue. 153 * from the queue.
155 */ 154 */
156 skb_len = skb->len;
157 spin_lock_irqsave(&list->lock, flags); 155 spin_lock_irqsave(&list->lock, flags);
158 if (!sock_flag(sk, SOCK_DEAD)) 156 if (!sock_flag(sk, SOCK_DEAD))
159 __skb_queue_tail(list, skb); 157 __skb_queue_tail(list, skb);
160 spin_unlock_irqrestore(&list->lock, flags); 158 spin_unlock_irqrestore(&list->lock, flags);
161 159
162 if (!sock_flag(sk, SOCK_DEAD)) 160 if (!sock_flag(sk, SOCK_DEAD))
163 sk->sk_data_ready(sk, skb_len); 161 sk->sk_data_ready(sk);
164 else 162 else
165 kfree_skb(skb); 163 kfree_skb(skb);
166 return 0; 164 return 0;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 4f55f9ce63fa..dac7f9b98687 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -383,7 +383,7 @@ static void con_sock_state_closed(struct ceph_connection *con)
383 */ 383 */
384 384
385/* data available on socket, or listen socket received a connect */ 385/* data available on socket, or listen socket received a connect */
386static void ceph_sock_data_ready(struct sock *sk, int count_unused) 386static void ceph_sock_data_ready(struct sock *sk)
387{ 387{
388 struct ceph_connection *con = sk->sk_user_data; 388 struct ceph_connection *con = sk->sk_user_data;
389 if (atomic_read(&con->msgr->stopping)) { 389 if (atomic_read(&con->msgr->stopping)) {
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index d068ec25db1e..0304f981f7ff 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3338,7 +3338,9 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
3338 queue_map = skb_get_queue_mapping(pkt_dev->skb); 3338 queue_map = skb_get_queue_mapping(pkt_dev->skb);
3339 txq = netdev_get_tx_queue(odev, queue_map); 3339 txq = netdev_get_tx_queue(odev, queue_map);
3340 3340
3341 __netif_tx_lock_bh(txq); 3341 local_bh_disable();
3342
3343 HARD_TX_LOCK(odev, txq, smp_processor_id());
3342 3344
3343 if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) { 3345 if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) {
3344 ret = NETDEV_TX_BUSY; 3346 ret = NETDEV_TX_BUSY;
@@ -3374,7 +3376,9 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
3374 pkt_dev->last_ok = 0; 3376 pkt_dev->last_ok = 0;
3375 } 3377 }
3376unlock: 3378unlock:
3377 __netif_tx_unlock_bh(txq); 3379 HARD_TX_UNLOCK(odev, txq);
3380
3381 local_bh_enable();
3378 3382
3379 /* If pkt_dev->count is zero, then run forever */ 3383 /* If pkt_dev->count is zero, then run forever */
3380 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { 3384 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 30c7d35dd862..1b62343f5837 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3458,8 +3458,6 @@ static void sock_rmem_free(struct sk_buff *skb)
3458 */ 3458 */
3459int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3459int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3460{ 3460{
3461 int len = skb->len;
3462
3463 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 3461 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
3464 (unsigned int)sk->sk_rcvbuf) 3462 (unsigned int)sk->sk_rcvbuf)
3465 return -ENOMEM; 3463 return -ENOMEM;
@@ -3474,7 +3472,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3474 3472
3475 skb_queue_tail(&sk->sk_error_queue, skb); 3473 skb_queue_tail(&sk->sk_error_queue, skb);
3476 if (!sock_flag(sk, SOCK_DEAD)) 3474 if (!sock_flag(sk, SOCK_DEAD))
3477 sk->sk_data_ready(sk, len); 3475 sk->sk_data_ready(sk);
3478 return 0; 3476 return 0;
3479} 3477}
3480EXPORT_SYMBOL(sock_queue_err_skb); 3478EXPORT_SYMBOL(sock_queue_err_skb);
@@ -3937,12 +3935,14 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet);
3937unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) 3935unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
3938{ 3936{
3939 const struct skb_shared_info *shinfo = skb_shinfo(skb); 3937 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3940 unsigned int hdr_len;
3941 3938
3942 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) 3939 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
3943 hdr_len = tcp_hdrlen(skb); 3940 return tcp_hdrlen(skb) + shinfo->gso_size;
3944 else 3941
3945 hdr_len = sizeof(struct udphdr); 3942 /* UFO sets gso_size to the size of the fragmentation
3946 return hdr_len + shinfo->gso_size; 3943 * payload, i.e. the size of the L4 (UDP) header is already
3944 * accounted for.
3945 */
3946 return shinfo->gso_size;
3947} 3947}
3948EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); 3948EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
diff --git a/net/core/sock.c b/net/core/sock.c
index c0fc6bdad1e3..b4fff008136f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -428,7 +428,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
428 spin_unlock_irqrestore(&list->lock, flags); 428 spin_unlock_irqrestore(&list->lock, flags);
429 429
430 if (!sock_flag(sk, SOCK_DEAD)) 430 if (!sock_flag(sk, SOCK_DEAD))
431 sk->sk_data_ready(sk, skb_len); 431 sk->sk_data_ready(sk);
432 return 0; 432 return 0;
433} 433}
434EXPORT_SYMBOL(sock_queue_rcv_skb); 434EXPORT_SYMBOL(sock_queue_rcv_skb);
@@ -2196,7 +2196,7 @@ static void sock_def_error_report(struct sock *sk)
2196 rcu_read_unlock(); 2196 rcu_read_unlock();
2197} 2197}
2198 2198
2199static void sock_def_readable(struct sock *sk, int len) 2199static void sock_def_readable(struct sock *sk)
2200{ 2200{
2201 struct socket_wq *wq; 2201 struct socket_wq *wq;
2202 2202
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 14cdafad7a90..3c8ec7d4a34e 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -28,7 +28,7 @@ static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb)
28 __skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4); 28 __skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
29 __skb_queue_tail(&sk->sk_receive_queue, skb); 29 __skb_queue_tail(&sk->sk_receive_queue, skb);
30 skb_set_owner_r(skb, sk); 30 skb_set_owner_r(skb, sk);
31 sk->sk_data_ready(sk, 0); 31 sk->sk_data_ready(sk);
32} 32}
33 33
34static void dccp_fin(struct sock *sk, struct sk_buff *skb) 34static void dccp_fin(struct sock *sk, struct sk_buff *skb)
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 9e2f78bc1553..c69eb9c4fbb8 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -237,7 +237,7 @@ int dccp_child_process(struct sock *parent, struct sock *child,
237 237
238 /* Wakeup parent, send SIGIO */ 238 /* Wakeup parent, send SIGIO */
239 if (state == DCCP_RESPOND && child->sk_state != state) 239 if (state == DCCP_RESPOND && child->sk_state != state)
240 parent->sk_data_ready(parent, 0); 240 parent->sk_data_ready(parent);
241 } else { 241 } else {
242 /* Alas, it is possible again, because we do lookup 242 /* Alas, it is possible again, because we do lookup
243 * in main socket hash table and lock on listening 243 * in main socket hash table and lock on listening
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index c344163e6ac0..fe5f01485d33 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -585,7 +585,6 @@ out:
585static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue) 585static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue)
586{ 586{
587 int err; 587 int err;
588 int skb_len;
589 588
590 /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces 589 /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
591 number of warnings when compiling with -W --ANK 590 number of warnings when compiling with -W --ANK
@@ -600,12 +599,11 @@ static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig
600 if (err) 599 if (err)
601 goto out; 600 goto out;
602 601
603 skb_len = skb->len;
604 skb_set_owner_r(skb, sk); 602 skb_set_owner_r(skb, sk);
605 skb_queue_tail(queue, skb); 603 skb_queue_tail(queue, skb);
606 604
607 if (!sock_flag(sk, SOCK_DEAD)) 605 if (!sock_flag(sk, SOCK_DEAD))
608 sk->sk_data_ready(sk, skb_len); 606 sk->sk_data_ready(sk);
609out: 607out:
610 return err; 608 return err;
611} 609}
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index ec4f762efda5..94213c891565 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -463,6 +463,7 @@ static const struct net_device_ops ipgre_netdev_ops = {
463static void ipgre_tunnel_setup(struct net_device *dev) 463static void ipgre_tunnel_setup(struct net_device *dev)
464{ 464{
465 dev->netdev_ops = &ipgre_netdev_ops; 465 dev->netdev_ops = &ipgre_netdev_ops;
466 dev->type = ARPHRD_IPGRE;
466 ip_tunnel_setup(dev, ipgre_net_id); 467 ip_tunnel_setup(dev, ipgre_net_id);
467} 468}
468 469
@@ -501,7 +502,6 @@ static int ipgre_tunnel_init(struct net_device *dev)
501 memcpy(dev->dev_addr, &iph->saddr, 4); 502 memcpy(dev->dev_addr, &iph->saddr, 4);
502 memcpy(dev->broadcast, &iph->daddr, 4); 503 memcpy(dev->broadcast, &iph->daddr, 4);
503 504
504 dev->type = ARPHRD_IPGRE;
505 dev->flags = IFF_NOARP; 505 dev->flags = IFF_NOARP;
506 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 506 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
507 dev->addr_len = 4; 507 dev->addr_len = 4;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 687ddef4e574..afcee51b90ed 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -337,6 +337,7 @@ static const struct net_device_ops vti_netdev_ops = {
337static void vti_tunnel_setup(struct net_device *dev) 337static void vti_tunnel_setup(struct net_device *dev)
338{ 338{
339 dev->netdev_ops = &vti_netdev_ops; 339 dev->netdev_ops = &vti_netdev_ops;
340 dev->type = ARPHRD_TUNNEL;
340 ip_tunnel_setup(dev, vti_net_id); 341 ip_tunnel_setup(dev, vti_net_id);
341} 342}
342 343
@@ -348,7 +349,6 @@ static int vti_tunnel_init(struct net_device *dev)
348 memcpy(dev->dev_addr, &iph->saddr, 4); 349 memcpy(dev->dev_addr, &iph->saddr, 4);
349 memcpy(dev->broadcast, &iph->daddr, 4); 350 memcpy(dev->broadcast, &iph->daddr, 4);
350 351
351 dev->type = ARPHRD_TUNNEL;
352 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); 352 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
353 dev->mtu = ETH_DATA_LEN; 353 dev->mtu = ETH_DATA_LEN;
354 dev->flags = IFF_NOARP; 354 dev->flags = IFF_NOARP;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e1661f46fd19..d6b46eb2f94c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4413,7 +4413,7 @@ queue_and_out:
4413 if (eaten > 0) 4413 if (eaten > 0)
4414 kfree_skb_partial(skb, fragstolen); 4414 kfree_skb_partial(skb, fragstolen);
4415 if (!sock_flag(sk, SOCK_DEAD)) 4415 if (!sock_flag(sk, SOCK_DEAD))
4416 sk->sk_data_ready(sk, 0); 4416 sk->sk_data_ready(sk);
4417 return; 4417 return;
4418 } 4418 }
4419 4419
@@ -4914,7 +4914,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t
4914 BUG(); 4914 BUG();
4915 tp->urg_data = TCP_URG_VALID | tmp; 4915 tp->urg_data = TCP_URG_VALID | tmp;
4916 if (!sock_flag(sk, SOCK_DEAD)) 4916 if (!sock_flag(sk, SOCK_DEAD))
4917 sk->sk_data_ready(sk, 0); 4917 sk->sk_data_ready(sk);
4918 } 4918 }
4919 } 4919 }
4920} 4920}
@@ -5000,11 +5000,11 @@ static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
5000 (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) || 5000 (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
5001 (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) { 5001 (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
5002 tp->ucopy.wakeup = 1; 5002 tp->ucopy.wakeup = 1;
5003 sk->sk_data_ready(sk, 0); 5003 sk->sk_data_ready(sk);
5004 } 5004 }
5005 } else if (chunk > 0) { 5005 } else if (chunk > 0) {
5006 tp->ucopy.wakeup = 1; 5006 tp->ucopy.wakeup = 1;
5007 sk->sk_data_ready(sk, 0); 5007 sk->sk_data_ready(sk);
5008 } 5008 }
5009out: 5009out:
5010 return copied_early; 5010 return copied_early;
@@ -5275,7 +5275,7 @@ no_ack:
5275#endif 5275#endif
5276 if (eaten) 5276 if (eaten)
5277 kfree_skb_partial(skb, fragstolen); 5277 kfree_skb_partial(skb, fragstolen);
5278 sk->sk_data_ready(sk, 0); 5278 sk->sk_data_ready(sk);
5279 return; 5279 return;
5280 } 5280 }
5281 } 5281 }
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6379894ec210..438f3b95143d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1434,7 +1434,7 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
1434 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 1434 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1435 tp->syn_data_acked = 1; 1435 tp->syn_data_acked = 1;
1436 } 1436 }
1437 sk->sk_data_ready(sk, 0); 1437 sk->sk_data_ready(sk);
1438 bh_unlock_sock(child); 1438 bh_unlock_sock(child);
1439 sock_put(child); 1439 sock_put(child);
1440 WARN_ON(req->sk == NULL); 1440 WARN_ON(req->sk == NULL);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index ca788ada5bd3..05c1b155251d 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -745,7 +745,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
745 skb->len); 745 skb->len);
746 /* Wakeup parent, send SIGIO */ 746 /* Wakeup parent, send SIGIO */
747 if (state == TCP_SYN_RECV && child->sk_state != state) 747 if (state == TCP_SYN_RECV && child->sk_state != state)
748 parent->sk_data_ready(parent, 0); 748 parent->sk_data_ready(parent);
749 } else { 749 } else {
750 /* Alas, it is possible again, because we do lookup 750 /* Alas, it is possible again, because we do lookup
751 * in main socket hash table and lock on listening 751 * in main socket hash table and lock on listening
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5ca56cee2dae..e289830ed6e3 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -798,7 +798,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
798 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); 798 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
799 799
800 fl6.flowi6_proto = IPPROTO_TCP; 800 fl6.flowi6_proto = IPPROTO_TCP;
801 if (rt6_need_strict(&fl6.daddr) || !oif) 801 if (rt6_need_strict(&fl6.daddr) && !oif)
802 fl6.flowi6_oif = inet6_iif(skb); 802 fl6.flowi6_oif = inet6_iif(skb);
803 else 803 else
804 fl6.flowi6_oif = oif; 804 fl6.flowi6_oif = oif;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index a5e03119107a..01e77b0ae075 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1757,7 +1757,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
1757 1757
1758 /* Wake up accept */ 1758 /* Wake up accept */
1759 nsk->sk_state = IUCV_CONNECTED; 1759 nsk->sk_state = IUCV_CONNECTED;
1760 sk->sk_data_ready(sk, 1); 1760 sk->sk_data_ready(sk);
1761 err = 0; 1761 err = 0;
1762fail: 1762fail:
1763 bh_unlock_sock(sk); 1763 bh_unlock_sock(sk);
@@ -1968,7 +1968,7 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1968 if (!err) { 1968 if (!err) {
1969 iucv_accept_enqueue(sk, nsk); 1969 iucv_accept_enqueue(sk, nsk);
1970 nsk->sk_state = IUCV_CONNECTED; 1970 nsk->sk_state = IUCV_CONNECTED;
1971 sk->sk_data_ready(sk, 1); 1971 sk->sk_data_ready(sk);
1972 } else 1972 } else
1973 iucv_sock_kill(nsk); 1973 iucv_sock_kill(nsk);
1974 bh_unlock_sock(sk); 1974 bh_unlock_sock(sk);
diff --git a/net/key/af_key.c b/net/key/af_key.c
index e72589a8400d..f3c83073afc4 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -205,7 +205,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
205 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { 205 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
206 skb_set_owner_r(*skb2, sk); 206 skb_set_owner_r(*skb2, sk);
207 skb_queue_tail(&sk->sk_receive_queue, *skb2); 207 skb_queue_tail(&sk->sk_receive_queue, *skb2);
208 sk->sk_data_ready(sk, (*skb2)->len); 208 sk->sk_data_ready(sk);
209 *skb2 = NULL; 209 *skb2 = NULL;
210 err = 0; 210 err = 0;
211 } 211 }
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index d276e2d4a589..950909f04ee6 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -753,9 +753,9 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
753 session->deref = pppol2tp_session_sock_put; 753 session->deref = pppol2tp_session_sock_put;
754 754
755 /* If PMTU discovery was enabled, use the MTU that was discovered */ 755 /* If PMTU discovery was enabled, use the MTU that was discovered */
756 dst = sk_dst_get(sk); 756 dst = sk_dst_get(tunnel->sock);
757 if (dst != NULL) { 757 if (dst != NULL) {
758 u32 pmtu = dst_mtu(__sk_dst_get(sk)); 758 u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
759 if (pmtu != 0) 759 if (pmtu != 0)
760 session->mtu = session->mru = pmtu - 760 session->mtu = session->mru = pmtu -
761 PPPOL2TP_HEADER_OVERHEAD; 761 PPPOL2TP_HEADER_OVERHEAD;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index c2d585c4f7c5..894cda0206bb 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1653,7 +1653,7 @@ static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1653 else 1653 else
1654#endif /* CONFIG_NETLINK_MMAP */ 1654#endif /* CONFIG_NETLINK_MMAP */
1655 skb_queue_tail(&sk->sk_receive_queue, skb); 1655 skb_queue_tail(&sk->sk_receive_queue, skb);
1656 sk->sk_data_ready(sk, len); 1656 sk->sk_data_ready(sk);
1657 return len; 1657 return len;
1658} 1658}
1659 1659
@@ -2394,7 +2394,7 @@ out:
2394 return err ? : copied; 2394 return err ? : copied;
2395} 2395}
2396 2396
2397static void netlink_data_ready(struct sock *sk, int len) 2397static void netlink_data_ready(struct sock *sk)
2398{ 2398{
2399 BUG(); 2399 BUG();
2400} 2400}
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index b74aa0755521..ede50d197e10 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1011,7 +1011,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
1011 skb_queue_head(&sk->sk_receive_queue, skb); 1011 skb_queue_head(&sk->sk_receive_queue, skb);
1012 1012
1013 if (!sock_flag(sk, SOCK_DEAD)) 1013 if (!sock_flag(sk, SOCK_DEAD))
1014 sk->sk_data_ready(sk, skb->len); 1014 sk->sk_data_ready(sk);
1015 1015
1016 bh_unlock_sock(sk); 1016 bh_unlock_sock(sk);
1017 1017
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index b486f12ae243..b4671958fcf9 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -976,7 +976,7 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
976 new_sk->sk_state = LLCP_CONNECTED; 976 new_sk->sk_state = LLCP_CONNECTED;
977 977
978 /* Wake the listening processes */ 978 /* Wake the listening processes */
979 parent->sk_data_ready(parent, 0); 979 parent->sk_data_ready(parent);
980 980
981 /* Send CC */ 981 /* Send CC */
982 nfc_llcp_send_cc(new_sock); 982 nfc_llcp_send_cc(new_sock);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 72e0c71fb01d..b85c67ccb797 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1848,7 +1848,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1848 skb->dropcount = atomic_read(&sk->sk_drops); 1848 skb->dropcount = atomic_read(&sk->sk_drops);
1849 __skb_queue_tail(&sk->sk_receive_queue, skb); 1849 __skb_queue_tail(&sk->sk_receive_queue, skb);
1850 spin_unlock(&sk->sk_receive_queue.lock); 1850 spin_unlock(&sk->sk_receive_queue.lock);
1851 sk->sk_data_ready(sk, skb->len); 1851 sk->sk_data_ready(sk);
1852 return 0; 1852 return 0;
1853 1853
1854drop_n_acct: 1854drop_n_acct:
@@ -2054,7 +2054,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2054 else 2054 else
2055 prb_clear_blk_fill_status(&po->rx_ring); 2055 prb_clear_blk_fill_status(&po->rx_ring);
2056 2056
2057 sk->sk_data_ready(sk, 0); 2057 sk->sk_data_ready(sk);
2058 2058
2059drop_n_restore: 2059drop_n_restore:
2060 if (skb_head != skb->data && skb_shared(skb)) { 2060 if (skb_head != skb->data && skb_shared(skb)) {
@@ -2069,7 +2069,7 @@ ring_is_full:
2069 po->stats.stats1.tp_drops++; 2069 po->stats.stats1.tp_drops++;
2070 spin_unlock(&sk->sk_receive_queue.lock); 2070 spin_unlock(&sk->sk_receive_queue.lock);
2071 2071
2072 sk->sk_data_ready(sk, 0); 2072 sk->sk_data_ready(sk);
2073 kfree_skb(copy_skb); 2073 kfree_skb(copy_skb);
2074 goto drop_n_restore; 2074 goto drop_n_restore;
2075} 2075}
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index a2fba7edfd1f..66dc65e7c6a1 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -37,7 +37,7 @@
37struct gprs_dev { 37struct gprs_dev {
38 struct sock *sk; 38 struct sock *sk;
39 void (*old_state_change)(struct sock *); 39 void (*old_state_change)(struct sock *);
40 void (*old_data_ready)(struct sock *, int); 40 void (*old_data_ready)(struct sock *);
41 void (*old_write_space)(struct sock *); 41 void (*old_write_space)(struct sock *);
42 42
43 struct net_device *dev; 43 struct net_device *dev;
@@ -146,7 +146,7 @@ drop:
146 return err; 146 return err;
147} 147}
148 148
149static void gprs_data_ready(struct sock *sk, int len) 149static void gprs_data_ready(struct sock *sk)
150{ 150{
151 struct gprs_dev *gp = sk->sk_user_data; 151 struct gprs_dev *gp = sk->sk_user_data;
152 struct sk_buff *skb; 152 struct sk_buff *skb;
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index e77411735de8..70a547ea5177 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -462,10 +462,9 @@ out:
462queue: 462queue:
463 skb->dev = NULL; 463 skb->dev = NULL;
464 skb_set_owner_r(skb, sk); 464 skb_set_owner_r(skb, sk);
465 err = skb->len;
466 skb_queue_tail(queue, skb); 465 skb_queue_tail(queue, skb);
467 if (!sock_flag(sk, SOCK_DEAD)) 466 if (!sock_flag(sk, SOCK_DEAD))
468 sk->sk_data_ready(sk, err); 467 sk->sk_data_ready(sk);
469 return NET_RX_SUCCESS; 468 return NET_RX_SUCCESS;
470} 469}
471 470
@@ -587,10 +586,9 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
587 pn->rx_credits--; 586 pn->rx_credits--;
588 skb->dev = NULL; 587 skb->dev = NULL;
589 skb_set_owner_r(skb, sk); 588 skb_set_owner_r(skb, sk);
590 err = skb->len;
591 skb_queue_tail(&sk->sk_receive_queue, skb); 589 skb_queue_tail(&sk->sk_receive_queue, skb);
592 if (!sock_flag(sk, SOCK_DEAD)) 590 if (!sock_flag(sk, SOCK_DEAD))
593 sk->sk_data_ready(sk, err); 591 sk->sk_data_ready(sk);
594 return NET_RX_SUCCESS; 592 return NET_RX_SUCCESS;
595 593
596 case PNS_PEP_CONNECT_RESP: 594 case PNS_PEP_CONNECT_RESP:
@@ -698,7 +696,7 @@ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
698 skb_queue_head(&sk->sk_receive_queue, skb); 696 skb_queue_head(&sk->sk_receive_queue, skb);
699 sk_acceptq_added(sk); 697 sk_acceptq_added(sk);
700 if (!sock_flag(sk, SOCK_DEAD)) 698 if (!sock_flag(sk, SOCK_DEAD))
701 sk->sk_data_ready(sk, 0); 699 sk->sk_data_ready(sk);
702 return NET_RX_SUCCESS; 700 return NET_RX_SUCCESS;
703 701
704 case PNS_PEP_DISCONNECT_REQ: 702 case PNS_PEP_DISCONNECT_REQ:
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 9cf2927d0021..65637491f728 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -61,12 +61,12 @@ void rds_tcp_state_change(struct sock *sk);
61/* tcp_listen.c */ 61/* tcp_listen.c */
62int rds_tcp_listen_init(void); 62int rds_tcp_listen_init(void);
63void rds_tcp_listen_stop(void); 63void rds_tcp_listen_stop(void);
64void rds_tcp_listen_data_ready(struct sock *sk, int bytes); 64void rds_tcp_listen_data_ready(struct sock *sk);
65 65
66/* tcp_recv.c */ 66/* tcp_recv.c */
67int rds_tcp_recv_init(void); 67int rds_tcp_recv_init(void);
68void rds_tcp_recv_exit(void); 68void rds_tcp_recv_exit(void);
69void rds_tcp_data_ready(struct sock *sk, int bytes); 69void rds_tcp_data_ready(struct sock *sk);
70int rds_tcp_recv(struct rds_connection *conn); 70int rds_tcp_recv(struct rds_connection *conn);
71void rds_tcp_inc_free(struct rds_incoming *inc); 71void rds_tcp_inc_free(struct rds_incoming *inc);
72int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, 72int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 7787537e9c2e..4e638f851185 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -108,9 +108,9 @@ static void rds_tcp_accept_worker(struct work_struct *work)
108 cond_resched(); 108 cond_resched();
109} 109}
110 110
111void rds_tcp_listen_data_ready(struct sock *sk, int bytes) 111void rds_tcp_listen_data_ready(struct sock *sk)
112{ 112{
113 void (*ready)(struct sock *sk, int bytes); 113 void (*ready)(struct sock *sk);
114 114
115 rdsdebug("listen data ready sk %p\n", sk); 115 rdsdebug("listen data ready sk %p\n", sk);
116 116
@@ -132,7 +132,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
132 132
133out: 133out:
134 read_unlock(&sk->sk_callback_lock); 134 read_unlock(&sk->sk_callback_lock);
135 ready(sk, bytes); 135 ready(sk);
136} 136}
137 137
138int rds_tcp_listen_init(void) 138int rds_tcp_listen_init(void)
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 4fac4f2bb9dc..9ae6e0a264ec 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -314,13 +314,13 @@ int rds_tcp_recv(struct rds_connection *conn)
314 return ret; 314 return ret;
315} 315}
316 316
317void rds_tcp_data_ready(struct sock *sk, int bytes) 317void rds_tcp_data_ready(struct sock *sk)
318{ 318{
319 void (*ready)(struct sock *sk, int bytes); 319 void (*ready)(struct sock *sk);
320 struct rds_connection *conn; 320 struct rds_connection *conn;
321 struct rds_tcp_connection *tc; 321 struct rds_tcp_connection *tc;
322 322
323 rdsdebug("data ready sk %p bytes %d\n", sk, bytes); 323 rdsdebug("data ready sk %p\n", sk);
324 324
325 read_lock(&sk->sk_callback_lock); 325 read_lock(&sk->sk_callback_lock);
326 conn = sk->sk_user_data; 326 conn = sk->sk_user_data;
@@ -337,7 +337,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
337 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 337 queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
338out: 338out:
339 read_unlock(&sk->sk_callback_lock); 339 read_unlock(&sk->sk_callback_lock);
340 ready(sk, bytes); 340 ready(sk);
341} 341}
342 342
343int rds_tcp_recv_init(void) 343int rds_tcp_recv_init(void)
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index c2cca2ee6aef..8451c8cdc9de 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1041,7 +1041,7 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros
1041 rose_start_heartbeat(make); 1041 rose_start_heartbeat(make);
1042 1042
1043 if (!sock_flag(sk, SOCK_DEAD)) 1043 if (!sock_flag(sk, SOCK_DEAD))
1044 sk->sk_data_ready(sk, skb->len); 1044 sk->sk_data_ready(sk);
1045 1045
1046 return 1; 1046 return 1;
1047} 1047}
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 73742647c135..63b21e580de9 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -113,7 +113,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
113 spin_unlock_bh(&sk->sk_receive_queue.lock); 113 spin_unlock_bh(&sk->sk_receive_queue.lock);
114 114
115 if (!sock_flag(sk, SOCK_DEAD)) 115 if (!sock_flag(sk, SOCK_DEAD))
116 sk->sk_data_ready(sk, skb_len); 116 sk->sk_data_ready(sk);
117 } 117 }
118 skb = NULL; 118 skb = NULL;
119 } else { 119 } else {
@@ -632,14 +632,14 @@ cant_find_conn:
632 * handle data received on the local endpoint 632 * handle data received on the local endpoint
633 * - may be called in interrupt context 633 * - may be called in interrupt context
634 */ 634 */
635void rxrpc_data_ready(struct sock *sk, int count) 635void rxrpc_data_ready(struct sock *sk)
636{ 636{
637 struct rxrpc_skb_priv *sp; 637 struct rxrpc_skb_priv *sp;
638 struct rxrpc_local *local; 638 struct rxrpc_local *local;
639 struct sk_buff *skb; 639 struct sk_buff *skb;
640 int ret; 640 int ret;
641 641
642 _enter("%p, %d", sk, count); 642 _enter("%p", sk);
643 643
644 ASSERT(!irqs_disabled()); 644 ASSERT(!irqs_disabled());
645 645
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index c831d44b0841..ba9fd36d3f15 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -518,7 +518,7 @@ void rxrpc_UDP_error_handler(struct work_struct *);
518 */ 518 */
519extern const char *rxrpc_pkts[]; 519extern const char *rxrpc_pkts[];
520 520
521void rxrpc_data_ready(struct sock *, int); 521void rxrpc_data_ready(struct sock *);
522int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool); 522int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
523void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *); 523void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
524 524
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 5f83a6a2fa67..e13519e9df80 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -6604,6 +6604,12 @@ static void sctp_wake_up_waiters(struct sock *sk,
6604 if (asoc->ep->sndbuf_policy) 6604 if (asoc->ep->sndbuf_policy)
6605 return __sctp_write_space(asoc); 6605 return __sctp_write_space(asoc);
6606 6606
6607 /* If association goes down and is just flushing its
6608 * outq, then just normally notify others.
6609 */
6610 if (asoc->base.dead)
6611 return sctp_write_space(sk);
6612
6607 /* Accounting for the sndbuf space is per socket, so we 6613 /* Accounting for the sndbuf space is per socket, so we
6608 * need to wake up others, try to be fair and in case of 6614 * need to wake up others, try to be fair and in case of
6609 * other associations, let them have a go first instead 6615 * other associations, let them have a go first instead
@@ -6739,7 +6745,7 @@ do_nonblock:
6739 goto out; 6745 goto out;
6740} 6746}
6741 6747
6742void sctp_data_ready(struct sock *sk, int len) 6748void sctp_data_ready(struct sock *sk)
6743{ 6749{
6744 struct socket_wq *wq; 6750 struct socket_wq *wq;
6745 6751
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 5dc94117e9d4..7144eb6a1b95 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -259,7 +259,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
259 sctp_ulpq_clear_pd(ulpq); 259 sctp_ulpq_clear_pd(ulpq);
260 260
261 if (queue == &sk->sk_receive_queue) 261 if (queue == &sk->sk_receive_queue)
262 sk->sk_data_ready(sk, 0); 262 sk->sk_data_ready(sk);
263 return 1; 263 return 1;
264 264
265out_free: 265out_free:
@@ -1135,5 +1135,5 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1135 1135
1136 /* If there is data waiting, send it up the socket now. */ 1136 /* If there is data waiting, send it up the socket now. */
1137 if (sctp_ulpq_clear_pd(ulpq) || ev) 1137 if (sctp_ulpq_clear_pd(ulpq) || ev)
1138 sk->sk_data_ready(sk, 0); 1138 sk->sk_data_ready(sk);
1139} 1139}
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index d06cb8752dcd..43bcb4699d69 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -60,7 +60,7 @@
60 60
61static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *, 61static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
62 int flags); 62 int flags);
63static void svc_udp_data_ready(struct sock *, int); 63static void svc_udp_data_ready(struct sock *);
64static int svc_udp_recvfrom(struct svc_rqst *); 64static int svc_udp_recvfrom(struct svc_rqst *);
65static int svc_udp_sendto(struct svc_rqst *); 65static int svc_udp_sendto(struct svc_rqst *);
66static void svc_sock_detach(struct svc_xprt *); 66static void svc_sock_detach(struct svc_xprt *);
@@ -403,14 +403,14 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
403/* 403/*
404 * INET callback when data has been received on the socket. 404 * INET callback when data has been received on the socket.
405 */ 405 */
406static void svc_udp_data_ready(struct sock *sk, int count) 406static void svc_udp_data_ready(struct sock *sk)
407{ 407{
408 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 408 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
409 wait_queue_head_t *wq = sk_sleep(sk); 409 wait_queue_head_t *wq = sk_sleep(sk);
410 410
411 if (svsk) { 411 if (svsk) {
412 dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n", 412 dprintk("svc: socket %p(inet %p), busy=%d\n",
413 svsk, sk, count, 413 svsk, sk,
414 test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); 414 test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
415 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); 415 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
416 svc_xprt_enqueue(&svsk->sk_xprt); 416 svc_xprt_enqueue(&svsk->sk_xprt);
@@ -731,7 +731,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
731 * A data_ready event on a listening socket means there's a connection 731 * A data_ready event on a listening socket means there's a connection
732 * pending. Do not use state_change as a substitute for it. 732 * pending. Do not use state_change as a substitute for it.
733 */ 733 */
734static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused) 734static void svc_tcp_listen_data_ready(struct sock *sk)
735{ 735{
736 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 736 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
737 wait_queue_head_t *wq; 737 wait_queue_head_t *wq;
@@ -783,7 +783,7 @@ static void svc_tcp_state_change(struct sock *sk)
783 wake_up_interruptible_all(wq); 783 wake_up_interruptible_all(wq);
784} 784}
785 785
786static void svc_tcp_data_ready(struct sock *sk, int count) 786static void svc_tcp_data_ready(struct sock *sk)
787{ 787{
788 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 788 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
789 wait_queue_head_t *wq = sk_sleep(sk); 789 wait_queue_head_t *wq = sk_sleep(sk);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 6735e1d1e9bb..25a3dcf15cae 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -254,7 +254,7 @@ struct sock_xprt {
254 /* 254 /*
255 * Saved socket callback addresses 255 * Saved socket callback addresses
256 */ 256 */
257 void (*old_data_ready)(struct sock *, int); 257 void (*old_data_ready)(struct sock *);
258 void (*old_state_change)(struct sock *); 258 void (*old_state_change)(struct sock *);
259 void (*old_write_space)(struct sock *); 259 void (*old_write_space)(struct sock *);
260 void (*old_error_report)(struct sock *); 260 void (*old_error_report)(struct sock *);
@@ -951,7 +951,7 @@ static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
951 * 951 *
952 * Currently this assumes we can read the whole reply in a single gulp. 952 * Currently this assumes we can read the whole reply in a single gulp.
953 */ 953 */
954static void xs_local_data_ready(struct sock *sk, int len) 954static void xs_local_data_ready(struct sock *sk)
955{ 955{
956 struct rpc_task *task; 956 struct rpc_task *task;
957 struct rpc_xprt *xprt; 957 struct rpc_xprt *xprt;
@@ -1014,7 +1014,7 @@ static void xs_local_data_ready(struct sock *sk, int len)
1014 * @len: how much data to read 1014 * @len: how much data to read
1015 * 1015 *
1016 */ 1016 */
1017static void xs_udp_data_ready(struct sock *sk, int len) 1017static void xs_udp_data_ready(struct sock *sk)
1018{ 1018{
1019 struct rpc_task *task; 1019 struct rpc_task *task;
1020 struct rpc_xprt *xprt; 1020 struct rpc_xprt *xprt;
@@ -1437,7 +1437,7 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns
1437 * @bytes: how much data to read 1437 * @bytes: how much data to read
1438 * 1438 *
1439 */ 1439 */
1440static void xs_tcp_data_ready(struct sock *sk, int bytes) 1440static void xs_tcp_data_ready(struct sock *sk)
1441{ 1441{
1442 struct rpc_xprt *xprt; 1442 struct rpc_xprt *xprt;
1443 read_descriptor_t rd_desc; 1443 read_descriptor_t rd_desc;
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 646a930eefbf..a538a02f869b 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -119,7 +119,7 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
119 return con; 119 return con;
120} 120}
121 121
122static void sock_data_ready(struct sock *sk, int unused) 122static void sock_data_ready(struct sock *sk)
123{ 123{
124 struct tipc_conn *con; 124 struct tipc_conn *con;
125 125
@@ -297,7 +297,7 @@ static int tipc_accept_from_sock(struct tipc_conn *con)
297 newcon->usr_data = s->tipc_conn_new(newcon->conid); 297 newcon->usr_data = s->tipc_conn_new(newcon->conid);
298 298
299 /* Wake up receive process in case of 'SYN+' message */ 299 /* Wake up receive process in case of 'SYN+' message */
300 newsock->sk->sk_data_ready(newsock->sk, 0); 300 newsock->sk->sk_data_ready(newsock->sk);
301 return ret; 301 return ret;
302} 302}
303 303
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index adc12e227303..3c0256962f7d 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -45,7 +45,7 @@
45#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ 45#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
46 46
47static int backlog_rcv(struct sock *sk, struct sk_buff *skb); 47static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
48static void tipc_data_ready(struct sock *sk, int len); 48static void tipc_data_ready(struct sock *sk);
49static void tipc_write_space(struct sock *sk); 49static void tipc_write_space(struct sock *sk);
50static int tipc_release(struct socket *sock); 50static int tipc_release(struct socket *sock);
51static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); 51static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
@@ -1248,7 +1248,7 @@ static void tipc_write_space(struct sock *sk)
1248 * @sk: socket 1248 * @sk: socket
1249 * @len: the length of messages 1249 * @len: the length of messages
1250 */ 1250 */
1251static void tipc_data_ready(struct sock *sk, int len) 1251static void tipc_data_ready(struct sock *sk)
1252{ 1252{
1253 struct socket_wq *wq; 1253 struct socket_wq *wq;
1254 1254
@@ -1410,7 +1410,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1410 __skb_queue_tail(&sk->sk_receive_queue, buf); 1410 __skb_queue_tail(&sk->sk_receive_queue, buf);
1411 skb_set_owner_r(buf, sk); 1411 skb_set_owner_r(buf, sk);
1412 1412
1413 sk->sk_data_ready(sk, 0); 1413 sk->sk_data_ready(sk);
1414 return TIPC_OK; 1414 return TIPC_OK;
1415} 1415}
1416 1416
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 94404f19f9de..bb7e8ba821f4 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1217,7 +1217,7 @@ restart:
1217 __skb_queue_tail(&other->sk_receive_queue, skb); 1217 __skb_queue_tail(&other->sk_receive_queue, skb);
1218 spin_unlock(&other->sk_receive_queue.lock); 1218 spin_unlock(&other->sk_receive_queue.lock);
1219 unix_state_unlock(other); 1219 unix_state_unlock(other);
1220 other->sk_data_ready(other, 0); 1220 other->sk_data_ready(other);
1221 sock_put(other); 1221 sock_put(other);
1222 return 0; 1222 return 0;
1223 1223
@@ -1600,7 +1600,7 @@ restart:
1600 if (max_level > unix_sk(other)->recursion_level) 1600 if (max_level > unix_sk(other)->recursion_level)
1601 unix_sk(other)->recursion_level = max_level; 1601 unix_sk(other)->recursion_level = max_level;
1602 unix_state_unlock(other); 1602 unix_state_unlock(other);
1603 other->sk_data_ready(other, len); 1603 other->sk_data_ready(other);
1604 sock_put(other); 1604 sock_put(other);
1605 scm_destroy(siocb->scm); 1605 scm_destroy(siocb->scm);
1606 return len; 1606 return len;
@@ -1706,7 +1706,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1706 if (max_level > unix_sk(other)->recursion_level) 1706 if (max_level > unix_sk(other)->recursion_level)
1707 unix_sk(other)->recursion_level = max_level; 1707 unix_sk(other)->recursion_level = max_level;
1708 unix_state_unlock(other); 1708 unix_state_unlock(other);
1709 other->sk_data_ready(other, size); 1709 other->sk_data_ready(other);
1710 sent += size; 1710 sent += size;
1711 } 1711 }
1712 1712
diff --git a/net/vmw_vsock/vmci_transport_notify.c b/net/vmw_vsock/vmci_transport_notify.c
index 9a730744e7bc..9b7f207f2bee 100644
--- a/net/vmw_vsock/vmci_transport_notify.c
+++ b/net/vmw_vsock/vmci_transport_notify.c
@@ -315,7 +315,7 @@ vmci_transport_handle_wrote(struct sock *sk,
315 struct vsock_sock *vsk = vsock_sk(sk); 315 struct vsock_sock *vsk = vsock_sk(sk);
316 PKT_FIELD(vsk, sent_waiting_read) = false; 316 PKT_FIELD(vsk, sent_waiting_read) = false;
317#endif 317#endif
318 sk->sk_data_ready(sk, 0); 318 sk->sk_data_ready(sk);
319} 319}
320 320
321static void vmci_transport_notify_pkt_socket_init(struct sock *sk) 321static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
diff --git a/net/vmw_vsock/vmci_transport_notify_qstate.c b/net/vmw_vsock/vmci_transport_notify_qstate.c
index 622bd7aa1016..dc9c7929a2f9 100644
--- a/net/vmw_vsock/vmci_transport_notify_qstate.c
+++ b/net/vmw_vsock/vmci_transport_notify_qstate.c
@@ -92,7 +92,7 @@ vmci_transport_handle_wrote(struct sock *sk,
92 bool bottom_half, 92 bool bottom_half,
93 struct sockaddr_vm *dst, struct sockaddr_vm *src) 93 struct sockaddr_vm *dst, struct sockaddr_vm *src)
94{ 94{
95 sk->sk_data_ready(sk, 0); 95 sk->sk_data_ready(sk);
96} 96}
97 97
98static void vsock_block_update_write_window(struct sock *sk) 98static void vsock_block_update_write_window(struct sock *sk)
@@ -290,7 +290,7 @@ vmci_transport_notify_pkt_recv_post_dequeue(
290 /* See the comment in 290 /* See the comment in
291 * vmci_transport_notify_pkt_send_post_enqueue(). 291 * vmci_transport_notify_pkt_send_post_enqueue().
292 */ 292 */
293 sk->sk_data_ready(sk, 0); 293 sk->sk_data_ready(sk);
294 } 294 }
295 295
296 return err; 296 return err;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 6177479c7de9..5ad4418ef093 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1064,7 +1064,7 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
1064 x25_start_heartbeat(make); 1064 x25_start_heartbeat(make);
1065 1065
1066 if (!sock_flag(sk, SOCK_DEAD)) 1066 if (!sock_flag(sk, SOCK_DEAD))
1067 sk->sk_data_ready(sk, skb->len); 1067 sk->sk_data_ready(sk);
1068 rc = 1; 1068 rc = 1;
1069 sock_put(sk); 1069 sock_put(sk);
1070out: 1070out:
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index d1b0dc79bb6f..7ac50098a375 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -79,7 +79,7 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
79 skb_set_owner_r(skbn, sk); 79 skb_set_owner_r(skbn, sk);
80 skb_queue_tail(&sk->sk_receive_queue, skbn); 80 skb_queue_tail(&sk->sk_receive_queue, skbn);
81 if (!sock_flag(sk, SOCK_DEAD)) 81 if (!sock_flag(sk, SOCK_DEAD))
82 sk->sk_data_ready(sk, skbn->len); 82 sk->sk_data_ready(sk);
83 83
84 return 0; 84 return 0;
85} 85}