author    Jiri Kosina <jkosina@suse.cz>    2010-12-22 12:57:02 -0500
committer Jiri Kosina <jkosina@suse.cz>    2010-12-22 12:57:02 -0500
commit    4b7bd364700d9ac8372eff48832062b936d0793b (patch)
tree      0dbf78c95456a0b02d07fcd473281f04a87e266d /net
parent    c0d8768af260e2cbb4bf659ae6094a262c86b085 (diff)
parent    90a8a73c06cc32b609a880d48449d7083327e11a (diff)
Merge branch 'master' into for-next
Conflicts:
	MAINTAINERS
	arch/arm/mach-omap2/pm24xx.c
	drivers/scsi/bfa/bfa_fcpim.c

Needed to update to apply fixes for which the old branch was too outdated.
Diffstat (limited to 'net')
-rw-r--r--  net/atm/atm_sysfs.c | 3
-rw-r--r--  net/atm/resources.c | 7
-rw-r--r--  net/atm/resources.h | 2
-rw-r--r--  net/ax25/af_ax25.c | 2
-rw-r--r--  net/bluetooth/hci_event.c | 6
-rw-r--r--  net/bluetooth/hidp/Kconfig | 2
-rw-r--r--  net/bluetooth/l2cap.c | 8
-rw-r--r--  net/bluetooth/rfcomm/core.c | 13
-rw-r--r--  net/bluetooth/sco.c | 6
-rw-r--r--  net/caif/caif_config_util.c | 13
-rw-r--r--  net/caif/caif_dev.c | 2
-rw-r--r--  net/caif/caif_socket.c | 45
-rw-r--r--  net/caif/cfcnfg.c | 17
-rw-r--r--  net/caif/cfctrl.c | 3
-rw-r--r--  net/caif/cfdbgl.c | 14
-rw-r--r--  net/caif/cfrfml.c | 2
-rw-r--r--  net/can/bcm.c | 2
-rw-r--r--  net/ceph/Makefile | 22
-rw-r--r--  net/ceph/buffer.c | 2
-rw-r--r--  net/ceph/messenger.c | 21
-rw-r--r--  net/ceph/osd_client.c | 25
-rw-r--r--  net/ceph/pagevec.c | 16
-rw-r--r--  net/core/dev.c | 2
-rw-r--r--  net/core/dst.c | 1
-rw-r--r--  net/core/filter.c | 83
-rw-r--r--  net/core/net-sysfs.c | 10
-rw-r--r--  net/core/pktgen.c | 6
-rw-r--r--  net/core/request_sock.c | 4
-rw-r--r--  net/core/rtnetlink.c | 9
-rw-r--r--  net/core/sock.c | 14
-rw-r--r--  net/core/timestamping.c | 6
-rw-r--r--  net/dccp/input.c | 3
-rw-r--r--  net/decnet/af_decnet.c | 4
-rw-r--r--  net/decnet/sysctl_net_decnet.c | 4
-rw-r--r--  net/econet/af_econet.c | 99
-rw-r--r--  net/ipv4/fib_lookup.h | 5
-rw-r--r--  net/ipv4/fib_trie.c | 2
-rw-r--r--  net/ipv4/icmp.c | 3
-rw-r--r--  net/ipv4/igmp.c | 4
-rw-r--r--  net/ipv4/inet_diag.c | 27
-rw-r--r--  net/ipv4/inet_hashtables.c | 3
-rw-r--r--  net/ipv4/ip_gre.c | 6
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 1
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 1
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c | 40
-rw-r--r--  net/ipv4/proc.c | 9
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 11
-rw-r--r--  net/ipv4/tcp.c | 6
-rw-r--r--  net/ipv4/tcp_input.c | 11
-rw-r--r--  net/ipv4/tcp_ipv4.c | 12
-rw-r--r--  net/ipv4/tcp_minisocks.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 42
-rw-r--r--  net/ipv4/udp.c | 4
-rw-r--r--  net/ipv6/addrconf.c | 56
-rw-r--r--  net/ipv6/ip6_tunnel.c | 7
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 1
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 2
-rw-r--r--  net/ipv6/proc.c | 4
-rw-r--r--  net/ipv6/reassembly.c | 2
-rw-r--r--  net/ipv6/route.c | 8
-rw-r--r--  net/ipv6/sit.c | 3
-rw-r--r--  net/irda/af_irda.c | 1
-rw-r--r--  net/irda/irnet/irnet_ppp.c | 1
-rw-r--r--  net/irda/irttp.c | 30
-rw-r--r--  net/l2tp/l2tp_debugfs.c | 2
-rw-r--r--  net/l2tp/l2tp_ip.c | 6
-rw-r--r--  net/llc/af_llc.c | 5
-rw-r--r--  net/mac80211/Kconfig | 2
-rw-r--r--  net/mac80211/iface.c | 6
-rw-r--r--  net/mac80211/rx.c | 6
-rw-r--r--  net/mac80211/tx.c | 28
-rw-r--r--  net/netfilter/ipvs/Kconfig | 1
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_proto.c | 6
-rw-r--r--  net/packet/af_packet.c | 7
-rw-r--r--  net/rds/loop.c | 4
-rw-r--r--  net/rds/message.c | 7
-rw-r--r--  net/rds/rdma.c | 128
-rw-r--r--  net/rds/send.c | 4
-rw-r--r--  net/rds/tcp.c | 6
-rw-r--r--  net/sched/cls_basic.c | 4
-rw-r--r--  net/sched/cls_cgroup.c | 2
-rw-r--r--  net/sched/em_text.c | 3
-rw-r--r--  net/sctp/protocol.c | 2
-rw-r--r--  net/sctp/socket.c | 12
-rw-r--r--  net/sctp/sysctl.c | 4
-rw-r--r--  net/socket.c | 19
-rw-r--r--  net/sunrpc/clnt.c | 24
-rw-r--r--  net/sunrpc/stats.c | 4
-rw-r--r--  net/sunrpc/svc_xprt.c | 10
-rw-r--r--  net/tipc/socket.c | 1
-rw-r--r--  net/unix/af_unix.c | 37
-rw-r--r--  net/unix/garbage.c | 9
-rw-r--r--  net/wireless/chan.c | 54
-rw-r--r--  net/wireless/nl80211.c | 4
-rw-r--r--  net/x25/x25_facilities.c | 20
-rw-r--r--  net/x25/x25_in.c | 2
-rw-r--r--  net/x25/x25_link.c | 1
-rw-r--r--  net/xfrm/xfrm_hash.c | 2
-rw-r--r--  net/xfrm/xfrm_state.c | 2
100 files changed, 759 insertions, 460 deletions
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index 799c631f0fed..f7fa67c78766 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -143,12 +143,13 @@ static struct class atm_class = {
 	.dev_uevent = atm_uevent,
 };
 
-int atm_register_sysfs(struct atm_dev *adev)
+int atm_register_sysfs(struct atm_dev *adev, struct device *parent)
 {
 	struct device *cdev = &adev->class_dev;
 	int i, j, err;
 
 	cdev->class = &atm_class;
+	cdev->parent = parent;
 	dev_set_drvdata(cdev, adev);
 
 	dev_set_name(cdev, "%s%d", adev->type, adev->number);
diff --git a/net/atm/resources.c b/net/atm/resources.c
index d29e58261511..23f45ce6f351 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -74,8 +74,9 @@ struct atm_dev *atm_dev_lookup(int number)
 }
 EXPORT_SYMBOL(atm_dev_lookup);
 
-struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
-				 int number, unsigned long *flags)
+struct atm_dev *atm_dev_register(const char *type, struct device *parent,
+				 const struct atmdev_ops *ops, int number,
+				 unsigned long *flags)
 {
 	struct atm_dev *dev, *inuse;
 
@@ -115,7 +116,7 @@ struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
 		goto out_fail;
 	}
 
-	if (atm_register_sysfs(dev) < 0) {
+	if (atm_register_sysfs(dev, parent) < 0) {
 		pr_err("atm_register_sysfs failed for dev %s\n", type);
 		atm_proc_dev_deregister(dev);
 		goto out_fail;
diff --git a/net/atm/resources.h b/net/atm/resources.h
index 126fb1840dfb..521431e30507 100644
--- a/net/atm/resources.h
+++ b/net/atm/resources.h
@@ -42,6 +42,6 @@ static inline void atm_proc_dev_deregister(struct atm_dev *dev)
 
 #endif /* CONFIG_PROC_FS */
 
-int atm_register_sysfs(struct atm_dev *adev);
+int atm_register_sysfs(struct atm_dev *adev, struct device *parent);
 void atm_unregister_sysfs(struct atm_dev *adev);
 #endif
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 26eaebf4aaa9..bb86d2932394 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1392,6 +1392,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
 	ax25_cb *ax25;
 	int err = 0;
 
+	memset(fsa, 0, sizeof(fsa));
 	lock_sock(sk);
 	ax25 = ax25_sk(sk);
 
@@ -1403,7 +1404,6 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
 
 		fsa->fsa_ax25.sax25_family = AF_AX25;
 		fsa->fsa_ax25.sax25_call = ax25->dest_addr;
-		fsa->fsa_ax25.sax25_ndigis = 0;
 
 		if (ax25->digipeat != NULL) {
 			ndigi = ax25->digipeat->ndigi;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index bfef5bae0b3a..84093b0000b9 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1175,6 +1175,12 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
 			hci_send_cmd(hdev,
 					HCI_OP_READ_REMOTE_EXT_FEATURES,
 					sizeof(cp), &cp);
+		} else if (!ev->status && conn->out &&
+				conn->sec_level == BT_SECURITY_HIGH) {
+			struct hci_cp_auth_requested cp;
+			cp.handle = ev->handle;
+			hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
+					sizeof(cp), &cp);
 		} else {
 			conn->state = BT_CONNECTED;
 			hci_proto_connect_cfm(conn, ev->status);
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig
index 98fdfa1fbddd..86a91543172a 100644
--- a/net/bluetooth/hidp/Kconfig
+++ b/net/bluetooth/hidp/Kconfig
@@ -1,6 +1,6 @@
 config BT_HIDP
 	tristate "HIDP protocol support"
-	depends on BT && BT_L2CAP && INPUT
+	depends on BT && BT_L2CAP && INPUT && HID_SUPPORT
 	select HID
 	help
 	  HIDP (Human Interface Device Protocol) is a transport layer
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index daa7a988d9a6..cd8f6ea03841 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -2421,11 +2421,11 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned
 		break;
 
 	case 2:
-		*val = __le16_to_cpu(*((__le16 *) opt->val));
+		*val = get_unaligned_le16(opt->val);
 		break;
 
 	case 4:
-		*val = __le32_to_cpu(*((__le32 *) opt->val));
+		*val = get_unaligned_le32(opt->val);
 		break;
 
 	default:
@@ -2452,11 +2452,11 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
 		break;
 
 	case 2:
-		*((__le16 *) opt->val) = cpu_to_le16(val);
+		put_unaligned_le16(val, opt->val);
 		break;
 
 	case 4:
-		*((__le32 *) opt->val) = cpu_to_le32(val);
+		put_unaligned_le32(val, opt->val);
 		break;
 
 	default:
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 39a5d87e33b4..fa642aa652bd 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -79,7 +79,10 @@ static void rfcomm_make_uih(struct sk_buff *skb, u8 addr);
 
 static void rfcomm_process_connect(struct rfcomm_session *s);
 
-static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err);
+static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
+							bdaddr_t *dst,
+							u8 sec_level,
+							int *err);
 static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst);
 static void rfcomm_session_del(struct rfcomm_session *s);
 
@@ -401,7 +404,7 @@ static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst,
 
 	s = rfcomm_session_get(src, dst);
 	if (!s) {
-		s = rfcomm_session_create(src, dst, &err);
+		s = rfcomm_session_create(src, dst, d->sec_level, &err);
 		if (!s)
 			return err;
 	}
@@ -679,7 +682,10 @@ static void rfcomm_session_close(struct rfcomm_session *s, int err)
 	rfcomm_session_put(s);
 }
 
-static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err)
+static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
+							bdaddr_t *dst,
+							u8 sec_level,
+							int *err)
 {
 	struct rfcomm_session *s = NULL;
 	struct sockaddr_l2 addr;
@@ -704,6 +710,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst
 	sk = sock->sk;
 	lock_sock(sk);
 	l2cap_pi(sk)->imtu = l2cap_mtu;
+	l2cap_pi(sk)->sec_level = sec_level;
 	if (l2cap_ertm)
 		l2cap_pi(sk)->mode = L2CAP_MODE_ERTM;
 	release_sock(sk);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index d0927d1fdada..66b9e5c0523a 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -882,7 +882,7 @@ static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
 	int lm = 0;
 
 	if (type != SCO_LINK && type != ESCO_LINK)
-		return 0;
+		return -EINVAL;
 
 	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
 
@@ -908,7 +908,7 @@ static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
 	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
 
 	if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
-		return 0;
+		return -EINVAL;
 
 	if (!status) {
 		struct sco_conn *conn;
@@ -927,7 +927,7 @@ static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
 	BT_DBG("hcon %p reason %d", hcon, reason);
 
 	if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
-		return 0;
+		return -EINVAL;
 
 	sco_conn_del(hcon, bt_err(reason));
 
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
index 76ae68303d3a..d522d8c1703e 100644
--- a/net/caif/caif_config_util.c
+++ b/net/caif/caif_config_util.c
@@ -16,11 +16,18 @@ int connect_req_to_link_param(struct cfcnfg *cnfg,
 {
 	struct dev_info *dev_info;
 	enum cfcnfg_phy_preference pref;
+	int res;
+
 	memset(l, 0, sizeof(*l));
-	l->priority = s->priority;
+	/* In caif protocol low value is high priority */
+	l->priority = CAIF_PRIO_MAX - s->priority + 1;
 
-	if (s->link_name[0] != '\0')
-		l->phyid = cfcnfg_get_named(cnfg, s->link_name);
+	if (s->ifindex != 0){
+		res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex);
+		if (res < 0)
+			return res;
+		l->phyid = res;
+	}
 	else {
 		switch (s->link_selector) {
 		case CAIF_LINK_HIGH_BANDW:
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index b99369a055d1..a42a408306e4 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -307,6 +307,8 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
 
 	case NETDEV_UNREGISTER:
 		caifd = caif_get(dev);
+		if (caifd == NULL)
+			break;
 		netdev_info(dev, "unregister\n");
 		atomic_set(&caifd->state, what);
 		caif_device_destroy(dev);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 2eca2dd0000f..1bf0cf503796 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -716,8 +716,7 @@ static int setsockopt(struct socket *sock,
 {
 	struct sock *sk = sock->sk;
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
-	int prio, linksel;
-	struct ifreq ifreq;
+	int linksel;
 
 	if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
 		return -ENOPROTOOPT;
@@ -735,33 +734,6 @@ static int setsockopt(struct socket *sock,
 		release_sock(&cf_sk->sk);
 		return 0;
 
-	case SO_PRIORITY:
-		if (lvl != SOL_SOCKET)
-			goto bad_sol;
-		if (ol < sizeof(int))
-			return -EINVAL;
-		if (copy_from_user(&prio, ov, sizeof(int)))
-			return -EINVAL;
-		lock_sock(&(cf_sk->sk));
-		cf_sk->conn_req.priority = prio;
-		release_sock(&cf_sk->sk);
-		return 0;
-
-	case SO_BINDTODEVICE:
-		if (lvl != SOL_SOCKET)
-			goto bad_sol;
-		if (ol < sizeof(struct ifreq))
-			return -EINVAL;
-		if (copy_from_user(&ifreq, ov, sizeof(ifreq)))
-			return -EFAULT;
-		lock_sock(&(cf_sk->sk));
-		strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name,
-			sizeof(cf_sk->conn_req.link_name));
-		cf_sk->conn_req.link_name
-			[sizeof(cf_sk->conn_req.link_name)-1] = 0;
-		release_sock(&cf_sk->sk);
-		return 0;
-
 	case CAIFSO_REQ_PARAM:
 		if (lvl != SOL_CAIF)
 			goto bad_sol;
@@ -880,6 +852,18 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
 	sock->state = SS_CONNECTING;
 	sk->sk_state = CAIF_CONNECTING;
 
+	/* Check priority value comming from socket */
+	/* if priority value is out of range it will be ajusted */
+	if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
+		cf_sk->conn_req.priority = CAIF_PRIO_MAX;
+	else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN)
+		cf_sk->conn_req.priority = CAIF_PRIO_MIN;
+	else
+		cf_sk->conn_req.priority = cf_sk->sk.sk_priority;
+
+	/*ifindex = id of the interface.*/
+	cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
+
 	dbfs_atomic_inc(&cnt.num_connect_req);
 	cf_sk->layer.receive = caif_sktrecv_cb;
 	err = caif_connect_client(&cf_sk->conn_req,
@@ -905,6 +889,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
 	cf_sk->maxframe = mtu - (headroom + tailroom);
 	if (cf_sk->maxframe < 1) {
 		pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu);
+		err = -ENODEV;
 		goto out;
 	}
 
@@ -1142,7 +1127,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
 	set_rx_flow_on(cf_sk);
 
 	/* Set default options on configuration */
-	cf_sk->conn_req.priority = CAIF_PRIO_NORMAL;
+	cf_sk->sk.sk_priority= CAIF_PRIO_NORMAL;
 	cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
 	cf_sk->conn_req.protocol = protocol;
 	/* Increase the number of sockets created. */
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 41adafd18914..21ede141018a 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -173,18 +173,15 @@ static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg,
 	return NULL;
 }
 
-int cfcnfg_get_named(struct cfcnfg *cnfg, char *name)
+
+int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
 {
 	int i;
-
-	/* Try to match with specified name */
-	for (i = 0; i < MAX_PHY_LAYERS; i++) {
-		if (cnfg->phy_layers[i].frm_layer != NULL
-		    && strcmp(cnfg->phy_layers[i].phy_layer->name,
-			      name) == 0)
-			return cnfg->phy_layers[i].frm_layer->id;
-	}
-	return 0;
+	for (i = 0; i < MAX_PHY_LAYERS; i++)
+		if (cnfg->phy_layers[i].frm_layer != NULL &&
+				cnfg->phy_layers[i].ifindex == ifi)
+			return i;
+	return -ENODEV;
 }
 
 int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 08f267a109aa..3cd8f978e309 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -361,11 +361,10 @@ void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
 	struct cfctrl_request_info *p, *tmp;
 	struct cfctrl *ctrl = container_obj(layr);
 	spin_lock(&ctrl->info_list_lock);
-	pr_warn("enter\n");
 
 	list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
 		if (p->client_layer == adap_layer) {
-			pr_warn("cancel req :%d\n", p->sequence_no);
+			pr_debug("cancel req :%d\n", p->sequence_no);
 			list_del(&p->list);
 			kfree(p);
 		}
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index 496fda9ac66f..11a2af4c162a 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -12,6 +12,8 @@
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>
 
+#define container_obj(layr) ((struct cfsrvl *) layr)
+
 static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt);
 static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt);
 
@@ -38,5 +40,17 @@ static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt)
 
 static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt)
 {
+	struct cfsrvl *service = container_obj(layr);
+	struct caif_payload_info *info;
+	int ret;
+
+	if (!cfsrvl_ready(service, &ret))
+		return ret;
+
+	/* Add info for MUX-layer to route the packet out */
+	info = cfpkt_info(pkt);
+	info->channel_id = service->layer.id;
+	info->dev_info = &service->dev_info;
+
 	return layr->dn->transmit(layr->dn, pkt);
 }
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index bde8481e8d25..e2fb5fa75795 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -193,7 +193,7 @@ out:
 
 static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
 {
-	caif_assert(cfpkt_getlen(pkt) >= rfml->fragment_size);
+	caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size);
 
 	/* Add info for MUX-layer to route the packet out. */
 	cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 08ffe9e4be20..6faa8256e10c 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -125,7 +125,7 @@ struct bcm_sock {
 	struct list_head tx_ops;
 	unsigned long dropped_usr_msgs;
 	struct proc_dir_entry *bcm_proc_read;
-	char procname [9]; /* pointer printed in ASCII with \0 */
+	char procname [20]; /* pointer printed in ASCII with \0 */
 };
 
 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
diff --git a/net/ceph/Makefile b/net/ceph/Makefile
index aab1cabb8035..5f19415ec9c0 100644
--- a/net/ceph/Makefile
+++ b/net/ceph/Makefile
@@ -1,9 +1,6 @@
 #
 # Makefile for CEPH filesystem.
 #
-
-ifneq ($(KERNELRELEASE),)
-
 obj-$(CONFIG_CEPH_LIB) += libceph.o
 
 libceph-objs := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \
@@ -16,22 +13,3 @@ libceph-objs := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \
 	ceph_fs.o ceph_strings.o ceph_hash.o \
 	pagevec.o
 
-else
-#Otherwise we were called directly from the command
-# line; invoke the kernel build system.
-
-KERNELDIR ?= /lib/modules/$(shell uname -r)/build
-PWD := $(shell pwd)
-
-default: all
-
-all:
-	$(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules
-
-modules_install:
-	$(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules_install
-
-clean:
-	$(MAKE) -C $(KERNELDIR) M=$(PWD) clean
-
-endif
diff --git a/net/ceph/buffer.c b/net/ceph/buffer.c
index 53d8abfa25d5..bf3e6a13c215 100644
--- a/net/ceph/buffer.c
+++ b/net/ceph/buffer.c
@@ -19,7 +19,7 @@ struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
 	if (b->vec.iov_base) {
 		b->is_vmalloc = false;
 	} else {
-		b->vec.iov_base = __vmalloc(len, gfp, PAGE_KERNEL);
+		b->vec.iov_base = __vmalloc(len, gfp | __GFP_HIGHMEM, PAGE_KERNEL);
 		if (!b->vec.iov_base) {
 			kfree(b);
 			return NULL;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 0e8157ee5d43..b6ff4a1519ab 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -97,11 +97,9 @@ struct workqueue_struct *ceph_msgr_wq;
 int ceph_msgr_init(void)
 {
 	ceph_msgr_wq = create_workqueue("ceph-msgr");
-	if (IS_ERR(ceph_msgr_wq)) {
-		int ret = PTR_ERR(ceph_msgr_wq);
-		pr_err("msgr_init failed to create workqueue: %d\n", ret);
-		ceph_msgr_wq = NULL;
-		return ret;
+	if (!ceph_msgr_wq) {
+		pr_err("msgr_init failed to create workqueue\n");
+		return -ENOMEM;
 	}
 	return 0;
 }
@@ -540,8 +538,7 @@ static void prepare_write_message(struct ceph_connection *con)
 	/* initialize page iterator */
 	con->out_msg_pos.page = 0;
 	if (m->pages)
-		con->out_msg_pos.page_pos =
-			le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK;
+		con->out_msg_pos.page_pos = m->page_alignment;
 	else
 		con->out_msg_pos.page_pos = 0;
 	con->out_msg_pos.data_pos = 0;
@@ -1491,7 +1488,7 @@ static int read_partial_message(struct ceph_connection *con)
 	struct ceph_msg *m = con->in_msg;
 	int ret;
 	int to, left;
-	unsigned front_len, middle_len, data_len, data_off;
+	unsigned front_len, middle_len, data_len;
 	int datacrc = con->msgr->nocrc;
 	int skip;
 	u64 seq;
@@ -1527,19 +1524,17 @@ static int read_partial_message(struct ceph_connection *con)
 	data_len = le32_to_cpu(con->in_hdr.data_len);
 	if (data_len > CEPH_MSG_MAX_DATA_LEN)
 		return -EIO;
-	data_off = le16_to_cpu(con->in_hdr.data_off);
 
 	/* verify seq# */
 	seq = le64_to_cpu(con->in_hdr.seq);
 	if ((s64)seq - (s64)con->in_seq < 1) {
-		pr_info("skipping %s%lld %s seq %lld, expected %lld\n",
+		pr_info("skipping %s%lld %s seq %lld expected %lld\n",
 			ENTITY_NAME(con->peer_name),
 			ceph_pr_addr(&con->peer_addr.in_addr),
 			seq, con->in_seq + 1);
 		con->in_base_pos = -front_len - middle_len - data_len -
 			sizeof(m->footer);
 		con->in_tag = CEPH_MSGR_TAG_READY;
-		con->in_seq++;
 		return 0;
 	} else if ((s64)seq - (s64)con->in_seq > 1) {
 		pr_err("read_partial_message bad seq %lld expected %lld\n",
@@ -1576,7 +1571,7 @@ static int read_partial_message(struct ceph_connection *con)
 
 	con->in_msg_pos.page = 0;
 	if (m->pages)
-		con->in_msg_pos.page_pos = data_off & ~PAGE_MASK;
+		con->in_msg_pos.page_pos = m->page_alignment;
 	else
 		con->in_msg_pos.page_pos = 0;
 	con->in_msg_pos.data_pos = 0;
@@ -2301,6 +2296,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
 
 	/* data */
 	m->nr_pages = 0;
+	m->page_alignment = 0;
 	m->pages = NULL;
 	m->pagelist = NULL;
 	m->bio = NULL;
@@ -2370,6 +2366,7 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
 				   type, front_len);
 			return NULL;
 		}
+		msg->page_alignment = le16_to_cpu(hdr->data_off);
 	}
 	memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
 
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 79391994b3ed..3e20a122ffa2 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -71,6 +71,7 @@ void ceph_calc_raw_layout(struct ceph_osd_client *osdc,
 		op->extent.length = objlen;
 	}
 	req->r_num_pages = calc_pages_for(off, *plen);
+	req->r_page_alignment = off & ~PAGE_MASK;
 	if (op->op == CEPH_OSD_OP_WRITE)
 		op->payload_len = *plen;
 
@@ -390,6 +391,8 @@ void ceph_osdc_build_request(struct ceph_osd_request *req,
 		req->r_request->hdr.data_len = cpu_to_le32(data_len);
 	}
 
+	req->r_request->page_alignment = req->r_page_alignment;
+
 	BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
 	msg_size = p - msg->front.iov_base;
 	msg->front.iov_len = msg_size;
@@ -419,7 +422,8 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 					       u32 truncate_seq,
 					       u64 truncate_size,
 					       struct timespec *mtime,
-					       bool use_mempool, int num_reply)
+					       bool use_mempool, int num_reply,
+					       int page_align)
 {
 	struct ceph_osd_req_op ops[3];
 	struct ceph_osd_request *req;
@@ -447,6 +451,10 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 	calc_layout(osdc, vino, layout, off, plen, req, ops);
 	req->r_file_layout = *layout;  /* keep a copy */
 
+	/* in case it differs from natural alignment that calc_layout
+	   filled in for us */
+	req->r_page_alignment = page_align;
+
 	ceph_osdc_build_request(req, off, plen, ops,
 				snapc,
 				mtime,
@@ -1489,7 +1497,7 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
 			struct ceph_vino vino, struct ceph_file_layout *layout,
 			u64 off, u64 *plen,
 			u32 truncate_seq, u64 truncate_size,
-			struct page **pages, int num_pages)
+			struct page **pages, int num_pages, int page_align)
 {
 	struct ceph_osd_request *req;
 	int rc = 0;
@@ -1499,15 +1507,15 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
 	req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
 				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 				    NULL, 0, truncate_seq, truncate_size, NULL,
-				    false, 1);
+				    false, 1, page_align);
 	if (!req)
 		return -ENOMEM;
 
 	/* it may be a short read due to an object boundary */
 	req->r_pages = pages;
 
-	dout("readpages final extent is %llu~%llu (%d pages)\n",
-	     off, *plen, req->r_num_pages);
+	dout("readpages final extent is %llu~%llu (%d pages align %d)\n",
+	     off, *plen, req->r_num_pages, page_align);
 
 	rc = ceph_osdc_start_request(osdc, req, false);
 	if (!rc)
@@ -1533,6 +1541,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
 {
 	struct ceph_osd_request *req;
 	int rc = 0;
+	int page_align = off & ~PAGE_MASK;
 
 	BUG_ON(vino.snap != CEPH_NOSNAP);
 	req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
@@ -1541,7 +1550,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
 				    CEPH_OSD_FLAG_WRITE,
 				    snapc, do_sync,
 				    truncate_seq, truncate_size, mtime,
-				    nofail, 1);
+				    nofail, 1, page_align);
 	if (!req)
 		return -ENOMEM;
 
@@ -1638,8 +1647,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 	m = ceph_msg_get(req->r_reply);
 
 	if (data_len > 0) {
-		unsigned data_off = le16_to_cpu(hdr->data_off);
-		int want = calc_pages_for(data_off & ~PAGE_MASK, data_len);
+		int want = calc_pages_for(req->r_page_alignment, data_len);
 
 		if (unlikely(req->r_num_pages < want)) {
 			pr_warning("tid %lld reply %d > expected %d pages\n",
@@ -1651,6 +1659,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 		}
 		m->pages = req->r_pages;
 		m->nr_pages = req->r_num_pages;
+		m->page_alignment = req->r_page_alignment;
 #ifdef CONFIG_BLOCK
 		m->bio = req->r_bio;
 #endif
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 54caf0687155..1a040e64c69f 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -13,8 +13,7 @@
  * build a vector of user pages
  */
 struct page **ceph_get_direct_page_vector(const char __user *data,
-					  int num_pages,
-					  loff_t off, size_t len)
+					  int num_pages, bool write_page)
 {
 	struct page **pages;
 	int rc;
@@ -25,24 +24,27 @@ struct page **ceph_get_direct_page_vector(const char __user *data,
 
 	down_read(&current->mm->mmap_sem);
 	rc = get_user_pages(current, current->mm, (unsigned long)data,
-			    num_pages, 0, 0, pages, NULL);
+			    num_pages, write_page, 0, pages, NULL);
 	up_read(&current->mm->mmap_sem);
-	if (rc < 0)
+	if (rc < num_pages)
 		goto fail;
 	return pages;
 
 fail:
-	kfree(pages);
+	ceph_put_page_vector(pages, rc > 0 ? rc : 0, false);
 	return ERR_PTR(rc);
 }
 EXPORT_SYMBOL(ceph_get_direct_page_vector);
 
-void ceph_put_page_vector(struct page **pages, int num_pages)
+void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
 {
 	int i;
 
-	for (i = 0; i < num_pages; i++)
+	for (i = 0; i < num_pages; i++) {
+		if (dirty)
+			set_page_dirty_lock(pages[i]);
 		put_page(pages[i]);
+	}
 	kfree(pages);
 }
 EXPORT_SYMBOL(ceph_put_page_vector);
diff --git a/net/core/dev.c b/net/core/dev.c
index 89204e8c0e14..126694116852 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2131,7 +2131,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 	} else {
 		struct sock *sk = skb->sk;
 		queue_index = sk_tx_queue_get(sk);
-		if (queue_index < 0) {
+		if (queue_index < 0 || queue_index >= dev->real_num_tx_queues) {
 
 			queue_index = 0;
 			if (dev->real_num_tx_queues > 1)
diff --git a/net/core/dst.c b/net/core/dst.c
index 8abe628b79f1..b99c7c7ffce2 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -370,6 +370,7 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
 
 static struct notifier_block dst_dev_notifier = {
 	.notifier_call = dst_dev_event,
+	.priority = -10, /* must be called after other network notifiers */
 };
 
 void __init dst_init(void)
diff --git a/net/core/filter.c b/net/core/filter.c
index 7beaec36b541..ae21a0d3c4a2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -112,39 +112,41 @@ EXPORT_SYMBOL(sk_filter);
  */
 unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
 {
-	struct sock_filter *fentry;	/* We walk down these */
 	void *ptr;
 	u32 A = 0;		/* Accumulator */
 	u32 X = 0;		/* Index Register */
 	u32 mem[BPF_MEMWORDS];	/* Scratch Memory Store */
+	unsigned long memvalid = 0;
 	u32 tmp;
 	int k;
 	int pc;
 
+	BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
 	/*
 	 * Process array of filter instructions.
 	 */
 	for (pc = 0; pc < flen; pc++) {
-		fentry = &filter[pc];
+		const struct sock_filter *fentry = &filter[pc];
+		u32 f_k = fentry->k;
 
 		switch (fentry->code) {
 		case BPF_S_ALU_ADD_X:
 			A += X;
 			continue;
 		case BPF_S_ALU_ADD_K:
-			A += fentry->k;
+			A += f_k;
 			continue;
 		case BPF_S_ALU_SUB_X:
 			A -= X;
 			continue;
 		case BPF_S_ALU_SUB_K:
-			A -= fentry->k;
+			A -= f_k;
 			continue;
 		case BPF_S_ALU_MUL_X:
 			A *= X;
 			continue;
 		case BPF_S_ALU_MUL_K:
-			A *= fentry->k;
+			A *= f_k;
 			continue;
 		case BPF_S_ALU_DIV_X:
 			if (X == 0)
@@ -152,49 +154,49 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
 			A /= X;
 			continue;
 		case BPF_S_ALU_DIV_K:
-			A /= fentry->k;
+			A /= f_k;
 			continue;
 		case BPF_S_ALU_AND_X:
 			A &= X;
 			continue;
 		case BPF_S_ALU_AND_K:
-			A &= fentry->k;
+			A &= f_k;
 			continue;
 		case BPF_S_ALU_OR_X:
 			A |= X;
 			continue;
 		case BPF_S_ALU_OR_K:
-			A |= fentry->k;
+			A |= f_k;
 			continue;
 		case BPF_S_ALU_LSH_X:
 			A <<= X;
 			continue;
 		case BPF_S_ALU_LSH_K:
-			A <<= fentry->k;
+			A <<= f_k;
 			continue;
 		case BPF_S_ALU_RSH_X:
 			A >>= X;
 			continue;
 		case BPF_S_ALU_RSH_K:
-			A >>= fentry->k;
+			A >>= f_k;
 			continue;
 		case BPF_S_ALU_NEG:
 			A = -A;
 			continue;
 		case BPF_S_JMP_JA:
-			pc += fentry->k;
+			pc += f_k;
 			continue;
 		case BPF_S_JMP_JGT_K:
-			pc += (A > fentry->k) ? fentry->jt : fentry->jf;
+			pc += (A > f_k) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JGE_K:
-			pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
+			pc += (A >= f_k) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JEQ_K:
-			pc += (A == fentry->k) ? fentry->jt : fentry->jf;
+			pc += (A == f_k) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JSET_K:
-			pc += (A & fentry->k) ? fentry->jt : fentry->jf;
+			pc += (A & f_k) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JGT_X:
 			pc += (A > X) ? fentry->jt : fentry->jf;
@@ -209,7 +211,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
 			pc += (A & X) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_LD_W_ABS:
-			k = fentry->k;
+			k = f_k;
 load_w:
 			ptr = load_pointer(skb, k, 4, &tmp);
 			if (ptr != NULL) {
@@ -218,7 +220,7 @@ load_w:
 			}
 			break;
 		case BPF_S_LD_H_ABS:
-			k = fentry->k;
+			k = f_k;
 load_h:
 			ptr = load_pointer(skb, k, 2, &tmp);
 			if (ptr != NULL) {
@@ -227,7 +229,7 @@ load_h:
 			}
 			break;
 		case BPF_S_LD_B_ABS:
-			k = fentry->k;
+			k = f_k;
 load_b:
 			ptr = load_pointer(skb, k, 1, &tmp);
 			if (ptr != NULL) {
@@ -242,32 +244,34 @@ load_b:
 			X = skb->len;
 			continue;
 		case BPF_S_LD_W_IND:
-			k = X + fentry->k;
+			k = X + f_k;
 			goto load_w;
 		case BPF_S_LD_H_IND:
-			k = X + fentry->k;
+			k = X + f_k;
 			goto load_h;
 		case BPF_S_LD_B_IND:
-			k = X + fentry->k;
+			k = X + f_k;
 			goto load_b;
 		case BPF_S_LDX_B_MSH:
-			ptr = load_pointer(skb, fentry->k, 1, &tmp);
+			ptr = load_pointer(skb, f_k, 1, &tmp);
 			if (ptr != NULL) {
 				X = (*(u8 *)ptr & 0xf) << 2;
 				continue;
 			}
 			return 0;
 		case BPF_S_LD_IMM:
-			A = fentry->k;
+			A = f_k;
 			continue;
 		case BPF_S_LDX_IMM:
-			X = fentry->k;
+			X = f_k;
 			continue;
 		case BPF_S_LD_MEM:
-			A = mem[fentry->k];
+			A = (memvalid & (1UL << f_k)) ?
+				mem[f_k] : 0;
 			continue;
 		case BPF_S_LDX_MEM:
-			X = mem[fentry->k];
+			X = (memvalid & (1UL << f_k)) ?
+				mem[f_k] : 0;
 			continue;
 		case BPF_S_MISC_TAX:
 			X = A;
@@ -276,14 +280,16 @@ load_b:
 			A = X;
 			continue;
 		case BPF_S_RET_K:
-			return fentry->k;
+			return f_k;
 		case BPF_S_RET_A:
 			return A;
 		case BPF_S_ST:
-			mem[fentry->k] = A;
+			memvalid |= 1UL << f_k;
+			mem[f_k] = A;
 			continue;
 		case BPF_S_STX:
-			mem[fentry->k] = X;
+			memvalid |= 1UL << f_k;
+			mem[f_k] = X;
 			continue;
 		default:
 			WARN_ON(1);
@@ -583,23 +589,16 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
 EXPORT_SYMBOL(sk_chk_filter);
 
 /**
- * sk_filter_rcu_release: Release a socket filter by rcu_head
+ * sk_filter_release_rcu - Release a socket filter by rcu_head
  * @rcu: rcu_head that contains the sk_filter to free
  */
-static void sk_filter_rcu_release(struct rcu_head *rcu)
+void sk_filter_release_rcu(struct rcu_head *rcu)
 {
 	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
 
-	sk_filter_release(fp);
-}
-
-static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp)
-{
-	unsigned int size = sk_filter_len(fp);
-
-	atomic_sub(size, &sk->sk_omem_alloc);
-	call_rcu_bh(&fp->rcu, sk_filter_rcu_release);
+	kfree(fp);
 }
+EXPORT_SYMBOL(sk_filter_release_rcu);
 
 /**
  * sk_attach_filter - attach a socket filter
@@ -643,7 +642,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 	rcu_assign_pointer(sk->sk_filter, fp);
 
 	if (old_fp)
-		sk_filter_delayed_uncharge(sk, old_fp);
+		sk_filter_uncharge(sk, old_fp);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(sk_attach_filter);
@@ -657,7 +656,7 @@ int sk_detach_filter(struct sock *sk)
 			   sock_owned_by_user(sk));
 	if (filter) {
 		rcu_assign_pointer(sk->sk_filter, NULL);
-		sk_filter_delayed_uncharge(sk, filter);
+		sk_filter_uncharge(sk, filter);
 		ret = 0;
 	}
 	return ret;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index a5ff5a89f376..7f902cad10f8 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -712,15 +712,21 @@ static void rx_queue_release(struct kobject *kobj)
 
 
 	map = rcu_dereference_raw(queue->rps_map);
-	if (map)
+	if (map) {
+		RCU_INIT_POINTER(queue->rps_map, NULL);
 		call_rcu(&map->rcu, rps_map_release);
+	}
 
 	flow_table = rcu_dereference_raw(queue->rps_flow_table);
-	if (flow_table)
+	if (flow_table) {
+		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
 		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
+	}
 
 	if (atomic_dec_and_test(&first->count))
 		kfree(first);
+	else
+		memset(kobj, 0, sizeof(*kobj));
 }
 
 static struct kobj_type rx_queue_ktype = {
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index fbce4b05a53e..33bc3823ac6f 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -887,7 +887,7 @@ static ssize_t pktgen_if_write(struct file *file,
 	i += len;
 
 	if (debug) {
-		size_t copy = min(count, 1023);
+		size_t copy = min_t(size_t, count, 1023);
 		char tb[copy + 1];
 		if (copy_from_user(tb, user_buffer, copy))
 			return -EFAULT;
@@ -2612,8 +2612,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	/* Update any of the values, used when we're incrementing various
 	 * fields.
 	 */
-	queue_map = pkt_dev->cur_queue_map;
 	mod_cur_headers(pkt_dev);
+	queue_map = pkt_dev->cur_queue_map;
 
 	datalen = (odev->hard_header_len + 16) & ~0xf;
 
@@ -2976,8 +2976,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	/* Update any of the values, used when we're incrementing various
 	 * fields.
 	 */
-	queue_map = pkt_dev->cur_queue_map;
 	mod_cur_headers(pkt_dev);
+	queue_map = pkt_dev->cur_queue_map;
 
 	skb = __netdev_alloc_skb(odev,
 				 pkt_dev->cur_pkt_size + 64
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 7552495aff7a..fceeb37d7161 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -45,9 +45,7 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
 	lopt_size += nr_table_entries * sizeof(struct request_sock *);
 	if (lopt_size > PAGE_SIZE)
-		lopt = __vmalloc(lopt_size,
-			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
-			PAGE_KERNEL);
+		lopt = vzalloc(lopt_size);
 	else
 		lopt = kzalloc(lopt_size, GFP_KERNEL);
 	if (lopt == NULL)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 8121268ddbdd..841c287ef40a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -347,16 +347,17 @@ static size_t rtnl_link_get_size(const struct net_device *dev)
 	if (!ops)
 		return 0;
 
-	size = nlmsg_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
-	       nlmsg_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */
+	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
+	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */
 
 	if (ops->get_size)
 		/* IFLA_INFO_DATA + nested data */
-		size += nlmsg_total_size(sizeof(struct nlattr)) +
+		size += nla_total_size(sizeof(struct nlattr)) +
 			ops->get_size(dev);
 
 	if (ops->get_xstats_size)
-		size += ops->get_xstats_size(dev); /* IFLA_INFO_XSTATS */
+		/* IFLA_INFO_XSTATS */
+		size += nla_total_size(ops->get_xstats_size(dev));
 
 	return size;
 }
diff --git a/net/core/sock.c b/net/core/sock.c
index 3eed5424e659..fb6080111461 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1653,10 +1653,10 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
 {
 	struct proto *prot = sk->sk_prot;
 	int amt = sk_mem_pages(size);
-	int allocated;
+	long allocated;
 
 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
-	allocated = atomic_add_return(amt, prot->memory_allocated);
+	allocated = atomic_long_add_return(amt, prot->memory_allocated);
 
 	/* Under limit. */
 	if (allocated <= prot->sysctl_mem[0]) {
@@ -1714,7 +1714,7 @@ suppress_allocation:
 
 	/* Alas. Undo changes. */
 	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
-	atomic_sub(amt, prot->memory_allocated);
+	atomic_long_sub(amt, prot->memory_allocated);
 	return 0;
 }
 EXPORT_SYMBOL(__sk_mem_schedule);
@@ -1727,12 +1727,12 @@ void __sk_mem_reclaim(struct sock *sk)
 {
 	struct proto *prot = sk->sk_prot;
 
-	atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
+	atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
 		   prot->memory_allocated);
 	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
 
 	if (prot->memory_pressure && *prot->memory_pressure &&
-	    (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
+	    (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
 		*prot->memory_pressure = 0;
 }
 EXPORT_SYMBOL(__sk_mem_reclaim);
@@ -2452,12 +2452,12 @@ static char proto_method_implemented(const void *method)
 
 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
 {
-	seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
+	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
 		   "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
 		   proto->name,
 		   proto->obj_size,
 		   sock_prot_inuse_get(seq_file_net(seq), proto),
-		   proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
+		   proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L,
 		   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
 		   proto->max_header,
 		   proto->slab == NULL ? "no" : "yes",
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index 0ae6c22da85b..c19bb4ee405e 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -96,11 +96,13 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb)
 	struct phy_device *phydev;
 	unsigned int type;
 
-	skb_push(skb, ETH_HLEN);
+	if (skb_headroom(skb) < ETH_HLEN)
+		return false;
+	__skb_push(skb, ETH_HLEN);
 
 	type = classify(skb);
 
-	skb_pull(skb, ETH_HLEN);
+	__skb_pull(skb, ETH_HLEN);
 
 	switch (type) {
 	case PTP_CLASS_V1_IPV4:
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 265985370fa1..e424a09e83f6 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -239,7 +239,8 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
 		dccp_update_gsr(sk, seqno);
 
 		if (dh->dccph_type != DCCP_PKT_SYNC &&
-		    (ackno != DCCP_PKT_WITHOUT_ACK_SEQ))
+		    ackno != DCCP_PKT_WITHOUT_ACK_SEQ &&
+		    after48(ackno, dp->dccps_gar))
 			dp->dccps_gar = ackno;
 	} else {
 		unsigned long now = jiffies;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index d6b93d19790f..6f97268ed85f 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -155,7 +155,7 @@ static const struct proto_ops dn_proto_ops;
155static DEFINE_RWLOCK(dn_hash_lock); 155static DEFINE_RWLOCK(dn_hash_lock);
156static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE]; 156static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
157static struct hlist_head dn_wild_sk; 157static struct hlist_head dn_wild_sk;
158static atomic_t decnet_memory_allocated; 158static atomic_long_t decnet_memory_allocated;
159 159
160static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags); 160static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags);
161static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags); 161static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
@@ -1556,6 +1556,8 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
1556 if (r_len > sizeof(struct linkinfo_dn)) 1556 if (r_len > sizeof(struct linkinfo_dn))
1557 r_len = sizeof(struct linkinfo_dn); 1557 r_len = sizeof(struct linkinfo_dn);
1558 1558
1559 memset(&link, 0, sizeof(link));
1560
1559 switch(sock->state) { 1561 switch(sock->state) {
1560 case SS_CONNECTING: 1562 case SS_CONNECTING:
1561 link.idn_linkstate = LL_CONNECTING; 1563 link.idn_linkstate = LL_CONNECTING;
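
The added memset() in __dn_getsockopt() makes every byte of the linkinfo_dn reply defined before it is copied out; without it, struct padding and fields not written on the current code path leak kernel stack contents to user space. A userspace sketch of the pattern (the struct layout below is illustrative, not the real struct linkinfo_dn):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* The compiler may leave padding between members, and not every member is
 * written on every code path.  Copying the struct out without zeroing it
 * first would hand those stale bytes to the caller. */
struct linkinfo {
    unsigned short segsize;   /* 2 bytes + 2 bytes of padding on most ABIs */
    unsigned int   linkstate;
};

/* The memset is the whole point of the fix: every byte of the reply is now
 * defined before it leaves this function. */
static void fill_linkinfo(struct linkinfo *out, int connected)
{
    memset(out, 0, sizeof(*out));
    out->segsize = 1458;
    if (connected)
        out->linkstate = 1;   /* only set on some paths */
}

int main(void)
{
    struct linkinfo li;

    fill_linkinfo(&li, 0);

    /* Dump the raw bytes that would have been copied to user space. */
    for (size_t i = 0; i < sizeof(li); i++)
        printf("%02x ", ((unsigned char *)&li)[i]);
    printf("\n");
    return 0;
}
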
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index be3eb8e23288..28f8b5e5f73b 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -38,7 +38,7 @@ int decnet_log_martians = 1;
38int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW; 38int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW;
39 39
40/* Reasonable defaults, I hope, based on tcp's defaults */ 40/* Reasonable defaults, I hope, based on tcp's defaults */
41int sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 }; 41long sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 };
42int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 }; 42int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
43int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 }; 43int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
44 44
@@ -324,7 +324,7 @@ static ctl_table dn_table[] = {
324 .data = &sysctl_decnet_mem, 324 .data = &sysctl_decnet_mem,
325 .maxlen = sizeof(sysctl_decnet_mem), 325 .maxlen = sizeof(sysctl_decnet_mem),
326 .mode = 0644, 326 .mode = 0644,
327 .proc_handler = proc_dointvec, 327 .proc_handler = proc_doulongvec_minmax
328 }, 328 },
329 { 329 {
330 .procname = "decnet_rmem", 330 .procname = "decnet_rmem",
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index f8c1ae4b41f0..15dcc1a586b4 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -31,6 +31,7 @@
31#include <linux/skbuff.h> 31#include <linux/skbuff.h>
32#include <linux/udp.h> 32#include <linux/udp.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/vmalloc.h>
34#include <net/sock.h> 35#include <net/sock.h>
35#include <net/inet_common.h> 36#include <net/inet_common.h>
36#include <linux/stat.h> 37#include <linux/stat.h>
@@ -276,12 +277,12 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
276#endif 277#endif
277#ifdef CONFIG_ECONET_AUNUDP 278#ifdef CONFIG_ECONET_AUNUDP
278 struct msghdr udpmsg; 279 struct msghdr udpmsg;
279 struct iovec iov[msg->msg_iovlen+1]; 280 struct iovec iov[2];
280 struct aunhdr ah; 281 struct aunhdr ah;
281 struct sockaddr_in udpdest; 282 struct sockaddr_in udpdest;
282 __kernel_size_t size; 283 __kernel_size_t size;
283 int i;
284 mm_segment_t oldfs; 284 mm_segment_t oldfs;
285 char *userbuf;
285#endif 286#endif
286 287
287 /* 288 /*
@@ -297,23 +298,14 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
297 298
298 mutex_lock(&econet_mutex); 299 mutex_lock(&econet_mutex);
299 300
300 if (saddr == NULL) { 301 if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) {
301 struct econet_sock *eo = ec_sk(sk); 302 mutex_unlock(&econet_mutex);
302 303 return -EINVAL;
303 addr.station = eo->station; 304 }
304 addr.net = eo->net; 305 addr.station = saddr->addr.station;
305 port = eo->port; 306 addr.net = saddr->addr.net;
306 cb = eo->cb; 307 port = saddr->port;
307 } else { 308 cb = saddr->cb;
308 if (msg->msg_namelen < sizeof(struct sockaddr_ec)) {
309 mutex_unlock(&econet_mutex);
310 return -EINVAL;
311 }
312 addr.station = saddr->addr.station;
313 addr.net = saddr->addr.net;
314 port = saddr->port;
315 cb = saddr->cb;
316 }
317 309
318 /* Look for a device with the right network number. */ 310 /* Look for a device with the right network number. */
319 dev = net2dev_map[addr.net]; 311 dev = net2dev_map[addr.net];
@@ -328,17 +320,17 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
328 } 320 }
329 } 321 }
330 322
331 if (len + 15 > dev->mtu) {
332 mutex_unlock(&econet_mutex);
333 return -EMSGSIZE;
334 }
335
336 if (dev->type == ARPHRD_ECONET) { 323 if (dev->type == ARPHRD_ECONET) {
337 /* Real hardware Econet. We're not worthy etc. */ 324 /* Real hardware Econet. We're not worthy etc. */
338#ifdef CONFIG_ECONET_NATIVE 325#ifdef CONFIG_ECONET_NATIVE
339 unsigned short proto = 0; 326 unsigned short proto = 0;
340 int res; 327 int res;
341 328
329 if (len + 15 > dev->mtu) {
330 mutex_unlock(&econet_mutex);
331 return -EMSGSIZE;
332 }
333
342 dev_hold(dev); 334 dev_hold(dev);
343 335
344 skb = sock_alloc_send_skb(sk, len+LL_ALLOCATED_SPACE(dev), 336 skb = sock_alloc_send_skb(sk, len+LL_ALLOCATED_SPACE(dev),
@@ -351,7 +343,6 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
351 343
352 eb = (struct ec_cb *)&skb->cb; 344 eb = (struct ec_cb *)&skb->cb;
353 345
354 /* BUG: saddr may be NULL */
355 eb->cookie = saddr->cookie; 346 eb->cookie = saddr->cookie;
356 eb->sec = *saddr; 347 eb->sec = *saddr;
357 eb->sent = ec_tx_done; 348 eb->sent = ec_tx_done;
@@ -415,6 +406,11 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
415 return -ENETDOWN; /* No socket - can't send */ 406 return -ENETDOWN; /* No socket - can't send */
416 } 407 }
417 408
409 if (len > 32768) {
410 err = -E2BIG;
411 goto error;
412 }
413
418 /* Make up a UDP datagram and hand it off to some higher intellect. */ 414 /* Make up a UDP datagram and hand it off to some higher intellect. */
419 415
420 memset(&udpdest, 0, sizeof(udpdest)); 416 memset(&udpdest, 0, sizeof(udpdest));
@@ -446,36 +442,26 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
446 442
447 /* tack our header on the front of the iovec */ 443 /* tack our header on the front of the iovec */
448 size = sizeof(struct aunhdr); 444 size = sizeof(struct aunhdr);
449 /*
450 * XXX: that is b0rken. We can't mix userland and kernel pointers
451 * in iovec, since on a lot of platforms copy_from_user() will
452 * *not* work with the kernel and userland ones at the same time,
453 * regardless of what we do with set_fs(). And we are talking about
454 * econet-over-ethernet here, so "it's only ARM anyway" doesn't
455 * apply. Any suggestions on fixing that code? -- AV
456 */
457 iov[0].iov_base = (void *)&ah; 445 iov[0].iov_base = (void *)&ah;
458 iov[0].iov_len = size; 446 iov[0].iov_len = size;
459 for (i = 0; i < msg->msg_iovlen; i++) { 447
460 void __user *base = msg->msg_iov[i].iov_base; 448 userbuf = vmalloc(len);
461 size_t iov_len = msg->msg_iov[i].iov_len; 449 if (userbuf == NULL) {
462 /* Check it now since we switch to KERNEL_DS later. */ 450 err = -ENOMEM;
463 if (!access_ok(VERIFY_READ, base, iov_len)) { 451 goto error;
464 mutex_unlock(&econet_mutex);
465 return -EFAULT;
466 }
467 iov[i+1].iov_base = base;
468 iov[i+1].iov_len = iov_len;
469 size += iov_len;
470 } 452 }
471 453
454 iov[1].iov_base = userbuf;
455 iov[1].iov_len = len;
456 err = memcpy_fromiovec(userbuf, msg->msg_iov, len);
457 if (err)
458 goto error_free_buf;
459
472 /* Get a skbuff (no data, just holds our cb information) */ 460 /* Get a skbuff (no data, just holds our cb information) */
473 if ((skb = sock_alloc_send_skb(sk, 0, 461 if ((skb = sock_alloc_send_skb(sk, 0,
474 msg->msg_flags & MSG_DONTWAIT, 462 msg->msg_flags & MSG_DONTWAIT,
475 &err)) == NULL) { 463 &err)) == NULL)
476 mutex_unlock(&econet_mutex); 464 goto error_free_buf;
477 return err;
478 }
479 465
480 eb = (struct ec_cb *)&skb->cb; 466 eb = (struct ec_cb *)&skb->cb;
481 467
@@ -491,7 +477,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
491 udpmsg.msg_name = (void *)&udpdest; 477 udpmsg.msg_name = (void *)&udpdest;
492 udpmsg.msg_namelen = sizeof(udpdest); 478 udpmsg.msg_namelen = sizeof(udpdest);
493 udpmsg.msg_iov = &iov[0]; 479 udpmsg.msg_iov = &iov[0];
494 udpmsg.msg_iovlen = msg->msg_iovlen + 1; 480 udpmsg.msg_iovlen = 2;
495 udpmsg.msg_control = NULL; 481 udpmsg.msg_control = NULL;
496 udpmsg.msg_controllen = 0; 482 udpmsg.msg_controllen = 0;
497 udpmsg.msg_flags=0; 483 udpmsg.msg_flags=0;
@@ -499,9 +485,13 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
499 oldfs = get_fs(); set_fs(KERNEL_DS); /* More privs :-) */ 485 oldfs = get_fs(); set_fs(KERNEL_DS); /* More privs :-) */
500 err = sock_sendmsg(udpsock, &udpmsg, size); 486 err = sock_sendmsg(udpsock, &udpmsg, size);
501 set_fs(oldfs); 487 set_fs(oldfs);
488
489error_free_buf:
490 vfree(userbuf);
502#else 491#else
503 err = -EPROTOTYPE; 492 err = -EPROTOTYPE;
504#endif 493#endif
494 error:
505 mutex_unlock(&econet_mutex); 495 mutex_unlock(&econet_mutex);
506 496
507 return err; 497 return err;
@@ -671,6 +661,11 @@ static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
671 err = 0; 661 err = 0;
672 switch (cmd) { 662 switch (cmd) {
673 case SIOCSIFADDR: 663 case SIOCSIFADDR:
664 if (!capable(CAP_NET_ADMIN)) {
665 err = -EPERM;
666 break;
667 }
668
674 edev = dev->ec_ptr; 669 edev = dev->ec_ptr;
675 if (edev == NULL) { 670 if (edev == NULL) {
676 /* Magic up a new one. */ 671 /* Magic up a new one. */
@@ -856,9 +851,13 @@ static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len)
856{ 851{
857 struct iphdr *ip = ip_hdr(skb); 852 struct iphdr *ip = ip_hdr(skb);
858 unsigned char stn = ntohl(ip->saddr) & 0xff; 853 unsigned char stn = ntohl(ip->saddr) & 0xff;
854 struct dst_entry *dst = skb_dst(skb);
855 struct ec_device *edev = NULL;
859 struct sock *sk = NULL; 856 struct sock *sk = NULL;
860 struct sk_buff *newskb; 857 struct sk_buff *newskb;
861 struct ec_device *edev = skb->dev->ec_ptr; 858
859 if (dst)
860 edev = dst->dev->ec_ptr;
862 861
863 if (! edev) 862 if (! edev)
864 goto bad; 863 goto bad;
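
The econet rewrite above copies the caller's iovec into a single vmalloc()ed kernel buffer (via memcpy_fromiovec) and then hands sock_sendmsg() a two-element iovec of kernel pointers, instead of mixing user pointers into a KERNEL_DS send. A userspace sketch of just the gather step; the helper name and the toy error handling are not the kernel's:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

/* Gather a caller-supplied iovec into one freshly allocated buffer, the way
 * the patched econet_sendmsg() flattens msg->msg_iov into a vmalloc()ed
 * buffer before building its own two-element iovec (header + body). */
static void *gather_iovec(const struct iovec *iov, int iovcnt, size_t len)
{
    unsigned char *buf = malloc(len);
    size_t off = 0;

    if (!buf)
        return NULL;
    for (int i = 0; i < iovcnt && off < len; i++) {
        size_t n = iov[i].iov_len;

        if (n > len - off)
            n = len - off;
        memcpy(buf + off, iov[i].iov_base, n);
        off += n;
    }
    return buf;
}

int main(void)
{
    char a[] = "hello ", b[] = "econet";
    struct iovec iov[2] = {
        { .iov_base = a, .iov_len = strlen(a) },
        { .iov_base = b, .iov_len = strlen(b) },
    };
    size_t total = strlen(a) + strlen(b);
    char *flat = gather_iovec(iov, 2, total);

    if (!flat)
        return ENOMEM;
    printf("%.*s\n", (int)total, flat);
    free(flat);
    return 0;
}
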
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index a29edf2219c8..c079cc0ec651 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -47,11 +47,8 @@ extern int fib_detect_death(struct fib_info *fi, int order,
47static inline void fib_result_assign(struct fib_result *res, 47static inline void fib_result_assign(struct fib_result *res,
48 struct fib_info *fi) 48 struct fib_info *fi)
49{ 49{
50 if (res->fi != NULL) 50 /* we used to play games with refcounts, but we now use RCU */
51 fib_info_put(res->fi);
52 res->fi = fi; 51 res->fi = fi;
53 if (fi != NULL)
54 atomic_inc(&fi->fib_clntref);
55} 52}
56 53
57#endif /* _FIB_LOOKUP_H */ 54#endif /* _FIB_LOOKUP_H */
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 200eb538fbb3..0f280348e0fd 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -365,7 +365,7 @@ static struct tnode *tnode_alloc(size_t size)
365 if (size <= PAGE_SIZE) 365 if (size <= PAGE_SIZE)
366 return kzalloc(size, GFP_KERNEL); 366 return kzalloc(size, GFP_KERNEL);
367 else 367 else
368 return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); 368 return vzalloc(size);
369} 369}
370 370
371static void __tnode_vfree(struct work_struct *arg) 371static void __tnode_vfree(struct work_struct *arg)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 96bc7f9475a3..e5d1a44bcbdf 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -569,6 +569,9 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
569 /* No need to clone since we're just using its address. */ 569 /* No need to clone since we're just using its address. */
570 rt2 = rt; 570 rt2 = rt;
571 571
572 if (!fl.nl_u.ip4_u.saddr)
573 fl.nl_u.ip4_u.saddr = rt->rt_src;
574
572 err = xfrm_lookup(net, (struct dst_entry **)&rt, &fl, NULL, 0); 575 err = xfrm_lookup(net, (struct dst_entry **)&rt, &fl, NULL, 0);
573 switch (err) { 576 switch (err) {
574 case 0: 577 case 0:
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index c8877c6c7216..3c53c2d89e3b 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2306,10 +2306,8 @@ void ip_mc_drop_socket(struct sock *sk)
2306 2306
2307 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); 2307 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
2308 (void) ip_mc_leave_src(sk, iml, in_dev); 2308 (void) ip_mc_leave_src(sk, iml, in_dev);
2309 if (in_dev != NULL) { 2309 if (in_dev != NULL)
2310 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); 2310 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
2311 in_dev_put(in_dev);
2312 }
2313 /* decrease mem now to avoid the memleak warning */ 2311 /* decrease mem now to avoid the memleak warning */
2314 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); 2312 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
2315 call_rcu(&iml->rcu, ip_mc_socklist_reclaim); 2313 call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index ba8042665849..2ada17129fce 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -490,9 +490,11 @@ static int inet_csk_diag_dump(struct sock *sk,
490{ 490{
491 struct inet_diag_req *r = NLMSG_DATA(cb->nlh); 491 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
492 492
493 if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { 493 if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
494 struct inet_diag_entry entry; 494 struct inet_diag_entry entry;
495 struct rtattr *bc = (struct rtattr *)(r + 1); 495 const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
496 sizeof(*r),
497 INET_DIAG_REQ_BYTECODE);
496 struct inet_sock *inet = inet_sk(sk); 498 struct inet_sock *inet = inet_sk(sk);
497 499
498 entry.family = sk->sk_family; 500 entry.family = sk->sk_family;
@@ -512,7 +514,7 @@ static int inet_csk_diag_dump(struct sock *sk,
512 entry.dport = ntohs(inet->inet_dport); 514 entry.dport = ntohs(inet->inet_dport);
513 entry.userlocks = sk->sk_userlocks; 515 entry.userlocks = sk->sk_userlocks;
514 516
515 if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) 517 if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
516 return 0; 518 return 0;
517 } 519 }
518 520
@@ -527,9 +529,11 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
527{ 529{
528 struct inet_diag_req *r = NLMSG_DATA(cb->nlh); 530 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
529 531
530 if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { 532 if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
531 struct inet_diag_entry entry; 533 struct inet_diag_entry entry;
532 struct rtattr *bc = (struct rtattr *)(r + 1); 534 const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
535 sizeof(*r),
536 INET_DIAG_REQ_BYTECODE);
533 537
534 entry.family = tw->tw_family; 538 entry.family = tw->tw_family;
535#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 539#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
@@ -548,7 +552,7 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
548 entry.dport = ntohs(tw->tw_dport); 552 entry.dport = ntohs(tw->tw_dport);
549 entry.userlocks = 0; 553 entry.userlocks = 0;
550 554
551 if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) 555 if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
552 return 0; 556 return 0;
553 } 557 }
554 558
@@ -618,7 +622,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
618 struct inet_diag_req *r = NLMSG_DATA(cb->nlh); 622 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
619 struct inet_connection_sock *icsk = inet_csk(sk); 623 struct inet_connection_sock *icsk = inet_csk(sk);
620 struct listen_sock *lopt; 624 struct listen_sock *lopt;
621 struct rtattr *bc = NULL; 625 const struct nlattr *bc = NULL;
622 struct inet_sock *inet = inet_sk(sk); 626 struct inet_sock *inet = inet_sk(sk);
623 int j, s_j; 627 int j, s_j;
624 int reqnum, s_reqnum; 628 int reqnum, s_reqnum;
@@ -638,8 +642,9 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
638 if (!lopt || !lopt->qlen) 642 if (!lopt || !lopt->qlen)
639 goto out; 643 goto out;
640 644
641 if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { 645 if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
642 bc = (struct rtattr *)(r + 1); 646 bc = nlmsg_find_attr(cb->nlh, sizeof(*r),
647 INET_DIAG_REQ_BYTECODE);
643 entry.sport = inet->inet_num; 648 entry.sport = inet->inet_num;
644 entry.userlocks = sk->sk_userlocks; 649 entry.userlocks = sk->sk_userlocks;
645 } 650 }
@@ -672,8 +677,8 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
672 &ireq->rmt_addr; 677 &ireq->rmt_addr;
673 entry.dport = ntohs(ireq->rmt_port); 678 entry.dport = ntohs(ireq->rmt_port);
674 679
675 if (!inet_diag_bc_run(RTA_DATA(bc), 680 if (!inet_diag_bc_run(nla_data(bc),
676 RTA_PAYLOAD(bc), &entry)) 681 nla_len(bc), &entry))
677 continue; 682 continue;
678 } 683 }
679 684
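
The inet_diag hunks replace the hand-rolled "the attribute starts right after the request" assumption with nlmsg_attrlen()/nlmsg_find_attr(), which walk and length-check the attribute space before the bytecode is used. A minimal sketch of that kind of TLV lookup; the attribute layout and helper below are simplified stand-ins, not the netlink API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Minimal netlink-style TLV attribute: a 2-byte length that covers this
 * header, a 2-byte type, then the payload, padded to a 4-byte boundary. */
struct attr { uint16_t len; uint16_t type; };

#define ATTR_ALIGN(n) (((n) + 3u) & ~3u)

/* Walk the attribute space with bounds checks instead of assuming the
 * wanted attribute sits at a fixed offset. */
static const void *find_attr(const void *buf, size_t buflen,
                             uint16_t wanted, size_t *payload_len)
{
    const unsigned char *p = buf;
    size_t off = 0;

    while (off + sizeof(struct attr) <= buflen) {
        struct attr a;

        memcpy(&a, p + off, sizeof(a));
        if (a.len < sizeof(a) || off + a.len > buflen)
            return NULL;                     /* malformed attribute */
        if (a.type == wanted) {
            *payload_len = a.len - sizeof(a);
            return p + off + sizeof(a);
        }
        off += ATTR_ALIGN(a.len);
    }
    return NULL;                             /* not present */
}

int main(void)
{
    unsigned char msg[8] = { 0 };
    struct attr a = { .len = 6, .type = 1 };  /* type 1 carries "bc" */
    size_t plen = 0;
    const unsigned char *bc;

    memcpy(msg, &a, sizeof(a));
    memcpy(msg + sizeof(a), "bc", 2);

    bc = find_attr(msg, sizeof(msg), 1, &plen);
    printf("found=%d len=%zu\n", bc != NULL, plen);
    return 0;
}
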
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 1b344f30b463..3c0369a3a663 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -133,8 +133,7 @@ int __inet_inherit_port(struct sock *sk, struct sock *child)
133 } 133 }
134 } 134 }
135 } 135 }
136 sk_add_bind_node(child, &tb->owners); 136 inet_bind_hash(child, tb, port);
137 inet_csk(child)->icsk_bind_hash = tb;
138 spin_unlock(&head->lock); 137 spin_unlock(&head->lock);
139 138
140 return 0; 139 return 0;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 01087e035b7d..70ff77f02eee 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1325,7 +1325,6 @@ static void ipgre_fb_tunnel_init(struct net_device *dev)
1325{ 1325{
1326 struct ip_tunnel *tunnel = netdev_priv(dev); 1326 struct ip_tunnel *tunnel = netdev_priv(dev);
1327 struct iphdr *iph = &tunnel->parms.iph; 1327 struct iphdr *iph = &tunnel->parms.iph;
1328 struct ipgre_net *ign = net_generic(dev_net(dev), ipgre_net_id);
1329 1328
1330 tunnel->dev = dev; 1329 tunnel->dev = dev;
1331 strcpy(tunnel->parms.name, dev->name); 1330 strcpy(tunnel->parms.name, dev->name);
@@ -1336,7 +1335,6 @@ static void ipgre_fb_tunnel_init(struct net_device *dev)
1336 tunnel->hlen = sizeof(struct iphdr) + 4; 1335 tunnel->hlen = sizeof(struct iphdr) + 4;
1337 1336
1338 dev_hold(dev); 1337 dev_hold(dev);
1339 rcu_assign_pointer(ign->tunnels_wc[0], tunnel);
1340} 1338}
1341 1339
1342 1340
@@ -1383,10 +1381,12 @@ static int __net_init ipgre_init_net(struct net *net)
1383 if ((err = register_netdev(ign->fb_tunnel_dev))) 1381 if ((err = register_netdev(ign->fb_tunnel_dev)))
1384 goto err_reg_dev; 1382 goto err_reg_dev;
1385 1383
1384 rcu_assign_pointer(ign->tunnels_wc[0],
1385 netdev_priv(ign->fb_tunnel_dev));
1386 return 0; 1386 return 0;
1387 1387
1388err_reg_dev: 1388err_reg_dev:
1389 free_netdev(ign->fb_tunnel_dev); 1389 ipgre_dev_free(ign->fb_tunnel_dev);
1390err_alloc_dev: 1390err_alloc_dev:
1391 return err; 1391 return err;
1392} 1392}
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 3cad2591ace0..3fac340a28d5 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -927,6 +927,7 @@ static int get_info(struct net *net, void __user *user,
927 private = &tmp; 927 private = &tmp;
928 } 928 }
929#endif 929#endif
930 memset(&info, 0, sizeof(info));
930 info.valid_hooks = t->valid_hooks; 931 info.valid_hooks = t->valid_hooks;
931 memcpy(info.hook_entry, private->hook_entry, 932 memcpy(info.hook_entry, private->hook_entry,
932 sizeof(info.hook_entry)); 933 sizeof(info.hook_entry));
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index d31b007a6d80..a846d633b3b6 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1124,6 +1124,7 @@ static int get_info(struct net *net, void __user *user,
1124 private = &tmp; 1124 private = &tmp;
1125 } 1125 }
1126#endif 1126#endif
1127 memset(&info, 0, sizeof(info));
1127 info.valid_hooks = t->valid_hooks; 1128 info.valid_hooks = t->valid_hooks;
1128 memcpy(info.hook_entry, private->hook_entry, 1129 memcpy(info.hook_entry, private->hook_entry,
1129 sizeof(info.hook_entry)); 1130 sizeof(info.hook_entry));
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 295c97431e43..c04787ce1a71 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -47,26 +47,6 @@ __nf_nat_proto_find(u_int8_t protonum)
47 return rcu_dereference(nf_nat_protos[protonum]); 47 return rcu_dereference(nf_nat_protos[protonum]);
48} 48}
49 49
50static const struct nf_nat_protocol *
51nf_nat_proto_find_get(u_int8_t protonum)
52{
53 const struct nf_nat_protocol *p;
54
55 rcu_read_lock();
56 p = __nf_nat_proto_find(protonum);
57 if (!try_module_get(p->me))
58 p = &nf_nat_unknown_protocol;
59 rcu_read_unlock();
60
61 return p;
62}
63
64static void
65nf_nat_proto_put(const struct nf_nat_protocol *p)
66{
67 module_put(p->me);
68}
69
70/* We keep an extra hash for each conntrack, for fast searching. */ 50/* We keep an extra hash for each conntrack, for fast searching. */
71static inline unsigned int 51static inline unsigned int
72hash_by_src(const struct net *net, u16 zone, 52hash_by_src(const struct net *net, u16 zone,
@@ -588,6 +568,26 @@ static struct nf_ct_ext_type nat_extend __read_mostly = {
588#include <linux/netfilter/nfnetlink.h> 568#include <linux/netfilter/nfnetlink.h>
589#include <linux/netfilter/nfnetlink_conntrack.h> 569#include <linux/netfilter/nfnetlink_conntrack.h>
590 570
571static const struct nf_nat_protocol *
572nf_nat_proto_find_get(u_int8_t protonum)
573{
574 const struct nf_nat_protocol *p;
575
576 rcu_read_lock();
577 p = __nf_nat_proto_find(protonum);
578 if (!try_module_get(p->me))
579 p = &nf_nat_unknown_protocol;
580 rcu_read_unlock();
581
582 return p;
583}
584
585static void
586nf_nat_proto_put(const struct nf_nat_protocol *p)
587{
588 module_put(p->me);
589}
590
591static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = { 591static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
592 [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 }, 592 [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
593 [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 }, 593 [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 4ae1f203f7cb..b14ec7d03b6e 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -59,13 +59,13 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
59 local_bh_enable(); 59 local_bh_enable();
60 60
61 socket_seq_show(seq); 61 socket_seq_show(seq);
62 seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n", 62 seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n",
63 sock_prot_inuse_get(net, &tcp_prot), orphans, 63 sock_prot_inuse_get(net, &tcp_prot), orphans,
64 tcp_death_row.tw_count, sockets, 64 tcp_death_row.tw_count, sockets,
65 atomic_read(&tcp_memory_allocated)); 65 atomic_long_read(&tcp_memory_allocated));
66 seq_printf(seq, "UDP: inuse %d mem %d\n", 66 seq_printf(seq, "UDP: inuse %d mem %ld\n",
67 sock_prot_inuse_get(net, &udp_prot), 67 sock_prot_inuse_get(net, &udp_prot),
68 atomic_read(&udp_memory_allocated)); 68 atomic_long_read(&udp_memory_allocated));
69 seq_printf(seq, "UDPLITE: inuse %d\n", 69 seq_printf(seq, "UDPLITE: inuse %d\n",
70 sock_prot_inuse_get(net, &udplite_prot)); 70 sock_prot_inuse_get(net, &udplite_prot));
71 seq_printf(seq, "RAW: inuse %d\n", 71 seq_printf(seq, "RAW: inuse %d\n",
@@ -253,6 +253,7 @@ static const struct snmp_mib snmp4_net_list[] = {
253 SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), 253 SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
254 SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), 254 SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
255 SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER), 255 SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER),
256 SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
256 SNMP_MIB_SENTINEL 257 SNMP_MIB_SENTINEL
257}; 258};
258 259
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index d96c1da4b17c..1b4ec21497a4 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -26,6 +26,8 @@ static int zero;
26static int tcp_retr1_max = 255; 26static int tcp_retr1_max = 255;
27static int ip_local_port_range_min[] = { 1, 1 }; 27static int ip_local_port_range_min[] = { 1, 1 };
28static int ip_local_port_range_max[] = { 65535, 65535 }; 28static int ip_local_port_range_max[] = { 65535, 65535 };
29static int tcp_adv_win_scale_min = -31;
30static int tcp_adv_win_scale_max = 31;
29 31
30/* Update system visible IP port range */ 32/* Update system visible IP port range */
31static void set_local_port_range(int range[2]) 33static void set_local_port_range(int range[2])
@@ -398,7 +400,7 @@ static struct ctl_table ipv4_table[] = {
398 .data = &sysctl_tcp_mem, 400 .data = &sysctl_tcp_mem,
399 .maxlen = sizeof(sysctl_tcp_mem), 401 .maxlen = sizeof(sysctl_tcp_mem),
400 .mode = 0644, 402 .mode = 0644,
401 .proc_handler = proc_dointvec 403 .proc_handler = proc_doulongvec_minmax
402 }, 404 },
403 { 405 {
404 .procname = "tcp_wmem", 406 .procname = "tcp_wmem",
@@ -426,7 +428,9 @@ static struct ctl_table ipv4_table[] = {
426 .data = &sysctl_tcp_adv_win_scale, 428 .data = &sysctl_tcp_adv_win_scale,
427 .maxlen = sizeof(int), 429 .maxlen = sizeof(int),
428 .mode = 0644, 430 .mode = 0644,
429 .proc_handler = proc_dointvec 431 .proc_handler = proc_dointvec_minmax,
432 .extra1 = &tcp_adv_win_scale_min,
433 .extra2 = &tcp_adv_win_scale_max,
430 }, 434 },
431 { 435 {
432 .procname = "tcp_tw_reuse", 436 .procname = "tcp_tw_reuse",
@@ -602,8 +606,7 @@ static struct ctl_table ipv4_table[] = {
602 .data = &sysctl_udp_mem, 606 .data = &sysctl_udp_mem,
603 .maxlen = sizeof(sysctl_udp_mem), 607 .maxlen = sizeof(sysctl_udp_mem),
604 .mode = 0644, 608 .mode = 0644,
605 .proc_handler = proc_dointvec_minmax, 609 .proc_handler = proc_doulongvec_minmax,
606 .extra1 = &zero
607 }, 610 },
608 { 611 {
609 .procname = "udp_rmem_min", 612 .procname = "udp_rmem_min",
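
tcp_adv_win_scale ends up as a shift count (roughly space >> scale, or space >> -scale for negative values), and shifting a 32-bit int by 32 or more is undefined behaviour, hence the new [-31, 31] clamp via proc_dointvec_minmax with extra1/extra2. A sketch of the effect, with an explicit clamp standing in for the sysctl bounds; win_from_space() here only approximates the kernel's tcp_win_from_space():

#include <stdio.h>

/* The sysctl is now bounded to [-31, 31] so the shift below stays defined. */
static int clamp_scale(int scale)
{
    if (scale < -31) return -31;
    if (scale >  31) return  31;
    return scale;
}

/* Roughly how usable window is derived from socket buffer space. */
static int win_from_space(int space, int scale)
{
    scale = clamp_scale(scale);
    return scale <= 0 ? space >> -scale
                      : space - (space >> scale);
}

int main(void)
{
    printf("%d\n", win_from_space(1 << 20, 2));    /* 3/4 of space   */
    printf("%d\n", win_from_space(1 << 20, -2));   /* 1/4 of space   */
    printf("%d\n", win_from_space(1 << 20, 1000)); /* clamped, no UB */
    return 0;
}
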
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1664a0590bb8..f15c36a706ec 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -282,7 +282,7 @@ int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
282struct percpu_counter tcp_orphan_count; 282struct percpu_counter tcp_orphan_count;
283EXPORT_SYMBOL_GPL(tcp_orphan_count); 283EXPORT_SYMBOL_GPL(tcp_orphan_count);
284 284
285int sysctl_tcp_mem[3] __read_mostly; 285long sysctl_tcp_mem[3] __read_mostly;
286int sysctl_tcp_wmem[3] __read_mostly; 286int sysctl_tcp_wmem[3] __read_mostly;
287int sysctl_tcp_rmem[3] __read_mostly; 287int sysctl_tcp_rmem[3] __read_mostly;
288 288
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(sysctl_tcp_mem);
290EXPORT_SYMBOL(sysctl_tcp_rmem); 290EXPORT_SYMBOL(sysctl_tcp_rmem);
291EXPORT_SYMBOL(sysctl_tcp_wmem); 291EXPORT_SYMBOL(sysctl_tcp_wmem);
292 292
293atomic_t tcp_memory_allocated; /* Current allocated memory. */ 293atomic_long_t tcp_memory_allocated; /* Current allocated memory. */
294EXPORT_SYMBOL(tcp_memory_allocated); 294EXPORT_SYMBOL(tcp_memory_allocated);
295 295
296/* 296/*
@@ -2246,7 +2246,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2246 /* Values greater than interface MTU won't take effect. However 2246 /* Values greater than interface MTU won't take effect. However
2247 * at the point when this call is done we typically don't yet 2247 * at the point when this call is done we typically don't yet
2248 * know which interface is going to be used */ 2248 * know which interface is going to be used */
2249 if (val < 8 || val > MAX_TCP_WINDOW) { 2249 if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
2250 err = -EINVAL; 2250 err = -EINVAL;
2251 break; 2251 break;
2252 } 2252 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3357f69e353d..6d8ab1c4efc3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -259,8 +259,11 @@ static void tcp_fixup_sndbuf(struct sock *sk)
259 int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 + 259 int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
260 sizeof(struct sk_buff); 260 sizeof(struct sk_buff);
261 261
262 if (sk->sk_sndbuf < 3 * sndmem) 262 if (sk->sk_sndbuf < 3 * sndmem) {
263 sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]); 263 sk->sk_sndbuf = 3 * sndmem;
264 if (sk->sk_sndbuf > sysctl_tcp_wmem[2])
265 sk->sk_sndbuf = sysctl_tcp_wmem[2];
266 }
264} 267}
265 268
266/* 2. Tuning advertised window (window_clamp, rcv_ssthresh) 269/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -396,7 +399,7 @@ static void tcp_clamp_window(struct sock *sk)
396 if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] && 399 if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
397 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && 400 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
398 !tcp_memory_pressure && 401 !tcp_memory_pressure &&
399 atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) { 402 atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
400 sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), 403 sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
401 sysctl_tcp_rmem[2]); 404 sysctl_tcp_rmem[2]);
402 } 405 }
@@ -4861,7 +4864,7 @@ static int tcp_should_expand_sndbuf(struct sock *sk)
4861 return 0; 4864 return 0;
4862 4865
4863 /* If we are under soft global TCP memory pressure, do not expand. */ 4866 /* If we are under soft global TCP memory pressure, do not expand. */
4864 if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0]) 4867 if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
4865 return 0; 4868 return 0;
4866 4869
4867 /* If we filled the congestion window, do not expand. */ 4870 /* If we filled the congestion window, do not expand. */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 8f8527d41682..e13da6de1fc7 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -415,6 +415,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
415 !icsk->icsk_backoff) 415 !icsk->icsk_backoff)
416 break; 416 break;
417 417
418 if (sock_owned_by_user(sk))
419 break;
420
418 icsk->icsk_backoff--; 421 icsk->icsk_backoff--;
419 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) << 422 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
420 icsk->icsk_backoff; 423 icsk->icsk_backoff;
@@ -429,11 +432,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
429 if (remaining) { 432 if (remaining) {
430 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 433 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
431 remaining, TCP_RTO_MAX); 434 remaining, TCP_RTO_MAX);
432 } else if (sock_owned_by_user(sk)) {
433 /* RTO revert clocked out retransmission,
434 * but socket is locked. Will defer. */
435 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
436 HZ/20, TCP_RTO_MAX);
437 } else { 435 } else {
438 /* RTO revert clocked out retransmission. 436 /* RTO revert clocked out retransmission.
439 * Will retransmit now */ 437 * Will retransmit now */
@@ -2045,7 +2043,9 @@ get_req:
2045 } 2043 }
2046get_sk: 2044get_sk:
2047 sk_nulls_for_each_from(sk, node) { 2045 sk_nulls_for_each_from(sk, node) {
2048 if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) { 2046 if (!net_eq(sock_net(sk), net))
2047 continue;
2048 if (sk->sk_family == st->family) {
2049 cur = sk; 2049 cur = sk;
2050 goto out; 2050 goto out;
2051 } 2051 }
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 43cf901d7659..a66735f75963 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -347,7 +347,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
347 * socket up. We've got bigger problems than 347 * socket up. We've got bigger problems than
348 * non-graceful socket closings. 348 * non-graceful socket closings.
349 */ 349 */
350 LIMIT_NETDEBUG(KERN_INFO "TCP: time wait bucket table overflow\n"); 350 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
351 } 351 }
352 352
353 tcp_update_metrics(sk); 353 tcp_update_metrics(sk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e96152207164..8750d4050b48 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -231,11 +231,10 @@ void tcp_select_initial_window(int __space, __u32 mss,
231 /* when initializing use the value from init_rcv_wnd 231 /* when initializing use the value from init_rcv_wnd
232 * rather than the default from above 232 * rather than the default from above
233 */ 233 */
234 if (init_rcv_wnd && 234 if (init_rcv_wnd)
235 (*rcv_wnd > init_rcv_wnd * mss)) 235 *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
236 *rcv_wnd = init_rcv_wnd * mss; 236 else
237 else if (*rcv_wnd > init_cwnd * mss) 237 *rcv_wnd = min(*rcv_wnd, init_cwnd * mss);
238 *rcv_wnd = init_cwnd * mss;
239 } 238 }
240 239
241 /* Set the clamp no higher than max representable value */ 240 /* Set the clamp no higher than max representable value */
@@ -386,27 +385,30 @@ struct tcp_out_options {
386 */ 385 */
387static u8 tcp_cookie_size_check(u8 desired) 386static u8 tcp_cookie_size_check(u8 desired)
388{ 387{
389 if (desired > 0) { 388 int cookie_size;
389
390 if (desired > 0)
390 /* previously specified */ 391 /* previously specified */
391 return desired; 392 return desired;
392 } 393
393 if (sysctl_tcp_cookie_size <= 0) { 394 cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size);
395 if (cookie_size <= 0)
394 /* no default specified */ 396 /* no default specified */
395 return 0; 397 return 0;
396 } 398
397 if (sysctl_tcp_cookie_size <= TCP_COOKIE_MIN) { 399 if (cookie_size <= TCP_COOKIE_MIN)
398 /* value too small, specify minimum */ 400 /* value too small, specify minimum */
399 return TCP_COOKIE_MIN; 401 return TCP_COOKIE_MIN;
400 } 402
401 if (sysctl_tcp_cookie_size >= TCP_COOKIE_MAX) { 403 if (cookie_size >= TCP_COOKIE_MAX)
402 /* value too large, specify maximum */ 404 /* value too large, specify maximum */
403 return TCP_COOKIE_MAX; 405 return TCP_COOKIE_MAX;
404 } 406
405 if (0x1 & sysctl_tcp_cookie_size) { 407 if (cookie_size & 1)
406 /* 8-bit multiple, illegal, fix it */ 408 /* 8-bit multiple, illegal, fix it */
407 return (u8)(sysctl_tcp_cookie_size + 0x1); 409 cookie_size++;
408 } 410
409 return (u8)sysctl_tcp_cookie_size; 411 return (u8)cookie_size;
410} 412}
411 413
412/* Write previously computed TCP options to the packet. 414/* Write previously computed TCP options to the packet.
@@ -1513,6 +1515,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1513 struct tcp_sock *tp = tcp_sk(sk); 1515 struct tcp_sock *tp = tcp_sk(sk);
1514 const struct inet_connection_sock *icsk = inet_csk(sk); 1516 const struct inet_connection_sock *icsk = inet_csk(sk);
1515 u32 send_win, cong_win, limit, in_flight; 1517 u32 send_win, cong_win, limit, in_flight;
1518 int win_divisor;
1516 1519
1517 if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) 1520 if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
1518 goto send_now; 1521 goto send_now;
@@ -1544,13 +1547,14 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1544 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) 1547 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
1545 goto send_now; 1548 goto send_now;
1546 1549
1547 if (sysctl_tcp_tso_win_divisor) { 1550 win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
1551 if (win_divisor) {
1548 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); 1552 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1549 1553
1550 /* If at least some fraction of a window is available, 1554 /* If at least some fraction of a window is available,
1551 * just use it. 1555 * just use it.
1552 */ 1556 */
1553 chunk /= sysctl_tcp_tso_win_divisor; 1557 chunk /= win_divisor;
1554 if (limit >= chunk) 1558 if (limit >= chunk)
1555 goto send_now; 1559 goto send_now;
1556 } else { 1560 } else {
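
The ACCESS_ONCE() changes in tcp_output.c snapshot a sysctl into a local so that the non-zero test and the later arithmetic use the same value even if the tunable is rewritten concurrently. A userspace sketch of the same read-once pattern, with a C11 atomic load standing in for ACCESS_ONCE():

#include <stdatomic.h>
#include <stdio.h>

/* A tunable that another thread/CPU may change at any moment. */
static _Atomic int tcp_tso_win_divisor = 3;

/* Mirrors the patched tcp_tso_should_defer(): read the tunable ONCE into a
 * local, so the "is it non-zero?" test and the later division see the same
 * value.  Re-reading it could divide by zero if the admin writes 0 between
 * the two reads. */
static unsigned int chunk_for(unsigned int window)
{
    int win_divisor = atomic_load_explicit(&tcp_tso_win_divisor,
                                           memory_order_relaxed);
    if (win_divisor)
        return window / (unsigned int)win_divisor;
    return window;                      /* divisor disabled: whole window */
}

int main(void)
{
    printf("%u\n", chunk_for(3000));    /* 1000 with the default of 3 */
    atomic_store(&tcp_tso_win_divisor, 0);
    printf("%u\n", chunk_for(3000));    /* 3000: divisor disabled      */
    return 0;
}
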
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 28cb2d733a3c..5e0a3a582a59 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -110,7 +110,7 @@
110struct udp_table udp_table __read_mostly; 110struct udp_table udp_table __read_mostly;
111EXPORT_SYMBOL(udp_table); 111EXPORT_SYMBOL(udp_table);
112 112
113int sysctl_udp_mem[3] __read_mostly; 113long sysctl_udp_mem[3] __read_mostly;
114EXPORT_SYMBOL(sysctl_udp_mem); 114EXPORT_SYMBOL(sysctl_udp_mem);
115 115
116int sysctl_udp_rmem_min __read_mostly; 116int sysctl_udp_rmem_min __read_mostly;
@@ -119,7 +119,7 @@ EXPORT_SYMBOL(sysctl_udp_rmem_min);
119int sysctl_udp_wmem_min __read_mostly; 119int sysctl_udp_wmem_min __read_mostly;
120EXPORT_SYMBOL(sysctl_udp_wmem_min); 120EXPORT_SYMBOL(sysctl_udp_wmem_min);
121 121
122atomic_t udp_memory_allocated; 122atomic_long_t udp_memory_allocated;
123EXPORT_SYMBOL(udp_memory_allocated); 123EXPORT_SYMBOL(udp_memory_allocated);
124 124
125#define MAX_UDP_PORTS 65536 125#define MAX_UDP_PORTS 65536
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e048ec62d109..93b7a933a775 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -98,7 +98,11 @@
98#endif 98#endif
99 99
100#define INFINITY_LIFE_TIME 0xFFFFFFFF 100#define INFINITY_LIFE_TIME 0xFFFFFFFF
101#define TIME_DELTA(a, b) ((unsigned long)((long)(a) - (long)(b))) 101
102static inline u32 cstamp_delta(unsigned long cstamp)
103{
104 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
105}
102 106
103#define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? HZ/50 : 1) 107#define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? HZ/50 : 1)
104#define ADDRCONF_TIMER_FUZZ (HZ / 4) 108#define ADDRCONF_TIMER_FUZZ (HZ / 4)
@@ -2740,10 +2744,6 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2740 /* Flag it for later restoration when link comes up */ 2744 /* Flag it for later restoration when link comes up */
2741 ifa->flags |= IFA_F_TENTATIVE; 2745 ifa->flags |= IFA_F_TENTATIVE;
2742 ifa->state = INET6_IFADDR_STATE_DAD; 2746 ifa->state = INET6_IFADDR_STATE_DAD;
2743
2744 write_unlock_bh(&idev->lock);
2745
2746 in6_ifa_hold(ifa);
2747 } else { 2747 } else {
2748 list_del(&ifa->if_list); 2748 list_del(&ifa->if_list);
2749 2749
@@ -2758,19 +2758,15 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2758 ifa->state = INET6_IFADDR_STATE_DEAD; 2758 ifa->state = INET6_IFADDR_STATE_DEAD;
2759 spin_unlock_bh(&ifa->state_lock); 2759 spin_unlock_bh(&ifa->state_lock);
2760 2760
2761 if (state == INET6_IFADDR_STATE_DEAD) 2761 if (state != INET6_IFADDR_STATE_DEAD) {
2762 goto put_ifa; 2762 __ipv6_ifa_notify(RTM_DELADDR, ifa);
2763 } 2763 atomic_notifier_call_chain(&inet6addr_chain,
2764 2764 NETDEV_DOWN, ifa);
2765 __ipv6_ifa_notify(RTM_DELADDR, ifa); 2765 }
2766 if (ifa->state == INET6_IFADDR_STATE_DEAD)
2767 atomic_notifier_call_chain(&inet6addr_chain,
2768 NETDEV_DOWN, ifa);
2769
2770put_ifa:
2771 in6_ifa_put(ifa);
2772 2766
2773 write_lock_bh(&idev->lock); 2767 in6_ifa_put(ifa);
2768 write_lock_bh(&idev->lock);
2769 }
2774 } 2770 }
2775 2771
2776 list_splice(&keep_list, &idev->addr_list); 2772 list_splice(&keep_list, &idev->addr_list);
@@ -3452,10 +3448,8 @@ static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
3452{ 3448{
3453 struct ifa_cacheinfo ci; 3449 struct ifa_cacheinfo ci;
3454 3450
3455 ci.cstamp = (u32)(TIME_DELTA(cstamp, INITIAL_JIFFIES) / HZ * 100 3451 ci.cstamp = cstamp_delta(cstamp);
3456 + TIME_DELTA(cstamp, INITIAL_JIFFIES) % HZ * 100 / HZ); 3452 ci.tstamp = cstamp_delta(tstamp);
3457 ci.tstamp = (u32)(TIME_DELTA(tstamp, INITIAL_JIFFIES) / HZ * 100
3458 + TIME_DELTA(tstamp, INITIAL_JIFFIES) % HZ * 100 / HZ);
3459 ci.ifa_prefered = preferred; 3453 ci.ifa_prefered = preferred;
3460 ci.ifa_valid = valid; 3454 ci.ifa_valid = valid;
3461 3455
@@ -3806,8 +3800,10 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
3806 array[DEVCONF_AUTOCONF] = cnf->autoconf; 3800 array[DEVCONF_AUTOCONF] = cnf->autoconf;
3807 array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits; 3801 array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
3808 array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits; 3802 array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
3809 array[DEVCONF_RTR_SOLICIT_INTERVAL] = cnf->rtr_solicit_interval; 3803 array[DEVCONF_RTR_SOLICIT_INTERVAL] =
3810 array[DEVCONF_RTR_SOLICIT_DELAY] = cnf->rtr_solicit_delay; 3804 jiffies_to_msecs(cnf->rtr_solicit_interval);
3805 array[DEVCONF_RTR_SOLICIT_DELAY] =
3806 jiffies_to_msecs(cnf->rtr_solicit_delay);
3811 array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version; 3807 array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
3812#ifdef CONFIG_IPV6_PRIVACY 3808#ifdef CONFIG_IPV6_PRIVACY
3813 array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr; 3809 array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
@@ -3821,7 +3817,8 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
3821 array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo; 3817 array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
3822#ifdef CONFIG_IPV6_ROUTER_PREF 3818#ifdef CONFIG_IPV6_ROUTER_PREF
3823 array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref; 3819 array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
3824 array[DEVCONF_RTR_PROBE_INTERVAL] = cnf->rtr_probe_interval; 3820 array[DEVCONF_RTR_PROBE_INTERVAL] =
3821 jiffies_to_msecs(cnf->rtr_probe_interval);
3825#ifdef CONFIG_IPV6_ROUTE_INFO 3822#ifdef CONFIG_IPV6_ROUTE_INFO
3826 array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen; 3823 array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
3827#endif 3824#endif
@@ -3937,10 +3934,9 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
3937 NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags); 3934 NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags);
3938 3935
3939 ci.max_reasm_len = IPV6_MAXPLEN; 3936 ci.max_reasm_len = IPV6_MAXPLEN;
3940 ci.tstamp = (__u32)(TIME_DELTA(idev->tstamp, INITIAL_JIFFIES) / HZ * 100 3937 ci.tstamp = cstamp_delta(idev->tstamp);
3941 + TIME_DELTA(idev->tstamp, INITIAL_JIFFIES) % HZ * 100 / HZ); 3938 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
3942 ci.reachable_time = idev->nd_parms->reachable_time; 3939 ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time);
3943 ci.retrans_time = idev->nd_parms->retrans_time;
3944 NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci); 3940 NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci);
3945 3941
3946 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32)); 3942 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
@@ -4025,11 +4021,11 @@ void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
4025 kfree_skb(skb); 4021 kfree_skb(skb);
4026 goto errout; 4022 goto errout;
4027 } 4023 }
4028 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); 4024 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
4029 return; 4025 return;
4030errout: 4026errout:
4031 if (err < 0) 4027 if (err < 0)
4032 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err); 4028 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
4033} 4029}
4034 4030
4035static inline size_t inet6_prefix_nlmsg_size(void) 4031static inline size_t inet6_prefix_nlmsg_size(void)
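
cstamp_delta() replaces the two-step TIME_DELTA()-based centisecond conversion with a single (delta * 100UL / HZ), which is safe because the arithmetic is done in unsigned long. A small sketch showing that both forms agree; the HZ and INITIAL_JIFFIES values below are made up for the example:

#include <stdio.h>

#define HZ 1000UL
#define INITIAL_JIFFIES 300000UL     /* illustrative value only */

/* The new helper: jiffies since boot, scaled to centiseconds.  Doing the
 * multiply first is fine because the whole expression is unsigned long. */
static unsigned long cstamp_delta(unsigned long cstamp)
{
    return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
}

/* The old expression, split into delta/HZ*100 + delta%HZ*100/HZ to avoid
 * overflowing 32-bit intermediates. */
static unsigned long cstamp_delta_old(unsigned long cstamp)
{
    unsigned long d = cstamp - INITIAL_JIFFIES;

    return d / HZ * 100 + d % HZ * 100 / HZ;
}

int main(void)
{
    unsigned long now = INITIAL_JIFFIES + 123456;   /* 123.456 s at HZ=1000 */

    printf("new: %lu  old: %lu\n", cstamp_delta(now), cstamp_delta_old(now));
    return 0;
}
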
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 2a59610c2a58..70e891a20fb9 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1175,6 +1175,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
1175 sizeof (struct ipv6hdr); 1175 sizeof (struct ipv6hdr);
1176 1176
1177 dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr); 1177 dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr);
1178 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1179 dev->mtu-=8;
1178 1180
1179 if (dev->mtu < IPV6_MIN_MTU) 1181 if (dev->mtu < IPV6_MIN_MTU)
1180 dev->mtu = IPV6_MIN_MTU; 1182 dev->mtu = IPV6_MIN_MTU;
@@ -1363,12 +1365,17 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
1363 1365
1364static void ip6_tnl_dev_setup(struct net_device *dev) 1366static void ip6_tnl_dev_setup(struct net_device *dev)
1365{ 1367{
1368 struct ip6_tnl *t;
1369
1366 dev->netdev_ops = &ip6_tnl_netdev_ops; 1370 dev->netdev_ops = &ip6_tnl_netdev_ops;
1367 dev->destructor = ip6_dev_free; 1371 dev->destructor = ip6_dev_free;
1368 1372
1369 dev->type = ARPHRD_TUNNEL6; 1373 dev->type = ARPHRD_TUNNEL6;
1370 dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr); 1374 dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr);
1371 dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr); 1375 dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr);
1376 t = netdev_priv(dev);
1377 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1378 dev->mtu-=8;
1372 dev->flags |= IFF_NOARP; 1379 dev->flags |= IFF_NOARP;
1373 dev->addr_len = sizeof(struct in6_addr); 1380 dev->addr_len = sizeof(struct in6_addr);
1374 dev->features |= NETIF_F_NETNS_LOCAL; 1381 dev->features |= NETIF_F_NETNS_LOCAL;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 51df035897e7..455582384ece 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1137,6 +1137,7 @@ static int get_info(struct net *net, void __user *user,
1137 private = &tmp; 1137 private = &tmp;
1138 } 1138 }
1139#endif 1139#endif
1140 memset(&info, 0, sizeof(info));
1140 info.valid_hooks = t->valid_hooks; 1141 info.valid_hooks = t->valid_hooks;
1141 memcpy(info.hook_entry, private->hook_entry, 1142 memcpy(info.hook_entry, private->hook_entry,
1142 sizeof(info.hook_entry)); 1143 sizeof(info.hook_entry));
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 3a3f129a44cb..79d43aa8fa8d 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -286,7 +286,7 @@ found:
286 286
287 /* Check for overlap with preceding fragment. */ 287 /* Check for overlap with preceding fragment. */
288 if (prev && 288 if (prev &&
289 (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0) 289 (NFCT_FRAG6_CB(prev)->offset + prev->len) > offset)
290 goto discard_fq; 290 goto discard_fq;
291 291
292 /* Look for overlap with succeeding segment. */ 292 /* Look for overlap with succeeding segment. */
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index d082eaeefa25..24b3558b8e67 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -126,6 +126,8 @@ static const struct snmp_mib snmp6_udp6_list[] = {
126 SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS), 126 SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS),
127 SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS), 127 SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS),
128 SNMP_MIB_ITEM("Udp6OutDatagrams", UDP_MIB_OUTDATAGRAMS), 128 SNMP_MIB_ITEM("Udp6OutDatagrams", UDP_MIB_OUTDATAGRAMS),
129 SNMP_MIB_ITEM("Udp6RcvbufErrors", UDP_MIB_RCVBUFERRORS),
130 SNMP_MIB_ITEM("Udp6SndbufErrors", UDP_MIB_SNDBUFERRORS),
129 SNMP_MIB_SENTINEL 131 SNMP_MIB_SENTINEL
130}; 132};
131 133
@@ -134,6 +136,8 @@ static const struct snmp_mib snmp6_udplite6_list[] = {
134 SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS), 136 SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS),
135 SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS), 137 SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS),
136 SNMP_MIB_ITEM("UdpLite6OutDatagrams", UDP_MIB_OUTDATAGRAMS), 138 SNMP_MIB_ITEM("UdpLite6OutDatagrams", UDP_MIB_OUTDATAGRAMS),
139 SNMP_MIB_ITEM("UdpLite6RcvbufErrors", UDP_MIB_RCVBUFERRORS),
140 SNMP_MIB_ITEM("UdpLite6SndbufErrors", UDP_MIB_SNDBUFERRORS),
137 SNMP_MIB_SENTINEL 141 SNMP_MIB_SENTINEL
138}; 142};
139 143
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index c7ba3149633f..0f2766453759 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -349,7 +349,7 @@ found:
349 349
350 /* Check for overlap with preceding fragment. */ 350 /* Check for overlap with preceding fragment. */
351 if (prev && 351 if (prev &&
352 (FRAG6_CB(prev)->offset + prev->len) - offset > 0) 352 (FRAG6_CB(prev)->offset + prev->len) > offset)
353 goto discard_fq; 353 goto discard_fq;
354 354
355 /* Look for overlap with succeeding segment. */ 355 /* Look for overlap with succeeding segment. */
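
In both reassembly paths, prev->len is unsigned, so the old "(prev_offset + prev->len) - offset > 0" test was evaluated in unsigned arithmetic: when the previous fragment ends before the new one starts, the "negative" difference wraps to a huge positive value and the fragment is wrongly treated as overlapping. A sketch of the difference between the old and new checks, simplified to plain integers:

#include <stdio.h>

/* Old check: the subtraction happens in unsigned arithmetic because
 * prev_len is unsigned, so a gap between fragments still looks like an
 * overlap. */
static int overlaps_old(int prev_offset, unsigned int prev_len, int offset)
{
    return (prev_offset + prev_len) - offset > 0;
}

/* New check: compare instead of subtracting (offsets are non-negative). */
static int overlaps_new(int prev_offset, unsigned int prev_len, int offset)
{
    return (prev_offset + prev_len) > (unsigned int)offset;
}

int main(void)
{
    /* Previous fragment covers [0, 100); the new one starts at 200: no
     * overlap, yet the old expression claims there is one. */
    printf("old: %d  new: %d\n",
           overlaps_old(0, 100, 200), overlaps_new(0, 100, 200));
    return 0;
}
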
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 25661f968f3f..96455ffb76fb 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1945,8 +1945,12 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1945 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); 1945 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
1946 struct neighbour *neigh; 1946 struct neighbour *neigh;
1947 1947
1948 if (rt == NULL) 1948 if (rt == NULL) {
1949 if (net_ratelimit())
1950 pr_warning("IPv6: Maximum number of routes reached,"
1951 " consider increasing route/max_size.\n");
1949 return ERR_PTR(-ENOMEM); 1952 return ERR_PTR(-ENOMEM);
1953 }
1950 1954
1951 dev_hold(net->loopback_dev); 1955 dev_hold(net->loopback_dev);
1952 in6_dev_hold(idev); 1956 in6_dev_hold(idev);
@@ -2741,6 +2745,7 @@ static void __net_exit ip6_route_net_exit(struct net *net)
2741 kfree(net->ipv6.ip6_prohibit_entry); 2745 kfree(net->ipv6.ip6_prohibit_entry);
2742 kfree(net->ipv6.ip6_blk_hole_entry); 2746 kfree(net->ipv6.ip6_blk_hole_entry);
2743#endif 2747#endif
2748 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
2744} 2749}
2745 2750
2746static struct pernet_operations ip6_route_net_ops = { 2751static struct pernet_operations ip6_route_net_ops = {
@@ -2832,5 +2837,6 @@ void ip6_route_cleanup(void)
2832 xfrm6_fini(); 2837 xfrm6_fini();
2833 fib6_gc_cleanup(); 2838 fib6_gc_cleanup();
2834 unregister_pernet_subsys(&ip6_route_net_ops); 2839 unregister_pernet_subsys(&ip6_route_net_ops);
2840 dst_entries_destroy(&ip6_dst_blackhole_ops);
2835 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep); 2841 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
2836} 2842}
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index d6bfaec3bbbf..8c4d00c7cd2b 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -606,8 +606,9 @@ static int ipip6_rcv(struct sk_buff *skb)
606 return 0; 606 return 0;
607 } 607 }
608 608
609 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); 609 /* no tunnel matched, let upstream know, ipsec may handle it */
610 rcu_read_unlock(); 610 rcu_read_unlock();
611 return 1;
611out: 612out:
612 kfree_skb(skb); 613 kfree_skb(skb);
613 return 0; 614 return 0;
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 7f097989cde2..a6de3059746d 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -45,7 +45,6 @@
45#include <linux/capability.h> 45#include <linux/capability.h>
46#include <linux/module.h> 46#include <linux/module.h>
47#include <linux/types.h> 47#include <linux/types.h>
48#include <linux/smp_lock.h>
49#include <linux/socket.h> 48#include <linux/socket.h>
50#include <linux/sockios.h> 49#include <linux/sockios.h>
51#include <linux/slab.h> 50#include <linux/slab.h>
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 7fa86373de41..7c567b8aa89a 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -15,7 +15,6 @@
15 15
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/smp_lock.h>
19#include "irnet_ppp.h" /* Private header */ 18#include "irnet_ppp.h" /* Private header */
20/* Please put other headers in irnet.h - Thanks */ 19/* Please put other headers in irnet.h - Thanks */
21 20
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 285761e77d90..f6054f9ccbe3 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -550,22 +550,30 @@ EXPORT_SYMBOL(irttp_close_tsap);
550 */ 550 */
551int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb) 551int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
552{ 552{
553 int ret;
554
553 IRDA_ASSERT(self != NULL, return -1;); 555 IRDA_ASSERT(self != NULL, return -1;);
554 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); 556 IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
555 IRDA_ASSERT(skb != NULL, return -1;); 557 IRDA_ASSERT(skb != NULL, return -1;);
556 558
557 IRDA_DEBUG(4, "%s()\n", __func__); 559 IRDA_DEBUG(4, "%s()\n", __func__);
558 560
561 /* Take shortcut on zero byte packets */
562 if (skb->len == 0) {
563 ret = 0;
564 goto err;
565 }
566
559 /* Check that nothing bad happens */ 567 /* Check that nothing bad happens */
560 if ((skb->len == 0) || (!self->connected)) { 568 if (!self->connected) {
561 IRDA_DEBUG(1, "%s(), No data, or not connected\n", 569 IRDA_WARNING("%s(), Not connected\n", __func__);
562 __func__); 570 ret = -ENOTCONN;
563 goto err; 571 goto err;
564 } 572 }
565 573
566 if (skb->len > self->max_seg_size) { 574 if (skb->len > self->max_seg_size) {
567 IRDA_DEBUG(1, "%s(), UData is too large for IrLAP!\n", 575 IRDA_ERROR("%s(), UData is too large for IrLAP!\n", __func__);
568 __func__); 576 ret = -EMSGSIZE;
569 goto err; 577 goto err;
570 } 578 }
571 579
@@ -576,7 +584,7 @@ int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
576 584
577err: 585err:
578 dev_kfree_skb(skb); 586 dev_kfree_skb(skb);
579 return -1; 587 return ret;
580} 588}
581EXPORT_SYMBOL(irttp_udata_request); 589EXPORT_SYMBOL(irttp_udata_request);
582 590
@@ -599,9 +607,15 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
599 IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__, 607 IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__,
600 skb_queue_len(&self->tx_queue)); 608 skb_queue_len(&self->tx_queue));
601 609
610 /* Take shortcut on zero byte packets */
611 if (skb->len == 0) {
612 ret = 0;
613 goto err;
614 }
615
602 /* Check that nothing bad happens */ 616 /* Check that nothing bad happens */
603 if ((skb->len == 0) || (!self->connected)) { 617 if (!self->connected) {
604 IRDA_WARNING("%s: No data, or not connected\n", __func__); 618 IRDA_WARNING("%s: Not connected\n", __func__);
605 ret = -ENOTCONN; 619 ret = -ENOTCONN;
606 goto err; 620 goto err;
607 } 621 }
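
The irttp changes make zero-length requests succeed as no-ops and turn the catch-all "return -1" into distinct error codes (-ENOTCONN, -EMSGSIZE) that the socket layer can propagate. A compact sketch of that control flow, with the actual queuing stubbed out:

#include <errno.h>
#include <stdio.h>

#define MAX_SEG 64

/* Shape of the patched irttp_udata_request(): zero-length sends succeed as
 * no-ops, and failures report *why* via distinct negative errno values
 * instead of a bare -1. */
static int udata_request(int connected, size_t len)
{
    if (len == 0)
        return 0;              /* take the shortcut: nothing to do */
    if (!connected)
        return -ENOTCONN;
    if (len > MAX_SEG)
        return -EMSGSIZE;
    return 0;                  /* would queue the frame here */
}

int main(void)
{
    printf("%d %d %d %d\n",
           udata_request(1, 0),      /*  0          */
           udata_request(0, 10),     /* -ENOTCONN   */
           udata_request(1, 1000),   /* -EMSGSIZE   */
           udata_request(1, 10));    /*  0          */
    return 0;
}
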
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 104ec3b283d4..b8dbae82fab8 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -249,7 +249,7 @@ static int l2tp_dfs_seq_open(struct inode *inode, struct file *file)
249 struct seq_file *seq; 249 struct seq_file *seq;
250 int rc = -ENOMEM; 250 int rc = -ENOMEM;
251 251
252 pd = kzalloc(GFP_KERNEL, sizeof(*pd)); 252 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
253 if (pd == NULL) 253 if (pd == NULL)
254 goto out; 254 goto out;
255 255
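
The l2tp_debugfs fix swaps kzalloc()'s arguments back into the (size, flags) order; the buggy call compiled because both parameters are plain integers, so it silently allocated "GFP_KERNEL" bytes with nonsense flags. A userspace sketch of why the sizeof(*ptr) idiom makes this easy to catch in review; the GFP_KERNEL value and the struct below are stand-ins, not the kernel's:

#include <stdio.h>
#include <stdlib.h>

#define GFP_KERNEL 0xd0u   /* stand-in flag value; the real one is opaque */

struct pd { int tunnel_idx; int session_idx; void *net; };

/* Toy kzalloc(): both parameters are integer types, so swapping them still
 * compiles.  Keeping sizeof(*ptr) as the first argument at the call site is
 * what makes the mistake visible. */
static void *kzalloc(size_t size, unsigned int flags)
{
    (void)flags;                    /* ignored in this userspace sketch */
    return calloc(1, size);
}

int main(void)
{
    struct pd *right = kzalloc(sizeof(*right), GFP_KERNEL);
    struct pd *wrong = kzalloc(GFP_KERNEL, sizeof(*wrong));  /* the old bug */

    printf("intended %zu bytes, buggy call asked for %u bytes\n",
           sizeof(*right), GFP_KERNEL);
    free(right);
    free(wrong);
    return 0;
}
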
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 0bf6a59545ab..522e219f3558 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -674,4 +674,8 @@ MODULE_LICENSE("GPL");
674MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); 674MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
675MODULE_DESCRIPTION("L2TP over IP"); 675MODULE_DESCRIPTION("L2TP over IP");
676MODULE_VERSION("1.0"); 676MODULE_VERSION("1.0");
677MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, SOCK_DGRAM, IPPROTO_L2TP); 677
 678/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
679 * enums
680 */
681MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 582612998211..e35dbe55f520 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -317,8 +317,9 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
317 goto out; 317 goto out;
318 rc = -ENODEV; 318 rc = -ENODEV;
319 rtnl_lock(); 319 rtnl_lock();
320 rcu_read_lock();
320 if (sk->sk_bound_dev_if) { 321 if (sk->sk_bound_dev_if) {
321 llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if); 322 llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
322 if (llc->dev) { 323 if (llc->dev) {
323 if (!addr->sllc_arphrd) 324 if (!addr->sllc_arphrd)
324 addr->sllc_arphrd = llc->dev->type; 325 addr->sllc_arphrd = llc->dev->type;
@@ -329,13 +330,13 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
329 !llc_mac_match(addr->sllc_mac, 330 !llc_mac_match(addr->sllc_mac,
330 llc->dev->dev_addr)) { 331 llc->dev->dev_addr)) {
331 rc = -EINVAL; 332 rc = -EINVAL;
332 dev_put(llc->dev);
333 llc->dev = NULL; 333 llc->dev = NULL;
334 } 334 }
335 } 335 }
336 } else 336 } else
337 llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd, 337 llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd,
338 addr->sllc_mac); 338 addr->sllc_mac);
339 rcu_read_unlock();
339 rtnl_unlock(); 340 rtnl_unlock();
340 if (!llc->dev) 341 if (!llc->dev)
341 goto out; 342 goto out;
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 4d6f8653ec88..8e8ea9cb7093 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -92,7 +92,7 @@ config MAC80211_MESH
92config MAC80211_LEDS 92config MAC80211_LEDS
93 bool "Enable LED triggers" 93 bool "Enable LED triggers"
94 depends on MAC80211 94 depends on MAC80211
95 select NEW_LEDS 95 depends on LEDS_CLASS
96 select LEDS_TRIGGERS 96 select LEDS_TRIGGERS
97 ---help--- 97 ---help---
98 This option enables a few LED triggers for different 98 This option enables a few LED triggers for different
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index f9163b12c7f1..7aa85591dbe7 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -391,6 +391,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
391 u32 hw_reconf_flags = 0; 391 u32 hw_reconf_flags = 0;
392 int i; 392 int i;
393 393
394 if (local->scan_sdata == sdata)
395 ieee80211_scan_cancel(local);
396
394 clear_bit(SDATA_STATE_RUNNING, &sdata->state); 397 clear_bit(SDATA_STATE_RUNNING, &sdata->state);
395 398
396 /* 399 /*
@@ -523,9 +526,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
523 synchronize_rcu(); 526 synchronize_rcu();
524 skb_queue_purge(&sdata->skb_queue); 527 skb_queue_purge(&sdata->skb_queue);
525 528
526 if (local->scan_sdata == sdata)
527 ieee80211_scan_cancel(local);
528
529 /* 529 /*
530 * Disable beaconing here for mesh only, AP and IBSS 530 * Disable beaconing here for mesh only, AP and IBSS
531 * are already taken care of. 531 * are already taken care of.
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 902b03ee8f60..54fb4a0e76f0 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2247,6 +2247,10 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2247 break; 2247 break;
2248 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 2248 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2249 case cpu_to_le16(IEEE80211_STYPE_DISASSOC): 2249 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
2250 if (is_multicast_ether_addr(mgmt->da) &&
2251 !is_broadcast_ether_addr(mgmt->da))
2252 return RX_DROP_MONITOR;
2253
2250 /* process only for station */ 2254 /* process only for station */
2251 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2255 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2252 return RX_DROP_MONITOR; 2256 return RX_DROP_MONITOR;
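
The added test drops deauth/disassoc frames whose destination address is multicast but not broadcast. The two predicates differ only in how much of the address they inspect: multicast is the group bit in the first octet, broadcast is the all-ones address. A small userspace sketch of the same classification:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static bool is_multicast_ether(const unsigned char *addr)
{
        return addr[0] & 0x01;                   /* group (multicast) bit */
}

static bool is_broadcast_ether(const unsigned char *addr)
{
        static const unsigned char bcast[ETH_ALEN] =
                { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        return memcmp(addr, bcast, ETH_ALEN) == 0;
}

int main(void)
{
        const unsigned char mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        const unsigned char bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        const unsigned char ucast[ETH_ALEN] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

        /* Mirrors the rx.c condition: multicast destination that is not broadcast. */
        printf("multicast frame dropped: %d\n", is_multicast_ether(mcast) && !is_broadcast_ether(mcast));
        printf("broadcast frame dropped: %d\n", is_multicast_ether(bcast) && !is_broadcast_ether(bcast));
        printf("unicast frame dropped:   %d\n", is_multicast_ether(ucast) && !is_broadcast_ether(ucast));
        return 0;
}
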
@@ -2741,6 +2745,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2741 2745
2742 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 2746 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2743 return; 2747 return;
2748 goto out;
2744 } 2749 }
2745 } 2750 }
2746 2751
@@ -2780,6 +2785,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2780 return; 2785 return;
2781 } 2786 }
2782 2787
2788 out:
2783 dev_kfree_skb(skb); 2789 dev_kfree_skb(skb);
2784} 2790}
2785 2791
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 96c594309506..7a637b80a62e 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1587,7 +1587,12 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1587 list) { 1587 list) {
1588 if (!ieee80211_sdata_running(tmp_sdata)) 1588 if (!ieee80211_sdata_running(tmp_sdata))
1589 continue; 1589 continue;
1590 if (tmp_sdata->vif.type != NL80211_IFTYPE_AP) 1590 if (tmp_sdata->vif.type ==
1591 NL80211_IFTYPE_MONITOR ||
1592 tmp_sdata->vif.type ==
1593 NL80211_IFTYPE_AP_VLAN ||
1594 tmp_sdata->vif.type ==
1595 NL80211_IFTYPE_WDS)
1591 continue; 1596 continue;
1592 if (compare_ether_addr(tmp_sdata->vif.addr, 1597 if (compare_ether_addr(tmp_sdata->vif.addr,
1593 hdr->addr2) == 0) { 1598 hdr->addr2) == 0) {
@@ -1732,15 +1737,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1732 int nh_pos, h_pos; 1737 int nh_pos, h_pos;
1733 struct sta_info *sta = NULL; 1738 struct sta_info *sta = NULL;
1734 u32 sta_flags = 0; 1739 u32 sta_flags = 0;
1740 struct sk_buff *tmp_skb;
1735 1741
1736 if (unlikely(skb->len < ETH_HLEN)) { 1742 if (unlikely(skb->len < ETH_HLEN)) {
1737 ret = NETDEV_TX_OK; 1743 ret = NETDEV_TX_OK;
1738 goto fail; 1744 goto fail;
1739 } 1745 }
1740 1746
1741 nh_pos = skb_network_header(skb) - skb->data;
1742 h_pos = skb_transport_header(skb) - skb->data;
1743
1744 /* convert Ethernet header to proper 802.11 header (based on 1747 /* convert Ethernet header to proper 802.11 header (based on
1745 * operation mode) */ 1748 * operation mode) */
1746 ethertype = (skb->data[12] << 8) | skb->data[13]; 1749 ethertype = (skb->data[12] << 8) | skb->data[13];
@@ -1913,6 +1916,20 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1913 goto fail; 1916 goto fail;
1914 } 1917 }
1915 1918
1919 /*
1920 * If the skb is shared we need to obtain our own copy.
1921 */
1922 if (skb_shared(skb)) {
1923 tmp_skb = skb;
1924 skb = skb_copy(skb, GFP_ATOMIC);
1925 kfree_skb(tmp_skb);
1926
1927 if (!skb) {
1928 ret = NETDEV_TX_OK;
1929 goto fail;
1930 }
1931 }
1932
1916 hdr.frame_control = fc; 1933 hdr.frame_control = fc;
1917 hdr.duration_id = 0; 1934 hdr.duration_id = 0;
1918 hdr.seq_ctrl = 0; 1935 hdr.seq_ctrl = 0;
@@ -1931,6 +1948,9 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1931 encaps_len = 0; 1948 encaps_len = 0;
1932 } 1949 }
1933 1950
1951 nh_pos = skb_network_header(skb) - skb->data;
1952 h_pos = skb_transport_header(skb) - skb->data;
1953
1934 skb_pull(skb, skip_header_bytes); 1954 skb_pull(skb, skip_header_bytes);
1935 nh_pos -= skip_header_bytes; 1955 nh_pos -= skip_header_bytes;
1936 h_pos -= skip_header_bytes; 1956 h_pos -= skip_header_bytes;
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index a22dac227055..70bd1d0774c6 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -4,6 +4,7 @@
4menuconfig IP_VS 4menuconfig IP_VS
5 tristate "IP virtual server support" 5 tristate "IP virtual server support"
6 depends on NET && INET && NETFILTER 6 depends on NET && INET && NETFILTER
7 depends on (NF_CONNTRACK || NF_CONNTRACK=n)
7 ---help--- 8 ---help---
8 IP Virtual Server support will let you build a high-performance 9 IP Virtual Server support will let you build a high-performance
9 virtual server based on cluster of two or more real servers. This 10 virtual server based on cluster of two or more real servers. This
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 1eacf8d9966a..27a5ea6b6a0f 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1312,7 +1312,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
1312 if (!hash) { 1312 if (!hash) {
1313 *vmalloced = 1; 1313 *vmalloced = 1;
1314 printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); 1314 printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
1315 hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); 1315 hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
1316 PAGE_KERNEL);
1316 } 1317 }
1317 1318
1318 if (hash && nulls) 1319 if (hash && nulls)
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index ed6d92958023..dc7bb74110df 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -292,6 +292,12 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
292 292
293 for (i = 0; i < MAX_NF_CT_PROTO; i++) 293 for (i = 0; i < MAX_NF_CT_PROTO; i++)
294 proto_array[i] = &nf_conntrack_l4proto_generic; 294 proto_array[i] = &nf_conntrack_l4proto_generic;
295
296 /* Before making proto_array visible to lockless readers,
297 * we must make sure its content is committed to memory.
298 */
299 smp_wmb();
300
295 nf_ct_protos[l4proto->l3proto] = proto_array; 301 nf_ct_protos[l4proto->l3proto] = proto_array;
296 } else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] != 302 } else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] !=
297 &nf_conntrack_l4proto_generic) { 303 &nf_conntrack_l4proto_generic) {
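
The smp_wmb() guarantees that proto_array is fully initialised before the nf_ct_protos[] pointer that publishes it can be observed by lockless readers. A userspace analogue of that publish pattern, written with C11 release/acquire atomics rather than the kernel's barrier primitives (an assumption of this sketch, not what the patch itself uses):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NENTRIES 4

static int table[NENTRIES];
static _Atomic(int *) published = NULL;

static void *writer(void *arg)
{
        (void)arg;
        for (int i = 0; i < NENTRIES; i++)
                table[i] = i * 10;               /* initialise the array first */

        /* Release store: every write above is visible to any reader that
         * observes the pointer with an acquire load (the kernel analogue is
         * smp_wmb() before the plain pointer assignment). */
        atomic_store_explicit(&published, table, memory_order_release);
        return NULL;
}

static void *reader(void *arg)
{
        (void)arg;
        int *p;

        while (!(p = atomic_load_explicit(&published, memory_order_acquire)))
                ;                                /* spin until the table is published */
        for (int i = 0; i < NENTRIES; i++)
                printf("entry %d = %d\n", i, p[i]);
        return NULL;
}

int main(void)
{
        pthread_t w, r;

        pthread_create(&r, NULL, reader, NULL);
        pthread_create(&w, NULL, writer, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        return 0;
}
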
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 3616f27b9d46..8298e676f5a0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1610,9 +1610,11 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1610 1610
1611 err = -EINVAL; 1611 err = -EINVAL;
1612 vnet_hdr_len = sizeof(vnet_hdr); 1612 vnet_hdr_len = sizeof(vnet_hdr);
1613 if ((len -= vnet_hdr_len) < 0) 1613 if (len < vnet_hdr_len)
1614 goto out_free; 1614 goto out_free;
1615 1615
1616 len -= vnet_hdr_len;
1617
1616 if (skb_is_gso(skb)) { 1618 if (skb_is_gso(skb)) {
1617 struct skb_shared_info *sinfo = skb_shinfo(skb); 1619 struct skb_shared_info *sinfo = skb_shinfo(skb);
1618 1620
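
The recvmsg length here is a size_t, so the old test "(len -= vnet_hdr_len) < 0" could never fire: an unsigned value is never negative, and the subtraction simply wraps around. Checking before subtracting is the fix. A tiny standalone demonstration of the wrap:

#include <stdio.h>

int main(void)
{
        const size_t vnet_hdr_len = 10;  /* pretend header size */
        size_t len = 8;                  /* caller asked for fewer bytes than that */

        /* Old form: for an unsigned type the comparison is always false
         * (compilers even warn about it), and len wraps to a huge value. */
        size_t broken = len;
        if ((broken -= vnet_hdr_len) < 0)
                puts("never reached");
        printf("after the wrap, broken len = %zu\n", broken);

        /* Fixed form: compare first, subtract afterwards. */
        if (len < vnet_hdr_len) {
                puts("fixed check rejects the short buffer");
                return 1;
        }
        len -= vnet_hdr_len;
        return 0;
}
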
@@ -1719,7 +1721,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
1719 rcu_read_lock(); 1721 rcu_read_lock();
1720 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); 1722 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
1721 if (dev) 1723 if (dev)
1722 strlcpy(uaddr->sa_data, dev->name, 15); 1724 strncpy(uaddr->sa_data, dev->name, 14);
1723 else 1725 else
1724 memset(uaddr->sa_data, 0, 14); 1726 memset(uaddr->sa_data, 0, 14);
1725 rcu_read_unlock(); 1727 rcu_read_unlock();
@@ -1742,6 +1744,7 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
1742 sll->sll_family = AF_PACKET; 1744 sll->sll_family = AF_PACKET;
1743 sll->sll_ifindex = po->ifindex; 1745 sll->sll_ifindex = po->ifindex;
1744 sll->sll_protocol = po->num; 1746 sll->sll_protocol = po->num;
1747 sll->sll_pkttype = 0;
1745 rcu_read_lock(); 1748 rcu_read_lock();
1746 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); 1749 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
1747 if (dev) { 1750 if (dev) {
diff --git a/net/rds/loop.c b/net/rds/loop.c
index c390156b426f..aeec1d483b17 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -134,8 +134,12 @@ static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp)
134static void rds_loop_conn_free(void *arg) 134static void rds_loop_conn_free(void *arg)
135{ 135{
136 struct rds_loop_connection *lc = arg; 136 struct rds_loop_connection *lc = arg;
137 unsigned long flags;
138
137 rdsdebug("lc %p\n", lc); 139 rdsdebug("lc %p\n", lc);
140 spin_lock_irqsave(&loop_conns_lock, flags);
138 list_del(&lc->loop_node); 141 list_del(&lc->loop_node);
142 spin_unlock_irqrestore(&loop_conns_lock, flags);
139 kfree(lc); 143 kfree(lc);
140} 144}
141 145
diff --git a/net/rds/message.c b/net/rds/message.c
index a84545dae370..1fd3d29023d7 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -224,6 +224,9 @@ struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
224 WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs); 224 WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
225 WARN_ON(!nents); 225 WARN_ON(!nents);
226 226
227 if (rm->m_used_sgs + nents > rm->m_total_sgs)
228 return NULL;
229
227 sg_ret = &sg_first[rm->m_used_sgs]; 230 sg_ret = &sg_first[rm->m_used_sgs];
228 sg_init_table(sg_ret, nents); 231 sg_init_table(sg_ret, nents);
229 rm->m_used_sgs += nents; 232 rm->m_used_sgs += nents;
@@ -246,6 +249,10 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
246 rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); 249 rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
247 rm->data.op_nents = ceil(total_len, PAGE_SIZE); 250 rm->data.op_nents = ceil(total_len, PAGE_SIZE);
248 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); 251 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
252 if (!rm->data.op_sg) {
253 rds_message_put(rm);
254 return ERR_PTR(-ENOMEM);
255 }
249 256
250 for (i = 0; i < rm->data.op_nents; ++i) { 257 for (i = 0; i < rm->data.op_nents; ++i) {
251 sg_set_page(&rm->data.op_sg[i], 258 sg_set_page(&rm->data.op_sg[i],
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 1a41debca1ce..4e37c1cbe8b2 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -479,13 +479,38 @@ void rds_atomic_free_op(struct rm_atomic_op *ao)
479 479
480 480
481/* 481/*
482 * Count the number of pages needed to describe an incoming iovec. 482 * Count the number of pages needed to describe an incoming iovec array.
483 */ 483 */
484static int rds_rdma_pages(struct rds_rdma_args *args) 484static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
485{
486 int tot_pages = 0;
487 unsigned int nr_pages;
488 unsigned int i;
489
490 /* figure out the number of pages in the vector */
491 for (i = 0; i < nr_iovecs; i++) {
492 nr_pages = rds_pages_in_vec(&iov[i]);
493 if (nr_pages == 0)
494 return -EINVAL;
495
496 tot_pages += nr_pages;
497
498 /*
499 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
500 * so tot_pages cannot overflow without first going negative.
501 */
502 if (tot_pages < 0)
503 return -EINVAL;
504 }
505
506 return tot_pages;
507}
508
509int rds_rdma_extra_size(struct rds_rdma_args *args)
485{ 510{
486 struct rds_iovec vec; 511 struct rds_iovec vec;
487 struct rds_iovec __user *local_vec; 512 struct rds_iovec __user *local_vec;
488 unsigned int tot_pages = 0; 513 int tot_pages = 0;
489 unsigned int nr_pages; 514 unsigned int nr_pages;
490 unsigned int i; 515 unsigned int i;
491 516
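
The new rds_rdma_pages() sums per-iovec page counts into a signed int and rejects a negative total; as the comment notes, one entry contributes at most (UINT_MAX >> PAGE_SHIFT) + 1 pages, so the total cannot wrap all the way around without first going negative. A userspace sketch of the same bounded-sum guard, using the GCC/Clang __builtin_add_overflow() helper to make the wrap check explicit (a substitution made in this sketch, not what the kernel code does):

#include <limits.h>
#include <stdio.h>

struct demo_iovec { unsigned long long addr; unsigned long long bytes; };

#define DEMO_PAGE_SIZE  4096ULL
#define DEMO_PAGE_MASK  (~(DEMO_PAGE_SIZE - 1))

/* Pages spanned by one entry (same arithmetic idea as rds_pages_in_vec()). */
static unsigned int demo_pages_in_vec(const struct demo_iovec *vec)
{
        unsigned long long start = vec->addr & DEMO_PAGE_MASK;
        unsigned long long end = (vec->addr + vec->bytes + DEMO_PAGE_SIZE - 1) & DEMO_PAGE_MASK;

        return (unsigned int)((end - start) / DEMO_PAGE_SIZE);
}

/* Sum the page counts, refusing iovec arrays whose total would overflow int. */
static int demo_rdma_pages(const struct demo_iovec *iov, int nr_iovecs)
{
        int tot_pages = 0;

        for (int i = 0; i < nr_iovecs; i++) {
                unsigned int nr = demo_pages_in_vec(&iov[i]);

                if (nr == 0)
                        return -1;               /* empty entry: reject */
                if (__builtin_add_overflow(tot_pages, (int)nr, &tot_pages))
                        return -1;               /* total would overflow */
        }
        return tot_pages;
}

int main(void)
{
        struct demo_iovec small[2] = { { 0x1000, 8192 }, { 0x9000, 100 } };
        struct demo_iovec huge[4096];

        printf("small request: %d pages\n", demo_rdma_pages(small, 2));

        /* A single huge entry is fine, but enough of them overflow the total. */
        for (int i = 0; i < 4096; i++)
                huge[i] = (struct demo_iovec){ 0, (unsigned long long)UINT_MAX };
        printf("absurd request: %d (rejected)\n", demo_rdma_pages(huge, 4096));
        return 0;
}
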
@@ -502,14 +527,16 @@ static int rds_rdma_pages(struct rds_rdma_args *args)
502 return -EINVAL; 527 return -EINVAL;
503 528
504 tot_pages += nr_pages; 529 tot_pages += nr_pages;
505 }
506 530
507 return tot_pages; 531 /*
508} 532 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
533 * so tot_pages cannot overflow without first going negative.
534 */
535 if (tot_pages < 0)
536 return -EINVAL;
537 }
509 538
510int rds_rdma_extra_size(struct rds_rdma_args *args) 539 return tot_pages * sizeof(struct scatterlist);
511{
512 return rds_rdma_pages(args) * sizeof(struct scatterlist);
513} 540}
514 541
515/* 542/*
@@ -520,13 +547,12 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
520 struct cmsghdr *cmsg) 547 struct cmsghdr *cmsg)
521{ 548{
522 struct rds_rdma_args *args; 549 struct rds_rdma_args *args;
523 struct rds_iovec vec;
524 struct rm_rdma_op *op = &rm->rdma; 550 struct rm_rdma_op *op = &rm->rdma;
525 int nr_pages; 551 int nr_pages;
526 unsigned int nr_bytes; 552 unsigned int nr_bytes;
527 struct page **pages = NULL; 553 struct page **pages = NULL;
528 struct rds_iovec __user *local_vec; 554 struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
529 unsigned int nr; 555 int iov_size;
530 unsigned int i, j; 556 unsigned int i, j;
531 int ret = 0; 557 int ret = 0;
532 558
@@ -541,14 +567,31 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
541 goto out; 567 goto out;
542 } 568 }
543 569
544 if (args->nr_local > (u64)UINT_MAX) { 570 if (args->nr_local > UIO_MAXIOV) {
545 ret = -EMSGSIZE; 571 ret = -EMSGSIZE;
546 goto out; 572 goto out;
547 } 573 }
548 574
549 nr_pages = rds_rdma_pages(args); 575 /* Check whether to allocate the iovec area */
550 if (nr_pages < 0) 576 iov_size = args->nr_local * sizeof(struct rds_iovec);
577 if (args->nr_local > UIO_FASTIOV) {
578 iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
579 if (!iovs) {
580 ret = -ENOMEM;
581 goto out;
582 }
583 }
584
585 if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
586 ret = -EFAULT;
587 goto out;
588 }
589
590 nr_pages = rds_rdma_pages(iovs, args->nr_local);
591 if (nr_pages < 0) {
592 ret = -EINVAL;
551 goto out; 593 goto out;
594 }
552 595
553 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); 596 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
554 if (!pages) { 597 if (!pages) {
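
The reworked rds_cmsg_rdma_args() now copies the whole user iovec array up front, bounding it at UIO_MAXIOV and keeping the common small case (UIO_FASTIOV entries or fewer) on the stack, with a heap fallback only for larger requests. A userspace sketch of that fast-path-plus-fallback pattern, with demo names standing in for the kernel helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_FASTIOV  8          /* stand-in for UIO_FASTIOV */
#define DEMO_MAXIOV   1024       /* stand-in for UIO_MAXIOV  */

struct demo_iovec { void *base; size_t len; };

/* Copy nr entries from 'user' into either the caller's stack buffer or a
 * freshly allocated one; returns the buffer to use, or NULL on error. */
static struct demo_iovec *demo_import_iovec(const struct demo_iovec *user,
                                            size_t nr, struct demo_iovec *stack)
{
        struct demo_iovec *iovs = stack;

        if (nr > DEMO_MAXIOV)
                return NULL;                     /* -EMSGSIZE in the kernel code */
        if (nr > DEMO_FASTIOV) {
                iovs = malloc(nr * sizeof(*iovs));
                if (!iovs)
                        return NULL;             /* -ENOMEM */
        }
        memcpy(iovs, user, nr * sizeof(*iovs));  /* copy_from_user() analogue */
        return iovs;
}

int main(void)
{
        struct demo_iovec stack[DEMO_FASTIOV];
        struct demo_iovec user[20];

        memset(user, 0, sizeof(user));

        struct demo_iovec *small = demo_import_iovec(user, 3, stack);
        struct demo_iovec *large = demo_import_iovec(user, 20, stack);

        printf("3 entries used the stack buffer: %s\n", small == stack ? "yes" : "no");
        printf("20 entries used the heap:        %s\n",
               (large && large != stack) ? "yes" : "no");

        if (large && large != stack)
                free(large);                     /* mirror of sock_kfree_s() on exit */
        return 0;
}
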
@@ -564,6 +607,10 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
564 op->op_recverr = rs->rs_recverr; 607 op->op_recverr = rs->rs_recverr;
565 WARN_ON(!nr_pages); 608 WARN_ON(!nr_pages);
566 op->op_sg = rds_message_alloc_sgs(rm, nr_pages); 609 op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
610 if (!op->op_sg) {
611 ret = -ENOMEM;
612 goto out;
613 }
567 614
568 if (op->op_notify || op->op_recverr) { 615 if (op->op_notify || op->op_recverr) {
569 /* We allocate an uninitialized notifier here, because 616 /* We allocate an uninitialized notifier here, because
@@ -597,50 +644,40 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
597 (unsigned long long)args->remote_vec.addr, 644 (unsigned long long)args->remote_vec.addr,
598 op->op_rkey); 645 op->op_rkey);
599 646
600 local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
601
602 for (i = 0; i < args->nr_local; i++) { 647 for (i = 0; i < args->nr_local; i++) {
603 if (copy_from_user(&vec, &local_vec[i], 648 struct rds_iovec *iov = &iovs[i];
604 sizeof(struct rds_iovec))) { 649 /* don't need to check, rds_rdma_pages() verified nr will be +nonzero */
605 ret = -EFAULT; 650 unsigned int nr = rds_pages_in_vec(iov);
606 goto out;
607 }
608
609 nr = rds_pages_in_vec(&vec);
610 if (nr == 0) {
611 ret = -EINVAL;
612 goto out;
613 }
614 651
615 rs->rs_user_addr = vec.addr; 652 rs->rs_user_addr = iov->addr;
616 rs->rs_user_bytes = vec.bytes; 653 rs->rs_user_bytes = iov->bytes;
617 654
618 /* If it's a WRITE operation, we want to pin the pages for reading. 655 /* If it's a WRITE operation, we want to pin the pages for reading.
619 * If it's a READ operation, we need to pin the pages for writing. 656 * If it's a READ operation, we need to pin the pages for writing.
620 */ 657 */
621 ret = rds_pin_pages(vec.addr, nr, pages, !op->op_write); 658 ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
622 if (ret < 0) 659 if (ret < 0)
623 goto out; 660 goto out;
624 661
625 rdsdebug("RDS: nr_bytes %u nr %u vec.bytes %llu vec.addr %llx\n", 662 rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
626 nr_bytes, nr, vec.bytes, vec.addr); 663 nr_bytes, nr, iov->bytes, iov->addr);
627 664
628 nr_bytes += vec.bytes; 665 nr_bytes += iov->bytes;
629 666
630 for (j = 0; j < nr; j++) { 667 for (j = 0; j < nr; j++) {
631 unsigned int offset = vec.addr & ~PAGE_MASK; 668 unsigned int offset = iov->addr & ~PAGE_MASK;
632 struct scatterlist *sg; 669 struct scatterlist *sg;
633 670
634 sg = &op->op_sg[op->op_nents + j]; 671 sg = &op->op_sg[op->op_nents + j];
635 sg_set_page(sg, pages[j], 672 sg_set_page(sg, pages[j],
636 min_t(unsigned int, vec.bytes, PAGE_SIZE - offset), 673 min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
637 offset); 674 offset);
638 675
639 rdsdebug("RDS: sg->offset %x sg->len %x vec.addr %llx vec.bytes %llu\n", 676 rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
640 sg->offset, sg->length, vec.addr, vec.bytes); 677 sg->offset, sg->length, iov->addr, iov->bytes);
641 678
642 vec.addr += sg->length; 679 iov->addr += sg->length;
643 vec.bytes -= sg->length; 680 iov->bytes -= sg->length;
644 } 681 }
645 682
646 op->op_nents += nr; 683 op->op_nents += nr;
@@ -655,13 +692,14 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
655 } 692 }
656 op->op_bytes = nr_bytes; 693 op->op_bytes = nr_bytes;
657 694
658 ret = 0;
659out: 695out:
696 if (iovs != iovstack)
697 sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
660 kfree(pages); 698 kfree(pages);
661 if (ret) 699 if (ret)
662 rds_rdma_free_op(op); 700 rds_rdma_free_op(op);
663 701 else
664 rds_stats_inc(s_send_rdma); 702 rds_stats_inc(s_send_rdma);
665 703
666 return ret; 704 return ret;
667} 705}
@@ -773,6 +811,10 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
773 rm->atomic.op_active = 1; 811 rm->atomic.op_active = 1;
774 rm->atomic.op_recverr = rs->rs_recverr; 812 rm->atomic.op_recverr = rs->rs_recverr;
775 rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1); 813 rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
814 if (!rm->atomic.op_sg) {
815 ret = -ENOMEM;
816 goto err;
817 }
776 818
777 /* verify 8 byte-aligned */ 819 /* verify 8 byte-aligned */
778 if (args->local_addr & 0x7) { 820 if (args->local_addr & 0x7) {
diff --git a/net/rds/send.c b/net/rds/send.c
index 0bc9db17a87d..35b9c2e9caf1 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -973,6 +973,10 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
973 /* Attach data to the rm */ 973 /* Attach data to the rm */
974 if (payload_len) { 974 if (payload_len) {
975 rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE)); 975 rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
976 if (!rm->data.op_sg) {
977 ret = -ENOMEM;
978 goto out;
979 }
976 ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len); 980 ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
977 if (ret) 981 if (ret)
978 goto out; 982 goto out;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 08a8c6cf2d10..8e0a32001c90 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -221,7 +221,13 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
221static void rds_tcp_conn_free(void *arg) 221static void rds_tcp_conn_free(void *arg)
222{ 222{
223 struct rds_tcp_connection *tc = arg; 223 struct rds_tcp_connection *tc = arg;
224 unsigned long flags;
224 rdsdebug("freeing tc %p\n", tc); 225 rdsdebug("freeing tc %p\n", tc);
226
227 spin_lock_irqsave(&rds_tcp_conn_lock, flags);
228 list_del(&tc->t_tcp_node);
229 spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
230
225 kmem_cache_free(rds_tcp_conn_slab, tc); 231 kmem_cache_free(rds_tcp_conn_slab, tc);
226} 232}
227 233
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index efd4f95fd050..f23d9155b1ef 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -268,6 +268,10 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh,
268 goto nla_put_failure; 268 goto nla_put_failure;
269 269
270 nla_nest_end(skb, nest); 270 nla_nest_end(skb, nest);
271
272 if (tcf_exts_dump_stats(skb, &f->exts, &basic_ext_map) < 0)
273 goto nla_put_failure;
274
271 return skb->len; 275 return skb->len;
272 276
273nla_put_failure: 277nla_put_failure:
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 37dff78e9cb1..d49c40fb7e09 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -34,8 +34,6 @@ struct cgroup_subsys net_cls_subsys = {
34 .populate = cgrp_populate, 34 .populate = cgrp_populate,
35#ifdef CONFIG_NET_CLS_CGROUP 35#ifdef CONFIG_NET_CLS_CGROUP
36 .subsys_id = net_cls_subsys_id, 36 .subsys_id = net_cls_subsys_id,
37#else
38#define net_cls_subsys_id net_cls_subsys.subsys_id
39#endif 37#endif
40 .module = THIS_MODULE, 38 .module = THIS_MODULE,
41}; 39};
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index 763253257411..ea8f566e720c 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -103,7 +103,8 @@ retry:
103 103
104static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m) 104static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
105{ 105{
106 textsearch_destroy(EM_TEXT_PRIV(m)->config); 106 if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config)
107 textsearch_destroy(EM_TEXT_PRIV(m)->config);
107} 108}
108 109
109static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m) 110static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 1ef29c74d85e..e58f9476f29c 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -92,7 +92,7 @@ static struct sctp_af *sctp_af_v6_specific;
92struct kmem_cache *sctp_chunk_cachep __read_mostly; 92struct kmem_cache *sctp_chunk_cachep __read_mostly;
93struct kmem_cache *sctp_bucket_cachep __read_mostly; 93struct kmem_cache *sctp_bucket_cachep __read_mostly;
94 94
95int sysctl_sctp_mem[3]; 95long sysctl_sctp_mem[3];
96int sysctl_sctp_rmem[3]; 96int sysctl_sctp_rmem[3];
97int sysctl_sctp_wmem[3]; 97int sysctl_sctp_wmem[3];
98 98
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index e34ca9cc1167..0b9ee34ad35c 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -111,12 +111,12 @@ static void sctp_sock_migrate(struct sock *, struct sock *,
111static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; 111static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
112 112
113extern struct kmem_cache *sctp_bucket_cachep; 113extern struct kmem_cache *sctp_bucket_cachep;
114extern int sysctl_sctp_mem[3]; 114extern long sysctl_sctp_mem[3];
115extern int sysctl_sctp_rmem[3]; 115extern int sysctl_sctp_rmem[3];
116extern int sysctl_sctp_wmem[3]; 116extern int sysctl_sctp_wmem[3];
117 117
118static int sctp_memory_pressure; 118static int sctp_memory_pressure;
119static atomic_t sctp_memory_allocated; 119static atomic_long_t sctp_memory_allocated;
120struct percpu_counter sctp_sockets_allocated; 120struct percpu_counter sctp_sockets_allocated;
121 121
122static void sctp_enter_memory_pressure(struct sock *sk) 122static void sctp_enter_memory_pressure(struct sock *sk)
@@ -2932,6 +2932,7 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
2932 struct sctp_association *asoc = NULL; 2932 struct sctp_association *asoc = NULL;
2933 struct sctp_setpeerprim prim; 2933 struct sctp_setpeerprim prim;
2934 struct sctp_chunk *chunk; 2934 struct sctp_chunk *chunk;
2935 struct sctp_af *af;
2935 int err; 2936 int err;
2936 2937
2937 sp = sctp_sk(sk); 2938 sp = sctp_sk(sk);
@@ -2959,6 +2960,13 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
2959 if (!sctp_state(asoc, ESTABLISHED)) 2960 if (!sctp_state(asoc, ESTABLISHED))
2960 return -ENOTCONN; 2961 return -ENOTCONN;
2961 2962
2963 af = sctp_get_af_specific(prim.sspp_addr.ss_family);
2964 if (!af)
2965 return -EINVAL;
2966
2967 if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL))
2968 return -EADDRNOTAVAIL;
2969
2962 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr)) 2970 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr))
2963 return -EADDRNOTAVAIL; 2971 return -EADDRNOTAVAIL;
2964 2972
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 832590bbe0c0..50cb57f0919e 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -54,7 +54,7 @@ static int sack_timer_max = 500;
54static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ 54static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
55static int rwnd_scale_max = 16; 55static int rwnd_scale_max = 16;
56 56
57extern int sysctl_sctp_mem[3]; 57extern long sysctl_sctp_mem[3];
58extern int sysctl_sctp_rmem[3]; 58extern int sysctl_sctp_rmem[3];
59extern int sysctl_sctp_wmem[3]; 59extern int sysctl_sctp_wmem[3];
60 60
@@ -203,7 +203,7 @@ static ctl_table sctp_table[] = {
203 .data = &sysctl_sctp_mem, 203 .data = &sysctl_sctp_mem,
204 .maxlen = sizeof(sysctl_sctp_mem), 204 .maxlen = sizeof(sysctl_sctp_mem),
205 .mode = 0644, 205 .mode = 0644,
206 .proc_handler = proc_dointvec, 206 .proc_handler = proc_doulongvec_minmax
207 }, 207 },
208 { 208 {
209 .procname = "sctp_rmem", 209 .procname = "sctp_rmem",
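
These sctp hunks change sysctl_sctp_mem from int[3] to long[3] (and the accounting counter to atomic_long_t), so the sysctl handler has to become proc_doulongvec_minmax; feeding a long[] table to an int-based handler only looks right on 32-bit and mangles the values on LP64. A short illustration of the size mismatch:

#include <stdio.h>
#include <string.h>

int main(void)
{
        long sctp_mem[3] = { 4096, 6144, 8192 };
        int  int_view[3];

        printf("sizeof(int) = %zu, sizeof(long) = %zu\n", sizeof(int), sizeof(long));

        /* A handler that believes the table holds ints copies only
         * 3 * sizeof(int) bytes.  On LP64 that covers sctp_mem[0] and the
         * low half of sctp_mem[1]; sctp_mem[2] is never seen, and one of
         * the "values" is just the high half of sctp_mem[0]. */
        memcpy(int_view, sctp_mem, sizeof(int_view));
        for (int i = 0; i < 3; i++)
                printf("int-sized view[%d] = %d    actual long[%d] = %ld\n",
                       i, int_view[i], i, sctp_mem[i]);
        return 0;
}
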
diff --git a/net/socket.c b/net/socket.c
index 5247ae10f374..088fb3fd45e0 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -732,6 +732,21 @@ static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
732 return ret; 732 return ret;
733} 733}
734 734
735/**
736 * kernel_recvmsg - Receive a message from a socket (kernel space)
737 * @sock: The socket to receive the message from
738 * @msg: Received message
739 * @vec: Input s/g array for message data
740 * @num: Size of input s/g array
741 * @size: Number of bytes to read
742 * @flags: Message flags (MSG_DONTWAIT, etc...)
743 *
744 * On return the msg structure contains the scatter/gather array passed in the
745 * vec argument. The array is modified so that it consists of the unfilled
746 * portion of the original array.
747 *
748 * The returned value is the total number of bytes received, or an error.
749 */
735int kernel_recvmsg(struct socket *sock, struct msghdr *msg, 750int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
736 struct kvec *vec, size_t num, size_t size, int flags) 751 struct kvec *vec, size_t num, size_t size, int flags)
737{ 752{
@@ -1652,6 +1667,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
1652 struct iovec iov; 1667 struct iovec iov;
1653 int fput_needed; 1668 int fput_needed;
1654 1669
1670 if (len > INT_MAX)
1671 len = INT_MAX;
1655 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1672 sock = sockfd_lookup_light(fd, &err, &fput_needed);
1656 if (!sock) 1673 if (!sock)
1657 goto out; 1674 goto out;
@@ -1709,6 +1726,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
1709 int err, err2; 1726 int err, err2;
1710 int fput_needed; 1727 int fput_needed;
1711 1728
1729 if (size > INT_MAX)
1730 size = INT_MAX;
1712 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1731 sock = sockfd_lookup_light(fd, &err, &fput_needed);
1713 if (!sock) 1732 if (!sock)
1714 goto out; 1733 goto out;
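
Both sendto() and recvfrom() accept a size_t length from userspace, while the byte counts flowing back up the stack are plain ints, so oversized requests are clamped to INT_MAX before they go any further. A one-screen sketch of the clamp:

#include <limits.h>
#include <stdio.h>

/* Clamp a userspace-supplied size_t so the result always fits in the
 * int-sized byte counts used further down the stack. */
static size_t clamp_len(size_t len)
{
        return len > INT_MAX ? (size_t)INT_MAX : len;
}

int main(void)
{
        size_t big = (size_t)INT_MAX + 12345u;

        printf("requested %zu, clamped to %zu\n", big, clamp_len(big));
        printf("requested %zu, clamped to %zu\n", (size_t)4096, clamp_len(4096));
        return 0;
}
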
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 9dab9573be41..92ce94f5146b 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -989,20 +989,26 @@ call_refreshresult(struct rpc_task *task)
989 dprint_status(task); 989 dprint_status(task);
990 990
991 task->tk_status = 0; 991 task->tk_status = 0;
992 task->tk_action = call_allocate; 992 task->tk_action = call_refresh;
993 if (status >= 0 && rpcauth_uptodatecred(task))
994 return;
995 switch (status) { 993 switch (status) {
996 case -EACCES: 994 case 0:
997 rpc_exit(task, -EACCES); 995 if (rpcauth_uptodatecred(task))
998 return; 996 task->tk_action = call_allocate;
999 case -ENOMEM:
1000 rpc_exit(task, -ENOMEM);
1001 return; 997 return;
1002 case -ETIMEDOUT: 998 case -ETIMEDOUT:
1003 rpc_delay(task, 3*HZ); 999 rpc_delay(task, 3*HZ);
1000 case -EAGAIN:
1001 status = -EACCES;
1002 if (!task->tk_cred_retry)
1003 break;
1004 task->tk_cred_retry--;
1005 dprintk("RPC: %5u %s: retry refresh creds\n",
1006 task->tk_pid, __func__);
1007 return;
1004 } 1008 }
1005 task->tk_action = call_refresh; 1009 dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
1010 task->tk_pid, __func__, status);
1011 rpc_exit(task, status);
1006} 1012}
1007 1013
1008/* 1014/*
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index f71a73107ae9..80df89d957ba 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -115,9 +115,7 @@ EXPORT_SYMBOL_GPL(svc_seq_show);
115 */ 115 */
116struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) 116struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt)
117{ 117{
118 struct rpc_iostats *new; 118 return kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL);
119 new = kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL);
120 return new;
121} 119}
122EXPORT_SYMBOL_GPL(rpc_alloc_iostats); 120EXPORT_SYMBOL_GPL(rpc_alloc_iostats);
123 121
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index c82fe739fbdc..3f2c5559ca1a 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -5,7 +5,6 @@
5 */ 5 */
6 6
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/smp_lock.h>
9#include <linux/errno.h> 8#include <linux/errno.h>
10#include <linux/freezer.h> 9#include <linux/freezer.h>
11#include <linux/kthread.h> 10#include <linux/kthread.h>
@@ -213,6 +212,7 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
213 spin_lock(&svc_xprt_class_lock); 212 spin_lock(&svc_xprt_class_lock);
214 list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) { 213 list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
215 struct svc_xprt *newxprt; 214 struct svc_xprt *newxprt;
215 unsigned short newport;
216 216
217 if (strcmp(xprt_name, xcl->xcl_name)) 217 if (strcmp(xprt_name, xcl->xcl_name))
218 continue; 218 continue;
@@ -231,8 +231,9 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
231 spin_lock_bh(&serv->sv_lock); 231 spin_lock_bh(&serv->sv_lock);
232 list_add(&newxprt->xpt_list, &serv->sv_permsocks); 232 list_add(&newxprt->xpt_list, &serv->sv_permsocks);
233 spin_unlock_bh(&serv->sv_lock); 233 spin_unlock_bh(&serv->sv_lock);
234 newport = svc_xprt_local_port(newxprt);
234 clear_bit(XPT_BUSY, &newxprt->xpt_flags); 235 clear_bit(XPT_BUSY, &newxprt->xpt_flags);
235 return svc_xprt_local_port(newxprt); 236 return newport;
236 } 237 }
237 err: 238 err:
238 spin_unlock(&svc_xprt_class_lock); 239 spin_unlock(&svc_xprt_class_lock);
@@ -426,8 +427,13 @@ void svc_xprt_received(struct svc_xprt *xprt)
426{ 427{
427 BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); 428 BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
428 xprt->xpt_pool = NULL; 429 xprt->xpt_pool = NULL;
430 /* As soon as we clear busy, the xprt could be closed and
431 * 'put', so we need a reference to call svc_xprt_enqueue with:
432 */
433 svc_xprt_get(xprt);
429 clear_bit(XPT_BUSY, &xprt->xpt_flags); 434 clear_bit(XPT_BUSY, &xprt->xpt_flags);
430 svc_xprt_enqueue(xprt); 435 svc_xprt_enqueue(xprt);
436 svc_xprt_put(xprt);
431} 437}
432EXPORT_SYMBOL_GPL(svc_xprt_received); 438EXPORT_SYMBOL_GPL(svc_xprt_received);
433 439
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 33217fc3d697..e9f0d5004483 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -396,6 +396,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
396 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 396 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
397 struct tipc_sock *tsock = tipc_sk(sock->sk); 397 struct tipc_sock *tsock = tipc_sk(sock->sk);
398 398
399 memset(addr, 0, sizeof(*addr));
399 if (peer) { 400 if (peer) {
400 if ((sock->state != SS_CONNECTED) && 401 if ((sock->state != SS_CONNECTED) &&
401 ((peer != 2) || (sock->state != SS_DISCONNECTING))) 402 ((peer != 2) || (sock->state != SS_DISCONNECTING)))
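
The added memset() ensures every byte of the sockaddr_tipc copied back to userspace is initialised, including compiler padding and fields not written on this path, which is the standard cure for kernel stack information leaks. A userspace sketch of why the padding matters:

#include <stdio.h>
#include <string.h>

struct demo_addr {
        unsigned char family;            /* 1 byte, followed by compiler padding */
        unsigned int  node;
        unsigned int  ref;
};

union addr_buf {
        struct demo_addr addr;
        unsigned char    raw[sizeof(struct demo_addr)];
};

static void fill_addr(struct demo_addr *a, int scrub_first)
{
        if (scrub_first)
                memset(a, 0, sizeof(*a));    /* equivalent of the added memset() */
        a->family = 30;                      /* AF_TIPC */
        a->node   = 0x01001001;
        a->ref    = 42;                      /* padding bytes are never written */
}

static void dump(const char *tag, const union addr_buf *u)
{
        printf("%-9s", tag);
        for (size_t i = 0; i < sizeof(u->raw); i++)
                printf(" %02x", u->raw[i]);
        putchar('\n');
}

int main(void)
{
        union addr_buf u;

        memset(u.raw, 0xAA, sizeof(u.raw));  /* pretend the stack held old data */
        fill_addr(&u.addr, 0);
        dump("leaky:", &u);                  /* 0xaa shows through in the padding */

        memset(u.raw, 0xAA, sizeof(u.raw));
        fill_addr(&u.addr, 1);
        dump("scrubbed:", &u);
        return 0;
}
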
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 3c95304a0817..2268e6798124 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1343,9 +1343,25 @@ static void unix_destruct_scm(struct sk_buff *skb)
1343 sock_wfree(skb); 1343 sock_wfree(skb);
1344} 1344}
1345 1345
1346#define MAX_RECURSION_LEVEL 4
1347
1346static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) 1348static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1347{ 1349{
1348 int i; 1350 int i;
1351 unsigned char max_level = 0;
1352 int unix_sock_count = 0;
1353
1354 for (i = scm->fp->count - 1; i >= 0; i--) {
1355 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1356
1357 if (sk) {
1358 unix_sock_count++;
1359 max_level = max(max_level,
1360 unix_sk(sk)->recursion_level);
1361 }
1362 }
1363 if (unlikely(max_level > MAX_RECURSION_LEVEL))
1364 return -ETOOMANYREFS;
1349 1365
1350 /* 1366 /*
1351 * Need to duplicate file references for the sake of garbage 1367 * Need to duplicate file references for the sake of garbage
@@ -1356,9 +1372,11 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1356 if (!UNIXCB(skb).fp) 1372 if (!UNIXCB(skb).fp)
1357 return -ENOMEM; 1373 return -ENOMEM;
1358 1374
1359 for (i = scm->fp->count-1; i >= 0; i--) 1375 if (unix_sock_count) {
1360 unix_inflight(scm->fp->fp[i]); 1376 for (i = scm->fp->count - 1; i >= 0; i--)
1361 return 0; 1377 unix_inflight(scm->fp->fp[i]);
1378 }
1379 return max_level;
1362} 1380}
1363 1381
1364static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds) 1382static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
@@ -1393,6 +1411,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1393 struct sk_buff *skb; 1411 struct sk_buff *skb;
1394 long timeo; 1412 long timeo;
1395 struct scm_cookie tmp_scm; 1413 struct scm_cookie tmp_scm;
1414 int max_level;
1396 1415
1397 if (NULL == siocb->scm) 1416 if (NULL == siocb->scm)
1398 siocb->scm = &tmp_scm; 1417 siocb->scm = &tmp_scm;
@@ -1431,8 +1450,9 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1431 goto out; 1450 goto out;
1432 1451
1433 err = unix_scm_to_skb(siocb->scm, skb, true); 1452 err = unix_scm_to_skb(siocb->scm, skb, true);
1434 if (err) 1453 if (err < 0)
1435 goto out_free; 1454 goto out_free;
1455 max_level = err + 1;
1436 unix_get_secdata(siocb->scm, skb); 1456 unix_get_secdata(siocb->scm, skb);
1437 1457
1438 skb_reset_transport_header(skb); 1458 skb_reset_transport_header(skb);
@@ -1514,6 +1534,8 @@ restart:
1514 if (sock_flag(other, SOCK_RCVTSTAMP)) 1534 if (sock_flag(other, SOCK_RCVTSTAMP))
1515 __net_timestamp(skb); 1535 __net_timestamp(skb);
1516 skb_queue_tail(&other->sk_receive_queue, skb); 1536 skb_queue_tail(&other->sk_receive_queue, skb);
1537 if (max_level > unix_sk(other)->recursion_level)
1538 unix_sk(other)->recursion_level = max_level;
1517 unix_state_unlock(other); 1539 unix_state_unlock(other);
1518 other->sk_data_ready(other, len); 1540 other->sk_data_ready(other, len);
1519 sock_put(other); 1541 sock_put(other);
@@ -1544,6 +1566,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1544 int sent = 0; 1566 int sent = 0;
1545 struct scm_cookie tmp_scm; 1567 struct scm_cookie tmp_scm;
1546 bool fds_sent = false; 1568 bool fds_sent = false;
1569 int max_level;
1547 1570
1548 if (NULL == siocb->scm) 1571 if (NULL == siocb->scm)
1549 siocb->scm = &tmp_scm; 1572 siocb->scm = &tmp_scm;
@@ -1607,10 +1630,11 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1607 1630
1608 /* Only send the fds in the first buffer */ 1631 /* Only send the fds in the first buffer */
1609 err = unix_scm_to_skb(siocb->scm, skb, !fds_sent); 1632 err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
1610 if (err) { 1633 if (err < 0) {
1611 kfree_skb(skb); 1634 kfree_skb(skb);
1612 goto out_err; 1635 goto out_err;
1613 } 1636 }
1637 max_level = err + 1;
1614 fds_sent = true; 1638 fds_sent = true;
1615 1639
1616 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); 1640 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
@@ -1626,6 +1650,8 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1626 goto pipe_err_free; 1650 goto pipe_err_free;
1627 1651
1628 skb_queue_tail(&other->sk_receive_queue, skb); 1652 skb_queue_tail(&other->sk_receive_queue, skb);
1653 if (max_level > unix_sk(other)->recursion_level)
1654 unix_sk(other)->recursion_level = max_level;
1629 unix_state_unlock(other); 1655 unix_state_unlock(other);
1630 other->sk_data_ready(other, size); 1656 other->sk_data_ready(other, size);
1631 sent += size; 1657 sent += size;
@@ -1845,6 +1871,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1845 unix_state_lock(sk); 1871 unix_state_lock(sk);
1846 skb = skb_dequeue(&sk->sk_receive_queue); 1872 skb = skb_dequeue(&sk->sk_receive_queue);
1847 if (skb == NULL) { 1873 if (skb == NULL) {
1874 unix_sk(sk)->recursion_level = 0;
1848 if (copied >= target) 1875 if (copied >= target)
1849 goto unlock; 1876 goto unlock;
1850 1877
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index c8df6fda0b1f..f89f83bf828e 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -96,7 +96,7 @@ static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
96unsigned int unix_tot_inflight; 96unsigned int unix_tot_inflight;
97 97
98 98
99static struct sock *unix_get_socket(struct file *filp) 99struct sock *unix_get_socket(struct file *filp)
100{ 100{
101 struct sock *u_sock = NULL; 101 struct sock *u_sock = NULL;
102 struct inode *inode = filp->f_path.dentry->d_inode; 102 struct inode *inode = filp->f_path.dentry->d_inode;
@@ -259,9 +259,16 @@ static void inc_inflight_move_tail(struct unix_sock *u)
259} 259}
260 260
261static bool gc_in_progress = false; 261static bool gc_in_progress = false;
262#define UNIX_INFLIGHT_TRIGGER_GC 16000
262 263
263void wait_for_unix_gc(void) 264void wait_for_unix_gc(void)
264{ 265{
266 /*
267 * If number of inflight sockets is insane,
268 * force a garbage collect right now.
269 */
270 if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
271 unix_gc();
265 wait_event(unix_gc_wait, gc_in_progress == false); 272 wait_event(unix_gc_wait, gc_in_progress == false);
266} 273}
267 274
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index d0c92dddb26b..17cd0c04d139 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -44,6 +44,38 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
44 return chan; 44 return chan;
45} 45}
46 46
47static bool can_beacon_sec_chan(struct wiphy *wiphy,
48 struct ieee80211_channel *chan,
49 enum nl80211_channel_type channel_type)
50{
51 struct ieee80211_channel *sec_chan;
52 int diff;
53
54 switch (channel_type) {
55 case NL80211_CHAN_HT40PLUS:
56 diff = 20;
57 break;
58 case NL80211_CHAN_HT40MINUS:
59 diff = -20;
60 break;
61 default:
62 return false;
63 }
64
65 sec_chan = ieee80211_get_channel(wiphy, chan->center_freq + diff);
66 if (!sec_chan)
67 return false;
68
69 /* we'll need a DFS capability later */
70 if (sec_chan->flags & (IEEE80211_CHAN_DISABLED |
71 IEEE80211_CHAN_PASSIVE_SCAN |
72 IEEE80211_CHAN_NO_IBSS |
73 IEEE80211_CHAN_RADAR))
74 return false;
75
76 return true;
77}
78
47int cfg80211_set_freq(struct cfg80211_registered_device *rdev, 79int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
48 struct wireless_dev *wdev, int freq, 80 struct wireless_dev *wdev, int freq,
49 enum nl80211_channel_type channel_type) 81 enum nl80211_channel_type channel_type)
@@ -68,6 +100,28 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
68 if (!chan) 100 if (!chan)
69 return -EINVAL; 101 return -EINVAL;
70 102
103 /* Both channels should be able to initiate communication */
104 if (wdev && (wdev->iftype == NL80211_IFTYPE_ADHOC ||
105 wdev->iftype == NL80211_IFTYPE_AP ||
106 wdev->iftype == NL80211_IFTYPE_AP_VLAN ||
107 wdev->iftype == NL80211_IFTYPE_MESH_POINT ||
108 wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
109 switch (channel_type) {
110 case NL80211_CHAN_HT40PLUS:
111 case NL80211_CHAN_HT40MINUS:
112 if (!can_beacon_sec_chan(&rdev->wiphy, chan,
113 channel_type)) {
114 printk(KERN_DEBUG
115 "cfg80211: Secondary channel not "
116 "allowed to initiate communication\n");
117 return -EINVAL;
118 }
119 break;
120 default:
121 break;
122 }
123 }
124
71 result = rdev->ops->set_channel(&rdev->wiphy, 125 result = rdev->ops->set_channel(&rdev->wiphy,
72 wdev ? wdev->netdev : NULL, 126 wdev ? wdev->netdev : NULL,
73 chan, channel_type); 127 chan, channel_type);
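
can_beacon_sec_chan() maps HT40+ and HT40- to a secondary channel 20 MHz above or below the primary, and cfg80211_set_freq() now rejects HT40 configurations on beaconing interface types when that secondary channel is absent or flagged disabled, passive-scan, no-IBSS or radar. A tiny sketch of the frequency arithmetic only (flag handling omitted):

#include <stdio.h>

enum demo_chan_type { DEMO_HT20, DEMO_HT40PLUS, DEMO_HT40MINUS };

/* Return the centre frequency (MHz) of the secondary 20 MHz channel,
 * or 0 when the channel type has no secondary channel. */
static int demo_sec_chan_freq(int primary_mhz, enum demo_chan_type type)
{
        switch (type) {
        case DEMO_HT40PLUS:
                return primary_mhz + 20;
        case DEMO_HT40MINUS:
                return primary_mhz - 20;
        default:
                return 0;
        }
}

int main(void)
{
        /* channel 36 (5180 MHz) with HT40+ pairs with channel 40 (5200 MHz) */
        printf("5180 HT40+ -> secondary %d MHz\n",
               demo_sec_chan_freq(5180, DEMO_HT40PLUS));
        /* channel 40 (5200 MHz) with HT40- pairs with channel 36 (5180 MHz) */
        printf("5200 HT40- -> secondary %d MHz\n",
               demo_sec_chan_freq(5200, DEMO_HT40MINUS));
        return 0;
}
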
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c506241f8637..4e78e3f26798 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -224,8 +224,8 @@ static int nl80211_prepare_netdev_dump(struct sk_buff *skb,
224 } 224 }
225 225
226 *rdev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx); 226 *rdev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx);
227 if (IS_ERR(dev)) { 227 if (IS_ERR(*rdev)) {
228 err = PTR_ERR(dev); 228 err = PTR_ERR(*rdev);
229 goto out_rtnl; 229 goto out_rtnl;
230 } 230 }
231 231
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 771bab00754b..55187c8f6420 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -61,6 +61,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
61 while (len > 0) { 61 while (len > 0) {
62 switch (*p & X25_FAC_CLASS_MASK) { 62 switch (*p & X25_FAC_CLASS_MASK) {
63 case X25_FAC_CLASS_A: 63 case X25_FAC_CLASS_A:
64 if (len < 2)
65 return 0;
64 switch (*p) { 66 switch (*p) {
65 case X25_FAC_REVERSE: 67 case X25_FAC_REVERSE:
66 if((p[1] & 0x81) == 0x81) { 68 if((p[1] & 0x81) == 0x81) {
@@ -104,6 +106,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
104 len -= 2; 106 len -= 2;
105 break; 107 break;
106 case X25_FAC_CLASS_B: 108 case X25_FAC_CLASS_B:
109 if (len < 3)
110 return 0;
107 switch (*p) { 111 switch (*p) {
108 case X25_FAC_PACKET_SIZE: 112 case X25_FAC_PACKET_SIZE:
109 facilities->pacsize_in = p[1]; 113 facilities->pacsize_in = p[1];
@@ -125,6 +129,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
125 len -= 3; 129 len -= 3;
126 break; 130 break;
127 case X25_FAC_CLASS_C: 131 case X25_FAC_CLASS_C:
132 if (len < 4)
133 return 0;
128 printk(KERN_DEBUG "X.25: unknown facility %02X, " 134 printk(KERN_DEBUG "X.25: unknown facility %02X, "
129 "values %02X, %02X, %02X\n", 135 "values %02X, %02X, %02X\n",
130 p[0], p[1], p[2], p[3]); 136 p[0], p[1], p[2], p[3]);
@@ -132,26 +138,26 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
132 len -= 4; 138 len -= 4;
133 break; 139 break;
134 case X25_FAC_CLASS_D: 140 case X25_FAC_CLASS_D:
141 if (len < p[1] + 2)
142 return 0;
135 switch (*p) { 143 switch (*p) {
136 case X25_FAC_CALLING_AE: 144 case X25_FAC_CALLING_AE:
137 if (p[1] > X25_MAX_DTE_FACIL_LEN) 145 if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
138 break; 146 return 0;
139 dte_facs->calling_len = p[2]; 147 dte_facs->calling_len = p[2];
140 memcpy(dte_facs->calling_ae, &p[3], p[1] - 1); 148 memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
141 *vc_fac_mask |= X25_MASK_CALLING_AE; 149 *vc_fac_mask |= X25_MASK_CALLING_AE;
142 break; 150 break;
143 case X25_FAC_CALLED_AE: 151 case X25_FAC_CALLED_AE:
144 if (p[1] > X25_MAX_DTE_FACIL_LEN) 152 if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
145 break; 153 return 0;
146 dte_facs->called_len = p[2]; 154 dte_facs->called_len = p[2];
147 memcpy(dte_facs->called_ae, &p[3], p[1] - 1); 155 memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
148 *vc_fac_mask |= X25_MASK_CALLED_AE; 156 *vc_fac_mask |= X25_MASK_CALLED_AE;
149 break; 157 break;
150 default: 158 default:
151 printk(KERN_DEBUG "X.25: unknown facility %02X," 159 printk(KERN_DEBUG "X.25: unknown facility %02X,"
152 "length %d, values %02X, %02X, " 160 "length %d\n", p[0], p[1]);
153 "%02X, %02X\n",
154 p[0], p[1], p[2], p[3], p[4], p[5]);
155 break; 161 break;
156 } 162 }
157 len -= p[1] + 2; 163 len -= p[1] + 2;
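
The X.25 facilities parser walks class-coded entries whose size depends on the class (2, 3 or 4 bytes, or a length byte for class D), and the added checks verify that each entry fits in the remaining buffer before any of its bytes are read. A userspace sketch of the same defensive parse loop, with stand-in constants rather than the real X.25 definitions:

#include <stdio.h>

#define DEMO_CLASS_MASK 0xC0
#define DEMO_CLASS_A    0x00    /* 2-byte entries   */
#define DEMO_CLASS_B    0x40    /* 3-byte entries   */
#define DEMO_CLASS_C    0x80    /* 4-byte entries   */
#define DEMO_CLASS_D    0xC0    /* length in byte 1 */

/* Returns the number of entries parsed, or -1 if the buffer is malformed. */
static int demo_parse_facilities(const unsigned char *p, int len)
{
        int entries = 0;

        while (len > 0) {
                int entry_len;

                switch (p[0] & DEMO_CLASS_MASK) {
                case DEMO_CLASS_A: entry_len = 2; break;
                case DEMO_CLASS_B: entry_len = 3; break;
                case DEMO_CLASS_C: entry_len = 4; break;
                default:           /* DEMO_CLASS_D */
                        if (len < 2)
                                return -1;       /* cannot even read the length byte */
                        entry_len = p[1] + 2;
                        break;
                }

                if (len < entry_len)
                        return -1;               /* entry runs past the buffer */

                /* ... a real parser would interpret p[0..entry_len-1] here ... */
                p += entry_len;
                len -= entry_len;
                entries++;
        }
        return entries;
}

int main(void)
{
        const unsigned char good[] = { 0x01, 0x00, 0x42, 0x07, 0x07, 0xC3, 0x02, 0xAA, 0xBB };
        const unsigned char truncated[] = { 0xC3, 0x20, 0x01 };   /* claims 0x20 payload bytes */

        printf("good buffer:      %d entries\n", demo_parse_facilities(good, sizeof(good)));
        printf("truncated buffer: %d (rejected)\n", demo_parse_facilities(truncated, sizeof(truncated)));
        return 0;
}
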
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 63178961efac..f729f022be69 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -119,6 +119,8 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
119 &x25->vc_facil_mask); 119 &x25->vc_facil_mask);
120 if (len > 0) 120 if (len > 0)
121 skb_pull(skb, len); 121 skb_pull(skb, len);
122 else
123 return -1;
122 /* 124 /*
123 * Copy any Call User Data. 125 * Copy any Call User Data.
124 */ 126 */
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 73e7b954ad28..b25c6463c3e9 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -394,6 +394,7 @@ void __exit x25_link_free(void)
394 list_for_each_safe(entry, tmp, &x25_neigh_list) { 394 list_for_each_safe(entry, tmp, &x25_neigh_list) {
395 nb = list_entry(entry, struct x25_neigh, node); 395 nb = list_entry(entry, struct x25_neigh, node);
396 __x25_remove_neigh(nb); 396 __x25_remove_neigh(nb);
397 dev_put(nb->dev);
397 } 398 }
398 write_unlock_bh(&x25_neigh_list_lock); 399 write_unlock_bh(&x25_neigh_list_lock);
399} 400}
diff --git a/net/xfrm/xfrm_hash.c b/net/xfrm/xfrm_hash.c
index a2023ec52329..1e98bc0fe0a5 100644
--- a/net/xfrm/xfrm_hash.c
+++ b/net/xfrm/xfrm_hash.c
@@ -19,7 +19,7 @@ struct hlist_head *xfrm_hash_alloc(unsigned int sz)
19 if (sz <= PAGE_SIZE) 19 if (sz <= PAGE_SIZE)
20 n = kzalloc(sz, GFP_KERNEL); 20 n = kzalloc(sz, GFP_KERNEL);
21 else if (hashdist) 21 else if (hashdist)
22 n = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); 22 n = vzalloc(sz);
23 else 23 else
24 n = (struct hlist_head *) 24 n = (struct hlist_head *)
25 __get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 25 __get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index eb96ce52f178..220ebc05c7af 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1268,7 +1268,7 @@ struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1268 1268
1269 return xc; 1269 return xc;
1270error: 1270error:
1271 kfree(xc); 1271 xfrm_state_put(xc);
1272 return NULL; 1272 return NULL;
1273} 1273}
1274EXPORT_SYMBOL(xfrm_state_migrate); 1274EXPORT_SYMBOL(xfrm_state_migrate);