Diffstat (limited to 'net/core')
-rw-r--r--  net/core/Makefile           |   1
-rw-r--r--  net/core/datagram.c         |   6
-rw-r--r--  net/core/dev.c              |  79
-rw-r--r--  net/core/ethtool.c          |  49
-rw-r--r--  net/core/flow.c             |   2
-rw-r--r--  net/core/neighbour.c        |  15
-rw-r--r--  net/core/netfilter.c        | 648
-rw-r--r--  net/core/request_sock.c     |  28
-rw-r--r--  net/core/rtnetlink.c        |   9
-rw-r--r--  net/core/skbuff.c           | 158
-rw-r--r--  net/core/sock.c             | 133
-rw-r--r--  net/core/sysctl_net_core.c  |   9
-rw-r--r--  net/core/utils.c            |   2
-rw-r--r--  net/core/wireless.c         |   8
14 files changed, 370 insertions(+), 777 deletions(-)
diff --git a/net/core/Makefile b/net/core/Makefile
index f5f5e58943e8..630da0f0579e 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -12,7 +12,6 @@ obj-y += dev.o ethtool.o dev_mcast.o dst.o \
 
 obj-$(CONFIG_XFRM) += flow.o
 obj-$(CONFIG_SYSFS) += net-sysfs.o
-obj-$(CONFIG_NETFILTER) += netfilter.o
 obj-$(CONFIG_NET_DIVERT) += dv.o
 obj-$(CONFIG_NET_PKTGEN) += pktgen.o
 obj-$(CONFIG_NET_RADIO) += wireless.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index fcee054b6f75..da9bf71421a7 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -43,7 +43,6 @@
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/inet.h>
-#include <linux/tcp.h>
 #include <linux/netdevice.h>
 #include <linux/rtnetlink.h>
 #include <linux/poll.h>
@@ -51,9 +50,10 @@
 
 #include <net/protocol.h>
 #include <linux/skbuff.h>
-#include <net/sock.h>
-#include <net/checksum.h>
 
+#include <net/checksum.h>
+#include <net/sock.h>
+#include <net/tcp_states.h>
 
 /*
  * Is a socket 'connection oriented' ?
diff --git a/net/core/dev.c b/net/core/dev.c
index faf59b02c4bf..c01511e3d0c1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -267,10 +267,6 @@ void dev_add_pack(struct packet_type *pt)
 	spin_unlock_bh(&ptype_lock);
 }
 
-extern void linkwatch_run_queue(void);
-
-
-
 /**
  * __dev_remove_pack - remove packet handler
  * @pt: packet type declaration
@@ -1009,13 +1005,22 @@ void net_disable_timestamp(void)
 	atomic_dec(&netstamp_needed);
 }
 
-static inline void net_timestamp(struct timeval *stamp)
+void __net_timestamp(struct sk_buff *skb)
+{
+	struct timeval tv;
+
+	do_gettimeofday(&tv);
+	skb_set_timestamp(skb, &tv);
+}
+EXPORT_SYMBOL(__net_timestamp);
+
+static inline void net_timestamp(struct sk_buff *skb)
 {
 	if (atomic_read(&netstamp_needed))
-		do_gettimeofday(stamp);
+		__net_timestamp(skb);
 	else {
-		stamp->tv_sec = 0;
-		stamp->tv_usec = 0;
+		skb->tstamp.off_sec = 0;
+		skb->tstamp.off_usec = 0;
 	}
 }
 
@@ -1027,7 +1032,8 @@ static inline void net_timestamp(struct timeval *stamp)
 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct packet_type *ptype;
-	net_timestamp(&skb->stamp);
+
+	net_timestamp(skb);
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
@@ -1058,7 +1064,7 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 
 			skb2->h.raw = skb2->nh.raw;
 			skb2->pkt_type = PACKET_OUTGOING;
-			ptype->func(skb2, skb->dev, ptype);
+			ptype->func(skb2, skb->dev, ptype, skb->dev);
 		}
 	}
 	rcu_read_unlock();
@@ -1123,8 +1129,6 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 #define illegal_highdma(dev, skb) (0)
 #endif
 
-extern void skb_release_data(struct sk_buff *);
-
 /* Keep head the same: replace data */
 int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
@@ -1379,8 +1383,8 @@ int netif_rx(struct sk_buff *skb)
 	if (netpoll_rx(skb))
 		return NET_RX_DROP;
 
-	if (!skb->stamp.tv_sec)
-		net_timestamp(&skb->stamp);
+	if (!skb->tstamp.off_sec)
+		net_timestamp(skb);
 
 	/*
 	 * The code is rearranged so that the path is the most
@@ -1425,14 +1429,14 @@ int netif_rx_ni(struct sk_buff *skb)
 
 EXPORT_SYMBOL(netif_rx_ni);
 
-static __inline__ void skb_bond(struct sk_buff *skb)
+static inline struct net_device *skb_bond(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
 
-	if (dev->master) {
-		skb->real_dev = skb->dev;
+	if (dev->master)
 		skb->dev = dev->master;
-	}
+
+	return dev;
 }
 
 static void net_tx_action(struct softirq_action *h)
@@ -1482,10 +1486,11 @@ static void net_tx_action(struct softirq_action *h)
 }
 
 static __inline__ int deliver_skb(struct sk_buff *skb,
-				  struct packet_type *pt_prev)
+				  struct packet_type *pt_prev,
+				  struct net_device *orig_dev)
 {
 	atomic_inc(&skb->users);
-	return pt_prev->func(skb, skb->dev, pt_prev);
+	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 }
 
 #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
@@ -1496,7 +1501,8 @@ struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
 void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);
 
 static __inline__ int handle_bridge(struct sk_buff **pskb,
-				    struct packet_type **pt_prev, int *ret)
+				    struct packet_type **pt_prev, int *ret,
+				    struct net_device *orig_dev)
 {
 	struct net_bridge_port *port;
 
@@ -1505,14 +1511,14 @@ static __inline__ int handle_bridge(struct sk_buff **pskb,
 		return 0;
 
 	if (*pt_prev) {
-		*ret = deliver_skb(*pskb, *pt_prev);
+		*ret = deliver_skb(*pskb, *pt_prev, orig_dev);
 		*pt_prev = NULL;
 	}
 
 	return br_handle_frame_hook(port, pskb);
 }
 #else
-#define handle_bridge(skb, pt_prev, ret)	(0)
+#define handle_bridge(skb, pt_prev, ret, orig_dev)	(0)
 #endif
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -1534,17 +1540,14 @@ static int ing_filter(struct sk_buff *skb)
 	__u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
 	if (MAX_RED_LOOP < ttl++) {
 		printk("Redir loop detected Dropping packet (%s->%s)\n",
-			skb->input_dev?skb->input_dev->name:"??",skb->dev->name);
+			skb->input_dev->name, skb->dev->name);
 		return TC_ACT_SHOT;
 	}
 
 	skb->tc_verd = SET_TC_RTTL(skb->tc_verd,ttl);
 
 	skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);
-	if (NULL == skb->input_dev) {
-		skb->input_dev = skb->dev;
-		printk("ing_filter: fixed %s out %s\n",skb->input_dev->name,skb->dev->name);
-	}
+
 	spin_lock(&dev->ingress_lock);
 	if ((q = dev->qdisc_ingress) != NULL)
 		result = q->enqueue(skb, q);
@@ -1559,6 +1562,7 @@ static int ing_filter(struct sk_buff *skb)
 int netif_receive_skb(struct sk_buff *skb)
 {
 	struct packet_type *ptype, *pt_prev;
+	struct net_device *orig_dev;
 	int ret = NET_RX_DROP;
 	unsigned short type;
 
@@ -1566,10 +1570,13 @@ int netif_receive_skb(struct sk_buff *skb)
 	if (skb->dev->poll && netpoll_rx(skb))
 		return NET_RX_DROP;
 
-	if (!skb->stamp.tv_sec)
-		net_timestamp(&skb->stamp);
+	if (!skb->tstamp.off_sec)
+		net_timestamp(skb);
+
+	if (!skb->input_dev)
+		skb->input_dev = skb->dev;
 
-	skb_bond(skb);
+	orig_dev = skb_bond(skb);
 
 	__get_cpu_var(netdev_rx_stat).total++;
 
@@ -1590,14 +1597,14 @@ int netif_receive_skb(struct sk_buff *skb)
 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
 		if (!ptype->dev || ptype->dev == skb->dev) {
 			if (pt_prev)
-				ret = deliver_skb(skb, pt_prev);
+				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
 		}
 	}
 
 #ifdef CONFIG_NET_CLS_ACT
 	if (pt_prev) {
-		ret = deliver_skb(skb, pt_prev);
+		ret = deliver_skb(skb, pt_prev, orig_dev);
 		pt_prev = NULL; /* noone else should process this after*/
 	} else {
 		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
@@ -1616,7 +1623,7 @@ ncls:
 
 	handle_diverter(skb);
 
-	if (handle_bridge(&skb, &pt_prev, &ret))
+	if (handle_bridge(&skb, &pt_prev, &ret, orig_dev))
 		goto out;
 
 	type = skb->protocol;
@@ -1624,13 +1631,13 @@ ncls:
 		if (ptype->type == type &&
 		    (!ptype->dev || ptype->dev == skb->dev)) {
 			if (pt_prev)
-				ret = deliver_skb(skb, pt_prev);
+				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
 		}
 	}
 
 	if (pt_prev) {
-		ret = pt_prev->func(skb, skb->dev, pt_prev);
+		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 	} else {
 		kfree_skb(skb);
 		/* Jamal, now you will not able to escape explaining
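
The dev.c hunks above thread a new orig_dev argument through every packet handler: skb_bond() now returns the device a frame actually arrived on before bonding rewrites skb->dev, replacing the removed skb->real_dev field. A minimal packet_type receiver under the new four-argument signature would look roughly like the sketch below (my_rcv and my_pt are illustrative names, not part of this diff):

static int my_rcv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *pt, struct net_device *orig_dev)
{
	/* dev may be a bond master; orig_dev is the real ingress device */
	kfree_skb(skb);
	return 0;
}

static struct packet_type my_pt = {
	.type = __constant_htons(ETH_P_ALL),
	.func = my_rcv,
};
/* registered with dev_add_pack(&my_pt), removed with dev_remove_pack(&my_pt) */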
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index a3eeb88e1c81..289c1b5a8e4a 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -81,6 +81,18 @@ int ethtool_op_set_tso(struct net_device *dev, u32 data)
 	return 0;
 }
 
+int ethtool_op_get_perm_addr(struct net_device *dev, struct ethtool_perm_addr *addr, u8 *data)
+{
+	unsigned char len = dev->addr_len;
+	if ( addr->size < len )
+		return -ETOOSMALL;
+
+	addr->size = len;
+	memcpy(data, dev->perm_addr, len);
+	return 0;
+}
+
+
 /* Handlers for each ethtool command */
 
 static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
@@ -683,6 +695,39 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
 	return ret;
 }
 
+static int ethtool_get_perm_addr(struct net_device *dev, void *useraddr)
+{
+	struct ethtool_perm_addr epaddr;
+	u8 *data;
+	int ret;
+
+	if (!dev->ethtool_ops->get_perm_addr)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&epaddr,useraddr,sizeof(epaddr)))
+		return -EFAULT;
+
+	data = kmalloc(epaddr.size, GFP_USER);
+	if (!data)
+		return -ENOMEM;
+
+	ret = dev->ethtool_ops->get_perm_addr(dev,&epaddr,data);
+	if (ret)
+		return ret;
+
+	ret = -EFAULT;
+	if (copy_to_user(useraddr, &epaddr, sizeof(epaddr)))
+		goto out;
+	useraddr += sizeof(epaddr);
+	if (copy_to_user(useraddr, data, epaddr.size))
+		goto out;
+	ret = 0;
+
+ out:
+	kfree(data);
+	return ret;
+}
+
 /* The main entry point in this file.  Called from net/core/dev.c */
 
 int dev_ethtool(struct ifreq *ifr)
@@ -806,6 +851,9 @@ int dev_ethtool(struct ifreq *ifr)
 	case ETHTOOL_GSTATS:
 		rc = ethtool_get_stats(dev, useraddr);
 		break;
+	case ETHTOOL_GPERMADDR:
+		rc = ethtool_get_perm_addr(dev, useraddr);
+		break;
 	default:
 		rc = -EOPNOTSUPP;
 	}
@@ -826,6 +874,7 @@ int dev_ethtool(struct ifreq *ifr)
 
 EXPORT_SYMBOL(dev_ethtool);
 EXPORT_SYMBOL(ethtool_op_get_link);
+EXPORT_SYMBOL_GPL(ethtool_op_get_perm_addr);
 EXPORT_SYMBOL(ethtool_op_get_sg);
 EXPORT_SYMBOL(ethtool_op_get_tso);
 EXPORT_SYMBOL(ethtool_op_get_tx_csum);
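
The new ETHTOOL_GPERMADDR command travels over the usual SIOCETHTOOL ioctl: userspace sends a struct ethtool_perm_addr header (cmd, size) followed by a buffer, and ethtool_get_perm_addr() copies back the header plus dev->perm_addr. A hedged userspace sketch (assumes an interface named "eth0" whose driver wires up ->get_perm_addr, e.g. through ethtool_op_get_perm_addr; error handling trimmed):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct {
		struct ethtool_perm_addr hdr;
		unsigned char addr[32];		/* receives the MAC */
	} req = { .hdr = { .cmd = ETHTOOL_GPERMADDR, .size = 32 } };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&req;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0) {
		unsigned int i;
		for (i = 0; i < req.hdr.size; i++)	/* size now = addr_len */
			printf("%02x%c", req.addr[i],
			       i + 1 < req.hdr.size ? ':' : '\n');
	}
	close(fd);
	return 0;
}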
diff --git a/net/core/flow.c b/net/core/flow.c
index f289570b15a3..7e95b39de9fd 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -42,7 +42,7 @@ static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
 
 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
 
-static kmem_cache_t *flow_cachep;
+static kmem_cache_t *flow_cachep __read_mostly;
 
 static int flow_lwm, flow_hwm;
 
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 1beb782ac41b..39fc55edf691 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1217,7 +1217,7 @@ static void neigh_proxy_process(unsigned long arg)
 
 	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
 		struct sk_buff *back = skb;
-		long tdif = back->stamp.tv_usec - now;
+		long tdif = NEIGH_CB(back)->sched_next - now;
 
 		skb = skb->next;
 		if (tdif <= 0) {
@@ -1248,8 +1248,9 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
 		kfree_skb(skb);
 		return;
 	}
-	skb->stamp.tv_sec = LOCALLY_ENQUEUED;
-	skb->stamp.tv_usec = sched_next;
+
+	NEIGH_CB(skb)->sched_next = sched_next;
+	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
 
 	spin_lock(&tbl->proxy_queue.lock);
 	if (del_timer(&tbl->proxy_timer)) {
@@ -2342,8 +2343,8 @@ void neigh_app_ns(struct neighbour *n)
 	}
 	nlh = (struct nlmsghdr *)skb->data;
 	nlh->nlmsg_flags = NLM_F_REQUEST;
-	NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
-	netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
+	NETLINK_CB(skb).dst_group = RTNLGRP_NEIGH;
+	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
 }
 
 static void neigh_app_notify(struct neighbour *n)
@@ -2360,8 +2361,8 @@ static void neigh_app_notify(struct neighbour *n)
 		return;
 	}
 	nlh = (struct nlmsghdr *)skb->data;
-	NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
-	netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
+	NETLINK_CB(skb).dst_group = RTNLGRP_NEIGH;
+	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
 }
 
 #endif /* CONFIG_ARPD */
diff --git a/net/core/netfilter.c b/net/core/netfilter.c
deleted file mode 100644
index 076c156d5eda..000000000000
--- a/net/core/netfilter.c
+++ /dev/null
@@ -1,648 +0,0 @@
-/* netfilter.c: look after the filters for various protocols.
- * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
- *
- * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
- * way.
- *
- * Rusty Russell (C)2000 -- This code is GPL.
- *
- * February 2000: Modified by James Morris to have 1 queue per protocol.
- * 15-Mar-2000:   Added NF_REPEAT --RR.
- * 08-May-2003: Internal logging interface added by Jozsef Kadlecsik.
- */
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/netfilter.h>
-#include <net/protocol.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/wait.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/if.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#include <linux/icmp.h>
-#include <net/sock.h>
-#include <net/route.h>
-#include <linux/ip.h>
-
-/* In this code, we can be waiting indefinitely for userspace to
- * service a packet if a hook returns NF_QUEUE.  We could keep a count
- * of skbuffs queued for userspace, and not deregister a hook unless
- * this is zero, but that sucks.  Now, we simply check when the
- * packets come back: if the hook is gone, the packet is discarded. */
-#ifdef CONFIG_NETFILTER_DEBUG
-#define NFDEBUG(format, args...)  printk(format , ## args)
-#else
-#define NFDEBUG(format, args...)
-#endif
-
-/* Sockopts only registered and called from user context, so
-   net locking would be overkill.  Also, [gs]etsockopt calls may
-   sleep. */
-static DECLARE_MUTEX(nf_sockopt_mutex);
-
-struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
-static LIST_HEAD(nf_sockopts);
-static DEFINE_SPINLOCK(nf_hook_lock);
-
-/*
- * A queue handler may be registered for each protocol.  Each is protected by
- * long term mutex.  The handler must provide an an outfn() to accept packets
- * for queueing and must reinject all packets it receives, no matter what.
- */
-static struct nf_queue_handler_t {
-	nf_queue_outfn_t outfn;
-	void *data;
-} queue_handler[NPROTO];
-static DEFINE_RWLOCK(queue_handler_lock);
-
-int nf_register_hook(struct nf_hook_ops *reg)
-{
-	struct list_head *i;
-
-	spin_lock_bh(&nf_hook_lock);
-	list_for_each(i, &nf_hooks[reg->pf][reg->hooknum]) {
-		if (reg->priority < ((struct nf_hook_ops *)i)->priority)
-			break;
-	}
-	list_add_rcu(&reg->list, i->prev);
-	spin_unlock_bh(&nf_hook_lock);
-
-	synchronize_net();
-	return 0;
-}
-
-void nf_unregister_hook(struct nf_hook_ops *reg)
-{
-	spin_lock_bh(&nf_hook_lock);
-	list_del_rcu(&reg->list);
-	spin_unlock_bh(&nf_hook_lock);
-
-	synchronize_net();
-}
-
-/* Do exclusive ranges overlap? */
-static inline int overlap(int min1, int max1, int min2, int max2)
-{
-	return max1 > min2 && min1 < max2;
-}
-
-/* Functions to register sockopt ranges (exclusive). */
-int nf_register_sockopt(struct nf_sockopt_ops *reg)
-{
-	struct list_head *i;
-	int ret = 0;
-
-	if (down_interruptible(&nf_sockopt_mutex) != 0)
-		return -EINTR;
-
-	list_for_each(i, &nf_sockopts) {
-		struct nf_sockopt_ops *ops = (struct nf_sockopt_ops *)i;
-		if (ops->pf == reg->pf
-		    && (overlap(ops->set_optmin, ops->set_optmax,
-				reg->set_optmin, reg->set_optmax)
-			|| overlap(ops->get_optmin, ops->get_optmax,
-				   reg->get_optmin, reg->get_optmax))) {
-			NFDEBUG("nf_sock overlap: %u-%u/%u-%u v %u-%u/%u-%u\n",
-				ops->set_optmin, ops->set_optmax,
-				ops->get_optmin, ops->get_optmax,
-				reg->set_optmin, reg->set_optmax,
-				reg->get_optmin, reg->get_optmax);
-			ret = -EBUSY;
-			goto out;
-		}
-	}
-
-	list_add(&reg->list, &nf_sockopts);
-out:
-	up(&nf_sockopt_mutex);
-	return ret;
-}
-
-void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
-{
-	/* No point being interruptible: we're probably in cleanup_module() */
- restart:
-	down(&nf_sockopt_mutex);
-	if (reg->use != 0) {
-		/* To be woken by nf_sockopt call... */
-		/* FIXME: Stuart Young's name appears gratuitously. */
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		reg->cleanup_task = current;
-		up(&nf_sockopt_mutex);
-		schedule();
-		goto restart;
-	}
-	list_del(&reg->list);
-	up(&nf_sockopt_mutex);
-}
-
-/* Call get/setsockopt() */
-static int nf_sockopt(struct sock *sk, int pf, int val,
-		      char __user *opt, int *len, int get)
-{
-	struct list_head *i;
-	struct nf_sockopt_ops *ops;
-	int ret;
-
-	if (down_interruptible(&nf_sockopt_mutex) != 0)
-		return -EINTR;
-
-	list_for_each(i, &nf_sockopts) {
-		ops = (struct nf_sockopt_ops *)i;
-		if (ops->pf == pf) {
-			if (get) {
-				if (val >= ops->get_optmin
-				    && val < ops->get_optmax) {
-					ops->use++;
-					up(&nf_sockopt_mutex);
-					ret = ops->get(sk, val, opt, len);
-					goto out;
-				}
-			} else {
-				if (val >= ops->set_optmin
-				    && val < ops->set_optmax) {
-					ops->use++;
-					up(&nf_sockopt_mutex);
-					ret = ops->set(sk, val, opt, *len);
-					goto out;
-				}
-			}
-		}
-	}
-	up(&nf_sockopt_mutex);
-	return -ENOPROTOOPT;
-
- out:
-	down(&nf_sockopt_mutex);
-	ops->use--;
-	if (ops->cleanup_task)
-		wake_up_process(ops->cleanup_task);
-	up(&nf_sockopt_mutex);
-	return ret;
-}
-
-int nf_setsockopt(struct sock *sk, int pf, int val, char __user *opt,
-		  int len)
-{
-	return nf_sockopt(sk, pf, val, opt, &len, 0);
-}
-
-int nf_getsockopt(struct sock *sk, int pf, int val, char __user *opt, int *len)
-{
-	return nf_sockopt(sk, pf, val, opt, len, 1);
-}
-
-static unsigned int nf_iterate(struct list_head *head,
-			       struct sk_buff **skb,
-			       int hook,
-			       const struct net_device *indev,
-			       const struct net_device *outdev,
-			       struct list_head **i,
-			       int (*okfn)(struct sk_buff *),
-			       int hook_thresh)
-{
-	unsigned int verdict;
-
-	/*
-	 * The caller must not block between calls to this
-	 * function because of risk of continuing from deleted element.
-	 */
-	list_for_each_continue_rcu(*i, head) {
-		struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;
-
-		if (hook_thresh > elem->priority)
-			continue;
-
-		/* Optimization: we don't need to hold module
-		   reference here, since function can't sleep. --RR */
-		verdict = elem->hook(hook, skb, indev, outdev, okfn);
-		if (verdict != NF_ACCEPT) {
-#ifdef CONFIG_NETFILTER_DEBUG
-			if (unlikely(verdict > NF_MAX_VERDICT)) {
-				NFDEBUG("Evil return from %p(%u).\n",
-					elem->hook, hook);
-				continue;
-			}
-#endif
-			if (verdict != NF_REPEAT)
-				return verdict;
-			*i = (*i)->prev;
-		}
-	}
-	return NF_ACCEPT;
-}
-
-int nf_register_queue_handler(int pf, nf_queue_outfn_t outfn, void *data)
-{
-	int ret;
-
-	write_lock_bh(&queue_handler_lock);
-	if (queue_handler[pf].outfn)
-		ret = -EBUSY;
-	else {
-		queue_handler[pf].outfn = outfn;
-		queue_handler[pf].data = data;
-		ret = 0;
-	}
-	write_unlock_bh(&queue_handler_lock);
-
-	return ret;
-}
-
-/* The caller must flush their queue before this */
-int nf_unregister_queue_handler(int pf)
-{
-	write_lock_bh(&queue_handler_lock);
-	queue_handler[pf].outfn = NULL;
-	queue_handler[pf].data = NULL;
-	write_unlock_bh(&queue_handler_lock);
-
-	return 0;
-}
-
-/*
- * Any packet that leaves via this function must come back
- * through nf_reinject().
- */
-static int nf_queue(struct sk_buff *skb,
-		    struct list_head *elem,
-		    int pf, unsigned int hook,
-		    struct net_device *indev,
-		    struct net_device *outdev,
-		    int (*okfn)(struct sk_buff *))
-{
-	int status;
-	struct nf_info *info;
-#ifdef CONFIG_BRIDGE_NETFILTER
-	struct net_device *physindev = NULL;
-	struct net_device *physoutdev = NULL;
-#endif
-
-	/* QUEUE == DROP if noone is waiting, to be safe. */
-	read_lock(&queue_handler_lock);
-	if (!queue_handler[pf].outfn) {
-		read_unlock(&queue_handler_lock);
-		kfree_skb(skb);
-		return 1;
-	}
-
-	info = kmalloc(sizeof(*info), GFP_ATOMIC);
-	if (!info) {
-		if (net_ratelimit())
-			printk(KERN_ERR "OOM queueing packet %p\n",
-			       skb);
-		read_unlock(&queue_handler_lock);
-		kfree_skb(skb);
-		return 1;
-	}
-
-	*info = (struct nf_info) {
-		(struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn };
-
-	/* If it's going away, ignore hook. */
-	if (!try_module_get(info->elem->owner)) {
-		read_unlock(&queue_handler_lock);
-		kfree(info);
-		return 0;
-	}
-
-	/* Bump dev refs so they don't vanish while packet is out */
-	if (indev) dev_hold(indev);
-	if (outdev) dev_hold(outdev);
-
-#ifdef CONFIG_BRIDGE_NETFILTER
-	if (skb->nf_bridge) {
-		physindev = skb->nf_bridge->physindev;
-		if (physindev) dev_hold(physindev);
-		physoutdev = skb->nf_bridge->physoutdev;
-		if (physoutdev) dev_hold(physoutdev);
-	}
-#endif
-
-	status = queue_handler[pf].outfn(skb, info, queue_handler[pf].data);
-	read_unlock(&queue_handler_lock);
-
-	if (status < 0) {
-		/* James M doesn't say fuck enough. */
-		if (indev) dev_put(indev);
-		if (outdev) dev_put(outdev);
-#ifdef CONFIG_BRIDGE_NETFILTER
-		if (physindev) dev_put(physindev);
-		if (physoutdev) dev_put(physoutdev);
-#endif
-		module_put(info->elem->owner);
-		kfree(info);
-		kfree_skb(skb);
-		return 1;
-	}
-	return 1;
-}
-
-/* Returns 1 if okfn() needs to be executed by the caller,
- * -EPERM for NF_DROP, 0 otherwise. */
-int nf_hook_slow(int pf, unsigned int hook, struct sk_buff **pskb,
-		 struct net_device *indev,
-		 struct net_device *outdev,
-		 int (*okfn)(struct sk_buff *),
-		 int hook_thresh)
-{
-	struct list_head *elem;
-	unsigned int verdict;
-	int ret = 0;
-
-	/* We may already have this, but read-locks nest anyway */
-	rcu_read_lock();
-
-	elem = &nf_hooks[pf][hook];
-next_hook:
-	verdict = nf_iterate(&nf_hooks[pf][hook], pskb, hook, indev,
-			     outdev, &elem, okfn, hook_thresh);
-	if (verdict == NF_ACCEPT || verdict == NF_STOP) {
-		ret = 1;
-		goto unlock;
-	} else if (verdict == NF_DROP) {
-		kfree_skb(*pskb);
-		ret = -EPERM;
-	} else if (verdict == NF_QUEUE) {
-		NFDEBUG("nf_hook: Verdict = QUEUE.\n");
-		if (!nf_queue(*pskb, elem, pf, hook, indev, outdev, okfn))
-			goto next_hook;
-	}
-unlock:
-	rcu_read_unlock();
-	return ret;
-}
-
-void nf_reinject(struct sk_buff *skb, struct nf_info *info,
-		 unsigned int verdict)
-{
-	struct list_head *elem = &info->elem->list;
-	struct list_head *i;
-
-	rcu_read_lock();
-
-	/* Release those devices we held, or Alexey will kill me. */
-	if (info->indev) dev_put(info->indev);
-	if (info->outdev) dev_put(info->outdev);
-#ifdef CONFIG_BRIDGE_NETFILTER
-	if (skb->nf_bridge) {
-		if (skb->nf_bridge->physindev)
-			dev_put(skb->nf_bridge->physindev);
-		if (skb->nf_bridge->physoutdev)
-			dev_put(skb->nf_bridge->physoutdev);
-	}
-#endif
-
-	/* Drop reference to owner of hook which queued us. */
-	module_put(info->elem->owner);
-
-	list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) {
-		if (i == elem)
-			break;
-	}
-
-	if (elem == &nf_hooks[info->pf][info->hook]) {
-		/* The module which sent it to userspace is gone. */
-		NFDEBUG("%s: module disappeared, dropping packet.\n",
-			__FUNCTION__);
-		verdict = NF_DROP;
-	}
-
-	/* Continue traversal iff userspace said ok... */
-	if (verdict == NF_REPEAT) {
-		elem = elem->prev;
-		verdict = NF_ACCEPT;
-	}
-
-	if (verdict == NF_ACCEPT) {
-	next_hook:
-		verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
-				     &skb, info->hook,
-				     info->indev, info->outdev, &elem,
-				     info->okfn, INT_MIN);
-	}
-
-	switch (verdict) {
-	case NF_ACCEPT:
-		info->okfn(skb);
-		break;
-
-	case NF_QUEUE:
-		if (!nf_queue(skb, elem, info->pf, info->hook,
-			      info->indev, info->outdev, info->okfn))
-			goto next_hook;
-		break;
-	}
-	rcu_read_unlock();
-
-	if (verdict == NF_DROP)
-		kfree_skb(skb);
-
-	kfree(info);
-	return;
-}
-
-#ifdef CONFIG_INET
-/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
-int ip_route_me_harder(struct sk_buff **pskb)
-{
-	struct iphdr *iph = (*pskb)->nh.iph;
-	struct rtable *rt;
-	struct flowi fl = {};
-	struct dst_entry *odst;
-	unsigned int hh_len;
-
-	/* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
-	 * packets with foreign saddr to appear on the NF_IP_LOCAL_OUT hook.
-	 */
-	if (inet_addr_type(iph->saddr) == RTN_LOCAL) {
-		fl.nl_u.ip4_u.daddr = iph->daddr;
-		fl.nl_u.ip4_u.saddr = iph->saddr;
-		fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
-		fl.oif = (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0;
-#ifdef CONFIG_IP_ROUTE_FWMARK
-		fl.nl_u.ip4_u.fwmark = (*pskb)->nfmark;
-#endif
-		fl.proto = iph->protocol;
-		if (ip_route_output_key(&rt, &fl) != 0)
-			return -1;
-
-		/* Drop old route. */
-		dst_release((*pskb)->dst);
-		(*pskb)->dst = &rt->u.dst;
-	} else {
-		/* non-local src, find valid iif to satisfy
-		 * rp-filter when calling ip_route_input. */
-		fl.nl_u.ip4_u.daddr = iph->saddr;
-		if (ip_route_output_key(&rt, &fl) != 0)
-			return -1;
-
-		odst = (*pskb)->dst;
-		if (ip_route_input(*pskb, iph->daddr, iph->saddr,
-				   RT_TOS(iph->tos), rt->u.dst.dev) != 0) {
-			dst_release(&rt->u.dst);
-			return -1;
-		}
-		dst_release(&rt->u.dst);
-		dst_release(odst);
-	}
-
-	if ((*pskb)->dst->error)
-		return -1;
-
-	/* Change in oif may mean change in hh_len. */
-	hh_len = (*pskb)->dst->dev->hard_header_len;
-	if (skb_headroom(*pskb) < hh_len) {
-		struct sk_buff *nskb;
-
-		nskb = skb_realloc_headroom(*pskb, hh_len);
-		if (!nskb)
-			return -1;
-		if ((*pskb)->sk)
-			skb_set_owner_w(nskb, (*pskb)->sk);
-		kfree_skb(*pskb);
-		*pskb = nskb;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(ip_route_me_harder);
-
-int skb_ip_make_writable(struct sk_buff **pskb, unsigned int writable_len)
-{
-	struct sk_buff *nskb;
-
-	if (writable_len > (*pskb)->len)
-		return 0;
-
-	/* Not exclusive use of packet?  Must copy. */
-	if (skb_shared(*pskb) || skb_cloned(*pskb))
-		goto copy_skb;
-
-	return pskb_may_pull(*pskb, writable_len);
-
-copy_skb:
-	nskb = skb_copy(*pskb, GFP_ATOMIC);
-	if (!nskb)
-		return 0;
-	BUG_ON(skb_is_nonlinear(nskb));
-
-	/* Rest of kernel will get very unhappy if we pass it a
-	   suddenly-orphaned skbuff */
-	if ((*pskb)->sk)
-		skb_set_owner_w(nskb, (*pskb)->sk);
-	kfree_skb(*pskb);
-	*pskb = nskb;
-	return 1;
-}
-EXPORT_SYMBOL(skb_ip_make_writable);
-#endif /*CONFIG_INET*/
-
-/* Internal logging interface, which relies on the real
-   LOG target modules */
-
-#define NF_LOG_PREFIXLEN 128
-
-static nf_logfn *nf_logging[NPROTO]; /* = NULL */
-static int reported = 0;
-static DEFINE_SPINLOCK(nf_log_lock);
-
-int nf_log_register(int pf, nf_logfn *logfn)
-{
-	int ret = -EBUSY;
-
-	/* Any setup of logging members must be done before
-	 * substituting pointer. */
-	spin_lock(&nf_log_lock);
-	if (!nf_logging[pf]) {
-		rcu_assign_pointer(nf_logging[pf], logfn);
-		ret = 0;
-	}
-	spin_unlock(&nf_log_lock);
-	return ret;
-}
-
-void nf_log_unregister(int pf, nf_logfn *logfn)
-{
-	spin_lock(&nf_log_lock);
-	if (nf_logging[pf] == logfn)
-		nf_logging[pf] = NULL;
-	spin_unlock(&nf_log_lock);
-
-	/* Give time to concurrent readers. */
-	synchronize_net();
-}
-
-void nf_log_packet(int pf,
-		   unsigned int hooknum,
-		   const struct sk_buff *skb,
-		   const struct net_device *in,
-		   const struct net_device *out,
-		   const char *fmt, ...)
-{
-	va_list args;
-	char prefix[NF_LOG_PREFIXLEN];
-	nf_logfn *logfn;
-
-	rcu_read_lock();
-	logfn = rcu_dereference(nf_logging[pf]);
-	if (logfn) {
-		va_start(args, fmt);
-		vsnprintf(prefix, sizeof(prefix), fmt, args);
-		va_end(args);
-		/* We must read logging before nf_logfn[pf] */
-		logfn(hooknum, skb, in, out, prefix);
-	} else if (!reported) {
-		printk(KERN_WARNING "nf_log_packet: can\'t log yet, "
-		       "no backend logging module loaded in!\n");
-		reported++;
-	}
-	rcu_read_unlock();
-}
-EXPORT_SYMBOL(nf_log_register);
-EXPORT_SYMBOL(nf_log_unregister);
-EXPORT_SYMBOL(nf_log_packet);
-
-/* This does not belong here, but locally generated errors need it if connection
-   tracking in use: without this, connection may not be in hash table, and hence
-   manufactured ICMP or RST packets will not be associated with it. */
-void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
-
-void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
-{
-	void (*attach)(struct sk_buff *, struct sk_buff *);
-
-	if (skb->nfct && (attach = ip_ct_attach) != NULL) {
-		mb(); /* Just to be sure: must be read before executing this */
-		attach(new, skb);
-	}
-}
-
-void __init netfilter_init(void)
-{
-	int i, h;
-
-	for (i = 0; i < NPROTO; i++) {
-		for (h = 0; h < NF_MAX_HOOKS; h++)
-			INIT_LIST_HEAD(&nf_hooks[i][h]);
-	}
-}
-
-EXPORT_SYMBOL(ip_ct_attach);
-EXPORT_SYMBOL(nf_ct_attach);
-EXPORT_SYMBOL(nf_getsockopt);
-EXPORT_SYMBOL(nf_hook_slow);
-EXPORT_SYMBOL(nf_hooks);
-EXPORT_SYMBOL(nf_register_hook);
-EXPORT_SYMBOL(nf_register_queue_handler);
-EXPORT_SYMBOL(nf_register_sockopt);
-EXPORT_SYMBOL(nf_reinject);
-EXPORT_SYMBOL(nf_setsockopt);
-EXPORT_SYMBOL(nf_unregister_hook);
-EXPORT_SYMBOL(nf_unregister_queue_handler);
-EXPORT_SYMBOL(nf_unregister_sockopt);
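
net/core/netfilter.c is deleted outright here (the Makefile hunk above stops building it); the hook machinery it implemented lives on outside net/core rather than disappearing from the kernel. For orientation, a minimal consumer of the API this file exported -- nf_register_hook() inserting a priority-sorted nf_hook_ops -- looks roughly like the sketch below under the 2.6-era hook signature (my_hook/my_ops are illustrative names, not part of this diff):

#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/skbuff.h>

static unsigned int my_hook(unsigned int hooknum, struct sk_buff **pskb,
			    const struct net_device *in,
			    const struct net_device *out,
			    int (*okfn)(struct sk_buff *))
{
	return NF_ACCEPT;			/* pass every packet through */
}

static struct nf_hook_ops my_ops = {
	.hook		= my_hook,
	.owner		= THIS_MODULE,
	.pf		= PF_INET,
	.hooknum	= NF_IP_PRE_ROUTING,
	.priority	= NF_IP_PRI_FIRST,	/* sorted into the list by priority */
};

static int __init my_init(void)
{
	return nf_register_hook(&my_ops);
}

static void __exit my_exit(void)
{
	nf_unregister_hook(&my_ops);		/* synchronize_net() runs inside */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");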
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index bb55675f0685..b8203de5ff07 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -32,7 +32,6 @@
  * Further increasing requires to change hash table size.
  */
 int sysctl_max_syn_backlog = 256;
-EXPORT_SYMBOL(sysctl_max_syn_backlog);
 
 int reqsk_queue_alloc(struct request_sock_queue *queue,
 		      const int nr_table_entries)
@@ -53,6 +52,8 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
 	rwlock_init(&queue->syn_wait_lock);
 	queue->rskq_accept_head = queue->rskq_accept_head = NULL;
+	queue->rskq_defer_accept = 0;
+	lopt->nr_table_entries = nr_table_entries;
 
 	write_lock_bh(&queue->syn_wait_lock);
 	queue->listen_opt = lopt;
@@ -62,3 +63,28 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 }
 
 EXPORT_SYMBOL(reqsk_queue_alloc);
+
+void reqsk_queue_destroy(struct request_sock_queue *queue)
+{
+	/* make all the listen_opt local to us */
+	struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
+
+	if (lopt->qlen != 0) {
+		int i;
+
+		for (i = 0; i < lopt->nr_table_entries; i++) {
+			struct request_sock *req;
+
+			while ((req = lopt->syn_table[i]) != NULL) {
+				lopt->syn_table[i] = req->dl_next;
+				lopt->qlen--;
+				reqsk_free(req);
+			}
+		}
+	}
+
+	BUG_TRAP(lopt->qlen == 0);
+	kfree(lopt);
+}
+
+EXPORT_SYMBOL(reqsk_queue_destroy);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 4b1bb30e6381..9bed7569ce3f 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -148,7 +148,7 @@ int rtnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo)
 {
 	int err = 0;
 
-	NETLINK_CB(skb).dst_groups = group;
+	NETLINK_CB(skb).dst_group = group;
 	if (echo)
 		atomic_inc(&skb->users);
 	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
@@ -458,8 +458,8 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
 		kfree_skb(skb);
 		return;
 	}
-	NETLINK_CB(skb).dst_groups = RTMGRP_LINK;
-	netlink_broadcast(rtnl, skb, 0, RTMGRP_LINK, GFP_KERNEL);
+	NETLINK_CB(skb).dst_group = RTNLGRP_LINK;
+	netlink_broadcast(rtnl, skb, 0, RTNLGRP_LINK, GFP_KERNEL);
 }
 
 static int rtnetlink_done(struct netlink_callback *cb)
@@ -708,7 +708,8 @@ void __init rtnetlink_init(void)
 	if (!rta_buf)
 		panic("rtnetlink_init: cannot allocate rta_buf\n");
 
-	rtnl = netlink_kernel_create(NETLINK_ROUTE, rtnetlink_rcv);
+	rtnl = netlink_kernel_create(NETLINK_ROUTE, RTNLGRP_MAX, rtnetlink_rcv,
+				     THIS_MODULE);
 	if (rtnl == NULL)
 		panic("rtnetlink_init: cannot initialize rtnetlink\n");
 	netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7eab867ede59..f80a28785610 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -68,7 +68,10 @@
 #include <asm/uaccess.h>
 #include <asm/system.h>
 
-static kmem_cache_t *skbuff_head_cache;
+static kmem_cache_t *skbuff_head_cache __read_mostly;
+static kmem_cache_t *skbuff_fclone_cache __read_mostly;
+
+struct timeval __read_mostly skb_tv_base;
 
 /*
  * Keep out-of-line to prevent kernel bloat.
@@ -118,7 +121,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
  */
 
 /**
- * alloc_skb - allocate a network buffer
+ * __alloc_skb - allocate a network buffer
  * @size: size to allocate
  * @gfp_mask: allocation mask
  *
@@ -129,14 +132,20 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
  * Buffers may only be allocated from interrupts using a @gfp_mask of
  * %GFP_ATOMIC.
  */
-struct sk_buff *alloc_skb(unsigned int size, unsigned int __nocast gfp_mask)
+struct sk_buff *__alloc_skb(unsigned int size, unsigned int __nocast gfp_mask,
+			    int fclone)
 {
 	struct sk_buff *skb;
 	u8 *data;
 
 	/* Get the HEAD */
-	skb = kmem_cache_alloc(skbuff_head_cache,
-			       gfp_mask & ~__GFP_DMA);
+	if (fclone)
+		skb = kmem_cache_alloc(skbuff_fclone_cache,
+				       gfp_mask & ~__GFP_DMA);
+	else
+		skb = kmem_cache_alloc(skbuff_head_cache,
+				       gfp_mask & ~__GFP_DMA);
+
 	if (!skb)
 		goto out;
 
@@ -153,7 +162,15 @@ struct sk_buff *alloc_skb(unsigned int size, unsigned int __nocast gfp_mask)
 	skb->data = data;
 	skb->tail = data;
 	skb->end  = data + size;
+	if (fclone) {
+		struct sk_buff *child = skb + 1;
+		atomic_t *fclone_ref = (atomic_t *) (child + 1);
 
+		skb->fclone = SKB_FCLONE_ORIG;
+		atomic_set(fclone_ref, 1);
+
+		child->fclone = SKB_FCLONE_UNAVAILABLE;
+	}
 	atomic_set(&(skb_shinfo(skb)->dataref), 1);
 	skb_shinfo(skb)->nr_frags = 0;
 	skb_shinfo(skb)->tso_size = 0;
@@ -266,8 +283,34 @@ void skb_release_data(struct sk_buff *skb)
  */
 void kfree_skbmem(struct sk_buff *skb)
 {
+	struct sk_buff *other;
+	atomic_t *fclone_ref;
+
 	skb_release_data(skb);
-	kmem_cache_free(skbuff_head_cache, skb);
+	switch (skb->fclone) {
+	case SKB_FCLONE_UNAVAILABLE:
+		kmem_cache_free(skbuff_head_cache, skb);
+		break;
+
+	case SKB_FCLONE_ORIG:
+		fclone_ref = (atomic_t *) (skb + 2);
+		if (atomic_dec_and_test(fclone_ref))
+			kmem_cache_free(skbuff_fclone_cache, skb);
+		break;
+
+	case SKB_FCLONE_CLONE:
+		fclone_ref = (atomic_t *) (skb + 1);
+		other = skb - 1;
+
+		/* The clone portion is available for
+		 * fast-cloning again.
+		 */
+		skb->fclone = SKB_FCLONE_UNAVAILABLE;
+
+		if (atomic_dec_and_test(fclone_ref))
+			kmem_cache_free(skbuff_fclone_cache, other);
+		break;
+	};
 }
 
 /**
@@ -281,8 +324,6 @@ void kfree_skbmem(struct sk_buff *skb)
 
 void __kfree_skb(struct sk_buff *skb)
 {
-	BUG_ON(skb->list != NULL);
-
 	dst_release(skb->dst);
 #ifdef CONFIG_XFRM
 	secpath_put(skb->sp);
@@ -302,7 +343,6 @@ void __kfree_skb(struct sk_buff *skb)
 	skb->tc_index = 0;
 #ifdef CONFIG_NET_CLS_ACT
 	skb->tc_verd = 0;
-	skb->tc_classid = 0;
 #endif
 #endif
 
@@ -325,19 +365,27 @@ void __kfree_skb(struct sk_buff *skb)
 
 struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
-	struct sk_buff *n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
+	struct sk_buff *n;
 
-	if (!n)
-		return NULL;
+	n = skb + 1;
+	if (skb->fclone == SKB_FCLONE_ORIG &&
+	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
+		atomic_t *fclone_ref = (atomic_t *) (n + 1);
+		n->fclone = SKB_FCLONE_CLONE;
+		atomic_inc(fclone_ref);
+	} else {
+		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
+		if (!n)
+			return NULL;
+		n->fclone = SKB_FCLONE_UNAVAILABLE;
+	}
 
 #define C(x) n->x = skb->x
 
 	n->next = n->prev = NULL;
-	n->list = NULL;
 	n->sk = NULL;
-	C(stamp);
+	C(tstamp);
 	C(dev);
-	C(real_dev);
 	C(h);
 	C(nh);
 	C(mac);
@@ -361,7 +409,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 	n->destructor = NULL;
 #ifdef CONFIG_NETFILTER
 	C(nfmark);
-	C(nfcache);
 	C(nfct);
 	nf_conntrack_get(skb->nfct);
 	C(nfctinfo);
@@ -370,9 +417,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 	nf_bridge_get(skb->nf_bridge);
 #endif
 #endif /*CONFIG_NETFILTER*/
-#if defined(CONFIG_HIPPI)
-	C(private);
-#endif
 #ifdef CONFIG_NET_SCHED
 	C(tc_index);
 #ifdef CONFIG_NET_CLS_ACT
@@ -380,7 +424,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 	n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
 	n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
 	C(input_dev);
-	C(tc_classid);
 #endif
 
 #endif
@@ -404,10 +447,8 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
  */
 	unsigned long offset = new->data - old->data;
 
-	new->list = NULL;
 	new->sk = NULL;
 	new->dev = old->dev;
-	new->real_dev = old->real_dev;
 	new->priority = old->priority;
 	new->protocol = old->protocol;
 	new->dst = dst_clone(old->dst);
@@ -419,12 +460,12 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->mac.raw = old->mac.raw + offset;
 	memcpy(new->cb, old->cb, sizeof(old->cb));
 	new->local_df = old->local_df;
+	new->fclone = SKB_FCLONE_UNAVAILABLE;
 	new->pkt_type = old->pkt_type;
-	new->stamp = old->stamp;
+	new->tstamp = old->tstamp;
 	new->destructor = NULL;
 #ifdef CONFIG_NETFILTER
 	new->nfmark = old->nfmark;
-	new->nfcache = old->nfcache;
 	new->nfct = old->nfct;
 	nf_conntrack_get(old->nfct);
 	new->nfctinfo = old->nfctinfo;
@@ -1344,50 +1385,43 @@ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
 	__skb_queue_tail(list, newsk);
 	spin_unlock_irqrestore(&list->lock, flags);
 }
+
 /**
  * skb_unlink - remove a buffer from a list
  * @skb: buffer to remove
+ * @list: list to use
  *
- * Place a packet after a given packet in a list. The list locks are taken
- * and this function is atomic with respect to other list locked calls
+ * Remove a packet from a list. The list locks are taken and this
+ * function is atomic with respect to other list locked calls
  *
- * Works even without knowing the list it is sitting on, which can be
- * handy at times. It also means that THE LIST MUST EXIST when you
- * unlink. Thus a list must have its contents unlinked before it is
- * destroyed.
+ * You must know what list the SKB is on.
  */
-void skb_unlink(struct sk_buff *skb)
+void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
-	struct sk_buff_head *list = skb->list;
-
-	if (list) {
-		unsigned long flags;
+	unsigned long flags;
 
-		spin_lock_irqsave(&list->lock, flags);
-		if (skb->list == list)
-			__skb_unlink(skb, skb->list);
-		spin_unlock_irqrestore(&list->lock, flags);
-	}
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_unlink(skb, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 }
 
-
 /**
  * skb_append - append a buffer
  * @old: buffer to insert after
  * @newsk: buffer to insert
+ * @list: list to use
  *
  * Place a packet after a given packet in a list. The list locks are taken
  * and this function is atomic with respect to other list locked calls.
  * A buffer cannot be placed on two lists at the same time.
  */
-
-void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&old->list->lock, flags);
-	__skb_append(old, newsk);
-	spin_unlock_irqrestore(&old->list->lock, flags);
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_append(old, newsk, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 }
 
 
@@ -1395,19 +1429,21 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
  * skb_insert - insert a buffer
  * @old: buffer to insert before
  * @newsk: buffer to insert
+ * @list: list to use
+ *
+ * Place a packet before a given packet in a list. The list locks are
+ * taken and this function is atomic with respect to other list locked
+ * calls.
  *
- * Place a packet before a given packet in a list. The list locks are taken
- * and this function is atomic with respect to other list locked calls
  * A buffer cannot be placed on two lists at the same time.
  */
-
-void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&old->list->lock, flags);
-	__skb_insert(newsk, old->prev, old, old->list);
-	spin_unlock_irqrestore(&old->list->lock, flags);
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_insert(newsk, old->prev, old, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 }
 
 #if 0
@@ -1663,12 +1699,23 @@ void __init skb_init(void)
 					      NULL, NULL);
 	if (!skbuff_head_cache)
 		panic("cannot create skbuff cache");
+
+	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
+						(2*sizeof(struct sk_buff)) +
+						sizeof(atomic_t),
+						0,
+						SLAB_HWCACHE_ALIGN,
+						NULL, NULL);
+	if (!skbuff_fclone_cache)
+		panic("cannot create skbuff cache");
+
+	do_gettimeofday(&skb_tv_base);
 }
 
 EXPORT_SYMBOL(___pskb_trim);
 EXPORT_SYMBOL(__kfree_skb);
 EXPORT_SYMBOL(__pskb_pull_tail);
-EXPORT_SYMBOL(alloc_skb);
+EXPORT_SYMBOL(__alloc_skb);
 EXPORT_SYMBOL(pskb_copy);
 EXPORT_SYMBOL(pskb_expand_head);
 EXPORT_SYMBOL(skb_checksum);
@@ -1696,3 +1743,4 @@ EXPORT_SYMBOL(skb_prepare_seq_read);
 EXPORT_SYMBOL(skb_seq_read);
 EXPORT_SYMBOL(skb_abort_seq_read);
 EXPORT_SYMBOL(skb_find_text);
+EXPORT_SYMBOL(skb_tv_base);
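
The fclone changes above carve each skbuff_fclone_cache object into a parent sk_buff, a companion clone and a shared reference count; the (skb + 1), (child + 1) and (skb + 2) pointer arithmetic in __alloc_skb(), skb_clone() and kfree_skbmem() all walk this single layout. Conceptually (a sketch only -- no such struct is declared in the tree, the offsets are computed by hand):

struct skb_fclone_blob {		/* one skbuff_fclone_cache object */
	struct sk_buff orig;		/* SKB_FCLONE_ORIG                */
	struct sk_buff clone;		/* SKB_FCLONE_UNAVAILABLE until   */
					/* skb_clone() hands it out as    */
					/* SKB_FCLONE_CLONE               */
	atomic_t fclone_ref;		/* freed when this drops to zero  */
};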
diff --git a/net/core/sock.c b/net/core/sock.c
index 12f6d9a2a522..ccd10fd65682 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -260,7 +260,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 
 		if (val > sysctl_wmem_max)
 			val = sysctl_wmem_max;
-
+set_sndbuf:
 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
 		if ((val * 2) < SOCK_MIN_SNDBUF)
 			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
@@ -274,6 +274,13 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 		sk->sk_write_space(sk);
 		break;
 
+	case SO_SNDBUFFORCE:
+		if (!capable(CAP_NET_ADMIN)) {
+			ret = -EPERM;
+			break;
+		}
+		goto set_sndbuf;
+
 	case SO_RCVBUF:
 		/* Don't error on this BSD doesn't and if you think
 		   about it this is right. Otherwise apps have to
@@ -282,7 +289,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 
 		if (val > sysctl_rmem_max)
 			val = sysctl_rmem_max;
-
+set_rcvbuf:
 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
 		/* FIXME: is this lower bound the right one? */
 		if ((val * 2) < SOCK_MIN_RCVBUF)
@@ -291,6 +298,13 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 			sk->sk_rcvbuf = val * 2;
 		break;
 
+	case SO_RCVBUFFORCE:
+		if (!capable(CAP_NET_ADMIN)) {
+			ret = -EPERM;
+			break;
+		}
+		goto set_rcvbuf;
+
 	case SO_KEEPALIVE:
 #ifdef CONFIG_INET
 		if (sk->sk_protocol == IPPROTO_TCP)
@@ -686,6 +700,80 @@ void sk_free(struct sock *sk)
 	module_put(owner);
 }
 
+struct sock *sk_clone(const struct sock *sk, const unsigned int __nocast priority)
+{
+	struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0);
+
+	if (newsk != NULL) {
+		struct sk_filter *filter;
+
+		memcpy(newsk, sk, sk->sk_prot->obj_size);
+
+		/* SANITY */
+		sk_node_init(&newsk->sk_node);
+		sock_lock_init(newsk);
+		bh_lock_sock(newsk);
+
+		atomic_set(&newsk->sk_rmem_alloc, 0);
+		atomic_set(&newsk->sk_wmem_alloc, 0);
+		atomic_set(&newsk->sk_omem_alloc, 0);
+		skb_queue_head_init(&newsk->sk_receive_queue);
+		skb_queue_head_init(&newsk->sk_write_queue);
+
+		rwlock_init(&newsk->sk_dst_lock);
+		rwlock_init(&newsk->sk_callback_lock);
+
+		newsk->sk_dst_cache = NULL;
+		newsk->sk_wmem_queued = 0;
+		newsk->sk_forward_alloc = 0;
+		newsk->sk_send_head = NULL;
+		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
+		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
+
+		sock_reset_flag(newsk, SOCK_DONE);
+		skb_queue_head_init(&newsk->sk_error_queue);
+
+		filter = newsk->sk_filter;
+		if (filter != NULL)
+			sk_filter_charge(newsk, filter);
+
+		if (unlikely(xfrm_sk_clone_policy(newsk))) {
+			/* It is still raw copy of parent, so invalidate
+			 * destructor and make plain sk_free() */
+			newsk->sk_destruct = NULL;
+			sk_free(newsk);
+			newsk = NULL;
+			goto out;
+		}
+
+		newsk->sk_err = 0;
+		newsk->sk_priority = 0;
+		atomic_set(&newsk->sk_refcnt, 2);
+
+		/*
+		 * Increment the counter in the same struct proto as the master
+		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
+		 * is the same as sk->sk_prot->socks, as this field was copied
+		 * with memcpy).
+		 *
+		 * This _changes_ the previous behaviour, where
+		 * tcp_create_openreq_child always was incrementing the
+		 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
+		 * to be taken into account in all callers. -acme
+		 */
+		sk_refcnt_debug_inc(newsk);
+		newsk->sk_socket = NULL;
+		newsk->sk_sleep = NULL;
+
+		if (newsk->sk_prot->sockets_allocated)
+			atomic_inc(newsk->sk_prot->sockets_allocated);
+	}
+out:
+	return newsk;
+}
+
+EXPORT_SYMBOL_GPL(sk_clone);
+
 void __init sk_init(void)
 {
 	if (num_physpages <= 4096) {
@@ -1353,11 +1441,7 @@ void sk_common_release(struct sock *sk)
 
 	xfrm_sk_free_policy(sk);
 
-#ifdef INET_REFCNT_DEBUG
-	if (atomic_read(&sk->sk_refcnt) != 1)
-		printk(KERN_DEBUG "Destruction of the socket %p delayed, c=%d\n",
-		       sk, atomic_read(&sk->sk_refcnt));
-#endif
+	sk_refcnt_debug_release(sk);
 	sock_put(sk);
 }
 
@@ -1368,7 +1452,8 @@ static LIST_HEAD(proto_list);
 
 int proto_register(struct proto *prot, int alloc_slab)
 {
-	char *request_sock_slab_name;
+	char *request_sock_slab_name = NULL;
+	char *timewait_sock_slab_name;
 	int rc = -ENOBUFS;
 
 	if (alloc_slab) {
@@ -1399,6 +1484,23 @@ int proto_register(struct proto *prot, int alloc_slab)
 				goto out_free_request_sock_slab_name;
 			}
 		}
+
+		if (prot->twsk_obj_size) {
+			static const char mask[] = "tw_sock_%s";
+
+			timewait_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+
+			if (timewait_sock_slab_name == NULL)
+				goto out_free_request_sock_slab;
+
+			sprintf(timewait_sock_slab_name, mask, prot->name);
+			prot->twsk_slab = kmem_cache_create(timewait_sock_slab_name,
+							    prot->twsk_obj_size,
+							    0, SLAB_HWCACHE_ALIGN,
+							    NULL, NULL);
+			if (prot->twsk_slab == NULL)
+				goto out_free_timewait_sock_slab_name;
+		}
 	}
 
 	write_lock(&proto_list_lock);
@@ -1407,6 +1509,13 @@ int proto_register(struct proto *prot, int alloc_slab)
 	rc = 0;
 out:
 	return rc;
+out_free_timewait_sock_slab_name:
+	kfree(timewait_sock_slab_name);
+out_free_request_sock_slab:
+	if (prot->rsk_prot && prot->rsk_prot->slab) {
+		kmem_cache_destroy(prot->rsk_prot->slab);
+		prot->rsk_prot->slab = NULL;
+	}
 out_free_request_sock_slab_name:
 	kfree(request_sock_slab_name);
 out_free_sock_slab:
@@ -1434,6 +1543,14 @@ void proto_unregister(struct proto *prot)
 		prot->rsk_prot->slab = NULL;
 	}
 
+	if (prot->twsk_slab != NULL) {
+		const char *name = kmem_cache_name(prot->twsk_slab);
+
+		kmem_cache_destroy(prot->twsk_slab);
+		kfree(name);
+		prot->twsk_slab = NULL;
+	}
+
 	list_del(&prot->node);
 	write_unlock(&proto_list_lock);
 }
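
SO_SNDBUFFORCE and SO_RCVBUFFORCE jump to the same set_sndbuf/set_rcvbuf labels as SO_SNDBUF/SO_RCVBUF but bypass the sysctl_wmem_max/sysctl_rmem_max clamp for CAP_NET_ADMIN callers. A userspace sketch (assumes a libc that exposes SO_SNDBUFFORCE; falls back to the clamped path when the capability is missing):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int val = 8 * 1024 * 1024;		/* likely above wmem_max */
	socklen_t len = sizeof(val);

	if (setsockopt(fd, SOL_SOCKET, SO_SNDBUFFORCE, &val, len) < 0)
		setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, len);	/* clamped */

	getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, &len);
	printf("effective sndbuf: %d\n", val);	/* kernel stores val * 2 */
	close(fd);
	return 0;
}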
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 8f817ad9f546..2f278c8e4743 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -9,23 +9,18 @@
 #include <linux/sysctl.h>
 #include <linux/config.h>
 #include <linux/module.h>
+#include <linux/socket.h>
+#include <net/sock.h>
 
 #ifdef CONFIG_SYSCTL
 
 extern int netdev_max_backlog;
-extern int netdev_budget;
 extern int weight_p;
-extern int net_msg_cost;
-extern int net_msg_burst;
 
 extern __u32 sysctl_wmem_max;
 extern __u32 sysctl_rmem_max;
-extern __u32 sysctl_wmem_default;
-extern __u32 sysctl_rmem_default;
 
 extern int sysctl_core_destroy_delay;
-extern int sysctl_optmem_max;
-extern int sysctl_somaxconn;
 
 #ifdef CONFIG_NET_DIVERT
 extern char sysctl_divert_version[];
diff --git a/net/core/utils.c b/net/core/utils.c
index 88eb8b68e26b..7b5970fc9e40 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -16,7 +16,9 @@
 #include <linux/module.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
+#include <linux/inet.h>
 #include <linux/mm.h>
+#include <linux/net.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/random.h>
diff --git a/net/core/wireless.c b/net/core/wireless.c
index 3ff5639c0b78..5caae2399f3a 100644
--- a/net/core/wireless.c
+++ b/net/core/wireless.c
@@ -571,10 +571,6 @@ static int wireless_seq_show(struct seq_file *seq, void *v)
 	return 0;
 }
 
-extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
-extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
-extern void dev_seq_stop(struct seq_file *seq, void *v);
-
 static struct seq_operations wireless_seq_ops = {
 	.start = dev_seq_start,
 	.next = dev_seq_next,
@@ -1144,8 +1140,8 @@ static inline void rtmsg_iwinfo(struct net_device * dev,
 		kfree_skb(skb);
 		return;
 	}
-	NETLINK_CB(skb).dst_groups = RTMGRP_LINK;
-	netlink_broadcast(rtnl, skb, 0, RTMGRP_LINK, GFP_ATOMIC);
+	NETLINK_CB(skb).dst_group = RTNLGRP_LINK;
+	netlink_broadcast(rtnl, skb, 0, RTNLGRP_LINK, GFP_ATOMIC);
 }
 #endif	/* WE_EVENT_NETLINK */
 