path: root/net
author     Linus Torvalds <torvalds@linux-foundation.org>  2012-05-03 20:10:39 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-05-03 20:10:39 -0400
commit     c42f1d4b523950c4af060f8fc0c7016755d8a3bc (patch)
tree       8322c9c14b0c7055ce924d12c1150d2acb0ea95e /net
parent     913a90416918a591e6d5ece036b795c58a08131d (diff)
parent     5a8887d39e1ba5ee2d4ccb94b14d6f2dce5ddfca (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Transfer padding was wrong for full-speed USB in ASIX driver, fix
    from Ingo van Lil.

 2) Propagate the negative packet offset fix into the PowerPC BPF JIT.
    From Jan Seiffert.

 3) dl2k driver's private ioctls were letting unprivileged tasks make
    MII writes and other ugly bits like that. Fix from Jeff Mahoney.

 4) Fix TX VLAN and RX packet drops in ucc_geth, from Joakim Tjernlund.

 5) OOPS and network namespace fixes in IPVS from Hans Schillstrom and
    Julian Anastasov.

 6) Fix races and sleeping-in-locked-context bugs in drop_monitor, from
    Neil Horman.

 7) Fix link status indication in smsc95xx driver, from Paolo Pisati.

 8) Fix bridge netfilter OOPS, from Peter Huang.

 9) L2TP sendmsg can return on error conditions with the socket lock
    held, oops. Fix from Sasha Levin.

10) udp_diag should return meaningful values for socket memory usage,
    from Shan Wei.

11) Eric Dumazet is so awesome he gets his own section:

    Socket memory cgroup code (I never should have applied those
    patches, grumble...) made erroneous changes to
    sk_sockets_allocated_read_positive(). It was changed to use
    percpu_counter_sum_positive (which requires BH disabling) instead
    of percpu_counter_read_positive (which does not). Revert back to
    avoid crashes and lockdep warnings.

    Adjust the default tcp_adv_win_scale and tcp_rmem[2] values to fix
    throughput regressions. This is necessary as a result of our more
    precise skb->truesize tracking.

    Fix SKB leak in netem packet scheduler.

12) New device IDs for various bluetooth devices, from Manoj Iyer,
    AceLan Kao, and Steven Harms.

13) Fix command completion race in ipw2200, from Stanislav Yakovlev.

14) Fix rtlwifi oops on unload, from Larry Finger.

15) Fix hard_mtu when adjusting hard_header_len in smsc95xx driver.
    From Stephane Fillod.

16) ehea driver registers its IRQ before all the necessary state is set
    up, resulting in crashes. Fix from Thadeu Lima de Souza Cascardo.

17) Fix PHY connection failures in davinci_emac driver, from Anatolij
    Gustschin.

18) Missing break; in switch statement in bluetooth's
    hci_cmd_complete_evt(). Fix from Szymon Janc.

19) Fix queue programming in iwlwifi, from Johannes Berg.

20) Interrupt throttling defaults not being actually programmed into
    the hardware, fix from Jeff Kirsher and Ying Cai.

21) TLAN driver SKB encoding in descriptor busted on 64-bit, fix from
    Benjamin Poirier.

22) Fix blind status block RX producer pointer deref in TG3 driver,
    from Matt Carlson.

23) Promisc and multicast are busted on ehea, fixes from Thadeu Lima de
    Souza Cascardo.

24) Fix crashes in 6lowpan, from Alexander Smirnov.

25) tcp_complete_cwr() needs to be careful to not rewind the CWND to
    ssthresh if ssthresh has the "infinite" value. Fix from Yuchung
    Cheng.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (81 commits)
  sungem: Fix WakeOnLan
  tcp: change tcp_adv_win_scale and tcp_rmem[2]
  net: l2tp: unlock socket lock before returning from l2tp_ip_sendmsg
  drop_monitor: prevent init path from scheduling on the wrong cpu
  usbnet: fix failure handling in usbnet_probe
  usbnet: fix leak of transfer buffer of dev->interrupt
  ucc_geth: Add 16 bytes to max TX frame for VLANs
  net: ucc_geth, increase no. of HW RX descriptors
  netem: fix possible skb leak
  sky2: fix receive length error in mixed non-VLAN/VLAN traffic
  sky2: propogate rx hash when packet is copied
  net: fix two typos in skbuff.h
  cxgb3: Don't call cxgb_vlan_mode until q locks are initialized
  ixgbe: fix calling skb_put on nonlinear skb assertion bug
  ixgbe: Fix a memory leak in IEEE DCB
  igbvf: fix the bug when initializing the igbvf
  smsc75xx: enable mac to detect speed/duplex from phy
  smsc75xx: declare smsc75xx's MII as GMII capable
  smsc75xx: fix phy interrupt acknowledge
  smsc75xx: fix phy init reset loop
  ...
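Background on the tcp_adv_win_scale change in (11): the scale controls how much of each socket's receive buffer is assumed to be eaten by skb overhead rather than advertised as TCP window, so lowering it from 2 to 1 halves the advertised share. The small userspace sketch below is only an illustration of that split; the helper name and the exact formula are assumptions for this note, not code quoted from the merge.

/* Sketch (assumption, not kernel code): how an adv_win_scale-style knob
 * splits receive buffer space between advertised window and overhead. */
#include <stdio.h>

static long win_from_space(long space, int adv_win_scale)
{
	/* scale <= 0: advertise only space >> (-scale);
	 * scale  > 0: advertise space minus a 1/2^scale overhead reserve. */
	return adv_win_scale <= 0 ? space >> (-adv_win_scale)
				  : space - (space >> adv_win_scale);
}

int main(void)
{
	long space = 87380;	/* tcp_rmem[1] default in this tree */
	printf("scale 2 -> window %ld\n", win_from_space(space, 2));
	printf("scale 1 -> window %ld\n", win_from_space(space, 1));
	return 0;
}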
Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/hci_core.c              | 27
-rw-r--r--  net/bluetooth/hci_event.c             |  3
-rw-r--r--  net/bluetooth/mgmt.c                  |  2
-rw-r--r--  net/bridge/br_forward.c               |  1
-rw-r--r--  net/bridge/br_netfilter.c             |  8
-rw-r--r--  net/core/drop_monitor.c               | 88
-rw-r--r--  net/ieee802154/6lowpan.c              | 40
-rw-r--r--  net/ipv4/inet_diag.c                  |  2
-rw-r--r--  net/ipv4/tcp.c                        |  9
-rw-r--r--  net/ipv4/tcp_input.c                  | 13
-rw-r--r--  net/ipv4/udp_diag.c                   |  9
-rw-r--r--  net/l2tp/l2tp_ip.c                    |  3
-rw-r--r--  net/mac80211/ieee80211_i.h            |  2
-rw-r--r--  net/mac80211/iface.c                  |  4
-rw-r--r--  net/mac80211/mlme.c                   |  2
-rw-r--r--  net/mac80211/tx.c                     |  3
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c       | 11
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c        | 56
-rw-r--r--  net/netfilter/ipvs/ip_vs_ftp.c        |  2
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblc.c       |  3
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblcr.c      |  3
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto.c      | 38
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_sctp.c |  5
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_tcp.c  |  5
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_udp.c  |  5
-rw-r--r--  net/netfilter/xt_CT.c                 |  2
-rw-r--r--  net/sched/sch_netem.c                 |  6
27 files changed, 246 insertions, 106 deletions
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 92a857e3786d..edfd61addcec 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1215,40 +1215,40 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
 	return NULL;
 }
 
-static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
+static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
 			       u8 key_type, u8 old_key_type)
 {
 	/* Legacy key */
 	if (key_type < 0x03)
-		return 1;
+		return true;
 
 	/* Debug keys are insecure so don't store them persistently */
 	if (key_type == HCI_LK_DEBUG_COMBINATION)
-		return 0;
+		return false;
 
 	/* Changed combination key and there's no previous one */
 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
-		return 0;
+		return false;
 
 	/* Security mode 3 case */
 	if (!conn)
-		return 1;
+		return true;
 
 	/* Neither local nor remote side had no-bonding as requirement */
 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
-		return 1;
+		return true;
 
 	/* Local side had dedicated bonding as requirement */
 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
-		return 1;
+		return true;
 
 	/* Remote side had dedicated bonding as requirement */
 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
-		return 1;
+		return true;
 
 	/* If none of the above criteria match, then don't store the key
 	 * persistently */
-	return 0;
+	return false;
 }
 
 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
@@ -1285,7 +1285,8 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
 		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
 {
 	struct link_key *key, *old_key;
-	u8 old_key_type, persistent;
+	u8 old_key_type;
+	bool persistent;
 
 	old_key = hci_find_link_key(hdev, bdaddr);
 	if (old_key) {
@@ -1328,10 +1329,8 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
 
 	mgmt_new_link_key(hdev, key, persistent);
 
-	if (!persistent) {
-		list_del(&key->list);
-		kfree(key);
-	}
+	if (conn)
+		conn->flush_key = !persistent;
 
 	return 0;
 }
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index b37531094c49..6c065254afc0 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1901,6 +1901,8 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
 	}
 
 	if (ev->status == 0) {
+		if (conn->type == ACL_LINK && conn->flush_key)
+			hci_remove_link_key(hdev, &conn->dst);
 		hci_proto_disconn_cfm(conn, ev->reason);
 		hci_conn_del(conn);
 	}
@@ -2311,6 +2313,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
 
 	case HCI_OP_USER_PASSKEY_NEG_REPLY:
 		hci_cc_user_passkey_neg_reply(hdev, skb);
+		break;
 
 	case HCI_OP_LE_SET_SCAN_PARAM:
 		hci_cc_le_set_scan_param(hdev, skb);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 4ef275c69675..4bb03b111122 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2884,7 +2884,7 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
 	return 0;
 }
 
-int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, u8 persistent)
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent)
 {
 	struct mgmt_ev_new_link_key ev;
 
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 61f65344e711..a2098e3de500 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -47,6 +47,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
 		kfree_skb(skb);
 	} else {
 		skb_push(skb, ETH_HLEN);
+		br_drop_fake_rtable(skb);
 		dev_queue_xmit(skb);
 	}
 
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index dec4f3817133..d7f49b63ab0f 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -156,7 +156,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
 	rt->dst.dev = br->dev;
 	rt->dst.path = &rt->dst;
 	dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
-	rt->dst.flags = DST_NOXFRM | DST_NOPEER;
+	rt->dst.flags = DST_NOXFRM | DST_NOPEER | DST_FAKE_RTABLE;
 	rt->dst.ops = &fake_dst_ops;
 }
 
@@ -694,11 +694,7 @@ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
 				   const struct net_device *out,
 				   int (*okfn)(struct sk_buff *))
 {
-	struct rtable *rt = skb_rtable(skb);
-
-	if (rt && rt == bridge_parent_rtable(in))
-		skb_dst_drop(skb);
-
+	br_drop_fake_rtable(skb);
 	return NF_ACCEPT;
 }
 
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 5c3c81a609e5..a7cad741df01 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -42,13 +42,14 @@ static void send_dm_alert(struct work_struct *unused);
  * netlink alerts
  */
 static int trace_state = TRACE_OFF;
-static DEFINE_SPINLOCK(trace_state_lock);
+static DEFINE_MUTEX(trace_state_mutex);
 
 struct per_cpu_dm_data {
 	struct work_struct dm_alert_work;
-	struct sk_buff *skb;
+	struct sk_buff __rcu *skb;
 	atomic_t dm_hit_count;
 	struct timer_list send_timer;
+	int cpu;
 };
 
 struct dm_hw_stat_delta {
@@ -79,29 +80,53 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data)
 	size_t al;
 	struct net_dm_alert_msg *msg;
 	struct nlattr *nla;
+	struct sk_buff *skb;
+	struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
 
 	al = sizeof(struct net_dm_alert_msg);
 	al += dm_hit_limit * sizeof(struct net_dm_drop_point);
 	al += sizeof(struct nlattr);
 
-	data->skb = genlmsg_new(al, GFP_KERNEL);
-	genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family,
-			0, NET_DM_CMD_ALERT);
-	nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg));
-	msg = nla_data(nla);
-	memset(msg, 0, al);
-	atomic_set(&data->dm_hit_count, dm_hit_limit);
+	skb = genlmsg_new(al, GFP_KERNEL);
+
+	if (skb) {
+		genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
+				0, NET_DM_CMD_ALERT);
+		nla = nla_reserve(skb, NLA_UNSPEC,
+				sizeof(struct net_dm_alert_msg));
+		msg = nla_data(nla);
+		memset(msg, 0, al);
+	} else
+		schedule_work_on(data->cpu, &data->dm_alert_work);
+
+	/*
+	 * Don't need to lock this, since we are guaranteed to only
+	 * run this on a single cpu at a time.
+	 * Note also that we only update data->skb if the old and new skb
+	 * pointers don't match. This ensures that we don't continually call
+	 * synchornize_rcu if we repeatedly fail to alloc a new netlink message.
+	 */
+	if (skb != oskb) {
+		rcu_assign_pointer(data->skb, skb);
+
+		synchronize_rcu();
+
+		atomic_set(&data->dm_hit_count, dm_hit_limit);
+	}
+
 }
 
 static void send_dm_alert(struct work_struct *unused)
 {
 	struct sk_buff *skb;
-	struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+	WARN_ON_ONCE(data->cpu != smp_processor_id());
 
 	/*
 	 * Grab the skb we're about to send
 	 */
-	skb = data->skb;
+	skb = rcu_dereference_protected(data->skb, 1);
 
 	/*
 	 * Replace it with a new one
@@ -111,8 +136,10 @@ static void send_dm_alert(struct work_struct *unused)
 	/*
 	 * Ship it!
 	 */
-	genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+	if (skb)
+		genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
 
+	put_cpu_var(dm_cpu_data);
 }
 
 /*
@@ -123,9 +150,11 @@ static void send_dm_alert(struct work_struct *unused)
  */
 static void sched_send_work(unsigned long unused)
 {
-	struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+	schedule_work_on(smp_processor_id(), &data->dm_alert_work);
 
-	schedule_work(&data->dm_alert_work);
+	put_cpu_var(dm_cpu_data);
 }
 
 static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -134,8 +163,15 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 	struct nlmsghdr *nlh;
 	struct nlattr *nla;
 	int i;
-	struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+	struct sk_buff *dskb;
+	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+
+	rcu_read_lock();
+	dskb = rcu_dereference(data->skb);
 
+	if (!dskb)
+		goto out;
 
 	if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
 		/*
@@ -144,7 +180,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 		goto out;
 	}
 
-	nlh = (struct nlmsghdr *)data->skb->data;
+	nlh = (struct nlmsghdr *)dskb->data;
 	nla = genlmsg_data(nlmsg_data(nlh));
 	msg = nla_data(nla);
 	for (i = 0; i < msg->entries; i++) {
@@ -158,7 +194,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 	/*
 	 * We need to create a new entry
 	 */
-	__nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point));
+	__nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
 	nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
 	memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
 	msg->points[msg->entries].count = 1;
@@ -170,6 +206,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 	}
 
 out:
+	rcu_read_unlock();
+	put_cpu_var(dm_cpu_data);
 	return;
 }
 
@@ -214,7 +252,7 @@ static int set_all_monitor_traces(int state)
 	struct dm_hw_stat_delta *new_stat = NULL;
 	struct dm_hw_stat_delta *temp;
 
-	spin_lock(&trace_state_lock);
+	mutex_lock(&trace_state_mutex);
 
 	if (state == trace_state) {
 		rc = -EAGAIN;
@@ -253,7 +291,7 @@ static int set_all_monitor_traces(int state)
 	rc = -EINPROGRESS;
 
 out_unlock:
-	spin_unlock(&trace_state_lock);
+	mutex_unlock(&trace_state_mutex);
 
 	return rc;
 }
@@ -296,12 +334,12 @@ static int dropmon_net_event(struct notifier_block *ev_block,
 
 		new_stat->dev = dev;
 		new_stat->last_rx = jiffies;
-		spin_lock(&trace_state_lock);
+		mutex_lock(&trace_state_mutex);
 		list_add_rcu(&new_stat->list, &hw_stats_list);
-		spin_unlock(&trace_state_lock);
+		mutex_unlock(&trace_state_mutex);
 		break;
 	case NETDEV_UNREGISTER:
-		spin_lock(&trace_state_lock);
+		mutex_lock(&trace_state_mutex);
 		list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
 			if (new_stat->dev == dev) {
 				new_stat->dev = NULL;
@@ -312,7 +350,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
 				}
 			}
 		}
-		spin_unlock(&trace_state_lock);
+		mutex_unlock(&trace_state_mutex);
 		break;
 	}
 out:
@@ -368,13 +406,15 @@ static int __init init_net_drop_monitor(void)
 
 	for_each_present_cpu(cpu) {
 		data = &per_cpu(dm_cpu_data, cpu);
-		reset_per_cpu_data(data);
+		data->cpu = cpu;
 		INIT_WORK(&data->dm_alert_work, send_dm_alert);
 		init_timer(&data->send_timer);
 		data->send_timer.data = cpu;
 		data->send_timer.function = sched_send_work;
+		reset_per_cpu_data(data);
 	}
 
+
 	goto out;
 
 out_unreg:
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 368515885368..840821b90bcd 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -1044,6 +1044,24 @@ static void lowpan_dev_free(struct net_device *dev)
 	free_netdev(dev);
 }
 
+static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
+{
+	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+	return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
+}
+
+static u16 lowpan_get_pan_id(const struct net_device *dev)
+{
+	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+	return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
+}
+
+static u16 lowpan_get_short_addr(const struct net_device *dev)
+{
+	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+	return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
+}
+
 static struct header_ops lowpan_header_ops = {
 	.create = lowpan_header_create,
 };
@@ -1053,6 +1071,12 @@ static const struct net_device_ops lowpan_netdev_ops = {
 	.ndo_set_mac_address = eth_mac_addr,
 };
 
+static struct ieee802154_mlme_ops lowpan_mlme = {
+	.get_pan_id = lowpan_get_pan_id,
+	.get_phy = lowpan_get_phy,
+	.get_short_addr = lowpan_get_short_addr,
+};
+
 static void lowpan_setup(struct net_device *dev)
 {
 	pr_debug("(%s)\n", __func__);
@@ -1070,6 +1094,7 @@ static void lowpan_setup(struct net_device *dev)
 
 	dev->netdev_ops = &lowpan_netdev_ops;
 	dev->header_ops = &lowpan_header_ops;
+	dev->ml_priv = &lowpan_mlme;
 	dev->destructor = lowpan_dev_free;
 }
 
@@ -1143,6 +1168,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
 	list_add_tail(&entry->list, &lowpan_devices);
 	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
 
+	spin_lock_init(&flist_lock);
+
 	register_netdevice(dev);
 
 	return 0;
@@ -1152,11 +1179,20 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
 	struct net_device *real_dev = lowpan_dev->real_dev;
-	struct lowpan_dev_record *entry;
-	struct lowpan_dev_record *tmp;
+	struct lowpan_dev_record *entry, *tmp;
+	struct lowpan_fragment *frame, *tframe;
 
 	ASSERT_RTNL();
 
+	spin_lock(&flist_lock);
+	list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
+		del_timer(&frame->timer);
+		list_del(&frame->list);
+		dev_kfree_skb(frame->skb);
+		kfree(frame);
+	}
+	spin_unlock(&flist_lock);
+
 	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
 	list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
 		if (entry->ldev == dev) {
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 8d25a1c557eb..8f8db724bfaf 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -141,7 +141,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 		goto rtattr_failure;
 
 	if (icsk == NULL) {
-		r->idiag_rqueue = r->idiag_wqueue = 0;
+		handler->idiag_get_info(sk, r, NULL);
 		goto out;
 	}
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 8bb6adeb62c0..1272a88c2a63 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3243,7 +3243,7 @@ void __init tcp_init(void)
 {
 	struct sk_buff *skb = NULL;
 	unsigned long limit;
-	int max_share, cnt;
+	int max_rshare, max_wshare, cnt;
 	unsigned int i;
 	unsigned long jiffy = jiffies;
 
@@ -3303,15 +3303,16 @@ void __init tcp_init(void)
 	tcp_init_mem(&init_net);
 	/* Set per-socket limits to no more than 1/128 the pressure threshold */
 	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
-	max_share = min(4UL*1024*1024, limit);
+	max_wshare = min(4UL*1024*1024, limit);
+	max_rshare = min(6UL*1024*1024, limit);
 
 	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
 	sysctl_tcp_wmem[1] = 16*1024;
-	sysctl_tcp_wmem[2] = max(64*1024, max_share);
+	sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
 
 	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
 	sysctl_tcp_rmem[1] = 87380;
-	sysctl_tcp_rmem[2] = max(87380, max_share);
+	sysctl_tcp_rmem[2] = max(87380, max_rshare);
 
 	pr_info("Hash tables configured (established %u bind %u)\n",
 		tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3ff364065376..257b61789eeb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -85,7 +85,7 @@ int sysctl_tcp_ecn __read_mostly = 2;
 EXPORT_SYMBOL(sysctl_tcp_ecn);
 int sysctl_tcp_dsack __read_mostly = 1;
 int sysctl_tcp_app_win __read_mostly = 31;
-int sysctl_tcp_adv_win_scale __read_mostly = 2;
+int sysctl_tcp_adv_win_scale __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
 
 int sysctl_tcp_stdurg __read_mostly;
@@ -495,7 +495,7 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
 		goto new_measure;
 	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
 		return;
-	tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1);
+	tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1);
 
 new_measure:
 	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
@@ -2868,11 +2868,14 @@ static inline void tcp_complete_cwr(struct sock *sk)
 
 	/* Do not moderate cwnd if it's already undone in cwr or recovery. */
 	if (tp->undo_marker) {
-		if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
+		if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
 			tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
-		else /* PRR */
+			tp->snd_cwnd_stamp = tcp_time_stamp;
+		} else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) {
+			/* PRR algorithm. */
 			tp->snd_cwnd = tp->snd_ssthresh;
 			tp->snd_cwnd_stamp = tcp_time_stamp;
+		}
 	}
 	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 8a949f19deb6..a7f86a3cd502 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -146,9 +146,17 @@ static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
 	return udp_dump_one(&udp_table, in_skb, nlh, req);
 }
 
+static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
+		void *info)
+{
+	r->idiag_rqueue = sk_rmem_alloc_get(sk);
+	r->idiag_wqueue = sk_wmem_alloc_get(sk);
+}
+
 static const struct inet_diag_handler udp_diag_handler = {
 	.dump = udp_diag_dump,
 	.dump_one = udp_diag_dump_one,
+	.idiag_get_info = udp_diag_get_info,
 	.idiag_type = IPPROTO_UDP,
 };
 
@@ -167,6 +175,7 @@ static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *
 static const struct inet_diag_handler udplite_diag_handler = {
 	.dump = udplite_diag_dump,
 	.dump_one = udplite_diag_dump_one,
+	.idiag_get_info = udp_diag_get_info,
 	.idiag_type = IPPROTO_UDPLITE,
 };
 
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 585d93ecee2d..6274f0be82b0 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -442,8 +442,9 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
 
 		daddr = lip->l2tp_addr.s_addr;
 	} else {
+		rc = -EDESTADDRREQ;
 		if (sk->sk_state != TCP_ESTABLISHED)
-			return -EDESTADDRREQ;
+			goto out;
 
 		daddr = inet->inet_daddr;
 		connected = 1;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index d9798a307f20..db8fae51714c 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1210,7 +1210,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 				  struct sk_buff *skb);
 void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata);
 void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
-void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata);
+void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata);
 
 /* IBSS code */
 void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 401c01f0731e..c20051b7ffcd 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -486,6 +486,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 		/* free all potentially still buffered bcast frames */
 		local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf);
 		skb_queue_purge(&sdata->u.ap.ps_bc_buf);
+	} else if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+		ieee80211_mgd_stop(sdata);
 	}
 
 	if (going_down)
@@ -644,8 +646,6 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
 
 	if (ieee80211_vif_is_mesh(&sdata->vif))
 		mesh_rmc_free(sdata);
-	else if (sdata->vif.type == NL80211_IFTYPE_STATION)
-		ieee80211_mgd_teardown(sdata);
 
 	flushed = sta_info_flush(local, sdata);
 	WARN_ON(flushed);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index f76da5b3f5c5..20c680bfc3ae 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3497,7 +3497,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
 	return 0;
 }
 
-void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata)
+void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 782a60198df4..e76facc69e95 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1158,7 +1158,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 		tx->sta = rcu_dereference(sdata->u.vlan.sta);
 		if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
 			return TX_DROP;
-	} else if (info->flags & IEEE80211_TX_CTL_INJECTED) {
+	} else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
+		   tx->sdata->control_port_protocol == tx->skb->protocol) {
 		tx->sta = sta_info_get_bss(sdata, hdr->addr1);
 	}
 	if (!tx->sta)
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 2555816e7788..00bdb1d9d690 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1924,6 +1924,7 @@ protocol_fail:
 control_fail:
 	ip_vs_estimator_net_cleanup(net);
 estimator_fail:
+	net->ipvs = NULL;
 	return -ENOMEM;
 }
 
@@ -1936,6 +1937,7 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
 	ip_vs_control_net_cleanup(net);
 	ip_vs_estimator_net_cleanup(net);
 	IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
+	net->ipvs = NULL;
 }
 
 static void __net_exit __ip_vs_dev_cleanup(struct net *net)
@@ -1993,10 +1995,18 @@ static int __init ip_vs_init(void)
 		goto cleanup_dev;
 	}
 
+	ret = ip_vs_register_nl_ioctl();
+	if (ret < 0) {
+		pr_err("can't register netlink/ioctl.\n");
+		goto cleanup_hooks;
+	}
+
 	pr_info("ipvs loaded.\n");
 
 	return ret;
 
+cleanup_hooks:
+	nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
 cleanup_dev:
 	unregister_pernet_device(&ipvs_core_dev_ops);
 cleanup_sub:
@@ -2012,6 +2022,7 @@ exit:
 
 static void __exit ip_vs_cleanup(void)
 {
+	ip_vs_unregister_nl_ioctl();
 	nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
 	unregister_pernet_device(&ipvs_core_dev_ops);
 	unregister_pernet_subsys(&ipvs_core_ops);	/* free ip_vs struct */
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index b3afe189af61..f5589987fc80 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3680,7 +3680,7 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
 	return 0;
 }
 
-void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net)
+void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -3692,7 +3692,7 @@ void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net)
 #else
 
 int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
-void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net) { }
+void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
 
 #endif
 
@@ -3750,21 +3750,10 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
 	free_percpu(ipvs->tot_stats.cpustats);
 }
 
-int __init ip_vs_control_init(void)
+int __init ip_vs_register_nl_ioctl(void)
 {
-	int idx;
 	int ret;
 
-	EnterFunction(2);
-
-	/* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
-	for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
-		INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
-		INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
-	}
-
-	smp_wmb();	/* Do we really need it now ? */
-
 	ret = nf_register_sockopt(&ip_vs_sockopts);
 	if (ret) {
 		pr_err("cannot register sockopt.\n");
@@ -3776,28 +3765,47 @@ int __init ip_vs_control_init(void)
 		pr_err("cannot register Generic Netlink interface.\n");
 		goto err_genl;
 	}
-
-	ret = register_netdevice_notifier(&ip_vs_dst_notifier);
-	if (ret < 0)
-		goto err_notf;
-
-	LeaveFunction(2);
 	return 0;
 
-err_notf:
-	ip_vs_genl_unregister();
 err_genl:
 	nf_unregister_sockopt(&ip_vs_sockopts);
 err_sock:
 	return ret;
 }
 
+void ip_vs_unregister_nl_ioctl(void)
+{
+	ip_vs_genl_unregister();
+	nf_unregister_sockopt(&ip_vs_sockopts);
+}
+
+int __init ip_vs_control_init(void)
+{
+	int idx;
+	int ret;
+
+	EnterFunction(2);
+
+	/* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
+	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
+		INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
+		INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
+	}
+
+	smp_wmb();	/* Do we really need it now ? */
+
+	ret = register_netdevice_notifier(&ip_vs_dst_notifier);
+	if (ret < 0)
+		return ret;
+
+	LeaveFunction(2);
+	return 0;
+}
+
 
 void ip_vs_control_cleanup(void)
 {
 	EnterFunction(2);
 	unregister_netdevice_notifier(&ip_vs_dst_notifier);
-	ip_vs_genl_unregister();
-	nf_unregister_sockopt(&ip_vs_sockopts);
 	LeaveFunction(2);
 }
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 538d74ee4f68..e39f693dd3e4 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -439,6 +439,8 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
 	struct ip_vs_app *app;
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
+	if (!ipvs)
+		return -ENOENT;
 	app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL);
 	if (!app)
 		return -ENOMEM;
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 0f16283fd058..caa43704e55e 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -551,6 +551,9 @@ static int __net_init __ip_vs_lblc_init(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
+	if (!ipvs)
+		return -ENOENT;
+
 	if (!net_eq(net, &init_net)) {
 		ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
 						sizeof(vs_vars_table),
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index eec797f8cce7..548bf37aa29e 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -745,6 +745,9 @@ static int __net_init __ip_vs_lblcr_init(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
+	if (!ipvs)
+		return -ENOENT;
+
 	if (!net_eq(net, &init_net)) {
 		ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
 						sizeof(vs_vars_table),
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index f843a8833250..ed835e67a07e 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -59,9 +59,6 @@ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
 	return 0;
 }
 
-#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) || \
-    defined(CONFIG_IP_VS_PROTO_SCTP) || defined(CONFIG_IP_VS_PROTO_AH) || \
-    defined(CONFIG_IP_VS_PROTO_ESP)
 /*
  *	register an ipvs protocols netns related data
  */
@@ -81,12 +78,18 @@ register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
 	ipvs->proto_data_table[hash] = pd;
 	atomic_set(&pd->appcnt, 0);	/* Init app counter */
 
-	if (pp->init_netns != NULL)
-		pp->init_netns(net, pd);
+	if (pp->init_netns != NULL) {
+		int ret = pp->init_netns(net, pd);
+		if (ret) {
+			/* unlink an free proto data */
+			ipvs->proto_data_table[hash] = pd->next;
+			kfree(pd);
+			return ret;
+		}
+	}
 
 	return 0;
 }
-#endif
 
 /*
  *	unregister an ipvs protocol
@@ -316,22 +319,35 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
  */
 int __net_init ip_vs_protocol_net_init(struct net *net)
 {
+	int i, ret;
+	static struct ip_vs_protocol *protos[] = {
 #ifdef CONFIG_IP_VS_PROTO_TCP
-	register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp);
+		&ip_vs_protocol_tcp,
 #endif
 #ifdef CONFIG_IP_VS_PROTO_UDP
-	register_ip_vs_proto_netns(net, &ip_vs_protocol_udp);
+		&ip_vs_protocol_udp,
 #endif
 #ifdef CONFIG_IP_VS_PROTO_SCTP
-	register_ip_vs_proto_netns(net, &ip_vs_protocol_sctp);
+		&ip_vs_protocol_sctp,
 #endif
 #ifdef CONFIG_IP_VS_PROTO_AH
-	register_ip_vs_proto_netns(net, &ip_vs_protocol_ah);
+		&ip_vs_protocol_ah,
 #endif
 #ifdef CONFIG_IP_VS_PROTO_ESP
-	register_ip_vs_proto_netns(net, &ip_vs_protocol_esp);
+		&ip_vs_protocol_esp,
 #endif
+	};
+
+	for (i = 0; i < ARRAY_SIZE(protos); i++) {
+		ret = register_ip_vs_proto_netns(net, protos[i]);
+		if (ret < 0)
+			goto cleanup;
+	}
 	return 0;
+
+cleanup:
+	ip_vs_protocol_net_cleanup(net);
+	return ret;
 }
 
 void __net_exit ip_vs_protocol_net_cleanup(struct net *net)
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 1fbf7a2816f5..9f3fb751c491 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -1090,7 +1090,7 @@ out:
  *	timeouts is netns related now.
  * ---------------------------------------------
  */
-static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -1098,6 +1098,9 @@ static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
 	spin_lock_init(&ipvs->sctp_app_lock);
 	pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts,
 							sizeof(sctp_timeouts));
+	if (!pd->timeout_table)
+		return -ENOMEM;
+	return 0;
 }
 
 static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index ef8641f7af83..cd609cc62721 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -677,7 +677,7 @@ void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp)
  *	timeouts is netns related now.
  * ---------------------------------------------
  */
-static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -685,7 +685,10 @@ static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
 	spin_lock_init(&ipvs->tcp_app_lock);
 	pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
 							sizeof(tcp_timeouts));
+	if (!pd->timeout_table)
+		return -ENOMEM;
 	pd->tcp_state_table = tcp_states;
+	return 0;
 }
 
 static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index f4b7262896bb..2fedb2dcb3d1 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -467,7 +467,7 @@ udp_state_transition(struct ip_vs_conn *cp, int direction,
 	cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
 }
 
-static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __udp_init(struct net *net, struct ip_vs_proto_data *pd)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -475,6 +475,9 @@ static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
 	spin_lock_init(&ipvs->udp_app_lock);
 	pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
 							sizeof(udp_timeouts));
+	if (!pd->timeout_table)
+		return -ENOMEM;
+	return 0;
 }
 
 static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 59530e93fa58..3746d8b9a478 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -227,7 +227,7 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
 	}
 
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-	if (info->timeout) {
+	if (info->timeout[0]) {
 		typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
 		struct nf_conn_timeout *timeout_ext;
 
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 5da548fa7ae9..ebd22966f748 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -408,10 +408,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
 		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
 		    (skb->ip_summed == CHECKSUM_PARTIAL &&
-		     skb_checksum_help(skb))) {
-			sch->qstats.drops++;
-			return NET_XMIT_DROP;
-		}
+		     skb_checksum_help(skb)))
+			return qdisc_drop(skb, sch);
 
 		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
 	}