path: root/net
author     David S. Miller <davem@davemloft.net>  2012-05-07 23:35:40 -0400
committer  David S. Miller <davem@davemloft.net>  2012-05-07 23:35:40 -0400
commit     0d6c4a2e4641bbc556dd74d3aa158c413a972492 (patch)
tree       da944af17682659bb433dc2282dcb48380c14cd1 /net
parent     6e06c0e2347ec79d0bd5702b2438fe883f784545 (diff)
parent     1c430a727fa512500a422ffe4712166c550ea06a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
        drivers/net/ethernet/intel/e1000e/param.c
        drivers/net/wireless/iwlwifi/iwl-agn-rx.c
        drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
        drivers/net/wireless/iwlwifi/iwl-trans.h

Resolved the iwlwifi conflict with mainline using 3-way diff posted
by John Linville and Stephen Rothwell.  In 'net' we added a bug fix
to make iwlwifi report a more accurate skb->truesize but this
conflicted with RX path changes that happened meanwhile in net-next.

In e1000e a conflict arose in the validation code for settings of
adapter->itr.  'net-next' had more sophisticated logic so that logic
was used.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/hci_core.c | 27
-rw-r--r--  net/bluetooth/hci_event.c | 3
-rw-r--r--  net/bluetooth/mgmt.c | 2
-rw-r--r--  net/bridge/br_forward.c | 1
-rw-r--r--  net/bridge/br_netfilter.c | 8
-rw-r--r--  net/core/drop_monitor.c | 88
-rw-r--r--  net/ieee802154/6lowpan.c | 40
-rw-r--r--  net/ipv4/inet_diag.c | 2
-rw-r--r--  net/ipv4/tcp.c | 9
-rw-r--r--  net/ipv4/tcp_input.c | 13
-rw-r--r--  net/ipv4/udp_diag.c | 9
-rw-r--r--  net/l2tp/l2tp_ip.c | 3
-rw-r--r--  net/mac80211/ieee80211_i.h | 2
-rw-r--r--  net/mac80211/iface.c | 4
-rw-r--r--  net/mac80211/mlme.c | 2
-rw-r--r--  net/mac80211/tx.c | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 11
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 56
-rw-r--r--  net/netfilter/ipvs/ip_vs_ftp.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblc.c | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblcr.c | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto.c | 38
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_sctp.c | 5
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_tcp.c | 5
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_udp.c | 5
-rw-r--r--  net/netfilter/xt_CT.c | 2
-rw-r--r--  net/sched/sch_netem.c | 6
-rw-r--r--  net/sunrpc/clnt.c | 50
-rw-r--r--  net/sunrpc/rpc_pipe.c | 3
-rw-r--r--  net/sunrpc/sunrpc_syms.c | 17
30 files changed, 296 insertions, 126 deletions
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 92a857e3786d..edfd61addcec 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1215,40 +1215,40 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
 	return NULL;
 }
 
-static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
+static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
 			      u8 key_type, u8 old_key_type)
 {
 	/* Legacy key */
 	if (key_type < 0x03)
-		return 1;
+		return true;
 
 	/* Debug keys are insecure so don't store them persistently */
 	if (key_type == HCI_LK_DEBUG_COMBINATION)
-		return 0;
+		return false;
 
 	/* Changed combination key and there's no previous one */
 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
-		return 0;
+		return false;
 
 	/* Security mode 3 case */
 	if (!conn)
-		return 1;
+		return true;
 
 	/* Neither local nor remote side had no-bonding as requirement */
 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
-		return 1;
+		return true;
 
 	/* Local side had dedicated bonding as requirement */
 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
-		return 1;
+		return true;
 
 	/* Remote side had dedicated bonding as requirement */
 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
-		return 1;
+		return true;
 
 	/* If none of the above criteria match, then don't store the key
 	 * persistently */
-	return 0;
+	return false;
 }
 
 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
@@ -1285,7 +1285,8 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
 		 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
 {
 	struct link_key *key, *old_key;
-	u8 old_key_type, persistent;
+	u8 old_key_type;
+	bool persistent;
 
 	old_key = hci_find_link_key(hdev, bdaddr);
 	if (old_key) {
@@ -1328,10 +1329,8 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
 
 	mgmt_new_link_key(hdev, key, persistent);
 
-	if (!persistent) {
-		list_del(&key->list);
-		kfree(key);
-	}
+	if (conn)
+		conn->flush_key = !persistent;
 
 	return 0;
 }
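
The hunk above changes hci_persistent_key() to return bool and, instead of deleting a non-persistent link key right away, records the decision in conn->flush_key so the key is only flushed when the connection actually goes down (see the hci_event.c hunk below). A minimal userspace C sketch of the same decision table; the constants and structs are illustrative stand-ins, not the Bluetooth subsystem's definitions:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the HCI link-key types referenced above. */
#define LK_DEBUG_COMBINATION    0x03
#define LK_CHANGED_COMBINATION  0x06

struct conn {
	unsigned char auth_type;
	unsigned char remote_auth;
	bool flush_key;            /* mirrors conn->flush_key in the patch */
};

/* Same decision table as hci_persistent_key(), returning bool. */
static bool key_is_persistent(const struct conn *conn,
			      unsigned char key_type,
			      unsigned char old_key_type)
{
	if (key_type < 0x03)                   /* legacy key */
		return true;
	if (key_type == LK_DEBUG_COMBINATION)  /* debug keys are insecure */
		return false;
	if (key_type == LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;
	if (!conn)                             /* security mode 3 */
		return true;
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;
	return false;
}

int main(void)
{
	struct conn c = { .auth_type = 0x00, .remote_auth = 0x00 };

	/* Instead of freeing the key immediately, remember the decision. */
	c.flush_key = !key_is_persistent(&c, 0x04, 0xff);
	printf("flush key on disconnect: %s\n", c.flush_key ? "yes" : "no");
	return 0;
}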
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index b37531094c49..6c065254afc0 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1901,6 +1901,8 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
 	}
 
 	if (ev->status == 0) {
+		if (conn->type == ACL_LINK && conn->flush_key)
+			hci_remove_link_key(hdev, &conn->dst);
 		hci_proto_disconn_cfm(conn, ev->reason);
 		hci_conn_del(conn);
 	}
@@ -2311,6 +2313,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
 
 	case HCI_OP_USER_PASSKEY_NEG_REPLY:
 		hci_cc_user_passkey_neg_reply(hdev, skb);
+		break;
 
 	case HCI_OP_LE_SET_SCAN_PARAM:
 		hci_cc_le_set_scan_param(hdev, skb);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 4ef275c69675..4bb03b111122 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2884,7 +2884,7 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
 	return 0;
 }
 
-int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, u8 persistent)
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent)
 {
 	struct mgmt_ev_new_link_key ev;
 
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index aab8470707c4..e9466d412707 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -47,6 +47,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
 		kfree_skb(skb);
 	} else {
 		skb_push(skb, ETH_HLEN);
+		br_drop_fake_rtable(skb);
 		dev_queue_xmit(skb);
 	}
 
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 9d4f09c3520f..53f083686ae4 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -156,7 +156,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
 	rt->dst.dev = br->dev;
 	rt->dst.path = &rt->dst;
 	dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
-	rt->dst.flags = DST_NOXFRM | DST_NOPEER;
+	rt->dst.flags = DST_NOXFRM | DST_NOPEER | DST_FAKE_RTABLE;
 	rt->dst.ops = &fake_dst_ops;
 }
 
@@ -694,11 +694,7 @@ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
 				   const struct net_device *out,
 				   int (*okfn)(struct sk_buff *))
 {
-	struct rtable *rt = skb_rtable(skb);
-
-	if (rt && rt == bridge_parent_rtable(in))
-		skb_dst_drop(skb);
-
+	br_drop_fake_rtable(skb);
 	return NF_ACCEPT;
 }
 
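
Rather than comparing skb_rtable(skb) against the per-bridge fake rtable in one place, the patch tags the fake entry with DST_FAKE_RTABLE and lets br_drop_fake_rtable() detect and drop it wherever needed (it is now also called from br_dev_queue_push_xmit() above). A hedged, self-contained C sketch of the tag-and-drop-centrally idea, using made-up names rather than the kernel's dst API:

#include <stdio.h>

/* Hypothetical flag, analogous in spirit to DST_FAKE_RTABLE. */
#define DST_FLAG_FAKE  0x01

struct dst {
	unsigned int flags;
};

struct packet {
	struct dst *dst;
};

/* Central helper: detach the attached dst only if it is the fake one. */
static void drop_fake_dst(struct packet *pkt)
{
	if (pkt->dst && (pkt->dst->flags & DST_FLAG_FAKE))
		pkt->dst = NULL;   /* kernel code would drop a reference here */
}

int main(void)
{
	struct dst fake = { .flags = DST_FLAG_FAKE };
	struct dst real = { .flags = 0 };
	struct packet a = { .dst = &fake };
	struct packet b = { .dst = &real };

	drop_fake_dst(&a);   /* fake dst detached */
	drop_fake_dst(&b);   /* real dst left untouched */
	printf("a.dst=%p b.dst=%p\n", (void *)a.dst, (void *)b.dst);
	return 0;
}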
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 5c3c81a609e5..a7cad741df01 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -42,13 +42,14 @@ static void send_dm_alert(struct work_struct *unused);
  * netlink alerts
  */
 static int trace_state = TRACE_OFF;
-static DEFINE_SPINLOCK(trace_state_lock);
+static DEFINE_MUTEX(trace_state_mutex);
 
 struct per_cpu_dm_data {
 	struct work_struct dm_alert_work;
-	struct sk_buff *skb;
+	struct sk_buff __rcu *skb;
 	atomic_t dm_hit_count;
 	struct timer_list send_timer;
+	int cpu;
 };
 
 struct dm_hw_stat_delta {
@@ -79,29 +80,53 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data)
 	size_t al;
 	struct net_dm_alert_msg *msg;
 	struct nlattr *nla;
+	struct sk_buff *skb;
+	struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
 
 	al = sizeof(struct net_dm_alert_msg);
 	al += dm_hit_limit * sizeof(struct net_dm_drop_point);
 	al += sizeof(struct nlattr);
 
-	data->skb = genlmsg_new(al, GFP_KERNEL);
-	genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family,
-			0, NET_DM_CMD_ALERT);
-	nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg));
-	msg = nla_data(nla);
-	memset(msg, 0, al);
-	atomic_set(&data->dm_hit_count, dm_hit_limit);
+	skb = genlmsg_new(al, GFP_KERNEL);
+
+	if (skb) {
+		genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
+				0, NET_DM_CMD_ALERT);
+		nla = nla_reserve(skb, NLA_UNSPEC,
+				  sizeof(struct net_dm_alert_msg));
+		msg = nla_data(nla);
+		memset(msg, 0, al);
+	} else
+		schedule_work_on(data->cpu, &data->dm_alert_work);
+
+	/*
+	 * Don't need to lock this, since we are guaranteed to only
+	 * run this on a single cpu at a time.
+	 * Note also that we only update data->skb if the old and new skb
+	 * pointers don't match. This ensures that we don't continually call
+	 * synchornize_rcu if we repeatedly fail to alloc a new netlink message.
+	 */
+	if (skb != oskb) {
+		rcu_assign_pointer(data->skb, skb);
+
+		synchronize_rcu();
+
+		atomic_set(&data->dm_hit_count, dm_hit_limit);
+	}
+
 }
 
 static void send_dm_alert(struct work_struct *unused)
 {
 	struct sk_buff *skb;
-	struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+	WARN_ON_ONCE(data->cpu != smp_processor_id());
 
 	/*
 	 * Grab the skb we're about to send
 	 */
-	skb = data->skb;
+	skb = rcu_dereference_protected(data->skb, 1);
 
 	/*
 	 * Replace it with a new one
@@ -111,8 +136,10 @@ static void send_dm_alert(struct work_struct *unused)
 	/*
 	 * Ship it!
 	 */
-	genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+	if (skb)
+		genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
 
+	put_cpu_var(dm_cpu_data);
 }
 
 /*
@@ -123,9 +150,11 @@ static void send_dm_alert(struct work_struct *unused)
  */
 static void sched_send_work(unsigned long unused)
 {
-	struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+	schedule_work_on(smp_processor_id(), &data->dm_alert_work);
 
-	schedule_work(&data->dm_alert_work);
+	put_cpu_var(dm_cpu_data);
 }
 
 static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -134,8 +163,15 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 	struct nlmsghdr *nlh;
 	struct nlattr *nla;
 	int i;
-	struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+	struct sk_buff *dskb;
+	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+
+	rcu_read_lock();
+	dskb = rcu_dereference(data->skb);
 
+	if (!dskb)
+		goto out;
 
 	if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
 		/*
@@ -144,7 +180,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 		goto out;
 	}
 
-	nlh = (struct nlmsghdr *)data->skb->data;
+	nlh = (struct nlmsghdr *)dskb->data;
 	nla = genlmsg_data(nlmsg_data(nlh));
 	msg = nla_data(nla);
 	for (i = 0; i < msg->entries; i++) {
@@ -158,7 +194,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 	/*
 	 * We need to create a new entry
 	 */
-	__nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point));
+	__nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
 	nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
 	memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
 	msg->points[msg->entries].count = 1;
@@ -170,6 +206,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 	}
 
 out:
+	rcu_read_unlock();
+	put_cpu_var(dm_cpu_data);
 	return;
 }
 
@@ -214,7 +252,7 @@ static int set_all_monitor_traces(int state)
 	struct dm_hw_stat_delta *new_stat = NULL;
 	struct dm_hw_stat_delta *temp;
 
-	spin_lock(&trace_state_lock);
+	mutex_lock(&trace_state_mutex);
 
 	if (state == trace_state) {
 		rc = -EAGAIN;
@@ -253,7 +291,7 @@ static int set_all_monitor_traces(int state)
 	rc = -EINPROGRESS;
 
 out_unlock:
-	spin_unlock(&trace_state_lock);
+	mutex_unlock(&trace_state_mutex);
 
 	return rc;
 }
@@ -296,12 +334,12 @@ static int dropmon_net_event(struct notifier_block *ev_block,
 
 		new_stat->dev = dev;
 		new_stat->last_rx = jiffies;
-		spin_lock(&trace_state_lock);
+		mutex_lock(&trace_state_mutex);
 		list_add_rcu(&new_stat->list, &hw_stats_list);
-		spin_unlock(&trace_state_lock);
+		mutex_unlock(&trace_state_mutex);
 		break;
 	case NETDEV_UNREGISTER:
-		spin_lock(&trace_state_lock);
+		mutex_lock(&trace_state_mutex);
 		list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
 			if (new_stat->dev == dev) {
 				new_stat->dev = NULL;
@@ -312,7 +350,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
 				}
 			}
 		}
-		spin_unlock(&trace_state_lock);
+		mutex_unlock(&trace_state_mutex);
 		break;
 	}
 out:
@@ -368,13 +406,15 @@ static int __init init_net_drop_monitor(void)
 
 	for_each_present_cpu(cpu) {
 		data = &per_cpu(dm_cpu_data, cpu);
-		reset_per_cpu_data(data);
+		data->cpu = cpu;
 		INIT_WORK(&data->dm_alert_work, send_dm_alert);
 		init_timer(&data->send_timer);
 		data->send_timer.data = cpu;
 		data->send_timer.function = sched_send_work;
+		reset_per_cpu_data(data);
 	}
 
+
 	goto out;
 
 out_unreg:
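
Besides switching trace_state_lock to a mutex and making data->skb RCU-protected, the drop monitor keeps rate-limiting with atomic_add_unless(&data->dm_hit_count, -1, 0): at most dm_hit_limit drops are recorded per alert window. A userspace analogue of that decrement-unless-zero primitive, written with C11 atomics purely for illustration:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Decrement *v unless it is already zero; returns true if it decremented.
 * Userspace analogue of the kernel's atomic_add_unless(v, -1, 0). */
static bool dec_unless_zero(atomic_int *v)
{
	int cur = atomic_load(v);

	while (cur != 0) {
		if (atomic_compare_exchange_weak(v, &cur, cur - 1))
			return true;
		/* cur was reloaded by the failed CAS; retry */
	}
	return false;
}

int main(void)
{
	atomic_int hit_count = 3;  /* like dm_hit_count seeded with dm_hit_limit */
	int recorded = 0, i;

	for (i = 0; i < 10; i++)
		if (dec_unless_zero(&hit_count))
			recorded++;    /* this drop goes into the pending alert */

	printf("recorded %d of 10 drops\n", recorded);  /* prints 3 */
	return 0;
}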
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 4b5701c8bf97..32eb4179e8fa 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -1059,6 +1059,24 @@ static void lowpan_dev_free(struct net_device *dev)
 	free_netdev(dev);
 }
 
+static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
+{
+	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+	return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
+}
+
+static u16 lowpan_get_pan_id(const struct net_device *dev)
+{
+	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+	return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
+}
+
+static u16 lowpan_get_short_addr(const struct net_device *dev)
+{
+	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+	return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
+}
+
 static struct header_ops lowpan_header_ops = {
 	.create = lowpan_header_create,
 };
@@ -1068,6 +1086,12 @@ static const struct net_device_ops lowpan_netdev_ops = {
 	.ndo_set_mac_address = eth_mac_addr,
 };
 
+static struct ieee802154_mlme_ops lowpan_mlme = {
+	.get_pan_id = lowpan_get_pan_id,
+	.get_phy = lowpan_get_phy,
+	.get_short_addr = lowpan_get_short_addr,
+};
+
 static void lowpan_setup(struct net_device *dev)
 {
 	pr_debug("(%s)\n", __func__);
@@ -1085,6 +1109,7 @@ static void lowpan_setup(struct net_device *dev)
 
 	dev->netdev_ops = &lowpan_netdev_ops;
 	dev->header_ops = &lowpan_header_ops;
+	dev->ml_priv = &lowpan_mlme;
 	dev->destructor = lowpan_dev_free;
 }
 
@@ -1158,6 +1183,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
 	list_add_tail(&entry->list, &lowpan_devices);
 	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
 
+	spin_lock_init(&flist_lock);
+
 	register_netdevice(dev);
 
 	return 0;
@@ -1167,11 +1194,20 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
 	struct net_device *real_dev = lowpan_dev->real_dev;
-	struct lowpan_dev_record *entry;
-	struct lowpan_dev_record *tmp;
+	struct lowpan_dev_record *entry, *tmp;
+	struct lowpan_fragment *frame, *tframe;
 
 	ASSERT_RTNL();
 
+	spin_lock(&flist_lock);
+	list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
+		del_timer(&frame->timer);
+		list_del(&frame->list);
+		dev_kfree_skb(frame->skb);
+		kfree(frame);
+	}
+	spin_unlock(&flist_lock);
+
 	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
 	list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
 		if (entry->ldev == dev) {
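
The new lowpan_get_phy()/lowpan_get_pan_id()/lowpan_get_short_addr() helpers simply forward each MLME query to the wrapped real device. A small self-contained sketch of that delegation pattern; the struct and ops names are invented for illustration and are not the ieee802154 API:

#include <stdio.h>

/* Hypothetical ops table, in the spirit of ieee802154_mlme_ops. */
struct mlme_ops {
	unsigned short (*get_pan_id)(const void *dev);
};

struct real_dev {
	unsigned short pan_id;
	const struct mlme_ops *ops;
};

struct virt_dev {
	struct real_dev *real;   /* like lowpan_dev_info(dev)->real_dev */
};

static unsigned short real_get_pan_id(const void *dev)
{
	return ((const struct real_dev *)dev)->pan_id;
}

/* The 6lowpan wrapper just forwards the query to the underlying device. */
static unsigned short virt_get_pan_id(const struct virt_dev *dev)
{
	return dev->real->ops->get_pan_id(dev->real);
}

int main(void)
{
	const struct mlme_ops ops = { .get_pan_id = real_get_pan_id };
	struct real_dev wpan = { .pan_id = 0xbeef, .ops = &ops };
	struct virt_dev lowpan = { .real = &wpan };

	printf("pan id: 0x%x\n", virt_get_pan_id(&lowpan));
	return 0;
}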
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 9f24028a3ba7..46d1e7199a8c 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -141,7 +141,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 		goto rtattr_failure;
 
 	if (icsk == NULL) {
-		r->idiag_rqueue = r->idiag_wqueue = 0;
+		handler->idiag_get_info(sk, r, NULL);
 		goto out;
 	}
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c2cff8b62772..565406287f6f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3515,7 +3515,7 @@ void __init tcp_init(void)
 {
 	struct sk_buff *skb = NULL;
 	unsigned long limit;
-	int max_share, cnt;
+	int max_rshare, max_wshare, cnt;
 	unsigned int i;
 	unsigned long jiffy = jiffies;
 
@@ -3575,15 +3575,16 @@ void __init tcp_init(void)
 	tcp_init_mem(&init_net);
 	/* Set per-socket limits to no more than 1/128 the pressure threshold */
 	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
-	max_share = min(4UL*1024*1024, limit);
+	max_wshare = min(4UL*1024*1024, limit);
+	max_rshare = min(6UL*1024*1024, limit);
 
 	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
 	sysctl_tcp_wmem[1] = 16*1024;
-	sysctl_tcp_wmem[2] = max(64*1024, max_share);
+	sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
 
 	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
 	sysctl_tcp_rmem[1] = 87380;
-	sysctl_tcp_rmem[2] = max(87380, max_share);
+	sysctl_tcp_rmem[2] = max(87380, max_rshare);
 
 	pr_info("Hash tables configured (established %u bind %u)\n",
 		tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
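
tcp_init() now derives separate caps for the send and receive buffer maxima: writes stay capped at 4MB while reads may grow to 6MB, both still bounded by the 1/128-of-pressure-threshold limit. A tiny standalone C example of that arithmetic, with an assumed value for 'limit':

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* Assumed value for illustration: stands in for
	 * nr_free_buffer_pages() << (PAGE_SHIFT - 7) in the patch. */
	unsigned long limit = 3UL * 1024 * 1024;

	unsigned long max_wshare = MIN(4UL * 1024 * 1024, limit);
	unsigned long max_rshare = MIN(6UL * 1024 * 1024, limit);

	unsigned long tcp_wmem_max = MAX(64UL * 1024, max_wshare);
	unsigned long tcp_rmem_max = MAX(87380UL, max_rshare);

	/* With a small 'limit' both caps collapse to 'limit'; with plenty of
	 * memory, writes cap at 4MB and reads at 6MB. */
	printf("wmem[2]=%lu rmem[2]=%lu\n", tcp_wmem_max, tcp_rmem_max);
	return 0;
}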
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 7b2d351f24db..eb58b94301ec 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -85,7 +85,7 @@ int sysctl_tcp_ecn __read_mostly = 2;
 EXPORT_SYMBOL(sysctl_tcp_ecn);
 int sysctl_tcp_dsack __read_mostly = 1;
 int sysctl_tcp_app_win __read_mostly = 31;
-int sysctl_tcp_adv_win_scale __read_mostly = 2;
+int sysctl_tcp_adv_win_scale __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
 
 int sysctl_tcp_stdurg __read_mostly;
@@ -496,7 +496,7 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
 		goto new_measure;
 	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
 		return;
-	tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1);
+	tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1);
 
 new_measure:
 	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
@@ -2904,11 +2904,14 @@ static inline void tcp_complete_cwr(struct sock *sk)
 
 	/* Do not moderate cwnd if it's already undone in cwr or recovery. */
 	if (tp->undo_marker) {
-		if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
+		if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
 			tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
-		else /* PRR */
+			tp->snd_cwnd_stamp = tcp_time_stamp;
+		} else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) {
+			/* PRR algorithm. */
 			tp->snd_cwnd = tp->snd_ssthresh;
 			tp->snd_cwnd_stamp = tcp_time_stamp;
+		}
 	}
 	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
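
The tcp_complete_cwr() hunk only adopts snd_ssthresh as the new cwnd when ssthresh is actually finite (the PRR case), which avoids inflating cwnd when an undo left ssthresh at TCP_INFINITE_SSTHRESH. A simplified, hedged sketch of that branch structure with plain integers, not the real tcp_sock state:

#include <stdio.h>

#define SSTHRESH_INFINITE 0x7fffffff   /* stand-in for TCP_INFINITE_SSTHRESH */

struct tcp_state {
	int in_cwr;            /* stand-in for icsk_ca_state == TCP_CA_CWR */
	int undo_marker;
	unsigned int snd_cwnd;
	unsigned int snd_ssthresh;
};

/* Mirrors the new branch layout: only use ssthresh as cwnd when it is a
 * finite value, i.e. when PRR really set one. */
static void complete_cwr(struct tcp_state *tp)
{
	if (!tp->undo_marker)
		return;
	if (tp->in_cwr) {
		if (tp->snd_ssthresh < tp->snd_cwnd)   /* min(cwnd, ssthresh) */
			tp->snd_cwnd = tp->snd_ssthresh;
	} else if (tp->snd_ssthresh < SSTHRESH_INFINITE) {
		tp->snd_cwnd = tp->snd_ssthresh;
	}
}

int main(void)
{
	struct tcp_state tp = { .in_cwr = 0, .undo_marker = 1,
				.snd_cwnd = 10,
				.snd_ssthresh = SSTHRESH_INFINITE };

	complete_cwr(&tp);
	printf("cwnd stays at %u instead of jumping to ssthresh\n", tp.snd_cwnd);
	return 0;
}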
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 8a949f19deb6..a7f86a3cd502 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -146,9 +146,17 @@ static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
 	return udp_dump_one(&udp_table, in_skb, nlh, req);
 }
 
+static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
+		void *info)
+{
+	r->idiag_rqueue = sk_rmem_alloc_get(sk);
+	r->idiag_wqueue = sk_wmem_alloc_get(sk);
+}
+
 static const struct inet_diag_handler udp_diag_handler = {
 	.dump		 = udp_diag_dump,
 	.dump_one	 = udp_diag_dump_one,
+	.idiag_get_info  = udp_diag_get_info,
 	.idiag_type	 = IPPROTO_UDP,
 };
 
@@ -167,6 +175,7 @@ static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *
 static const struct inet_diag_handler udplite_diag_handler = {
 	.dump		 = udplite_diag_dump,
 	.dump_one	 = udplite_diag_dump_one,
+	.idiag_get_info  = udp_diag_get_info,
 	.idiag_type	 = IPPROTO_UDPLITE,
 };
 
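
With the idiag_get_info callback wired up, inet_diag (see the inet_diag.c hunk earlier) no longer zeroes idiag_rqueue/idiag_wqueue itself when there is no inet_connection_sock; it delegates to the protocol handler. A hedged userspace sketch of the same ops-table delegation, using hypothetical types rather than the inet_diag API:

#include <stdio.h>

struct diag_msg {
	unsigned int rqueue;
	unsigned int wqueue;
};

struct sock_stub {
	unsigned int rmem;
	unsigned int wmem;
};

/* Hypothetical handler table, in the spirit of inet_diag_handler. */
struct diag_handler {
	void (*get_info)(const struct sock_stub *sk, struct diag_msg *r);
};

static void udp_get_info(const struct sock_stub *sk, struct diag_msg *r)
{
	r->rqueue = sk->rmem;   /* like sk_rmem_alloc_get(sk) */
	r->wqueue = sk->wmem;   /* like sk_wmem_alloc_get(sk) */
}

static const struct diag_handler udp_handler = { .get_info = udp_get_info };

int main(void)
{
	struct sock_stub sk = { .rmem = 128, .wmem = 256 };
	struct diag_msg r;

	/* The generic code can now always call the handler instead of
	 * filling in zeroes when there is no connection-level state. */
	udp_handler.get_info(&sk, &r);
	printf("rqueue=%u wqueue=%u\n", r.rqueue, r.wqueue);
	return 0;
}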
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index adefe5225dca..c89a32fb5d5e 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -393,8 +393,9 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
 
 		daddr = lip->l2tp_addr.s_addr;
 	} else {
+		rc = -EDESTADDRREQ;
 		if (sk->sk_state != TCP_ESTABLISHED)
-			return -EDESTADDRREQ;
+			goto out;
 
 		daddr = inet->inet_daddr;
 		connected = 1;
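
The l2tp_ip_sendmsg() change preloads rc with the error code and branches to the common 'out' label instead of returning from the middle of the function, so the shared exit path is always taken. A minimal illustration of that pattern:

#include <errno.h>
#include <stdio.h>

static int send_one(int connected)
{
	int rc;

	/* Preload the error code, then fall through to the common exit path
	 * instead of returning directly from the middle of the function. */
	rc = -EDESTADDRREQ;
	if (!connected)
		goto out;

	rc = 0;   /* the actual transmit would happen here */
out:
	return rc;
}

int main(void)
{
	printf("unconnected: %d, connected: %d\n", send_one(0), send_one(1));
	return 0;
}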
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 851fb7dc893c..6cd89d414f22 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1234,7 +1234,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 			  struct sk_buff *skb);
 void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata);
 void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
-void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata);
+void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata);
 
 /* IBSS code */
 void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index ba86978dd561..3e05a8bfddf0 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -606,6 +606,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 		/* free all potentially still buffered bcast frames */
 		local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf);
 		skb_queue_purge(&sdata->u.ap.ps_bc_buf);
+	} else if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+		ieee80211_mgd_stop(sdata);
 	}
 
 	if (going_down)
@@ -768,8 +770,6 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
 
 	if (ieee80211_vif_is_mesh(&sdata->vif))
 		mesh_rmc_free(sdata);
-	else if (sdata->vif.type == NL80211_IFTYPE_STATION)
-		ieee80211_mgd_teardown(sdata);
 
 	flushed = sta_info_flush(local, sdata);
 	WARN_ON(flushed);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index c8836fa7d627..03f93f958fa4 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3523,7 +3523,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
 	return 0;
 }
 
-void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata)
+void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 44001c7e0e58..d67d36f57d78 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1159,7 +1159,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 		tx->sta = rcu_dereference(sdata->u.vlan.sta);
 		if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
 			return TX_DROP;
-	} else if (info->flags & IEEE80211_TX_CTL_INJECTED) {
+	} else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
+		   tx->sdata->control_port_protocol == tx->skb->protocol) {
 		tx->sta = sta_info_get_bss(sdata, hdr->addr1);
 	}
 	if (!tx->sta)
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index b5a5c7363f83..c8f36b96f44f 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1924,6 +1924,7 @@ protocol_fail:
control_fail:
 	ip_vs_estimator_net_cleanup(net);
estimator_fail:
+	net->ipvs = NULL;
 	return -ENOMEM;
 }
 
@@ -1936,6 +1937,7 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
 	ip_vs_control_net_cleanup(net);
 	ip_vs_estimator_net_cleanup(net);
 	IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
+	net->ipvs = NULL;
 }
 
 static void __net_exit __ip_vs_dev_cleanup(struct net *net)
@@ -1993,10 +1995,18 @@ static int __init ip_vs_init(void)
 		goto cleanup_dev;
 	}
 
+	ret = ip_vs_register_nl_ioctl();
+	if (ret < 0) {
+		pr_err("can't register netlink/ioctl.\n");
+		goto cleanup_hooks;
+	}
+
 	pr_info("ipvs loaded.\n");
 
 	return ret;
 
+cleanup_hooks:
+	nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
cleanup_dev:
 	unregister_pernet_device(&ipvs_core_dev_ops);
cleanup_sub:
@@ -2012,6 +2022,7 @@ exit:
 
 static void __exit ip_vs_cleanup(void)
 {
+	ip_vs_unregister_nl_ioctl();
 	nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
 	unregister_pernet_device(&ipvs_core_dev_ops);
 	unregister_pernet_subsys(&ipvs_core_ops);	/* free ip_vs struct */
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index e3707d2bd9c7..37b91996bfba 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3680,7 +3680,7 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
 	return 0;
 }
 
-void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net)
+void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -3692,7 +3692,7 @@ void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net)
 #else
 
 int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
-void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net) { }
+void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
 
 #endif
 
@@ -3750,21 +3750,10 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
 	free_percpu(ipvs->tot_stats.cpustats);
 }
 
-int __init ip_vs_control_init(void)
+int __init ip_vs_register_nl_ioctl(void)
 {
-	int idx;
 	int ret;
 
-	EnterFunction(2);
-
-	/* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
-	for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
-		INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
-		INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
-	}
-
-	smp_wmb();	/* Do we really need it now ? */
-
 	ret = nf_register_sockopt(&ip_vs_sockopts);
 	if (ret) {
 		pr_err("cannot register sockopt.\n");
@@ -3776,28 +3765,47 @@ int __init ip_vs_control_init(void)
 		pr_err("cannot register Generic Netlink interface.\n");
 		goto err_genl;
 	}
-
-	ret = register_netdevice_notifier(&ip_vs_dst_notifier);
-	if (ret < 0)
-		goto err_notf;
-
-	LeaveFunction(2);
 	return 0;
 
-err_notf:
-	ip_vs_genl_unregister();
err_genl:
 	nf_unregister_sockopt(&ip_vs_sockopts);
err_sock:
 	return ret;
 }
 
+void ip_vs_unregister_nl_ioctl(void)
+{
+	ip_vs_genl_unregister();
+	nf_unregister_sockopt(&ip_vs_sockopts);
+}
+
+int __init ip_vs_control_init(void)
+{
+	int idx;
+	int ret;
+
+	EnterFunction(2);
+
+	/* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
+	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
+		INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
+		INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
+	}
+
+	smp_wmb();	/* Do we really need it now ? */
+
+	ret = register_netdevice_notifier(&ip_vs_dst_notifier);
+	if (ret < 0)
+		return ret;
+
+	LeaveFunction(2);
+	return 0;
+}
+
 
 void ip_vs_control_cleanup(void)
 {
 	EnterFunction(2);
 	unregister_netdevice_notifier(&ip_vs_dst_notifier);
-	ip_vs_genl_unregister();
-	nf_unregister_sockopt(&ip_vs_sockopts);
 	LeaveFunction(2);
 }
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index debb8c71422c..510f2b5a5855 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -439,6 +439,8 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
 	struct ip_vs_app *app;
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
+	if (!ipvs)
+		return -ENOENT;
 	app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL);
 	if (!app)
 		return -ENOMEM;
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 1024466de124..9b0de9a0e08e 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -551,6 +551,9 @@ static int __net_init __ip_vs_lblc_init(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
+	if (!ipvs)
+		return -ENOENT;
+
 	if (!net_eq(net, &init_net)) {
 		ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
 						sizeof(vs_vars_table),
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 9261825a6579..9dcd39a48897 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -745,6 +745,9 @@ static int __net_init __ip_vs_lblcr_init(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
+	if (!ipvs)
+		return -ENOENT;
+
 	if (!net_eq(net, &init_net)) {
 		ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
 						sizeof(vs_vars_table),
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index 6eda11de8c05..fdc82ad9cc0e 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -59,9 +59,6 @@ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
 	return 0;
 }
 
-#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) || \
-    defined(CONFIG_IP_VS_PROTO_SCTP) || defined(CONFIG_IP_VS_PROTO_AH) || \
-    defined(CONFIG_IP_VS_PROTO_ESP)
 /*
  *	register an ipvs protocols netns related data
  */
@@ -81,12 +78,18 @@ register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
 	ipvs->proto_data_table[hash] = pd;
 	atomic_set(&pd->appcnt, 0);	/* Init app counter */
 
-	if (pp->init_netns != NULL)
-		pp->init_netns(net, pd);
+	if (pp->init_netns != NULL) {
+		int ret = pp->init_netns(net, pd);
+		if (ret) {
+			/* unlink an free proto data */
+			ipvs->proto_data_table[hash] = pd->next;
+			kfree(pd);
+			return ret;
+		}
+	}
 
 	return 0;
 }
-#endif
 
 /*
  *	unregister an ipvs protocol
@@ -316,22 +319,35 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
  */
 int __net_init ip_vs_protocol_net_init(struct net *net)
 {
+	int i, ret;
+	static struct ip_vs_protocol *protos[] = {
 #ifdef CONFIG_IP_VS_PROTO_TCP
-	register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp);
+		&ip_vs_protocol_tcp,
 #endif
 #ifdef CONFIG_IP_VS_PROTO_UDP
-	register_ip_vs_proto_netns(net, &ip_vs_protocol_udp);
+		&ip_vs_protocol_udp,
 #endif
 #ifdef CONFIG_IP_VS_PROTO_SCTP
-	register_ip_vs_proto_netns(net, &ip_vs_protocol_sctp);
+		&ip_vs_protocol_sctp,
 #endif
#ifdef CONFIG_IP_VS_PROTO_AH
-	register_ip_vs_proto_netns(net, &ip_vs_protocol_ah);
+		&ip_vs_protocol_ah,
 #endif
 #ifdef CONFIG_IP_VS_PROTO_ESP
-	register_ip_vs_proto_netns(net, &ip_vs_protocol_esp);
+		&ip_vs_protocol_esp,
 #endif
+	};
+
+	for (i = 0; i < ARRAY_SIZE(protos); i++) {
+		ret = register_ip_vs_proto_netns(net, protos[i]);
+		if (ret < 0)
+			goto cleanup;
+	}
 	return 0;
+
+cleanup:
+	ip_vs_protocol_net_cleanup(net);
+	return ret;
 }
 
 void __net_exit ip_vs_protocol_net_cleanup(struct net *net)
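
ip_vs_protocol_net_init() now walks an array of protocol descriptors, registering each one and unwinding on the first failure instead of ignoring errors. A self-contained sketch of that register-loop-with-cleanup pattern; the kernel version delegates cleanup to ip_vs_protocol_net_cleanup(), while this illustration unwinds only the entries it registered:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct proto {
	const char *name;
	int (*init)(void);
	void (*cleanup)(void);
};

static int ok_init(void)        { return 0; }
static int bad_init(void)       { return -1; }
static void noop_cleanup(void)  { }

/* Register every entry; on the first failure undo what already succeeded
 * and propagate the error. */
static int register_all(const struct proto *protos, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = protos[i].init();
		if (ret < 0)
			goto cleanup;
	}
	return 0;

cleanup:
	while (--i >= 0)
		protos[i].cleanup();
	return ret;
}

int main(void)
{
	const struct proto protos[] = {
		{ "tcp",  ok_init,  noop_cleanup },
		{ "udp",  bad_init, noop_cleanup },
		{ "sctp", ok_init,  noop_cleanup },
	};

	printf("register_all: %d\n", register_all(protos, ARRAY_SIZE(protos)));
	return 0;
}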
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 1fbf7a2816f5..9f3fb751c491 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -1090,7 +1090,7 @@ out:
 *	timeouts is netns related now.
 * ---------------------------------------------
 */
-static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -1098,6 +1098,9 @@ static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
 	spin_lock_init(&ipvs->sctp_app_lock);
 	pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts,
 							sizeof(sctp_timeouts));
+	if (!pd->timeout_table)
+		return -ENOMEM;
+	return 0;
 }
 
 static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index ef8641f7af83..cd609cc62721 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -677,7 +677,7 @@ void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp)
 *	timeouts is netns related now.
 * ---------------------------------------------
 */
-static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -685,7 +685,10 @@ static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
 	spin_lock_init(&ipvs->tcp_app_lock);
 	pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
 							sizeof(tcp_timeouts));
+	if (!pd->timeout_table)
+		return -ENOMEM;
 	pd->tcp_state_table = tcp_states;
+	return 0;
 }
 
 static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index f4b7262896bb..2fedb2dcb3d1 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -467,7 +467,7 @@ udp_state_transition(struct ip_vs_conn *cp, int direction,
 	cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
 }
 
-static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __udp_init(struct net *net, struct ip_vs_proto_data *pd)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -475,6 +475,9 @@ static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
 	spin_lock_init(&ipvs->udp_app_lock);
 	pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
 							sizeof(udp_timeouts));
+	if (!pd->timeout_table)
+		return -ENOMEM;
+	return 0;
 }
 
 static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 59530e93fa58..3746d8b9a478 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -227,7 +227,7 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
 	}
 
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-	if (info->timeout) {
+	if (info->timeout[0]) {
 		typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
 		struct nf_conn_timeout *timeout_ext;
 
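
The xt_CT fix matters because info->timeout is a fixed-size char array inside the target info, so 'if (info->timeout)' is always true; checking info->timeout[0] tests whether a timeout policy name was actually supplied. The pitfall in isolation:

#include <stdio.h>

struct ct_info {
	char timeout[32];   /* fixed-size array member, as in the target info */
};

int main(void)
{
	struct ct_info info = { .timeout = "" };

	/* An array member decays to a non-NULL pointer, so this branch is
	 * always taken, even when no name was given (compilers warn here). */
	if (info.timeout)
		printf("always reached\n");

	/* Checking the first character tests whether a name was supplied. */
	if (info.timeout[0])
		printf("timeout policy given: %s\n", info.timeout);
	else
		printf("no timeout policy given\n");
	return 0;
}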
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 231cd11aa6e2..a2a95aabf9c2 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -413,10 +413,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
 		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
 		    (skb->ip_summed == CHECKSUM_PARTIAL &&
-		     skb_checksum_help(skb))) {
-			sch->qstats.drops++;
-			return NET_XMIT_DROP;
-		}
+		     skb_checksum_help(skb)))
+			return qdisc_drop(skb, sch);
 
 		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
 	}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 67972462a543..adf2990acebf 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -176,16 +176,22 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, const char *dir_name)
 	return 0;
 }
 
-static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
-				struct super_block *sb)
+static inline int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
+{
+	if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) ||
+	    ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry))
+		return 1;
+	return 0;
+}
+
+static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
+				   struct super_block *sb)
 {
 	struct dentry *dentry;
 	int err = 0;
 
 	switch (event) {
 	case RPC_PIPEFS_MOUNT:
-		if (clnt->cl_program->pipe_dir_name == NULL)
-			break;
 		dentry = rpc_setup_pipedir_sb(sb, clnt,
 					      clnt->cl_program->pipe_dir_name);
 		BUG_ON(dentry == NULL);
@@ -208,6 +214,20 @@ static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
 	return err;
 }
 
+static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
+				struct super_block *sb)
+{
+	int error = 0;
+
+	for (;; clnt = clnt->cl_parent) {
+		if (!rpc_clnt_skip_event(clnt, event))
+			error = __rpc_clnt_handle_event(clnt, event, sb);
+		if (error || clnt == clnt->cl_parent)
+			break;
+	}
+	return error;
+}
+
 static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
 {
 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
@@ -215,10 +235,12 @@ static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
 
 	spin_lock(&sn->rpc_client_lock);
 	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
-		if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) ||
-		    ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry))
+		if (clnt->cl_program->pipe_dir_name == NULL)
+			break;
+		if (rpc_clnt_skip_event(clnt, event))
+			continue;
+		if (atomic_inc_not_zero(&clnt->cl_count) == 0)
 			continue;
-		atomic_inc(&clnt->cl_count);
 		spin_unlock(&sn->rpc_client_lock);
 		return clnt;
 	}
@@ -257,6 +279,14 @@ void rpc_clients_notifier_unregister(void)
 	return rpc_pipefs_notifier_unregister(&rpc_clients_block);
 }
 
+static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
+{
+	clnt->cl_nodelen = strlen(nodename);
+	if (clnt->cl_nodelen > UNX_MAXNODENAME)
+		clnt->cl_nodelen = UNX_MAXNODENAME;
+	memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
+}
+
 static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
 {
 	const struct rpc_program *program = args->program;
@@ -337,10 +367,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
 	}
 
 	/* save the nodename */
-	clnt->cl_nodelen = strlen(init_utsname()->nodename);
-	if (clnt->cl_nodelen > UNX_MAXNODENAME)
-		clnt->cl_nodelen = UNX_MAXNODENAME;
-	memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen);
+	rpc_clnt_set_nodename(clnt, utsname()->nodename);
 	rpc_register_client(clnt);
 	return clnt;
 
@@ -499,6 +526,7 @@ rpc_clone_client(struct rpc_clnt *clnt)
 	err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
 	if (err != 0)
 		goto out_no_path;
+	rpc_clnt_set_nodename(new, utsname()->nodename);
 	if (new->cl_auth)
 		atomic_inc(&new->cl_auth->au_count);
 	atomic_inc(&clnt->cl_count);
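
The new __rpc_pipefs_event() walks from a client up its cl_parent chain, handling the event for every client that needs it and stopping at the self-parented root or on the first error. A compact sketch of that walk; the struct and field names are invented, only the loop shape mirrors the patch:

#include <stdio.h>

struct clnt {
	const char *name;
	struct clnt *parent;     /* the root client points at itself */
	int already_handled;     /* stand-in for the cl_dentry checks */
};

static int handle_event(struct clnt *c)
{
	printf("handling event for %s\n", c->name);
	return 0;
}

/* Walk from a child client up to the root, stopping at the self-parented
 * root or on the first error. */
static int pipefs_event(struct clnt *clnt)
{
	int error = 0;

	for (;; clnt = clnt->parent) {
		if (!clnt->already_handled)
			error = handle_event(clnt);
		if (error || clnt == clnt->parent)
			break;
	}
	return error;
}

int main(void)
{
	struct clnt root = { "root", NULL, 0 };
	struct clnt child = { "child", &root, 0 };

	root.parent = &root;   /* self-parented root terminates the walk */
	return pipefs_event(&child);
}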
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 0af37fc46818..3b62cf288031 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1126,19 +1126,20 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
 		return -ENOMEM;
 	dprintk("RPC: sending pipefs MOUNT notification for net %p%s\n", net,
 								NET_NAME(net));
+	sn->pipefs_sb = sb;
 	err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
 					   RPC_PIPEFS_MOUNT,
 					   sb);
 	if (err)
 		goto err_depopulate;
 	sb->s_fs_info = get_net(net);
-	sn->pipefs_sb = sb;
 	return 0;
 
err_depopulate:
 	blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
 					   RPC_PIPEFS_UMOUNT,
 					   sb);
+	sn->pipefs_sb = NULL;
 	__rpc_depopulate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF);
 	return err;
 }
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 8adfc88e793a..3d6498af9adc 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -75,20 +75,21 @@ static struct pernet_operations sunrpc_net_ops = {
 static int __init
 init_sunrpc(void)
 {
-	int err = register_rpc_pipefs();
+	int err = rpc_init_mempool();
 	if (err)
 		goto out;
-	err = rpc_init_mempool();
-	if (err)
-		goto out2;
 	err = rpcauth_init_module();
 	if (err)
-		goto out3;
+		goto out2;
 
 	cache_initialize();
 
 	err = register_pernet_subsys(&sunrpc_net_ops);
 	if (err)
+		goto out3;
+
+	err = register_rpc_pipefs();
+	if (err)
 		goto out4;
 #ifdef RPC_DEBUG
 	rpc_register_sysctl();
@@ -98,11 +99,11 @@ init_sunrpc(void)
 	return 0;
 
out4:
-	rpcauth_remove_module();
+	unregister_pernet_subsys(&sunrpc_net_ops);
out3:
-	rpc_destroy_mempool();
+	rpcauth_remove_module();
out2:
-	unregister_rpc_pipefs();
+	rpc_destroy_mempool();
out:
 	return err;
 }
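
init_sunrpc() is reordered so register_rpc_pipefs() happens last, and the error labels now unwind exactly the steps that already succeeded, in reverse order. The classic goto-ladder shape, shown standalone with stub init/cleanup functions:

#include <stdio.h>

static int init_mempool(void)  { return 0; }
static int init_auth(void)     { return 0; }
static int init_pernet(void)   { return 0; }
static int init_pipefs(void)   { return -1; }   /* force a failure */

static void destroy_mempool(void) { printf("destroy mempool\n"); }
static void remove_auth(void)     { printf("remove auth\n"); }
static void exit_pernet(void)     { printf("unregister pernet\n"); }

/* Each label undoes exactly the steps that succeeded, in reverse order,
 * which is the ordering the reworked init_sunrpc() restores. */
static int init_all(void)
{
	int err;

	err = init_mempool();
	if (err)
		goto out;
	err = init_auth();
	if (err)
		goto out2;
	err = init_pernet();
	if (err)
		goto out3;
	err = init_pipefs();
	if (err)
		goto out4;
	return 0;

out4:
	exit_pernet();
out3:
	remove_auth();
out2:
	destroy_mempool();
out:
	return err;
}

int main(void)
{
	printf("init_all: %d\n", init_all());
	return 0;
}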