diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-05-23 11:39:24 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-05-23 11:39:24 -0400 |
commit | 53ee7569ce8beb3fd3fc0817116c29298d72353f (patch) | |
tree | f3dcce10508c2126347e40b468fd6d3c3cc7006a /net | |
parent | 4d9dec4db2efbd7edb549bd02373995b67496983 (diff) | |
parent | 1b6e2ceb4745b5838cb94463131d19dbea6cf0e3 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (27 commits)
bnx2x: allow device properly initialize after hotplug
bnx2x: fix DMAE timeout according to hw specifications
bnx2x: properly handle CFC DEL in cnic flow
bnx2x: call dev_kfree_skb_any instead of dev_kfree_skb
net: filter: move forward declarations to avoid compile warnings
pktgen: refactor pg_init() code
pktgen: use vzalloc_node() instead of vmalloc_node() + memset()
net: skb_trim explicitly check the linearity instead of data_len
ipv4: Give backtrace in ip_rt_bug().
net: avoid synchronize_rcu() in dev_deactivate_many
net: remove synchronize_net() from netdev_set_master()
rtnetlink: ignore NETDEV_RELEASE and NETDEV_JOIN event
net: rename NETDEV_BONDING_DESLAVE to NETDEV_RELEASE
bridge: call NETDEV_JOIN notifiers when add a slave
netpoll: disable netpoll when enslave a device
macvlan: Forward unicast frames in bridge mode to lowerdev
net: Remove linux/prefetch.h include from linux/skbuff.h
ipv4: Include linux/prefetch.h in fib_trie.c
netlabel: Remove prefetches from list handlers.
drivers/net: add prefetch header for prefetch users
...
Fixed up prefetch parts: removed a few duplicate prefetch.h includes,
fixed the location of the igb prefetch.h, took my version of the
skbuff.h code without the extra parentheses etc.
Diffstat (limited to 'net')
-rw-r--r-- | net/bridge/br_if.c | 3 | ||||
-rw-r--r-- | net/caif/caif_dev.c | 7 | ||||
-rw-r--r-- | net/caif/caif_socket.c | 13 | ||||
-rw-r--r-- | net/caif/cfcnfg.c | 44 | ||||
-rw-r--r-- | net/caif/cfctrl.c | 44 | ||||
-rw-r--r-- | net/caif/cfmuxl.c | 49 | ||||
-rw-r--r-- | net/core/dev.c | 4 | ||||
-rw-r--r-- | net/core/pktgen.c | 22 | ||||
-rw-r--r-- | net/core/rtnetlink.c | 2 | ||||
-rw-r--r-- | net/ipv4/route.c | 1 | ||||
-rw-r--r-- | net/sched/sch_generic.c | 17 |
11 files changed, 134 insertions, 72 deletions
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 5dbdfdfc3a34..1bacca4cb676 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -147,6 +147,7 @@ static void del_nbp(struct net_bridge_port *p) | |||
147 | dev->priv_flags &= ~IFF_BRIDGE_PORT; | 147 | dev->priv_flags &= ~IFF_BRIDGE_PORT; |
148 | 148 | ||
149 | netdev_rx_handler_unregister(dev); | 149 | netdev_rx_handler_unregister(dev); |
150 | synchronize_net(); | ||
150 | 151 | ||
151 | netdev_set_master(dev, NULL); | 152 | netdev_set_master(dev, NULL); |
152 | 153 | ||
@@ -338,6 +339,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) | |||
338 | if (IS_ERR(p)) | 339 | if (IS_ERR(p)) |
339 | return PTR_ERR(p); | 340 | return PTR_ERR(p); |
340 | 341 | ||
342 | call_netdevice_notifiers(NETDEV_JOIN, dev); | ||
343 | |||
341 | err = dev_set_promiscuity(dev, 1); | 344 | err = dev_set_promiscuity(dev, 1); |
342 | if (err) | 345 | if (err) |
343 | goto put_back; | 346 | goto put_back; |
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index 366ca0fb7a29..682c0fedf360 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c | |||
@@ -142,6 +142,7 @@ static int receive(struct sk_buff *skb, struct net_device *dev, | |||
142 | { | 142 | { |
143 | struct cfpkt *pkt; | 143 | struct cfpkt *pkt; |
144 | struct caif_device_entry *caifd; | 144 | struct caif_device_entry *caifd; |
145 | int err; | ||
145 | 146 | ||
146 | pkt = cfpkt_fromnative(CAIF_DIR_IN, skb); | 147 | pkt = cfpkt_fromnative(CAIF_DIR_IN, skb); |
147 | 148 | ||
@@ -159,7 +160,11 @@ static int receive(struct sk_buff *skb, struct net_device *dev, | |||
159 | caifd_hold(caifd); | 160 | caifd_hold(caifd); |
160 | rcu_read_unlock(); | 161 | rcu_read_unlock(); |
161 | 162 | ||
162 | caifd->layer.up->receive(caifd->layer.up, pkt); | 163 | err = caifd->layer.up->receive(caifd->layer.up, pkt); |
164 | |||
165 | /* For -EILSEQ the packet is not freed, so free it now */ | ||
166 | if (err == -EILSEQ) | ||
167 | cfpkt_destroy(pkt); | ||
163 | 168 | ||
164 | /* Release reference to stack upwards */ | 169 | /* Release reference to stack upwards */ |
165 | caifd_put(caifd); | 170 | caifd_put(caifd); |
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index b840395ced1d..a98628086452 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <linux/uaccess.h> | 19 | #include <linux/uaccess.h> |
20 | #include <linux/debugfs.h> | 20 | #include <linux/debugfs.h> |
21 | #include <linux/caif/caif_socket.h> | 21 | #include <linux/caif/caif_socket.h> |
22 | #include <asm/atomic.h> | 22 | #include <linux/atomic.h> |
23 | #include <net/sock.h> | 23 | #include <net/sock.h> |
24 | #include <net/tcp_states.h> | 24 | #include <net/tcp_states.h> |
25 | #include <net/caif/caif_layer.h> | 25 | #include <net/caif/caif_layer.h> |
@@ -816,6 +816,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr, | |||
816 | if (sk->sk_shutdown & SHUTDOWN_MASK) { | 816 | if (sk->sk_shutdown & SHUTDOWN_MASK) { |
817 | /* Allow re-connect after SHUTDOWN_IND */ | 817 | /* Allow re-connect after SHUTDOWN_IND */ |
818 | caif_disconnect_client(sock_net(sk), &cf_sk->layer); | 818 | caif_disconnect_client(sock_net(sk), &cf_sk->layer); |
819 | caif_free_client(&cf_sk->layer); | ||
819 | break; | 820 | break; |
820 | } | 821 | } |
821 | /* No reconnect on a seqpacket socket */ | 822 | /* No reconnect on a seqpacket socket */ |
@@ -926,7 +927,6 @@ static int caif_release(struct socket *sock) | |||
926 | { | 927 | { |
927 | struct sock *sk = sock->sk; | 928 | struct sock *sk = sock->sk; |
928 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | 929 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); |
929 | int res = 0; | ||
930 | 930 | ||
931 | if (!sk) | 931 | if (!sk) |
932 | return 0; | 932 | return 0; |
@@ -953,10 +953,7 @@ static int caif_release(struct socket *sock) | |||
953 | sk->sk_state = CAIF_DISCONNECTED; | 953 | sk->sk_state = CAIF_DISCONNECTED; |
954 | sk->sk_shutdown = SHUTDOWN_MASK; | 954 | sk->sk_shutdown = SHUTDOWN_MASK; |
955 | 955 | ||
956 | if (cf_sk->sk.sk_socket->state == SS_CONNECTED || | 956 | caif_disconnect_client(sock_net(sk), &cf_sk->layer); |
957 | cf_sk->sk.sk_socket->state == SS_CONNECTING) | ||
958 | res = caif_disconnect_client(sock_net(sk), &cf_sk->layer); | ||
959 | |||
960 | cf_sk->sk.sk_socket->state = SS_DISCONNECTING; | 957 | cf_sk->sk.sk_socket->state = SS_DISCONNECTING; |
961 | wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP); | 958 | wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP); |
962 | 959 | ||
@@ -964,7 +961,7 @@ static int caif_release(struct socket *sock) | |||
964 | sk_stream_kill_queues(&cf_sk->sk); | 961 | sk_stream_kill_queues(&cf_sk->sk); |
965 | release_sock(sk); | 962 | release_sock(sk); |
966 | sock_put(sk); | 963 | sock_put(sk); |
967 | return res; | 964 | return 0; |
968 | } | 965 | } |
969 | 966 | ||
970 | /* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */ | 967 | /* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */ |
@@ -1120,7 +1117,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol, | |||
1120 | set_rx_flow_on(cf_sk); | 1117 | set_rx_flow_on(cf_sk); |
1121 | 1118 | ||
1122 | /* Set default options on configuration */ | 1119 | /* Set default options on configuration */ |
1123 | cf_sk->sk.sk_priority= CAIF_PRIO_NORMAL; | 1120 | cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL; |
1124 | cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; | 1121 | cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; |
1125 | cf_sk->conn_req.protocol = protocol; | 1122 | cf_sk->conn_req.protocol = protocol; |
1126 | /* Increase the number of sockets created. */ | 1123 | /* Increase the number of sockets created. */ |
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c index 351c2ca7e7b9..52fe33bee029 100644 --- a/net/caif/cfcnfg.c +++ b/net/caif/cfcnfg.c | |||
@@ -182,39 +182,26 @@ static int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi) | |||
182 | 182 | ||
183 | int caif_disconnect_client(struct net *net, struct cflayer *adap_layer) | 183 | int caif_disconnect_client(struct net *net, struct cflayer *adap_layer) |
184 | { | 184 | { |
185 | u8 channel_id = 0; | 185 | u8 channel_id; |
186 | int ret = 0; | ||
187 | struct cflayer *servl = NULL; | ||
188 | struct cfcnfg *cfg = get_cfcnfg(net); | 186 | struct cfcnfg *cfg = get_cfcnfg(net); |
189 | 187 | ||
190 | caif_assert(adap_layer != NULL); | 188 | caif_assert(adap_layer != NULL); |
191 | |||
192 | channel_id = adap_layer->id; | ||
193 | if (adap_layer->dn == NULL || channel_id == 0) { | ||
194 | pr_err("adap_layer->dn == NULL or adap_layer->id is 0\n"); | ||
195 | ret = -ENOTCONN; | ||
196 | goto end; | ||
197 | } | ||
198 | |||
199 | servl = cfmuxl_remove_uplayer(cfg->mux, channel_id); | ||
200 | if (servl == NULL) { | ||
201 | pr_err("PROTOCOL ERROR - " | ||
202 | "Error removing service_layer Channel_Id(%d)", | ||
203 | channel_id); | ||
204 | ret = -EINVAL; | ||
205 | goto end; | ||
206 | } | ||
207 | |||
208 | ret = cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer); | ||
209 | |||
210 | end: | ||
211 | cfctrl_cancel_req(cfg->ctrl, adap_layer); | 189 | cfctrl_cancel_req(cfg->ctrl, adap_layer); |
190 | channel_id = adap_layer->id; | ||
191 | if (channel_id != 0) { | ||
192 | struct cflayer *servl; | ||
193 | servl = cfmuxl_remove_uplayer(cfg->mux, channel_id); | ||
194 | if (servl != NULL) | ||
195 | layer_set_up(servl, NULL); | ||
196 | } else | ||
197 | pr_debug("nothing to disconnect\n"); | ||
198 | cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer); | ||
212 | 199 | ||
213 | /* Do RCU sync before initiating cleanup */ | 200 | /* Do RCU sync before initiating cleanup */ |
214 | synchronize_rcu(); | 201 | synchronize_rcu(); |
215 | if (adap_layer->ctrlcmd != NULL) | 202 | if (adap_layer->ctrlcmd != NULL) |
216 | adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0); | 203 | adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0); |
217 | return ret; | 204 | return 0; |
218 | 205 | ||
219 | } | 206 | } |
220 | EXPORT_SYMBOL(caif_disconnect_client); | 207 | EXPORT_SYMBOL(caif_disconnect_client); |
@@ -400,6 +387,14 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv, | |||
400 | struct cfcnfg_phyinfo *phyinfo; | 387 | struct cfcnfg_phyinfo *phyinfo; |
401 | struct net_device *netdev; | 388 | struct net_device *netdev; |
402 | 389 | ||
390 | if (channel_id == 0) { | ||
391 | pr_warn("received channel_id zero\n"); | ||
392 | if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL) | ||
393 | adapt_layer->ctrlcmd(adapt_layer, | ||
394 | CAIF_CTRLCMD_INIT_FAIL_RSP, 0); | ||
395 | return; | ||
396 | } | ||
397 | |||
403 | rcu_read_lock(); | 398 | rcu_read_lock(); |
404 | 399 | ||
405 | if (adapt_layer == NULL) { | 400 | if (adapt_layer == NULL) { |
@@ -523,7 +518,6 @@ got_phyid: | |||
523 | phyinfo->use_stx = stx; | 518 | phyinfo->use_stx = stx; |
524 | phyinfo->use_fcs = fcs; | 519 | phyinfo->use_fcs = fcs; |
525 | 520 | ||
526 | phy_layer->type = phy_type; | ||
527 | frml = cffrml_create(phyid, fcs); | 521 | frml = cffrml_create(phyid, fcs); |
528 | 522 | ||
529 | if (!frml) { | 523 | if (!frml) { |
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c index 0c00a6015dda..e22671bed669 100644 --- a/net/caif/cfctrl.c +++ b/net/caif/cfctrl.c | |||
@@ -178,20 +178,23 @@ static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl) | |||
178 | void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid) | 178 | void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid) |
179 | { | 179 | { |
180 | struct cfctrl *cfctrl = container_obj(layer); | 180 | struct cfctrl *cfctrl = container_obj(layer); |
181 | int ret; | ||
182 | struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); | 181 | struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); |
182 | struct cflayer *dn = cfctrl->serv.layer.dn; | ||
183 | if (!pkt) { | 183 | if (!pkt) { |
184 | pr_warn("Out of memory\n"); | 184 | pr_warn("Out of memory\n"); |
185 | return; | 185 | return; |
186 | } | 186 | } |
187 | if (!dn) { | ||
188 | pr_debug("not able to send enum request\n"); | ||
189 | return; | ||
190 | } | ||
187 | caif_assert(offsetof(struct cfctrl, serv.layer) == 0); | 191 | caif_assert(offsetof(struct cfctrl, serv.layer) == 0); |
188 | init_info(cfpkt_info(pkt), cfctrl); | 192 | init_info(cfpkt_info(pkt), cfctrl); |
189 | cfpkt_info(pkt)->dev_info->id = physlinkid; | 193 | cfpkt_info(pkt)->dev_info->id = physlinkid; |
190 | cfctrl->serv.dev_info.id = physlinkid; | 194 | cfctrl->serv.dev_info.id = physlinkid; |
191 | cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM); | 195 | cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM); |
192 | cfpkt_addbdy(pkt, physlinkid); | 196 | cfpkt_addbdy(pkt, physlinkid); |
193 | ret = | 197 | dn->transmit(dn, pkt); |
194 | cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); | ||
195 | } | 198 | } |
196 | 199 | ||
197 | int cfctrl_linkup_request(struct cflayer *layer, | 200 | int cfctrl_linkup_request(struct cflayer *layer, |
@@ -206,6 +209,12 @@ int cfctrl_linkup_request(struct cflayer *layer, | |||
206 | int ret; | 209 | int ret; |
207 | char utility_name[16]; | 210 | char utility_name[16]; |
208 | struct cfpkt *pkt; | 211 | struct cfpkt *pkt; |
212 | struct cflayer *dn = cfctrl->serv.layer.dn; | ||
213 | |||
214 | if (!dn) { | ||
215 | pr_debug("not able to send linkup request\n"); | ||
216 | return -ENODEV; | ||
217 | } | ||
209 | 218 | ||
210 | if (cfctrl_cancel_req(layer, user_layer) > 0) { | 219 | if (cfctrl_cancel_req(layer, user_layer) > 0) { |
211 | /* Slight Paranoia, check if already connecting */ | 220 | /* Slight Paranoia, check if already connecting */ |
@@ -282,7 +291,7 @@ int cfctrl_linkup_request(struct cflayer *layer, | |||
282 | */ | 291 | */ |
283 | cfpkt_info(pkt)->dev_info->id = param->phyid; | 292 | cfpkt_info(pkt)->dev_info->id = param->phyid; |
284 | ret = | 293 | ret = |
285 | cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); | 294 | dn->transmit(dn, pkt); |
286 | if (ret < 0) { | 295 | if (ret < 0) { |
287 | int count; | 296 | int count; |
288 | 297 | ||
@@ -301,15 +310,23 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid, | |||
301 | int ret; | 310 | int ret; |
302 | struct cfctrl *cfctrl = container_obj(layer); | 311 | struct cfctrl *cfctrl = container_obj(layer); |
303 | struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); | 312 | struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); |
313 | struct cflayer *dn = cfctrl->serv.layer.dn; | ||
314 | |||
304 | if (!pkt) { | 315 | if (!pkt) { |
305 | pr_warn("Out of memory\n"); | 316 | pr_warn("Out of memory\n"); |
306 | return -ENOMEM; | 317 | return -ENOMEM; |
307 | } | 318 | } |
319 | |||
320 | if (!dn) { | ||
321 | pr_debug("not able to send link-down request\n"); | ||
322 | return -ENODEV; | ||
323 | } | ||
324 | |||
308 | cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY); | 325 | cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY); |
309 | cfpkt_addbdy(pkt, channelid); | 326 | cfpkt_addbdy(pkt, channelid); |
310 | init_info(cfpkt_info(pkt), cfctrl); | 327 | init_info(cfpkt_info(pkt), cfctrl); |
311 | ret = | 328 | ret = |
312 | cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); | 329 | dn->transmit(dn, pkt); |
313 | #ifndef CAIF_NO_LOOP | 330 | #ifndef CAIF_NO_LOOP |
314 | cfctrl->loop_linkused[channelid] = 0; | 331 | cfctrl->loop_linkused[channelid] = 0; |
315 | #endif | 332 | #endif |
@@ -351,7 +368,8 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt) | |||
351 | cfpkt_extr_head(pkt, &cmdrsp, 1); | 368 | cfpkt_extr_head(pkt, &cmdrsp, 1); |
352 | cmd = cmdrsp & CFCTRL_CMD_MASK; | 369 | cmd = cmdrsp & CFCTRL_CMD_MASK; |
353 | if (cmd != CFCTRL_CMD_LINK_ERR | 370 | if (cmd != CFCTRL_CMD_LINK_ERR |
354 | && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) { | 371 | && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp) |
372 | && CFCTRL_ERR_BIT != (CFCTRL_ERR_BIT & cmdrsp)) { | ||
355 | if (handle_loop(cfctrl, cmd, pkt) != 0) | 373 | if (handle_loop(cfctrl, cmd, pkt) != 0) |
356 | cmdrsp |= CFCTRL_ERR_BIT; | 374 | cmdrsp |= CFCTRL_ERR_BIT; |
357 | } | 375 | } |
@@ -477,7 +495,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt) | |||
477 | cfpkt_extr_head(pkt, ¶m, len); | 495 | cfpkt_extr_head(pkt, ¶m, len); |
478 | break; | 496 | break; |
479 | default: | 497 | default: |
480 | pr_warn("Request setup - invalid link type (%d)\n", | 498 | pr_warn("Request setup, invalid type (%d)\n", |
481 | serv); | 499 | serv); |
482 | goto error; | 500 | goto error; |
483 | } | 501 | } |
@@ -489,7 +507,8 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt) | |||
489 | 507 | ||
490 | if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) || | 508 | if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) || |
491 | cfpkt_erroneous(pkt)) { | 509 | cfpkt_erroneous(pkt)) { |
492 | pr_err("Invalid O/E bit or parse error on CAIF control channel\n"); | 510 | pr_err("Invalid O/E bit or parse error " |
511 | "on CAIF control channel\n"); | ||
493 | cfctrl->res.reject_rsp(cfctrl->serv.layer.up, | 512 | cfctrl->res.reject_rsp(cfctrl->serv.layer.up, |
494 | 0, | 513 | 0, |
495 | req ? req->client_layer | 514 | req ? req->client_layer |
@@ -550,9 +569,8 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | |||
550 | case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: | 569 | case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: |
551 | case CAIF_CTRLCMD_FLOW_OFF_IND: | 570 | case CAIF_CTRLCMD_FLOW_OFF_IND: |
552 | spin_lock_bh(&this->info_list_lock); | 571 | spin_lock_bh(&this->info_list_lock); |
553 | if (!list_empty(&this->list)) { | 572 | if (!list_empty(&this->list)) |
554 | pr_debug("Received flow off in control layer\n"); | 573 | pr_debug("Received flow off in control layer\n"); |
555 | } | ||
556 | spin_unlock_bh(&this->info_list_lock); | 574 | spin_unlock_bh(&this->info_list_lock); |
557 | break; | 575 | break; |
558 | case _CAIF_CTRLCMD_PHYIF_DOWN_IND: { | 576 | case _CAIF_CTRLCMD_PHYIF_DOWN_IND: { |
@@ -587,16 +605,16 @@ static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt) | |||
587 | case CFCTRL_CMD_LINK_SETUP: | 605 | case CFCTRL_CMD_LINK_SETUP: |
588 | spin_lock_bh(&ctrl->loop_linkid_lock); | 606 | spin_lock_bh(&ctrl->loop_linkid_lock); |
589 | if (!dec) { | 607 | if (!dec) { |
590 | for (linkid = last_linkid + 1; linkid < 255; linkid++) | 608 | for (linkid = last_linkid + 1; linkid < 254; linkid++) |
591 | if (!ctrl->loop_linkused[linkid]) | 609 | if (!ctrl->loop_linkused[linkid]) |
592 | goto found; | 610 | goto found; |
593 | } | 611 | } |
594 | dec = 1; | 612 | dec = 1; |
595 | for (linkid = last_linkid - 1; linkid > 0; linkid--) | 613 | for (linkid = last_linkid - 1; linkid > 1; linkid--) |
596 | if (!ctrl->loop_linkused[linkid]) | 614 | if (!ctrl->loop_linkused[linkid]) |
597 | goto found; | 615 | goto found; |
598 | spin_unlock_bh(&ctrl->loop_linkid_lock); | 616 | spin_unlock_bh(&ctrl->loop_linkid_lock); |
599 | 617 | return -1; | |
600 | found: | 618 | found: |
601 | if (linkid < 10) | 619 | if (linkid < 10) |
602 | dec = 0; | 620 | dec = 0; |
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c index 2a56df7e0a4b..3a66b8c10e09 100644 --- a/net/caif/cfmuxl.c +++ b/net/caif/cfmuxl.c | |||
@@ -62,16 +62,6 @@ struct cflayer *cfmuxl_create(void) | |||
62 | return &this->layer; | 62 | return &this->layer; |
63 | } | 63 | } |
64 | 64 | ||
65 | int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid) | ||
66 | { | ||
67 | struct cfmuxl *muxl = container_obj(layr); | ||
68 | |||
69 | spin_lock_bh(&muxl->receive_lock); | ||
70 | list_add_rcu(&up->node, &muxl->srvl_list); | ||
71 | spin_unlock_bh(&muxl->receive_lock); | ||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid) | 65 | int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid) |
76 | { | 66 | { |
77 | struct cfmuxl *muxl = (struct cfmuxl *) layr; | 67 | struct cfmuxl *muxl = (struct cfmuxl *) layr; |
@@ -93,6 +83,24 @@ static struct cflayer *get_from_id(struct list_head *list, u16 id) | |||
93 | return NULL; | 83 | return NULL; |
94 | } | 84 | } |
95 | 85 | ||
86 | int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid) | ||
87 | { | ||
88 | struct cfmuxl *muxl = container_obj(layr); | ||
89 | struct cflayer *old; | ||
90 | |||
91 | spin_lock_bh(&muxl->receive_lock); | ||
92 | |||
93 | /* Two entries with same id is wrong, so remove old layer from mux */ | ||
94 | old = get_from_id(&muxl->srvl_list, linkid); | ||
95 | if (old != NULL) | ||
96 | list_del_rcu(&old->node); | ||
97 | |||
98 | list_add_rcu(&up->node, &muxl->srvl_list); | ||
99 | spin_unlock_bh(&muxl->receive_lock); | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
96 | struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid) | 104 | struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid) |
97 | { | 105 | { |
98 | struct cfmuxl *muxl = container_obj(layr); | 106 | struct cfmuxl *muxl = container_obj(layr); |
@@ -146,6 +154,11 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id) | |||
146 | struct cfmuxl *muxl = container_obj(layr); | 154 | struct cfmuxl *muxl = container_obj(layr); |
147 | int idx = id % UP_CACHE_SIZE; | 155 | int idx = id % UP_CACHE_SIZE; |
148 | 156 | ||
157 | if (id == 0) { | ||
158 | pr_warn("Trying to remove control layer\n"); | ||
159 | return NULL; | ||
160 | } | ||
161 | |||
149 | spin_lock_bh(&muxl->receive_lock); | 162 | spin_lock_bh(&muxl->receive_lock); |
150 | up = get_from_id(&muxl->srvl_list, id); | 163 | up = get_from_id(&muxl->srvl_list, id); |
151 | if (up == NULL) | 164 | if (up == NULL) |
@@ -235,12 +248,26 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | |||
235 | { | 248 | { |
236 | struct cfmuxl *muxl = container_obj(layr); | 249 | struct cfmuxl *muxl = container_obj(layr); |
237 | struct cflayer *layer; | 250 | struct cflayer *layer; |
251 | int idx; | ||
238 | 252 | ||
239 | rcu_read_lock(); | 253 | rcu_read_lock(); |
240 | list_for_each_entry_rcu(layer, &muxl->srvl_list, node) { | 254 | list_for_each_entry_rcu(layer, &muxl->srvl_list, node) { |
241 | if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) | 255 | |
256 | if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) { | ||
257 | |||
258 | if ((ctrl == _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND || | ||
259 | ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) && | ||
260 | layer->id != 0) { | ||
261 | |||
262 | idx = layer->id % UP_CACHE_SIZE; | ||
263 | spin_lock_bh(&muxl->receive_lock); | ||
264 | rcu_assign_pointer(muxl->up_cache[idx], NULL); | ||
265 | list_del_rcu(&layer->node); | ||
266 | spin_unlock_bh(&muxl->receive_lock); | ||
267 | } | ||
242 | /* NOTE: ctrlcmd is not allowed to block */ | 268 | /* NOTE: ctrlcmd is not allowed to block */ |
243 | layer->ctrlcmd(layer, ctrl, phyid); | 269 | layer->ctrlcmd(layer, ctrl, phyid); |
270 | } | ||
244 | } | 271 | } |
245 | rcu_read_unlock(); | 272 | rcu_read_unlock(); |
246 | } | 273 | } |
diff --git a/net/core/dev.c b/net/core/dev.c index d94537914a71..bcb05cb799c1 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -4294,10 +4294,8 @@ int netdev_set_master(struct net_device *slave, struct net_device *master) | |||
4294 | 4294 | ||
4295 | slave->master = master; | 4295 | slave->master = master; |
4296 | 4296 | ||
4297 | if (old) { | 4297 | if (old) |
4298 | synchronize_net(); | ||
4299 | dev_put(old); | 4298 | dev_put(old); |
4300 | } | ||
4301 | return 0; | 4299 | return 0; |
4302 | } | 4300 | } |
4303 | EXPORT_SYMBOL(netdev_set_master); | 4301 | EXPORT_SYMBOL(netdev_set_master); |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 67870e9fd097..f76079cd750c 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -3544,13 +3544,12 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname) | |||
3544 | return -ENOMEM; | 3544 | return -ENOMEM; |
3545 | 3545 | ||
3546 | strcpy(pkt_dev->odevname, ifname); | 3546 | strcpy(pkt_dev->odevname, ifname); |
3547 | pkt_dev->flows = vmalloc_node(MAX_CFLOWS * sizeof(struct flow_state), | 3547 | pkt_dev->flows = vzalloc_node(MAX_CFLOWS * sizeof(struct flow_state), |
3548 | node); | 3548 | node); |
3549 | if (pkt_dev->flows == NULL) { | 3549 | if (pkt_dev->flows == NULL) { |
3550 | kfree(pkt_dev); | 3550 | kfree(pkt_dev); |
3551 | return -ENOMEM; | 3551 | return -ENOMEM; |
3552 | } | 3552 | } |
3553 | memset(pkt_dev->flows, 0, MAX_CFLOWS * sizeof(struct flow_state)); | ||
3554 | 3553 | ||
3555 | pkt_dev->removal_mark = 0; | 3554 | pkt_dev->removal_mark = 0; |
3556 | pkt_dev->min_pkt_size = ETH_ZLEN; | 3555 | pkt_dev->min_pkt_size = ETH_ZLEN; |
@@ -3708,6 +3707,7 @@ static int __init pg_init(void) | |||
3708 | { | 3707 | { |
3709 | int cpu; | 3708 | int cpu; |
3710 | struct proc_dir_entry *pe; | 3709 | struct proc_dir_entry *pe; |
3710 | int ret = 0; | ||
3711 | 3711 | ||
3712 | pr_info("%s", version); | 3712 | pr_info("%s", version); |
3713 | 3713 | ||
@@ -3718,11 +3718,10 @@ static int __init pg_init(void) | |||
3718 | pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops); | 3718 | pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops); |
3719 | if (pe == NULL) { | 3719 | if (pe == NULL) { |
3720 | pr_err("ERROR: cannot create %s procfs entry\n", PGCTRL); | 3720 | pr_err("ERROR: cannot create %s procfs entry\n", PGCTRL); |
3721 | proc_net_remove(&init_net, PG_PROC_DIR); | 3721 | ret = -EINVAL; |
3722 | return -EINVAL; | 3722 | goto remove_dir; |
3723 | } | 3723 | } |
3724 | 3724 | ||
3725 | /* Register us to receive netdevice events */ | ||
3726 | register_netdevice_notifier(&pktgen_notifier_block); | 3725 | register_netdevice_notifier(&pktgen_notifier_block); |
3727 | 3726 | ||
3728 | for_each_online_cpu(cpu) { | 3727 | for_each_online_cpu(cpu) { |
@@ -3736,13 +3735,18 @@ static int __init pg_init(void) | |||
3736 | 3735 | ||
3737 | if (list_empty(&pktgen_threads)) { | 3736 | if (list_empty(&pktgen_threads)) { |
3738 | pr_err("ERROR: Initialization failed for all threads\n"); | 3737 | pr_err("ERROR: Initialization failed for all threads\n"); |
3739 | unregister_netdevice_notifier(&pktgen_notifier_block); | 3738 | ret = -ENODEV; |
3740 | remove_proc_entry(PGCTRL, pg_proc_dir); | 3739 | goto unregister; |
3741 | proc_net_remove(&init_net, PG_PROC_DIR); | ||
3742 | return -ENODEV; | ||
3743 | } | 3740 | } |
3744 | 3741 | ||
3745 | return 0; | 3742 | return 0; |
3743 | |||
3744 | unregister: | ||
3745 | unregister_netdevice_notifier(&pktgen_notifier_block); | ||
3746 | remove_proc_entry(PGCTRL, pg_proc_dir); | ||
3747 | remove_dir: | ||
3748 | proc_net_remove(&init_net, PG_PROC_DIR); | ||
3749 | return ret; | ||
3746 | } | 3750 | } |
3747 | 3751 | ||
3748 | static void __exit pg_cleanup(void) | 3752 | static void __exit pg_cleanup(void) |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d2ba2597c75a..d1644e317e70 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -1956,6 +1956,8 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi | |||
1956 | case NETDEV_GOING_DOWN: | 1956 | case NETDEV_GOING_DOWN: |
1957 | case NETDEV_UNREGISTER: | 1957 | case NETDEV_UNREGISTER: |
1958 | case NETDEV_UNREGISTER_BATCH: | 1958 | case NETDEV_UNREGISTER_BATCH: |
1959 | case NETDEV_RELEASE: | ||
1960 | case NETDEV_JOIN: | ||
1959 | break; | 1961 | break; |
1960 | default: | 1962 | default: |
1961 | rtmsg_ifinfo(RTM_NEWLINK, dev, 0); | 1963 | rtmsg_ifinfo(RTM_NEWLINK, dev, 0); |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index b24d58e6bbcd..52b0b956508b 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1665,6 +1665,7 @@ static int ip_rt_bug(struct sk_buff *skb) | |||
1665 | &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, | 1665 | &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, |
1666 | skb->dev ? skb->dev->name : "?"); | 1666 | skb->dev ? skb->dev->name : "?"); |
1667 | kfree_skb(skb); | 1667 | kfree_skb(skb); |
1668 | WARN_ON(1); | ||
1668 | return 0; | 1669 | return 0; |
1669 | } | 1670 | } |
1670 | 1671 | ||
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index c84b65920d1b..b1721d71c27c 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -815,9 +815,17 @@ static bool some_qdisc_is_busy(struct net_device *dev) | |||
815 | return false; | 815 | return false; |
816 | } | 816 | } |
817 | 817 | ||
818 | /** | ||
819 | * dev_deactivate_many - deactivate transmissions on several devices | ||
820 | * @head: list of devices to deactivate | ||
821 | * | ||
822 | * This function returns only when all outstanding transmissions | ||
823 | * have completed, unless all devices are in dismantle phase. | ||
824 | */ | ||
818 | void dev_deactivate_many(struct list_head *head) | 825 | void dev_deactivate_many(struct list_head *head) |
819 | { | 826 | { |
820 | struct net_device *dev; | 827 | struct net_device *dev; |
828 | bool sync_needed = false; | ||
821 | 829 | ||
822 | list_for_each_entry(dev, head, unreg_list) { | 830 | list_for_each_entry(dev, head, unreg_list) { |
823 | netdev_for_each_tx_queue(dev, dev_deactivate_queue, | 831 | netdev_for_each_tx_queue(dev, dev_deactivate_queue, |
@@ -827,10 +835,15 @@ void dev_deactivate_many(struct list_head *head) | |||
827 | &noop_qdisc); | 835 | &noop_qdisc); |
828 | 836 | ||
829 | dev_watchdog_down(dev); | 837 | dev_watchdog_down(dev); |
838 | sync_needed |= !dev->dismantle; | ||
830 | } | 839 | } |
831 | 840 | ||
832 | /* Wait for outstanding qdisc-less dev_queue_xmit calls. */ | 841 | /* Wait for outstanding qdisc-less dev_queue_xmit calls. |
833 | synchronize_rcu(); | 842 | * This is avoided if all devices are in dismantle phase : |
843 | * Caller will call synchronize_net() for us | ||
844 | */ | ||
845 | if (sync_needed) | ||
846 | synchronize_net(); | ||
834 | 847 | ||
835 | /* Wait for outstanding qdisc_run calls. */ | 848 | /* Wait for outstanding qdisc_run calls. */ |
836 | list_for_each_entry(dev, head, unreg_list) | 849 | list_for_each_entry(dev, head, unreg_list) |