 drivers/net/bnx2x/bnx2x_cmn.c   |  4
 drivers/net/bnx2x/bnx2x_cmn.h   |  2
 drivers/net/bnx2x/bnx2x_main.c  | 72
 drivers/net/bonding/bond_main.c |  4
 drivers/net/macvlan.c           |  6
 drivers/net/netconsole.c        | 26
 drivers/net/rionet.c            |  4
 include/linux/filter.h          |  7
 include/linux/notifier.h        |  3
 include/linux/skbuff.h          |  2
 include/net/caif/caif_layer.h   | 36
 net/bridge/br_if.c              |  3
 net/caif/caif_dev.c             |  7
 net/caif/caif_socket.c          | 13
 net/caif/cfcnfg.c               | 44
 net/caif/cfctrl.c               | 44
 net/caif/cfmuxl.c               | 49
 net/core/dev.c                  |  4
 net/core/pktgen.c               | 22
 net/core/rtnetlink.c            |  2
 net/ipv4/route.c                |  1
 net/sched/sch_generic.c         | 17
 22 files changed, 216 insertions(+), 156 deletions(-)
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 64d01e728a9..d5bd35b7f2e 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -131,7 +131,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 	/* release skb */
 	WARN_ON(!skb);
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 	tx_buf->first_bd = 0;
 	tx_buf->skb = NULL;
 
@@ -465,7 +465,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	} else {
 		DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
 		   " - dropping packet!\n");
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 	}
 
 
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index fab161e8030..1a3545bd8a9 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -840,7 +840,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 	mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
 				 DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		return -ENOMEM;
 	}
 
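
All three hunks above make the same substitution: dev_kfree_skb() assumes process context, while these free paths can also run from hard-IRQ context or with IRQs disabled. A minimal sketch of the distinction dev_kfree_skb_any() makes (the real helper lives in the networking core; this rendering is simplified):

	static void example_free_anywhere(struct sk_buff *skb)
	{
		if (in_irq() || irqs_disabled())
			dev_kfree_skb_irq(skb);	/* defer to the softirq completion queue */
		else
			dev_kfree_skb(skb);	/* ordinary process-context free */
	}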
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index f45c0caf324..a97d9be331d 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -571,7 +571,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
 					       struct dmae_command *dmae)
 {
 	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
-	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
+	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
 	int rc = 0;
 
 	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
@@ -3666,7 +3666,8 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
 				      union event_ring_elem *elem)
 {
 	if (!bp->cnic_eth_dev.starting_cid ||
-	    cid < bp->cnic_eth_dev.starting_cid)
+	    (cid < bp->cnic_eth_dev.starting_cid &&
+	    cid != bp->cnic_eth_dev.iscsi_l2_cid))
 		return 1;
 
 	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
@@ -7287,51 +7288,35 @@ static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
 	msleep(MCP_ONE_TIMEOUT);
 }
 
-static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
+/*
+ * initializes bp->common.shmem_base and waits for validity signature to appear
+ */
+static int bnx2x_init_shmem(struct bnx2x *bp)
 {
-	u32 shmem, cnt, validity_offset, val;
-	int rc = 0;
-
-	msleep(100);
+	int cnt = 0;
+	u32 val = 0;
 
-	/* Get shmem offset */
-	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
-	if (shmem == 0) {
-		BNX2X_ERR("Shmem 0 return failure\n");
-		rc = -ENOTTY;
-		goto exit_lbl;
-	}
-
-	validity_offset = offsetof(struct shmem_region, validity_map[0]);
-
-	/* Wait for MCP to come up */
-	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
-		/* TBD: its best to check validity map of last port.
-		 * currently checks on port 0.
-		 */
-		val = REG_RD(bp, shmem + validity_offset);
-		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
-		   shmem + validity_offset, val);
+	do {
+		bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+		if (bp->common.shmem_base) {
+			val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
+			if (val & SHR_MEM_VALIDITY_MB)
+				return 0;
+		}
 
-		/* check that shared memory is valid. */
-		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-			break;
+		bnx2x_mcp_wait_one(bp);
 
-		bnx2x_mcp_wait_one(bp);
-	}
+	} while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
 
-	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
+	BNX2X_ERR("BAD MCP validity signature\n");
 
-	/* Check that shared memory is valid. This indicates that MCP is up. */
-	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
-	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
-		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
-		rc = -ENOTTY;
-		goto exit_lbl;
-	}
+	return -ENODEV;
+}
 
-exit_lbl:
+static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
+{
+	int rc = bnx2x_init_shmem(bp);
+
 	/* Restore the `magic' bit value */
 	if (!CHIP_IS_E1(bp))
 		bnx2x_clp_reset_done(bp, magic_val);
@@ -7844,10 +7829,12 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
 		       bp->common.flash_size, bp->common.flash_size);
 
-	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+	bnx2x_init_shmem(bp);
+
 	bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
 					     MISC_REG_GENERIC_CR_1 :
 					     MISC_REG_GENERIC_CR_0));
+
 	bp->link_params.shmem_base = bp->common.shmem_base;
 	bp->link_params.shmem2_base = bp->common.shmem2_base;
 	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
@@ -7859,11 +7846,6 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 		return;
 	}
 
-	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
-	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-		BNX2X_ERR("BAD MCP validity signature\n");
-
 	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
 	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
 
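
Taken together, the bnx2x_main.c hunks fold the duplicated "wait for the MCP validity signature" logic into one helper. Once the diff is applied, the new bnx2x_init_shmem() reads as below; the comments are added here for illustration and are not part of the patch:

	static int bnx2x_init_shmem(struct bnx2x *bp)
	{
		int cnt = 0;
		u32 val = 0;

		do {
			/* re-read the base each pass: it stays 0 until the MCP is up */
			bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
			if (bp->common.shmem_base) {
				val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
				if (val & SHR_MEM_VALIDITY_MB)
					return 0;	/* signature present: MCP is up */
			}
			bnx2x_mcp_wait_one(bp);		/* sleep one MCP_ONE_TIMEOUT slice */
		} while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));

		BNX2X_ERR("BAD MCP validity signature\n");
		return -ENODEV;
	}

Note that the new loop polls for SHR_MEM_VALIDITY_MB only, whereas the removed code also required SHR_MEM_VALIDITY_DEV_INFO.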
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 088fd845ffd..6dc42846154 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1640,6 +1640,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		}
 	}
 
+	call_netdevice_notifiers(NETDEV_JOIN, slave_dev);
+
 	/* If this is the first slave, then we need to set the master's hardware
 	 * address to be the same as the slave's. */
 	if (is_zero_ether_addr(bond->dev->dev_addr))
@@ -1972,7 +1974,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 	}
 
 	block_netpoll_tx();
-	netdev_bonding_change(bond_dev, NETDEV_BONDING_DESLAVE);
+	netdev_bonding_change(bond_dev, NETDEV_RELEASE);
 	write_lock_bh(&bond->lock);
 
 	slave = bond_get_slave_by_dev(bond, slave_dev);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d72a70615c0..d6aeaa5f25e 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -238,10 +238,8 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	dest = macvlan_hash_lookup(port, eth->h_dest);
 	if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
-		unsigned int length = skb->len + ETH_HLEN;
-		int ret = dest->forward(dest->dev, skb);
-		macvlan_count_rx(dest, length,
-				 ret == NET_RX_SUCCESS, 0);
+		/* send to lowerdev first for its network taps */
+		vlan->forward(vlan->lowerdev, skb);
 
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index a83e101440f..dfc82720065 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -621,11 +621,10 @@ static int netconsole_netdev_event(struct notifier_block *this,
 	bool stopped = false;
 
 	if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER ||
-	      event == NETDEV_BONDING_DESLAVE || event == NETDEV_GOING_DOWN))
+	      event == NETDEV_RELEASE || event == NETDEV_JOIN))
 		goto done;
 
 	spin_lock_irqsave(&target_list_lock, flags);
-restart:
 	list_for_each_entry(nt, &target_list, list) {
 		netconsole_target_get(nt);
 		if (nt->np.dev == dev) {
@@ -633,6 +632,8 @@ restart:
 			case NETDEV_CHANGENAME:
 				strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ);
 				break;
+			case NETDEV_RELEASE:
+			case NETDEV_JOIN:
 			case NETDEV_UNREGISTER:
 				/*
 				 * rtnl_lock already held
@@ -647,11 +648,7 @@ restart:
 				dev_put(nt->np.dev);
 				nt->np.dev = NULL;
 				netconsole_target_put(nt);
-				goto restart;
 			}
-			/* Fall through */
-			case NETDEV_GOING_DOWN:
-			case NETDEV_BONDING_DESLAVE:
 				nt->enabled = 0;
 				stopped = true;
 				break;
@@ -660,10 +657,21 @@ restart:
 		netconsole_target_put(nt);
 	}
 	spin_unlock_irqrestore(&target_list_lock, flags);
-	if (stopped && (event == NETDEV_UNREGISTER || event == NETDEV_BONDING_DESLAVE))
-		printk(KERN_INFO "netconsole: network logging stopped on "
-		       "interface %s as it %s\n", dev->name,
-		       event == NETDEV_UNREGISTER ? "unregistered" : "released slaves");
+	if (stopped) {
+		printk(KERN_INFO "netconsole: network logging stopped on "
+		       "interface %s as it ", dev->name);
+		switch (event) {
+		case NETDEV_UNREGISTER:
+			printk(KERN_CONT "unregistered\n");
+			break;
+		case NETDEV_RELEASE:
+			printk(KERN_CONT "released slaves\n");
+			break;
+		case NETDEV_JOIN:
+			printk(KERN_CONT "is joining a master device\n");
+			break;
+		}
+	}
 
 done:
 	return NOTIFY_DONE;
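
The reworked message uses KERN_CONT, which appends to the previous record instead of starting a new prefixed log line. A brief illustration of the idiom (note that under concurrent logging a continuation can interleave with other messages, the usual trade-off of KERN_CONT):

	printk(KERN_INFO "netconsole: network logging stopped on interface %s as it ",
	       dev->name);
	printk(KERN_CONT "unregistered\n");	/* completes the same log line */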
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 26afbaae23f..77c5092a6a4 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -162,8 +162,8 @@ static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
 	rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);
 
 	if (netif_msg_tx_queued(rnet))
-		printk(KERN_INFO "%s: queued skb %8.8x len %8.8x\n", DRV_NAME,
-		       (u32) skb, skb->len);
+		printk(KERN_INFO "%s: queued skb len %8.8x\n", DRV_NAME,
+		       skb->len);
 
 	return 0;
 }
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 4609b85e559..9ee3f9fb0b4 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -131,6 +131,10 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
 #define SKF_LL_OFF    (-0x200000)
 
 #ifdef __KERNEL__
+
+struct sk_buff;
+struct sock;
+
 struct sk_filter
 {
 	atomic_t		refcnt;
@@ -146,9 +150,6 @@ static inline unsigned int sk_filter_len(const struct sk_filter *fp)
 	return fp->len * sizeof(struct sock_filter) + sizeof(*fp);
 }
 
-struct sk_buff;
-struct sock;
-
 extern int sk_filter(struct sock *sk, struct sk_buff *skb);
 extern unsigned int sk_run_filter(const struct sk_buff *skb,
 				  const struct sock_filter *filter);
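
The move matters because a forward declaration must precede the first use of the type; declaring sk_buff and sock only after struct sk_filter would break if the struct itself (presumably) gains members that mention them. A standalone illustration of the rule — the struct and member names here are hypothetical:

	struct sk_buff;				/* incomplete ("opaque") type */

	struct example_filter {
		unsigned int (*run)(const struct sk_buff *skb);	/* pointer use is fine */
	};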
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 621dfa16acc..c0688b0168b 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -209,8 +209,9 @@ static inline int notifier_to_errno(int ret)
 #define NETDEV_POST_TYPE_CHANGE	0x000F
 #define NETDEV_POST_INIT	0x0010
 #define NETDEV_UNREGISTER_BATCH 0x0011
-#define NETDEV_BONDING_DESLAVE  0x0012
+#define NETDEV_RELEASE		0x0012
 #define NETDEV_NOTIFY_PEERS	0x0013
+#define NETDEV_JOIN		0x0014
 
 #define SYS_DOWN	0x0001	/* Notify of system down */
 #define SYS_RESTART	SYS_DOWN
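
NETDEV_BONDING_DESLAVE is renamed to the more generic NETDEV_RELEASE, and NETDEV_JOIN is added for the enslave direction (fired by both bonding and the bridge in this series). A hedged sketch of a consumer; the handler name and body are illustrative, not part of this patch:

	static int example_netdev_event(struct notifier_block *nb,
					unsigned long event, void *ptr)
	{
		switch (event) {
		case NETDEV_JOIN:	/* device is about to be enslaved */
		case NETDEV_RELEASE:	/* device released its slaves */
			/* e.g. stop using the device for out-of-band logging */
			break;
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_netdev_event,
	};

The block would be registered with register_netdevice_notifier(&example_nb), which is exactly how netconsole and rtnetlink consume these events elsewhere in this diff.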
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 16c9c091555..e8b78ce1447 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1442,7 +1442,7 @@ extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
 
 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
 {
-	if (unlikely(skb->data_len)) {
+	if (unlikely(skb_is_nonlinear(skb))) {
 		WARN_ON(1);
 		return;
 	}
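
skb->data_len counts only the bytes living in fragments (paged data or the frag_list), so a non-zero value means the skb is non-linear and cannot be trimmed by a simple tail adjustment. The replacement expresses the same test through the existing helper, which in kernels of this vintage is, to the best of my knowledge, defined essentially as:

	static inline int skb_is_nonlinear(const struct sk_buff *skb)
	{
		return skb->data_len;	/* 0 means all data is in the linear head */
	}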
diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h
index c8b07a904e7..35bc7883cf9 100644
--- a/include/net/caif/caif_layer.h
+++ b/include/net/caif/caif_layer.h
@@ -15,7 +15,6 @@ struct cfpktq;
 struct caif_payload_info;
 struct caif_packet_funcs;
 
-
 #define CAIF_LAYER_NAME_SZ 16
 
 /**
@@ -33,7 +32,6 @@ do { \
 	} \
 } while (0)
 
-
 /**
  * enum caif_ctrlcmd - CAIF Stack Control Signaling sent in layer.ctrlcmd().
  *
@@ -141,7 +139,7 @@ enum caif_direction {
  *  - All layers must use this structure. If embedding it, then place this
  *    structure first in the layer specific structure.
  *
- *  - Each layer should not depend on any others layer private data.
+ *  - Each layer should not depend on any other layer's private data.
  *
  *  - In order to send data upwards do
  *    layer->up->receive(layer->up, packet);
@@ -155,16 +153,23 @@ struct cflayer {
 	struct list_head node;
 
 	/*
-	 *  receive() - Receive Function.
+	 *  receive() - Receive Function (non-blocking).
 	 *  Contract: Each layer must implement a receive function passing the
 	 *  CAIF packets upwards in the stack.
 	 *	Packet handling rules:
-	 *	      - The CAIF packet (cfpkt) cannot be accessed after
-	 *		passing it to the next layer using up->receive().
+	 *	      - The CAIF packet (cfpkt) ownership is passed to the
+	 *		called receive function. This means that the
+	 *		packet cannot be accessed after passing it to the
+	 *		above layer using up->receive().
+	 *
 	 *	      - If parsing of the packet fails, the packet must be
-	 *		destroyed and -1 returned from the function.
+	 *		destroyed and a negative error code returned
+	 *		from the function.
+	 *		EXCEPTION: If the framing layer (cffrml) returns
+	 *		-EILSEQ, the packet is not freed.
+	 *
 	 *	      - If parsing succeeds (and above layers return OK) then
-	 *		the function must return a value > 0.
+	 *		the function must return a value >= 0.
 	 *
 	 *	Returns result < 0 indicates an error, 0 or positive value
 	 *	indicates success.
@@ -176,7 +181,7 @@ struct cflayer {
 	int (*receive)(struct cflayer *layr, struct cfpkt *cfpkt);
 
 	/*
-	 *  transmit() - Transmit Function.
+	 *  transmit() - Transmit Function (non-blocking).
 	 *  Contract: Each layer must implement a transmit function passing the
 	 *  CAIF packet downwards in the stack.
 	 *	Packet handling rules:
@@ -185,15 +190,16 @@ struct cflayer {
 	 *		cannot be accessed after passing it to the below
 	 *		layer using dn->transmit().
 	 *
-	 *	      - If transmit fails, however, the ownership is returned
-	 *		to the caller. The caller of "dn->transmit()" must
-	 *		destroy or resend packet.
+	 *	      - Upon error the packet ownership is still passed on,
+	 *		so the packet shall be freed where the error is detected.
+	 *		Callers of the transmit function shall not free packets,
+	 *		but errors shall be returned.
 	 *
 	 *	      - Return value less than zero means error, zero or
 	 *		greater than zero means OK.
 	 *
-	 *	result < 0 indicates an error, 0 or positive value
-	 *	indicate success.
+	 *	Returns result < 0 indicates an error, 0 or positive value
+	 *	indicates success.
 	 *
 	 *	@layr: Pointer to the current layer the receive function
 	 *	is implemented for (this pointer).
@@ -202,7 +208,7 @@ struct cflayer {
 	int (*transmit) (struct cflayer *layr, struct cfpkt *cfpkt);
 
 	/*
-	 *  cttrlcmd() - Control Function upwards in CAIF Stack.
+	 *  ctrlcmd() - Control Function upwards in CAIF Stack (non-blocking).
 	 *  Used for signaling responses (CAIF_CTRLCMD_*_RSP)
 	 *  and asynchronous events from the modem (CAIF_CTRLCMD_*_IND)
 	 *
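
The tightened contract says a receiver owns the packet it is handed: free it on parse failure and return a negative errno, except that a -EILSEQ return from the framing layer leaves the packet with the caller. A hypothetical layer honoring the rules (the layer name and error code are illustrative):

	static int example_receive(struct cflayer *layr, struct cfpkt *pkt)
	{
		u8 hdr;

		if (cfpkt_extr_head(pkt, &hdr, 1) < 0) {
			cfpkt_destroy(pkt);	/* we own it: free on parse failure */
			return -EPROTO;
		}
		return layr->up->receive(layr->up, pkt);	/* ownership moves upward */
	}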
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 5dbdfdfc3a3..1bacca4cb67 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -147,6 +147,7 @@ static void del_nbp(struct net_bridge_port *p)
 	dev->priv_flags &= ~IFF_BRIDGE_PORT;
 
 	netdev_rx_handler_unregister(dev);
+	synchronize_net();
 
 	netdev_set_master(dev, NULL);
 
@@ -338,6 +339,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 	if (IS_ERR(p))
 		return PTR_ERR(p);
 
+	call_netdevice_notifiers(NETDEV_JOIN, dev);
+
 	err = dev_set_promiscuity(dev, 1);
 	if (err)
 		goto put_back;
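
The added synchronize_net() in del_nbp() closes a window in which another CPU could still be executing the just-unregistered rx_handler while the bridge tears down the port. The ordering, reduced to a hypothetical teardown (the port structure and the kfree() are illustrative):

	struct example_port;	/* hypothetical per-port state */

	static void example_detach(struct net_device *dev, struct example_port *p)
	{
		netdev_rx_handler_unregister(dev);
		synchronize_net();	/* wait out CPUs still inside the old handler */
		kfree(p);		/* no reader can now hold a reference */
	}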
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 366ca0fb7a2..682c0fedf36 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -142,6 +142,7 @@ static int receive(struct sk_buff *skb, struct net_device *dev,
 {
 	struct cfpkt *pkt;
 	struct caif_device_entry *caifd;
+	int err;
 
 	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
 
@@ -159,7 +160,11 @@ static int receive(struct sk_buff *skb, struct net_device *dev,
 	caifd_hold(caifd);
 	rcu_read_unlock();
 
-	caifd->layer.up->receive(caifd->layer.up, pkt);
+	err = caifd->layer.up->receive(caifd->layer.up, pkt);
+
+	/* For -EILSEQ the packet is not freed, so free it now */
+	if (err == -EILSEQ)
+		cfpkt_destroy(pkt);
 
 	/* Release reference to stack upwards */
 	caifd_put(caifd);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index b840395ced1..a9862808645 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -19,7 +19,7 @@
 #include <linux/uaccess.h>
 #include <linux/debugfs.h>
 #include <linux/caif/caif_socket.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
 #include <net/caif/caif_layer.h>
@@ -816,6 +816,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
 	if (sk->sk_shutdown & SHUTDOWN_MASK) {
 		/* Allow re-connect after SHUTDOWN_IND */
 		caif_disconnect_client(sock_net(sk), &cf_sk->layer);
+		caif_free_client(&cf_sk->layer);
 		break;
 	}
 	/* No reconnect on a seqpacket socket */
@@ -926,7 +927,6 @@ static int caif_release(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
-	int res = 0;
 
 	if (!sk)
 		return 0;
@@ -953,10 +953,7 @@ static int caif_release(struct socket *sock)
 	sk->sk_state = CAIF_DISCONNECTED;
 	sk->sk_shutdown = SHUTDOWN_MASK;
 
-	if (cf_sk->sk.sk_socket->state == SS_CONNECTED ||
-		cf_sk->sk.sk_socket->state == SS_CONNECTING)
-		res = caif_disconnect_client(sock_net(sk), &cf_sk->layer);
-
+	caif_disconnect_client(sock_net(sk), &cf_sk->layer);
 	cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
 	wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);
 
@@ -964,7 +961,7 @@ static int caif_release(struct socket *sock)
 	sk_stream_kill_queues(&cf_sk->sk);
 	release_sock(sk);
 	sock_put(sk);
-	return res;
+	return 0;
 }
 
 /* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
@@ -1120,7 +1117,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
 	set_rx_flow_on(cf_sk);
 
 	/* Set default options on configuration */
-	cf_sk->sk.sk_priority= CAIF_PRIO_NORMAL;
+	cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL;
 	cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
 	cf_sk->conn_req.protocol = protocol;
 	/* Increase the number of sockets created. */
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 351c2ca7e7b..52fe33bee02 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -182,39 +182,26 @@ static int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
 
 int caif_disconnect_client(struct net *net, struct cflayer *adap_layer)
 {
-	u8 channel_id = 0;
-	int ret = 0;
-	struct cflayer *servl = NULL;
+	u8 channel_id;
 	struct cfcnfg *cfg = get_cfcnfg(net);
 
 	caif_assert(adap_layer != NULL);
-
-	channel_id = adap_layer->id;
-	if (adap_layer->dn == NULL || channel_id == 0) {
-		pr_err("adap_layer->dn == NULL or adap_layer->id is 0\n");
-		ret = -ENOTCONN;
-		goto end;
-	}
-
-	servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
-	if (servl == NULL) {
-		pr_err("PROTOCOL ERROR - "
-		       "Error removing service_layer Channel_Id(%d)",
-			channel_id);
-		ret = -EINVAL;
-		goto end;
-	}
-
-	ret = cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
-
-end:
 	cfctrl_cancel_req(cfg->ctrl, adap_layer);
+	channel_id = adap_layer->id;
+	if (channel_id != 0) {
+		struct cflayer *servl;
+		servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
+		if (servl != NULL)
+			layer_set_up(servl, NULL);
+	} else
+		pr_debug("nothing to disconnect\n");
+	cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
 
 	/* Do RCU sync before initiating cleanup */
 	synchronize_rcu();
 	if (adap_layer->ctrlcmd != NULL)
 		adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0);
-	return ret;
+	return 0;
 
 }
 EXPORT_SYMBOL(caif_disconnect_client);
@@ -400,6 +387,14 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
 	struct cfcnfg_phyinfo *phyinfo;
 	struct net_device *netdev;
 
+	if (channel_id == 0) {
+		pr_warn("received channel_id zero\n");
+		if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
+			adapt_layer->ctrlcmd(adapt_layer,
+						CAIF_CTRLCMD_INIT_FAIL_RSP, 0);
+		return;
+	}
+
 	rcu_read_lock();
 
 	if (adapt_layer == NULL) {
@@ -523,7 +518,6 @@ got_phyid:
 	phyinfo->use_stx = stx;
 	phyinfo->use_fcs = fcs;
 
-	phy_layer->type = phy_type;
 	frml = cffrml_create(phyid, fcs);
 
 	if (!frml) {
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 0c00a6015dd..e22671bed66 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -178,20 +178,23 @@ static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl)
 void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
 {
 	struct cfctrl *cfctrl = container_obj(layer);
-	int ret;
 	struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+	struct cflayer *dn = cfctrl->serv.layer.dn;
 	if (!pkt) {
 		pr_warn("Out of memory\n");
 		return;
 	}
+	if (!dn) {
+		pr_debug("not able to send enum request\n");
+		return;
+	}
 	caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
 	init_info(cfpkt_info(pkt), cfctrl);
 	cfpkt_info(pkt)->dev_info->id = physlinkid;
 	cfctrl->serv.dev_info.id = physlinkid;
 	cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
 	cfpkt_addbdy(pkt, physlinkid);
-	ret =
-	    cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+	dn->transmit(dn, pkt);
 }
 
 int cfctrl_linkup_request(struct cflayer *layer,
@@ -206,6 +209,12 @@ int cfctrl_linkup_request(struct cflayer *layer,
 	int ret;
 	char utility_name[16];
 	struct cfpkt *pkt;
+	struct cflayer *dn = cfctrl->serv.layer.dn;
+
+	if (!dn) {
+		pr_debug("not able to send linkup request\n");
+		return -ENODEV;
+	}
 
 	if (cfctrl_cancel_req(layer, user_layer) > 0) {
 		/* Slight Paranoia, check if already connecting */
@@ -282,7 +291,7 @@ int cfctrl_linkup_request(struct cflayer *layer,
 	 */
 	cfpkt_info(pkt)->dev_info->id = param->phyid;
 	ret =
-	    cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+	    dn->transmit(dn, pkt);
 	if (ret < 0) {
 		int count;
 
@@ -301,15 +310,23 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
 	int ret;
 	struct cfctrl *cfctrl = container_obj(layer);
 	struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+	struct cflayer *dn = cfctrl->serv.layer.dn;
+
 	if (!pkt) {
 		pr_warn("Out of memory\n");
 		return -ENOMEM;
 	}
+
+	if (!dn) {
+		pr_debug("not able to send link-down request\n");
+		return -ENODEV;
+	}
+
 	cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
 	cfpkt_addbdy(pkt, channelid);
 	init_info(cfpkt_info(pkt), cfctrl);
 	ret =
-	    cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+	    dn->transmit(dn, pkt);
 #ifndef CAIF_NO_LOOP
 	cfctrl->loop_linkused[channelid] = 0;
 #endif
@@ -351,7 +368,8 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
 	cfpkt_extr_head(pkt, &cmdrsp, 1);
 	cmd = cmdrsp & CFCTRL_CMD_MASK;
 	if (cmd != CFCTRL_CMD_LINK_ERR
-	    && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) {
+	    && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)
+	    && CFCTRL_ERR_BIT != (CFCTRL_ERR_BIT & cmdrsp)) {
 		if (handle_loop(cfctrl, cmd, pkt) != 0)
 			cmdrsp |= CFCTRL_ERR_BIT;
 	}
@@ -477,7 +495,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
 			cfpkt_extr_head(pkt, &param, len);
 			break;
 		default:
-			pr_warn("Request setup - invalid link type (%d)\n",
+			pr_warn("Request setup, invalid type (%d)\n",
 				serv);
 			goto error;
 		}
@@ -489,7 +507,8 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
 
 	if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
 	    cfpkt_erroneous(pkt)) {
-		pr_err("Invalid O/E bit or parse error on CAIF control channel\n");
+		pr_err("Invalid O/E bit or parse error "
+		       "on CAIF control channel\n");
 		cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
 				       0,
 				       req ? req->client_layer
@@ -550,9 +569,8 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 	case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
 	case CAIF_CTRLCMD_FLOW_OFF_IND:
 		spin_lock_bh(&this->info_list_lock);
-		if (!list_empty(&this->list)) {
+		if (!list_empty(&this->list))
 			pr_debug("Received flow off in control layer\n");
-		}
 		spin_unlock_bh(&this->info_list_lock);
 		break;
 	case _CAIF_CTRLCMD_PHYIF_DOWN_IND: {
@@ -587,16 +605,16 @@ static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt)
 	case CFCTRL_CMD_LINK_SETUP:
 		spin_lock_bh(&ctrl->loop_linkid_lock);
 		if (!dec) {
-			for (linkid = last_linkid + 1; linkid < 255; linkid++)
+			for (linkid = last_linkid + 1; linkid < 254; linkid++)
 				if (!ctrl->loop_linkused[linkid])
 					goto found;
 		}
 		dec = 1;
-		for (linkid = last_linkid - 1; linkid > 0; linkid--)
+		for (linkid = last_linkid - 1; linkid > 1; linkid--)
 			if (!ctrl->loop_linkused[linkid])
 				goto found;
 		spin_unlock_bh(&ctrl->loop_linkid_lock);
-
+		return -1;
 found:
 		if (linkid < 10)
 			dec = 0;
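
Three functions in cfctrl.c gain the same guard: cache ->dn once and fail fast with -ENODEV instead of dereferencing a NULL downward layer. The shared shape, pulled out into a hypothetical helper that is not part of the patch:

	static int example_ctrl_send(struct cfctrl *cfctrl, struct cfpkt *pkt)
	{
		struct cflayer *dn = cfctrl->serv.layer.dn;

		if (!dn) {
			pr_debug("no downward layer, cannot send\n");
			return -ENODEV;
		}
		return dn->transmit(dn, pkt);	/* ownership passes down even on error */
	}

(As the hunks stand, the early returns in cfctrl_enum_req() and cfctrl_linkdown_req() do not free the already-allocated pkt, so the guard path appears to leak it; that is a property of the patch itself, not of this sketch.)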
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 2a56df7e0a4..3a66b8c10e0 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -62,16 +62,6 @@ struct cflayer *cfmuxl_create(void)
62 return &this->layer; 62 return &this->layer;
63} 63}
64 64
65int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
66{
67 struct cfmuxl *muxl = container_obj(layr);
68
69 spin_lock_bh(&muxl->receive_lock);
70 list_add_rcu(&up->node, &muxl->srvl_list);
71 spin_unlock_bh(&muxl->receive_lock);
72 return 0;
73}
74
75int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid) 65int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
76{ 66{
77 struct cfmuxl *muxl = (struct cfmuxl *) layr; 67 struct cfmuxl *muxl = (struct cfmuxl *) layr;
@@ -93,6 +83,24 @@ static struct cflayer *get_from_id(struct list_head *list, u16 id)
 	return NULL;
 }
 
+int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
+{
+	struct cfmuxl *muxl = container_obj(layr);
+	struct cflayer *old;
+
+	spin_lock_bh(&muxl->receive_lock);
+
+	/* Two entries with same id is wrong, so remove old layer from mux */
+	old = get_from_id(&muxl->srvl_list, linkid);
+	if (old != NULL)
+		list_del_rcu(&old->node);
+
+	list_add_rcu(&up->node, &muxl->srvl_list);
+	spin_unlock_bh(&muxl->receive_lock);
+
+	return 0;
+}
+
 struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
 {
 	struct cfmuxl *muxl = container_obj(layr);
@@ -146,6 +154,11 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
 	struct cfmuxl *muxl = container_obj(layr);
 	int idx = id % UP_CACHE_SIZE;
 
+	if (id == 0) {
+		pr_warn("Trying to remove control layer\n");
+		return NULL;
+	}
+
 	spin_lock_bh(&muxl->receive_lock);
 	up = get_from_id(&muxl->srvl_list, id);
 	if (up == NULL)
@@ -235,12 +248,26 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 {
 	struct cfmuxl *muxl = container_obj(layr);
 	struct cflayer *layer;
+	int idx;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {
-		if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd)
+
+		if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) {
+
+			if ((ctrl == _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND ||
+			     ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
+			      layer->id != 0) {
+
+				idx = layer->id % UP_CACHE_SIZE;
+				spin_lock_bh(&muxl->receive_lock);
+				rcu_assign_pointer(muxl->up_cache[idx], NULL);
+				list_del_rcu(&layer->node);
+				spin_unlock_bh(&muxl->receive_lock);
+			}
 			/* NOTE: ctrlcmd is not allowed to block */
 			layer->ctrlcmd(layer, ctrl, phyid);
+		}
 	}
 	rcu_read_unlock();
 }
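
cfmuxl_ctrlcmd() now unlinks a service layer from the RCU-protected list when the PHY goes away, while readers may still be walking it. The idiom, isolated into a hypothetical helper: writers mutate under the spinlock, readers run under rcu_read_lock() and simply stop seeing the node after list_del_rcu():

	static void example_unlink(struct cfmuxl *muxl, struct cflayer *layer)
	{
		int idx = layer->id % UP_CACHE_SIZE;

		spin_lock_bh(&muxl->receive_lock);
		rcu_assign_pointer(muxl->up_cache[idx], NULL);	/* unpublish cache slot */
		list_del_rcu(&layer->node);	/* in-flight readers may still see it */
		spin_unlock_bh(&muxl->receive_lock);
		/* actually freeing the layer would additionally need a grace period
		 * (synchronize_rcu()/call_rcu()); the mux does not free it here */
	}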
diff --git a/net/core/dev.c b/net/core/dev.c
index d94537914a7..bcb05cb799c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4294,10 +4294,8 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
 
 	slave->master = master;
 
-	if (old) {
-		synchronize_net();
+	if (old)
 		dev_put(old);
-	}
 	return 0;
 }
 EXPORT_SYMBOL(netdev_set_master);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 67870e9fd09..f76079cd750 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3544,13 +3544,12 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
 		return -ENOMEM;
 
 	strcpy(pkt_dev->odevname, ifname);
-	pkt_dev->flows = vmalloc_node(MAX_CFLOWS * sizeof(struct flow_state),
+	pkt_dev->flows = vzalloc_node(MAX_CFLOWS * sizeof(struct flow_state),
 				      node);
 	if (pkt_dev->flows == NULL) {
 		kfree(pkt_dev);
 		return -ENOMEM;
 	}
-	memset(pkt_dev->flows, 0, MAX_CFLOWS * sizeof(struct flow_state));
 
 	pkt_dev->removal_mark = 0;
 	pkt_dev->min_pkt_size = ETH_ZLEN;
@@ -3708,6 +3707,7 @@ static int __init pg_init(void)
 {
 	int cpu;
 	struct proc_dir_entry *pe;
+	int ret = 0;
 
 	pr_info("%s", version);
 
@@ -3718,11 +3718,10 @@ static int __init pg_init(void)
 	pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops);
 	if (pe == NULL) {
 		pr_err("ERROR: cannot create %s procfs entry\n", PGCTRL);
-		proc_net_remove(&init_net, PG_PROC_DIR);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto remove_dir;
 	}
 
-	/* Register us to receive netdevice events */
 	register_netdevice_notifier(&pktgen_notifier_block);
 
 	for_each_online_cpu(cpu) {
@@ -3736,13 +3735,18 @@ static int __init pg_init(void)
 
 	if (list_empty(&pktgen_threads)) {
 		pr_err("ERROR: Initialization failed for all threads\n");
-		unregister_netdevice_notifier(&pktgen_notifier_block);
-		remove_proc_entry(PGCTRL, pg_proc_dir);
-		proc_net_remove(&init_net, PG_PROC_DIR);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto unregister;
 	}
 
 	return 0;
+
+ unregister:
+	unregister_netdevice_notifier(&pktgen_notifier_block);
+	remove_proc_entry(PGCTRL, pg_proc_dir);
+ remove_dir:
+	proc_net_remove(&init_net, PG_PROC_DIR);
+	return ret;
 }
 
 static void __exit pg_cleanup(void)
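
pg_init() is restructured into the conventional goto-unwind ladder: each failure jumps to the label that undoes exactly what had already succeeded, so each cleanup call exists once instead of being repeated in every error branch. The shape in miniature (all helper names hypothetical):

	bool create_dir(void);
	bool create_ctrl_file(void);
	bool start_threads(void);
	void remove_ctrl_file(void);
	void remove_dir_again(void);

	static int __init example_init(void)
	{
		int ret = 0;

		if (!create_dir())
			return -ENODEV;		/* nothing to unwind yet */
		if (!create_ctrl_file()) {
			ret = -EINVAL;
			goto remove_dir;	/* unwind only the first step */
		}
		if (!start_threads()) {
			ret = -ENODEV;
			goto remove_ctrl;	/* unwind both steps, in reverse */
		}
		return 0;

	 remove_ctrl:
		remove_ctrl_file();
	 remove_dir:
		remove_dir_again();
		return ret;
	}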
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d2ba2597c75..d1644e317e7 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1956,6 +1956,8 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
 	case NETDEV_GOING_DOWN:
 	case NETDEV_UNREGISTER:
 	case NETDEV_UNREGISTER_BATCH:
+	case NETDEV_RELEASE:
+	case NETDEV_JOIN:
 		break;
 	default:
 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index b24d58e6bbc..52b0b956508 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1665,6 +1665,7 @@ static int ip_rt_bug(struct sk_buff *skb)
 		&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
 		skb->dev ? skb->dev->name : "?");
 	kfree_skb(skb);
+	WARN_ON(1);
 	return 0;
 }
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index c84b65920d1..b1721d71c27 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -815,9 +815,17 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 	return false;
 }
 
+/**
+ * dev_deactivate_many - deactivate transmissions on several devices
+ * @head: list of devices to deactivate
+ *
+ * This function returns only when all outstanding transmissions
+ * have completed, unless all devices are in dismantle phase.
+ */
 void dev_deactivate_many(struct list_head *head)
 {
 	struct net_device *dev;
+	bool sync_needed = false;
 
 	list_for_each_entry(dev, head, unreg_list) {
 		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
@@ -827,10 +835,15 @@ void dev_deactivate_many(struct list_head *head)
 					     &noop_qdisc);
 
 		dev_watchdog_down(dev);
+		sync_needed |= !dev->dismantle;
 	}
 
-	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
-	synchronize_rcu();
+	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
+	 * This is avoided if all devices are in dismantle phase :
+	 * Caller will call synchronize_net() for us
+	 */
+	if (sync_needed)
+		synchronize_net();
 
 	/* Wait for outstanding qdisc_run calls. */
 	list_for_each_entry(dev, head, unreg_list)
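
For context, synchronize_net() in this era is a thin wrapper that waits for an RCU grace period (it boils down to synchronize_rcu()), so skipping it when every device on the list is in the dismantle phase removes one grace-period stall per batched teardown; per the comment above, the caller does its own synchronize_net() in that case. The decision logic in isolation (illustrative):

	static void example_deactivate_many(struct list_head *head)
	{
		struct net_device *dev;
		bool sync_needed = false;

		list_for_each_entry(dev, head, unreg_list)
			sync_needed |= !dev->dismantle;	/* any device not dismantling? */

		if (sync_needed)
			synchronize_net();	/* wait for in-flight RCU readers */
	}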