 drivers/net/bnx2x.h        |   2
 drivers/net/bnx2x_main.c   | 220
 include/net/pkt_sched.h    |   1
 include/net/sch_generic.h  |   5
 net/ipv4/icmp.c            |  22
 net/ipv4/route.c           |  24
 net/ipv6/addrconf.c        |   1
 net/ipv6/icmp.c            |  23
 net/ipv6/sysctl_net_ipv6.c |   2
 net/sched/sch_api.c        |  48
 net/sched/sch_cbq.c        |   4
 net/sched/sch_generic.c    |   5
 net/sctp/auth.c            |   4
 net/sctp/endpointola.c     |   4
 net/sctp/socket.c          |  90
 15 files changed, 293 insertions(+), 162 deletions(-)
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index b468f904c7f8..a14dba1afcc5 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -271,7 +271,7 @@ struct bnx2x_fastpath {
 			(fp->tx_pkt_prod != fp->tx_pkt_cons))
 
 #define BNX2X_HAS_RX_WORK(fp) \
-			(fp->rx_comp_cons != le16_to_cpu(*fp->rx_cons_sb))
+			(fp->rx_comp_cons != rx_cons_sb)
 
 #define BNX2X_HAS_WORK(fp)	(BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp))
 
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 971576b43687..82deea0a63f5 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -59,8 +59,8 @@
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 
-#define DRV_MODULE_VERSION	"1.45.17"
-#define DRV_MODULE_RELDATE	"2008/08/13"
+#define DRV_MODULE_VERSION	"1.45.20"
+#define DRV_MODULE_RELDATE	"2008/08/25"
 #define BNX2X_BC_VER		0x040200
 
 /* Time in jiffies before concluding the transmitter is hung */
@@ -1717,8 +1717,8 @@ static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
 		return -EEXIST;
 	}
 
-	/* Try for 1 second every 5ms */
-	for (cnt = 0; cnt < 200; cnt++) {
+	/* Try for 5 second every 5ms */
+	for (cnt = 0; cnt < 1000; cnt++) {
 		/* Try to acquire the lock */
 		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
 		lock_status = REG_RD(bp, hw_lock_control_reg);
@@ -2550,6 +2550,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 		BNX2X_ERR("SPIO5 hw attention\n");
 
 		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 			/* Fan failure attention */
 
@@ -4605,6 +4606,17 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
 {
 	int i;
 
+	if (bp->flags & TPA_ENABLE_FLAG) {
+		struct tstorm_eth_tpa_exist tpa = {0};
+
+		tpa.tpa_exist = 1;
+
+		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
+		       ((u32 *)&tpa)[0]);
+		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
+		       ((u32 *)&tpa)[1]);
+	}
+
 	/* Zero this manually as its initialization is
 	   currently missing in the initTool */
 	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -5337,6 +5349,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
 	}
 
 	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 		/* Fan failure is indicated by SPIO 5 */
 		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
@@ -5363,17 +5376,6 @@ static int bnx2x_init_common(struct bnx2x *bp)
 
 	enable_blocks_attention(bp);
 
-	if (bp->flags & TPA_ENABLE_FLAG) {
-		struct tstorm_eth_tpa_exist tmp = {0};
-
-		tmp.tpa_exist = 1;
-
-		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
-		       ((u32 *)&tmp)[0]);
-		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
-		       ((u32 *)&tmp)[1]);
-	}
-
 	if (!BP_NOMCP(bp)) {
 		bnx2x_acquire_phy_lock(bp);
 		bnx2x_common_init_phy(bp, bp->common.shmem_base);
@@ -5531,6 +5533,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
 	/* Port DMAE comes here */
 
 	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 		/* add SPIO 5 to group 0 */
 		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
@@ -6055,6 +6058,44 @@ static int bnx2x_req_irq(struct bnx2x *bp)
 	return rc;
 }
 
+static void bnx2x_napi_enable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i)
+		napi_enable(&bnx2x_fp(bp, i, napi));
+}
+
+static void bnx2x_napi_disable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i)
+		napi_disable(&bnx2x_fp(bp, i, napi));
+}
+
+static void bnx2x_netif_start(struct bnx2x *bp)
+{
+	if (atomic_dec_and_test(&bp->intr_sem)) {
+		if (netif_running(bp->dev)) {
+			if (bp->state == BNX2X_STATE_OPEN)
+				netif_wake_queue(bp->dev);
+			bnx2x_napi_enable(bp);
+			bnx2x_int_enable(bp);
+		}
+	}
+}
+
+static void bnx2x_netif_stop(struct bnx2x *bp)
+{
+	bnx2x_int_disable_sync(bp);
+	if (netif_running(bp->dev)) {
+		bnx2x_napi_disable(bp);
+		netif_tx_disable(bp->dev);
+		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
+	}
+}
+
 /*
  * Init service functions
  */
@@ -6338,7 +6379,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	rc = bnx2x_init_hw(bp, load_code);
 	if (rc) {
 		BNX2X_ERR("HW init failed, aborting\n");
-		goto load_error;
+		goto load_int_disable;
 	}
 
 	/* Setup NIC internals and enable interrupts */
@@ -6350,7 +6391,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		if (!load_code) {
 			BNX2X_ERR("MCP response failure, aborting\n");
 			rc = -EBUSY;
-			goto load_int_disable;
+			goto load_rings_free;
 		}
 	}
 
@@ -6360,8 +6401,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	/* Enable Rx interrupt handling before sending the ramrod
 	   as it's completed on Rx FP queue */
-	for_each_queue(bp, i)
-		napi_enable(&bnx2x_fp(bp, i, napi));
+	bnx2x_napi_enable(bp);
 
 	/* Enable interrupt handling */
 	atomic_set(&bp->intr_sem, 0);
@@ -6369,7 +6409,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	rc = bnx2x_setup_leading(bp);
 	if (rc) {
 		BNX2X_ERR("Setup leading failed!\n");
-		goto load_stop_netif;
+		goto load_netif_stop;
 	}
 
 	if (CHIP_IS_E1H(bp))
@@ -6382,7 +6422,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	for_each_nondefault_queue(bp, i) {
 		rc = bnx2x_setup_multi(bp, i);
 		if (rc)
-			goto load_stop_netif;
+			goto load_netif_stop;
 	}
 
 	if (CHIP_IS_E1(bp))
@@ -6427,20 +6467,17 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	return 0;
 
-load_stop_netif:
+load_netif_stop:
+	bnx2x_napi_disable(bp);
+load_rings_free:
+	/* Free SKBs, SGEs, TPA pool and driver internals */
+	bnx2x_free_skbs(bp);
 	for_each_queue(bp, i)
-		napi_disable(&bnx2x_fp(bp, i, napi));
-
+		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 load_int_disable:
 	bnx2x_int_disable_sync(bp);
-
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
-
-	/* Free SKBs, SGEs, TPA pool and driver internals */
-	bnx2x_free_skbs(bp);
-	for_each_queue(bp, i)
-		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error:
 	bnx2x_free_mem(bp);
 
@@ -6455,7 +6492,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
 
 	/* halt the connection */
 	bp->fp[index].state = BNX2X_FP_STATE_HALTING;
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
 
 	/* Wait for completion */
 	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
@@ -6613,11 +6650,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 	bp->rx_mode = BNX2X_RX_MODE_NONE;
 	bnx2x_set_storm_rx_mode(bp);
 
-	if (netif_running(bp->dev)) {
-		netif_tx_disable(bp->dev);
-		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
-	}
-
+	bnx2x_netif_stop(bp);
+	if (!netif_running(bp->dev))
+		bnx2x_napi_disable(bp);
 	del_timer_sync(&bp->timer);
 	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
 		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
@@ -6631,9 +6666,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 		smp_rmb();
 		while (BNX2X_HAS_TX_WORK(fp)) {
 
-			if (!netif_running(bp->dev))
-				bnx2x_tx_int(fp, 1000);
-
+			bnx2x_tx_int(fp, 1000);
 			if (!cnt) {
 				BNX2X_ERR("timeout waiting for queue[%d]\n",
 					  i);
@@ -6649,46 +6682,12 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 			smp_rmb();
 		}
 	}
-
 	/* Give HW time to discard old tx messages */
 	msleep(1);
 
-	for_each_queue(bp, i)
-		napi_disable(&bnx2x_fp(bp, i, napi));
-	/* Disable interrupts after Tx and Rx are disabled on stack level */
-	bnx2x_int_disable_sync(bp);
-
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
 
-	if (unload_mode == UNLOAD_NORMAL)
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
-	else if (bp->flags & NO_WOL_FLAG) {
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
-		if (CHIP_IS_E1H(bp))
-			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
-
-	} else if (bp->wol) {
-		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-		u8 *mac_addr = bp->dev->dev_addr;
-		u32 val;
-		/* The mac address is written to entries 1-4 to
-		   preserve entry 0 which is used by the PMF */
-		u8 entry = (BP_E1HVN(bp) + 1)*8;
-
-		val = (mac_addr[0] << 8) | mac_addr[1];
-		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
-
-		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
-		      (mac_addr[4] << 8) | mac_addr[5];
-		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
-
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
-
-	} else
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
 	if (CHIP_IS_E1(bp)) {
 		struct mac_configuration_cmd *config =
 					bnx2x_sp(bp, mcast_config);
@@ -6711,14 +6710,41 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
 
 	} else { /* E1H */
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
 		bnx2x_set_mac_addr_e1h(bp, 0);
 
 		for (i = 0; i < MC_HASH_SIZE; i++)
 			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
 	}
 
-	if (CHIP_IS_E1H(bp))
-		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+	if (unload_mode == UNLOAD_NORMAL)
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+	else if (bp->flags & NO_WOL_FLAG) {
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+		if (CHIP_IS_E1H(bp))
+			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
+
+	} else if (bp->wol) {
+		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+		u8 *mac_addr = bp->dev->dev_addr;
+		u32 val;
+		/* The mac address is written to entries 1-4 to
+		   preserve entry 0 which is used by the PMF */
+		u8 entry = (BP_E1HVN(bp) + 1)*8;
+
+		val = (mac_addr[0] << 8) | mac_addr[1];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
+
+		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+		      (mac_addr[4] << 8) | mac_addr[5];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
+
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
+
+	} else
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 
 	/* Close multi and leading connections
 	   Completions for ramrods are collected in a synchronous way */
@@ -6821,6 +6847,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 	 */
 	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 	val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
+	if (val == 0x7)
+		REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+
 	if (val == 0x7) {
 		u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 		/* save our func */
@@ -6898,7 +6928,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
 				DRV_MSG_SEQ_NUMBER_MASK);
 		}
-		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 	}
 }
 
@@ -8617,34 +8646,6 @@ test_mem_exit:
 	return rc;
 }
 
-static void bnx2x_netif_start(struct bnx2x *bp)
-{
-	int i;
-
-	if (atomic_dec_and_test(&bp->intr_sem)) {
-		if (netif_running(bp->dev)) {
-			bnx2x_int_enable(bp);
-			for_each_queue(bp, i)
-				napi_enable(&bnx2x_fp(bp, i, napi));
-			if (bp->state == BNX2X_STATE_OPEN)
-				netif_wake_queue(bp->dev);
-		}
-	}
-}
-
-static void bnx2x_netif_stop(struct bnx2x *bp)
-{
-	int i;
-
-	if (netif_running(bp->dev)) {
-		netif_tx_disable(bp->dev);
-		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
-		for_each_queue(bp, i)
-			napi_disable(&bnx2x_fp(bp, i, napi));
-	}
-	bnx2x_int_disable_sync(bp);
-}
-
 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
 {
 	int cnt = 1000;
@@ -9250,6 +9251,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 					     napi);
 	struct bnx2x *bp = fp->bp;
 	int work_done = 0;
+	u16 rx_cons_sb;
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -9265,10 +9267,16 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 	if (BNX2X_HAS_TX_WORK(fp))
 		bnx2x_tx_int(fp, budget);
 
+	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+		rx_cons_sb++;
 	if (BNX2X_HAS_RX_WORK(fp))
 		work_done = bnx2x_rx_int(fp, budget);
 
 	rmb(); /* BNX2X_HAS_WORK() reads the status block */
+	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+		rx_cons_sb++;
 
 	/* must not complete if we consumed full budget */
 	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
@@ -9484,8 +9492,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	fp_index = (smp_processor_id() % bp->num_queues);
 	fp = &bp->fp[fp_index];
 
-	if (unlikely(bnx2x_tx_avail(bp->fp) <
-					(skb_shinfo(skb)->nr_frags + 3))) {
+	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
 		bp->eth_stats.driver_xoff++,
 		netif_stop_queue(dev);
 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
@@ -9548,7 +9555,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_bd->vlan = cpu_to_le16(pkt_prod);
 
 	if (xmit_type) {
-
 		/* turn on parsing and get a BD */
 		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 		pbd = (void *)&fp->tx_desc_ring[bd_prod];
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 853fe83d9f37..b786a5b09253 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -78,6 +78,7 @@ extern struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
 
 extern int register_qdisc(struct Qdisc_ops *qops);
 extern int unregister_qdisc(struct Qdisc_ops *qops);
+extern void qdisc_list_del(struct Qdisc *q);
 extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
 extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
 extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 84d25f2e6188..b1d2cfea89c5 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -193,6 +193,11 @@ static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
 	return qdisc->dev_queue->qdisc;
 }
 
+static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
+{
+	return qdisc->dev_queue->qdisc_sleeping;
+}
+
 /* The qdisc root lock is a mechanism by which to top level
  * of a qdisc tree can be locked from any qdisc node in the
  * forest.  This allows changing the configuration of some
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 860558633b2c..55c355e63234 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -204,18 +204,22 @@ static struct sock *icmp_sk(struct net *net)
 	return net->ipv4.icmp_sk[smp_processor_id()];
 }
 
-static inline int icmp_xmit_lock(struct sock *sk)
+static inline struct sock *icmp_xmit_lock(struct net *net)
 {
+	struct sock *sk;
+
 	local_bh_disable();
 
+	sk = icmp_sk(net);
+
 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
 		/* This can happen if the output path signals a
 		 * dst_link_failure() for an outgoing ICMP packet.
 		 */
 		local_bh_enable();
-		return 1;
+		return NULL;
 	}
-	return 0;
+	return sk;
 }
 
 static inline void icmp_xmit_unlock(struct sock *sk)
@@ -354,15 +358,17 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	struct ipcm_cookie ipc;
 	struct rtable *rt = skb->rtable;
 	struct net *net = dev_net(rt->u.dst.dev);
-	struct sock *sk = icmp_sk(net);
-	struct inet_sock *inet = inet_sk(sk);
+	struct sock *sk;
+	struct inet_sock *inet;
 	__be32 daddr;
 
 	if (ip_options_echo(&icmp_param->replyopts, skb))
 		return;
 
-	if (icmp_xmit_lock(sk))
+	sk = icmp_xmit_lock(net);
+	if (sk == NULL)
 		return;
+	inet = inet_sk(sk);
 
 	icmp_param->data.icmph.checksum = 0;
 
@@ -419,7 +425,6 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	if (!rt)
 		goto out;
 	net = dev_net(rt->u.dst.dev);
-	sk = icmp_sk(net);
 
 	/*
 	 *	Find the original header. It is expected to be valid, of course.
@@ -483,7 +488,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 		}
 	}
 
-	if (icmp_xmit_lock(sk))
+	sk = icmp_xmit_lock(net);
+	if (sk == NULL)
 		return;
 
 	/*
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cca921ea8550..e91bafeb32f4 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3116,14 +3116,23 @@ static ctl_table ipv4_route_table[] = {
 	{ .ctl_name = 0 }
 };
 
-static __net_initdata struct ctl_path ipv4_route_path[] = {
+static struct ctl_table empty[1];
+
+static struct ctl_table ipv4_skeleton[] =
+{
+	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE,
+	  .child = ipv4_route_table},
+	{ .procname = "neigh", .ctl_name = NET_IPV4_NEIGH,
+	  .child = empty},
+	{ }
+};
+
+static __net_initdata struct ctl_path ipv4_path[] = {
 	{ .procname = "net", .ctl_name = CTL_NET, },
 	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
-	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
 	{ },
 };
 
-
 static struct ctl_table ipv4_route_flush_table[] = {
 	{
 		.ctl_name 	= NET_IPV4_ROUTE_FLUSH,
@@ -3136,6 +3145,13 @@ static struct ctl_table ipv4_route_flush_table[] = {
 	{ .ctl_name = 0 },
 };
 
+static __net_initdata struct ctl_path ipv4_route_path[] = {
+	{ .procname = "net", .ctl_name = CTL_NET, },
+	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
+	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
+	{ },
+};
+
 static __net_init int sysctl_route_net_init(struct net *net)
 {
 	struct ctl_table *tbl;
@@ -3287,7 +3303,7 @@ int __init ip_rt_init(void)
  */
 void __init ip_static_sysctl_init(void)
 {
-	register_sysctl_paths(ipv4_route_path, ipv4_route_table);
+	register_sysctl_paths(ipv4_path, ipv4_skeleton);
 }
 #endif
 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e2d3b7580b76..7b6a584b62dd 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1688,6 +1688,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
 		.fc_dst_len = plen,
 		.fc_flags = RTF_UP | flags,
 		.fc_nlinfo.nl_net = dev_net(dev),
+		.fc_protocol = RTPROT_KERNEL,
 	};
 
 	ipv6_addr_copy(&cfg.fc_dst, pfx);
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index abedf95fdf2d..b3157a0cc15d 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -91,19 +91,22 @@ static struct inet6_protocol icmpv6_protocol = {
 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
-static __inline__ int icmpv6_xmit_lock(struct sock *sk)
+static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
 {
+	struct sock *sk;
+
 	local_bh_disable();
 
+	sk = icmpv6_sk(net);
 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
 		/* This can happen if the output path (f.e. SIT or
 		 * ip6ip6 tunnel) signals dst_link_failure() for an
 		 * outgoing ICMP6 packet.
 		 */
 		local_bh_enable();
-		return 1;
+		return NULL;
 	}
-	return 0;
+	return sk;
 }
 
 static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
@@ -392,11 +395,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
 	fl.fl_icmp_code = code;
 	security_skb_classify_flow(skb, &fl);
 
-	sk = icmpv6_sk(net);
-	np = inet6_sk(sk);
-
-	if (icmpv6_xmit_lock(sk))
+	sk = icmpv6_xmit_lock(net);
+	if (sk == NULL)
 		return;
+	np = inet6_sk(sk);
 
 	if (!icmpv6_xrlim_allow(sk, type, &fl))
 		goto out;
@@ -539,11 +541,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 	fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
 	security_skb_classify_flow(skb, &fl);
 
-	sk = icmpv6_sk(net);
-	np = inet6_sk(sk);
-
-	if (icmpv6_xmit_lock(sk))
+	sk = icmpv6_xmit_lock(net);
+	if (sk == NULL)
 		return;
+	np = inet6_sk(sk);
 
 	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
 		fl.oif = np->mcast_oif;
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index e6dfaeac6be3..587f8f60c489 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -156,7 +156,7 @@ static struct ctl_table_header *ip6_base;
 int ipv6_static_sysctl_register(void)
 {
 	static struct ctl_table empty[1];
-	ip6_base = register_net_sysctl_rotable(net_ipv6_ctl_path, empty);
+	ip6_base = register_sysctl_paths(net_ipv6_ctl_path, empty);
 	if (ip6_base == NULL)
 		return -ENOMEM;
 	return 0;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index ef0efeca6352..e7fb9e0d21b4 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -199,19 +199,53 @@ struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
 	return NULL;
 }
 
+/*
+ * This lock is needed until some qdiscs stop calling qdisc_tree_decrease_qlen()
+ * without rtnl_lock(); currently hfsc_dequeue(), netem_dequeue(), tbf_dequeue()
+ */
+static DEFINE_SPINLOCK(qdisc_list_lock);
+
+static void qdisc_list_add(struct Qdisc *q)
+{
+	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+		spin_lock_bh(&qdisc_list_lock);
+		list_add_tail(&q->list, &qdisc_root_sleeping(q)->list);
+		spin_unlock_bh(&qdisc_list_lock);
+	}
+}
+
+void qdisc_list_del(struct Qdisc *q)
+{
+	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+		spin_lock_bh(&qdisc_list_lock);
+		list_del(&q->list);
+		spin_unlock_bh(&qdisc_list_lock);
+	}
+}
+EXPORT_SYMBOL(qdisc_list_del);
+
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
 	unsigned int i;
+	struct Qdisc *q;
+
+	spin_lock_bh(&qdisc_list_lock);
 
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		struct Qdisc *q, *txq_root = txq->qdisc_sleeping;
+		struct Qdisc *txq_root = txq->qdisc_sleeping;
 
 		q = qdisc_match_from_root(txq_root, handle);
 		if (q)
-			return q;
+			goto unlock;
 	}
-	return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+	q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+unlock:
+	spin_unlock_bh(&qdisc_list_lock);
+
+	return q;
 }
 
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
@@ -444,6 +478,10 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
 {
 	ktime_t time;
 
+	if (test_bit(__QDISC_STATE_DEACTIVATED,
+		     &qdisc_root_sleeping(wd->qdisc)->state))
+		return;
+
 	wd->qdisc->flags |= TCQ_F_THROTTLED;
 	time = ktime_set(0, 0);
 	time = ktime_add_ns(time, PSCHED_US2NS(expires));
@@ -806,8 +844,8 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 				goto err_out3;
 			}
 		}
-		if ((parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS))
-			list_add_tail(&sch->list, &dev_queue->qdisc_sleeping->list);
+
+		qdisc_list_add(sch);
 
 		return sch;
 	}
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 47ef492c4ff4..8fa90d68ec6d 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -521,6 +521,10 @@ static void cbq_ovl_delay(struct cbq_class *cl)
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
 	psched_tdiff_t delay = cl->undertime - q->now;
 
+	if (test_bit(__QDISC_STATE_DEACTIVATED,
+		     &qdisc_root_sleeping(cl->qdisc)->state))
+		return;
+
 	if (!cl->delayed) {
 		psched_time_t sched = q->now;
 		ktime_t expires;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index c3ed4d44fc14..5f0ade7806a7 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -526,10 +526,9 @@ void qdisc_destroy(struct Qdisc *qdisc)
 	    !atomic_dec_and_test(&qdisc->refcnt))
 		return;
 
-	if (qdisc->parent)
-		list_del(&qdisc->list);
-
 #ifdef CONFIG_NET_SCHED
+	qdisc_list_del(qdisc);
+
 	qdisc_put_stab(qdisc->stab);
 #endif
 	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 675a5c3e68a6..1fcb4cf2f4c9 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -80,6 +80,10 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
 {
 	struct sctp_auth_bytes *key;
 
+	/* Verify that we are not going to overflow INT_MAX */
+	if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
+		return NULL;
+
 	/* Allocate the shared key */
 	key = kmalloc(sizeof(struct sctp_auth_bytes) + key_len, gfp);
 	if (!key)
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index e39a0cdef184..4c8d9f45ce09 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -103,6 +103,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 
 		/* Initialize the CHUNKS parameter */
 		auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS;
+		auth_chunks->param_hdr.length = htons(sizeof(sctp_paramhdr_t));
 
 		/* If the Add-IP functionality is enabled, we must
 		 * authenticate, ASCONF and ASCONF-ACK chunks
@@ -110,8 +111,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 		if (sctp_addip_enable) {
 			auth_chunks->chunks[0] = SCTP_CID_ASCONF;
 			auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
-			auth_chunks->param_hdr.length =
-					htons(sizeof(sctp_paramhdr_t) + 2);
+			auth_chunks->param_hdr.length += htons(2);
 		}
 	}
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index dbb79adf8f3c..afa952e726d7 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3055,6 +3055,9 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
 {
 	struct sctp_authchunk val;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen != sizeof(struct sctp_authchunk))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, optlen))
@@ -3085,6 +3088,9 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
 	struct sctp_hmacalgo *hmacs;
 	int err;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen < sizeof(struct sctp_hmacalgo))
 		return -EINVAL;
 
@@ -3123,6 +3129,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
 	struct sctp_association *asoc;
 	int ret;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen <= sizeof(struct sctp_authkey))
 		return -EINVAL;
 
@@ -3135,6 +3144,11 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
 		goto out;
 	}
 
+	if (authkey->sca_keylength > optlen) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
 	if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
 		ret = -EINVAL;
@@ -3160,6 +3174,9 @@ static int sctp_setsockopt_active_key(struct sock *sk,
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen != sizeof(struct sctp_authkeyid))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, optlen))
@@ -3185,6 +3202,9 @@ static int sctp_setsockopt_del_key(struct sock *sk,
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen != sizeof(struct sctp_authkeyid))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, optlen))
@@ -5197,19 +5217,29 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
 				      char __user *optval, int __user *optlen)
 {
+	struct sctp_hmacalgo  __user *p = (void __user *)optval;
 	struct sctp_hmac_algo_param *hmacs;
-	__u16 param_len;
+	__u16 data_len = 0;
+	u32 num_idents;
+
+	if (!sctp_auth_enable)
+		return -EACCES;
 
 	hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
-	param_len = ntohs(hmacs->param_hdr.length);
+	data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
 
-	if (len < param_len)
+	if (len < sizeof(struct sctp_hmacalgo) + data_len)
 		return -EINVAL;
+
+	len = sizeof(struct sctp_hmacalgo) + data_len;
+	num_idents = data_len / sizeof(u16);
+
 	if (put_user(len, optlen))
 		return -EFAULT;
-	if (copy_to_user(optval, hmacs->hmac_ids, len))
+	if (put_user(num_idents, &p->shmac_num_idents))
+		return -EFAULT;
+	if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
 		return -EFAULT;
-
 	return 0;
 }
 
@@ -5219,6 +5249,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (len < sizeof(struct sctp_authkeyid))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
@@ -5233,6 +5266,12 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
 	else
 		val.scact_keynumber = sctp_sk(sk)->ep->active_key_id;
 
+	len = sizeof(struct sctp_authkeyid);
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &val, len))
+		return -EFAULT;
+
 	return 0;
 }
 
@@ -5243,13 +5282,16 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
 	struct sctp_authchunks val;
 	struct sctp_association *asoc;
 	struct sctp_chunks_param *ch;
-	u32    num_chunks;
+	u32    num_chunks = 0;
 	char __user *to;
 
-	if (len <= sizeof(struct sctp_authchunks))
+	if (!sctp_auth_enable)
+		return -EACCES;
+
+	if (len < sizeof(struct sctp_authchunks))
 		return -EINVAL;
 
-	if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
+	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
 		return -EFAULT;
 
 	to = p->gauth_chunks;
@@ -5258,20 +5300,21 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
 		return -EINVAL;
 
 	ch = asoc->peer.peer_chunks;
+	if (!ch)
+		goto num;
 
 	/* See if the user provided enough room for all the data */
 	num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
 	if (len < num_chunks)
 		return -EINVAL;
 
-	len = num_chunks;
-	if (put_user(len, optlen))
+	if (copy_to_user(to, ch->chunks, num_chunks))
 		return -EFAULT;
+num:
+	len = sizeof(struct sctp_authchunks) + num_chunks;
+	if (put_user(len, optlen)) return -EFAULT;
 	if (put_user(num_chunks, &p->gauth_number_of_chunks))
 		return -EFAULT;
-	if (copy_to_user(to, ch->chunks, len))
-		return -EFAULT;
-
 	return 0;
 }
 
@@ -5282,13 +5325,16 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
 	struct sctp_authchunks val;
 	struct sctp_association *asoc;
 	struct sctp_chunks_param *ch;
-	u32    num_chunks;
+	u32    num_chunks = 0;
 	char __user *to;
 
-	if (len <= sizeof(struct sctp_authchunks))
+	if (!sctp_auth_enable)
+		return -EACCES;
+
+	if (len < sizeof(struct sctp_authchunks))
 		return -EINVAL;
 
-	if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
+	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
 		return -EFAULT;
 
 	to = p->gauth_chunks;
@@ -5301,17 +5347,21 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
 	else
 		ch = sctp_sk(sk)->ep->auth_chunk_list;
 
+	if (!ch)
+		goto num;
+
 	num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
-	if (len < num_chunks)
+	if (len < sizeof(struct sctp_authchunks) + num_chunks)
 		return -EINVAL;
 
-	len = num_chunks;
+	if (copy_to_user(to, ch->chunks, num_chunks))
+		return -EFAULT;
+num:
+	len = sizeof(struct sctp_authchunks) + num_chunks;
 	if (put_user(len, optlen))
 		return -EFAULT;
 	if (put_user(num_chunks, &p->gauth_number_of_chunks))
 		return -EFAULT;
-	if (copy_to_user(to, ch->chunks, len))
-		return -EFAULT;
 
 	return 0;
 }