38 files changed, 176 insertions, 143 deletions
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 6ecc600c1bcc..3ec20cc18b0c 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -307,7 +307,7 @@ poll_some_more:
 		}
 		spin_unlock_irq(&ep->rx_lock);
 
-		if (more && netif_rx_reschedule(dev, napi))
+		if (more && netif_rx_reschedule(napi))
 			goto poll_some_more;
 	}
 
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 26af411fc428..5fce1d5c1a1a 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -504,7 +504,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
 		netif_rx_complete(napi);
 		qmgr_enable_irq(rxq);
 		if (!qmgr_stat_empty(rxq) &&
-		    netif_rx_reschedule(dev, napi)) {
+		    netif_rx_reschedule(napi)) {
 #if DEBUG_RX
 			printk(KERN_DEBUG "%s: eth_poll"
 			       " netif_rx_reschedule successed\n",
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 592daee9dc28..9ad22d1b00fd 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -29,7 +29,6 @@
 #include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/irq.h>
-#include <linux/delay.h>
 #include <linux/io.h>
 
 #include <asm/irq.h>
diff --git a/drivers/net/eexpress.h b/drivers/net/eexpress.h
index 707df3fcfe40..dc9c6ea289e9 100644
--- a/drivers/net/eexpress.h
+++ b/drivers/net/eexpress.h
@@ -68,17 +68,17 @@
  */
 
 /* these functions take the SCB status word and test the relevant status bit */
-#define SCB_complete(s) ((s&0x8000)!=0)
-#define SCB_rxdframe(s) ((s&0x4000)!=0)
-#define SCB_CUdead(s) ((s&0x2000)!=0)
-#define SCB_RUdead(s) ((s&0x1000)!=0)
-#define SCB_ack(s) (s & 0xf000)
+#define SCB_complete(s) (((s) & 0x8000) != 0)
+#define SCB_rxdframe(s) (((s) & 0x4000) != 0)
+#define SCB_CUdead(s) (((s) & 0x2000) != 0)
+#define SCB_RUdead(s) (((s) & 0x1000) != 0)
+#define SCB_ack(s) ((s) & 0xf000)
 
 /* Command unit status: 0=idle, 1=suspended, 2=active */
-#define SCB_CUstat(s) ((s&0x0300)>>8)
+#define SCB_CUstat(s) (((s)&0x0300)>>8)
 
 /* Receive unit status: 0=idle, 1=suspended, 2=out of resources, 4=ready */
-#define SCB_RUstat(s) ((s&0x0070)>>4)
+#define SCB_RUstat(s) (((s)&0x0070)>>4)
 
 /* SCB commands */
 #define SCB_CUnop	0x0000
@@ -98,18 +98,18 @@
  * Command block defines
  */
 
-#define Stat_Done(s) ((s&0x8000)!=0)
-#define Stat_Busy(s) ((s&0x4000)!=0)
-#define Stat_OK(s) ((s&0x2000)!=0)
-#define Stat_Abort(s) ((s&0x1000)!=0)
-#define Stat_STFail ((s&0x0800)!=0)
-#define Stat_TNoCar(s) ((s&0x0400)!=0)
-#define Stat_TNoCTS(s) ((s&0x0200)!=0)
-#define Stat_TNoDMA(s) ((s&0x0100)!=0)
-#define Stat_TDefer(s) ((s&0x0080)!=0)
-#define Stat_TColl(s) ((s&0x0040)!=0)
-#define Stat_TXColl(s) ((s&0x0020)!=0)
-#define Stat_NoColl(s) (s&0x000f)
+#define Stat_Done(s) (((s) & 0x8000) != 0)
+#define Stat_Busy(s) (((s) & 0x4000) != 0)
+#define Stat_OK(s) (((s) & 0x2000) != 0)
+#define Stat_Abort(s) (((s) & 0x1000) != 0)
+#define Stat_STFail (((s) & 0x0800) != 0)
+#define Stat_TNoCar(s) (((s) & 0x0400) != 0)
+#define Stat_TNoCTS(s) (((s) & 0x0200) != 0)
+#define Stat_TNoDMA(s) (((s) & 0x0100) != 0)
+#define Stat_TDefer(s) (((s) & 0x0080) != 0)
+#define Stat_TColl(s) (((s) & 0x0040) != 0)
+#define Stat_TXColl(s) (((s) & 0x0020) != 0)
+#define Stat_NoColl(s) ((s) & 0x000f)
 
 /* Cmd_END will end AFTER the command if this is the first
  * command block after an SCB_CUstart, but BEFORE the command
@@ -136,16 +136,16 @@
  * Frame Descriptor (Receive block) defines
  */
 
-#define FD_Done(s) ((s&0x8000)!=0)
-#define FD_Busy(s) ((s&0x4000)!=0)
-#define FD_OK(s) ((s&0x2000)!=0)
+#define FD_Done(s) (((s) & 0x8000) != 0)
+#define FD_Busy(s) (((s) & 0x4000) != 0)
+#define FD_OK(s) (((s) & 0x2000) != 0)
 
-#define FD_CRC(s) ((s&0x0800)!=0)
-#define FD_Align(s) ((s&0x0400)!=0)
-#define FD_Resrc(s) ((s&0x0200)!=0)
-#define FD_DMA(s) ((s&0x0100)!=0)
-#define FD_Short(s) ((s&0x0080)!=0)
-#define FD_NoEOF(s) ((s&0x0040)!=0)
+#define FD_CRC(s) (((s) & 0x0800) != 0)
+#define FD_Align(s) (((s) & 0x0400) != 0)
+#define FD_Resrc(s) (((s) & 0x0200) != 0)
+#define FD_DMA(s) (((s) & 0x0100) != 0)
+#define FD_Short(s) (((s) & 0x0080) != 0)
+#define FD_NoEOF(s) (((s) & 0x0040) != 0)
 
 struct rfd_header {
 	volatile unsigned long flags;
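[Editorial sketch, not part of the diff] The eexpress.h hunks above wrap each macro argument in its own parentheses. A small, made-up illustration of why (status and extra are placeholder operands): without the extra parentheses, & binds tighter than |, so a compound argument is grouped incorrectly.

#define SCB_complete_old(s) ((s & 0x8000) != 0)
#define SCB_complete_new(s) (((s) & 0x8000) != 0)

/* SCB_complete_old(status | extra)
 *   expands to ((status | (extra & 0x8000)) != 0)   -- wrong grouping
 * SCB_complete_new(status | extra)
 *   expands to (((status | extra) & 0x8000) != 0)   -- intended test
 */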
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index c1c05852a95e..eda72dd2120f 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -169,13 +169,10 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
 		mlx4_info(mdev, "Using %d tx rings for port:%d\n",
 			  mdev->profile.prof[i].tx_ring_num, i);
-		if (!mdev->profile.prof[i].rx_ring_num) {
-			mdev->profile.prof[i].rx_ring_num = dev->caps.num_comp_vectors;
+		mdev->profile.prof[i].rx_ring_num =
+			min_t(int, dev->caps.num_comp_vectors, MAX_RX_RINGS);
 		mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
 			  mdev->profile.prof[i].rx_ring_num, i);
-		} else
-			mlx4_info(mdev, "Using %d rx rings for port:%d\n",
-				  mdev->profile.prof[i].rx_ring_num, i);
 	}
 
 	/* Create our own workqueue for reset/multicast tasks
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index ebada3c7aff2..15bb38d99304 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -552,7 +552,7 @@ static void mlx4_en_linkstate(struct work_struct *work)
 }
 
 
-static int mlx4_en_start_port(struct net_device *dev)
+int mlx4_en_start_port(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
@@ -707,7 +707,7 @@ cq_err:
 }
 
 
-static void mlx4_en_stop_port(struct net_device *dev)
+void mlx4_en_stop_port(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
@@ -826,7 +826,7 @@ static int mlx4_en_close(struct net_device *dev)
 	return 0;
 }
 
-static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 {
 	int i;
 
@@ -845,7 +845,7 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 	}
 }
 
-static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_port_profile *prof = priv->prof;
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
index 047b37f5a747..cfeef0f1bacc 100644
--- a/drivers/net/mlx4/en_params.c
+++ b/drivers/net/mlx4/en_params.c
@@ -65,15 +65,6 @@ MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
 MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
 		 " Per priority bit mask");
 
-MLX4_EN_PARM_INT(rx_ring_num1, 0, "Number or Rx rings for port 1 (0 = #cores)");
-MLX4_EN_PARM_INT(rx_ring_num2, 0, "Number or Rx rings for port 2 (0 = #cores)");
-
-MLX4_EN_PARM_INT(tx_ring_size1, MLX4_EN_AUTO_CONF, "Tx ring size for port 1");
-MLX4_EN_PARM_INT(tx_ring_size2, MLX4_EN_AUTO_CONF, "Tx ring size for port 2");
-MLX4_EN_PARM_INT(rx_ring_size1, MLX4_EN_AUTO_CONF, "Rx ring size for port 1");
-MLX4_EN_PARM_INT(rx_ring_size2, MLX4_EN_AUTO_CONF, "Rx ring size for port 2");
-
-
 int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
 {
 	struct mlx4_en_profile *params = &mdev->profile;
@@ -87,6 +78,8 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
 		params->prof[i].rx_ppp = pfcrx;
 		params->prof[i].tx_pause = 1;
 		params->prof[i].tx_ppp = pfctx;
+		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
+		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
 	}
 	if (pfcrx || pfctx) {
 		params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
@@ -95,32 +88,7 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
 		params->prof[1].tx_ring_num = 1;
 		params->prof[2].tx_ring_num = 1;
 	}
-	params->prof[1].rx_ring_num = min_t(int, rx_ring_num1, MAX_RX_RINGS);
-	params->prof[2].rx_ring_num = min_t(int, rx_ring_num2, MAX_RX_RINGS);
-
-	if (tx_ring_size1 == MLX4_EN_AUTO_CONF)
-		tx_ring_size1 = MLX4_EN_DEF_TX_RING_SIZE;
-	params->prof[1].tx_ring_size =
-		(tx_ring_size1 < MLX4_EN_MIN_TX_SIZE) ?
-		 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size1);
-
-	if (tx_ring_size2 == MLX4_EN_AUTO_CONF)
-		tx_ring_size2 = MLX4_EN_DEF_TX_RING_SIZE;
-	params->prof[2].tx_ring_size =
-		(tx_ring_size2 < MLX4_EN_MIN_TX_SIZE) ?
-		 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size2);
-
-	if (rx_ring_size1 == MLX4_EN_AUTO_CONF)
-		rx_ring_size1 = MLX4_EN_DEF_RX_RING_SIZE;
-	params->prof[1].rx_ring_size =
-		(rx_ring_size1 < MLX4_EN_MIN_RX_SIZE) ?
-		 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size1);
-
-	if (rx_ring_size2 == MLX4_EN_AUTO_CONF)
-		rx_ring_size2 = MLX4_EN_DEF_RX_RING_SIZE;
-	params->prof[2].rx_ring_size =
-		(rx_ring_size2 < MLX4_EN_MIN_RX_SIZE) ?
-		 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size2);
+
 	return 0;
 }
 
@@ -417,6 +385,54 @@ static void mlx4_en_get_pauseparam(struct net_device *dev,
 	pause->rx_pause = priv->prof->rx_pause;
 }
 
+static int mlx4_en_set_ringparam(struct net_device *dev,
+				 struct ethtool_ringparam *param)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	u32 rx_size, tx_size;
+	int port_up = 0;
+	int err = 0;
+
+	if (param->rx_jumbo_pending || param->rx_mini_pending)
+		return -EINVAL;
+
+	rx_size = roundup_pow_of_two(param->rx_pending);
+	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
+	tx_size = roundup_pow_of_two(param->tx_pending);
+	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
+
+	if (rx_size == priv->prof->rx_ring_size &&
+	    tx_size == priv->prof->tx_ring_size)
+		return 0;
+
+	mutex_lock(&mdev->state_lock);
+	if (priv->port_up) {
+		port_up = 1;
+		mlx4_en_stop_port(dev);
+	}
+
+	mlx4_en_free_resources(priv);
+
+	priv->prof->tx_ring_size = tx_size;
+	priv->prof->rx_ring_size = rx_size;
+
+	err = mlx4_en_alloc_resources(priv);
+	if (err) {
+		mlx4_err(mdev, "Failed reallocating port resources\n");
+		goto out;
+	}
+	if (port_up) {
+		err = mlx4_en_start_port(dev);
+		if (err)
+			mlx4_err(mdev, "Failed starting port\n");
+	}
+
+out:
+	mutex_unlock(&mdev->state_lock);
+	return err;
+}
+
 static void mlx4_en_get_ringparam(struct net_device *dev,
 				  struct ethtool_ringparam *param)
 {
@@ -456,6 +472,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
 	.get_pauseparam = mlx4_en_get_pauseparam,
 	.set_pauseparam = mlx4_en_set_pauseparam,
 	.get_ringparam = mlx4_en_get_ringparam,
+	.set_ringparam = mlx4_en_set_ringparam,
 	.get_flags = ethtool_op_get_flags,
 	.set_flags = ethtool_op_set_flags,
 };
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index e78209768def..2e96c7b2180a 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -489,6 +489,12 @@ void mlx4_en_destroy_netdev(struct net_device *dev);
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 			struct mlx4_en_port_profile *prof);
 
+int mlx4_en_start_port(struct net_device *dev);
+void mlx4_en_stop_port(struct net_device *dev);
+
+void mlx4_en_free_resources(struct mlx4_en_priv *priv);
+int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
+
 int mlx4_en_get_profile(struct mlx4_en_dev *mdev);
 
 int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index dcd199045613..5b7a574ce571 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -954,7 +954,6 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
 {
 	const struct pasemi_mac_rxring *rxring = data;
 	struct pasemi_mac *mac = rxring->mac;
-	struct net_device *dev = mac->netdev;
 	const struct pasemi_dmachan *chan = &rxring->chan;
 	unsigned int reg;
 
@@ -1634,7 +1633,6 @@ static void pasemi_mac_set_rx_mode(struct net_device *dev)
 static int pasemi_mac_poll(struct napi_struct *napi, int budget)
 {
 	struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
-	struct net_device *dev = mac->netdev;
 	int pkts;
 
 	pasemi_mac_clean_tx(tx_ring(mac));
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 5e989d884ddd..dc3f1108884d 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1484,13 +1484,13 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
 	}
 
 	if (likely(intsts & inten & INT_STS_RSFL_)) {
-		if (likely(netif_rx_schedule_prep(dev, &pdata->napi))) {
+		if (likely(netif_rx_schedule_prep(&pdata->napi))) {
 			/* Disable Rx interrupts */
 			temp = smsc911x_reg_read(pdata, INT_EN);
 			temp &= (~INT_EN_RSFL_EN_);
 			smsc911x_reg_write(pdata, INT_EN, temp);
 			/* Schedule a NAPI poll */
-			__netif_rx_schedule(dev, &pdata->napi);
+			__netif_rx_schedule(&pdata->napi);
 		} else {
 			SMSC_WARNING(RX_ERR,
 				"netif_rx_schedule_prep failed");
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index c5c123d3af57..88d2c67788df 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1277,7 +1277,6 @@ bad_desc:
 static int spider_net_poll(struct napi_struct *napi, int budget)
 {
 	struct spider_net_card *card = container_of(napi, struct spider_net_card, napi);
-	struct net_device *netdev = card->netdev;
 	int packets_done = 0;
 
 	while (packets_done < budget) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 666c1d98cdaf..69f9a0ec764d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -343,7 +343,7 @@ static void tun_net_init(struct net_device *dev)
 		break;
 
 	case TUN_TAP_DEV:
-		dev->netdev_ops = &tun_netdev_ops;
+		dev->netdev_ops = &tap_netdev_ops;
 		/* Ethernet TAP Device */
 		ether_setup(dev);
 
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 198ce3cf378a..9f7896a25f1b 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2831,7 +2831,7 @@ static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf,
 	for (i = 0; i < iface->desc.bNumEndpoints; i++) {
 		endp = &iface->endpoint[i].desc;
 		if (((endp->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == dir) &&
-		    ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == type))
+		    (usb_endpoint_type(endp) == type))
 			return endp;
 	}
 
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 0c6802507a79..2dc241689d37 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -654,7 +654,7 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
 		netif_rx_complete(dev, napi);
 		qmgr_enable_irq(rxq);
 		if (!qmgr_stat_empty(rxq) &&
-		    netif_rx_reschedule(dev, napi)) {
+		    netif_rx_reschedule(napi)) {
 #if DEBUG_RX
 			printk(KERN_DEBUG "%s: hss_hdlc_poll"
 			       " netif_rx_reschedule succeeded\n",
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 04c139666965..b5db57d2fcf5 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1065,8 +1065,7 @@ static int eject_installer(struct usb_interface *intf)
 	/* Find bulk out endpoint */
 	endpoint = &iface_desc->endpoint[1].desc;
 	if ((endpoint->bEndpointAddress & USB_TYPE_MASK) == USB_DIR_OUT &&
-	    (endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
-	    USB_ENDPOINT_XFER_BULK) {
+	    usb_endpoint_xfer_bulk(endpoint)) {
 		bulk_out_ep = endpoint->bEndpointAddress;
 	} else {
 		dev_err(&udev->dev,
diff --git a/net/core/dev.c b/net/core/dev.c
index 446424027d24..09c66a449da6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5066,13 +5066,14 @@ static struct pernet_operations __net_initdata netdev_net_ops = {
 
 static void __net_exit default_device_exit(struct net *net)
 {
-	struct net_device *dev, *next;
+	struct net_device *dev;
 	/*
 	 * Push all migratable of the network devices back to the
 	 * initial network namespace
 	 */
 	rtnl_lock();
-	for_each_netdev_safe(net, dev, next) {
+restart:
+	for_each_netdev(net, dev) {
 		int err;
 		char fb_name[IFNAMSIZ];
 
@@ -5083,7 +5084,7 @@ static void __net_exit default_device_exit(struct net *net)
 		/* Delete virtual devices */
 		if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
 			dev->rtnl_link_ops->dellink(dev);
-			continue;
+			goto restart;
 		}
 
 		/* Push remaing network devices to init_net */
@@ -5094,6 +5095,7 @@ static void __net_exit default_device_exit(struct net *net)
 				__func__, dev->name, err);
 			BUG();
 		}
+		goto restart;
 	}
 	rtnl_unlock();
 }
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 9c3717a23cf7..f66c58df8953 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2414,7 +2414,7 @@ static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
 
-	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu+1;
@@ -2429,7 +2429,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	struct neigh_table *tbl = pde->data;
 	int cpu;
 
-	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu+1;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index d5c2bacb713c..1747ccae8e8d 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -964,7 +964,6 @@ adjudge_to_death:
 	state = sk->sk_state;
 	sock_hold(sk);
 	sock_orphan(sk);
-	percpu_counter_inc(sk->sk_prot->orphan_count);
 
 	/*
 	 * It is the last release_sock in its life. It will remove backlog.
@@ -978,6 +977,8 @@
 	bh_lock_sock(sk);
 	WARN_ON(sock_owned_by_user(sk));
 
+	percpu_counter_inc(sk->sk_prot->orphan_count);
+
 	/* Have we already been destroyed by a softirq or backlog? */
 	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
 		goto out;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index c7cda1ca8e65..f26ab38680de 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -633,8 +633,6 @@ void inet_csk_listen_stop(struct sock *sk)
 
 		acc_req = req->dl_next;
 
-		percpu_counter_inc(sk->sk_prot->orphan_count);
-
 		local_bh_disable();
 		bh_lock_sock(child);
 		WARN_ON(sock_owned_by_user(child));
@@ -644,6 +642,8 @@ void inet_csk_listen_stop(struct sock *sk)
 
 		sock_orphan(child);
 
+		percpu_counter_inc(sk->sk_prot->orphan_count);
+
 		inet_csk_destroy_sock(child);
 
 		bh_unlock_sock(child);
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 313ebf00ee36..6ba5c557690c 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -291,7 +291,7 @@ static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
 
-	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu+1;
@@ -306,7 +306,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	struct net *net = seq_file_net(seq);
 	int cpu;
 
-	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu+1;
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 614958b7c276..eb62e58bff79 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -38,6 +38,7 @@
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <net/udplite.h>
+#include <linux/bottom_half.h>
 #include <linux/inetdevice.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -50,13 +51,17 @@
 static int sockstat_seq_show(struct seq_file *seq, void *v)
 {
 	struct net *net = seq->private;
+	int orphans, sockets;
+
+	local_bh_disable();
+	orphans = percpu_counter_sum_positive(&tcp_orphan_count),
+	sockets = percpu_counter_sum_positive(&tcp_sockets_allocated),
+	local_bh_enable();
 
 	socket_seq_show(seq);
 	seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n",
-		   sock_prot_inuse_get(net, &tcp_prot),
-		   (int)percpu_counter_sum_positive(&tcp_orphan_count),
-		   tcp_death_row.tw_count,
-		   (int)percpu_counter_sum_positive(&tcp_sockets_allocated),
+		   sock_prot_inuse_get(net, &tcp_prot), orphans,
+		   tcp_death_row.tw_count, sockets,
 		   atomic_read(&tcp_memory_allocated));
 	seq_printf(seq, "UDP: inuse %d mem %d\n",
 		   sock_prot_inuse_get(net, &udp_prot),
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 77bfba975959..97f71153584f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -429,7 +429,7 @@ static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
 
-	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu+1;
@@ -442,7 +442,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	int cpu;
 
-	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu+1;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1f3d52946b3b..f28acf11fc67 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1836,7 +1836,6 @@ adjudge_to_death:
 	state = sk->sk_state;
 	sock_hold(sk);
 	sock_orphan(sk);
-	percpu_counter_inc(sk->sk_prot->orphan_count);
 
 	/* It is the last release_sock in its life. It will remove backlog. */
 	release_sock(sk);
@@ -1849,6 +1848,8 @@
 	bh_lock_sock(sk);
 	WARN_ON(sock_owned_by_user(sk));
 
+	percpu_counter_inc(sk->sk_prot->orphan_count);
+
 	/* Have we already been destroyed by a softirq or backlog? */
 	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
 		goto out;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 10172487921b..9d839fa9331e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -51,6 +51,7 @@
  */
 
 
+#include <linux/bottom_half.h>
 #include <linux/types.h>
 #include <linux/fcntl.h>
 #include <linux/module.h>
@@ -1797,7 +1798,9 @@ static int tcp_v4_init_sock(struct sock *sk)
 	sk->sk_sndbuf = sysctl_tcp_wmem[1];
 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
+	local_bh_disable();
 	percpu_counter_inc(&tcp_sockets_allocated);
+	local_bh_enable();
 
 	return 0;
 }
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 8702b06cb60a..e8b8337a8310 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -23,6 +23,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <linux/bottom_half.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/types.h>
@@ -1830,7 +1831,9 @@ static int tcp_v6_init_sock(struct sock *sk)
 	sk->sk_sndbuf = sysctl_tcp_wmem[1];
 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
+	local_bh_disable();
 	percpu_counter_inc(&tcp_sockets_allocated);
+	local_bh_enable();
 
 	return 0;
 }
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 9394f539966a..3eb5e2660c49 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -507,7 +507,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	/* No cache entry or it is invalid, time to schedule */
 	dest = __ip_vs_lblc_schedule(svc);
 	if (!dest) {
-		IP_VS_DBG(1, "no destination available\n");
+		IP_VS_ERR_RL("LBLC: no destination available\n");
 		return NULL;
 	}
 
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 92dc76a6842c..c04ce56c7f0f 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -690,7 +690,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	/* The cache entry is invalid, time to schedule */
 	dest = __ip_vs_lblcr_schedule(svc);
 	if (!dest) {
-		IP_VS_DBG(1, "no destination available\n");
+		IP_VS_ERR_RL("LBLCR: no destination available\n");
 		read_unlock(&svc->sched_lock);
 		return NULL;
 	}
diff --git a/net/netfilter/ipvs/ip_vs_lc.c b/net/netfilter/ipvs/ip_vs_lc.c
index 51912cab777b..d0dadc8a65fd 100644
--- a/net/netfilter/ipvs/ip_vs_lc.c
+++ b/net/netfilter/ipvs/ip_vs_lc.c
@@ -66,11 +66,15 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		}
 	}
 
-	if (least)
-		IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d inactconns %d\n",
-			      IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
-			      atomic_read(&least->activeconns),
-			      atomic_read(&least->inactconns));
+	if (!least)
+		IP_VS_ERR_RL("LC: no destination available\n");
+	else
+		IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d "
+			      "inactconns %d\n",
+			      IP_VS_DBG_ADDR(svc->af, &least->addr),
+			      ntohs(least->port),
+			      atomic_read(&least->activeconns),
+			      atomic_read(&least->inactconns));
 
 	return least;
 }
diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c
index 6758ad2ceaaf..694952db5026 100644
--- a/net/netfilter/ipvs/ip_vs_nq.c
+++ b/net/netfilter/ipvs/ip_vs_nq.c
@@ -95,8 +95,10 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		}
 	}
 
-	if (!least)
+	if (!least) {
+		IP_VS_ERR_RL("NQ: no destination available\n");
 		return NULL;
+	}
 
 out:
 	IP_VS_DBG_BUF(6, "NQ: server %s:%u "
diff --git a/net/netfilter/ipvs/ip_vs_rr.c b/net/netfilter/ipvs/ip_vs_rr.c
index 8fb51c169eb8..2d16ab7f8c1e 100644
--- a/net/netfilter/ipvs/ip_vs_rr.c
+++ b/net/netfilter/ipvs/ip_vs_rr.c
@@ -69,6 +69,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		q = q->next;
 	} while (q != p);
 	write_unlock(&svc->sched_lock);
+	IP_VS_ERR_RL("RR: no destination available\n");
 	return NULL;
 
 out:
diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c
index 691a6a0086e1..20e4657d2f3b 100644
--- a/net/netfilter/ipvs/ip_vs_sed.c
+++ b/net/netfilter/ipvs/ip_vs_sed.c
@@ -84,6 +84,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 			goto nextstage;
 		}
 	}
+	IP_VS_ERR_RL("SED: no destination available\n");
 	return NULL;
 
 	/*
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 0e53955ef139..75709ebeb630 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -219,6 +219,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	    || !(dest->flags & IP_VS_DEST_F_AVAILABLE)
 	    || atomic_read(&dest->weight) <= 0
 	    || is_overloaded(dest)) {
+		IP_VS_ERR_RL("SH: no destination available\n");
 		return NULL;
 	}
 
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c
index 57b452bbb4ea..8e942565b47d 100644
--- a/net/netfilter/ipvs/ip_vs_wlc.c
+++ b/net/netfilter/ipvs/ip_vs_wlc.c
@@ -72,6 +72,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 			goto nextstage;
 		}
 	}
+	IP_VS_ERR_RL("WLC: no destination available\n");
 	return NULL;
 
 	/*
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 2f618dc29c5b..f7d74ef1ecf9 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -155,6 +155,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 
 	if (mark->cl == mark->cl->next) {
 		/* no dest entry */
+		IP_VS_ERR_RL("WRR: no destination available: "
+			     "no destinations present\n");
 		dest = NULL;
 		goto out;
 	}
@@ -168,8 +170,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 			 */
 			if (mark->cw == 0) {
 				mark->cl = &svc->destinations;
-				IP_VS_ERR_RL("ip_vs_wrr_schedule(): "
-					"no available servers\n");
+				IP_VS_ERR_RL("WRR: no destination "
+					"available\n");
 				dest = NULL;
 				goto out;
 			}
@@ -191,6 +193,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 			/* back to the start, and no dest is found.
 			   It is only possible when all dests are OVERLOADED */
 			dest = NULL;
+			IP_VS_ERR_RL("WRR: no destination available: "
+				     "all destinations are overloaded\n");
 			goto out;
 		}
 	}
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index f37b9b74c6a8..4da54b0b9233 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -200,7 +200,7 @@ static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
 
-	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu + 1;
@@ -215,7 +215,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	struct net *net = seq_file_net(seq);
 	int cpu;
 
-	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu + 1;
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 4f7ef0db302b..929218a47620 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -335,9 +335,6 @@ config NET_CLS_CGROUP
 	  Say Y here if you want to classify packets based on the control
 	  cgroup of their process.
 
-	  To compile this code as a module, choose M here: the
-	  module will be called cls_cgroup.
-
 config NET_EMATCH
 	bool "Extended Matches"
 	select NET_CLS
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 0d68b1975983..91a3db4a76f8 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -24,10 +24,16 @@ struct cgroup_cls_state
 	u32 classid;
 };
 
-static inline struct cgroup_cls_state *net_cls_state(struct cgroup *cgrp)
+static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
 {
-	return (struct cgroup_cls_state *)
-		cgroup_subsys_state(cgrp, net_cls_subsys_id);
+	return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
+			    struct cgroup_cls_state, css);
+}
+
+static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
+{
+	return container_of(task_subsys_state(p, net_cls_subsys_id),
+			    struct cgroup_cls_state, css);
 }
 
 static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
@@ -39,19 +45,19 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
 		return ERR_PTR(-ENOMEM);
 
 	if (cgrp->parent)
-		cs->classid = net_cls_state(cgrp->parent)->classid;
+		cs->classid = cgrp_cls_state(cgrp->parent)->classid;
 
 	return &cs->css;
 }
 
 static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
-	kfree(ss);
+	kfree(cgrp_cls_state(cgrp));
 }
 
 static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
 {
-	return net_cls_state(cgrp)->classid;
+	return cgrp_cls_state(cgrp)->classid;
 }
 
 static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
@@ -59,7 +65,7 @@ static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
 	if (!cgroup_lock_live_group(cgrp))
 		return -ENODEV;
 
-	net_cls_state(cgrp)->classid = (u32) value;
+	cgrp_cls_state(cgrp)->classid = (u32) value;
 
 	cgroup_unlock();
 
@@ -115,8 +121,7 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
 		return -1;
 
 	rcu_read_lock();
-	cs = (struct cgroup_cls_state *) task_subsys_state(current,
-						net_cls_subsys_id);
+	cs = task_cls_state(current);
 	if (cs->classid && tcf_em_tree_match(skb, &head->ematches, NULL)) {
 		res->classid = cs->classid;
 		res->class = 0;
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index 284eaef1dbf2..a2adb51849a9 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -44,27 +44,14 @@ static struct snmp_mib xfrm_mib_list[] = {
 	SNMP_MIB_SENTINEL
 };
 
-static unsigned long
-fold_field(void *mib[], int offt)
-{
-	unsigned long res = 0;
-	int i;
-
-	for_each_possible_cpu(i) {
-		res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
-		res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
-	}
-	return res;
-}
-
 static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
 {
 	struct net *net = seq->private;
 	int i;
 	for (i=0; xfrm_mib_list[i].name; i++)
 		seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name,
-			   fold_field((void **)net->mib.xfrm_statistics,
+			   snmp_fold_field((void **)net->mib.xfrm_statistics,
 			   xfrm_mib_list[i].entry));
 	return 0;
 }
 
