 drivers/net/ethernet/freescale/fec_mpc52xx.c  |  2
 drivers/net/ethernet/intel/e1000/e1000_main.c |  2
 drivers/net/ethernet/intel/e1000e/ich8lan.c   | 18
 drivers/net/ethernet/mellanox/mlx4/cmd.c      |  4
 drivers/net/ethernet/mellanox/mlx4/en_main.c  | 12
 drivers/net/ethernet/mellanox/mlx4/eq.c       |  2
 drivers/net/ethernet/mellanox/mlx4/fw.c       | 51
 drivers/net/ethernet/mellanox/mlx4/main.c     | 40
 drivers/net/ethernet/mellanox/mlx4/mlx4.h     | 10
 drivers/net/ethernet/mellanox/mlx4/profile.c  |  9
 drivers/net/ethernet/realtek/8139cp.c         | 24
 drivers/net/ethernet/realtek/8139too.c        |  2
 drivers/net/ethernet/realtek/r8169.c          |  3
 drivers/net/usb/mcs7830.c                     | 25
 include/linux/mlx4/device.h                   |  6
 include/net/cipso_ipv4.h                      | 29
 lib/dynamic_queue_limits.c                    | 18
 net/core/sock.c                               |  7
 net/ipv4/inet_connection_sock.c               |  3
 net/ipv4/tcp_ipv4.c                           |  9
 net/ipv6/tcp_ipv6.c                           |  9
 21 files changed, 201 insertions(+), 84 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 97f947b3d94a..2933d08b036e 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -437,7 +437,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
 		length = status & BCOM_FEC_RX_BD_LEN_MASK;
 		skb_put(rskb, length - 4);	/* length without CRC32 */
 		rskb->protocol = eth_type_trans(rskb, dev);
-		if (!skb_defer_rx_timestamp(skb))
+		if (!skb_defer_rx_timestamp(rskb))
 			netif_rx(rskb);
 
 		spin_lock(&priv->lock);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 95731c841044..7483ca0a6282 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4080,7 +4080,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 				spin_lock_irqsave(&adapter->stats_lock,
 						  irq_flags);
 				e1000_tbi_adjust_stats(hw, &adapter->stats,
-						       length, skb->data);
+						       length, mapped);
 				spin_unlock_irqrestore(&adapter->stats_lock,
 						       irq_flags);
 				length--;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index bbf70ba367da..238ab2f8a5e7 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -165,14 +165,14 @@
 #define I217_EEE_100_SUPPORTED	(1 << 1)	/* 100BaseTx EEE supported */
 
 /* Intel Rapid Start Technology Support */
-#define I217_PROXY_CTRL                 PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL                 BM_PHY_REG(BM_WUC_PAGE, 70)
 #define I217_PROXY_CTRL_AUTO_DISABLE    0x0080
 #define I217_SxCTRL                     PHY_REG(BM_PORT_CTRL_PAGE, 28)
-#define I217_SxCTRL_MASK                0x1000
+#define I217_SxCTRL_ENABLE_LPI_RESET    0x1000
 #define I217_CGFREG                     PHY_REG(772, 29)
-#define I217_CGFREG_MASK                0x0002
+#define I217_CGFREG_ENABLE_MTA_RESET    0x0002
 #define I217_MEMPWR                     PHY_REG(772, 26)
-#define I217_MEMPWR_MASK                0x0010
+#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
 
 /* Strapping Option Register - RO */
 #define E1000_STRAP	0x0000C
@@ -4089,12 +4089,12 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
 			 * power good.
 			 */
 			e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
-			phy_reg |= I217_SxCTRL_MASK;
+			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
 			e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
 
 			/* Disable the SMB release on LCD reset. */
 			e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
-			phy_reg &= ~I217_MEMPWR;
+			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
 			e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
 		}
 
@@ -4103,7 +4103,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
 		 * Support
 		 */
 		e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
-		phy_reg |= I217_CGFREG_MASK;
+		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
 		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
 
 release:
@@ -4176,7 +4176,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
 			ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
 			if (ret_val)
 				goto release;
-			phy_reg |= I217_MEMPWR_MASK;
+			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
 			e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
 
 			/* Disable Proxy */
@@ -4186,7 +4186,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
 		ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
 		if (ret_val)
 			goto release;
-		phy_reg &= ~I217_CGFREG_MASK;
+		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
 		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
 release:
 	if (ret_val)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 1bcead1fa2f6..842c8ce9494e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -617,7 +617,7 @@ static struct mlx4_cmd_info cmd_info[] = {
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = NULL
+		.wrapper = mlx4_QUERY_FW_wrapper
 	},
 	{
 		.opcode = MLX4_CMD_QUERY_HCA,
@@ -635,7 +635,7 @@ static struct mlx4_cmd_info cmd_info[] = {
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = NULL
+		.wrapper = mlx4_QUERY_DEV_CAP_wrapper
 	},
 	{
 		.opcode = MLX4_CMD_QUERY_FUNC_CAP,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 988b2424e1c6..69ba57270481 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -136,13 +136,12 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
 	struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
 	struct mlx4_en_priv *priv;
 
-	if (!mdev->pndev[port])
-		return;
-
-	priv = netdev_priv(mdev->pndev[port]);
 	switch (event) {
 	case MLX4_DEV_EVENT_PORT_UP:
 	case MLX4_DEV_EVENT_PORT_DOWN:
+		if (!mdev->pndev[port])
+			return;
+		priv = netdev_priv(mdev->pndev[port]);
 		/* To prevent races, we poll the link state in a separate
 		   task rather than changing it here */
 		priv->link_state = event;
@@ -154,7 +153,10 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
 		break;
 
 	default:
-		mlx4_warn(mdev, "Unhandled event: %d\n", event);
+		if (port < 1 || port > dev->caps.num_ports ||
+		    !mdev->pndev[port])
+			return;
+		mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, port);
 	}
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 3b6f8efbf141..bce98d9c0039 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -426,7 +426,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 
 			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
 
-			if (flr_slave > dev->num_slaves) {
+			if (flr_slave >= dev->num_slaves) {
 				mlx4_warn(dev,
 					  "Got FLR for unknown function: %d\n",
 					  flr_slave);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 68f5cd6cb3c7..9c83bb8151ea 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -412,7 +412,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	outbox = mailbox->buf;
 
 	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
-			   MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev));
+			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 	if (err)
 		goto out;
 
@@ -590,8 +590,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
 	for (i = 1; i <= dev_cap->num_ports; ++i) {
 		err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
-				   MLX4_CMD_TIME_CLASS_B,
-				   !mlx4_is_slave(dev));
+				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 		if (err)
 			goto out;
 
@@ -669,6 +668,28 @@ out:
 	return err;
 }
 
+int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
+			       struct mlx4_vhcr *vhcr,
+			       struct mlx4_cmd_mailbox *inbox,
+			       struct mlx4_cmd_mailbox *outbox,
+			       struct mlx4_cmd_info *cmd)
+{
+	int err = 0;
+	u8 field;
+
+	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
+			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+	if (err)
+		return err;
+
+	/* For guests, report Blueflame disabled */
+	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
+	field &= 0x7f;
+	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
+
+	return 0;
+}
+
 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
 			    struct mlx4_vhcr *vhcr,
 			    struct mlx4_cmd_mailbox *inbox,
@@ -860,6 +881,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
 		((fw_ver & 0xffff0000ull) >> 16) |
 		((fw_ver & 0x0000ffffull) << 16);
 
+	if (mlx4_is_slave(dev))
+		goto out;
+
 	MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
 	dev->caps.function = lg;
 
@@ -927,6 +951,27 @@ out:
 	return err;
 }
 
+int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd)
+{
+	u8 *outbuf;
+	int err;
+
+	outbuf = outbox->buf;
+	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
+			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+	if (err)
+		return err;
+
+	/* for slaves, zero out everything except FW version */
+	outbuf[0] = outbuf[1] = 0;
+	memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
+	return 0;
+}
+
 static void get_board_id(void *vsd, char *board_id)
 {
 	int i;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2e024a68fa81..ee6f4fe00837 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -142,12 +142,6 @@ struct mlx4_port_config {
 	struct pci_dev *pdev;
 };
 
-static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
-{
-	return dev->caps.reserved_eqs +
-		MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
-}
-
 int mlx4_check_port_params(struct mlx4_dev *dev,
 			   enum mlx4_port_type *port_type)
 {
@@ -217,6 +211,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	}
 
 	dev->caps.num_ports = dev_cap->num_ports;
+	dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM;
 	for (i = 1; i <= dev->caps.num_ports; ++i) {
 		dev->caps.vl_cap[i] = dev_cap->max_vl[i];
 		dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
@@ -435,12 +430,17 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
 
 	memset(&dev_cap, 0, sizeof(dev_cap));
+	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
 	err = mlx4_dev_cap(dev, &dev_cap);
 	if (err) {
 		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
 		return err;
 	}
 
+	err = mlx4_QUERY_FW(dev);
+	if (err)
+		mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");
+
 	page_size = ~dev->caps.page_size_cap + 1;
 	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
 	if (page_size > PAGE_SIZE) {
@@ -485,15 +485,15 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 	dev->caps.num_mgms = 0;
 	dev->caps.num_amgms = 0;
 
-	for (i = 1; i <= dev->caps.num_ports; ++i)
-		dev->caps.port_mask[i] = dev->caps.port_type[i];
-
 	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
 		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
 			 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
 		return -ENODEV;
 	}
 
+	for (i = 1; i <= dev->caps.num_ports; ++i)
+		dev->caps.port_mask[i] = dev->caps.port_type[i];
+
 	if (dev->caps.uar_page_size * (dev->caps.num_uars -
 				       dev->caps.reserved_uars) >
 	    pci_resource_len(dev->pdev, 2)) {
@@ -504,18 +504,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 		return -ENODEV;
 	}
 
-#if 0
-	mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
-	mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
-		  dev->caps.num_uars, dev->caps.reserved_uars,
-		  dev->caps.uar_page_size * dev->caps.num_uars,
-		  pci_resource_len(dev->pdev, 2));
-	mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
-		  dev->caps.reserved_eqs);
-	mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
-		  dev->caps.num_pds, dev->caps.reserved_pds,
-		  dev->caps.slave_pd_shift, dev->caps.pd_base);
-#endif
 	return 0;
 }
 
@@ -810,9 +798,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
 	if (err)
 		goto err_srq;
 
-	num_eqs = (mlx4_is_master(dev)) ?
-		roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
-		dev->caps.num_eqs;
+	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
+		  dev->caps.num_eqs;
 	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
 				  cmpt_base +
 				  ((u64) (MLX4_CMPT_TYPE_EQ *
@@ -874,9 +861,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 	}
 
 
-	num_eqs = (mlx4_is_master(dev)) ?
-		roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
-		dev->caps.num_eqs;
+	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
+		  dev->caps.num_eqs;
 	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
 				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
 				  num_eqs, num_eqs, 0, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 86b6e5a2fabf..e5d20220762c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1039,6 +1039,11 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev);
 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
 				enum mlx4_res_tracker_free_type type);
 
+int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd);
 int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
 			  struct mlx4_vhcr *vhcr,
 			  struct mlx4_cmd_mailbox *inbox,
@@ -1054,6 +1059,11 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
 			    struct mlx4_cmd_mailbox *inbox,
 			    struct mlx4_cmd_mailbox *outbox,
 			    struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
+			       struct mlx4_vhcr *vhcr,
+			       struct mlx4_cmd_mailbox *inbox,
+			       struct mlx4_cmd_mailbox *outbox,
+			       struct mlx4_cmd_info *cmd);
 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
 			    struct mlx4_vhcr *vhcr,
 			    struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 06e5adeb76f7..b83bc928d52a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -126,7 +126,9 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 	profile[MLX4_RES_AUXC].num = request->num_qp;
 	profile[MLX4_RES_SRQ].num = request->num_srq;
 	profile[MLX4_RES_CQ].num = request->num_cq;
-	profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
+	profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ?
+				   dev->phys_caps.num_phys_eqs :
+				   min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
 	profile[MLX4_RES_DMPT].num = request->num_mpt;
 	profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
 	profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg);
@@ -215,9 +217,10 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 			init_hca->log_num_cqs = profile[i].log_num;
 			break;
 		case MLX4_RES_EQ:
-			dev->caps.num_eqs = profile[i].num;
+			dev->caps.num_eqs = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs,
+								     MAX_MSIX));
 			init_hca->eqc_base = profile[i].start;
-			init_hca->log_num_eqs = profile[i].log_num;
+			init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
 			break;
 		case MLX4_RES_DMPT:
 			dev->caps.num_mpts = profile[i].num;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 5eef290997f9..995d0cfc4c06 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -979,6 +979,17 @@ static void cp_init_hw (struct cp_private *cp)
 	cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
 	cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
 
+	cpw32_f(HiTxRingAddr, 0);
+	cpw32_f(HiTxRingAddr + 4, 0);
+
+	ring_dma = cp->ring_dma;
+	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
 	cp_start_hw(cp);
 	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
 
@@ -992,17 +1003,6 @@ static void cp_init_hw (struct cp_private *cp)
 
 	cpw8(Config5, cpr8(Config5) & PMEStatus);
 
-	cpw32_f(HiTxRingAddr, 0);
-	cpw32_f(HiTxRingAddr + 4, 0);
-
-	ring_dma = cp->ring_dma;
-	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
-	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
-	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
-	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
-	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
 	cpw16(MultiIntr, 0);
 
 	cpw8_f(Cfg9346, Cfg9346_Lock);
@@ -1636,7 +1636,7 @@ static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
 
 static void eeprom_cmd_end(void __iomem *ee_addr)
 {
-	writeb (~EE_CS, ee_addr);
+	writeb(0, ee_addr);
 	eeprom_delay ();
 }
 
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 03df076ed596..1d83565cc6af 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1173,7 +1173,7 @@ static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_l
 	}
 
 	/* Terminate the EEPROM access. */
-	RTL_W8 (Cfg9346, ~EE_CS);
+	RTL_W8(Cfg9346, 0);
 	eeprom_delay ();
 
 	return retval;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 00b4f56a671c..9757ce3543a0 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6345,6 +6345,8 @@ static void __devexit rtl_remove_one(struct pci_dev *pdev)
 
 	cancel_work_sync(&tp->wk.work);
 
+	netif_napi_del(&tp->napi);
+
 	unregister_netdev(dev);
 
 	rtl_release_firmware(tp);
@@ -6668,6 +6670,7 @@ out:
 	return rc;
 
 err_out_msi_4:
+	netif_napi_del(&tp->napi);
 	rtl_disable_msi(pdev, tp);
 	iounmap(ioaddr);
 err_out_free_res_3:
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index add1064f755d..03c2d8d653df 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -629,11 +629,31 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 	return skb->len > 0;
 }
 
+static void mcs7830_status(struct usbnet *dev, struct urb *urb)
+{
+	u8 *buf = urb->transfer_buffer;
+	bool link;
+
+	if (urb->actual_length < 16)
+		return;
+
+	link = !(buf[1] & 0x20);
+	if (netif_carrier_ok(dev->net) != link) {
+		if (link) {
+			netif_carrier_on(dev->net);
+			usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+		} else
+			netif_carrier_off(dev->net);
+		netdev_dbg(dev->net, "Link Status is: %d\n", link);
+	}
+}
+
 static const struct driver_info moschip_info = {
 	.description = "MOSCHIP 7830/7832/7730 usb-NET adapter",
 	.bind = mcs7830_bind,
 	.rx_fixup = mcs7830_rx_fixup,
-	.flags = FLAG_ETHER,
+	.flags = FLAG_ETHER | FLAG_LINK_INTR,
+	.status = mcs7830_status,
 	.in = 1,
 	.out = 2,
 };
@@ -642,7 +662,8 @@ static const struct driver_info sitecom_info = {
 	.description = "Sitecom LN-30 usb-NET adapter",
 	.bind = mcs7830_bind,
 	.rx_fixup = mcs7830_rx_fixup,
-	.flags = FLAG_ETHER,
+	.flags = FLAG_ETHER | FLAG_LINK_INTR,
+	.status = mcs7830_status,
 	.in = 1,
 	.out = 2,
 };
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 6e27fa99e8b9..6a8f002b8ed3 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -64,6 +64,7 @@ enum {
 	MLX4_MAX_NUM_PF = 16,
 	MLX4_MAX_NUM_VF = 64,
 	MLX4_MFUNC_MAX = 80,
+	MLX4_MAX_EQ_NUM = 1024,
 	MLX4_MFUNC_EQ_NUM = 4,
 	MLX4_MFUNC_MAX_EQES = 8,
 	MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1)
@@ -239,6 +240,10 @@ static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
 	return (major << 32) | (minor << 16) | subminor;
 }
 
+struct mlx4_phys_caps {
+	u32 num_phys_eqs;
+};
+
 struct mlx4_caps {
 	u64 fw_ver;
 	u32 function;
@@ -499,6 +504,7 @@ struct mlx4_dev {
 	unsigned long flags;
 	unsigned long num_slaves;
 	struct mlx4_caps caps;
+	struct mlx4_phys_caps phys_caps;
 	struct radix_tree_root qp_table_tree;
 	u8 rev_id;
 	char board_id[MLX4_BOARD_ID_LEN];
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index 9808877c2ab9..a7a683e30b64 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -42,6 +42,7 @@
 #include <net/netlabel.h>
 #include <net/request_sock.h>
 #include <linux/atomic.h>
+#include <asm/unaligned.h>
 
 /* known doi values */
 #define CIPSO_V4_DOI_UNKNOWN 0x00000000
@@ -285,7 +286,33 @@ static inline int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
 static inline int cipso_v4_validate(const struct sk_buff *skb,
 				    unsigned char **option)
 {
-	return -ENOSYS;
+	unsigned char *opt = *option;
+	unsigned char err_offset = 0;
+	u8 opt_len = opt[1];
+	u8 opt_iter;
+
+	if (opt_len < 8) {
+		err_offset = 1;
+		goto out;
+	}
+
+	if (get_unaligned_be32(&opt[2]) == 0) {
+		err_offset = 2;
+		goto out;
+	}
+
+	for (opt_iter = 6; opt_iter < opt_len;) {
+		if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
+			err_offset = opt_iter + 1;
+			goto out;
+		}
+		opt_iter += opt[opt_iter + 1];
+	}
+
+out:
+	*option = opt + err_offset;
+	return err_offset;
+
 }
 #endif /* CONFIG_NETLABEL */
 
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index 6ab4587d052b..0777c5a45fa0 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -10,23 +10,27 @@
 #include <linux/jiffies.h>
 #include <linux/dynamic_queue_limits.h>
 
-#define POSDIFF(A, B) ((A) > (B) ? (A) - (B) : 0)
+#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
+#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)
 
 /* Records completed count and recalculates the queue limit */
 void dql_completed(struct dql *dql, unsigned int count)
 {
 	unsigned int inprogress, prev_inprogress, limit;
-	unsigned int ovlimit, all_prev_completed, completed;
+	unsigned int ovlimit, completed, num_queued;
+	bool all_prev_completed;
+
+	num_queued = ACCESS_ONCE(dql->num_queued);
 
 	/* Can't complete more than what's in queue */
-	BUG_ON(count > dql->num_queued - dql->num_completed);
+	BUG_ON(count > num_queued - dql->num_completed);
 
 	completed = dql->num_completed + count;
 	limit = dql->limit;
-	ovlimit = POSDIFF(dql->num_queued - dql->num_completed, limit);
-	inprogress = dql->num_queued - completed;
+	ovlimit = POSDIFF(num_queued - dql->num_completed, limit);
+	inprogress = num_queued - completed;
 	prev_inprogress = dql->prev_num_queued - dql->num_completed;
-	all_prev_completed = POSDIFF(completed, dql->prev_num_queued);
+	all_prev_completed = AFTER_EQ(completed, dql->prev_num_queued);
 
 	if ((ovlimit && !inprogress) ||
 	    (dql->prev_ovlimit && all_prev_completed)) {
@@ -104,7 +108,7 @@ void dql_completed(struct dql *dql, unsigned int count)
 	dql->prev_ovlimit = ovlimit;
 	dql->prev_last_obj_cnt = dql->last_obj_cnt;
 	dql->num_completed = completed;
-	dql->prev_num_queued = dql->num_queued;
+	dql->prev_num_queued = num_queued;
 }
 EXPORT_SYMBOL(dql_completed);
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 653f8c0aedc5..9e5b71fda6ec 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1592,6 +1592,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 	gfp_t gfp_mask;
 	long timeo;
 	int err;
+	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+	err = -EMSGSIZE;
+	if (npages > MAX_SKB_FRAGS)
+		goto failure;
 
 	gfp_mask = sk->sk_allocation;
 	if (gfp_mask & __GFP_WAIT)
@@ -1610,14 +1615,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 	if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
 		skb = alloc_skb(header_len, gfp_mask);
 		if (skb) {
-			int npages;
 			int i;
 
 			/* No pages, we're done... */
 			if (!data_len)
 				break;
 
-			npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 			skb->truesize += data_len;
 			skb_shinfo(skb)->nr_frags = npages;
 			for (i = 0; i < npages; i++) {
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 95e61596e605..f9ee7417f6a0 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -377,7 +377,8 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
 
 	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
-			   sk->sk_protocol, inet_sk_flowi_flags(sk),
+			   sk->sk_protocol,
+			   inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS,
 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
 			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
 	security_req_classify_flow(req, flowi4_to_flowi(fl4));
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a43b87dfe800..c8d28c433b2b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -824,7 +824,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 			      struct request_sock *req,
-			      struct request_values *rvp)
+			      struct request_values *rvp,
+			      u16 queue_mapping)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct flowi4 fl4;
@@ -840,6 +841,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 	if (skb) {
 		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
 
+		skb_set_queue_mapping(skb, queue_mapping);
 		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
 					    ireq->rmt_addr,
 					    ireq->opt);
@@ -854,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
 			      struct request_values *rvp)
 {
 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-	return tcp_v4_send_synack(sk, NULL, req, rvp);
+	return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
 }
 
 /*
@@ -1422,7 +1424,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
 	if (tcp_v4_send_synack(sk, dst, req,
-			       (struct request_values *)&tmp_ext) ||
+			       (struct request_values *)&tmp_ext,
+			       skb_get_queue_mapping(skb)) ||
 	    want_cookie)
 		goto drop_and_free;
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 554d5999abc4..3a9aec29581a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -476,7 +476,8 @@ out:
 
 
 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
-			      struct request_values *rvp)
+			      struct request_values *rvp,
+			      u16 queue_mapping)
 {
 	struct inet6_request_sock *treq = inet6_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -513,6 +514,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 
 		fl6.daddr = treq->rmt_addr;
+		skb_set_queue_mapping(skb, queue_mapping);
 		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
 		err = net_xmit_eval(err);
 	}
@@ -528,7 +530,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
 			      struct request_values *rvp)
 {
 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-	return tcp_v6_send_synack(sk, req, rvp);
+	return tcp_v6_send_synack(sk, req, rvp, 0);
 }
 
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
@@ -1213,7 +1215,8 @@ have_isn:
 	security_inet_conn_request(sk, skb, req);
 
 	if (tcp_v6_send_synack(sk, req,
-			       (struct request_values *)&tmp_ext) ||
+			       (struct request_values *)&tmp_ext,
+			       skb_get_queue_mapping(skb)) ||
 	    want_cookie)
 		goto drop_and_free;
 