 66 files changed, 1906 insertions(+), 364 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index c7a6213c6996..fbe1973f77b0 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -625,7 +625,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 
 	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
 				    !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
-				    MLX4_PROTOCOL_IB);
+				    MLX4_PROT_IB_IPV6);
 	if (err)
 		return err;
 
@@ -636,7 +636,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	return 0;
 
 err_add:
-	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
+	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6);
 	return err;
 }
 
@@ -666,7 +666,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	struct mlx4_ib_gid_entry *ge;
 
 	err = mlx4_multicast_detach(mdev->dev,
-				    &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
+				    &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6);
 	if (err)
 		return err;
 
@@ -721,7 +721,6 @@ static int init_node_data(struct mlx4_ib_dev *dev)
 	if (err)
 		goto out;
 
-	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
 	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
 
 out:
@@ -954,7 +953,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event
 	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
 		oldnd = iboe->netdevs[port - 1];
 		iboe->netdevs[port - 1] =
-			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROTOCOL_EN, port);
+			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
 		if (oldnd != iboe->netdevs[port - 1]) {
 			if (iboe->netdevs[port - 1])
 				netdev_added(ibdev, port);
@@ -1207,7 +1206,7 @@ static struct mlx4_interface mlx4_ib_interface = {
 	.add		= mlx4_ib_add,
 	.remove		= mlx4_ib_remove,
 	.event		= mlx4_ib_event,
-	.protocol	= MLX4_PROTOCOL_IB
+	.protocol	= MLX4_PROT_IB_IPV6
 };
 
 static int __init mlx4_ib_init(void)
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index f142cc21e453..deaa8bc16cf8 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -711,14 +711,14 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
 		return -EBUSY;
 	r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
 	if (!r2) {
-		release_resource(r1);
+		release_mem_region(base_addr, sizeof(struct lance_regs));
 		return -EBUSY;
 	}
 
 	dev = alloc_etherdev(sizeof(struct lance_private));
 	if (dev == NULL) {
-		release_resource(r1);
-		release_resource(r2);
+		release_mem_region(base_addr, sizeof(struct lance_regs));
+		release_mem_region(mem_start, A2065_RAM_SIZE);
 		return -ENOMEM;
 	}
 
@@ -764,8 +764,8 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
 
 	err = register_netdev(dev);
 	if (err) {
-		release_resource(r1);
-		release_resource(r2);
+		release_mem_region(base_addr, sizeof(struct lance_regs));
+		release_mem_region(mem_start, A2065_RAM_SIZE);
 		free_netdev(dev);
 		return err;
 	}
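
The a2065 fix above (and the matching ariadne one below) pairs each request_mem_region() with release_mem_region() on the unwind path: request_mem_region() allocates a struct resource and inserts it into the iomem tree, and release_mem_region() is the helper that both removes and frees it, whereas a bare release_resource() only unlinks the node and leaks the allocation. A minimal sketch of the corrected pattern, with illustrative region sizes rather than the drivers' real ones:

/* Hedged sketch, not the driver itself: unwinding two stacked
 * request_mem_region() calls with the matching release helper. */
#include <linux/ioport.h>
#include <linux/errno.h>

#define EX_REGS_SIZE	0x100	/* illustrative only */
#define EX_RAM_SIZE	0x8000	/* illustrative only */

static int example_claim_regions(unsigned long base_addr,
				 unsigned long mem_start)
{
	if (!request_mem_region(base_addr, EX_REGS_SIZE, "regs"))
		return -EBUSY;

	if (!request_mem_region(mem_start, EX_RAM_SIZE, "RAM")) {
		/* release_mem_region() frees the struct resource that
		 * request_mem_region() allocated; release_resource()
		 * alone would leave that allocation behind. */
		release_mem_region(base_addr, EX_REGS_SIZE);
		return -EBUSY;
	}
	return 0;
}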
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index 7ca0eded2561..b7f45cd756a2 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -182,14 +182,14 @@ static int __devinit ariadne_init_one(struct zorro_dev *z,
 		return -EBUSY;
 	r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM");
 	if (!r2) {
-		release_resource(r1);
+		release_mem_region(base_addr, sizeof(struct Am79C960));
 		return -EBUSY;
 	}
 
 	dev = alloc_etherdev(sizeof(struct ariadne_private));
 	if (dev == NULL) {
-		release_resource(r1);
-		release_resource(r2);
+		release_mem_region(base_addr, sizeof(struct Am79C960));
+		release_mem_region(mem_start, ARIADNE_RAM_SIZE);
 		return -ENOMEM;
 	}
 
@@ -213,8 +213,8 @@ static int __devinit ariadne_init_one(struct zorro_dev *z,
 
 	err = register_netdev(dev);
 	if (err) {
-		release_resource(r1);
-		release_resource(r2);
+		release_mem_region(base_addr, sizeof(struct Am79C960));
+		release_mem_region(mem_start, ARIADNE_RAM_SIZE);
 		free_netdev(dev);
 		return err;
 	}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 338bea147c64..16d6fe954695 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1482,21 +1482,16 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 {
 	struct sk_buff *skb = *pskb;
 	struct slave *slave;
-	struct net_device *bond_dev;
 	struct bonding *bond;
 
-	slave = bond_slave_get_rcu(skb->dev);
-	bond_dev = ACCESS_ONCE(slave->dev->master);
-	if (unlikely(!bond_dev))
-		return RX_HANDLER_PASS;
-
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (unlikely(!skb))
 		return RX_HANDLER_CONSUMED;
 
 	*pskb = skb;
 
-	bond = netdev_priv(bond_dev);
+	slave = bond_slave_get_rcu(skb->dev);
+	bond = slave->bond;
 
 	if (bond->params.arp_interval)
 		slave->dev->last_rx = jiffies;
@@ -1505,10 +1500,10 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 		return RX_HANDLER_EXACT;
 	}
 
-	skb->dev = bond_dev;
+	skb->dev = bond->dev;
 
 	if (bond->params.mode == BOND_MODE_ALB &&
-	    bond_dev->priv_flags & IFF_BRIDGE_PORT &&
+	    bond->dev->priv_flags & IFF_BRIDGE_PORT &&
 	    skb->pkt_type == PACKET_HOST) {
 
 		if (unlikely(skb_cow_head(skb,
@@ -1516,7 +1511,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 			kfree_skb(skb);
 			return RX_HANDLER_CONSUMED;
 		}
-		memcpy(eth_hdr(skb)->h_dest, bond_dev->dev_addr, ETH_ALEN);
+		memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
 	}
 
 	return RX_HANDLER_ANOTHER;
@@ -1698,20 +1693,15 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		pr_debug("Error %d calling netdev_set_bond_master\n", res);
 		goto err_restore_mac;
 	}
-	res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
-					 new_slave);
-	if (res) {
-		pr_debug("Error %d calling netdev_rx_handler_register\n", res);
-		goto err_unset_master;
-	}
 
 	/* open the slave since the application closed it */
 	res = dev_open(slave_dev);
 	if (res) {
 		pr_debug("Opening slave %s failed\n", slave_dev->name);
-		goto err_unreg_rxhandler;
+		goto err_unset_master;
 	}
 
+	new_slave->bond = bond;
 	new_slave->dev = slave_dev;
 	slave_dev->priv_flags |= IFF_BONDING;
 
@@ -1907,6 +1897,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	if (res)
 		goto err_close;
 
+	res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
+					 new_slave);
+	if (res) {
+		pr_debug("Error %d calling netdev_rx_handler_register\n", res);
+		goto err_dest_symlinks;
+	}
+
 	pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
 		bond_dev->name, slave_dev->name,
 		bond_is_active_slave(new_slave) ? "n active" : " backup",
@@ -1916,13 +1913,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	return 0;
 
 /* Undo stages on error */
+err_dest_symlinks:
+	bond_destroy_slave_symlinks(bond_dev, slave_dev);
+
 err_close:
 	dev_close(slave_dev);
 
-err_unreg_rxhandler:
-	netdev_rx_handler_unregister(slave_dev);
-	synchronize_net();
-
 err_unset_master:
 	netdev_set_bond_master(slave_dev, NULL);
 
@@ -1988,6 +1984,14 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 		return -EINVAL;
 	}
 
+	/* unregister rx_handler early so bond_handle_frame wouldn't be called
+	 * for this slave anymore.
+	 */
+	netdev_rx_handler_unregister(slave_dev);
+	write_unlock_bh(&bond->lock);
+	synchronize_net();
+	write_lock_bh(&bond->lock);
+
 	if (!bond->params.fail_over_mac) {
 		if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) &&
 		    bond->slave_cnt > 1)
@@ -2104,8 +2108,6 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 		netif_addr_unlock_bh(bond_dev);
 	}
 
-	netdev_rx_handler_unregister(slave_dev);
-	synchronize_net();
 	netdev_set_bond_master(slave_dev, NULL);
 
 	slave_disable_netpoll(slave);
@@ -2186,6 +2188,12 @@ static int bond_release_all(struct net_device *bond_dev)
 	 */
 	write_unlock_bh(&bond->lock);
 
+	/* unregister rx_handler early so bond_handle_frame wouldn't
+	 * be called for this slave anymore.
+	 */
+	netdev_rx_handler_unregister(slave_dev);
+	synchronize_net();
+
 	if (bond_is_lb(bond)) {
 		/* must be called only after the slave
 		 * has been detached from the list
@@ -2217,8 +2225,6 @@ static int bond_release_all(struct net_device *bond_dev)
 		netif_addr_unlock_bh(bond_dev);
 	}
 
-	netdev_rx_handler_unregister(slave_dev);
-	synchronize_net();
 	netdev_set_bond_master(slave_dev, NULL);
 
 	slave_disable_netpoll(slave);
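
Taken together, the bonding hunks pin down the rx_handler lifecycle: netdev_rx_handler_register() moves to the very end of bond_enslave(), so bond_handle_frame() can never observe a slave whose bond back-pointer and sysfs links are not yet set up, and netdev_rx_handler_unregister() moves to the start of the release paths. The unregister must be followed by synchronize_net() to wait out handlers already in flight, and an RCU grace period cannot be waited for in atomic context, so bond_release() drops and retakes bond->lock around it. A hedged sketch of just that ordering (assumes the driver's own bonding.h for struct bonding):

/* Sketch only, not the driver code: unregister first, then wait for
 * readers with the BH write lock dropped, since synchronize_net()
 * may sleep. */
#include <linux/netdevice.h>
#include "bonding.h"

static void example_detach_rx_handler(struct bonding *bond,
				      struct net_device *slave_dev)
{
	netdev_rx_handler_unregister(slave_dev);

	write_unlock_bh(&bond->lock);
	synchronize_net();	/* no CPU still runs bond_handle_frame() */
	write_lock_bh(&bond->lock);
}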
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 6b26962fd0ec..90736cb4d975 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -187,6 +187,7 @@ struct slave {
 	struct net_device *dev; /* first - useful for panic debug */
 	struct slave *next;
 	struct slave *prev;
+	struct bonding *bond; /* our master */
 	int    delay;
 	unsigned long jiffies;
 	unsigned long last_arp_rx;
diff --git a/drivers/net/davinci_cpdma.c b/drivers/net/davinci_cpdma.c
index e92b2b6cd8c4..ae47f23ba930 100644
--- a/drivers/net/davinci_cpdma.c
+++ b/drivers/net/davinci_cpdma.c
@@ -76,6 +76,7 @@ struct cpdma_desc {
 
 struct cpdma_desc_pool {
 	u32			phys;
+	u32			hw_addr;
 	void __iomem		*iomap;		/* ioremap map */
 	void			*cpumap;	/* dma_alloc map */
 	int			desc_size, mem_size;
@@ -137,7 +138,8 @@ struct cpdma_chan {
  * abstract out these details
  */
static struct cpdma_desc_pool *
-cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align)
+cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
+		       int size, int align)
 {
 	int bitmap_size;
 	struct cpdma_desc_pool *pool;
@@ -161,10 +163,12 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align)
 	if (phys) {
 		pool->phys  = phys;
 		pool->iomap = ioremap(phys, size);
+		pool->hw_addr = hw_addr;
 	} else {
 		pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
 						  GFP_KERNEL);
 		pool->iomap = (void __force __iomem *)pool->cpumap;
+		pool->hw_addr = pool->phys;
 	}
 
 	if (pool->iomap)
@@ -201,14 +205,14 @@ static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
 {
 	if (!desc)
 		return 0;
-	return pool->phys + (__force dma_addr_t)desc -
-			    (__force dma_addr_t)pool->iomap;
+	return pool->hw_addr + (__force dma_addr_t)desc -
+			    (__force dma_addr_t)pool->iomap;
 }
 
 static inline struct cpdma_desc __iomem *
 desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
 {
-	return dma ? pool->iomap + dma - pool->phys : NULL;
+	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
 }
 
 static struct cpdma_desc __iomem *
@@ -260,6 +264,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
 
 	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
 					    ctlr->params.desc_mem_phys,
+					    ctlr->params.desc_hw_addr,
 					    ctlr->params.desc_mem_size,
 					    ctlr->params.desc_align);
 	if (!ctlr->pool) {
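
The new hw_addr field separates the two views of the descriptor pool: phys/iomap describe where the CPU reaches the memory, while hw_addr is the address the DMA engine must be programmed with (on some DaVinci parts the on-chip descriptor RAM sits at different CPU-bus and EMAC-bus addresses). A small userspace model of the two conversions, with made-up addresses:

/* Userspace model of desc_phys()/desc_from_phys(): the CPU mapping
 * and the hardware bus address differ by a constant per-pool offset. */
#include <stdint.h>
#include <stdio.h>

struct pool {
	uintptr_t iomap;	/* CPU base (stand-in for void __iomem *) */
	uint32_t  hw_addr;	/* base address as seen by the DMA engine */
};

static uint32_t desc_phys(const struct pool *p, uintptr_t desc)
{
	return desc ? p->hw_addr + (uint32_t)(desc - p->iomap) : 0;
}

static uintptr_t desc_from_phys(const struct pool *p, uint32_t dma)
{
	return dma ? p->iomap + (dma - p->hw_addr) : 0;
}

int main(void)
{
	/* illustrative numbers only */
	struct pool p = { .iomap = 0xd0800000u, .hw_addr = 0x01e20000u };
	uintptr_t desc = p.iomap + 0x40;	/* second 64-byte descriptor */

	printf("hw view:  0x%08x\n", desc_phys(&p, desc));	/* 0x01e20040 */
	printf("cpu view: 0x%08lx\n",
	       (unsigned long)desc_from_phys(&p, 0x01e20040u));
	return 0;
}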
diff --git a/drivers/net/davinci_cpdma.h b/drivers/net/davinci_cpdma.h
index 868e50ebde45..afa19a0c0d81 100644
--- a/drivers/net/davinci_cpdma.h
+++ b/drivers/net/davinci_cpdma.h
@@ -33,6 +33,7 @@ struct cpdma_params {
 	bool			has_soft_reset;
 	int			min_packet_size;
 	u32			desc_mem_phys;
+	u32			desc_hw_addr;
 	int			desc_mem_size;
 	int			desc_align;
 
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 082d6ea69920..baca6bfcb089 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1854,10 +1854,13 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
 	dma_params.rxcp			= priv->emac_base + 0x660;
 	dma_params.num_chan		= EMAC_MAX_TXRX_CHANNELS;
 	dma_params.min_packet_size	= EMAC_DEF_MIN_ETHPKTSIZE;
-	dma_params.desc_mem_phys	= hw_ram_addr;
+	dma_params.desc_hw_addr		= hw_ram_addr;
 	dma_params.desc_mem_size	= pdata->ctrl_ram_size;
 	dma_params.desc_align		= 16;
 
+	dma_params.desc_mem_phys = pdata->no_bd_ram ? 0 :
+		(u32 __force)res->start + pdata->ctrl_ram_offset;
+
 	priv->dma = cpdma_ctlr_create(&dma_params);
 	if (!priv->dma) {
 		dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n");
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 3a4277f6fac4..116cae334dad 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -62,6 +62,9 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
 	} else
 		obj = -1;
 
+	if (obj != -1)
+		--bitmap->avail;
+
 	spin_unlock(&bitmap->lock);
 
 	return obj;
@@ -101,11 +104,19 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
 	} else
 		obj = -1;
 
+	if (obj != -1)
+		bitmap->avail -= cnt;
+
 	spin_unlock(&bitmap->lock);
 
 	return obj;
 }
 
+u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
+{
+	return bitmap->avail;
+}
+
 void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
 {
 	obj &= bitmap->max + bitmap->reserved_top - 1;
@@ -115,6 +126,7 @@ void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
 	bitmap->last = min(bitmap->last, obj);
 	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
 			& bitmap->mask;
+	bitmap->avail += cnt;
 	spin_unlock(&bitmap->lock);
 }
 
@@ -130,6 +142,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
 	bitmap->max  = num - reserved_top;
 	bitmap->mask = mask;
 	bitmap->reserved_top = reserved_top;
+	bitmap->avail = num - reserved_top - reserved_bot;
 	spin_lock_init(&bitmap->lock);
 	bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
 				sizeof (long), GFP_KERNEL);
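
The alloc.c hunks add a running avail counter to the mlx4 bitmap allocator so callers (via the new mlx4_bitmap_avail()) can query remaining capacity without scanning the bit table: it starts at num minus both reserved regions and moves under the same spinlock as every successful allocation and free. A small userspace model of the bookkeeping, with locking elided:

/* Userspace model of the avail accounting added above. */
#include <assert.h>
#include <stdint.h>

struct bitmap_model {
	uint32_t avail;	/* mirrors the field added to struct mlx4_bitmap */
};

static void bitmap_init(struct bitmap_model *b, uint32_t num,
			uint32_t reserved_bot, uint32_t reserved_top)
{
	b->avail = num - reserved_top - reserved_bot;
}

/* The real code decrements only after the bit search succeeded. */
static void bitmap_alloc_done(struct bitmap_model *b, uint32_t cnt)
{
	b->avail -= cnt;
}

static void bitmap_free_range(struct bitmap_model *b, uint32_t cnt)
{
	b->avail += cnt;
}

int main(void)
{
	struct bitmap_model b;

	bitmap_init(&b, 64, 8, 8);	/* 48 usable entries */
	assert(b.avail == 48);
	bitmap_alloc_done(&b, 16);
	assert(b.avail == 32);
	bitmap_free_range(&b, 16);
	assert(b.avail == 48);
	return 0;
}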
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 7cd34e9c7c7e..bd8ef9f2fa71 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -198,7 +198,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
 	u64 mtt_addr;
 	int err;
 
-	if (vector >= dev->caps.num_comp_vectors)
+	if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
 		return -EINVAL;
 
 	cq->vector = vector;
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
index 21786ad4455e..ec4b6d047fe0 100644
--- a/drivers/net/mlx4/en_cq.c
+++ b/drivers/net/mlx4/en_cq.c
@@ -51,13 +51,10 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
 	int err;
 
 	cq->size = entries;
-	if (mode == RX) {
+	if (mode == RX)
 		cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
-		cq->vector   = ring % mdev->dev->caps.num_comp_vectors;
-	} else {
+	else
 		cq->buf_size = sizeof(struct mlx4_cqe);
-		cq->vector   = 0;
-	}
 
 	cq->ring = ring;
 	cq->is_tx = mode;
@@ -80,7 +77,8 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
-	int err;
+	int err = 0;
+	char name[25];
 
 	cq->dev = mdev->pndev[priv->port];
 	cq->mcq.set_ci_db  = cq->wqres.db.db;
@@ -89,6 +87,29 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 	*cq->mcq.arm_db    = 0;
 	memset(cq->buf, 0, cq->buf_size);
 
+	if (cq->is_tx == RX) {
+		if (mdev->dev->caps.comp_pool) {
+			if (!cq->vector) {
+				sprintf(name , "%s-rx-%d", priv->dev->name, cq->ring);
+				if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) {
+					cq->vector = (cq->ring + 1 + priv->port) %
+						mdev->dev->caps.num_comp_vectors;
+					mlx4_warn(mdev, "Failed Assigning an EQ to "
+						  "%s_rx-%d ,Falling back to legacy EQ's\n",
+						  priv->dev->name, cq->ring);
+				}
+			}
+		} else {
+			cq->vector = (cq->ring + 1 + priv->port) %
+				mdev->dev->caps.num_comp_vectors;
+		}
+	} else {
+		if (!cq->vector || !mdev->dev->caps.comp_pool) {
+			/*Fallback to legacy pool in case of error*/
+			cq->vector = 0;
+		}
+	}
+
 	if (!cq->is_tx)
 		cq->size = priv->rx_ring[cq->ring].actual_size;
 
@@ -112,12 +133,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 	return 0;
 }
 
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+			bool reserve_vectors)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 
 	mlx4_en_unmap_buffer(&cq->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+	if (priv->mdev->dev->caps.comp_pool && cq->vector && !reserve_vectors)
+		mlx4_release_eq(priv->mdev->dev, cq->vector);
 	cq->buf_size = 0;
 	cq->buf = NULL;
 }
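
The en_cq.c change splits completion-vector selection by direction: when mlx4_core populated a pool of extra MSI-X vectors (caps.comp_pool), each RX ring requests its own named EQ through mlx4_assign_eq() and only falls back to the shared legacy vectors if that fails, while TX sticks to vector 0 unless a pooled vector was already assigned. A userspace model of the RX fallback arithmetic, with made-up counts:

/* Userspace model of the RX completion-vector choice made above. */
#include <stdio.h>

static int pick_rx_vector(int pool_assign_ok, int assigned_vec, int ring,
			  int port, int num_comp_vectors, int comp_pool)
{
	if (comp_pool && pool_assign_ok)
		return assigned_vec;	/* dedicated EQ from the MSI-X pool */

	/* legacy fallback: spread rings across the shared vectors */
	return (ring + 1 + port) % num_comp_vectors;
}

int main(void)
{
	int ring;

	/* illustrative: 4 shared vectors on port 1, pool assignment failed */
	for (ring = 0; ring < 4; ring++)
		printf("ring %d -> vector %d\n", ring,
		       pick_rx_vector(0, 0, ring, 1, 4, 1));
	return 0;
}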
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index 056152b3ff58..d54b7abf0225 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -45,7 +45,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 
-	sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id);
+	strncpy(drvinfo->driver, DRV_NAME, 32);
 	strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
 	sprintf(drvinfo->fw_version, "%d.%d.%d",
 		(u16) (mdev->dev->caps.fw_ver >> 32),
@@ -131,8 +131,65 @@ static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
 static void mlx4_en_get_wol(struct net_device *netdev,
 			    struct ethtool_wolinfo *wol)
 {
-	wol->supported = 0;
-	wol->wolopts = 0;
+	struct mlx4_en_priv *priv = netdev_priv(netdev);
+	int err = 0;
+	u64 config = 0;
+
+	if (!priv->mdev->dev->caps.wol) {
+		wol->supported = 0;
+		wol->wolopts = 0;
+		return;
+	}
+
+	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
+	if (err) {
+		en_err(priv, "Failed to get WoL information\n");
+		return;
+	}
+
+	if (config & MLX4_EN_WOL_MAGIC)
+		wol->supported = WAKE_MAGIC;
+	else
+		wol->supported = 0;
+
+	if (config & MLX4_EN_WOL_ENABLED)
+		wol->wolopts = WAKE_MAGIC;
+	else
+		wol->wolopts = 0;
+}
+
+static int mlx4_en_set_wol(struct net_device *netdev,
+			   struct ethtool_wolinfo *wol)
+{
+	struct mlx4_en_priv *priv = netdev_priv(netdev);
+	u64 config = 0;
+	int err = 0;
+
+	if (!priv->mdev->dev->caps.wol)
+		return -EOPNOTSUPP;
+
+	if (wol->supported & ~WAKE_MAGIC)
+		return -EINVAL;
+
+	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
+	if (err) {
+		en_err(priv, "Failed to get WoL info, unable to modify\n");
+		return err;
+	}
+
+	if (wol->wolopts & WAKE_MAGIC) {
+		config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
+			  MLX4_EN_WOL_MAGIC;
+	} else {
+		config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
+		config |= MLX4_EN_WOL_DO_MODIFY;
+	}
+
+	err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
+	if (err)
+		en_err(priv, "Failed to set WoL information\n");
+
+	return err;
 }
 
 static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
@@ -388,7 +445,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 		mlx4_en_stop_port(dev);
 	}
 
-	mlx4_en_free_resources(priv);
+	mlx4_en_free_resources(priv, true);
 
 	priv->prof->tx_ring_size = tx_size;
 	priv->prof->rx_ring_size = rx_size;
@@ -442,6 +499,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 	.self_test = mlx4_en_self_test,
 	.get_wol = mlx4_en_get_wol,
+	.set_wol = mlx4_en_set_wol,
 	.get_msglevel = mlx4_en_get_msglevel,
 	.set_msglevel = mlx4_en_set_msglevel,
 	.get_coalesce = mlx4_en_get_coalesce,
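
The new get_wol/set_wol pair translates between the device's 64-bit WoL configuration word and ethtool's WAKE_MAGIC bit, and set_wol always sets MLX4_EN_WOL_DO_MODIFY so the firmware treats the write as intentional. A userspace model of the flag translation; the MLX4_EN_WOL_* bit positions below are placeholders for illustration, not a claim about the firmware layout:

/* Userspace model of the WoL flag mapping; bit positions assumed. */
#include <stdint.h>
#include <stdio.h>

#define WAKE_MAGIC		(1 << 5)	/* matches linux/ethtool.h */
#define MLX4_EN_WOL_MAGIC	(1ULL << 61)	/* placeholder position */
#define MLX4_EN_WOL_ENABLED	(1ULL << 62)	/* placeholder position */
#define MLX4_EN_WOL_DO_MODIFY	(1ULL << 63)	/* placeholder position */

static uint64_t set_wol(uint64_t config, uint32_t wolopts)
{
	if (wolopts & WAKE_MAGIC) {
		config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
			  MLX4_EN_WOL_MAGIC;
	} else {
		config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
		config |= MLX4_EN_WOL_DO_MODIFY;
	}
	return config;
}

static uint32_t get_wolopts(uint64_t config)
{
	return (config & MLX4_EN_WOL_ENABLED) ? WAKE_MAGIC : 0;
}

int main(void)
{
	uint64_t cfg = set_wol(0, WAKE_MAGIC);

	printf("wolopts after enable:  %#x\n", get_wolopts(cfg));
	printf("wolopts after disable: %#x\n", get_wolopts(set_wol(cfg, 0)));
	return 0;
}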
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 1ff6ca6466ed..9317b61a75b8 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -241,16 +241,18 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
 		mdev->port_cnt++;
 
-	/* If we did not receive an explicit number of Rx rings, default to
-	 * the number of completion vectors populated by the mlx4_core */
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
-		mlx4_info(mdev, "Using %d tx rings for port:%d\n",
-			  mdev->profile.prof[i].tx_ring_num, i);
-		mdev->profile.prof[i].rx_ring_num = min_t(int,
-			roundup_pow_of_two(dev->caps.num_comp_vectors),
-			MAX_RX_RINGS);
-		mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
-			  mdev->profile.prof[i].rx_ring_num, i);
+		if (!dev->caps.comp_pool) {
+			mdev->profile.prof[i].rx_ring_num =
+				rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
+					min_t(int,
+					      dev->caps.num_comp_vectors,
+					      MAX_RX_RINGS)));
+		} else {
+			mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
+				min_t(int, dev->caps.comp_pool/
+				      dev->caps.num_ports - 1 , MAX_MSIX_P_PORT - 1));
+		}
 	}
 
 	/* Create our own workqueue for reset/multicast tasks
@@ -294,7 +296,7 @@ static struct mlx4_interface mlx4_en_interface = {
 	.remove		= mlx4_en_remove,
 	.event		= mlx4_en_event,
 	.get_dev	= mlx4_en_get_netdev,
-	.protocol	= MLX4_PROTOCOL_EN,
+	.protocol	= MLX4_PROT_ETH,
 };
 
 static int __init mlx4_en_init(void)
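
The new default RX ring count has two branches: without a vector pool it clamps the shared completion vectors into [MIN_RX_RINGS, MAX_RX_RINGS] and rounds down to a power of two; with a pool it divides the pooled vectors across ports, subtracts one, caps at MAX_MSIX_P_PORT - 1, and again rounds down. A userspace check of that arithmetic; the three macro values are stand-ins for the driver's, chosen only for the demo:

/* Userspace check of the default RX-ring formulas above. */
#include <stdio.h>

#define MIN_RX_RINGS	4	/* stand-in value */
#define MAX_RX_RINGS	16	/* stand-in value */
#define MAX_MSIX_P_PORT	17	/* stand-in value */

static int rounddown_pow_of_two(int n)
{
	int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

static int max_int(int a, int b) { return a > b ? a : b; }
static int min_int(int a, int b) { return a < b ? a : b; }

static int default_rx_rings(int num_comp_vectors, int comp_pool, int num_ports)
{
	if (!comp_pool)
		return rounddown_pow_of_two(max_int(MIN_RX_RINGS,
				min_int(num_comp_vectors, MAX_RX_RINGS)));
	return rounddown_pow_of_two(min_int(comp_pool / num_ports - 1,
					    MAX_MSIX_P_PORT - 1));
}

int main(void)
{
	printf("%d\n", default_rx_rings(6, 0, 2));	/* legacy path: 4 */
	printf("%d\n", default_rx_rings(6, 32, 2));	/* pool path:   8 */
	return 0;
}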
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index 897f576b8b17..5762ebde4455 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c | |||
@@ -156,9 +156,8 @@ static void mlx4_en_do_set_mac(struct work_struct *work) | |||
156 | mutex_lock(&mdev->state_lock); | 156 | mutex_lock(&mdev->state_lock); |
157 | if (priv->port_up) { | 157 | if (priv->port_up) { |
158 | /* Remove old MAC and insert the new one */ | 158 | /* Remove old MAC and insert the new one */ |
159 | mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); | 159 | err = mlx4_replace_mac(mdev->dev, priv->port, |
160 | err = mlx4_register_mac(mdev->dev, priv->port, | 160 | priv->base_qpn, priv->mac, 0); |
161 | priv->mac, &priv->mac_index); | ||
162 | if (err) | 161 | if (err) |
163 | en_err(priv, "Failed changing HW MAC address\n"); | 162 | en_err(priv, "Failed changing HW MAC address\n"); |
164 | } else | 163 | } else |
@@ -214,6 +213,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
214 | struct mlx4_en_dev *mdev = priv->mdev; | 213 | struct mlx4_en_dev *mdev = priv->mdev; |
215 | struct net_device *dev = priv->dev; | 214 | struct net_device *dev = priv->dev; |
216 | u64 mcast_addr = 0; | 215 | u64 mcast_addr = 0; |
216 | u8 mc_list[16] = {0}; | ||
217 | int err; | 217 | int err; |
218 | 218 | ||
219 | mutex_lock(&mdev->state_lock); | 219 | mutex_lock(&mdev->state_lock); |
@@ -239,8 +239,12 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
239 | priv->flags |= MLX4_EN_FLAG_PROMISC; | 239 | priv->flags |= MLX4_EN_FLAG_PROMISC; |
240 | 240 | ||
241 | /* Enable promiscouos mode */ | 241 | /* Enable promiscouos mode */ |
242 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, | 242 | if (!mdev->dev->caps.vep_uc_steering) |
243 | priv->base_qpn, 1); | 243 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, |
244 | priv->base_qpn, 1); | ||
245 | else | ||
246 | err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn, | ||
247 | priv->port); | ||
244 | if (err) | 248 | if (err) |
245 | en_err(priv, "Failed enabling " | 249 | en_err(priv, "Failed enabling " |
246 | "promiscous mode\n"); | 250 | "promiscous mode\n"); |
@@ -252,10 +256,21 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
252 | en_err(priv, "Failed disabling " | 256 | en_err(priv, "Failed disabling " |
253 | "multicast filter\n"); | 257 | "multicast filter\n"); |
254 | 258 | ||
255 | /* Disable port VLAN filter */ | 259 | /* Add the default qp number as multicast promisc */ |
256 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL); | 260 | if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { |
257 | if (err) | 261 | err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn, |
258 | en_err(priv, "Failed disabling VLAN filter\n"); | 262 | priv->port); |
263 | if (err) | ||
264 | en_err(priv, "Failed entering multicast promisc mode\n"); | ||
265 | priv->flags |= MLX4_EN_FLAG_MC_PROMISC; | ||
266 | } | ||
267 | |||
268 | if (priv->vlgrp) { | ||
269 | /* Disable port VLAN filter */ | ||
270 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL); | ||
271 | if (err) | ||
272 | en_err(priv, "Failed disabling VLAN filter\n"); | ||
273 | } | ||
259 | } | 274 | } |
260 | goto out; | 275 | goto out; |
261 | } | 276 | } |
@@ -270,11 +285,24 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
270 | priv->flags &= ~MLX4_EN_FLAG_PROMISC; | 285 | priv->flags &= ~MLX4_EN_FLAG_PROMISC; |
271 | 286 | ||
272 | /* Disable promiscouos mode */ | 287 | /* Disable promiscouos mode */ |
273 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, | 288 | if (!mdev->dev->caps.vep_uc_steering) |
274 | priv->base_qpn, 0); | 289 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, |
290 | priv->base_qpn, 0); | ||
291 | else | ||
292 | err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, | ||
293 | priv->port); | ||
275 | if (err) | 294 | if (err) |
276 | en_err(priv, "Failed disabling promiscous mode\n"); | 295 | en_err(priv, "Failed disabling promiscous mode\n"); |
277 | 296 | ||
297 | /* Disable Multicast promisc */ | ||
298 | if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { | ||
299 | err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, | ||
300 | priv->port); | ||
301 | if (err) | ||
302 | en_err(priv, "Failed disabling multicast promiscous mode\n"); | ||
303 | priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; | ||
304 | } | ||
305 | |||
278 | /* Enable port VLAN filter */ | 306 | /* Enable port VLAN filter */ |
279 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); | 307 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); |
280 | if (err) | 308 | if (err) |
@@ -287,14 +315,38 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
287 | 0, MLX4_MCAST_DISABLE); | 315 | 0, MLX4_MCAST_DISABLE); |
288 | if (err) | 316 | if (err) |
289 | en_err(priv, "Failed disabling multicast filter\n"); | 317 | en_err(priv, "Failed disabling multicast filter\n"); |
318 | |||
319 | /* Add the default qp number as multicast promisc */ | ||
320 | if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { | ||
321 | err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn, | ||
322 | priv->port); | ||
323 | if (err) | ||
324 | en_err(priv, "Failed entering multicast promisc mode\n"); | ||
325 | priv->flags |= MLX4_EN_FLAG_MC_PROMISC; | ||
326 | } | ||
290 | } else { | 327 | } else { |
291 | int i; | 328 | int i; |
329 | /* Disable Multicast promisc */ | ||
330 | if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { | ||
331 | err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, | ||
332 | priv->port); | ||
333 | if (err) | ||
334 | en_err(priv, "Failed disabling multicast promiscous mode\n"); | ||
335 | priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; | ||
336 | } | ||
292 | 337 | ||
293 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, | 338 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, |
294 | 0, MLX4_MCAST_DISABLE); | 339 | 0, MLX4_MCAST_DISABLE); |
295 | if (err) | 340 | if (err) |
296 | en_err(priv, "Failed disabling multicast filter\n"); | 341 | en_err(priv, "Failed disabling multicast filter\n"); |
297 | 342 | ||
343 | /* Detach our qp from all the multicast addresses */ | ||
344 | for (i = 0; i < priv->mc_addrs_cnt; i++) { | ||
345 | memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN); | ||
346 | mc_list[5] = priv->port; | ||
347 | mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, | ||
348 | mc_list, MLX4_PROT_ETH); | ||
349 | } | ||
298 | /* Flush mcast filter and init it with broadcast address */ | 350 | /* Flush mcast filter and init it with broadcast address */ |
299 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, | 351 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, |
300 | 1, MLX4_MCAST_CONFIG); | 352 | 1, MLX4_MCAST_CONFIG); |
@@ -307,6 +359,10 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
307 | for (i = 0; i < priv->mc_addrs_cnt; i++) { | 359 | for (i = 0; i < priv->mc_addrs_cnt; i++) { |
308 | mcast_addr = | 360 | mcast_addr = |
309 | mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN); | 361 | mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN); |
362 | memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN); | ||
363 | mc_list[5] = priv->port; | ||
364 | mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, | ||
365 | mc_list, 0, MLX4_PROT_ETH); | ||
310 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, | 366 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, |
311 | mcast_addr, 0, MLX4_MCAST_CONFIG); | 367 | mcast_addr, 0, MLX4_MCAST_CONFIG); |
312 | } | 368 | } |
@@ -314,8 +370,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
314 | 0, MLX4_MCAST_ENABLE); | 370 | 0, MLX4_MCAST_ENABLE); |
315 | if (err) | 371 | if (err) |
316 | en_err(priv, "Failed enabling multicast filter\n"); | 372 | en_err(priv, "Failed enabling multicast filter\n"); |
317 | |||
318 | mlx4_en_clear_list(dev); | ||
319 | } | 373 | } |
320 | out: | 374 | out: |
321 | mutex_unlock(&mdev->state_lock); | 375 | mutex_unlock(&mdev->state_lock); |
@@ -417,7 +471,6 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) | |||
417 | unsigned long avg_pkt_size; | 471 | unsigned long avg_pkt_size; |
418 | unsigned long rx_packets; | 472 | unsigned long rx_packets; |
419 | unsigned long rx_bytes; | 473 | unsigned long rx_bytes; |
420 | unsigned long rx_byte_diff; | ||
421 | unsigned long tx_packets; | 474 | unsigned long tx_packets; |
422 | unsigned long tx_pkt_diff; | 475 | unsigned long tx_pkt_diff; |
423 | unsigned long rx_pkt_diff; | 476 | unsigned long rx_pkt_diff; |
@@ -441,25 +494,20 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) | |||
441 | rx_pkt_diff = ((unsigned long) (rx_packets - | 494 | rx_pkt_diff = ((unsigned long) (rx_packets - |
442 | priv->last_moder_packets)); | 495 | priv->last_moder_packets)); |
443 | packets = max(tx_pkt_diff, rx_pkt_diff); | 496 | packets = max(tx_pkt_diff, rx_pkt_diff); |
444 | rx_byte_diff = rx_bytes - priv->last_moder_bytes; | ||
445 | rx_byte_diff = rx_byte_diff ? rx_byte_diff : 1; | ||
446 | rate = packets * HZ / period; | 497 | rate = packets * HZ / period; |
447 | avg_pkt_size = packets ? ((unsigned long) (rx_bytes - | 498 | avg_pkt_size = packets ? ((unsigned long) (rx_bytes - |
448 | priv->last_moder_bytes)) / packets : 0; | 499 | priv->last_moder_bytes)) / packets : 0; |
449 | 500 | ||
450 | /* Apply auto-moderation only when packet rate exceeds a rate that | 501 | /* Apply auto-moderation only when packet rate exceeds a rate that |
451 | * it matters */ | 502 | * it matters */ |
452 | if (rate > MLX4_EN_RX_RATE_THRESH) { | 503 | if (rate > MLX4_EN_RX_RATE_THRESH && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) { |
453 | /* If tx and rx packet rates are not balanced, assume that | 504 | /* If tx and rx packet rates are not balanced, assume that |
454 | * traffic is mainly BW bound and apply maximum moderation. | 505 | * traffic is mainly BW bound and apply maximum moderation. |
455 | * Otherwise, moderate according to packet rate */ | 506 | * Otherwise, moderate according to packet rate */ |
456 | if (2 * tx_pkt_diff > 3 * rx_pkt_diff && | 507 | if (2 * tx_pkt_diff > 3 * rx_pkt_diff || |
457 | rx_pkt_diff / rx_byte_diff < | 508 | 2 * rx_pkt_diff > 3 * tx_pkt_diff) { |
458 | MLX4_EN_SMALL_PKT_SIZE) | ||
459 | moder_time = priv->rx_usecs_low; | ||
460 | else if (2 * rx_pkt_diff > 3 * tx_pkt_diff) | ||
461 | moder_time = priv->rx_usecs_high; | 509 | moder_time = priv->rx_usecs_high; |
462 | else { | 510 | } else { |
463 | if (rate < priv->pkt_rate_low) | 511 | if (rate < priv->pkt_rate_low) |
464 | moder_time = priv->rx_usecs_low; | 512 | moder_time = priv->rx_usecs_low; |
465 | else if (rate > priv->pkt_rate_high) | 513 | else if (rate > priv->pkt_rate_high) |
@@ -471,9 +519,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) | |||
471 | priv->rx_usecs_low; | 519 | priv->rx_usecs_low; |
472 | } | 520 | } |
473 | } else { | 521 | } else { |
474 | /* When packet rate is low, use default moderation rather than | 522 | moder_time = priv->rx_usecs_low; |
475 | * 0 to prevent interrupt storms if traffic suddenly increases */ | ||
476 | moder_time = priv->rx_usecs; | ||
477 | } | 523 | } |
478 | 524 | ||
479 | en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n", | 525 | en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n", |
@@ -565,6 +611,8 @@ int mlx4_en_start_port(struct net_device *dev) | |||
565 | int err = 0; | 611 | int err = 0; |
566 | int i; | 612 | int i; |
567 | int j; | 613 | int j; |
614 | u8 mc_list[16] = {0}; | ||
615 | char name[32]; | ||
568 | 616 | ||
569 | if (priv->port_up) { | 617 | if (priv->port_up) { |
570 | en_dbg(DRV, priv, "start port called while port already up\n"); | 618 | en_dbg(DRV, priv, "start port called while port already up\n"); |
@@ -603,16 +651,35 @@ int mlx4_en_start_port(struct net_device *dev) | |||
603 | ++rx_index; | 651 | ++rx_index; |
604 | } | 652 | } |
605 | 653 | ||
654 | /* Set port mac number */ | ||
655 | en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port); | ||
656 | err = mlx4_register_mac(mdev->dev, priv->port, | ||
657 | priv->mac, &priv->base_qpn, 0); | ||
658 | if (err) { | ||
659 | en_err(priv, "Failed setting port mac\n"); | ||
660 | goto cq_err; | ||
661 | } | ||
662 | mdev->mac_removed[priv->port] = 0; | ||
663 | |||
606 | err = mlx4_en_config_rss_steer(priv); | 664 | err = mlx4_en_config_rss_steer(priv); |
607 | if (err) { | 665 | if (err) { |
608 | en_err(priv, "Failed configuring rss steering\n"); | 666 | en_err(priv, "Failed configuring rss steering\n"); |
609 | goto cq_err; | 667 | goto mac_err; |
610 | } | 668 | } |
611 | 669 | ||
670 | if (mdev->dev->caps.comp_pool && !priv->tx_vector) { | ||
671 | sprintf(name , "%s-tx", priv->dev->name); | ||
672 | if (mlx4_assign_eq(mdev->dev , name, &priv->tx_vector)) { | ||
673 | mlx4_warn(mdev, "Failed Assigning an EQ to " | ||
674 | "%s_tx ,Falling back to legacy " | ||
675 | "EQ's\n", priv->dev->name); | ||
676 | } | ||
677 | } | ||
612 | /* Configure tx cq's and rings */ | 678 | /* Configure tx cq's and rings */ |
613 | for (i = 0; i < priv->tx_ring_num; i++) { | 679 | for (i = 0; i < priv->tx_ring_num; i++) { |
614 | /* Configure cq */ | 680 | /* Configure cq */ |
615 | cq = &priv->tx_cq[i]; | 681 | cq = &priv->tx_cq[i]; |
682 | cq->vector = priv->tx_vector; | ||
616 | err = mlx4_en_activate_cq(priv, cq); | 683 | err = mlx4_en_activate_cq(priv, cq); |
617 | if (err) { | 684 | if (err) { |
618 | en_err(priv, "Failed allocating Tx CQ\n"); | 685 | en_err(priv, "Failed allocating Tx CQ\n"); |
@@ -659,24 +726,22 @@ int mlx4_en_start_port(struct net_device *dev) | |||
659 | en_err(priv, "Failed setting default qp numbers\n"); | 726 | en_err(priv, "Failed setting default qp numbers\n"); |
660 | goto tx_err; | 727 | goto tx_err; |
661 | } | 728 | } |
662 | /* Set port mac number */ | ||
663 | en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port); | ||
664 | err = mlx4_register_mac(mdev->dev, priv->port, | ||
665 | priv->mac, &priv->mac_index); | ||
666 | if (err) { | ||
667 | en_err(priv, "Failed setting port mac\n"); | ||
668 | goto tx_err; | ||
669 | } | ||
670 | mdev->mac_removed[priv->port] = 0; | ||
671 | 729 | ||
672 | /* Init port */ | 730 | /* Init port */ |
673 | en_dbg(HW, priv, "Initializing port\n"); | 731 | en_dbg(HW, priv, "Initializing port\n"); |
674 | err = mlx4_INIT_PORT(mdev->dev, priv->port); | 732 | err = mlx4_INIT_PORT(mdev->dev, priv->port); |
675 | if (err) { | 733 | if (err) { |
676 | en_err(priv, "Failed Initializing port\n"); | 734 | en_err(priv, "Failed Initializing port\n"); |
677 | goto mac_err; | 735 | goto tx_err; |
678 | } | 736 | } |
679 | 737 | ||
738 | /* Attach rx QP to bradcast address */ | ||
739 | memset(&mc_list[10], 0xff, ETH_ALEN); | ||
740 | mc_list[5] = priv->port; | ||
741 | if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, | ||
742 | 0, MLX4_PROT_ETH)) | ||
743 | mlx4_warn(mdev, "Failed Attaching Broadcast\n"); | ||
744 | |||
680 | /* Schedule multicast task to populate multicast list */ | 745 | /* Schedule multicast task to populate multicast list */ |
681 | queue_work(mdev->workqueue, &priv->mcast_task); | 746 | queue_work(mdev->workqueue, &priv->mcast_task); |
682 | 747 | ||
@@ -684,8 +749,6 @@ int mlx4_en_start_port(struct net_device *dev) | |||
684 | netif_tx_start_all_queues(dev); | 749 | netif_tx_start_all_queues(dev); |
685 | return 0; | 750 | return 0; |
686 | 751 | ||
687 | mac_err: | ||
688 | mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); | ||
689 | tx_err: | 752 | tx_err: |
690 | while (tx_index--) { | 753 | while (tx_index--) { |
691 | mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]); | 754 | mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]); |
@@ -693,6 +756,8 @@ tx_err: | |||
693 | } | 756 | } |
694 | 757 | ||
695 | mlx4_en_release_rss_steer(priv); | 758 | mlx4_en_release_rss_steer(priv); |
759 | mac_err: | ||
760 | mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn); | ||
696 | cq_err: | 761 | cq_err: |
697 | while (rx_index--) | 762 | while (rx_index--) |
698 | mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]); | 763 | mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]); |
@@ -708,6 +773,7 @@ void mlx4_en_stop_port(struct net_device *dev) | |||
708 | struct mlx4_en_priv *priv = netdev_priv(dev); | 773 | struct mlx4_en_priv *priv = netdev_priv(dev); |
709 | struct mlx4_en_dev *mdev = priv->mdev; | 774 | struct mlx4_en_dev *mdev = priv->mdev; |
710 | int i; | 775 | int i; |
776 | u8 mc_list[16] = {0}; | ||
711 | 777 | ||
712 | if (!priv->port_up) { | 778 | if (!priv->port_up) { |
713 | en_dbg(DRV, priv, "stop port called while port already down\n"); | 779 | en_dbg(DRV, priv, "stop port called while port already down\n"); |
@@ -722,8 +788,23 @@ void mlx4_en_stop_port(struct net_device *dev) | |||
722 | /* Set port as not active */ | 788 | /* Set port as not active */ |
723 | priv->port_up = false; | 789 | priv->port_up = false; |
724 | 790 | ||
791 | /* Detach All multicasts */ | ||
792 | memset(&mc_list[10], 0xff, ETH_ALEN); | ||
793 | mc_list[5] = priv->port; | ||
794 | mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, | ||
795 | MLX4_PROT_ETH); | ||
796 | for (i = 0; i < priv->mc_addrs_cnt; i++) { | ||
797 | memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN); | ||
798 | mc_list[5] = priv->port; | ||
799 | mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, | ||
800 | mc_list, MLX4_PROT_ETH); | ||
801 | } | ||
802 | mlx4_en_clear_list(dev); | ||
803 | /* Flush multicast filter */ | ||
804 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); | ||
805 | |||
725 | /* Unregister Mac address for the port */ | 806 | /* Unregister Mac address for the port */ |
726 | mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); | 807 | mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn); |
727 | mdev->mac_removed[priv->port] = 1; | 808 | mdev->mac_removed[priv->port] = 1; |
728 | 809 | ||
729 | /* Free TX Rings */ | 810 | /* Free TX Rings */ |
@@ -801,7 +882,6 @@ static int mlx4_en_open(struct net_device *dev) | |||
801 | priv->rx_ring[i].packets = 0; | 882 | priv->rx_ring[i].packets = 0; |
802 | } | 883 | } |
803 | 884 | ||
804 | mlx4_en_set_default_moderation(priv); | ||
805 | err = mlx4_en_start_port(dev); | 885 | err = mlx4_en_start_port(dev); |
806 | if (err) | 886 | if (err) |
807 | en_err(priv, "Failed starting port:%d\n", priv->port); | 887 | en_err(priv, "Failed starting port:%d\n", priv->port); |
@@ -828,7 +908,7 @@ static int mlx4_en_close(struct net_device *dev) | |||
828 | return 0; | 908 | return 0; |
829 | } | 909 | } |
830 | 910 | ||
831 | void mlx4_en_free_resources(struct mlx4_en_priv *priv) | 911 | void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors) |
832 | { | 912 | { |
833 | int i; | 913 | int i; |
834 | 914 | ||
@@ -836,14 +916,14 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv) | |||
836 | if (priv->tx_ring[i].tx_info) | 916 | if (priv->tx_ring[i].tx_info) |
837 | mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); | 917 | mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); |
838 | if (priv->tx_cq[i].buf) | 918 | if (priv->tx_cq[i].buf) |
839 | mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); | 919 | mlx4_en_destroy_cq(priv, &priv->tx_cq[i], reserve_vectors); |
840 | } | 920 | } |
841 | 921 | ||
842 | for (i = 0; i < priv->rx_ring_num; i++) { | 922 | for (i = 0; i < priv->rx_ring_num; i++) { |
843 | if (priv->rx_ring[i].rx_info) | 923 | if (priv->rx_ring[i].rx_info) |
844 | mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]); | 924 | mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]); |
845 | if (priv->rx_cq[i].buf) | 925 | if (priv->rx_cq[i].buf) |
846 | mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); | 926 | mlx4_en_destroy_cq(priv, &priv->rx_cq[i], reserve_vectors); |
847 | } | 927 | } |
848 | } | 928 | } |
849 | 929 | ||
@@ -851,6 +931,13 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) | |||
851 | { | 931 | { |
852 | struct mlx4_en_port_profile *prof = priv->prof; | 932 | struct mlx4_en_port_profile *prof = priv->prof; |
853 | int i; | 933 | int i; |
934 | int base_tx_qpn, err; | ||
935 | |||
936 | err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn); | ||
937 | if (err) { | ||
938 | en_err(priv, "failed reserving range for TX rings\n"); | ||
939 | return err; | ||
940 | } | ||
854 | 941 | ||
855 | /* Create tx Rings */ | 942 | /* Create tx Rings */ |
856 | for (i = 0; i < priv->tx_ring_num; i++) { | 943 | for (i = 0; i < priv->tx_ring_num; i++) { |
@@ -858,7 +945,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) | |||
858 | prof->tx_ring_size, i, TX)) | 945 | prof->tx_ring_size, i, TX)) |
859 | goto err; | 946 | goto err; |
860 | 947 | ||
861 | if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], | 948 | if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i, |
862 | prof->tx_ring_size, TXBB_SIZE)) | 949 | prof->tx_ring_size, TXBB_SIZE)) |
863 | goto err; | 950 | goto err; |
864 | } | 951 | } |
@@ -878,6 +965,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) | |||
878 | 965 | ||
879 | err: | 966 | err: |
880 | en_err(priv, "Failed to allocate NIC resources\n"); | 967 | en_err(priv, "Failed to allocate NIC resources\n"); |
968 | mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num); | ||
881 | return -ENOMEM; | 969 | return -ENOMEM; |
882 | } | 970 | } |
883 | 971 | ||
@@ -905,7 +993,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev) | |||
905 | mdev->pndev[priv->port] = NULL; | 993 | mdev->pndev[priv->port] = NULL; |
906 | mutex_unlock(&mdev->state_lock); | 994 | mutex_unlock(&mdev->state_lock); |
907 | 995 | ||
908 | mlx4_en_free_resources(priv); | 996 | mlx4_en_free_resources(priv, false); |
909 | free_netdev(dev); | 997 | free_netdev(dev); |
910 | } | 998 | } |
911 | 999 | ||
@@ -932,7 +1020,6 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) | |||
932 | en_dbg(DRV, priv, "Change MTU called with card down!?\n"); | 1020 | en_dbg(DRV, priv, "Change MTU called with card down!?\n"); |
933 | } else { | 1021 | } else { |
934 | mlx4_en_stop_port(dev); | 1022 | mlx4_en_stop_port(dev); |
935 | mlx4_en_set_default_moderation(priv); | ||
936 | err = mlx4_en_start_port(dev); | 1023 | err = mlx4_en_start_port(dev); |
937 | if (err) { | 1024 | if (err) { |
938 | en_err(priv, "Failed restarting port:%d\n", | 1025 | en_err(priv, "Failed restarting port:%d\n", |
@@ -1079,7 +1166,25 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
1079 | en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); | 1166 | en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); |
1080 | en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); | 1167 | en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); |
1081 | 1168 | ||
1169 | /* Configure port */ | ||
1170 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, | ||
1171 | MLX4_EN_MIN_MTU, | ||
1172 | 0, 0, 0, 0); | ||
1173 | if (err) { | ||
1174 | en_err(priv, "Failed setting port general configurations " | ||
1175 | "for port %d, with error %d\n", priv->port, err); | ||
1176 | goto out; | ||
1177 | } | ||
1178 | |||
1179 | /* Init port */ | ||
1180 | en_warn(priv, "Initializing port\n"); | ||
1181 | err = mlx4_INIT_PORT(mdev->dev, priv->port); | ||
1182 | if (err) { | ||
1183 | en_err(priv, "Failed Initializing port\n"); | ||
1184 | goto out; | ||
1185 | } | ||
1082 | priv->registered = 1; | 1186 | priv->registered = 1; |
1187 | mlx4_en_set_default_moderation(priv); | ||
1083 | queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); | 1188 | queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); |
1084 | return 0; | 1189 | return 0; |
1085 | 1190 | ||
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c index 7f5a3221e0c1..f2a4f5dd313d 100644 --- a/drivers/net/mlx4/en_port.c +++ b/drivers/net/mlx4/en_port.c | |||
@@ -119,6 +119,10 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, | |||
119 | struct mlx4_set_port_rqp_calc_context *context; | 119 | struct mlx4_set_port_rqp_calc_context *context; |
120 | int err; | 120 | int err; |
121 | u32 in_mod; | 121 | u32 in_mod; |
122 | u32 m_promisc = (dev->caps.vep_mc_steering) ? MCAST_DIRECT : MCAST_DEFAULT; | ||
123 | |||
124 | if (dev->caps.vep_mc_steering && dev->caps.vep_uc_steering) | ||
125 | return 0; | ||
122 | 126 | ||
123 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 127 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
124 | if (IS_ERR(mailbox)) | 128 | if (IS_ERR(mailbox)) |
@@ -127,8 +131,11 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, | |||
127 | memset(context, 0, sizeof *context); | 131 | memset(context, 0, sizeof *context); |
128 | 132 | ||
129 | context->base_qpn = cpu_to_be32(base_qpn); | 133 | context->base_qpn = cpu_to_be32(base_qpn); |
130 | context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_EN_SHIFT | base_qpn); | 134 | context->n_mac = 0x7; |
131 | context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_MODE_SHIFT | base_qpn); | 135 | context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | |
136 | base_qpn); | ||
137 | context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT | | ||
138 | base_qpn); | ||
132 | context->intra_no_vlan = 0; | 139 | context->intra_no_vlan = 0; |
133 | context->no_vlan = MLX4_NO_VLAN_IDX; | 140 | context->no_vlan = MLX4_NO_VLAN_IDX; |
134 | context->intra_vlan_miss = 0; | 141 | context->intra_vlan_miss = 0; |
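
For reference, a worked example of the two control words built above, with illustrative values (promisc = 1, m_promisc = MCAST_DIRECT = 1, base_qpn = 0x48):

        /* bit 31 enables unicast promiscuous mode; bits 31:30 of the mcast
         * word select the multicast mode; the low bits carry base_qpn */
        context->promisc = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | 0x48);
                                                        /* = 0x80000048 */
        context->mcast = cpu_to_be32(1 << SET_PORT_MC_PROMISC_SHIFT | 0x48);
                                                        /* = 0x40000048 */
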
@@ -206,7 +213,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
206 | } | 213 | } |
207 | stats->tx_packets = 0; | 214 | stats->tx_packets = 0; |
208 | stats->tx_bytes = 0; | 215 | stats->tx_bytes = 0; |
209 | for (i = 0; i <= priv->tx_ring_num; i++) { | 216 | for (i = 0; i < priv->tx_ring_num; i++) { |
210 | stats->tx_packets += priv->tx_ring[i].packets; | 217 | stats->tx_packets += priv->tx_ring[i].packets; |
211 | stats->tx_bytes += priv->tx_ring[i].bytes; | 218 | stats->tx_bytes += priv->tx_ring[i].bytes; |
212 | } | 219 | } |
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h index 092e814b1981..e3d73e41c567 100644 --- a/drivers/net/mlx4/en_port.h +++ b/drivers/net/mlx4/en_port.h | |||
@@ -36,8 +36,8 @@ | |||
36 | 36 | ||
37 | 37 | ||
38 | #define SET_PORT_GEN_ALL_VALID 0x7 | 38 | #define SET_PORT_GEN_ALL_VALID 0x7 |
39 | #define SET_PORT_PROMISC_EN_SHIFT 31 | 39 | #define SET_PORT_PROMISC_SHIFT 31 |
40 | #define SET_PORT_PROMISC_MODE_SHIFT 30 | 40 | #define SET_PORT_MC_PROMISC_SHIFT 30 |
41 | 41 | ||
42 | enum { | 42 | enum { |
43 | MLX4_CMD_SET_VLAN_FLTR = 0x47, | 43 | MLX4_CMD_SET_VLAN_FLTR = 0x47, |
@@ -45,6 +45,12 @@ enum { | |||
45 | MLX4_CMD_DUMP_ETH_STATS = 0x49, | 45 | MLX4_CMD_DUMP_ETH_STATS = 0x49, |
46 | }; | 46 | }; |
47 | 47 | ||
48 | enum { | ||
49 | MCAST_DIRECT_ONLY = 0, | ||
50 | MCAST_DIRECT = 1, | ||
51 | MCAST_DEFAULT = 2 | ||
52 | }; | ||
53 | |||
48 | struct mlx4_set_port_general_context { | 54 | struct mlx4_set_port_general_context { |
49 | u8 reserved[3]; | 55 | u8 reserved[3]; |
50 | u8 flags; | 56 | u8 flags; |
@@ -60,14 +66,17 @@ struct mlx4_set_port_general_context { | |||
60 | 66 | ||
61 | struct mlx4_set_port_rqp_calc_context { | 67 | struct mlx4_set_port_rqp_calc_context { |
62 | __be32 base_qpn; | 68 | __be32 base_qpn; |
63 | __be32 flags; | 69 | u8 reserved; |
64 | u8 reserved[3]; | 70 | u8 n_mac; |
71 | u8 n_vlan; | ||
72 | u8 n_prio; | ||
73 | u8 reserved2[3]; | ||
65 | u8 mac_miss; | 74 | u8 mac_miss; |
66 | u8 intra_no_vlan; | 75 | u8 intra_no_vlan; |
67 | u8 no_vlan; | 76 | u8 no_vlan; |
68 | u8 intra_vlan_miss; | 77 | u8 intra_vlan_miss; |
69 | u8 vlan_miss; | 78 | u8 vlan_miss; |
70 | u8 reserved2[3]; | 79 | u8 reserved3[3]; |
71 | u8 no_vlan_prio; | 80 | u8 no_vlan_prio; |
72 | __be32 promisc; | 81 | __be32 promisc; |
73 | __be32 mcast; | 82 | __be32 mcast; |
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c index 570f2508fb30..05998ee297c9 100644 --- a/drivers/net/mlx4/en_rx.c +++ b/drivers/net/mlx4/en_rx.c | |||
@@ -845,16 +845,10 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) | |||
845 | } | 845 | } |
846 | 846 | ||
847 | /* Configure RSS indirection qp */ | 847 | /* Configure RSS indirection qp */ |
848 | err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn); | ||
849 | if (err) { | ||
850 | en_err(priv, "Failed to reserve range for RSS " | ||
851 | "indirection qp\n"); | ||
852 | goto rss_err; | ||
853 | } | ||
854 | err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp); | 848 | err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp); |
855 | if (err) { | 849 | if (err) { |
856 | en_err(priv, "Failed to allocate RSS indirection QP\n"); | 850 | en_err(priv, "Failed to allocate RSS indirection QP\n"); |
857 | goto reserve_err; | 851 | goto rss_err; |
858 | } | 852 | } |
859 | rss_map->indir_qp.event = mlx4_en_sqp_event; | 853 | rss_map->indir_qp.event = mlx4_en_sqp_event; |
860 | mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, | 854 | mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, |
@@ -881,8 +875,6 @@ indir_err: | |||
881 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); | 875 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); |
882 | mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); | 876 | mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); |
883 | mlx4_qp_free(mdev->dev, &rss_map->indir_qp); | 877 | mlx4_qp_free(mdev->dev, &rss_map->indir_qp); |
884 | reserve_err: | ||
885 | mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1); | ||
886 | rss_err: | 878 | rss_err: |
887 | for (i = 0; i < good_qps; i++) { | 879 | for (i = 0; i < good_qps; i++) { |
888 | mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], | 880 | mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], |
@@ -904,7 +896,6 @@ void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv) | |||
904 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); | 896 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); |
905 | mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); | 897 | mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); |
906 | mlx4_qp_free(mdev->dev, &rss_map->indir_qp); | 898 | mlx4_qp_free(mdev->dev, &rss_map->indir_qp); |
907 | mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1); | ||
908 | 899 | ||
909 | for (i = 0; i < priv->rx_ring_num; i++) { | 900 | for (i = 0; i < priv->rx_ring_num; i++) { |
910 | mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], | 901 | mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], |
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c index a680cd4a5ab6..01feb8fd42ad 100644 --- a/drivers/net/mlx4/en_tx.c +++ b/drivers/net/mlx4/en_tx.c | |||
@@ -44,6 +44,7 @@ | |||
44 | 44 | ||
45 | enum { | 45 | enum { |
46 | MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ | 46 | MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ |
47 | MAX_BF = 256, | ||
47 | }; | 48 | }; |
48 | 49 | ||
49 | static int inline_thold __read_mostly = MAX_INLINE; | 50 | static int inline_thold __read_mostly = MAX_INLINE; |
@@ -52,7 +53,7 @@ module_param_named(inline_thold, inline_thold, int, 0444); | |||
52 | MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); | 53 | MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); |
53 | 54 | ||
54 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, | 55 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, |
55 | struct mlx4_en_tx_ring *ring, u32 size, | 56 | struct mlx4_en_tx_ring *ring, int qpn, u32 size, |
56 | u16 stride) | 57 | u16 stride) |
57 | { | 58 | { |
58 | struct mlx4_en_dev *mdev = priv->mdev; | 59 | struct mlx4_en_dev *mdev = priv->mdev; |
@@ -103,23 +104,25 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, | |||
103 | "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, | 104 | "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, |
104 | ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); | 105 | ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); |
105 | 106 | ||
106 | err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn); | 107 | ring->qpn = qpn; |
107 | if (err) { | ||
108 | en_err(priv, "Failed reserving qp for tx ring.\n"); | ||
109 | goto err_map; | ||
110 | } | ||
111 | |||
112 | err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); | 108 | err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); |
113 | if (err) { | 109 | if (err) { |
114 | en_err(priv, "Failed allocating qp %d\n", ring->qpn); | 110 | en_err(priv, "Failed allocating qp %d\n", ring->qpn); |
115 | goto err_reserve; | 111 | goto err_map; |
116 | } | 112 | } |
117 | ring->qp.event = mlx4_en_sqp_event; | 113 | ring->qp.event = mlx4_en_sqp_event; |
118 | 114 | ||
115 | err = mlx4_bf_alloc(mdev->dev, &ring->bf); | ||
116 | if (err) { | ||
117 | en_dbg(DRV, priv, "working without blueflame (%d)", err); | ||
118 | ring->bf.uar = &mdev->priv_uar; | ||
119 | ring->bf.uar->map = mdev->uar_map; | ||
120 | ring->bf_enabled = false; | ||
121 | } else | ||
122 | ring->bf_enabled = true; | ||
123 | |||
119 | return 0; | 124 | return 0; |
120 | 125 | ||
121 | err_reserve: | ||
122 | mlx4_qp_release_range(mdev->dev, ring->qpn, 1); | ||
123 | err_map: | 126 | err_map: |
124 | mlx4_en_unmap_buffer(&ring->wqres.buf); | 127 | mlx4_en_unmap_buffer(&ring->wqres.buf); |
125 | err_hwq_res: | 128 | err_hwq_res: |
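
The BlueFlame allocation above deliberately treats failure as "feature unavailable" rather than as an error. Condensed, the fallback pattern the hunk introduces is:

        if (mlx4_bf_alloc(mdev->dev, &ring->bf)) {
                /* no BlueFlame register: fall back to the shared driver UAR
                 * and the classic doorbell write in mlx4_en_xmit() */
                ring->bf.uar = &mdev->priv_uar;
                ring->bf.uar->map = mdev->uar_map;
                ring->bf_enabled = false;
        } else {
                ring->bf_enabled = true;
        }
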
@@ -139,6 +142,8 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, | |||
139 | struct mlx4_en_dev *mdev = priv->mdev; | 142 | struct mlx4_en_dev *mdev = priv->mdev; |
140 | en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); | 143 | en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); |
141 | 144 | ||
145 | if (ring->bf_enabled) | ||
146 | mlx4_bf_free(mdev->dev, &ring->bf); | ||
142 | mlx4_qp_remove(mdev->dev, &ring->qp); | 147 | mlx4_qp_remove(mdev->dev, &ring->qp); |
143 | mlx4_qp_free(mdev->dev, &ring->qp); | 148 | mlx4_qp_free(mdev->dev, &ring->qp); |
144 | mlx4_qp_release_range(mdev->dev, ring->qpn, 1); | 149 | mlx4_qp_release_range(mdev->dev, ring->qpn, 1); |
@@ -171,6 +176,8 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, | |||
171 | 176 | ||
172 | mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, | 177 | mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, |
173 | ring->cqn, &ring->context); | 178 | ring->cqn, &ring->context); |
179 | if (ring->bf_enabled) | ||
180 | ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); | ||
174 | 181 | ||
175 | err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, | 182 | err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, |
176 | &ring->qp, &ring->qp_state); | 183 | &ring->qp, &ring->qp_state); |
@@ -591,6 +598,11 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
591 | return skb_tx_hash(dev, skb); | 598 | return skb_tx_hash(dev, skb); |
592 | } | 599 | } |
593 | 600 | ||
601 | static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt) | ||
602 | { | ||
603 | __iowrite64_copy(dst, src, bytecnt / 8); | ||
604 | } | ||
605 | |||
594 | netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | 606 | netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) |
595 | { | 607 | { |
596 | struct mlx4_en_priv *priv = netdev_priv(dev); | 608 | struct mlx4_en_priv *priv = netdev_priv(dev); |
@@ -609,12 +621,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
609 | int desc_size; | 621 | int desc_size; |
610 | int real_size; | 622 | int real_size; |
611 | dma_addr_t dma; | 623 | dma_addr_t dma; |
612 | u32 index; | 624 | u32 index, bf_index; |
613 | __be32 op_own; | 625 | __be32 op_own; |
614 | u16 vlan_tag = 0; | 626 | u16 vlan_tag = 0; |
615 | int i; | 627 | int i; |
616 | int lso_header_size; | 628 | int lso_header_size; |
617 | void *fragptr; | 629 | void *fragptr; |
630 | bool bounce = false; | ||
618 | 631 | ||
619 | if (!priv->port_up) | 632 | if (!priv->port_up) |
620 | goto tx_drop; | 633 | goto tx_drop; |
@@ -657,13 +670,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
657 | 670 | ||
658 | /* Packet is good - grab an index and transmit it */ | 671 | /* Packet is good - grab an index and transmit it */ |
659 | index = ring->prod & ring->size_mask; | 672 | index = ring->prod & ring->size_mask; |
673 | bf_index = ring->prod; | ||
660 | 674 | ||
661 | /* See if we have enough space for whole descriptor TXBB for setting | 675 | /* See if we have enough space for whole descriptor TXBB for setting |
662 | * SW ownership on next descriptor; if not, use a bounce buffer. */ | 676 | * SW ownership on next descriptor; if not, use a bounce buffer. */ |
663 | if (likely(index + nr_txbb <= ring->size)) | 677 | if (likely(index + nr_txbb <= ring->size)) |
664 | tx_desc = ring->buf + index * TXBB_SIZE; | 678 | tx_desc = ring->buf + index * TXBB_SIZE; |
665 | else | 679 | else { |
666 | tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; | 680 | tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; |
681 | bounce = true; | ||
682 | } | ||
667 | 683 | ||
668 | /* Save skb in tx_info ring */ | 684 | /* Save skb in tx_info ring */ |
669 | tx_info = &ring->tx_info[index]; | 685 | tx_info = &ring->tx_info[index]; |
@@ -768,21 +784,37 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
768 | ring->prod += nr_txbb; | 784 | ring->prod += nr_txbb; |
769 | 785 | ||
770 | /* If we used a bounce buffer then copy descriptor back into place */ | 786 | /* If we used a bounce buffer then copy descriptor back into place */ |
771 | if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf) | 787 | if (bounce) |
772 | tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); | 788 | tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); |
773 | 789 | ||
774 | /* Run destructor before passing skb to HW */ | 790 | /* Run destructor before passing skb to HW */ |
775 | if (likely(!skb_shared(skb))) | 791 | if (likely(!skb_shared(skb))) |
776 | skb_orphan(skb); | 792 | skb_orphan(skb); |
777 | 793 | ||
778 | /* Ensure new descirptor hits memory | 794 | if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) { |
779 | * before setting ownership of this descriptor to HW */ | 795 | *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn; |
780 | wmb(); | 796 | op_own |= htonl((bf_index & 0xffff) << 8); |
781 | tx_desc->ctrl.owner_opcode = op_own; | 797 | /* Ensure new descriptor hits memory |
798 | * before setting ownership of this descriptor to HW */ | ||
799 | wmb(); | ||
800 | tx_desc->ctrl.owner_opcode = op_own; | ||
782 | 801 | ||
783 | /* Ring doorbell! */ | 802 | wmb(); |
784 | wmb(); | 803 | |
785 | writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL); | 804 | mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl, |
805 | desc_size); | ||
806 | |||
807 | wmb(); | ||
808 | |||
809 | ring->bf.offset ^= ring->bf.buf_size; | ||
810 | } else { | ||
811 | /* Ensure new descriptor hits memory | ||
812 | * before setting ownership of this descriptor to HW */ | ||
813 | wmb(); | ||
814 | tx_desc->ctrl.owner_opcode = op_own; | ||
815 | wmb(); | ||
816 | writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); | ||
817 | } | ||
786 | 818 | ||
787 | /* Poll CQ here */ | 819 | /* Poll CQ here */ |
788 | mlx4_en_xmit_poll(priv, tx_ind); | 820 | mlx4_en_xmit_poll(priv, tx_ind); |
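
Two details worth noting in the transmit path above. First, __iowrite64_copy() (declared in <linux/io.h>) takes its count in 64-bit words, which is why mlx4_bf_copy() divides by 8; descriptors are TXBB-aligned, so the size is always a multiple of 8. Second, BlueFlame is only used for small, unbounced, untagged descriptors. A sketch of the two doorbell flavors (bf_reg, desc and use_bf are illustrative names, not from the patch):

        if (use_bf)                     /* small descriptor, no bounce/VLAN */
                __iowrite64_copy(bf_reg, desc, desc_size / 8); /* quadwords */
        else                            /* classic path */
                writel(doorbell_qpn, uar_map + MLX4_SEND_DOORBELL);
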
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c index 552d0fce6f67..506cfd0372ec 100644 --- a/drivers/net/mlx4/eq.c +++ b/drivers/net/mlx4/eq.c | |||
@@ -42,7 +42,7 @@ | |||
42 | #include "fw.h" | 42 | #include "fw.h" |
43 | 43 | ||
44 | enum { | 44 | enum { |
45 | MLX4_IRQNAME_SIZE = 64 | 45 | MLX4_IRQNAME_SIZE = 32 |
46 | }; | 46 | }; |
47 | 47 | ||
48 | enum { | 48 | enum { |
@@ -317,8 +317,8 @@ static int mlx4_num_eq_uar(struct mlx4_dev *dev) | |||
317 | * we need to map, take the difference of highest index and | 317 | * we need to map, take the difference of highest index and |
318 | * the lowest index we'll use and add 1. | 318 | * the lowest index we'll use and add 1. |
319 | */ | 319 | */ |
320 | return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 - | 320 | return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs + |
321 | dev->caps.reserved_eqs / 4 + 1; | 321 | dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1; |
322 | } | 322 | } |
323 | 323 | ||
324 | static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) | 324 | static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) |
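
Each UAR page holds the doorbells of four EQs, so the reworked formula simply spans from the page of the lowest reserved EQ to the page of the highest pooled EQ. A worked example with assumed capability values:

        /* assumed: reserved_eqs = 8, num_comp_vectors = 3, comp_pool = 18
         * highest EQ index = 8 + 3 + 1 + 18 - 1 = 29  ->  UAR page 29/4 = 7
         * lowest  EQ index = 8                        ->  UAR page  8/4 = 2
         * pages to map: (3 + 1 + 8 + 18)/4 - 8/4 + 1 = 7 - 2 + 1 = 6
         */
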
@@ -496,16 +496,32 @@ static void mlx4_free_eq(struct mlx4_dev *dev, | |||
496 | static void mlx4_free_irqs(struct mlx4_dev *dev) | 496 | static void mlx4_free_irqs(struct mlx4_dev *dev) |
497 | { | 497 | { |
498 | struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; | 498 | struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; |
499 | int i; | 499 | struct mlx4_priv *priv = mlx4_priv(dev); |
500 | int i, vec; | ||
500 | 501 | ||
501 | if (eq_table->have_irq) | 502 | if (eq_table->have_irq) |
502 | free_irq(dev->pdev->irq, dev); | 503 | free_irq(dev->pdev->irq, dev); |
504 | |||
503 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) | 505 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) |
504 | if (eq_table->eq[i].have_irq) { | 506 | if (eq_table->eq[i].have_irq) { |
505 | free_irq(eq_table->eq[i].irq, eq_table->eq + i); | 507 | free_irq(eq_table->eq[i].irq, eq_table->eq + i); |
506 | eq_table->eq[i].have_irq = 0; | 508 | eq_table->eq[i].have_irq = 0; |
507 | } | 509 | } |
508 | 510 | ||
511 | for (i = 0; i < dev->caps.comp_pool; i++) { | ||
512 | /* | ||
513 | * Free any IRQs still assigned from the pool; | ||
514 | * all bits should already be 0, but validate anyway. | ||
515 | */ | ||
516 | if (priv->msix_ctl.pool_bm & 1ULL << i) { | ||
517 | /* no locking needed at teardown */ | ||
518 | vec = dev->caps.num_comp_vectors + 1 + i; | ||
519 | free_irq(priv->eq_table.eq[vec].irq, | ||
520 | &priv->eq_table.eq[vec]); | ||
521 | } | ||
522 | } | ||
523 | |||
524 | |||
509 | kfree(eq_table->irq_names); | 525 | kfree(eq_table->irq_names); |
510 | } | 526 | } |
511 | 527 | ||
@@ -578,7 +594,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) | |||
578 | (priv->eq_table.inta_pin < 32 ? 4 : 0); | 594 | (priv->eq_table.inta_pin < 32 ? 4 : 0); |
579 | 595 | ||
580 | priv->eq_table.irq_names = | 596 | priv->eq_table.irq_names = |
581 | kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1), | 597 | kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 + |
598 | dev->caps.comp_pool), | ||
582 | GFP_KERNEL); | 599 | GFP_KERNEL); |
583 | if (!priv->eq_table.irq_names) { | 600 | if (!priv->eq_table.irq_names) { |
584 | err = -ENOMEM; | 601 | err = -ENOMEM; |
@@ -601,6 +618,22 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) | |||
601 | if (err) | 618 | if (err) |
602 | goto err_out_comp; | 619 | goto err_out_comp; |
603 | 620 | ||
621 | /* if the additional completion vector pool size is 0, this loop does not run */ | ||
622 | for (i = dev->caps.num_comp_vectors + 1; | ||
623 | i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) { | ||
624 | |||
625 | err = mlx4_create_eq(dev, dev->caps.num_cqs - | ||
626 | dev->caps.reserved_cqs + | ||
627 | MLX4_NUM_SPARE_EQE, | ||
628 | (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, | ||
629 | &priv->eq_table.eq[i]); | ||
630 | if (err) { | ||
631 | --i; | ||
632 | goto err_out_unmap; | ||
633 | } | ||
634 | } | ||
635 | |||
636 | |||
604 | if (dev->flags & MLX4_FLAG_MSI_X) { | 637 | if (dev->flags & MLX4_FLAG_MSI_X) { |
605 | const char *eq_name; | 638 | const char *eq_name; |
606 | 639 | ||
@@ -686,7 +719,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev) | |||
686 | 719 | ||
687 | mlx4_free_irqs(dev); | 720 | mlx4_free_irqs(dev); |
688 | 721 | ||
689 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) | 722 | for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) |
690 | mlx4_free_eq(dev, &priv->eq_table.eq[i]); | 723 | mlx4_free_eq(dev, &priv->eq_table.eq[i]); |
691 | 724 | ||
692 | mlx4_unmap_clr_int(dev); | 725 | mlx4_unmap_clr_int(dev); |
@@ -743,3 +776,65 @@ int mlx4_test_interrupts(struct mlx4_dev *dev) | |||
743 | return err; | 776 | return err; |
744 | } | 777 | } |
745 | EXPORT_SYMBOL(mlx4_test_interrupts); | 778 | EXPORT_SYMBOL(mlx4_test_interrupts); |
779 | |||
780 | int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector) | ||
781 | { | ||
782 | |||
783 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
784 | int vec = 0, err = 0, i; | ||
785 | |||
786 | spin_lock(&priv->msix_ctl.pool_lock); | ||
787 | for (i = 0; !vec && i < dev->caps.comp_pool; i++) { | ||
788 | if (~priv->msix_ctl.pool_bm & 1ULL << i) { | ||
789 | priv->msix_ctl.pool_bm |= 1ULL << i; | ||
790 | vec = dev->caps.num_comp_vectors + 1 + i; | ||
791 | snprintf(priv->eq_table.irq_names + | ||
792 | vec * MLX4_IRQNAME_SIZE, | ||
793 | MLX4_IRQNAME_SIZE, "%s", name); | ||
794 | err = request_irq(priv->eq_table.eq[vec].irq, | ||
795 | mlx4_msi_x_interrupt, 0, | ||
796 | priv->eq_table.irq_names + vec * MLX4_IRQNAME_SIZE, | ||
797 | priv->eq_table.eq + vec); | ||
798 | if (err) { | ||
799 | /* clear the bit by flipping it back */ | ||
800 | priv->msix_ctl.pool_bm ^= 1ULL << i; | ||
801 | vec = 0; | ||
802 | continue; | ||
803 | /* don't break here; try the next pool slot */ | ||
804 | } | ||
805 | eq_set_ci(&priv->eq_table.eq[vec], 1); | ||
806 | } | ||
807 | } | ||
808 | spin_unlock(&priv->msix_ctl.pool_lock); | ||
809 | |||
810 | if (vec) { | ||
811 | *vector = vec; | ||
812 | } else { | ||
813 | *vector = 0; | ||
814 | err = (i == dev->caps.comp_pool) ? -ENOSPC : err; | ||
815 | } | ||
816 | return err; | ||
817 | } | ||
818 | EXPORT_SYMBOL(mlx4_assign_eq); | ||
819 | |||
820 | void mlx4_release_eq(struct mlx4_dev *dev, int vec) | ||
821 | { | ||
822 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
823 | /* bitmap index */ | ||
824 | int i = vec - dev->caps.num_comp_vectors - 1; | ||
825 | |||
826 | if (likely(i >= 0)) { | ||
827 | /* sanity check: make sure we're not trying to free an IRQ | ||
828 | * belonging to a legacy EQ */ | ||
829 | spin_lock(&priv->msix_ctl.pool_lock); | ||
830 | if (priv->msix_ctl.pool_bm & 1ULL << i) { | ||
831 | free_irq(priv->eq_table.eq[vec].irq, | ||
832 | &priv->eq_table.eq[vec]); | ||
833 | priv->msix_ctl.pool_bm &= ~(1ULL << i); | ||
834 | } | ||
835 | spin_unlock(&priv->msix_ctl.pool_lock); | ||
836 | } | ||
837 | |||
838 | } | ||
839 | EXPORT_SYMBOL(mlx4_release_eq); | ||
840 | |||
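
A hypothetical consumer of the new pool API could look like the sketch below; the name string, ring_idx and the modulo fallback are illustrative, only mlx4_assign_eq() and mlx4_release_eq() come from this patch:

        int vec = 0;

        /* try to grab a dedicated completion vector from the pool */
        if (!mlx4_assign_eq(dev, "eth0-rx-0", &vec))
                cq->vector = vec;       /* private EQ and IRQ */
        else    /* pool exhausted or absent: share a legacy vector */
                cq->vector = ring_idx % dev->caps.num_comp_vectors;

        /* later, on teardown (a no-op for legacy vectors): */
        mlx4_release_eq(dev, cq->vector);
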
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c index 5de1db897835..67a209ba939d 100644 --- a/drivers/net/mlx4/fw.c +++ b/drivers/net/mlx4/fw.c | |||
@@ -274,8 +274,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
274 | dev_cap->stat_rate_support = stat_rate; | 274 | dev_cap->stat_rate_support = stat_rate; |
275 | MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET); | 275 | MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET); |
276 | dev_cap->udp_rss = field & 0x1; | 276 | dev_cap->udp_rss = field & 0x1; |
277 | dev_cap->vep_uc_steering = field & 0x2; | ||
278 | dev_cap->vep_mc_steering = field & 0x4; | ||
277 | MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET); | 279 | MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET); |
278 | dev_cap->loopback_support = field & 0x1; | 280 | dev_cap->loopback_support = field & 0x1; |
281 | dev_cap->wol = field & 0x40; | ||
279 | MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); | 282 | MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); |
280 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); | 283 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); |
281 | dev_cap->reserved_uars = field >> 4; | 284 | dev_cap->reserved_uars = field >> 4; |
@@ -737,6 +740,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) | |||
737 | #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) | 740 | #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) |
738 | #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) | 741 | #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) |
739 | #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) | 742 | #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) |
743 | #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) | ||
740 | #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) | 744 | #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) |
741 | #define INIT_HCA_TPT_OFFSET 0x0f0 | 745 | #define INIT_HCA_TPT_OFFSET 0x0f0 |
742 | #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) | 746 | #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) |
@@ -797,6 +801,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) | |||
797 | MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); | 801 | MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); |
798 | MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); | 802 | MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); |
799 | MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); | 803 | MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); |
804 | if (dev->caps.vep_mc_steering) | ||
805 | MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET); | ||
800 | MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); | 806 | MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); |
801 | 807 | ||
802 | /* TPT attributes */ | 808 | /* TPT attributes */ |
@@ -908,3 +914,22 @@ int mlx4_NOP(struct mlx4_dev *dev) | |||
908 | /* Input modifier of 0x1f means "finish as soon as possible." */ | 914 | /* Input modifier of 0x1f means "finish as soon as possible." */ |
909 | return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100); | 915 | return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100); |
910 | } | 916 | } |
917 | |||
918 | #define MLX4_WOL_SETUP_MODE (5 << 28) | ||
919 | int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port) | ||
920 | { | ||
921 | u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; | ||
922 | |||
923 | return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3, | ||
924 | MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A); | ||
925 | } | ||
926 | EXPORT_SYMBOL_GPL(mlx4_wol_read); | ||
927 | |||
928 | int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port) | ||
929 | { | ||
930 | u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; | ||
931 | |||
932 | return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG, | ||
933 | MLX4_CMD_TIME_CLASS_A); | ||
934 | } | ||
935 | EXPORT_SYMBOL_GPL(mlx4_wol_write); | ||
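
Both helpers are thin wrappers around MOD_STAT_CFG (opcode modifier 0x3 queries, 0x1 sets), so callers would use them read-modify-write style. A hedged sketch; the bit flipped here is purely illustrative, the actual layout of config is firmware-defined and not part of this patch:

        u64 config;
        int err;

        err = mlx4_wol_read(dev, &config, port);
        if (!err) {
                config |= 1ULL << 62;   /* illustrative enable bit */
                err = mlx4_wol_write(dev, config, port);
        }
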
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h index 65cc72eb899d..88003ebc6185 100644 --- a/drivers/net/mlx4/fw.h +++ b/drivers/net/mlx4/fw.h | |||
@@ -80,6 +80,9 @@ struct mlx4_dev_cap { | |||
80 | u16 stat_rate_support; | 80 | u16 stat_rate_support; |
81 | int udp_rss; | 81 | int udp_rss; |
82 | int loopback_support; | 82 | int loopback_support; |
83 | int vep_uc_steering; | ||
84 | int vep_mc_steering; | ||
85 | int wol; | ||
83 | u32 flags; | 86 | u32 flags; |
84 | int reserved_uars; | 87 | int reserved_uars; |
85 | int uar_size; | 88 | int uar_size; |
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index c83501122d77..62fa7eec5f0c 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/pci.h> | 39 | #include <linux/pci.h> |
40 | #include <linux/dma-mapping.h> | 40 | #include <linux/dma-mapping.h> |
41 | #include <linux/slab.h> | 41 | #include <linux/slab.h> |
42 | #include <linux/io-mapping.h> | ||
42 | 43 | ||
43 | #include <linux/mlx4/device.h> | 44 | #include <linux/mlx4/device.h> |
44 | #include <linux/mlx4/doorbell.h> | 45 | #include <linux/mlx4/doorbell.h> |
@@ -227,6 +228,9 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
227 | dev->caps.stat_rate_support = dev_cap->stat_rate_support; | 228 | dev->caps.stat_rate_support = dev_cap->stat_rate_support; |
228 | dev->caps.udp_rss = dev_cap->udp_rss; | 229 | dev->caps.udp_rss = dev_cap->udp_rss; |
229 | dev->caps.loopback_support = dev_cap->loopback_support; | 230 | dev->caps.loopback_support = dev_cap->loopback_support; |
231 | dev->caps.vep_uc_steering = dev_cap->vep_uc_steering; | ||
232 | dev->caps.vep_mc_steering = dev_cap->vep_mc_steering; | ||
233 | dev->caps.wol = dev_cap->wol; | ||
230 | dev->caps.max_gso_sz = dev_cap->max_gso_sz; | 234 | dev->caps.max_gso_sz = dev_cap->max_gso_sz; |
231 | 235 | ||
232 | dev->caps.log_num_macs = log_num_mac; | 236 | dev->caps.log_num_macs = log_num_mac; |
@@ -718,8 +722,31 @@ static void mlx4_free_icms(struct mlx4_dev *dev) | |||
718 | mlx4_free_icm(dev, priv->fw.aux_icm, 0); | 722 | mlx4_free_icm(dev, priv->fw.aux_icm, 0); |
719 | } | 723 | } |
720 | 724 | ||
725 | static int map_bf_area(struct mlx4_dev *dev) | ||
726 | { | ||
727 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
728 | resource_size_t bf_start; | ||
729 | resource_size_t bf_len; | ||
730 | int err = 0; | ||
731 | |||
732 | bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT); | ||
733 | bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT); | ||
734 | priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); | ||
735 | if (!priv->bf_mapping) | ||
736 | err = -ENOMEM; | ||
737 | |||
738 | return err; | ||
739 | } | ||
740 | |||
741 | static void unmap_bf_area(struct mlx4_dev *dev) | ||
742 | { | ||
743 | if (mlx4_priv(dev)->bf_mapping) | ||
744 | io_mapping_free(mlx4_priv(dev)->bf_mapping); | ||
745 | } | ||
746 | |||
721 | static void mlx4_close_hca(struct mlx4_dev *dev) | 747 | static void mlx4_close_hca(struct mlx4_dev *dev) |
722 | { | 748 | { |
749 | unmap_bf_area(dev); | ||
723 | mlx4_CLOSE_HCA(dev, 0); | 750 | mlx4_CLOSE_HCA(dev, 0); |
724 | mlx4_free_icms(dev); | 751 | mlx4_free_icms(dev); |
725 | mlx4_UNMAP_FA(dev); | 752 | mlx4_UNMAP_FA(dev); |
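
map_bf_area() splits BAR 2 in two: the first num_uars pages keep their regular ioremap()ed doorbell role, and everything past them becomes a single write-combining io_mapping for the BlueFlame registers:

        /* BAR 2 layout assumed by map_bf_area():
         *
         *   +--------------------------+  <- pci_resource_start(pdev, 2)
         *   | UAR pages (num_uars)     |     regular doorbells
         *   +--------------------------+  <- start + (num_uars << PAGE_SHIFT)
         *   | BlueFlame registers (WC) |     io_mapping_create_wc()
         *   +--------------------------+  <- start + pci_resource_len(pdev, 2)
         */
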
@@ -772,6 +799,9 @@ static int mlx4_init_hca(struct mlx4_dev *dev) | |||
772 | goto err_stop_fw; | 799 | goto err_stop_fw; |
773 | } | 800 | } |
774 | 801 | ||
802 | if (map_bf_area(dev)) | ||
803 | mlx4_dbg(dev, "Failed to map blue flame area\n"); | ||
804 | |||
775 | init_hca.log_uar_sz = ilog2(dev->caps.num_uars); | 805 | init_hca.log_uar_sz = ilog2(dev->caps.num_uars); |
776 | 806 | ||
777 | err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); | 807 | err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); |
@@ -802,6 +832,7 @@ err_free_icm: | |||
802 | mlx4_free_icms(dev); | 832 | mlx4_free_icms(dev); |
803 | 833 | ||
804 | err_stop_fw: | 834 | err_stop_fw: |
835 | unmap_bf_area(dev); | ||
805 | mlx4_UNMAP_FA(dev); | 836 | mlx4_UNMAP_FA(dev); |
806 | mlx4_free_icm(dev, priv->fw.fw_icm, 0); | 837 | mlx4_free_icm(dev, priv->fw.fw_icm, 0); |
807 | 838 | ||
@@ -969,13 +1000,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) | |||
969 | { | 1000 | { |
970 | struct mlx4_priv *priv = mlx4_priv(dev); | 1001 | struct mlx4_priv *priv = mlx4_priv(dev); |
971 | struct msix_entry *entries; | 1002 | struct msix_entry *entries; |
972 | int nreq; | 1003 | int nreq = min_t(int, dev->caps.num_ports * |
1004 | min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT) | ||
1005 | + MSIX_LEGACY_SZ, MAX_MSIX); | ||
973 | int err; | 1006 | int err; |
974 | int i; | 1007 | int i; |
975 | 1008 | ||
976 | if (msi_x) { | 1009 | if (msi_x) { |
977 | nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, | 1010 | nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, |
978 | num_possible_cpus() + 1); | 1011 | nreq); |
979 | entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); | 1012 | entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); |
980 | if (!entries) | 1013 | if (!entries) |
981 | goto no_msi; | 1014 | goto no_msi; |
@@ -998,7 +1031,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) | |||
998 | goto no_msi; | 1031 | goto no_msi; |
999 | } | 1032 | } |
1000 | 1033 | ||
1001 | dev->caps.num_comp_vectors = nreq - 1; | 1034 | if (nreq < |
1035 | MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) { | ||
1036 | /* working in legacy mode, all EQs shared */ | ||
1037 | dev->caps.comp_pool = 0; | ||
1038 | dev->caps.num_comp_vectors = nreq - 1; | ||
1039 | } else { | ||
1040 | dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ; | ||
1041 | dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1; | ||
1042 | } | ||
1002 | for (i = 0; i < nreq; ++i) | 1043 | for (i = 0; i < nreq; ++i) |
1003 | priv->eq_table.eq[i].irq = entries[i].vector; | 1044 | priv->eq_table.eq[i].irq = entries[i].vector; |
1004 | 1045 | ||
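
With the constants this patch relies on from <linux/mlx4/device.h> (MSIX_LEGACY_SZ = 4, MIN_MSIX_P_PORT = 2; treat the exact values as assumptions here), a two-port HCA on an eight-CPU machine works out as follows, assuming the HCA exposes enough EQs:

        /* nreq = min(2 * min(8 + 1, MAX_MSIX_P_PORT) + 4, MAX_MSIX) = 22
         * 22 >= MSIX_LEGACY_SZ + 2 * MIN_MSIX_P_PORT = 8, so:
         *   comp_pool        = 22 - MSIX_LEGACY_SZ = 18  (private vectors)
         *   num_comp_vectors = MSIX_LEGACY_SZ - 1  =  3  (shared vectors)
         */
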
@@ -1010,6 +1051,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) | |||
1010 | 1051 | ||
1011 | no_msi: | 1052 | no_msi: |
1012 | dev->caps.num_comp_vectors = 1; | 1053 | dev->caps.num_comp_vectors = 1; |
1054 | dev->caps.comp_pool = 0; | ||
1013 | 1055 | ||
1014 | for (i = 0; i < 2; ++i) | 1056 | for (i = 0; i < 2; ++i) |
1015 | priv->eq_table.eq[i].irq = dev->pdev->irq; | 1057 | priv->eq_table.eq[i].irq = dev->pdev->irq; |
@@ -1049,6 +1091,59 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info) | |||
1049 | device_remove_file(&info->dev->pdev->dev, &info->port_attr); | 1091 | device_remove_file(&info->dev->pdev->dev, &info->port_attr); |
1050 | } | 1092 | } |
1051 | 1093 | ||
1094 | static int mlx4_init_steering(struct mlx4_dev *dev) | ||
1095 | { | ||
1096 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
1097 | int num_entries = dev->caps.num_ports; | ||
1098 | int i, j; | ||
1099 | |||
1100 | priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); | ||
1101 | if (!priv->steer) | ||
1102 | return -ENOMEM; | ||
1103 | |||
1104 | for (i = 0; i < num_entries; i++) { | ||
1105 | for (j = 0; j < MLX4_NUM_STEERS; j++) { | ||
1106 | INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); | ||
1107 | INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); | ||
1108 | } | ||
1109 | INIT_LIST_HEAD(&priv->steer[i].high_prios); | ||
1110 | } | ||
1111 | return 0; | ||
1112 | } | ||
1113 | |||
1114 | static void mlx4_clear_steering(struct mlx4_dev *dev) | ||
1115 | { | ||
1116 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
1117 | struct mlx4_steer_index *entry, *tmp_entry; | ||
1118 | struct mlx4_promisc_qp *pqp, *tmp_pqp; | ||
1119 | int num_entries = dev->caps.num_ports; | ||
1120 | int i, j; | ||
1121 | |||
1122 | for (i = 0; i < num_entries; i++) { | ||
1123 | for (j = 0; j < MLX4_NUM_STEERS; j++) { | ||
1124 | list_for_each_entry_safe(pqp, tmp_pqp, | ||
1125 | &priv->steer[i].promisc_qps[j], | ||
1126 | list) { | ||
1127 | list_del(&pqp->list); | ||
1128 | kfree(pqp); | ||
1129 | } | ||
1130 | list_for_each_entry_safe(entry, tmp_entry, | ||
1131 | &priv->steer[i].steer_entries[j], | ||
1132 | list) { | ||
1133 | list_del(&entry->list); | ||
1134 | list_for_each_entry_safe(pqp, tmp_pqp, | ||
1135 | &entry->duplicates, | ||
1136 | list) { | ||
1137 | list_del(&pqp->list); | ||
1138 | kfree(pqp); | ||
1139 | } | ||
1140 | kfree(entry); | ||
1141 | } | ||
1142 | } | ||
1143 | } | ||
1144 | kfree(priv->steer); | ||
1145 | } | ||
1146 | |||
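
The per-port state initialized and torn down above reduces to three list heads per steer type; as the loops imply, the structure this patch adds to mlx4.h has roughly this shape:

        struct mlx4_steer {
                struct list_head promisc_qps[MLX4_NUM_STEERS];
                struct list_head steer_entries[MLX4_NUM_STEERS];
                struct list_head high_prios;
        };
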
1052 | static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | 1147 | static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) |
1053 | { | 1148 | { |
1054 | struct mlx4_priv *priv; | 1149 | struct mlx4_priv *priv; |
@@ -1130,6 +1225,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1130 | INIT_LIST_HEAD(&priv->pgdir_list); | 1225 | INIT_LIST_HEAD(&priv->pgdir_list); |
1131 | mutex_init(&priv->pgdir_mutex); | 1226 | mutex_init(&priv->pgdir_mutex); |
1132 | 1227 | ||
1228 | pci_read_config_byte(pdev, PCI_REVISION_ID, &dev->rev_id); | ||
1229 | |||
1230 | INIT_LIST_HEAD(&priv->bf_list); | ||
1231 | mutex_init(&priv->bf_mutex); | ||
1232 | |||
1133 | /* | 1233 | /* |
1134 | * Now reset the HCA before we touch the PCI capabilities or | 1234 | * Now reset the HCA before we touch the PCI capabilities or |
1135 | * attempt a firmware command, since a boot ROM may have left | 1235 | * attempt a firmware command, since a boot ROM may have left |
@@ -1154,8 +1254,15 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1154 | if (err) | 1254 | if (err) |
1155 | goto err_close; | 1255 | goto err_close; |
1156 | 1256 | ||
1257 | priv->msix_ctl.pool_bm = 0; | ||
1258 | spin_lock_init(&priv->msix_ctl.pool_lock); | ||
1259 | |||
1157 | mlx4_enable_msi_x(dev); | 1260 | mlx4_enable_msi_x(dev); |
1158 | 1261 | ||
1262 | err = mlx4_init_steering(dev); | ||
1263 | if (err) | ||
1264 | goto err_free_eq; | ||
1265 | |||
1159 | err = mlx4_setup_hca(dev); | 1266 | err = mlx4_setup_hca(dev); |
1160 | if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) { | 1267 | if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) { |
1161 | dev->flags &= ~MLX4_FLAG_MSI_X; | 1268 | dev->flags &= ~MLX4_FLAG_MSI_X; |
@@ -1164,7 +1271,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1164 | } | 1271 | } |
1165 | 1272 | ||
1166 | if (err) | 1273 | if (err) |
1167 | goto err_free_eq; | 1274 | goto err_steer; |
1168 | 1275 | ||
1169 | for (port = 1; port <= dev->caps.num_ports; port++) { | 1276 | for (port = 1; port <= dev->caps.num_ports; port++) { |
1170 | err = mlx4_init_port_info(dev, port); | 1277 | err = mlx4_init_port_info(dev, port); |
@@ -1197,6 +1304,9 @@ err_port: | |||
1197 | mlx4_cleanup_pd_table(dev); | 1304 | mlx4_cleanup_pd_table(dev); |
1198 | mlx4_cleanup_uar_table(dev); | 1305 | mlx4_cleanup_uar_table(dev); |
1199 | 1306 | ||
1307 | err_steer: | ||
1308 | mlx4_clear_steering(dev); | ||
1309 | |||
1200 | err_free_eq: | 1310 | err_free_eq: |
1201 | mlx4_free_eq_table(dev); | 1311 | mlx4_free_eq_table(dev); |
1202 | 1312 | ||
@@ -1256,6 +1366,7 @@ static void mlx4_remove_one(struct pci_dev *pdev) | |||
1256 | iounmap(priv->kar); | 1366 | iounmap(priv->kar); |
1257 | mlx4_uar_free(dev, &priv->driver_uar); | 1367 | mlx4_uar_free(dev, &priv->driver_uar); |
1258 | mlx4_cleanup_uar_table(dev); | 1368 | mlx4_cleanup_uar_table(dev); |
1369 | mlx4_clear_steering(dev); | ||
1259 | mlx4_free_eq_table(dev); | 1370 | mlx4_free_eq_table(dev); |
1260 | mlx4_close_hca(dev); | 1371 | mlx4_close_hca(dev); |
1261 | mlx4_cmd_cleanup(dev); | 1372 | mlx4_cmd_cleanup(dev); |
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c index 79cf42db2ea9..e71372aa9cc4 100644 --- a/drivers/net/mlx4/mcg.c +++ b/drivers/net/mlx4/mcg.c | |||
@@ -32,6 +32,7 @@ | |||
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/string.h> | 34 | #include <linux/string.h> |
35 | #include <linux/etherdevice.h> | ||
35 | 36 | ||
36 | #include <linux/mlx4/cmd.h> | 37 | #include <linux/mlx4/cmd.h> |
37 | 38 | ||
@@ -40,38 +41,40 @@ | |||
40 | #define MGM_QPN_MASK 0x00FFFFFF | 41 | #define MGM_QPN_MASK 0x00FFFFFF |
41 | #define MGM_BLCK_LB_BIT 30 | 42 | #define MGM_BLCK_LB_BIT 30 |
42 | 43 | ||
43 | struct mlx4_mgm { | ||
44 | __be32 next_gid_index; | ||
45 | __be32 members_count; | ||
46 | u32 reserved[2]; | ||
47 | u8 gid[16]; | ||
48 | __be32 qp[MLX4_QP_PER_MGM]; | ||
49 | }; | ||
50 | |||
51 | static const u8 zero_gid[16]; /* automatically initialized to 0 */ | 44 | static const u8 zero_gid[16]; /* automatically initialized to 0 */ |
52 | 45 | ||
53 | static int mlx4_READ_MCG(struct mlx4_dev *dev, int index, | 46 | static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, |
54 | struct mlx4_cmd_mailbox *mailbox) | 47 | struct mlx4_cmd_mailbox *mailbox) |
55 | { | 48 | { |
56 | return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, | 49 | return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, |
57 | MLX4_CMD_TIME_CLASS_A); | 50 | MLX4_CMD_TIME_CLASS_A); |
58 | } | 51 | } |
59 | 52 | ||
60 | static int mlx4_WRITE_MCG(struct mlx4_dev *dev, int index, | 53 | static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index, |
61 | struct mlx4_cmd_mailbox *mailbox) | 54 | struct mlx4_cmd_mailbox *mailbox) |
62 | { | 55 | { |
63 | return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, | 56 | return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, |
64 | MLX4_CMD_TIME_CLASS_A); | 57 | MLX4_CMD_TIME_CLASS_A); |
65 | } | 58 | } |
66 | 59 | ||
67 | static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, | 60 | static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer, |
68 | u16 *hash) | 61 | struct mlx4_cmd_mailbox *mailbox) |
62 | { | ||
63 | u32 in_mod; | ||
64 | |||
65 | in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1; | ||
66 | return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1, | ||
67 | MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A); | ||
68 | } | ||
69 | |||
70 | static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, | ||
71 | u16 *hash, u8 op_mod) | ||
69 | { | 72 | { |
70 | u64 imm; | 73 | u64 imm; |
71 | int err; | 74 | int err; |
72 | 75 | ||
73 | err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, 0, MLX4_CMD_MGID_HASH, | 76 | err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod, |
74 | MLX4_CMD_TIME_CLASS_A); | 77 | MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A); |
75 | 78 | ||
76 | if (!err) | 79 | if (!err) |
77 | *hash = imm; | 80 | *hash = imm; |
@@ -79,6 +82,457 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox | |||
79 | return err; | 82 | return err; |
80 | } | 83 | } |
81 | 84 | ||
85 | static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num, | ||
86 | enum mlx4_steer_type steer, | ||
87 | u32 qpn) | ||
88 | { | ||
89 | struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
90 | struct mlx4_promisc_qp *pqp; | ||
91 | |||
92 | list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { | ||
93 | if (pqp->qpn == qpn) | ||
94 | return pqp; | ||
95 | } | ||
96 | /* not found */ | ||
97 | return NULL; | ||
98 | } | ||
99 | |||
100 | /* | ||
101 | * Add new entry to steering data structure. | ||
102 | * All promisc QPs should be added as well | ||
103 | */ | ||
104 | static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, | ||
105 | enum mlx4_steer_type steer, | ||
106 | unsigned int index, u32 qpn) | ||
107 | { | ||
108 | struct mlx4_steer *s_steer; | ||
109 | struct mlx4_cmd_mailbox *mailbox; | ||
110 | struct mlx4_mgm *mgm; | ||
111 | u32 members_count; | ||
112 | struct mlx4_steer_index *new_entry; | ||
113 | struct mlx4_promisc_qp *pqp; | ||
114 | struct mlx4_promisc_qp *dqp = NULL; | ||
115 | u32 prot; | ||
116 | int err; | ||
117 | u8 pf_num; | ||
118 | |||
119 | pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); | ||
120 | s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
121 | new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); | ||
122 | if (!new_entry) | ||
123 | return -ENOMEM; | ||
124 | |||
125 | INIT_LIST_HEAD(&new_entry->duplicates); | ||
126 | new_entry->index = index; | ||
127 | list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]); | ||
128 | |||
129 | /* If the given qpn is also a promisc qp, | ||
130 | * it should be inserted to duplicates list | ||
131 | */ | ||
132 | pqp = get_promisc_qp(dev, pf_num, steer, qpn); | ||
133 | if (pqp) { | ||
134 | dqp = kmalloc(sizeof *dqp, GFP_KERNEL); | ||
135 | if (!dqp) { | ||
136 | err = -ENOMEM; | ||
137 | goto out_alloc; | ||
138 | } | ||
139 | dqp->qpn = qpn; | ||
140 | list_add_tail(&dqp->list, &new_entry->duplicates); | ||
141 | } | ||
142 | |||
143 | /* if no promisc qps for this vep, we are done */ | ||
144 | if (list_empty(&s_steer->promisc_qps[steer])) | ||
145 | return 0; | ||
146 | |||
147 | /* now need to add all the promisc qps to the new | ||
148 | * steering entry, as they should also receive the packets | ||
149 | * destined for this address */ | ||
150 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
151 | if (IS_ERR(mailbox)) { | ||
152 | err = -ENOMEM; | ||
153 | goto out_alloc; | ||
154 | } | ||
155 | mgm = mailbox->buf; | ||
156 | |||
157 | err = mlx4_READ_ENTRY(dev, index, mailbox); | ||
158 | if (err) | ||
159 | goto out_mailbox; | ||
160 | |||
161 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | ||
162 | prot = be32_to_cpu(mgm->members_count) >> 30; | ||
163 | list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { | ||
164 | /* don't add already existing qpn */ | ||
165 | if (pqp->qpn == qpn) | ||
166 | continue; | ||
167 | if (members_count == MLX4_QP_PER_MGM) { | ||
168 | /* out of space */ | ||
169 | err = -ENOMEM; | ||
170 | goto out_mailbox; | ||
171 | } | ||
172 | |||
173 | /* add the qpn */ | ||
174 | mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); | ||
175 | } | ||
176 | /* update the qps count and update the entry with all the promisc qps */ | ||
177 | mgm->members_count = cpu_to_be32(members_count | (prot << 30)); | ||
178 | err = mlx4_WRITE_ENTRY(dev, index, mailbox); | ||
179 | |||
180 | out_mailbox: | ||
181 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
182 | if (!err) | ||
183 | return 0; | ||
184 | out_alloc: | ||
185 | if (dqp) { | ||
186 | list_del(&dqp->list); | ||
187 | kfree(dqp); | ||
188 | } | ||
189 | list_del(&new_entry->list); | ||
190 | kfree(new_entry); | ||
191 | return err; | ||
192 | } | ||
193 | |||
194 | /* update the data structures with existing steering entry */ | ||
195 | static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, | ||
196 | enum mlx4_steer_type steer, | ||
197 | unsigned int index, u32 qpn) | ||
198 | { | ||
199 | struct mlx4_steer *s_steer; | ||
200 | struct mlx4_steer_index *tmp_entry, *entry = NULL; | ||
201 | struct mlx4_promisc_qp *pqp; | ||
202 | struct mlx4_promisc_qp *dqp; | ||
203 | u8 pf_num; | ||
204 | |||
205 | pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); | ||
206 | s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
207 | |||
208 | pqp = get_promisc_qp(dev, pf_num, steer, qpn); | ||
209 | if (!pqp) | ||
210 | return 0; /* nothing to do */ | ||
211 | |||
212 | list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { | ||
213 | if (tmp_entry->index == index) { | ||
214 | entry = tmp_entry; | ||
215 | break; | ||
216 | } | ||
217 | } | ||
218 | if (unlikely(!entry)) { | ||
219 | mlx4_warn(dev, "Steering entry at index %x is not registered\n", index); | ||
220 | return -EINVAL; | ||
221 | } | ||
222 | |||
223 | /* the given qpn is listed as a promisc qpn | ||
224 | * we need to add it as a duplicate to this entry | ||
225 | * for future reference */ | ||
226 | list_for_each_entry(dqp, &entry->duplicates, list) { | ||
227 | if (qpn == dqp->qpn) | ||
228 | return 0; /* qp is already duplicated */ | ||
229 | } | ||
230 | |||
231 | /* add the qp as a duplicate on this index */ | ||
232 | dqp = kmalloc(sizeof *dqp, GFP_KERNEL); | ||
233 | if (!dqp) | ||
234 | return -ENOMEM; | ||
235 | dqp->qpn = qpn; | ||
236 | list_add_tail(&dqp->list, &entry->duplicates); | ||
237 | |||
238 | return 0; | ||
239 | } | ||
240 | |||
241 | /* Check whether a qpn is a duplicate on steering entry | ||
242 | * If so, it should not be removed from mgm */ | ||
243 | static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, | ||
244 | enum mlx4_steer_type steer, | ||
245 | unsigned int index, u32 qpn) | ||
246 | { | ||
247 | struct mlx4_steer *s_steer; | ||
248 | struct mlx4_steer_index *tmp_entry, *entry = NULL; | ||
249 | struct mlx4_promisc_qp *dqp, *tmp_dqp; | ||
250 | u8 pf_num; | ||
251 | |||
252 | pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); | ||
253 | s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
254 | |||
255 | /* if qp is not promisc, it cannot be duplicated */ | ||
256 | if (!get_promisc_qp(dev, pf_num, steer, qpn)) | ||
257 | return false; | ||
258 | |||
259 | /* The qp is a promisc qp, so it is a duplicate on this index | ||
260 | * Find the index entry, and remove the duplicate */ | ||
261 | list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { | ||
262 | if (tmp_entry->index == index) { | ||
263 | entry = tmp_entry; | ||
264 | break; | ||
265 | } | ||
266 | } | ||
267 | if (unlikely(!entry)) { | ||
268 | mlx4_warn(dev, "Steering entry for index %x is not registered\n", index); | ||
269 | return false; | ||
270 | } | ||
271 | list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) { | ||
272 | if (dqp->qpn == qpn) { | ||
273 | list_del(&dqp->list); | ||
274 | kfree(dqp); | ||
275 | } | ||
276 | } | ||
277 | return true; | ||
278 | } | ||
279 | |||
280 | /* If a steering entry contains only promisc QPs, it can be removed. */ | ||
281 | static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, | ||
282 | enum mlx4_steer_type steer, | ||
283 | unsigned int index, u32 tqpn) | ||
284 | { | ||
285 | struct mlx4_steer *s_steer; | ||
286 | struct mlx4_cmd_mailbox *mailbox; | ||
287 | struct mlx4_mgm *mgm; | ||
288 | struct mlx4_steer_index *entry = NULL, *tmp_entry; | ||
289 | u32 qpn; | ||
290 | u32 members_count; | ||
291 | bool ret = false; | ||
292 | int i; | ||
293 | u8 pf_num; | ||
294 | |||
295 | pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); | ||
296 | s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
297 | |||
298 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
299 | if (IS_ERR(mailbox)) | ||
300 | return false; | ||
301 | mgm = mailbox->buf; | ||
302 | |||
303 | if (mlx4_READ_ENTRY(dev, index, mailbox)) | ||
304 | goto out; | ||
305 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | ||
306 | for (i = 0; i < members_count; i++) { | ||
307 | qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; | ||
308 | if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) { | ||
309 | /* the qp is not promisc, the entry can't be removed */ | ||
310 | goto out; | ||
311 | } | ||
312 | } | ||
313 | /* All the qps currently registered for this entry are promiscuous, | ||
314 | * now check for duplicates */ | ||
315 | ret = true; | ||
316 | list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) { | ||
317 | if (entry->index == index) { | ||
318 | if (list_empty(&entry->duplicates)) { | ||
319 | list_del(&entry->list); | ||
320 | kfree(entry); | ||
321 | } else { | ||
322 | /* This entry contains duplicates so it shouldn't be removed */ | ||
323 | ret = false; | ||
324 | goto out; | ||
325 | } | ||
326 | } | ||
327 | } | ||
328 | |||
329 | out: | ||
330 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
331 | return ret; | ||
332 | } | ||
333 | |||
334 | static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port, | ||
335 | enum mlx4_steer_type steer, u32 qpn) | ||
336 | { | ||
337 | struct mlx4_steer *s_steer; | ||
338 | struct mlx4_cmd_mailbox *mailbox; | ||
339 | struct mlx4_mgm *mgm; | ||
340 | struct mlx4_steer_index *entry; | ||
341 | struct mlx4_promisc_qp *pqp; | ||
342 | struct mlx4_promisc_qp *dqp; | ||
343 | u32 members_count; | ||
344 | u32 prot; | ||
345 | int i; | ||
346 | bool found; | ||
347 | int last_index; | ||
348 | int err; | ||
349 | u8 pf_num; | ||
350 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
351 | pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); | ||
352 | s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
353 | |||
354 | mutex_lock(&priv->mcg_table.mutex); | ||
355 | |||
356 | if (get_promisc_qp(dev, pf_num, steer, qpn)) { | ||
357 | err = 0; /* Nothing to do, it already exists */ | ||
358 | goto out_mutex; | ||
359 | } | ||
360 | |||
361 | pqp = kmalloc(sizeof *pqp, GFP_KERNEL); | ||
362 | if (!pqp) { | ||
363 | err = -ENOMEM; | ||
364 | goto out_mutex; | ||
365 | } | ||
366 | pqp->qpn = qpn; | ||
367 | |||
368 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
369 | if (IS_ERR(mailbox)) { | ||
370 | err = -ENOMEM; | ||
371 | goto out_alloc; | ||
372 | } | ||
373 | mgm = mailbox->buf; | ||
374 | |||
375 | /* the promisc qp needs to be added to each of the steering | ||
376 | * entries; if it is already present there, it is added as a | ||
377 | * duplicate for that entry */ | ||
378 | list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { | ||
379 | err = mlx4_READ_ENTRY(dev, entry->index, mailbox); | ||
380 | if (err) | ||
381 | goto out_mailbox; | ||
382 | |||
383 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | ||
384 | prot = be32_to_cpu(mgm->members_count) >> 30; | ||
385 | found = false; | ||
386 | for (i = 0; i < members_count; i++) { | ||
387 | if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { | ||
388 | /* Entry already exists, add to duplicates */ | ||
389 | dqp = kmalloc(sizeof *dqp, GFP_KERNEL); | ||
390 | if (!dqp) { | ||
391 | err = -ENOMEM; goto out_mailbox; } | ||
392 | dqp->qpn = qpn; | ||
393 | list_add_tail(&dqp->list, &entry->duplicates); | ||
394 | found = true; | ||
395 | } | ||
396 | } | ||
397 | if (!found) { | ||
398 | /* Need to add the qpn to mgm */ | ||
399 | if (members_count == MLX4_QP_PER_MGM) { | ||
400 | /* entry is full */ | ||
401 | err = -ENOMEM; | ||
402 | goto out_mailbox; | ||
403 | } | ||
404 | mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK); | ||
405 | mgm->members_count = cpu_to_be32(members_count | (prot << 30)); | ||
406 | err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); | ||
407 | if (err) | ||
408 | goto out_mailbox; | ||
409 | } | ||
410 | last_index = entry->index; | ||
411 | } | ||
412 | |||
413 | /* add the new qpn to list of promisc qps */ | ||
414 | list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); | ||
415 | /* now add all the promisc qps to the default entry */ | ||
416 | memset(mgm, 0, sizeof *mgm); | ||
417 | members_count = 0; | ||
418 | list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) | ||
419 | mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); | ||
420 | mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); | ||
421 | |||
422 | err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox); | ||
423 | if (err) | ||
424 | goto out_list; | ||
425 | |||
426 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
427 | mutex_unlock(&priv->mcg_table.mutex); | ||
428 | return 0; | ||
429 | |||
430 | out_list: | ||
431 | list_del(&pqp->list); | ||
432 | out_mailbox: | ||
433 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
434 | out_alloc: | ||
435 | kfree(pqp); | ||
436 | out_mutex: | ||
437 | mutex_unlock(&priv->mcg_table.mutex); | ||
438 | return err; | ||
439 | } | ||
440 | |||
441 | static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port, | ||
442 | enum mlx4_steer_type steer, u32 qpn) | ||
443 | { | ||
444 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
445 | struct mlx4_steer *s_steer; | ||
446 | struct mlx4_cmd_mailbox *mailbox; | ||
447 | struct mlx4_mgm *mgm; | ||
448 | struct mlx4_steer_index *entry; | ||
449 | struct mlx4_promisc_qp *pqp; | ||
450 | struct mlx4_promisc_qp *dqp; | ||
451 | u32 members_count; | ||
452 | bool found; | ||
453 | bool back_to_list = false; | ||
454 | int loc, i; | ||
455 | int err; | ||
456 | u8 pf_num; | ||
457 | |||
458 | pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); | ||
459 | s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
460 | mutex_lock(&priv->mcg_table.mutex); | ||
461 | |||
462 | pqp = get_promisc_qp(dev, pf_num, steer, qpn); | ||
463 | if (unlikely(!pqp)) { | ||
464 | mlx4_warn(dev, "QP %x is not a promiscuous QP\n", qpn); | ||
465 | /* nothing to do */ | ||
466 | err = 0; | ||
467 | goto out_mutex; | ||
468 | } | ||
469 | |||
470 | /* remove from the list of promisc qps; pqp is freed at out_list | ||
471 | * below, or put back on the list if the HW update fails */ | ||
472 | list_del(&pqp->list); | ||
473 | |||
474 | /* set the default entry not to include the removed one */ | ||
475 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
476 | if (IS_ERR(mailbox)) { | ||
477 | err = -ENOMEM; | ||
478 | back_to_list = true; | ||
479 | goto out_list; | ||
480 | } | ||
481 | mgm = mailbox->buf; | ||
482 | members_count = 0; | ||
483 | list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) | ||
484 | mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); | ||
485 | mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); | ||
486 | |||
487 | err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox); | ||
488 | if (err) | ||
489 | goto out_mailbox; | ||
490 | |||
491 | /* remove the qp from all the steering entries */ | ||
492 | list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { | ||
493 | found = false; | ||
494 | list_for_each_entry(dqp, &entry->duplicates, list) { | ||
495 | if (dqp->qpn == qpn) { | ||
496 | found = true; | ||
497 | break; | ||
498 | } | ||
499 | } | ||
500 | if (found) { | ||
501 | /* a duplicate, no need to change the mgm, | ||
502 | * only update the duplicates list */ | ||
503 | list_del(&dqp->list); | ||
504 | kfree(dqp); | ||
505 | } else { | ||
506 | err = mlx4_READ_ENTRY(dev, entry->index, mailbox); | ||
507 | if (err) | ||
508 | goto out_mailbox; | ||
509 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | ||
510 | for (loc = -1, i = 0; i < members_count; ++i) | ||
511 | if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) | ||
512 | loc = i; | ||
513 | |||
514 | mgm->members_count = cpu_to_be32(--members_count | | ||
515 | (MLX4_PROT_ETH << 30)); | ||
516 | mgm->qp[loc] = mgm->qp[i - 1]; | ||
517 | mgm->qp[i - 1] = 0; | ||
518 | |||
519 | err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); | ||
520 | if (err) | ||
521 | goto out_mailbox; | ||
522 | } | ||
523 | |||
524 | } | ||
525 | |||
526 | out_mailbox: | ||
527 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
528 | out_list: | ||
529 | if (back_to_list) | ||
530 | list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); | ||
531 | out_mutex: | ||
532 | mutex_unlock(&priv->mcg_table.mutex); | ||
533 | return err; | ||
534 | } | ||
535 | |||
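Both add_promisc_qp() and remove_promisc_qp() end the same way: the per-steer default entry is rebuilt from scratch by walking promisc_qps[steer], emitting one masked QPN per member, and pushing the buffer with mlx4_WRITE_PROMISC(). A minimal user-space model of that rebuild step (types and list handling simplified; this is a sketch, not driver code):

#include <stdint.h>

#define MGM_QPN_MASK 0x00ffffffu

struct promisc_qp {
	uint32_t qpn;
	struct promisc_qp *next;	/* stand-in for the kernel's list_head */
};

/* Fill the default entry's qp[] from the current promisc list and pack
 * the count with the protocol, as the driver does before WRITE_PROMISC. */
static uint32_t rebuild_default_entry(const struct promisc_qp *head,
				      uint32_t *qp, uint32_t prot)
{
	uint32_t n = 0;

	for (const struct promisc_qp *p = head; p; p = p->next)
		qp[n++] = p->qpn & MGM_QPN_MASK;	/* be32 in the real MGM */
	return n | (prot << 30);			/* members_count word */
}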
82 | /* | 536 | /* |
83 | * Caller must hold MCG table semaphore. gid and mgm parameters must | 537 | * Caller must hold MCG table semaphore. gid and mgm parameters must |
84 | * be properly aligned for command interface. | 538 | * be properly aligned for command interface. |
@@ -94,15 +548,17 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox | |||
94 | * If no AMGM exists for given gid, *index = -1, *prev = index of last | 548 | * If no AMGM exists for given gid, *index = -1, *prev = index of last |
95 | * entry in hash chain and *mgm holds end of hash chain. | 549 | * entry in hash chain and *mgm holds end of hash chain. |
96 | */ | 550 | */ |
97 | static int find_mgm(struct mlx4_dev *dev, | 551 | static int find_entry(struct mlx4_dev *dev, u8 port, |
98 | u8 *gid, enum mlx4_protocol protocol, | 552 | u8 *gid, enum mlx4_protocol prot, |
99 | struct mlx4_cmd_mailbox *mgm_mailbox, | 553 | enum mlx4_steer_type steer, |
100 | u16 *hash, int *prev, int *index) | 554 | struct mlx4_cmd_mailbox *mgm_mailbox, |
555 | u16 *hash, int *prev, int *index) | ||
101 | { | 556 | { |
102 | struct mlx4_cmd_mailbox *mailbox; | 557 | struct mlx4_cmd_mailbox *mailbox; |
103 | struct mlx4_mgm *mgm = mgm_mailbox->buf; | 558 | struct mlx4_mgm *mgm = mgm_mailbox->buf; |
104 | u8 *mgid; | 559 | u8 *mgid; |
105 | int err; | 560 | int err; |
561 | u8 op_mod = (prot == MLX4_PROT_ETH) ? !!(dev->caps.vep_mc_steering) : 0; | ||
106 | 562 | ||
107 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 563 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
108 | if (IS_ERR(mailbox)) | 564 | if (IS_ERR(mailbox)) |
@@ -111,7 +567,7 @@ static int find_mgm(struct mlx4_dev *dev, | |||
111 | 567 | ||
112 | memcpy(mgid, gid, 16); | 568 | memcpy(mgid, gid, 16); |
113 | 569 | ||
114 | err = mlx4_MGID_HASH(dev, mailbox, hash); | 570 | err = mlx4_GID_HASH(dev, mailbox, hash, op_mod); |
115 | mlx4_free_cmd_mailbox(dev, mailbox); | 571 | mlx4_free_cmd_mailbox(dev, mailbox); |
116 | if (err) | 572 | if (err) |
117 | return err; | 573 | return err; |
@@ -123,11 +579,11 @@ static int find_mgm(struct mlx4_dev *dev, | |||
123 | *prev = -1; | 579 | *prev = -1; |
124 | 580 | ||
125 | do { | 581 | do { |
126 | err = mlx4_READ_MCG(dev, *index, mgm_mailbox); | 582 | err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox); |
127 | if (err) | 583 | if (err) |
128 | return err; | 584 | return err; |
129 | 585 | ||
130 | if (!memcmp(mgm->gid, zero_gid, 16)) { | 586 | if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { |
131 | if (*index != *hash) { | 587 | if (*index != *hash) { |
132 | mlx4_err(dev, "Found zero MGID in AMGM.\n"); | 588 | mlx4_err(dev, "Found zero MGID in AMGM.\n"); |
133 | err = -EINVAL; | 589 | err = -EINVAL; |
@@ -136,7 +592,7 @@ static int find_mgm(struct mlx4_dev *dev, | |||
136 | } | 592 | } |
137 | 593 | ||
138 | if (!memcmp(mgm->gid, gid, 16) && | 594 | if (!memcmp(mgm->gid, gid, 16) && |
139 | be32_to_cpu(mgm->members_count) >> 30 == protocol) | 595 | be32_to_cpu(mgm->members_count) >> 30 == prot) |
140 | return err; | 596 | return err; |
141 | 597 | ||
142 | *prev = *index; | 598 | *prev = *index; |
@@ -147,8 +603,9 @@ static int find_mgm(struct mlx4_dev *dev, | |||
147 | return err; | 603 | return err; |
148 | } | 604 | } |
149 | 605 | ||
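find_entry() (renamed from find_mgm()) resolves a GID to an MGM slot by hashing into the table and then following the AMGM chain through next_gid_index (stored shifted left by 6) until it hits a match or an empty entry. A simplified host-order model of that walk, with the chain flattened to plain ints (illustrative only; the real contract also distinguishes the empty-at-hash case so the caller can create the entry in place):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct mgm_entry {
	uint8_t  gid[16];
	uint32_t members_count;	/* low 24 bits: count, top 2 bits: prot */
	int      next;		/* -1 ends the chain; models next_gid_index >> 6 */
};

/* Return the index holding gid/prot, or -1; *prev tracks the chain tail
 * so a caller can link a new AMGM entry, as mlx4_qp_attach_common() does. */
static int chain_lookup(const struct mgm_entry *tbl, int hash,
			const uint8_t gid[16], uint32_t prot, int *prev)
{
	*prev = -1;
	for (int idx = hash; idx != -1; idx = tbl[idx].next) {
		if (!(tbl[idx].members_count & 0xffffff))
			return -1;	/* empty entry terminates the chain */
		if (!memcmp(tbl[idx].gid, gid, 16) &&
		    tbl[idx].members_count >> 30 == prot)
			return idx;
		*prev = idx;
	}
	return -1;
}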
150 | int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | 606 | int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], |
151 | int block_mcast_loopback, enum mlx4_protocol protocol) | 607 | int block_mcast_loopback, enum mlx4_protocol prot, |
608 | enum mlx4_steer_type steer) | ||
152 | { | 609 | { |
153 | struct mlx4_priv *priv = mlx4_priv(dev); | 610 | struct mlx4_priv *priv = mlx4_priv(dev); |
154 | struct mlx4_cmd_mailbox *mailbox; | 611 | struct mlx4_cmd_mailbox *mailbox; |
@@ -159,6 +616,8 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
159 | int link = 0; | 616 | int link = 0; |
160 | int i; | 617 | int i; |
161 | int err; | 618 | int err; |
619 | u8 port = gid[5]; | ||
620 | u8 new_entry = 0; | ||
162 | 621 | ||
163 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 622 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
164 | if (IS_ERR(mailbox)) | 623 | if (IS_ERR(mailbox)) |
@@ -166,14 +625,16 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
166 | mgm = mailbox->buf; | 625 | mgm = mailbox->buf; |
167 | 626 | ||
168 | mutex_lock(&priv->mcg_table.mutex); | 627 | mutex_lock(&priv->mcg_table.mutex); |
169 | 628 | err = find_entry(dev, port, gid, prot, steer, | |
170 | err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index); | 629 | mailbox, &hash, &prev, &index); |
171 | if (err) | 630 | if (err) |
172 | goto out; | 631 | goto out; |
173 | 632 | ||
174 | if (index != -1) { | 633 | if (index != -1) { |
175 | if (!memcmp(mgm->gid, zero_gid, 16)) | 634 | if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { |
635 | new_entry = 1; | ||
176 | memcpy(mgm->gid, gid, 16); | 636 | memcpy(mgm->gid, gid, 16); |
637 | } | ||
177 | } else { | 638 | } else { |
178 | link = 1; | 639 | link = 1; |
179 | 640 | ||
@@ -209,26 +670,34 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
209 | else | 670 | else |
210 | mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); | 671 | mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); |
211 | 672 | ||
212 | mgm->members_count = cpu_to_be32(members_count | (u32) protocol << 30); | 673 | mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30); |
213 | 674 | ||
214 | err = mlx4_WRITE_MCG(dev, index, mailbox); | 675 | err = mlx4_WRITE_ENTRY(dev, index, mailbox); |
215 | if (err) | 676 | if (err) |
216 | goto out; | 677 | goto out; |
217 | 678 | ||
218 | if (!link) | 679 | if (!link) |
219 | goto out; | 680 | goto out; |
220 | 681 | ||
221 | err = mlx4_READ_MCG(dev, prev, mailbox); | 682 | err = mlx4_READ_ENTRY(dev, prev, mailbox); |
222 | if (err) | 683 | if (err) |
223 | goto out; | 684 | goto out; |
224 | 685 | ||
225 | mgm->next_gid_index = cpu_to_be32(index << 6); | 686 | mgm->next_gid_index = cpu_to_be32(index << 6); |
226 | 687 | ||
227 | err = mlx4_WRITE_MCG(dev, prev, mailbox); | 688 | err = mlx4_WRITE_ENTRY(dev, prev, mailbox); |
228 | if (err) | 689 | if (err) |
229 | goto out; | 690 | goto out; |
230 | 691 | ||
231 | out: | 692 | out: |
693 | if (prot == MLX4_PROT_ETH) { | ||
694 | /* manage the steering entry for promisc mode */ | ||
695 | if (new_entry) | ||
696 | new_steering_entry(dev, 0, port, steer, index, qp->qpn); | ||
697 | else | ||
698 | existing_steering_entry(dev, 0, port, steer, | ||
699 | index, qp->qpn); | ||
700 | } | ||
232 | if (err && link && index != -1) { | 701 | if (err && link && index != -1) { |
233 | if (index < dev->caps.num_mgms) | 702 | if (index < dev->caps.num_mgms) |
234 | mlx4_warn(dev, "Got AMGM index %d < %d", | 703 | mlx4_warn(dev, "Got AMGM index %d < %d", |
@@ -242,10 +711,9 @@ out: | |||
242 | mlx4_free_cmd_mailbox(dev, mailbox); | 711 | mlx4_free_cmd_mailbox(dev, mailbox); |
243 | return err; | 712 | return err; |
244 | } | 713 | } |
245 | EXPORT_SYMBOL_GPL(mlx4_multicast_attach); | ||
246 | 714 | ||
247 | int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | 715 | int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], |
248 | enum mlx4_protocol protocol) | 716 | enum mlx4_protocol prot, enum mlx4_steer_type steer) |
249 | { | 717 | { |
250 | struct mlx4_priv *priv = mlx4_priv(dev); | 718 | struct mlx4_priv *priv = mlx4_priv(dev); |
251 | struct mlx4_cmd_mailbox *mailbox; | 719 | struct mlx4_cmd_mailbox *mailbox; |
@@ -255,6 +723,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
255 | int prev, index; | 723 | int prev, index; |
256 | int i, loc; | 724 | int i, loc; |
257 | int err; | 725 | int err; |
726 | u8 port = gid[5]; | ||
727 | bool removed_entry = false; | ||
258 | 728 | ||
259 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 729 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
260 | if (IS_ERR(mailbox)) | 730 | if (IS_ERR(mailbox)) |
@@ -263,7 +733,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
263 | 733 | ||
264 | mutex_lock(&priv->mcg_table.mutex); | 734 | mutex_lock(&priv->mcg_table.mutex); |
265 | 735 | ||
266 | err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index); | 736 | err = find_entry(dev, port, gid, prot, steer, |
737 | mailbox, &hash, &prev, &index); | ||
267 | if (err) | 738 | if (err) |
268 | goto out; | 739 | goto out; |
269 | 740 | ||
@@ -273,6 +744,11 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
273 | goto out; | 744 | goto out; |
274 | } | 745 | } |
275 | 746 | ||
747 | /* if this qp is also a promisc qp, it shouldn't be removed */ | ||
748 | if (prot == MLX4_PROT_ETH && | ||
749 | check_duplicate_entry(dev, 0, port, steer, index, qp->qpn)) | ||
750 | goto out; | ||
751 | |||
276 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | 752 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; |
277 | for (loc = -1, i = 0; i < members_count; ++i) | 753 | for (loc = -1, i = 0; i < members_count; ++i) |
278 | if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) | 754 | if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) |
@@ -285,26 +761,31 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
285 | } | 761 | } |
286 | 762 | ||
287 | 763 | ||
288 | mgm->members_count = cpu_to_be32(--members_count | (u32) protocol << 30); | 764 | mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30); |
289 | mgm->qp[loc] = mgm->qp[i - 1]; | 765 | mgm->qp[loc] = mgm->qp[i - 1]; |
290 | mgm->qp[i - 1] = 0; | 766 | mgm->qp[i - 1] = 0; |
291 | 767 | ||
292 | if (i != 1) { | 768 | if (prot == MLX4_PROT_ETH) |
293 | err = mlx4_WRITE_MCG(dev, index, mailbox); | 769 | removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn); |
770 | if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) { | ||
771 | err = mlx4_WRITE_ENTRY(dev, index, mailbox); | ||
294 | goto out; | 772 | goto out; |
295 | } | 773 | } |
296 | 774 | ||
775 | /* We are going to delete the entry, members count should be 0 */ | ||
776 | mgm->members_count = cpu_to_be32((u32) prot << 30); | ||
777 | |||
297 | if (prev == -1) { | 778 | if (prev == -1) { |
298 | /* Remove entry from MGM */ | 779 | /* Remove entry from MGM */ |
299 | int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; | 780 | int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; |
300 | if (amgm_index) { | 781 | if (amgm_index) { |
301 | err = mlx4_READ_MCG(dev, amgm_index, mailbox); | 782 | err = mlx4_READ_ENTRY(dev, amgm_index, mailbox); |
302 | if (err) | 783 | if (err) |
303 | goto out; | 784 | goto out; |
304 | } else | 785 | } else |
305 | memset(mgm->gid, 0, 16); | 786 | memset(mgm->gid, 0, 16); |
306 | 787 | ||
307 | err = mlx4_WRITE_MCG(dev, index, mailbox); | 788 | err = mlx4_WRITE_ENTRY(dev, index, mailbox); |
308 | if (err) | 789 | if (err) |
309 | goto out; | 790 | goto out; |
310 | 791 | ||
@@ -319,13 +800,13 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
319 | } else { | 800 | } else { |
320 | /* Remove entry from AMGM */ | 801 | /* Remove entry from AMGM */ |
321 | int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; | 802 | int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; |
322 | err = mlx4_READ_MCG(dev, prev, mailbox); | 803 | err = mlx4_READ_ENTRY(dev, prev, mailbox); |
323 | if (err) | 804 | if (err) |
324 | goto out; | 805 | goto out; |
325 | 806 | ||
326 | mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); | 807 | mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); |
327 | 808 | ||
328 | err = mlx4_WRITE_MCG(dev, prev, mailbox); | 809 | err = mlx4_WRITE_ENTRY(dev, prev, mailbox); |
329 | if (err) | 810 | if (err) |
330 | goto out; | 811 | goto out; |
331 | 812 | ||
@@ -343,8 +824,85 @@ out: | |||
343 | mlx4_free_cmd_mailbox(dev, mailbox); | 824 | mlx4_free_cmd_mailbox(dev, mailbox); |
344 | return err; | 825 | return err; |
345 | } | 826 | } |
827 |||
829 | int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | ||
830 | int block_mcast_loopback, enum mlx4_protocol prot) | ||
831 | { | ||
832 | enum mlx4_steer_type steer; | ||
833 | |||
834 | steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER; | ||
835 | |||
836 | if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering) | ||
837 | return 0; | ||
838 | |||
839 | if (prot == MLX4_PROT_ETH) | ||
840 | gid[7] |= (steer << 1); | ||
841 | |||
842 | return mlx4_qp_attach_common(dev, qp, gid, | ||
843 | block_mcast_loopback, prot, | ||
844 | steer); | ||
845 | } | ||
846 | EXPORT_SYMBOL_GPL(mlx4_multicast_attach); | ||
847 | |||
848 | int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | ||
849 | enum mlx4_protocol prot) | ||
850 | { | ||
851 | enum mlx4_steer_type steer; | ||
852 | |||
853 | steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER; | ||
854 | |||
855 | if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering) | ||
856 | return 0; | ||
857 | |||
858 | if (prot == MLX4_PROT_ETH) | ||
859 | gid[7] |= (steer << 1); | ||
861 | |||
862 | return mlx4_qp_detach_common(dev, qp, gid, prot, steer); | ||
863 | } | ||
346 | EXPORT_SYMBOL_GPL(mlx4_multicast_detach); | 864 | EXPORT_SYMBOL_GPL(mlx4_multicast_detach); |
347 | 865 | ||
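For MLX4_PROT_ETH the wrappers above fold the steering metadata into the GID itself: the MAC sits in bytes 10..15 (which is why is_valid_ether_addr(&gid[10]) selects the steer type), the port in byte 5, and the steer type shifted into byte 7, matching what mlx4_uc_steer_add() builds in port.c further down. A small host-side sketch of that encoding (function name and steer values illustrative):

#include <stdint.h>
#include <string.h>
#include <endian.h>	/* htobe64(), glibc/BSD */

/* Build the 16-byte pseudo-GID used to key an Ethernet steering entry. */
static void build_eth_gid(uint8_t gid[16], uint64_t mac, uint8_t port,
			  unsigned int steer)
{
	uint64_t m = htobe64((mac & 0xffffffffffffULL) << 16);

	memset(gid, 0, 16);
	memcpy(&gid[10], &m, 6);	/* MAC, network order (ETH_ALEN) */
	gid[5] = port;
	gid[7] = steer << 1;		/* MLX4_UC_STEER / MLX4_MC_STEER */
}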
866 | |||
867 | int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) | ||
868 | { | ||
869 | if (!dev->caps.vep_mc_steering) | ||
870 | return 0; | ||
871 |||
873 | return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn); | ||
874 | } | ||
875 | EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add); | ||
876 | |||
877 | int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) | ||
878 | { | ||
879 | if (!dev->caps.vep_mc_steering) | ||
880 | return 0; | ||
881 |||
883 | return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn); | ||
884 | } | ||
885 | EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove); | ||
886 | |||
887 | int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) | ||
888 | { | ||
889 | if (!dev->caps.vep_mc_steering) | ||
890 | return 0; | ||
891 |||
893 | return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn); | ||
894 | } | ||
895 | EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add); | ||
896 | |||
897 | int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) | ||
898 | { | ||
899 | if (!dev->caps.vep_mc_steering) | ||
900 | return 0; | ||
901 | |||
902 | return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn); | ||
903 | } | ||
904 | EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove); | ||
905 | |||
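A hedged usage sketch for the four helpers just exported: an Ethernet driver would flip multicast promiscuity on and off as netdev flags change, remembering state in the MLX4_EN_FLAG_MC_PROMISC bit added to mlx4_en_priv below. The function and its parameters here are illustrative, not taken from this patch:

static void sync_mc_promisc(struct mlx4_dev *dev, struct net_device *ndev,
			    u32 base_qpn, u8 port, u32 *flags)
{
	bool want = ndev->flags & IFF_ALLMULTI;
	bool have = *flags & MLX4_EN_FLAG_MC_PROMISC;

	if (want && !have) {
		if (!mlx4_multicast_promisc_add(dev, base_qpn, port))
			*flags |= MLX4_EN_FLAG_MC_PROMISC;
	} else if (!want && have) {
		if (!mlx4_multicast_promisc_remove(dev, base_qpn, port))
			*flags &= ~MLX4_EN_FLAG_MC_PROMISC;
	}
}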
348 | int mlx4_init_mcg_table(struct mlx4_dev *dev) | 906 | int mlx4_init_mcg_table(struct mlx4_dev *dev) |
349 | { | 907 | { |
350 | struct mlx4_priv *priv = mlx4_priv(dev); | 908 | struct mlx4_priv *priv = mlx4_priv(dev); |
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index 0da5bb7285b4..c1e0e5f1bcdb 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h | |||
@@ -105,6 +105,7 @@ struct mlx4_bitmap { | |||
105 | u32 max; | 105 | u32 max; |
106 | u32 reserved_top; | 106 | u32 reserved_top; |
107 | u32 mask; | 107 | u32 mask; |
108 | u32 avail; | ||
108 | spinlock_t lock; | 109 | spinlock_t lock; |
109 | unsigned long *table; | 110 | unsigned long *table; |
110 | }; | 111 | }; |
@@ -162,6 +163,27 @@ struct mlx4_fw { | |||
162 | u8 catas_bar; | 163 | u8 catas_bar; |
163 | }; | 164 | }; |
164 | 165 | ||
166 | #define MGM_QPN_MASK 0x00FFFFFF | ||
167 | #define MGM_BLCK_LB_BIT 30 | ||
168 | |||
169 | struct mlx4_promisc_qp { | ||
170 | struct list_head list; | ||
171 | u32 qpn; | ||
172 | }; | ||
173 | |||
174 | struct mlx4_steer_index { | ||
175 | struct list_head list; | ||
176 | unsigned int index; | ||
177 | struct list_head duplicates; | ||
178 | }; | ||
179 | |||
180 | struct mlx4_mgm { | ||
181 | __be32 next_gid_index; | ||
182 | __be32 members_count; | ||
183 | u32 reserved[2]; | ||
184 | u8 gid[16]; | ||
185 | __be32 qp[MLX4_QP_PER_MGM]; | ||
186 | }; | ||
165 | struct mlx4_cmd { | 187 | struct mlx4_cmd { |
166 | struct pci_pool *pool; | 188 | struct pci_pool *pool; |
167 | void __iomem *hcr; | 189 | void __iomem *hcr; |
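The members_count word of struct mlx4_mgm does double duty: the low 24 bits count the entries in qp[] and the top two bits carry the enum mlx4_protocol value, which is why the mcg code masks with 0xffffff and shifts by 30 throughout. A host-order model of the packing (the MGM itself stores the word big-endian; the prot value below is illustrative):

#include <assert.h>
#include <stdint.h>

#define MGM_MEMBERS_MASK 0x00ffffffu

static uint32_t mgm_pack(uint32_t count, uint32_t prot)
{
	return (count & MGM_MEMBERS_MASK) | (prot << 30);
}

int main(void)
{
	uint32_t w = mgm_pack(5, 1);

	assert((w & MGM_MEMBERS_MASK) == 5);	/* member count */
	assert(w >> 30 == 1);			/* protocol */
	return 0;
}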
@@ -265,6 +287,10 @@ struct mlx4_vlan_table { | |||
265 | int max; | 287 | int max; |
266 | }; | 288 | }; |
267 | 289 | ||
290 | struct mlx4_mac_entry { | ||
291 | u64 mac; | ||
292 | }; | ||
293 | |||
268 | struct mlx4_port_info { | 294 | struct mlx4_port_info { |
269 | struct mlx4_dev *dev; | 295 | struct mlx4_dev *dev; |
270 | int port; | 296 | int port; |
@@ -272,7 +298,9 @@ struct mlx4_port_info { | |||
272 | struct device_attribute port_attr; | 298 | struct device_attribute port_attr; |
273 | enum mlx4_port_type tmp_type; | 299 | enum mlx4_port_type tmp_type; |
274 | struct mlx4_mac_table mac_table; | 300 | struct mlx4_mac_table mac_table; |
301 | struct radix_tree_root mac_tree; | ||
275 | struct mlx4_vlan_table vlan_table; | 302 | struct mlx4_vlan_table vlan_table; |
303 | int base_qpn; | ||
276 | }; | 304 | }; |
277 | 305 | ||
278 | struct mlx4_sense { | 306 | struct mlx4_sense { |
@@ -282,6 +310,17 @@ struct mlx4_sense { | |||
282 | struct delayed_work sense_poll; | 310 | struct delayed_work sense_poll; |
283 | }; | 311 | }; |
284 | 312 | ||
313 | struct mlx4_msix_ctl { | ||
314 | u64 pool_bm; | ||
315 | spinlock_t pool_lock; | ||
316 | }; | ||
317 | |||
318 | struct mlx4_steer { | ||
319 | struct list_head promisc_qps[MLX4_NUM_STEERS]; | ||
320 | struct list_head steer_entries[MLX4_NUM_STEERS]; | ||
321 | struct list_head high_prios; | ||
322 | }; | ||
323 | |||
285 | struct mlx4_priv { | 324 | struct mlx4_priv { |
286 | struct mlx4_dev dev; | 325 | struct mlx4_dev dev; |
287 | 326 | ||
@@ -313,6 +352,11 @@ struct mlx4_priv { | |||
313 | struct mlx4_port_info port[MLX4_MAX_PORTS + 1]; | 352 | struct mlx4_port_info port[MLX4_MAX_PORTS + 1]; |
314 | struct mlx4_sense sense; | 353 | struct mlx4_sense sense; |
315 | struct mutex port_mutex; | 354 | struct mutex port_mutex; |
355 | struct mlx4_msix_ctl msix_ctl; | ||
356 | struct mlx4_steer *steer; | ||
357 | struct list_head bf_list; | ||
358 | struct mutex bf_mutex; | ||
359 | struct io_mapping *bf_mapping; | ||
316 | }; | 360 | }; |
317 | 361 | ||
318 | static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) | 362 | static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) |
@@ -328,6 +372,7 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap); | |||
328 | void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); | 372 | void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); |
329 | u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align); | 373 | u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align); |
330 | void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt); | 374 | void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt); |
375 | u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap); | ||
331 | int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, | 376 | int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, |
332 | u32 reserved_bot, u32 resetrved_top); | 377 | u32 reserved_bot, u32 resetrved_top); |
333 | void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); | 378 | void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); |
@@ -403,4 +448,9 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); | |||
403 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); | 448 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); |
404 | int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); | 449 | int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); |
405 | 450 | ||
451 | int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | ||
452 | enum mlx4_protocol prot, enum mlx4_steer_type steer); | ||
453 | int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | ||
454 | int block_mcast_loopback, enum mlx4_protocol prot, | ||
455 | enum mlx4_steer_type steer); | ||
406 | #endif /* MLX4_H */ | 456 | #endif /* MLX4_H */ |
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h index dfed6a07c2d7..e30f6099c0de 100644 --- a/drivers/net/mlx4/mlx4_en.h +++ b/drivers/net/mlx4/mlx4_en.h | |||
@@ -49,8 +49,8 @@ | |||
49 | #include "en_port.h" | 49 | #include "en_port.h" |
50 | 50 | ||
51 | #define DRV_NAME "mlx4_en" | 51 | #define DRV_NAME "mlx4_en" |
52 | #define DRV_VERSION "1.5.1.6" | 52 | #define DRV_VERSION "1.5.4.1" |
53 | #define DRV_RELDATE "August 2010" | 53 | #define DRV_RELDATE "March 2011" |
54 | 54 | ||
55 | #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) | 55 | #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) |
56 | 56 | ||
@@ -62,6 +62,7 @@ | |||
62 | #define MLX4_EN_PAGE_SHIFT 12 | 62 | #define MLX4_EN_PAGE_SHIFT 12 |
63 | #define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) | 63 | #define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) |
64 | #define MAX_RX_RINGS 16 | 64 | #define MAX_RX_RINGS 16 |
65 | #define MIN_RX_RINGS 4 | ||
65 | #define TXBB_SIZE 64 | 66 | #define TXBB_SIZE 64 |
66 | #define HEADROOM (2048 / TXBB_SIZE + 1) | 67 | #define HEADROOM (2048 / TXBB_SIZE + 1) |
67 | #define STAMP_STRIDE 64 | 68 | #define STAMP_STRIDE 64 |
@@ -124,6 +125,7 @@ enum { | |||
124 | #define MLX4_EN_RX_SIZE_THRESH 1024 | 125 | #define MLX4_EN_RX_SIZE_THRESH 1024 |
125 | #define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH) | 126 | #define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH) |
126 | #define MLX4_EN_SAMPLE_INTERVAL 0 | 127 | #define MLX4_EN_SAMPLE_INTERVAL 0 |
128 | #define MLX4_EN_AVG_PKT_SMALL 256 | ||
127 | 129 | ||
128 | #define MLX4_EN_AUTO_CONF 0xffff | 130 | #define MLX4_EN_AUTO_CONF 0xffff |
129 | 131 | ||
@@ -214,6 +216,9 @@ struct mlx4_en_tx_desc { | |||
214 | 216 | ||
215 | #define MLX4_EN_USE_SRQ 0x01000000 | 217 | #define MLX4_EN_USE_SRQ 0x01000000 |
216 | 218 | ||
219 | #define MLX4_EN_CX3_LOW_ID 0x1000 | ||
220 | #define MLX4_EN_CX3_HIGH_ID 0x1005 | ||
221 | |||
217 | struct mlx4_en_rx_alloc { | 222 | struct mlx4_en_rx_alloc { |
218 | struct page *page; | 223 | struct page *page; |
219 | u16 offset; | 224 | u16 offset; |
@@ -243,6 +248,8 @@ struct mlx4_en_tx_ring { | |||
243 | unsigned long bytes; | 248 | unsigned long bytes; |
244 | unsigned long packets; | 249 | unsigned long packets; |
245 | spinlock_t comp_lock; | 250 | spinlock_t comp_lock; |
251 | struct mlx4_bf bf; | ||
252 | bool bf_enabled; | ||
246 | }; | 253 | }; |
247 | 254 | ||
248 | struct mlx4_en_rx_desc { | 255 | struct mlx4_en_rx_desc { |
@@ -453,6 +460,7 @@ struct mlx4_en_priv { | |||
453 | struct mlx4_en_rss_map rss_map; | 460 | struct mlx4_en_rss_map rss_map; |
454 | u32 flags; | 461 | u32 flags; |
455 | #define MLX4_EN_FLAG_PROMISC 0x1 | 462 | #define MLX4_EN_FLAG_PROMISC 0x1 |
463 | #define MLX4_EN_FLAG_MC_PROMISC 0x2 | ||
456 | u32 tx_ring_num; | 464 | u32 tx_ring_num; |
457 | u32 rx_ring_num; | 465 | u32 rx_ring_num; |
458 | u32 rx_skb_size; | 466 | u32 rx_skb_size; |
@@ -461,6 +469,7 @@ struct mlx4_en_priv { | |||
461 | u16 log_rx_info; | 469 | u16 log_rx_info; |
462 | 470 | ||
463 | struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS]; | 471 | struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS]; |
472 | int tx_vector; | ||
464 | struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; | 473 | struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; |
465 | struct mlx4_en_cq tx_cq[MAX_TX_RINGS]; | 474 | struct mlx4_en_cq tx_cq[MAX_TX_RINGS]; |
466 | struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; | 475 | struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; |
@@ -476,6 +485,13 @@ struct mlx4_en_priv { | |||
476 | int mc_addrs_cnt; | 485 | int mc_addrs_cnt; |
477 | struct mlx4_en_stat_out_mbox hw_stats; | 486 | struct mlx4_en_stat_out_mbox hw_stats; |
478 | int vids[128]; | 487 | int vids[128]; |
488 | bool wol; | ||
489 | }; | ||
490 | |||
491 | enum mlx4_en_wol { | ||
492 | MLX4_EN_WOL_MAGIC = (1ULL << 61), | ||
493 | MLX4_EN_WOL_ENABLED = (1ULL << 62), | ||
494 | MLX4_EN_WOL_DO_MODIFY = (1ULL << 63), | ||
479 | }; | 495 | }; |
480 | 496 | ||
481 | 497 | ||
@@ -486,12 +502,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
486 | int mlx4_en_start_port(struct net_device *dev); | 502 | int mlx4_en_start_port(struct net_device *dev); |
487 | void mlx4_en_stop_port(struct net_device *dev); | 503 | void mlx4_en_stop_port(struct net_device *dev); |
488 | 504 | ||
489 | void mlx4_en_free_resources(struct mlx4_en_priv *priv); | 505 | void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors); |
490 | int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); | 506 | int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); |
491 | 507 | ||
492 | int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, | 508 | int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, |
493 | int entries, int ring, enum cq_type mode); | 509 | int entries, int ring, enum cq_type mode); |
494 | void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | 510 | void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, |
511 | bool reserve_vectors); | ||
495 | int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | 512 | int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); |
496 | void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | 513 | void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); |
497 | int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | 514 | int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); |
@@ -503,7 +520,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); | |||
503 | netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); | 520 | netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); |
504 | 521 | ||
505 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, | 522 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, |
506 | u32 size, u16 stride); | 523 | int qpn, u32 size, u16 stride); |
507 | void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); | 524 | void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); |
508 | int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, | 525 | int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, |
509 | struct mlx4_en_tx_ring *ring, | 526 | struct mlx4_en_tx_ring *ring, |
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c index c4988d6bd5b2..1286b886dcea 100644 --- a/drivers/net/mlx4/pd.c +++ b/drivers/net/mlx4/pd.c | |||
@@ -32,12 +32,17 @@ | |||
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
35 | #include <linux/io-mapping.h> | ||
35 | 36 | ||
36 | #include <asm/page.h> | 37 | #include <asm/page.h> |
37 | 38 | ||
38 | #include "mlx4.h" | 39 | #include "mlx4.h" |
39 | #include "icm.h" | 40 | #include "icm.h" |
40 | 41 | ||
42 | enum { | ||
43 | MLX4_NUM_RESERVED_UARS = 8 | ||
44 | }; | ||
45 | |||
41 | int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn) | 46 | int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn) |
42 | { | 47 | { |
43 | struct mlx4_priv *priv = mlx4_priv(dev); | 48 | struct mlx4_priv *priv = mlx4_priv(dev); |
@@ -77,6 +82,7 @@ int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar) | |||
77 | return -ENOMEM; | 82 | return -ENOMEM; |
78 | 83 | ||
79 | uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index; | 84 | uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index; |
85 | uar->map = NULL; | ||
80 | 86 | ||
81 | return 0; | 87 | return 0; |
82 | } | 88 | } |
@@ -88,6 +94,102 @@ void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar) | |||
88 | } | 94 | } |
89 | EXPORT_SYMBOL_GPL(mlx4_uar_free); | 95 | EXPORT_SYMBOL_GPL(mlx4_uar_free); |
90 | 96 | ||
97 | int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf) | ||
98 | { | ||
99 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
100 | struct mlx4_uar *uar; | ||
101 | int err = 0; | ||
102 | int idx; | ||
103 | |||
104 | if (!priv->bf_mapping) | ||
105 | return -ENOMEM; | ||
106 | |||
107 | mutex_lock(&priv->bf_mutex); | ||
108 | if (!list_empty(&priv->bf_list)) | ||
109 | uar = list_entry(priv->bf_list.next, struct mlx4_uar, bf_list); | ||
110 | else { | ||
111 | if (mlx4_bitmap_avail(&priv->uar_table.bitmap) < MLX4_NUM_RESERVED_UARS) { | ||
112 | err = -ENOMEM; | ||
113 | goto out; | ||
114 | } | ||
115 | uar = kmalloc(sizeof *uar, GFP_KERNEL); | ||
116 | if (!uar) { | ||
117 | err = -ENOMEM; | ||
118 | goto out; | ||
119 | } | ||
120 | err = mlx4_uar_alloc(dev, uar); | ||
121 | if (err) | ||
122 | goto free_kmalloc; | ||
123 | |||
124 | uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE); | ||
125 | if (!uar->map) { | ||
126 | err = -ENOMEM; | ||
127 | goto free_uar; | ||
128 | } | ||
129 | |||
130 | uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT); | ||
131 | if (!uar->bf_map) { | ||
132 | err = -ENOMEM; | ||
133 | goto unmap_uar; | ||
134 | } | ||
135 | uar->free_bf_bmap = 0; | ||
136 | list_add(&uar->bf_list, &priv->bf_list); | ||
137 | } | ||
138 | |||
139 | bf->uar = uar; | ||
140 | idx = ffz(uar->free_bf_bmap); | ||
141 | uar->free_bf_bmap |= 1 << idx; | ||
143 | bf->offset = 0; | ||
144 | bf->buf_size = dev->caps.bf_reg_size / 2; | ||
145 | bf->reg = uar->bf_map + idx * dev->caps.bf_reg_size; | ||
146 | if (uar->free_bf_bmap == (1 << dev->caps.bf_regs_per_page) - 1) | ||
147 | list_del_init(&uar->bf_list); | ||
148 | |||
149 | goto out; | ||
150 | |||
151 | unmap_uar: | ||
152 | bf->uar = NULL; | ||
153 | iounmap(uar->map); | ||
154 | |||
155 | free_uar: | ||
156 | mlx4_uar_free(dev, uar); | ||
157 | |||
158 | free_kmalloc: | ||
159 | kfree(uar); | ||
160 | |||
161 | out: | ||
162 | mutex_unlock(&priv->bf_mutex); | ||
163 | return err; | ||
164 | } | ||
165 | EXPORT_SYMBOL_GPL(mlx4_bf_alloc); | ||
166 | |||
167 | void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf) | ||
168 | { | ||
169 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
170 | int idx; | ||
171 | |||
172 | if (!bf->uar || !bf->uar->bf_map) | ||
173 | return; | ||
174 | |||
175 | mutex_lock(&priv->bf_mutex); | ||
176 | idx = (bf->reg - bf->uar->bf_map) / dev->caps.bf_reg_size; | ||
177 | bf->uar->free_bf_bmap &= ~(1 << idx); | ||
178 | if (!bf->uar->free_bf_bmap) { | ||
179 | if (!list_empty(&bf->uar->bf_list)) | ||
180 | list_del(&bf->uar->bf_list); | ||
181 | |||
182 | io_mapping_unmap(bf->uar->bf_map); | ||
183 | iounmap(bf->uar->map); | ||
184 | mlx4_uar_free(dev, bf->uar); | ||
185 | kfree(bf->uar); | ||
186 | } else if (list_empty(&bf->uar->bf_list)) | ||
187 | list_add(&bf->uar->bf_list, &priv->bf_list); | ||
188 | |||
189 | mutex_unlock(&priv->bf_mutex); | ||
190 | } | ||
191 | EXPORT_SYMBOL_GPL(mlx4_bf_free); | ||
192 | |||
91 | int mlx4_init_uar_table(struct mlx4_dev *dev) | 193 | int mlx4_init_uar_table(struct mlx4_dev *dev) |
92 | { | 194 | { |
93 | if (dev->caps.num_uars <= 128) { | 195 | if (dev->caps.num_uars <= 128) { |
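A hedged sketch of how a consumer would use the new BlueFlame API: grab a BF register at ring setup, fall back to the plain doorbell path if none is available, and release it on teardown. This mirrors the bf/bf_enabled fields added to mlx4_en_tx_ring in mlx4_en.h; the function names are illustrative:

static void ring_setup_bf(struct mlx4_dev *dev, struct mlx4_bf *bf,
			  bool *bf_enabled)
{
	/* Failure is not fatal: descriptors simply go through the
	 * ordinary doorbell instead of the write-combining window at
	 * bf->reg (bf->buf_size bytes per half). */
	*bf_enabled = !mlx4_bf_alloc(dev, bf);
}

static void ring_teardown_bf(struct mlx4_dev *dev, struct mlx4_bf *bf,
			     bool bf_enabled)
{
	if (bf_enabled)
		mlx4_bf_free(dev, bf);
}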
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c index 451339559bdc..eca7d8596f87 100644 --- a/drivers/net/mlx4/port.c +++ b/drivers/net/mlx4/port.c | |||
@@ -90,12 +90,79 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, | |||
90 | return err; | 90 | return err; |
91 | } | 91 | } |
92 | 92 | ||
93 | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index) | 93 | static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, |
94 | u64 mac, int *qpn, u8 reserve) | ||
94 | { | 95 | { |
95 | struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table; | 96 | struct mlx4_qp qp; |
97 | u8 gid[16] = {0}; | ||
98 | int err; | ||
99 | |||
100 | if (reserve) { | ||
101 | err = mlx4_qp_reserve_range(dev, 1, 1, qpn); | ||
102 | if (err) { | ||
103 | mlx4_err(dev, "Failed to reserve qp for mac registration\n"); | ||
104 | return err; | ||
105 | } | ||
106 | } | ||
107 | qp.qpn = *qpn; | ||
108 | |||
109 | mac &= 0xffffffffffffULL; | ||
110 | mac = cpu_to_be64(mac << 16); | ||
111 | memcpy(&gid[10], &mac, ETH_ALEN); | ||
112 | gid[5] = port; | ||
113 | gid[7] = MLX4_UC_STEER << 1; | ||
114 | |||
115 | err = mlx4_qp_attach_common(dev, &qp, gid, 0, | ||
116 | MLX4_PROT_ETH, MLX4_UC_STEER); | ||
117 | if (err && reserve) | ||
118 | mlx4_qp_release_range(dev, *qpn, 1); | ||
119 | |||
120 | return err; | ||
121 | } | ||
122 | |||
123 | static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port, | ||
124 | u64 mac, int qpn, u8 free) | ||
125 | { | ||
126 | struct mlx4_qp qp; | ||
127 | u8 gid[16] = {0}; | ||
128 | |||
129 | qp.qpn = qpn; | ||
130 | mac &= 0xffffffffffffULL; | ||
131 | mac = cpu_to_be64(mac << 16); | ||
132 | memcpy(&gid[10], &mac, ETH_ALEN); | ||
133 | gid[5] = port; | ||
134 | gid[7] = MLX4_UC_STEER << 1; | ||
135 | |||
136 | mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER); | ||
137 | if (free) | ||
138 | mlx4_qp_release_range(dev, qpn, 1); | ||
139 | } | ||
140 | |||
141 | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap) | ||
142 | { | ||
143 | struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; | ||
144 | struct mlx4_mac_table *table = &info->mac_table; | ||
145 | struct mlx4_mac_entry *entry; | ||
96 | int i, err = 0; | 146 | int i, err = 0; |
97 | int free = -1; | 147 | int free = -1; |
98 | 148 | ||
149 | if (dev->caps.vep_uc_steering) { | ||
150 | err = mlx4_uc_steer_add(dev, port, mac, qpn, 1); | ||
151 | if (!err) { | ||
152 | entry = kmalloc(sizeof *entry, GFP_KERNEL); | ||
153 | if (!entry) { | ||
154 | mlx4_uc_steer_release(dev, port, mac, *qpn, 1); | ||
155 | return -ENOMEM; | ||
156 | } | ||
157 | entry->mac = mac; | ||
158 | err = radix_tree_insert(&info->mac_tree, *qpn, entry); | ||
159 | if (err) { | ||
160 | mlx4_uc_steer_release(dev, port, mac, *qpn, 1); | ||
161 | return err; | ||
162 | } | ||
163 | } else | ||
164 | return err; | ||
165 | } | ||
99 | mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); | 166 | mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); |
100 | mutex_lock(&table->mutex); | 167 | mutex_lock(&table->mutex); |
101 | for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { | 168 | for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { |
@@ -106,7 +173,6 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index) | |||
106 | 173 | ||
107 | if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { | 174 | if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { |
108 | /* MAC already registered, increase reference count */ | 175 | /* MAC already registered, increase reference count */
109 | *index = i; | ||
110 | ++table->refs[i]; | 176 | ++table->refs[i]; |
111 | goto out; | 177 | goto out; |
112 | } | 178 | } |
@@ -137,7 +203,8 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index) | |||
137 | goto out; | 203 | goto out; |
138 | } | 204 | } |
139 | 205 | ||
140 | *index = free; | 206 | if (!dev->caps.vep_uc_steering) |
207 | *qpn = info->base_qpn + free; | ||
141 | ++table->total; | 208 | ++table->total; |
142 | out: | 209 | out: |
143 | mutex_unlock(&table->mutex); | 210 | mutex_unlock(&table->mutex); |
@@ -145,20 +212,52 @@ out: | |||
145 | } | 212 | } |
146 | EXPORT_SYMBOL_GPL(mlx4_register_mac); | 213 | EXPORT_SYMBOL_GPL(mlx4_register_mac); |
147 | 214 | ||
148 | void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index) | 215 | static int validate_index(struct mlx4_dev *dev, |
216 | struct mlx4_mac_table *table, int index) | ||
149 | { | 217 | { |
150 | struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table; | 218 | int err = 0; |
151 | 219 | ||
152 | mutex_lock(&table->mutex); | 220 | if (index < 0 || index >= table->max || !table->entries[index]) { |
153 | if (!table->refs[index]) { | 221 | mlx4_warn(dev, "No valid MAC entry for the given index\n");
154 | mlx4_warn(dev, "No MAC entry for index %d\n", index); | 222 | err = -EINVAL; |
155 | goto out; | ||
156 | } | 223 | } |
157 | if (--table->refs[index]) { | 224 | return err; |
158 | mlx4_warn(dev, "Have more references for index %d," | 225 | } |
159 | "no need to modify MAC table\n", index); | 226 | |
160 | goto out; | 227 | static int find_index(struct mlx4_dev *dev, |
228 | struct mlx4_mac_table *table, u64 mac) | ||
229 | { | ||
230 | int i; | ||
231 | for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { | ||
232 | if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) | ||
233 | return i; | ||
161 | } | 234 | } |
235 | /* Mac not found */ | ||
236 | return -EINVAL; | ||
237 | } | ||
238 | |||
239 | void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn) | ||
240 | { | ||
241 | struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; | ||
242 | struct mlx4_mac_table *table = &info->mac_table; | ||
243 | int index = qpn - info->base_qpn; | ||
244 | struct mlx4_mac_entry *entry; | ||
245 | |||
246 | if (dev->caps.vep_uc_steering) { | ||
247 | entry = radix_tree_lookup(&info->mac_tree, qpn); | ||
248 | if (entry) { | ||
249 | mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1); | ||
250 | radix_tree_delete(&info->mac_tree, qpn); | ||
251 | index = find_index(dev, table, entry->mac); | ||
252 | kfree(entry); | ||
253 | } | ||
254 | } | ||
255 | |||
256 | mutex_lock(&table->mutex); | ||
257 | |||
258 | if (validate_index(dev, table, index)) | ||
259 | goto out; | ||
260 | |||
162 | table->entries[index] = 0; | 261 | table->entries[index] = 0; |
163 | mlx4_set_port_mac_table(dev, port, table->entries); | 262 | mlx4_set_port_mac_table(dev, port, table->entries); |
164 | --table->total; | 263 | --table->total; |
@@ -167,6 +266,44 @@ out: | |||
167 | } | 266 | } |
168 | EXPORT_SYMBOL_GPL(mlx4_unregister_mac); | 267 | EXPORT_SYMBOL_GPL(mlx4_unregister_mac); |
169 | 268 | ||
269 | int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap) | ||
270 | { | ||
271 | struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; | ||
272 | struct mlx4_mac_table *table = &info->mac_table; | ||
273 | int index = qpn - info->base_qpn; | ||
274 | struct mlx4_mac_entry *entry; | ||
275 | int err; | ||
276 | |||
277 | if (dev->caps.vep_uc_steering) { | ||
278 | entry = radix_tree_lookup(&info->mac_tree, qpn); | ||
279 | if (!entry) | ||
280 | return -EINVAL; | ||
281 | index = find_index(dev, table, entry->mac); | ||
282 | mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0); | ||
283 | entry->mac = new_mac; | ||
284 | err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0); | ||
285 | if (err || index < 0) | ||
286 | return err; | ||
287 | } | ||
288 | |||
289 | mutex_lock(&table->mutex); | ||
290 | |||
291 | err = validate_index(dev, table, index); | ||
292 | if (err) | ||
293 | goto out; | ||
294 | |||
295 | table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID); | ||
296 | |||
297 | err = mlx4_set_port_mac_table(dev, port, table->entries); | ||
298 | if (unlikely(err)) { | ||
299 | mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac); | ||
300 | table->entries[index] = 0; | ||
301 | } | ||
302 | out: | ||
303 | mutex_unlock(&table->mutex); | ||
304 | return err; | ||
305 | } | ||
306 | EXPORT_SYMBOL_GPL(mlx4_replace_mac); | ||
170 | static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, | 307 | static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, |
171 | __be32 *entries) | 308 | __be32 *entries) |
172 | { | 309 | { |
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c index e749f82865fe..b967647d0c76 100644 --- a/drivers/net/mlx4/profile.c +++ b/drivers/net/mlx4/profile.c | |||
@@ -107,9 +107,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, | |||
107 | profile[MLX4_RES_AUXC].num = request->num_qp; | 107 | profile[MLX4_RES_AUXC].num = request->num_qp; |
108 | profile[MLX4_RES_SRQ].num = request->num_srq; | 108 | profile[MLX4_RES_SRQ].num = request->num_srq; |
109 | profile[MLX4_RES_CQ].num = request->num_cq; | 109 | profile[MLX4_RES_CQ].num = request->num_cq; |
110 | profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, | 110 | profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); |
111 | dev_cap->reserved_eqs + | ||
112 | num_possible_cpus() + 1); | ||
113 | profile[MLX4_RES_DMPT].num = request->num_mpt; | 111 | profile[MLX4_RES_DMPT].num = request->num_mpt; |
114 | profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; | 112 | profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; |
115 | profile[MLX4_RES_MTT].num = request->num_mtt; | 113 | profile[MLX4_RES_MTT].num = request->num_mtt; |
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index a7f2eed9a08a..1f4e8680a96a 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -3645,6 +3645,7 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp) | |||
3645 | dma_free_coherent(&pdev->dev, bytes, | 3645 | dma_free_coherent(&pdev->dev, bytes, |
3646 | ss->fw_stats, ss->fw_stats_bus); | 3646 | ss->fw_stats, ss->fw_stats_bus); |
3647 | ss->fw_stats = NULL; | 3647 | ss->fw_stats = NULL; |
3648 | netif_napi_del(&ss->napi); | ||
3648 | } | 3649 | } |
3649 | } | 3650 | } |
3650 | kfree(mgp->ss); | 3651 | kfree(mgp->ss); |
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 8c66e22c3a0a..50986840c99c 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c | |||
@@ -2441,7 +2441,7 @@ static struct pci_error_handlers pch_gbe_err_handler = { | |||
2441 | .resume = pch_gbe_io_resume | 2441 | .resume = pch_gbe_io_resume |
2442 | }; | 2442 | }; |
2443 | 2443 | ||
2444 | static struct pci_driver pch_gbe_pcidev = { | 2444 | static struct pci_driver pch_gbe_driver = { |
2445 | .name = KBUILD_MODNAME, | 2445 | .name = KBUILD_MODNAME, |
2446 | .id_table = pch_gbe_pcidev_id, | 2446 | .id_table = pch_gbe_pcidev_id, |
2447 | .probe = pch_gbe_probe, | 2447 | .probe = pch_gbe_probe, |
@@ -2458,7 +2458,7 @@ static int __init pch_gbe_init_module(void) | |||
2458 | { | 2458 | { |
2459 | int ret; | 2459 | int ret; |
2460 | 2460 | ||
2461 | ret = pci_register_driver(&pch_gbe_pcidev); | 2461 | ret = pci_register_driver(&pch_gbe_driver); |
2462 | if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) { | 2462 | if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) { |
2463 | if (copybreak == 0) { | 2463 | if (copybreak == 0) { |
2464 | pr_info("copybreak disabled\n"); | 2464 | pr_info("copybreak disabled\n"); |
@@ -2472,7 +2472,7 @@ static int __init pch_gbe_init_module(void) | |||
2472 | 2472 | ||
2473 | static void __exit pch_gbe_exit_module(void) | 2473 | static void __exit pch_gbe_exit_module(void) |
2474 | { | 2474 | { |
2475 | pci_unregister_driver(&pch_gbe_pcidev); | 2475 | pci_unregister_driver(&pch_gbe_driver); |
2476 | } | 2476 | } |
2477 | 2477 | ||
2478 | module_init(pch_gbe_init_module); | 2478 | module_init(pch_gbe_init_module); |
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index b8bd936374f2..d890679e4c4d 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
@@ -1054,6 +1054,7 @@ static int efx_init_io(struct efx_nic *efx) | |||
1054 | { | 1054 | { |
1055 | struct pci_dev *pci_dev = efx->pci_dev; | 1055 | struct pci_dev *pci_dev = efx->pci_dev; |
1056 | dma_addr_t dma_mask = efx->type->max_dma_mask; | 1056 | dma_addr_t dma_mask = efx->type->max_dma_mask; |
1057 | bool use_wc; | ||
1057 | int rc; | 1058 | int rc; |
1058 | 1059 | ||
1059 | netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); | 1060 | netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); |
@@ -1104,8 +1105,21 @@ static int efx_init_io(struct efx_nic *efx) | |||
1104 | rc = -EIO; | 1105 | rc = -EIO; |
1105 | goto fail3; | 1106 | goto fail3; |
1106 | } | 1107 | } |
1107 | efx->membase = ioremap_wc(efx->membase_phys, | 1108 | |
1108 | efx->type->mem_map_size); | 1109 | /* bug22643: If SR-IOV is enabled then tx push over a write combined |
1110 | * mapping is unsafe. We need to disable write combining in this case. | ||
1111 | * MSI is unsupported when SR-IOV is enabled, and the firmware will | ||
1112 | * have removed the MSI capability. So write combining is safe if | ||
1113 | * there is an MSI capability. | ||
1114 | */ | ||
1115 | use_wc = (!EFX_WORKAROUND_22643(efx) || | ||
1116 | pci_find_capability(pci_dev, PCI_CAP_ID_MSI)); | ||
1117 | if (use_wc) | ||
1118 | efx->membase = ioremap_wc(efx->membase_phys, | ||
1119 | efx->type->mem_map_size); | ||
1120 | else | ||
1121 | efx->membase = ioremap_nocache(efx->membase_phys, | ||
1122 | efx->type->mem_map_size); | ||
1109 | if (!efx->membase) { | 1123 | if (!efx->membase) { |
1110 | netif_err(efx, probe, efx->net_dev, | 1124 | netif_err(efx, probe, efx->net_dev, |
1111 | "could not map memory BAR at %llx+%x\n", | 1125 | "could not map memory BAR at %llx+%x\n", |
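The predicate buried in the comment above, isolated for clarity (a sketch; the function name is not from the driver): write combining is safe unless the Siena workaround applies and firmware has stripped the MSI capability.

#include <stdbool.h>

static bool may_use_write_combining(bool workaround_22643, bool has_msi_cap)
{
	/* SR-IOV removes MSI, and SR-IOV + WC is unsafe (bug22643). */
	return !workaround_22643 || has_msi_cap;
}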
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h index e4dd3a7f304b..99ff11400cef 100644 --- a/drivers/net/sfc/workarounds.h +++ b/drivers/net/sfc/workarounds.h | |||
@@ -38,6 +38,8 @@ | |||
38 | #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS | 38 | #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS |
39 | /* Legacy interrupt storm when interrupt fifo fills */ | 39 | /* Legacy interrupt storm when interrupt fifo fills */ |
40 | #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA | 40 | #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA |
41 | /* Write combining and sriov=enabled are incompatible */ | ||
42 | #define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA | ||
41 | 43 | ||
42 | /* Spurious parity errors in TSORT buffers */ | 44 | /* Spurious parity errors in TSORT buffers */ |
43 | #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A | 45 | #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A |
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index bc86f4b6ecc2..727874d9deb6 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c | |||
@@ -49,6 +49,8 @@ | |||
49 | 49 | ||
50 | struct smsc95xx_priv { | 50 | struct smsc95xx_priv { |
51 | u32 mac_cr; | 51 | u32 mac_cr; |
52 | u32 hash_hi; | ||
53 | u32 hash_lo; | ||
52 | spinlock_t mac_cr_lock; | 54 | spinlock_t mac_cr_lock; |
53 | bool use_tx_csum; | 55 | bool use_tx_csum; |
54 | bool use_rx_csum; | 56 | bool use_rx_csum; |
@@ -370,10 +372,11 @@ static void smsc95xx_set_multicast(struct net_device *netdev) | |||
370 | { | 372 | { |
371 | struct usbnet *dev = netdev_priv(netdev); | 373 | struct usbnet *dev = netdev_priv(netdev); |
372 | struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); | 374 | struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); |
373 | u32 hash_hi = 0; | ||
374 | u32 hash_lo = 0; | ||
375 | unsigned long flags; | 375 | unsigned long flags; |
376 | 376 | ||
377 | pdata->hash_hi = 0; | ||
378 | pdata->hash_lo = 0; | ||
379 | |||
377 | spin_lock_irqsave(&pdata->mac_cr_lock, flags); | 380 | spin_lock_irqsave(&pdata->mac_cr_lock, flags); |
378 | 381 | ||
379 | if (dev->net->flags & IFF_PROMISC) { | 382 | if (dev->net->flags & IFF_PROMISC) { |
@@ -394,13 +397,13 @@ static void smsc95xx_set_multicast(struct net_device *netdev) | |||
394 | u32 bitnum = smsc95xx_hash(ha->addr); | 397 | u32 bitnum = smsc95xx_hash(ha->addr); |
395 | u32 mask = 0x01 << (bitnum & 0x1F); | 398 | u32 mask = 0x01 << (bitnum & 0x1F); |
396 | if (bitnum & 0x20) | 399 | if (bitnum & 0x20) |
397 | hash_hi |= mask; | 400 | pdata->hash_hi |= mask; |
398 | else | 401 | else |
399 | hash_lo |= mask; | 402 | pdata->hash_lo |= mask; |
400 | } | 403 | } |
401 | 404 | ||
402 | netif_dbg(dev, drv, dev->net, "HASHH=0x%08X, HASHL=0x%08X\n", | 405 | netif_dbg(dev, drv, dev->net, "HASHH=0x%08X, HASHL=0x%08X\n", |
403 | hash_hi, hash_lo); | 406 | pdata->hash_hi, pdata->hash_lo); |
404 | } else { | 407 | } else { |
405 | netif_dbg(dev, drv, dev->net, "receive own packets only\n"); | 408 | netif_dbg(dev, drv, dev->net, "receive own packets only\n"); |
406 | pdata->mac_cr &= | 409 | pdata->mac_cr &= |
@@ -410,8 +413,8 @@ static void smsc95xx_set_multicast(struct net_device *netdev) | |||
410 | spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); | 413 | spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); |
411 | 414 | ||
412 | /* Initiate async writes, as we can't wait for completion here */ | 415 | /* Initiate async writes, as we can't wait for completion here */ |
413 | smsc95xx_write_reg_async(dev, HASHH, &hash_hi); | 416 | smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi); |
414 | smsc95xx_write_reg_async(dev, HASHL, &hash_lo); | 417 | smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo); |
415 | smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr); | 418 | smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr); |
416 | } | 419 | } |
417 | 420 | ||
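The hash filter logic that now persists in pdata: each multicast address hashes to a 6-bit value, bit 5 of which selects the HASHH or HASHL register while the low five bits select the bit inside it. A host-side model of that mapping:

#include <stdint.h>

static void hash_to_filter_bit(uint32_t bitnum, uint32_t *hash_hi,
			       uint32_t *hash_lo)
{
	uint32_t mask = 1u << (bitnum & 0x1f);	/* bit within the register */

	if (bitnum & 0x20)			/* bit 5 picks the register */
		*hash_hi |= mask;
	else
		*hash_lo |= mask;
}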
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 115f162c617a..524825720a09 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -2160,6 +2160,8 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop) | |||
2160 | if (!ath_drain_all_txq(sc, false)) | 2160 | if (!ath_drain_all_txq(sc, false)) |
2161 | ath_reset(sc, false); | 2161 | ath_reset(sc, false); |
2162 | 2162 | ||
2163 | ieee80211_wake_queues(hw); | ||
2164 | |||
2163 | out: | 2165 | out: |
2164 | ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0); | 2166 | ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0); |
2165 | mutex_unlock(&sc->mutex); | 2167 | mutex_unlock(&sc->mutex); |
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c index 960d717ca7c2..a3241cd089b1 100644 --- a/drivers/net/wireless/ath/ath9k/rc.c +++ b/drivers/net/wireless/ath/ath9k/rc.c | |||
@@ -1328,7 +1328,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband, | |||
1328 | 1328 | ||
1329 | hdr = (struct ieee80211_hdr *)skb->data; | 1329 | hdr = (struct ieee80211_hdr *)skb->data; |
1330 | fc = hdr->frame_control; | 1330 | fc = hdr->frame_control; |
1331 | for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { | 1331 | for (i = 0; i < sc->hw->max_rates; i++) { |
1332 | struct ieee80211_tx_rate *rate = &tx_info->status.rates[i]; | 1332 | struct ieee80211_tx_rate *rate = &tx_info->status.rates[i]; |
1333 | if (!rate->count) | 1333 | if (!rate->count) |
1334 | break; | 1334 | break; |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index ef22096d40c9..26734e53b37f 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
@@ -1725,8 +1725,8 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, | |||
1725 | u8 tidno; | 1725 | u8 tidno; |
1726 | 1726 | ||
1727 | spin_lock_bh(&txctl->txq->axq_lock); | 1727 | spin_lock_bh(&txctl->txq->axq_lock); |
1728 | 1728 | if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an && | |
1729 | if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) { | 1729 | ieee80211_is_data_qos(hdr->frame_control)) { |
1730 | tidno = ieee80211_get_qos_ctl(hdr)[0] & | 1730 | tidno = ieee80211_get_qos_ctl(hdr)[0] & |
1731 | IEEE80211_QOS_CTL_TID_MASK; | 1731 | IEEE80211_QOS_CTL_TID_MASK; |
1732 | tid = ATH_AN_2_TID(txctl->an, tidno); | 1732 | tid = ATH_AN_2_TID(txctl->an, tidno); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c index 2003c1d4295f..08ccb9496f76 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c | |||
@@ -2265,7 +2265,7 @@ signed long iwlagn_wait_notification(struct iwl_priv *priv, | |||
2265 | int ret; | 2265 | int ret; |
2266 | 2266 | ||
2267 | ret = wait_event_timeout(priv->_agn.notif_waitq, | 2267 | ret = wait_event_timeout(priv->_agn.notif_waitq, |
2268 | &wait_entry->triggered, | 2268 | wait_entry->triggered, |
2269 | timeout); | 2269 | timeout); |
2270 | 2270 | ||
2271 | spin_lock_bh(&priv->_agn.notif_wait_lock); | 2271 | spin_lock_bh(&priv->_agn.notif_wait_lock); |
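The one-character diff above fixes a classic bug: &wait_entry->triggered is the address of the flag, which is never zero, so wait_event_timeout() saw an always-true condition and returned without waiting. Dropping the & makes it test the flag's value. A minimal demonstration of why the old condition could not fail:

#include <assert.h>
#include <stdbool.h>

struct notif_wait { bool triggered; };

int main(void)
{
	struct notif_wait w = { .triggered = false };

	assert(&w.triggered);	/* address of a member: always true */
	assert(!w.triggered);	/* value: the condition actually wanted */
	return 0;
}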
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 581dc9f10273..321b18b59135 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c | |||
@@ -3009,14 +3009,17 @@ static int iwl_mac_offchannel_tx_cancel_wait(struct ieee80211_hw *hw) | |||
3009 | 3009 | ||
3010 | mutex_lock(&priv->mutex); | 3010 | mutex_lock(&priv->mutex); |
3011 | 3011 | ||
3012 | if (!priv->_agn.offchan_tx_skb) | 3012 | if (!priv->_agn.offchan_tx_skb) { |
3013 | return -EINVAL; | 3013 | ret = -EINVAL; |
3014 | goto unlock; | ||
3015 | } | ||
3014 | 3016 | ||
3015 | priv->_agn.offchan_tx_skb = NULL; | 3017 | priv->_agn.offchan_tx_skb = NULL; |
3016 | 3018 | ||
3017 | ret = iwl_scan_cancel_timeout(priv, 200); | 3019 | ret = iwl_scan_cancel_timeout(priv, 200); |
3018 | if (ret) | 3020 | if (ret) |
3019 | ret = -EIO; | 3021 | ret = -EIO; |
3022 | unlock: | ||
3020 | mutex_unlock(&priv->mutex); | 3023 | mutex_unlock(&priv->mutex); |
3021 | 3024 | ||
3022 | return ret; | 3025 | return ret; |
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c index 09fae2f0ea08..736bbb9bd1d0 100644 --- a/drivers/net/wireless/orinoco/cfg.c +++ b/drivers/net/wireless/orinoco/cfg.c | |||
@@ -153,6 +153,9 @@ static int orinoco_scan(struct wiphy *wiphy, struct net_device *dev, | |||
153 | priv->scan_request = request; | 153 | priv->scan_request = request; |
154 | 154 | ||
155 | err = orinoco_hw_trigger_scan(priv, request->ssids); | 155 | err = orinoco_hw_trigger_scan(priv, request->ssids); |
156 | /* On error we aren't processing the request */ | ||
157 | if (err) | ||
158 | priv->scan_request = NULL; | ||
156 | 159 | ||
157 | return err; | 160 | return err; |
158 | } | 161 | } |
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c index f3d396e7544b..62c6b2b37dbe 100644 --- a/drivers/net/wireless/orinoco/main.c +++ b/drivers/net/wireless/orinoco/main.c | |||
@@ -1376,13 +1376,13 @@ static void orinoco_process_scan_results(struct work_struct *work) | |||
1376 | 1376 | ||
1377 | spin_lock_irqsave(&priv->scan_lock, flags); | 1377 | spin_lock_irqsave(&priv->scan_lock, flags); |
1378 | list_for_each_entry_safe(sd, temp, &priv->scan_list, list) { | 1378 | list_for_each_entry_safe(sd, temp, &priv->scan_list, list) { |
1379 | spin_unlock_irqrestore(&priv->scan_lock, flags); | ||
1380 | 1379 | ||
1381 | buf = sd->buf; | 1380 | buf = sd->buf; |
1382 | len = sd->len; | 1381 | len = sd->len; |
1383 | type = sd->type; | 1382 | type = sd->type; |
1384 | 1383 | ||
1385 | list_del(&sd->list); | 1384 | list_del(&sd->list); |
1385 | spin_unlock_irqrestore(&priv->scan_lock, flags); | ||
1386 | kfree(sd); | 1386 | kfree(sd); |
1387 | 1387 | ||
1388 | if (len > 0) { | 1388 | if (len > 0) { |
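The second orinoco fix narrows a race: scan_lock was dropped before the entry was unlinked, so a concurrent writer could observe a node that was about to be freed. After the change, list_del() runs under the lock and only the slow work happens outside it. A generic sketch of the corrected lock scope (plain singly linked list, not the kernel list API):

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *scan_list;

static void drain_one(void)
{
	struct node *sd;

	pthread_mutex_lock(&scan_lock);
	sd = scan_list;
	if (sd)
		scan_list = sd->next;	/* unlink under the lock */
	pthread_mutex_unlock(&scan_lock);

	free(sd);			/* slow work outside the lock */
}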
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index f1a92144996f..4e368657a83c 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c | |||
@@ -719,6 +719,7 @@ static struct usb_device_id rt2800usb_device_table[] = { | |||
719 | { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) }, | 719 | { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) }, |
720 | { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) }, | 720 | { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) }, |
721 | { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) }, | 721 | { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) }, |
722 | { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) }, | ||
722 | /* AzureWave */ | 723 | /* AzureWave */ |
723 | { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) }, | 724 | { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) }, |
724 | { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) }, | 725 | { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) }, |
@@ -913,7 +914,6 @@ static struct usb_device_id rt2800usb_device_table[] = { | |||
913 | { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) }, | 914 | { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) }, |
914 | { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) }, | 915 | { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) }, |
915 | { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) }, | 916 | { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) }, |
916 | { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) }, | ||
917 | /* AzureWave */ | 917 | /* AzureWave */ |
918 | { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) }, | 918 | { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) }, |
919 | { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) }, | 919 | { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) }, |
@@ -937,6 +937,8 @@ static struct usb_device_id rt2800usb_device_table[] = { | |||
937 | { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) }, | 937 | { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) }, |
938 | { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) }, | 938 | { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) }, |
939 | { USB_DEVICE(0x07d1, 0x3c17), USB_DEVICE_DATA(&rt2800usb_ops) }, | 939 | { USB_DEVICE(0x07d1, 0x3c17), USB_DEVICE_DATA(&rt2800usb_ops) }, |
940 | /* Edimax */ | ||
941 | { USB_DEVICE(0x7392, 0x4085), USB_DEVICE_DATA(&rt2800usb_ops) }, | ||
940 | /* Encore */ | 942 | /* Encore */ |
941 | { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) }, | 943 | { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) }, |
942 | /* Gemtek */ | 944 | /* Gemtek */ |
@@ -961,6 +963,7 @@ static struct usb_device_id rt2800usb_device_table[] = { | |||
961 | { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, | 963 | { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, |
962 | { USB_DEVICE(0x1d4d, 0x0011), USB_DEVICE_DATA(&rt2800usb_ops) }, | 964 | { USB_DEVICE(0x1d4d, 0x0011), USB_DEVICE_DATA(&rt2800usb_ops) }, |
963 | /* Planex */ | 965 | /* Planex */ |
966 | { USB_DEVICE(0x2019, 0x5201), USB_DEVICE_DATA(&rt2800usb_ops) }, | ||
964 | { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) }, | 967 | { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) }, |
965 | /* Qcom */ | 968 | /* Qcom */ |
966 | { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) }, | 969 | { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) }, |
@@ -972,6 +975,8 @@ static struct usb_device_id rt2800usb_device_table[] = { | |||
972 | /* Sweex */ | 975 | /* Sweex */ |
973 | { USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) }, | 976 | { USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) }, |
974 | { USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) }, | 977 | { USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) }, |
978 | /* Toshiba */ | ||
979 | { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) }, | ||
975 | /* Zyxel */ | 980 | /* Zyxel */ |
976 | { USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) }, | 981 | { USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) }, |
977 | #endif | 982 | #endif |
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c index 4f92cba6810a..f74a8701c67d 100644 --- a/drivers/net/wireless/rtlwifi/efuse.c +++ b/drivers/net/wireless/rtlwifi/efuse.c | |||
@@ -410,8 +410,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw) | |||
410 | 410 | ||
411 | if (!efuse_shadow_update_chk(hw)) { | 411 | if (!efuse_shadow_update_chk(hw)) { |
412 | efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); | 412 | efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); |
413 | memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], | 413 | memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], |
414 | (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], | 414 | &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], |
415 | rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); | 415 | rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); |
416 | 416 | ||
417 | RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, | 417 | RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, |
@@ -446,9 +446,9 @@ bool efuse_shadow_update(struct ieee80211_hw *hw) | |||
446 | 446 | ||
447 | if (word_en != 0x0F) { | 447 | if (word_en != 0x0F) { |
448 | u8 tmpdata[8]; | 448 | u8 tmpdata[8]; |
449 | memcpy((void *)tmpdata, | 449 | memcpy(tmpdata, |
450 | (void *)(&rtlefuse-> | 450 | &rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base], |
451 | efuse_map[EFUSE_MODIFY_MAP][base]), 8); | 451 | 8); |
452 | RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, | 452 | RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, |
453 | ("U-efuse\n"), tmpdata, 8); | 453 | ("U-efuse\n"), tmpdata, 8); |
454 | 454 | ||
@@ -465,8 +465,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw) | |||
465 | efuse_power_switch(hw, true, false); | 465 | efuse_power_switch(hw, true, false); |
466 | efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); | 466 | efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); |
467 | 467 | ||
468 | memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], | 468 | memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], |
469 | (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], | 469 | &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], |
470 | rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); | 470 | rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); |
471 | 471 | ||
472 | RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ("<---\n")); | 472 | RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ("<---\n")); |
@@ -479,13 +479,12 @@ void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw) | |||
479 | struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); | 479 | struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); |
480 | 480 | ||
481 | if (rtlefuse->autoload_failflag == true) { | 481 | if (rtlefuse->autoload_failflag == true) { |
482 | memset((void *)(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0]), 128, | 482 | memset(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 0xFF, 128); |
483 | 0xFF); | ||
484 | } else | 483 | } else |
485 | efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); | 484 | efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); |
486 | 485 | ||
487 | memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], | 486 | memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], |
488 | (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], | 487 | &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], |
489 | rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); | 488 | rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); |
490 | 489 | ||
491 | } | 490 | } |
@@ -694,8 +693,8 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data) | |||
694 | if (offset > 15) | 693 | if (offset > 15) |
695 | return false; | 694 | return false; |
696 | 695 | ||
697 | memset((void *)data, PGPKT_DATA_SIZE * sizeof(u8), 0xff); | 696 | memset(data, 0xff, PGPKT_DATA_SIZE * sizeof(u8)); |
698 | memset((void *)tmpdata, PGPKT_DATA_SIZE * sizeof(u8), 0xff); | 697 | memset(tmpdata, 0xff, PGPKT_DATA_SIZE * sizeof(u8)); |
699 | 698 | ||
700 | while (bcontinual && (efuse_addr < EFUSE_MAX_SIZE)) { | 699 | while (bcontinual && (efuse_addr < EFUSE_MAX_SIZE)) { |
701 | if (readstate & PG_STATE_HEADER) { | 700 | if (readstate & PG_STATE_HEADER) { |
@@ -862,7 +861,7 @@ static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr, | |||
862 | 861 | ||
863 | tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en); | 862 | tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en); |
864 | 863 | ||
865 | memset((void *)originaldata, 8 * sizeof(u8), 0xff); | 864 | memset(originaldata, 0xff, 8 * sizeof(u8)); |
866 | 865 | ||
867 | if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) { | 866 | if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) { |
868 | badworden = efuse_word_enable_data_write(hw, | 867 | badworden = efuse_word_enable_data_write(hw, |
@@ -917,7 +916,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw, | |||
917 | target_pkt.offset = offset; | 916 | target_pkt.offset = offset; |
918 | target_pkt.word_en = word_en; | 917 | target_pkt.word_en = word_en; |
919 | 918 | ||
920 | memset((void *)target_pkt.data, 8 * sizeof(u8), 0xFF); | 919 | memset(target_pkt.data, 0xFF, 8 * sizeof(u8)); |
921 | 920 | ||
922 | efuse_word_enable_data_read(word_en, data, target_pkt.data); | 921 | efuse_word_enable_data_read(word_en, data, target_pkt.data); |
923 | target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en); | 922 | target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en); |
@@ -1022,7 +1021,7 @@ static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw, | |||
1022 | u8 badworden = 0x0F; | 1021 | u8 badworden = 0x0F; |
1023 | u8 tmpdata[8]; | 1022 | u8 tmpdata[8]; |
1024 | 1023 | ||
1025 | memset((void *)tmpdata, PGPKT_DATA_SIZE, 0xff); | 1024 | memset(tmpdata, 0xff, PGPKT_DATA_SIZE); |
1026 | RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, | 1025 | RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, |
1027 | ("word_en = %x efuse_addr=%x\n", word_en, efuse_addr)); | 1026 | ("word_en = %x efuse_addr=%x\n", word_en, efuse_addr)); |
1028 | 1027 | ||
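Every efuse change above is the same bug: memset() takes the fill byte second and the length third, and the calls had them swapped, e.g. memset(tmpdata, 8, 0xff) filled 255 bytes with 0x08 -- overrunning an 8-byte buffer with the wrong value. A runnable reminder of the correct argument order:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[8];

	memset(buf, 0xff, sizeof(buf));	/* dst, fill byte, length */
	printf("%02x\n", buf[0]);	/* prints ff */
	return 0;
}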
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index 81e80489a052..58236e6d0921 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c | |||
@@ -60,6 +60,7 @@ static struct usb_device_id usb_ids[] = { | |||
60 | { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 }, | 60 | { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 }, |
61 | { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 }, | 61 | { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 }, |
62 | { USB_DEVICE(0x157e, 0x3204), .driver_info = DEVICE_ZD1211 }, | 62 | { USB_DEVICE(0x157e, 0x3204), .driver_info = DEVICE_ZD1211 }, |
63 | { USB_DEVICE(0x157e, 0x3207), .driver_info = DEVICE_ZD1211 }, | ||
63 | { USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 }, | 64 | { USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 }, |
64 | { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, | 65 | { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, |
65 | /* ZD1211B */ | 66 | /* ZD1211B */ |
diff --git a/include/linux/davinci_emac.h b/include/linux/davinci_emac.h index 5dd428532f79..542888504994 100644 --- a/include/linux/davinci_emac.h +++ b/include/linux/davinci_emac.h | |||
@@ -36,6 +36,7 @@ struct emac_platform_data { | |||
36 | 36 | ||
37 | u8 rmii_en; | 37 | u8 rmii_en; |
38 | u8 version; | 38 | u8 version; |
39 | bool no_bd_ram; | ||
39 | void (*interrupt_enable) (void); | 40 | void (*interrupt_enable) (void); |
40 | void (*interrupt_disable) (void); | 41 | void (*interrupt_disable) (void); |
41 | }; | 42 | }; |
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index b297f288f6eb..ae757bcf1280 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h | |||
@@ -648,6 +648,9 @@ enum ethtool_sfeatures_retval_bits { | |||
648 | 648 | ||
649 | #include <linux/rculist.h> | 649 | #include <linux/rculist.h> |
650 | 650 | ||
651 | /* needed by dev_disable_lro() */ | ||
652 | extern int __ethtool_set_flags(struct net_device *dev, u32 flags); | ||
653 | |||
651 | struct ethtool_rx_ntuple_flow_spec_container { | 654 | struct ethtool_rx_ntuple_flow_spec_container { |
652 | struct ethtool_rx_ntuple_flow_spec fs; | 655 | struct ethtool_rx_ntuple_flow_spec fs; |
653 | struct list_head list; | 656 | struct list_head list; |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 049214642036..8985768e2c0d 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -39,6 +39,11 @@ | |||
39 | 39 | ||
40 | #include <asm/atomic.h> | 40 | #include <asm/atomic.h> |
41 | 41 | ||
42 | #define MAX_MSIX_P_PORT 17 | ||
43 | #define MAX_MSIX 64 | ||
44 | #define MSIX_LEGACY_SZ 4 | ||
45 | #define MIN_MSIX_P_PORT 5 | ||
46 | |||
42 | enum { | 47 | enum { |
43 | MLX4_FLAG_MSI_X = 1 << 0, | 48 | MLX4_FLAG_MSI_X = 1 << 0, |
44 | MLX4_FLAG_OLD_PORT_CMDS = 1 << 1, | 49 | MLX4_FLAG_OLD_PORT_CMDS = 1 << 1, |
@@ -145,8 +150,10 @@ enum { | |||
145 | }; | 150 | }; |
146 | 151 | ||
147 | enum mlx4_protocol { | 152 | enum mlx4_protocol { |
148 | MLX4_PROTOCOL_IB, | 153 | MLX4_PROT_IB_IPV6 = 0, |
149 | MLX4_PROTOCOL_EN, | 154 | MLX4_PROT_ETH, |
155 | MLX4_PROT_IB_IPV4, | ||
156 | MLX4_PROT_FCOE | ||
150 | }; | 157 | }; |
151 | 158 | ||
152 | enum { | 159 | enum { |
@@ -173,6 +180,12 @@ enum mlx4_special_vlan_idx { | |||
173 | MLX4_VLAN_REGULAR | 180 | MLX4_VLAN_REGULAR |
174 | }; | 181 | }; |
175 | 182 | ||
183 | enum mlx4_steer_type { | ||
184 | MLX4_MC_STEER = 0, | ||
185 | MLX4_UC_STEER, | ||
186 | MLX4_NUM_STEERS | ||
187 | }; | ||
188 | |||
176 | enum { | 189 | enum { |
177 | MLX4_NUM_FEXCH = 64 * 1024, | 190 | MLX4_NUM_FEXCH = 64 * 1024, |
178 | }; | 191 | }; |
@@ -223,6 +236,7 @@ struct mlx4_caps { | |||
223 | int num_eqs; | 236 | int num_eqs; |
224 | int reserved_eqs; | 237 | int reserved_eqs; |
225 | int num_comp_vectors; | 238 | int num_comp_vectors; |
239 | int comp_pool; | ||
226 | int num_mpts; | 240 | int num_mpts; |
227 | int num_mtt_segs; | 241 | int num_mtt_segs; |
228 | int mtts_per_seg; | 242 | int mtts_per_seg; |
@@ -245,6 +259,9 @@ struct mlx4_caps { | |||
245 | u16 stat_rate_support; | 259 | u16 stat_rate_support; |
246 | int udp_rss; | 260 | int udp_rss; |
247 | int loopback_support; | 261 | int loopback_support; |
262 | int vep_uc_steering; | ||
263 | int vep_mc_steering; | ||
264 | int wol; | ||
248 | u8 port_width_cap[MLX4_MAX_PORTS + 1]; | 265 | u8 port_width_cap[MLX4_MAX_PORTS + 1]; |
249 | int max_gso_sz; | 266 | int max_gso_sz; |
250 | int reserved_qps_cnt[MLX4_NUM_QP_REGION]; | 267 | int reserved_qps_cnt[MLX4_NUM_QP_REGION]; |
@@ -334,6 +351,17 @@ struct mlx4_fmr { | |||
334 | struct mlx4_uar { | 351 | struct mlx4_uar { |
335 | unsigned long pfn; | 352 | unsigned long pfn; |
336 | int index; | 353 | int index; |
354 | struct list_head bf_list; | ||
355 | unsigned free_bf_bmap; | ||
356 | void __iomem *map; | ||
357 | void __iomem *bf_map; | ||
358 | }; | ||
359 | |||
360 | struct mlx4_bf { | ||
361 | unsigned long offset; | ||
362 | int buf_size; | ||
363 | struct mlx4_uar *uar; | ||
364 | void __iomem *reg; | ||
337 | }; | 365 | }; |
338 | 366 | ||
339 | struct mlx4_cq { | 367 | struct mlx4_cq { |
@@ -415,7 +443,7 @@ struct mlx4_dev { | |||
415 | unsigned long flags; | 443 | unsigned long flags; |
416 | struct mlx4_caps caps; | 444 | struct mlx4_caps caps; |
417 | struct radix_tree_root qp_table_tree; | 445 | struct radix_tree_root qp_table_tree; |
418 | u32 rev_id; | 446 | u8 rev_id; |
419 | char board_id[MLX4_BOARD_ID_LEN]; | 447 | char board_id[MLX4_BOARD_ID_LEN]; |
420 | }; | 448 | }; |
421 | 449 | ||
@@ -461,6 +489,8 @@ void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn); | |||
461 | 489 | ||
462 | int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar); | 490 | int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar); |
463 | void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar); | 491 | void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar); |
492 | int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf); | ||
493 | void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf); | ||
464 | 494 | ||
465 | int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, | 495 | int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, |
466 | struct mlx4_mtt *mtt); | 496 | struct mlx4_mtt *mtt); |
@@ -508,9 +538,15 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
508 | int block_mcast_loopback, enum mlx4_protocol protocol); | 538 | int block_mcast_loopback, enum mlx4_protocol protocol); |
509 | int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | 539 | int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], |
510 | enum mlx4_protocol protocol); | 540 | enum mlx4_protocol protocol); |
541 | int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port); | ||
542 | int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port); | ||
543 | int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port); | ||
544 | int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port); | ||
545 | int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); | ||
511 | 546 | ||
512 | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index); | 547 | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap); |
513 | void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index); | 548 | void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn); |
549 | int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap); | ||
514 | 550 | ||
515 | int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); | 551 | int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); |
516 | int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); | 552 | int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); |
@@ -526,5 +562,10 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, | |||
526 | int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); | 562 | int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); |
527 | int mlx4_SYNC_TPT(struct mlx4_dev *dev); | 563 | int mlx4_SYNC_TPT(struct mlx4_dev *dev); |
528 | int mlx4_test_interrupts(struct mlx4_dev *dev); | 564 | int mlx4_test_interrupts(struct mlx4_dev *dev); |
565 | int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector); | ||
566 | void mlx4_release_eq(struct mlx4_dev *dev, int vec); | ||
567 | |||
568 | int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port); | ||
569 | int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port); | ||
529 | 570 | ||
530 | #endif /* MLX4_DEVICE_H */ | 571 | #endif /* MLX4_DEVICE_H */ |
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 0eeb2a1a867c..9e9eb21056ca 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h | |||
@@ -303,6 +303,7 @@ struct mlx4_wqe_data_seg { | |||
303 | 303 | ||
304 | enum { | 304 | enum { |
305 | MLX4_INLINE_ALIGN = 64, | 305 | MLX4_INLINE_ALIGN = 64, |
306 | MLX4_INLINE_SEG = 1 << 31, | ||
306 | }; | 307 | }; |
307 | 308 | ||
308 | struct mlx4_wqe_inline_seg { | 309 | struct mlx4_wqe_inline_seg { |
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 60f7876b6da8..b2b9d28cb4ab 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h | |||
@@ -486,7 +486,8 @@ struct rate_info { | |||
486 | * @plink_state: mesh peer link state | 486 | * @plink_state: mesh peer link state |
487 | * @signal: signal strength of last received packet in dBm | 487 | * @signal: signal strength of last received packet in dBm |
488 | * @signal_avg: signal strength average in dBm | 488 | * @signal_avg: signal strength average in dBm |
489 | * @txrate: current unicast bitrate to this station | 489 | * @txrate: current unicast bitrate to this station |
490 | * @rxrate: current unicast bitrate from this station | ||
490 | * @rx_packets: packets received from this station | 491 | * @rx_packets: packets received from this station |
491 | * @tx_packets: packets transmitted to this station | 492 | * @tx_packets: packets transmitted to this station |
492 | * @tx_retries: cumulative retry counts | 493 | * @tx_retries: cumulative retry counts |
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 642a80bb42cf..c850e5fb967c 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h | |||
@@ -70,7 +70,7 @@ static inline struct inet_peer *rt6_get_peer(struct rt6_info *rt) | |||
70 | extern void ip6_route_input(struct sk_buff *skb); | 70 | extern void ip6_route_input(struct sk_buff *skb); |
71 | 71 | ||
72 | extern struct dst_entry * ip6_route_output(struct net *net, | 72 | extern struct dst_entry * ip6_route_output(struct net *net, |
73 | struct sock *sk, | 73 | const struct sock *sk, |
74 | struct flowi6 *fl6); | 74 | struct flowi6 *fl6); |
75 | 75 | ||
76 | extern int ip6_route_init(void); | 76 | extern int ip6_route_init(void); |
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index a1a858035913..e5d66ec88cf6 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h | |||
@@ -51,7 +51,6 @@ struct fib_nh { | |||
51 | struct fib_info *nh_parent; | 51 | struct fib_info *nh_parent; |
52 | unsigned nh_flags; | 52 | unsigned nh_flags; |
53 | unsigned char nh_scope; | 53 | unsigned char nh_scope; |
54 | unsigned char nh_cfg_scope; | ||
55 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 54 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
56 | int nh_weight; | 55 | int nh_weight; |
57 | int nh_power; | 56 | int nh_power; |
@@ -62,6 +61,7 @@ struct fib_nh { | |||
62 | int nh_oif; | 61 | int nh_oif; |
63 | __be32 nh_gw; | 62 | __be32 nh_gw; |
64 | __be32 nh_saddr; | 63 | __be32 nh_saddr; |
64 | int nh_saddr_genid; | ||
65 | }; | 65 | }; |
66 | 66 | ||
67 | /* | 67 | /* |
@@ -74,9 +74,10 @@ struct fib_info { | |||
74 | struct net *fib_net; | 74 | struct net *fib_net; |
75 | int fib_treeref; | 75 | int fib_treeref; |
76 | atomic_t fib_clntref; | 76 | atomic_t fib_clntref; |
77 | int fib_dead; | ||
78 | unsigned fib_flags; | 77 | unsigned fib_flags; |
79 | int fib_protocol; | 78 | unsigned char fib_dead; |
79 | unsigned char fib_protocol; | ||
80 | unsigned char fib_scope; | ||
80 | __be32 fib_prefsrc; | 81 | __be32 fib_prefsrc; |
81 | u32 fib_priority; | 82 | u32 fib_priority; |
82 | u32 *fib_metrics; | 83 | u32 *fib_metrics; |
@@ -141,12 +142,19 @@ struct fib_result_nl { | |||
141 | 142 | ||
142 | #endif /* CONFIG_IP_ROUTE_MULTIPATH */ | 143 | #endif /* CONFIG_IP_ROUTE_MULTIPATH */ |
143 | 144 | ||
144 | #define FIB_RES_SADDR(res) (FIB_RES_NH(res).nh_saddr) | 145 | extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh); |
146 | |||
147 | #define FIB_RES_SADDR(net, res) \ | ||
148 | ((FIB_RES_NH(res).nh_saddr_genid == \ | ||
149 | atomic_read(&(net)->ipv4.dev_addr_genid)) ? \ | ||
150 | FIB_RES_NH(res).nh_saddr : \ | ||
151 | fib_info_update_nh_saddr((net), &FIB_RES_NH(res))) | ||
145 | #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw) | 152 | #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw) |
146 | #define FIB_RES_DEV(res) (FIB_RES_NH(res).nh_dev) | 153 | #define FIB_RES_DEV(res) (FIB_RES_NH(res).nh_dev) |
147 | #define FIB_RES_OIF(res) (FIB_RES_NH(res).nh_oif) | 154 | #define FIB_RES_OIF(res) (FIB_RES_NH(res).nh_oif) |
148 | 155 | ||
149 | #define FIB_RES_PREFSRC(res) ((res).fi->fib_prefsrc ? : FIB_RES_SADDR(res)) | 156 | #define FIB_RES_PREFSRC(net, res) ((res).fi->fib_prefsrc ? : \ |
157 | FIB_RES_SADDR(net, res)) | ||
150 | 158 | ||
151 | struct fib_table { | 159 | struct fib_table { |
152 | struct hlist_node tb_hlist; | 160 | struct hlist_node tb_hlist; |
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 8650e7bf2ed0..cefe1b37c493 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h | |||
@@ -1160,7 +1160,7 @@ enum ieee80211_hw_flags { | |||
1160 | * @napi_weight: weight used for NAPI polling. You must specify an | 1160 | * @napi_weight: weight used for NAPI polling. You must specify an |
1161 | * appropriate value here if a napi_poll operation is provided | 1161 | * appropriate value here if a napi_poll operation is provided |
1162 | * by your driver. | 1162 | * by your driver. |
1163 | 1163 | * | |
1164 | * @max_rx_aggregation_subframes: maximum buffer size (number of | 1164 | * @max_rx_aggregation_subframes: maximum buffer size (number of |
1165 | * sub-frames) to be used for A-MPDU block ack receiver | 1165 | * sub-frames) to be used for A-MPDU block ack receiver |
1166 | * aggregation. | 1166 | * aggregation. |
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index e2e2ef57eca2..542195d9469e 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h | |||
@@ -55,6 +55,7 @@ struct netns_ipv4 { | |||
55 | int current_rt_cache_rebuild_count; | 55 | int current_rt_cache_rebuild_count; |
56 | 56 | ||
57 | atomic_t rt_genid; | 57 | atomic_t rt_genid; |
58 | atomic_t dev_addr_genid; | ||
58 | 59 | ||
59 | #ifdef CONFIG_IP_MROUTE | 60 | #ifdef CONFIG_IP_MROUTE |
60 | #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES | 61 | #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES |
diff --git a/include/net/route.h b/include/net/route.h index 30d6cae3841a..f88429cad52a 100644 --- a/include/net/route.h +++ b/include/net/route.h | |||
@@ -207,6 +207,7 @@ extern int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb); | |||
207 | 207 | ||
208 | struct in_ifaddr; | 208 | struct in_ifaddr; |
209 | extern void fib_add_ifaddr(struct in_ifaddr *); | 209 | extern void fib_add_ifaddr(struct in_ifaddr *); |
210 | extern void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *); | ||
210 | 211 | ||
211 | static inline void ip_rt_put(struct rtable * rt) | 212 | static inline void ip_rt_put(struct rtable * rt) |
212 | { | 213 | { |
@@ -269,8 +270,8 @@ static inline struct rtable *ip_route_newports(struct rtable *rt, | |||
269 | struct flowi4 fl4 = { | 270 | struct flowi4 fl4 = { |
270 | .flowi4_oif = rt->rt_oif, | 271 | .flowi4_oif = rt->rt_oif, |
271 | .flowi4_mark = rt->rt_mark, | 272 | .flowi4_mark = rt->rt_mark, |
272 | .daddr = rt->rt_key_dst, | 273 | .daddr = rt->rt_dst, |
273 | .saddr = rt->rt_key_src, | 274 | .saddr = rt->rt_src, |
274 | .flowi4_tos = rt->rt_tos, | 275 | .flowi4_tos = rt->rt_tos, |
275 | .flowi4_proto = protocol, | 276 | .flowi4_proto = protocol, |
276 | .fl4_sport = sport, | 277 | .fl4_sport = sport, |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index a9505b6a18e3..b931f021d7ab 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -25,6 +25,7 @@ struct qdisc_rate_table { | |||
25 | enum qdisc_state_t { | 25 | enum qdisc_state_t { |
26 | __QDISC_STATE_SCHED, | 26 | __QDISC_STATE_SCHED, |
27 | __QDISC_STATE_DEACTIVATED, | 27 | __QDISC_STATE_DEACTIVATED, |
28 | __QDISC_STATE_THROTTLED, | ||
28 | }; | 29 | }; |
29 | 30 | ||
30 | /* | 31 | /* |
@@ -32,7 +33,6 @@ enum qdisc_state_t { | |||
32 | */ | 33 | */ |
33 | enum qdisc___state_t { | 34 | enum qdisc___state_t { |
34 | __QDISC___STATE_RUNNING = 1, | 35 | __QDISC___STATE_RUNNING = 1, |
35 | __QDISC___STATE_THROTTLED = 2, | ||
36 | }; | 36 | }; |
37 | 37 | ||
38 | struct qdisc_size_table { | 38 | struct qdisc_size_table { |
@@ -106,17 +106,17 @@ static inline void qdisc_run_end(struct Qdisc *qdisc) | |||
106 | 106 | ||
107 | static inline bool qdisc_is_throttled(const struct Qdisc *qdisc) | 107 | static inline bool qdisc_is_throttled(const struct Qdisc *qdisc) |
108 | { | 108 | { |
109 | return (qdisc->__state & __QDISC___STATE_THROTTLED) ? true : false; | 109 | return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false; |
110 | } | 110 | } |
111 | 111 | ||
112 | static inline void qdisc_throttled(struct Qdisc *qdisc) | 112 | static inline void qdisc_throttled(struct Qdisc *qdisc) |
113 | { | 113 | { |
114 | qdisc->__state |= __QDISC___STATE_THROTTLED; | 114 | set_bit(__QDISC_STATE_THROTTLED, &qdisc->state); |
115 | } | 115 | } |
116 | 116 | ||
117 | static inline void qdisc_unthrottled(struct Qdisc *qdisc) | 117 | static inline void qdisc_unthrottled(struct Qdisc *qdisc) |
118 | { | 118 | { |
119 | qdisc->__state &= ~__QDISC___STATE_THROTTLED; | 119 | clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state); |
120 | } | 120 | } |
121 | 121 | ||
122 | struct Qdisc_class_ops { | 122 | struct Qdisc_class_ops { |
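Moving the throttled flag out of qdisc->__state and into the atomic ->state word matters because a plain |= / &= read-modify-write can lose a concurrent update, while test_bit()/set_bit()/clear_bit() are atomic per bit. A userspace analogue of the new helpers using C11 atomics (bit position is a stand-in):

#include <stdatomic.h>
#include <stdbool.h>

#define QDISC_STATE_THROTTLED	(1UL << 2)	/* stand-in bit position */

static atomic_ulong qdisc_state;

static bool qdisc_is_throttled(void)
{
	return atomic_load(&qdisc_state) & QDISC_STATE_THROTTLED;
}

static void qdisc_throttled(void)
{
	atomic_fetch_or(&qdisc_state, QDISC_STATE_THROTTLED);	/* set_bit() */
}

static void qdisc_unthrottled(void)
{
	atomic_fetch_and(&qdisc_state, ~QDISC_STATE_THROTTLED);/* clear_bit() */
}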
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 030a002ff8ee..f61eb2eff3fd 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -445,9 +445,9 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, | |||
445 | ip6h->payload_len = htons(8 + sizeof(*mldq)); | 445 | ip6h->payload_len = htons(8 + sizeof(*mldq)); |
446 | ip6h->nexthdr = IPPROTO_HOPOPTS; | 446 | ip6h->nexthdr = IPPROTO_HOPOPTS; |
447 | ip6h->hop_limit = 1; | 447 | ip6h->hop_limit = 1; |
448 | ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); | ||
448 | ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, | 449 | ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, |
449 | &ip6h->saddr); | 450 | &ip6h->saddr); |
450 | ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); | ||
451 | ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); | 451 | ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); |
452 | 452 | ||
453 | hopopt = (u8 *)(ip6h + 1); | 453 | hopopt = (u8 *)(ip6h + 1); |
diff --git a/net/core/dev.c b/net/core/dev.c index 0b88eba97dab..f453370131a0 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1353,14 +1353,17 @@ EXPORT_SYMBOL(dev_close); | |||
1353 | */ | 1353 | */ |
1354 | void dev_disable_lro(struct net_device *dev) | 1354 | void dev_disable_lro(struct net_device *dev) |
1355 | { | 1355 | { |
1356 | if (dev->ethtool_ops && dev->ethtool_ops->get_flags && | 1356 | u32 flags; |
1357 | dev->ethtool_ops->set_flags) { | 1357 | |
1358 | u32 flags = dev->ethtool_ops->get_flags(dev); | 1358 | if (dev->ethtool_ops && dev->ethtool_ops->get_flags) |
1359 | if (flags & ETH_FLAG_LRO) { | 1359 | flags = dev->ethtool_ops->get_flags(dev); |
1360 | flags &= ~ETH_FLAG_LRO; | 1360 | else |
1361 | dev->ethtool_ops->set_flags(dev, flags); | 1361 | flags = ethtool_op_get_flags(dev); |
1362 | } | 1362 | |
1363 | } | 1363 | if (!(flags & ETH_FLAG_LRO)) |
1364 | return; | ||
1365 | |||
1366 | __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO); | ||
1364 | WARN_ON(dev->features & NETIF_F_LRO); | 1367 | WARN_ON(dev->features & NETIF_F_LRO); |
1365 | } | 1368 | } |
1366 | EXPORT_SYMBOL(dev_disable_lro); | 1369 | EXPORT_SYMBOL(dev_disable_lro); |
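The dev_disable_lro() rework has two parts: fall back to the generic ethtool_op_get_flags() when a driver supplies no get_flags op, and route the update through the newly exported __ethtool_set_flags() so feature bookkeeping stays consistent. A compilable model of the new control flow, with stand-in bodies for the two helpers:

#include <stdio.h>

#define ETH_FLAG_LRO	(1u << 15)

struct net_device {
	unsigned int (*get_flags)(struct net_device *);	/* optional op */
	unsigned int flags;
};

static unsigned int generic_get_flags(struct net_device *dev)
{
	return dev->flags;	/* stand-in for ethtool_op_get_flags() */
}

static void set_flags(struct net_device *dev, unsigned int flags)
{
	dev->flags = flags;	/* stand-in for __ethtool_set_flags() */
}

static void disable_lro(struct net_device *dev)
{
	unsigned int flags = dev->get_flags ? dev->get_flags(dev)
					    : generic_get_flags(dev);

	if (!(flags & ETH_FLAG_LRO))
		return;		/* nothing to clear */
	set_flags(dev, flags & ~ETH_FLAG_LRO);
}

int main(void)
{
	struct net_device dev = { .flags = ETH_FLAG_LRO };

	disable_lro(&dev);
	printf("LRO still set: %d\n", !!(dev.flags & ETH_FLAG_LRO)); /* 0 */
	return 0;
}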
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index a1086fb0c0c7..24bd57493c0d 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -513,7 +513,7 @@ static int ethtool_set_one_feature(struct net_device *dev, | |||
513 | } | 513 | } |
514 | } | 514 | } |
515 | 515 | ||
516 | static int __ethtool_set_flags(struct net_device *dev, u32 data) | 516 | int __ethtool_set_flags(struct net_device *dev, u32 data) |
517 | { | 517 | { |
518 | u32 changed; | 518 | u32 changed; |
519 | 519 | ||
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 6d85800daeb7..5345b0bee6df 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -64,6 +64,8 @@ | |||
64 | #include <net/rtnetlink.h> | 64 | #include <net/rtnetlink.h> |
65 | #include <net/net_namespace.h> | 65 | #include <net/net_namespace.h> |
66 | 66 | ||
67 | #include "fib_lookup.h" | ||
68 | |||
67 | static struct ipv4_devconf ipv4_devconf = { | 69 | static struct ipv4_devconf ipv4_devconf = { |
68 | .data = { | 70 | .data = { |
69 | [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1, | 71 | [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1, |
@@ -151,6 +153,20 @@ struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref) | |||
151 | break; | 153 | break; |
152 | } | 154 | } |
153 | } | 155 | } |
156 | if (!result) { | ||
157 | struct flowi4 fl4 = { .daddr = addr }; | ||
158 | struct fib_result res = { 0 }; | ||
159 | struct fib_table *local; | ||
160 | |||
161 | /* Fallback to FIB local table so that communication | ||
162 | * over loopback subnets works. | ||
163 | */ | ||
164 | local = fib_get_table(net, RT_TABLE_LOCAL); | ||
165 | if (local && | ||
166 | !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) && | ||
167 | res.type == RTN_LOCAL) | ||
168 | result = FIB_RES_DEV(res); | ||
169 | } | ||
154 | if (result && devref) | 170 | if (result && devref) |
155 | dev_hold(result); | 171 | dev_hold(result); |
156 | rcu_read_unlock(); | 172 | rcu_read_unlock(); |
@@ -345,6 +361,17 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, | |||
345 | } | 361 | } |
346 | } | 362 | } |
347 | 363 | ||
364 | /* On promotion all secondaries from the subnet change | ||
365 | * the primary IP; we must remove all their routes silently | ||
366 | * and later add them back with the new prefsrc. Do this | ||
367 | * while all addresses are on the device list. | ||
368 | */ | ||
369 | for (ifa = promote; ifa; ifa = ifa->ifa_next) { | ||
370 | if (ifa1->ifa_mask == ifa->ifa_mask && | ||
371 | inet_ifa_match(ifa1->ifa_address, ifa)) | ||
372 | fib_del_ifaddr(ifa, ifa1); | ||
373 | } | ||
374 | |||
348 | /* 2. Unlink it */ | 375 | /* 2. Unlink it */ |
349 | 376 | ||
350 | *ifap = ifa1->ifa_next; | 377 | *ifap = ifa1->ifa_next; |
@@ -364,6 +391,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, | |||
364 | blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1); | 391 | blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1); |
365 | 392 | ||
366 | if (promote) { | 393 | if (promote) { |
394 | struct in_ifaddr *next_sec = promote->ifa_next; | ||
367 | 395 | ||
368 | if (prev_prom) { | 396 | if (prev_prom) { |
369 | prev_prom->ifa_next = promote->ifa_next; | 397 | prev_prom->ifa_next = promote->ifa_next; |
@@ -375,7 +403,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, | |||
375 | rtmsg_ifa(RTM_NEWADDR, promote, nlh, pid); | 403 | rtmsg_ifa(RTM_NEWADDR, promote, nlh, pid); |
376 | blocking_notifier_call_chain(&inetaddr_chain, | 404 | blocking_notifier_call_chain(&inetaddr_chain, |
377 | NETDEV_UP, promote); | 405 | NETDEV_UP, promote); |
378 | for (ifa = promote->ifa_next; ifa; ifa = ifa->ifa_next) { | 406 | for (ifa = next_sec; ifa; ifa = ifa->ifa_next) { |
379 | if (ifa1->ifa_mask != ifa->ifa_mask || | 407 | if (ifa1->ifa_mask != ifa->ifa_mask || |
380 | !inet_ifa_match(ifa1->ifa_address, ifa)) | 408 | !inet_ifa_match(ifa1->ifa_address, ifa)) |
381 | continue; | 409 | continue; |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index a373a259253c..f116ce8f1b46 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -228,7 +228,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, | |||
228 | if (res.type != RTN_LOCAL || !accept_local) | 228 | if (res.type != RTN_LOCAL || !accept_local) |
229 | goto e_inval; | 229 | goto e_inval; |
230 | } | 230 | } |
231 | *spec_dst = FIB_RES_PREFSRC(res); | 231 | *spec_dst = FIB_RES_PREFSRC(net, res); |
232 | fib_combine_itag(itag, &res); | 232 | fib_combine_itag(itag, &res); |
233 | dev_match = false; | 233 | dev_match = false; |
234 | 234 | ||
@@ -258,7 +258,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, | |||
258 | ret = 0; | 258 | ret = 0; |
259 | if (fib_lookup(net, &fl4, &res) == 0) { | 259 | if (fib_lookup(net, &fl4, &res) == 0) { |
260 | if (res.type == RTN_UNICAST) { | 260 | if (res.type == RTN_UNICAST) { |
261 | *spec_dst = FIB_RES_PREFSRC(res); | 261 | *spec_dst = FIB_RES_PREFSRC(net, res); |
262 | ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST; | 262 | ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST; |
263 | } | 263 | } |
264 | } | 264 | } |
@@ -722,12 +722,17 @@ void fib_add_ifaddr(struct in_ifaddr *ifa) | |||
722 | } | 722 | } |
723 | } | 723 | } |
724 | 724 | ||
725 | static void fib_del_ifaddr(struct in_ifaddr *ifa) | 725 | /* Delete primary or secondary address. |
726 | * Optionally, on secondary address promotion consider the addresses | ||
727 | * from subnet iprim as deleted, even if they are in the device list. | ||
728 | * In this case the secondary ifa can still be in the device list. | ||
729 | */ | ||
730 | void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim) | ||
726 | { | 731 | { |
727 | struct in_device *in_dev = ifa->ifa_dev; | 732 | struct in_device *in_dev = ifa->ifa_dev; |
728 | struct net_device *dev = in_dev->dev; | 733 | struct net_device *dev = in_dev->dev; |
729 | struct in_ifaddr *ifa1; | 734 | struct in_ifaddr *ifa1; |
730 | struct in_ifaddr *prim = ifa; | 735 | struct in_ifaddr *prim = ifa, *prim1 = NULL; |
731 | __be32 brd = ifa->ifa_address | ~ifa->ifa_mask; | 736 | __be32 brd = ifa->ifa_address | ~ifa->ifa_mask; |
732 | __be32 any = ifa->ifa_address & ifa->ifa_mask; | 737 | __be32 any = ifa->ifa_address & ifa->ifa_mask; |
733 | #define LOCAL_OK 1 | 738 | #define LOCAL_OK 1 |
@@ -735,17 +740,26 @@ static void fib_del_ifaddr(struct in_ifaddr *ifa) | |||
735 | #define BRD0_OK 4 | 740 | #define BRD0_OK 4 |
736 | #define BRD1_OK 8 | 741 | #define BRD1_OK 8 |
737 | unsigned ok = 0; | 742 | unsigned ok = 0; |
743 | int subnet = 0; /* Primary network */ | ||
744 | int gone = 1; /* Address is missing */ | ||
745 | int same_prefsrc = 0; /* Another primary with same IP */ | ||
738 | 746 | ||
739 | if (!(ifa->ifa_flags & IFA_F_SECONDARY)) | 747 | if (ifa->ifa_flags & IFA_F_SECONDARY) { |
740 | fib_magic(RTM_DELROUTE, | ||
741 | dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST, | ||
742 | any, ifa->ifa_prefixlen, prim); | ||
743 | else { | ||
744 | prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask); | 748 | prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask); |
745 | if (prim == NULL) { | 749 | if (prim == NULL) { |
746 | printk(KERN_WARNING "fib_del_ifaddr: bug: prim == NULL\n"); | 750 | printk(KERN_WARNING "fib_del_ifaddr: bug: prim == NULL\n"); |
747 | return; | 751 | return; |
748 | } | 752 | } |
753 | if (iprim && iprim != prim) { | ||
754 | printk(KERN_WARNING "fib_del_ifaddr: bug: iprim != prim\n"); | ||
755 | return; | ||
756 | } | ||
757 | } else if (!ipv4_is_zeronet(any) && | ||
758 | (any != ifa->ifa_local || ifa->ifa_prefixlen < 32)) { | ||
759 | fib_magic(RTM_DELROUTE, | ||
760 | dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST, | ||
761 | any, ifa->ifa_prefixlen, prim); | ||
762 | subnet = 1; | ||
749 | } | 763 | } |
750 | 764 | ||
751 | /* Deletion is more complicated than add. | 765 | /* Deletion is more complicated than add. |
@@ -755,6 +769,49 @@ static void fib_del_ifaddr(struct in_ifaddr *ifa) | |||
755 | */ | 769 | */ |
756 | 770 | ||
757 | for (ifa1 = in_dev->ifa_list; ifa1; ifa1 = ifa1->ifa_next) { | 771 | for (ifa1 = in_dev->ifa_list; ifa1; ifa1 = ifa1->ifa_next) { |
772 | if (ifa1 == ifa) { | ||
773 | /* promotion, keep the IP */ | ||
774 | gone = 0; | ||
775 | continue; | ||
776 | } | ||
777 | /* Ignore IFAs from our subnet */ | ||
778 | if (iprim && ifa1->ifa_mask == iprim->ifa_mask && | ||
779 | inet_ifa_match(ifa1->ifa_address, iprim)) | ||
780 | continue; | ||
781 | |||
782 | /* Ignore ifa1 if it uses different primary IP (prefsrc) */ | ||
783 | if (ifa1->ifa_flags & IFA_F_SECONDARY) { | ||
784 | /* Another address from our subnet? */ | ||
785 | if (ifa1->ifa_mask == prim->ifa_mask && | ||
786 | inet_ifa_match(ifa1->ifa_address, prim)) | ||
787 | prim1 = prim; | ||
788 | else { | ||
789 | /* We reached the secondaries, so | ||
790 | * same_prefsrc should be determined. | ||
791 | */ | ||
792 | if (!same_prefsrc) | ||
793 | continue; | ||
794 | /* Search new prim1 if ifa1 is not | ||
795 | * using the current prim1 | ||
796 | */ | ||
797 | if (!prim1 || | ||
798 | ifa1->ifa_mask != prim1->ifa_mask || | ||
799 | !inet_ifa_match(ifa1->ifa_address, prim1)) | ||
800 | prim1 = inet_ifa_byprefix(in_dev, | ||
801 | ifa1->ifa_address, | ||
802 | ifa1->ifa_mask); | ||
803 | if (!prim1) | ||
804 | continue; | ||
805 | if (prim1->ifa_local != prim->ifa_local) | ||
806 | continue; | ||
807 | } | ||
808 | } else { | ||
809 | if (prim->ifa_local != ifa1->ifa_local) | ||
810 | continue; | ||
811 | prim1 = ifa1; | ||
812 | if (prim != prim1) | ||
813 | same_prefsrc = 1; | ||
814 | } | ||
758 | if (ifa->ifa_local == ifa1->ifa_local) | 815 | if (ifa->ifa_local == ifa1->ifa_local) |
759 | ok |= LOCAL_OK; | 816 | ok |= LOCAL_OK; |
760 | if (ifa->ifa_broadcast == ifa1->ifa_broadcast) | 817 | if (ifa->ifa_broadcast == ifa1->ifa_broadcast) |
@@ -763,19 +820,37 @@ static void fib_del_ifaddr(struct in_ifaddr *ifa) | |||
763 | ok |= BRD1_OK; | 820 | ok |= BRD1_OK; |
764 | if (any == ifa1->ifa_broadcast) | 821 | if (any == ifa1->ifa_broadcast) |
765 | ok |= BRD0_OK; | 822 | ok |= BRD0_OK; |
823 | /* primary has network specific broadcasts */ | ||
824 | if (prim1 == ifa1 && ifa1->ifa_prefixlen < 31) { | ||
825 | __be32 brd1 = ifa1->ifa_address | ~ifa1->ifa_mask; | ||
826 | __be32 any1 = ifa1->ifa_address & ifa1->ifa_mask; | ||
827 | |||
828 | if (!ipv4_is_zeronet(any1)) { | ||
829 | if (ifa->ifa_broadcast == brd1 || | ||
830 | ifa->ifa_broadcast == any1) | ||
831 | ok |= BRD_OK; | ||
832 | if (brd == brd1 || brd == any1) | ||
833 | ok |= BRD1_OK; | ||
834 | if (any == brd1 || any == any1) | ||
835 | ok |= BRD0_OK; | ||
836 | } | ||
837 | } | ||
766 | } | 838 | } |
767 | 839 | ||
768 | if (!(ok & BRD_OK)) | 840 | if (!(ok & BRD_OK)) |
769 | fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim); | 841 | fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim); |
770 | if (!(ok & BRD1_OK)) | 842 | if (subnet && ifa->ifa_prefixlen < 31) { |
771 | fib_magic(RTM_DELROUTE, RTN_BROADCAST, brd, 32, prim); | 843 | if (!(ok & BRD1_OK)) |
772 | if (!(ok & BRD0_OK)) | 844 | fib_magic(RTM_DELROUTE, RTN_BROADCAST, brd, 32, prim); |
773 | fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim); | 845 | if (!(ok & BRD0_OK)) |
846 | fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim); | ||
847 | } | ||
774 | if (!(ok & LOCAL_OK)) { | 848 | if (!(ok & LOCAL_OK)) { |
775 | fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim); | 849 | fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim); |
776 | 850 | ||
777 | /* Check, that this local address finally disappeared. */ | 851 | /* Check, that this local address finally disappeared. */ |
778 | if (inet_addr_type(dev_net(dev), ifa->ifa_local) != RTN_LOCAL) { | 852 | if (gone && |
853 | inet_addr_type(dev_net(dev), ifa->ifa_local) != RTN_LOCAL) { | ||
779 | /* And the last, but not the least thing. | 854 | /* And the last, but not the least thing. |
780 | * We must flush stray FIB entries. | 855 | * We must flush stray FIB entries. |
781 | * | 856 | * |
@@ -885,6 +960,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, | |||
885 | { | 960 | { |
886 | struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; | 961 | struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; |
887 | struct net_device *dev = ifa->ifa_dev->dev; | 962 | struct net_device *dev = ifa->ifa_dev->dev; |
963 | struct net *net = dev_net(dev); | ||
888 | 964 | ||
889 | switch (event) { | 965 | switch (event) { |
890 | case NETDEV_UP: | 966 | case NETDEV_UP: |
@@ -892,12 +968,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, | |||
892 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 968 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
893 | fib_sync_up(dev); | 969 | fib_sync_up(dev); |
894 | #endif | 970 | #endif |
895 | fib_update_nh_saddrs(dev); | 971 | atomic_inc(&net->ipv4.dev_addr_genid); |
896 | rt_cache_flush(dev_net(dev), -1); | 972 | rt_cache_flush(dev_net(dev), -1); |
897 | break; | 973 | break; |
898 | case NETDEV_DOWN: | 974 | case NETDEV_DOWN: |
899 | fib_del_ifaddr(ifa); | 975 | fib_del_ifaddr(ifa, NULL); |
900 | fib_update_nh_saddrs(dev); | 976 | atomic_inc(&net->ipv4.dev_addr_genid); |
901 | if (ifa->ifa_dev->ifa_list == NULL) { | 977 | if (ifa->ifa_dev->ifa_list == NULL) { |
902 | /* Last address was deleted from this interface. | 978 | /* Last address was deleted from this interface. |
903 | * Disable IP. | 979 | * Disable IP. |
@@ -915,6 +991,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo | |||
915 | { | 991 | { |
916 | struct net_device *dev = ptr; | 992 | struct net_device *dev = ptr; |
917 | struct in_device *in_dev = __in_dev_get_rtnl(dev); | 993 | struct in_device *in_dev = __in_dev_get_rtnl(dev); |
994 | struct net *net = dev_net(dev); | ||
918 | 995 | ||
919 | if (event == NETDEV_UNREGISTER) { | 996 | if (event == NETDEV_UNREGISTER) { |
920 | fib_disable_ip(dev, 2, -1); | 997 | fib_disable_ip(dev, 2, -1); |
@@ -932,6 +1009,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo | |||
932 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 1009 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
933 | fib_sync_up(dev); | 1010 | fib_sync_up(dev); |
934 | #endif | 1011 | #endif |
1012 | atomic_inc(&net->ipv4.dev_addr_genid); | ||
935 | rt_cache_flush(dev_net(dev), -1); | 1013 | rt_cache_flush(dev_net(dev), -1); |
936 | break; | 1014 | break; |
937 | case NETDEV_DOWN: | 1015 | case NETDEV_DOWN: |
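A thread running through the fib_del_ifaddr() rework: every "same subnet" decision is inet_ifa_match(), which declares an address part of a primary's network when the bits under the mask agree. A one-function model of that test:

/* Model of inet_ifa_match(): addresses share a primary's subnet when
 * the masked bits are identical. */
static int ifa_match(unsigned int addr, unsigned int prim_addr,
		     unsigned int mask)
{
	return ((addr ^ prim_addr) & mask) == 0;
}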
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h index 4ec323875a02..af0f14aba169 100644 --- a/net/ipv4/fib_lookup.h +++ b/net/ipv4/fib_lookup.h | |||
@@ -10,7 +10,6 @@ struct fib_alias { | |||
10 | struct fib_info *fa_info; | 10 | struct fib_info *fa_info; |
11 | u8 fa_tos; | 11 | u8 fa_tos; |
12 | u8 fa_type; | 12 | u8 fa_type; |
13 | u8 fa_scope; | ||
14 | u8 fa_state; | 13 | u8 fa_state; |
15 | struct rcu_head rcu; | 14 | struct rcu_head rcu; |
16 | }; | 15 | }; |
@@ -29,7 +28,7 @@ extern void fib_release_info(struct fib_info *); | |||
29 | extern struct fib_info *fib_create_info(struct fib_config *cfg); | 28 | extern struct fib_info *fib_create_info(struct fib_config *cfg); |
30 | extern int fib_nh_match(struct fib_config *cfg, struct fib_info *fi); | 29 | extern int fib_nh_match(struct fib_config *cfg, struct fib_info *fi); |
31 | extern int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, | 30 | extern int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, |
32 | u32 tb_id, u8 type, u8 scope, __be32 dst, | 31 | u32 tb_id, u8 type, __be32 dst, |
33 | int dst_len, u8 tos, struct fib_info *fi, | 32 | int dst_len, u8 tos, struct fib_info *fi, |
34 | unsigned int); | 33 | unsigned int); |
35 | extern void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, | 34 | extern void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, |
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 622ac4c95026..641a5a2a9f9c 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c | |||
@@ -222,7 +222,7 @@ static inline unsigned int fib_info_hashfn(const struct fib_info *fi) | |||
222 | unsigned int mask = (fib_info_hash_size - 1); | 222 | unsigned int mask = (fib_info_hash_size - 1); |
223 | unsigned int val = fi->fib_nhs; | 223 | unsigned int val = fi->fib_nhs; |
224 | 224 | ||
225 | val ^= fi->fib_protocol; | 225 | val ^= (fi->fib_protocol << 8) | fi->fib_scope; |
226 | val ^= (__force u32)fi->fib_prefsrc; | 226 | val ^= (__force u32)fi->fib_prefsrc; |
227 | val ^= fi->fib_priority; | 227 | val ^= fi->fib_priority; |
228 | for_nexthops(fi) { | 228 | for_nexthops(fi) { |
@@ -248,10 +248,11 @@ static struct fib_info *fib_find_info(const struct fib_info *nfi) | |||
248 | if (fi->fib_nhs != nfi->fib_nhs) | 248 | if (fi->fib_nhs != nfi->fib_nhs) |
249 | continue; | 249 | continue; |
250 | if (nfi->fib_protocol == fi->fib_protocol && | 250 | if (nfi->fib_protocol == fi->fib_protocol && |
251 | nfi->fib_scope == fi->fib_scope && | ||
251 | nfi->fib_prefsrc == fi->fib_prefsrc && | 252 | nfi->fib_prefsrc == fi->fib_prefsrc && |
252 | nfi->fib_priority == fi->fib_priority && | 253 | nfi->fib_priority == fi->fib_priority && |
253 | memcmp(nfi->fib_metrics, fi->fib_metrics, | 254 | memcmp(nfi->fib_metrics, fi->fib_metrics, |
254 | sizeof(fi->fib_metrics)) == 0 && | 255 | sizeof(u32) * RTAX_MAX) == 0 && |
255 | ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 && | 256 | ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 && |
256 | (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0)) | 257 | (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0)) |
257 | return fi; | 258 | return fi; |
@@ -328,7 +329,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, | |||
328 | goto errout; | 329 | goto errout; |
329 | 330 | ||
330 | err = fib_dump_info(skb, info->pid, seq, event, tb_id, | 331 | err = fib_dump_info(skb, info->pid, seq, event, tb_id, |
331 | fa->fa_type, fa->fa_scope, key, dst_len, | 332 | fa->fa_type, key, dst_len, |
332 | fa->fa_tos, fa->fa_info, nlm_flags); | 333 | fa->fa_tos, fa->fa_info, nlm_flags); |
333 | if (err < 0) { | 334 | if (err < 0) { |
334 | /* -EMSGSIZE implies BUG in fib_nlmsg_size() */ | 335 | /* -EMSGSIZE implies BUG in fib_nlmsg_size() */ |
@@ -695,6 +696,16 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash, | |||
695 | fib_info_hash_free(old_laddrhash, bytes); | 696 | fib_info_hash_free(old_laddrhash, bytes); |
696 | } | 697 | } |
697 | 698 | ||
699 | __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh) | ||
700 | { | ||
701 | nh->nh_saddr = inet_select_addr(nh->nh_dev, | ||
702 | nh->nh_gw, | ||
703 | nh->nh_parent->fib_scope); | ||
704 | nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid); | ||
705 | |||
706 | return nh->nh_saddr; | ||
707 | } | ||
708 | |||
698 | struct fib_info *fib_create_info(struct fib_config *cfg) | 709 | struct fib_info *fib_create_info(struct fib_config *cfg) |
699 | { | 710 | { |
700 | int err; | 711 | int err; |
@@ -753,6 +764,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg) | |||
753 | 764 | ||
754 | fi->fib_net = hold_net(net); | 765 | fi->fib_net = hold_net(net); |
755 | fi->fib_protocol = cfg->fc_protocol; | 766 | fi->fib_protocol = cfg->fc_protocol; |
767 | fi->fib_scope = cfg->fc_scope; | ||
756 | fi->fib_flags = cfg->fc_flags; | 768 | fi->fib_flags = cfg->fc_flags; |
757 | fi->fib_priority = cfg->fc_priority; | 769 | fi->fib_priority = cfg->fc_priority; |
758 | fi->fib_prefsrc = cfg->fc_prefsrc; | 770 | fi->fib_prefsrc = cfg->fc_prefsrc; |
@@ -854,10 +866,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg) | |||
854 | } | 866 | } |
855 | 867 | ||
856 | change_nexthops(fi) { | 868 | change_nexthops(fi) { |
857 | nexthop_nh->nh_cfg_scope = cfg->fc_scope; | 869 | fib_info_update_nh_saddr(net, nexthop_nh); |
858 | nexthop_nh->nh_saddr = inet_select_addr(nexthop_nh->nh_dev, | ||
859 | nexthop_nh->nh_gw, | ||
860 | nexthop_nh->nh_cfg_scope); | ||
861 | } endfor_nexthops(fi) | 870 | } endfor_nexthops(fi) |
862 | 871 | ||
863 | link_it: | 872 | link_it: |
@@ -906,7 +915,7 @@ failure: | |||
906 | } | 915 | } |
907 | 916 | ||
908 | int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, | 917 | int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, |
909 | u32 tb_id, u8 type, u8 scope, __be32 dst, int dst_len, u8 tos, | 918 | u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos, |
910 | struct fib_info *fi, unsigned int flags) | 919 | struct fib_info *fi, unsigned int flags) |
911 | { | 920 | { |
912 | struct nlmsghdr *nlh; | 921 | struct nlmsghdr *nlh; |
@@ -928,7 +937,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, | |||
928 | NLA_PUT_U32(skb, RTA_TABLE, tb_id); | 937 | NLA_PUT_U32(skb, RTA_TABLE, tb_id); |
929 | rtm->rtm_type = type; | 938 | rtm->rtm_type = type; |
930 | rtm->rtm_flags = fi->fib_flags; | 939 | rtm->rtm_flags = fi->fib_flags; |
931 | rtm->rtm_scope = scope; | 940 | rtm->rtm_scope = fi->fib_scope; |
932 | rtm->rtm_protocol = fi->fib_protocol; | 941 | rtm->rtm_protocol = fi->fib_protocol; |
933 | 942 | ||
934 | if (rtm->rtm_dst_len) | 943 | if (rtm->rtm_dst_len) |
@@ -1084,7 +1093,7 @@ void fib_select_default(struct fib_result *res) | |||
1084 | list_for_each_entry_rcu(fa, fa_head, fa_list) { | 1093 | list_for_each_entry_rcu(fa, fa_head, fa_list) { |
1085 | struct fib_info *next_fi = fa->fa_info; | 1094 | struct fib_info *next_fi = fa->fa_info; |
1086 | 1095 | ||
1087 | if (fa->fa_scope != res->scope || | 1096 | if (next_fi->fib_scope != res->scope || |
1088 | fa->fa_type != RTN_UNICAST) | 1097 | fa->fa_type != RTN_UNICAST) |
1089 | continue; | 1098 | continue; |
1090 | 1099 | ||
@@ -1128,24 +1137,6 @@ out: | |||
1128 | return; | 1137 | return; |
1129 | } | 1138 | } |
1130 | 1139 | ||
1131 | void fib_update_nh_saddrs(struct net_device *dev) | ||
1132 | { | ||
1133 | struct hlist_head *head; | ||
1134 | struct hlist_node *node; | ||
1135 | struct fib_nh *nh; | ||
1136 | unsigned int hash; | ||
1137 | |||
1138 | hash = fib_devindex_hashfn(dev->ifindex); | ||
1139 | head = &fib_info_devhash[hash]; | ||
1140 | hlist_for_each_entry(nh, node, head, nh_hash) { | ||
1141 | if (nh->nh_dev != dev) | ||
1142 | continue; | ||
1143 | nh->nh_saddr = inet_select_addr(nh->nh_dev, | ||
1144 | nh->nh_gw, | ||
1145 | nh->nh_cfg_scope); | ||
1146 | } | ||
1147 | } | ||
1148 | |||
1149 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 1140 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
1150 | 1141 | ||
1151 | /* | 1142 | /* |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 3d28a35c2e1a..90a3ff605591 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1245,7 +1245,6 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 			if (fa->fa_info->fib_priority != fi->fib_priority)
 				break;
 			if (fa->fa_type == cfg->fc_type &&
-			    fa->fa_scope == cfg->fc_scope &&
 			    fa->fa_info == fi) {
 				fa_match = fa;
 				break;
@@ -1271,7 +1270,6 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 			new_fa->fa_tos = fa->fa_tos;
 			new_fa->fa_info = fi;
 			new_fa->fa_type = cfg->fc_type;
-			new_fa->fa_scope = cfg->fc_scope;
 			state = fa->fa_state;
 			new_fa->fa_state = state & ~FA_S_ACCESSED;
 
@@ -1308,7 +1306,6 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 	new_fa->fa_info = fi;
 	new_fa->fa_tos = tos;
 	new_fa->fa_type = cfg->fc_type;
-	new_fa->fa_scope = cfg->fc_scope;
 	new_fa->fa_state = 0;
 	/*
 	 * Insert new entry to the list.
@@ -1362,7 +1359,7 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
 
 		if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
 			continue;
-		if (fa->fa_scope < flp->flowi4_scope)
+		if (fa->fa_info->fib_scope < flp->flowi4_scope)
 			continue;
 		fib_alias_accessed(fa);
 		err = fib_props[fa->fa_type].error;
@@ -1388,7 +1385,7 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
 			res->prefixlen = plen;
 			res->nh_sel = nhsel;
 			res->type = fa->fa_type;
-			res->scope = fa->fa_scope;
+			res->scope = fa->fa_info->fib_scope;
 			res->fi = fi;
 			res->table = tb;
 			res->fa_head = &li->falh;
@@ -1664,7 +1661,9 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
 
 		if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
 		    (cfg->fc_scope == RT_SCOPE_NOWHERE ||
-		     fa->fa_scope == cfg->fc_scope) &&
+		     fa->fa_info->fib_scope == cfg->fc_scope) &&
+		    (!cfg->fc_prefsrc ||
+		     fi->fib_prefsrc == cfg->fc_prefsrc) &&
 		    (!cfg->fc_protocol ||
 		     fi->fib_protocol == cfg->fc_protocol) &&
 		    fib_nh_match(cfg, fi) == 0) {
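
Besides the scope indirection, fib_table_delete() gains a preferred-source filter: a delete request matches an entry only when the caller either left fc_prefsrc unset (treated as a wildcard) or it equals the entry's fib_prefsrc. Expressed as a standalone predicate, a hypothetical helper for illustration rather than code from the patch:

	#include <linux/types.h>

	/* 0 means "no prefsrc given", i.e. match any entry */
	static bool prefsrc_matches(__be32 fc_prefsrc, __be32 fib_prefsrc)
	{
		return !fc_prefsrc || fib_prefsrc == fc_prefsrc;
	}

This is what allows routes installed with a specific source address to be deleted selectively when that address goes away.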
@@ -1861,7 +1860,6 @@ static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
 				  RTM_NEWROUTE,
 				  tb->tb_id,
 				  fa->fa_type,
-				  fa->fa_scope,
 				  xkey,
 				  plen,
 				  fa->fa_tos,
@@ -2382,7 +2380,7 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
 			seq_indent(seq, iter->depth+1);
 			seq_printf(seq, " /%d %s %s", li->plen,
 				   rtn_scope(buf1, sizeof(buf1),
-					     fa->fa_scope),
+					     fa->fa_info->fib_scope),
 				   rtn_type(buf2, sizeof(buf2),
 					    fa->fa_type));
 			if (fa->fa_tos)
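
With these hunks, every remaining reader of the per-alias scope in fib_trie.c (insert matching, lookup, the netlink dump, and the /proc output) goes through the shared fib_info instead, so struct fib_alias no longer needs its own copy. If a single accessor were preferred at call sites, it could look like this hypothetical helper, which is not part of the patch:

	static inline u8 fib_alias_scope(const struct fib_alias *fa)
	{
		return fa->fa_info->fib_scope;
	}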
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 870b5182ddd8..4b0c81180804 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1593,8 +1593,6 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 			rt->rt_peer_genid = rt_peer_genid();
 		}
 		check_peer_pmtu(dst, peer);
-
-		inet_putpeer(peer);
 	}
 }
 
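
The dropped inet_putpeer() was unbalanced: the peer used here is the rtable's own cached pointer, not a fresh lookup that took a reference, so putting it drops a reference the route still relies on. Abridged from the shape of the surrounding function (reconstructed context, treat as illustrative):

	if (!rt->peer)
		rt_bind_peer(rt, 1);
	peer = rt->peer;
	if (peer) {
		/* ... PMTU bookkeeping shown in the hunk above ... */
		check_peer_pmtu(dst, peer);
		/* no inet_putpeer(): this path never took its own
		 * reference, so putting one here underflows the count */
	}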
@@ -1720,7 +1718,7 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
 
 	rcu_read_lock();
 	if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
-		src = FIB_RES_PREFSRC(res);
+		src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
 	else
 		src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
 				       RT_SCOPE_UNIVERSE);
@@ -2617,7 +2615,7 @@ static struct rtable *ip_route_output_slow(struct net *net,
 		fib_select_default(&res);
 
 	if (!fl4.saddr)
-		fl4.saddr = FIB_RES_PREFSRC(res);
+		fl4.saddr = FIB_RES_PREFSRC(net, res);
 
 	dev_out = FIB_RES_DEV(res);
 	fl4.flowi4_oif = dev_out->ifindex;
@@ -3221,6 +3219,8 @@ static __net_init int rt_genid_init(struct net *net)
 {
 	get_random_bytes(&net->ipv4.rt_genid,
 			 sizeof(net->ipv4.rt_genid));
+	get_random_bytes(&net->ipv4.dev_addr_genid,
+			 sizeof(net->ipv4.dev_addr_genid));
 	return 0;
 }
 
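
FIB_RES_PREFSRC now takes the namespace because, when a route has no explicit fib_prefsrc, the fallback source address is the lazily cached nh_saddr, and validating that cache needs net->ipv4.dev_addr_genid, the counter seeded here right next to rt_genid. One plausible shape for the updated macro, assuming the genid scheme sketched earlier; include/net/ip_fib.h is not part of this excerpt:

	#define FIB_RES_PREFSRC(net, res)	((res).fi->fib_prefsrc ? : \
						 __fib_res_prefsrc(net, &res))

Seeding the counter per namespace mirrors the rt_genid initialization directly above it.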
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index da782e7ab16d..bef9f04c22ba 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2659,7 +2659,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 #define DBGUNDO(x...) do { } while (0)
 #endif
 
-static void tcp_undo_cwr(struct sock *sk, const int undo)
+static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2671,14 +2671,13 @@ static void tcp_undo_cwr(struct sock *sk, const int undo)
 		else
 			tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);
 
-		if (undo && tp->prior_ssthresh > tp->snd_ssthresh) {
+		if (undo_ssthresh && tp->prior_ssthresh > tp->snd_ssthresh) {
 			tp->snd_ssthresh = tp->prior_ssthresh;
 			TCP_ECN_withdraw_cwr(tp);
 		}
 	} else {
 		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
 	}
-	tcp_moderate_cwnd(tp);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
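
Two independent cleanups land in tcp_undo_cwr(): the flag becomes a self-documenting bool, and the unconditional tcp_moderate_cwnd() on the way out is gone, so a successful undo actually restores the prior cwnd instead of immediately re-clamping it to a burst-moderated value. The call-site contrast, taken from the hunks that follow:

	tcp_undo_cwr(sk, 1);	/* before: what does the bare 1 mean? */
	tcp_undo_cwr(sk, true);	/* after: also undo ssthresh */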
@@ -2699,7 +2698,7 @@ static int tcp_try_undo_recovery(struct sock *sk)
 	 * or our original transmission succeeded.
 	 */
 	DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
-	tcp_undo_cwr(sk, 1);
+	tcp_undo_cwr(sk, true);
 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
 		mib_idx = LINUX_MIB_TCPLOSSUNDO;
 	else
@@ -2726,7 +2725,7 @@ static void tcp_try_undo_dsack(struct sock *sk)
 
 	if (tp->undo_marker && !tp->undo_retrans) {
 		DBGUNDO(sk, "D-SACK");
-		tcp_undo_cwr(sk, 1);
+		tcp_undo_cwr(sk, true);
 		tp->undo_marker = 0;
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
 	}
@@ -2779,7 +2778,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
 		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
 
 		DBGUNDO(sk, "Hoe");
-		tcp_undo_cwr(sk, 0);
+		tcp_undo_cwr(sk, false);
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 
 		/* So... Do not make Hoe's retransmit yet.
@@ -2808,7 +2807,7 @@ static int tcp_try_undo_loss(struct sock *sk)
 
 		DBGUNDO(sk, "partial loss");
 		tp->lost_out = 0;
-		tcp_undo_cwr(sk, 1);
+		tcp_undo_cwr(sk, true);
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
 		inet_csk(sk)->icsk_retransmits = 0;
 		tp->undo_marker = 0;
@@ -2822,8 +2821,11 @@ static int tcp_try_undo_loss(struct sock *sk)
 static inline void tcp_complete_cwr(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
-	tp->snd_cwnd_stamp = tcp_time_stamp;
+	/* Do not moderate cwnd if it's already undone in cwr or recovery */
+	if (tp->undo_marker && tp->snd_cwnd > tp->snd_ssthresh) {
+		tp->snd_cwnd = tp->snd_ssthresh;
+		tp->snd_cwnd_stamp = tcp_time_stamp;
+	}
 	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
 
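
tcp_complete_cwr() used to clamp unconditionally, which re-shrank a window that an undo path had just restored (the undo paths above clear tp->undo_marker once they fire). A worked trace with illustrative numbers, not values from the patch:

	/* undo restores snd_cwnd = 10 and clears undo_marker; ssthresh = 5
	 *   old: snd_cwnd = min(10, 5) = 5   -> the undo is silently defeated
	 *   new: undo_marker == 0, so snd_cwnd stays 10
	 */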
@@ -3494,7 +3496,7 @@ static void tcp_undo_spur_to_response(struct sock *sk, int flag)
 	if (flag & FLAG_ECE)
 		tcp_ratehalving_spur_to_response(sk);
 	else
-		tcp_undo_cwr(sk, 1);
+		tcp_undo_cwr(sk, true);
 }
 
 /* F-RTO spurious RTO detection algorithm (RFC4138)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 6814c8722fa7..843406f14d7b 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -854,7 +854,7 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table
 	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
 }
 
-struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
+struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
 				    struct flowi6 *fl6)
 {
 	int flags = 0;
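
ip6_route_output() only reads the socket, so taking it as const struct sock * lets callers that hold a const pointer use it without a cast. A hypothetical caller, just to show the point:

	static struct dst_entry *route_for(struct net *net,
					   const struct sock *sk,
					   struct flowi6 *fl6)
	{
		return ip6_route_output(net, sk, fl6);	/* no cast needed now */
	}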
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 5a11078827ab..d0311a322ddd 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -243,6 +243,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 	memcpy(sta->sta.addr, addr, ETH_ALEN);
 	sta->local = local;
 	sta->sdata = sdata;
+	sta->last_rx = jiffies;
 
 	ewma_init(&sta->avg_signal, 1024, 8);
 
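
Initializing last_rx at allocation matters because inactivity is derived from it: with last_rx left at zero, a station that has not yet received a frame appears idle for roughly the whole system uptime. Sketch of the consumer side, paraphrasing the cfg80211 station-info path rather than quoting a hunk from this excerpt:

	/* inactive time reported to userspace, roughly: */
	sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx);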