aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/mlx4
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/mlx4')
-rw-r--r--drivers/net/mlx4/Makefile2
-rw-r--r--drivers/net/mlx4/alloc.c33
-rw-r--r--drivers/net/mlx4/catas.c6
-rw-r--r--drivers/net/mlx4/cq.c2
-rw-r--r--drivers/net/mlx4/en_cq.c38
-rw-r--r--drivers/net/mlx4/en_ethtool.c286
-rw-r--r--drivers/net/mlx4/en_main.c64
-rw-r--r--drivers/net/mlx4/en_netdev.c274
-rw-r--r--drivers/net/mlx4/en_port.c45
-rw-r--r--drivers/net/mlx4/en_port.h30
-rw-r--r--drivers/net/mlx4/en_rx.c123
-rw-r--r--drivers/net/mlx4/en_selftest.c179
-rw-r--r--drivers/net/mlx4/en_tx.c98
-rw-r--r--drivers/net/mlx4/eq.c155
-rw-r--r--drivers/net/mlx4/fw.c45
-rw-r--r--drivers/net/mlx4/fw.h9
-rw-r--r--drivers/net/mlx4/icm.c28
-rw-r--r--drivers/net/mlx4/icm.h2
-rw-r--r--drivers/net/mlx4/intf.c21
-rw-r--r--drivers/net/mlx4/main.c154
-rw-r--r--drivers/net/mlx4/mcg.c650
-rw-r--r--drivers/net/mlx4/mlx4.h52
-rw-r--r--drivers/net/mlx4/mlx4_en.h64
-rw-r--r--drivers/net/mlx4/pd.c102
-rw-r--r--drivers/net/mlx4/port.c199
-rw-r--r--drivers/net/mlx4/profile.c6
-rw-r--r--drivers/net/mlx4/sense.c4
27 files changed, 2182 insertions, 489 deletions
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index 1fd068e1d930..d1aa45a15854 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -6,4 +6,4 @@ mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
6obj-$(CONFIG_MLX4_EN) += mlx4_en.o 6obj-$(CONFIG_MLX4_EN) += mlx4_en.o
7 7
8mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \ 8mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
9 en_resources.o en_netdev.o 9 en_resources.o en_netdev.o en_selftest.o
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 8c8515619b8e..116cae334dad 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -62,6 +62,9 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
62 } else 62 } else
63 obj = -1; 63 obj = -1;
64 64
65 if (obj != -1)
66 --bitmap->avail;
67
65 spin_unlock(&bitmap->lock); 68 spin_unlock(&bitmap->lock);
66 69
67 return obj; 70 return obj;
@@ -74,7 +77,7 @@ void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
74 77
75u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align) 78u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
76{ 79{
77 u32 obj, i; 80 u32 obj;
78 81
79 if (likely(cnt == 1 && align == 1)) 82 if (likely(cnt == 1 && align == 1))
80 return mlx4_bitmap_alloc(bitmap); 83 return mlx4_bitmap_alloc(bitmap);
@@ -91,8 +94,7 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
91 } 94 }
92 95
93 if (obj < bitmap->max) { 96 if (obj < bitmap->max) {
94 for (i = 0; i < cnt; i++) 97 bitmap_set(bitmap->table, obj, cnt);
95 set_bit(obj + i, bitmap->table);
96 if (obj == bitmap->last) { 98 if (obj == bitmap->last) {
97 bitmap->last = (obj + cnt); 99 bitmap->last = (obj + cnt);
98 if (bitmap->last >= bitmap->max) 100 if (bitmap->last >= bitmap->max)
@@ -102,31 +104,35 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
102 } else 104 } else
103 obj = -1; 105 obj = -1;
104 106
107 if (obj != -1)
108 bitmap->avail -= cnt;
109
105 spin_unlock(&bitmap->lock); 110 spin_unlock(&bitmap->lock);
106 111
107 return obj; 112 return obj;
108} 113}
109 114
110void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt) 115u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
111{ 116{
112 u32 i; 117 return bitmap->avail;
118}
113 119
120void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
121{
114 obj &= bitmap->max + bitmap->reserved_top - 1; 122 obj &= bitmap->max + bitmap->reserved_top - 1;
115 123
116 spin_lock(&bitmap->lock); 124 spin_lock(&bitmap->lock);
117 for (i = 0; i < cnt; i++) 125 bitmap_clear(bitmap->table, obj, cnt);
118 clear_bit(obj + i, bitmap->table);
119 bitmap->last = min(bitmap->last, obj); 126 bitmap->last = min(bitmap->last, obj);
120 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) 127 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
121 & bitmap->mask; 128 & bitmap->mask;
129 bitmap->avail += cnt;
122 spin_unlock(&bitmap->lock); 130 spin_unlock(&bitmap->lock);
123} 131}
124 132
125int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, 133int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
126 u32 reserved_bot, u32 reserved_top) 134 u32 reserved_bot, u32 reserved_top)
127{ 135{
128 int i;
129
130 /* num must be a power of 2 */ 136 /* num must be a power of 2 */
131 if (num != roundup_pow_of_two(num)) 137 if (num != roundup_pow_of_two(num))
132 return -EINVAL; 138 return -EINVAL;
@@ -136,14 +142,14 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
136 bitmap->max = num - reserved_top; 142 bitmap->max = num - reserved_top;
137 bitmap->mask = mask; 143 bitmap->mask = mask;
138 bitmap->reserved_top = reserved_top; 144 bitmap->reserved_top = reserved_top;
145 bitmap->avail = num - reserved_top - reserved_bot;
139 spin_lock_init(&bitmap->lock); 146 spin_lock_init(&bitmap->lock);
140 bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) * 147 bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
141 sizeof (long), GFP_KERNEL); 148 sizeof (long), GFP_KERNEL);
142 if (!bitmap->table) 149 if (!bitmap->table)
143 return -ENOMEM; 150 return -ENOMEM;
144 151
145 for (i = 0; i < reserved_bot; ++i) 152 bitmap_set(bitmap->table, 0, reserved_bot);
146 set_bit(i, bitmap->table);
147 153
148 return 0; 154 return 0;
149} 155}
@@ -185,10 +191,11 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
185 } else { 191 } else {
186 int i; 192 int i;
187 193
194 buf->direct.buf = NULL;
188 buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; 195 buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
189 buf->npages = buf->nbufs; 196 buf->npages = buf->nbufs;
190 buf->page_shift = PAGE_SHIFT; 197 buf->page_shift = PAGE_SHIFT;
191 buf->page_list = kzalloc(buf->nbufs * sizeof *buf->page_list, 198 buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
192 GFP_KERNEL); 199 GFP_KERNEL);
193 if (!buf->page_list) 200 if (!buf->page_list)
194 return -ENOMEM; 201 return -ENOMEM;
@@ -236,7 +243,7 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
236 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, 243 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
237 buf->direct.map); 244 buf->direct.map);
238 else { 245 else {
239 if (BITS_PER_LONG == 64) 246 if (BITS_PER_LONG == 64 && buf->direct.buf)
240 vunmap(buf->direct.buf); 247 vunmap(buf->direct.buf);
241 248
242 for (i = 0; i < buf->nbufs; ++i) 249 for (i = 0; i < buf->nbufs; ++i)
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index 68aaa42d0ced..32f947154c33 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -113,7 +113,7 @@ static void catas_reset(struct work_struct *work)
113void mlx4_start_catas_poll(struct mlx4_dev *dev) 113void mlx4_start_catas_poll(struct mlx4_dev *dev)
114{ 114{
115 struct mlx4_priv *priv = mlx4_priv(dev); 115 struct mlx4_priv *priv = mlx4_priv(dev);
116 unsigned long addr; 116 phys_addr_t addr;
117 117
118 INIT_LIST_HEAD(&priv->catas_err.list); 118 INIT_LIST_HEAD(&priv->catas_err.list);
119 init_timer(&priv->catas_err.timer); 119 init_timer(&priv->catas_err.timer);
@@ -124,8 +124,8 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
124 124
125 priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4); 125 priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
126 if (!priv->catas_err.map) { 126 if (!priv->catas_err.map) {
127 mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n", 127 mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
128 addr); 128 (unsigned long long) addr);
129 return; 129 return;
130 } 130 }
131 131
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 7cd34e9c7c7e..bd8ef9f2fa71 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -198,7 +198,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
198 u64 mtt_addr; 198 u64 mtt_addr;
199 int err; 199 int err;
200 200
201 if (vector >= dev->caps.num_comp_vectors) 201 if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
202 return -EINVAL; 202 return -EINVAL;
203 203
204 cq->vector = vector; 204 cq->vector = vector;
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
index 21786ad4455e..ec4b6d047fe0 100644
--- a/drivers/net/mlx4/en_cq.c
+++ b/drivers/net/mlx4/en_cq.c
@@ -51,13 +51,10 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
51 int err; 51 int err;
52 52
53 cq->size = entries; 53 cq->size = entries;
54 if (mode == RX) { 54 if (mode == RX)
55 cq->buf_size = cq->size * sizeof(struct mlx4_cqe); 55 cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
56 cq->vector = ring % mdev->dev->caps.num_comp_vectors; 56 else
57 } else {
58 cq->buf_size = sizeof(struct mlx4_cqe); 57 cq->buf_size = sizeof(struct mlx4_cqe);
59 cq->vector = 0;
60 }
61 58
62 cq->ring = ring; 59 cq->ring = ring;
63 cq->is_tx = mode; 60 cq->is_tx = mode;
@@ -80,7 +77,8 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
80int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) 77int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
81{ 78{
82 struct mlx4_en_dev *mdev = priv->mdev; 79 struct mlx4_en_dev *mdev = priv->mdev;
83 int err; 80 int err = 0;
81 char name[25];
84 82
85 cq->dev = mdev->pndev[priv->port]; 83 cq->dev = mdev->pndev[priv->port];
86 cq->mcq.set_ci_db = cq->wqres.db.db; 84 cq->mcq.set_ci_db = cq->wqres.db.db;
@@ -89,6 +87,29 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
89 *cq->mcq.arm_db = 0; 87 *cq->mcq.arm_db = 0;
90 memset(cq->buf, 0, cq->buf_size); 88 memset(cq->buf, 0, cq->buf_size);
91 89
90 if (cq->is_tx == RX) {
91 if (mdev->dev->caps.comp_pool) {
92 if (!cq->vector) {
93 sprintf(name , "%s-rx-%d", priv->dev->name, cq->ring);
94 if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) {
95 cq->vector = (cq->ring + 1 + priv->port) %
96 mdev->dev->caps.num_comp_vectors;
97 mlx4_warn(mdev, "Failed Assigning an EQ to "
98 "%s_rx-%d ,Falling back to legacy EQ's\n",
99 priv->dev->name, cq->ring);
100 }
101 }
102 } else {
103 cq->vector = (cq->ring + 1 + priv->port) %
104 mdev->dev->caps.num_comp_vectors;
105 }
106 } else {
107 if (!cq->vector || !mdev->dev->caps.comp_pool) {
108 /*Fallback to legacy pool in case of error*/
109 cq->vector = 0;
110 }
111 }
112
92 if (!cq->is_tx) 113 if (!cq->is_tx)
93 cq->size = priv->rx_ring[cq->ring].actual_size; 114 cq->size = priv->rx_ring[cq->ring].actual_size;
94 115
@@ -112,12 +133,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
112 return 0; 133 return 0;
113} 134}
114 135
115void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) 136void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
137 bool reserve_vectors)
116{ 138{
117 struct mlx4_en_dev *mdev = priv->mdev; 139 struct mlx4_en_dev *mdev = priv->mdev;
118 140
119 mlx4_en_unmap_buffer(&cq->wqres.buf); 141 mlx4_en_unmap_buffer(&cq->wqres.buf);
120 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); 142 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
143 if (priv->mdev->dev->caps.comp_pool && cq->vector && !reserve_vectors)
144 mlx4_release_eq(priv->mdev->dev, cq->vector);
121 cq->buf_size = 0; 145 cq->buf_size = 0;
122 cq->buf = NULL; 146 cq->buf = NULL;
123} 147}
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index b275238fe70d..2e858e4dcf4d 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -39,28 +39,13 @@
39#include "en_port.h" 39#include "en_port.h"
40 40
41 41
42static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
43{
44 int i;
45
46 priv->port_stats.lro_aggregated = 0;
47 priv->port_stats.lro_flushed = 0;
48 priv->port_stats.lro_no_desc = 0;
49
50 for (i = 0; i < priv->rx_ring_num; i++) {
51 priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
52 priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
53 priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
54 }
55}
56
57static void 42static void
58mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) 43mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
59{ 44{
60 struct mlx4_en_priv *priv = netdev_priv(dev); 45 struct mlx4_en_priv *priv = netdev_priv(dev);
61 struct mlx4_en_dev *mdev = priv->mdev; 46 struct mlx4_en_dev *mdev = priv->mdev;
62 47
63 sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id); 48 strncpy(drvinfo->driver, DRV_NAME, 32);
64 strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32); 49 strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
65 sprintf(drvinfo->fw_version, "%d.%d.%d", 50 sprintf(drvinfo->fw_version, "%d.%d.%d",
66 (u16) (mdev->dev->caps.fw_ver >> 32), 51 (u16) (mdev->dev->caps.fw_ver >> 32),
@@ -72,37 +57,6 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
72 drvinfo->eedump_len = 0; 57 drvinfo->eedump_len = 0;
73} 58}
74 59
75static u32 mlx4_en_get_tso(struct net_device *dev)
76{
77 return (dev->features & NETIF_F_TSO) != 0;
78}
79
80static int mlx4_en_set_tso(struct net_device *dev, u32 data)
81{
82 struct mlx4_en_priv *priv = netdev_priv(dev);
83
84 if (data) {
85 if (!priv->mdev->LSO_support)
86 return -EPERM;
87 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
88 } else
89 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
90 return 0;
91}
92
93static u32 mlx4_en_get_rx_csum(struct net_device *dev)
94{
95 struct mlx4_en_priv *priv = netdev_priv(dev);
96 return priv->rx_csum;
97}
98
99static int mlx4_en_set_rx_csum(struct net_device *dev, u32 data)
100{
101 struct mlx4_en_priv *priv = netdev_priv(dev);
102 priv->rx_csum = (data != 0);
103 return 0;
104}
105
106static const char main_strings[][ETH_GSTRING_LEN] = { 60static const char main_strings[][ETH_GSTRING_LEN] = {
107 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", 61 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
108 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", 62 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
@@ -112,7 +66,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
112 "tx_heartbeat_errors", "tx_window_errors", 66 "tx_heartbeat_errors", "tx_window_errors",
113 67
114 /* port statistics */ 68 /* port statistics */
115 "lro_aggregated", "lro_flushed", "lro_no_desc", "tso_packets", 69 "tso_packets",
116 "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed", 70 "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
117 "rx_csum_good", "rx_csum_none", "tx_chksum_offload", 71 "rx_csum_good", "rx_csum_none", "tx_chksum_offload",
118 72
@@ -125,6 +79,14 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
125#define NUM_MAIN_STATS 21 79#define NUM_MAIN_STATS 21
126#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS) 80#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
127 81
82static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
83 "Interupt Test",
84 "Link Test",
85 "Speed Test",
86 "Register Test",
87 "Loopback Test",
88};
89
128static u32 mlx4_en_get_msglevel(struct net_device *dev) 90static u32 mlx4_en_get_msglevel(struct net_device *dev)
129{ 91{
130 return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable; 92 return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
@@ -138,18 +100,80 @@ static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
138static void mlx4_en_get_wol(struct net_device *netdev, 100static void mlx4_en_get_wol(struct net_device *netdev,
139 struct ethtool_wolinfo *wol) 101 struct ethtool_wolinfo *wol)
140{ 102{
141 wol->supported = 0; 103 struct mlx4_en_priv *priv = netdev_priv(netdev);
142 wol->wolopts = 0; 104 int err = 0;
105 u64 config = 0;
106
107 if (!priv->mdev->dev->caps.wol) {
108 wol->supported = 0;
109 wol->wolopts = 0;
110 return;
111 }
112
113 err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
114 if (err) {
115 en_err(priv, "Failed to get WoL information\n");
116 return;
117 }
118
119 if (config & MLX4_EN_WOL_MAGIC)
120 wol->supported = WAKE_MAGIC;
121 else
122 wol->supported = 0;
123
124 if (config & MLX4_EN_WOL_ENABLED)
125 wol->wolopts = WAKE_MAGIC;
126 else
127 wol->wolopts = 0;
128}
129
130static int mlx4_en_set_wol(struct net_device *netdev,
131 struct ethtool_wolinfo *wol)
132{
133 struct mlx4_en_priv *priv = netdev_priv(netdev);
134 u64 config = 0;
135 int err = 0;
136
137 if (!priv->mdev->dev->caps.wol)
138 return -EOPNOTSUPP;
139
140 if (wol->supported & ~WAKE_MAGIC)
141 return -EINVAL;
142
143 err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
144 if (err) {
145 en_err(priv, "Failed to get WoL info, unable to modify\n");
146 return err;
147 }
148
149 if (wol->wolopts & WAKE_MAGIC) {
150 config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
151 MLX4_EN_WOL_MAGIC;
152 } else {
153 config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
154 config |= MLX4_EN_WOL_DO_MODIFY;
155 }
156
157 err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
158 if (err)
159 en_err(priv, "Failed to set WoL information\n");
160
161 return err;
143} 162}
144 163
145static int mlx4_en_get_sset_count(struct net_device *dev, int sset) 164static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
146{ 165{
147 struct mlx4_en_priv *priv = netdev_priv(dev); 166 struct mlx4_en_priv *priv = netdev_priv(dev);
148 167
149 if (sset != ETH_SS_STATS) 168 switch (sset) {
169 case ETH_SS_STATS:
170 return NUM_ALL_STATS +
171 (priv->tx_ring_num + priv->rx_ring_num) * 2;
172 case ETH_SS_TEST:
173 return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.loopback_support) * 2;
174 default:
150 return -EOPNOTSUPP; 175 return -EOPNOTSUPP;
151 176 }
152 return NUM_ALL_STATS + (priv->tx_ring_num + priv->rx_ring_num) * 2;
153} 177}
154 178
155static void mlx4_en_get_ethtool_stats(struct net_device *dev, 179static void mlx4_en_get_ethtool_stats(struct net_device *dev,
@@ -161,8 +185,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
161 185
162 spin_lock_bh(&priv->stats_lock); 186 spin_lock_bh(&priv->stats_lock);
163 187
164 mlx4_en_update_lro_stats(priv);
165
166 for (i = 0; i < NUM_MAIN_STATS; i++) 188 for (i = 0; i < NUM_MAIN_STATS; i++)
167 data[index++] = ((unsigned long *) &priv->stats)[i]; 189 data[index++] = ((unsigned long *) &priv->stats)[i];
168 for (i = 0; i < NUM_PORT_STATS; i++) 190 for (i = 0; i < NUM_PORT_STATS; i++)
@@ -181,6 +203,12 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
181 203
182} 204}
183 205
206static void mlx4_en_self_test(struct net_device *dev,
207 struct ethtool_test *etest, u64 *buf)
208{
209 mlx4_en_ex_selftest(dev, &etest->flags, buf);
210}
211
184static void mlx4_en_get_strings(struct net_device *dev, 212static void mlx4_en_get_strings(struct net_device *dev,
185 uint32_t stringset, uint8_t *data) 213 uint32_t stringset, uint8_t *data)
186{ 214{
@@ -188,51 +216,84 @@ static void mlx4_en_get_strings(struct net_device *dev,
188 int index = 0; 216 int index = 0;
189 int i; 217 int i;
190 218
191 if (stringset != ETH_SS_STATS) 219 switch (stringset) {
192 return; 220 case ETH_SS_TEST:
193 221 for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
194 /* Add main counters */ 222 strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
195 for (i = 0; i < NUM_MAIN_STATS; i++) 223 if (priv->mdev->dev->caps.loopback_support)
196 strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]); 224 for (; i < MLX4_EN_NUM_SELF_TEST; i++)
197 for (i = 0; i < NUM_PORT_STATS; i++) 225 strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
198 strcpy(data + (index++) * ETH_GSTRING_LEN, 226 break;
227
228 case ETH_SS_STATS:
229 /* Add main counters */
230 for (i = 0; i < NUM_MAIN_STATS; i++)
231 strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
232 for (i = 0; i< NUM_PORT_STATS; i++)
233 strcpy(data + (index++) * ETH_GSTRING_LEN,
199 main_strings[i + NUM_MAIN_STATS]); 234 main_strings[i + NUM_MAIN_STATS]);
200 for (i = 0; i < priv->tx_ring_num; i++) { 235 for (i = 0; i < priv->tx_ring_num; i++) {
201 sprintf(data + (index++) * ETH_GSTRING_LEN, 236 sprintf(data + (index++) * ETH_GSTRING_LEN,
202 "tx%d_packets", i); 237 "tx%d_packets", i);
203 sprintf(data + (index++) * ETH_GSTRING_LEN, 238 sprintf(data + (index++) * ETH_GSTRING_LEN,
204 "tx%d_bytes", i); 239 "tx%d_bytes", i);
205 } 240 }
206 for (i = 0; i < priv->rx_ring_num; i++) { 241 for (i = 0; i < priv->rx_ring_num; i++) {
207 sprintf(data + (index++) * ETH_GSTRING_LEN, 242 sprintf(data + (index++) * ETH_GSTRING_LEN,
208 "rx%d_packets", i); 243 "rx%d_packets", i);
209 sprintf(data + (index++) * ETH_GSTRING_LEN, 244 sprintf(data + (index++) * ETH_GSTRING_LEN,
210 "rx%d_bytes", i); 245 "rx%d_bytes", i);
211 } 246 }
212 for (i = 0; i < NUM_PKT_STATS; i++) 247 for (i = 0; i< NUM_PKT_STATS; i++)
213 strcpy(data + (index++) * ETH_GSTRING_LEN, 248 strcpy(data + (index++) * ETH_GSTRING_LEN,
214 main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]); 249 main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
250 break;
251 }
215} 252}
216 253
217static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 254static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
218{ 255{
256 struct mlx4_en_priv *priv = netdev_priv(dev);
257 int trans_type;
258
219 cmd->autoneg = AUTONEG_DISABLE; 259 cmd->autoneg = AUTONEG_DISABLE;
220 cmd->supported = SUPPORTED_10000baseT_Full; 260 cmd->supported = SUPPORTED_10000baseT_Full;
221 cmd->advertising = ADVERTISED_1000baseT_Full; 261 cmd->advertising = ADVERTISED_10000baseT_Full;
262
263 if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
264 return -ENOMEM;
265
266 trans_type = priv->port_state.transciver;
222 if (netif_carrier_ok(dev)) { 267 if (netif_carrier_ok(dev)) {
223 cmd->speed = SPEED_10000; 268 ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
224 cmd->duplex = DUPLEX_FULL; 269 cmd->duplex = DUPLEX_FULL;
225 } else { 270 } else {
226 cmd->speed = -1; 271 ethtool_cmd_speed_set(cmd, -1);
227 cmd->duplex = -1; 272 cmd->duplex = -1;
228 } 273 }
274
275 if (trans_type > 0 && trans_type <= 0xC) {
276 cmd->port = PORT_FIBRE;
277 cmd->transceiver = XCVR_EXTERNAL;
278 cmd->supported |= SUPPORTED_FIBRE;
279 cmd->advertising |= ADVERTISED_FIBRE;
280 } else if (trans_type == 0x80 || trans_type == 0) {
281 cmd->port = PORT_TP;
282 cmd->transceiver = XCVR_INTERNAL;
283 cmd->supported |= SUPPORTED_TP;
284 cmd->advertising |= ADVERTISED_TP;
285 } else {
286 cmd->port = -1;
287 cmd->transceiver = -1;
288 }
229 return 0; 289 return 0;
230} 290}
231 291
232static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 292static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
233{ 293{
234 if ((cmd->autoneg == AUTONEG_ENABLE) || 294 if ((cmd->autoneg == AUTONEG_ENABLE) ||
235 (cmd->speed != SPEED_10000) || (cmd->duplex != DUPLEX_FULL)) 295 (ethtool_cmd_speed(cmd) != SPEED_10000) ||
296 (cmd->duplex != DUPLEX_FULL))
236 return -EINVAL; 297 return -EINVAL;
237 298
238 /* Nothing to change */ 299 /* Nothing to change */
@@ -343,8 +404,9 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
343 tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE); 404 tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
344 tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE); 405 tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
345 406
346 if (rx_size == priv->prof->rx_ring_size && 407 if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size :
347 tx_size == priv->prof->tx_ring_size) 408 priv->rx_ring[0].size) &&
409 tx_size == priv->tx_ring[0].size)
348 return 0; 410 return 0;
349 411
350 mutex_lock(&mdev->state_lock); 412 mutex_lock(&mdev->state_lock);
@@ -353,7 +415,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
353 mlx4_en_stop_port(dev); 415 mlx4_en_stop_port(dev);
354 } 416 }
355 417
356 mlx4_en_free_resources(priv); 418 mlx4_en_free_resources(priv, true);
357 419
358 priv->prof->tx_ring_size = tx_size; 420 priv->prof->tx_ring_size = tx_size;
359 priv->prof->rx_ring_size = rx_size; 421 priv->prof->rx_ring_size = rx_size;
@@ -378,70 +440,26 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
378 struct ethtool_ringparam *param) 440 struct ethtool_ringparam *param)
379{ 441{
380 struct mlx4_en_priv *priv = netdev_priv(dev); 442 struct mlx4_en_priv *priv = netdev_priv(dev);
381 struct mlx4_en_dev *mdev = priv->mdev;
382 443
383 memset(param, 0, sizeof(*param)); 444 memset(param, 0, sizeof(*param));
384 param->rx_max_pending = MLX4_EN_MAX_RX_SIZE; 445 param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
385 param->tx_max_pending = MLX4_EN_MAX_TX_SIZE; 446 param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
386 param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size; 447 param->rx_pending = priv->port_up ?
387 param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size; 448 priv->rx_ring[0].actual_size : priv->rx_ring[0].size;
388} 449 param->tx_pending = priv->tx_ring[0].size;
389
390static int mlx4_ethtool_op_set_flags(struct net_device *dev, u32 data)
391{
392 struct mlx4_en_priv *priv = netdev_priv(dev);
393 struct mlx4_en_dev *mdev = priv->mdev;
394 int rc = 0;
395 int changed = 0;
396
397 if (data & ~ETH_FLAG_LRO)
398 return -EOPNOTSUPP;
399
400 if (data & ETH_FLAG_LRO) {
401 if (mdev->profile.num_lro == 0)
402 return -EOPNOTSUPP;
403 if (!(dev->features & NETIF_F_LRO))
404 changed = 1;
405 } else if (dev->features & NETIF_F_LRO) {
406 changed = 1;
407 }
408
409 if (changed) {
410 if (netif_running(dev)) {
411 mutex_lock(&mdev->state_lock);
412 mlx4_en_stop_port(dev);
413 }
414 dev->features ^= NETIF_F_LRO;
415 if (netif_running(dev)) {
416 rc = mlx4_en_start_port(dev);
417 if (rc)
418 en_err(priv, "Failed to restart port\n");
419 mutex_unlock(&mdev->state_lock);
420 }
421 }
422
423 return rc;
424} 450}
425 451
426const struct ethtool_ops mlx4_en_ethtool_ops = { 452const struct ethtool_ops mlx4_en_ethtool_ops = {
427 .get_drvinfo = mlx4_en_get_drvinfo, 453 .get_drvinfo = mlx4_en_get_drvinfo,
428 .get_settings = mlx4_en_get_settings, 454 .get_settings = mlx4_en_get_settings,
429 .set_settings = mlx4_en_set_settings, 455 .set_settings = mlx4_en_set_settings,
430#ifdef NETIF_F_TSO
431 .get_tso = mlx4_en_get_tso,
432 .set_tso = mlx4_en_set_tso,
433#endif
434 .get_sg = ethtool_op_get_sg,
435 .set_sg = ethtool_op_set_sg,
436 .get_link = ethtool_op_get_link, 456 .get_link = ethtool_op_get_link,
437 .get_rx_csum = mlx4_en_get_rx_csum,
438 .set_rx_csum = mlx4_en_set_rx_csum,
439 .get_tx_csum = ethtool_op_get_tx_csum,
440 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
441 .get_strings = mlx4_en_get_strings, 457 .get_strings = mlx4_en_get_strings,
442 .get_sset_count = mlx4_en_get_sset_count, 458 .get_sset_count = mlx4_en_get_sset_count,
443 .get_ethtool_stats = mlx4_en_get_ethtool_stats, 459 .get_ethtool_stats = mlx4_en_get_ethtool_stats,
460 .self_test = mlx4_en_self_test,
444 .get_wol = mlx4_en_get_wol, 461 .get_wol = mlx4_en_get_wol,
462 .set_wol = mlx4_en_set_wol,
445 .get_msglevel = mlx4_en_get_msglevel, 463 .get_msglevel = mlx4_en_get_msglevel,
446 .set_msglevel = mlx4_en_set_msglevel, 464 .set_msglevel = mlx4_en_set_msglevel,
447 .get_coalesce = mlx4_en_get_coalesce, 465 .get_coalesce = mlx4_en_get_coalesce,
@@ -450,8 +468,6 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
450 .set_pauseparam = mlx4_en_set_pauseparam, 468 .set_pauseparam = mlx4_en_set_pauseparam,
451 .get_ringparam = mlx4_en_get_ringparam, 469 .get_ringparam = mlx4_en_get_ringparam,
452 .set_ringparam = mlx4_en_set_ringparam, 470 .set_ringparam = mlx4_en_set_ringparam,
453 .get_flags = ethtool_op_get_flags,
454 .set_flags = mlx4_ethtool_op_set_flags,
455}; 471};
456 472
457 473
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 97934f1ec53a..9276b1b25586 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -63,15 +63,12 @@ static const char mlx4_en_version[] =
63 */ 63 */
64 64
65 65
66/* Use a XOR rathern than Toeplitz hash function for RSS */ 66/* Enable RSS TCP traffic */
67MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS"); 67MLX4_EN_PARM_INT(tcp_rss, 1,
68 68 "Enable RSS for incomming TCP traffic or disabled (0)");
69/* RSS hash type mask - default to <saddr, daddr, sport, dport> */ 69/* Enable RSS UDP traffic */
70MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask"); 70MLX4_EN_PARM_INT(udp_rss, 1,
71 71 "Enable RSS for incomming UDP traffic or disabled (0)");
72/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
73MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
74 "Number of LRO sessions per ring or disabled (0)");
75 72
76/* Priority pausing */ 73/* Priority pausing */
77MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]." 74MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
@@ -107,9 +104,12 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
107 struct mlx4_en_profile *params = &mdev->profile; 104 struct mlx4_en_profile *params = &mdev->profile;
108 int i; 105 int i;
109 106
110 params->rss_xor = (rss_xor != 0); 107 params->tcp_rss = tcp_rss;
111 params->rss_mask = rss_mask & 0x1f; 108 params->udp_rss = udp_rss;
112 params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS); 109 if (params->udp_rss && !mdev->dev->caps.udp_rss) {
110 mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
111 params->udp_rss = 0;
112 }
113 for (i = 1; i <= MLX4_MAX_PORTS; i++) { 113 for (i = 1; i <= MLX4_MAX_PORTS; i++) {
114 params->prof[i].rx_pause = 1; 114 params->prof[i].rx_pause = 1;
115 params->prof[i].rx_ppp = pfcrx; 115 params->prof[i].rx_ppp = pfcrx;
@@ -124,6 +124,13 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
124 return 0; 124 return 0;
125} 125}
126 126
127static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
128{
129 struct mlx4_en_dev *endev = ctx;
130
131 return endev->pndev[port];
132}
133
127static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, 134static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
128 enum mlx4_dev_event event, int port) 135 enum mlx4_dev_event event, int port)
129{ 136{
@@ -195,7 +202,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
195 if (mlx4_uar_alloc(dev, &mdev->priv_uar)) 202 if (mlx4_uar_alloc(dev, &mdev->priv_uar))
196 goto err_pd; 203 goto err_pd;
197 204
198 mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 205 mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
206 PAGE_SIZE);
199 if (!mdev->uar_map) 207 if (!mdev->uar_map)
200 goto err_uar; 208 goto err_uar;
201 spin_lock_init(&mdev->uar_lock); 209 spin_lock_init(&mdev->uar_lock);
@@ -228,21 +236,23 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
228 goto err_mr; 236 goto err_mr;
229 } 237 }
230 238
231 /* Configure wich ports to start according to module parameters */ 239 /* Configure which ports to start according to module parameters */
232 mdev->port_cnt = 0; 240 mdev->port_cnt = 0;
233 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) 241 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
234 mdev->port_cnt++; 242 mdev->port_cnt++;
235 243
236 /* If we did not receive an explicit number of Rx rings, default to
237 * the number of completion vectors populated by the mlx4_core */
238 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { 244 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
239 mlx4_info(mdev, "Using %d tx rings for port:%d\n", 245 if (!dev->caps.comp_pool) {
240 mdev->profile.prof[i].tx_ring_num, i); 246 mdev->profile.prof[i].rx_ring_num =
241 mdev->profile.prof[i].rx_ring_num = min_t(int, 247 rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
242 roundup_pow_of_two(dev->caps.num_comp_vectors), 248 min_t(int,
243 MAX_RX_RINGS); 249 dev->caps.num_comp_vectors,
244 mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n", 250 MAX_RX_RINGS)));
245 mdev->profile.prof[i].rx_ring_num, i); 251 } else {
252 mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
253 min_t(int, dev->caps.comp_pool/
254 dev->caps.num_ports - 1 , MAX_MSIX_P_PORT - 1));
255 }
246 } 256 }
247 257
248 /* Create our own workqueue for reset/multicast tasks 258 /* Create our own workqueue for reset/multicast tasks
@@ -282,9 +292,11 @@ err_free_res:
282} 292}
283 293
284static struct mlx4_interface mlx4_en_interface = { 294static struct mlx4_interface mlx4_en_interface = {
285 .add = mlx4_en_add, 295 .add = mlx4_en_add,
286 .remove = mlx4_en_remove, 296 .remove = mlx4_en_remove,
287 .event = mlx4_en_event, 297 .event = mlx4_en_event,
298 .get_dev = mlx4_en_get_netdev,
299 .protocol = MLX4_PROT_ETH,
288}; 300};
289 301
290static int __init mlx4_en_init(void) 302static int __init mlx4_en_init(void)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index a0d8a26f5a02..61850adae6f7 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -69,6 +69,7 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
69 struct mlx4_en_priv *priv = netdev_priv(dev); 69 struct mlx4_en_priv *priv = netdev_priv(dev);
70 struct mlx4_en_dev *mdev = priv->mdev; 70 struct mlx4_en_dev *mdev = priv->mdev;
71 int err; 71 int err;
72 int idx;
72 73
73 if (!priv->vlgrp) 74 if (!priv->vlgrp)
74 return; 75 return;
@@ -83,7 +84,10 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
83 if (err) 84 if (err)
84 en_err(priv, "Failed configuring VLAN filter\n"); 85 en_err(priv, "Failed configuring VLAN filter\n");
85 } 86 }
87 if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
88 en_err(priv, "failed adding vlan %d\n", vid);
86 mutex_unlock(&mdev->state_lock); 89 mutex_unlock(&mdev->state_lock);
90
87} 91}
88 92
89static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 93static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
@@ -91,6 +95,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
91 struct mlx4_en_priv *priv = netdev_priv(dev); 95 struct mlx4_en_priv *priv = netdev_priv(dev);
92 struct mlx4_en_dev *mdev = priv->mdev; 96 struct mlx4_en_dev *mdev = priv->mdev;
93 int err; 97 int err;
98 int idx;
94 99
95 if (!priv->vlgrp) 100 if (!priv->vlgrp)
96 return; 101 return;
@@ -101,6 +106,11 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
101 106
102 /* Remove VID from port VLAN filter */ 107 /* Remove VID from port VLAN filter */
103 mutex_lock(&mdev->state_lock); 108 mutex_lock(&mdev->state_lock);
109 if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
110 mlx4_unregister_vlan(mdev->dev, priv->port, idx);
111 else
112 en_err(priv, "could not find vid %d in cache\n", vid);
113
104 if (mdev->device_up && priv->port_up) { 114 if (mdev->device_up && priv->port_up) {
105 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); 115 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
106 if (err) 116 if (err)
@@ -109,7 +119,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
109 mutex_unlock(&mdev->state_lock); 119 mutex_unlock(&mdev->state_lock);
110} 120}
111 121
112static u64 mlx4_en_mac_to_u64(u8 *addr) 122u64 mlx4_en_mac_to_u64(u8 *addr)
113{ 123{
114 u64 mac = 0; 124 u64 mac = 0;
115 int i; 125 int i;
@@ -146,9 +156,8 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
146 mutex_lock(&mdev->state_lock); 156 mutex_lock(&mdev->state_lock);
147 if (priv->port_up) { 157 if (priv->port_up) {
148 /* Remove old MAC and insert the new one */ 158 /* Remove old MAC and insert the new one */
149 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); 159 err = mlx4_replace_mac(mdev->dev, priv->port,
150 err = mlx4_register_mac(mdev->dev, priv->port, 160 priv->base_qpn, priv->mac, 0);
151 priv->mac, &priv->mac_index);
152 if (err) 161 if (err)
153 en_err(priv, "Failed changing HW MAC address\n"); 162 en_err(priv, "Failed changing HW MAC address\n");
154 } else 163 } else
@@ -204,6 +213,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
204 struct mlx4_en_dev *mdev = priv->mdev; 213 struct mlx4_en_dev *mdev = priv->mdev;
205 struct net_device *dev = priv->dev; 214 struct net_device *dev = priv->dev;
206 u64 mcast_addr = 0; 215 u64 mcast_addr = 0;
216 u8 mc_list[16] = {0};
207 int err; 217 int err;
208 218
209 mutex_lock(&mdev->state_lock); 219 mutex_lock(&mdev->state_lock);
@@ -229,11 +239,15 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
229 priv->flags |= MLX4_EN_FLAG_PROMISC; 239 priv->flags |= MLX4_EN_FLAG_PROMISC;
230 240
231 /* Enable promiscouos mode */ 241 /* Enable promiscouos mode */
232 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, 242 if (!mdev->dev->caps.vep_uc_steering)
233 priv->base_qpn, 1); 243 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
244 priv->base_qpn, 1);
245 else
246 err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
247 priv->port);
234 if (err) 248 if (err)
235 en_err(priv, "Failed enabling " 249 en_err(priv, "Failed enabling "
236 "promiscous mode\n"); 250 "promiscuous mode\n");
237 251
238 /* Disable port multicast filter (unconditionally) */ 252 /* Disable port multicast filter (unconditionally) */
239 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 253 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
@@ -242,16 +256,27 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
242 en_err(priv, "Failed disabling " 256 en_err(priv, "Failed disabling "
243 "multicast filter\n"); 257 "multicast filter\n");
244 258
245 /* Disable port VLAN filter */ 259 /* Add the default qp number as multicast promisc */
246 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL); 260 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
247 if (err) 261 err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
248 en_err(priv, "Failed disabling VLAN filter\n"); 262 priv->port);
263 if (err)
264 en_err(priv, "Failed entering multicast promisc mode\n");
265 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
266 }
267
268 if (priv->vlgrp) {
269 /* Disable port VLAN filter */
270 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
271 if (err)
272 en_err(priv, "Failed disabling VLAN filter\n");
273 }
249 } 274 }
250 goto out; 275 goto out;
251 } 276 }
252 277
253 /* 278 /*
254 * Not in promiscous mode 279 * Not in promiscuous mode
255 */ 280 */
256 281
257 if (priv->flags & MLX4_EN_FLAG_PROMISC) { 282 if (priv->flags & MLX4_EN_FLAG_PROMISC) {
@@ -260,10 +285,23 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
260 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 285 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
261 286
262 /* Disable promiscouos mode */ 287 /* Disable promiscouos mode */
263 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, 288 if (!mdev->dev->caps.vep_uc_steering)
264 priv->base_qpn, 0); 289 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
290 priv->base_qpn, 0);
291 else
292 err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
293 priv->port);
265 if (err) 294 if (err)
266 en_err(priv, "Failed disabling promiscous mode\n"); 295 en_err(priv, "Failed disabling promiscuous mode\n");
296
297 /* Disable Multicast promisc */
298 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
299 err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
300 priv->port);
301 if (err)
302 en_err(priv, "Failed disabling multicast promiscuous mode\n");
303 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
304 }
267 305
268 /* Enable port VLAN filter */ 306 /* Enable port VLAN filter */
269 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); 307 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
@@ -277,14 +315,38 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
277 0, MLX4_MCAST_DISABLE); 315 0, MLX4_MCAST_DISABLE);
278 if (err) 316 if (err)
279 en_err(priv, "Failed disabling multicast filter\n"); 317 en_err(priv, "Failed disabling multicast filter\n");
318
319 /* Add the default qp number as multicast promisc */
320 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
321 err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
322 priv->port);
323 if (err)
324 en_err(priv, "Failed entering multicast promisc mode\n");
325 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
326 }
280 } else { 327 } else {
281 int i; 328 int i;
329 /* Disable Multicast promisc */
330 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
331 err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
332 priv->port);
333 if (err)
334 en_err(priv, "Failed disabling multicast promiscuous mode\n");
335 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
336 }
282 337
283 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 338 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
284 0, MLX4_MCAST_DISABLE); 339 0, MLX4_MCAST_DISABLE);
285 if (err) 340 if (err)
286 en_err(priv, "Failed disabling multicast filter\n"); 341 en_err(priv, "Failed disabling multicast filter\n");
287 342
343 /* Detach our qp from all the multicast addresses */
344 for (i = 0; i < priv->mc_addrs_cnt; i++) {
345 memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
346 mc_list[5] = priv->port;
347 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
348 mc_list, MLX4_PROT_ETH);
349 }
288 /* Flush mcast filter and init it with broadcast address */ 350 /* Flush mcast filter and init it with broadcast address */
289 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, 351 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
290 1, MLX4_MCAST_CONFIG); 352 1, MLX4_MCAST_CONFIG);
@@ -297,6 +359,10 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
297 for (i = 0; i < priv->mc_addrs_cnt; i++) { 359 for (i = 0; i < priv->mc_addrs_cnt; i++) {
298 mcast_addr = 360 mcast_addr =
299 mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN); 361 mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
362 memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
363 mc_list[5] = priv->port;
364 mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
365 mc_list, 0, MLX4_PROT_ETH);
300 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 366 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
301 mcast_addr, 0, MLX4_MCAST_CONFIG); 367 mcast_addr, 0, MLX4_MCAST_CONFIG);
302 } 368 }
@@ -304,8 +370,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
304 0, MLX4_MCAST_ENABLE); 370 0, MLX4_MCAST_ENABLE);
305 if (err) 371 if (err)
306 en_err(priv, "Failed enabling multicast filter\n"); 372 en_err(priv, "Failed enabling multicast filter\n");
307
308 mlx4_en_clear_list(dev);
309 } 373 }
310out: 374out:
311 mutex_unlock(&mdev->state_lock); 375 mutex_unlock(&mdev->state_lock);
@@ -407,7 +471,6 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
407 unsigned long avg_pkt_size; 471 unsigned long avg_pkt_size;
408 unsigned long rx_packets; 472 unsigned long rx_packets;
409 unsigned long rx_bytes; 473 unsigned long rx_bytes;
410 unsigned long rx_byte_diff;
411 unsigned long tx_packets; 474 unsigned long tx_packets;
412 unsigned long tx_pkt_diff; 475 unsigned long tx_pkt_diff;
413 unsigned long rx_pkt_diff; 476 unsigned long rx_pkt_diff;
@@ -431,25 +494,20 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
431 rx_pkt_diff = ((unsigned long) (rx_packets - 494 rx_pkt_diff = ((unsigned long) (rx_packets -
432 priv->last_moder_packets)); 495 priv->last_moder_packets));
433 packets = max(tx_pkt_diff, rx_pkt_diff); 496 packets = max(tx_pkt_diff, rx_pkt_diff);
434 rx_byte_diff = rx_bytes - priv->last_moder_bytes;
435 rx_byte_diff = rx_byte_diff ? rx_byte_diff : 1;
436 rate = packets * HZ / period; 497 rate = packets * HZ / period;
437 avg_pkt_size = packets ? ((unsigned long) (rx_bytes - 498 avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
438 priv->last_moder_bytes)) / packets : 0; 499 priv->last_moder_bytes)) / packets : 0;
439 500
440 /* Apply auto-moderation only when packet rate exceeds a rate that 501 /* Apply auto-moderation only when packet rate exceeds a rate that
441 * it matters */ 502 * it matters */
442 if (rate > MLX4_EN_RX_RATE_THRESH) { 503 if (rate > MLX4_EN_RX_RATE_THRESH && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
443 /* If tx and rx packet rates are not balanced, assume that 504 /* If tx and rx packet rates are not balanced, assume that
444 * traffic is mainly BW bound and apply maximum moderation. 505 * traffic is mainly BW bound and apply maximum moderation.
445 * Otherwise, moderate according to packet rate */ 506 * Otherwise, moderate according to packet rate */
446 if (2 * tx_pkt_diff > 3 * rx_pkt_diff && 507 if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
447 rx_pkt_diff / rx_byte_diff < 508 2 * rx_pkt_diff > 3 * tx_pkt_diff) {
448 MLX4_EN_SMALL_PKT_SIZE)
449 moder_time = priv->rx_usecs_low;
450 else if (2 * rx_pkt_diff > 3 * tx_pkt_diff)
451 moder_time = priv->rx_usecs_high; 509 moder_time = priv->rx_usecs_high;
452 else { 510 } else {
453 if (rate < priv->pkt_rate_low) 511 if (rate < priv->pkt_rate_low)
454 moder_time = priv->rx_usecs_low; 512 moder_time = priv->rx_usecs_low;
455 else if (rate > priv->pkt_rate_high) 513 else if (rate > priv->pkt_rate_high)
@@ -461,9 +519,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
461 priv->rx_usecs_low; 519 priv->rx_usecs_low;
462 } 520 }
463 } else { 521 } else {
464 /* When packet rate is low, use default moderation rather than 522 moder_time = priv->rx_usecs_low;
465 * 0 to prevent interrupt storms if traffic suddenly increases */
466 moder_time = priv->rx_usecs;
467 } 523 }
468 524
469 en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n", 525 en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
@@ -513,6 +569,10 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
513 569
514 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 570 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
515 } 571 }
572 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
573 queue_work(mdev->workqueue, &priv->mac_task);
574 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
575 }
516 mutex_unlock(&mdev->state_lock); 576 mutex_unlock(&mdev->state_lock);
517} 577}
518 578
@@ -528,10 +588,10 @@ static void mlx4_en_linkstate(struct work_struct *work)
528 * report to system log */ 588 * report to system log */
529 if (priv->last_link_state != linkstate) { 589 if (priv->last_link_state != linkstate) {
530 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { 590 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
531 en_dbg(LINK, priv, "Link Down\n"); 591 en_info(priv, "Link Down\n");
532 netif_carrier_off(priv->dev); 592 netif_carrier_off(priv->dev);
533 } else { 593 } else {
534 en_dbg(LINK, priv, "Link Up\n"); 594 en_info(priv, "Link Up\n");
535 netif_carrier_on(priv->dev); 595 netif_carrier_on(priv->dev);
536 } 596 }
537 } 597 }
@@ -551,6 +611,8 @@ int mlx4_en_start_port(struct net_device *dev)
551 int err = 0; 611 int err = 0;
552 int i; 612 int i;
553 int j; 613 int j;
614 u8 mc_list[16] = {0};
615 char name[32];
554 616
555 if (priv->port_up) { 617 if (priv->port_up) {
556 en_dbg(DRV, priv, "start port called while port already up\n"); 618 en_dbg(DRV, priv, "start port called while port already up\n");
@@ -589,16 +651,35 @@ int mlx4_en_start_port(struct net_device *dev)
589 ++rx_index; 651 ++rx_index;
590 } 652 }
591 653
654 /* Set port mac number */
655 en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
656 err = mlx4_register_mac(mdev->dev, priv->port,
657 priv->mac, &priv->base_qpn, 0);
658 if (err) {
659 en_err(priv, "Failed setting port mac\n");
660 goto cq_err;
661 }
662 mdev->mac_removed[priv->port] = 0;
663
592 err = mlx4_en_config_rss_steer(priv); 664 err = mlx4_en_config_rss_steer(priv);
593 if (err) { 665 if (err) {
594 en_err(priv, "Failed configuring rss steering\n"); 666 en_err(priv, "Failed configuring rss steering\n");
595 goto cq_err; 667 goto mac_err;
596 } 668 }
597 669
670 if (mdev->dev->caps.comp_pool && !priv->tx_vector) {
671 sprintf(name , "%s-tx", priv->dev->name);
672 if (mlx4_assign_eq(mdev->dev , name, &priv->tx_vector)) {
673 mlx4_warn(mdev, "Failed Assigning an EQ to "
674 "%s_tx ,Falling back to legacy "
675 "EQ's\n", priv->dev->name);
676 }
677 }
598 /* Configure tx cq's and rings */ 678 /* Configure tx cq's and rings */
599 for (i = 0; i < priv->tx_ring_num; i++) { 679 for (i = 0; i < priv->tx_ring_num; i++) {
600 /* Configure cq */ 680 /* Configure cq */
601 cq = &priv->tx_cq[i]; 681 cq = &priv->tx_cq[i];
682 cq->vector = priv->tx_vector;
602 err = mlx4_en_activate_cq(priv, cq); 683 err = mlx4_en_activate_cq(priv, cq);
603 if (err) { 684 if (err) {
604 en_err(priv, "Failed allocating Tx CQ\n"); 685 en_err(priv, "Failed allocating Tx CQ\n");
@@ -645,23 +726,25 @@ int mlx4_en_start_port(struct net_device *dev)
645 en_err(priv, "Failed setting default qp numbers\n"); 726 en_err(priv, "Failed setting default qp numbers\n");
646 goto tx_err; 727 goto tx_err;
647 } 728 }
648 /* Set port mac number */
649 en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
650 err = mlx4_register_mac(mdev->dev, priv->port,
651 priv->mac, &priv->mac_index);
652 if (err) {
653 en_err(priv, "Failed setting port mac\n");
654 goto tx_err;
655 }
656 729
657 /* Init port */ 730 /* Init port */
658 en_dbg(HW, priv, "Initializing port\n"); 731 en_dbg(HW, priv, "Initializing port\n");
659 err = mlx4_INIT_PORT(mdev->dev, priv->port); 732 err = mlx4_INIT_PORT(mdev->dev, priv->port);
660 if (err) { 733 if (err) {
661 en_err(priv, "Failed Initializing port\n"); 734 en_err(priv, "Failed Initializing port\n");
662 goto mac_err; 735 goto tx_err;
663 } 736 }
664 737
738 /* Attach rx QP to bradcast address */
739 memset(&mc_list[10], 0xff, ETH_ALEN);
740 mc_list[5] = priv->port;
741 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
742 0, MLX4_PROT_ETH))
743 mlx4_warn(mdev, "Failed Attaching Broadcast\n");
744
745 /* Must redo promiscuous mode setup. */
746 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
747
665 /* Schedule multicast task to populate multicast list */ 748 /* Schedule multicast task to populate multicast list */
666 queue_work(mdev->workqueue, &priv->mcast_task); 749 queue_work(mdev->workqueue, &priv->mcast_task);
667 750
@@ -669,8 +752,6 @@ int mlx4_en_start_port(struct net_device *dev)
669 netif_tx_start_all_queues(dev); 752 netif_tx_start_all_queues(dev);
670 return 0; 753 return 0;
671 754
672mac_err:
673 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
674tx_err: 755tx_err:
675 while (tx_index--) { 756 while (tx_index--) {
676 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]); 757 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
@@ -678,6 +759,8 @@ tx_err:
678 } 759 }
679 760
680 mlx4_en_release_rss_steer(priv); 761 mlx4_en_release_rss_steer(priv);
762mac_err:
763 mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
681cq_err: 764cq_err:
682 while (rx_index--) 765 while (rx_index--)
683 mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]); 766 mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -693,6 +776,7 @@ void mlx4_en_stop_port(struct net_device *dev)
693 struct mlx4_en_priv *priv = netdev_priv(dev); 776 struct mlx4_en_priv *priv = netdev_priv(dev);
694 struct mlx4_en_dev *mdev = priv->mdev; 777 struct mlx4_en_dev *mdev = priv->mdev;
695 int i; 778 int i;
779 u8 mc_list[16] = {0};
696 780
697 if (!priv->port_up) { 781 if (!priv->port_up) {
698 en_dbg(DRV, priv, "stop port called while port already down\n"); 782 en_dbg(DRV, priv, "stop port called while port already down\n");
@@ -704,12 +788,27 @@ void mlx4_en_stop_port(struct net_device *dev)
704 netif_tx_stop_all_queues(dev); 788 netif_tx_stop_all_queues(dev);
705 netif_tx_unlock_bh(dev); 789 netif_tx_unlock_bh(dev);
706 790
707 /* close port*/ 791 /* Set port as not active */
708 priv->port_up = false; 792 priv->port_up = false;
709 mlx4_CLOSE_PORT(mdev->dev, priv->port); 793
794 /* Detach All multicasts */
795 memset(&mc_list[10], 0xff, ETH_ALEN);
796 mc_list[5] = priv->port;
797 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
798 MLX4_PROT_ETH);
799 for (i = 0; i < priv->mc_addrs_cnt; i++) {
800 memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
801 mc_list[5] = priv->port;
802 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
803 mc_list, MLX4_PROT_ETH);
804 }
805 mlx4_en_clear_list(dev);
806 /* Flush multicast filter */
807 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
710 808
711 /* Unregister Mac address for the port */ 809 /* Unregister Mac address for the port */
712 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); 810 mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
811 mdev->mac_removed[priv->port] = 1;
713 812
714 /* Free TX Rings */ 813 /* Free TX Rings */
715 for (i = 0; i < priv->tx_ring_num; i++) { 814 for (i = 0; i < priv->tx_ring_num; i++) {
@@ -731,6 +830,9 @@ void mlx4_en_stop_port(struct net_device *dev)
731 msleep(1); 830 msleep(1);
732 mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]); 831 mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
733 } 832 }
833
834 /* close port*/
835 mlx4_CLOSE_PORT(mdev->dev, priv->port);
734} 836}
735 837
736static void mlx4_en_restart(struct work_struct *work) 838static void mlx4_en_restart(struct work_struct *work)
@@ -783,7 +885,6 @@ static int mlx4_en_open(struct net_device *dev)
783 priv->rx_ring[i].packets = 0; 885 priv->rx_ring[i].packets = 0;
784 } 886 }
785 887
786 mlx4_en_set_default_moderation(priv);
787 err = mlx4_en_start_port(dev); 888 err = mlx4_en_start_port(dev);
788 if (err) 889 if (err)
789 en_err(priv, "Failed starting port:%d\n", priv->port); 890 en_err(priv, "Failed starting port:%d\n", priv->port);
@@ -810,7 +911,7 @@ static int mlx4_en_close(struct net_device *dev)
810 return 0; 911 return 0;
811} 912}
812 913
813void mlx4_en_free_resources(struct mlx4_en_priv *priv) 914void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors)
814{ 915{
815 int i; 916 int i;
816 917
@@ -818,14 +919,14 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
818 if (priv->tx_ring[i].tx_info) 919 if (priv->tx_ring[i].tx_info)
819 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); 920 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
820 if (priv->tx_cq[i].buf) 921 if (priv->tx_cq[i].buf)
821 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); 922 mlx4_en_destroy_cq(priv, &priv->tx_cq[i], reserve_vectors);
822 } 923 }
823 924
824 for (i = 0; i < priv->rx_ring_num; i++) { 925 for (i = 0; i < priv->rx_ring_num; i++) {
825 if (priv->rx_ring[i].rx_info) 926 if (priv->rx_ring[i].rx_info)
826 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]); 927 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
827 if (priv->rx_cq[i].buf) 928 if (priv->rx_cq[i].buf)
828 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 929 mlx4_en_destroy_cq(priv, &priv->rx_cq[i], reserve_vectors);
829 } 930 }
830} 931}
831 932
@@ -833,6 +934,13 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
833{ 934{
834 struct mlx4_en_port_profile *prof = priv->prof; 935 struct mlx4_en_port_profile *prof = priv->prof;
835 int i; 936 int i;
937 int base_tx_qpn, err;
938
939 err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
940 if (err) {
941 en_err(priv, "failed reserving range for TX rings\n");
942 return err;
943 }
836 944
837 /* Create tx Rings */ 945 /* Create tx Rings */
838 for (i = 0; i < priv->tx_ring_num; i++) { 946 for (i = 0; i < priv->tx_ring_num; i++) {
@@ -840,7 +948,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
840 prof->tx_ring_size, i, TX)) 948 prof->tx_ring_size, i, TX))
841 goto err; 949 goto err;
842 950
843 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], 951 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
844 prof->tx_ring_size, TXBB_SIZE)) 952 prof->tx_ring_size, TXBB_SIZE))
845 goto err; 953 goto err;
846 } 954 }
@@ -860,6 +968,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
860 968
861err: 969err:
862 en_err(priv, "Failed to allocate NIC resources\n"); 970 en_err(priv, "Failed to allocate NIC resources\n");
971 mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
863 return -ENOMEM; 972 return -ENOMEM;
864} 973}
865 974
@@ -887,7 +996,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
887 mdev->pndev[priv->port] = NULL; 996 mdev->pndev[priv->port] = NULL;
888 mutex_unlock(&mdev->state_lock); 997 mutex_unlock(&mdev->state_lock);
889 998
890 mlx4_en_free_resources(priv); 999 mlx4_en_free_resources(priv, false);
891 free_netdev(dev); 1000 free_netdev(dev);
892} 1001}
893 1002
@@ -914,7 +1023,6 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
914 en_dbg(DRV, priv, "Change MTU called with card down!?\n"); 1023 en_dbg(DRV, priv, "Change MTU called with card down!?\n");
915 } else { 1024 } else {
916 mlx4_en_stop_port(dev); 1025 mlx4_en_stop_port(dev);
917 mlx4_en_set_default_moderation(priv);
918 err = mlx4_en_start_port(dev); 1026 err = mlx4_en_start_port(dev);
919 if (err) { 1027 if (err) {
920 en_err(priv, "Failed restarting port:%d\n", 1028 en_err(priv, "Failed restarting port:%d\n",
@@ -954,7 +1062,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
954 int i; 1062 int i;
955 int err; 1063 int err;
956 1064
957 dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num); 1065 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
1066 prof->tx_ring_num, prof->rx_ring_num);
958 if (dev == NULL) { 1067 if (dev == NULL) {
959 mlx4_err(mdev, "Net device allocation failed\n"); 1068 mlx4_err(mdev, "Net device allocation failed\n");
960 return -ENOMEM; 1069 return -ENOMEM;
@@ -974,7 +1083,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
974 priv->prof = prof; 1083 priv->prof = prof;
975 priv->port = port; 1084 priv->port = port;
976 priv->port_up = false; 1085 priv->port_up = false;
977 priv->rx_csum = 1;
978 priv->flags = prof->flags; 1086 priv->flags = prof->flags;
979 priv->tx_ring_num = prof->tx_ring_num; 1087 priv->tx_ring_num = prof->tx_ring_num;
980 priv->rx_ring_num = prof->rx_ring_num; 1088 priv->rx_ring_num = prof->rx_ring_num;
@@ -1017,35 +1125,31 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1017 */ 1125 */
1018 dev->netdev_ops = &mlx4_netdev_ops; 1126 dev->netdev_ops = &mlx4_netdev_ops;
1019 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; 1127 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
1020 dev->real_num_tx_queues = MLX4_EN_NUM_TX_RINGS; 1128 netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
1129 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
1021 1130
1022 SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops); 1131 SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
1023 1132
1024 /* Set defualt MAC */ 1133 /* Set defualt MAC */
1025 dev->addr_len = ETH_ALEN; 1134 dev->addr_len = ETH_ALEN;
1026 for (i = 0; i < ETH_ALEN; i++) 1135 for (i = 0; i < ETH_ALEN; i++) {
1027 dev->dev_addr[ETH_ALEN - 1 - i] = 1136 dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
1028 (u8) (priv->mac >> (8 * i)); 1137 dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
1138 }
1029 1139
1030 /* 1140 /*
1031 * Set driver features 1141 * Set driver features
1032 */ 1142 */
1033 dev->features |= NETIF_F_SG; 1143 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1034 dev->vlan_features |= NETIF_F_SG; 1144 if (mdev->LSO_support)
1035 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1145 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1036 dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1146
1037 dev->features |= NETIF_F_HIGHDMA; 1147 dev->vlan_features = dev->hw_features;
1038 dev->features |= NETIF_F_HW_VLAN_TX | 1148
1039 NETIF_F_HW_VLAN_RX | 1149 dev->hw_features |= NETIF_F_RXCSUM;
1040 NETIF_F_HW_VLAN_FILTER; 1150 dev->features = dev->hw_features | NETIF_F_HIGHDMA |
1041 if (mdev->profile.num_lro) 1151 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
1042 dev->features |= NETIF_F_LRO; 1152 NETIF_F_HW_VLAN_FILTER;
1043 if (mdev->LSO_support) {
1044 dev->features |= NETIF_F_TSO;
1045 dev->features |= NETIF_F_TSO6;
1046 dev->vlan_features |= NETIF_F_TSO;
1047 dev->vlan_features |= NETIF_F_TSO6;
1048 }
1049 1153
1050 mdev->pndev[port] = dev; 1154 mdev->pndev[port] = dev;
1051 1155
@@ -1059,7 +1163,25 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1059 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); 1163 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
1060 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 1164 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
1061 1165
1166 /* Configure port */
1167 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1168 MLX4_EN_MIN_MTU,
1169 0, 0, 0, 0);
1170 if (err) {
1171 en_err(priv, "Failed setting port general configurations "
1172 "for port %d, with error %d\n", priv->port, err);
1173 goto out;
1174 }
1175
1176 /* Init port */
1177 en_warn(priv, "Initializing port\n");
1178 err = mlx4_INIT_PORT(mdev->dev, priv->port);
1179 if (err) {
1180 en_err(priv, "Failed Initializing port\n");
1181 goto out;
1182 }
1062 priv->registered = 1; 1183 priv->registered = 1;
1184 mlx4_en_set_default_moderation(priv);
1063 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1185 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1064 return 0; 1186 return 0;
1065 1187
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index a29abe845d2e..f2a4f5dd313d 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -119,6 +119,10 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
119 struct mlx4_set_port_rqp_calc_context *context; 119 struct mlx4_set_port_rqp_calc_context *context;
120 int err; 120 int err;
121 u32 in_mod; 121 u32 in_mod;
122 u32 m_promisc = (dev->caps.vep_mc_steering) ? MCAST_DIRECT : MCAST_DEFAULT;
123
124 if (dev->caps.vep_mc_steering && dev->caps.vep_uc_steering)
125 return 0;
122 126
123 mailbox = mlx4_alloc_cmd_mailbox(dev); 127 mailbox = mlx4_alloc_cmd_mailbox(dev);
124 if (IS_ERR(mailbox)) 128 if (IS_ERR(mailbox))
@@ -127,8 +131,11 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
127 memset(context, 0, sizeof *context); 131 memset(context, 0, sizeof *context);
128 132
129 context->base_qpn = cpu_to_be32(base_qpn); 133 context->base_qpn = cpu_to_be32(base_qpn);
130 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn); 134 context->n_mac = 0x7;
131 context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn); 135 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
136 base_qpn);
137 context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
138 base_qpn);
132 context->intra_no_vlan = 0; 139 context->intra_no_vlan = 0;
133 context->no_vlan = MLX4_NO_VLAN_IDX; 140 context->no_vlan = MLX4_NO_VLAN_IDX;
134 context->intra_vlan_miss = 0; 141 context->intra_vlan_miss = 0;
@@ -142,6 +149,38 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
142 return err; 149 return err;
143} 150}
144 151
152int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
153{
154 struct mlx4_en_query_port_context *qport_context;
155 struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
156 struct mlx4_en_port_state *state = &priv->port_state;
157 struct mlx4_cmd_mailbox *mailbox;
158 int err;
159
160 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
161 if (IS_ERR(mailbox))
162 return PTR_ERR(mailbox);
163 memset(mailbox->buf, 0, sizeof(*qport_context));
164 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
165 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B);
166 if (err)
167 goto out;
168 qport_context = mailbox->buf;
169
170 /* This command is always accessed from Ethtool context
171 * already synchronized, no need in locking */
172 state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
173 if ((qport_context->link_speed & MLX4_EN_SPEED_MASK) ==
174 MLX4_EN_1G_SPEED)
175 state->link_speed = 1000;
176 else
177 state->link_speed = 10000;
178 state->transciver = qport_context->transceiver;
179
180out:
181 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
182 return err;
183}
145 184
146int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) 185int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
147{ 186{
@@ -174,7 +213,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
174 } 213 }
175 stats->tx_packets = 0; 214 stats->tx_packets = 0;
176 stats->tx_bytes = 0; 215 stats->tx_bytes = 0;
177 for (i = 0; i <= priv->tx_ring_num; i++) { 216 for (i = 0; i < priv->tx_ring_num; i++) {
178 stats->tx_packets += priv->tx_ring[i].packets; 217 stats->tx_packets += priv->tx_ring[i].packets;
179 stats->tx_bytes += priv->tx_ring[i].bytes; 218 stats->tx_bytes += priv->tx_ring[i].bytes;
180 } 219 }
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
index e6477f12beb5..e3d73e41c567 100644
--- a/drivers/net/mlx4/en_port.h
+++ b/drivers/net/mlx4/en_port.h
@@ -37,6 +37,7 @@
37 37
38#define SET_PORT_GEN_ALL_VALID 0x7 38#define SET_PORT_GEN_ALL_VALID 0x7
39#define SET_PORT_PROMISC_SHIFT 31 39#define SET_PORT_PROMISC_SHIFT 31
40#define SET_PORT_MC_PROMISC_SHIFT 30
40 41
41enum { 42enum {
42 MLX4_CMD_SET_VLAN_FLTR = 0x47, 43 MLX4_CMD_SET_VLAN_FLTR = 0x47,
@@ -44,6 +45,12 @@ enum {
44 MLX4_CMD_DUMP_ETH_STATS = 0x49, 45 MLX4_CMD_DUMP_ETH_STATS = 0x49,
45}; 46};
46 47
48enum {
49 MCAST_DIRECT_ONLY = 0,
50 MCAST_DIRECT = 1,
51 MCAST_DEFAULT = 2
52};
53
47struct mlx4_set_port_general_context { 54struct mlx4_set_port_general_context {
48 u8 reserved[3]; 55 u8 reserved[3];
49 u8 flags; 56 u8 flags;
@@ -59,14 +66,17 @@ struct mlx4_set_port_general_context {
59 66
60struct mlx4_set_port_rqp_calc_context { 67struct mlx4_set_port_rqp_calc_context {
61 __be32 base_qpn; 68 __be32 base_qpn;
62 __be32 flags; 69 u8 rererved;
63 u8 reserved[3]; 70 u8 n_mac;
71 u8 n_vlan;
72 u8 n_prio;
73 u8 reserved2[3];
64 u8 mac_miss; 74 u8 mac_miss;
65 u8 intra_no_vlan; 75 u8 intra_no_vlan;
66 u8 no_vlan; 76 u8 no_vlan;
67 u8 intra_vlan_miss; 77 u8 intra_vlan_miss;
68 u8 vlan_miss; 78 u8 vlan_miss;
69 u8 reserved2[3]; 79 u8 reserved3[3];
70 u8 no_vlan_prio; 80 u8 no_vlan_prio;
71 __be32 promisc; 81 __be32 promisc;
72 __be32 mcast; 82 __be32 mcast;
@@ -84,6 +94,20 @@ enum {
84 MLX4_MCAST_ENABLE = 2, 94 MLX4_MCAST_ENABLE = 2,
85}; 95};
86 96
97struct mlx4_en_query_port_context {
98 u8 link_up;
99#define MLX4_EN_LINK_UP_MASK 0x80
100 u8 reserved;
101 __be16 mtu;
102 u8 reserved2;
103 u8 link_speed;
104#define MLX4_EN_SPEED_MASK 0x3
105#define MLX4_EN_1G_SPEED 0x2
106 u16 reserved3[5];
107 __be64 mac;
108 u8 transceiver;
109};
110
87 111
88struct mlx4_en_stat_out_mbox { 112struct mlx4_en_stat_out_mbox {
89 /* Received frames with a length of 64 octets */ 113 /* Received frames with a length of 64 octets */
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 8e2fcb7103c3..277215fb9d72 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -42,18 +42,6 @@
42#include "mlx4_en.h" 42#include "mlx4_en.h"
43 43
44 44
45static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
46 void **ip_hdr, void **tcpudp_hdr,
47 u64 *hdr_flags, void *priv)
48{
49 *mac_hdr = page_address(frags->page) + frags->page_offset;
50 *ip_hdr = *mac_hdr + ETH_HLEN;
51 *tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
52 *hdr_flags = LRO_IPV4 | LRO_TCP;
53
54 return 0;
55}
56
57static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv, 45static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
58 struct mlx4_en_rx_desc *rx_desc, 46 struct mlx4_en_rx_desc *rx_desc,
59 struct skb_frag_struct *skb_frags, 47 struct skb_frag_struct *skb_frags,
@@ -251,7 +239,6 @@ reduce_rings:
251 ring->prod--; 239 ring->prod--;
252 mlx4_en_free_rx_desc(priv, ring, ring->actual_size); 240 mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
253 } 241 }
254 ring->size_mask = ring->actual_size - 1;
255 } 242 }
256 243
257 return 0; 244 return 0;
@@ -313,28 +300,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
313 } 300 }
314 ring->buf = ring->wqres.buf.direct.buf; 301 ring->buf = ring->wqres.buf.direct.buf;
315 302
316 /* Configure lro mngr */
317 memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
318 ring->lro.dev = priv->dev;
319 ring->lro.features = LRO_F_NAPI;
320 ring->lro.frag_align_pad = NET_IP_ALIGN;
321 ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
322 ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
323 ring->lro.max_desc = mdev->profile.num_lro;
324 ring->lro.max_aggr = MAX_SKB_FRAGS;
325 ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
326 sizeof(struct net_lro_desc),
327 GFP_KERNEL);
328 if (!ring->lro.lro_arr) {
329 en_err(priv, "Failed to allocate lro array\n");
330 goto err_map;
331 }
332 ring->lro.get_frag_header = mlx4_en_get_frag_header;
333
334 return 0; 303 return 0;
335 304
336err_map:
337 mlx4_en_unmap_buffer(&ring->wqres.buf);
338err_hwq: 305err_hwq:
339 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); 306 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
340err_ring: 307err_ring:
@@ -378,6 +345,8 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
378 err = mlx4_en_init_allocator(priv, ring); 345 err = mlx4_en_init_allocator(priv, ring);
379 if (err) { 346 if (err) {
380 en_err(priv, "Failed initializing ring allocator\n"); 347 en_err(priv, "Failed initializing ring allocator\n");
348 if (ring->stride <= TXBB_SIZE)
349 ring->buf -= TXBB_SIZE;
381 ring_ind--; 350 ring_ind--;
382 goto err_allocator; 351 goto err_allocator;
383 } 352 }
@@ -389,6 +358,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
389 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { 358 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
390 ring = &priv->rx_ring[ring_ind]; 359 ring = &priv->rx_ring[ring_ind];
391 360
361 ring->size_mask = ring->actual_size - 1;
392 mlx4_en_update_rx_prod_db(ring); 362 mlx4_en_update_rx_prod_db(ring);
393 } 363 }
394 364
@@ -401,6 +371,8 @@ err_buffers:
401 ring_ind = priv->rx_ring_num - 1; 371 ring_ind = priv->rx_ring_num - 1;
402err_allocator: 372err_allocator:
403 while (ring_ind >= 0) { 373 while (ring_ind >= 0) {
374 if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
375 priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
404 mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]); 376 mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
405 ring_ind--; 377 ring_ind--;
406 } 378 }
@@ -412,7 +384,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
412{ 384{
413 struct mlx4_en_dev *mdev = priv->mdev; 385 struct mlx4_en_dev *mdev = priv->mdev;
414 386
415 kfree(ring->lro.lro_arr);
416 mlx4_en_unmap_buffer(&ring->wqres.buf); 387 mlx4_en_unmap_buffer(&ring->wqres.buf);
417 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE); 388 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE);
418 vfree(ring->rx_info); 389 vfree(ring->rx_info);
@@ -459,7 +430,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
459 goto fail; 430 goto fail;
460 431
461 /* Unmap buffer */ 432 /* Unmap buffer */
462 pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size, 433 pci_unmap_single(mdev->pdev, dma, skb_frags_rx[nr].size,
463 PCI_DMA_FROMDEVICE); 434 PCI_DMA_FROMDEVICE);
464 } 435 }
465 /* Adjust size of last fragment to match actual length */ 436 /* Adjust size of last fragment to match actual length */
@@ -541,6 +512,21 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
541 return skb; 512 return skb;
542} 513}
543 514
515static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
516{
517 int i;
518 int offset = ETH_HLEN;
519
520 for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
521 if (*(skb->data + offset) != (unsigned char) (i & 0xff))
522 goto out_loopback;
523 }
524 /* Loopback found */
525 priv->loopback_ok = 1;
526
527out_loopback:
528 dev_kfree_skb_any(skb);
529}
544 530
545int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) 531int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
546{ 532{
@@ -548,7 +534,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
548 struct mlx4_cqe *cqe; 534 struct mlx4_cqe *cqe;
549 struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring]; 535 struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
550 struct skb_frag_struct *skb_frags; 536 struct skb_frag_struct *skb_frags;
551 struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
552 struct mlx4_en_rx_desc *rx_desc; 537 struct mlx4_en_rx_desc *rx_desc;
553 struct sk_buff *skb; 538 struct sk_buff *skb;
554 int index; 539 int index;
@@ -599,7 +584,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
599 ring->bytes += length; 584 ring->bytes += length;
600 ring->packets++; 585 ring->packets++;
601 586
602 if (likely(priv->rx_csum)) { 587 if (likely(dev->features & NETIF_F_RXCSUM)) {
603 if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && 588 if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
604 (cqe->checksum == cpu_to_be16(0xffff))) { 589 (cqe->checksum == cpu_to_be16(0xffff))) {
605 priv->port_stats.rx_chksum_good++; 590 priv->port_stats.rx_chksum_good++;
@@ -608,37 +593,35 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
608 * - TCP/IP (v4) 593 * - TCP/IP (v4)
609 * - without IP options 594 * - without IP options
610 * - not an IP fragment */ 595 * - not an IP fragment */
611 if (mlx4_en_can_lro(cqe->status) && 596 if (dev->features & NETIF_F_GRO) {
612 dev->features & NETIF_F_LRO) { 597 struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
598 if (!gro_skb)
599 goto next;
613 600
614 nr = mlx4_en_complete_rx_desc( 601 nr = mlx4_en_complete_rx_desc(
615 priv, rx_desc, 602 priv, rx_desc,
616 skb_frags, lro_frags, 603 skb_frags, skb_shinfo(gro_skb)->frags,
617 ring->page_alloc, length); 604 ring->page_alloc, length);
618 if (!nr) 605 if (!nr)
619 goto next; 606 goto next;
620 607
608 skb_shinfo(gro_skb)->nr_frags = nr;
609 gro_skb->len = length;
610 gro_skb->data_len = length;
611 gro_skb->truesize += length;
612 gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
613
621 if (priv->vlgrp && (cqe->vlan_my_qpn & 614 if (priv->vlgrp && (cqe->vlan_my_qpn &
622 cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) { 615 cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)))
623 lro_vlan_hwaccel_receive_frags( 616 vlan_gro_frags(&cq->napi, priv->vlgrp, be16_to_cpu(cqe->sl_vid));
624 &ring->lro, lro_frags, 617 else
625 length, length, 618 napi_gro_frags(&cq->napi);
626 priv->vlgrp,
627 be16_to_cpu(cqe->sl_vid),
628 NULL, 0);
629 } else
630 lro_receive_frags(&ring->lro,
631 lro_frags,
632 length,
633 length,
634 NULL, 0);
635 619
636 goto next; 620 goto next;
637 } 621 }
638 622
639 /* LRO not possible, complete processing here */ 623 /* LRO not possible, complete processing here */
640 ip_summed = CHECKSUM_UNNECESSARY; 624 ip_summed = CHECKSUM_UNNECESSARY;
641 INC_PERF_COUNTER(priv->pstats.lro_misses);
642 } else { 625 } else {
643 ip_summed = CHECKSUM_NONE; 626 ip_summed = CHECKSUM_NONE;
644 priv->port_stats.rx_chksum_none++; 627 priv->port_stats.rx_chksum_none++;
@@ -655,6 +638,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
655 goto next; 638 goto next;
656 } 639 }
657 640
641 if (unlikely(priv->validate_loopback)) {
642 validate_loopback(priv, skb);
643 goto next;
644 }
645
658 skb->ip_summed = ip_summed; 646 skb->ip_summed = ip_summed;
659 skb->protocol = eth_type_trans(skb, dev); 647 skb->protocol = eth_type_trans(skb, dev);
660 skb_record_rx_queue(skb, cq->ring); 648 skb_record_rx_queue(skb, cq->ring);
@@ -674,14 +662,10 @@ next:
674 if (++polled == budget) { 662 if (++polled == budget) {
675 /* We are here because we reached the NAPI budget - 663 /* We are here because we reached the NAPI budget -
676 * flush only pending LRO sessions */ 664 * flush only pending LRO sessions */
677 lro_flush_all(&ring->lro);
678 goto out; 665 goto out;
679 } 666 }
680 } 667 }
681 668
682 /* If CQ is empty flush all LRO sessions unconditionally */
683 lro_flush_all(&ring->lro);
684
685out: 669out:
686 AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled); 670 AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
687 mlx4_cq_set_ci(&cq->mcq); 671 mlx4_cq_set_ci(&cq->mcq);
@@ -726,7 +710,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
726} 710}
727 711
728 712
729/* Calculate the last offset position that accomodates a full fragment 713/* Calculate the last offset position that accommodates a full fragment
730 * (assuming fagment size = stride-align) */ 714 * (assuming fagment size = stride-align) */
731static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align) 715static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align)
732{ 716{
@@ -816,7 +800,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
816 qp->event = mlx4_en_sqp_event; 800 qp->event = mlx4_en_sqp_event;
817 801
818 memset(context, 0, sizeof *context); 802 memset(context, 0, sizeof *context);
819 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 0, 0, 803 mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
820 qpn, ring->cqn, context); 804 qpn, ring->cqn, context);
821 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); 805 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
822 806
@@ -839,8 +823,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
839 struct mlx4_qp_context context; 823 struct mlx4_qp_context context;
840 struct mlx4_en_rss_context *rss_context; 824 struct mlx4_en_rss_context *rss_context;
841 void *ptr; 825 void *ptr;
842 int rss_xor = mdev->profile.rss_xor; 826 u8 rss_mask = 0x3f;
843 u8 rss_mask = mdev->profile.rss_mask;
844 int i, qpn; 827 int i, qpn;
845 int err = 0; 828 int err = 0;
846 int good_qps = 0; 829 int good_qps = 0;
@@ -866,16 +849,10 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
866 } 849 }
867 850
868 /* Configure RSS indirection qp */ 851 /* Configure RSS indirection qp */
869 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
870 if (err) {
871 en_err(priv, "Failed to reserve range for RSS "
872 "indirection qp\n");
873 goto rss_err;
874 }
875 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp); 852 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
876 if (err) { 853 if (err) {
877 en_err(priv, "Failed to allocate RSS indirection QP\n"); 854 en_err(priv, "Failed to allocate RSS indirection QP\n");
878 goto reserve_err; 855 goto rss_err;
879 } 856 }
880 rss_map->indir_qp.event = mlx4_en_sqp_event; 857 rss_map->indir_qp.event = mlx4_en_sqp_event;
881 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, 858 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
@@ -886,9 +863,10 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
886 rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 | 863 rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
887 (rss_map->base_qpn)); 864 (rss_map->base_qpn));
888 rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn); 865 rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
889 rss_context->hash_fn = rss_xor & 0x3; 866 rss_context->flags = rss_mask;
890 rss_context->flags = rss_mask << 2;
891 867
868 if (priv->mdev->profile.udp_rss)
869 rss_context->base_qpn_udp = rss_context->default_qpn;
892 err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context, 870 err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
893 &rss_map->indir_qp, &rss_map->indir_state); 871 &rss_map->indir_qp, &rss_map->indir_state);
894 if (err) 872 if (err)
@@ -901,8 +879,6 @@ indir_err:
901 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); 879 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
902 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); 880 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
903 mlx4_qp_free(mdev->dev, &rss_map->indir_qp); 881 mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
904reserve_err:
905 mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
906rss_err: 882rss_err:
907 for (i = 0; i < good_qps; i++) { 883 for (i = 0; i < good_qps; i++) {
908 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], 884 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
@@ -924,7 +900,6 @@ void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
924 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); 900 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
925 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); 901 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
926 mlx4_qp_free(mdev->dev, &rss_map->indir_qp); 902 mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
927 mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
928 903
929 for (i = 0; i < priv->rx_ring_num; i++) { 904 for (i = 0; i < priv->rx_ring_num; i++) {
930 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], 905 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
diff --git a/drivers/net/mlx4/en_selftest.c b/drivers/net/mlx4/en_selftest.c
new file mode 100644
index 000000000000..191a8dcd8a93
--- /dev/null
+++ b/drivers/net/mlx4/en_selftest.c
@@ -0,0 +1,179 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/kernel.h>
35#include <linux/ethtool.h>
36#include <linux/netdevice.h>
37#include <linux/delay.h>
38#include <linux/mlx4/driver.h>
39
40#include "mlx4_en.h"
41
42
43static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
44{
45 return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
46 MLX4_CMD_TIME_CLASS_A);
47}
48
49static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
50{
51 struct sk_buff *skb;
52 struct ethhdr *ethh;
53 unsigned char *packet;
54 unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD;
55 unsigned int i;
56 int err;
57
58
59 /* build the pkt before xmit */
60 skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
61 if (!skb) {
62 en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
63 return -ENOMEM;
64 }
65 skb_reserve(skb, NET_IP_ALIGN);
66
67 ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
68 packet = (unsigned char *)skb_put(skb, packet_size);
69 memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
70 memset(ethh->h_source, 0, ETH_ALEN);
71 ethh->h_proto = htons(ETH_P_ARP);
72 skb_set_mac_header(skb, 0);
73 for (i = 0; i < packet_size; ++i) /* fill our packet */
74 packet[i] = (unsigned char)(i & 0xff);
75
76 /* xmit the pkt */
77 err = mlx4_en_xmit(skb, priv->dev);
78 return err;
79}
80
81static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
82{
83 u32 loopback_ok = 0;
84 int i;
85
86
87 priv->loopback_ok = 0;
88 priv->validate_loopback = 1;
89
90 /* xmit */
91 if (mlx4_en_test_loopback_xmit(priv)) {
92 en_err(priv, "Transmitting loopback packet failed\n");
93 goto mlx4_en_test_loopback_exit;
94 }
95
96 /* polling for result */
97 for (i = 0; i < MLX4_EN_LOOPBACK_RETRIES; ++i) {
98 msleep(MLX4_EN_LOOPBACK_TIMEOUT);
99 if (priv->loopback_ok) {
100 loopback_ok = 1;
101 break;
102 }
103 }
104 if (!loopback_ok)
105 en_err(priv, "Loopback packet didn't arrive\n");
106
107mlx4_en_test_loopback_exit:
108
109 priv->validate_loopback = 0;
110 return !loopback_ok;
111}
112
113
114static int mlx4_en_test_link(struct mlx4_en_priv *priv)
115{
116 if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
117 return -ENOMEM;
118 if (priv->port_state.link_state == 1)
119 return 0;
120 else
121 return 1;
122}
123
124static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
125{
126
127 if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
128 return -ENOMEM;
129
130 /* The device currently only supports 10G speed */
131 if (priv->port_state.link_speed != SPEED_10000)
132 return priv->port_state.link_speed;
133 return 0;
134}
135
136
137void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
138{
139 struct mlx4_en_priv *priv = netdev_priv(dev);
140 struct mlx4_en_dev *mdev = priv->mdev;
141 struct mlx4_en_tx_ring *tx_ring;
142 int i, carrier_ok;
143
144 memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
145
146 if (*flags & ETH_TEST_FL_OFFLINE) {
147 /* disable the interface */
148 carrier_ok = netif_carrier_ok(dev);
149
150 netif_carrier_off(dev);
151retry_tx:
152 /* Wait until all tx queues are empty.
153 * there should not be any additional incoming traffic
154 * since we turned the carrier off */
155 msleep(200);
156 for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
157 tx_ring = &priv->tx_ring[i];
158 if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
159 goto retry_tx;
160 }
161
162 if (priv->mdev->dev->caps.loopback_support){
163 buf[3] = mlx4_en_test_registers(priv);
164 buf[4] = mlx4_en_test_loopback(priv);
165 }
166
167 if (carrier_ok)
168 netif_carrier_on(dev);
169
170 }
171 buf[0] = mlx4_test_interrupts(mdev->dev);
172 buf[1] = mlx4_en_test_link(priv);
173 buf[2] = mlx4_en_test_speed(priv);
174
175 for (i = 0; i < MLX4_EN_NUM_SELF_TEST; i++) {
176 if (buf[i])
177 *flags |= ETH_TEST_FL_FAILED;
178 }
179}
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 580968f304eb..b229acf1855f 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -38,11 +38,13 @@
38#include <linux/skbuff.h> 38#include <linux/skbuff.h>
39#include <linux/if_vlan.h> 39#include <linux/if_vlan.h>
40#include <linux/vmalloc.h> 40#include <linux/vmalloc.h>
41#include <linux/tcp.h>
41 42
42#include "mlx4_en.h" 43#include "mlx4_en.h"
43 44
44enum { 45enum {
45 MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ 46 MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
47 MAX_BF = 256,
46}; 48};
47 49
48static int inline_thold __read_mostly = MAX_INLINE; 50static int inline_thold __read_mostly = MAX_INLINE;
@@ -51,7 +53,7 @@ module_param_named(inline_thold, inline_thold, int, 0444);
51MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); 53MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
52 54
53int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, 55int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
54 struct mlx4_en_tx_ring *ring, u32 size, 56 struct mlx4_en_tx_ring *ring, int qpn, u32 size,
55 u16 stride) 57 u16 stride)
56{ 58{
57 struct mlx4_en_dev *mdev = priv->mdev; 59 struct mlx4_en_dev *mdev = priv->mdev;
@@ -102,23 +104,25 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
102 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, 104 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
103 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); 105 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
104 106
105 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn); 107 ring->qpn = qpn;
106 if (err) {
107 en_err(priv, "Failed reserving qp for tx ring.\n");
108 goto err_map;
109 }
110
111 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); 108 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
112 if (err) { 109 if (err) {
113 en_err(priv, "Failed allocating qp %d\n", ring->qpn); 110 en_err(priv, "Failed allocating qp %d\n", ring->qpn);
114 goto err_reserve; 111 goto err_map;
115 } 112 }
116 ring->qp.event = mlx4_en_sqp_event; 113 ring->qp.event = mlx4_en_sqp_event;
117 114
115 err = mlx4_bf_alloc(mdev->dev, &ring->bf);
116 if (err) {
117 en_dbg(DRV, priv, "working without blueflame (%d)", err);
118 ring->bf.uar = &mdev->priv_uar;
119 ring->bf.uar->map = mdev->uar_map;
120 ring->bf_enabled = false;
121 } else
122 ring->bf_enabled = true;
123
118 return 0; 124 return 0;
119 125
120err_reserve:
121 mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
122err_map: 126err_map:
123 mlx4_en_unmap_buffer(&ring->wqres.buf); 127 mlx4_en_unmap_buffer(&ring->wqres.buf);
124err_hwq_res: 128err_hwq_res:
@@ -138,6 +142,8 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
138 struct mlx4_en_dev *mdev = priv->mdev; 142 struct mlx4_en_dev *mdev = priv->mdev;
139 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); 143 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
140 144
145 if (ring->bf_enabled)
146 mlx4_bf_free(mdev->dev, &ring->bf);
141 mlx4_qp_remove(mdev->dev, &ring->qp); 147 mlx4_qp_remove(mdev->dev, &ring->qp);
142 mlx4_qp_free(mdev->dev, &ring->qp); 148 mlx4_qp_free(mdev->dev, &ring->qp);
143 mlx4_qp_release_range(mdev->dev, ring->qpn, 1); 149 mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
@@ -170,6 +176,8 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
170 176
171 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, 177 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
172 ring->cqn, &ring->context); 178 ring->cqn, &ring->context);
179 if (ring->bf_enabled)
180 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
173 181
174 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, 182 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
175 &ring->qp, &ring->qp_state); 183 &ring->qp, &ring->qp_state);
@@ -582,7 +590,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
582 /* If we support per priority flow control and the packet contains 590 /* If we support per priority flow control and the packet contains
583 * a vlan tag, send the packet to the TX ring assigned to that priority 591 * a vlan tag, send the packet to the TX ring assigned to that priority
584 */ 592 */
585 if (priv->prof->rx_ppp && priv->vlgrp && vlan_tx_tag_present(skb)) { 593 if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
586 vlan_tag = vlan_tx_tag_get(skb); 594 vlan_tag = vlan_tx_tag_get(skb);
587 return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13); 595 return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
588 } 596 }
@@ -590,6 +598,11 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
590 return skb_tx_hash(dev, skb); 598 return skb_tx_hash(dev, skb);
591} 599}
592 600
601static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt)
602{
603 __iowrite64_copy(dst, src, bytecnt / 8);
604}
605
593netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) 606netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
594{ 607{
595 struct mlx4_en_priv *priv = netdev_priv(dev); 608 struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -600,23 +613,30 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
600 struct mlx4_wqe_data_seg *data; 613 struct mlx4_wqe_data_seg *data;
601 struct skb_frag_struct *frag; 614 struct skb_frag_struct *frag;
602 struct mlx4_en_tx_info *tx_info; 615 struct mlx4_en_tx_info *tx_info;
616 struct ethhdr *ethh;
617 u64 mac;
618 u32 mac_l, mac_h;
603 int tx_ind = 0; 619 int tx_ind = 0;
604 int nr_txbb; 620 int nr_txbb;
605 int desc_size; 621 int desc_size;
606 int real_size; 622 int real_size;
607 dma_addr_t dma; 623 dma_addr_t dma;
608 u32 index; 624 u32 index, bf_index;
609 __be32 op_own; 625 __be32 op_own;
610 u16 vlan_tag = 0; 626 u16 vlan_tag = 0;
611 int i; 627 int i;
612 int lso_header_size; 628 int lso_header_size;
613 void *fragptr; 629 void *fragptr;
630 bool bounce = false;
631
632 if (!priv->port_up)
633 goto tx_drop;
614 634
615 real_size = get_real_size(skb, dev, &lso_header_size); 635 real_size = get_real_size(skb, dev, &lso_header_size);
616 if (unlikely(!real_size)) 636 if (unlikely(!real_size))
617 goto tx_drop; 637 goto tx_drop;
618 638
619 /* Allign descriptor to TXBB size */ 639 /* Align descriptor to TXBB size */
620 desc_size = ALIGN(real_size, TXBB_SIZE); 640 desc_size = ALIGN(real_size, TXBB_SIZE);
621 nr_txbb = desc_size / TXBB_SIZE; 641 nr_txbb = desc_size / TXBB_SIZE;
622 if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { 642 if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
@@ -627,7 +647,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
627 647
628 tx_ind = skb->queue_mapping; 648 tx_ind = skb->queue_mapping;
629 ring = &priv->tx_ring[tx_ind]; 649 ring = &priv->tx_ring[tx_ind];
630 if (priv->vlgrp && vlan_tx_tag_present(skb)) 650 if (vlan_tx_tag_present(skb))
631 vlan_tag = vlan_tx_tag_get(skb); 651 vlan_tag = vlan_tx_tag_get(skb);
632 652
633 /* Check available TXBBs And 2K spare for prefetch */ 653 /* Check available TXBBs And 2K spare for prefetch */
@@ -650,13 +670,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
650 670
651 /* Packet is good - grab an index and transmit it */ 671 /* Packet is good - grab an index and transmit it */
652 index = ring->prod & ring->size_mask; 672 index = ring->prod & ring->size_mask;
673 bf_index = ring->prod;
653 674
654 /* See if we have enough space for whole descriptor TXBB for setting 675 /* See if we have enough space for whole descriptor TXBB for setting
655 * SW ownership on next descriptor; if not, use a bounce buffer. */ 676 * SW ownership on next descriptor; if not, use a bounce buffer. */
656 if (likely(index + nr_txbb <= ring->size)) 677 if (likely(index + nr_txbb <= ring->size))
657 tx_desc = ring->buf + index * TXBB_SIZE; 678 tx_desc = ring->buf + index * TXBB_SIZE;
658 else 679 else {
659 tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; 680 tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
681 bounce = true;
682 }
660 683
661 /* Save skb in tx_info ring */ 684 /* Save skb in tx_info ring */
662 tx_info = &ring->tx_info[index]; 685 tx_info = &ring->tx_info[index];
@@ -676,6 +699,19 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
676 priv->port_stats.tx_chksum_offload++; 699 priv->port_stats.tx_chksum_offload++;
677 } 700 }
678 701
702 if (unlikely(priv->validate_loopback)) {
703 /* Copy dst mac address to wqe */
704 skb_reset_mac_header(skb);
705 ethh = eth_hdr(skb);
706 if (ethh && ethh->h_dest) {
707 mac = mlx4_en_mac_to_u64(ethh->h_dest);
708 mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
709 mac_l = (u32) (mac & 0xffffffff);
710 tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
711 tx_desc->ctrl.imm = cpu_to_be32(mac_l);
712 }
713 }
714
679 /* Handle LSO (TSO) packets */ 715 /* Handle LSO (TSO) packets */
680 if (lso_header_size) { 716 if (lso_header_size) {
681 /* Mark opcode as LSO */ 717 /* Mark opcode as LSO */
@@ -748,21 +784,37 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
748 ring->prod += nr_txbb; 784 ring->prod += nr_txbb;
749 785
750 /* If we used a bounce buffer then copy descriptor back into place */ 786 /* If we used a bounce buffer then copy descriptor back into place */
751 if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf) 787 if (bounce)
752 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); 788 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
753 789
754 /* Run destructor before passing skb to HW */ 790 /* Run destructor before passing skb to HW */
755 if (likely(!skb_shared(skb))) 791 if (likely(!skb_shared(skb)))
756 skb_orphan(skb); 792 skb_orphan(skb);
757 793
758 /* Ensure new descirptor hits memory 794 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
759 * before setting ownership of this descriptor to HW */ 795 *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn;
760 wmb(); 796 op_own |= htonl((bf_index & 0xffff) << 8);
761 tx_desc->ctrl.owner_opcode = op_own; 797 /* Ensure new descirptor hits memory
798 * before setting ownership of this descriptor to HW */
799 wmb();
800 tx_desc->ctrl.owner_opcode = op_own;
762 801
763 /* Ring doorbell! */ 802 wmb();
764 wmb(); 803
765 writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL); 804 mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl,
805 desc_size);
806
807 wmb();
808
809 ring->bf.offset ^= ring->bf.buf_size;
810 } else {
811 /* Ensure new descirptor hits memory
812 * before setting ownership of this descriptor to HW */
813 wmb();
814 tx_desc->ctrl.owner_opcode = op_own;
815 wmb();
816 writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
817 }
766 818
767 /* Poll CQ here */ 819 /* Poll CQ here */
768 mlx4_en_xmit_poll(priv, tx_ind); 820 mlx4_en_xmit_poll(priv, tx_ind);
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 6d7b2bf210ce..1ad1f6029af8 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -42,7 +42,7 @@
42#include "fw.h" 42#include "fw.h"
43 43
44enum { 44enum {
45 MLX4_IRQNAME_SIZE = 64 45 MLX4_IRQNAME_SIZE = 32
46}; 46};
47 47
48enum { 48enum {
@@ -317,8 +317,8 @@ static int mlx4_num_eq_uar(struct mlx4_dev *dev)
317 * we need to map, take the difference of highest index and 317 * we need to map, take the difference of highest index and
318 * the lowest index we'll use and add 1. 318 * the lowest index we'll use and add 1.
319 */ 319 */
320 return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 - 320 return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
321 dev->caps.reserved_eqs / 4 + 1; 321 dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
322} 322}
323 323
324static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) 324static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
@@ -496,16 +496,32 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
496static void mlx4_free_irqs(struct mlx4_dev *dev) 496static void mlx4_free_irqs(struct mlx4_dev *dev)
497{ 497{
498 struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; 498 struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
499 int i; 499 struct mlx4_priv *priv = mlx4_priv(dev);
500 int i, vec;
500 501
501 if (eq_table->have_irq) 502 if (eq_table->have_irq)
502 free_irq(dev->pdev->irq, dev); 503 free_irq(dev->pdev->irq, dev);
504
503 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) 505 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
504 if (eq_table->eq[i].have_irq) { 506 if (eq_table->eq[i].have_irq) {
505 free_irq(eq_table->eq[i].irq, eq_table->eq + i); 507 free_irq(eq_table->eq[i].irq, eq_table->eq + i);
506 eq_table->eq[i].have_irq = 0; 508 eq_table->eq[i].have_irq = 0;
507 } 509 }
508 510
511 for (i = 0; i < dev->caps.comp_pool; i++) {
512 /*
513 * Freeing the assigned irq's
514 * all bits should be 0, but we need to validate
515 */
516 if (priv->msix_ctl.pool_bm & 1ULL << i) {
517 /* NO need protecting*/
518 vec = dev->caps.num_comp_vectors + 1 + i;
519 free_irq(priv->eq_table.eq[vec].irq,
520 &priv->eq_table.eq[vec]);
521 }
522 }
523
524
509 kfree(eq_table->irq_names); 525 kfree(eq_table->irq_names);
510} 526}
511 527
@@ -578,7 +594,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
578 (priv->eq_table.inta_pin < 32 ? 4 : 0); 594 (priv->eq_table.inta_pin < 32 ? 4 : 0);
579 595
580 priv->eq_table.irq_names = 596 priv->eq_table.irq_names =
581 kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1), 597 kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
598 dev->caps.comp_pool),
582 GFP_KERNEL); 599 GFP_KERNEL);
583 if (!priv->eq_table.irq_names) { 600 if (!priv->eq_table.irq_names) {
584 err = -ENOMEM; 601 err = -ENOMEM;
@@ -586,7 +603,9 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
586 } 603 }
587 604
588 for (i = 0; i < dev->caps.num_comp_vectors; ++i) { 605 for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
589 err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE, 606 err = mlx4_create_eq(dev, dev->caps.num_cqs -
607 dev->caps.reserved_cqs +
608 MLX4_NUM_SPARE_EQE,
590 (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, 609 (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
591 &priv->eq_table.eq[i]); 610 &priv->eq_table.eq[i]);
592 if (err) { 611 if (err) {
@@ -601,6 +620,22 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
601 if (err) 620 if (err)
602 goto err_out_comp; 621 goto err_out_comp;
603 622
623 /*if additional completion vectors poolsize is 0 this loop will not run*/
624 for (i = dev->caps.num_comp_vectors + 1;
625 i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
626
627 err = mlx4_create_eq(dev, dev->caps.num_cqs -
628 dev->caps.reserved_cqs +
629 MLX4_NUM_SPARE_EQE,
630 (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
631 &priv->eq_table.eq[i]);
632 if (err) {
633 --i;
634 goto err_out_unmap;
635 }
636 }
637
638
604 if (dev->flags & MLX4_FLAG_MSI_X) { 639 if (dev->flags & MLX4_FLAG_MSI_X) {
605 const char *eq_name; 640 const char *eq_name;
606 641
@@ -686,7 +721,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
686 721
687 mlx4_free_irqs(dev); 722 mlx4_free_irqs(dev);
688 723
689 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) 724 for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
690 mlx4_free_eq(dev, &priv->eq_table.eq[i]); 725 mlx4_free_eq(dev, &priv->eq_table.eq[i]);
691 726
692 mlx4_unmap_clr_int(dev); 727 mlx4_unmap_clr_int(dev);
@@ -699,3 +734,109 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
699 734
700 kfree(priv->eq_table.uar_map); 735 kfree(priv->eq_table.uar_map);
701} 736}
737
738/* A test that verifies that we can accept interrupts on all
739 * the irq vectors of the device.
740 * Interrupts are checked using the NOP command.
741 */
742int mlx4_test_interrupts(struct mlx4_dev *dev)
743{
744 struct mlx4_priv *priv = mlx4_priv(dev);
745 int i;
746 int err;
747
748 err = mlx4_NOP(dev);
749 /* When not in MSI_X, there is only one irq to check */
750 if (!(dev->flags & MLX4_FLAG_MSI_X))
751 return err;
752
753 /* A loop over all completion vectors, for each vector we will check
754 * whether it works by mapping command completions to that vector
755 * and performing a NOP command
756 */
757 for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
758 /* Temporary use polling for command completions */
759 mlx4_cmd_use_polling(dev);
760
761 /* Map the new eq to handle all asyncronous events */
762 err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
763 priv->eq_table.eq[i].eqn);
764 if (err) {
765 mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
766 mlx4_cmd_use_events(dev);
767 break;
768 }
769
770 /* Go back to using events */
771 mlx4_cmd_use_events(dev);
772 err = mlx4_NOP(dev);
773 }
774
775 /* Return to default */
776 mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
777 priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
778 return err;
779}
780EXPORT_SYMBOL(mlx4_test_interrupts);
781
782int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector)
783{
784
785 struct mlx4_priv *priv = mlx4_priv(dev);
786 int vec = 0, err = 0, i;
787
788 spin_lock(&priv->msix_ctl.pool_lock);
789 for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
790 if (~priv->msix_ctl.pool_bm & 1ULL << i) {
791 priv->msix_ctl.pool_bm |= 1ULL << i;
792 vec = dev->caps.num_comp_vectors + 1 + i;
793 snprintf(priv->eq_table.irq_names +
794 vec * MLX4_IRQNAME_SIZE,
795 MLX4_IRQNAME_SIZE, "%s", name);
796 err = request_irq(priv->eq_table.eq[vec].irq,
797 mlx4_msi_x_interrupt, 0,
798 &priv->eq_table.irq_names[vec<<5],
799 priv->eq_table.eq + vec);
800 if (err) {
801 /*zero out bit by fliping it*/
802 priv->msix_ctl.pool_bm ^= 1 << i;
803 vec = 0;
804 continue;
805 /*we dont want to break here*/
806 }
807 eq_set_ci(&priv->eq_table.eq[vec], 1);
808 }
809 }
810 spin_unlock(&priv->msix_ctl.pool_lock);
811
812 if (vec) {
813 *vector = vec;
814 } else {
815 *vector = 0;
816 err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
817 }
818 return err;
819}
820EXPORT_SYMBOL(mlx4_assign_eq);
821
822void mlx4_release_eq(struct mlx4_dev *dev, int vec)
823{
824 struct mlx4_priv *priv = mlx4_priv(dev);
825 /*bm index*/
826 int i = vec - dev->caps.num_comp_vectors - 1;
827
828 if (likely(i >= 0)) {
829 /*sanity check , making sure were not trying to free irq's
830 Belonging to a legacy EQ*/
831 spin_lock(&priv->msix_ctl.pool_lock);
832 if (priv->msix_ctl.pool_bm & 1ULL << i) {
833 free_irq(priv->eq_table.eq[vec].irq,
834 &priv->eq_table.eq[vec]);
835 priv->msix_ctl.pool_bm &= ~(1ULL << i);
836 }
837 spin_unlock(&priv->msix_ctl.pool_lock);
838 }
839
840}
841EXPORT_SYMBOL(mlx4_release_eq);
842
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 04f42ae1eda0..67a209ba939d 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -98,7 +98,8 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
98 [20] = "Address vector port checking support", 98 [20] = "Address vector port checking support",
99 [21] = "UD multicast support", 99 [21] = "UD multicast support",
100 [24] = "Demand paging support", 100 [24] = "Demand paging support",
101 [25] = "Router support" 101 [25] = "Router support",
102 [30] = "IBoE support"
102 }; 103 };
103 int i; 104 int i;
104 105
@@ -141,6 +142,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
141 struct mlx4_cmd_mailbox *mailbox; 142 struct mlx4_cmd_mailbox *mailbox;
142 u32 *outbox; 143 u32 *outbox;
143 u8 field; 144 u8 field;
145 u32 field32;
144 u16 size; 146 u16 size;
145 u16 stat_rate; 147 u16 stat_rate;
146 int err; 148 int err;
@@ -178,6 +180,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
178#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b 180#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
179#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c 181#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
180#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f 182#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
183#define QUERY_DEV_CAP_UDP_RSS_OFFSET 0x42
184#define QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET 0x43
181#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 185#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
182#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 186#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
183#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 187#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
@@ -268,6 +272,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
268 dev_cap->max_msg_sz = 1 << (field & 0x1f); 272 dev_cap->max_msg_sz = 1 << (field & 0x1f);
269 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); 273 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
270 dev_cap->stat_rate_support = stat_rate; 274 dev_cap->stat_rate_support = stat_rate;
275 MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET);
276 dev_cap->udp_rss = field & 0x1;
277 dev_cap->vep_uc_steering = field & 0x2;
278 dev_cap->vep_mc_steering = field & 0x4;
279 MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET);
280 dev_cap->loopback_support = field & 0x1;
281 dev_cap->wol = field & 0x40;
271 MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); 282 MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
272 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); 283 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
273 dev_cap->reserved_uars = field >> 4; 284 dev_cap->reserved_uars = field >> 4;
@@ -281,6 +292,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
281 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); 292 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
282 dev_cap->bf_reg_size = 1 << (field & 0x1f); 293 dev_cap->bf_reg_size = 1 << (field & 0x1f);
283 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); 294 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
295 if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
296 field = 3;
284 dev_cap->bf_regs_per_page = 1 << (field & 0x3f); 297 dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
285 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", 298 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
286 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); 299 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
@@ -365,6 +378,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
365#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a 378#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
366#define QUERY_PORT_MAX_VL_OFFSET 0x0b 379#define QUERY_PORT_MAX_VL_OFFSET 0x0b
367#define QUERY_PORT_MAC_OFFSET 0x10 380#define QUERY_PORT_MAC_OFFSET 0x10
381#define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18
382#define QUERY_PORT_WAVELENGTH_OFFSET 0x1c
383#define QUERY_PORT_TRANS_CODE_OFFSET 0x20
368 384
369 for (i = 1; i <= dev_cap->num_ports; ++i) { 385 for (i = 1; i <= dev_cap->num_ports; ++i) {
370 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, 386 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
@@ -388,6 +404,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
388 dev_cap->log_max_vlans[i] = field >> 4; 404 dev_cap->log_max_vlans[i] = field >> 4;
389 MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET); 405 MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
390 MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET); 406 MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
407 MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
408 dev_cap->trans_type[i] = field32 >> 24;
409 dev_cap->vendor_oui[i] = field32 & 0xffffff;
410 MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
411 MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
391 } 412 }
392 } 413 }
393 414
@@ -719,6 +740,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
719#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) 740#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
720#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) 741#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
721#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) 742#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
743#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
722#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) 744#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
723#define INIT_HCA_TPT_OFFSET 0x0f0 745#define INIT_HCA_TPT_OFFSET 0x0f0
724#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) 746#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
@@ -779,6 +801,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
779 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); 801 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
780 MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 802 MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
781 MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 803 MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
804 if (dev->caps.vep_mc_steering)
805 MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET);
782 MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 806 MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
783 807
784 /* TPT attributes */ 808 /* TPT attributes */
@@ -890,3 +914,22 @@ int mlx4_NOP(struct mlx4_dev *dev)
890 /* Input modifier of 0x1f means "finish as soon as possible." */ 914 /* Input modifier of 0x1f means "finish as soon as possible." */
891 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100); 915 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
892} 916}
917
918#define MLX4_WOL_SETUP_MODE (5 << 28)
919int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
920{
921 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
922
923 return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
924 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A);
925}
926EXPORT_SYMBOL_GPL(mlx4_wol_read);
927
928int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
929{
930 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
931
932 return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
933 MLX4_CMD_TIME_CLASS_A);
934}
935EXPORT_SYMBOL_GPL(mlx4_wol_write);
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 526d7f30c041..88003ebc6185 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -73,7 +73,16 @@ struct mlx4_dev_cap {
73 int max_pkeys[MLX4_MAX_PORTS + 1]; 73 int max_pkeys[MLX4_MAX_PORTS + 1];
74 u64 def_mac[MLX4_MAX_PORTS + 1]; 74 u64 def_mac[MLX4_MAX_PORTS + 1];
75 u16 eth_mtu[MLX4_MAX_PORTS + 1]; 75 u16 eth_mtu[MLX4_MAX_PORTS + 1];
76 int trans_type[MLX4_MAX_PORTS + 1];
77 int vendor_oui[MLX4_MAX_PORTS + 1];
78 u16 wavelength[MLX4_MAX_PORTS + 1];
79 u64 trans_code[MLX4_MAX_PORTS + 1];
76 u16 stat_rate_support; 80 u16 stat_rate_support;
81 int udp_rss;
82 int loopback_support;
83 int vep_uc_steering;
84 int vep_mc_steering;
85 int wol;
77 u32 flags; 86 u32 flags;
78 int reserved_uars; 87 int reserved_uars;
79 int uar_size; 88 int uar_size;
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index b07e4dee80aa..02393fdf44c1 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -210,38 +210,12 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
210 return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt); 210 return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
211} 211}
212 212
213int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count) 213static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
214{ 214{
215 return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM, 215 return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
216 MLX4_CMD_TIME_CLASS_B); 216 MLX4_CMD_TIME_CLASS_B);
217} 217}
218 218
219int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
220{
221 struct mlx4_cmd_mailbox *mailbox;
222 __be64 *inbox;
223 int err;
224
225 mailbox = mlx4_alloc_cmd_mailbox(dev);
226 if (IS_ERR(mailbox))
227 return PTR_ERR(mailbox);
228 inbox = mailbox->buf;
229
230 inbox[0] = cpu_to_be64(virt);
231 inbox[1] = cpu_to_be64(dma_addr);
232
233 err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
234 MLX4_CMD_TIME_CLASS_B);
235
236 mlx4_free_cmd_mailbox(dev, mailbox);
237
238 if (!err)
239 mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
240 (unsigned long long) dma_addr, (unsigned long long) virt);
241
242 return err;
243}
244
245int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm) 219int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
246{ 220{
247 return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1); 221 return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
diff --git a/drivers/net/mlx4/icm.h b/drivers/net/mlx4/icm.h
index ab56a2f89b65..b10c07a1dc1a 100644
--- a/drivers/net/mlx4/icm.h
+++ b/drivers/net/mlx4/icm.h
@@ -128,8 +128,6 @@ static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
128 return sg_dma_len(&iter->chunk->mem[iter->page_idx]); 128 return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
129} 129}
130 130
131int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count);
132int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt);
133int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); 131int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
134int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev); 132int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
135 133
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index 555067802751..73c94fcdfddf 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -161,3 +161,24 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
161 161
162 mutex_unlock(&intf_mutex); 162 mutex_unlock(&intf_mutex);
163} 163}
164
165void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port)
166{
167 struct mlx4_priv *priv = mlx4_priv(dev);
168 struct mlx4_device_context *dev_ctx;
169 unsigned long flags;
170 void *result = NULL;
171
172 spin_lock_irqsave(&priv->ctx_lock, flags);
173
174 list_for_each_entry(dev_ctx, &priv->ctx_list, list)
175 if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) {
176 result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port);
177 break;
178 }
179
180 spin_unlock_irqrestore(&priv->ctx_lock, flags);
181
182 return result;
183}
184EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 5102ab1ac561..3814fc9b1145 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -39,6 +39,7 @@
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/dma-mapping.h> 40#include <linux/dma-mapping.h>
41#include <linux/slab.h> 41#include <linux/slab.h>
42#include <linux/io-mapping.h>
42 43
43#include <linux/mlx4/device.h> 44#include <linux/mlx4/device.h>
44#include <linux/mlx4/doorbell.h> 45#include <linux/mlx4/doorbell.h>
@@ -103,7 +104,7 @@ MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
103 104
104static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); 105static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
105module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); 106module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
106MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)"); 107MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
107 108
108int mlx4_check_port_params(struct mlx4_dev *dev, 109int mlx4_check_port_params(struct mlx4_dev *dev,
109 enum mlx4_port_type *port_type) 110 enum mlx4_port_type *port_type)
@@ -184,6 +185,10 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
184 dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i]; 185 dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
185 dev->caps.def_mac[i] = dev_cap->def_mac[i]; 186 dev->caps.def_mac[i] = dev_cap->def_mac[i];
186 dev->caps.supported_type[i] = dev_cap->supported_port_types[i]; 187 dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
188 dev->caps.trans_type[i] = dev_cap->trans_type[i];
189 dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
190 dev->caps.wavelength[i] = dev_cap->wavelength[i];
191 dev->caps.trans_code[i] = dev_cap->trans_code[i];
187 } 192 }
188 193
189 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; 194 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
@@ -221,6 +226,11 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
221 dev->caps.bmme_flags = dev_cap->bmme_flags; 226 dev->caps.bmme_flags = dev_cap->bmme_flags;
222 dev->caps.reserved_lkey = dev_cap->reserved_lkey; 227 dev->caps.reserved_lkey = dev_cap->reserved_lkey;
223 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 228 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
229 dev->caps.udp_rss = dev_cap->udp_rss;
230 dev->caps.loopback_support = dev_cap->loopback_support;
231 dev->caps.vep_uc_steering = dev_cap->vep_uc_steering;
232 dev->caps.vep_mc_steering = dev_cap->vep_mc_steering;
233 dev->caps.wol = dev_cap->wol;
224 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 234 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
225 235
226 dev->caps.log_num_macs = log_num_mac; 236 dev->caps.log_num_macs = log_num_mac;
@@ -712,8 +722,31 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
712 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 722 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
713} 723}
714 724
725static int map_bf_area(struct mlx4_dev *dev)
726{
727 struct mlx4_priv *priv = mlx4_priv(dev);
728 resource_size_t bf_start;
729 resource_size_t bf_len;
730 int err = 0;
731
732 bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
733 bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
734 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
735 if (!priv->bf_mapping)
736 err = -ENOMEM;
737
738 return err;
739}
740
741static void unmap_bf_area(struct mlx4_dev *dev)
742{
743 if (mlx4_priv(dev)->bf_mapping)
744 io_mapping_free(mlx4_priv(dev)->bf_mapping);
745}
746
715static void mlx4_close_hca(struct mlx4_dev *dev) 747static void mlx4_close_hca(struct mlx4_dev *dev)
716{ 748{
749 unmap_bf_area(dev);
717 mlx4_CLOSE_HCA(dev, 0); 750 mlx4_CLOSE_HCA(dev, 0);
718 mlx4_free_icms(dev); 751 mlx4_free_icms(dev);
719 mlx4_UNMAP_FA(dev); 752 mlx4_UNMAP_FA(dev);
@@ -766,6 +799,9 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
766 goto err_stop_fw; 799 goto err_stop_fw;
767 } 800 }
768 801
802 if (map_bf_area(dev))
803 mlx4_dbg(dev, "Failed to map blue flame area\n");
804
769 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 805 init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
770 806
771 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 807 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
@@ -796,6 +832,7 @@ err_free_icm:
796 mlx4_free_icms(dev); 832 mlx4_free_icms(dev);
797 833
798err_stop_fw: 834err_stop_fw:
835 unmap_bf_area(dev);
799 mlx4_UNMAP_FA(dev); 836 mlx4_UNMAP_FA(dev);
800 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 837 mlx4_free_icm(dev, priv->fw.fw_icm, 0);
801 838
@@ -823,7 +860,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
823 goto err_uar_table_free; 860 goto err_uar_table_free;
824 } 861 }
825 862
826 priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 863 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
827 if (!priv->kar) { 864 if (!priv->kar) {
828 mlx4_err(dev, "Couldn't map kernel access region, " 865 mlx4_err(dev, "Couldn't map kernel access region, "
829 "aborting.\n"); 866 "aborting.\n");
@@ -907,6 +944,10 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
907 } 944 }
908 945
909 for (port = 1; port <= dev->caps.num_ports; port++) { 946 for (port = 1; port <= dev->caps.num_ports; port++) {
947 enum mlx4_port_type port_type = 0;
948 mlx4_SENSE_PORT(dev, port, &port_type);
949 if (port_type)
950 dev->caps.port_type[port] = port_type;
910 ib_port_default_caps = 0; 951 ib_port_default_caps = 0;
911 err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps); 952 err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
912 if (err) 953 if (err)
@@ -921,6 +962,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
921 goto err_mcg_table_free; 962 goto err_mcg_table_free;
922 } 963 }
923 } 964 }
965 mlx4_set_port_mask(dev);
924 966
925 return 0; 967 return 0;
926 968
@@ -963,13 +1005,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
963{ 1005{
964 struct mlx4_priv *priv = mlx4_priv(dev); 1006 struct mlx4_priv *priv = mlx4_priv(dev);
965 struct msix_entry *entries; 1007 struct msix_entry *entries;
966 int nreq; 1008 int nreq = min_t(int, dev->caps.num_ports *
1009 min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
1010 + MSIX_LEGACY_SZ, MAX_MSIX);
967 int err; 1011 int err;
968 int i; 1012 int i;
969 1013
970 if (msi_x) { 1014 if (msi_x) {
971 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 1015 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
972 num_possible_cpus() + 1); 1016 nreq);
973 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 1017 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
974 if (!entries) 1018 if (!entries)
975 goto no_msi; 1019 goto no_msi;
@@ -992,7 +1036,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
992 goto no_msi; 1036 goto no_msi;
993 } 1037 }
994 1038
995 dev->caps.num_comp_vectors = nreq - 1; 1039 if (nreq <
1040 MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
1041 /*Working in legacy mode , all EQ's shared*/
1042 dev->caps.comp_pool = 0;
1043 dev->caps.num_comp_vectors = nreq - 1;
1044 } else {
1045 dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
1046 dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
1047 }
996 for (i = 0; i < nreq; ++i) 1048 for (i = 0; i < nreq; ++i)
997 priv->eq_table.eq[i].irq = entries[i].vector; 1049 priv->eq_table.eq[i].irq = entries[i].vector;
998 1050
@@ -1004,6 +1056,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
1004 1056
1005no_msi: 1057no_msi:
1006 dev->caps.num_comp_vectors = 1; 1058 dev->caps.num_comp_vectors = 1;
1059 dev->caps.comp_pool = 0;
1007 1060
1008 for (i = 0; i < 2; ++i) 1061 for (i = 0; i < 2; ++i)
1009 priv->eq_table.eq[i].irq = dev->pdev->irq; 1062 priv->eq_table.eq[i].irq = dev->pdev->irq;
@@ -1043,6 +1096,59 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
1043 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 1096 device_remove_file(&info->dev->pdev->dev, &info->port_attr);
1044} 1097}
1045 1098
1099static int mlx4_init_steering(struct mlx4_dev *dev)
1100{
1101 struct mlx4_priv *priv = mlx4_priv(dev);
1102 int num_entries = dev->caps.num_ports;
1103 int i, j;
1104
1105 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
1106 if (!priv->steer)
1107 return -ENOMEM;
1108
1109 for (i = 0; i < num_entries; i++) {
1110 for (j = 0; j < MLX4_NUM_STEERS; j++) {
1111 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
1112 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
1113 }
1114 INIT_LIST_HEAD(&priv->steer[i].high_prios);
1115 }
1116 return 0;
1117}
1118
1119static void mlx4_clear_steering(struct mlx4_dev *dev)
1120{
1121 struct mlx4_priv *priv = mlx4_priv(dev);
1122 struct mlx4_steer_index *entry, *tmp_entry;
1123 struct mlx4_promisc_qp *pqp, *tmp_pqp;
1124 int num_entries = dev->caps.num_ports;
1125 int i, j;
1126
1127 for (i = 0; i < num_entries; i++) {
1128 for (j = 0; j < MLX4_NUM_STEERS; j++) {
1129 list_for_each_entry_safe(pqp, tmp_pqp,
1130 &priv->steer[i].promisc_qps[j],
1131 list) {
1132 list_del(&pqp->list);
1133 kfree(pqp);
1134 }
1135 list_for_each_entry_safe(entry, tmp_entry,
1136 &priv->steer[i].steer_entries[j],
1137 list) {
1138 list_del(&entry->list);
1139 list_for_each_entry_safe(pqp, tmp_pqp,
1140 &entry->duplicates,
1141 list) {
1142 list_del(&pqp->list);
1143 kfree(pqp);
1144 }
1145 kfree(entry);
1146 }
1147 }
1148 }
1149 kfree(priv->steer);
1150}
1151
1046static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 1152static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1047{ 1153{
1048 struct mlx4_priv *priv; 1154 struct mlx4_priv *priv;
@@ -1103,6 +1209,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1103 } 1209 }
1104 } 1210 }
1105 1211
1212 /* Allow large DMA segments, up to the firmware limit of 1 GB */
1213 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
1214
1106 priv = kzalloc(sizeof *priv, GFP_KERNEL); 1215 priv = kzalloc(sizeof *priv, GFP_KERNEL);
1107 if (!priv) { 1216 if (!priv) {
1108 dev_err(&pdev->dev, "Device struct alloc failed, " 1217 dev_err(&pdev->dev, "Device struct alloc failed, "
@@ -1121,6 +1230,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1121 INIT_LIST_HEAD(&priv->pgdir_list); 1230 INIT_LIST_HEAD(&priv->pgdir_list);
1122 mutex_init(&priv->pgdir_mutex); 1231 mutex_init(&priv->pgdir_mutex);
1123 1232
1233 pci_read_config_byte(pdev, PCI_REVISION_ID, &dev->rev_id);
1234
1235 INIT_LIST_HEAD(&priv->bf_list);
1236 mutex_init(&priv->bf_mutex);
1237
1124 /* 1238 /*
1125 * Now reset the HCA before we touch the PCI capabilities or 1239 * Now reset the HCA before we touch the PCI capabilities or
1126 * attempt a firmware command, since a boot ROM may have left 1240 * attempt a firmware command, since a boot ROM may have left
@@ -1145,8 +1259,15 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1145 if (err) 1259 if (err)
1146 goto err_close; 1260 goto err_close;
1147 1261
1262 priv->msix_ctl.pool_bm = 0;
1263 spin_lock_init(&priv->msix_ctl.pool_lock);
1264
1148 mlx4_enable_msi_x(dev); 1265 mlx4_enable_msi_x(dev);
1149 1266
1267 err = mlx4_init_steering(dev);
1268 if (err)
1269 goto err_free_eq;
1270
1150 err = mlx4_setup_hca(dev); 1271 err = mlx4_setup_hca(dev);
1151 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) { 1272 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
1152 dev->flags &= ~MLX4_FLAG_MSI_X; 1273 dev->flags &= ~MLX4_FLAG_MSI_X;
@@ -1155,7 +1276,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1155 } 1276 }
1156 1277
1157 if (err) 1278 if (err)
1158 goto err_free_eq; 1279 goto err_steer;
1159 1280
1160 for (port = 1; port <= dev->caps.num_ports; port++) { 1281 for (port = 1; port <= dev->caps.num_ports; port++) {
1161 err = mlx4_init_port_info(dev, port); 1282 err = mlx4_init_port_info(dev, port);
@@ -1188,6 +1309,9 @@ err_port:
1188 mlx4_cleanup_pd_table(dev); 1309 mlx4_cleanup_pd_table(dev);
1189 mlx4_cleanup_uar_table(dev); 1310 mlx4_cleanup_uar_table(dev);
1190 1311
1312err_steer:
1313 mlx4_clear_steering(dev);
1314
1191err_free_eq: 1315err_free_eq:
1192 mlx4_free_eq_table(dev); 1316 mlx4_free_eq_table(dev);
1193 1317
@@ -1247,6 +1371,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
1247 iounmap(priv->kar); 1371 iounmap(priv->kar);
1248 mlx4_uar_free(dev, &priv->driver_uar); 1372 mlx4_uar_free(dev, &priv->driver_uar);
1249 mlx4_cleanup_uar_table(dev); 1373 mlx4_cleanup_uar_table(dev);
1374 mlx4_clear_steering(dev);
1250 mlx4_free_eq_table(dev); 1375 mlx4_free_eq_table(dev);
1251 mlx4_close_hca(dev); 1376 mlx4_close_hca(dev);
1252 mlx4_cmd_cleanup(dev); 1377 mlx4_cmd_cleanup(dev);
@@ -1280,6 +1405,21 @@ static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
1280 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 1405 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
1281 { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ 1406 { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
1282 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */ 1407 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
1408 { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
1409 { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
1410 { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
1411 { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
1412 { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
1413 { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
1414 { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
1415 { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
1416 { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
1417 { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
1418 { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
1419 { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
1420 { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
1421 { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
1422 { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
1283 { 0, } 1423 { 0, }
1284}; 1424};
1285 1425
@@ -1304,7 +1444,7 @@ static int __init mlx4_verify_params(void)
1304 return -1; 1444 return -1;
1305 } 1445 }
1306 1446
1307 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) { 1447 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
1308 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); 1448 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
1309 return -1; 1449 return -1;
1310 } 1450 }
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index c4f88b7ef7b6..e63c37d6a115 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -32,6 +32,7 @@
32 */ 32 */
33 33
34#include <linux/string.h> 34#include <linux/string.h>
35#include <linux/etherdevice.h>
35 36
36#include <linux/mlx4/cmd.h> 37#include <linux/mlx4/cmd.h>
37 38
@@ -40,38 +41,40 @@
40#define MGM_QPN_MASK 0x00FFFFFF 41#define MGM_QPN_MASK 0x00FFFFFF
41#define MGM_BLCK_LB_BIT 30 42#define MGM_BLCK_LB_BIT 30
42 43
43struct mlx4_mgm {
44 __be32 next_gid_index;
45 __be32 members_count;
46 u32 reserved[2];
47 u8 gid[16];
48 __be32 qp[MLX4_QP_PER_MGM];
49};
50
51static const u8 zero_gid[16]; /* automatically initialized to 0 */ 44static const u8 zero_gid[16]; /* automatically initialized to 0 */
52 45
53static int mlx4_READ_MCG(struct mlx4_dev *dev, int index, 46static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
54 struct mlx4_cmd_mailbox *mailbox) 47 struct mlx4_cmd_mailbox *mailbox)
55{ 48{
56 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, 49 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
57 MLX4_CMD_TIME_CLASS_A); 50 MLX4_CMD_TIME_CLASS_A);
58} 51}
59 52
60static int mlx4_WRITE_MCG(struct mlx4_dev *dev, int index, 53static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
61 struct mlx4_cmd_mailbox *mailbox) 54 struct mlx4_cmd_mailbox *mailbox)
62{ 55{
63 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, 56 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
64 MLX4_CMD_TIME_CLASS_A); 57 MLX4_CMD_TIME_CLASS_A);
65} 58}
66 59
67static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 60static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
68 u16 *hash) 61 struct mlx4_cmd_mailbox *mailbox)
62{
63 u32 in_mod;
64
65 in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
66 return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
67 MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
68}
69
70static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
71 u16 *hash, u8 op_mod)
69{ 72{
70 u64 imm; 73 u64 imm;
71 int err; 74 int err;
72 75
73 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, 0, MLX4_CMD_MGID_HASH, 76 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
74 MLX4_CMD_TIME_CLASS_A); 77 MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
75 78
76 if (!err) 79 if (!err)
77 *hash = imm; 80 *hash = imm;
@@ -79,6 +82,458 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
79 return err; 82 return err;
80} 83}
81 84
/*
 * Look up @qpn in the promiscuous-QP list kept for (@pf_num, @steer).
 * Returns the tracking node, or NULL when the QP is not registered as
 * promiscuous.  Caller is expected to hold the mcg_table mutex.
 */
 85static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
 86 enum mlx4_steer_type steer,
 87 u32 qpn)
 88{
 89 struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
 90 struct mlx4_promisc_qp *pqp;
 91
 92 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
 93 if (pqp->qpn == qpn)
 94 return pqp;
 95 }
 96 /* not found */
 97 return NULL;
 98}
99
100/*
101 * Add new entry to steering data structure.
102 * All promisc QPs should be added as well
 *
 * Registers MGM entry @index under (@vep_num, @port, @steer) and appends
 * every currently-known promiscuous QP to the hardware entry so they keep
 * receiving traffic for this address.  Returns 0 or a negative errno; on
 * failure all local allocations are unwound.
103 */
104static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
105 enum mlx4_steer_type steer,
106 unsigned int index, u32 qpn)
107{
108 struct mlx4_steer *s_steer;
109 struct mlx4_cmd_mailbox *mailbox;
110 struct mlx4_mgm *mgm;
111 u32 members_count;
112 struct mlx4_steer_index *new_entry;
113 struct mlx4_promisc_qp *pqp;
114 struct mlx4_promisc_qp *dqp = NULL;
115 u32 prot;
116 int err;
117 u8 pf_num;
118
	/* pf_num encodes (vep, port) into a single steer-array index */
119 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
120 s_steer = &mlx4_priv(dev)->steer[pf_num];
121 new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
122 if (!new_entry)
123 return -ENOMEM;
124
125 INIT_LIST_HEAD(&new_entry->duplicates);
126 new_entry->index = index;
127 list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);
128
129 /* If the given qpn is also a promisc qp,
130 * it should be inserted to duplicates list
131 */
132 pqp = get_promisc_qp(dev, pf_num, steer, qpn);
133 if (pqp) {
134 dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
135 if (!dqp) {
136 err = -ENOMEM;
137 goto out_alloc;
138 }
139 dqp->qpn = qpn;
140 list_add_tail(&dqp->list, &new_entry->duplicates);
141 }
142
143 /* if no promisc qps for this vep, we are done */
144 if (list_empty(&s_steer->promisc_qps[steer]))
145 return 0;
146
147 /* now need to add all the promisc qps to the new
148 * steering entry, as they should also receive the packets
149 * destined to this address */
150 mailbox = mlx4_alloc_cmd_mailbox(dev);
151 if (IS_ERR(mailbox)) {
152 err = -ENOMEM;
153 goto out_alloc;
154 }
155 mgm = mailbox->buf;
156
157 err = mlx4_READ_ENTRY(dev, index, mailbox);
158 if (err)
159 goto out_mailbox;
160
	/* low 24 bits = member count, top 2 bits = protocol */
161 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
162 prot = be32_to_cpu(mgm->members_count) >> 30;
163 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
164 /* don't add already existing qpn */
165 if (pqp->qpn == qpn)
166 continue;
167 if (members_count == MLX4_QP_PER_MGM) {
168 /* out of space */
169 err = -ENOMEM;
170 goto out_mailbox;
171 }
172
173 /* add the qpn */
174 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
175 }
176 /* update the qps count and update the entry with all the promisc qps*/
177 mgm->members_count = cpu_to_be32(members_count | (prot << 30));
178 err = mlx4_WRITE_ENTRY(dev, index, mailbox);
179
180out_mailbox:
181 mlx4_free_cmd_mailbox(dev, mailbox);
182 if (!err)
183 return 0;
	/* error unwind: drop the duplicate node and the new entry */
184out_alloc:
185 if (dqp) {
186 list_del(&dqp->list);
187 kfree(dqp);
188 }
189 list_del(&new_entry->list);
190 kfree(new_entry);
191 return err;
192}
193
194/* update the data structures with existing steering entry
 *
 * If @qpn is a tracked promiscuous QP, record it as a duplicate on the
 * steering entry at @index so that a later detach does not remove it from
 * the hardware MGM.  Returns 0, -EINVAL if the entry is unknown, or
 * -ENOMEM. */
195static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
196 enum mlx4_steer_type steer,
197 unsigned int index, u32 qpn)
198{
199 struct mlx4_steer *s_steer;
200 struct mlx4_steer_index *tmp_entry, *entry = NULL;
201 struct mlx4_promisc_qp *pqp;
202 struct mlx4_promisc_qp *dqp;
203 u8 pf_num;
204
205 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
206 s_steer = &mlx4_priv(dev)->steer[pf_num];
207
208 pqp = get_promisc_qp(dev, pf_num, steer, qpn);
209 if (!pqp)
210 return 0; /* nothing to do */
211
212 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
213 if (tmp_entry->index == index) {
214 entry = tmp_entry;
215 break;
216 }
217 }
218 if (unlikely(!entry)) {
219 mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
220 return -EINVAL;
221 }
222
223 /* the given qpn is listed as a promisc qpn
224 * we need to add it as a duplicate to this entry
225 * for future references */
226 list_for_each_entry(dqp, &entry->duplicates, list) {
227 if (qpn == dqp->qpn)
228 return 0; /* qp is already duplicated */
229 }
230
231 /* add the qp as a duplicate on this index */
232 dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
233 if (!dqp)
234 return -ENOMEM;
235 dqp->qpn = qpn;
236 list_add_tail(&dqp->list, &entry->duplicates);
237
238 return 0;
239}
240
241/* Check whether a qpn is a duplicate on steering entry
242 * If so, it should not be removed from mgm
 *
 * Returns true when @qpn was tracked as a duplicate on the entry at
 * @index (and removes that duplicate record), false otherwise. */
243static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
244 enum mlx4_steer_type steer,
245 unsigned int index, u32 qpn)
246{
247 struct mlx4_steer *s_steer;
248 struct mlx4_steer_index *tmp_entry, *entry = NULL;
249 struct mlx4_promisc_qp *dqp, *tmp_dqp;
250 u8 pf_num;
251
252 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
253 s_steer = &mlx4_priv(dev)->steer[pf_num];
254
255 /* if qp is not promisc, it cannot be duplicated */
256 if (!get_promisc_qp(dev, pf_num, steer, qpn))
257 return false;
258
259 /* The qp is promisc qp so it is a duplicate on this index
260 * Find the index entry, and remove the duplicate */
261 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
262 if (tmp_entry->index == index) {
263 entry = tmp_entry;
264 break;
265 }
266 }
267 if (unlikely(!entry)) {
268 mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
269 return false;
270 }
271 list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
272 if (dqp->qpn == qpn) {
273 list_del(&dqp->list);
274 kfree(dqp);
275 }
276 }
277 return true;
278}
279
280/* If a steering entry contains only promisc QPs, it can be removed.
 *
 * Reads the hardware MGM at @index and returns true when every member QP
 * (ignoring @tqpn, the QP currently being detached) is a tracked
 * promiscuous QP and the software entry has no duplicates; in that case
 * the software entry is freed as well. */
281static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
282 enum mlx4_steer_type steer,
283 unsigned int index, u32 tqpn)
284{
285 struct mlx4_steer *s_steer;
286 struct mlx4_cmd_mailbox *mailbox;
287 struct mlx4_mgm *mgm;
288 struct mlx4_steer_index *entry = NULL, *tmp_entry;
289 u32 qpn;
290 u32 members_count;
291 bool ret = false;
292 int i;
293 u8 pf_num;
294
295 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
296 s_steer = &mlx4_priv(dev)->steer[pf_num];
297
298 mailbox = mlx4_alloc_cmd_mailbox(dev);
299 if (IS_ERR(mailbox))
300 return false;
301 mgm = mailbox->buf;
302
303 if (mlx4_READ_ENTRY(dev, index, mailbox))
304 goto out;
305 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
306 for (i = 0; i < members_count; i++) {
307 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
308 if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
309 /* the qp is not promisc, the entry can't be removed */
310 goto out;
311 }
312 }
313 /* All the qps currently registered for this entry are promiscuous,
314 * Checking for duplicates */
315 ret = true;
316 list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
317 if (entry->index == index) {
318 if (list_empty(&entry->duplicates)) {
319 list_del(&entry->list);
320 kfree(entry);
321 } else {
322 /* This entry contains duplicates so it shouldn't be removed */
323 ret = false;
324 goto out;
325 }
326 }
327 }
328
329out:
330 mlx4_free_cmd_mailbox(dev, mailbox);
331 return ret;
332}
333
/*
 * Register @qpn as promiscuous for (@vep_num, @port, @steer): append it to
 * every existing steering entry's hardware MGM (or record it as a duplicate
 * where it already appears), then rewrite the default entry to contain the
 * full promiscuous-QP set.  Serialized by mcg_table.mutex.
 */
334static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
335 enum mlx4_steer_type steer, u32 qpn)
336{
337 struct mlx4_steer *s_steer;
338 struct mlx4_cmd_mailbox *mailbox;
339 struct mlx4_mgm *mgm;
340 struct mlx4_steer_index *entry;
341 struct mlx4_promisc_qp *pqp;
342 struct mlx4_promisc_qp *dqp;
343 u32 members_count;
344 u32 prot;
345 int i;
346 bool found;
347 int last_index;
348 int err;
349 u8 pf_num;
350 struct mlx4_priv *priv = mlx4_priv(dev);
351 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
352 s_steer = &mlx4_priv(dev)->steer[pf_num];
353
354 mutex_lock(&priv->mcg_table.mutex);
355
356 if (get_promisc_qp(dev, pf_num, steer, qpn)) {
357 err = 0; /* Nothing to do, already exists */
358 goto out_mutex;
359 }
360
361 pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
362 if (!pqp) {
363 err = -ENOMEM;
364 goto out_mutex;
365 }
366 pqp->qpn = qpn;
367
368 mailbox = mlx4_alloc_cmd_mailbox(dev);
369 if (IS_ERR(mailbox)) {
370 err = -ENOMEM;
371 goto out_alloc;
372 }
373 mgm = mailbox->buf;
374
375 /* the promisc qp needs to be added for each one of the steering
376 * entries, if it already exists, needs to be added as a duplicate
377 * for this entry */
378 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
379 err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
380 if (err)
381 goto out_mailbox;
382
383 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
384 prot = be32_to_cpu(mgm->members_count) >> 30;
385 found = false;
386 for (i = 0; i < members_count; i++) {
387 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
388 /* Entry already exists, add to duplicates */
389 dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
390 if (!dqp)
				/* NOTE(review): err is not set to -ENOMEM
				 * here, so this failure path may return the
				 * last (successful) command status — confirm
				 * against upstream fix. */
391 goto out_mailbox;
392 dqp->qpn = qpn;
393 list_add_tail(&dqp->list, &entry->duplicates);
394 found = true;
395 }
396 }
397 if (!found) {
398 /* Need to add the qpn to mgm */
399 if (members_count == MLX4_QP_PER_MGM) {
400 /* entry is full */
401 err = -ENOMEM;
402 goto out_mailbox;
403 }
404 mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
405 mgm->members_count = cpu_to_be32(members_count | (prot << 30));
406 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
407 if (err)
408 goto out_mailbox;
409 }
		/* NOTE(review): last_index is assigned but never read. */
410 last_index = entry->index;
411 }
412
413 /* add the new qpn to list of promisc qps */
414 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
415 /* now need to add all the promisc qps to default entry */
416 memset(mgm, 0, sizeof *mgm);
417 members_count = 0;
418 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
419 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
420 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
421
422 err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
423 if (err)
424 goto out_list;
425
426 mlx4_free_cmd_mailbox(dev, mailbox);
427 mutex_unlock(&priv->mcg_table.mutex);
428 return 0;
429
430out_list:
431 list_del(&pqp->list);
432out_mailbox:
433 mlx4_free_cmd_mailbox(dev, mailbox);
434out_alloc:
435 kfree(pqp);
436out_mutex:
437 mutex_unlock(&priv->mcg_table.mutex);
438 return err;
439}
440
/*
 * Undo add_promisc_qp(): drop @qpn from the promiscuous set for
 * (@vep_num, @port, @steer), rewrite the default entry without it, and
 * remove it from every steering entry where it is not a duplicate.
 * On mailbox allocation failure the QP is put back on the promisc list.
 */
441static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
442 enum mlx4_steer_type steer, u32 qpn)
443{
444 struct mlx4_priv *priv = mlx4_priv(dev);
445 struct mlx4_steer *s_steer;
446 struct mlx4_cmd_mailbox *mailbox;
447 struct mlx4_mgm *mgm;
448 struct mlx4_steer_index *entry;
449 struct mlx4_promisc_qp *pqp;
450 struct mlx4_promisc_qp *dqp;
451 u32 members_count;
452 bool found;
453 bool back_to_list = false;
454 int loc, i;
455 int err;
456 u8 pf_num;
457
458 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
459 s_steer = &mlx4_priv(dev)->steer[pf_num];
460 mutex_lock(&priv->mcg_table.mutex);
461
462 pqp = get_promisc_qp(dev, pf_num, steer, qpn);
463 if (unlikely(!pqp)) {
464 mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
465 /* nothing to do */
466 err = 0;
467 goto out_mutex;
468 }
469
470 /*remove from list of promisc qps */
471 list_del(&pqp->list);
472
473 /* set the default entry not to include the removed one */
474 mailbox = mlx4_alloc_cmd_mailbox(dev);
475 if (IS_ERR(mailbox)) {
476 err = -ENOMEM;
477 back_to_list = true;
478 goto out_list;
479 }
480 mgm = mailbox->buf;
481 members_count = 0;
482 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
483 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
484 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
485
486 err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
487 if (err)
488 goto out_mailbox;
489
490 /* remove the qp from all the steering entries*/
491 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
492 found = false;
493 list_for_each_entry(dqp, &entry->duplicates, list) {
494 if (dqp->qpn == qpn) {
495 found = true;
496 break;
497 }
498 }
499 if (found) {
500 /* a duplicate, no need to change the mgm,
501 * only update the duplicates list */
502 list_del(&dqp->list);
503 kfree(dqp);
504 } else {
505 err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
506 if (err)
507 goto out_mailbox;
508 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
509 for (loc = -1, i = 0; i < members_count; ++i)
510 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
511 loc = i;
512
			/* NOTE(review): loc is not checked for -1 before use;
			 * if qpn is unexpectedly absent from the MGM this
			 * indexes mgm->qp[-1] — confirm invariant. */
513 mgm->members_count = cpu_to_be32(--members_count |
514 (MLX4_PROT_ETH << 30));
			/* swap last member into the freed slot */
515 mgm->qp[loc] = mgm->qp[i - 1];
516 mgm->qp[i - 1] = 0;
517
518 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
519 if (err)
520 goto out_mailbox;
521 }
522
523 }
524
525out_mailbox:
526 mlx4_free_cmd_mailbox(dev, mailbox);
527out_list:
528 if (back_to_list)
529 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
530 else
531 kfree(pqp);
532out_mutex:
533 mutex_unlock(&priv->mcg_table.mutex);
534 return err;
535}
536
82/* 537/*
83 * Caller must hold MCG table semaphore. gid and mgm parameters must 538 * Caller must hold MCG table semaphore. gid and mgm parameters must
84 * be properly aligned for command interface. 539 * be properly aligned for command interface.
@@ -94,14 +549,17 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
94 * If no AMGM exists for given gid, *index = -1, *prev = index of last 549 * If no AMGM exists for given gid, *index = -1, *prev = index of last
95 * entry in hash chain and *mgm holds end of hash chain. 550 * entry in hash chain and *mgm holds end of hash chain.
96 */ 551 */
97static int find_mgm(struct mlx4_dev *dev, 552static int find_entry(struct mlx4_dev *dev, u8 port,
98 u8 *gid, struct mlx4_cmd_mailbox *mgm_mailbox, 553 u8 *gid, enum mlx4_protocol prot,
99 u16 *hash, int *prev, int *index) 554 enum mlx4_steer_type steer,
555 struct mlx4_cmd_mailbox *mgm_mailbox,
556 u16 *hash, int *prev, int *index)
100{ 557{
101 struct mlx4_cmd_mailbox *mailbox; 558 struct mlx4_cmd_mailbox *mailbox;
102 struct mlx4_mgm *mgm = mgm_mailbox->buf; 559 struct mlx4_mgm *mgm = mgm_mailbox->buf;
103 u8 *mgid; 560 u8 *mgid;
104 int err; 561 int err;
562 u8 op_mod = (prot == MLX4_PROT_ETH) ? !!(dev->caps.vep_mc_steering) : 0;
105 563
106 mailbox = mlx4_alloc_cmd_mailbox(dev); 564 mailbox = mlx4_alloc_cmd_mailbox(dev);
107 if (IS_ERR(mailbox)) 565 if (IS_ERR(mailbox))
@@ -110,7 +568,7 @@ static int find_mgm(struct mlx4_dev *dev,
110 568
111 memcpy(mgid, gid, 16); 569 memcpy(mgid, gid, 16);
112 570
113 err = mlx4_MGID_HASH(dev, mailbox, hash); 571 err = mlx4_GID_HASH(dev, mailbox, hash, op_mod);
114 mlx4_free_cmd_mailbox(dev, mailbox); 572 mlx4_free_cmd_mailbox(dev, mailbox);
115 if (err) 573 if (err)
116 return err; 574 return err;
@@ -122,11 +580,11 @@ static int find_mgm(struct mlx4_dev *dev,
122 *prev = -1; 580 *prev = -1;
123 581
124 do { 582 do {
125 err = mlx4_READ_MCG(dev, *index, mgm_mailbox); 583 err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
126 if (err) 584 if (err)
127 return err; 585 return err;
128 586
129 if (!memcmp(mgm->gid, zero_gid, 16)) { 587 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
130 if (*index != *hash) { 588 if (*index != *hash) {
131 mlx4_err(dev, "Found zero MGID in AMGM.\n"); 589 mlx4_err(dev, "Found zero MGID in AMGM.\n");
132 err = -EINVAL; 590 err = -EINVAL;
@@ -134,7 +592,8 @@ static int find_mgm(struct mlx4_dev *dev,
134 return err; 592 return err;
135 } 593 }
136 594
137 if (!memcmp(mgm->gid, gid, 16)) 595 if (!memcmp(mgm->gid, gid, 16) &&
596 be32_to_cpu(mgm->members_count) >> 30 == prot)
138 return err; 597 return err;
139 598
140 *prev = *index; 599 *prev = *index;
@@ -145,8 +604,9 @@ static int find_mgm(struct mlx4_dev *dev,
145 return err; 604 return err;
146} 605}
147 606
148int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 607int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
149 int block_mcast_loopback) 608 int block_mcast_loopback, enum mlx4_protocol prot,
609 enum mlx4_steer_type steer)
150{ 610{
151 struct mlx4_priv *priv = mlx4_priv(dev); 611 struct mlx4_priv *priv = mlx4_priv(dev);
152 struct mlx4_cmd_mailbox *mailbox; 612 struct mlx4_cmd_mailbox *mailbox;
@@ -157,6 +617,8 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
157 int link = 0; 617 int link = 0;
158 int i; 618 int i;
159 int err; 619 int err;
620 u8 port = gid[5];
621 u8 new_entry = 0;
160 622
161 mailbox = mlx4_alloc_cmd_mailbox(dev); 623 mailbox = mlx4_alloc_cmd_mailbox(dev);
162 if (IS_ERR(mailbox)) 624 if (IS_ERR(mailbox))
@@ -164,14 +626,16 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
164 mgm = mailbox->buf; 626 mgm = mailbox->buf;
165 627
166 mutex_lock(&priv->mcg_table.mutex); 628 mutex_lock(&priv->mcg_table.mutex);
167 629 err = find_entry(dev, port, gid, prot, steer,
168 err = find_mgm(dev, gid, mailbox, &hash, &prev, &index); 630 mailbox, &hash, &prev, &index);
169 if (err) 631 if (err)
170 goto out; 632 goto out;
171 633
172 if (index != -1) { 634 if (index != -1) {
173 if (!memcmp(mgm->gid, zero_gid, 16)) 635 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
636 new_entry = 1;
174 memcpy(mgm->gid, gid, 16); 637 memcpy(mgm->gid, gid, 16);
638 }
175 } else { 639 } else {
176 link = 1; 640 link = 1;
177 641
@@ -187,7 +651,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
187 memcpy(mgm->gid, gid, 16); 651 memcpy(mgm->gid, gid, 16);
188 } 652 }
189 653
190 members_count = be32_to_cpu(mgm->members_count); 654 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
191 if (members_count == MLX4_QP_PER_MGM) { 655 if (members_count == MLX4_QP_PER_MGM) {
192 mlx4_err(dev, "MGM at index %x is full.\n", index); 656 mlx4_err(dev, "MGM at index %x is full.\n", index);
193 err = -ENOMEM; 657 err = -ENOMEM;
@@ -207,26 +671,34 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
207 else 671 else
208 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); 672 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
209 673
210 mgm->members_count = cpu_to_be32(members_count); 674 mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);
211 675
212 err = mlx4_WRITE_MCG(dev, index, mailbox); 676 err = mlx4_WRITE_ENTRY(dev, index, mailbox);
213 if (err) 677 if (err)
214 goto out; 678 goto out;
215 679
216 if (!link) 680 if (!link)
217 goto out; 681 goto out;
218 682
219 err = mlx4_READ_MCG(dev, prev, mailbox); 683 err = mlx4_READ_ENTRY(dev, prev, mailbox);
220 if (err) 684 if (err)
221 goto out; 685 goto out;
222 686
223 mgm->next_gid_index = cpu_to_be32(index << 6); 687 mgm->next_gid_index = cpu_to_be32(index << 6);
224 688
225 err = mlx4_WRITE_MCG(dev, prev, mailbox); 689 err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
226 if (err) 690 if (err)
227 goto out; 691 goto out;
228 692
229out: 693out:
694 if (prot == MLX4_PROT_ETH) {
695 /* manage the steering entry for promisc mode */
696 if (new_entry)
697 new_steering_entry(dev, 0, port, steer, index, qp->qpn);
698 else
699 existing_steering_entry(dev, 0, port, steer,
700 index, qp->qpn);
701 }
230 if (err && link && index != -1) { 702 if (err && link && index != -1) {
231 if (index < dev->caps.num_mgms) 703 if (index < dev->caps.num_mgms)
232 mlx4_warn(dev, "Got AMGM index %d < %d", 704 mlx4_warn(dev, "Got AMGM index %d < %d",
@@ -240,9 +712,9 @@ out:
240 mlx4_free_cmd_mailbox(dev, mailbox); 712 mlx4_free_cmd_mailbox(dev, mailbox);
241 return err; 713 return err;
242} 714}
243EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
244 715
245int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]) 716int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
717 enum mlx4_protocol prot, enum mlx4_steer_type steer)
246{ 718{
247 struct mlx4_priv *priv = mlx4_priv(dev); 719 struct mlx4_priv *priv = mlx4_priv(dev);
248 struct mlx4_cmd_mailbox *mailbox; 720 struct mlx4_cmd_mailbox *mailbox;
@@ -252,6 +724,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
252 int prev, index; 724 int prev, index;
253 int i, loc; 725 int i, loc;
254 int err; 726 int err;
727 u8 port = gid[5];
728 bool removed_entry = false;
255 729
256 mailbox = mlx4_alloc_cmd_mailbox(dev); 730 mailbox = mlx4_alloc_cmd_mailbox(dev);
257 if (IS_ERR(mailbox)) 731 if (IS_ERR(mailbox))
@@ -260,7 +734,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
260 734
261 mutex_lock(&priv->mcg_table.mutex); 735 mutex_lock(&priv->mcg_table.mutex);
262 736
263 err = find_mgm(dev, gid, mailbox, &hash, &prev, &index); 737 err = find_entry(dev, port, gid, prot, steer,
738 mailbox, &hash, &prev, &index);
264 if (err) 739 if (err)
265 goto out; 740 goto out;
266 741
@@ -270,7 +745,12 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
270 goto out; 745 goto out;
271 } 746 }
272 747
 273 members_count = be32_to_cpu(mgm->members_count); 748 /* if this qp is also a promisc qp, it shouldn't be removed */
749 if (prot == MLX4_PROT_ETH &&
750 check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
751 goto out;
752
753 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
274 for (loc = -1, i = 0; i < members_count; ++i) 754 for (loc = -1, i = 0; i < members_count; ++i)
275 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) 755 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
276 loc = i; 756 loc = i;
@@ -282,26 +762,31 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
282 } 762 }
283 763
284 764
285 mgm->members_count = cpu_to_be32(--members_count); 765 mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
286 mgm->qp[loc] = mgm->qp[i - 1]; 766 mgm->qp[loc] = mgm->qp[i - 1];
287 mgm->qp[i - 1] = 0; 767 mgm->qp[i - 1] = 0;
288 768
289 if (i != 1) { 769 if (prot == MLX4_PROT_ETH)
290 err = mlx4_WRITE_MCG(dev, index, mailbox); 770 removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
771 if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
772 err = mlx4_WRITE_ENTRY(dev, index, mailbox);
291 goto out; 773 goto out;
292 } 774 }
293 775
776 /* We are going to delete the entry, members count should be 0 */
777 mgm->members_count = cpu_to_be32((u32) prot << 30);
778
294 if (prev == -1) { 779 if (prev == -1) {
295 /* Remove entry from MGM */ 780 /* Remove entry from MGM */
296 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; 781 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
297 if (amgm_index) { 782 if (amgm_index) {
298 err = mlx4_READ_MCG(dev, amgm_index, mailbox); 783 err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
299 if (err) 784 if (err)
300 goto out; 785 goto out;
301 } else 786 } else
302 memset(mgm->gid, 0, 16); 787 memset(mgm->gid, 0, 16);
303 788
304 err = mlx4_WRITE_MCG(dev, index, mailbox); 789 err = mlx4_WRITE_ENTRY(dev, index, mailbox);
305 if (err) 790 if (err)
306 goto out; 791 goto out;
307 792
@@ -316,13 +801,13 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
316 } else { 801 } else {
317 /* Remove entry from AMGM */ 802 /* Remove entry from AMGM */
318 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; 803 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
319 err = mlx4_READ_MCG(dev, prev, mailbox); 804 err = mlx4_READ_ENTRY(dev, prev, mailbox);
320 if (err) 805 if (err)
321 goto out; 806 goto out;
322 807
323 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); 808 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);
324 809
325 err = mlx4_WRITE_MCG(dev, prev, mailbox); 810 err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
326 if (err) 811 if (err)
327 goto out; 812 goto out;
328 813
@@ -340,8 +825,85 @@ out:
340 mlx4_free_cmd_mailbox(dev, mailbox); 825 mlx4_free_cmd_mailbox(dev, mailbox);
341 return err; 826 return err;
342} 827}
828
829
830int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
831 int block_mcast_loopback, enum mlx4_protocol prot)
832{
833 enum mlx4_steer_type steer;
834
835 steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
836
837 if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
838 return 0;
839
840 if (prot == MLX4_PROT_ETH)
841 gid[7] |= (steer << 1);
842
843 return mlx4_qp_attach_common(dev, qp, gid,
844 block_mcast_loopback, prot,
845 steer);
846}
847EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
848
849int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
850 enum mlx4_protocol prot)
851{
852 enum mlx4_steer_type steer;
853
854 steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
855
856 if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
857 return 0;
858
859 if (prot == MLX4_PROT_ETH) {
860 gid[7] |= (steer << 1);
861 }
862
863 return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
864}
343EXPORT_SYMBOL_GPL(mlx4_multicast_detach); 865EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
344 866
867
868int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
869{
870 if (!dev->caps.vep_mc_steering)
871 return 0;
872
873
874 return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
875}
876EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
877
878int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
879{
880 if (!dev->caps.vep_mc_steering)
881 return 0;
882
883
884 return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
885}
886EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
887
888int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
889{
890 if (!dev->caps.vep_mc_steering)
891 return 0;
892
893
894 return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
895}
896EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
897
898int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
899{
900 if (!dev->caps.vep_mc_steering)
901 return 0;
902
903 return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
904}
905EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
906
345int mlx4_init_mcg_table(struct mlx4_dev *dev) 907int mlx4_init_mcg_table(struct mlx4_dev *dev)
346{ 908{
347 struct mlx4_priv *priv = mlx4_priv(dev); 909 struct mlx4_priv *priv = mlx4_priv(dev);
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 0da5bb7285b4..dd7d745fbab4 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -105,6 +105,7 @@ struct mlx4_bitmap {
105 u32 max; 105 u32 max;
106 u32 reserved_top; 106 u32 reserved_top;
107 u32 mask; 107 u32 mask;
108 u32 avail;
108 spinlock_t lock; 109 spinlock_t lock;
109 unsigned long *table; 110 unsigned long *table;
110}; 111};
@@ -162,6 +163,27 @@ struct mlx4_fw {
162 u8 catas_bar; 163 u8 catas_bar;
163}; 164};
164 165
166#define MGM_QPN_MASK 0x00FFFFFF
167#define MGM_BLCK_LB_BIT 30
168
169struct mlx4_promisc_qp {
170 struct list_head list;
171 u32 qpn;
172};
173
174struct mlx4_steer_index {
175 struct list_head list;
176 unsigned int index;
177 struct list_head duplicates;
178};
179
180struct mlx4_mgm {
181 __be32 next_gid_index;
182 __be32 members_count;
183 u32 reserved[2];
184 u8 gid[16];
185 __be32 qp[MLX4_QP_PER_MGM];
186};
165struct mlx4_cmd { 187struct mlx4_cmd {
166 struct pci_pool *pool; 188 struct pci_pool *pool;
167 void __iomem *hcr; 189 void __iomem *hcr;
@@ -265,6 +287,10 @@ struct mlx4_vlan_table {
265 int max; 287 int max;
266}; 288};
267 289
290struct mlx4_mac_entry {
291 u64 mac;
292};
293
268struct mlx4_port_info { 294struct mlx4_port_info {
269 struct mlx4_dev *dev; 295 struct mlx4_dev *dev;
270 int port; 296 int port;
@@ -272,7 +298,9 @@ struct mlx4_port_info {
272 struct device_attribute port_attr; 298 struct device_attribute port_attr;
273 enum mlx4_port_type tmp_type; 299 enum mlx4_port_type tmp_type;
274 struct mlx4_mac_table mac_table; 300 struct mlx4_mac_table mac_table;
301 struct radix_tree_root mac_tree;
275 struct mlx4_vlan_table vlan_table; 302 struct mlx4_vlan_table vlan_table;
303 int base_qpn;
276}; 304};
277 305
278struct mlx4_sense { 306struct mlx4_sense {
@@ -282,6 +310,17 @@ struct mlx4_sense {
282 struct delayed_work sense_poll; 310 struct delayed_work sense_poll;
283}; 311};
284 312
313struct mlx4_msix_ctl {
314 u64 pool_bm;
315 spinlock_t pool_lock;
316};
317
318struct mlx4_steer {
319 struct list_head promisc_qps[MLX4_NUM_STEERS];
320 struct list_head steer_entries[MLX4_NUM_STEERS];
321 struct list_head high_prios;
322};
323
285struct mlx4_priv { 324struct mlx4_priv {
286 struct mlx4_dev dev; 325 struct mlx4_dev dev;
287 326
@@ -313,6 +352,11 @@ struct mlx4_priv {
313 struct mlx4_port_info port[MLX4_MAX_PORTS + 1]; 352 struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
314 struct mlx4_sense sense; 353 struct mlx4_sense sense;
315 struct mutex port_mutex; 354 struct mutex port_mutex;
355 struct mlx4_msix_ctl msix_ctl;
356 struct mlx4_steer *steer;
357 struct list_head bf_list;
358 struct mutex bf_mutex;
359 struct io_mapping *bf_mapping;
316}; 360};
317 361
318static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) 362static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -328,6 +372,7 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
328void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); 372void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
329u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align); 373u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
330void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt); 374void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
375u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
331int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, 376int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
332 u32 reserved_bot, u32 resetrved_top); 377 u32 reserved_bot, u32 resetrved_top);
333void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); 378void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
@@ -386,6 +431,8 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
386 431
387void mlx4_handle_catas_err(struct mlx4_dev *dev); 432void mlx4_handle_catas_err(struct mlx4_dev *dev);
388 433
434int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
435 enum mlx4_port_type *type);
389void mlx4_do_sense_ports(struct mlx4_dev *dev, 436void mlx4_do_sense_ports(struct mlx4_dev *dev,
390 enum mlx4_port_type *stype, 437 enum mlx4_port_type *stype,
391 enum mlx4_port_type *defaults); 438 enum mlx4_port_type *defaults);
@@ -403,4 +450,9 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
403int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); 450int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
404int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); 451int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
405 452
453int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
454 enum mlx4_protocol prot, enum mlx4_steer_type steer);
455int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
456 int block_mcast_loopback, enum mlx4_protocol prot,
457 enum mlx4_steer_type steer);
406#endif /* MLX4_H */ 458#endif /* MLX4_H */
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 449210994ee9..0b5150df0585 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -38,19 +38,19 @@
38#include <linux/list.h> 38#include <linux/list.h>
39#include <linux/mutex.h> 39#include <linux/mutex.h>
40#include <linux/netdevice.h> 40#include <linux/netdevice.h>
41#include <linux/inet_lro.h>
42 41
43#include <linux/mlx4/device.h> 42#include <linux/mlx4/device.h>
44#include <linux/mlx4/qp.h> 43#include <linux/mlx4/qp.h>
45#include <linux/mlx4/cq.h> 44#include <linux/mlx4/cq.h>
46#include <linux/mlx4/srq.h> 45#include <linux/mlx4/srq.h>
47#include <linux/mlx4/doorbell.h> 46#include <linux/mlx4/doorbell.h>
47#include <linux/mlx4/cmd.h>
48 48
49#include "en_port.h" 49#include "en_port.h"
50 50
51#define DRV_NAME "mlx4_en" 51#define DRV_NAME "mlx4_en"
52#define DRV_VERSION "1.4.1.1" 52#define DRV_VERSION "1.5.4.1"
53#define DRV_RELDATE "June 2009" 53#define DRV_RELDATE "March 2011"
54 54
55#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) 55#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
56 56
@@ -61,8 +61,8 @@
61 61
62#define MLX4_EN_PAGE_SHIFT 12 62#define MLX4_EN_PAGE_SHIFT 12
63#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) 63#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
64#define MAX_TX_RINGS 16
65#define MAX_RX_RINGS 16 64#define MAX_RX_RINGS 16
65#define MIN_RX_RINGS 4
66#define TXBB_SIZE 64 66#define TXBB_SIZE 64
67#define HEADROOM (2048 / TXBB_SIZE + 1) 67#define HEADROOM (2048 / TXBB_SIZE + 1)
68#define STAMP_STRIDE 64 68#define STAMP_STRIDE 64
@@ -107,6 +107,7 @@ enum {
107#define MLX4_EN_SMALL_PKT_SIZE 64 107#define MLX4_EN_SMALL_PKT_SIZE 64
108#define MLX4_EN_NUM_TX_RINGS 8 108#define MLX4_EN_NUM_TX_RINGS 8
109#define MLX4_EN_NUM_PPP_RINGS 8 109#define MLX4_EN_NUM_PPP_RINGS 8
110#define MAX_TX_RINGS (MLX4_EN_NUM_TX_RINGS + MLX4_EN_NUM_PPP_RINGS)
110#define MLX4_EN_DEF_TX_RING_SIZE 512 111#define MLX4_EN_DEF_TX_RING_SIZE 512
111#define MLX4_EN_DEF_RX_RING_SIZE 1024 112#define MLX4_EN_DEF_RX_RING_SIZE 1024
112 113
@@ -124,6 +125,7 @@ enum {
124#define MLX4_EN_RX_SIZE_THRESH 1024 125#define MLX4_EN_RX_SIZE_THRESH 1024
125#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH) 126#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
126#define MLX4_EN_SAMPLE_INTERVAL 0 127#define MLX4_EN_SAMPLE_INTERVAL 0
128#define MLX4_EN_AVG_PKT_SMALL 256
127 129
128#define MLX4_EN_AUTO_CONF 0xffff 130#define MLX4_EN_AUTO_CONF 0xffff
129 131
@@ -139,10 +141,14 @@ enum {
139 141
140#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN) 142#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
141#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN) 143#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
144#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
142 145
143#define MLX4_EN_MIN_MTU 46 146#define MLX4_EN_MIN_MTU 46
144#define ETH_BCAST 0xffffffffffffULL 147#define ETH_BCAST 0xffffffffffffULL
145 148
149#define MLX4_EN_LOOPBACK_RETRIES 5
150#define MLX4_EN_LOOPBACK_TIMEOUT 100
151
146#ifdef MLX4_EN_PERF_STAT 152#ifdef MLX4_EN_PERF_STAT
147/* Number of samples to 'average' */ 153/* Number of samples to 'average' */
148#define AVG_SIZE 128 154#define AVG_SIZE 128
@@ -210,6 +216,9 @@ struct mlx4_en_tx_desc {
210 216
211#define MLX4_EN_USE_SRQ 0x01000000 217#define MLX4_EN_USE_SRQ 0x01000000
212 218
219#define MLX4_EN_CX3_LOW_ID 0x1000
220#define MLX4_EN_CX3_HIGH_ID 0x1005
221
213struct mlx4_en_rx_alloc { 222struct mlx4_en_rx_alloc {
214 struct page *page; 223 struct page *page;
215 u16 offset; 224 u16 offset;
@@ -239,6 +248,8 @@ struct mlx4_en_tx_ring {
239 unsigned long bytes; 248 unsigned long bytes;
240 unsigned long packets; 249 unsigned long packets;
241 spinlock_t comp_lock; 250 spinlock_t comp_lock;
251 struct mlx4_bf bf;
252 bool bf_enabled;
242}; 253};
243 254
244struct mlx4_en_rx_desc { 255struct mlx4_en_rx_desc {
@@ -249,7 +260,6 @@ struct mlx4_en_rx_desc {
249struct mlx4_en_rx_ring { 260struct mlx4_en_rx_ring {
250 struct mlx4_hwq_resources wqres; 261 struct mlx4_hwq_resources wqres;
251 struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS]; 262 struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
252 struct net_lro_mgr lro;
253 u32 size ; /* number of Rx descs*/ 263 u32 size ; /* number of Rx descs*/
254 u32 actual_size; 264 u32 actual_size;
255 u32 size_mask; 265 u32 size_mask;
@@ -313,7 +323,8 @@ struct mlx4_en_port_profile {
313 323
314struct mlx4_en_profile { 324struct mlx4_en_profile {
315 int rss_xor; 325 int rss_xor;
316 int num_lro; 326 int tcp_rss;
327 int udp_rss;
317 u8 rss_mask; 328 u8 rss_mask;
318 u32 active_ports; 329 u32 active_ports;
319 u32 small_pkt_int; 330 u32 small_pkt_int;
@@ -337,6 +348,7 @@ struct mlx4_en_dev {
337 struct mlx4_mr mr; 348 struct mlx4_mr mr;
338 u32 priv_pdn; 349 u32 priv_pdn;
339 spinlock_t uar_lock; 350 spinlock_t uar_lock;
351 u8 mac_removed[MLX4_MAX_PORTS + 1];
340}; 352};
341 353
342 354
@@ -355,6 +367,13 @@ struct mlx4_en_rss_context {
355 u8 hash_fn; 367 u8 hash_fn;
356 u8 flags; 368 u8 flags;
357 __be32 rss_key[10]; 369 __be32 rss_key[10];
370 __be32 base_qpn_udp;
371};
372
373struct mlx4_en_port_state {
374 int link_state;
375 int link_speed;
376 int transciver;
358}; 377};
359 378
360struct mlx4_en_pkt_stats { 379struct mlx4_en_pkt_stats {
@@ -365,9 +384,6 @@ struct mlx4_en_pkt_stats {
365}; 384};
366 385
367struct mlx4_en_port_stats { 386struct mlx4_en_port_stats {
368 unsigned long lro_aggregated;
369 unsigned long lro_flushed;
370 unsigned long lro_no_desc;
371 unsigned long tso_packets; 387 unsigned long tso_packets;
372 unsigned long queue_stopped; 388 unsigned long queue_stopped;
373 unsigned long wake_queue; 389 unsigned long wake_queue;
@@ -376,7 +392,7 @@ struct mlx4_en_port_stats {
376 unsigned long rx_chksum_good; 392 unsigned long rx_chksum_good;
377 unsigned long rx_chksum_none; 393 unsigned long rx_chksum_none;
378 unsigned long tx_chksum_offload; 394 unsigned long tx_chksum_offload;
379#define NUM_PORT_STATS 11 395#define NUM_PORT_STATS 8
380}; 396};
381 397
382struct mlx4_en_perf_stats { 398struct mlx4_en_perf_stats {
@@ -405,6 +421,7 @@ struct mlx4_en_priv {
405 struct vlan_group *vlgrp; 421 struct vlan_group *vlgrp;
406 struct net_device_stats stats; 422 struct net_device_stats stats;
407 struct net_device_stats ret_stats; 423 struct net_device_stats ret_stats;
424 struct mlx4_en_port_state port_state;
408 spinlock_t stats_lock; 425 spinlock_t stats_lock;
409 426
410 unsigned long last_moder_packets; 427 unsigned long last_moder_packets;
@@ -423,6 +440,8 @@ struct mlx4_en_priv {
423 u16 sample_interval; 440 u16 sample_interval;
424 u16 adaptive_rx_coal; 441 u16 adaptive_rx_coal;
425 u32 msg_enable; 442 u32 msg_enable;
443 u32 loopback_ok;
444 u32 validate_loopback;
426 445
427 struct mlx4_hwq_resources res; 446 struct mlx4_hwq_resources res;
428 int link_state; 447 int link_state;
@@ -432,7 +451,6 @@ struct mlx4_en_priv {
432 int registered; 451 int registered;
433 int allocated; 452 int allocated;
434 int stride; 453 int stride;
435 int rx_csum;
436 u64 mac; 454 u64 mac;
437 int mac_index; 455 int mac_index;
438 unsigned max_mtu; 456 unsigned max_mtu;
@@ -441,6 +459,7 @@ struct mlx4_en_priv {
441 struct mlx4_en_rss_map rss_map; 459 struct mlx4_en_rss_map rss_map;
442 u32 flags; 460 u32 flags;
443#define MLX4_EN_FLAG_PROMISC 0x1 461#define MLX4_EN_FLAG_PROMISC 0x1
462#define MLX4_EN_FLAG_MC_PROMISC 0x2
444 u32 tx_ring_num; 463 u32 tx_ring_num;
445 u32 rx_ring_num; 464 u32 rx_ring_num;
446 u32 rx_skb_size; 465 u32 rx_skb_size;
@@ -449,6 +468,7 @@ struct mlx4_en_priv {
449 u16 log_rx_info; 468 u16 log_rx_info;
450 469
451 struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS]; 470 struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
471 int tx_vector;
452 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; 472 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
453 struct mlx4_en_cq tx_cq[MAX_TX_RINGS]; 473 struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
454 struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; 474 struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
@@ -463,6 +483,14 @@ struct mlx4_en_priv {
463 char *mc_addrs; 483 char *mc_addrs;
464 int mc_addrs_cnt; 484 int mc_addrs_cnt;
465 struct mlx4_en_stat_out_mbox hw_stats; 485 struct mlx4_en_stat_out_mbox hw_stats;
486 int vids[128];
487 bool wol;
488};
489
490enum mlx4_en_wol {
491 MLX4_EN_WOL_MAGIC = (1ULL << 61),
492 MLX4_EN_WOL_ENABLED = (1ULL << 62),
493 MLX4_EN_WOL_DO_MODIFY = (1ULL << 63),
466}; 494};
467 495
468 496
@@ -473,12 +501,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
473int mlx4_en_start_port(struct net_device *dev); 501int mlx4_en_start_port(struct net_device *dev);
474void mlx4_en_stop_port(struct net_device *dev); 502void mlx4_en_stop_port(struct net_device *dev);
475 503
476void mlx4_en_free_resources(struct mlx4_en_priv *priv); 504void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors);
477int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); 505int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
478 506
479int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, 507int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
480 int entries, int ring, enum cq_type mode); 508 int entries, int ring, enum cq_type mode);
481void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 509void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
510 bool reserve_vectors);
482int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 511int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
483void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 512void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
484int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 513int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -490,7 +519,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
490netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 519netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
491 520
492int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, 521int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
493 u32 size, u16 stride); 522 int qpn, u32 size, u16 stride);
494void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); 523void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
495int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, 524int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
496 struct mlx4_en_tx_ring *ring, 525 struct mlx4_en_tx_ring *ring,
@@ -531,6 +560,11 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
531 u8 promisc); 560 u8 promisc);
532 561
533int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset); 562int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
563int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
564
565#define MLX4_EN_NUM_SELF_TEST 5
566void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
567u64 mlx4_en_mac_to_u64(u8 *addr);
534 568
535/* 569/*
536 * Globals 570 * Globals
@@ -555,6 +589,8 @@ do { \
555 en_print(KERN_WARNING, priv, format, ##arg) 589 en_print(KERN_WARNING, priv, format, ##arg)
556#define en_err(priv, format, arg...) \ 590#define en_err(priv, format, arg...) \
557 en_print(KERN_ERR, priv, format, ##arg) 591 en_print(KERN_ERR, priv, format, ##arg)
592#define en_info(priv, format, arg...) \
593 en_print(KERN_INFO, priv, format, ## arg)
558 594
559#define mlx4_err(mdev, format, arg...) \ 595#define mlx4_err(mdev, format, arg...) \
560 pr_err("%s %s: " format, DRV_NAME, \ 596 pr_err("%s %s: " format, DRV_NAME, \
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index c4988d6bd5b2..1286b886dcea 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -32,12 +32,17 @@
32 */ 32 */
33 33
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/io-mapping.h>
35 36
36#include <asm/page.h> 37#include <asm/page.h>
37 38
38#include "mlx4.h" 39#include "mlx4.h"
39#include "icm.h" 40#include "icm.h"
40 41
42enum {
43 MLX4_NUM_RESERVED_UARS = 8
44};
45
41int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn) 46int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
42{ 47{
43 struct mlx4_priv *priv = mlx4_priv(dev); 48 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -77,6 +82,7 @@ int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
77 return -ENOMEM; 82 return -ENOMEM;
78 83
79 uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index; 84 uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
85 uar->map = NULL;
80 86
81 return 0; 87 return 0;
82} 88}
@@ -88,6 +94,102 @@ void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
88} 94}
89EXPORT_SYMBOL_GPL(mlx4_uar_free); 95EXPORT_SYMBOL_GPL(mlx4_uar_free);
90 96
97int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
98{
99 struct mlx4_priv *priv = mlx4_priv(dev);
100 struct mlx4_uar *uar;
101 int err = 0;
102 int idx;
103
104 if (!priv->bf_mapping)
105 return -ENOMEM;
106
107 mutex_lock(&priv->bf_mutex);
108 if (!list_empty(&priv->bf_list))
109 uar = list_entry(priv->bf_list.next, struct mlx4_uar, bf_list);
110 else {
111 if (mlx4_bitmap_avail(&priv->uar_table.bitmap) < MLX4_NUM_RESERVED_UARS) {
112 err = -ENOMEM;
113 goto out;
114 }
115 uar = kmalloc(sizeof *uar, GFP_KERNEL);
116 if (!uar) {
117 err = -ENOMEM;
118 goto out;
119 }
120 err = mlx4_uar_alloc(dev, uar);
121 if (err)
122 goto free_kmalloc;
123
124 uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE);
125 if (!uar->map) {
126 err = -ENOMEM;
127 goto free_uar;
128 }
129
130 uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT);
131 if (!uar->bf_map) {
132 err = -ENOMEM;
133 goto unamp_uar;
134 }
135 uar->free_bf_bmap = 0;
136 list_add(&uar->bf_list, &priv->bf_list);
137 }
138
139 bf->uar = uar;
140 idx = ffz(uar->free_bf_bmap);
141 uar->free_bf_bmap |= 1 << idx;
142 bf->uar = uar;
143 bf->offset = 0;
144 bf->buf_size = dev->caps.bf_reg_size / 2;
145 bf->reg = uar->bf_map + idx * dev->caps.bf_reg_size;
146 if (uar->free_bf_bmap == (1 << dev->caps.bf_regs_per_page) - 1)
147 list_del_init(&uar->bf_list);
148
149 goto out;
150
151unamp_uar:
152 bf->uar = NULL;
153 iounmap(uar->map);
154
155free_uar:
156 mlx4_uar_free(dev, uar);
157
158free_kmalloc:
159 kfree(uar);
160
161out:
162 mutex_unlock(&priv->bf_mutex);
163 return err;
164}
165EXPORT_SYMBOL_GPL(mlx4_bf_alloc);
166
167void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf)
168{
169 struct mlx4_priv *priv = mlx4_priv(dev);
170 int idx;
171
172 if (!bf->uar || !bf->uar->bf_map)
173 return;
174
175 mutex_lock(&priv->bf_mutex);
176 idx = (bf->reg - bf->uar->bf_map) / dev->caps.bf_reg_size;
177 bf->uar->free_bf_bmap &= ~(1 << idx);
178 if (!bf->uar->free_bf_bmap) {
179 if (!list_empty(&bf->uar->bf_list))
180 list_del(&bf->uar->bf_list);
181
182 io_mapping_unmap(bf->uar->bf_map);
183 iounmap(bf->uar->map);
184 mlx4_uar_free(dev, bf->uar);
185 kfree(bf->uar);
186 } else if (list_empty(&bf->uar->bf_list))
187 list_add(&bf->uar->bf_list, &priv->bf_list);
188
189 mutex_unlock(&priv->bf_mutex);
190}
191EXPORT_SYMBOL_GPL(mlx4_bf_free);
192
91int mlx4_init_uar_table(struct mlx4_dev *dev) 193int mlx4_init_uar_table(struct mlx4_dev *dev)
92{ 194{
93 if (dev->caps.num_uars <= 128) { 195 if (dev->caps.num_uars <= 128) {
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 606aa58afdea..8856659fb43c 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -90,12 +90,79 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
90 return err; 90 return err;
91} 91}
92 92
93int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index) 93static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
94 u64 mac, int *qpn, u8 reserve)
94{ 95{
95 struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table; 96 struct mlx4_qp qp;
97 u8 gid[16] = {0};
98 int err;
99
100 if (reserve) {
101 err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
102 if (err) {
103 mlx4_err(dev, "Failed to reserve qp for mac registration\n");
104 return err;
105 }
106 }
107 qp.qpn = *qpn;
108
109 mac &= 0xffffffffffffULL;
110 mac = cpu_to_be64(mac << 16);
111 memcpy(&gid[10], &mac, ETH_ALEN);
112 gid[5] = port;
113 gid[7] = MLX4_UC_STEER << 1;
114
115 err = mlx4_qp_attach_common(dev, &qp, gid, 0,
116 MLX4_PROT_ETH, MLX4_UC_STEER);
117 if (err && reserve)
118 mlx4_qp_release_range(dev, *qpn, 1);
119
120 return err;
121}
122
123static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
124 u64 mac, int qpn, u8 free)
125{
126 struct mlx4_qp qp;
127 u8 gid[16] = {0};
128
129 qp.qpn = qpn;
130 mac &= 0xffffffffffffULL;
131 mac = cpu_to_be64(mac << 16);
132 memcpy(&gid[10], &mac, ETH_ALEN);
133 gid[5] = port;
134 gid[7] = MLX4_UC_STEER << 1;
135
136 mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER);
137 if (free)
138 mlx4_qp_release_range(dev, qpn, 1);
139}
140
141int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
142{
143 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
144 struct mlx4_mac_table *table = &info->mac_table;
145 struct mlx4_mac_entry *entry;
96 int i, err = 0; 146 int i, err = 0;
97 int free = -1; 147 int free = -1;
98 148
149 if (dev->caps.vep_uc_steering) {
150 err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
151 if (!err) {
152 entry = kmalloc(sizeof *entry, GFP_KERNEL);
153 if (!entry) {
154 mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
155 return -ENOMEM;
156 }
157 entry->mac = mac;
158 err = radix_tree_insert(&info->mac_tree, *qpn, entry);
159 if (err) {
160 mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
161 return err;
162 }
163 } else
164 return err;
165 }
99 mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); 166 mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
100 mutex_lock(&table->mutex); 167 mutex_lock(&table->mutex);
101 for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { 168 for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
@@ -105,12 +172,17 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
105 } 172 }
106 173
107 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { 174 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
108 /* MAC already registered, increase refernce count */ 175 /* MAC already registered, increase references count */
109 *index = i;
110 ++table->refs[i]; 176 ++table->refs[i];
111 goto out; 177 goto out;
112 } 178 }
113 } 179 }
180
181 if (free < 0) {
182 err = -ENOMEM;
183 goto out;
184 }
185
114 mlx4_dbg(dev, "Free MAC index is %d\n", free); 186 mlx4_dbg(dev, "Free MAC index is %d\n", free);
115 187
116 if (table->total == table->max) { 188 if (table->total == table->max) {
@@ -131,7 +203,8 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
131 goto out; 203 goto out;
132 } 204 }
133 205
134 *index = free; 206 if (!dev->caps.vep_uc_steering)
207 *qpn = info->base_qpn + free;
135 ++table->total; 208 ++table->total;
136out: 209out:
137 mutex_unlock(&table->mutex); 210 mutex_unlock(&table->mutex);
@@ -139,20 +212,52 @@ out:
139} 212}
140EXPORT_SYMBOL_GPL(mlx4_register_mac); 213EXPORT_SYMBOL_GPL(mlx4_register_mac);
141 214
142void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index) 215static int validate_index(struct mlx4_dev *dev,
216 struct mlx4_mac_table *table, int index)
143{ 217{
144 struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table; 218 int err = 0;
145 219
146 mutex_lock(&table->mutex); 220 if (index < 0 || index >= table->max || !table->entries[index]) {
147 if (!table->refs[index]) { 221 mlx4_warn(dev, "No valid Mac entry for the given index\n");
148 mlx4_warn(dev, "No MAC entry for index %d\n", index); 222 err = -EINVAL;
149 goto out;
150 } 223 }
151 if (--table->refs[index]) { 224 return err;
152 mlx4_warn(dev, "Have more references for index %d," 225}
153 "no need to modify MAC table\n", index); 226
154 goto out; 227static int find_index(struct mlx4_dev *dev,
228 struct mlx4_mac_table *table, u64 mac)
229{
230 int i;
231 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
232 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
233 return i;
234 }
235 /* Mac not found */
236 return -EINVAL;
237}
238
239void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
240{
241 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
242 struct mlx4_mac_table *table = &info->mac_table;
243 int index = qpn - info->base_qpn;
244 struct mlx4_mac_entry *entry;
245
246 if (dev->caps.vep_uc_steering) {
247 entry = radix_tree_lookup(&info->mac_tree, qpn);
248 if (entry) {
249 mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
250 radix_tree_delete(&info->mac_tree, qpn);
251 index = find_index(dev, table, entry->mac);
252 kfree(entry);
253 }
155 } 254 }
255
256 mutex_lock(&table->mutex);
257
258 if (validate_index(dev, table, index))
259 goto out;
260
156 table->entries[index] = 0; 261 table->entries[index] = 0;
157 mlx4_set_port_mac_table(dev, port, table->entries); 262 mlx4_set_port_mac_table(dev, port, table->entries);
158 --table->total; 263 --table->total;
@@ -161,6 +266,44 @@ out:
161} 266}
162EXPORT_SYMBOL_GPL(mlx4_unregister_mac); 267EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
163 268
269int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap)
270{
271 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
272 struct mlx4_mac_table *table = &info->mac_table;
273 int index = qpn - info->base_qpn;
274 struct mlx4_mac_entry *entry;
275 int err;
276
277 if (dev->caps.vep_uc_steering) {
278 entry = radix_tree_lookup(&info->mac_tree, qpn);
279 if (!entry)
280 return -EINVAL;
281 index = find_index(dev, table, entry->mac);
282 mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0);
283 entry->mac = new_mac;
284 err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0);
285 if (err || index < 0)
286 return err;
287 }
288
289 mutex_lock(&table->mutex);
290
291 err = validate_index(dev, table, index);
292 if (err)
293 goto out;
294
295 table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
296
297 err = mlx4_set_port_mac_table(dev, port, table->entries);
298 if (unlikely(err)) {
299 mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac);
300 table->entries[index] = 0;
301 }
302out:
303 mutex_unlock(&table->mutex);
304 return err;
305}
306EXPORT_SYMBOL_GPL(mlx4_replace_mac);
164static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, 307static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
165 __be32 *entries) 308 __be32 *entries)
166{ 309{
@@ -182,6 +325,25 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
182 return err; 325 return err;
183} 326}
184 327
328int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
329{
330 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
331 int i;
332
333 for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
334 if (table->refs[i] &&
335 (vid == (MLX4_VLAN_MASK &
336 be32_to_cpu(table->entries[i])))) {
337 /* VLAN already registered, increase reference count */
338 *idx = i;
339 return 0;
340 }
341 }
342
343 return -ENOENT;
344}
345EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
346
185int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) 347int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
186{ 348{
187 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; 349 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
@@ -198,13 +360,18 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
198 if (table->refs[i] && 360 if (table->refs[i] &&
199 (vlan == (MLX4_VLAN_MASK & 361 (vlan == (MLX4_VLAN_MASK &
200 be32_to_cpu(table->entries[i])))) { 362 be32_to_cpu(table->entries[i])))) {
201 /* Vlan already registered, increase refernce count */ 363 /* Vlan already registered, increase references count */
202 *index = i; 364 *index = i;
203 ++table->refs[i]; 365 ++table->refs[i];
204 goto out; 366 goto out;
205 } 367 }
206 } 368 }
207 369
370 if (free < 0) {
371 err = -ENOMEM;
372 goto out;
373 }
374
208 if (table->total == table->max) { 375 if (table->total == table->max) {
209 /* No free vlan entries */ 376 /* No free vlan entries */
210 err = -ENOSPC; 377 err = -ENOSPC;
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index 5caf0115fa5b..b967647d0c76 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -85,7 +85,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
85 struct mlx4_resource tmp; 85 struct mlx4_resource tmp;
86 int i, j; 86 int i, j;
87 87
88 profile = kzalloc(MLX4_RES_NUM * sizeof *profile, GFP_KERNEL); 88 profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL);
89 if (!profile) 89 if (!profile)
90 return -ENOMEM; 90 return -ENOMEM;
91 91
@@ -107,9 +107,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
107 profile[MLX4_RES_AUXC].num = request->num_qp; 107 profile[MLX4_RES_AUXC].num = request->num_qp;
108 profile[MLX4_RES_SRQ].num = request->num_srq; 108 profile[MLX4_RES_SRQ].num = request->num_srq;
109 profile[MLX4_RES_CQ].num = request->num_cq; 109 profile[MLX4_RES_CQ].num = request->num_cq;
110 profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, 110 profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
111 dev_cap->reserved_eqs +
112 num_possible_cpus() + 1);
113 profile[MLX4_RES_DMPT].num = request->num_mpt; 111 profile[MLX4_RES_DMPT].num = request->num_mpt;
114 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; 112 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
115 profile[MLX4_RES_MTT].num = request->num_mtt; 113 profile[MLX4_RES_MTT].num = request->num_mtt;
diff --git a/drivers/net/mlx4/sense.c b/drivers/net/mlx4/sense.c
index 015fbe785c13..e2337a7411d9 100644
--- a/drivers/net/mlx4/sense.c
+++ b/drivers/net/mlx4/sense.c
@@ -38,8 +38,8 @@
38 38
39#include "mlx4.h" 39#include "mlx4.h"
40 40
41static int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, 41int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
42 enum mlx4_port_type *type) 42 enum mlx4_port_type *type)
43{ 43{
44 u64 out_param; 44 u64 out_param;
45 int err = 0; 45 int err = 0;