| author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500 |
|---|---|---|
| committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500 |
| commit | fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch) | |
| tree | a57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/net/mlx4 | |
| parent | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff) | |
Diffstat (limited to 'drivers/net/mlx4')
33 files changed, 15196 insertions, 0 deletions
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
new file mode 100644
index 00000000000..d1aa45a1585
--- /dev/null
+++ b/drivers/net/mlx4/Makefile
| @@ -0,0 +1,9 @@ | |||
| 1 | obj-$(CONFIG_MLX4_CORE) += mlx4_core.o | ||
| 2 | |||
| 3 | mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \ | ||
| 4 | mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o | ||
| 5 | |||
| 6 | obj-$(CONFIG_MLX4_EN) += mlx4_en.o | ||
| 7 | |||
| 8 | mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \ | ||
| 9 | en_resources.o en_netdev.o en_selftest.o | ||
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
new file mode 100644
index 00000000000..116cae334da
--- /dev/null
+++ b/drivers/net/mlx4/alloc.c
| @@ -0,0 +1,414 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | ||
| 3 | * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. | ||
| 4 | * | ||
| 5 | * This software is available to you under a choice of one of two | ||
| 6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 7 | * General Public License (GPL) Version 2, available from the file | ||
| 8 | * COPYING in the main directory of this source tree, or the | ||
| 9 | * OpenIB.org BSD license below: | ||
| 10 | * | ||
| 11 | * Redistribution and use in source and binary forms, with or | ||
| 12 | * without modification, are permitted provided that the following | ||
| 13 | * conditions are met: | ||
| 14 | * | ||
| 15 | * - Redistributions of source code must retain the above | ||
| 16 | * copyright notice, this list of conditions and the following | ||
| 17 | * disclaimer. | ||
| 18 | * | ||
| 19 | * - Redistributions in binary form must reproduce the above | ||
| 20 | * copyright notice, this list of conditions and the following | ||
| 21 | * disclaimer in the documentation and/or other materials | ||
| 22 | * provided with the distribution. | ||
| 23 | * | ||
| 24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 31 | * SOFTWARE. | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <linux/errno.h> | ||
| 35 | #include <linux/slab.h> | ||
| 36 | #include <linux/mm.h> | ||
| 37 | #include <linux/bitmap.h> | ||
| 38 | #include <linux/dma-mapping.h> | ||
| 39 | #include <linux/vmalloc.h> | ||
| 40 | |||
| 41 | #include "mlx4.h" | ||
| 42 | |||
| 43 | u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap) | ||
| 44 | { | ||
| 45 | u32 obj; | ||
| 46 | |||
| 47 | spin_lock(&bitmap->lock); | ||
| 48 | |||
| 49 | obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last); | ||
| 50 | if (obj >= bitmap->max) { | ||
| 51 | bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) | ||
| 52 | & bitmap->mask; | ||
| 53 | obj = find_first_zero_bit(bitmap->table, bitmap->max); | ||
| 54 | } | ||
| 55 | |||
| 56 | if (obj < bitmap->max) { | ||
| 57 | set_bit(obj, bitmap->table); | ||
| 58 | bitmap->last = (obj + 1); | ||
| 59 | if (bitmap->last == bitmap->max) | ||
| 60 | bitmap->last = 0; | ||
| 61 | obj |= bitmap->top; | ||
| 62 | } else | ||
| 63 | obj = -1; | ||
| 64 | |||
| 65 | if (obj != -1) | ||
| 66 | --bitmap->avail; | ||
| 67 | |||
| 68 | spin_unlock(&bitmap->lock); | ||
| 69 | |||
| 70 | return obj; | ||
| 71 | } | ||
| 72 | |||
| 73 | void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj) | ||
| 74 | { | ||
| 75 | mlx4_bitmap_free_range(bitmap, obj, 1); | ||
| 76 | } | ||
| 77 | |||
| 78 | u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align) | ||
| 79 | { | ||
| 80 | u32 obj; | ||
| 81 | |||
| 82 | if (likely(cnt == 1 && align == 1)) | ||
| 83 | return mlx4_bitmap_alloc(bitmap); | ||
| 84 | |||
| 85 | spin_lock(&bitmap->lock); | ||
| 86 | |||
| 87 | obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, | ||
| 88 | bitmap->last, cnt, align - 1); | ||
| 89 | if (obj >= bitmap->max) { | ||
| 90 | bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) | ||
| 91 | & bitmap->mask; | ||
| 92 | obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, | ||
| 93 | 0, cnt, align - 1); | ||
| 94 | } | ||
| 95 | |||
| 96 | if (obj < bitmap->max) { | ||
| 97 | bitmap_set(bitmap->table, obj, cnt); | ||
| 98 | if (obj == bitmap->last) { | ||
| 99 | bitmap->last = (obj + cnt); | ||
| 100 | if (bitmap->last >= bitmap->max) | ||
| 101 | bitmap->last = 0; | ||
| 102 | } | ||
| 103 | obj |= bitmap->top; | ||
| 104 | } else | ||
| 105 | obj = -1; | ||
| 106 | |||
| 107 | if (obj != -1) | ||
| 108 | bitmap->avail -= cnt; | ||
| 109 | |||
| 110 | spin_unlock(&bitmap->lock); | ||
| 111 | |||
| 112 | return obj; | ||
| 113 | } | ||
| 114 | |||
| 115 | u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap) | ||
| 116 | { | ||
| 117 | return bitmap->avail; | ||
| 118 | } | ||
| 119 | |||
| 120 | void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt) | ||
| 121 | { | ||
| 122 | obj &= bitmap->max + bitmap->reserved_top - 1; | ||
| 123 | |||
| 124 | spin_lock(&bitmap->lock); | ||
| 125 | bitmap_clear(bitmap->table, obj, cnt); | ||
| 126 | bitmap->last = min(bitmap->last, obj); | ||
| 127 | bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) | ||
| 128 | & bitmap->mask; | ||
| 129 | bitmap->avail += cnt; | ||
| 130 | spin_unlock(&bitmap->lock); | ||
| 131 | } | ||
| 132 | |||
| 133 | int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, | ||
| 134 | u32 reserved_bot, u32 reserved_top) | ||
| 135 | { | ||
| 136 | /* num must be a power of 2 */ | ||
| 137 | if (num != roundup_pow_of_two(num)) | ||
| 138 | return -EINVAL; | ||
| 139 | |||
| 140 | bitmap->last = 0; | ||
| 141 | bitmap->top = 0; | ||
| 142 | bitmap->max = num - reserved_top; | ||
| 143 | bitmap->mask = mask; | ||
| 144 | bitmap->reserved_top = reserved_top; | ||
| 145 | bitmap->avail = num - reserved_top - reserved_bot; | ||
| 146 | spin_lock_init(&bitmap->lock); | ||
| 147 | bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) * | ||
| 148 | sizeof (long), GFP_KERNEL); | ||
| 149 | if (!bitmap->table) | ||
| 150 | return -ENOMEM; | ||
| 151 | |||
| 152 | bitmap_set(bitmap->table, 0, reserved_bot); | ||
| 153 | |||
| 154 | return 0; | ||
| 155 | } | ||
| 156 | |||
| 157 | void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap) | ||
| 158 | { | ||
| 159 | kfree(bitmap->table); | ||
| 160 | } | ||
| 161 | |||
| 162 | /* | ||
| 163 | * Handling for queue buffers -- we allocate a bunch of memory and | ||
| 164 | * register it in a memory region at HCA virtual address 0. If the | ||
| 165 | * requested size is > max_direct, we split the allocation into | ||
| 166 | * multiple pages, so we don't require too much contiguous memory. | ||
| 167 | */ | ||
| 168 | |||
| 169 | int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | ||
| 170 | struct mlx4_buf *buf) | ||
| 171 | { | ||
| 172 | dma_addr_t t; | ||
| 173 | |||
| 174 | if (size <= max_direct) { | ||
| 175 | buf->nbufs = 1; | ||
| 176 | buf->npages = 1; | ||
| 177 | buf->page_shift = get_order(size) + PAGE_SHIFT; | ||
| 178 | buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev, | ||
| 179 | size, &t, GFP_KERNEL); | ||
| 180 | if (!buf->direct.buf) | ||
| 181 | return -ENOMEM; | ||
| 182 | |||
| 183 | buf->direct.map = t; | ||
| 184 | |||
| 185 | while (t & ((1 << buf->page_shift) - 1)) { | ||
| 186 | --buf->page_shift; | ||
| 187 | buf->npages *= 2; | ||
| 188 | } | ||
| 189 | |||
| 190 | memset(buf->direct.buf, 0, size); | ||
| 191 | } else { | ||
| 192 | int i; | ||
| 193 | |||
| 194 | buf->direct.buf = NULL; | ||
| 195 | buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; | ||
| 196 | buf->npages = buf->nbufs; | ||
| 197 | buf->page_shift = PAGE_SHIFT; | ||
| 198 | buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), | ||
| 199 | GFP_KERNEL); | ||
| 200 | if (!buf->page_list) | ||
| 201 | return -ENOMEM; | ||
| 202 | |||
| 203 | for (i = 0; i < buf->nbufs; ++i) { | ||
| 204 | buf->page_list[i].buf = | ||
| 205 | dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, | ||
| 206 | &t, GFP_KERNEL); | ||
| 207 | if (!buf->page_list[i].buf) | ||
| 208 | goto err_free; | ||
| 209 | |||
| 210 | buf->page_list[i].map = t; | ||
| 211 | |||
| 212 | memset(buf->page_list[i].buf, 0, PAGE_SIZE); | ||
| 213 | } | ||
| 214 | |||
| 215 | if (BITS_PER_LONG == 64) { | ||
| 216 | struct page **pages; | ||
| 217 | pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL); | ||
| 218 | if (!pages) | ||
| 219 | goto err_free; | ||
| 220 | for (i = 0; i < buf->nbufs; ++i) | ||
| 221 | pages[i] = virt_to_page(buf->page_list[i].buf); | ||
| 222 | buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); | ||
| 223 | kfree(pages); | ||
| 224 | if (!buf->direct.buf) | ||
| 225 | goto err_free; | ||
| 226 | } | ||
| 227 | } | ||
| 228 | |||
| 229 | return 0; | ||
| 230 | |||
| 231 | err_free: | ||
| 232 | mlx4_buf_free(dev, size, buf); | ||
| 233 | |||
| 234 | return -ENOMEM; | ||
| 235 | } | ||
| 236 | EXPORT_SYMBOL_GPL(mlx4_buf_alloc); | ||
| 237 | |||
| 238 | void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) | ||
| 239 | { | ||
| 240 | int i; | ||
| 241 | |||
| 242 | if (buf->nbufs == 1) | ||
| 243 | dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, | ||
| 244 | buf->direct.map); | ||
| 245 | else { | ||
| 246 | if (BITS_PER_LONG == 64 && buf->direct.buf) | ||
| 247 | vunmap(buf->direct.buf); | ||
| 248 | |||
| 249 | for (i = 0; i < buf->nbufs; ++i) | ||
| 250 | if (buf->page_list[i].buf) | ||
| 251 | dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, | ||
| 252 | buf->page_list[i].buf, | ||
| 253 | buf->page_list[i].map); | ||
| 254 | kfree(buf->page_list); | ||
| 255 | } | ||
| 256 | } | ||
| 257 | EXPORT_SYMBOL_GPL(mlx4_buf_free); | ||
| 258 | |||
| 259 | static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device) | ||
| 260 | { | ||
| 261 | struct mlx4_db_pgdir *pgdir; | ||
| 262 | |||
| 263 | pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL); | ||
| 264 | if (!pgdir) | ||
| 265 | return NULL; | ||
| 266 | |||
| 267 | bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2); | ||
| 268 | pgdir->bits[0] = pgdir->order0; | ||
| 269 | pgdir->bits[1] = pgdir->order1; | ||
| 270 | pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, | ||
| 271 | &pgdir->db_dma, GFP_KERNEL); | ||
| 272 | if (!pgdir->db_page) { | ||
| 273 | kfree(pgdir); | ||
| 274 | return NULL; | ||
| 275 | } | ||
| 276 | |||
| 277 | return pgdir; | ||
| 278 | } | ||
| 279 | |||
| 280 | static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir, | ||
| 281 | struct mlx4_db *db, int order) | ||
| 282 | { | ||
| 283 | int o; | ||
| 284 | int i; | ||
| 285 | |||
| 286 | for (o = order; o <= 1; ++o) { | ||
| 287 | i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o); | ||
| 288 | if (i < MLX4_DB_PER_PAGE >> o) | ||
| 289 | goto found; | ||
| 290 | } | ||
| 291 | |||
| 292 | return -ENOMEM; | ||
| 293 | |||
| 294 | found: | ||
| 295 | clear_bit(i, pgdir->bits[o]); | ||
| 296 | |||
| 297 | i <<= o; | ||
| 298 | |||
| 299 | if (o > order) | ||
| 300 | set_bit(i ^ 1, pgdir->bits[order]); | ||
| 301 | |||
| 302 | db->u.pgdir = pgdir; | ||
| 303 | db->index = i; | ||
| 304 | db->db = pgdir->db_page + db->index; | ||
| 305 | db->dma = pgdir->db_dma + db->index * 4; | ||
| 306 | db->order = order; | ||
| 307 | |||
| 308 | return 0; | ||
| 309 | } | ||
| 310 | |||
| 311 | int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order) | ||
| 312 | { | ||
| 313 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 314 | struct mlx4_db_pgdir *pgdir; | ||
| 315 | int ret = 0; | ||
| 316 | |||
| 317 | mutex_lock(&priv->pgdir_mutex); | ||
| 318 | |||
| 319 | list_for_each_entry(pgdir, &priv->pgdir_list, list) | ||
| 320 | if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) | ||
| 321 | goto out; | ||
| 322 | |||
| 323 | pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev)); | ||
| 324 | if (!pgdir) { | ||
| 325 | ret = -ENOMEM; | ||
| 326 | goto out; | ||
| 327 | } | ||
| 328 | |||
| 329 | list_add(&pgdir->list, &priv->pgdir_list); | ||
| 330 | |||
| 331 | /* This should never fail -- we just allocated an empty page: */ | ||
| 332 | WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order)); | ||
| 333 | |||
| 334 | out: | ||
| 335 | mutex_unlock(&priv->pgdir_mutex); | ||
| 336 | |||
| 337 | return ret; | ||
| 338 | } | ||
| 339 | EXPORT_SYMBOL_GPL(mlx4_db_alloc); | ||
| 340 | |||
| 341 | void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db) | ||
| 342 | { | ||
| 343 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 344 | int o; | ||
| 345 | int i; | ||
| 346 | |||
| 347 | mutex_lock(&priv->pgdir_mutex); | ||
| 348 | |||
| 349 | o = db->order; | ||
| 350 | i = db->index; | ||
| 351 | |||
| 352 | if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) { | ||
| 353 | clear_bit(i ^ 1, db->u.pgdir->order0); | ||
| 354 | ++o; | ||
| 355 | } | ||
| 356 | i >>= o; | ||
| 357 | set_bit(i, db->u.pgdir->bits[o]); | ||
| 358 | |||
| 359 | if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) { | ||
| 360 | dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, | ||
| 361 | db->u.pgdir->db_page, db->u.pgdir->db_dma); | ||
| 362 | list_del(&db->u.pgdir->list); | ||
| 363 | kfree(db->u.pgdir); | ||
| 364 | } | ||
| 365 | |||
| 366 | mutex_unlock(&priv->pgdir_mutex); | ||
| 367 | } | ||
| 368 | EXPORT_SYMBOL_GPL(mlx4_db_free); | ||
| 369 | |||
| 370 | int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, | ||
| 371 | int size, int max_direct) | ||
| 372 | { | ||
| 373 | int err; | ||
| 374 | |||
| 375 | err = mlx4_db_alloc(dev, &wqres->db, 1); | ||
| 376 | if (err) | ||
| 377 | return err; | ||
| 378 | |||
| 379 | *wqres->db.db = 0; | ||
| 380 | |||
| 381 | err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf); | ||
| 382 | if (err) | ||
| 383 | goto err_db; | ||
| 384 | |||
| 385 | err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift, | ||
| 386 | &wqres->mtt); | ||
| 387 | if (err) | ||
| 388 | goto err_buf; | ||
| 389 | |||
| 390 | err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf); | ||
| 391 | if (err) | ||
| 392 | goto err_mtt; | ||
| 393 | |||
| 394 | return 0; | ||
| 395 | |||
| 396 | err_mtt: | ||
| 397 | mlx4_mtt_cleanup(dev, &wqres->mtt); | ||
| 398 | err_buf: | ||
| 399 | mlx4_buf_free(dev, size, &wqres->buf); | ||
| 400 | err_db: | ||
| 401 | mlx4_db_free(dev, &wqres->db); | ||
| 402 | |||
| 403 | return err; | ||
| 404 | } | ||
| 405 | EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res); | ||
| 406 | |||
| 407 | void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, | ||
| 408 | int size) | ||
| 409 | { | ||
| 410 | mlx4_mtt_cleanup(dev, &wqres->mtt); | ||
| 411 | mlx4_buf_free(dev, size, &wqres->buf); | ||
| 412 | mlx4_db_free(dev, &wqres->db); | ||
| 413 | } | ||
| 414 | EXPORT_SYMBOL_GPL(mlx4_free_hwq_res); | ||
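A note on the allocator above: mlx4_bitmap_alloc() scans round-robin from `last`, and whenever a scan wraps past `max` (and on every free) the high bits held in `top` rotate within `mask`, so a stale reference to a freed object number will not match that slot's next owner. Below is a minimal user-space sketch of that behavior, with tiny made-up sizes, a byte-per-bit table, and no locking; it illustrates the technique rather than reproducing the driver's code.

```c
/* Minimal user-space model of the mlx4 round-robin bitmap allocator
 * (no spinlock, tiny sizes -- both are assumptions for illustration). */
#include <stdio.h>
#include <stdint.h>

#define MAX   8u                 /* bitmap size; must be a power of two */
#define MASK  (4 * MAX - 1)      /* object-number space wider than MAX,
                                    as for QP numbers in the driver */

static uint8_t  table[MAX];      /* one byte per bit, for simplicity */
static uint32_t last, top;

static uint32_t bitmap_alloc(void)
{
    uint32_t obj;

    for (obj = last; obj < MAX && table[obj]; ++obj)
        ;                        /* find_next_zero_bit(table, MAX, last) */
    if (obj >= MAX) {            /* wrapped past the end: rotate top bits */
        top = (top + MAX) & MASK;
        for (obj = 0; obj < MAX && table[obj]; ++obj)
            ;                    /* find_first_zero_bit(table, MAX) */
    }
    if (obj >= MAX)
        return (uint32_t) -1;    /* bitmap exhausted */

    table[obj] = 1;
    last = (obj + 1 == MAX) ? 0 : obj + 1;
    return obj | top;            /* top is a multiple of MAX, so | == + */
}

static void bitmap_free(uint32_t obj)
{
    obj &= MAX - 1;              /* strip the rotating top bits */
    table[obj] = 0;
    if (obj < last)
        last = obj;
    top = (top + MAX) & MASK;    /* free also rotates top, as in the driver */
}

int main(void)
{
    for (unsigned i = 0; i < MAX; ++i)
        printf("alloc -> %u\n", bitmap_alloc());  /* 0 .. 7 */
    bitmap_free(2);
    printf("alloc -> %u\n", bitmap_alloc());      /* 10: slot 2 with top = 8 */
    return 0;
}
```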
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
new file mode 100644
index 00000000000..32f947154c3
--- /dev/null
+++ b/drivers/net/mlx4/catas.c
| @@ -0,0 +1,156 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. | ||
| 3 | * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. | ||
| 4 | * | ||
| 5 | * This software is available to you under a choice of one of two | ||
| 6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 7 | * General Public License (GPL) Version 2, available from the file | ||
| 8 | * COPYING in the main directory of this source tree, or the | ||
| 9 | * OpenIB.org BSD license below: | ||
| 10 | * | ||
| 11 | * Redistribution and use in source and binary forms, with or | ||
| 12 | * without modification, are permitted provided that the following | ||
| 13 | * conditions are met: | ||
| 14 | * | ||
| 15 | * - Redistributions of source code must retain the above | ||
| 16 | * copyright notice, this list of conditions and the following | ||
| 17 | * disclaimer. | ||
| 18 | * | ||
| 19 | * - Redistributions in binary form must reproduce the above | ||
| 20 | * copyright notice, this list of conditions and the following | ||
| 21 | * disclaimer in the documentation and/or other materials | ||
| 22 | * provided with the distribution. | ||
| 23 | * | ||
| 24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 31 | * SOFTWARE. | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <linux/workqueue.h> | ||
| 35 | |||
| 36 | #include "mlx4.h" | ||
| 37 | |||
| 38 | enum { | ||
| 39 | MLX4_CATAS_POLL_INTERVAL = 5 * HZ, | ||
| 40 | }; | ||
| 41 | |||
| 42 | static DEFINE_SPINLOCK(catas_lock); | ||
| 43 | |||
| 44 | static LIST_HEAD(catas_list); | ||
| 45 | static struct work_struct catas_work; | ||
| 46 | |||
| 47 | static int internal_err_reset = 1; | ||
| 48 | module_param(internal_err_reset, int, 0644); | ||
| 49 | MODULE_PARM_DESC(internal_err_reset, | ||
| 50 | "Reset device on internal errors if non-zero (default 1)"); | ||
| 51 | |||
| 52 | static void dump_err_buf(struct mlx4_dev *dev) | ||
| 53 | { | ||
| 54 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 55 | |||
| 56 | int i; | ||
| 57 | |||
| 58 | mlx4_err(dev, "Internal error detected:\n"); | ||
| 59 | for (i = 0; i < priv->fw.catas_size; ++i) | ||
| 60 | mlx4_err(dev, " buf[%02x]: %08x\n", | ||
| 61 | i, swab32(readl(priv->catas_err.map + i))); | ||
| 62 | } | ||
| 63 | |||
| 64 | static void poll_catas(unsigned long dev_ptr) | ||
| 65 | { | ||
| 66 | struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr; | ||
| 67 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 68 | |||
| 69 | if (readl(priv->catas_err.map)) { | ||
| 70 | dump_err_buf(dev); | ||
| 71 | |||
| 72 | mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0); | ||
| 73 | |||
| 74 | if (internal_err_reset) { | ||
| 75 | spin_lock(&catas_lock); | ||
| 76 | list_add(&priv->catas_err.list, &catas_list); | ||
| 77 | spin_unlock(&catas_lock); | ||
| 78 | |||
| 79 | queue_work(mlx4_wq, &catas_work); | ||
| 80 | } | ||
| 81 | } else | ||
| 82 | mod_timer(&priv->catas_err.timer, | ||
| 83 | round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL)); | ||
| 84 | } | ||
| 85 | |||
| 86 | static void catas_reset(struct work_struct *work) | ||
| 87 | { | ||
| 88 | struct mlx4_priv *priv, *tmppriv; | ||
| 89 | struct mlx4_dev *dev; | ||
| 90 | |||
| 91 | LIST_HEAD(tlist); | ||
| 92 | int ret; | ||
| 93 | |||
| 94 | spin_lock_irq(&catas_lock); | ||
| 95 | list_splice_init(&catas_list, &tlist); | ||
| 96 | spin_unlock_irq(&catas_lock); | ||
| 97 | |||
| 98 | list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) { | ||
| 99 | struct pci_dev *pdev = priv->dev.pdev; | ||
| 100 | |||
| 101 | ret = mlx4_restart_one(priv->dev.pdev); | ||
| 102 | /* 'priv' now is not valid */ | ||
| 103 | if (ret) | ||
| 104 | pr_err("mlx4 %s: Reset failed (%d)\n", | ||
| 105 | pci_name(pdev), ret); | ||
| 106 | else { | ||
| 107 | dev = pci_get_drvdata(pdev); | ||
| 108 | mlx4_dbg(dev, "Reset succeeded\n"); | ||
| 109 | } | ||
| 110 | } | ||
| 111 | } | ||
| 112 | |||
| 113 | void mlx4_start_catas_poll(struct mlx4_dev *dev) | ||
| 114 | { | ||
| 115 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 116 | phys_addr_t addr; | ||
| 117 | |||
| 118 | INIT_LIST_HEAD(&priv->catas_err.list); | ||
| 119 | init_timer(&priv->catas_err.timer); | ||
| 120 | priv->catas_err.map = NULL; | ||
| 121 | |||
| 122 | addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) + | ||
| 123 | priv->fw.catas_offset; | ||
| 124 | |||
| 125 | priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4); | ||
| 126 | if (!priv->catas_err.map) { | ||
| 127 | mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n", | ||
| 128 | (unsigned long long) addr); | ||
| 129 | return; | ||
| 130 | } | ||
| 131 | |||
| 132 | priv->catas_err.timer.data = (unsigned long) dev; | ||
| 133 | priv->catas_err.timer.function = poll_catas; | ||
| 134 | priv->catas_err.timer.expires = | ||
| 135 | round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL); | ||
| 136 | add_timer(&priv->catas_err.timer); | ||
| 137 | } | ||
| 138 | |||
| 139 | void mlx4_stop_catas_poll(struct mlx4_dev *dev) | ||
| 140 | { | ||
| 141 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 142 | |||
| 143 | del_timer_sync(&priv->catas_err.timer); | ||
| 144 | |||
| 145 | if (priv->catas_err.map) | ||
| 146 | iounmap(priv->catas_err.map); | ||
| 147 | |||
| 148 | spin_lock_irq(&catas_lock); | ||
| 149 | list_del(&priv->catas_err.list); | ||
| 150 | spin_unlock_irq(&catas_lock); | ||
| 151 | } | ||
| 152 | |||
| 153 | void __init mlx4_catas_init(void) | ||
| 154 | { | ||
| 155 | INIT_WORK(&catas_work, catas_reset); | ||
| 156 | } | ||
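catas.c splits the work between contexts: the timer callback poll_catas() only reads the mapped error word and re-arms itself, while the actual device reset runs later from the catas_work workqueue item, since mlx4_restart_one() can sleep. A rough user-space sketch of that poll-then-defer shape follows; the error value, tick count, and direct worker call are placeholders for the mapped error buffer, the 5*HZ timer, and the mlx4_wq workqueue.

```c
/* Rough user-space sketch of the poll-then-defer pattern in catas.c. */
#include <stdio.h>
#include <stdint.h>

static volatile uint32_t err_word;      /* stands in for priv->catas_err.map */

static void catas_reset_work(void)      /* would run in workqueue context */
{
    printf("work: restarting device after internal error\n");
}

int main(void)
{
    for (int tick = 0; ; ++tick) {      /* each pass = one timer expiry */
        if (tick == 3)
            err_word = 0xdeadbeef;      /* simulate firmware raising an error */

        if (err_word) {
            printf("timer: error 0x%08x detected, deferring reset\n", err_word);
            catas_reset_work();         /* the driver queues this instead */
            break;                      /* timer is not re-armed on error */
        }
        /* else: mod_timer() re-arms for the next poll interval */
    }
    return 0;
}
```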
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
new file mode 100644
index 00000000000..23cee7b6af9
--- /dev/null
+++ b/drivers/net/mlx4/cmd.c
| @@ -0,0 +1,443 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. | ||
| 3 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. | ||
| 4 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. | ||
| 5 | * | ||
| 6 | * This software is available to you under a choice of one of two | ||
| 7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 8 | * General Public License (GPL) Version 2, available from the file | ||
| 9 | * COPYING in the main directory of this source tree, or the | ||
| 10 | * OpenIB.org BSD license below: | ||
| 11 | * | ||
| 12 | * Redistribution and use in source and binary forms, with or | ||
| 13 | * without modification, are permitted provided that the following | ||
| 14 | * conditions are met: | ||
| 15 | * | ||
| 16 | * - Redistributions of source code must retain the above | ||
| 17 | * copyright notice, this list of conditions and the following | ||
| 18 | * disclaimer. | ||
| 19 | * | ||
| 20 | * - Redistributions in binary form must reproduce the above | ||
| 21 | * copyright notice, this list of conditions and the following | ||
| 22 | * disclaimer in the documentation and/or other materials | ||
| 23 | * provided with the distribution. | ||
| 24 | * | ||
| 25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 32 | * SOFTWARE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #include <linux/sched.h> | ||
| 36 | #include <linux/slab.h> | ||
| 37 | #include <linux/pci.h> | ||
| 38 | #include <linux/errno.h> | ||
| 39 | |||
| 40 | #include <linux/mlx4/cmd.h> | ||
| 41 | |||
| 42 | #include <asm/io.h> | ||
| 43 | |||
| 44 | #include "mlx4.h" | ||
| 45 | |||
| 46 | #define CMD_POLL_TOKEN 0xffff | ||
| 47 | |||
| 48 | enum { | ||
| 49 | /* command completed successfully: */ | ||
| 50 | CMD_STAT_OK = 0x00, | ||
| 51 | /* Internal error (such as a bus error) occurred while processing command: */ | ||
| 52 | CMD_STAT_INTERNAL_ERR = 0x01, | ||
| 53 | /* Operation/command not supported or opcode modifier not supported: */ | ||
| 54 | CMD_STAT_BAD_OP = 0x02, | ||
| 55 | /* Parameter not supported or parameter out of range: */ | ||
| 56 | CMD_STAT_BAD_PARAM = 0x03, | ||
| 57 | /* System not enabled or bad system state: */ | ||
| 58 | CMD_STAT_BAD_SYS_STATE = 0x04, | ||
| 59 | /* Attempt to access reserved or unallocated resource: */ | ||
| 60 | CMD_STAT_BAD_RESOURCE = 0x05, | ||
| 61 | /* Requested resource is currently executing a command, or is otherwise busy: */ | ||
| 62 | CMD_STAT_RESOURCE_BUSY = 0x06, | ||
| 63 | /* Required capability exceeds device limits: */ | ||
| 64 | CMD_STAT_EXCEED_LIM = 0x08, | ||
| 65 | /* Resource is not in the appropriate state or ownership: */ | ||
| 66 | CMD_STAT_BAD_RES_STATE = 0x09, | ||
| 67 | /* Index out of range: */ | ||
| 68 | CMD_STAT_BAD_INDEX = 0x0a, | ||
| 69 | /* FW image corrupted: */ | ||
| 70 | CMD_STAT_BAD_NVMEM = 0x0b, | ||
| 71 | /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */ | ||
| 72 | CMD_STAT_ICM_ERROR = 0x0c, | ||
| 73 | /* Attempt to modify a QP/EE which is not in the presumed state: */ | ||
| 74 | CMD_STAT_BAD_QP_STATE = 0x10, | ||
| 75 | /* Bad segment parameters (Address/Size): */ | ||
| 76 | CMD_STAT_BAD_SEG_PARAM = 0x20, | ||
| 77 | /* Memory Region has Memory Windows bound to it: */ | ||
| 78 | CMD_STAT_REG_BOUND = 0x21, | ||
| 79 | /* HCA local attached memory not present: */ | ||
| 80 | CMD_STAT_LAM_NOT_PRE = 0x22, | ||
| 81 | /* Bad management packet (silently discarded): */ | ||
| 82 | CMD_STAT_BAD_PKT = 0x30, | ||
| 83 | /* More outstanding CQEs in CQ than new CQ size: */ | ||
| 84 | CMD_STAT_BAD_SIZE = 0x40, | ||
| 85 | /* Multi Function device support required: */ | ||
| 86 | CMD_STAT_MULTI_FUNC_REQ = 0x50, | ||
| 87 | }; | ||
| 88 | |||
| 89 | enum { | ||
| 90 | HCR_IN_PARAM_OFFSET = 0x00, | ||
| 91 | HCR_IN_MODIFIER_OFFSET = 0x08, | ||
| 92 | HCR_OUT_PARAM_OFFSET = 0x0c, | ||
| 93 | HCR_TOKEN_OFFSET = 0x14, | ||
| 94 | HCR_STATUS_OFFSET = 0x18, | ||
| 95 | |||
| 96 | HCR_OPMOD_SHIFT = 12, | ||
| 97 | HCR_T_BIT = 21, | ||
| 98 | HCR_E_BIT = 22, | ||
| 99 | HCR_GO_BIT = 23 | ||
| 100 | }; | ||
| 101 | |||
| 102 | enum { | ||
| 103 | GO_BIT_TIMEOUT_MSECS = 10000 | ||
| 104 | }; | ||
| 105 | |||
| 106 | struct mlx4_cmd_context { | ||
| 107 | struct completion done; | ||
| 108 | int result; | ||
| 109 | int next; | ||
| 110 | u64 out_param; | ||
| 111 | u16 token; | ||
| 112 | }; | ||
| 113 | |||
| 114 | static int mlx4_status_to_errno(u8 status) | ||
| 115 | { | ||
| 116 | static const int trans_table[] = { | ||
| 117 | [CMD_STAT_INTERNAL_ERR] = -EIO, | ||
| 118 | [CMD_STAT_BAD_OP] = -EPERM, | ||
| 119 | [CMD_STAT_BAD_PARAM] = -EINVAL, | ||
| 120 | [CMD_STAT_BAD_SYS_STATE] = -ENXIO, | ||
| 121 | [CMD_STAT_BAD_RESOURCE] = -EBADF, | ||
| 122 | [CMD_STAT_RESOURCE_BUSY] = -EBUSY, | ||
| 123 | [CMD_STAT_EXCEED_LIM] = -ENOMEM, | ||
| 124 | [CMD_STAT_BAD_RES_STATE] = -EBADF, | ||
| 125 | [CMD_STAT_BAD_INDEX] = -EBADF, | ||
| 126 | [CMD_STAT_BAD_NVMEM] = -EFAULT, | ||
| 127 | [CMD_STAT_ICM_ERROR] = -ENFILE, | ||
| 128 | [CMD_STAT_BAD_QP_STATE] = -EINVAL, | ||
| 129 | [CMD_STAT_BAD_SEG_PARAM] = -EFAULT, | ||
| 130 | [CMD_STAT_REG_BOUND] = -EBUSY, | ||
| 131 | [CMD_STAT_LAM_NOT_PRE] = -EAGAIN, | ||
| 132 | [CMD_STAT_BAD_PKT] = -EINVAL, | ||
| 133 | [CMD_STAT_BAD_SIZE] = -ENOMEM, | ||
| 134 | [CMD_STAT_MULTI_FUNC_REQ] = -EACCES, | ||
| 135 | }; | ||
| 136 | |||
| 137 | if (status >= ARRAY_SIZE(trans_table) || | ||
| 138 | (status != CMD_STAT_OK && trans_table[status] == 0)) | ||
| 139 | return -EIO; | ||
| 140 | |||
| 141 | return trans_table[status]; | ||
| 142 | } | ||
| 143 | |||
| 144 | static int cmd_pending(struct mlx4_dev *dev) | ||
| 145 | { | ||
| 146 | u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET); | ||
| 147 | |||
| 148 | return (status & swab32(1 << HCR_GO_BIT)) || | ||
| 149 | (mlx4_priv(dev)->cmd.toggle == | ||
| 150 | !!(status & swab32(1 << HCR_T_BIT))); | ||
| 151 | } | ||
| 152 | |||
| 153 | static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, | ||
| 154 | u32 in_modifier, u8 op_modifier, u16 op, u16 token, | ||
| 155 | int event) | ||
| 156 | { | ||
| 157 | struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; | ||
| 158 | u32 __iomem *hcr = cmd->hcr; | ||
| 159 | int ret = -EAGAIN; | ||
| 160 | unsigned long end; | ||
| 161 | |||
| 162 | mutex_lock(&cmd->hcr_mutex); | ||
| 163 | |||
| 164 | end = jiffies; | ||
| 165 | if (event) | ||
| 166 | end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS); | ||
| 167 | |||
| 168 | while (cmd_pending(dev)) { | ||
| 169 | if (time_after_eq(jiffies, end)) | ||
| 170 | goto out; | ||
| 171 | cond_resched(); | ||
| 172 | } | ||
| 173 | |||
| 174 | /* | ||
| 175 | * We use writel (instead of something like memcpy_toio) | ||
| 176 | * because writes of less than 32 bits to the HCR don't work | ||
| 177 | * (and some architectures such as ia64 implement memcpy_toio | ||
| 178 | * in terms of writeb). | ||
| 179 | */ | ||
| 180 | __raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0); | ||
| 181 | __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1); | ||
| 182 | __raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2); | ||
| 183 | __raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3); | ||
| 184 | __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4); | ||
| 185 | __raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5); | ||
| 186 | |||
| 187 | /* __raw_writel may not order writes. */ | ||
| 188 | wmb(); | ||
| 189 | |||
| 190 | __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) | | ||
| 191 | (cmd->toggle << HCR_T_BIT) | | ||
| 192 | (event ? (1 << HCR_E_BIT) : 0) | | ||
| 193 | (op_modifier << HCR_OPMOD_SHIFT) | | ||
| 194 | op), hcr + 6); | ||
| 195 | |||
| 196 | /* | ||
| 197 | * Make sure that our HCR writes don't get mixed in with | ||
| 198 | * writes from another CPU starting a FW command. | ||
| 199 | */ | ||
| 200 | mmiowb(); | ||
| 201 | |||
| 202 | cmd->toggle = cmd->toggle ^ 1; | ||
| 203 | |||
| 204 | ret = 0; | ||
| 205 | |||
| 206 | out: | ||
| 207 | mutex_unlock(&cmd->hcr_mutex); | ||
| 208 | return ret; | ||
| 209 | } | ||
| 210 | |||
| 211 | static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, | ||
| 212 | int out_is_imm, u32 in_modifier, u8 op_modifier, | ||
| 213 | u16 op, unsigned long timeout) | ||
| 214 | { | ||
| 215 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 216 | void __iomem *hcr = priv->cmd.hcr; | ||
| 217 | int err = 0; | ||
| 218 | unsigned long end; | ||
| 219 | |||
| 220 | down(&priv->cmd.poll_sem); | ||
| 221 | |||
| 222 | err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, | ||
| 223 | in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0); | ||
| 224 | if (err) | ||
| 225 | goto out; | ||
| 226 | |||
| 227 | end = msecs_to_jiffies(timeout) + jiffies; | ||
| 228 | while (cmd_pending(dev) && time_before(jiffies, end)) | ||
| 229 | cond_resched(); | ||
| 230 | |||
| 231 | if (cmd_pending(dev)) { | ||
| 232 | err = -ETIMEDOUT; | ||
| 233 | goto out; | ||
| 234 | } | ||
| 235 | |||
| 236 | if (out_is_imm) | ||
| 237 | *out_param = | ||
| 238 | (u64) be32_to_cpu((__force __be32) | ||
| 239 | __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 | | ||
| 240 | (u64) be32_to_cpu((__force __be32) | ||
| 241 | __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4)); | ||
| 242 | |||
| 243 | err = mlx4_status_to_errno(be32_to_cpu((__force __be32) | ||
| 244 | __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24); | ||
| 245 | |||
| 246 | out: | ||
| 247 | up(&priv->cmd.poll_sem); | ||
| 248 | return err; | ||
| 249 | } | ||
| 250 | |||
| 251 | void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param) | ||
| 252 | { | ||
| 253 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 254 | struct mlx4_cmd_context *context = | ||
| 255 | &priv->cmd.context[token & priv->cmd.token_mask]; | ||
| 256 | |||
| 257 | /* previously timed out command completing at long last */ | ||
| 258 | if (token != context->token) | ||
| 259 | return; | ||
| 260 | |||
| 261 | context->result = mlx4_status_to_errno(status); | ||
| 262 | context->out_param = out_param; | ||
| 263 | |||
| 264 | complete(&context->done); | ||
| 265 | } | ||
| 266 | |||
| 267 | static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, | ||
| 268 | int out_is_imm, u32 in_modifier, u8 op_modifier, | ||
| 269 | u16 op, unsigned long timeout) | ||
| 270 | { | ||
| 271 | struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; | ||
| 272 | struct mlx4_cmd_context *context; | ||
| 273 | int err = 0; | ||
| 274 | |||
| 275 | down(&cmd->event_sem); | ||
| 276 | |||
| 277 | spin_lock(&cmd->context_lock); | ||
| 278 | BUG_ON(cmd->free_head < 0); | ||
| 279 | context = &cmd->context[cmd->free_head]; | ||
| 280 | context->token += cmd->token_mask + 1; | ||
| 281 | cmd->free_head = context->next; | ||
| 282 | spin_unlock(&cmd->context_lock); | ||
| 283 | |||
| 284 | init_completion(&context->done); | ||
| 285 | |||
| 286 | mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, | ||
| 287 | in_modifier, op_modifier, op, context->token, 1); | ||
| 288 | |||
| 289 | if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) { | ||
| 290 | err = -EBUSY; | ||
| 291 | goto out; | ||
| 292 | } | ||
| 293 | |||
| 294 | err = context->result; | ||
| 295 | if (err) | ||
| 296 | goto out; | ||
| 297 | |||
| 298 | if (out_is_imm) | ||
| 299 | *out_param = context->out_param; | ||
| 300 | |||
| 301 | out: | ||
| 302 | spin_lock(&cmd->context_lock); | ||
| 303 | context->next = cmd->free_head; | ||
| 304 | cmd->free_head = context - cmd->context; | ||
| 305 | spin_unlock(&cmd->context_lock); | ||
| 306 | |||
| 307 | up(&cmd->event_sem); | ||
| 308 | return err; | ||
| 309 | } | ||
| 310 | |||
| 311 | int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, | ||
| 312 | int out_is_imm, u32 in_modifier, u8 op_modifier, | ||
| 313 | u16 op, unsigned long timeout) | ||
| 314 | { | ||
| 315 | if (mlx4_priv(dev)->cmd.use_events) | ||
| 316 | return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm, | ||
| 317 | in_modifier, op_modifier, op, timeout); | ||
| 318 | else | ||
| 319 | return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm, | ||
| 320 | in_modifier, op_modifier, op, timeout); | ||
| 321 | } | ||
| 322 | EXPORT_SYMBOL_GPL(__mlx4_cmd); | ||
| 323 | |||
| 324 | int mlx4_cmd_init(struct mlx4_dev *dev) | ||
| 325 | { | ||
| 326 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 327 | |||
| 328 | mutex_init(&priv->cmd.hcr_mutex); | ||
| 329 | sema_init(&priv->cmd.poll_sem, 1); | ||
| 330 | priv->cmd.use_events = 0; | ||
| 331 | priv->cmd.toggle = 1; | ||
| 332 | |||
| 333 | priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE, | ||
| 334 | MLX4_HCR_SIZE); | ||
| 335 | if (!priv->cmd.hcr) { | ||
| 336 | mlx4_err(dev, "Couldn't map command register."); | ||
| 337 | return -ENOMEM; | ||
| 338 | } | ||
| 339 | |||
| 340 | priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, | ||
| 341 | MLX4_MAILBOX_SIZE, | ||
| 342 | MLX4_MAILBOX_SIZE, 0); | ||
| 343 | if (!priv->cmd.pool) { | ||
| 344 | iounmap(priv->cmd.hcr); | ||
| 345 | return -ENOMEM; | ||
| 346 | } | ||
| 347 | |||
| 348 | return 0; | ||
| 349 | } | ||
| 350 | |||
| 351 | void mlx4_cmd_cleanup(struct mlx4_dev *dev) | ||
| 352 | { | ||
| 353 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 354 | |||
| 355 | pci_pool_destroy(priv->cmd.pool); | ||
| 356 | iounmap(priv->cmd.hcr); | ||
| 357 | } | ||
| 358 | |||
| 359 | /* | ||
| 360 | * Switch to using events to issue FW commands (can only be called | ||
| 361 | * after event queue for command events has been initialized). | ||
| 362 | */ | ||
| 363 | int mlx4_cmd_use_events(struct mlx4_dev *dev) | ||
| 364 | { | ||
| 365 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 366 | int i; | ||
| 367 | |||
| 368 | priv->cmd.context = kmalloc(priv->cmd.max_cmds * | ||
| 369 | sizeof (struct mlx4_cmd_context), | ||
| 370 | GFP_KERNEL); | ||
| 371 | if (!priv->cmd.context) | ||
| 372 | return -ENOMEM; | ||
| 373 | |||
| 374 | for (i = 0; i < priv->cmd.max_cmds; ++i) { | ||
| 375 | priv->cmd.context[i].token = i; | ||
| 376 | priv->cmd.context[i].next = i + 1; | ||
| 377 | } | ||
| 378 | |||
| 379 | priv->cmd.context[priv->cmd.max_cmds - 1].next = -1; | ||
| 380 | priv->cmd.free_head = 0; | ||
| 381 | |||
| 382 | sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds); | ||
| 383 | spin_lock_init(&priv->cmd.context_lock); | ||
| 384 | |||
| 385 | for (priv->cmd.token_mask = 1; | ||
| 386 | priv->cmd.token_mask < priv->cmd.max_cmds; | ||
| 387 | priv->cmd.token_mask <<= 1) | ||
| 388 | ; /* nothing */ | ||
| 389 | --priv->cmd.token_mask; | ||
| 390 | |||
| 391 | priv->cmd.use_events = 1; | ||
| 392 | |||
| 393 | down(&priv->cmd.poll_sem); | ||
| 394 | |||
| 395 | return 0; | ||
| 396 | } | ||
| 397 | |||
| 398 | /* | ||
| 399 | * Switch back to polling (used when shutting down the device) | ||
| 400 | */ | ||
| 401 | void mlx4_cmd_use_polling(struct mlx4_dev *dev) | ||
| 402 | { | ||
| 403 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 404 | int i; | ||
| 405 | |||
| 406 | priv->cmd.use_events = 0; | ||
| 407 | |||
| 408 | for (i = 0; i < priv->cmd.max_cmds; ++i) | ||
| 409 | down(&priv->cmd.event_sem); | ||
| 410 | |||
| 411 | kfree(priv->cmd.context); | ||
| 412 | |||
| 413 | up(&priv->cmd.poll_sem); | ||
| 414 | } | ||
| 415 | |||
| 416 | struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev) | ||
| 417 | { | ||
| 418 | struct mlx4_cmd_mailbox *mailbox; | ||
| 419 | |||
| 420 | mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL); | ||
| 421 | if (!mailbox) | ||
| 422 | return ERR_PTR(-ENOMEM); | ||
| 423 | |||
| 424 | mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL, | ||
| 425 | &mailbox->dma); | ||
| 426 | if (!mailbox->buf) { | ||
| 427 | kfree(mailbox); | ||
| 428 | return ERR_PTR(-ENOMEM); | ||
| 429 | } | ||
| 430 | |||
| 431 | return mailbox; | ||
| 432 | } | ||
| 433 | EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox); | ||
| 434 | |||
| 435 | void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox) | ||
| 436 | { | ||
| 437 | if (!mailbox) | ||
| 438 | return; | ||
| 439 | |||
| 440 | pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma); | ||
| 441 | kfree(mailbox); | ||
| 442 | } | ||
| 443 | EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox); | ||
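The event-mode path above multiplexes many in-flight commands over a small context array: contexts form a singly linked free list through `next`/`free_head`, and each take bumps the token's high bits by `token_mask + 1`, so a completion that arrives after a timeout (carrying the old token) is recognized as stale in mlx4_cmd_event(). A minimal user-space model of just that token/free-list scheme, with MAX_CMDS and the demo sequence as assumptions:

```c
/* User-space model of the command-context free list and token scheme
 * from mlx4_cmd_wait()/mlx4_cmd_event().  No locking; sizes made up. */
#include <stdio.h>
#include <stdint.h>

#define MAX_CMDS 6

struct cmd_context {
    uint16_t token;
    int      next;
};

static struct cmd_context ctx[MAX_CMDS];
static int      free_head;
static uint16_t token_mask;

static void cmd_init(void)
{
    for (int i = 0; i < MAX_CMDS; ++i) {
        ctx[i].token = i;
        ctx[i].next  = i + 1;
    }
    ctx[MAX_CMDS - 1].next = -1;
    free_head = 0;

    /* smallest power of two >= MAX_CMDS, minus one */
    for (token_mask = 1; token_mask < MAX_CMDS; token_mask <<= 1)
        ;
    --token_mask;
}

static struct cmd_context *cmd_take(void)
{
    struct cmd_context *c = &ctx[free_head];

    /* bump the high bits so a stale completion for the previous
     * user of this slot no longer matches */
    c->token += token_mask + 1;
    free_head = c->next;
    return c;
}

static void cmd_release(struct cmd_context *c)
{
    c->next   = free_head;
    free_head = (int)(c - ctx);
}

static void cmd_event(uint16_t token)    /* completion from the EQ */
{
    struct cmd_context *c = &ctx[token & token_mask];

    if (token != c->token) {
        printf("token 0x%04x: stale completion, dropped\n", token);
        return;
    }
    printf("token 0x%04x: completes slot %ld\n", token, (long)(c - ctx));
}

int main(void)
{
    cmd_init();

    struct cmd_context *a = cmd_take();  /* slot 0, token 0x0008 */
    uint16_t stale = a->token;
    cmd_event(a->token);                 /* matches */
    cmd_release(a);

    struct cmd_context *b = cmd_take();  /* slot 0 again, token 0x0010 */
    cmd_event(stale);                    /* old token: recognized as stale */
    cmd_event(b->token);                 /* matches */
    return 0;
}
```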
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
new file mode 100644
index 00000000000..bd8ef9f2fa7
--- /dev/null
+++ b/drivers/net/mlx4/cq.c
| @@ -0,0 +1,319 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. | ||
| 3 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. | ||
| 4 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. | ||
| 5 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. | ||
| 6 | * Copyright (c) 2004 Voltaire, Inc. All rights reserved. | ||
| 7 | * | ||
| 8 | * This software is available to you under a choice of one of two | ||
| 9 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 10 | * General Public License (GPL) Version 2, available from the file | ||
| 11 | * COPYING in the main directory of this source tree, or the | ||
| 12 | * OpenIB.org BSD license below: | ||
| 13 | * | ||
| 14 | * Redistribution and use in source and binary forms, with or | ||
| 15 | * without modification, are permitted provided that the following | ||
| 16 | * conditions are met: | ||
| 17 | * | ||
| 18 | * - Redistributions of source code must retain the above | ||
| 19 | * copyright notice, this list of conditions and the following | ||
| 20 | * disclaimer. | ||
| 21 | * | ||
| 22 | * - Redistributions in binary form must reproduce the above | ||
| 23 | * copyright notice, this list of conditions and the following | ||
| 24 | * disclaimer in the documentation and/or other materials | ||
| 25 | * provided with the distribution. | ||
| 26 | * | ||
| 27 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 28 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 29 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 30 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 31 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 32 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 33 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 34 | * SOFTWARE. | ||
| 35 | */ | ||
| 36 | |||
| 37 | #include <linux/hardirq.h> | ||
| 38 | #include <linux/gfp.h> | ||
| 39 | |||
| 40 | #include <linux/mlx4/cmd.h> | ||
| 41 | #include <linux/mlx4/cq.h> | ||
| 42 | |||
| 43 | #include "mlx4.h" | ||
| 44 | #include "icm.h" | ||
| 45 | |||
| 46 | struct mlx4_cq_context { | ||
| 47 | __be32 flags; | ||
| 48 | u16 reserved1[3]; | ||
| 49 | __be16 page_offset; | ||
| 50 | __be32 logsize_usrpage; | ||
| 51 | __be16 cq_period; | ||
| 52 | __be16 cq_max_count; | ||
| 53 | u8 reserved2[3]; | ||
| 54 | u8 comp_eqn; | ||
| 55 | u8 log_page_size; | ||
| 56 | u8 reserved3[2]; | ||
| 57 | u8 mtt_base_addr_h; | ||
| 58 | __be32 mtt_base_addr_l; | ||
| 59 | __be32 last_notified_index; | ||
| 60 | __be32 solicit_producer_index; | ||
| 61 | __be32 consumer_index; | ||
| 62 | __be32 producer_index; | ||
| 63 | u32 reserved4[2]; | ||
| 64 | __be64 db_rec_addr; | ||
| 65 | }; | ||
| 66 | |||
| 67 | #define MLX4_CQ_STATUS_OK ( 0 << 28) | ||
| 68 | #define MLX4_CQ_STATUS_OVERFLOW ( 9 << 28) | ||
| 69 | #define MLX4_CQ_STATUS_WRITE_FAIL (10 << 28) | ||
| 70 | #define MLX4_CQ_FLAG_CC ( 1 << 18) | ||
| 71 | #define MLX4_CQ_FLAG_OI ( 1 << 17) | ||
| 72 | #define MLX4_CQ_STATE_ARMED ( 9 << 8) | ||
| 73 | #define MLX4_CQ_STATE_ARMED_SOL ( 6 << 8) | ||
| 74 | #define MLX4_EQ_STATE_FIRED (10 << 8) | ||
| 75 | |||
| 76 | void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn) | ||
| 77 | { | ||
| 78 | struct mlx4_cq *cq; | ||
| 79 | |||
| 80 | cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, | ||
| 81 | cqn & (dev->caps.num_cqs - 1)); | ||
| 82 | if (!cq) { | ||
| 83 | mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn); | ||
| 84 | return; | ||
| 85 | } | ||
| 86 | |||
| 87 | ++cq->arm_sn; | ||
| 88 | |||
| 89 | cq->comp(cq); | ||
| 90 | } | ||
| 91 | |||
| 92 | void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type) | ||
| 93 | { | ||
| 94 | struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; | ||
| 95 | struct mlx4_cq *cq; | ||
| 96 | |||
| 97 | spin_lock(&cq_table->lock); | ||
| 98 | |||
| 99 | cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1)); | ||
| 100 | if (cq) | ||
| 101 | atomic_inc(&cq->refcount); | ||
| 102 | |||
| 103 | spin_unlock(&cq_table->lock); | ||
| 104 | |||
| 105 | if (!cq) { | ||
| 106 | mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn); | ||
| 107 | return; | ||
| 108 | } | ||
| 109 | |||
| 110 | cq->event(cq, event_type); | ||
| 111 | |||
| 112 | if (atomic_dec_and_test(&cq->refcount)) | ||
| 113 | complete(&cq->free); | ||
| 114 | } | ||
| 115 | |||
| 116 | static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, | ||
| 117 | int cq_num) | ||
| 118 | { | ||
| 119 | return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ, | ||
| 120 | MLX4_CMD_TIME_CLASS_A); | ||
| 121 | } | ||
| 122 | |||
| 123 | static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, | ||
| 124 | int cq_num, u32 opmod) | ||
| 125 | { | ||
| 126 | return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ, | ||
| 127 | MLX4_CMD_TIME_CLASS_A); | ||
| 128 | } | ||
| 129 | |||
| 130 | static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, | ||
| 131 | int cq_num) | ||
| 132 | { | ||
| 133 | return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num, | ||
| 134 | mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ, | ||
| 135 | MLX4_CMD_TIME_CLASS_A); | ||
| 136 | } | ||
| 137 | |||
| 138 | int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq, | ||
| 139 | u16 count, u16 period) | ||
| 140 | { | ||
| 141 | struct mlx4_cmd_mailbox *mailbox; | ||
| 142 | struct mlx4_cq_context *cq_context; | ||
| 143 | int err; | ||
| 144 | |||
| 145 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 146 | if (IS_ERR(mailbox)) | ||
| 147 | return PTR_ERR(mailbox); | ||
| 148 | |||
| 149 | cq_context = mailbox->buf; | ||
| 150 | memset(cq_context, 0, sizeof *cq_context); | ||
| 151 | |||
| 152 | cq_context->cq_max_count = cpu_to_be16(count); | ||
| 153 | cq_context->cq_period = cpu_to_be16(period); | ||
| 154 | |||
| 155 | err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1); | ||
| 156 | |||
| 157 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 158 | return err; | ||
| 159 | } | ||
| 160 | EXPORT_SYMBOL_GPL(mlx4_cq_modify); | ||
| 161 | |||
| 162 | int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq, | ||
| 163 | int entries, struct mlx4_mtt *mtt) | ||
| 164 | { | ||
| 165 | struct mlx4_cmd_mailbox *mailbox; | ||
| 166 | struct mlx4_cq_context *cq_context; | ||
| 167 | u64 mtt_addr; | ||
| 168 | int err; | ||
| 169 | |||
| 170 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 171 | if (IS_ERR(mailbox)) | ||
| 172 | return PTR_ERR(mailbox); | ||
| 173 | |||
| 174 | cq_context = mailbox->buf; | ||
| 175 | memset(cq_context, 0, sizeof *cq_context); | ||
| 176 | |||
| 177 | cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24); | ||
| 178 | cq_context->log_page_size = mtt->page_shift - 12; | ||
| 179 | mtt_addr = mlx4_mtt_addr(dev, mtt); | ||
| 180 | cq_context->mtt_base_addr_h = mtt_addr >> 32; | ||
| 181 | cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); | ||
| 182 | |||
| 183 | err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0); | ||
| 184 | |||
| 185 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 186 | return err; | ||
| 187 | } | ||
| 188 | EXPORT_SYMBOL_GPL(mlx4_cq_resize); | ||
| 189 | |||
| 190 | int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, | ||
| 191 | struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, | ||
| 192 | unsigned vector, int collapsed) | ||
| 193 | { | ||
| 194 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 195 | struct mlx4_cq_table *cq_table = &priv->cq_table; | ||
| 196 | struct mlx4_cmd_mailbox *mailbox; | ||
| 197 | struct mlx4_cq_context *cq_context; | ||
| 198 | u64 mtt_addr; | ||
| 199 | int err; | ||
| 200 | |||
| 201 | if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool) | ||
| 202 | return -EINVAL; | ||
| 203 | |||
| 204 | cq->vector = vector; | ||
| 205 | |||
| 206 | cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap); | ||
| 207 | if (cq->cqn == -1) | ||
| 208 | return -ENOMEM; | ||
| 209 | |||
| 210 | err = mlx4_table_get(dev, &cq_table->table, cq->cqn); | ||
| 211 | if (err) | ||
| 212 | goto err_out; | ||
| 213 | |||
| 214 | err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn); | ||
| 215 | if (err) | ||
| 216 | goto err_put; | ||
| 217 | |||
| 218 | spin_lock_irq(&cq_table->lock); | ||
| 219 | err = radix_tree_insert(&cq_table->tree, cq->cqn, cq); | ||
| 220 | spin_unlock_irq(&cq_table->lock); | ||
| 221 | if (err) | ||
| 222 | goto err_cmpt_put; | ||
| 223 | |||
| 224 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 225 | if (IS_ERR(mailbox)) { | ||
| 226 | err = PTR_ERR(mailbox); | ||
| 227 | goto err_radix; | ||
| 228 | } | ||
| 229 | |||
| 230 | cq_context = mailbox->buf; | ||
| 231 | memset(cq_context, 0, sizeof *cq_context); | ||
| 232 | |||
| 233 | cq_context->flags = cpu_to_be32(!!collapsed << 18); | ||
| 234 | cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index); | ||
| 235 | cq_context->comp_eqn = priv->eq_table.eq[vector].eqn; | ||
| 236 | cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; | ||
| 237 | |||
| 238 | mtt_addr = mlx4_mtt_addr(dev, mtt); | ||
| 239 | cq_context->mtt_base_addr_h = mtt_addr >> 32; | ||
| 240 | cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); | ||
| 241 | cq_context->db_rec_addr = cpu_to_be64(db_rec); | ||
| 242 | |||
| 243 | err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn); | ||
| 244 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 245 | if (err) | ||
| 246 | goto err_radix; | ||
| 247 | |||
| 248 | cq->cons_index = 0; | ||
| 249 | cq->arm_sn = 1; | ||
| 250 | cq->uar = uar; | ||
| 251 | atomic_set(&cq->refcount, 1); | ||
| 252 | init_completion(&cq->free); | ||
| 253 | |||
| 254 | return 0; | ||
| 255 | |||
| 256 | err_radix: | ||
| 257 | spin_lock_irq(&cq_table->lock); | ||
| 258 | radix_tree_delete(&cq_table->tree, cq->cqn); | ||
| 259 | spin_unlock_irq(&cq_table->lock); | ||
| 260 | |||
| 261 | err_cmpt_put: | ||
| 262 | mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn); | ||
| 263 | |||
| 264 | err_put: | ||
| 265 | mlx4_table_put(dev, &cq_table->table, cq->cqn); | ||
| 266 | |||
| 267 | err_out: | ||
| 268 | mlx4_bitmap_free(&cq_table->bitmap, cq->cqn); | ||
| 269 | |||
| 270 | return err; | ||
| 271 | } | ||
| 272 | EXPORT_SYMBOL_GPL(mlx4_cq_alloc); | ||
| 273 | |||
| 274 | void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) | ||
| 275 | { | ||
| 276 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 277 | struct mlx4_cq_table *cq_table = &priv->cq_table; | ||
| 278 | int err; | ||
| 279 | |||
| 280 | err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn); | ||
| 281 | if (err) | ||
| 282 | mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); | ||
| 283 | |||
| 284 | synchronize_irq(priv->eq_table.eq[cq->vector].irq); | ||
| 285 | |||
| 286 | spin_lock_irq(&cq_table->lock); | ||
| 287 | radix_tree_delete(&cq_table->tree, cq->cqn); | ||
| 288 | spin_unlock_irq(&cq_table->lock); | ||
| 289 | |||
| 290 | if (atomic_dec_and_test(&cq->refcount)) | ||
| 291 | complete(&cq->free); | ||
| 292 | wait_for_completion(&cq->free); | ||
| 293 | |||
| 294 | mlx4_table_put(dev, &cq_table->table, cq->cqn); | ||
| 295 | mlx4_bitmap_free(&cq_table->bitmap, cq->cqn); | ||
| 296 | } | ||
| 297 | EXPORT_SYMBOL_GPL(mlx4_cq_free); | ||
| 298 | |||
| 299 | int mlx4_init_cq_table(struct mlx4_dev *dev) | ||
| 300 | { | ||
| 301 | struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; | ||
| 302 | int err; | ||
| 303 | |||
| 304 | spin_lock_init(&cq_table->lock); | ||
| 305 | INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); | ||
| 306 | |||
| 307 | err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs, | ||
| 308 | dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0); | ||
| 309 | if (err) | ||
| 310 | return err; | ||
| 311 | |||
| 312 | return 0; | ||
| 313 | } | ||
| 314 | |||
| 315 | void mlx4_cleanup_cq_table(struct mlx4_dev *dev) | ||
| 316 | { | ||
| 317 | /* Nothing to do to clean up radix_tree */ | ||
| 318 | mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap); | ||
| 319 | } | ||
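For reference, mlx4_cq_alloc() packs several fields into big-endian context words: log2 of the entry count shares a word with the UAR index, the collapsed flag sits at bit 18 of the flags word, and the 40-bit MTT address is split into a high byte plus a low 32-bit word. The sketch below reproduces that packing in user space; all values are hypothetical, and htobe32()/__builtin_ctz() stand in for the kernel's cpu_to_be32()/ilog2().

```c
/* User-space sketch of the CQ context field packing in mlx4_cq_alloc(). */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned nent      = 1024;            /* CQ entries; power of two */
    unsigned uar_index = 5;               /* doorbell page index (made up) */
    uint64_t mtt_addr  = 0x1234560000ULL; /* 40-bit MTT address (made up) */
    int      collapsed = 1;

    /* log2(nent) in the top byte, UAR index in the low 24 bits */
    uint32_t logsize_usrpage =
        ((uint32_t)__builtin_ctz(nent) << 24) | uar_index;

    /* collapsed flag lives at bit 18 of the flags word */
    uint32_t flags = (uint32_t)!!collapsed << 18;

    /* 40-bit MTT address split into a high byte and a low 32-bit word */
    uint8_t  mtt_h = (uint8_t)(mtt_addr >> 32);
    uint32_t mtt_l = (uint32_t)(mtt_addr & 0xffffffff);

    printf("flags           = 0x%08x\n", flags);
    printf("logsize_usrpage = 0x%08x\n", logsize_usrpage);
    printf("mtt base        = 0x%02x%08x\n", mtt_h, mtt_l);

    /* on the wire the words are big-endian, as cpu_to_be32() in the driver */
    uint32_t wire = htobe32(logsize_usrpage);
    printf("wire bytes      = %02x %02x %02x %02x\n",
           ((uint8_t *)&wire)[0], ((uint8_t *)&wire)[1],
           ((uint8_t *)&wire)[2], ((uint8_t *)&wire)[3]);
    return 0;
}
```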
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
new file mode 100644
index 00000000000..ec4b6d047fe
--- /dev/null
+++ b/drivers/net/mlx4/en_cq.c
| @@ -0,0 +1,178 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
| 3 | * | ||
| 4 | * This software is available to you under a choice of one of two | ||
| 5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 6 | * General Public License (GPL) Version 2, available from the file | ||
| 7 | * COPYING in the main directory of this source tree, or the | ||
| 8 | * OpenIB.org BSD license below: | ||
| 9 | * | ||
| 10 | * Redistribution and use in source and binary forms, with or | ||
| 11 | * without modification, are permitted provided that the following | ||
| 12 | * conditions are met: | ||
| 13 | * | ||
| 14 | * - Redistributions of source code must retain the above | ||
| 15 | * copyright notice, this list of conditions and the following | ||
| 16 | * disclaimer. | ||
| 17 | * | ||
| 18 | * - Redistributions in binary form must reproduce the above | ||
| 19 | * copyright notice, this list of conditions and the following | ||
| 20 | * disclaimer in the documentation and/or other materials | ||
| 21 | * provided with the distribution. | ||
| 22 | * | ||
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 30 | * SOFTWARE. | ||
| 31 | * | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <linux/mlx4/cq.h> | ||
| 35 | #include <linux/mlx4/qp.h> | ||
| 36 | #include <linux/mlx4/cmd.h> | ||
| 37 | |||
| 38 | #include "mlx4_en.h" | ||
| 39 | |||
| 40 | static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event) | ||
| 41 | { | ||
| 42 | return; | ||
| 43 | } | ||
| 44 | |||
| 45 | |||
| 46 | int mlx4_en_create_cq(struct mlx4_en_priv *priv, | ||
| 47 | struct mlx4_en_cq *cq, | ||
| 48 | int entries, int ring, enum cq_type mode) | ||
| 49 | { | ||
| 50 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 51 | int err; | ||
| 52 | |||
| 53 | cq->size = entries; | ||
| 54 | if (mode == RX) | ||
| 55 | cq->buf_size = cq->size * sizeof(struct mlx4_cqe); | ||
| 56 | else | ||
| 57 | cq->buf_size = sizeof(struct mlx4_cqe); | ||
| 58 | |||
| 59 | cq->ring = ring; | ||
| 60 | cq->is_tx = mode; | ||
| 61 | spin_lock_init(&cq->lock); | ||
| 62 | |||
| 63 | err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres, | ||
| 64 | cq->buf_size, 2 * PAGE_SIZE); | ||
| 65 | if (err) | ||
| 66 | return err; | ||
| 67 | |||
| 68 | err = mlx4_en_map_buffer(&cq->wqres.buf); | ||
| 69 | if (err) | ||
| 70 | mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); | ||
| 71 | else | ||
| 72 | cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf; | ||
| 73 | |||
| 74 | return err; | ||
| 75 | } | ||
| 76 | |||
| 77 | int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) | ||
| 78 | { | ||
| 79 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 80 | int err = 0; | ||
| 81 | char name[25]; | ||
| 82 | |||
| 83 | cq->dev = mdev->pndev[priv->port]; | ||
| 84 | cq->mcq.set_ci_db = cq->wqres.db.db; | ||
| 85 | cq->mcq.arm_db = cq->wqres.db.db + 1; | ||
| 86 | *cq->mcq.set_ci_db = 0; | ||
| 87 | *cq->mcq.arm_db = 0; | ||
| 88 | memset(cq->buf, 0, cq->buf_size); | ||
| 89 | |||
| 90 | if (cq->is_tx == RX) { | ||
| 91 | if (mdev->dev->caps.comp_pool) { | ||
| 92 | if (!cq->vector) { | ||
| 93 | sprintf(name , "%s-rx-%d", priv->dev->name, cq->ring); | ||
| 94 | if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) { | ||
| 95 | cq->vector = (cq->ring + 1 + priv->port) % | ||
| 96 | mdev->dev->caps.num_comp_vectors; | ||
| 97 | mlx4_warn(mdev, "Failed Assigning an EQ to " | ||
| 98 | "%s_rx-%d ,Falling back to legacy EQ's\n", | ||
| 99 | priv->dev->name, cq->ring); | ||
| 100 | } | ||
| 101 | } | ||
| 102 | } else { | ||
| 103 | cq->vector = (cq->ring + 1 + priv->port) % | ||
| 104 | mdev->dev->caps.num_comp_vectors; | ||
| 105 | } | ||
| 106 | } else { | ||
| 107 | if (!cq->vector || !mdev->dev->caps.comp_pool) { | ||
| 108 | /*Fallback to legacy pool in case of error*/ | ||
| 109 | cq->vector = 0; | ||
| 110 | } | ||
| 111 | } | ||
| 112 | |||
| 113 | if (!cq->is_tx) | ||
| 114 | cq->size = priv->rx_ring[cq->ring].actual_size; | ||
| 115 | |||
| 116 | err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar, | ||
| 117 | cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx); | ||
| 118 | if (err) | ||
| 119 | return err; | ||
| 120 | |||
| 121 | cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq; | ||
| 122 | cq->mcq.event = mlx4_en_cq_event; | ||
| 123 | |||
| 124 | if (cq->is_tx) { | ||
| 125 | init_timer(&cq->timer); | ||
| 126 | cq->timer.function = mlx4_en_poll_tx_cq; | ||
| 127 | cq->timer.data = (unsigned long) cq; | ||
| 128 | } else { | ||
| 129 | netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); | ||
| 130 | napi_enable(&cq->napi); | ||
| 131 | } | ||
| 132 | |||
| 133 | return 0; | ||
| 134 | } | ||
| 135 | |||
| 136 | void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, | ||
| 137 | bool reserve_vectors) | ||
| 138 | { | ||
| 139 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 140 | |||
| 141 | mlx4_en_unmap_buffer(&cq->wqres.buf); | ||
| 142 | mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); | ||
| 143 | if (priv->mdev->dev->caps.comp_pool && cq->vector && !reserve_vectors) | ||
| 144 | mlx4_release_eq(priv->mdev->dev, cq->vector); | ||
| 145 | cq->buf_size = 0; | ||
| 146 | cq->buf = NULL; | ||
| 147 | } | ||
| 148 | |||
| 149 | void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) | ||
| 150 | { | ||
| 151 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 152 | |||
| 153 | if (cq->is_tx) | ||
| 154 | del_timer(&cq->timer); | ||
| 155 | else { | ||
| 156 | napi_disable(&cq->napi); | ||
| 157 | netif_napi_del(&cq->napi); | ||
| 158 | } | ||
| 159 | |||
| 160 | mlx4_cq_free(mdev->dev, &cq->mcq); | ||
| 161 | } | ||
| 162 | |||
| 163 | /* Set rx cq moderation parameters */ | ||
| 164 | int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) | ||
| 165 | { | ||
| 166 | return mlx4_cq_modify(priv->mdev->dev, &cq->mcq, | ||
| 167 | cq->moder_cnt, cq->moder_time); | ||
| 168 | } | ||
| 169 | |||
| 170 | int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) | ||
| 171 | { | ||
| 172 | mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map, | ||
| 173 | &priv->mdev->uar_lock); | ||
| 174 | |||
| 175 | return 0; | ||
| 176 | } | ||
| 177 | |||
| 178 | |||
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c new file mode 100644 index 00000000000..eb096253d78 --- /dev/null +++ b/drivers/net/mlx4/en_ethtool.c | |||
| @@ -0,0 +1,477 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
| 3 | * | ||
| 4 | * This software is available to you under a choice of one of two | ||
| 5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 6 | * General Public License (GPL) Version 2, available from the file | ||
| 7 | * COPYING in the main directory of this source tree, or the | ||
| 8 | * OpenIB.org BSD license below: | ||
| 9 | * | ||
| 10 | * Redistribution and use in source and binary forms, with or | ||
| 11 | * without modification, are permitted provided that the following | ||
| 12 | * conditions are met: | ||
| 13 | * | ||
| 14 | * - Redistributions of source code must retain the above | ||
| 15 | * copyright notice, this list of conditions and the following | ||
| 16 | * disclaimer. | ||
| 17 | * | ||
| 18 | * - Redistributions in binary form must reproduce the above | ||
| 19 | * copyright notice, this list of conditions and the following | ||
| 20 | * disclaimer in the documentation and/or other materials | ||
| 21 | * provided with the distribution. | ||
| 22 | * | ||
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 30 | * SOFTWARE. | ||
| 31 | * | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <linux/kernel.h> | ||
| 35 | #include <linux/ethtool.h> | ||
| 36 | #include <linux/netdevice.h> | ||
| 37 | |||
| 38 | #include "mlx4_en.h" | ||
| 39 | #include "en_port.h" | ||
| 40 | |||
| 41 | |||
| 42 | static void | ||
| 43 | mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) | ||
| 44 | { | ||
| 45 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 46 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 47 | |||
| 48 | strncpy(drvinfo->driver, DRV_NAME, 32); | ||
| 49 | strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32); | ||
| 50 | sprintf(drvinfo->fw_version, "%d.%d.%d", | ||
| 51 | (u16) (mdev->dev->caps.fw_ver >> 32), | ||
| 52 | (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff), | ||
| 53 | (u16) (mdev->dev->caps.fw_ver & 0xffff)); | ||
| 54 | strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32); | ||
| 55 | drvinfo->n_stats = 0; | ||
| 56 | drvinfo->regdump_len = 0; | ||
| 57 | drvinfo->eedump_len = 0; | ||
| 58 | } | ||
| 59 | |||
| 60 | static const char main_strings[][ETH_GSTRING_LEN] = { | ||
| 61 | "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", | ||
| 62 | "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", | ||
| 63 | "rx_length_errors", "rx_over_errors", "rx_crc_errors", | ||
| 64 | "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", | ||
| 65 | "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", | ||
| 66 | "tx_heartbeat_errors", "tx_window_errors", | ||
| 67 | |||
| 68 | /* port statistics */ | ||
| 69 | "tso_packets", | ||
| 70 | "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed", | ||
| 71 | "rx_csum_good", "rx_csum_none", "tx_chksum_offload", | ||
| 72 | |||
| 73 | /* packet statistics */ | ||
| 74 | "broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3", | ||
| 75 | "rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0", | ||
| 76 | "tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5", | ||
| 77 | "tx_prio_6", "tx_prio_7", | ||
| 78 | }; | ||
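| | /* NUM_MAIN_STATS counts the struct net_device_stats fields named | ||
| | * above; mlx4_en_get_ethtool_stats() walks priv->stats by index, | ||
| | * so keep the define in sync with the string table */ | ||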
| 79 | #define NUM_MAIN_STATS 21 | ||
| 80 | #define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS) | ||
| 81 | |||
| 82 | static const char mlx4_en_test_names[][ETH_GSTRING_LEN] = { | ||
| 83 | "Interrupt Test", | ||
| 84 | "Link Test", | ||
| 85 | "Speed Test", | ||
| 86 | "Register Test", | ||
| 87 | "Loopback Test", | ||
| 88 | }; | ||
| 89 | |||
| 90 | static u32 mlx4_en_get_msglevel(struct net_device *dev) | ||
| 91 | { | ||
| 92 | return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable; | ||
| 93 | } | ||
| 94 | |||
| 95 | static void mlx4_en_set_msglevel(struct net_device *dev, u32 val) | ||
| 96 | { | ||
| 97 | ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val; | ||
| 98 | } | ||
| 99 | |||
| 100 | static void mlx4_en_get_wol(struct net_device *netdev, | ||
| 101 | struct ethtool_wolinfo *wol) | ||
| 102 | { | ||
| 103 | struct mlx4_en_priv *priv = netdev_priv(netdev); | ||
| 104 | int err = 0; | ||
| 105 | u64 config = 0; | ||
| 106 | |||
| 107 | if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) { | ||
| 108 | wol->supported = 0; | ||
| 109 | wol->wolopts = 0; | ||
| 110 | return; | ||
| 111 | } | ||
| 112 | |||
| 113 | err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); | ||
| 114 | if (err) { | ||
| 115 | en_err(priv, "Failed to get WoL information\n"); | ||
| 116 | return; | ||
| 117 | } | ||
| 118 | |||
| 119 | if (config & MLX4_EN_WOL_MAGIC) | ||
| 120 | wol->supported = WAKE_MAGIC; | ||
| 121 | else | ||
| 122 | wol->supported = 0; | ||
| 123 | |||
| 124 | if (config & MLX4_EN_WOL_ENABLED) | ||
| 125 | wol->wolopts = WAKE_MAGIC; | ||
| 126 | else | ||
| 127 | wol->wolopts = 0; | ||
| 128 | } | ||
| 129 | |||
| 130 | static int mlx4_en_set_wol(struct net_device *netdev, | ||
| 131 | struct ethtool_wolinfo *wol) | ||
| 132 | { | ||
| 133 | struct mlx4_en_priv *priv = netdev_priv(netdev); | ||
| 134 | u64 config = 0; | ||
| 135 | int err = 0; | ||
| 136 | |||
| 137 | if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) | ||
| 138 | return -EOPNOTSUPP; | ||
| 139 | |||
| 140 | if (wol->supported & ~WAKE_MAGIC) | ||
| 141 | return -EINVAL; | ||
| 142 | |||
| 143 | err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); | ||
| 144 | if (err) { | ||
| 145 | en_err(priv, "Failed to get WoL info, unable to modify\n"); | ||
| 146 | return err; | ||
| 147 | } | ||
| 148 | |||
| 149 | if (wol->wolopts & WAKE_MAGIC) { | ||
| 150 | config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED | | ||
| 151 | MLX4_EN_WOL_MAGIC; | ||
| 152 | } else { | ||
| 153 | config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC); | ||
| 154 | config |= MLX4_EN_WOL_DO_MODIFY; | ||
| 155 | } | ||
| 156 | |||
| 157 | err = mlx4_wol_write(priv->mdev->dev, config, priv->port); | ||
| 158 | if (err) | ||
| 159 | en_err(priv, "Failed to set WoL information\n"); | ||
| 160 | |||
| 161 | return err; | ||
| 162 | } | ||
| 163 | |||
| 164 | static int mlx4_en_get_sset_count(struct net_device *dev, int sset) | ||
| 165 | { | ||
| 166 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 167 | |||
| 168 | switch (sset) { | ||
| 169 | case ETH_SS_STATS: | ||
| 170 | return NUM_ALL_STATS + | ||
| 171 | (priv->tx_ring_num + priv->rx_ring_num) * 2; | ||
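| | /* Two self-tests (presumably the last two names in the table | ||
| | * above) are only exposed when unicast loopback is supported */ | ||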
| 172 | case ETH_SS_TEST: | ||
| 173 | return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags | ||
| 174 | & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2; | ||
| 175 | default: | ||
| 176 | return -EOPNOTSUPP; | ||
| 177 | } | ||
| 178 | } | ||
| 179 | |||
| 180 | static void mlx4_en_get_ethtool_stats(struct net_device *dev, | ||
| 181 | struct ethtool_stats *stats, uint64_t *data) | ||
| 182 | { | ||
| 183 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 184 | int index = 0; | ||
| 185 | int i; | ||
| 186 | |||
| 187 | spin_lock_bh(&priv->stats_lock); | ||
| 188 | |||
| 189 | for (i = 0; i < NUM_MAIN_STATS; i++) | ||
| 190 | data[index++] = ((unsigned long *) &priv->stats)[i]; | ||
| 191 | for (i = 0; i < NUM_PORT_STATS; i++) | ||
| 192 | data[index++] = ((unsigned long *) &priv->port_stats)[i]; | ||
| 193 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
| 194 | data[index++] = priv->tx_ring[i].packets; | ||
| 195 | data[index++] = priv->tx_ring[i].bytes; | ||
| 196 | } | ||
| 197 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
| 198 | data[index++] = priv->rx_ring[i].packets; | ||
| 199 | data[index++] = priv->rx_ring[i].bytes; | ||
| 200 | } | ||
| 201 | for (i = 0; i < NUM_PKT_STATS; i++) | ||
| 202 | data[index++] = ((unsigned long *) &priv->pkstats)[i]; | ||
| 203 | spin_unlock_bh(&priv->stats_lock); | ||
| 204 | |||
| 205 | } | ||
| 206 | |||
| 207 | static void mlx4_en_self_test(struct net_device *dev, | ||
| 208 | struct ethtool_test *etest, u64 *buf) | ||
| 209 | { | ||
| 210 | mlx4_en_ex_selftest(dev, &etest->flags, buf); | ||
| 211 | } | ||
| 212 | |||
| 213 | static void mlx4_en_get_strings(struct net_device *dev, | ||
| 214 | uint32_t stringset, uint8_t *data) | ||
| 215 | { | ||
| 216 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 217 | int index = 0; | ||
| 218 | int i; | ||
| 219 | |||
| 220 | switch (stringset) { | ||
| 221 | case ETH_SS_TEST: | ||
| 222 | for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++) | ||
| 223 | strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]); | ||
| 224 | if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) | ||
| 225 | for (; i < MLX4_EN_NUM_SELF_TEST; i++) | ||
| 226 | strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]); | ||
| 227 | break; | ||
| 228 | |||
| 229 | case ETH_SS_STATS: | ||
| 230 | /* Add main counters */ | ||
| 231 | for (i = 0; i < NUM_MAIN_STATS; i++) | ||
| 232 | strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]); | ||
| 233 | for (i = 0; i < NUM_PORT_STATS; i++) | ||
| 234 | strcpy(data + (index++) * ETH_GSTRING_LEN, | ||
| 235 | main_strings[i + NUM_MAIN_STATS]); | ||
| 236 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
| 237 | sprintf(data + (index++) * ETH_GSTRING_LEN, | ||
| 238 | "tx%d_packets", i); | ||
| 239 | sprintf(data + (index++) * ETH_GSTRING_LEN, | ||
| 240 | "tx%d_bytes", i); | ||
| 241 | } | ||
| 242 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
| 243 | sprintf(data + (index++) * ETH_GSTRING_LEN, | ||
| 244 | "rx%d_packets", i); | ||
| 245 | sprintf(data + (index++) * ETH_GSTRING_LEN, | ||
| 246 | "rx%d_bytes", i); | ||
| 247 | } | ||
| 248 | for (i = 0; i < NUM_PKT_STATS; i++) | ||
| 249 | strcpy(data + (index++) * ETH_GSTRING_LEN, | ||
| 250 | main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]); | ||
| 251 | break; | ||
| 252 | } | ||
| 253 | } | ||
| 254 | |||
| 255 | static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
| 256 | { | ||
| 257 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 258 | int trans_type; | ||
| 259 | |||
| 260 | cmd->autoneg = AUTONEG_DISABLE; | ||
| 261 | cmd->supported = SUPPORTED_10000baseT_Full; | ||
| 262 | cmd->advertising = ADVERTISED_10000baseT_Full; | ||
| 263 | |||
| 264 | if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) | ||
| 265 | return -ENOMEM; | ||
| 266 | |||
| 267 | trans_type = priv->port_state.transciver; | ||
| 268 | if (netif_carrier_ok(dev)) { | ||
| 269 | ethtool_cmd_speed_set(cmd, priv->port_state.link_speed); | ||
| 270 | cmd->duplex = DUPLEX_FULL; | ||
| 271 | } else { | ||
| 272 | ethtool_cmd_speed_set(cmd, -1); | ||
| 273 | cmd->duplex = -1; | ||
| 274 | } | ||
| 275 | |||
| 276 | if (trans_type > 0 && trans_type <= 0xC) { | ||
| 277 | cmd->port = PORT_FIBRE; | ||
| 278 | cmd->transceiver = XCVR_EXTERNAL; | ||
| 279 | cmd->supported |= SUPPORTED_FIBRE; | ||
| 280 | cmd->advertising |= ADVERTISED_FIBRE; | ||
| 281 | } else if (trans_type == 0x80 || trans_type == 0) { | ||
| 282 | cmd->port = PORT_TP; | ||
| 283 | cmd->transceiver = XCVR_INTERNAL; | ||
| 284 | cmd->supported |= SUPPORTED_TP; | ||
| 285 | cmd->advertising |= ADVERTISED_TP; | ||
| 286 | } else { | ||
| 287 | cmd->port = -1; | ||
| 288 | cmd->transceiver = -1; | ||
| 289 | } | ||
| 290 | return 0; | ||
| 291 | } | ||
| 292 | |||
| 293 | static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
| 294 | { | ||
| 295 | if ((cmd->autoneg == AUTONEG_ENABLE) || | ||
| 296 | (ethtool_cmd_speed(cmd) != SPEED_10000) || | ||
| 297 | (cmd->duplex != DUPLEX_FULL)) | ||
| 298 | return -EINVAL; | ||
| 299 | |||
| 300 | /* Nothing to change */ | ||
| 301 | return 0; | ||
| 302 | } | ||
| 303 | |||
| 304 | static int mlx4_en_get_coalesce(struct net_device *dev, | ||
| 305 | struct ethtool_coalesce *coal) | ||
| 306 | { | ||
| 307 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 308 | |||
| 309 | coal->tx_coalesce_usecs = 0; | ||
| 310 | coal->tx_max_coalesced_frames = 0; | ||
| 311 | coal->rx_coalesce_usecs = priv->rx_usecs; | ||
| 312 | coal->rx_max_coalesced_frames = priv->rx_frames; | ||
| 313 | |||
| 314 | coal->pkt_rate_low = priv->pkt_rate_low; | ||
| 315 | coal->rx_coalesce_usecs_low = priv->rx_usecs_low; | ||
| 316 | coal->pkt_rate_high = priv->pkt_rate_high; | ||
| 317 | coal->rx_coalesce_usecs_high = priv->rx_usecs_high; | ||
| 318 | coal->rate_sample_interval = priv->sample_interval; | ||
| 319 | coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal; | ||
| 320 | return 0; | ||
| 321 | } | ||
| 322 | |||
| 323 | static int mlx4_en_set_coalesce(struct net_device *dev, | ||
| 324 | struct ethtool_coalesce *coal) | ||
| 325 | { | ||
| 326 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 327 | int err, i; | ||
| 328 | |||
| 329 | priv->rx_frames = (coal->rx_max_coalesced_frames == | ||
| 330 | MLX4_EN_AUTO_CONF) ? | ||
| 331 | MLX4_EN_RX_COAL_TARGET : | ||
| 332 | coal->rx_max_coalesced_frames; | ||
| 333 | priv->rx_usecs = (coal->rx_coalesce_usecs == | ||
| 334 | MLX4_EN_AUTO_CONF) ? | ||
| 335 | MLX4_EN_RX_COAL_TIME : | ||
| 336 | coal->rx_coalesce_usecs; | ||
| 337 | |||
| 338 | /* Set adaptive coalescing params */ | ||
| 339 | priv->pkt_rate_low = coal->pkt_rate_low; | ||
| 340 | priv->rx_usecs_low = coal->rx_coalesce_usecs_low; | ||
| 341 | priv->pkt_rate_high = coal->pkt_rate_high; | ||
| 342 | priv->rx_usecs_high = coal->rx_coalesce_usecs_high; | ||
| 343 | priv->sample_interval = coal->rate_sample_interval; | ||
| 344 | priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce; | ||
| 345 | priv->last_moder_time = MLX4_EN_AUTO_CONF; | ||
| 346 | if (priv->adaptive_rx_coal) | ||
| 347 | return 0; | ||
| 348 | |||
| 349 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
| 350 | priv->rx_cq[i].moder_cnt = priv->rx_frames; | ||
| 351 | priv->rx_cq[i].moder_time = priv->rx_usecs; | ||
| 352 | err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]); | ||
| 353 | if (err) | ||
| 354 | return err; | ||
| 355 | } | ||
| 356 | return 0; | ||
| 357 | } | ||
| 358 | |||
| 359 | static int mlx4_en_set_pauseparam(struct net_device *dev, | ||
| 360 | struct ethtool_pauseparam *pause) | ||
| 361 | { | ||
| 362 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 363 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 364 | int err; | ||
| 365 | |||
| 366 | priv->prof->tx_pause = pause->tx_pause != 0; | ||
| 367 | priv->prof->rx_pause = pause->rx_pause != 0; | ||
| 368 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, | ||
| 369 | priv->rx_skb_size + ETH_FCS_LEN, | ||
| 370 | priv->prof->tx_pause, | ||
| 371 | priv->prof->tx_ppp, | ||
| 372 | priv->prof->rx_pause, | ||
| 373 | priv->prof->rx_ppp); | ||
| 374 | if (err) | ||
| 375 | en_err(priv, "Failed setting pause params\n"); | ||
| 376 | |||
| 377 | return err; | ||
| 378 | } | ||
| 379 | |||
| 380 | static void mlx4_en_get_pauseparam(struct net_device *dev, | ||
| 381 | struct ethtool_pauseparam *pause) | ||
| 382 | { | ||
| 383 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 384 | |||
| 385 | pause->tx_pause = priv->prof->tx_pause; | ||
| 386 | pause->rx_pause = priv->prof->rx_pause; | ||
| 387 | } | ||
| 388 | |||
| 389 | static int mlx4_en_set_ringparam(struct net_device *dev, | ||
| 390 | struct ethtool_ringparam *param) | ||
| 391 | { | ||
| 392 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 393 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 394 | u32 rx_size, tx_size; | ||
| 395 | int port_up = 0; | ||
| 396 | int err = 0; | ||
| 397 | |||
| 398 | if (param->rx_jumbo_pending || param->rx_mini_pending) | ||
| 399 | return -EINVAL; | ||
| 400 | |||
| 401 | rx_size = roundup_pow_of_two(param->rx_pending); | ||
| 402 | rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE); | ||
| 403 | rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE); | ||
| 404 | tx_size = roundup_pow_of_two(param->tx_pending); | ||
| 405 | tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE); | ||
| 406 | tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE); | ||
| 407 | |||
| 408 | if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size : | ||
| 409 | priv->rx_ring[0].size) && | ||
| 410 | tx_size == priv->tx_ring[0].size) | ||
| 411 | return 0; | ||
| 412 | |||
| 413 | mutex_lock(&mdev->state_lock); | ||
| 414 | if (priv->port_up) { | ||
| 415 | port_up = 1; | ||
| 416 | mlx4_en_stop_port(dev); | ||
| 417 | } | ||
| 418 | |||
| 419 | mlx4_en_free_resources(priv, true); | ||
| 420 | |||
| 421 | priv->prof->tx_ring_size = tx_size; | ||
| 422 | priv->prof->rx_ring_size = rx_size; | ||
| 423 | |||
| 424 | err = mlx4_en_alloc_resources(priv); | ||
| 425 | if (err) { | ||
| 426 | en_err(priv, "Failed reallocating port resources\n"); | ||
| 427 | goto out; | ||
| 428 | } | ||
| 429 | if (port_up) { | ||
| 430 | err = mlx4_en_start_port(dev); | ||
| 431 | if (err) | ||
| 432 | en_err(priv, "Failed starting port\n"); | ||
| 433 | } | ||
| 434 | |||
| 435 | out: | ||
| 436 | mutex_unlock(&mdev->state_lock); | ||
| 437 | return err; | ||
| 438 | } | ||
| 439 | |||
| 440 | static void mlx4_en_get_ringparam(struct net_device *dev, | ||
| 441 | struct ethtool_ringparam *param) | ||
| 442 | { | ||
| 443 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 444 | |||
| 445 | memset(param, 0, sizeof(*param)); | ||
| 446 | param->rx_max_pending = MLX4_EN_MAX_RX_SIZE; | ||
| 447 | param->tx_max_pending = MLX4_EN_MAX_TX_SIZE; | ||
| 448 | param->rx_pending = priv->port_up ? | ||
| 449 | priv->rx_ring[0].actual_size : priv->rx_ring[0].size; | ||
| 450 | param->tx_pending = priv->tx_ring[0].size; | ||
| 451 | } | ||
| 452 | |||
| 453 | const struct ethtool_ops mlx4_en_ethtool_ops = { | ||
| 454 | .get_drvinfo = mlx4_en_get_drvinfo, | ||
| 455 | .get_settings = mlx4_en_get_settings, | ||
| 456 | .set_settings = mlx4_en_set_settings, | ||
| 457 | .get_link = ethtool_op_get_link, | ||
| 458 | .get_strings = mlx4_en_get_strings, | ||
| 459 | .get_sset_count = mlx4_en_get_sset_count, | ||
| 460 | .get_ethtool_stats = mlx4_en_get_ethtool_stats, | ||
| 461 | .self_test = mlx4_en_self_test, | ||
| 462 | .get_wol = mlx4_en_get_wol, | ||
| 463 | .set_wol = mlx4_en_set_wol, | ||
| 464 | .get_msglevel = mlx4_en_get_msglevel, | ||
| 465 | .set_msglevel = mlx4_en_set_msglevel, | ||
| 466 | .get_coalesce = mlx4_en_get_coalesce, | ||
| 467 | .set_coalesce = mlx4_en_set_coalesce, | ||
| 468 | .get_pauseparam = mlx4_en_get_pauseparam, | ||
| 469 | .set_pauseparam = mlx4_en_set_pauseparam, | ||
| 470 | .get_ringparam = mlx4_en_get_ringparam, | ||
| 471 | .set_ringparam = mlx4_en_set_ringparam, | ||
| 472 | }; | ||
| 473 | |||
| 474 | |||
| 475 | |||
| 476 | |||
| 477 | |||
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c new file mode 100644 index 00000000000..6bfea233a9f --- /dev/null +++ b/drivers/net/mlx4/en_main.c | |||
| @@ -0,0 +1,315 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
| 3 | * | ||
| 4 | * This software is available to you under a choice of one of two | ||
| 5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 6 | * General Public License (GPL) Version 2, available from the file | ||
| 7 | * COPYING in the main directory of this source tree, or the | ||
| 8 | * OpenIB.org BSD license below: | ||
| 9 | * | ||
| 10 | * Redistribution and use in source and binary forms, with or | ||
| 11 | * without modification, are permitted provided that the following | ||
| 12 | * conditions are met: | ||
| 13 | * | ||
| 14 | * - Redistributions of source code must retain the above | ||
| 15 | * copyright notice, this list of conditions and the following | ||
| 16 | * disclaimer. | ||
| 17 | * | ||
| 18 | * - Redistributions in binary form must reproduce the above | ||
| 19 | * copyright notice, this list of conditions and the following | ||
| 20 | * disclaimer in the documentation and/or other materials | ||
| 21 | * provided with the distribution. | ||
| 22 | * | ||
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 30 | * SOFTWARE. | ||
| 31 | * | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <linux/cpumask.h> | ||
| 35 | #include <linux/module.h> | ||
| 36 | #include <linux/delay.h> | ||
| 37 | #include <linux/netdevice.h> | ||
| 38 | #include <linux/slab.h> | ||
| 39 | |||
| 40 | #include <linux/mlx4/driver.h> | ||
| 41 | #include <linux/mlx4/device.h> | ||
| 42 | #include <linux/mlx4/cmd.h> | ||
| 43 | |||
| 44 | #include "mlx4_en.h" | ||
| 45 | |||
| 46 | MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin"); | ||
| 47 | MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver"); | ||
| 48 | MODULE_LICENSE("Dual BSD/GPL"); | ||
| 49 | MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")"); | ||
| 50 | |||
| 51 | static const char mlx4_en_version[] = | ||
| 52 | DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v" | ||
| 53 | DRV_VERSION " (" DRV_RELDATE ")\n"; | ||
| 54 | |||
| 55 | #define MLX4_EN_PARM_INT(X, def_val, desc) \ | ||
| 56 | static unsigned int X = def_val;\ | ||
| 57 | module_param(X, uint, 0444); \ | ||
| 58 | MODULE_PARM_DESC(X, desc); | ||
| 59 | |||
| 60 | |||
| 61 | /* | ||
| 62 | * Device scope module parameters | ||
| 63 | */ | ||
| 64 | |||
| 65 | |||
| 66 | /* Enable RSS TCP traffic */ | ||
| 67 | MLX4_EN_PARM_INT(tcp_rss, 1, | ||
| 68 | "Enable RSS for incomming TCP traffic or disabled (0)"); | ||
| 69 | /* Enable RSS UDP traffic */ | ||
| 70 | MLX4_EN_PARM_INT(udp_rss, 1, | ||
| 71 | "Enable RSS for incomming UDP traffic or disabled (0)"); | ||
| 72 | |||
| 73 | /* Priority pausing */ | ||
| 74 | MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]." | ||
| 75 | " Per priority bit mask"); | ||
| 76 | MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]." | ||
| 77 | " Per priority bit mask"); | ||
| 78 | |||
| 79 | int en_print(const char *level, const struct mlx4_en_priv *priv, | ||
| 80 | const char *format, ...) | ||
| 81 | { | ||
| 82 | va_list args; | ||
| 83 | struct va_format vaf; | ||
| 84 | int i; | ||
| 85 | |||
| 86 | va_start(args, format); | ||
| 87 | |||
| 88 | vaf.fmt = format; | ||
| 89 | vaf.va = &args; | ||
| 90 | if (priv->registered) | ||
| 91 | i = printk("%s%s: %s: %pV", | ||
| 92 | level, DRV_NAME, priv->dev->name, &vaf); | ||
| 93 | else | ||
| 94 | i = printk("%s%s: %s: Port %d: %pV", | ||
| 95 | level, DRV_NAME, dev_name(&priv->mdev->pdev->dev), | ||
| 96 | priv->port, &vaf); | ||
| 97 | va_end(args); | ||
| 98 | |||
| 99 | return i; | ||
| 100 | } | ||
| 101 | |||
| 102 | static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) | ||
| 103 | { | ||
| 104 | struct mlx4_en_profile *params = &mdev->profile; | ||
| 105 | int i; | ||
| 106 | |||
| 107 | params->tcp_rss = tcp_rss; | ||
| 108 | params->udp_rss = udp_rss; | ||
| 109 | if (params->udp_rss && !(mdev->dev->caps.flags | ||
| 110 | & MLX4_DEV_CAP_FLAG_UDP_RSS)) { | ||
| 111 | mlx4_warn(mdev, "UDP RSS is not supported on this device.\n"); | ||
| 112 | params->udp_rss = 0; | ||
| 113 | } | ||
| 114 | for (i = 1; i <= MLX4_MAX_PORTS; i++) { | ||
| 115 | params->prof[i].rx_pause = 1; | ||
| 116 | params->prof[i].rx_ppp = pfcrx; | ||
| 117 | params->prof[i].tx_pause = 1; | ||
| 118 | params->prof[i].tx_ppp = pfctx; | ||
| 119 | params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; | ||
| 120 | params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; | ||
| 121 | params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS + | ||
| 122 | (!!pfcrx) * MLX4_EN_NUM_PPP_RINGS; | ||
| 123 | } | ||
| 124 | |||
| 125 | return 0; | ||
| 126 | } | ||
| 127 | |||
| 128 | static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port) | ||
| 129 | { | ||
| 130 | struct mlx4_en_dev *endev = ctx; | ||
| 131 | |||
| 132 | return endev->pndev[port]; | ||
| 133 | } | ||
| 134 | |||
| 135 | static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, | ||
| 136 | enum mlx4_dev_event event, int port) | ||
| 137 | { | ||
| 138 | struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr; | ||
| 139 | struct mlx4_en_priv *priv; | ||
| 140 | |||
| 141 | if (!mdev->pndev[port]) | ||
| 142 | return; | ||
| 143 | |||
| 144 | priv = netdev_priv(mdev->pndev[port]); | ||
| 145 | switch (event) { | ||
| 146 | case MLX4_DEV_EVENT_PORT_UP: | ||
| 147 | case MLX4_DEV_EVENT_PORT_DOWN: | ||
| 148 | /* To prevent races, we poll the link state in a separate | ||
| 149 | task rather than changing it here */ | ||
| 150 | priv->link_state = event; | ||
| 151 | queue_work(mdev->workqueue, &priv->linkstate_task); | ||
| 152 | break; | ||
| 153 | |||
| 154 | case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: | ||
| 155 | mlx4_err(mdev, "Internal error detected, restarting device\n"); | ||
| 156 | break; | ||
| 157 | |||
| 158 | default: | ||
| 159 | mlx4_warn(mdev, "Unhandled event: %d\n", event); | ||
| 160 | } | ||
| 161 | } | ||
| 162 | |||
| 163 | static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr) | ||
| 164 | { | ||
| 165 | struct mlx4_en_dev *mdev = endev_ptr; | ||
| 166 | int i; | ||
| 167 | |||
| 168 | mutex_lock(&mdev->state_lock); | ||
| 169 | mdev->device_up = false; | ||
| 170 | mutex_unlock(&mdev->state_lock); | ||
| 171 | |||
| 172 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) | ||
| 173 | if (mdev->pndev[i]) | ||
| 174 | mlx4_en_destroy_netdev(mdev->pndev[i]); | ||
| 175 | |||
| 176 | flush_workqueue(mdev->workqueue); | ||
| 177 | destroy_workqueue(mdev->workqueue); | ||
| 178 | mlx4_mr_free(dev, &mdev->mr); | ||
| 179 | mlx4_uar_free(dev, &mdev->priv_uar); | ||
| 180 | mlx4_pd_free(dev, mdev->priv_pdn); | ||
| 181 | kfree(mdev); | ||
| 182 | } | ||
| 183 | |||
| 184 | static void *mlx4_en_add(struct mlx4_dev *dev) | ||
| 185 | { | ||
| 186 | struct mlx4_en_dev *mdev; | ||
| 187 | int i; | ||
| 188 | int err; | ||
| 189 | |||
| 190 | printk_once(KERN_INFO "%s", mlx4_en_version); | ||
| 191 | |||
| 192 | mdev = kzalloc(sizeof *mdev, GFP_KERNEL); | ||
| 193 | if (!mdev) { | ||
| 194 | dev_err(&dev->pdev->dev, "Device struct alloc failed, " | ||
| 195 | "aborting.\n"); | ||
| 196 | err = -ENOMEM; | ||
| 197 | goto err_free_res; | ||
| 198 | } | ||
| 199 | |||
| 200 | if (mlx4_pd_alloc(dev, &mdev->priv_pdn)) | ||
| 201 | goto err_free_dev; | ||
| 202 | |||
| 203 | if (mlx4_uar_alloc(dev, &mdev->priv_uar)) | ||
| 204 | goto err_pd; | ||
| 205 | |||
| 206 | mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT, | ||
| 207 | PAGE_SIZE); | ||
| 208 | if (!mdev->uar_map) | ||
| 209 | goto err_uar; | ||
| 210 | spin_lock_init(&mdev->uar_lock); | ||
| 211 | |||
| 212 | mdev->dev = dev; | ||
| 213 | mdev->dma_device = &(dev->pdev->dev); | ||
| 214 | mdev->pdev = dev->pdev; | ||
| 215 | mdev->device_up = false; | ||
| 216 | |||
| 217 | mdev->LSO_support = !!(dev->caps.flags & (1 << 15)); | ||
| 218 | if (!mdev->LSO_support) | ||
| 219 | mlx4_warn(mdev, "LSO not supported, please upgrade to later " | ||
| 220 | "FW version to enable LSO\n"); | ||
| 221 | |||
| 222 | if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull, | ||
| 223 | MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ, | ||
| 224 | 0, 0, &mdev->mr)) { | ||
| 225 | mlx4_err(mdev, "Failed allocating memory region\n"); | ||
| 226 | goto err_uar; | ||
| 227 | } | ||
| 228 | if (mlx4_mr_enable(mdev->dev, &mdev->mr)) { | ||
| 229 | mlx4_err(mdev, "Failed enabling memory region\n"); | ||
| 230 | goto err_mr; | ||
| 231 | } | ||
| 232 | |||
| 233 | /* Build device profile according to supplied module parameters */ | ||
| 234 | err = mlx4_en_get_profile(mdev); | ||
| 235 | if (err) { | ||
| 236 | mlx4_err(mdev, "Bad module parameters, aborting.\n"); | ||
| 237 | goto err_mr; | ||
| 238 | } | ||
| 239 | |||
| 240 | /* Configure which ports to start according to module parameters */ | ||
| 241 | mdev->port_cnt = 0; | ||
| 242 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) | ||
| 243 | mdev->port_cnt++; | ||
| 244 | |||
| 245 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { | ||
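| | /* Without a dedicated completion-vector pool, RX rings share the | ||
| | * legacy vectors; with one, the pool is split between the ports | ||
| | * (one vector per port apparently held back, judging by the -1) */ | ||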
| 246 | if (!dev->caps.comp_pool) { | ||
| 247 | mdev->profile.prof[i].rx_ring_num = | ||
| 248 | rounddown_pow_of_two(max_t(int, MIN_RX_RINGS, | ||
| 249 | min_t(int, | ||
| 250 | dev->caps.num_comp_vectors, | ||
| 251 | MAX_RX_RINGS))); | ||
| 252 | } else { | ||
| 253 | mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two( | ||
| 254 | min_t(int, dev->caps.comp_pool / | ||
| 255 | dev->caps.num_ports - 1, MAX_MSIX_P_PORT - 1)); | ||
| 256 | } | ||
| 257 | } | ||
| 258 | |||
| 259 | /* Create our own workqueue for reset/multicast tasks | ||
| 260 | * Note: we cannot use the shared workqueue because of deadlocks caused | ||
| 261 | * by the rtnl lock */ | ||
| 262 | mdev->workqueue = create_singlethread_workqueue("mlx4_en"); | ||
| 263 | if (!mdev->workqueue) { | ||
| 264 | err = -ENOMEM; | ||
| 265 | goto err_mr; | ||
| 266 | } | ||
| 267 | |||
| 268 | /* At this stage all non-port specific tasks are complete: | ||
| 269 | * mark the card state as up */ | ||
| 270 | mutex_init(&mdev->state_lock); | ||
| 271 | mdev->device_up = true; | ||
| 272 | |||
| 273 | /* Setup ports */ | ||
| 274 | |||
| 275 | /* Create a netdev for each port */ | ||
| 276 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { | ||
| 277 | mlx4_info(mdev, "Activating port:%d\n", i); | ||
| 278 | if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) | ||
| 279 | mdev->pndev[i] = NULL; | ||
| 280 | } | ||
| 281 | return mdev; | ||
| 282 | |||
| 283 | err_mr: | ||
| 284 | mlx4_mr_free(dev, &mdev->mr); | ||
| 285 | err_uar: | ||
| 286 | mlx4_uar_free(dev, &mdev->priv_uar); | ||
| 287 | err_pd: | ||
| 288 | mlx4_pd_free(dev, mdev->priv_pdn); | ||
| 289 | err_free_dev: | ||
| 290 | kfree(mdev); | ||
| 291 | err_free_res: | ||
| 292 | return NULL; | ||
| 293 | } | ||
| 294 | |||
| 295 | static struct mlx4_interface mlx4_en_interface = { | ||
| 296 | .add = mlx4_en_add, | ||
| 297 | .remove = mlx4_en_remove, | ||
| 298 | .event = mlx4_en_event, | ||
| 299 | .get_dev = mlx4_en_get_netdev, | ||
| 300 | .protocol = MLX4_PROT_ETH, | ||
| 301 | }; | ||
| 302 | |||
| 303 | static int __init mlx4_en_init(void) | ||
| 304 | { | ||
| 305 | return mlx4_register_interface(&mlx4_en_interface); | ||
| 306 | } | ||
| 307 | |||
| 308 | static void __exit mlx4_en_cleanup(void) | ||
| 309 | { | ||
| 310 | mlx4_unregister_interface(&mlx4_en_interface); | ||
| 311 | } | ||
| 312 | |||
| 313 | module_init(mlx4_en_init); | ||
| 314 | module_exit(mlx4_en_cleanup); | ||
| 315 | |||
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c new file mode 100644 index 00000000000..4b0f32e568f --- /dev/null +++ b/drivers/net/mlx4/en_netdev.c | |||
| @@ -0,0 +1,1166 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
| 3 | * | ||
| 4 | * This software is available to you under a choice of one of two | ||
| 5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 6 | * General Public License (GPL) Version 2, available from the file | ||
| 7 | * COPYING in the main directory of this source tree, or the | ||
| 8 | * OpenIB.org BSD license below: | ||
| 9 | * | ||
| 10 | * Redistribution and use in source and binary forms, with or | ||
| 11 | * without modification, are permitted provided that the following | ||
| 12 | * conditions are met: | ||
| 13 | * | ||
| 14 | * - Redistributions of source code must retain the above | ||
| 15 | * copyright notice, this list of conditions and the following | ||
| 16 | * disclaimer. | ||
| 17 | * | ||
| 18 | * - Redistributions in binary form must reproduce the above | ||
| 19 | * copyright notice, this list of conditions and the following | ||
| 20 | * disclaimer in the documentation and/or other materials | ||
| 21 | * provided with the distribution. | ||
| 22 | * | ||
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 30 | * SOFTWARE. | ||
| 31 | * | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <linux/etherdevice.h> | ||
| 35 | #include <linux/tcp.h> | ||
| 36 | #include <linux/if_vlan.h> | ||
| 37 | #include <linux/delay.h> | ||
| 38 | #include <linux/slab.h> | ||
| 39 | |||
| 40 | #include <linux/mlx4/driver.h> | ||
| 41 | #include <linux/mlx4/device.h> | ||
| 42 | #include <linux/mlx4/cmd.h> | ||
| 43 | #include <linux/mlx4/cq.h> | ||
| 44 | |||
| 45 | #include "mlx4_en.h" | ||
| 46 | #include "en_port.h" | ||
| 47 | |||
| 48 | static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | ||
| 49 | { | ||
| 50 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 51 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 52 | int err; | ||
| 53 | int idx; | ||
| 54 | |||
| 55 | en_dbg(HW, priv, "adding VLAN:%d\n", vid); | ||
| 56 | |||
| 57 | set_bit(vid, priv->active_vlans); | ||
| 58 | |||
| 59 | /* Add VID to port VLAN filter */ | ||
| 60 | mutex_lock(&mdev->state_lock); | ||
| 61 | if (mdev->device_up && priv->port_up) { | ||
| 62 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); | ||
| 63 | if (err) | ||
| 64 | en_err(priv, "Failed configuring VLAN filter\n"); | ||
| 65 | } | ||
| 66 | if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx)) | ||
| 67 | en_err(priv, "failed adding vlan %d\n", vid); | ||
| 68 | mutex_unlock(&mdev->state_lock); | ||
| 69 | |||
| 70 | } | ||
| 71 | |||
| 72 | static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | ||
| 73 | { | ||
| 74 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 75 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 76 | int err; | ||
| 77 | int idx; | ||
| 78 | |||
| 79 | en_dbg(HW, priv, "Killing VID:%d\n", vid); | ||
| 80 | |||
| 81 | clear_bit(vid, priv->active_vlans); | ||
| 82 | |||
| 83 | /* Remove VID from port VLAN filter */ | ||
| 84 | mutex_lock(&mdev->state_lock); | ||
| 85 | if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx)) | ||
| 86 | mlx4_unregister_vlan(mdev->dev, priv->port, idx); | ||
| 87 | else | ||
| 88 | en_err(priv, "could not find vid %d in cache\n", vid); | ||
| 89 | |||
| 90 | if (mdev->device_up && priv->port_up) { | ||
| 91 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); | ||
| 92 | if (err) | ||
| 93 | en_err(priv, "Failed configuring VLAN filter\n"); | ||
| 94 | } | ||
| 95 | mutex_unlock(&mdev->state_lock); | ||
| 96 | } | ||
| 97 | |||
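| | /* Pack a MAC address into a u64, most significant byte first */ | ||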
| 98 | u64 mlx4_en_mac_to_u64(u8 *addr) | ||
| 99 | { | ||
| 100 | u64 mac = 0; | ||
| 101 | int i; | ||
| 102 | |||
| 103 | for (i = 0; i < ETH_ALEN; i++) { | ||
| 104 | mac <<= 8; | ||
| 105 | mac |= addr[i]; | ||
| 106 | } | ||
| 107 | return mac; | ||
| 108 | } | ||
| 109 | |||
| 110 | static int mlx4_en_set_mac(struct net_device *dev, void *addr) | ||
| 111 | { | ||
| 112 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 113 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 114 | struct sockaddr *saddr = addr; | ||
| 115 | |||
| 116 | if (!is_valid_ether_addr(saddr->sa_data)) | ||
| 117 | return -EADDRNOTAVAIL; | ||
| 118 | |||
| 119 | memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); | ||
| 120 | priv->mac = mlx4_en_mac_to_u64(dev->dev_addr); | ||
| 121 | queue_work(mdev->workqueue, &priv->mac_task); | ||
| 122 | return 0; | ||
| 123 | } | ||
| 124 | |||
| 125 | static void mlx4_en_do_set_mac(struct work_struct *work) | ||
| 126 | { | ||
| 127 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, | ||
| 128 | mac_task); | ||
| 129 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 130 | int err = 0; | ||
| 131 | |||
| 132 | mutex_lock(&mdev->state_lock); | ||
| 133 | if (priv->port_up) { | ||
| 134 | /* Remove old MAC and insert the new one */ | ||
| 135 | err = mlx4_replace_mac(mdev->dev, priv->port, | ||
| 136 | priv->base_qpn, priv->mac, 0); | ||
| 137 | if (err) | ||
| 138 | en_err(priv, "Failed changing HW MAC address\n"); | ||
| 139 | } else | ||
| 140 | en_dbg(HW, priv, "Port is down while " | ||
| 141 | "registering mac, exiting...\n"); | ||
| 142 | |||
| 143 | mutex_unlock(&mdev->state_lock); | ||
| 144 | } | ||
| 145 | |||
| 146 | static void mlx4_en_clear_list(struct net_device *dev) | ||
| 147 | { | ||
| 148 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 149 | |||
| 150 | kfree(priv->mc_addrs); | ||
| 151 | priv->mc_addrs_cnt = 0; | ||
| 152 | } | ||
| 153 | |||
| 154 | static void mlx4_en_cache_mclist(struct net_device *dev) | ||
| 155 | { | ||
| 156 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 157 | struct netdev_hw_addr *ha; | ||
| 158 | char *mc_addrs; | ||
| 159 | int mc_addrs_cnt = netdev_mc_count(dev); | ||
| 160 | int i; | ||
| 161 | |||
| 162 | mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC); | ||
| 163 | if (!mc_addrs) { | ||
| 164 | en_err(priv, "failed to allocate multicast list\n"); | ||
| 165 | return; | ||
| 166 | } | ||
| 167 | i = 0; | ||
| 168 | netdev_for_each_mc_addr(ha, dev) | ||
| 169 | memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN); | ||
| 170 | priv->mc_addrs = mc_addrs; | ||
| 171 | priv->mc_addrs_cnt = mc_addrs_cnt; | ||
| 172 | } | ||
| 173 | |||
| 174 | |||
| 175 | static void mlx4_en_set_multicast(struct net_device *dev) | ||
| 176 | { | ||
| 177 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 178 | |||
| 179 | if (!priv->port_up) | ||
| 180 | return; | ||
| 181 | |||
| 182 | queue_work(priv->mdev->workqueue, &priv->mcast_task); | ||
| 183 | } | ||
| 184 | |||
| 185 | static void mlx4_en_do_set_multicast(struct work_struct *work) | ||
| 186 | { | ||
| 187 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, | ||
| 188 | mcast_task); | ||
| 189 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 190 | struct net_device *dev = priv->dev; | ||
| 191 | u64 mcast_addr = 0; | ||
| 192 | u8 mc_list[16] = {0}; | ||
| 193 | int err; | ||
| 194 | |||
| 195 | mutex_lock(&mdev->state_lock); | ||
| 196 | if (!mdev->device_up) { | ||
| 197 | en_dbg(HW, priv, "Card is not up, " | ||
| 198 | "ignoring multicast change.\n"); | ||
| 199 | goto out; | ||
| 200 | } | ||
| 201 | if (!priv->port_up) { | ||
| 202 | en_dbg(HW, priv, "Port is down, " | ||
| 203 | "ignoring multicast change.\n"); | ||
| 204 | goto out; | ||
| 205 | } | ||
| 206 | |||
| 207 | /* | ||
| 208 | * Promiscuous mode: disable all filters | ||
| 209 | */ | ||
| 210 | |||
| 211 | if (dev->flags & IFF_PROMISC) { | ||
| 212 | if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { | ||
| 213 | if (netif_msg_rx_status(priv)) | ||
| 214 | en_warn(priv, "Entering promiscuous mode\n"); | ||
| 215 | priv->flags |= MLX4_EN_FLAG_PROMISC; | ||
| 216 | |||
| 217 | /* Enable promiscuous mode */ | ||
| 218 | if (!(mdev->dev->caps.flags & | ||
| 219 | MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) | ||
| 220 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, | ||
| 221 | priv->base_qpn, 1); | ||
| 222 | else | ||
| 223 | err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn, | ||
| 224 | priv->port); | ||
| 225 | if (err) | ||
| 226 | en_err(priv, "Failed enabling " | ||
| 227 | "promiscuous mode\n"); | ||
| 228 | |||
| 229 | /* Disable port multicast filter (unconditionally) */ | ||
| 230 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, | ||
| 231 | 0, MLX4_MCAST_DISABLE); | ||
| 232 | if (err) | ||
| 233 | en_err(priv, "Failed disabling " | ||
| 234 | "multicast filter\n"); | ||
| 235 | |||
| 236 | /* Add the default qp number as multicast promisc */ | ||
| 237 | if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { | ||
| 238 | err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn, | ||
| 239 | priv->port); | ||
| 240 | if (err) | ||
| 241 | en_err(priv, "Failed entering multicast promisc mode\n"); | ||
| 242 | priv->flags |= MLX4_EN_FLAG_MC_PROMISC; | ||
| 243 | } | ||
| 244 | |||
| 245 | /* Disable port VLAN filter */ | ||
| 246 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); | ||
| 247 | if (err) | ||
| 248 | en_err(priv, "Failed disabling VLAN filter\n"); | ||
| 249 | } | ||
| 250 | goto out; | ||
| 251 | } | ||
| 252 | |||
| 253 | /* | ||
| 254 | * Not in promiscuous mode | ||
| 255 | */ | ||
| 256 | |||
| 257 | if (priv->flags & MLX4_EN_FLAG_PROMISC) { | ||
| 258 | if (netif_msg_rx_status(priv)) | ||
| 259 | en_warn(priv, "Leaving promiscuous mode\n"); | ||
| 260 | priv->flags &= ~MLX4_EN_FLAG_PROMISC; | ||
| 261 | |||
| 262 | /* Disable promiscuous mode */ | ||
| 263 | if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) | ||
| 264 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, | ||
| 265 | priv->base_qpn, 0); | ||
| 266 | else | ||
| 267 | err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, | ||
| 268 | priv->port); | ||
| 269 | if (err) | ||
| 270 | en_err(priv, "Failed disabling promiscuous mode\n"); | ||
| 271 | |||
| 272 | /* Disable Multicast promisc */ | ||
| 273 | if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { | ||
| 274 | err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, | ||
| 275 | priv->port); | ||
| 276 | if (err) | ||
| 277 | en_err(priv, "Failed disabling multicast promiscuous mode\n"); | ||
| 278 | priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; | ||
| 279 | } | ||
| 280 | |||
| 281 | /* Enable port VLAN filter */ | ||
| 282 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); | ||
| 283 | if (err) | ||
| 284 | en_err(priv, "Failed enabling VLAN filter\n"); | ||
| 285 | } | ||
| 286 | |||
| 287 | /* Enable/disable the multicast filter according to IFF_ALLMULTI */ | ||
| 288 | if (dev->flags & IFF_ALLMULTI) { | ||
| 289 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, | ||
| 290 | 0, MLX4_MCAST_DISABLE); | ||
| 291 | if (err) | ||
| 292 | en_err(priv, "Failed disabling multicast filter\n"); | ||
| 293 | |||
| 294 | /* Add the default qp number as multicast promisc */ | ||
| 295 | if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { | ||
| 296 | err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn, | ||
| 297 | priv->port); | ||
| 298 | if (err) | ||
| 299 | en_err(priv, "Failed entering multicast promisc mode\n"); | ||
| 300 | priv->flags |= MLX4_EN_FLAG_MC_PROMISC; | ||
| 301 | } | ||
| 302 | } else { | ||
| 303 | int i; | ||
| 304 | /* Disable Multicast promisc */ | ||
| 305 | if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { | ||
| 306 | err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, | ||
| 307 | priv->port); | ||
| 308 | if (err) | ||
| 309 | en_err(priv, "Failed disabling multicast promiscuous mode\n"); | ||
| 310 | priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; | ||
| 311 | } | ||
| 312 | |||
| 313 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, | ||
| 314 | 0, MLX4_MCAST_DISABLE); | ||
| 315 | if (err) | ||
| 316 | en_err(priv, "Failed disabling multicast filter\n"); | ||
| 317 | |||
| 318 | /* Detach our qp from all the multicast addresses */ | ||
| 319 | for (i = 0; i < priv->mc_addrs_cnt; i++) { | ||
| 320 | memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN); | ||
| 321 | mc_list[5] = priv->port; | ||
| 322 | mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, | ||
| 323 | mc_list, MLX4_PROT_ETH); | ||
| 324 | } | ||
| 325 | /* Flush mcast filter and init it with broadcast address */ | ||
| 326 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, | ||
| 327 | 1, MLX4_MCAST_CONFIG); | ||
| 328 | |||
| 329 | /* Update multicast list - we cache all addresses so they won't | ||
| 330 | * change while HW is updated holding the command semaphore */ | ||
| 331 | netif_tx_lock_bh(dev); | ||
| 332 | mlx4_en_cache_mclist(dev); | ||
| 333 | netif_tx_unlock_bh(dev); | ||
| 334 | for (i = 0; i < priv->mc_addrs_cnt; i++) { | ||
| 335 | mcast_addr = | ||
| 336 | mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN); | ||
| 337 | memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN); | ||
| 338 | mc_list[5] = priv->port; | ||
| 339 | mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, | ||
| 340 | mc_list, 0, MLX4_PROT_ETH); | ||
| 341 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, | ||
| 342 | mcast_addr, 0, MLX4_MCAST_CONFIG); | ||
| 343 | } | ||
| 344 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, | ||
| 345 | 0, MLX4_MCAST_ENABLE); | ||
| 346 | if (err) | ||
| 347 | en_err(priv, "Failed enabling multicast filter\n"); | ||
| 348 | } | ||
| 349 | out: | ||
| 350 | mutex_unlock(&mdev->state_lock); | ||
| 351 | } | ||
| 352 | |||
| 353 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 354 | static void mlx4_en_netpoll(struct net_device *dev) | ||
| 355 | { | ||
| 356 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 357 | struct mlx4_en_cq *cq; | ||
| 358 | unsigned long flags; | ||
| 359 | int i; | ||
| 360 | |||
| 361 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
| 362 | cq = &priv->rx_cq[i]; | ||
| 363 | spin_lock_irqsave(&cq->lock, flags); | ||
| 364 | napi_synchronize(&cq->napi); | ||
| 365 | mlx4_en_process_rx_cq(dev, cq, 0); | ||
| 366 | spin_unlock_irqrestore(&cq->lock, flags); | ||
| 367 | } | ||
| 368 | } | ||
| 369 | #endif | ||
| 370 | |||
| 371 | static void mlx4_en_tx_timeout(struct net_device *dev) | ||
| 372 | { | ||
| 373 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 374 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 375 | |||
| 376 | if (netif_msg_timer(priv)) | ||
| 377 | en_warn(priv, "Tx timeout called on port:%d\n", priv->port); | ||
| 378 | |||
| 379 | priv->port_stats.tx_timeout++; | ||
| 380 | en_dbg(DRV, priv, "Scheduling watchdog\n"); | ||
| 381 | queue_work(mdev->workqueue, &priv->watchdog_task); | ||
| 382 | } | ||
| 383 | |||
| 384 | |||
| 385 | static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev) | ||
| 386 | { | ||
| 387 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 388 | |||
| 389 | spin_lock_bh(&priv->stats_lock); | ||
| 390 | memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats)); | ||
| 391 | spin_unlock_bh(&priv->stats_lock); | ||
| 392 | |||
| 393 | return &priv->ret_stats; | ||
| 394 | } | ||
| 395 | |||
| 396 | static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) | ||
| 397 | { | ||
| 398 | struct mlx4_en_cq *cq; | ||
| 399 | int i; | ||
| 400 | |||
| 401 | /* If we haven't received a specific coalescing setting | ||
| 402 | * (module param), we set the moderation parameters as follows: | ||
| 403 | * - moder_cnt is set to the number of MTU-sized packets needed to | ||
| 404 | * satisfy our coalescing target. | ||
| 405 | * - moder_time is set to a fixed value. | ||
| 406 | */ | ||
| 407 | priv->rx_frames = MLX4_EN_RX_COAL_TARGET; | ||
| 408 | priv->rx_usecs = MLX4_EN_RX_COAL_TIME; | ||
| 409 | en_dbg(INTR, priv, "Default coalesing params for mtu:%d - " | ||
| 410 | "rx_frames:%d rx_usecs:%d\n", | ||
| 411 | priv->dev->mtu, priv->rx_frames, priv->rx_usecs); | ||
| 412 | |||
| 413 | /* Setup cq moderation params */ | ||
| 414 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
| 415 | cq = &priv->rx_cq[i]; | ||
| 416 | cq->moder_cnt = priv->rx_frames; | ||
| 417 | cq->moder_time = priv->rx_usecs; | ||
| 418 | } | ||
| 419 | |||
| 420 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
| 421 | cq = &priv->tx_cq[i]; | ||
| 422 | cq->moder_cnt = MLX4_EN_TX_COAL_PKTS; | ||
| 423 | cq->moder_time = MLX4_EN_TX_COAL_TIME; | ||
| 424 | } | ||
| 425 | |||
| 426 | /* Reset auto-moderation params */ | ||
| 427 | priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW; | ||
| 428 | priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW; | ||
| 429 | priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH; | ||
| 430 | priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH; | ||
| 431 | priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL; | ||
| 432 | priv->adaptive_rx_coal = 1; | ||
| 433 | priv->last_moder_time = MLX4_EN_AUTO_CONF; | ||
| 434 | priv->last_moder_jiffies = 0; | ||
| 435 | priv->last_moder_packets = 0; | ||
| 436 | priv->last_moder_tx_packets = 0; | ||
| 437 | priv->last_moder_bytes = 0; | ||
| 438 | } | ||
| 439 | |||
| 440 | static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) | ||
| 441 | { | ||
| 442 | unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); | ||
| 443 | struct mlx4_en_cq *cq; | ||
| 444 | unsigned long packets; | ||
| 445 | unsigned long rate; | ||
| 446 | unsigned long avg_pkt_size; | ||
| 447 | unsigned long rx_packets; | ||
| 448 | unsigned long rx_bytes; | ||
| 449 | unsigned long tx_packets; | ||
| 450 | unsigned long tx_pkt_diff; | ||
| 451 | unsigned long rx_pkt_diff; | ||
| 452 | int moder_time; | ||
| 453 | int i, err; | ||
| 454 | |||
| 455 | if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ) | ||
| 456 | return; | ||
| 457 | |||
| 458 | spin_lock_bh(&priv->stats_lock); | ||
| 459 | rx_packets = priv->stats.rx_packets; | ||
| 460 | rx_bytes = priv->stats.rx_bytes; | ||
| 461 | tx_packets = priv->stats.tx_packets; | ||
| 462 | spin_unlock_bh(&priv->stats_lock); | ||
| 463 | |||
| 464 | if (!priv->last_moder_jiffies || !period) | ||
| 465 | goto out; | ||
| 466 | |||
| 467 | tx_pkt_diff = ((unsigned long) (tx_packets - | ||
| 468 | priv->last_moder_tx_packets)); | ||
| 469 | rx_pkt_diff = ((unsigned long) (rx_packets - | ||
| 470 | priv->last_moder_packets)); | ||
| 471 | packets = max(tx_pkt_diff, rx_pkt_diff); | ||
| 472 | rate = packets * HZ / period; | ||
| 473 | avg_pkt_size = packets ? ((unsigned long) (rx_bytes - | ||
| 474 | priv->last_moder_bytes)) / packets : 0; | ||
| 475 | |||
| 476 | /* Apply auto-moderation only when packet rate exceeds a rate that | ||
| 477 | * it matters */ | ||
| 478 | if (rate > MLX4_EN_RX_RATE_THRESH && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) { | ||
| 479 | /* If tx and rx packet rates are not balanced, assume that | ||
| 480 | * traffic is mainly BW bound and apply maximum moderation. | ||
| 481 | * Otherwise, moderate according to packet rate */ | ||
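| | /* Between the two rate thresholds the moderation time is | ||
| | * interpolated linearly from rx_usecs_low up to rx_usecs_high */ | ||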
| 482 | if (2 * tx_pkt_diff > 3 * rx_pkt_diff || | ||
| 483 | 2 * rx_pkt_diff > 3 * tx_pkt_diff) { | ||
| 484 | moder_time = priv->rx_usecs_high; | ||
| 485 | } else { | ||
| 486 | if (rate < priv->pkt_rate_low) | ||
| 487 | moder_time = priv->rx_usecs_low; | ||
| 488 | else if (rate > priv->pkt_rate_high) | ||
| 489 | moder_time = priv->rx_usecs_high; | ||
| 490 | else | ||
| 491 | moder_time = (rate - priv->pkt_rate_low) * | ||
| 492 | (priv->rx_usecs_high - priv->rx_usecs_low) / | ||
| 493 | (priv->pkt_rate_high - priv->pkt_rate_low) + | ||
| 494 | priv->rx_usecs_low; | ||
| 495 | } | ||
| 496 | } else { | ||
| 497 | moder_time = priv->rx_usecs_low; | ||
| 498 | } | ||
| 499 | |||
| 500 | en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n", | ||
| 501 | tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period); | ||
| 502 | |||
| 503 | en_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu " | ||
| 504 | "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n", | ||
| 505 | priv->last_moder_time, moder_time, period, packets, | ||
| 506 | avg_pkt_size, rate); | ||
| 507 | |||
| 508 | if (moder_time != priv->last_moder_time) { | ||
| 509 | priv->last_moder_time = moder_time; | ||
| 510 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
| 511 | cq = &priv->rx_cq[i]; | ||
| 512 | cq->moder_time = moder_time; | ||
| 513 | err = mlx4_en_set_cq_moder(priv, cq); | ||
| 514 | if (err) { | ||
| 515 | en_err(priv, "Failed modifying moderation for cq:%d\n", i); | ||
| 516 | break; | ||
| 517 | } | ||
| 518 | } | ||
| 519 | } | ||
| 520 | |||
| 521 | out: | ||
| 522 | priv->last_moder_packets = rx_packets; | ||
| 523 | priv->last_moder_tx_packets = tx_packets; | ||
| 524 | priv->last_moder_bytes = rx_bytes; | ||
| 525 | priv->last_moder_jiffies = jiffies; | ||
| 526 | } | ||
| 527 | |||
| 528 | static void mlx4_en_do_get_stats(struct work_struct *work) | ||
| 529 | { | ||
| 530 | struct delayed_work *delay = to_delayed_work(work); | ||
| 531 | struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, | ||
| 532 | stats_task); | ||
| 533 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 534 | int err; | ||
| 535 | |||
| 536 | err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); | ||
| 537 | if (err) | ||
| 538 | en_dbg(HW, priv, "Could not update stats\n"); | ||
| 539 | |||
| 540 | mutex_lock(&mdev->state_lock); | ||
| 541 | if (mdev->device_up) { | ||
| 542 | if (priv->port_up) | ||
| 543 | mlx4_en_auto_moderation(priv); | ||
| 544 | |||
| 545 | queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); | ||
| 546 | } | ||
| 547 | if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { | ||
| 548 | queue_work(mdev->workqueue, &priv->mac_task); | ||
| 549 | mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; | ||
| 550 | } | ||
| 551 | mutex_unlock(&mdev->state_lock); | ||
| 552 | } | ||
| 553 | |||
| 554 | static void mlx4_en_linkstate(struct work_struct *work) | ||
| 555 | { | ||
| 556 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, | ||
| 557 | linkstate_task); | ||
| 558 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 559 | int linkstate = priv->link_state; | ||
| 560 | |||
| 561 | mutex_lock(&mdev->state_lock); | ||
| 562 | /* If observable port state changed set carrier state and | ||
| 563 | * report to system log */ | ||
| 564 | if (priv->last_link_state != linkstate) { | ||
| 565 | if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { | ||
| 566 | en_info(priv, "Link Down\n"); | ||
| 567 | netif_carrier_off(priv->dev); | ||
| 568 | } else { | ||
| 569 | en_info(priv, "Link Up\n"); | ||
| 570 | netif_carrier_on(priv->dev); | ||
| 571 | } | ||
| 572 | } | ||
| 573 | priv->last_link_state = linkstate; | ||
| 574 | mutex_unlock(&mdev->state_lock); | ||
| 575 | } | ||
| 576 | |||
| 577 | |||
| 578 | int mlx4_en_start_port(struct net_device *dev) | ||
| 579 | { | ||
| 580 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 581 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 582 | struct mlx4_en_cq *cq; | ||
| 583 | struct mlx4_en_tx_ring *tx_ring; | ||
| 584 | int rx_index = 0; | ||
| 585 | int tx_index = 0; | ||
| 586 | int err = 0; | ||
| 587 | int i; | ||
| 588 | int j; | ||
| 589 | u8 mc_list[16] = {0}; | ||
| 590 | char name[32]; | ||
| 591 | |||
| 592 | if (priv->port_up) { | ||
| 593 | en_dbg(DRV, priv, "start port called while port already up\n"); | ||
| 594 | return 0; | ||
| 595 | } | ||
| 596 | |||
| 597 | /* Calculate Rx buf size */ | ||
| 598 | dev->mtu = min(dev->mtu, priv->max_mtu); | ||
| 599 | mlx4_en_calc_rx_buf(dev); | ||
| 600 | en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size); | ||
| 601 | |||
| 602 | /* Configure rx cq's and rings */ | ||
| 603 | err = mlx4_en_activate_rx_rings(priv); | ||
| 604 | if (err) { | ||
| 605 | en_err(priv, "Failed to activate RX rings\n"); | ||
| 606 | return err; | ||
| 607 | } | ||
| 608 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
| 609 | cq = &priv->rx_cq[i]; | ||
| 610 | |||
| 611 | err = mlx4_en_activate_cq(priv, cq); | ||
| 612 | if (err) { | ||
| 613 | en_err(priv, "Failed activating Rx CQ\n"); | ||
| 614 | goto cq_err; | ||
| 615 | } | ||
| 616 | for (j = 0; j < cq->size; j++) | ||
| 617 | cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK; | ||
| 618 | err = mlx4_en_set_cq_moder(priv, cq); | ||
| 619 | if (err) { | ||
| 620 | en_err(priv, "Failed setting cq moderation parameters"); | ||
| 621 | mlx4_en_deactivate_cq(priv, cq); | ||
| 622 | goto cq_err; | ||
| 623 | } | ||
| 624 | mlx4_en_arm_cq(priv, cq); | ||
| 625 | priv->rx_ring[i].cqn = cq->mcq.cqn; | ||
| 626 | ++rx_index; | ||
| 627 | } | ||
| 628 | |||
| 629 | /* Set the port MAC address */ | ||
| 630 | en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port); | ||
| 631 | err = mlx4_register_mac(mdev->dev, priv->port, | ||
| 632 | priv->mac, &priv->base_qpn, 0); | ||
| 633 | if (err) { | ||
| 634 | en_err(priv, "Failed setting port mac\n"); | ||
| 635 | goto cq_err; | ||
| 636 | } | ||
| 637 | mdev->mac_removed[priv->port] = 0; | ||
| 638 | |||
| 639 | err = mlx4_en_config_rss_steer(priv); | ||
| 640 | if (err) { | ||
| 641 | en_err(priv, "Failed configuring rss steering\n"); | ||
| 642 | goto mac_err; | ||
| 643 | } | ||
| 644 | |||
| 645 | if (mdev->dev->caps.comp_pool && !priv->tx_vector) { | ||
| 646 | sprintf(name, "%s-tx", priv->dev->name); | ||
| 647 | if (mlx4_assign_eq(mdev->dev, name, &priv->tx_vector)) { | ||
| 648 | mlx4_warn(mdev, "Failed assigning an EQ to " | ||
| 649 | "%s-tx, falling back to legacy " | ||
| 650 | "EQs\n", priv->dev->name); | ||
| 651 | } | ||
| 652 | } | ||
| 653 | /* Configure tx cq's and rings */ | ||
| 654 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
| 655 | /* Configure cq */ | ||
| 656 | cq = &priv->tx_cq[i]; | ||
| 657 | cq->vector = priv->tx_vector; | ||
| 658 | err = mlx4_en_activate_cq(priv, cq); | ||
| 659 | if (err) { | ||
| 660 | en_err(priv, "Failed allocating Tx CQ\n"); | ||
| 661 | goto tx_err; | ||
| 662 | } | ||
| 663 | err = mlx4_en_set_cq_moder(priv, cq); | ||
| 664 | if (err) { | ||
| 665 | en_err(priv, "Failed setting cq moderation parameters"); | ||
| 666 | mlx4_en_deactivate_cq(priv, cq); | ||
| 667 | goto tx_err; | ||
| 668 | } | ||
| 669 | en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i); | ||
| 670 | cq->buf->wqe_index = cpu_to_be16(0xffff); | ||
| 671 | |||
| 672 | /* Configure ring */ | ||
| 673 | tx_ring = &priv->tx_ring[i]; | ||
| 674 | err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn); | ||
| 675 | if (err) { | ||
| 676 | en_err(priv, "Failed allocating Tx ring\n"); | ||
| 677 | mlx4_en_deactivate_cq(priv, cq); | ||
| 678 | goto tx_err; | ||
| 679 | } | ||
| 680 | /* Set initial ownership of all Tx TXBBs to SW (1) */ | ||
| 681 | for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) | ||
| 682 | *((u32 *) (tx_ring->buf + j)) = 0xffffffff; | ||
| 683 | ++tx_index; | ||
| 684 | } | ||
| 685 | |||
| 686 | /* Configure port */ | ||
| 687 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, | ||
| 688 | priv->rx_skb_size + ETH_FCS_LEN, | ||
| 689 | priv->prof->tx_pause, | ||
| 690 | priv->prof->tx_ppp, | ||
| 691 | priv->prof->rx_pause, | ||
| 692 | priv->prof->rx_ppp); | ||
| 693 | if (err) { | ||
| 694 | en_err(priv, "Failed setting port general configurations " | ||
| 695 | "for port %d, with error %d\n", priv->port, err); | ||
| 696 | goto tx_err; | ||
| 697 | } | ||
| 698 | /* Set default qp number */ | ||
| 699 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); | ||
| 700 | if (err) { | ||
| 701 | en_err(priv, "Failed setting default qp numbers\n"); | ||
| 702 | goto tx_err; | ||
| 703 | } | ||
| 704 | |||
| 705 | /* Init port */ | ||
| 706 | en_dbg(HW, priv, "Initializing port\n"); | ||
| 707 | err = mlx4_INIT_PORT(mdev->dev, priv->port); | ||
| 708 | if (err) { | ||
| 709 | en_err(priv, "Failed Initializing port\n"); | ||
| 710 | goto tx_err; | ||
| 711 | } | ||
| 712 | |||
| 713 | /* Attach RX QP to broadcast address */ | ||
| 714 | memset(&mc_list[10], 0xff, ETH_ALEN); | ||
| 715 | mc_list[5] = priv->port; | ||
| 716 | if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, | ||
| 717 | 0, MLX4_PROT_ETH)) | ||
| 718 | mlx4_warn(mdev, "Failed Attaching Broadcast\n"); | ||
| 719 | |||
| 720 | /* Must redo promiscuous mode setup. */ | ||
| 721 | priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); | ||
| 722 | |||
| 723 | /* Schedule multicast task to populate multicast list */ | ||
| 724 | queue_work(mdev->workqueue, &priv->mcast_task); | ||
| 725 | |||
| 726 | priv->port_up = true; | ||
| 727 | netif_tx_start_all_queues(dev); | ||
| 728 | return 0; | ||
| 729 | |||
| 730 | tx_err: | ||
| 731 | while (tx_index--) { | ||
| 732 | mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]); | ||
| 733 | mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]); | ||
| 734 | } | ||
| 735 | |||
| 736 | mlx4_en_release_rss_steer(priv); | ||
| 737 | mac_err: | ||
| 738 | mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn); | ||
| 739 | cq_err: | ||
| 740 | while (rx_index--) | ||
| 741 | mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]); | ||
| 742 | for (i = 0; i < priv->rx_ring_num; i++) | ||
| 743 | mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); | ||
| 744 | |||
| 745 | return err; /* need to close devices */ | ||
| 746 | } | ||
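The error path above is the standard kernel goto ladder: each failure jumps to a label that unwinds, in reverse order, exactly the stages that had already completed (tx_err, then mac_err, then cq_err). A stand-alone sketch of the idiom, with made-up stage names:

    #include <stdio.h>

    /* Each setup step either succeeds or reports failure. */
    static int step(const char *name, int fail)
    {
            printf("setup %s\n", name);
            return fail ? -1 : 0;
    }

    /* Failure at any point jumps to a label that tears down, in reverse
     * order, only what was already set up. */
    static int start(void)
    {
            int err;

            err = step("rx rings", 0);
            if (err)
                    return err;
            err = step("mac", 0);
            if (err)
                    goto rx_err;
            err = step("tx rings", 1);      /* forced failure for the demo */
            if (err)
                    goto mac_err;
            return 0;

    mac_err:
            printf("undo mac\n");
    rx_err:
            printf("undo rx rings\n");
            return err;
    }

    int main(void)
    {
            return start() ? 1 : 0;
    }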
| 747 | |||
| 748 | |||
| 749 | void mlx4_en_stop_port(struct net_device *dev) | ||
| 750 | { | ||
| 751 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 752 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 753 | int i; | ||
| 754 | u8 mc_list[16] = {0}; | ||
| 755 | |||
| 756 | if (!priv->port_up) { | ||
| 757 | en_dbg(DRV, priv, "stop port called while port already down\n"); | ||
| 758 | return; | ||
| 759 | } | ||
| 760 | |||
| 761 | /* Synchronize with tx routine */ | ||
| 762 | netif_tx_lock_bh(dev); | ||
| 763 | netif_tx_stop_all_queues(dev); | ||
| 764 | netif_tx_unlock_bh(dev); | ||
| 765 | |||
| 766 | /* Set port as not active */ | ||
| 767 | priv->port_up = false; | ||
| 768 | |||
| 769 | /* Detach all multicast addresses */ | ||
| 770 | memset(&mc_list[10], 0xff, ETH_ALEN); | ||
| 771 | mc_list[5] = priv->port; | ||
| 772 | mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, | ||
| 773 | MLX4_PROT_ETH); | ||
| 774 | for (i = 0; i < priv->mc_addrs_cnt; i++) { | ||
| 775 | memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN); | ||
| 776 | mc_list[5] = priv->port; | ||
| 777 | mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, | ||
| 778 | mc_list, MLX4_PROT_ETH); | ||
| 779 | } | ||
| 780 | mlx4_en_clear_list(dev); | ||
| 781 | /* Flush multicast filter */ | ||
| 782 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); | ||
| 783 | |||
| 784 | /* Unregister the MAC address for the port */ | ||
| 785 | mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn); | ||
| 786 | mdev->mac_removed[priv->port] = 1; | ||
| 787 | |||
| 788 | /* Free TX Rings */ | ||
| 789 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
| 790 | mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]); | ||
| 791 | mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]); | ||
| 792 | } | ||
| 793 | msleep(10); | ||
| 794 | |||
| 795 | for (i = 0; i < priv->tx_ring_num; i++) | ||
| 796 | mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]); | ||
| 797 | |||
| 798 | /* Free RSS qps */ | ||
| 799 | mlx4_en_release_rss_steer(priv); | ||
| 800 | |||
| 801 | /* Free RX Rings */ | ||
| 802 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
| 803 | mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); | ||
| 804 | while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state)) | ||
| 805 | msleep(1); | ||
| 806 | mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]); | ||
| 807 | } | ||
| 808 | |||
| 809 | /* Close port */ | ||
| 810 | mlx4_CLOSE_PORT(mdev->dev, priv->port); | ||
| 811 | } | ||
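Both the start and stop paths build the 16-byte entry passed to mlx4_multicast_attach()/mlx4_multicast_detach() the same way: the Ethernet MAC occupies bytes 10..15 and the port number is written into byte 5 (layout as read off the code above; the surrounding bytes stay zero). A small sketch of that construction:

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* Build the 16-byte multicast entry used with
     * mlx4_multicast_attach()/detach(): MAC in bytes 10..15,
     * port number in byte 5, everything else zero. */
    static void build_mc_entry(unsigned char entry[16],
                               const unsigned char mac[ETH_ALEN],
                               unsigned char port)
    {
            memset(entry, 0, 16);
            memcpy(&entry[10], mac, ETH_ALEN);
            entry[5] = port;
    }

    int main(void)
    {
            unsigned char bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
            unsigned char entry[16];
            int i;

            build_mc_entry(entry, bcast, 1);        /* broadcast entry, port 1 */
            for (i = 0; i < 16; i++)
                    printf("%02x%c", entry[i], i == 15 ? '\n' : ' ');
            return 0;
    }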
| 812 | |||
| 813 | static void mlx4_en_restart(struct work_struct *work) | ||
| 814 | { | ||
| 815 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, | ||
| 816 | watchdog_task); | ||
| 817 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 818 | struct net_device *dev = priv->dev; | ||
| 819 | |||
| 820 | en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); | ||
| 821 | |||
| 822 | mutex_lock(&mdev->state_lock); | ||
| 823 | if (priv->port_up) { | ||
| 824 | mlx4_en_stop_port(dev); | ||
| 825 | if (mlx4_en_start_port(dev)) | ||
| 826 | en_err(priv, "Failed restarting port %d\n", priv->port); | ||
| 827 | } | ||
| 828 | mutex_unlock(&mdev->state_lock); | ||
| 829 | } | ||
| 830 | |||
| 831 | |||
| 832 | static int mlx4_en_open(struct net_device *dev) | ||
| 833 | { | ||
| 834 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 835 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 836 | int i; | ||
| 837 | int err = 0; | ||
| 838 | |||
| 839 | mutex_lock(&mdev->state_lock); | ||
| 840 | |||
| 841 | if (!mdev->device_up) { | ||
| 842 | en_err(priv, "Cannot open - device down/disabled\n"); | ||
| 843 | err = -EBUSY; | ||
| 844 | goto out; | ||
| 845 | } | ||
| 846 | |||
| 847 | /* Reset HW statistics and performance counters */ | ||
| 848 | if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) | ||
| 849 | en_dbg(HW, priv, "Failed dumping statistics\n"); | ||
| 850 | |||
| 851 | memset(&priv->stats, 0, sizeof(priv->stats)); | ||
| 852 | memset(&priv->pstats, 0, sizeof(priv->pstats)); | ||
| 853 | |||
| 854 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
| 855 | priv->tx_ring[i].bytes = 0; | ||
| 856 | priv->tx_ring[i].packets = 0; | ||
| 857 | } | ||
| 858 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
| 859 | priv->rx_ring[i].bytes = 0; | ||
| 860 | priv->rx_ring[i].packets = 0; | ||
| 861 | } | ||
| 862 | |||
| 863 | err = mlx4_en_start_port(dev); | ||
| 864 | if (err) | ||
| 865 | en_err(priv, "Failed starting port:%d\n", priv->port); | ||
| 866 | |||
| 867 | out: | ||
| 868 | mutex_unlock(&mdev->state_lock); | ||
| 869 | return err; | ||
| 870 | } | ||
| 871 | |||
| 872 | |||
| 873 | static int mlx4_en_close(struct net_device *dev) | ||
| 874 | { | ||
| 875 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 876 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 877 | |||
| 878 | en_dbg(IFDOWN, priv, "Close port called\n"); | ||
| 879 | |||
| 880 | mutex_lock(&mdev->state_lock); | ||
| 881 | |||
| 882 | mlx4_en_stop_port(dev); | ||
| 883 | netif_carrier_off(dev); | ||
| 884 | |||
| 885 | mutex_unlock(&mdev->state_lock); | ||
| 886 | return 0; | ||
| 887 | } | ||
| 888 | |||
| 889 | void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors) | ||
| 890 | { | ||
| 891 | int i; | ||
| 892 | |||
| 893 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
| 894 | if (priv->tx_ring[i].tx_info) | ||
| 895 | mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); | ||
| 896 | if (priv->tx_cq[i].buf) | ||
| 897 | mlx4_en_destroy_cq(priv, &priv->tx_cq[i], reserve_vectors); | ||
| 898 | } | ||
| 899 | |||
| 900 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
| 901 | if (priv->rx_ring[i].rx_info) | ||
| 902 | mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]); | ||
| 903 | if (priv->rx_cq[i].buf) | ||
| 904 | mlx4_en_destroy_cq(priv, &priv->rx_cq[i], reserve_vectors); | ||
| 905 | } | ||
| 906 | } | ||
| 907 | |||
| 908 | int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) | ||
| 909 | { | ||
| 910 | struct mlx4_en_port_profile *prof = priv->prof; | ||
| 911 | int i; | ||
| 912 | int base_tx_qpn, err; | ||
| 913 | |||
| 914 | err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn); | ||
| 915 | if (err) { | ||
| 916 | en_err(priv, "failed reserving range for TX rings\n"); | ||
| 917 | return err; | ||
| 918 | } | ||
| 919 | |||
| 920 | /* Create TX rings */ | ||
| 921 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
| 922 | if (mlx4_en_create_cq(priv, &priv->tx_cq[i], | ||
| 923 | prof->tx_ring_size, i, TX)) | ||
| 924 | goto err; | ||
| 925 | |||
| 926 | if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i, | ||
| 927 | prof->tx_ring_size, TXBB_SIZE)) | ||
| 928 | goto err; | ||
| 929 | } | ||
| 930 | |||
| 931 | /* Create RX rings */ | ||
| 932 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
| 933 | if (mlx4_en_create_cq(priv, &priv->rx_cq[i], | ||
| 934 | prof->rx_ring_size, i, RX)) | ||
| 935 | goto err; | ||
| 936 | |||
| 937 | if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], | ||
| 938 | prof->rx_ring_size, priv->stride)) | ||
| 939 | goto err; | ||
| 940 | } | ||
| 941 | |||
| 942 | return 0; | ||
| 943 | |||
| 944 | err: | ||
| 945 | en_err(priv, "Failed to allocate NIC resources\n"); | ||
| 946 | mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num); | ||
| 947 | return -ENOMEM; | ||
| 948 | } | ||
| 949 | |||
| 950 | |||
| 951 | void mlx4_en_destroy_netdev(struct net_device *dev) | ||
| 952 | { | ||
| 953 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 954 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 955 | |||
| 956 | en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); | ||
| 957 | |||
| 958 | /* Unregister device - this will close the port if it was up */ | ||
| 959 | if (priv->registered) | ||
| 960 | unregister_netdev(dev); | ||
| 961 | |||
| 962 | if (priv->allocated) | ||
| 963 | mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); | ||
| 964 | |||
| 965 | cancel_delayed_work(&priv->stats_task); | ||
| 966 | /* flush any pending task for this netdev */ | ||
| 967 | flush_workqueue(mdev->workqueue); | ||
| 968 | |||
| 969 | /* Detach the netdev so tasks would not attempt to access it */ | ||
| 970 | mutex_lock(&mdev->state_lock); | ||
| 971 | mdev->pndev[priv->port] = NULL; | ||
| 972 | mutex_unlock(&mdev->state_lock); | ||
| 973 | |||
| 974 | mlx4_en_free_resources(priv, false); | ||
| 975 | free_netdev(dev); | ||
| 976 | } | ||
| 977 | |||
| 978 | static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) | ||
| 979 | { | ||
| 980 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 981 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 982 | int err = 0; | ||
| 983 | |||
| 984 | en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", | ||
| 985 | dev->mtu, new_mtu); | ||
| 986 | |||
| 987 | if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { | ||
| 988 | en_err(priv, "Bad MTU size:%d.\n", new_mtu); | ||
| 989 | return -EPERM; | ||
| 990 | } | ||
| 991 | dev->mtu = new_mtu; | ||
| 992 | |||
| 993 | if (netif_running(dev)) { | ||
| 994 | mutex_lock(&mdev->state_lock); | ||
| 995 | if (!mdev->device_up) { | ||
| 996 | /* NIC is probably restarting - let watchdog task reset | ||
| 997 | * the port */ | ||
| 998 | en_dbg(DRV, priv, "Change MTU called with card down!?\n"); | ||
| 999 | } else { | ||
| 1000 | mlx4_en_stop_port(dev); | ||
| 1001 | err = mlx4_en_start_port(dev); | ||
| 1002 | if (err) { | ||
| 1003 | en_err(priv, "Failed restarting port:%d\n", | ||
| 1004 | priv->port); | ||
| 1005 | queue_work(mdev->workqueue, &priv->watchdog_task); | ||
| 1006 | } | ||
| 1007 | } | ||
| 1008 | mutex_unlock(&mdev->state_lock); | ||
| 1009 | } | ||
| 1010 | return 0; | ||
| 1011 | } | ||
| 1012 | |||
| 1013 | static const struct net_device_ops mlx4_netdev_ops = { | ||
| 1014 | .ndo_open = mlx4_en_open, | ||
| 1015 | .ndo_stop = mlx4_en_close, | ||
| 1016 | .ndo_start_xmit = mlx4_en_xmit, | ||
| 1017 | .ndo_select_queue = mlx4_en_select_queue, | ||
| 1018 | .ndo_get_stats = mlx4_en_get_stats, | ||
| 1019 | .ndo_set_multicast_list = mlx4_en_set_multicast, | ||
| 1020 | .ndo_set_mac_address = mlx4_en_set_mac, | ||
| 1021 | .ndo_validate_addr = eth_validate_addr, | ||
| 1022 | .ndo_change_mtu = mlx4_en_change_mtu, | ||
| 1023 | .ndo_tx_timeout = mlx4_en_tx_timeout, | ||
| 1024 | .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, | ||
| 1025 | .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, | ||
| 1026 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1027 | .ndo_poll_controller = mlx4_en_netpoll, | ||
| 1028 | #endif | ||
| 1029 | }; | ||
| 1030 | |||
| 1031 | int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | ||
| 1032 | struct mlx4_en_port_profile *prof) | ||
| 1033 | { | ||
| 1034 | struct net_device *dev; | ||
| 1035 | struct mlx4_en_priv *priv; | ||
| 1036 | int i; | ||
| 1037 | int err; | ||
| 1038 | |||
| 1039 | dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), | ||
| 1040 | prof->tx_ring_num, prof->rx_ring_num); | ||
| 1041 | if (dev == NULL) { | ||
| 1042 | mlx4_err(mdev, "Net device allocation failed\n"); | ||
| 1043 | return -ENOMEM; | ||
| 1044 | } | ||
| 1045 | |||
| 1046 | SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev); | ||
| 1047 | dev->dev_id = port - 1; | ||
| 1048 | |||
| 1049 | /* | ||
| 1050 | * Initialize driver private data | ||
| 1051 | */ | ||
| 1052 | |||
| 1053 | priv = netdev_priv(dev); | ||
| 1054 | memset(priv, 0, sizeof(struct mlx4_en_priv)); | ||
| 1055 | priv->dev = dev; | ||
| 1056 | priv->mdev = mdev; | ||
| 1057 | priv->prof = prof; | ||
| 1058 | priv->port = port; | ||
| 1059 | priv->port_up = false; | ||
| 1060 | priv->flags = prof->flags; | ||
| 1061 | priv->tx_ring_num = prof->tx_ring_num; | ||
| 1062 | priv->rx_ring_num = prof->rx_ring_num; | ||
| 1063 | priv->mac_index = -1; | ||
| 1064 | priv->msg_enable = MLX4_EN_MSG_LEVEL; | ||
| 1065 | spin_lock_init(&priv->stats_lock); | ||
| 1066 | INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast); | ||
| 1067 | INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac); | ||
| 1068 | INIT_WORK(&priv->watchdog_task, mlx4_en_restart); | ||
| 1069 | INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); | ||
| 1070 | INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); | ||
| 1071 | |||
| 1072 | /* Query for the default MAC and max MTU */ | ||
| 1073 | priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; | ||
| 1074 | priv->mac = mdev->dev->caps.def_mac[priv->port]; | ||
| 1075 | if (ILLEGAL_MAC(priv->mac)) { | ||
| 1076 | en_err(priv, "Port: %d, invalid MAC burned: 0x%llx, quitting\n", | ||
| 1077 | priv->port, priv->mac); | ||
| 1078 | err = -EINVAL; | ||
| 1079 | goto out; | ||
| 1080 | } | ||
| 1081 | |||
| 1082 | priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + | ||
| 1083 | DS_SIZE * MLX4_EN_MAX_RX_FRAGS); | ||
| 1084 | err = mlx4_en_alloc_resources(priv); | ||
| 1085 | if (err) | ||
| 1086 | goto out; | ||
| 1087 | |||
| 1088 | /* Allocate page for receive rings */ | ||
| 1089 | err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, | ||
| 1090 | MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); | ||
| 1091 | if (err) { | ||
| 1092 | en_err(priv, "Failed to allocate page for rx qps\n"); | ||
| 1093 | goto out; | ||
| 1094 | } | ||
| 1095 | priv->allocated = 1; | ||
| 1096 | |||
| 1097 | /* | ||
| 1098 | * Initialize netdev entry points | ||
| 1099 | */ | ||
| 1100 | dev->netdev_ops = &mlx4_netdev_ops; | ||
| 1101 | dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; | ||
| 1102 | netif_set_real_num_tx_queues(dev, priv->tx_ring_num); | ||
| 1103 | netif_set_real_num_rx_queues(dev, priv->rx_ring_num); | ||
| 1104 | |||
| 1105 | SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops); | ||
| 1106 | |||
| 1107 | /* Set default MAC */ | ||
| 1108 | dev->addr_len = ETH_ALEN; | ||
| 1109 | for (i = 0; i < ETH_ALEN; i++) { | ||
| 1110 | dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i)); | ||
| 1111 | dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i)); | ||
| 1112 | } | ||
| 1113 | |||
| 1114 | /* | ||
| 1115 | * Set driver features | ||
| 1116 | */ | ||
| 1117 | dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | ||
| 1118 | if (mdev->LSO_support) | ||
| 1119 | dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; | ||
| 1120 | |||
| 1121 | dev->vlan_features = dev->hw_features; | ||
| 1122 | |||
| 1123 | dev->hw_features |= NETIF_F_RXCSUM; | ||
| 1124 | dev->features = dev->hw_features | NETIF_F_HIGHDMA | | ||
| 1125 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | | ||
| 1126 | NETIF_F_HW_VLAN_FILTER; | ||
| 1127 | |||
| 1128 | mdev->pndev[port] = dev; | ||
| 1129 | |||
| 1130 | netif_carrier_off(dev); | ||
| 1131 | err = register_netdev(dev); | ||
| 1132 | if (err) { | ||
| 1133 | en_err(priv, "Netdev registration failed for port %d\n", port); | ||
| 1134 | goto out; | ||
| 1135 | } | ||
| 1136 | |||
| 1137 | en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); | ||
| 1138 | en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); | ||
| 1139 | |||
| 1140 | /* Configure port */ | ||
| 1141 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, | ||
| 1142 | MLX4_EN_MIN_MTU, | ||
| 1143 | 0, 0, 0, 0); | ||
| 1144 | if (err) { | ||
| 1145 | en_err(priv, "Failed setting port general configurations " | ||
| 1146 | "for port %d, with error %d\n", priv->port, err); | ||
| 1147 | goto out; | ||
| 1148 | } | ||
| 1149 | |||
| 1150 | /* Init port */ | ||
| 1151 | en_warn(priv, "Initializing port\n"); | ||
| 1152 | err = mlx4_INIT_PORT(mdev->dev, priv->port); | ||
| 1153 | if (err) { | ||
| 1154 | en_err(priv, "Failed Initializing port\n"); | ||
| 1155 | goto out; | ||
| 1156 | } | ||
| 1157 | priv->registered = 1; | ||
| 1158 | mlx4_en_set_default_moderation(priv); | ||
| 1159 | queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); | ||
| 1160 | return 0; | ||
| 1161 | |||
| 1162 | out: | ||
| 1163 | mlx4_en_destroy_netdev(dev); | ||
| 1164 | return err; | ||
| 1165 | } | ||
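The dev_addr loop above expands a MAC held in the low 48 bits of a u64 into network byte order, most significant byte first. The same transformation as a stand-alone helper (the sample value is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_ALEN 6

    /* Expand a MAC held in the low 48 bits of a u64 into big-endian
     * (network) byte order, as the dev_addr loop above does. */
    static void mac_u64_to_bytes(uint64_t mac, uint8_t addr[ETH_ALEN])
    {
            int i;

            for (i = 0; i < ETH_ALEN; i++)
                    addr[ETH_ALEN - 1 - i] = (uint8_t)(mac >> (8 * i));
    }

    int main(void)
    {
            uint8_t addr[ETH_ALEN];
            int i;

            mac_u64_to_bytes(0x0002c9abcdefULL, addr);      /* made-up MAC */
            for (i = 0; i < ETH_ALEN; i++)
                    printf("%02x%c", addr[i], i == ETH_ALEN - 1 ? '\n' : ':');
            return 0;
    }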
| 1166 | |||
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c new file mode 100644 index 00000000000..5ada5b46911 --- /dev/null +++ b/drivers/net/mlx4/en_port.c | |||
| @@ -0,0 +1,278 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
| 3 | * | ||
| 4 | * This software is available to you under a choice of one of two | ||
| 5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 6 | * General Public License (GPL) Version 2, available from the file | ||
| 7 | * COPYING in the main directory of this source tree, or the | ||
| 8 | * OpenIB.org BSD license below: | ||
| 9 | * | ||
| 10 | * Redistribution and use in source and binary forms, with or | ||
| 11 | * without modification, are permitted provided that the following | ||
| 12 | * conditions are met: | ||
| 13 | * | ||
| 14 | * - Redistributions of source code must retain the above | ||
| 15 | * copyright notice, this list of conditions and the following | ||
| 16 | * disclaimer. | ||
| 17 | * | ||
| 18 | * - Redistributions in binary form must reproduce the above | ||
| 19 | * copyright notice, this list of conditions and the following | ||
| 20 | * disclaimer in the documentation and/or other materials | ||
| 21 | * provided with the distribution. | ||
| 22 | * | ||
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 30 | * SOFTWARE. | ||
| 31 | * | ||
| 32 | */ | ||
| 33 | |||
| 34 | |||
| 35 | #include <linux/if_vlan.h> | ||
| 36 | |||
| 37 | #include <linux/mlx4/device.h> | ||
| 38 | #include <linux/mlx4/cmd.h> | ||
| 39 | |||
| 40 | #include "en_port.h" | ||
| 41 | #include "mlx4_en.h" | ||
| 42 | |||
| 43 | |||
| 44 | int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, | ||
| 45 | u64 mac, u64 clear, u8 mode) | ||
| 46 | { | ||
| 47 | return mlx4_cmd(dev, (mac | (clear << 63)), port, mode, | ||
| 48 | MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B); | ||
| 49 | } | ||
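The command passes everything through one 64-bit immediate: the 48-bit MAC sits in the low bits and the "clear" flag is folded into bit 63, exactly as (mac | (clear << 63)) above. A sketch of that packing (the MAC value is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Pack the SET_MCAST_FLTR immediate: 48-bit MAC in the low bits,
     * "clear" flag in bit 63. */
    static uint64_t mcast_fltr_param(uint64_t mac, int clear)
    {
            return mac | ((uint64_t)!!clear << 63);
    }

    int main(void)
    {
            printf("0x%016llx\n",
                   (unsigned long long)mcast_fltr_param(0x0002c9abcdefULL, 1));
            return 0;
    }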
| 50 | |||
| 51 | int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv) | ||
| 52 | { | ||
| 53 | struct mlx4_cmd_mailbox *mailbox; | ||
| 54 | struct mlx4_set_vlan_fltr_mbox *filter; | ||
| 55 | int i; | ||
| 56 | int j; | ||
| 57 | int index = 0; | ||
| 58 | u32 entry; | ||
| 59 | int err = 0; | ||
| 60 | |||
| 61 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 62 | if (IS_ERR(mailbox)) | ||
| 63 | return PTR_ERR(mailbox); | ||
| 64 | |||
| 65 | filter = mailbox->buf; | ||
| 66 | memset(filter, 0, sizeof(*filter)); | ||
| 67 | for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) { | ||
| 68 | entry = 0; | ||
| 69 | for (j = 0; j < 32; j++) | ||
| 70 | if (test_bit(index++, priv->active_vlans)) | ||
| 71 | entry |= 1 << j; | ||
| 72 | filter->entry[i] = cpu_to_be32(entry); | ||
| 73 | } | ||
| 74 | err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR, | ||
| 75 | MLX4_CMD_TIME_CLASS_B); | ||
| 76 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 77 | return err; | ||
| 78 | } | ||
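The loop above walks the 4096-entry VLAN bitmap 32 bits at a time and fills the mailbox from its last word backwards, so entry[VLAN_FLTR_SIZE - 1] carries VIDs 0..31. A user-space sketch of the same packing (cpu_to_be32() is left out; the driver applies it per word):

    #include <stdint.h>
    #include <stdio.h>

    #define VLAN_FLTR_SIZE 128              /* 128 words * 32 bits = 4096 VIDs */

    static int test_vid(const uint32_t *bitmap, int vid)
    {
            return (bitmap[vid / 32] >> (vid % 32)) & 1;
    }

    /* Pack a 4096-bit VLAN bitmap into the mailbox layout built above:
     * the last word carries VIDs 0..31, the one before it VIDs 32..63,
     * and so on. */
    static void pack_vlan_filter(const uint32_t *active,
                                 uint32_t entry[VLAN_FLTR_SIZE])
    {
            int i, j, index = 0;

            for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
                    uint32_t word = 0;
                    for (j = 0; j < 32; j++, index++)
                            if (test_vid(active, index))
                                    word |= 1U << j;
                    entry[i] = word;
            }
    }

    int main(void)
    {
            uint32_t active[VLAN_FLTR_SIZE] = {0}, entry[VLAN_FLTR_SIZE];

            active[0] |= 1U << 5;           /* mark VID 5 active */
            pack_vlan_filter(active, entry);
            printf("entry[127] = 0x%08x\n", entry[VLAN_FLTR_SIZE - 1]);
            return 0;
    }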
| 79 | |||
| 80 | |||
| 81 | int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, | ||
| 82 | u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx) | ||
| 83 | { | ||
| 84 | struct mlx4_cmd_mailbox *mailbox; | ||
| 85 | struct mlx4_set_port_general_context *context; | ||
| 86 | int err; | ||
| 87 | u32 in_mod; | ||
| 88 | |||
| 89 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 90 | if (IS_ERR(mailbox)) | ||
| 91 | return PTR_ERR(mailbox); | ||
| 92 | context = mailbox->buf; | ||
| 93 | memset(context, 0, sizeof *context); | ||
| 94 | |||
| 95 | context->flags = SET_PORT_GEN_ALL_VALID; | ||
| 96 | context->mtu = cpu_to_be16(mtu); | ||
| 97 | context->pptx = (pptx * (!pfctx)) << 7; | ||
| 98 | context->pfctx = pfctx; | ||
| 99 | context->pprx = (pprx * (!pfcrx)) << 7; | ||
| 100 | context->pfcrx = pfcrx; | ||
| 101 | |||
| 102 | in_mod = MLX4_SET_PORT_GENERAL << 8 | port; | ||
| 103 | err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, | ||
| 104 | MLX4_CMD_TIME_CLASS_B); | ||
| 105 | |||
| 106 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 107 | return err; | ||
| 108 | } | ||
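The pptx/pprx expressions deserve a second look: the enable flag lands in bit 7 of the byte, and multiplying by !pfctx (resp. !pfcrx) forces global pause off whenever any per-priority flow-control bits are set, since the two are mutually exclusive. A sketch of the encoding:

    #include <stdint.h>
    #include <stdio.h>

    /* Encode one pause byte as SET_PORT_general does: enable bit in
     * bit 7, suppressed whenever the PFC priority mask is non-zero. */
    static uint8_t encode_pause(uint8_t pause_en, uint8_t pfc_mask)
    {
            return (uint8_t)((pause_en * !pfc_mask) << 7);
    }

    int main(void)
    {
            printf("pause on, no PFC:  0x%02x\n", encode_pause(1, 0x00));
            printf("pause on, PFC set: 0x%02x\n", encode_pause(1, 0x08));
            return 0;
    }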
| 109 | |||
| 110 | int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, | ||
| 111 | u8 promisc) | ||
| 112 | { | ||
| 113 | struct mlx4_cmd_mailbox *mailbox; | ||
| 114 | struct mlx4_set_port_rqp_calc_context *context; | ||
| 115 | int err; | ||
| 116 | u32 in_mod; | ||
| 117 | u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ? | ||
| 118 | MCAST_DIRECT : MCAST_DEFAULT; | ||
| 119 | |||
| 120 | if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER && | ||
| 121 | dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) | ||
| 122 | return 0; | ||
| 123 | |||
| 124 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 125 | if (IS_ERR(mailbox)) | ||
| 126 | return PTR_ERR(mailbox); | ||
| 127 | context = mailbox->buf; | ||
| 128 | memset(context, 0, sizeof *context); | ||
| 129 | |||
| 130 | context->base_qpn = cpu_to_be32(base_qpn); | ||
| 131 | context->n_mac = 0x2; | ||
| 132 | context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | | ||
| 133 | base_qpn); | ||
| 134 | context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT | | ||
| 135 | base_qpn); | ||
| 136 | context->intra_no_vlan = 0; | ||
| 137 | context->no_vlan = MLX4_NO_VLAN_IDX; | ||
| 138 | context->intra_vlan_miss = 0; | ||
| 139 | context->vlan_miss = MLX4_VLAN_MISS_IDX; | ||
| 140 | |||
| 141 | in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port; | ||
| 142 | err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, | ||
| 143 | MLX4_CMD_TIME_CLASS_B); | ||
| 144 | |||
| 145 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 146 | return err; | ||
| 147 | } | ||
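The promisc and mcast words pack a flag into the top bits of a 32-bit value whose low bits carry the base QPN: bit 31 for unicast promiscuity, bit 30 for the multicast mode. A sketch of that packing (the base QPN is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define SET_PORT_PROMISC_SHIFT    31
    #define SET_PORT_MC_PROMISC_SHIFT 30

    /* Fold a promiscuity flag into the top of a word whose low bits
     * carry the base QPN, as the context setup above does. */
    static uint32_t promisc_word(uint32_t base_qpn, int on, int shift)
    {
            return (uint32_t)!!on << shift | base_qpn;
    }

    int main(void)
    {
            printf("promisc: 0x%08x\n",
                   promisc_word(0x400, 1, SET_PORT_PROMISC_SHIFT));
            printf("mcast:   0x%08x\n",
                   promisc_word(0x400, 0, SET_PORT_MC_PROMISC_SHIFT));
            return 0;
    }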
| 148 | |||
| 149 | int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port) | ||
| 150 | { | ||
| 151 | struct mlx4_en_query_port_context *qport_context; | ||
| 152 | struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]); | ||
| 153 | struct mlx4_en_port_state *state = &priv->port_state; | ||
| 154 | struct mlx4_cmd_mailbox *mailbox; | ||
| 155 | int err; | ||
| 156 | |||
| 157 | mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); | ||
| 158 | if (IS_ERR(mailbox)) | ||
| 159 | return PTR_ERR(mailbox); | ||
| 160 | memset(mailbox->buf, 0, sizeof(*qport_context)); | ||
| 161 | err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0, | ||
| 162 | MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B); | ||
| 163 | if (err) | ||
| 164 | goto out; | ||
| 165 | qport_context = mailbox->buf; | ||
| 166 | |||
| 167 | /* This command is always called from ethtool context, which is | ||
| 168 | * already serialized, so no locking is needed */ | ||
| 169 | state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK); | ||
| 170 | if ((qport_context->link_speed & MLX4_EN_SPEED_MASK) == | ||
| 171 | MLX4_EN_1G_SPEED) | ||
| 172 | state->link_speed = 1000; | ||
| 173 | else | ||
| 174 | state->link_speed = 10000; | ||
| 175 | state->transciver = qport_context->transceiver; | ||
| 176 | |||
| 177 | out: | ||
| 178 | mlx4_free_cmd_mailbox(mdev->dev, mailbox); | ||
| 179 | return err; | ||
| 180 | } | ||
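Decoding the link speed is a two-bit affair: firmware reports 0x2 (after masking with 0x3) for 1 Gb/s, and anything else is taken as 10 Gb/s, the only other rate this interface distinguishes. As a stand-alone helper:

    #include <stdio.h>

    #define MLX4_EN_SPEED_MASK 0x3
    #define MLX4_EN_1G_SPEED   0x2

    /* Decode the QUERY_PORT speed field the way the function above does. */
    static int link_speed_mbps(unsigned char link_speed)
    {
            return ((link_speed & MLX4_EN_SPEED_MASK) == MLX4_EN_1G_SPEED) ?
                   1000 : 10000;
    }

    int main(void)
    {
            printf("%d\n", link_speed_mbps(0x02));  /* -> 1000  */
            printf("%d\n", link_speed_mbps(0x00));  /* -> 10000 */
            return 0;
    }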
| 181 | |||
| 182 | int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | ||
| 183 | { | ||
| 184 | struct mlx4_en_stat_out_mbox *mlx4_en_stats; | ||
| 185 | struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]); | ||
| 186 | struct net_device_stats *stats = &priv->stats; | ||
| 187 | struct mlx4_cmd_mailbox *mailbox; | ||
| 188 | u64 in_mod = reset << 8 | port; | ||
| 189 | int err; | ||
| 190 | int i; | ||
| 191 | |||
| 192 | mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); | ||
| 193 | if (IS_ERR(mailbox)) | ||
| 194 | return PTR_ERR(mailbox); | ||
| 195 | memset(mailbox->buf, 0, sizeof(*mlx4_en_stats)); | ||
| 196 | err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0, | ||
| 197 | MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B); | ||
| 198 | if (err) | ||
| 199 | goto out; | ||
| 200 | |||
| 201 | mlx4_en_stats = mailbox->buf; | ||
| 202 | |||
| 203 | spin_lock_bh(&priv->stats_lock); | ||
| 204 | |||
| 205 | stats->rx_packets = 0; | ||
| 206 | stats->rx_bytes = 0; | ||
| 207 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
| 208 | stats->rx_packets += priv->rx_ring[i].packets; | ||
| 209 | stats->rx_bytes += priv->rx_ring[i].bytes; | ||
| 210 | } | ||
| 211 | stats->tx_packets = 0; | ||
| 212 | stats->tx_bytes = 0; | ||
| 213 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
| 214 | stats->tx_packets += priv->tx_ring[i].packets; | ||
| 215 | stats->tx_bytes += priv->tx_ring[i].bytes; | ||
| 216 | } | ||
| 217 | |||
| 218 | stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) + | ||
| 219 | be32_to_cpu(mlx4_en_stats->RdropLength) + | ||
| 220 | be32_to_cpu(mlx4_en_stats->RJBBR) + | ||
| 221 | be32_to_cpu(mlx4_en_stats->RCRC) + | ||
| 222 | be32_to_cpu(mlx4_en_stats->RRUNT); | ||
| 223 | stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP); | ||
| 224 | stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) + | ||
| 225 | be64_to_cpu(mlx4_en_stats->MCAST_prio_1) + | ||
| 226 | be64_to_cpu(mlx4_en_stats->MCAST_prio_2) + | ||
| 227 | be64_to_cpu(mlx4_en_stats->MCAST_prio_3) + | ||
| 228 | be64_to_cpu(mlx4_en_stats->MCAST_prio_4) + | ||
| 229 | be64_to_cpu(mlx4_en_stats->MCAST_prio_5) + | ||
| 230 | be64_to_cpu(mlx4_en_stats->MCAST_prio_6) + | ||
| 231 | be64_to_cpu(mlx4_en_stats->MCAST_prio_7) + | ||
| 232 | be64_to_cpu(mlx4_en_stats->MCAST_novlan); | ||
| 233 | stats->collisions = 0; | ||
| 234 | stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); | ||
| 235 | stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); | ||
| 236 | stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); | ||
| 237 | stats->rx_frame_errors = 0; | ||
| 238 | stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); | ||
| 239 | stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); | ||
| 240 | stats->tx_aborted_errors = 0; | ||
| 241 | stats->tx_carrier_errors = 0; | ||
| 242 | stats->tx_fifo_errors = 0; | ||
| 243 | stats->tx_heartbeat_errors = 0; | ||
| 244 | stats->tx_window_errors = 0; | ||
| 245 | |||
| 246 | priv->pkstats.broadcast = | ||
| 247 | be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) + | ||
| 248 | be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) + | ||
| 249 | be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) + | ||
| 250 | be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) + | ||
| 251 | be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) + | ||
| 252 | be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) + | ||
| 253 | be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) + | ||
| 254 | be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) + | ||
| 255 | be64_to_cpu(mlx4_en_stats->RBCAST_novlan); | ||
| 256 | priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0); | ||
| 257 | priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1); | ||
| 258 | priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2); | ||
| 259 | priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3); | ||
| 260 | priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4); | ||
| 261 | priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5); | ||
| 262 | priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6); | ||
| 263 | priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7); | ||
| 264 | priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0); | ||
| 265 | priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1); | ||
| 266 | priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2); | ||
| 267 | priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3); | ||
| 268 | priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4); | ||
| 269 | priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5); | ||
| 270 | priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6); | ||
| 271 | priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7); | ||
| 272 | spin_unlock_bh(&priv->stats_lock); | ||
| 273 | |||
| 274 | out: | ||
| 275 | mlx4_free_cmd_mailbox(mdev->dev, mailbox); | ||
| 276 | return err; | ||
| 277 | } | ||
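The input modifier encodes both arguments of the firmware command: the reset flag above the low byte and the port number in the low byte, matching "reset << 8 | port" above. A sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* DUMP_ETH_STATS packs its "reset counters" flag and the port
     * number into the command's input modifier. */
    static uint32_t dump_stats_in_mod(uint8_t reset, uint8_t port)
    {
            return (uint32_t)reset << 8 | port;
    }

    int main(void)
    {
            printf("0x%x\n", dump_stats_in_mod(1, 2));      /* -> 0x102 */
            return 0;
    }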
| 278 | |||
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h new file mode 100644 index 00000000000..e3d73e41c56 --- /dev/null +++ b/drivers/net/mlx4/en_port.h | |||
| @@ -0,0 +1,594 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
| 3 | * | ||
| 4 | * This software is available to you under a choice of one of two | ||
| 5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 6 | * General Public License (GPL) Version 2, available from the file | ||
| 7 | * COPYING in the main directory of this source tree, or the | ||
| 8 | * OpenIB.org BSD license below: | ||
| 9 | * | ||
| 10 | * Redistribution and use in source and binary forms, with or | ||
| 11 | * without modification, are permitted provided that the following | ||
| 12 | * conditions are met: | ||
| 13 | * | ||
| 14 | * - Redistributions of source code must retain the above | ||
| 15 | * copyright notice, this list of conditions and the following | ||
| 16 | * disclaimer. | ||
| 17 | * | ||
| 18 | * - Redistributions in binary form must reproduce the above | ||
| 19 | * copyright notice, this list of conditions and the following | ||
| 20 | * disclaimer in the documentation and/or other materials | ||
| 21 | * provided with the distribution. | ||
| 22 | * | ||
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 30 | * SOFTWARE. | ||
| 31 | * | ||
| 32 | */ | ||
| 33 | |||
| 34 | #ifndef _MLX4_EN_PORT_H_ | ||
| 35 | #define _MLX4_EN_PORT_H_ | ||
| 36 | |||
| 37 | |||
| 38 | #define SET_PORT_GEN_ALL_VALID 0x7 | ||
| 39 | #define SET_PORT_PROMISC_SHIFT 31 | ||
| 40 | #define SET_PORT_MC_PROMISC_SHIFT 30 | ||
| 41 | |||
| 42 | enum { | ||
| 43 | MLX4_CMD_SET_VLAN_FLTR = 0x47, | ||
| 44 | MLX4_CMD_SET_MCAST_FLTR = 0x48, | ||
| 45 | MLX4_CMD_DUMP_ETH_STATS = 0x49, | ||
| 46 | }; | ||
| 47 | |||
| 48 | enum { | ||
| 49 | MCAST_DIRECT_ONLY = 0, | ||
| 50 | MCAST_DIRECT = 1, | ||
| 51 | MCAST_DEFAULT = 2 | ||
| 52 | }; | ||
| 53 | |||
| 54 | struct mlx4_set_port_general_context { | ||
| 55 | u8 reserved[3]; | ||
| 56 | u8 flags; | ||
| 57 | u16 reserved2; | ||
| 58 | __be16 mtu; | ||
| 59 | u8 pptx; | ||
| 60 | u8 pfctx; | ||
| 61 | u16 reserved3; | ||
| 62 | u8 pprx; | ||
| 63 | u8 pfcrx; | ||
| 64 | u16 reserved4; | ||
| 65 | }; | ||
| 66 | |||
| 67 | struct mlx4_set_port_rqp_calc_context { | ||
| 68 | __be32 base_qpn; | ||
| 69 | u8 reserved; | ||
| 70 | u8 n_mac; | ||
| 71 | u8 n_vlan; | ||
| 72 | u8 n_prio; | ||
| 73 | u8 reserved2[3]; | ||
| 74 | u8 mac_miss; | ||
| 75 | u8 intra_no_vlan; | ||
| 76 | u8 no_vlan; | ||
| 77 | u8 intra_vlan_miss; | ||
| 78 | u8 vlan_miss; | ||
| 79 | u8 reserved3[3]; | ||
| 80 | u8 no_vlan_prio; | ||
| 81 | __be32 promisc; | ||
| 82 | __be32 mcast; | ||
| 83 | }; | ||
| 84 | |||
| 85 | #define VLAN_FLTR_SIZE 128 | ||
| 86 | struct mlx4_set_vlan_fltr_mbox { | ||
| 87 | __be32 entry[VLAN_FLTR_SIZE]; | ||
| 88 | }; | ||
| 89 | |||
| 90 | |||
| 91 | enum { | ||
| 92 | MLX4_MCAST_CONFIG = 0, | ||
| 93 | MLX4_MCAST_DISABLE = 1, | ||
| 94 | MLX4_MCAST_ENABLE = 2, | ||
| 95 | }; | ||
| 96 | |||
| 97 | struct mlx4_en_query_port_context { | ||
| 98 | u8 link_up; | ||
| 99 | #define MLX4_EN_LINK_UP_MASK 0x80 | ||
| 100 | u8 reserved; | ||
| 101 | __be16 mtu; | ||
| 102 | u8 reserved2; | ||
| 103 | u8 link_speed; | ||
| 104 | #define MLX4_EN_SPEED_MASK 0x3 | ||
| 105 | #define MLX4_EN_1G_SPEED 0x2 | ||
| 106 | u16 reserved3[5]; | ||
| 107 | __be64 mac; | ||
| 108 | u8 transceiver; | ||
| 109 | }; | ||
| 110 | |||
| 111 | |||
| 112 | struct mlx4_en_stat_out_mbox { | ||
| 113 | /* Received frames with a length of 64 octets */ | ||
| 114 | __be64 R64_prio_0; | ||
| 115 | __be64 R64_prio_1; | ||
| 116 | __be64 R64_prio_2; | ||
| 117 | __be64 R64_prio_3; | ||
| 118 | __be64 R64_prio_4; | ||
| 119 | __be64 R64_prio_5; | ||
| 120 | __be64 R64_prio_6; | ||
| 121 | __be64 R64_prio_7; | ||
| 122 | __be64 R64_novlan; | ||
| 123 | /* Received frames with a length of 65 to 127 octets */ | ||
| 124 | __be64 R127_prio_0; | ||
| 125 | __be64 R127_prio_1; | ||
| 126 | __be64 R127_prio_2; | ||
| 127 | __be64 R127_prio_3; | ||
| 128 | __be64 R127_prio_4; | ||
| 129 | __be64 R127_prio_5; | ||
| 130 | __be64 R127_prio_6; | ||
| 131 | __be64 R127_prio_7; | ||
| 132 | __be64 R127_novlan; | ||
| 133 | /* Received frames with a length of 128 to 255 octets */ | ||
| 134 | __be64 R255_prio_0; | ||
| 135 | __be64 R255_prio_1; | ||
| 136 | __be64 R255_prio_2; | ||
| 137 | __be64 R255_prio_3; | ||
| 138 | __be64 R255_prio_4; | ||
| 139 | __be64 R255_prio_5; | ||
| 140 | __be64 R255_prio_6; | ||
| 141 | __be64 R255_prio_7; | ||
| 142 | __be64 R255_novlan; | ||
| 143 | /* Received frames with a length of 256 to 511 octets */ | ||
| 144 | __be64 R511_prio_0; | ||
| 145 | __be64 R511_prio_1; | ||
| 146 | __be64 R511_prio_2; | ||
| 147 | __be64 R511_prio_3; | ||
| 148 | __be64 R511_prio_4; | ||
| 149 | __be64 R511_prio_5; | ||
| 150 | __be64 R511_prio_6; | ||
| 151 | __be64 R511_prio_7; | ||
| 152 | __be64 R511_novlan; | ||
| 153 | /* Received frames with a length of 512 to 1023 octets */ | ||
| 154 | __be64 R1023_prio_0; | ||
| 155 | __be64 R1023_prio_1; | ||
| 156 | __be64 R1023_prio_2; | ||
| 157 | __be64 R1023_prio_3; | ||
| 158 | __be64 R1023_prio_4; | ||
| 159 | __be64 R1023_prio_5; | ||
| 160 | __be64 R1023_prio_6; | ||
| 161 | __be64 R1023_prio_7; | ||
| 162 | __be64 R1023_novlan; | ||
| 163 | /* Received frames with a length of 1024 to 1518 octets */ | ||
| 164 | __be64 R1518_prio_0; | ||
| 165 | __be64 R1518_prio_1; | ||
| 166 | __be64 R1518_prio_2; | ||
| 167 | __be64 R1518_prio_3; | ||
| 168 | __be64 R1518_prio_4; | ||
| 169 | __be64 R1518_prio_5; | ||
| 170 | __be64 R1518_prio_6; | ||
| 171 | __be64 R1518_prio_7; | ||
| 172 | __be64 R1518_novlan; | ||
| 173 | /* Received frames with a length of 1519 to 1522 octets */ | ||
| 174 | __be64 R1522_prio_0; | ||
| 175 | __be64 R1522_prio_1; | ||
| 176 | __be64 R1522_prio_2; | ||
| 177 | __be64 R1522_prio_3; | ||
| 178 | __be64 R1522_prio_4; | ||
| 179 | __be64 R1522_prio_5; | ||
| 180 | __be64 R1522_prio_6; | ||
| 181 | __be64 R1522_prio_7; | ||
| 182 | __be64 R1522_novlan; | ||
| 183 | /* Received frames with a length of 1523 to 1548 octets */ | ||
| 184 | __be64 R1548_prio_0; | ||
| 185 | __be64 R1548_prio_1; | ||
| 186 | __be64 R1548_prio_2; | ||
| 187 | __be64 R1548_prio_3; | ||
| 188 | __be64 R1548_prio_4; | ||
| 189 | __be64 R1548_prio_5; | ||
| 190 | __be64 R1548_prio_6; | ||
| 191 | __be64 R1548_prio_7; | ||
| 192 | __be64 R1548_novlan; | ||
| 193 | /* Received frames with a length of 1549 octets to the MTU */ | ||
| 194 | __be64 R2MTU_prio_0; | ||
| 195 | __be64 R2MTU_prio_1; | ||
| 196 | __be64 R2MTU_prio_2; | ||
| 197 | __be64 R2MTU_prio_3; | ||
| 198 | __be64 R2MTU_prio_4; | ||
| 199 | __be64 R2MTU_prio_5; | ||
| 200 | __be64 R2MTU_prio_6; | ||
| 201 | __be64 R2MTU_prio_7; | ||
| 202 | __be64 R2MTU_novlan; | ||
| 203 | /* Received frames with a length greater than MTU octets and a good CRC */ | ||
| 204 | __be64 RGIANT_prio_0; | ||
| 205 | __be64 RGIANT_prio_1; | ||
| 206 | __be64 RGIANT_prio_2; | ||
| 207 | __be64 RGIANT_prio_3; | ||
| 208 | __be64 RGIANT_prio_4; | ||
| 209 | __be64 RGIANT_prio_5; | ||
| 210 | __be64 RGIANT_prio_6; | ||
| 211 | __be64 RGIANT_prio_7; | ||
| 212 | __be64 RGIANT_novlan; | ||
| 213 | /* Received broadcast frames with good CRC */ | ||
| 214 | __be64 RBCAST_prio_0; | ||
| 215 | __be64 RBCAST_prio_1; | ||
| 216 | __be64 RBCAST_prio_2; | ||
| 217 | __be64 RBCAST_prio_3; | ||
| 218 | __be64 RBCAST_prio_4; | ||
| 219 | __be64 RBCAST_prio_5; | ||
| 220 | __be64 RBCAST_prio_6; | ||
| 221 | __be64 RBCAST_prio_7; | ||
| 222 | __be64 RBCAST_novlan; | ||
| 223 | /* Received multicast frames with good CRC */ | ||
| 224 | __be64 MCAST_prio_0; | ||
| 225 | __be64 MCAST_prio_1; | ||
| 226 | __be64 MCAST_prio_2; | ||
| 227 | __be64 MCAST_prio_3; | ||
| 228 | __be64 MCAST_prio_4; | ||
| 229 | __be64 MCAST_prio_5; | ||
| 230 | __be64 MCAST_prio_6; | ||
| 231 | __be64 MCAST_prio_7; | ||
| 232 | __be64 MCAST_novlan; | ||
| 233 | /* Received unicast frames that are neither short nor GIANT, with a good CRC */ | ||
| 234 | __be64 RTOTG_prio_0; | ||
| 235 | __be64 RTOTG_prio_1; | ||
| 236 | __be64 RTOTG_prio_2; | ||
| 237 | __be64 RTOTG_prio_3; | ||
| 238 | __be64 RTOTG_prio_4; | ||
| 239 | __be64 RTOTG_prio_5; | ||
| 240 | __be64 RTOTG_prio_6; | ||
| 241 | __be64 RTOTG_prio_7; | ||
| 242 | __be64 RTOTG_novlan; | ||
| 243 | |||
| 244 | /* Count of total octets of received frames, includes framing characters */ | ||
| 245 | __be64 RTTLOCT_prio_0; | ||
| 246 | /* Count of total octets of received frames, not including framing | ||
| 247 | characters */ | ||
| 248 | __be64 RTTLOCT_NOFRM_prio_0; | ||
| 249 | /* Count of Total number of octets received | ||
| 250 | (only for frames without errors) */ | ||
| 251 | __be64 ROCT_prio_0; | ||
| 252 | |||
| 253 | __be64 RTTLOCT_prio_1; | ||
| 254 | __be64 RTTLOCT_NOFRM_prio_1; | ||
| 255 | __be64 ROCT_prio_1; | ||
| 256 | |||
| 257 | __be64 RTTLOCT_prio_2; | ||
| 258 | __be64 RTTLOCT_NOFRM_prio_2; | ||
| 259 | __be64 ROCT_prio_2; | ||
| 260 | |||
| 261 | __be64 RTTLOCT_prio_3; | ||
| 262 | __be64 RTTLOCT_NOFRM_prio_3; | ||
| 263 | __be64 ROCT_prio_3; | ||
| 264 | |||
| 265 | __be64 RTTLOCT_prio_4; | ||
| 266 | __be64 RTTLOCT_NOFRM_prio_4; | ||
| 267 | __be64 ROCT_prio_4; | ||
| 268 | |||
| 269 | __be64 RTTLOCT_prio_5; | ||
| 270 | __be64 RTTLOCT_NOFRM_prio_5; | ||
| 271 | __be64 ROCT_prio_5; | ||
| 272 | |||
| 273 | __be64 RTTLOCT_prio_6; | ||
| 274 | __be64 RTTLOCT_NOFRM_prio_6; | ||
| 275 | __be64 ROCT_prio_6; | ||
| 276 | |||
| 277 | __be64 RTTLOCT_prio_7; | ||
| 278 | __be64 RTTLOCT_NOFRM_prio_7; | ||
| 279 | __be64 ROCT_prio_7; | ||
| 280 | |||
| 281 | __be64 RTTLOCT_novlan; | ||
| 282 | __be64 RTTLOCT_NOFRM_novlan; | ||
| 283 | __be64 ROCT_novlan; | ||
| 284 | |||
| 285 | /* Count of Total received frames including bad frames */ | ||
| 286 | __be64 RTOT_prio_0; | ||
| 287 | /* Count of Total number of received frames with 802.1Q encapsulation */ | ||
| 288 | __be64 R1Q_prio_0; | ||
| 289 | __be64 reserved1; | ||
| 290 | |||
| 291 | __be64 RTOT_prio_1; | ||
| 292 | __be64 R1Q_prio_1; | ||
| 293 | __be64 reserved2; | ||
| 294 | |||
| 295 | __be64 RTOT_prio_2; | ||
| 296 | __be64 R1Q_prio_2; | ||
| 297 | __be64 reserved3; | ||
| 298 | |||
| 299 | __be64 RTOT_prio_3; | ||
| 300 | __be64 R1Q_prio_3; | ||
| 301 | __be64 reserved4; | ||
| 302 | |||
| 303 | __be64 RTOT_prio_4; | ||
| 304 | __be64 R1Q_prio_4; | ||
| 305 | __be64 reserved5; | ||
| 306 | |||
| 307 | __be64 RTOT_prio_5; | ||
| 308 | __be64 R1Q_prio_5; | ||
| 309 | __be64 reserved6; | ||
| 310 | |||
| 311 | __be64 RTOT_prio_6; | ||
| 312 | __be64 R1Q_prio_6; | ||
| 313 | __be64 reserved7; | ||
| 314 | |||
| 315 | __be64 RTOT_prio_7; | ||
| 316 | __be64 R1Q_prio_7; | ||
| 317 | __be64 reserved8; | ||
| 318 | |||
| 319 | __be64 RTOT_novlan; | ||
| 320 | __be64 R1Q_novlan; | ||
| 321 | __be64 reserved9; | ||
| 322 | |||
| 323 | /* Total number of Successfully Received Control Frames */ | ||
| 324 | __be64 RCNTL; | ||
| 325 | __be64 reserved10; | ||
| 326 | __be64 reserved11; | ||
| 327 | __be64 reserved12; | ||
| 328 | /* Count of received frames with a length/type field value between 46 | ||
| 329 | (42 for VLAN-tagged frames) and 1500 (also 1500 for VLAN-tagged frames), | ||
| 330 | inclusive */ | ||
| 331 | __be64 RInRangeLengthErr; | ||
| 332 | /* Count of received frames with length/type field between 1501 and 1535 | ||
| 333 | decimal, inclusive */ | ||
| 334 | __be64 ROutRangeLengthErr; | ||
| 335 | /* Count of received frames that are longer than max allowed size for | ||
| 336 | 802.3 frames (1518/1522) */ | ||
| 337 | __be64 RFrmTooLong; | ||
| 338 | /* Count frames received with PCS error */ | ||
| 339 | __be64 PCS; | ||
| 340 | |||
| 341 | /* Transmit frames with a length of 64 octets */ | ||
| 342 | __be64 T64_prio_0; | ||
| 343 | __be64 T64_prio_1; | ||
| 344 | __be64 T64_prio_2; | ||
| 345 | __be64 T64_prio_3; | ||
| 346 | __be64 T64_prio_4; | ||
| 347 | __be64 T64_prio_5; | ||
| 348 | __be64 T64_prio_6; | ||
| 349 | __be64 T64_prio_7; | ||
| 350 | __be64 T64_novlan; | ||
| 351 | __be64 T64_loopbk; | ||
| 352 | /* Transmit frames with a length of 65 to 127 octets. */ | ||
| 353 | __be64 T127_prio_0; | ||
| 354 | __be64 T127_prio_1; | ||
| 355 | __be64 T127_prio_2; | ||
| 356 | __be64 T127_prio_3; | ||
| 357 | __be64 T127_prio_4; | ||
| 358 | __be64 T127_prio_5; | ||
| 359 | __be64 T127_prio_6; | ||
| 360 | __be64 T127_prio_7; | ||
| 361 | __be64 T127_novlan; | ||
| 362 | __be64 T127_loopbk; | ||
| 363 | /* Transmit frames with a length of 128 to 255 octets */ | ||
| 364 | __be64 T255_prio_0; | ||
| 365 | __be64 T255_prio_1; | ||
| 366 | __be64 T255_prio_2; | ||
| 367 | __be64 T255_prio_3; | ||
| 368 | __be64 T255_prio_4; | ||
| 369 | __be64 T255_prio_5; | ||
| 370 | __be64 T255_prio_6; | ||
| 371 | __be64 T255_prio_7; | ||
| 372 | __be64 T255_novlan; | ||
| 373 | __be64 T255_loopbk; | ||
| 374 | /* Transmit frames with a length of 256 to 511 octets */ | ||
| 375 | __be64 T511_prio_0; | ||
| 376 | __be64 T511_prio_1; | ||
| 377 | __be64 T511_prio_2; | ||
| 378 | __be64 T511_prio_3; | ||
| 379 | __be64 T511_prio_4; | ||
| 380 | __be64 T511_prio_5; | ||
| 381 | __be64 T511_prio_6; | ||
| 382 | __be64 T511_prio_7; | ||
| 383 | __be64 T511_novlan; | ||
| 384 | __be64 T511_loopbk; | ||
| 385 | /* Transmit frames with a length of 512 to 1023 octets */ | ||
| 386 | __be64 T1023_prio_0; | ||
| 387 | __be64 T1023_prio_1; | ||
| 388 | __be64 T1023_prio_2; | ||
| 389 | __be64 T1023_prio_3; | ||
| 390 | __be64 T1023_prio_4; | ||
| 391 | __be64 T1023_prio_5; | ||
| 392 | __be64 T1023_prio_6; | ||
| 393 | __be64 T1023_prio_7; | ||
| 394 | __be64 T1023_novlan; | ||
| 395 | __be64 T1023_loopbk; | ||
| 396 | /* Transmit frames with a length of 1024 to 1518 octets */ | ||
| 397 | __be64 T1518_prio_0; | ||
| 398 | __be64 T1518_prio_1; | ||
| 399 | __be64 T1518_prio_2; | ||
| 400 | __be64 T1518_prio_3; | ||
| 401 | __be64 T1518_prio_4; | ||
| 402 | __be64 T1518_prio_5; | ||
| 403 | __be64 T1518_prio_6; | ||
| 404 | __be64 T1518_prio_7; | ||
| 405 | __be64 T1518_novlan; | ||
| 406 | __be64 T1518_loopbk; | ||
| 407 | /* Counts transmit frames with a length of 1519 to 1522 bytes */ | ||
| 408 | __be64 T1522_prio_0; | ||
| 409 | __be64 T1522_prio_1; | ||
| 410 | __be64 T1522_prio_2; | ||
| 411 | __be64 T1522_prio_3; | ||
| 412 | __be64 T1522_prio_4; | ||
| 413 | __be64 T1522_prio_5; | ||
| 414 | __be64 T1522_prio_6; | ||
| 415 | __be64 T1522_prio_7; | ||
| 416 | __be64 T1522_novlan; | ||
| 417 | __be64 T1522_loopbk; | ||
| 418 | /* Transmit frames with a length of 1523 to 1548 octets */ | ||
| 419 | __be64 T1548_prio_0; | ||
| 420 | __be64 T1548_prio_1; | ||
| 421 | __be64 T1548_prio_2; | ||
| 422 | __be64 T1548_prio_3; | ||
| 423 | __be64 T1548_prio_4; | ||
| 424 | __be64 T1548_prio_5; | ||
| 425 | __be64 T1548_prio_6; | ||
| 426 | __be64 T1548_prio_7; | ||
| 427 | __be64 T1548_novlan; | ||
| 428 | __be64 T1548_loopbk; | ||
| 429 | /* Counts transmit frames with a length of 1549 to MTU bytes */ | ||
| 430 | __be64 T2MTU_prio_0; | ||
| 431 | __be64 T2MTU_prio_1; | ||
| 432 | __be64 T2MTU_prio_2; | ||
| 433 | __be64 T2MTU_prio_3; | ||
| 434 | __be64 T2MTU_prio_4; | ||
| 435 | __be64 T2MTU_prio_5; | ||
| 436 | __be64 T2MTU_prio_6; | ||
| 437 | __be64 T2MTU_prio_7; | ||
| 438 | __be64 T2MTU_novlan; | ||
| 439 | __be64 T2MTU_loopbk; | ||
| 440 | /* Transmit frames with a length greater than MTU octets and a good CRC. */ | ||
| 441 | __be64 TGIANT_prio_0; | ||
| 442 | __be64 TGIANT_prio_1; | ||
| 443 | __be64 TGIANT_prio_2; | ||
| 444 | __be64 TGIANT_prio_3; | ||
| 445 | __be64 TGIANT_prio_4; | ||
| 446 | __be64 TGIANT_prio_5; | ||
| 447 | __be64 TGIANT_prio_6; | ||
| 448 | __be64 TGIANT_prio_7; | ||
| 449 | __be64 TGIANT_novlan; | ||
| 450 | __be64 TGIANT_loopbk; | ||
| 451 | /* Transmit broadcast frames with a good CRC */ | ||
| 452 | __be64 TBCAST_prio_0; | ||
| 453 | __be64 TBCAST_prio_1; | ||
| 454 | __be64 TBCAST_prio_2; | ||
| 455 | __be64 TBCAST_prio_3; | ||
| 456 | __be64 TBCAST_prio_4; | ||
| 457 | __be64 TBCAST_prio_5; | ||
| 458 | __be64 TBCAST_prio_6; | ||
| 459 | __be64 TBCAST_prio_7; | ||
| 460 | __be64 TBCAST_novlan; | ||
| 461 | __be64 TBCAST_loopbk; | ||
| 462 | /* Transmit multicast frames with a good CRC */ | ||
| 463 | __be64 TMCAST_prio_0; | ||
| 464 | __be64 TMCAST_prio_1; | ||
| 465 | __be64 TMCAST_prio_2; | ||
| 466 | __be64 TMCAST_prio_3; | ||
| 467 | __be64 TMCAST_prio_4; | ||
| 468 | __be64 TMCAST_prio_5; | ||
| 469 | __be64 TMCAST_prio_6; | ||
| 470 | __be64 TMCAST_prio_7; | ||
| 471 | __be64 TMCAST_novlan; | ||
| 472 | __be64 TMCAST_loopbk; | ||
| 473 | /* Transmit good frames that are neither broadcast nor multicast */ | ||
| 474 | __be64 TTOTG_prio_0; | ||
| 475 | __be64 TTOTG_prio_1; | ||
| 476 | __be64 TTOTG_prio_2; | ||
| 477 | __be64 TTOTG_prio_3; | ||
| 478 | __be64 TTOTG_prio_4; | ||
| 479 | __be64 TTOTG_prio_5; | ||
| 480 | __be64 TTOTG_prio_6; | ||
| 481 | __be64 TTOTG_prio_7; | ||
| 482 | __be64 TTOTG_novlan; | ||
| 483 | __be64 TTOTG_loopbk; | ||
| 484 | |||
| 485 | /* total octets of transmitted frames, including framing characters */ | ||
| 486 | __be64 TTTLOCT_prio_0; | ||
| 487 | /* total octets of transmitted frames, not including framing characters */ | ||
| 488 | __be64 TTTLOCT_NOFRM_prio_0; | ||
| 489 | /* ifOutOctets */ | ||
| 490 | __be64 TOCT_prio_0; | ||
| 491 | |||
| 492 | __be64 TTTLOCT_prio_1; | ||
| 493 | __be64 TTTLOCT_NOFRM_prio_1; | ||
| 494 | __be64 TOCT_prio_1; | ||
| 495 | |||
| 496 | __be64 TTTLOCT_prio_2; | ||
| 497 | __be64 TTTLOCT_NOFRM_prio_2; | ||
| 498 | __be64 TOCT_prio_2; | ||
| 499 | |||
| 500 | __be64 TTTLOCT_prio_3; | ||
| 501 | __be64 TTTLOCT_NOFRM_prio_3; | ||
| 502 | __be64 TOCT_prio_3; | ||
| 503 | |||
| 504 | __be64 TTTLOCT_prio_4; | ||
| 505 | __be64 TTTLOCT_NOFRM_prio_4; | ||
| 506 | __be64 TOCT_prio_4; | ||
| 507 | |||
| 508 | __be64 TTTLOCT_prio_5; | ||
| 509 | __be64 TTTLOCT_NOFRM_prio_5; | ||
| 510 | __be64 TOCT_prio_5; | ||
| 511 | |||
| 512 | __be64 TTTLOCT_prio_6; | ||
| 513 | __be64 TTTLOCT_NOFRM_prio_6; | ||
| 514 | __be64 TOCT_prio_6; | ||
| 515 | |||
| 516 | __be64 TTTLOCT_prio_7; | ||
| 517 | __be64 TTTLOCT_NOFRM_prio_7; | ||
| 518 | __be64 TOCT_prio_7; | ||
| 519 | |||
| 520 | __be64 TTTLOCT_novlan; | ||
| 521 | __be64 TTTLOCT_NOFRM_novlan; | ||
| 522 | __be64 TOCT_novlan; | ||
| 523 | |||
| 524 | __be64 TTTLOCT_loopbk; | ||
| 525 | __be64 TTTLOCT_NOFRM_loopbk; | ||
| 526 | __be64 TOCT_loopbk; | ||
| 527 | |||
| 528 | /* Total frames transmitted with a good CRC that are not aborted */ | ||
| 529 | __be64 TTOT_prio_0; | ||
| 530 | /* Total number of frames transmitted with 802.1Q encapsulation */ | ||
| 531 | __be64 T1Q_prio_0; | ||
| 532 | __be64 reserved13; | ||
| 533 | |||
| 534 | __be64 TTOT_prio_1; | ||
| 535 | __be64 T1Q_prio_1; | ||
| 536 | __be64 reserved14; | ||
| 537 | |||
| 538 | __be64 TTOT_prio_2; | ||
| 539 | __be64 T1Q_prio_2; | ||
| 540 | __be64 reserved15; | ||
| 541 | |||
| 542 | __be64 TTOT_prio_3; | ||
| 543 | __be64 T1Q_prio_3; | ||
| 544 | __be64 reserved16; | ||
| 545 | |||
| 546 | __be64 TTOT_prio_4; | ||
| 547 | __be64 T1Q_prio_4; | ||
| 548 | __be64 reserved17; | ||
| 549 | |||
| 550 | __be64 TTOT_prio_5; | ||
| 551 | __be64 T1Q_prio_5; | ||
| 552 | __be64 reserved18; | ||
| 553 | |||
| 554 | __be64 TTOT_prio_6; | ||
| 555 | __be64 T1Q_prio_6; | ||
| 556 | __be64 reserved19; | ||
| 557 | |||
| 558 | __be64 TTOT_prio_7; | ||
| 559 | __be64 T1Q_prio_7; | ||
| 560 | __be64 reserved20; | ||
| 561 | |||
| 562 | __be64 TTOT_novlan; | ||
| 563 | __be64 T1Q_novlan; | ||
| 564 | __be64 reserved21; | ||
| 565 | |||
| 566 | __be64 TTOT_loopbk; | ||
| 567 | __be64 T1Q_loopbk; | ||
| 568 | __be64 reserved22; | ||
| 569 | |||
| 570 | /* Received frames with a length greater than MTU octets and a bad CRC */ | ||
| 571 | __be32 RJBBR; | ||
| 572 | /* Received frames with a bad CRC that are not runts, jabbers, | ||
| 573 | or alignment errors */ | ||
| 574 | __be32 RCRC; | ||
| 575 | /* Received frames with SFD with a length of less than 64 octets and a | ||
| 576 | bad CRC */ | ||
| 577 | __be32 RRUNT; | ||
| 578 | /* Received frames with a length less than 64 octets and a good CRC */ | ||
| 579 | __be32 RSHORT; | ||
| 580 | /* Total Number of Received Packets Dropped */ | ||
| 581 | __be32 RDROP; | ||
| 582 | /* Drop due to overflow */ | ||
| 583 | __be32 RdropOvflw; | ||
| 584 | /* Drop due to length error */ | ||
| 585 | __be32 RdropLength; | ||
| 586 | /* Total of good frames. Does not include frames received with | ||
| 587 | frame-too-long, FCS, or length errors */ | ||
| 588 | __be32 RTOTFRMS; | ||
| 589 | /* Total dropped Xmited packets */ | ||
| 590 | __be32 TDROP; | ||
| 591 | }; | ||
| 592 | |||
| 593 | |||
| 594 | #endif | ||
diff --git a/drivers/net/mlx4/en_resources.c b/drivers/net/mlx4/en_resources.c new file mode 100644 index 00000000000..0dfb4ec8a9d --- /dev/null +++ b/drivers/net/mlx4/en_resources.c | |||
| @@ -0,0 +1,102 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
| 3 | * | ||
| 4 | * This software is available to you under a choice of one of two | ||
| 5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 6 | * General Public License (GPL) Version 2, available from the file | ||
| 7 | * COPYING in the main directory of this source tree, or the | ||
| 8 | * OpenIB.org BSD license below: | ||
| 9 | * | ||
| 10 | * Redistribution and use in source and binary forms, with or | ||
| 11 | * without modification, are permitted provided that the following | ||
| 12 | * conditions are met: | ||
| 13 | * | ||
| 14 | * - Redistributions of source code must retain the above | ||
| 15 | * copyright notice, this list of conditions and the following | ||
| 16 | * disclaimer. | ||
| 17 | * | ||
| 18 | * - Redistributions in binary form must reproduce the above | ||
| 19 | * copyright notice, this list of conditions and the following | ||
| 20 | * disclaimer in the documentation and/or other materials | ||
| 21 | * provided with the distribution. | ||
| 22 | * | ||
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 30 | * SOFTWARE. | ||
| 31 | * | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <linux/slab.h> | ||
| 35 | #include <linux/vmalloc.h> | ||
| 36 | #include <linux/mlx4/qp.h> | ||
| 37 | |||
| 38 | #include "mlx4_en.h" | ||
| 39 | |||
| 40 | void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, | ||
| 41 | int is_tx, int rss, int qpn, int cqn, | ||
| 42 | struct mlx4_qp_context *context) | ||
| 43 | { | ||
| 44 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 45 | |||
| 46 | memset(context, 0, sizeof *context); | ||
| 47 | context->flags = cpu_to_be32(7 << 16 | rss << 13); | ||
| 48 | context->pd = cpu_to_be32(mdev->priv_pdn); | ||
| 49 | context->mtu_msgmax = 0xff; | ||
| 50 | if (!is_tx && !rss) | ||
| 51 | context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); | ||
| 52 | if (is_tx) | ||
| 53 | context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); | ||
| 54 | else | ||
| 55 | context->sq_size_stride = ilog2(TXBB_SIZE) - 4; | ||
| 56 | context->usr_page = cpu_to_be32(mdev->priv_uar.index); | ||
| 57 | context->local_qpn = cpu_to_be32(qpn); | ||
| 58 | context->pri_path.ackto = 1 & 0x07; | ||
| 59 | context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; | ||
| 60 | context->pri_path.counter_index = 0xff; | ||
| 61 | context->cqn_send = cpu_to_be32(cqn); | ||
| 62 | context->cqn_recv = cpu_to_be32(cqn); | ||
| 63 | context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2); | ||
| 64 | } | ||
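
The rq_size_stride/sq_size_stride bytes set above pack two logarithms into a single octet. A sketch of the encoding, with the bit positions inferred from the shifts in the code:

```c
#include <linux/log2.h>
#include <linux/types.h>

/* log2(entries) lands in bits 7:3, log2(stride) - 4 in bits 2:0; e.g. a
 * 1024-entry ring with a 64-byte stride encodes as (10 << 3) | 2 = 0x52. */
static u8 example_pack_size_stride(u32 entries, u32 stride_bytes)
{
	return (ilog2(entries) << 3) | ((ilog2(stride_bytes) - 4) & 0x7);
}
```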
| 65 | |||
| 66 | |||
| 67 | int mlx4_en_map_buffer(struct mlx4_buf *buf) | ||
| 68 | { | ||
| 69 | struct page **pages; | ||
| 70 | int i; | ||
| 71 | |||
| 72 | if (BITS_PER_LONG == 64 || buf->nbufs == 1) | ||
| 73 | return 0; | ||
| 74 | |||
| 75 | pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL); | ||
| 76 | if (!pages) | ||
| 77 | return -ENOMEM; | ||
| 78 | |||
| 79 | for (i = 0; i < buf->nbufs; ++i) | ||
| 80 | pages[i] = virt_to_page(buf->page_list[i].buf); | ||
| 81 | |||
| 82 | buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); | ||
| 83 | kfree(pages); | ||
| 84 | if (!buf->direct.buf) | ||
| 85 | return -ENOMEM; | ||
| 86 | |||
| 87 | return 0; | ||
| 88 | } | ||
| 89 | |||
| 90 | void mlx4_en_unmap_buffer(struct mlx4_buf *buf) | ||
| 91 | { | ||
| 92 | if (BITS_PER_LONG == 64 || buf->nbufs == 1) | ||
| 93 | return; | ||
| 94 | |||
| 95 | vunmap(buf->direct.buf); | ||
| 96 | } | ||
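
mlx4_en_map_buffer() only does real work for chunked allocations on 32-bit kernels; otherwise buf->direct.buf is already virtually contiguous and the call is a no-op. A hypothetical caller pairing the two helpers:

```c
/* Hedged usage sketch: obtain a single virtual address for a ring buffer.
 * Any mapping created here stays valid until mlx4_en_unmap_buffer() is
 * called on the same buffer; the two must stay paired. */
static void *example_ring_virt_addr(struct mlx4_buf *buf)
{
	if (mlx4_en_map_buffer(buf))
		return NULL;		/* vmap of the page list failed */
	return buf->direct.buf;
}
```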
| 97 | |||
| 98 | void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event) | ||
| 99 | { | ||
| 100 | return; | ||
| 101 | } | ||
| 102 | |||
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c new file mode 100644 index 00000000000..37cc9e5c56b --- /dev/null +++ b/drivers/net/mlx4/en_rx.c | |||
| @@ -0,0 +1,918 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
| 3 | * | ||
| 4 | * This software is available to you under a choice of one of two | ||
| 5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 6 | * General Public License (GPL) Version 2, available from the file | ||
| 7 | * COPYING in the main directory of this source tree, or the | ||
| 8 | * OpenIB.org BSD license below: | ||
| 9 | * | ||
| 10 | * Redistribution and use in source and binary forms, with or | ||
| 11 | * without modification, are permitted provided that the following | ||
| 12 | * conditions are met: | ||
| 13 | * | ||
| 14 | * - Redistributions of source code must retain the above | ||
| 15 | * copyright notice, this list of conditions and the following | ||
| 16 | * disclaimer. | ||
| 17 | * | ||
| 18 | * - Redistributions in binary form must reproduce the above | ||
| 19 | * copyright notice, this list of conditions and the following | ||
| 20 | * disclaimer in the documentation and/or other materials | ||
| 21 | * provided with the distribution. | ||
| 22 | * | ||
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 30 | * SOFTWARE. | ||
| 31 | * | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <linux/mlx4/cq.h> | ||
| 35 | #include <linux/slab.h> | ||
| 36 | #include <linux/mlx4/qp.h> | ||
| 37 | #include <linux/skbuff.h> | ||
| 38 | #include <linux/if_ether.h> | ||
| 39 | #include <linux/if_vlan.h> | ||
| 40 | #include <linux/vmalloc.h> | ||
| 41 | |||
| 42 | #include "mlx4_en.h" | ||
| 43 | |||
| 44 | |||
| 45 | static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv, | ||
| 46 | struct mlx4_en_rx_desc *rx_desc, | ||
| 47 | struct skb_frag_struct *skb_frags, | ||
| 48 | struct mlx4_en_rx_alloc *ring_alloc, | ||
| 49 | int i) | ||
| 50 | { | ||
| 51 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 52 | struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; | ||
| 53 | struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i]; | ||
| 54 | struct page *page; | ||
| 55 | dma_addr_t dma; | ||
| 56 | |||
| 57 | if (page_alloc->offset == frag_info->last_offset) { | ||
| 58 | /* Allocate new page */ | ||
| 59 | page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER); | ||
| 60 | if (!page) | ||
| 61 | return -ENOMEM; | ||
| 62 | |||
| 63 | skb_frags[i].page = page_alloc->page; | ||
| 64 | skb_frags[i].page_offset = page_alloc->offset; | ||
| 65 | page_alloc->page = page; | ||
| 66 | page_alloc->offset = frag_info->frag_align; | ||
| 67 | } else { | ||
| 68 | page = page_alloc->page; | ||
| 69 | get_page(page); | ||
| 70 | |||
| 71 | skb_frags[i].page = page; | ||
| 72 | skb_frags[i].page_offset = page_alloc->offset; | ||
| 73 | page_alloc->offset += frag_info->frag_stride; | ||
| 74 | } | ||
| 75 | dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) + | ||
| 76 | skb_frags[i].page_offset, frag_info->frag_size, | ||
| 77 | PCI_DMA_FROMDEVICE); | ||
| 78 | rx_desc->data[i].addr = cpu_to_be64(dma); | ||
| 79 | return 0; | ||
| 80 | } | ||
| 81 | |||
| 82 | static int mlx4_en_init_allocator(struct mlx4_en_priv *priv, | ||
| 83 | struct mlx4_en_rx_ring *ring) | ||
| 84 | { | ||
| 85 | struct mlx4_en_rx_alloc *page_alloc; | ||
| 86 | int i; | ||
| 87 | |||
| 88 | for (i = 0; i < priv->num_frags; i++) { | ||
| 89 | page_alloc = &ring->page_alloc[i]; | ||
| 90 | page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, | ||
| 91 | MLX4_EN_ALLOC_ORDER); | ||
| 92 | if (!page_alloc->page) | ||
| 93 | goto out; | ||
| 94 | |||
| 95 | page_alloc->offset = priv->frag_info[i].frag_align; | ||
| 96 | en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n", | ||
| 97 | i, page_alloc->page); | ||
| 98 | } | ||
| 99 | return 0; | ||
| 100 | |||
| 101 | out: | ||
| 102 | while (i--) { | ||
| 103 | page_alloc = &ring->page_alloc[i]; | ||
| 104 | put_page(page_alloc->page); | ||
| 105 | page_alloc->page = NULL; | ||
| 106 | } | ||
| 107 | return -ENOMEM; | ||
| 108 | } | ||
| 109 | |||
| 110 | static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv, | ||
| 111 | struct mlx4_en_rx_ring *ring) | ||
| 112 | { | ||
| 113 | struct mlx4_en_rx_alloc *page_alloc; | ||
| 114 | int i; | ||
| 115 | |||
| 116 | for (i = 0; i < priv->num_frags; i++) { | ||
| 117 | page_alloc = &ring->page_alloc[i]; | ||
| 118 | en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n", | ||
| 119 | i, page_count(page_alloc->page)); | ||
| 120 | |||
| 121 | put_page(page_alloc->page); | ||
| 122 | page_alloc->page = NULL; | ||
| 123 | } | ||
| 124 | } | ||
| 125 | |||
| 126 | |||
| 127 | static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv, | ||
| 128 | struct mlx4_en_rx_ring *ring, int index) | ||
| 129 | { | ||
| 130 | struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index; | ||
| 131 | struct skb_frag_struct *skb_frags = ring->rx_info + | ||
| 132 | (index << priv->log_rx_info); | ||
| 133 | int possible_frags; | ||
| 134 | int i; | ||
| 135 | |||
| 136 | /* Set size and memtype fields */ | ||
| 137 | for (i = 0; i < priv->num_frags; i++) { | ||
| 138 | skb_frags[i].size = priv->frag_info[i].frag_size; | ||
| 139 | rx_desc->data[i].byte_count = | ||
| 140 | cpu_to_be32(priv->frag_info[i].frag_size); | ||
| 141 | rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key); | ||
| 142 | } | ||
| 143 | |||
| 144 | /* If the number of used fragments does not fill up the ring stride, | ||
| 145 | * remaining (unused) fragments must be padded with null address/size | ||
| 146 | * and a special memory key */ | ||
| 147 | possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE; | ||
| 148 | for (i = priv->num_frags; i < possible_frags; i++) { | ||
| 149 | rx_desc->data[i].byte_count = 0; | ||
| 150 | rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD); | ||
| 151 | rx_desc->data[i].addr = 0; | ||
| 152 | } | ||
| 153 | } | ||
| 154 | |||
| 155 | |||
| 156 | static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, | ||
| 157 | struct mlx4_en_rx_ring *ring, int index) | ||
| 158 | { | ||
| 159 | struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride); | ||
| 160 | struct skb_frag_struct *skb_frags = ring->rx_info + | ||
| 161 | (index << priv->log_rx_info); | ||
| 162 | int i; | ||
| 163 | |||
| 164 | for (i = 0; i < priv->num_frags; i++) | ||
| 165 | if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i)) | ||
| 166 | goto err; | ||
| 167 | |||
| 168 | return 0; | ||
| 169 | |||
| 170 | err: | ||
| 171 | while (i--) | ||
| 172 | put_page(skb_frags[i].page); | ||
| 173 | return -ENOMEM; | ||
| 174 | } | ||
| 175 | |||
| 176 | static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) | ||
| 177 | { | ||
| 178 | *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff); | ||
| 179 | } | ||
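
The doorbell record written here is plain coherent memory that the hardware polls, not a register write. A sketch of the convention as the RX path in this file uses it:

```c
/* Publish the low 16 bits of the producer counter, big-endian; the
 * caller orders the store with a write barrier where needed (see the
 * end of mlx4_en_process_rx_cq() below). Since only 16 bits are
 * published, rings larger than 64K entries could not use this format. */
static inline void example_set_rx_db(__be32 *db_rec, u32 prod)
{
	*db_rec = cpu_to_be32(prod & 0xffff);
}
```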
| 180 | |||
| 181 | static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv, | ||
| 182 | struct mlx4_en_rx_ring *ring, | ||
| 183 | int index) | ||
| 184 | { | ||
| 185 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 186 | struct skb_frag_struct *skb_frags; | ||
| 187 | struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride); | ||
| 188 | dma_addr_t dma; | ||
| 189 | int nr; | ||
| 190 | |||
| 191 | skb_frags = ring->rx_info + (index << priv->log_rx_info); | ||
| 192 | for (nr = 0; nr < priv->num_frags; nr++) { | ||
| 193 | en_dbg(DRV, priv, "Freeing fragment:%d\n", nr); | ||
| 194 | dma = be64_to_cpu(rx_desc->data[nr].addr); | ||
| 195 | |||
| 196 | en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma); | ||
| 197 | pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size, | ||
| 198 | PCI_DMA_FROMDEVICE); | ||
| 199 | put_page(skb_frags[nr].page); | ||
| 200 | } | ||
| 201 | } | ||
| 202 | |||
| 203 | static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) | ||
| 204 | { | ||
| 205 | struct mlx4_en_rx_ring *ring; | ||
| 206 | int ring_ind; | ||
| 207 | int buf_ind; | ||
| 208 | int new_size; | ||
| 209 | |||
| 210 | for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) { | ||
| 211 | for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { | ||
| 212 | ring = &priv->rx_ring[ring_ind]; | ||
| 213 | |||
| 214 | if (mlx4_en_prepare_rx_desc(priv, ring, | ||
| 215 | ring->actual_size)) { | ||
| 216 | if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) { | ||
| 217 | en_err(priv, "Failed to allocate " | ||
| 218 | "enough rx buffers\n"); | ||
| 219 | return -ENOMEM; | ||
| 220 | } else { | ||
| 221 | new_size = rounddown_pow_of_two(ring->actual_size); | ||
| 222 | en_warn(priv, "Only %d buffers allocated, " | ||
| 223 | "reducing ring size to %d\n", | ||
| 224 | ring->actual_size, new_size); | ||
| 225 | goto reduce_rings; | ||
| 226 | } | ||
| 227 | } | ||
| 228 | ring->actual_size++; | ||
| 229 | ring->prod++; | ||
| 230 | } | ||
| 231 | } | ||
| 232 | return 0; | ||
| 233 | |||
| 234 | reduce_rings: | ||
| 235 | for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { | ||
| 236 | ring = &priv->rx_ring[ring_ind]; | ||
| 237 | while (ring->actual_size > new_size) { | ||
| 238 | ring->actual_size--; | ||
| 239 | ring->prod--; | ||
| 240 | mlx4_en_free_rx_desc(priv, ring, ring->actual_size); | ||
| 241 | } | ||
| 242 | } | ||
| 243 | |||
| 244 | return 0; | ||
| 245 | } | ||
| 246 | |||
| 247 | static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, | ||
| 248 | struct mlx4_en_rx_ring *ring) | ||
| 249 | { | ||
| 250 | int index; | ||
| 251 | |||
| 252 | en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n", | ||
| 253 | ring->cons, ring->prod); | ||
| 254 | |||
| 255 | /* Unmap and free Rx buffers */ | ||
| 256 | BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size); | ||
| 257 | while (ring->cons != ring->prod) { | ||
| 258 | index = ring->cons & ring->size_mask; | ||
| 259 | en_dbg(DRV, priv, "Processing descriptor:%d\n", index); | ||
| 260 | mlx4_en_free_rx_desc(priv, ring, index); | ||
| 261 | ++ring->cons; | ||
| 262 | } | ||
| 263 | } | ||
| 264 | |||
| 265 | int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, | ||
| 266 | struct mlx4_en_rx_ring *ring, u32 size, u16 stride) | ||
| 267 | { | ||
| 268 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 269 | int err; | ||
| 270 | int tmp; | ||
| 271 | |||
| 272 | |||
| 273 | ring->prod = 0; | ||
| 274 | ring->cons = 0; | ||
| 275 | ring->size = size; | ||
| 276 | ring->size_mask = size - 1; | ||
| 277 | ring->stride = stride; | ||
| 278 | ring->log_stride = ffs(ring->stride) - 1; | ||
| 279 | ring->buf_size = ring->size * ring->stride + TXBB_SIZE; | ||
| 280 | |||
| 281 | tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS * | ||
| 282 | sizeof(struct skb_frag_struct)); | ||
| 283 | ring->rx_info = vmalloc(tmp); | ||
| 284 | if (!ring->rx_info) { | ||
| 285 | en_err(priv, "Failed allocating rx_info ring\n"); | ||
| 286 | return -ENOMEM; | ||
| 287 | } | ||
| 288 | en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n", | ||
| 289 | ring->rx_info, tmp); | ||
| 290 | |||
| 291 | err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, | ||
| 292 | ring->buf_size, 2 * PAGE_SIZE); | ||
| 293 | if (err) | ||
| 294 | goto err_ring; | ||
| 295 | |||
| 296 | err = mlx4_en_map_buffer(&ring->wqres.buf); | ||
| 297 | if (err) { | ||
| 298 | en_err(priv, "Failed to map RX buffer\n"); | ||
| 299 | goto err_hwq; | ||
| 300 | } | ||
| 301 | ring->buf = ring->wqres.buf.direct.buf; | ||
| 302 | |||
| 303 | return 0; | ||
| 304 | |||
| 305 | err_hwq: | ||
| 306 | mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); | ||
| 307 | err_ring: | ||
| 308 | vfree(ring->rx_info); | ||
| 309 | ring->rx_info = NULL; | ||
| 310 | return err; | ||
| 311 | } | ||
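
Note that buf_size reserves one extra TXBB beyond size * stride: mlx4_en_activate_rx_rings() below offsets small-stride rings by that TXBB, and the teardown paths subtract it again. A hedged sketch of the offset rule visible in this file:

```c
/* Rings whose stride fits in a single TXBB start their descriptors one
 * TXBB into the HW queue buffer; larger-stride rings start at offset
 * zero. Illustrative helper, not driver code. */
static void *example_ring_base(void *hwq_buf, u16 stride)
{
	return (stride <= TXBB_SIZE) ? hwq_buf + TXBB_SIZE : hwq_buf;
}
```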
| 312 | |||
| 313 | int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) | ||
| 314 | { | ||
| 315 | struct mlx4_en_rx_ring *ring; | ||
| 316 | int i; | ||
| 317 | int ring_ind; | ||
| 318 | int err; | ||
| 319 | int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + | ||
| 320 | DS_SIZE * priv->num_frags); | ||
| 321 | |||
| 322 | for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { | ||
| 323 | ring = &priv->rx_ring[ring_ind]; | ||
| 324 | |||
| 325 | ring->prod = 0; | ||
| 326 | ring->cons = 0; | ||
| 327 | ring->actual_size = 0; | ||
| 328 | ring->cqn = priv->rx_cq[ring_ind].mcq.cqn; | ||
| 329 | |||
| 330 | ring->stride = stride; | ||
| 331 | if (ring->stride <= TXBB_SIZE) | ||
| 332 | ring->buf += TXBB_SIZE; | ||
| 333 | |||
| 334 | ring->log_stride = ffs(ring->stride) - 1; | ||
| 335 | ring->buf_size = ring->size * ring->stride; | ||
| 336 | |||
| 337 | memset(ring->buf, 0, ring->buf_size); | ||
| 338 | mlx4_en_update_rx_prod_db(ring); | ||
| 339 | |||
| 340 | /* Initialize all descriptors */ | ||
| 341 | for (i = 0; i < ring->size; i++) | ||
| 342 | mlx4_en_init_rx_desc(priv, ring, i); | ||
| 343 | |||
| 344 | /* Initialize page allocators */ | ||
| 345 | err = mlx4_en_init_allocator(priv, ring); | ||
| 346 | if (err) { | ||
| 347 | en_err(priv, "Failed initializing ring allocator\n"); | ||
| 348 | if (ring->stride <= TXBB_SIZE) | ||
| 349 | ring->buf -= TXBB_SIZE; | ||
| 350 | ring_ind--; | ||
| 351 | goto err_allocator; | ||
| 352 | } | ||
| 353 | } | ||
| 354 | err = mlx4_en_fill_rx_buffers(priv); | ||
| 355 | if (err) | ||
| 356 | goto err_buffers; | ||
| 357 | |||
| 358 | for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { | ||
| 359 | ring = &priv->rx_ring[ring_ind]; | ||
| 360 | |||
| 361 | ring->size_mask = ring->actual_size - 1; | ||
| 362 | mlx4_en_update_rx_prod_db(ring); | ||
| 363 | } | ||
| 364 | |||
| 365 | return 0; | ||
| 366 | |||
| 367 | err_buffers: | ||
| 368 | for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) | ||
| 369 | mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]); | ||
| 370 | |||
| 371 | ring_ind = priv->rx_ring_num - 1; | ||
| 372 | err_allocator: | ||
| 373 | while (ring_ind >= 0) { | ||
| 374 | if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE) | ||
| 375 | priv->rx_ring[ring_ind].buf -= TXBB_SIZE; | ||
| 376 | mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]); | ||
| 377 | ring_ind--; | ||
| 378 | } | ||
| 379 | return err; | ||
| 380 | } | ||
| 381 | |||
| 382 | void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, | ||
| 383 | struct mlx4_en_rx_ring *ring) | ||
| 384 | { | ||
| 385 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 386 | |||
| 387 | mlx4_en_unmap_buffer(&ring->wqres.buf); | ||
| 388 | mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE); | ||
| 389 | vfree(ring->rx_info); | ||
| 390 | ring->rx_info = NULL; | ||
| 391 | } | ||
| 392 | |||
| 393 | void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, | ||
| 394 | struct mlx4_en_rx_ring *ring) | ||
| 395 | { | ||
| 396 | mlx4_en_free_rx_buf(priv, ring); | ||
| 397 | if (ring->stride <= TXBB_SIZE) | ||
| 398 | ring->buf -= TXBB_SIZE; | ||
| 399 | mlx4_en_destroy_allocator(priv, ring); | ||
| 400 | } | ||
| 401 | |||
| 402 | |||
| 403 | /* Unmap a completed descriptor and free unused pages */ | ||
| 404 | static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, | ||
| 405 | struct mlx4_en_rx_desc *rx_desc, | ||
| 406 | struct skb_frag_struct *skb_frags, | ||
| 407 | struct skb_frag_struct *skb_frags_rx, | ||
| 408 | struct mlx4_en_rx_alloc *page_alloc, | ||
| 409 | int length) | ||
| 410 | { | ||
| 411 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 412 | struct mlx4_en_frag_info *frag_info; | ||
| 413 | int nr; | ||
| 414 | dma_addr_t dma; | ||
| 415 | |||
| 416 | /* Collect used fragments while replacing them in the HW descriptors */ | ||
| 417 | for (nr = 0; nr < priv->num_frags; nr++) { | ||
| 418 | frag_info = &priv->frag_info[nr]; | ||
| 419 | if (length <= frag_info->frag_prefix_size) | ||
| 420 | break; | ||
| 421 | |||
| 422 | /* Save page reference in skb */ | ||
| 423 | skb_frags_rx[nr].page = skb_frags[nr].page; | ||
| 424 | skb_frags_rx[nr].size = skb_frags[nr].size; | ||
| 425 | skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset; | ||
| 426 | dma = be64_to_cpu(rx_desc->data[nr].addr); | ||
| 427 | |||
| 428 | /* Allocate a replacement page */ | ||
| 429 | if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr)) | ||
| 430 | goto fail; | ||
| 431 | |||
| 432 | /* Unmap buffer */ | ||
| 433 | pci_unmap_single(mdev->pdev, dma, skb_frags_rx[nr].size, | ||
| 434 | PCI_DMA_FROMDEVICE); | ||
| 435 | } | ||
| 436 | /* Adjust size of last fragment to match actual length */ | ||
| 437 | if (nr > 0) | ||
| 438 | skb_frags_rx[nr - 1].size = length - | ||
| 439 | priv->frag_info[nr - 1].frag_prefix_size; | ||
| 440 | return nr; | ||
| 441 | |||
| 442 | fail: | ||
| 443 | /* Drop all accumulated fragments (which have already been replaced in | ||
| 444 | * the descriptor) of this packet; remaining fragments are reused... */ | ||
| 445 | while (nr > 0) { | ||
| 446 | nr--; | ||
| 447 | put_page(skb_frags_rx[nr].page); | ||
| 448 | } | ||
| 449 | return 0; | ||
| 450 | } | ||
| 451 | |||
| 452 | |||
| 453 | static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, | ||
| 454 | struct mlx4_en_rx_desc *rx_desc, | ||
| 455 | struct skb_frag_struct *skb_frags, | ||
| 456 | struct mlx4_en_rx_alloc *page_alloc, | ||
| 457 | unsigned int length) | ||
| 458 | { | ||
| 459 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 460 | struct sk_buff *skb; | ||
| 461 | void *va; | ||
| 462 | int used_frags; | ||
| 463 | dma_addr_t dma; | ||
| 464 | |||
| 465 | skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN); | ||
| 466 | if (!skb) { | ||
| 467 | en_dbg(RX_ERR, priv, "Failed allocating skb\n"); | ||
| 468 | return NULL; | ||
| 469 | } | ||
| 470 | skb->dev = priv->dev; | ||
| 471 | skb_reserve(skb, NET_IP_ALIGN); | ||
| 472 | skb->len = length; | ||
| 473 | skb->truesize = length + sizeof(struct sk_buff); | ||
| 474 | |||
| 475 | /* Get pointer to first fragment so we could copy the headers into the | ||
| 476 | * (linear part of the) skb */ | ||
| 477 | va = page_address(skb_frags[0].page) + skb_frags[0].page_offset; | ||
| 478 | |||
| 479 | if (length <= SMALL_PACKET_SIZE) { | ||
| 480 | /* We are copying all relevant data to the skb - temporarily | ||
| 481 | * sync buffers for the copy */ | ||
| 482 | dma = be64_to_cpu(rx_desc->data[0].addr); | ||
| 483 | dma_sync_single_for_cpu(&mdev->pdev->dev, dma, length, | ||
| 484 | DMA_FROM_DEVICE); | ||
| 485 | skb_copy_to_linear_data(skb, va, length); | ||
| 486 | dma_sync_single_for_device(&mdev->pdev->dev, dma, length, | ||
| 487 | DMA_FROM_DEVICE); | ||
| 488 | skb->tail += length; | ||
| 489 | } else { | ||
| 490 | |||
| 491 | /* Move relevant fragments to skb */ | ||
| 492 | used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags, | ||
| 493 | skb_shinfo(skb)->frags, | ||
| 494 | page_alloc, length); | ||
| 495 | if (unlikely(!used_frags)) { | ||
| 496 | kfree_skb(skb); | ||
| 497 | return NULL; | ||
| 498 | } | ||
| 499 | skb_shinfo(skb)->nr_frags = used_frags; | ||
| 500 | |||
| 501 | /* Copy headers into the skb linear buffer */ | ||
| 502 | memcpy(skb->data, va, HEADER_COPY_SIZE); | ||
| 503 | skb->tail += HEADER_COPY_SIZE; | ||
| 504 | |||
| 505 | /* Skip headers in first fragment */ | ||
| 506 | skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE; | ||
| 507 | |||
| 508 | /* Adjust size of first fragment */ | ||
| 509 | skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE; | ||
| 510 | skb->data_len = length - HEADER_COPY_SIZE; | ||
| 511 | } | ||
| 512 | return skb; | ||
| 513 | } | ||
| 514 | |||
| 515 | static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb) | ||
| 516 | { | ||
| 517 | int i; | ||
| 518 | int offset = ETH_HLEN; | ||
| 519 | |||
| 520 | for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) { | ||
| 521 | if (*(skb->data + offset) != (unsigned char) (i & 0xff)) | ||
| 522 | goto out_loopback; | ||
| 523 | } | ||
| 524 | /* Loopback found */ | ||
| 525 | priv->loopback_ok = 1; | ||
| 526 | |||
| 527 | out_loopback: | ||
| 528 | dev_kfree_skb_any(skb); | ||
| 529 | } | ||
| 530 | |||
| 531 | int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) | ||
| 532 | { | ||
| 533 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 534 | struct mlx4_cqe *cqe; | ||
| 535 | struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring]; | ||
| 536 | struct skb_frag_struct *skb_frags; | ||
| 537 | struct mlx4_en_rx_desc *rx_desc; | ||
| 538 | struct sk_buff *skb; | ||
| 539 | int index; | ||
| 540 | int nr; | ||
| 541 | unsigned int length; | ||
| 542 | int polled = 0; | ||
| 543 | int ip_summed; | ||
| 544 | |||
| 545 | if (!priv->port_up) | ||
| 546 | return 0; | ||
| 547 | |||
| 548 | /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx | ||
| 549 | * descriptor offset can be deduced from the CQE index instead of | ||
| 550 | * reading 'cqe->index' */ | ||
| 551 | index = cq->mcq.cons_index & ring->size_mask; | ||
| 552 | cqe = &cq->buf[index]; | ||
| 553 | |||
| 554 | /* Process all completed CQEs */ | ||
| 555 | while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, | ||
| 556 | cq->mcq.cons_index & cq->size)) { | ||
| 557 | |||
| 558 | skb_frags = ring->rx_info + (index << priv->log_rx_info); | ||
| 559 | rx_desc = ring->buf + (index << ring->log_stride); | ||
| 560 | |||
| 561 | /* | ||
| 562 | * make sure we read the CQE after we read the ownership bit | ||
| 563 | */ | ||
| 564 | rmb(); | ||
| 565 | |||
| 566 | /* Drop packet on bad receive or bad checksum */ | ||
| 567 | if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == | ||
| 568 | MLX4_CQE_OPCODE_ERROR)) { | ||
| 569 | en_err(priv, "CQE completed in error - vendor " | ||
| 570 | "syndrom:%d syndrom:%d\n", | ||
| 571 | ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome, | ||
| 572 | ((struct mlx4_err_cqe *) cqe)->syndrome); | ||
| 573 | goto next; | ||
| 574 | } | ||
| 575 | if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) { | ||
| 576 | en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n"); | ||
| 577 | goto next; | ||
| 578 | } | ||
| 579 | |||
| 580 | /* | ||
| 581 | * Packet is OK - process it. | ||
| 582 | */ | ||
| 583 | length = be32_to_cpu(cqe->byte_cnt); | ||
| 584 | ring->bytes += length; | ||
| 585 | ring->packets++; | ||
| 586 | |||
| 587 | if (likely(dev->features & NETIF_F_RXCSUM)) { | ||
| 588 | if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && | ||
| 589 | (cqe->checksum == cpu_to_be16(0xffff))) { | ||
| 590 | priv->port_stats.rx_chksum_good++; | ||
| 591 | /* This packet is eligible for GRO if it is: | ||
| 592 | * - DIX Ethernet (type interpretation) | ||
| 593 | * - TCP/IP (v4) | ||
| 594 | * - without IP options | ||
| 595 | * - not an IP fragment */ | ||
| 596 | if (dev->features & NETIF_F_GRO) { | ||
| 597 | struct sk_buff *gro_skb = napi_get_frags(&cq->napi); | ||
| 598 | if (!gro_skb) | ||
| 599 | goto next; | ||
| 600 | |||
| 601 | nr = mlx4_en_complete_rx_desc( | ||
| 602 | priv, rx_desc, | ||
| 603 | skb_frags, skb_shinfo(gro_skb)->frags, | ||
| 604 | ring->page_alloc, length); | ||
| 605 | if (!nr) | ||
| 606 | goto next; | ||
| 607 | |||
| 608 | skb_shinfo(gro_skb)->nr_frags = nr; | ||
| 609 | gro_skb->len = length; | ||
| 610 | gro_skb->data_len = length; | ||
| 611 | gro_skb->truesize += length; | ||
| 612 | gro_skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
| 613 | |||
| 614 | if (cqe->vlan_my_qpn & | ||
| 615 | cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) { | ||
| 616 | u16 vid = be16_to_cpu(cqe->sl_vid); | ||
| 617 | |||
| 618 | __vlan_hwaccel_put_tag(gro_skb, vid); | ||
| 619 | } | ||
| 620 | |||
| 621 | napi_gro_frags(&cq->napi); | ||
| 622 | |||
| 623 | goto next; | ||
| 624 | } | ||
| 625 | |||
| 626 | /* GRO not possible, complete processing here */ | ||
| 627 | ip_summed = CHECKSUM_UNNECESSARY; | ||
| 628 | } else { | ||
| 629 | ip_summed = CHECKSUM_NONE; | ||
| 630 | priv->port_stats.rx_chksum_none++; | ||
| 631 | } | ||
| 632 | } else { | ||
| 633 | ip_summed = CHECKSUM_NONE; | ||
| 634 | priv->port_stats.rx_chksum_none++; | ||
| 635 | } | ||
| 636 | |||
| 637 | skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, | ||
| 638 | ring->page_alloc, length); | ||
| 639 | if (!skb) { | ||
| 640 | priv->stats.rx_dropped++; | ||
| 641 | goto next; | ||
| 642 | } | ||
| 643 | |||
| 644 | if (unlikely(priv->validate_loopback)) { | ||
| 645 | validate_loopback(priv, skb); | ||
| 646 | goto next; | ||
| 647 | } | ||
| 648 | |||
| 649 | skb->ip_summed = ip_summed; | ||
| 650 | skb->protocol = eth_type_trans(skb, dev); | ||
| 651 | skb_record_rx_queue(skb, cq->ring); | ||
| 652 | |||
| 653 | if (be32_to_cpu(cqe->vlan_my_qpn) & | ||
| 654 | MLX4_CQE_VLAN_PRESENT_MASK) | ||
| 655 | __vlan_hwaccel_put_tag(skb, be16_to_cpu(cqe->sl_vid)); | ||
| 656 | |||
| 657 | /* Push it up the stack */ | ||
| 658 | netif_receive_skb(skb); | ||
| 659 | |||
| 660 | next: | ||
| 661 | ++cq->mcq.cons_index; | ||
| 662 | index = (cq->mcq.cons_index) & ring->size_mask; | ||
| 663 | cqe = &cq->buf[index]; | ||
| 664 | if (++polled == budget) { | ||
| 665 | /* We are here because we reached the NAPI budget; | ||
| 666 | * stop polling and let NAPI reschedule us */ | ||
| 667 | goto out; | ||
| 668 | } | ||
| 669 | } | ||
| 670 | |||
| 671 | out: | ||
| 672 | AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled); | ||
| 673 | mlx4_cq_set_ci(&cq->mcq); | ||
| 674 | wmb(); /* ensure HW sees CQ consumer before we post new buffers */ | ||
| 675 | ring->cons = cq->mcq.cons_index; | ||
| 676 | ring->prod += polled; /* Polled descriptors were reallocated in place */ | ||
| 677 | mlx4_en_update_rx_prod_db(ring); | ||
| 678 | return polled; | ||
| 679 | } | ||
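
The loop condition above relies on the owner bit that the hardware toggles on every wrap of the CQ. A standalone sketch of the same test:

```c
#include <linux/mlx4/cq.h>

/* A CQE is software-owned exactly when its owner bit equals the
 * wrap-parity bit (cq->size) of the consumer index - which is what the
 * XNOR in the polling loop expresses. */
static bool example_cqe_sw_owned(const struct mlx4_cqe *cqe,
				 u32 cons_index, u32 cq_size)
{
	bool hw_owner_bit = cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
	bool wrap_parity  = cons_index & cq_size;

	return hw_owner_bit == wrap_parity;
}
```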
| 680 | |||
| 681 | |||
| 682 | void mlx4_en_rx_irq(struct mlx4_cq *mcq) | ||
| 683 | { | ||
| 684 | struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); | ||
| 685 | struct mlx4_en_priv *priv = netdev_priv(cq->dev); | ||
| 686 | |||
| 687 | if (priv->port_up) | ||
| 688 | napi_schedule(&cq->napi); | ||
| 689 | else | ||
| 690 | mlx4_en_arm_cq(priv, cq); | ||
| 691 | } | ||
| 692 | |||
| 693 | /* Rx CQ polling - called by NAPI */ | ||
| 694 | int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) | ||
| 695 | { | ||
| 696 | struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); | ||
| 697 | struct net_device *dev = cq->dev; | ||
| 698 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 699 | int done; | ||
| 700 | |||
| 701 | done = mlx4_en_process_rx_cq(dev, cq, budget); | ||
| 702 | |||
| 703 | /* If we used up all the quota - we're probably not done yet... */ | ||
| 704 | if (done == budget) | ||
| 705 | INC_PERF_COUNTER(priv->pstats.napi_quota); | ||
| 706 | else { | ||
| 707 | /* Done for now */ | ||
| 708 | napi_complete(napi); | ||
| 709 | mlx4_en_arm_cq(priv, cq); | ||
| 710 | } | ||
| 711 | return done; | ||
| 712 | } | ||
| 713 | |||
| 714 | |||
| 715 | /* Calculate the last offset position that accommodates a full fragment | ||
| 716 | * (assuming fragment size = stride - align) */ | ||
| 717 | static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align) | ||
| 718 | { | ||
| 719 | u16 res = MLX4_EN_ALLOC_SIZE % stride; | ||
| 720 | u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align; | ||
| 721 | |||
| 722 | en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d " | ||
| 723 | "res:%d offset:%d\n", stride, align, res, offset); | ||
| 724 | return offset; | ||
| 725 | } | ||
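
A worked instance of this calculation, under assumed constants:

```c
/* Worked example (assuming a 16 KiB allocation unit, i.e. 4 KiB pages at
 * allocation order 2 - the values here are illustrative):
 *   stride = 1536, align = 0
 *   res    = 16384 % 1536             = 1024
 *   offset = 16384 - 1536 - 1024 + 0  = 13824 = 9 * 1536
 * so the fragment starting at 9 * stride is the last one that still fits
 * entirely inside the allocation (13824 + 1536 = 15360 <= 16384). */
```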
| 726 | |||
| 727 | |||
| 728 | static int frag_sizes[] = { | ||
| 729 | FRAG_SZ0, | ||
| 730 | FRAG_SZ1, | ||
| 731 | FRAG_SZ2, | ||
| 732 | FRAG_SZ3 | ||
| 733 | }; | ||
| 734 | |||
| 735 | void mlx4_en_calc_rx_buf(struct net_device *dev) | ||
| 736 | { | ||
| 737 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 738 | int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE; | ||
| 739 | int buf_size = 0; | ||
| 740 | int i = 0; | ||
| 741 | |||
| 742 | while (buf_size < eff_mtu) { | ||
| 743 | priv->frag_info[i].frag_size = | ||
| 744 | (eff_mtu > buf_size + frag_sizes[i]) ? | ||
| 745 | frag_sizes[i] : eff_mtu - buf_size; | ||
| 746 | priv->frag_info[i].frag_prefix_size = buf_size; | ||
| 747 | if (!i) { | ||
| 748 | priv->frag_info[i].frag_align = NET_IP_ALIGN; | ||
| 749 | priv->frag_info[i].frag_stride = | ||
| 750 | ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES); | ||
| 751 | } else { | ||
| 752 | priv->frag_info[i].frag_align = 0; | ||
| 753 | priv->frag_info[i].frag_stride = | ||
| 754 | ALIGN(frag_sizes[i], SMP_CACHE_BYTES); | ||
| 755 | } | ||
| 756 | priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset( | ||
| 757 | priv, priv->frag_info[i].frag_stride, | ||
| 758 | priv->frag_info[i].frag_align); | ||
| 759 | buf_size += priv->frag_info[i].frag_size; | ||
| 760 | i++; | ||
| 761 | } | ||
| 762 | |||
| 763 | priv->num_frags = i; | ||
| 764 | priv->rx_skb_size = eff_mtu; | ||
| 765 | priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct)); | ||
| 766 | |||
| 767 | en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d " | ||
| 768 | "num_frags:%d):\n", eff_mtu, priv->num_frags); | ||
| 769 | for (i = 0; i < priv->num_frags; i++) { | ||
| 770 | en_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d " | ||
| 771 | "stride:%d last_offset:%d\n", i, | ||
| 772 | priv->frag_info[i].frag_size, | ||
| 773 | priv->frag_info[i].frag_prefix_size, | ||
| 774 | priv->frag_info[i].frag_align, | ||
| 775 | priv->frag_info[i].frag_stride, | ||
| 776 | priv->frag_info[i].last_offset); | ||
| 777 | } | ||
| 778 | } | ||
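
A worked example of the loop above, assuming the conventional fragment-size table (treat these values as illustrative):

```c
/* Assuming FRAG_SZ0 = 512 - NET_IP_ALIGN, FRAG_SZ1 = 1024 and
 * NET_IP_ALIGN = 2, a 1500-byte MTU gives:
 *   eff_mtu = 1500 + 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 8 (LLC/SNAP) = 1526
 *   frag 0: size 510,  prefix_size    0
 *   frag 1: size 1016, prefix_size  510   (eff_mtu - 510 < FRAG_SZ1)
 * so num_frags = 2 and rx_skb_size = 1526. */
```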
| 779 | |||
| 780 | /* RSS related functions */ | ||
| 781 | |||
| 782 | static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, | ||
| 783 | struct mlx4_en_rx_ring *ring, | ||
| 784 | enum mlx4_qp_state *state, | ||
| 785 | struct mlx4_qp *qp) | ||
| 786 | { | ||
| 787 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 788 | struct mlx4_qp_context *context; | ||
| 789 | int err = 0; | ||
| 790 | |||
| 791 | context = kmalloc(sizeof *context , GFP_KERNEL); | ||
| 792 | if (!context) { | ||
| 793 | en_err(priv, "Failed to allocate qp context\n"); | ||
| 794 | return -ENOMEM; | ||
| 795 | } | ||
| 796 | |||
| 797 | err = mlx4_qp_alloc(mdev->dev, qpn, qp); | ||
| 798 | if (err) { | ||
| 799 | en_err(priv, "Failed to allocate qp #%x\n", qpn); | ||
| 800 | goto out; | ||
| 801 | } | ||
| 802 | qp->event = mlx4_en_sqp_event; | ||
| 803 | |||
| 804 | memset(context, 0, sizeof *context); | ||
| 805 | mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0, | ||
| 806 | qpn, ring->cqn, context); | ||
| 807 | context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); | ||
| 808 | |||
| 809 | err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state); | ||
| 810 | if (err) { | ||
| 811 | mlx4_qp_remove(mdev->dev, qp); | ||
| 812 | mlx4_qp_free(mdev->dev, qp); | ||
| 813 | } | ||
| 814 | mlx4_en_update_rx_prod_db(ring); | ||
| 815 | out: | ||
| 816 | kfree(context); | ||
| 817 | return err; | ||
| 818 | } | ||
| 819 | |||
| 820 | /* Allocate rx qp's and configure them according to rss map */ | ||
| 821 | int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) | ||
| 822 | { | ||
| 823 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 824 | struct mlx4_en_rss_map *rss_map = &priv->rss_map; | ||
| 825 | struct mlx4_qp_context context; | ||
| 826 | struct mlx4_en_rss_context *rss_context; | ||
| 827 | void *ptr; | ||
| 828 | u8 rss_mask = 0x3f; | ||
| 829 | int i, qpn; | ||
| 830 | int err = 0; | ||
| 831 | int good_qps = 0; | ||
| 832 | |||
| 833 | en_dbg(DRV, priv, "Configuring rss steering\n"); | ||
| 834 | err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num, | ||
| 835 | priv->rx_ring_num, | ||
| 836 | &rss_map->base_qpn); | ||
| 837 | if (err) { | ||
| 838 | en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num); | ||
| 839 | return err; | ||
| 840 | } | ||
| 841 | |||
| 842 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
| 843 | qpn = rss_map->base_qpn + i; | ||
| 844 | err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i], | ||
| 845 | &rss_map->state[i], | ||
| 846 | &rss_map->qps[i]); | ||
| 847 | if (err) | ||
| 848 | goto rss_err; | ||
| 849 | |||
| 850 | ++good_qps; | ||
| 851 | } | ||
| 852 | |||
| 853 | /* Configure RSS indirection qp */ | ||
| 854 | err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp); | ||
| 855 | if (err) { | ||
| 856 | en_err(priv, "Failed to allocate RSS indirection QP\n"); | ||
| 857 | goto rss_err; | ||
| 858 | } | ||
| 859 | rss_map->indir_qp.event = mlx4_en_sqp_event; | ||
| 860 | mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, | ||
| 861 | priv->rx_ring[0].cqn, &context); | ||
| 862 | |||
| 863 | ptr = ((void *) &context) + 0x3c; | ||
| 864 | rss_context = ptr; | ||
| 865 | rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 | | ||
| 866 | (rss_map->base_qpn)); | ||
| 867 | rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn); | ||
| 868 | rss_context->flags = rss_mask; | ||
| 869 | |||
| 870 | if (priv->mdev->profile.udp_rss) | ||
| 871 | rss_context->base_qpn_udp = rss_context->default_qpn; | ||
| 872 | err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context, | ||
| 873 | &rss_map->indir_qp, &rss_map->indir_state); | ||
| 874 | if (err) | ||
| 875 | goto indir_err; | ||
| 876 | |||
| 877 | return 0; | ||
| 878 | |||
| 879 | indir_err: | ||
| 880 | mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state, | ||
| 881 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); | ||
| 882 | mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); | ||
| 883 | mlx4_qp_free(mdev->dev, &rss_map->indir_qp); | ||
| 884 | rss_err: | ||
| 885 | for (i = 0; i < good_qps; i++) { | ||
| 886 | mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], | ||
| 887 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]); | ||
| 888 | mlx4_qp_remove(mdev->dev, &rss_map->qps[i]); | ||
| 889 | mlx4_qp_free(mdev->dev, &rss_map->qps[i]); | ||
| 890 | } | ||
| 891 | mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num); | ||
| 892 | return err; | ||
| 893 | } | ||
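
The RSS context is carved out of the QP context at a fixed offset (0x3c in the code above), and its base_qpn field encodes both the table size and its starting QP. A sketch of that encoding (illustrative; num_rings is assumed to be a power of two, matching the aligned QP range reserved above):

```c
#include <linux/log2.h>
#include <linux/types.h>

/* log2(#rings) goes in bits 31:24, the first reserved QP number in the
 * low 24 bits, so the hardware hashes each flow onto one of the 2^n
 * consecutive QPs starting at base_qpn. */
static __be32 example_pack_rss_base_qpn(u32 base_qpn, u32 num_rings)
{
	return cpu_to_be32(ilog2(num_rings) << 24 | base_qpn);
}
```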
| 894 | |||
| 895 | void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv) | ||
| 896 | { | ||
| 897 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 898 | struct mlx4_en_rss_map *rss_map = &priv->rss_map; | ||
| 899 | int i; | ||
| 900 | |||
| 901 | mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state, | ||
| 902 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); | ||
| 903 | mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); | ||
| 904 | mlx4_qp_free(mdev->dev, &rss_map->indir_qp); | ||
| 905 | |||
| 906 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
| 907 | mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], | ||
| 908 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]); | ||
| 909 | mlx4_qp_remove(mdev->dev, &rss_map->qps[i]); | ||
| 910 | mlx4_qp_free(mdev->dev, &rss_map->qps[i]); | ||
| 911 | } | ||
| 912 | mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num); | ||
| 913 | } | ||
| 914 | |||
| 915 | |||
| 916 | |||
| 917 | |||
| 918 | |||
diff --git a/drivers/net/mlx4/en_selftest.c b/drivers/net/mlx4/en_selftest.c new file mode 100644 index 00000000000..9fdbcecd499 --- /dev/null +++ b/drivers/net/mlx4/en_selftest.c | |||
| @@ -0,0 +1,180 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
| 3 | * | ||
| 4 | * This software is available to you under a choice of one of two | ||
| 5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 6 | * General Public License (GPL) Version 2, available from the file | ||
| 7 | * COPYING in the main directory of this source tree, or the | ||
| 8 | * OpenIB.org BSD license below: | ||
| 9 | * | ||
| 10 | * Redistribution and use in source and binary forms, with or | ||
| 11 | * without modification, are permitted provided that the following | ||
| 12 | * conditions are met: | ||
| 13 | * | ||
| 14 | * - Redistributions of source code must retain the above | ||
| 15 | * copyright notice, this list of conditions and the following | ||
| 16 | * disclaimer. | ||
| 17 | * | ||
| 18 | * - Redistributions in binary form must reproduce the above | ||
| 19 | * copyright notice, this list of conditions and the following | ||
| 20 | * disclaimer in the documentation and/or other materials | ||
| 21 | * provided with the distribution. | ||
| 22 | * | ||
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 30 | * SOFTWARE. | ||
| 31 | * | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <linux/kernel.h> | ||
| 35 | #include <linux/ethtool.h> | ||
| 36 | #include <linux/netdevice.h> | ||
| 37 | #include <linux/delay.h> | ||
| 38 | #include <linux/mlx4/driver.h> | ||
| 39 | |||
| 40 | #include "mlx4_en.h" | ||
| 41 | |||
| 42 | |||
| 43 | static int mlx4_en_test_registers(struct mlx4_en_priv *priv) | ||
| 44 | { | ||
| 45 | return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK, | ||
| 46 | MLX4_CMD_TIME_CLASS_A); | ||
| 47 | } | ||
| 48 | |||
| 49 | static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv) | ||
| 50 | { | ||
| 51 | struct sk_buff *skb; | ||
| 52 | struct ethhdr *ethh; | ||
| 53 | unsigned char *packet; | ||
| 54 | unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD; | ||
| 55 | unsigned int i; | ||
| 56 | int err; | ||
| 57 | |||
| 58 | |||
| 59 | /* build the pkt before xmit */ | ||
| 60 | skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN); | ||
| 61 | if (!skb) { | ||
| 62 | en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n"); | ||
| 63 | return -ENOMEM; | ||
| 64 | } | ||
| 65 | skb_reserve(skb, NET_IP_ALIGN); | ||
| 66 | |||
| 67 | ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr)); | ||
| 68 | packet = (unsigned char *)skb_put(skb, packet_size); | ||
| 69 | memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN); | ||
| 70 | memset(ethh->h_source, 0, ETH_ALEN); | ||
| 71 | ethh->h_proto = htons(ETH_P_ARP); | ||
| 72 | skb_set_mac_header(skb, 0); | ||
| 73 | for (i = 0; i < packet_size; ++i) /* fill our packet */ | ||
| 74 | packet[i] = (unsigned char)(i & 0xff); | ||
| 75 | |||
| 76 | /* xmit the pkt */ | ||
| 77 | err = mlx4_en_xmit(skb, priv->dev); | ||
| 78 | return err; | ||
| 79 | } | ||
| 80 | |||
| 81 | static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) | ||
| 82 | { | ||
| 83 | u32 loopback_ok = 0; | ||
| 84 | int i; | ||
| 85 | |||
| 86 | |||
| 87 | priv->loopback_ok = 0; | ||
| 88 | priv->validate_loopback = 1; | ||
| 89 | |||
| 90 | /* xmit */ | ||
| 91 | if (mlx4_en_test_loopback_xmit(priv)) { | ||
| 92 | en_err(priv, "Transmitting loopback packet failed\n"); | ||
| 93 | goto mlx4_en_test_loopback_exit; | ||
| 94 | } | ||
| 95 | |||
| 96 | /* polling for result */ | ||
| 97 | for (i = 0; i < MLX4_EN_LOOPBACK_RETRIES; ++i) { | ||
| 98 | msleep(MLX4_EN_LOOPBACK_TIMEOUT); | ||
| 99 | if (priv->loopback_ok) { | ||
| 100 | loopback_ok = 1; | ||
| 101 | break; | ||
| 102 | } | ||
| 103 | } | ||
| 104 | if (!loopback_ok) | ||
| 105 | en_err(priv, "Loopback packet didn't arrive\n"); | ||
| 106 | |||
| 107 | mlx4_en_test_loopback_exit: | ||
| 108 | |||
| 109 | priv->validate_loopback = 0; | ||
| 110 | return !loopback_ok; | ||
| 111 | } | ||
| 112 | |||
| 113 | |||
| 114 | static int mlx4_en_test_link(struct mlx4_en_priv *priv) | ||
| 115 | { | ||
| 116 | if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) | ||
| 117 | return -ENOMEM; | ||
| 118 | if (priv->port_state.link_state == 1) | ||
| 119 | return 0; | ||
| 120 | else | ||
| 121 | return 1; | ||
| 122 | } | ||
| 123 | |||
| 124 | static int mlx4_en_test_speed(struct mlx4_en_priv *priv) | ||
| 125 | { | ||
| 126 | |||
| 127 | if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) | ||
| 128 | return -ENOMEM; | ||
| 129 | |||
| 130 | /* The device currently only supports 10G speed */ | ||
| 131 | if (priv->port_state.link_speed != SPEED_10000) | ||
| 132 | return priv->port_state.link_speed; | ||
| 133 | return 0; | ||
| 134 | } | ||
| 135 | |||
| 136 | |||
| 137 | void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf) | ||
| 138 | { | ||
| 139 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 140 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 141 | struct mlx4_en_tx_ring *tx_ring; | ||
| 142 | int i, carrier_ok; | ||
| 143 | |||
| 144 | memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST); | ||
| 145 | |||
| 146 | if (*flags & ETH_TEST_FL_OFFLINE) { | ||
| 147 | /* disable the interface */ | ||
| 148 | carrier_ok = netif_carrier_ok(dev); | ||
| 149 | |||
| 150 | netif_carrier_off(dev); | ||
| 151 | retry_tx: | ||
| 152 | /* Wait until all tx queues are empty. | ||
| 153 | * There should not be any additional incoming traffic | ||
| 154 | * since we turned the carrier off */ | ||
| 155 | msleep(200); | ||
| 156 | for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) { | ||
| 157 | tx_ring = &priv->tx_ring[i]; | ||
| 158 | if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb)) | ||
| 159 | goto retry_tx; | ||
| 160 | } | ||
| 161 | |||
| 162 | if (priv->mdev->dev->caps.flags & | ||
| 163 | MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { | ||
| 164 | buf[3] = mlx4_en_test_registers(priv); | ||
| 165 | buf[4] = mlx4_en_test_loopback(priv); | ||
| 166 | } | ||
| 167 | |||
| 168 | if (carrier_ok) | ||
| 169 | netif_carrier_on(dev); | ||
| 170 | |||
| 171 | } | ||
| 172 | buf[0] = mlx4_test_interrupts(mdev->dev); | ||
| 173 | buf[1] = mlx4_en_test_link(priv); | ||
| 174 | buf[2] = mlx4_en_test_speed(priv); | ||
| 175 | |||
| 176 | for (i = 0; i < MLX4_EN_NUM_SELF_TEST; i++) { | ||
| 177 | if (buf[i]) | ||
| 178 | *flags |= ETH_TEST_FL_FAILED; | ||
| 179 | } | ||
| 180 | } | ||
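
The result slots filled above follow a fixed layout. Hypothetical names for the indices (the slot numbers come from the code; the identifiers are ours):

```c
/* Hypothetical index names for the ethtool selftest result buffer; slots
 * 3 and 4 are only filled for offline tests on hardware advertising
 * MLX4_DEV_CAP_FLAG_UC_LOOPBACK. */
enum example_selftest_slot {
	EX_TEST_IRQ	 = 0,	/* mlx4_test_interrupts()  */
	EX_TEST_LINK	 = 1,	/* mlx4_en_test_link()     */
	EX_TEST_SPEED	 = 2,	/* mlx4_en_test_speed()    */
	EX_TEST_REGS	 = 3,	/* mlx4_en_test_registers() */
	EX_TEST_LOOPBACK = 4,	/* mlx4_en_test_loopback() */
};
```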
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c new file mode 100644 index 00000000000..f76ab6bf309 --- /dev/null +++ b/drivers/net/mlx4/en_tx.c | |||
| @@ -0,0 +1,828 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
| 3 | * | ||
| 4 | * This software is available to you under a choice of one of two | ||
| 5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 6 | * General Public License (GPL) Version 2, available from the file | ||
| 7 | * COPYING in the main directory of this source tree, or the | ||
| 8 | * OpenIB.org BSD license below: | ||
| 9 | * | ||
| 10 | * Redistribution and use in source and binary forms, with or | ||
| 11 | * without modification, are permitted provided that the following | ||
| 12 | * conditions are met: | ||
| 13 | * | ||
| 14 | * - Redistributions of source code must retain the above | ||
| 15 | * copyright notice, this list of conditions and the following | ||
| 16 | * disclaimer. | ||
| 17 | * | ||
| 18 | * - Redistributions in binary form must reproduce the above | ||
| 19 | * copyright notice, this list of conditions and the following | ||
| 20 | * disclaimer in the documentation and/or other materials | ||
| 21 | * provided with the distribution. | ||
| 22 | * | ||
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 30 | * SOFTWARE. | ||
| 31 | * | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <asm/page.h> | ||
| 35 | #include <linux/mlx4/cq.h> | ||
| 36 | #include <linux/slab.h> | ||
| 37 | #include <linux/mlx4/qp.h> | ||
| 38 | #include <linux/skbuff.h> | ||
| 39 | #include <linux/if_vlan.h> | ||
| 40 | #include <linux/vmalloc.h> | ||
| 41 | #include <linux/tcp.h> | ||
| 42 | |||
| 43 | #include "mlx4_en.h" | ||
| 44 | |||
| 45 | enum { | ||
| 46 | MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ | ||
| 47 | MAX_BF = 256, | ||
| 48 | }; | ||
| 49 | |||
| 50 | static int inline_thold __read_mostly = MAX_INLINE; | ||
| 51 | |||
| 52 | module_param_named(inline_thold, inline_thold, int, 0444); | ||
| 53 | MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); | ||
| 54 | |||
| 55 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, | ||
| 56 | struct mlx4_en_tx_ring *ring, int qpn, u32 size, | ||
| 57 | u16 stride) | ||
| 58 | { | ||
| 59 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 60 | int tmp; | ||
| 61 | int err; | ||
| 62 | |||
| 63 | ring->size = size; | ||
| 64 | ring->size_mask = size - 1; | ||
| 65 | ring->stride = stride; | ||
| 66 | |||
| 67 | inline_thold = min(inline_thold, MAX_INLINE); | ||
| 68 | |||
| 69 | spin_lock_init(&ring->comp_lock); | ||
| 70 | |||
| 71 | tmp = size * sizeof(struct mlx4_en_tx_info); | ||
| 72 | ring->tx_info = vmalloc(tmp); | ||
| 73 | if (!ring->tx_info) { | ||
| 74 | en_err(priv, "Failed allocating tx_info ring\n"); | ||
| 75 | return -ENOMEM; | ||
| 76 | } | ||
| 77 | en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n", | ||
| 78 | ring->tx_info, tmp); | ||
| 79 | |||
| 80 | ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL); | ||
| 81 | if (!ring->bounce_buf) { | ||
| 82 | en_err(priv, "Failed allocating bounce buffer\n"); | ||
| 83 | err = -ENOMEM; | ||
| 84 | goto err_tx; | ||
| 85 | } | ||
| 86 | ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE); | ||
| 87 | |||
| 88 | err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, | ||
| 89 | 2 * PAGE_SIZE); | ||
| 90 | if (err) { | ||
| 91 | en_err(priv, "Failed allocating hwq resources\n"); | ||
| 92 | goto err_bounce; | ||
| 93 | } | ||
| 94 | |||
| 95 | err = mlx4_en_map_buffer(&ring->wqres.buf); | ||
| 96 | if (err) { | ||
| 97 | en_err(priv, "Failed to map TX buffer\n"); | ||
| 98 | goto err_hwq_res; | ||
| 99 | } | ||
| 100 | |||
| 101 | ring->buf = ring->wqres.buf.direct.buf; | ||
| 102 | |||
| 103 | en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d " | ||
| 104 | "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, | ||
| 105 | ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); | ||
| 106 | |||
| 107 | ring->qpn = qpn; | ||
| 108 | err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); | ||
| 109 | if (err) { | ||
| 110 | en_err(priv, "Failed allocating qp %d\n", ring->qpn); | ||
| 111 | goto err_map; | ||
| 112 | } | ||
| 113 | ring->qp.event = mlx4_en_sqp_event; | ||
| 114 | |||
| 115 | err = mlx4_bf_alloc(mdev->dev, &ring->bf); | ||
| 116 | if (err) { | ||
| 117 | en_dbg(DRV, priv, "working without blueflame (%d)", err); | ||
| 118 | ring->bf.uar = &mdev->priv_uar; | ||
| 119 | ring->bf.uar->map = mdev->uar_map; | ||
| 120 | ring->bf_enabled = false; | ||
| 121 | } else | ||
| 122 | ring->bf_enabled = true; | ||
| 123 | |||
| 124 | return 0; | ||
| 125 | |||
| 126 | err_map: | ||
| 127 | mlx4_en_unmap_buffer(&ring->wqres.buf); | ||
| 128 | err_hwq_res: | ||
| 129 | mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); | ||
| 130 | err_bounce: | ||
| 131 | kfree(ring->bounce_buf); | ||
| 132 | ring->bounce_buf = NULL; | ||
| 133 | err_tx: | ||
| 134 | vfree(ring->tx_info); | ||
| 135 | ring->tx_info = NULL; | ||
| 136 | return err; | ||
| 137 | } | ||
| 138 | |||
| 139 | void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, | ||
| 140 | struct mlx4_en_tx_ring *ring) | ||
| 141 | { | ||
| 142 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 143 | en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); | ||
| 144 | |||
| 145 | if (ring->bf_enabled) | ||
| 146 | mlx4_bf_free(mdev->dev, &ring->bf); | ||
| 147 | mlx4_qp_remove(mdev->dev, &ring->qp); | ||
| 148 | mlx4_qp_free(mdev->dev, &ring->qp); | ||
| 149 | mlx4_qp_release_range(mdev->dev, ring->qpn, 1); | ||
| 150 | mlx4_en_unmap_buffer(&ring->wqres.buf); | ||
| 151 | mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); | ||
| 152 | kfree(ring->bounce_buf); | ||
| 153 | ring->bounce_buf = NULL; | ||
| 154 | vfree(ring->tx_info); | ||
| 155 | ring->tx_info = NULL; | ||
| 156 | } | ||
| 157 | |||
| 158 | int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, | ||
| 159 | struct mlx4_en_tx_ring *ring, | ||
| 160 | int cq) | ||
| 161 | { | ||
| 162 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 163 | int err; | ||
| 164 | |||
| 165 | ring->cqn = cq; | ||
| 166 | ring->prod = 0; | ||
| 167 | ring->cons = 0xffffffff; | ||
| 168 | ring->last_nr_txbb = 1; | ||
| 169 | ring->poll_cnt = 0; | ||
| 170 | ring->blocked = 0; | ||
| 171 | memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); | ||
| 172 | memset(ring->buf, 0, ring->buf_size); | ||
| 173 | |||
| 174 | ring->qp_state = MLX4_QP_STATE_RST; | ||
| 175 | ring->doorbell_qpn = ring->qp.qpn << 8; | ||
| 176 | |||
| 177 | mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, | ||
| 178 | ring->cqn, &ring->context); | ||
| 179 | if (ring->bf_enabled) | ||
| 180 | ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); | ||
| 181 | |||
| 182 | err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, | ||
| 183 | &ring->qp, &ring->qp_state); | ||
| 184 | |||
| 185 | return err; | ||
| 186 | } | ||
| 187 | |||
| 188 | void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, | ||
| 189 | struct mlx4_en_tx_ring *ring) | ||
| 190 | { | ||
| 191 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 192 | |||
| 193 | mlx4_qp_modify(mdev->dev, NULL, ring->qp_state, | ||
| 194 | MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); | ||
| 195 | } | ||
| 196 | |||
| 197 | |||
| 198 | static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, | ||
| 199 | struct mlx4_en_tx_ring *ring, | ||
| 200 | int index, u8 owner) | ||
| 201 | { | ||
| 202 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 203 | struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; | ||
| 204 | struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE; | ||
| 205 | struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset; | ||
| 206 | struct sk_buff *skb = tx_info->skb; | ||
| 207 | struct skb_frag_struct *frag; | ||
| 208 | void *end = ring->buf + ring->buf_size; | ||
| 209 | int frags = skb_shinfo(skb)->nr_frags; | ||
| 210 | int i; | ||
| 211 | __be32 *ptr = (__be32 *)tx_desc; | ||
| 212 | __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT)); | ||
| 213 | |||
| 214 | /* Optimize the common case when there are no wraparounds */ | ||
| 215 | if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) { | ||
| 216 | if (!tx_info->inl) { | ||
| 217 | if (tx_info->linear) { | ||
| 218 | pci_unmap_single(mdev->pdev, | ||
| 219 | (dma_addr_t) be64_to_cpu(data->addr), | ||
| 220 | be32_to_cpu(data->byte_count), | ||
| 221 | PCI_DMA_TODEVICE); | ||
| 222 | ++data; | ||
| 223 | } | ||
| 224 | |||
| 225 | for (i = 0; i < frags; i++) { | ||
| 226 | frag = &skb_shinfo(skb)->frags[i]; | ||
| 227 | pci_unmap_page(mdev->pdev, | ||
| 228 | (dma_addr_t) be64_to_cpu(data[i].addr), | ||
| 229 | frag->size, PCI_DMA_TODEVICE); | ||
| 230 | } | ||
| 231 | } | ||
| 232 | /* Stamp the freed descriptor */ | ||
| 233 | for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) { | ||
| 234 | *ptr = stamp; | ||
| 235 | ptr += STAMP_DWORDS; | ||
| 236 | } | ||
| 237 | |||
| 238 | } else { | ||
| 239 | if (!tx_info->inl) { | ||
| 240 | if ((void *) data >= end) { | ||
| 241 | data = ring->buf + ((void *)data - end); | ||
| 242 | } | ||
| 243 | |||
| 244 | if (tx_info->linear) { | ||
| 245 | pci_unmap_single(mdev->pdev, | ||
| 246 | (dma_addr_t) be64_to_cpu(data->addr), | ||
| 247 | be32_to_cpu(data->byte_count), | ||
| 248 | PCI_DMA_TODEVICE); | ||
| 249 | ++data; | ||
| 250 | } | ||
| 251 | |||
| 252 | for (i = 0; i < frags; i++) { | ||
| 253 | /* Check for wraparound before unmapping */ | ||
| 254 | if ((void *) data >= end) | ||
| 255 | data = ring->buf; | ||
| 256 | frag = &skb_shinfo(skb)->frags[i]; | ||
| 257 | pci_unmap_page(mdev->pdev, | ||
| 258 | (dma_addr_t) be64_to_cpu(data->addr), | ||
| 259 | frag->size, PCI_DMA_TODEVICE); | ||
| 260 | ++data; | ||
| 261 | } | ||
| 262 | } | ||
| 263 | /* Stamp the freed descriptor */ | ||
| 264 | for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) { | ||
| 265 | *ptr = stamp; | ||
| 266 | ptr += STAMP_DWORDS; | ||
| 267 | if ((void *) ptr >= end) { | ||
| 268 | ptr = ring->buf; | ||
| 269 | stamp ^= cpu_to_be32(0x80000000); | ||
| 270 | } | ||
| 271 | } | ||
| 272 | |||
| 273 | } | ||
| 274 | dev_kfree_skb_any(skb); | ||
| 275 | return tx_info->nr_txbb; | ||
| 276 | } | ||
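mlx4_en_free_tx_desc() stamps each freed TXBB with the next-pass ownership value so a later scan can tell software-owned descriptors from hardware-owned ones. A minimal userspace sketch of that walk (byte-swapping elided), assuming the STAMP_* constants as defined in this tree's mlx4_en.h (STAMP_STRIDE 64, STAMP_DWORDS 16, STAMP_SHIFT 31, STAMP_VAL 0x7fffffff):

```c
/* Illustrative sketch, not driver code: stamp one dword per 64-byte
 * line of a 4-TXBB descriptor, exactly as the loop above does. */
#include <stdint.h>
#include <stdio.h>

#define TXBB_SIZE    64
#define STAMP_STRIDE 64
#define STAMP_DWORDS (STAMP_STRIDE / 4)
#define STAMP_SHIFT  31
#define STAMP_VAL    0x7fffffff

int main(void)
{
	uint32_t buf[4 * TXBB_SIZE / 4] = {0};   /* one 4-TXBB descriptor */
	int nr_txbb = 4, owner = 1, i;
	uint32_t stamp = STAMP_VAL | ((uint32_t)!!owner << STAMP_SHIFT);
	uint32_t *ptr = buf;

	for (i = 0; i < nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
		*ptr = stamp;          /* one write per cache line suffices */
		ptr += STAMP_DWORDS;   /* advance 64 bytes */
	}
	printf("stamped %d lines with 0x%08x\n",
	       nr_txbb * TXBB_SIZE / STAMP_STRIDE, stamp);
	return 0;
}
```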
| 277 | |||
| 278 | |||
| 279 | int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) | ||
| 280 | { | ||
| 281 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 282 | int cnt = 0; | ||
| 283 | |||
| 284 | /* Skip last polled descriptor */ | ||
| 285 | ring->cons += ring->last_nr_txbb; | ||
| 286 | en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n", | ||
| 287 | ring->cons, ring->prod); | ||
| 288 | |||
| 289 | if ((u32) (ring->prod - ring->cons) > ring->size) { | ||
| 290 | if (netif_msg_tx_err(priv)) | ||
| 291 | en_warn(priv, "Tx consumer passed producer!\n"); | ||
| 292 | return 0; | ||
| 293 | } | ||
| 294 | |||
| 295 | while (ring->cons != ring->prod) { | ||
| 296 | ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring, | ||
| 297 | ring->cons & ring->size_mask, | ||
| 298 | !!(ring->cons & ring->size)); | ||
| 299 | ring->cons += ring->last_nr_txbb; | ||
| 300 | cnt++; | ||
| 301 | } | ||
| 302 | |||
| 303 | if (cnt) | ||
| 304 | en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt); | ||
| 305 | |||
| 306 | return cnt; | ||
| 307 | } | ||
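The producer/consumer counters here are free-running 32-bit values over a power-of-two ring, which is why the sanity check above can use plain unsigned subtraction. A small sketch of the index arithmetic (sizes are illustrative):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t size = 256, size_mask = size - 1;   /* size is a power of two */
	uint32_t prod = 1000, cons = 900;            /* free-running counters */

	assert((uint32_t)(prod - cons) <= size);     /* outstanding TXBBs */
	uint32_t slot  = cons & size_mask;           /* physical ring slot */
	uint32_t owner = !!(cons & size);            /* flips once per ring pass */
	printf("slot %u, owner bit %u\n", slot, owner);
	return 0;
}
```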
| 308 | |||
| 309 | |||
| 310 | static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) | ||
| 311 | { | ||
| 312 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 313 | struct mlx4_cq *mcq = &cq->mcq; | ||
| 314 | struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; | ||
| 315 | struct mlx4_cqe *cqe = cq->buf; | ||
| 316 | u16 index; | ||
| 317 | u16 new_index; | ||
| 318 | u32 txbbs_skipped = 0; | ||
| 319 | u32 cq_last_sav; | ||
| 320 | |||
| 321 | /* index always points to the first TXBB of the last polled descriptor */ | ||
| 322 | index = ring->cons & ring->size_mask; | ||
| 323 | new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask; | ||
| 324 | if (index == new_index) | ||
| 325 | return; | ||
| 326 | |||
| 327 | if (!priv->port_up) | ||
| 328 | return; | ||
| 329 | |||
| 330 | /* | ||
| 331 | * We use a two-stage loop: | ||
| 332 | * - the first samples the HW-updated CQE | ||
| 333 | * - the second frees TXBBs until the last sample | ||
| 334 | * This lets us amortize CQE cache misses, while still polling the CQ | ||
| 335 | * until it is quiescent. | ||
| 336 | */ | ||
| 337 | cq_last_sav = mcq->cons_index; | ||
| 338 | do { | ||
| 339 | do { | ||
| 340 | /* Skip over last polled CQE */ | ||
| 341 | index = (index + ring->last_nr_txbb) & ring->size_mask; | ||
| 342 | txbbs_skipped += ring->last_nr_txbb; | ||
| 343 | |||
| 344 | /* Poll next CQE */ | ||
| 345 | ring->last_nr_txbb = mlx4_en_free_tx_desc( | ||
| 346 | priv, ring, index, | ||
| 347 | !!((ring->cons + txbbs_skipped) & | ||
| 348 | ring->size)); | ||
| 349 | ++mcq->cons_index; | ||
| 350 | |||
| 351 | } while (index != new_index); | ||
| 352 | |||
| 353 | new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask; | ||
| 354 | } while (index != new_index); | ||
| 355 | AVG_PERF_COUNTER(priv->pstats.tx_coal_avg, | ||
| 356 | (u32) (mcq->cons_index - cq_last_sav)); | ||
| 357 | |||
| 358 | /* | ||
| 359 | * To prevent CQ overflow we first update CQ consumer and only then | ||
| 360 | * the ring consumer. | ||
| 361 | */ | ||
| 362 | mlx4_cq_set_ci(mcq); | ||
| 363 | wmb(); | ||
| 364 | ring->cons += txbbs_skipped; | ||
| 365 | |||
| 366 | /* Wakeup Tx queue if this ring stopped it */ | ||
| 367 | if (unlikely(ring->blocked)) { | ||
| 368 | if ((u32) (ring->prod - ring->cons) <= | ||
| 369 | ring->size - HEADROOM - MAX_DESC_TXBBS) { | ||
| 370 | ring->blocked = 0; | ||
| 371 | netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring)); | ||
| 372 | priv->port_stats.wake_queue++; | ||
| 373 | } | ||
| 374 | } | ||
| 375 | } | ||
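The shape of the two-stage loop in mlx4_en_process_tx_cq() is easier to see stripped of the driver types. A minimal sketch of the control flow, with the per-descriptor freeing elided (sample_hw() stands in for re-reading cqe->wqe_index):

```c
#include <stdint.h>
#include <stdio.h>

static uint16_t hw_index;                     /* stand-in for cqe->wqe_index */
static uint16_t sample_hw(void) { return hw_index; }

static void drain(uint16_t *sw)
{
	uint16_t target = sample_hw();
	if (*sw == target)
		return;                       /* nothing completed */
	do {
		do {
			/* free the descriptor at *sw and skip its TXBBs */
			++*sw;
		} while (*sw != target);
		target = sample_hw();         /* re-sample once per batch */
	} while (*sw != target);
}

int main(void)
{
	uint16_t sw = 0;
	hw_index = 5;
	drain(&sw);
	printf("drained to %u\n", sw);
	return 0;
}
```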
| 376 | |||
| 377 | void mlx4_en_tx_irq(struct mlx4_cq *mcq) | ||
| 378 | { | ||
| 379 | struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); | ||
| 380 | struct mlx4_en_priv *priv = netdev_priv(cq->dev); | ||
| 381 | struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; | ||
| 382 | |||
| 383 | if (!spin_trylock(&ring->comp_lock)) | ||
| 384 | return; | ||
| 385 | mlx4_en_process_tx_cq(cq->dev, cq); | ||
| 386 | mod_timer(&cq->timer, jiffies + 1); | ||
| 387 | spin_unlock(&ring->comp_lock); | ||
| 388 | } | ||
| 389 | |||
| 390 | |||
| 391 | void mlx4_en_poll_tx_cq(unsigned long data) | ||
| 392 | { | ||
| 393 | struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data; | ||
| 394 | struct mlx4_en_priv *priv = netdev_priv(cq->dev); | ||
| 395 | struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; | ||
| 396 | u32 inflight; | ||
| 397 | |||
| 398 | INC_PERF_COUNTER(priv->pstats.tx_poll); | ||
| 399 | |||
| 400 | if (!spin_trylock_irq(&ring->comp_lock)) { | ||
| 401 | mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); | ||
| 402 | return; | ||
| 403 | } | ||
| 404 | mlx4_en_process_tx_cq(cq->dev, cq); | ||
| 405 | inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb); | ||
| 406 | |||
| 407 | /* If there are still packets in flight and the timer has not already | ||
| 408 | * been scheduled by the Tx routine then schedule it here to guarantee | ||
| 409 | * completion processing of these packets */ | ||
| 410 | if (inflight && priv->port_up) | ||
| 411 | mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); | ||
| 412 | |||
| 413 | spin_unlock_irq(&ring->comp_lock); | ||
| 414 | } | ||
| 415 | |||
| 416 | static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, | ||
| 417 | struct mlx4_en_tx_ring *ring, | ||
| 418 | u32 index, | ||
| 419 | unsigned int desc_size) | ||
| 420 | { | ||
| 421 | u32 copy = (ring->size - index) * TXBB_SIZE; | ||
| 422 | int i; | ||
| 423 | |||
| 424 | for (i = desc_size - copy - 4; i >= 0; i -= 4) { | ||
| 425 | if ((i & (TXBB_SIZE - 1)) == 0) | ||
| 426 | wmb(); | ||
| 427 | |||
| 428 | *((u32 *) (ring->buf + i)) = | ||
| 429 | *((u32 *) (ring->bounce_buf + copy + i)); | ||
| 430 | } | ||
| 431 | |||
| 432 | for (i = copy - 4; i >= 4 ; i -= 4) { | ||
| 433 | if ((i & (TXBB_SIZE - 1)) == 0) | ||
| 434 | wmb(); | ||
| 435 | |||
| 436 | *((u32 *) (ring->buf + index * TXBB_SIZE + i)) = | ||
| 437 | *((u32 *) (ring->bounce_buf + i)); | ||
| 438 | } | ||
| 439 | |||
| 440 | /* Return real descriptor location */ | ||
| 441 | return ring->buf + index * TXBB_SIZE; | ||
| 442 | } | ||
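mlx4_en_bounce_to_desc() copies the descriptor back in two runs, working backwards in 4-byte chunks with a barrier at every TXBB boundary so the first TXBB, which carries the ownership bit, is written last. The split arithmetic, with illustrative numbers:

```c
#include <stdio.h>

#define TXBB_SIZE 64

int main(void)
{
	unsigned size = 256;                 /* ring size in TXBBs */
	unsigned index = 254;                /* descriptor starts 2 TXBBs from the end */
	unsigned desc_size = 3 * TXBB_SIZE;  /* a 3-TXBB descriptor */
	unsigned copy = (size - index) * TXBB_SIZE;   /* bytes at the ring tail */

	printf("tail bytes: %u, bytes wrapped to slot 0: %u\n",
	       copy, desc_size - copy);      /* 128 and 64 */
	return 0;
}
```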
| 443 | |||
| 444 | static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind) | ||
| 445 | { | ||
| 446 | struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind]; | ||
| 447 | struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind]; | ||
| 448 | unsigned long flags; | ||
| 449 | |||
| 450 | /* If we don't have a pending timer, set one up to catch our recent | ||
| 451 | post in case the interface becomes idle */ | ||
| 452 | if (!timer_pending(&cq->timer)) | ||
| 453 | mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); | ||
| 454 | |||
| 455 | /* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */ | ||
| 456 | if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) | ||
| 457 | if (spin_trylock_irqsave(&ring->comp_lock, flags)) { | ||
| 458 | mlx4_en_process_tx_cq(priv->dev, cq); | ||
| 459 | spin_unlock_irqrestore(&ring->comp_lock, flags); | ||
| 460 | } | ||
| 461 | } | ||
| 462 | |||
| 463 | static void *get_frag_ptr(struct sk_buff *skb) | ||
| 464 | { | ||
| 465 | struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; | ||
| 466 | struct page *page = frag->page; | ||
| 467 | void *ptr; | ||
| 468 | |||
| 469 | ptr = page_address(page); | ||
| 470 | if (unlikely(!ptr)) | ||
| 471 | return NULL; | ||
| 472 | |||
| 473 | return ptr + frag->page_offset; | ||
| 474 | } | ||
| 475 | |||
| 476 | static int is_inline(struct sk_buff *skb, void **pfrag) | ||
| 477 | { | ||
| 478 | void *ptr; | ||
| 479 | |||
| 480 | if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) { | ||
| 481 | if (skb_shinfo(skb)->nr_frags == 1) { | ||
| 482 | ptr = get_frag_ptr(skb); | ||
| 483 | if (unlikely(!ptr)) | ||
| 484 | return 0; | ||
| 485 | |||
| 486 | if (pfrag) | ||
| 487 | *pfrag = ptr; | ||
| 488 | |||
| 489 | return 1; | ||
| 490 | } else if (unlikely(skb_shinfo(skb)->nr_frags)) | ||
| 491 | return 0; | ||
| 492 | else | ||
| 493 | return 1; | ||
| 494 | } | ||
| 495 | |||
| 496 | return 0; | ||
| 497 | } | ||
| 498 | |||
| 499 | static int inline_size(struct sk_buff *skb) | ||
| 500 | { | ||
| 501 | if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg) | ||
| 502 | <= MLX4_INLINE_ALIGN) | ||
| 503 | return ALIGN(skb->len + CTRL_SIZE + | ||
| 504 | sizeof(struct mlx4_wqe_inline_seg), 16); | ||
| 505 | else | ||
| 506 | return ALIGN(skb->len + CTRL_SIZE + 2 * | ||
| 507 | sizeof(struct mlx4_wqe_inline_seg), 16); | ||
| 508 | } | ||
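inline_size() accounts for either one or two 4-byte inline headers, depending on whether the payload plus control segment fits within one MLX4_INLINE_ALIGN span. A worked sketch, assuming CTRL_SIZE 16, a 4-byte mlx4_wqe_inline_seg, and MLX4_INLINE_ALIGN 64 (check this tree's headers for the authoritative values):

```c
#include <stdio.h>

#define CTRL_SIZE         16   /* assumed sizeof(struct mlx4_wqe_ctrl_seg) */
#define INL_SEG_SIZE       4   /* assumed sizeof(struct mlx4_wqe_inline_seg) */
#define MLX4_INLINE_ALIGN 64   /* assumed */
#define ALIGN16(x) (((x) + 15) & ~15)

static int inline_size(int len)
{
	if (len + CTRL_SIZE + INL_SEG_SIZE <= MLX4_INLINE_ALIGN)
		return ALIGN16(len + CTRL_SIZE + INL_SEG_SIZE);
	return ALIGN16(len + CTRL_SIZE + 2 * INL_SEG_SIZE);
}

int main(void)
{
	printf("len 40  -> %d bytes\n", inline_size(40));   /* 60  -> 64  */
	printf("len 100 -> %d bytes\n", inline_size(100));  /* 124 -> 128 */
	return 0;
}
```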
| 509 | |||
| 510 | static int get_real_size(struct sk_buff *skb, struct net_device *dev, | ||
| 511 | int *lso_header_size) | ||
| 512 | { | ||
| 513 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 514 | int real_size; | ||
| 515 | |||
| 516 | if (skb_is_gso(skb)) { | ||
| 517 | *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb); | ||
| 518 | real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE + | ||
| 519 | ALIGN(*lso_header_size + 4, DS_SIZE); | ||
| 520 | if (unlikely(*lso_header_size != skb_headlen(skb))) { | ||
| 521 | /* We add a segment for the skb linear buffer only if | ||
| 522 | * it contains data */ | ||
| 523 | if (*lso_header_size < skb_headlen(skb)) | ||
| 524 | real_size += DS_SIZE; | ||
| 525 | else { | ||
| 526 | if (netif_msg_tx_err(priv)) | ||
| 527 | en_warn(priv, "Non-linear headers\n"); | ||
| 528 | return 0; | ||
| 529 | } | ||
| 530 | } | ||
| 531 | } else { | ||
| 532 | *lso_header_size = 0; | ||
| 533 | if (!is_inline(skb, NULL)) | ||
| 534 | real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE; | ||
| 535 | else | ||
| 536 | real_size = inline_size(skb); | ||
| 537 | } | ||
| 538 | |||
| 539 | return real_size; | ||
| 540 | } | ||
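For the LSO branch, the descriptor holds the control segment, one data segment per page fragment, and the packet headers padded out to a data-segment boundary (the extra 4 bytes cover the inline-header dword). A worked example, assuming CTRL_SIZE and DS_SIZE are both 16 bytes:

```c
#include <stdio.h>

#define CTRL_SIZE 16   /* assumed */
#define DS_SIZE   16   /* assumed */
#define ALIGN_TO(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int lso_hdr = 54;       /* Ethernet 14 + IPv4 20 + TCP 20 */
	int nr_frags = 2;
	int real = CTRL_SIZE + nr_frags * DS_SIZE +
	           ALIGN_TO(lso_hdr + 4, DS_SIZE);   /* 16 + 32 + 64 = 112 */

	printf("real_size = %d, TXBBs = %d\n", real, (real + 63) / 64);
	return 0;
}
```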
| 541 | |||
| 542 | static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb, | ||
| 543 | int real_size, u16 *vlan_tag, int tx_ind, void *fragptr) | ||
| 544 | { | ||
| 545 | struct mlx4_wqe_inline_seg *inl = &tx_desc->inl; | ||
| 546 | int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl; | ||
| 547 | |||
| 548 | if (skb->len <= spc) { | ||
| 549 | inl->byte_count = cpu_to_be32(1 << 31 | skb->len); | ||
| 550 | skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb)); | ||
| 551 | if (skb_shinfo(skb)->nr_frags) | ||
| 552 | memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr, | ||
| 553 | skb_shinfo(skb)->frags[0].size); | ||
| 554 | |||
| 555 | } else { | ||
| 556 | inl->byte_count = cpu_to_be32(1 << 31 | spc); | ||
| 557 | if (skb_headlen(skb) <= spc) { | ||
| 558 | skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb)); | ||
| 559 | if (skb_headlen(skb) < spc) { | ||
| 560 | memcpy(((void *)(inl + 1)) + skb_headlen(skb), | ||
| 561 | fragptr, spc - skb_headlen(skb)); | ||
| 562 | fragptr += spc - skb_headlen(skb); | ||
| 563 | } | ||
| 564 | inl = (void *) (inl + 1) + spc; | ||
| 565 | memcpy(((void *)(inl + 1)), fragptr, skb->len - spc); | ||
| 566 | } else { | ||
| 567 | skb_copy_from_linear_data(skb, inl + 1, spc); | ||
| 568 | inl = (void *) (inl + 1) + spc; | ||
| 569 | skb_copy_from_linear_data_offset(skb, spc, inl + 1, | ||
| 570 | skb_headlen(skb) - spc); | ||
| 571 | if (skb_shinfo(skb)->nr_frags) | ||
| 572 | memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc, | ||
| 573 | fragptr, skb_shinfo(skb)->frags[0].size); | ||
| 574 | } | ||
| 575 | |||
| 576 | wmb(); | ||
| 577 | inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc)); | ||
| 578 | } | ||
| 579 | tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag); | ||
| 580 | tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag); | ||
| 581 | tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; | ||
| 582 | } | ||
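When the packet does not fit in the first inline span, build_inline_wqe() writes a second inline segment and only sets its byte_count after a write barrier, so the HCA never sees a half-filled segment (the top bit of byte_count marks the segment as inline). The split point, under the same assumed constants as above:

```c
#include <stdio.h>

int main(void)
{
	int inline_align = 64, ctrl = 16, inl_hdr = 4;  /* assumed sizes */
	int spc = inline_align - ctrl - inl_hdr;        /* 44 bytes */
	int skb_len = 100;

	printf("first inline chunk: %d bytes, second chunk: %d bytes\n",
	       spc, skb_len - spc);                     /* 44 and 56 */
	return 0;
}
```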
| 583 | |||
| 584 | u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) | ||
| 585 | { | ||
| 586 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 587 | u16 vlan_tag = 0; | ||
| 588 | |||
| 589 | /* If we support per-priority flow control and the packet contains | ||
| 590 | * a vlan tag, send the packet to the TX ring assigned to that priority | ||
| 591 | */ | ||
| 592 | if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) { | ||
| 593 | vlan_tag = vlan_tx_tag_get(skb); | ||
| 594 | return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13); | ||
| 595 | } | ||
| 596 | |||
| 597 | return skb_tx_hash(dev, skb); | ||
| 598 | } | ||
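The ring selection exploits the 802.1Q TCI layout: the 3-bit priority occupies bits 15:13, so vlan_tag >> 13 indexes one of eight per-priority rings placed after the default rings. Illustrative, with MLX4_EN_NUM_TX_RINGS assumed to be 8:

```c
#include <stdio.h>

#define MLX4_EN_NUM_TX_RINGS 8   /* assumed default; see mlx4_en.h */

int main(void)
{
	unsigned tci = (5u << 13) | 100;   /* priority 5, VLAN ID 100 */
	printf("selected ring = %u\n",
	       MLX4_EN_NUM_TX_RINGS + (tci >> 13));   /* 8 + 5 = 13 */
	return 0;
}
```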
| 599 | |||
| 600 | static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt) | ||
| 601 | { | ||
| 602 | __iowrite64_copy(dst, src, bytecnt / 8); | ||
| 603 | } | ||
| 604 | |||
| 605 | netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | ||
| 606 | { | ||
| 607 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 608 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 609 | struct mlx4_en_tx_ring *ring; | ||
| 610 | struct mlx4_en_cq *cq; | ||
| 611 | struct mlx4_en_tx_desc *tx_desc; | ||
| 612 | struct mlx4_wqe_data_seg *data; | ||
| 613 | struct skb_frag_struct *frag; | ||
| 614 | struct mlx4_en_tx_info *tx_info; | ||
| 615 | struct ethhdr *ethh; | ||
| 616 | u64 mac; | ||
| 617 | u32 mac_l, mac_h; | ||
| 618 | int tx_ind = 0; | ||
| 619 | int nr_txbb; | ||
| 620 | int desc_size; | ||
| 621 | int real_size; | ||
| 622 | dma_addr_t dma; | ||
| 623 | u32 index, bf_index; | ||
| 624 | __be32 op_own; | ||
| 625 | u16 vlan_tag = 0; | ||
| 626 | int i; | ||
| 627 | int lso_header_size; | ||
| 628 | void *fragptr; | ||
| 629 | bool bounce = false; | ||
| 630 | |||
| 631 | if (!priv->port_up) | ||
| 632 | goto tx_drop; | ||
| 633 | |||
| 634 | real_size = get_real_size(skb, dev, &lso_header_size); | ||
| 635 | if (unlikely(!real_size)) | ||
| 636 | goto tx_drop; | ||
| 637 | |||
| 638 | /* Align descriptor to TXBB size */ | ||
| 639 | desc_size = ALIGN(real_size, TXBB_SIZE); | ||
| 640 | nr_txbb = desc_size / TXBB_SIZE; | ||
| 641 | if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { | ||
| 642 | if (netif_msg_tx_err(priv)) | ||
| 643 | en_warn(priv, "Oversized header or SG list\n"); | ||
| 644 | goto tx_drop; | ||
| 645 | } | ||
| 646 | |||
| 647 | tx_ind = skb->queue_mapping; | ||
| 648 | ring = &priv->tx_ring[tx_ind]; | ||
| 649 | if (vlan_tx_tag_present(skb)) | ||
| 650 | vlan_tag = vlan_tx_tag_get(skb); | ||
| 651 | |||
| 652 | /* Check available TXBBs and 2K spare for prefetch */ | ||
| 653 | if (unlikely(((int)(ring->prod - ring->cons)) > | ||
| 654 | ring->size - HEADROOM - MAX_DESC_TXBBS)) { | ||
| 655 | /* every full Tx ring stops queue */ | ||
| 656 | netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind)); | ||
| 657 | ring->blocked = 1; | ||
| 658 | priv->port_stats.queue_stopped++; | ||
| 659 | |||
| 660 | /* Use interrupts to find out when queue opened */ | ||
| 661 | cq = &priv->tx_cq[tx_ind]; | ||
| 662 | mlx4_en_arm_cq(priv, cq); | ||
| 663 | return NETDEV_TX_BUSY; | ||
| 664 | } | ||
| 665 | |||
| 666 | /* Track current inflight packets for performance analysis */ | ||
| 667 | AVG_PERF_COUNTER(priv->pstats.inflight_avg, | ||
| 668 | (u32) (ring->prod - ring->cons - 1)); | ||
| 669 | |||
| 670 | /* Packet is good - grab an index and transmit it */ | ||
| 671 | index = ring->prod & ring->size_mask; | ||
| 672 | bf_index = ring->prod; | ||
| 673 | |||
| 674 | /* See if we have enough space for a whole descriptor TXBB for setting | ||
| 675 | * SW ownership on the next descriptor; if not, use a bounce buffer. */ | ||
| 676 | if (likely(index + nr_txbb <= ring->size)) | ||
| 677 | tx_desc = ring->buf + index * TXBB_SIZE; | ||
| 678 | else { | ||
| 679 | tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; | ||
| 680 | bounce = true; | ||
| 681 | } | ||
| 682 | |||
| 683 | /* Save skb in tx_info ring */ | ||
| 684 | tx_info = &ring->tx_info[index]; | ||
| 685 | tx_info->skb = skb; | ||
| 686 | tx_info->nr_txbb = nr_txbb; | ||
| 687 | |||
| 688 | /* Prepare the ctrl segment, apart from opcode+ownership, which depends on | ||
| 689 | * whether LSO is used */ | ||
| 690 | tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); | ||
| 691 | tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag; | ||
| 692 | tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; | ||
| 693 | tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | | ||
| 694 | MLX4_WQE_CTRL_SOLICITED); | ||
| 695 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { | ||
| 696 | tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | | ||
| 697 | MLX4_WQE_CTRL_TCP_UDP_CSUM); | ||
| 698 | priv->port_stats.tx_chksum_offload++; | ||
| 699 | } | ||
| 700 | |||
| 701 | if (unlikely(priv->validate_loopback)) { | ||
| 702 | /* Copy dst mac address to wqe */ | ||
| 703 | skb_reset_mac_header(skb); | ||
| 704 | ethh = eth_hdr(skb); | ||
| 705 | if (ethh && ethh->h_dest) { | ||
| 706 | mac = mlx4_en_mac_to_u64(ethh->h_dest); | ||
| 707 | mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16); | ||
| 708 | mac_l = (u32) (mac & 0xffffffff); | ||
| 709 | tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h); | ||
| 710 | tx_desc->ctrl.imm = cpu_to_be32(mac_l); | ||
| 711 | } | ||
| 712 | } | ||
| 713 | |||
| 714 | /* Handle LSO (TSO) packets */ | ||
| 715 | if (lso_header_size) { | ||
| 716 | /* Mark opcode as LSO */ | ||
| 717 | op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) | | ||
| 718 | ((ring->prod & ring->size) ? | ||
| 719 | cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); | ||
| 720 | |||
| 721 | /* Fill in the LSO prefix */ | ||
| 722 | tx_desc->lso.mss_hdr_size = cpu_to_be32( | ||
| 723 | skb_shinfo(skb)->gso_size << 16 | lso_header_size); | ||
| 724 | |||
| 725 | /* Copy headers; | ||
| 726 | * note that we already verified that they are linear */ | ||
| 727 | memcpy(tx_desc->lso.header, skb->data, lso_header_size); | ||
| 728 | data = ((void *) &tx_desc->lso + | ||
| 729 | ALIGN(lso_header_size + 4, DS_SIZE)); | ||
| 730 | |||
| 731 | priv->port_stats.tso_packets++; | ||
| 732 | i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) + | ||
| 733 | !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size); | ||
| 734 | ring->bytes += skb->len + (i - 1) * lso_header_size; | ||
| 735 | ring->packets += i; | ||
| 736 | } else { | ||
| 737 | /* Normal (Non LSO) packet */ | ||
| 738 | op_own = cpu_to_be32(MLX4_OPCODE_SEND) | | ||
| 739 | ((ring->prod & ring->size) ? | ||
| 740 | cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); | ||
| 741 | data = &tx_desc->data; | ||
| 742 | ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN); | ||
| 743 | ring->packets++; | ||
| 744 | |||
| 745 | } | ||
| 746 | AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); | ||
| 747 | |||
| 748 | |||
| 749 | /* valid only for non-inline segments */ | ||
| 750 | tx_info->data_offset = (void *) data - (void *) tx_desc; | ||
| 751 | |||
| 752 | tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0; | ||
| 753 | data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1; | ||
| 754 | |||
| 755 | if (!is_inline(skb, &fragptr)) { | ||
| 756 | /* Map fragments */ | ||
| 757 | for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) { | ||
| 758 | frag = &skb_shinfo(skb)->frags[i]; | ||
| 759 | dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset, | ||
| 760 | frag->size, PCI_DMA_TODEVICE); | ||
| 761 | data->addr = cpu_to_be64(dma); | ||
| 762 | data->lkey = cpu_to_be32(mdev->mr.key); | ||
| 763 | wmb(); | ||
| 764 | data->byte_count = cpu_to_be32(frag->size); | ||
| 765 | --data; | ||
| 766 | } | ||
| 767 | |||
| 768 | /* Map linear part */ | ||
| 769 | if (tx_info->linear) { | ||
| 770 | dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size, | ||
| 771 | skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE); | ||
| 772 | data->addr = cpu_to_be64(dma); | ||
| 773 | data->lkey = cpu_to_be32(mdev->mr.key); | ||
| 774 | wmb(); | ||
| 775 | data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size); | ||
| 776 | } | ||
| 777 | tx_info->inl = 0; | ||
| 778 | } else { | ||
| 779 | build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr); | ||
| 780 | tx_info->inl = 1; | ||
| 781 | } | ||
| 782 | |||
| 783 | ring->prod += nr_txbb; | ||
| 784 | |||
| 785 | /* If we used a bounce buffer then copy descriptor back into place */ | ||
| 786 | if (bounce) | ||
| 787 | tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); | ||
| 788 | |||
| 789 | /* Run destructor before passing skb to HW */ | ||
| 790 | if (likely(!skb_shared(skb))) | ||
| 791 | skb_orphan(skb); | ||
| 792 | |||
| 793 | if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) { | ||
| 794 | *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); | ||
| 795 | op_own |= htonl((bf_index & 0xffff) << 8); | ||
| 796 | /* Ensure the new descriptor hits memory | ||
| 797 | * before setting ownership of this descriptor to HW */ | ||
| 798 | wmb(); | ||
| 799 | tx_desc->ctrl.owner_opcode = op_own; | ||
| 800 | |||
| 801 | wmb(); | ||
| 802 | |||
| 803 | mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl, | ||
| 804 | desc_size); | ||
| 805 | |||
| 806 | wmb(); | ||
| 807 | |||
| 808 | ring->bf.offset ^= ring->bf.buf_size; | ||
| 809 | } else { | ||
| 810 | /* Ensure the new descriptor hits memory | ||
| 811 | * before setting ownership of this descriptor to HW */ | ||
| 812 | wmb(); | ||
| 813 | tx_desc->ctrl.owner_opcode = op_own; | ||
| 814 | wmb(); | ||
| 815 | iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); | ||
| 816 | } | ||
| 817 | |||
| 818 | /* Poll CQ here */ | ||
| 819 | mlx4_en_xmit_poll(priv, tx_ind); | ||
| 820 | |||
| 821 | return NETDEV_TX_OK; | ||
| 822 | |||
| 823 | tx_drop: | ||
| 824 | dev_kfree_skb_any(skb); | ||
| 825 | priv->stats.tx_dropped++; | ||
| 826 | return NETDEV_TX_OK; | ||
| 827 | } | ||
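In the loopback-validation path of mlx4_en_xmit(), the 48-bit destination MAC is folded into two 32-bit control-segment fields: the top 16 bits land in srcrb_flags (bits 31:16) and the low 32 bits in imm. A quick check of the bit arithmetic:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mac = 0x0002c9aabbccULL;   /* MAC 00:02:c9:aa:bb:cc */
	uint32_t mac_h = (uint32_t)((mac & 0xffff00000000ULL) >> 16);
	uint32_t mac_l = (uint32_t)(mac & 0xffffffff);

	/* mac_h carries the top 16 bits in bits 31:16, as OR-ed into
	 * srcrb_flags; mac_l is the low 32 bits placed in ctrl.imm. */
	printf("mac_h=0x%08x mac_l=0x%08x\n", mac_h, mac_l);
	return 0;
}
```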
| 828 | |||
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c new file mode 100644 index 00000000000..1ad1f6029af --- /dev/null +++ b/drivers/net/mlx4/eq.c | |||
| @@ -0,0 +1,842 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. | ||
| 3 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. | ||
| 4 | * | ||
| 5 | * This software is available to you under a choice of one of two | ||
| 6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 7 | * General Public License (GPL) Version 2, available from the file | ||
| 8 | * COPYING in the main directory of this source tree, or the | ||
| 9 | * OpenIB.org BSD license below: | ||
| 10 | * | ||
| 11 | * Redistribution and use in source and binary forms, with or | ||
| 12 | * without modification, are permitted provided that the following | ||
| 13 | * conditions are met: | ||
| 14 | * | ||
| 15 | * - Redistributions of source code must retain the above | ||
| 16 | * copyright notice, this list of conditions and the following | ||
| 17 | * disclaimer. | ||
| 18 | * | ||
| 19 | * - Redistributions in binary form must reproduce the above | ||
| 20 | * copyright notice, this list of conditions and the following | ||
| 21 | * disclaimer in the documentation and/or other materials | ||
| 22 | * provided with the distribution. | ||
| 23 | * | ||
| 24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 31 | * SOFTWARE. | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <linux/interrupt.h> | ||
| 35 | #include <linux/slab.h> | ||
| 36 | #include <linux/mm.h> | ||
| 37 | #include <linux/dma-mapping.h> | ||
| 38 | |||
| 39 | #include <linux/mlx4/cmd.h> | ||
| 40 | |||
| 41 | #include "mlx4.h" | ||
| 42 | #include "fw.h" | ||
| 43 | |||
| 44 | enum { | ||
| 45 | MLX4_IRQNAME_SIZE = 32 | ||
| 46 | }; | ||
| 47 | |||
| 48 | enum { | ||
| 49 | MLX4_NUM_ASYNC_EQE = 0x100, | ||
| 50 | MLX4_NUM_SPARE_EQE = 0x80, | ||
| 51 | MLX4_EQ_ENTRY_SIZE = 0x20 | ||
| 52 | }; | ||
| 53 | |||
| 54 | /* | ||
| 55 | * Must be packed because start is 64 bits but only aligned to 32 bits. | ||
| 56 | */ | ||
| 57 | struct mlx4_eq_context { | ||
| 58 | __be32 flags; | ||
| 59 | u16 reserved1[3]; | ||
| 60 | __be16 page_offset; | ||
| 61 | u8 log_eq_size; | ||
| 62 | u8 reserved2[4]; | ||
| 63 | u8 eq_period; | ||
| 64 | u8 reserved3; | ||
| 65 | u8 eq_max_count; | ||
| 66 | u8 reserved4[3]; | ||
| 67 | u8 intr; | ||
| 68 | u8 log_page_size; | ||
| 69 | u8 reserved5[2]; | ||
| 70 | u8 mtt_base_addr_h; | ||
| 71 | __be32 mtt_base_addr_l; | ||
| 72 | u32 reserved6[2]; | ||
| 73 | __be32 consumer_index; | ||
| 74 | __be32 producer_index; | ||
| 75 | u32 reserved7[4]; | ||
| 76 | }; | ||
| 77 | |||
| 78 | #define MLX4_EQ_STATUS_OK ( 0 << 28) | ||
| 79 | #define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28) | ||
| 80 | #define MLX4_EQ_OWNER_SW ( 0 << 24) | ||
| 81 | #define MLX4_EQ_OWNER_HW ( 1 << 24) | ||
| 82 | #define MLX4_EQ_FLAG_EC ( 1 << 18) | ||
| 83 | #define MLX4_EQ_FLAG_OI ( 1 << 17) | ||
| 84 | #define MLX4_EQ_STATE_ARMED ( 9 << 8) | ||
| 85 | #define MLX4_EQ_STATE_FIRED (10 << 8) | ||
| 86 | #define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8) | ||
| 87 | |||
| 88 | #define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG) | \ | ||
| 89 | (1ull << MLX4_EVENT_TYPE_COMM_EST) | \ | ||
| 90 | (1ull << MLX4_EVENT_TYPE_SQ_DRAINED) | \ | ||
| 91 | (1ull << MLX4_EVENT_TYPE_CQ_ERROR) | \ | ||
| 92 | (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR) | \ | ||
| 93 | (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR) | \ | ||
| 94 | (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \ | ||
| 95 | (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ | ||
| 96 | (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \ | ||
| 97 | (1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \ | ||
| 98 | (1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \ | ||
| 99 | (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \ | ||
| 100 | (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ | ||
| 101 | (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ | ||
| 102 | (1ull << MLX4_EVENT_TYPE_CMD)) | ||
| 103 | |||
| 104 | struct mlx4_eqe { | ||
| 105 | u8 reserved1; | ||
| 106 | u8 type; | ||
| 107 | u8 reserved2; | ||
| 108 | u8 subtype; | ||
| 109 | union { | ||
| 110 | u32 raw[6]; | ||
| 111 | struct { | ||
| 112 | __be32 cqn; | ||
| 113 | } __packed comp; | ||
| 114 | struct { | ||
| 115 | u16 reserved1; | ||
| 116 | __be16 token; | ||
| 117 | u32 reserved2; | ||
| 118 | u8 reserved3[3]; | ||
| 119 | u8 status; | ||
| 120 | __be64 out_param; | ||
| 121 | } __packed cmd; | ||
| 122 | struct { | ||
| 123 | __be32 qpn; | ||
| 124 | } __packed qp; | ||
| 125 | struct { | ||
| 126 | __be32 srqn; | ||
| 127 | } __packed srq; | ||
| 128 | struct { | ||
| 129 | __be32 cqn; | ||
| 130 | u32 reserved1; | ||
| 131 | u8 reserved2[3]; | ||
| 132 | u8 syndrome; | ||
| 133 | } __packed cq_err; | ||
| 134 | struct { | ||
| 135 | u32 reserved1[2]; | ||
| 136 | __be32 port; | ||
| 137 | } __packed port_change; | ||
| 138 | } event; | ||
| 139 | u8 reserved3[3]; | ||
| 140 | u8 owner; | ||
| 141 | } __packed; | ||
| 142 | |||
| 143 | static void eq_set_ci(struct mlx4_eq *eq, int req_not) | ||
| 144 | { | ||
| 145 | __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) | | ||
| 146 | req_not << 31), | ||
| 147 | eq->doorbell); | ||
| 148 | /* We still want ordering, just not swabbing, so add a barrier */ | ||
| 149 | mb(); | ||
| 150 | } | ||
| 151 | |||
| 152 | static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry) | ||
| 153 | { | ||
| 154 | unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE; | ||
| 155 | return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE; | ||
| 156 | } | ||
| 157 | |||
| 158 | static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq) | ||
| 159 | { | ||
| 160 | struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index); | ||
| 161 | return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe; | ||
| 162 | } | ||
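next_eqe_sw() decides whether an EQE is fresh by comparing the hardware-toggled owner bit against the parity of the consumer index's lap bit; the entry is valid only when the two agree. A self-contained sketch of that predicate:

```c
#include <stdint.h>
#include <stdio.h>

/* An EQE is fresh when the HW-written owner bit matches the parity of
 * the consumer index's lap bit (cons_index & nent). */
static int eqe_is_fresh(uint8_t owner, uint32_t cons_index, uint32_t nent)
{
	return !(!!(owner & 0x80) ^ !!(cons_index & nent));
}

int main(void)
{
	uint32_t nent = 256;
	printf("%d\n", eqe_is_fresh(0x00, 0, nent));      /* lap 0: fresh */
	printf("%d\n", eqe_is_fresh(0x00, nent, nent));   /* lap 1: stale */
	printf("%d\n", eqe_is_fresh(0x80, nent, nent));   /* lap 1: fresh */
	return 0;
}
```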
| 163 | |||
| 164 | static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) | ||
| 165 | { | ||
| 166 | struct mlx4_eqe *eqe; | ||
| 167 | int cqn; | ||
| 168 | int eqes_found = 0; | ||
| 169 | int set_ci = 0; | ||
| 170 | int port; | ||
| 171 | |||
| 172 | while ((eqe = next_eqe_sw(eq))) { | ||
| 173 | /* | ||
| 174 | * Make sure we read EQ entry contents after we've | ||
| 175 | * checked the ownership bit. | ||
| 176 | */ | ||
| 177 | rmb(); | ||
| 178 | |||
| 179 | switch (eqe->type) { | ||
| 180 | case MLX4_EVENT_TYPE_COMP: | ||
| 181 | cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff; | ||
| 182 | mlx4_cq_completion(dev, cqn); | ||
| 183 | break; | ||
| 184 | |||
| 185 | case MLX4_EVENT_TYPE_PATH_MIG: | ||
| 186 | case MLX4_EVENT_TYPE_COMM_EST: | ||
| 187 | case MLX4_EVENT_TYPE_SQ_DRAINED: | ||
| 188 | case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE: | ||
| 189 | case MLX4_EVENT_TYPE_WQ_CATAS_ERROR: | ||
| 190 | case MLX4_EVENT_TYPE_PATH_MIG_FAILED: | ||
| 191 | case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR: | ||
| 192 | case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR: | ||
| 193 | mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, | ||
| 194 | eqe->type); | ||
| 195 | break; | ||
| 196 | |||
| 197 | case MLX4_EVENT_TYPE_SRQ_LIMIT: | ||
| 198 | case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: | ||
| 199 | mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff, | ||
| 200 | eqe->type); | ||
| 201 | break; | ||
| 202 | |||
| 203 | case MLX4_EVENT_TYPE_CMD: | ||
| 204 | mlx4_cmd_event(dev, | ||
| 205 | be16_to_cpu(eqe->event.cmd.token), | ||
| 206 | eqe->event.cmd.status, | ||
| 207 | be64_to_cpu(eqe->event.cmd.out_param)); | ||
| 208 | break; | ||
| 209 | |||
| 210 | case MLX4_EVENT_TYPE_PORT_CHANGE: | ||
| 211 | port = be32_to_cpu(eqe->event.port_change.port) >> 28; | ||
| 212 | if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) { | ||
| 213 | mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN, | ||
| 214 | port); | ||
| 215 | mlx4_priv(dev)->sense.do_sense_port[port] = 1; | ||
| 216 | } else { | ||
| 217 | mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, | ||
| 218 | port); | ||
| 219 | mlx4_priv(dev)->sense.do_sense_port[port] = 0; | ||
| 220 | } | ||
| 221 | break; | ||
| 222 | |||
| 223 | case MLX4_EVENT_TYPE_CQ_ERROR: | ||
| 224 | mlx4_warn(dev, "CQ %s on CQN %06x\n", | ||
| 225 | eqe->event.cq_err.syndrome == 1 ? | ||
| 226 | "overrun" : "access violation", | ||
| 227 | be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff); | ||
| 228 | mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn), | ||
| 229 | eqe->type); | ||
| 230 | break; | ||
| 231 | |||
| 232 | case MLX4_EVENT_TYPE_EQ_OVERFLOW: | ||
| 233 | mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); | ||
| 234 | break; | ||
| 235 | |||
| 236 | case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: | ||
| 237 | case MLX4_EVENT_TYPE_ECC_DETECT: | ||
| 238 | default: | ||
| 239 | mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n", | ||
| 240 | eqe->type, eqe->subtype, eq->eqn, eq->cons_index); | ||
| 241 | break; | ||
| 242 | } | ||
| 243 | |||
| 244 | ++eq->cons_index; | ||
| 245 | eqes_found = 1; | ||
| 246 | ++set_ci; | ||
| 247 | |||
| 248 | /* | ||
| 249 | * The HCA will think the queue has overflowed if we | ||
| 250 | * don't tell it we've been processing events. We | ||
| 251 | * create our EQs with MLX4_NUM_SPARE_EQE extra | ||
| 252 | * entries, so we must update our consumer index at | ||
| 253 | * least that often. | ||
| 254 | */ | ||
| 255 | if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) { | ||
| 256 | eq_set_ci(eq, 0); | ||
| 257 | set_ci = 0; | ||
| 258 | } | ||
| 259 | } | ||
| 260 | |||
| 261 | eq_set_ci(eq, 1); | ||
| 262 | |||
| 263 | return eqes_found; | ||
| 264 | } | ||
| 265 | |||
| 266 | static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr) | ||
| 267 | { | ||
| 268 | struct mlx4_dev *dev = dev_ptr; | ||
| 269 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 270 | int work = 0; | ||
| 271 | int i; | ||
| 272 | |||
| 273 | writel(priv->eq_table.clr_mask, priv->eq_table.clr_int); | ||
| 274 | |||
| 275 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) | ||
| 276 | work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]); | ||
| 277 | |||
| 278 | return IRQ_RETVAL(work); | ||
| 279 | } | ||
| 280 | |||
| 281 | static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr) | ||
| 282 | { | ||
| 283 | struct mlx4_eq *eq = eq_ptr; | ||
| 284 | struct mlx4_dev *dev = eq->dev; | ||
| 285 | |||
| 286 | mlx4_eq_int(dev, eq); | ||
| 287 | |||
| 288 | /* MSI-X vectors always belong to us */ | ||
| 289 | return IRQ_HANDLED; | ||
| 290 | } | ||
| 291 | |||
| 292 | static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap, | ||
| 293 | int eq_num) | ||
| 294 | { | ||
| 295 | return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num, | ||
| 296 | 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B); | ||
| 297 | } | ||
| 298 | |||
| 299 | static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, | ||
| 300 | int eq_num) | ||
| 301 | { | ||
| 302 | return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ, | ||
| 303 | MLX4_CMD_TIME_CLASS_A); | ||
| 304 | } | ||
| 305 | |||
| 306 | static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, | ||
| 307 | int eq_num) | ||
| 308 | { | ||
| 309 | return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ, | ||
| 310 | MLX4_CMD_TIME_CLASS_A); | ||
| 311 | } | ||
| 312 | |||
| 313 | static int mlx4_num_eq_uar(struct mlx4_dev *dev) | ||
| 314 | { | ||
| 315 | /* | ||
| 316 | * Each UAR holds 4 EQ doorbells. To figure out how many UARs | ||
| 317 | * we need to map, take the difference of highest index and | ||
| 318 | * the lowest index we'll use and add 1. | ||
| 319 | */ | ||
| 320 | return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs + | ||
| 321 | dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1; | ||
| 322 | } | ||
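Since each UAR page carries four EQ doorbells, the count maps EQ numbers to UAR indexes by integer division. A worked example with assumed capability values:

```c
#include <stdio.h>

int main(void)
{
	int num_comp_vectors = 4, reserved_eqs = 8, comp_pool = 0;  /* assumed */
	int uars = (num_comp_vectors + 1 + reserved_eqs + comp_pool) / 4
	         - reserved_eqs / 4 + 1;

	printf("EQ UARs to map: %d\n", uars);   /* 13/4 - 8/4 + 1 = 2 */
	return 0;
}
```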
| 323 | |||
| 324 | static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) | ||
| 325 | { | ||
| 326 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 327 | int index; | ||
| 328 | |||
| 329 | index = eq->eqn / 4 - dev->caps.reserved_eqs / 4; | ||
| 330 | |||
| 331 | if (!priv->eq_table.uar_map[index]) { | ||
| 332 | priv->eq_table.uar_map[index] = | ||
| 333 | ioremap(pci_resource_start(dev->pdev, 2) + | ||
| 334 | ((eq->eqn / 4) << PAGE_SHIFT), | ||
| 335 | PAGE_SIZE); | ||
| 336 | if (!priv->eq_table.uar_map[index]) { | ||
| 337 | mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n", | ||
| 338 | eq->eqn); | ||
| 339 | return NULL; | ||
| 340 | } | ||
| 341 | } | ||
| 342 | |||
| 343 | return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4); | ||
| 344 | } | ||
| 345 | |||
| 346 | static int mlx4_create_eq(struct mlx4_dev *dev, int nent, | ||
| 347 | u8 intr, struct mlx4_eq *eq) | ||
| 348 | { | ||
| 349 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 350 | struct mlx4_cmd_mailbox *mailbox; | ||
| 351 | struct mlx4_eq_context *eq_context; | ||
| 352 | int npages; | ||
| 353 | u64 *dma_list = NULL; | ||
| 354 | dma_addr_t t; | ||
| 355 | u64 mtt_addr; | ||
| 356 | int err = -ENOMEM; | ||
| 357 | int i; | ||
| 358 | |||
| 359 | eq->dev = dev; | ||
| 360 | eq->nent = roundup_pow_of_two(max(nent, 2)); | ||
| 361 | npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE; | ||
| 362 | |||
| 363 | eq->page_list = kmalloc(npages * sizeof *eq->page_list, | ||
| 364 | GFP_KERNEL); | ||
| 365 | if (!eq->page_list) | ||
| 366 | goto err_out; | ||
| 367 | |||
| 368 | for (i = 0; i < npages; ++i) | ||
| 369 | eq->page_list[i].buf = NULL; | ||
| 370 | |||
| 371 | dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); | ||
| 372 | if (!dma_list) | ||
| 373 | goto err_out_free; | ||
| 374 | |||
| 375 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 376 | if (IS_ERR(mailbox)) | ||
| 377 | goto err_out_free; | ||
| 378 | eq_context = mailbox->buf; | ||
| 379 | |||
| 380 | for (i = 0; i < npages; ++i) { | ||
| 381 | eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev, | ||
| 382 | PAGE_SIZE, &t, GFP_KERNEL); | ||
| 383 | if (!eq->page_list[i].buf) | ||
| 384 | goto err_out_free_pages; | ||
| 385 | |||
| 386 | dma_list[i] = t; | ||
| 387 | eq->page_list[i].map = t; | ||
| 388 | |||
| 389 | memset(eq->page_list[i].buf, 0, PAGE_SIZE); | ||
| 390 | } | ||
| 391 | |||
| 392 | eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap); | ||
| 393 | if (eq->eqn == -1) | ||
| 394 | goto err_out_free_pages; | ||
| 395 | |||
| 396 | eq->doorbell = mlx4_get_eq_uar(dev, eq); | ||
| 397 | if (!eq->doorbell) { | ||
| 398 | err = -ENOMEM; | ||
| 399 | goto err_out_free_eq; | ||
| 400 | } | ||
| 401 | |||
| 402 | err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt); | ||
| 403 | if (err) | ||
| 404 | goto err_out_free_eq; | ||
| 405 | |||
| 406 | err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list); | ||
| 407 | if (err) | ||
| 408 | goto err_out_free_mtt; | ||
| 409 | |||
| 410 | memset(eq_context, 0, sizeof *eq_context); | ||
| 411 | eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK | | ||
| 412 | MLX4_EQ_STATE_ARMED); | ||
| 413 | eq_context->log_eq_size = ilog2(eq->nent); | ||
| 414 | eq_context->intr = intr; | ||
| 415 | eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT; | ||
| 416 | |||
| 417 | mtt_addr = mlx4_mtt_addr(dev, &eq->mtt); | ||
| 418 | eq_context->mtt_base_addr_h = mtt_addr >> 32; | ||
| 419 | eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); | ||
| 420 | |||
| 421 | err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn); | ||
| 422 | if (err) { | ||
| 423 | mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err); | ||
| 424 | goto err_out_free_mtt; | ||
| 425 | } | ||
| 426 | |||
| 427 | kfree(dma_list); | ||
| 428 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 429 | |||
| 430 | eq->cons_index = 0; | ||
| 431 | |||
| 432 | return err; | ||
| 433 | |||
| 434 | err_out_free_mtt: | ||
| 435 | mlx4_mtt_cleanup(dev, &eq->mtt); | ||
| 436 | |||
| 437 | err_out_free_eq: | ||
| 438 | mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn); | ||
| 439 | |||
| 440 | err_out_free_pages: | ||
| 441 | for (i = 0; i < npages; ++i) | ||
| 442 | if (eq->page_list[i].buf) | ||
| 443 | dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, | ||
| 444 | eq->page_list[i].buf, | ||
| 445 | eq->page_list[i].map); | ||
| 446 | |||
| 447 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 448 | |||
| 449 | err_out_free: | ||
| 450 | kfree(eq->page_list); | ||
| 451 | kfree(dma_list); | ||
| 452 | |||
| 453 | err_out: | ||
| 454 | return err; | ||
| 455 | } | ||
| 456 | |||
| 457 | static void mlx4_free_eq(struct mlx4_dev *dev, | ||
| 458 | struct mlx4_eq *eq) | ||
| 459 | { | ||
| 460 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 461 | struct mlx4_cmd_mailbox *mailbox; | ||
| 462 | int err; | ||
| 463 | int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE; | ||
| 464 | int i; | ||
| 465 | |||
| 466 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 467 | if (IS_ERR(mailbox)) | ||
| 468 | return; | ||
| 469 | |||
| 470 | err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn); | ||
| 471 | if (err) | ||
| 472 | mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err); | ||
| 473 | |||
| 474 | if (0) { | ||
| 475 | mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn); | ||
| 476 | for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) { | ||
| 477 | if (i % 4 == 0) | ||
| 478 | pr_cont("[%02x] ", i * 4); | ||
| 479 | pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4)); | ||
| 480 | if ((i + 1) % 4 == 0) | ||
| 481 | pr_cont("\n"); | ||
| 482 | } | ||
| 483 | } | ||
| 484 | |||
| 485 | mlx4_mtt_cleanup(dev, &eq->mtt); | ||
| 486 | for (i = 0; i < npages; ++i) | ||
| 487 | pci_free_consistent(dev->pdev, PAGE_SIZE, | ||
| 488 | eq->page_list[i].buf, | ||
| 489 | eq->page_list[i].map); | ||
| 490 | |||
| 491 | kfree(eq->page_list); | ||
| 492 | mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn); | ||
| 493 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 494 | } | ||
| 495 | |||
| 496 | static void mlx4_free_irqs(struct mlx4_dev *dev) | ||
| 497 | { | ||
| 498 | struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; | ||
| 499 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 500 | int i, vec; | ||
| 501 | |||
| 502 | if (eq_table->have_irq) | ||
| 503 | free_irq(dev->pdev->irq, dev); | ||
| 504 | |||
| 505 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) | ||
| 506 | if (eq_table->eq[i].have_irq) { | ||
| 507 | free_irq(eq_table->eq[i].irq, eq_table->eq + i); | ||
| 508 | eq_table->eq[i].have_irq = 0; | ||
| 509 | } | ||
| 510 | |||
| 511 | for (i = 0; i < dev->caps.comp_pool; i++) { | ||
| 512 | /* | ||
| 513 | * Free the assigned IRQs; | ||
| 514 | * all bits should be 0, but we need to validate. | ||
| 515 | */ | ||
| 516 | if (priv->msix_ctl.pool_bm & 1ULL << i) { | ||
| 517 | /* No locking needed here */ | ||
| 518 | vec = dev->caps.num_comp_vectors + 1 + i; | ||
| 519 | free_irq(priv->eq_table.eq[vec].irq, | ||
| 520 | &priv->eq_table.eq[vec]); | ||
| 521 | } | ||
| 522 | } | ||
| 523 | |||
| 524 | |||
| 525 | kfree(eq_table->irq_names); | ||
| 526 | } | ||
| 527 | |||
| 528 | static int mlx4_map_clr_int(struct mlx4_dev *dev) | ||
| 529 | { | ||
| 530 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 531 | |||
| 532 | priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) + | ||
| 533 | priv->fw.clr_int_base, MLX4_CLR_INT_SIZE); | ||
| 534 | if (!priv->clr_base) { | ||
| 535 | mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n"); | ||
| 536 | return -ENOMEM; | ||
| 537 | } | ||
| 538 | |||
| 539 | return 0; | ||
| 540 | } | ||
| 541 | |||
| 542 | static void mlx4_unmap_clr_int(struct mlx4_dev *dev) | ||
| 543 | { | ||
| 544 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 545 | |||
| 546 | iounmap(priv->clr_base); | ||
| 547 | } | ||
| 548 | |||
| 549 | int mlx4_alloc_eq_table(struct mlx4_dev *dev) | ||
| 550 | { | ||
| 551 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 552 | |||
| 553 | priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs, | ||
| 554 | sizeof *priv->eq_table.eq, GFP_KERNEL); | ||
| 555 | if (!priv->eq_table.eq) | ||
| 556 | return -ENOMEM; | ||
| 557 | |||
| 558 | return 0; | ||
| 559 | } | ||
| 560 | |||
| 561 | void mlx4_free_eq_table(struct mlx4_dev *dev) | ||
| 562 | { | ||
| 563 | kfree(mlx4_priv(dev)->eq_table.eq); | ||
| 564 | } | ||
| 565 | |||
| 566 | int mlx4_init_eq_table(struct mlx4_dev *dev) | ||
| 567 | { | ||
| 568 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 569 | int err; | ||
| 570 | int i; | ||
| 571 | |||
| 572 | priv->eq_table.uar_map = kcalloc(sizeof *priv->eq_table.uar_map, | ||
| 573 | mlx4_num_eq_uar(dev), GFP_KERNEL); | ||
| 574 | if (!priv->eq_table.uar_map) { | ||
| 575 | err = -ENOMEM; | ||
| 576 | goto err_out_free; | ||
| 577 | } | ||
| 578 | |||
| 579 | err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs, | ||
| 580 | dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0); | ||
| 581 | if (err) | ||
| 582 | goto err_out_free; | ||
| 583 | |||
| 584 | for (i = 0; i < mlx4_num_eq_uar(dev); ++i) | ||
| 585 | priv->eq_table.uar_map[i] = NULL; | ||
| 586 | |||
| 587 | err = mlx4_map_clr_int(dev); | ||
| 588 | if (err) | ||
| 589 | goto err_out_bitmap; | ||
| 590 | |||
| 591 | priv->eq_table.clr_mask = | ||
| 592 | swab32(1 << (priv->eq_table.inta_pin & 31)); | ||
| 593 | priv->eq_table.clr_int = priv->clr_base + | ||
| 594 | (priv->eq_table.inta_pin < 32 ? 4 : 0); | ||
| 595 | |||
| 596 | priv->eq_table.irq_names = | ||
| 597 | kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 + | ||
| 598 | dev->caps.comp_pool), | ||
| 599 | GFP_KERNEL); | ||
| 600 | if (!priv->eq_table.irq_names) { | ||
| 601 | err = -ENOMEM; | ||
| 602 | goto err_out_bitmap; | ||
| 603 | } | ||
| 604 | |||
| 605 | for (i = 0; i < dev->caps.num_comp_vectors; ++i) { | ||
| 606 | err = mlx4_create_eq(dev, dev->caps.num_cqs - | ||
| 607 | dev->caps.reserved_cqs + | ||
| 608 | MLX4_NUM_SPARE_EQE, | ||
| 609 | (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, | ||
| 610 | &priv->eq_table.eq[i]); | ||
| 611 | if (err) { | ||
| 612 | --i; | ||
| 613 | goto err_out_unmap; | ||
| 614 | } | ||
| 615 | } | ||
| 616 | |||
| 617 | err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE, | ||
| 618 | (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0, | ||
| 619 | &priv->eq_table.eq[dev->caps.num_comp_vectors]); | ||
| 620 | if (err) | ||
| 621 | goto err_out_comp; | ||
| 622 | |||
| 623 | /* If the additional completion vector pool size is 0, this loop will not run */ | ||
| 624 | for (i = dev->caps.num_comp_vectors + 1; | ||
| 625 | i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) { | ||
| 626 | |||
| 627 | err = mlx4_create_eq(dev, dev->caps.num_cqs - | ||
| 628 | dev->caps.reserved_cqs + | ||
| 629 | MLX4_NUM_SPARE_EQE, | ||
| 630 | (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, | ||
| 631 | &priv->eq_table.eq[i]); | ||
| 632 | if (err) { | ||
| 633 | --i; | ||
| 634 | goto err_out_unmap; | ||
| 635 | } | ||
| 636 | } | ||
| 637 | |||
| 638 | |||
| 639 | if (dev->flags & MLX4_FLAG_MSI_X) { | ||
| 640 | const char *eq_name; | ||
| 641 | |||
| 642 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { | ||
| 643 | if (i < dev->caps.num_comp_vectors) { | ||
| 644 | snprintf(priv->eq_table.irq_names + | ||
| 645 | i * MLX4_IRQNAME_SIZE, | ||
| 646 | MLX4_IRQNAME_SIZE, | ||
| 647 | "mlx4-comp-%d@pci:%s", i, | ||
| 648 | pci_name(dev->pdev)); | ||
| 649 | } else { | ||
| 650 | snprintf(priv->eq_table.irq_names + | ||
| 651 | i * MLX4_IRQNAME_SIZE, | ||
| 652 | MLX4_IRQNAME_SIZE, | ||
| 653 | "mlx4-async@pci:%s", | ||
| 654 | pci_name(dev->pdev)); | ||
| 655 | } | ||
| 656 | |||
| 657 | eq_name = priv->eq_table.irq_names + | ||
| 658 | i * MLX4_IRQNAME_SIZE; | ||
| 659 | err = request_irq(priv->eq_table.eq[i].irq, | ||
| 660 | mlx4_msi_x_interrupt, 0, eq_name, | ||
| 661 | priv->eq_table.eq + i); | ||
| 662 | if (err) | ||
| 663 | goto err_out_async; | ||
| 664 | |||
| 665 | priv->eq_table.eq[i].have_irq = 1; | ||
| 666 | } | ||
| 667 | } else { | ||
| 668 | snprintf(priv->eq_table.irq_names, | ||
| 669 | MLX4_IRQNAME_SIZE, | ||
| 670 | DRV_NAME "@pci:%s", | ||
| 671 | pci_name(dev->pdev)); | ||
| 672 | err = request_irq(dev->pdev->irq, mlx4_interrupt, | ||
| 673 | IRQF_SHARED, priv->eq_table.irq_names, dev); | ||
| 674 | if (err) | ||
| 675 | goto err_out_async; | ||
| 676 | |||
| 677 | priv->eq_table.have_irq = 1; | ||
| 678 | } | ||
| 679 | |||
| 680 | err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0, | ||
| 681 | priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); | ||
| 682 | if (err) | ||
| 683 | mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", | ||
| 684 | priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err); | ||
| 685 | |||
| 686 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) | ||
| 687 | eq_set_ci(&priv->eq_table.eq[i], 1); | ||
| 688 | |||
| 689 | return 0; | ||
| 690 | |||
| 691 | err_out_async: | ||
| 692 | mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]); | ||
| 693 | |||
| 694 | err_out_comp: | ||
| 695 | i = dev->caps.num_comp_vectors - 1; | ||
| 696 | |||
| 697 | err_out_unmap: | ||
| 698 | while (i >= 0) { | ||
| 699 | mlx4_free_eq(dev, &priv->eq_table.eq[i]); | ||
| 700 | --i; | ||
| 701 | } | ||
| 702 | mlx4_unmap_clr_int(dev); | ||
| 703 | mlx4_free_irqs(dev); | ||
| 704 | |||
| 705 | err_out_bitmap: | ||
| 706 | mlx4_bitmap_cleanup(&priv->eq_table.bitmap); | ||
| 707 | |||
| 708 | err_out_free: | ||
| 709 | kfree(priv->eq_table.uar_map); | ||
| 710 | |||
| 711 | return err; | ||
| 712 | } | ||
| 713 | |||
| 714 | void mlx4_cleanup_eq_table(struct mlx4_dev *dev) | ||
| 715 | { | ||
| 716 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 717 | int i; | ||
| 718 | |||
| 719 | mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1, | ||
| 720 | priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); | ||
| 721 | |||
| 722 | mlx4_free_irqs(dev); | ||
| 723 | |||
| 724 | for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) | ||
| 725 | mlx4_free_eq(dev, &priv->eq_table.eq[i]); | ||
| 726 | |||
| 727 | mlx4_unmap_clr_int(dev); | ||
| 728 | |||
| 729 | for (i = 0; i < mlx4_num_eq_uar(dev); ++i) | ||
| 730 | if (priv->eq_table.uar_map[i]) | ||
| 731 | iounmap(priv->eq_table.uar_map[i]); | ||
| 732 | |||
| 733 | mlx4_bitmap_cleanup(&priv->eq_table.bitmap); | ||
| 734 | |||
| 735 | kfree(priv->eq_table.uar_map); | ||
| 736 | } | ||
| 737 | |||
| 738 | /* A test that verifies that we can accept interrupts on all | ||
| 739 | * the irq vectors of the device. | ||
| 740 | * Interrupts are checked using the NOP command. | ||
| 741 | */ | ||
| 742 | int mlx4_test_interrupts(struct mlx4_dev *dev) | ||
| 743 | { | ||
| 744 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 745 | int i; | ||
| 746 | int err; | ||
| 747 | |||
| 748 | err = mlx4_NOP(dev); | ||
| 749 | /* When not in MSI_X, there is only one irq to check */ | ||
| 750 | if (!(dev->flags & MLX4_FLAG_MSI_X)) | ||
| 751 | return err; | ||
| 752 | |||
| 753 | /* Loop over all completion vectors; for each vector, check | ||
| 754 | * whether it works by mapping command completions to that vector | ||
| 755 | * and performing a NOP command | ||
| 756 | */ | ||
| 757 | for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) { | ||
| 758 | /* Temporary use polling for command completions */ | ||
| 759 | mlx4_cmd_use_polling(dev); | ||
| 760 | |||
| 761 | /* Map the new EQ to handle all asynchronous events */ | ||
| 762 | err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0, | ||
| 763 | priv->eq_table.eq[i].eqn); | ||
| 764 | if (err) { | ||
| 765 | mlx4_warn(dev, "Failed mapping eq for interrupt test\n"); | ||
| 766 | mlx4_cmd_use_events(dev); | ||
| 767 | break; | ||
| 768 | } | ||
| 769 | |||
| 770 | /* Go back to using events */ | ||
| 771 | mlx4_cmd_use_events(dev); | ||
| 772 | err = mlx4_NOP(dev); | ||
| 773 | } | ||
| 774 | |||
| 775 | /* Return to default */ | ||
| 776 | mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0, | ||
| 777 | priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); | ||
| 778 | return err; | ||
| 779 | } | ||
| 780 | EXPORT_SYMBOL(mlx4_test_interrupts); | ||
| 781 | |||
| 782 | int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector) | ||
| 783 | { | ||
| 784 | |||
| 785 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 786 | int vec = 0, err = 0, i; | ||
| 787 | |||
| 788 | spin_lock(&priv->msix_ctl.pool_lock); | ||
| 789 | for (i = 0; !vec && i < dev->caps.comp_pool; i++) { | ||
| 790 | if (~priv->msix_ctl.pool_bm & 1ULL << i) { | ||
| 791 | priv->msix_ctl.pool_bm |= 1ULL << i; | ||
| 792 | vec = dev->caps.num_comp_vectors + 1 + i; | ||
| 793 | snprintf(priv->eq_table.irq_names + | ||
| 794 | vec * MLX4_IRQNAME_SIZE, | ||
| 795 | MLX4_IRQNAME_SIZE, "%s", name); | ||
| 796 | err = request_irq(priv->eq_table.eq[vec].irq, | ||
| 797 | mlx4_msi_x_interrupt, 0, | ||
| 798 | &priv->eq_table.irq_names[vec<<5], | ||
| 799 | priv->eq_table.eq + vec); | ||
| 800 | if (err) { | ||
| 801 | /* zero out the bit by flipping it */ | ||
| 802 | priv->msix_ctl.pool_bm ^= 1ULL << i; | ||
| 803 | vec = 0; | ||
| 804 | continue; | ||
| 805 | /* we don't want to break here */ | ||
| 806 | } | ||
| 807 | eq_set_ci(&priv->eq_table.eq[vec], 1); | ||
| 808 | } | ||
| 809 | } | ||
| 810 | spin_unlock(&priv->msix_ctl.pool_lock); | ||
| 811 | |||
| 812 | if (vec) { | ||
| 813 | *vector = vec; | ||
| 814 | } else { | ||
| 815 | *vector = 0; | ||
| 816 | err = (i == dev->caps.comp_pool) ? -ENOSPC : err; | ||
| 817 | } | ||
| 818 | return err; | ||
| 819 | } | ||
| 820 | EXPORT_SYMBOL(mlx4_assign_eq); | ||
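mlx4_assign_eq() hands out pooled completion vectors with a 64-bit bitmap under pool_lock; a clear bit means the vector is free. The bookkeeping, reduced to its core (base stands in for num_comp_vectors + 1):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pool_bm = 0;          /* one bit per pooled vector */
	int comp_pool = 4, base = 5;   /* illustrative values */
	int i, vec = 0;

	for (i = 0; !vec && i < comp_pool; i++) {
		if (~pool_bm & (1ULL << i)) {   /* bit clear: vector is free */
			pool_bm |= 1ULL << i;
			vec = base + i;
		}
	}
	printf("assigned vector %d, bitmap 0x%llx\n",
	       vec, (unsigned long long)pool_bm);
	return 0;
}
```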
| 821 | |||
| 822 | void mlx4_release_eq(struct mlx4_dev *dev, int vec) | ||
| 823 | { | ||
| 824 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 825 | /* bitmap index */ | ||
| 826 | int i = vec - dev->caps.num_comp_vectors - 1; | ||
| 827 | |||
| 828 | if (likely(i >= 0)) { | ||
| 829 | /* Sanity check: make sure we're not trying to free IRQs | ||
| 830 | * belonging to a legacy EQ */ | ||
| 831 | spin_lock(&priv->msix_ctl.pool_lock); | ||
| 832 | if (priv->msix_ctl.pool_bm & 1ULL << i) { | ||
| 833 | free_irq(priv->eq_table.eq[vec].irq, | ||
| 834 | &priv->eq_table.eq[vec]); | ||
| 835 | priv->msix_ctl.pool_bm &= ~(1ULL << i); | ||
| 836 | } | ||
| 837 | spin_unlock(&priv->msix_ctl.pool_lock); | ||
| 838 | } | ||
| 839 | |||
| 840 | } | ||
| 841 | EXPORT_SYMBOL(mlx4_release_eq); | ||
| 842 | |||
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c new file mode 100644 index 00000000000..7eb8ba822e9 --- /dev/null +++ b/drivers/net/mlx4/fw.c | |||
| @@ -0,0 +1,944 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. | ||
| 3 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. | ||
| 4 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. | ||
| 5 | * | ||
| 6 | * This software is available to you under a choice of one of two | ||
| 7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 8 | * General Public License (GPL) Version 2, available from the file | ||
| 9 | * COPYING in the main directory of this source tree, or the | ||
| 10 | * OpenIB.org BSD license below: | ||
| 11 | * | ||
| 12 | * Redistribution and use in source and binary forms, with or | ||
| 13 | * without modification, are permitted provided that the following | ||
| 14 | * conditions are met: | ||
| 15 | * | ||
| 16 | * - Redistributions of source code must retain the above | ||
| 17 | * copyright notice, this list of conditions and the following | ||
| 18 | * disclaimer. | ||
| 19 | * | ||
| 20 | * - Redistributions in binary form must reproduce the above | ||
| 21 | * copyright notice, this list of conditions and the following | ||
| 22 | * disclaimer in the documentation and/or other materials | ||
| 23 | * provided with the distribution. | ||
| 24 | * | ||
| 25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 32 | * SOFTWARE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #include <linux/mlx4/cmd.h> | ||
| 36 | #include <linux/cache.h> | ||
| 37 | |||
| 38 | #include "fw.h" | ||
| 39 | #include "icm.h" | ||
| 40 | |||
| 41 | enum { | ||
| 42 | MLX4_COMMAND_INTERFACE_MIN_REV = 2, | ||
| 43 | MLX4_COMMAND_INTERFACE_MAX_REV = 3, | ||
| 44 | MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS = 3, | ||
| 45 | }; | ||
| 46 | |||
| 47 | extern void __buggy_use_of_MLX4_GET(void); | ||
| 48 | extern void __buggy_use_of_MLX4_PUT(void); | ||
| 49 | |||
| 50 | static int enable_qos; | ||
| 51 | module_param(enable_qos, bool, 0444); | ||
| 52 | MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)"); | ||
| 53 | |||
| 54 | #define MLX4_GET(dest, source, offset) \ | ||
| 55 | do { \ | ||
| 56 | void *__p = (char *) (source) + (offset); \ | ||
| 57 | switch (sizeof (dest)) { \ | ||
| 58 | case 1: (dest) = *(u8 *) __p; break; \ | ||
| 59 | case 2: (dest) = be16_to_cpup(__p); break; \ | ||
| 60 | case 4: (dest) = be32_to_cpup(__p); break; \ | ||
| 61 | case 8: (dest) = be64_to_cpup(__p); break; \ | ||
| 62 | default: __buggy_use_of_MLX4_GET(); \ | ||
| 63 | } \ | ||
| 64 | } while (0) | ||
| 65 | |||
| 66 | #define MLX4_PUT(dest, source, offset) \ | ||
| 67 | do { \ | ||
| 68 | void *__d = ((char *) (dest) + (offset)); \ | ||
| 69 | switch (sizeof(source)) { \ | ||
| 70 | case 1: *(u8 *) __d = (source); break; \ | ||
| 71 | case 2: *(__be16 *) __d = cpu_to_be16(source); break; \ | ||
| 72 | case 4: *(__be32 *) __d = cpu_to_be32(source); break; \ | ||
| 73 | case 8: *(__be64 *) __d = cpu_to_be64(source); break; \ | ||
| 74 | default: __buggy_use_of_MLX4_PUT(); \ | ||
| 75 | } \ | ||
| 76 | } while (0) | ||
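/*
 * Editor's sketch (not part of the driver): MLX4_GET/MLX4_PUT dispatch
 * on sizeof() of the CPU-side lvalue, so one call site handles 8-, 16-,
 * 32- and 64-bit big-endian mailbox fields, and any other width fails
 * at link time via the __buggy_use_of_MLX4_*() externs. A hypothetical
 * caller reading a 32-bit field would look like this:
 */
static inline u32 mlx4_example_read_field32(void *outbox, int offset)
{
	u32 field32;

	MLX4_GET(field32, outbox, offset);	/* sizeof(field32) == 4 -> be32_to_cpup() */
	return field32;
}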
| 77 | |||
| 78 | static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags) | ||
| 79 | { | ||
| 80 | static const char *fname[] = { | ||
| 81 | [ 0] = "RC transport", | ||
| 82 | [ 1] = "UC transport", | ||
| 83 | [ 2] = "UD transport", | ||
| 84 | [ 3] = "XRC transport", | ||
| 85 | [ 4] = "reliable multicast", | ||
| 86 | [ 5] = "FCoIB support", | ||
| 87 | [ 6] = "SRQ support", | ||
| 88 | [ 7] = "IPoIB checksum offload", | ||
| 89 | [ 8] = "P_Key violation counter", | ||
| 90 | [ 9] = "Q_Key violation counter", | ||
| 91 | [10] = "VMM", | ||
| 92 | [12] = "DPDP", | ||
| 93 | [15] = "Big LSO headers", | ||
| 94 | [16] = "MW support", | ||
| 95 | [17] = "APM support", | ||
| 96 | [18] = "Atomic ops support", | ||
| 97 | [19] = "Raw multicast support", | ||
| 98 | [20] = "Address vector port checking support", | ||
| 99 | [21] = "UD multicast support", | ||
| 100 | [24] = "Demand paging support", | ||
| 101 | [25] = "Router support", | ||
| 102 | [30] = "IBoE support", | ||
| 103 | [32] = "Unicast loopback support", | ||
| 104 | [38] = "Wake On LAN support", | ||
| 105 | [40] = "UDP RSS support", | ||
| 106 | [41] = "Unicast VEP steering support", | ||
| 107 | [42] = "Multicast VEP steering support", | ||
| 108 | [48] = "Counters support", | ||
| 109 | }; | ||
| 110 | int i; | ||
| 111 | |||
| 112 | mlx4_dbg(dev, "DEV_CAP flags:\n"); | ||
| 113 | for (i = 0; i < ARRAY_SIZE(fname); ++i) | ||
| 114 | if (fname[i] && (flags & (1LL << i))) | ||
| 115 | mlx4_dbg(dev, " %s\n", fname[i]); | ||
| 116 | } | ||
| 117 | |||
| 118 | int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg) | ||
| 119 | { | ||
| 120 | struct mlx4_cmd_mailbox *mailbox; | ||
| 121 | u32 *inbox; | ||
| 122 | int err = 0; | ||
| 123 | |||
| 124 | #define MOD_STAT_CFG_IN_SIZE 0x100 | ||
| 125 | |||
| 126 | #define MOD_STAT_CFG_PG_SZ_M_OFFSET 0x002 | ||
| 127 | #define MOD_STAT_CFG_PG_SZ_OFFSET 0x003 | ||
| 128 | |||
| 129 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 130 | if (IS_ERR(mailbox)) | ||
| 131 | return PTR_ERR(mailbox); | ||
| 132 | inbox = mailbox->buf; | ||
| 133 | |||
| 134 | memset(inbox, 0, MOD_STAT_CFG_IN_SIZE); | ||
| 135 | |||
| 136 | MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET); | ||
| 137 | MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET); | ||
| 138 | |||
| 139 | err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG, | ||
| 140 | MLX4_CMD_TIME_CLASS_A); | ||
| 141 | |||
| 142 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 143 | return err; | ||
| 144 | } | ||
| 145 | |||
| 146 | int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | ||
| 147 | { | ||
| 148 | struct mlx4_cmd_mailbox *mailbox; | ||
| 149 | u32 *outbox; | ||
| 150 | u8 field; | ||
| 151 | u32 field32, flags, ext_flags; | ||
| 152 | u16 size; | ||
| 153 | u16 stat_rate; | ||
| 154 | int err; | ||
| 155 | int i; | ||
| 156 | |||
| 157 | #define QUERY_DEV_CAP_OUT_SIZE 0x100 | ||
| 158 | #define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10 | ||
| 159 | #define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11 | ||
| 160 | #define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12 | ||
| 161 | #define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13 | ||
| 162 | #define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14 | ||
| 163 | #define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15 | ||
| 164 | #define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16 | ||
| 165 | #define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17 | ||
| 166 | #define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19 | ||
| 167 | #define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a | ||
| 168 | #define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b | ||
| 169 | #define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d | ||
| 170 | #define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e | ||
| 171 | #define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f | ||
| 172 | #define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20 | ||
| 173 | #define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21 | ||
| 174 | #define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22 | ||
| 175 | #define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23 | ||
| 176 | #define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27 | ||
| 177 | #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29 | ||
| 178 | #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b | ||
| 179 | #define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d | ||
| 180 | #define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f | ||
| 181 | #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33 | ||
| 182 | #define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35 | ||
| 183 | #define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36 | ||
| 184 | #define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37 | ||
| 185 | #define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38 | ||
| 186 | #define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b | ||
| 187 | #define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c | ||
| 188 | #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f | ||
| 189 | #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 | ||
| 190 | #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 | ||
| 191 | #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 | ||
| 192 | #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 | ||
| 193 | #define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b | ||
| 194 | #define QUERY_DEV_CAP_BF_OFFSET 0x4c | ||
| 195 | #define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d | ||
| 196 | #define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e | ||
| 197 | #define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f | ||
| 198 | #define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51 | ||
| 199 | #define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52 | ||
| 200 | #define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55 | ||
| 201 | #define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56 | ||
| 202 | #define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61 | ||
| 203 | #define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62 | ||
| 204 | #define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63 | ||
| 205 | #define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64 | ||
| 206 | #define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65 | ||
| 207 | #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68 | ||
| 208 | #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 | ||
| 209 | #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 | ||
| 210 | #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 | ||
| 211 | #define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86 | ||
| 212 | #define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88 | ||
| 213 | #define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a | ||
| 214 | #define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c | ||
| 215 | #define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e | ||
| 216 | #define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90 | ||
| 217 | #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92 | ||
| 218 | #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94 | ||
| 219 | #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 | ||
| 220 | #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 | ||
| 221 | |||
| 222 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 223 | if (IS_ERR(mailbox)) | ||
| 224 | return PTR_ERR(mailbox); | ||
| 225 | outbox = mailbox->buf; | ||
| 226 | |||
| 227 | err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, | ||
| 228 | MLX4_CMD_TIME_CLASS_A); | ||
| 229 | if (err) | ||
| 230 | goto out; | ||
| 231 | |||
| 232 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET); | ||
| 233 | dev_cap->reserved_qps = 1 << (field & 0xf); | ||
| 234 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET); | ||
| 235 | dev_cap->max_qps = 1 << (field & 0x1f); | ||
| 236 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET); | ||
| 237 | dev_cap->reserved_srqs = 1 << (field >> 4); | ||
| 238 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET); | ||
| 239 | dev_cap->max_srqs = 1 << (field & 0x1f); | ||
| 240 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET); | ||
| 241 | dev_cap->max_cq_sz = 1 << field; | ||
| 242 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET); | ||
| 243 | dev_cap->reserved_cqs = 1 << (field & 0xf); | ||
| 244 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET); | ||
| 245 | dev_cap->max_cqs = 1 << (field & 0x1f); | ||
| 246 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET); | ||
| 247 | dev_cap->max_mpts = 1 << (field & 0x3f); | ||
| 248 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET); | ||
| 249 | dev_cap->reserved_eqs = field & 0xf; | ||
| 250 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET); | ||
| 251 | dev_cap->max_eqs = 1 << (field & 0xf); | ||
| 252 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET); | ||
| 253 | dev_cap->reserved_mtts = 1 << (field >> 4); | ||
| 254 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET); | ||
| 255 | dev_cap->max_mrw_sz = 1 << field; | ||
| 256 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET); | ||
| 257 | dev_cap->reserved_mrws = 1 << (field & 0xf); | ||
| 258 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET); | ||
| 259 | dev_cap->max_mtt_seg = 1 << (field & 0x3f); | ||
| 260 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET); | ||
| 261 | dev_cap->max_requester_per_qp = 1 << (field & 0x3f); | ||
| 262 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET); | ||
| 263 | dev_cap->max_responder_per_qp = 1 << (field & 0x3f); | ||
| 264 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET); | ||
| 265 | field &= 0x1f; | ||
| 266 | if (!field) | ||
| 267 | dev_cap->max_gso_sz = 0; | ||
| 268 | else | ||
| 269 | dev_cap->max_gso_sz = 1 << field; | ||
| 270 | |||
| 271 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET); | ||
| 272 | dev_cap->max_rdma_global = 1 << (field & 0x3f); | ||
| 273 | MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET); | ||
| 274 | dev_cap->local_ca_ack_delay = field & 0x1f; | ||
| 275 | MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); | ||
| 276 | dev_cap->num_ports = field & 0xf; | ||
| 277 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET); | ||
| 278 | dev_cap->max_msg_sz = 1 << (field & 0x1f); | ||
| 279 | MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); | ||
| 280 | dev_cap->stat_rate_support = stat_rate; | ||
| 281 | MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); | ||
| 282 | MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); | ||
| 283 | dev_cap->flags = flags | (u64)ext_flags << 32; | ||
| 284 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); | ||
| 285 | dev_cap->reserved_uars = field >> 4; | ||
| 286 | MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET); | ||
| 287 | dev_cap->uar_size = 1 << ((field & 0x3f) + 20); | ||
| 288 | MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET); | ||
| 289 | dev_cap->min_page_sz = 1 << field; | ||
| 290 | |||
| 291 | MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET); | ||
| 292 | if (field & 0x80) { | ||
| 293 | MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); | ||
| 294 | dev_cap->bf_reg_size = 1 << (field & 0x1f); | ||
| 295 | MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); | ||
| 296 | if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) | ||
| 297 | field = 3; | ||
| 298 | dev_cap->bf_regs_per_page = 1 << (field & 0x3f); | ||
| 299 | mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", | ||
| 300 | dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); | ||
| 301 | } else { | ||
| 302 | dev_cap->bf_reg_size = 0; | ||
| 303 | mlx4_dbg(dev, "BlueFlame not available\n"); | ||
| 304 | } | ||
| 305 | |||
| 306 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET); | ||
| 307 | dev_cap->max_sq_sg = field; | ||
| 308 | MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET); | ||
| 309 | dev_cap->max_sq_desc_sz = size; | ||
| 310 | |||
| 311 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET); | ||
| 312 | dev_cap->max_qp_per_mcg = 1 << field; | ||
| 313 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET); | ||
| 314 | dev_cap->reserved_mgms = field & 0xf; | ||
| 315 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET); | ||
| 316 | dev_cap->max_mcgs = 1 << field; | ||
| 317 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET); | ||
| 318 | dev_cap->reserved_pds = field >> 4; | ||
| 319 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET); | ||
| 320 | dev_cap->max_pds = 1 << (field & 0x3f); | ||
| 321 | |||
| 322 | MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET); | ||
| 323 | dev_cap->rdmarc_entry_sz = size; | ||
| 324 | MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET); | ||
| 325 | dev_cap->qpc_entry_sz = size; | ||
| 326 | MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET); | ||
| 327 | dev_cap->aux_entry_sz = size; | ||
| 328 | MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET); | ||
| 329 | dev_cap->altc_entry_sz = size; | ||
| 330 | MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET); | ||
| 331 | dev_cap->eqc_entry_sz = size; | ||
| 332 | MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET); | ||
| 333 | dev_cap->cqc_entry_sz = size; | ||
| 334 | MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET); | ||
| 335 | dev_cap->srq_entry_sz = size; | ||
| 336 | MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET); | ||
| 337 | dev_cap->cmpt_entry_sz = size; | ||
| 338 | MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET); | ||
| 339 | dev_cap->mtt_entry_sz = size; | ||
| 340 | MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET); | ||
| 341 | dev_cap->dmpt_entry_sz = size; | ||
| 342 | |||
| 343 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET); | ||
| 344 | dev_cap->max_srq_sz = 1 << field; | ||
| 345 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET); | ||
| 346 | dev_cap->max_qp_sz = 1 << field; | ||
| 347 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET); | ||
| 348 | dev_cap->resize_srq = field & 1; | ||
| 349 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET); | ||
| 350 | dev_cap->max_rq_sg = field; | ||
| 351 | MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); | ||
| 352 | dev_cap->max_rq_desc_sz = size; | ||
| 353 | |||
| 354 | MLX4_GET(dev_cap->bmme_flags, outbox, | ||
| 355 | QUERY_DEV_CAP_BMME_FLAGS_OFFSET); | ||
| 356 | MLX4_GET(dev_cap->reserved_lkey, outbox, | ||
| 357 | QUERY_DEV_CAP_RSVD_LKEY_OFFSET); | ||
| 358 | MLX4_GET(dev_cap->max_icm_sz, outbox, | ||
| 359 | QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET); | ||
| 360 | if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS) | ||
| 361 | MLX4_GET(dev_cap->max_counters, outbox, | ||
| 362 | QUERY_DEV_CAP_MAX_COUNTERS_OFFSET); | ||
| 363 | |||
| 364 | if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { | ||
| 365 | for (i = 1; i <= dev_cap->num_ports; ++i) { | ||
| 366 | MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); | ||
| 367 | dev_cap->max_vl[i] = field >> 4; | ||
| 368 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); | ||
| 369 | dev_cap->ib_mtu[i] = field >> 4; | ||
| 370 | dev_cap->max_port_width[i] = field & 0xf; | ||
| 371 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); | ||
| 372 | dev_cap->max_gids[i] = 1 << (field & 0xf); | ||
| 373 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET); | ||
| 374 | dev_cap->max_pkeys[i] = 1 << (field & 0xf); | ||
| 375 | } | ||
| 376 | } else { | ||
| 377 | #define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00 | ||
| 378 | #define QUERY_PORT_MTU_OFFSET 0x01 | ||
| 379 | #define QUERY_PORT_ETH_MTU_OFFSET 0x02 | ||
| 380 | #define QUERY_PORT_WIDTH_OFFSET 0x06 | ||
| 381 | #define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 | ||
| 382 | #define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a | ||
| 383 | #define QUERY_PORT_MAX_VL_OFFSET 0x0b | ||
| 384 | #define QUERY_PORT_MAC_OFFSET 0x10 | ||
| 385 | #define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18 | ||
| 386 | #define QUERY_PORT_WAVELENGTH_OFFSET 0x1c | ||
| 387 | #define QUERY_PORT_TRANS_CODE_OFFSET 0x20 | ||
| 388 | |||
| 389 | for (i = 1; i <= dev_cap->num_ports; ++i) { | ||
| 390 | err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, | ||
| 391 | MLX4_CMD_TIME_CLASS_B); | ||
| 392 | if (err) | ||
| 393 | goto out; | ||
| 394 | |||
| 395 | MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET); | ||
| 396 | dev_cap->supported_port_types[i] = field & 3; | ||
| 397 | MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); | ||
| 398 | dev_cap->ib_mtu[i] = field & 0xf; | ||
| 399 | MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); | ||
| 400 | dev_cap->max_port_width[i] = field & 0xf; | ||
| 401 | MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET); | ||
| 402 | dev_cap->max_gids[i] = 1 << (field >> 4); | ||
| 403 | dev_cap->max_pkeys[i] = 1 << (field & 0xf); | ||
| 404 | MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); | ||
| 405 | dev_cap->max_vl[i] = field & 0xf; | ||
| 406 | MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET); | ||
| 407 | dev_cap->log_max_macs[i] = field & 0xf; | ||
| 408 | dev_cap->log_max_vlans[i] = field >> 4; | ||
| 409 | MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET); | ||
| 410 | MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET); | ||
| 411 | MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET); | ||
| 412 | dev_cap->trans_type[i] = field32 >> 24; | ||
| 413 | dev_cap->vendor_oui[i] = field32 & 0xffffff; | ||
| 414 | MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET); | ||
| 415 | MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET); | ||
| 416 | } | ||
| 417 | } | ||
| 418 | |||
| 419 | mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n", | ||
| 420 | dev_cap->bmme_flags, dev_cap->reserved_lkey); | ||
| 421 | |||
| 422 | /* | ||
| 423 | * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then | ||
| 424 | * we can't use any EQs whose doorbell falls on that page, | ||
| 425 | * even if the EQ itself isn't reserved. | ||
| 426 | */ | ||
| 427 | dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4, | ||
| 428 | dev_cap->reserved_eqs); | ||
| 429 | |||
| 430 | mlx4_dbg(dev, "Max ICM size %lld MB\n", | ||
| 431 | (unsigned long long) dev_cap->max_icm_sz >> 20); | ||
| 432 | mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", | ||
| 433 | dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz); | ||
| 434 | mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", | ||
| 435 | dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz); | ||
| 436 | mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", | ||
| 437 | dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz); | ||
| 438 | mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", | ||
| 439 | dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz); | ||
| 440 | mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", | ||
| 441 | dev_cap->reserved_mrws, dev_cap->reserved_mtts); | ||
| 442 | mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", | ||
| 443 | dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars); | ||
| 444 | mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n", | ||
| 445 | dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms); | ||
| 446 | mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", | ||
| 447 | dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz); | ||
| 448 | mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", | ||
| 449 | dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1], | ||
| 450 | dev_cap->max_port_width[1]); | ||
| 451 | mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", | ||
| 452 | dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); | ||
| 453 | mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n", | ||
| 454 | dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg); | ||
| 455 | mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz); | ||
| 456 | mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters); | ||
| 457 | |||
| 458 | dump_dev_cap_flags(dev, dev_cap->flags); | ||
| 459 | |||
| 460 | out: | ||
| 461 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 462 | return err; | ||
| 463 | } | ||
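/*
 * Editor's note (sketch, not upstream text): nearly every limit in the
 * QUERY_DEV_CAP mailbox is a log2 value packed into part of a byte, so
 * the decode above is always "mask, then shift 1 left". For example, a
 * raw byte of 0x17 at QUERY_DEV_CAP_MAX_QP_OFFSET decodes as
 * 1 << (0x17 & 0x1f) == 1 << 23 == 8388608 QPs, while high-nibble
 * counts such as reserved SRQs decode as 1 << (field >> 4).
 */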
| 464 | |||
| 465 | int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) | ||
| 466 | { | ||
| 467 | struct mlx4_cmd_mailbox *mailbox; | ||
| 468 | struct mlx4_icm_iter iter; | ||
| 469 | __be64 *pages; | ||
| 470 | int lg; | ||
| 471 | int nent = 0; | ||
| 472 | int i; | ||
| 473 | int err = 0; | ||
| 474 | int ts = 0, tc = 0; | ||
| 475 | |||
| 476 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 477 | if (IS_ERR(mailbox)) | ||
| 478 | return PTR_ERR(mailbox); | ||
| 479 | memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE); | ||
| 480 | pages = mailbox->buf; | ||
| 481 | |||
| 482 | for (mlx4_icm_first(icm, &iter); | ||
| 483 | !mlx4_icm_last(&iter); | ||
| 484 | mlx4_icm_next(&iter)) { | ||
| 485 | /* | ||
| 486 | * We have to pass pages that are aligned to their | ||
| 487 | * size, so find the least significant 1 in the | ||
| 488 | * address or size and use that as our log2 size. | ||
| 489 | */ | ||
| 490 | lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1; | ||
| 491 | if (lg < MLX4_ICM_PAGE_SHIFT) { | ||
| 492 | mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n", | ||
| 493 | MLX4_ICM_PAGE_SIZE, | ||
| 494 | (unsigned long long) mlx4_icm_addr(&iter), | ||
| 495 | mlx4_icm_size(&iter)); | ||
| 496 | err = -EINVAL; | ||
| 497 | goto out; | ||
| 498 | } | ||
| 499 | |||
| 500 | for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) { | ||
| 501 | if (virt != -1) { | ||
| 502 | pages[nent * 2] = cpu_to_be64(virt); | ||
| 503 | virt += 1 << lg; | ||
| 504 | } | ||
| 505 | |||
| 506 | pages[nent * 2 + 1] = | ||
| 507 | cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) | | ||
| 508 | (lg - MLX4_ICM_PAGE_SHIFT)); | ||
| 509 | ts += 1 << (lg - 10); | ||
| 510 | ++tc; | ||
| 511 | |||
| 512 | if (++nent == MLX4_MAILBOX_SIZE / 16) { | ||
| 513 | err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, | ||
| 514 | MLX4_CMD_TIME_CLASS_B); | ||
| 515 | if (err) | ||
| 516 | goto out; | ||
| 517 | nent = 0; | ||
| 518 | } | ||
| 519 | } | ||
| 520 | } | ||
| 521 | |||
| 522 | if (nent) | ||
| 523 | err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B); | ||
| 524 | if (err) | ||
| 525 | goto out; | ||
| 526 | |||
| 527 | switch (op) { | ||
| 528 | case MLX4_CMD_MAP_FA: | ||
| 529 | mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts); | ||
| 530 | break; | ||
| 531 | case MLX4_CMD_MAP_ICM_AUX: | ||
| 532 | mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts); | ||
| 533 | break; | ||
| 534 | case MLX4_CMD_MAP_ICM: | ||
| 535 | mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n", | ||
| 536 | tc, ts, (unsigned long long) virt - (ts << 10)); | ||
| 537 | break; | ||
| 538 | } | ||
| 539 | |||
| 540 | out: | ||
| 541 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 542 | return err; | ||
| 543 | } | ||
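/*
 * Editor's worked example (not upstream text): the ffs() trick above
 * picks the largest power-of-two block size that both the chunk
 * address and its length are multiples of. With addr == 0x40000 and
 * size == 0x30000, addr | size == 0x70000 and ffs(0x70000) - 1 == 16,
 * so the chunk is passed to firmware as 64 KB (1 << 16) pages; the
 * low bits of each mailbox entry then carry lg - MLX4_ICM_PAGE_SHIFT
 * as the per-entry page-size override.
 */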
| 544 | |||
| 545 | int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm) | ||
| 546 | { | ||
| 547 | return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1); | ||
| 548 | } | ||
| 549 | |||
| 550 | int mlx4_UNMAP_FA(struct mlx4_dev *dev) | ||
| 551 | { | ||
| 552 | return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B); | ||
| 553 | } | ||
| 554 | |||
| 555 | |||
| 556 | int mlx4_RUN_FW(struct mlx4_dev *dev) | ||
| 557 | { | ||
| 558 | return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A); | ||
| 559 | } | ||
| 560 | |||
| 561 | int mlx4_QUERY_FW(struct mlx4_dev *dev) | ||
| 562 | { | ||
| 563 | struct mlx4_fw *fw = &mlx4_priv(dev)->fw; | ||
| 564 | struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; | ||
| 565 | struct mlx4_cmd_mailbox *mailbox; | ||
| 566 | u32 *outbox; | ||
| 567 | int err = 0; | ||
| 568 | u64 fw_ver; | ||
| 569 | u16 cmd_if_rev; | ||
| 570 | u8 lg; | ||
| 571 | |||
| 572 | #define QUERY_FW_OUT_SIZE 0x100 | ||
| 573 | #define QUERY_FW_VER_OFFSET 0x00 | ||
| 574 | #define QUERY_FW_CMD_IF_REV_OFFSET 0x0a | ||
| 575 | #define QUERY_FW_MAX_CMD_OFFSET 0x0f | ||
| 576 | #define QUERY_FW_ERR_START_OFFSET 0x30 | ||
| 577 | #define QUERY_FW_ERR_SIZE_OFFSET 0x38 | ||
| 578 | #define QUERY_FW_ERR_BAR_OFFSET 0x3c | ||
| 579 | |||
| 580 | #define QUERY_FW_SIZE_OFFSET 0x00 | ||
| 581 | #define QUERY_FW_CLR_INT_BASE_OFFSET 0x20 | ||
| 582 | #define QUERY_FW_CLR_INT_BAR_OFFSET 0x28 | ||
| 583 | |||
| 584 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 585 | if (IS_ERR(mailbox)) | ||
| 586 | return PTR_ERR(mailbox); | ||
| 587 | outbox = mailbox->buf; | ||
| 588 | |||
| 589 | err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW, | ||
| 590 | MLX4_CMD_TIME_CLASS_A); | ||
| 591 | if (err) | ||
| 592 | goto out; | ||
| 593 | |||
| 594 | MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET); | ||
| 595 | /* | ||
| 596 | * The FW subminor version is stored in more significant bits than | ||
| 597 | * the minor version, so swap them here. | ||
| 598 | */ | ||
| 599 | dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) | | ||
| 600 | ((fw_ver & 0xffff0000ull) >> 16) | | ||
| 601 | ((fw_ver & 0x0000ffffull) << 16); | ||
| 602 | |||
| 603 | MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); | ||
| 604 | if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || | ||
| 605 | cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) { | ||
| 606 | mlx4_err(dev, "Installed FW has unsupported " | ||
| 607 | "command interface revision %d.\n", | ||
| 608 | cmd_if_rev); | ||
| 609 | mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n", | ||
| 610 | (int) (dev->caps.fw_ver >> 32), | ||
| 611 | (int) (dev->caps.fw_ver >> 16) & 0xffff, | ||
| 612 | (int) dev->caps.fw_ver & 0xffff); | ||
| 613 | mlx4_err(dev, "This driver version supports only revisions %d to %d.\n", | ||
| 614 | MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV); | ||
| 615 | err = -ENODEV; | ||
| 616 | goto out; | ||
| 617 | } | ||
| 618 | |||
| 619 | if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS) | ||
| 620 | dev->flags |= MLX4_FLAG_OLD_PORT_CMDS; | ||
| 621 | |||
| 622 | MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); | ||
| 623 | cmd->max_cmds = 1 << lg; | ||
| 624 | |||
| 625 | mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n", | ||
| 626 | (int) (dev->caps.fw_ver >> 32), | ||
| 627 | (int) (dev->caps.fw_ver >> 16) & 0xffff, | ||
| 628 | (int) dev->caps.fw_ver & 0xffff, | ||
| 629 | cmd_if_rev, cmd->max_cmds); | ||
| 630 | |||
| 631 | MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET); | ||
| 632 | MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET); | ||
| 633 | MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET); | ||
| 634 | fw->catas_bar = (fw->catas_bar >> 6) * 2; | ||
| 635 | |||
| 636 | mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n", | ||
| 637 | (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar); | ||
| 638 | |||
| 639 | MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET); | ||
| 640 | MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET); | ||
| 641 | MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET); | ||
| 642 | fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2; | ||
| 643 | |||
| 644 | mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2); | ||
| 645 | |||
| 646 | /* | ||
| 647 | * Round up number of system pages needed in case | ||
| 648 | * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. | ||
| 649 | */ | ||
| 650 | fw->fw_pages = | ||
| 651 | ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> | ||
| 652 | (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); | ||
| 653 | |||
| 654 | mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n", | ||
| 655 | (unsigned long long) fw->clr_int_base, fw->clr_int_bar); | ||
| 656 | |||
| 657 | out: | ||
| 658 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 659 | return err; | ||
| 660 | } | ||
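/*
 * Editor's sketch (hypothetical helper, not in the driver): the word
 * swap above in miniature. QUERY_FW reports major:subminor:minor in
 * the low 48 bits, but the driver wants major:minor:subminor, so the
 * two low 16-bit words trade places: 0x000200030001 (major 2,
 * subminor 3, minor 1) becomes 0x000200010003, i.e. version 2.1.003.
 */
static inline u64 mlx4_example_fix_fw_ver(u64 raw_fw_ver)
{
	return (raw_fw_ver & 0xffff00000000ull) |
	       ((raw_fw_ver & 0x0000ffff0000ull) >> 16) |
	       ((raw_fw_ver & 0x00000000ffffull) << 16);
}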
| 661 | |||
| 662 | static void get_board_id(void *vsd, char *board_id) | ||
| 663 | { | ||
| 664 | int i; | ||
| 665 | |||
| 666 | #define VSD_OFFSET_SIG1 0x00 | ||
| 667 | #define VSD_OFFSET_SIG2 0xde | ||
| 668 | #define VSD_OFFSET_MLX_BOARD_ID 0xd0 | ||
| 669 | #define VSD_OFFSET_TS_BOARD_ID 0x20 | ||
| 670 | |||
| 671 | #define VSD_SIGNATURE_TOPSPIN 0x5ad | ||
| 672 | |||
| 673 | memset(board_id, 0, MLX4_BOARD_ID_LEN); | ||
| 674 | |||
| 675 | if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN && | ||
| 676 | be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) { | ||
| 677 | strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN); | ||
| 678 | } else { | ||
| 679 | /* | ||
| 680 | * The board ID is a string but the firmware byte | ||
| 681 | * swaps each 4-byte word before passing it back to | ||
| 682 | * us. Therefore we need to swab it before printing. | ||
| 683 | */ | ||
| 684 | for (i = 0; i < 4; ++i) | ||
| 685 | ((u32 *) board_id)[i] = | ||
| 686 | swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4)); | ||
| 687 | } | ||
| 688 | } | ||
| 689 | |||
| 690 | int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter) | ||
| 691 | { | ||
| 692 | struct mlx4_cmd_mailbox *mailbox; | ||
| 693 | u32 *outbox; | ||
| 694 | int err; | ||
| 695 | |||
| 696 | #define QUERY_ADAPTER_OUT_SIZE 0x100 | ||
| 697 | #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 | ||
| 698 | #define QUERY_ADAPTER_VSD_OFFSET 0x20 | ||
| 699 | |||
| 700 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 701 | if (IS_ERR(mailbox)) | ||
| 702 | return PTR_ERR(mailbox); | ||
| 703 | outbox = mailbox->buf; | ||
| 704 | |||
| 705 | err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER, | ||
| 706 | MLX4_CMD_TIME_CLASS_A); | ||
| 707 | if (err) | ||
| 708 | goto out; | ||
| 709 | |||
| 710 | MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); | ||
| 711 | |||
| 712 | get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, | ||
| 713 | adapter->board_id); | ||
| 714 | |||
| 715 | out: | ||
| 716 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 717 | return err; | ||
| 718 | } | ||
| 719 | |||
| 720 | int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) | ||
| 721 | { | ||
| 722 | struct mlx4_cmd_mailbox *mailbox; | ||
| 723 | __be32 *inbox; | ||
| 724 | int err; | ||
| 725 | |||
| 726 | #define INIT_HCA_IN_SIZE 0x200 | ||
| 727 | #define INIT_HCA_VERSION_OFFSET 0x000 | ||
| 728 | #define INIT_HCA_VERSION 2 | ||
| 729 | #define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e | ||
| 730 | #define INIT_HCA_FLAGS_OFFSET 0x014 | ||
| 731 | #define INIT_HCA_QPC_OFFSET 0x020 | ||
| 732 | #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) | ||
| 733 | #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) | ||
| 734 | #define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28) | ||
| 735 | #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f) | ||
| 736 | #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) | ||
| 737 | #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) | ||
| 738 | #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) | ||
| 739 | #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) | ||
| 740 | #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) | ||
| 741 | #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67) | ||
| 742 | #define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70) | ||
| 743 | #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77) | ||
| 744 | #define INIT_HCA_MCAST_OFFSET 0x0c0 | ||
| 745 | #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) | ||
| 746 | #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) | ||
| 747 | #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) | ||
| 748 | #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) | ||
| 749 | #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) | ||
| 750 | #define INIT_HCA_TPT_OFFSET 0x0f0 | ||
| 751 | #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) | ||
| 752 | #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) | ||
| 753 | #define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10) | ||
| 754 | #define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18) | ||
| 755 | #define INIT_HCA_UAR_OFFSET 0x120 | ||
| 756 | #define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a) | ||
| 757 | #define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b) | ||
| 758 | |||
| 759 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 760 | if (IS_ERR(mailbox)) | ||
| 761 | return PTR_ERR(mailbox); | ||
| 762 | inbox = mailbox->buf; | ||
| 763 | |||
| 764 | memset(inbox, 0, INIT_HCA_IN_SIZE); | ||
| 765 | |||
| 766 | *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; | ||
| 767 | |||
| 768 | *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) = | ||
| 769 | (ilog2(cache_line_size()) - 4) << 5; | ||
| 770 | |||
| 771 | #if defined(__LITTLE_ENDIAN) | ||
| 772 | *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); | ||
| 773 | #elif defined(__BIG_ENDIAN) | ||
| 774 | *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1); | ||
| 775 | #else | ||
| 776 | #error Host endianness not defined | ||
| 777 | #endif | ||
| 778 | /* Check port for UD address vector: */ | ||
| 779 | *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1); | ||
| 780 | |||
| 781 | /* Enable IPoIB checksumming if we can: */ | ||
| 782 | if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) | ||
| 783 | *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3); | ||
| 784 | |||
| 785 | /* Enable QoS support if module parameter set */ | ||
| 786 | if (enable_qos) | ||
| 787 | *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2); | ||
| 788 | |||
| 789 | /* enable counters */ | ||
| 790 | if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS) | ||
| 791 | *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4); | ||
| 792 | |||
| 793 | /* QPC/EEC/CQC/EQC/RDMARC attributes */ | ||
| 794 | |||
| 795 | MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); | ||
| 796 | MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET); | ||
| 797 | MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET); | ||
| 798 | MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET); | ||
| 799 | MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET); | ||
| 800 | MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET); | ||
| 801 | MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET); | ||
| 802 | MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET); | ||
| 803 | MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET); | ||
| 804 | MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET); | ||
| 805 | MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET); | ||
| 806 | MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET); | ||
| 807 | |||
| 808 | /* multicast attributes */ | ||
| 809 | |||
| 810 | MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); | ||
| 811 | MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); | ||
| 812 | MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); | ||
| 813 | if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) | ||
| 814 | MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET); | ||
| 815 | MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); | ||
| 816 | |||
| 817 | /* TPT attributes */ | ||
| 818 | |||
| 819 | MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET); | ||
| 820 | MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET); | ||
| 821 | MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET); | ||
| 822 | MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET); | ||
| 823 | |||
| 824 | /* UAR attributes */ | ||
| 825 | |||
| 826 | MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET); | ||
| 827 | MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); | ||
| 828 | |||
| 829 | err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000); | ||
| 830 | |||
| 831 | if (err) | ||
| 832 | mlx4_err(dev, "INIT_HCA returns %d\n", err); | ||
| 833 | |||
| 834 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 835 | return err; | ||
| 836 | } | ||
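/*
 * Editor's worked example (not upstream text): the cache-line byte at
 * INIT_HCA_CACHELINE_SZ_OFFSET encodes log2(line size) - 4 in bits
 * 7:5, so a 64-byte cache line yields (ilog2(64) - 4) << 5 ==
 * (6 - 4) << 5 == 0x40. The INIT_HCA flags dword is kept big-endian
 * in place, which is why bit 1 (host endianness) is cleared or set
 * with cpu_to_be32() masks rather than plain integer arithmetic.
 */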
| 837 | |||
| 838 | int mlx4_INIT_PORT(struct mlx4_dev *dev, int port) | ||
| 839 | { | ||
| 840 | struct mlx4_cmd_mailbox *mailbox; | ||
| 841 | u32 *inbox; | ||
| 842 | int err; | ||
| 843 | u32 flags; | ||
| 844 | u16 field; | ||
| 845 | |||
| 846 | if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { | ||
| 847 | #define INIT_PORT_IN_SIZE 256 | ||
| 848 | #define INIT_PORT_FLAGS_OFFSET 0x00 | ||
| 849 | #define INIT_PORT_FLAG_SIG (1 << 18) | ||
| 850 | #define INIT_PORT_FLAG_NG (1 << 17) | ||
| 851 | #define INIT_PORT_FLAG_G0 (1 << 16) | ||
| 852 | #define INIT_PORT_VL_SHIFT 4 | ||
| 853 | #define INIT_PORT_PORT_WIDTH_SHIFT 8 | ||
| 854 | #define INIT_PORT_MTU_OFFSET 0x04 | ||
| 855 | #define INIT_PORT_MAX_GID_OFFSET 0x06 | ||
| 856 | #define INIT_PORT_MAX_PKEY_OFFSET 0x0a | ||
| 857 | #define INIT_PORT_GUID0_OFFSET 0x10 | ||
| 858 | #define INIT_PORT_NODE_GUID_OFFSET 0x18 | ||
| 859 | #define INIT_PORT_SI_GUID_OFFSET 0x20 | ||
| 860 | |||
| 861 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 862 | if (IS_ERR(mailbox)) | ||
| 863 | return PTR_ERR(mailbox); | ||
| 864 | inbox = mailbox->buf; | ||
| 865 | |||
| 866 | memset(inbox, 0, INIT_PORT_IN_SIZE); | ||
| 867 | |||
| 868 | flags = 0; | ||
| 869 | flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT; | ||
| 870 | flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; | ||
| 871 | MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); | ||
| 872 | |||
| 873 | field = 128 << dev->caps.ib_mtu_cap[port]; | ||
| 874 | MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET); | ||
| 875 | field = dev->caps.gid_table_len[port]; | ||
| 876 | MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET); | ||
| 877 | field = dev->caps.pkey_table_len[port]; | ||
| 878 | MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET); | ||
| 879 | |||
| 880 | err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT, | ||
| 881 | MLX4_CMD_TIME_CLASS_A); | ||
| 882 | |||
| 883 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 884 | } else | ||
| 885 | err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, | ||
| 886 | MLX4_CMD_TIME_CLASS_A); | ||
| 887 | |||
| 888 | return err; | ||
| 889 | } | ||
| 890 | EXPORT_SYMBOL_GPL(mlx4_INIT_PORT); | ||
| 891 | |||
| 892 | int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) | ||
| 893 | { | ||
| 894 | return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000); | ||
| 895 | } | ||
| 896 | EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); | ||
| 897 | |||
| 898 | int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) | ||
| 899 | { | ||
| 900 | return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000); | ||
| 901 | } | ||
| 902 | |||
| 903 | int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) | ||
| 904 | { | ||
| 905 | int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0, | ||
| 906 | MLX4_CMD_SET_ICM_SIZE, | ||
| 907 | MLX4_CMD_TIME_CLASS_A); | ||
| 908 | if (ret) | ||
| 909 | return ret; | ||
| 910 | |||
| 911 | /* | ||
| 912 | * Round up number of system pages needed in case | ||
| 913 | * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. | ||
| 914 | */ | ||
| 915 | *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> | ||
| 916 | (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); | ||
| 917 | |||
| 918 | return 0; | ||
| 919 | } | ||
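/*
 * Editor's worked example (assuming MLX4_ICM_PAGE_SHIFT == 12, i.e.
 * 4 KB ICM pages, as defined in icm.h): on a host with 64 KB system
 * pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE == 16, so *aux_pages == 33
 * ICM pages rounds up to ALIGN(33, 16) == 48 and then shifts right by
 * PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT == 4, giving 3 whole system pages.
 */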
| 920 | |||
| 921 | int mlx4_NOP(struct mlx4_dev *dev) | ||
| 922 | { | ||
| 923 | /* Input modifier of 0x1f means "finish as soon as possible." */ | ||
| 924 | return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100); | ||
| 925 | } | ||
| 926 | |||
| 927 | #define MLX4_WOL_SETUP_MODE (5 << 28) | ||
| 928 | int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port) | ||
| 929 | { | ||
| 930 | u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; | ||
| 931 | |||
| 932 | return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3, | ||
| 933 | MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A); | ||
| 934 | } | ||
| 935 | EXPORT_SYMBOL_GPL(mlx4_wol_read); | ||
| 936 | |||
| 937 | int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port) | ||
| 938 | { | ||
| 939 | u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; | ||
| 940 | |||
| 941 | return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG, | ||
| 942 | MLX4_CMD_TIME_CLASS_A); | ||
| 943 | } | ||
| 944 | EXPORT_SYMBOL_GPL(mlx4_wol_write); | ||
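/*
 * Editor's sketch (hypothetical helper, not in the driver):
 * MOD_STAT_CFG multiplexes several setup operations through its input
 * modifier. For Wake-on-LAN the mode sits in bits 31:28 and the port
 * in bits 15:8, so port 2 encodes as (5 << 28) | (2 << 8) ==
 * 0x50000200; op modifier 0x3 then reads the config back as immediate
 * output, while 0x1 writes it.
 */
static inline u32 mlx4_example_wol_in_mod(int port)
{
	return MLX4_WOL_SETUP_MODE | port << 8;
}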
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h new file mode 100644 index 00000000000..1e8ecc3708e --- /dev/null +++ b/drivers/net/mlx4/fw.h | |||
| @@ -0,0 +1,182 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. | ||
| 3 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. | ||
| 4 | * Copyright (c) 2006, 2007 Cisco Systems. All rights reserved. | ||
| 5 | * | ||
| 6 | * This software is available to you under a choice of one of two | ||
| 7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 8 | * General Public License (GPL) Version 2, available from the file | ||
| 9 | * COPYING in the main directory of this source tree, or the | ||
| 10 | * OpenIB.org BSD license below: | ||
| 11 | * | ||
| 12 | * Redistribution and use in source and binary forms, with or | ||
| 13 | * without modification, are permitted provided that the following | ||
| 14 | * conditions are met: | ||
| 15 | * | ||
| 16 | * - Redistributions of source code must retain the above | ||
| 17 | * copyright notice, this list of conditions and the following | ||
| 18 | * disclaimer. | ||
| 19 | * | ||
| 20 | * - Redistributions in binary form must reproduce the above | ||
| 21 | * copyright notice, this list of conditions and the following | ||
| 22 | * disclaimer in the documentation and/or other materials | ||
| 23 | * provided with the distribution. | ||
| 24 | * | ||
| 25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 32 | * SOFTWARE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #ifndef MLX4_FW_H | ||
| 36 | #define MLX4_FW_H | ||
| 37 | |||
| 38 | #include "mlx4.h" | ||
| 39 | #include "icm.h" | ||
| 40 | |||
| 41 | struct mlx4_mod_stat_cfg { | ||
| 42 | u8 log_pg_sz; | ||
| 43 | u8 log_pg_sz_m; | ||
| 44 | }; | ||
| 45 | |||
| 46 | struct mlx4_dev_cap { | ||
| 47 | int max_srq_sz; | ||
| 48 | int max_qp_sz; | ||
| 49 | int reserved_qps; | ||
| 50 | int max_qps; | ||
| 51 | int reserved_srqs; | ||
| 52 | int max_srqs; | ||
| 53 | int max_cq_sz; | ||
| 54 | int reserved_cqs; | ||
| 55 | int max_cqs; | ||
| 56 | int max_mpts; | ||
| 57 | int reserved_eqs; | ||
| 58 | int max_eqs; | ||
| 59 | int reserved_mtts; | ||
| 60 | int max_mrw_sz; | ||
| 61 | int reserved_mrws; | ||
| 62 | int max_mtt_seg; | ||
| 63 | int max_requester_per_qp; | ||
| 64 | int max_responder_per_qp; | ||
| 65 | int max_rdma_global; | ||
| 66 | int local_ca_ack_delay; | ||
| 67 | int num_ports; | ||
| 68 | u32 max_msg_sz; | ||
| 69 | int ib_mtu[MLX4_MAX_PORTS + 1]; | ||
| 70 | int max_port_width[MLX4_MAX_PORTS + 1]; | ||
| 71 | int max_vl[MLX4_MAX_PORTS + 1]; | ||
| 72 | int max_gids[MLX4_MAX_PORTS + 1]; | ||
| 73 | int max_pkeys[MLX4_MAX_PORTS + 1]; | ||
| 74 | u64 def_mac[MLX4_MAX_PORTS + 1]; | ||
| 75 | u16 eth_mtu[MLX4_MAX_PORTS + 1]; | ||
| 76 | int trans_type[MLX4_MAX_PORTS + 1]; | ||
| 77 | int vendor_oui[MLX4_MAX_PORTS + 1]; | ||
| 78 | u16 wavelength[MLX4_MAX_PORTS + 1]; | ||
| 79 | u64 trans_code[MLX4_MAX_PORTS + 1]; | ||
| 80 | u16 stat_rate_support; | ||
| 81 | u64 flags; | ||
| 82 | int reserved_uars; | ||
| 83 | int uar_size; | ||
| 84 | int min_page_sz; | ||
| 85 | int bf_reg_size; | ||
| 86 | int bf_regs_per_page; | ||
| 87 | int max_sq_sg; | ||
| 88 | int max_sq_desc_sz; | ||
| 89 | int max_rq_sg; | ||
| 90 | int max_rq_desc_sz; | ||
| 91 | int max_qp_per_mcg; | ||
| 92 | int reserved_mgms; | ||
| 93 | int max_mcgs; | ||
| 94 | int reserved_pds; | ||
| 95 | int max_pds; | ||
| 96 | int qpc_entry_sz; | ||
| 97 | int rdmarc_entry_sz; | ||
| 98 | int altc_entry_sz; | ||
| 99 | int aux_entry_sz; | ||
| 100 | int srq_entry_sz; | ||
| 101 | int cqc_entry_sz; | ||
| 102 | int eqc_entry_sz; | ||
| 103 | int dmpt_entry_sz; | ||
| 104 | int cmpt_entry_sz; | ||
| 105 | int mtt_entry_sz; | ||
| 106 | int resize_srq; | ||
| 107 | u32 bmme_flags; | ||
| 108 | u32 reserved_lkey; | ||
| 109 | u64 max_icm_sz; | ||
| 110 | int max_gso_sz; | ||
| 111 | u8 supported_port_types[MLX4_MAX_PORTS + 1]; | ||
| 112 | u8 log_max_macs[MLX4_MAX_PORTS + 1]; | ||
| 113 | u8 log_max_vlans[MLX4_MAX_PORTS + 1]; | ||
| 114 | u32 max_counters; | ||
| 115 | }; | ||
| 116 | |||
| 117 | struct mlx4_adapter { | ||
| 118 | char board_id[MLX4_BOARD_ID_LEN]; | ||
| 119 | u8 inta_pin; | ||
| 120 | }; | ||
| 121 | |||
| 122 | struct mlx4_init_hca_param { | ||
| 123 | u64 qpc_base; | ||
| 124 | u64 rdmarc_base; | ||
| 125 | u64 auxc_base; | ||
| 126 | u64 altc_base; | ||
| 127 | u64 srqc_base; | ||
| 128 | u64 cqc_base; | ||
| 129 | u64 eqc_base; | ||
| 130 | u64 mc_base; | ||
| 131 | u64 dmpt_base; | ||
| 132 | u64 cmpt_base; | ||
| 133 | u64 mtt_base; | ||
| 134 | u16 log_mc_entry_sz; | ||
| 135 | u16 log_mc_hash_sz; | ||
| 136 | u8 log_num_qps; | ||
| 137 | u8 log_num_srqs; | ||
| 138 | u8 log_num_cqs; | ||
| 139 | u8 log_num_eqs; | ||
| 140 | u8 log_rd_per_qp; | ||
| 141 | u8 log_mc_table_sz; | ||
| 142 | u8 log_mpt_sz; | ||
| 143 | u8 log_uar_sz; | ||
| 144 | }; | ||
| 145 | |||
| 146 | struct mlx4_init_ib_param { | ||
| 147 | int port_width; | ||
| 148 | int vl_cap; | ||
| 149 | int mtu_cap; | ||
| 150 | u16 gid_cap; | ||
| 151 | u16 pkey_cap; | ||
| 152 | int set_guid0; | ||
| 153 | u64 guid0; | ||
| 154 | int set_node_guid; | ||
| 155 | u64 node_guid; | ||
| 156 | int set_si_guid; | ||
| 157 | u64 si_guid; | ||
| 158 | }; | ||
| 159 | |||
| 160 | struct mlx4_set_ib_param { | ||
| 161 | int set_si_guid; | ||
| 162 | int reset_qkey_viol; | ||
| 163 | u64 si_guid; | ||
| 164 | u32 cap_mask; | ||
| 165 | }; | ||
| 166 | |||
| 167 | int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap); | ||
| 168 | int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm); | ||
| 169 | int mlx4_UNMAP_FA(struct mlx4_dev *dev); | ||
| 170 | int mlx4_RUN_FW(struct mlx4_dev *dev); | ||
| 171 | int mlx4_QUERY_FW(struct mlx4_dev *dev); | ||
| 172 | int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter); | ||
| 173 | int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param); | ||
| 174 | int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic); | ||
| 175 | int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt); | ||
| 176 | int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages); | ||
| 177 | int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); | ||
| 178 | int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev); | ||
| 179 | int mlx4_NOP(struct mlx4_dev *dev); | ||
| 180 | int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg); | ||
| 181 | |||
| 182 | #endif /* MLX4_FW_H */ | ||
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c new file mode 100644 index 00000000000..02393fdf44c --- /dev/null +++ b/drivers/net/mlx4/icm.c | |||
| @@ -0,0 +1,430 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. | ||
| 3 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | ||
| 4 | * | ||
| 5 | * This software is available to you under a choice of one of two | ||
| 6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 7 | * General Public License (GPL) Version 2, available from the file | ||
| 8 | * COPYING in the main directory of this source tree, or the | ||
| 9 | * OpenIB.org BSD license below: | ||
| 10 | * | ||
| 11 | * Redistribution and use in source and binary forms, with or | ||
| 12 | * without modification, are permitted provided that the following | ||
| 13 | * conditions are met: | ||
| 14 | * | ||
| 15 | * - Redistributions of source code must retain the above | ||
| 16 | * copyright notice, this list of conditions and the following | ||
| 17 | * disclaimer. | ||
| 18 | * | ||
| 19 | * - Redistributions in binary form must reproduce the above | ||
| 20 | * copyright notice, this list of conditions and the following | ||
| 21 | * disclaimer in the documentation and/or other materials | ||
| 22 | * provided with the distribution. | ||
| 23 | * | ||
| 24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 31 | * SOFTWARE. | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <linux/errno.h> | ||
| 35 | #include <linux/mm.h> | ||
| 36 | #include <linux/scatterlist.h> | ||
| 37 | #include <linux/slab.h> | ||
| 38 | |||
| 39 | #include <linux/mlx4/cmd.h> | ||
| 40 | |||
| 41 | #include "mlx4.h" | ||
| 42 | #include "icm.h" | ||
| 43 | #include "fw.h" | ||
| 44 | |||
| 45 | /* | ||
| 46 | * We allocate chunks as big as we can get, up to a maximum of 256 KB | ||
| 47 | * per chunk. | ||
| 48 | */ | ||
| 49 | enum { | ||
| 50 | MLX4_ICM_ALLOC_SIZE = 1 << 18, | ||
| 51 | MLX4_TABLE_CHUNK_SIZE = 1 << 18 | ||
| 52 | }; | ||
| 53 | |||
| 54 | static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) | ||
| 55 | { | ||
| 56 | int i; | ||
| 57 | |||
| 58 | if (chunk->nsg > 0) | ||
| 59 | pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages, | ||
| 60 | PCI_DMA_BIDIRECTIONAL); | ||
| 61 | |||
| 62 | for (i = 0; i < chunk->npages; ++i) | ||
| 63 | __free_pages(sg_page(&chunk->mem[i]), | ||
| 64 | get_order(chunk->mem[i].length)); | ||
| 65 | } | ||
| 66 | |||
| 67 | static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) | ||
| 68 | { | ||
| 69 | int i; | ||
| 70 | |||
| 71 | for (i = 0; i < chunk->npages; ++i) | ||
| 72 | dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length, | ||
| 73 | lowmem_page_address(sg_page(&chunk->mem[i])), | ||
| 74 | sg_dma_address(&chunk->mem[i])); | ||
| 75 | } | ||
| 76 | |||
| 77 | void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent) | ||
| 78 | { | ||
| 79 | struct mlx4_icm_chunk *chunk, *tmp; | ||
| 80 | |||
| 81 | if (!icm) | ||
| 82 | return; | ||
| 83 | |||
| 84 | list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) { | ||
| 85 | if (coherent) | ||
| 86 | mlx4_free_icm_coherent(dev, chunk); | ||
| 87 | else | ||
| 88 | mlx4_free_icm_pages(dev, chunk); | ||
| 89 | |||
| 90 | kfree(chunk); | ||
| 91 | } | ||
| 92 | |||
| 93 | kfree(icm); | ||
| 94 | } | ||
| 95 | |||
| 96 | static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask) | ||
| 97 | { | ||
| 98 | struct page *page; | ||
| 99 | |||
| 100 | page = alloc_pages(gfp_mask, order); | ||
| 101 | if (!page) | ||
| 102 | return -ENOMEM; | ||
| 103 | |||
| 104 | sg_set_page(mem, page, PAGE_SIZE << order, 0); | ||
| 105 | return 0; | ||
| 106 | } | ||
| 107 | |||
| 108 | static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem, | ||
| 109 | int order, gfp_t gfp_mask) | ||
| 110 | { | ||
| 111 | void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, | ||
| 112 | &sg_dma_address(mem), gfp_mask); | ||
| 113 | if (!buf) | ||
| 114 | return -ENOMEM; | ||
| 115 | |||
| 116 | sg_set_buf(mem, buf, PAGE_SIZE << order); | ||
| 117 | BUG_ON(mem->offset); | ||
| 118 | sg_dma_len(mem) = PAGE_SIZE << order; | ||
| 119 | return 0; | ||
| 120 | } | ||
| 121 | |||
| 122 | struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, | ||
| 123 | gfp_t gfp_mask, int coherent) | ||
| 124 | { | ||
| 125 | struct mlx4_icm *icm; | ||
| 126 | struct mlx4_icm_chunk *chunk = NULL; | ||
| 127 | int cur_order; | ||
| 128 | int ret; | ||
| 129 | |||
| 130 | /* We use sg_set_buf for coherent allocs, which assumes low memory */ | ||
| 131 | BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM)); | ||
| 132 | |||
| 133 | icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); | ||
| 134 | if (!icm) | ||
| 135 | return NULL; | ||
| 136 | |||
| 137 | icm->refcount = 0; | ||
| 138 | INIT_LIST_HEAD(&icm->chunk_list); | ||
| 139 | |||
| 140 | cur_order = get_order(MLX4_ICM_ALLOC_SIZE); | ||
| 141 | |||
| 142 | while (npages > 0) { | ||
| 143 | if (!chunk) { | ||
| 144 | chunk = kmalloc(sizeof *chunk, | ||
| 145 | gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); | ||
| 146 | if (!chunk) | ||
| 147 | goto fail; | ||
| 148 | |||
| 149 | sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN); | ||
| 150 | chunk->npages = 0; | ||
| 151 | chunk->nsg = 0; | ||
| 152 | list_add_tail(&chunk->list, &icm->chunk_list); | ||
| 153 | } | ||
| 154 | |||
| 155 | while (1 << cur_order > npages) | ||
| 156 | --cur_order; | ||
| 157 | |||
| 158 | if (coherent) | ||
| 159 | ret = mlx4_alloc_icm_coherent(&dev->pdev->dev, | ||
| 160 | &chunk->mem[chunk->npages], | ||
| 161 | cur_order, gfp_mask); | ||
| 162 | else | ||
| 163 | ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], | ||
| 164 | cur_order, gfp_mask); | ||
| 165 | |||
| 166 | if (ret) { | ||
| 167 | if (--cur_order < 0) | ||
| 168 | goto fail; | ||
| 169 | else | ||
| 170 | continue; | ||
| 171 | } | ||
| 172 | |||
| 173 | ++chunk->npages; | ||
| 174 | |||
| 175 | if (coherent) | ||
| 176 | ++chunk->nsg; | ||
| 177 | else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { | ||
| 178 | chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, | ||
| 179 | chunk->npages, | ||
| 180 | PCI_DMA_BIDIRECTIONAL); | ||
| 181 | |||
| 182 | if (chunk->nsg <= 0) | ||
| 183 | goto fail; | ||
| 184 | } | ||
| 185 | |||
| 186 | if (chunk->npages == MLX4_ICM_CHUNK_LEN) | ||
| 187 | chunk = NULL; | ||
| 188 | |||
| 189 | npages -= 1 << cur_order; | ||
| 190 | } | ||
| 191 | |||
| 192 | if (!coherent && chunk) { | ||
| 193 | chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, | ||
| 194 | chunk->npages, | ||
| 195 | PCI_DMA_BIDIRECTIONAL); | ||
| 196 | |||
| 197 | if (chunk->nsg <= 0) | ||
| 198 | goto fail; | ||
| 199 | } | ||
| 200 | |||
| 201 | return icm; | ||
| 202 | |||
| 203 | fail: | ||
| 204 | mlx4_free_icm(dev, icm, coherent); | ||
| 205 | return NULL; | ||
| 206 | } | ||
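/*
 * Editor's sketch (hypothetical, not part of the driver): the
 * allocation policy above in miniature -- start at the largest order
 * that still fits and halve on failure, so fragmentation degrades the
 * chunk size instead of failing the whole allocation. Assumes
 * npages >= 1.
 */
static inline struct page *mlx4_example_alloc_backoff(int npages, int *cur_order)
{
	struct page *page = NULL;

	while (!page && *cur_order >= 0) {
		while (1 << *cur_order > npages)
			--*cur_order;
		page = alloc_pages(GFP_KERNEL | __GFP_NOWARN, *cur_order);
		if (!page)
			--*cur_order;	/* back off to a smaller chunk */
	}
	return page;
}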
| 207 | |||
| 208 | static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt) | ||
| 209 | { | ||
| 210 | return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt); | ||
| 211 | } | ||
| 212 | |||
| 213 | static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count) | ||
| 214 | { | ||
| 215 | return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM, | ||
| 216 | MLX4_CMD_TIME_CLASS_B); | ||
| 217 | } | ||
| 218 | |||
| 219 | int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm) | ||
| 220 | { | ||
| 221 | return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1); | ||
| 222 | } | ||
| 223 | |||
| 224 | int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev) | ||
| 225 | { | ||
| 226 | return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B); | ||
| 227 | } | ||
| 228 | |||
| 229 | int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj) | ||
| 230 | { | ||
| 231 | int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size); | ||
| 232 | int ret = 0; | ||
| 233 | |||
| 234 | mutex_lock(&table->mutex); | ||
| 235 | |||
| 236 | if (table->icm[i]) { | ||
| 237 | ++table->icm[i]->refcount; | ||
| 238 | goto out; | ||
| 239 | } | ||
| 240 | |||
| 241 | table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT, | ||
| 242 | (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | | ||
| 243 | __GFP_NOWARN, table->coherent); | ||
| 244 | if (!table->icm[i]) { | ||
| 245 | ret = -ENOMEM; | ||
| 246 | goto out; | ||
| 247 | } | ||
| 248 | |||
| 249 | if (mlx4_MAP_ICM(dev, table->icm[i], table->virt + | ||
| 250 | (u64) i * MLX4_TABLE_CHUNK_SIZE)) { | ||
| 251 | mlx4_free_icm(dev, table->icm[i], table->coherent); | ||
| 252 | table->icm[i] = NULL; | ||
| 253 | ret = -ENOMEM; | ||
| 254 | goto out; | ||
| 255 | } | ||
| 256 | |||
| 257 | ++table->icm[i]->refcount; | ||
| 258 | |||
| 259 | out: | ||
| 260 | mutex_unlock(&table->mutex); | ||
| 261 | return ret; | ||
| 262 | } | ||
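/*
 * Editor's worked example (not upstream text): with obj_size == 32
 * bytes and MLX4_TABLE_CHUNK_SIZE == 256 KB, one ICM chunk covers
 * 8192 objects, so object 20000 of a 32768-entry table maps to chunk
 * (20000 & 32767) / 8192 == 2. The refcount lets every object in that
 * range share the single 256 KB mapping set up by the first caller.
 */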
| 263 | |||
| 264 | void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj) | ||
| 265 | { | ||
| 266 | int i; | ||
| 267 | |||
| 268 | i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size); | ||
| 269 | |||
| 270 | mutex_lock(&table->mutex); | ||
| 271 | |||
| 272 | if (--table->icm[i]->refcount == 0) { | ||
| 273 | mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE, | ||
| 274 | MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE); | ||
| 275 | mlx4_free_icm(dev, table->icm[i], table->coherent); | ||
| 276 | table->icm[i] = NULL; | ||
| 277 | } | ||
| 278 | |||
| 279 | mutex_unlock(&table->mutex); | ||
| 280 | } | ||
| 281 | |||
| 282 | void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle) | ||
| 283 | { | ||
| 284 | int idx, offset, dma_offset, i; | ||
| 285 | struct mlx4_icm_chunk *chunk; | ||
| 286 | struct mlx4_icm *icm; | ||
| 287 | struct page *page = NULL; | ||
| 288 | |||
| 289 | if (!table->lowmem) | ||
| 290 | return NULL; | ||
| 291 | |||
| 292 | mutex_lock(&table->mutex); | ||
| 293 | |||
| 294 | idx = (obj & (table->num_obj - 1)) * table->obj_size; | ||
| 295 | icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE]; | ||
| 296 | dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE; | ||
| 297 | |||
| 298 | if (!icm) | ||
| 299 | goto out; | ||
| 300 | |||
| 301 | list_for_each_entry(chunk, &icm->chunk_list, list) { | ||
| 302 | for (i = 0; i < chunk->npages; ++i) { | ||
| 303 | if (dma_handle && dma_offset >= 0) { | ||
| 304 | if (sg_dma_len(&chunk->mem[i]) > dma_offset) | ||
| 305 | *dma_handle = sg_dma_address(&chunk->mem[i]) + | ||
| 306 | dma_offset; | ||
| 307 | dma_offset -= sg_dma_len(&chunk->mem[i]); | ||
| 308 | } | ||
| 309 | /* | ||
| 310 | * DMA mapping can merge pages but not split them, | ||
| 311 | * so once we find the page, dma_handle has already | ||
| 312 | * been assigned. | ||
| 313 | */ | ||
| 314 | if (chunk->mem[i].length > offset) { | ||
| 315 | page = sg_page(&chunk->mem[i]); | ||
| 316 | goto out; | ||
| 317 | } | ||
| 318 | offset -= chunk->mem[i].length; | ||
| 319 | } | ||
| 320 | } | ||
| 321 | |||
| 322 | out: | ||
| 323 | mutex_unlock(&table->mutex); | ||
| 324 | return page ? lowmem_page_address(page) + offset : NULL; | ||
| 325 | } | ||
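mlx4_table_find() walks each chunk's scatterlist, subtracting entry lengths until it reaches the entry containing the object's byte offset; dma_offset is tracked separately because DMA mapping may have merged entries. A standalone sketch of the same walk (addresses and lengths are invented for the example):

        #include <stdio.h>

        struct ent { unsigned long addr; int len; };

        static unsigned long find_addr(struct ent *e, int n, int offset)
        {
                int i;

                for (i = 0; i < n; ++i) {
                        if (e[i].len > offset)  /* object is in this entry */
                                return e[i].addr + offset;
                        offset -= e[i].len;     /* skip past this entry */
                }
                return 0;
        }

        int main(void)
        {
                struct ent sg[] = { { 0x1000, 4096 }, { 0x9000, 8192 } };

                /* offset 5000 is 904 bytes into the second entry: 0x9388 */
                printf("0x%lx\n", find_addr(sg, 2, 5000));
                return 0;
        }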
| 326 | |||
| 327 | int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, | ||
| 328 | int start, int end) | ||
| 329 | { | ||
| 330 | int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size; | ||
| 331 | int i, err; | ||
| 332 | |||
| 333 | for (i = start; i <= end; i += inc) { | ||
| 334 | err = mlx4_table_get(dev, table, i); | ||
| 335 | if (err) | ||
| 336 | goto fail; | ||
| 337 | } | ||
| 338 | |||
| 339 | return 0; | ||
| 340 | |||
| 341 | fail: | ||
| 342 | while (i > start) { | ||
| 343 | i -= inc; | ||
| 344 | mlx4_table_put(dev, table, i); | ||
| 345 | } | ||
| 346 | |||
| 347 | return err; | ||
| 348 | } | ||
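mlx4_table_get_range() pins every ICM chunk covering objects [start, end], stepping one chunk's worth of objects at a time, and on failure unwinds only the chunks it already pinned. Caller-side sketch (kernel context; dev, table, base and cnt are hypothetical):

        err = mlx4_table_get_range(dev, &table, base, base + cnt - 1);
        if (err)
                return err;     /* nothing is left pinned on failure */

        /* ... objects [base, base + cnt - 1] are ICM-backed here ... */

        mlx4_table_put_range(dev, &table, base, base + cnt - 1);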
| 349 | |||
| 350 | void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, | ||
| 351 | int start, int end) | ||
| 352 | { | ||
| 353 | int i; | ||
| 354 | |||
| 355 | for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size) | ||
| 356 | mlx4_table_put(dev, table, i); | ||
| 357 | } | ||
| 358 | |||
| 359 | int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, | ||
| 360 | u64 virt, int obj_size, int nobj, int reserved, | ||
| 361 | int use_lowmem, int use_coherent) | ||
| 362 | { | ||
| 363 | int obj_per_chunk; | ||
| 364 | int num_icm; | ||
| 365 | unsigned chunk_size; | ||
| 366 | int i; | ||
| 367 | |||
| 368 | obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size; | ||
| 369 | num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; | ||
| 370 | |||
| 371 | table->icm = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL); | ||
| 372 | if (!table->icm) | ||
| 373 | return -ENOMEM; | ||
| 374 | table->virt = virt; | ||
| 375 | table->num_icm = num_icm; | ||
| 376 | table->num_obj = nobj; | ||
| 377 | table->obj_size = obj_size; | ||
| 378 | table->lowmem = use_lowmem; | ||
| 379 | table->coherent = use_coherent; | ||
| 380 | mutex_init(&table->mutex); | ||
| 381 | |||
| 382 | for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) { | ||
| 383 | chunk_size = MLX4_TABLE_CHUNK_SIZE; | ||
| 384 | if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size) | ||
| 385 | chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE); | ||
| 386 | |||
| 387 | table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT, | ||
| 388 | (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) | | ||
| 389 | __GFP_NOWARN, use_coherent); | ||
| 390 | if (!table->icm[i]) | ||
| 391 | goto err; | ||
| 392 | if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) { | ||
| 393 | mlx4_free_icm(dev, table->icm[i], use_coherent); | ||
| 394 | table->icm[i] = NULL; | ||
| 395 | goto err; | ||
| 396 | } | ||
| 397 | |||
| 398 | /* | ||
| 399 | * Add a reference to this ICM chunk so that it never | ||
| 400 | * gets freed (since it contains reserved firmware objects). | ||
| 401 | */ | ||
| 402 | ++table->icm[i]->refcount; | ||
| 403 | } | ||
| 404 | |||
| 405 | return 0; | ||
| 406 | |||
| 407 | err: | ||
| 408 | for (i = 0; i < num_icm; ++i) | ||
| 409 | if (table->icm[i]) { | ||
| 410 | mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE, | ||
| 411 | MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE); | ||
| 412 | mlx4_free_icm(dev, table->icm[i], use_coherent); | ||
| 413 | } | ||
| 414 | |||
| 415 | return -ENOMEM; | ||
| 416 | } | ||
| 417 | |||
| 418 | void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table) | ||
| 419 | { | ||
| 420 | int i; | ||
| 421 | |||
| 422 | for (i = 0; i < table->num_icm; ++i) | ||
| 423 | if (table->icm[i]) { | ||
| 424 | mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE, | ||
| 425 | MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE); | ||
| 426 | mlx4_free_icm(dev, table->icm[i], table->coherent); | ||
| 427 | } | ||
| 428 | |||
| 429 | kfree(table->icm); | ||
| 430 | } | ||
diff --git a/drivers/net/mlx4/icm.h b/drivers/net/mlx4/icm.h new file mode 100644 index 00000000000..b10c07a1dc1 --- /dev/null +++ b/drivers/net/mlx4/icm.h | |||
| @@ -0,0 +1,134 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. | ||
| 3 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | ||
| 4 | * | ||
| 5 | * This software is available to you under a choice of one of two | ||
| 6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 7 | * General Public License (GPL) Version 2, available from the file | ||
| 8 | * COPYING in the main directory of this source tree, or the | ||
| 9 | * OpenIB.org BSD license below: | ||
| 10 | * | ||
| 11 | * Redistribution and use in source and binary forms, with or | ||
| 12 | * without modification, are permitted provided that the following | ||
| 13 | * conditions are met: | ||
| 14 | * | ||
| 15 | * - Redistributions of source code must retain the above | ||
| 16 | * copyright notice, this list of conditions and the following | ||
| 17 | * disclaimer. | ||
| 18 | * | ||
| 19 | * - Redistributions in binary form must reproduce the above | ||
| 20 | * copyright notice, this list of conditions and the following | ||
| 21 | * disclaimer in the documentation and/or other materials | ||
| 22 | * provided with the distribution. | ||
| 23 | * | ||
| 24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 31 | * SOFTWARE. | ||
| 32 | */ | ||
| 33 | |||
| 34 | #ifndef MLX4_ICM_H | ||
| 35 | #define MLX4_ICM_H | ||
| 36 | |||
| 37 | #include <linux/list.h> | ||
| 38 | #include <linux/pci.h> | ||
| 39 | #include <linux/mutex.h> | ||
| 40 | |||
| 41 | #define MLX4_ICM_CHUNK_LEN \ | ||
| 42 | ((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \ | ||
| 43 | (sizeof (struct scatterlist))) | ||
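MLX4_ICM_CHUNK_LEN sizes the scatterlist array so that struct mlx4_icm_chunk fits in roughly 256 bytes. A worked check with sizes assumed for a typical x86-64 build without scatterlist debugging (16-byte list_head, two 4-byte ints, 32-byte scatterlist):

        #include <assert.h>
        #include <stdio.h>

        int main(void)
        {
                /* assumed: list_head = 16, two ints = 8, scatterlist = 32 */
                int len = (256 - 16 - 2 * 4) / 32;

                assert(len == 7);
                printf("MLX4_ICM_CHUNK_LEN = %d entries per chunk\n", len);
                return 0;
        }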
| 44 | |||
| 45 | enum { | ||
| 46 | MLX4_ICM_PAGE_SHIFT = 12, | ||
| 47 | MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT, | ||
| 48 | }; | ||
| 49 | |||
| 50 | struct mlx4_icm_chunk { | ||
| 51 | struct list_head list; | ||
| 52 | int npages; | ||
| 53 | int nsg; | ||
| 54 | struct scatterlist mem[MLX4_ICM_CHUNK_LEN]; | ||
| 55 | }; | ||
| 56 | |||
| 57 | struct mlx4_icm { | ||
| 58 | struct list_head chunk_list; | ||
| 59 | int refcount; | ||
| 60 | }; | ||
| 61 | |||
| 62 | struct mlx4_icm_iter { | ||
| 63 | struct mlx4_icm *icm; | ||
| 64 | struct mlx4_icm_chunk *chunk; | ||
| 65 | int page_idx; | ||
| 66 | }; | ||
| 67 | |||
| 68 | struct mlx4_dev; | ||
| 69 | |||
| 70 | struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, | ||
| 71 | gfp_t gfp_mask, int coherent); | ||
| 72 | void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent); | ||
| 73 | |||
| 74 | int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj); | ||
| 75 | void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj); | ||
| 76 | int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, | ||
| 77 | int start, int end); | ||
| 78 | void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, | ||
| 79 | int start, int end); | ||
| 80 | int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, | ||
| 81 | u64 virt, int obj_size, int nobj, int reserved, | ||
| 82 | int use_lowmem, int use_coherent); | ||
| 83 | void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table); | ||
| 84 | void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle); | ||
| 91 | |||
| 92 | static inline void mlx4_icm_first(struct mlx4_icm *icm, | ||
| 93 | struct mlx4_icm_iter *iter) | ||
| 94 | { | ||
| 95 | iter->icm = icm; | ||
| 96 | iter->chunk = list_empty(&icm->chunk_list) ? | ||
| 97 | NULL : list_entry(icm->chunk_list.next, | ||
| 98 | struct mlx4_icm_chunk, list); | ||
| 99 | iter->page_idx = 0; | ||
| 100 | } | ||
| 101 | |||
| 102 | static inline int mlx4_icm_last(struct mlx4_icm_iter *iter) | ||
| 103 | { | ||
| 104 | return !iter->chunk; | ||
| 105 | } | ||
| 106 | |||
| 107 | static inline void mlx4_icm_next(struct mlx4_icm_iter *iter) | ||
| 108 | { | ||
| 109 | if (++iter->page_idx >= iter->chunk->nsg) { | ||
| 110 | if (iter->chunk->list.next == &iter->icm->chunk_list) { | ||
| 111 | iter->chunk = NULL; | ||
| 112 | return; | ||
| 113 | } | ||
| 114 | |||
| 115 | iter->chunk = list_entry(iter->chunk->list.next, | ||
| 116 | struct mlx4_icm_chunk, list); | ||
| 117 | iter->page_idx = 0; | ||
| 118 | } | ||
| 119 | } | ||
| 120 | |||
| 121 | static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter) | ||
| 122 | { | ||
| 123 | return sg_dma_address(&iter->chunk->mem[iter->page_idx]); | ||
| 124 | } | ||
| 125 | |||
| 126 | static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter) | ||
| 127 | { | ||
| 128 | return sg_dma_len(&iter->chunk->mem[iter->page_idx]); | ||
| 129 | } | ||
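These inline helpers implement a forward-only cursor over the DMA-mapped runs of an ICM area. Sketch of the consuming loop, modeled on mlx4_map_cmd() (which mlx4_MAP_ICM() in icm.c delegates to); kernel context, error handling elided:

        struct mlx4_icm_iter iter;

        for (mlx4_icm_first(icm, &iter);
             !mlx4_icm_last(&iter);
             mlx4_icm_next(&iter)) {
                dma_addr_t addr = mlx4_icm_addr(&iter);   /* start of run */
                unsigned long len = mlx4_icm_size(&iter); /* bytes in run */

                /* hand [addr, addr + len) to firmware in 4K ICM pages */
        }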
| 130 | |||
| 131 | int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); | ||
| 132 | int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev); | ||
| 133 | |||
| 134 | #endif /* MLX4_ICM_H */ | ||
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c new file mode 100644 index 00000000000..73c94fcdfdd --- /dev/null +++ b/drivers/net/mlx4/intf.c | |||
| @@ -0,0 +1,184 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | ||
| 3 | * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. | ||
| 4 | * | ||
| 5 | * This software is available to you under a choice of one of two | ||
| 6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 7 | * General Public License (GPL) Version 2, available from the file | ||
| 8 | * COPYING in the main directory of this source tree, or the | ||
| 9 | * OpenIB.org BSD license below: | ||
| 10 | * | ||
| 11 | * Redistribution and use in source and binary forms, with or | ||
| 12 | * without modification, are permitted provided that the following | ||
| 13 | * conditions are met: | ||
| 14 | * | ||
| 15 | * - Redistributions of source code must retain the above | ||
| 16 | * copyright notice, this list of conditions and the following | ||
| 17 | * disclaimer. | ||
| 18 | * | ||
| 19 | * - Redistributions in binary form must reproduce the above | ||
| 20 | * copyright notice, this list of conditions and the following | ||
| 21 | * disclaimer in the documentation and/or other materials | ||
| 22 | * provided with the distribution. | ||
| 23 | * | ||
| 24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 31 | * SOFTWARE. | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <linux/slab.h> | ||
| 35 | |||
| 36 | #include "mlx4.h" | ||
| 37 | |||
| 38 | struct mlx4_device_context { | ||
| 39 | struct list_head list; | ||
| 40 | struct mlx4_interface *intf; | ||
| 41 | void *context; | ||
| 42 | }; | ||
| 43 | |||
| 44 | static LIST_HEAD(intf_list); | ||
| 45 | static LIST_HEAD(dev_list); | ||
| 46 | static DEFINE_MUTEX(intf_mutex); | ||
| 47 | |||
| 48 | static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv) | ||
| 49 | { | ||
| 50 | struct mlx4_device_context *dev_ctx; | ||
| 51 | |||
| 52 | dev_ctx = kmalloc(sizeof *dev_ctx, GFP_KERNEL); | ||
| 53 | if (!dev_ctx) | ||
| 54 | return; | ||
| 55 | |||
| 56 | dev_ctx->intf = intf; | ||
| 57 | dev_ctx->context = intf->add(&priv->dev); | ||
| 58 | |||
| 59 | if (dev_ctx->context) { | ||
| 60 | spin_lock_irq(&priv->ctx_lock); | ||
| 61 | list_add_tail(&dev_ctx->list, &priv->ctx_list); | ||
| 62 | spin_unlock_irq(&priv->ctx_lock); | ||
| 63 | } else | ||
| 64 | kfree(dev_ctx); | ||
| 65 | } | ||
| 66 | |||
| 67 | static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv) | ||
| 68 | { | ||
| 69 | struct mlx4_device_context *dev_ctx; | ||
| 70 | |||
| 71 | list_for_each_entry(dev_ctx, &priv->ctx_list, list) | ||
| 72 | if (dev_ctx->intf == intf) { | ||
| 73 | spin_lock_irq(&priv->ctx_lock); | ||
| 74 | list_del(&dev_ctx->list); | ||
| 75 | spin_unlock_irq(&priv->ctx_lock); | ||
| 76 | |||
| 77 | intf->remove(&priv->dev, dev_ctx->context); | ||
| 78 | kfree(dev_ctx); | ||
| 79 | return; | ||
| 80 | } | ||
| 81 | } | ||
| 82 | |||
| 83 | int mlx4_register_interface(struct mlx4_interface *intf) | ||
| 84 | { | ||
| 85 | struct mlx4_priv *priv; | ||
| 86 | |||
| 87 | if (!intf->add || !intf->remove) | ||
| 88 | return -EINVAL; | ||
| 89 | |||
| 90 | mutex_lock(&intf_mutex); | ||
| 91 | |||
| 92 | list_add_tail(&intf->list, &intf_list); | ||
| 93 | list_for_each_entry(priv, &dev_list, dev_list) | ||
| 94 | mlx4_add_device(intf, priv); | ||
| 95 | |||
| 96 | mutex_unlock(&intf_mutex); | ||
| 97 | |||
| 98 | return 0; | ||
| 99 | } | ||
| 100 | EXPORT_SYMBOL_GPL(mlx4_register_interface); | ||
| 101 | |||
| 102 | void mlx4_unregister_interface(struct mlx4_interface *intf) | ||
| 103 | { | ||
| 104 | struct mlx4_priv *priv; | ||
| 105 | |||
| 106 | mutex_lock(&intf_mutex); | ||
| 107 | |||
| 108 | list_for_each_entry(priv, &dev_list, dev_list) | ||
| 109 | mlx4_remove_device(intf, priv); | ||
| 110 | |||
| 111 | list_del(&intf->list); | ||
| 112 | |||
| 113 | mutex_unlock(&intf_mutex); | ||
| 114 | } | ||
| 115 | EXPORT_SYMBOL_GPL(mlx4_unregister_interface); | ||
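mlx4_register_interface() attaches the new interface to every existing device, and mlx4_add_device() treats a NULL return from add() as "do not attach". A minimal, hypothetical client sketch (kernel context; both callbacks are mandatory, as checked above):

        static void *my_add(struct mlx4_dev *dev)
        {
                /* per-device state; NULL means this device is skipped */
                return kzalloc(sizeof(int), GFP_KERNEL);
        }

        static void my_remove(struct mlx4_dev *dev, void *context)
        {
                kfree(context);
        }

        static struct mlx4_interface my_intf = {
                .add    = my_add,
                .remove = my_remove,
        };

        /* module init/exit would call:
         *      err = mlx4_register_interface(&my_intf);
         *      ...
         *      mlx4_unregister_interface(&my_intf);
         */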
| 116 | |||
| 117 | void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port) | ||
| 118 | { | ||
| 119 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 120 | struct mlx4_device_context *dev_ctx; | ||
| 121 | unsigned long flags; | ||
| 122 | |||
| 123 | spin_lock_irqsave(&priv->ctx_lock, flags); | ||
| 124 | |||
| 125 | list_for_each_entry(dev_ctx, &priv->ctx_list, list) | ||
| 126 | if (dev_ctx->intf->event) | ||
| 127 | dev_ctx->intf->event(dev, dev_ctx->context, type, port); | ||
| 128 | |||
| 129 | spin_unlock_irqrestore(&priv->ctx_lock, flags); | ||
| 130 | } | ||
| 131 | |||
| 132 | int mlx4_register_device(struct mlx4_dev *dev) | ||
| 133 | { | ||
| 134 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 135 | struct mlx4_interface *intf; | ||
| 136 | |||
| 137 | mutex_lock(&intf_mutex); | ||
| 138 | |||
| 139 | list_add_tail(&priv->dev_list, &dev_list); | ||
| 140 | list_for_each_entry(intf, &intf_list, list) | ||
| 141 | mlx4_add_device(intf, priv); | ||
| 142 | |||
| 143 | mutex_unlock(&intf_mutex); | ||
| 144 | mlx4_start_catas_poll(dev); | ||
| 145 | |||
| 146 | return 0; | ||
| 147 | } | ||
| 148 | |||
| 149 | void mlx4_unregister_device(struct mlx4_dev *dev) | ||
| 150 | { | ||
| 151 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 152 | struct mlx4_interface *intf; | ||
| 153 | |||
| 154 | mlx4_stop_catas_poll(dev); | ||
| 155 | mutex_lock(&intf_mutex); | ||
| 156 | |||
| 157 | list_for_each_entry(intf, &intf_list, list) | ||
| 158 | mlx4_remove_device(intf, priv); | ||
| 159 | |||
| 160 | list_del(&priv->dev_list); | ||
| 161 | |||
| 162 | mutex_unlock(&intf_mutex); | ||
| 163 | } | ||
| 164 | |||
| 165 | void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port) | ||
| 166 | { | ||
| 167 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 168 | struct mlx4_device_context *dev_ctx; | ||
| 169 | unsigned long flags; | ||
| 170 | void *result = NULL; | ||
| 171 | |||
| 172 | spin_lock_irqsave(&priv->ctx_lock, flags); | ||
| 173 | |||
| 174 | list_for_each_entry(dev_ctx, &priv->ctx_list, list) | ||
| 175 | if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) { | ||
| 176 | result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port); | ||
| 177 | break; | ||
| 178 | } | ||
| 179 | |||
| 180 | spin_unlock_irqrestore(&priv->ctx_lock, flags); | ||
| 181 | |||
| 182 | return result; | ||
| 183 | } | ||
| 184 | EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev); | ||
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c new file mode 100644 index 00000000000..f0ee35df4dd --- /dev/null +++ b/drivers/net/mlx4/main.c | |||
| @@ -0,0 +1,1529 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. | ||
| 3 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. | ||
| 4 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. | ||
| 5 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | ||
| 6 | * | ||
| 7 | * This software is available to you under a choice of one of two | ||
| 8 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 9 | * General Public License (GPL) Version 2, available from the file | ||
| 10 | * COPYING in the main directory of this source tree, or the | ||
| 11 | * OpenIB.org BSD license below: | ||
| 12 | * | ||
| 13 | * Redistribution and use in source and binary forms, with or | ||
| 14 | * without modification, are permitted provided that the following | ||
| 15 | * conditions are met: | ||
| 16 | * | ||
| 17 | * - Redistributions of source code must retain the above | ||
| 18 | * copyright notice, this list of conditions and the following | ||
| 19 | * disclaimer. | ||
| 20 | * | ||
| 21 | * - Redistributions in binary form must reproduce the above | ||
| 22 | * copyright notice, this list of conditions and the following | ||
| 23 | * disclaimer in the documentation and/or other materials | ||
| 24 | * provided with the distribution. | ||
| 25 | * | ||
| 26 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 27 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 28 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 29 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 30 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 31 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 32 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 33 | * SOFTWARE. | ||
| 34 | */ | ||
| 35 | |||
| 36 | #include <linux/module.h> | ||
| 37 | #include <linux/init.h> | ||
| 38 | #include <linux/errno.h> | ||
| 39 | #include <linux/pci.h> | ||
| 40 | #include <linux/dma-mapping.h> | ||
| 41 | #include <linux/slab.h> | ||
| 42 | #include <linux/io-mapping.h> | ||
| 43 | |||
| 44 | #include <linux/mlx4/device.h> | ||
| 45 | #include <linux/mlx4/doorbell.h> | ||
| 46 | |||
| 47 | #include "mlx4.h" | ||
| 48 | #include "fw.h" | ||
| 49 | #include "icm.h" | ||
| 50 | |||
| 51 | MODULE_AUTHOR("Roland Dreier"); | ||
| 52 | MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver"); | ||
| 53 | MODULE_LICENSE("Dual BSD/GPL"); | ||
| 54 | MODULE_VERSION(DRV_VERSION); | ||
| 55 | |||
| 56 | struct workqueue_struct *mlx4_wq; | ||
| 57 | |||
| 58 | #ifdef CONFIG_MLX4_DEBUG | ||
| 59 | |||
| 60 | int mlx4_debug_level; | ||
| 61 | module_param_named(debug_level, mlx4_debug_level, int, 0644); | ||
| 62 | MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); | ||
| 63 | |||
| 64 | #endif /* CONFIG_MLX4_DEBUG */ | ||
| 65 | |||
| 66 | #ifdef CONFIG_PCI_MSI | ||
| 67 | |||
| 68 | static int msi_x = 1; | ||
| 69 | module_param(msi_x, int, 0444); | ||
| 70 | MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); | ||
| 71 | |||
| 72 | #else /* CONFIG_PCI_MSI */ | ||
| 73 | |||
| 74 | #define msi_x (0) | ||
| 75 | |||
| 76 | #endif /* CONFIG_PCI_MSI */ | ||
| 77 | |||
| 78 | static char mlx4_version[] __devinitdata = | ||
| 79 | DRV_NAME ": Mellanox ConnectX core driver v" | ||
| 80 | DRV_VERSION " (" DRV_RELDATE ")\n"; | ||
| 81 | |||
| 82 | static struct mlx4_profile default_profile = { | ||
| 83 | .num_qp = 1 << 17, | ||
| 84 | .num_srq = 1 << 16, | ||
| 85 | .rdmarc_per_qp = 1 << 4, | ||
| 86 | .num_cq = 1 << 16, | ||
| 87 | .num_mcg = 1 << 13, | ||
| 88 | .num_mpt = 1 << 17, | ||
| 89 | .num_mtt = 1 << 20, | ||
| 90 | }; | ||
| 91 | |||
| 92 | static int log_num_mac = 2; | ||
| 93 | module_param_named(log_num_mac, log_num_mac, int, 0444); | ||
| 94 | MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); | ||
| 95 | |||
| 96 | static int log_num_vlan; | ||
| 97 | module_param_named(log_num_vlan, log_num_vlan, int, 0444); | ||
| 98 | MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); | ||
| 99 | |||
| 100 | static bool use_prio; | ||
| 101 | module_param_named(use_prio, use_prio, bool, 0444); | ||
| 102 | MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " | ||
| 103 | "(0/1, default 0)"); | ||
| 104 | |||
| 105 | static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); | ||
| 106 | module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); | ||
| 107 | MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); | ||
| 108 | |||
| 109 | int mlx4_check_port_params(struct mlx4_dev *dev, | ||
| 110 | enum mlx4_port_type *port_type) | ||
| 111 | { | ||
| 112 | int i; | ||
| 113 | |||
| 114 | for (i = 0; i < dev->caps.num_ports - 1; i++) { | ||
| 115 | if (port_type[i] != port_type[i + 1]) { | ||
| 116 | if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { | ||
| 117 | mlx4_err(dev, "Only same port types supported " | ||
| 118 | "on this HCA, aborting.\n"); | ||
| 119 | return -EINVAL; | ||
| 120 | } | ||
| 121 | if (port_type[i] == MLX4_PORT_TYPE_ETH && | ||
| 122 | port_type[i + 1] == MLX4_PORT_TYPE_IB) | ||
| 123 | return -EINVAL; | ||
| 124 | } | ||
| 125 | } | ||
| 126 | |||
| 127 | for (i = 0; i < dev->caps.num_ports; i++) { | ||
| 128 | if (!(port_type[i] & dev->caps.supported_type[i+1])) { | ||
| 129 | mlx4_err(dev, "Requested port type for port %d is not " | ||
| 130 | "supported on this HCA\n", i + 1); | ||
| 131 | return -EINVAL; | ||
| 132 | } | ||
| 133 | } | ||
| 134 | return 0; | ||
| 135 | } | ||
| 136 | |||
| 137 | static void mlx4_set_port_mask(struct mlx4_dev *dev) | ||
| 138 | { | ||
| 139 | int i; | ||
| 140 | |||
| 141 | dev->caps.port_mask = 0; | ||
| 142 | for (i = 1; i <= dev->caps.num_ports; ++i) | ||
| 143 | if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB) | ||
| 144 | dev->caps.port_mask |= 1 << (i - 1); | ||
| 145 | } | ||
| 146 | |||
| 147 | static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | ||
| 148 | { | ||
| 149 | int err; | ||
| 150 | int i; | ||
| 151 | |||
| 152 | err = mlx4_QUERY_DEV_CAP(dev, dev_cap); | ||
| 153 | if (err) { | ||
| 154 | mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); | ||
| 155 | return err; | ||
| 156 | } | ||
| 157 | |||
| 158 | if (dev_cap->min_page_sz > PAGE_SIZE) { | ||
| 159 | mlx4_err(dev, "HCA minimum page size of %d bigger than " | ||
| 160 | "kernel PAGE_SIZE of %ld, aborting.\n", | ||
| 161 | dev_cap->min_page_sz, PAGE_SIZE); | ||
| 162 | return -ENODEV; | ||
| 163 | } | ||
| 164 | if (dev_cap->num_ports > MLX4_MAX_PORTS) { | ||
| 165 | mlx4_err(dev, "HCA has %d ports, but we only support %d, " | ||
| 166 | "aborting.\n", | ||
| 167 | dev_cap->num_ports, MLX4_MAX_PORTS); | ||
| 168 | return -ENODEV; | ||
| 169 | } | ||
| 170 | |||
| 171 | if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) { | ||
| 172 | mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than " | ||
| 173 | "PCI resource 2 size of 0x%llx, aborting.\n", | ||
| 174 | dev_cap->uar_size, | ||
| 175 | (unsigned long long) pci_resource_len(dev->pdev, 2)); | ||
| 176 | return -ENODEV; | ||
| 177 | } | ||
| 178 | |||
| 179 | dev->caps.num_ports = dev_cap->num_ports; | ||
| 180 | for (i = 1; i <= dev->caps.num_ports; ++i) { | ||
| 181 | dev->caps.vl_cap[i] = dev_cap->max_vl[i]; | ||
| 182 | dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; | ||
| 183 | dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; | ||
| 184 | dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; | ||
| 185 | dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; | ||
| 186 | dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i]; | ||
| 187 | dev->caps.def_mac[i] = dev_cap->def_mac[i]; | ||
| 188 | dev->caps.supported_type[i] = dev_cap->supported_port_types[i]; | ||
| 189 | dev->caps.trans_type[i] = dev_cap->trans_type[i]; | ||
| 190 | dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i]; | ||
| 191 | dev->caps.wavelength[i] = dev_cap->wavelength[i]; | ||
| 192 | dev->caps.trans_code[i] = dev_cap->trans_code[i]; | ||
| 193 | } | ||
| 194 | |||
| 195 | dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; | ||
| 196 | dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; | ||
| 197 | dev->caps.bf_reg_size = dev_cap->bf_reg_size; | ||
| 198 | dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; | ||
| 199 | dev->caps.max_sq_sg = dev_cap->max_sq_sg; | ||
| 200 | dev->caps.max_rq_sg = dev_cap->max_rq_sg; | ||
| 201 | dev->caps.max_wqes = dev_cap->max_qp_sz; | ||
| 202 | dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; | ||
| 203 | dev->caps.max_srq_wqes = dev_cap->max_srq_sz; | ||
| 204 | dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; | ||
| 205 | dev->caps.reserved_srqs = dev_cap->reserved_srqs; | ||
| 206 | dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; | ||
| 207 | dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; | ||
| 208 | dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM; | ||
| 209 | /* | ||
| 210 | * Subtract 1 from the limit because we need to allocate a | ||
| 211 | * spare CQE so the HCA HW can tell the difference between an | ||
| 212 | * empty CQ and a full CQ. | ||
| 213 | */ | ||
| 214 | dev->caps.max_cqes = dev_cap->max_cq_sz - 1; | ||
| 215 | dev->caps.reserved_cqs = dev_cap->reserved_cqs; | ||
| 216 | dev->caps.reserved_eqs = dev_cap->reserved_eqs; | ||
| 217 | dev->caps.mtts_per_seg = 1 << log_mtts_per_seg; | ||
| 218 | dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts, | ||
| 219 | dev->caps.mtts_per_seg); | ||
| 220 | dev->caps.reserved_mrws = dev_cap->reserved_mrws; | ||
| 221 | dev->caps.reserved_uars = dev_cap->reserved_uars; | ||
| 222 | dev->caps.reserved_pds = dev_cap->reserved_pds; | ||
| 223 | dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; | ||
| 224 | dev->caps.max_msg_sz = dev_cap->max_msg_sz; | ||
| 225 | dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); | ||
| 226 | dev->caps.flags = dev_cap->flags; | ||
| 227 | dev->caps.bmme_flags = dev_cap->bmme_flags; | ||
| 228 | dev->caps.reserved_lkey = dev_cap->reserved_lkey; | ||
| 229 | dev->caps.stat_rate_support = dev_cap->stat_rate_support; | ||
| 230 | dev->caps.max_gso_sz = dev_cap->max_gso_sz; | ||
| 231 | |||
| 232 | dev->caps.log_num_macs = log_num_mac; | ||
| 233 | dev->caps.log_num_vlans = log_num_vlan; | ||
| 234 | dev->caps.log_num_prios = use_prio ? 3 : 0; | ||
| 235 | |||
| 236 | for (i = 1; i <= dev->caps.num_ports; ++i) { | ||
| 237 | if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH) | ||
| 238 | dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; | ||
| 239 | else | ||
| 240 | dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; | ||
| 241 | dev->caps.possible_type[i] = dev->caps.port_type[i]; | ||
| 242 | mlx4_priv(dev)->sense.sense_allowed[i] = | ||
| 243 | dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO; | ||
| 244 | |||
| 245 | if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) { | ||
| 246 | dev->caps.log_num_macs = dev_cap->log_max_macs[i]; | ||
| 247 | mlx4_warn(dev, "Requested number of MACs is too much " | ||
| 248 | "for port %d, reducing to %d.\n", | ||
| 249 | i, 1 << dev->caps.log_num_macs); | ||
| 250 | } | ||
| 251 | if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) { | ||
| 252 | dev->caps.log_num_vlans = dev_cap->log_max_vlans[i]; | ||
| 253 | mlx4_warn(dev, "Requested number of VLANs is too much " | ||
| 254 | "for port %d, reducing to %d.\n", | ||
| 255 | i, 1 << dev->caps.log_num_vlans); | ||
| 256 | } | ||
| 257 | } | ||
| 258 | |||
| 259 | mlx4_set_port_mask(dev); | ||
| 260 | |||
| 261 | dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters); | ||
| 262 | |||
| 263 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; | ||
| 264 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = | ||
| 265 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = | ||
| 266 | (1 << dev->caps.log_num_macs) * | ||
| 267 | (1 << dev->caps.log_num_vlans) * | ||
| 268 | (1 << dev->caps.log_num_prios) * | ||
| 269 | dev->caps.num_ports; | ||
| 270 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; | ||
| 271 | |||
| 272 | dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + | ||
| 273 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + | ||
| 274 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + | ||
| 275 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]; | ||
| 276 | |||
| 277 | return 0; | ||
| 278 | } | ||
| 279 | |||
| 280 | /* | ||
| 281 | * Change the port configuration of the device. | ||
| 282 | * Every user of this function must hold the port mutex. | ||
| 283 | */ | ||
| 284 | int mlx4_change_port_types(struct mlx4_dev *dev, | ||
| 285 | enum mlx4_port_type *port_types) | ||
| 286 | { | ||
| 287 | int err = 0; | ||
| 288 | int change = 0; | ||
| 289 | int port; | ||
| 290 | |||
| 291 | for (port = 0; port < dev->caps.num_ports; port++) { | ||
| 292 | /* Change the port type only if the new type differs from | ||
| 293 | * the current one (Auto was already resolved by the caller) */ | ||
| 294 | if (port_types[port] != dev->caps.port_type[port + 1]) { | ||
| 295 | change = 1; | ||
| 296 | dev->caps.port_type[port + 1] = port_types[port]; | ||
| 297 | } | ||
| 298 | } | ||
| 299 | if (change) { | ||
| 300 | mlx4_unregister_device(dev); | ||
| 301 | for (port = 1; port <= dev->caps.num_ports; port++) { | ||
| 302 | mlx4_CLOSE_PORT(dev, port); | ||
| 303 | err = mlx4_SET_PORT(dev, port); | ||
| 304 | if (err) { | ||
| 305 | mlx4_err(dev, "Failed to set port %d, " | ||
| 306 | "aborting\n", port); | ||
| 307 | goto out; | ||
| 308 | } | ||
| 309 | } | ||
| 310 | mlx4_set_port_mask(dev); | ||
| 311 | err = mlx4_register_device(dev); | ||
| 312 | } | ||
| 313 | |||
| 314 | out: | ||
| 315 | return err; | ||
| 316 | } | ||
| 317 | |||
| 318 | static ssize_t show_port_type(struct device *dev, | ||
| 319 | struct device_attribute *attr, | ||
| 320 | char *buf) | ||
| 321 | { | ||
| 322 | struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, | ||
| 323 | port_attr); | ||
| 324 | struct mlx4_dev *mdev = info->dev; | ||
| 325 | char type[8]; | ||
| 326 | |||
| 327 | sprintf(type, "%s", | ||
| 328 | (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? | ||
| 329 | "ib" : "eth"); | ||
| 330 | if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO) | ||
| 331 | sprintf(buf, "auto (%s)\n", type); | ||
| 332 | else | ||
| 333 | sprintf(buf, "%s\n", type); | ||
| 334 | |||
| 335 | return strlen(buf); | ||
| 336 | } | ||
| 337 | |||
| 338 | static ssize_t set_port_type(struct device *dev, | ||
| 339 | struct device_attribute *attr, | ||
| 340 | const char *buf, size_t count) | ||
| 341 | { | ||
| 342 | struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, | ||
| 343 | port_attr); | ||
| 344 | struct mlx4_dev *mdev = info->dev; | ||
| 345 | struct mlx4_priv *priv = mlx4_priv(mdev); | ||
| 346 | enum mlx4_port_type types[MLX4_MAX_PORTS]; | ||
| 347 | enum mlx4_port_type new_types[MLX4_MAX_PORTS]; | ||
| 348 | int i; | ||
| 349 | int err = 0; | ||
| 350 | |||
| 351 | if (!strcmp(buf, "ib\n")) | ||
| 352 | info->tmp_type = MLX4_PORT_TYPE_IB; | ||
| 353 | else if (!strcmp(buf, "eth\n")) | ||
| 354 | info->tmp_type = MLX4_PORT_TYPE_ETH; | ||
| 355 | else if (!strcmp(buf, "auto\n")) | ||
| 356 | info->tmp_type = MLX4_PORT_TYPE_AUTO; | ||
| 357 | else { | ||
| 358 | mlx4_err(mdev, "%s is not supported port type\n", buf); | ||
| 359 | return -EINVAL; | ||
| 360 | } | ||
| 361 | |||
| 362 | mlx4_stop_sense(mdev); | ||
| 363 | mutex_lock(&priv->port_mutex); | ||
| 364 | /* Possible type is always the one that was delivered */ | ||
| 365 | mdev->caps.possible_type[info->port] = info->tmp_type; | ||
| 366 | |||
| 367 | for (i = 0; i < mdev->caps.num_ports; i++) { | ||
| 368 | types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : | ||
| 369 | mdev->caps.possible_type[i+1]; | ||
| 370 | if (types[i] == MLX4_PORT_TYPE_AUTO) | ||
| 371 | types[i] = mdev->caps.port_type[i+1]; | ||
| 372 | } | ||
| 373 | |||
| 374 | if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { | ||
| 375 | for (i = 1; i <= mdev->caps.num_ports; i++) { | ||
| 376 | if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { | ||
| 377 | mdev->caps.possible_type[i] = mdev->caps.port_type[i]; | ||
| 378 | err = -EINVAL; | ||
| 379 | } | ||
| 380 | } | ||
| 381 | } | ||
| 382 | if (err) { | ||
| 383 | mlx4_err(mdev, "Auto sensing is not supported on this HCA. " | ||
| 384 | "Set only 'eth' or 'ib' for both ports " | ||
| 385 | "(should be the same)\n"); | ||
| 386 | goto out; | ||
| 387 | } | ||
| 388 | |||
| 389 | mlx4_do_sense_ports(mdev, new_types, types); | ||
| 390 | |||
| 391 | err = mlx4_check_port_params(mdev, new_types); | ||
| 392 | if (err) | ||
| 393 | goto out; | ||
| 394 | |||
| 395 | /* We are about to apply the changes after the configuration | ||
| 396 | * was verified, no need to remember the temporary types | ||
| 397 | * any more */ | ||
| 398 | for (i = 0; i < mdev->caps.num_ports; i++) | ||
| 399 | priv->port[i + 1].tmp_type = 0; | ||
| 400 | |||
| 401 | err = mlx4_change_port_types(mdev, new_types); | ||
| 402 | |||
| 403 | out: | ||
| 404 | mlx4_start_sense(mdev); | ||
| 405 | mutex_unlock(&priv->port_mutex); | ||
| 406 | return err ? err : count; | ||
| 407 | } | ||
| 408 | |||
| 409 | static int mlx4_load_fw(struct mlx4_dev *dev) | ||
| 410 | { | ||
| 411 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 412 | int err; | ||
| 413 | |||
| 414 | priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, | ||
| 415 | GFP_HIGHUSER | __GFP_NOWARN, 0); | ||
| 416 | if (!priv->fw.fw_icm) { | ||
| 417 | mlx4_err(dev, "Couldn't allocate FW area, aborting.\n"); | ||
| 418 | return -ENOMEM; | ||
| 419 | } | ||
| 420 | |||
| 421 | err = mlx4_MAP_FA(dev, priv->fw.fw_icm); | ||
| 422 | if (err) { | ||
| 423 | mlx4_err(dev, "MAP_FA command failed, aborting.\n"); | ||
| 424 | goto err_free; | ||
| 425 | } | ||
| 426 | |||
| 427 | err = mlx4_RUN_FW(dev); | ||
| 428 | if (err) { | ||
| 429 | mlx4_err(dev, "RUN_FW command failed, aborting.\n"); | ||
| 430 | goto err_unmap_fa; | ||
| 431 | } | ||
| 432 | |||
| 433 | return 0; | ||
| 434 | |||
| 435 | err_unmap_fa: | ||
| 436 | mlx4_UNMAP_FA(dev); | ||
| 437 | |||
| 438 | err_free: | ||
| 439 | mlx4_free_icm(dev, priv->fw.fw_icm, 0); | ||
| 440 | return err; | ||
| 441 | } | ||
| 442 | |||
| 443 | static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, | ||
| 444 | int cmpt_entry_sz) | ||
| 445 | { | ||
| 446 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 447 | int err; | ||
| 448 | |||
| 449 | err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, | ||
| 450 | cmpt_base + | ||
| 451 | ((u64) (MLX4_CMPT_TYPE_QP * | ||
| 452 | cmpt_entry_sz) << MLX4_CMPT_SHIFT), | ||
| 453 | cmpt_entry_sz, dev->caps.num_qps, | ||
| 454 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], | ||
| 455 | 0, 0); | ||
| 456 | if (err) | ||
| 457 | goto err; | ||
| 458 | |||
| 459 | err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table, | ||
| 460 | cmpt_base + | ||
| 461 | ((u64) (MLX4_CMPT_TYPE_SRQ * | ||
| 462 | cmpt_entry_sz) << MLX4_CMPT_SHIFT), | ||
| 463 | cmpt_entry_sz, dev->caps.num_srqs, | ||
| 464 | dev->caps.reserved_srqs, 0, 0); | ||
| 465 | if (err) | ||
| 466 | goto err_qp; | ||
| 467 | |||
| 468 | err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, | ||
| 469 | cmpt_base + | ||
| 470 | ((u64) (MLX4_CMPT_TYPE_CQ * | ||
| 471 | cmpt_entry_sz) << MLX4_CMPT_SHIFT), | ||
| 472 | cmpt_entry_sz, dev->caps.num_cqs, | ||
| 473 | dev->caps.reserved_cqs, 0, 0); | ||
| 474 | if (err) | ||
| 475 | goto err_srq; | ||
| 476 | |||
| 477 | err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, | ||
| 478 | cmpt_base + | ||
| 479 | ((u64) (MLX4_CMPT_TYPE_EQ * | ||
| 480 | cmpt_entry_sz) << MLX4_CMPT_SHIFT), | ||
| 481 | cmpt_entry_sz, | ||
| 482 | dev->caps.num_eqs, dev->caps.num_eqs, 0, 0); | ||
| 483 | if (err) | ||
| 484 | goto err_cq; | ||
| 485 | |||
| 486 | return 0; | ||
| 487 | |||
| 488 | err_cq: | ||
| 489 | mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); | ||
| 490 | |||
| 491 | err_srq: | ||
| 492 | mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); | ||
| 493 | |||
| 494 | err_qp: | ||
| 495 | mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); | ||
| 496 | |||
| 497 | err: | ||
| 498 | return err; | ||
| 499 | } | ||
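Each call above places one object type's cMPT sub-table at cmpt_base + ((u64)(type * cmpt_entry_sz) << MLX4_CMPT_SHIFT), so consecutive types are spaced cmpt_entry_sz << MLX4_CMPT_SHIFT bytes apart. A standalone illustration (the base, entry size, and the shift value of 24 are assumptions for this example):

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint64_t cmpt_base = 0x100000;  /* assumed base address */
                int cmpt_entry_sz = 64;         /* assumed cMPT entry size */
                int shift = 24;                 /* stands in for MLX4_CMPT_SHIFT */
                int type;

                for (type = 0; type < 4; ++type)  /* QP, SRQ, CQ, EQ regions */
                        printf("type %d at 0x%llx\n", type,
                               (unsigned long long)(cmpt_base +
                                ((uint64_t)(type * cmpt_entry_sz) << shift)));
                return 0;
        }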
| 500 | |||
| 501 | static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, | ||
| 502 | struct mlx4_init_hca_param *init_hca, u64 icm_size) | ||
| 503 | { | ||
| 504 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 505 | u64 aux_pages; | ||
| 506 | int err; | ||
| 507 | |||
| 508 | err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); | ||
| 509 | if (err) { | ||
| 510 | mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n"); | ||
| 511 | return err; | ||
| 512 | } | ||
| 513 | |||
| 514 | mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n", | ||
| 515 | (unsigned long long) icm_size >> 10, | ||
| 516 | (unsigned long long) aux_pages << 2); | ||
| 517 | |||
| 518 | priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, | ||
| 519 | GFP_HIGHUSER | __GFP_NOWARN, 0); | ||
| 520 | if (!priv->fw.aux_icm) { | ||
| 521 | mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n"); | ||
| 522 | return -ENOMEM; | ||
| 523 | } | ||
| 524 | |||
| 525 | err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); | ||
| 526 | if (err) { | ||
| 527 | mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n"); | ||
| 528 | goto err_free_aux; | ||
| 529 | } | ||
| 530 | |||
| 531 | err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); | ||
| 532 | if (err) { | ||
| 533 | mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n"); | ||
| 534 | goto err_unmap_aux; | ||
| 535 | } | ||
| 536 | |||
| 537 | err = mlx4_init_icm_table(dev, &priv->eq_table.table, | ||
| 538 | init_hca->eqc_base, dev_cap->eqc_entry_sz, | ||
| 539 | dev->caps.num_eqs, dev->caps.num_eqs, | ||
| 540 | 0, 0); | ||
| 541 | if (err) { | ||
| 542 | mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); | ||
| 543 | goto err_unmap_cmpt; | ||
| 544 | } | ||
| 545 | |||
| 546 | /* | ||
| 547 | * Reserved MTT entries must be aligned up to a cacheline | ||
| 548 | * boundary, since the FW will write to them, while the driver | ||
| 549 | * writes to all other MTT entries. (The variable | ||
| 550 | * dev->caps.mtt_entry_sz below is really the MTT segment | ||
| 551 | * size, not the raw entry size) | ||
| 552 | */ | ||
| 553 | dev->caps.reserved_mtts = | ||
| 554 | ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, | ||
| 555 | dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; | ||
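        /*
         * Worked example of the rounding above (sizes assumed): with a
         * 64-byte MTT segment and 128-byte cache lines, 17 reserved
         * segments become ALIGN(17 * 64, 128) / 64 = 1152 / 64 = 18,
         * so the first driver-owned segment starts cacheline-aligned.
         */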
| 556 | |||
| 557 | err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, | ||
| 558 | init_hca->mtt_base, | ||
| 559 | dev->caps.mtt_entry_sz, | ||
| 560 | dev->caps.num_mtt_segs, | ||
| 561 | dev->caps.reserved_mtts, 1, 0); | ||
| 562 | if (err) { | ||
| 563 | mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); | ||
| 564 | goto err_unmap_eq; | ||
| 565 | } | ||
| 566 | |||
| 567 | err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, | ||
| 568 | init_hca->dmpt_base, | ||
| 569 | dev_cap->dmpt_entry_sz, | ||
| 570 | dev->caps.num_mpts, | ||
| 571 | dev->caps.reserved_mrws, 1, 1); | ||
| 572 | if (err) { | ||
| 573 | mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n"); | ||
| 574 | goto err_unmap_mtt; | ||
| 575 | } | ||
| 576 | |||
| 577 | err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, | ||
| 578 | init_hca->qpc_base, | ||
| 579 | dev_cap->qpc_entry_sz, | ||
| 580 | dev->caps.num_qps, | ||
| 581 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], | ||
| 582 | 0, 0); | ||
| 583 | if (err) { | ||
| 584 | mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); | ||
| 585 | goto err_unmap_dmpt; | ||
| 586 | } | ||
| 587 | |||
| 588 | err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, | ||
| 589 | init_hca->auxc_base, | ||
| 590 | dev_cap->aux_entry_sz, | ||
| 591 | dev->caps.num_qps, | ||
| 592 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], | ||
| 593 | 0, 0); | ||
| 594 | if (err) { | ||
| 595 | mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); | ||
| 596 | goto err_unmap_qp; | ||
| 597 | } | ||
| 598 | |||
| 599 | err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, | ||
| 600 | init_hca->altc_base, | ||
| 601 | dev_cap->altc_entry_sz, | ||
| 602 | dev->caps.num_qps, | ||
| 603 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], | ||
| 604 | 0, 0); | ||
| 605 | if (err) { | ||
| 606 | mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); | ||
| 607 | goto err_unmap_auxc; | ||
| 608 | } | ||
| 609 | |||
| 610 | err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, | ||
| 611 | init_hca->rdmarc_base, | ||
| 612 | dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, | ||
| 613 | dev->caps.num_qps, | ||
| 614 | dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], | ||
| 615 | 0, 0); | ||
| 616 | if (err) { | ||
| 617 | mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); | ||
| 618 | goto err_unmap_altc; | ||
| 619 | } | ||
| 620 | |||
| 621 | err = mlx4_init_icm_table(dev, &priv->cq_table.table, | ||
| 622 | init_hca->cqc_base, | ||
| 623 | dev_cap->cqc_entry_sz, | ||
| 624 | dev->caps.num_cqs, | ||
| 625 | dev->caps.reserved_cqs, 0, 0); | ||
| 626 | if (err) { | ||
| 627 | mlx4_err(dev, "Failed to map CQ context memory, aborting.\n"); | ||
| 628 | goto err_unmap_rdmarc; | ||
| 629 | } | ||
| 630 | |||
| 631 | err = mlx4_init_icm_table(dev, &priv->srq_table.table, | ||
| 632 | init_hca->srqc_base, | ||
| 633 | dev_cap->srq_entry_sz, | ||
| 634 | dev->caps.num_srqs, | ||
| 635 | dev->caps.reserved_srqs, 0, 0); | ||
| 636 | if (err) { | ||
| 637 | mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n"); | ||
| 638 | goto err_unmap_cq; | ||
| 639 | } | ||
| 640 | |||
| 641 | /* | ||
| 642 | * It's not strictly required, but for simplicity just map the | ||
| 643 | * whole multicast group table now. The table isn't very big | ||
| 644 | * and it's a lot easier than trying to track ref counts. | ||
| 645 | */ | ||
| 646 | err = mlx4_init_icm_table(dev, &priv->mcg_table.table, | ||
| 647 | init_hca->mc_base, MLX4_MGM_ENTRY_SIZE, | ||
| 648 | dev->caps.num_mgms + dev->caps.num_amgms, | ||
| 649 | dev->caps.num_mgms + dev->caps.num_amgms, | ||
| 650 | 0, 0); | ||
| 651 | if (err) { | ||
| 652 | mlx4_err(dev, "Failed to map MCG context memory, aborting.\n"); | ||
| 653 | goto err_unmap_srq; | ||
| 654 | } | ||
| 655 | |||
| 656 | return 0; | ||
| 657 | |||
| 658 | err_unmap_srq: | ||
| 659 | mlx4_cleanup_icm_table(dev, &priv->srq_table.table); | ||
| 660 | |||
| 661 | err_unmap_cq: | ||
| 662 | mlx4_cleanup_icm_table(dev, &priv->cq_table.table); | ||
| 663 | |||
| 664 | err_unmap_rdmarc: | ||
| 665 | mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); | ||
| 666 | |||
| 667 | err_unmap_altc: | ||
| 668 | mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); | ||
| 669 | |||
| 670 | err_unmap_auxc: | ||
| 671 | mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); | ||
| 672 | |||
| 673 | err_unmap_qp: | ||
| 674 | mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); | ||
| 675 | |||
| 676 | err_unmap_dmpt: | ||
| 677 | mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); | ||
| 678 | |||
| 679 | err_unmap_mtt: | ||
| 680 | mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); | ||
| 681 | |||
| 682 | err_unmap_eq: | ||
| 683 | mlx4_cleanup_icm_table(dev, &priv->eq_table.table); | ||
| 684 | |||
| 685 | err_unmap_cmpt: | ||
| 686 | mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); | ||
| 687 | mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); | ||
| 688 | mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); | ||
| 689 | mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); | ||
| 690 | |||
| 691 | err_unmap_aux: | ||
| 692 | mlx4_UNMAP_ICM_AUX(dev); | ||
| 693 | |||
| 694 | err_free_aux: | ||
| 695 | mlx4_free_icm(dev, priv->fw.aux_icm, 0); | ||
| 696 | |||
| 697 | return err; | ||
| 698 | } | ||
| 699 | |||
| 700 | static void mlx4_free_icms(struct mlx4_dev *dev) | ||
| 701 | { | ||
| 702 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 703 | |||
| 704 | mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); | ||
| 705 | mlx4_cleanup_icm_table(dev, &priv->srq_table.table); | ||
| 706 | mlx4_cleanup_icm_table(dev, &priv->cq_table.table); | ||
| 707 | mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); | ||
| 708 | mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); | ||
| 709 | mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); | ||
| 710 | mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); | ||
| 711 | mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); | ||
| 712 | mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); | ||
| 713 | mlx4_cleanup_icm_table(dev, &priv->eq_table.table); | ||
| 714 | mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); | ||
| 715 | mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); | ||
| 716 | mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); | ||
| 717 | mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); | ||
| 718 | |||
| 719 | mlx4_UNMAP_ICM_AUX(dev); | ||
| 720 | mlx4_free_icm(dev, priv->fw.aux_icm, 0); | ||
| 721 | } | ||
| 722 | |||
| 723 | static int map_bf_area(struct mlx4_dev *dev) | ||
| 724 | { | ||
| 725 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 726 | resource_size_t bf_start; | ||
| 727 | resource_size_t bf_len; | ||
| 728 | int err = 0; | ||
| 729 | |||
| 730 | bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT); | ||
| 731 | bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT); | ||
| 732 | priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); | ||
| 733 | if (!priv->bf_mapping) | ||
| 734 | err = -ENOMEM; | ||
| 735 | |||
| 736 | return err; | ||
| 737 | } | ||
| 738 | |||
| 739 | static void unmap_bf_area(struct mlx4_dev *dev) | ||
| 740 | { | ||
| 741 | if (mlx4_priv(dev)->bf_mapping) | ||
| 742 | io_mapping_free(mlx4_priv(dev)->bf_mapping); | ||
| 743 | } | ||
| 744 | |||
| 745 | static void mlx4_close_hca(struct mlx4_dev *dev) | ||
| 746 | { | ||
| 747 | unmap_bf_area(dev); | ||
| 748 | mlx4_CLOSE_HCA(dev, 0); | ||
| 749 | mlx4_free_icms(dev); | ||
| 750 | mlx4_UNMAP_FA(dev); | ||
| 751 | mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); | ||
| 752 | } | ||
| 753 | |||
| 754 | static int mlx4_init_hca(struct mlx4_dev *dev) | ||
| 755 | { | ||
| 756 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 757 | struct mlx4_adapter adapter; | ||
| 758 | struct mlx4_dev_cap dev_cap; | ||
| 759 | struct mlx4_mod_stat_cfg mlx4_cfg; | ||
| 760 | struct mlx4_profile profile; | ||
| 761 | struct mlx4_init_hca_param init_hca; | ||
| 762 | u64 icm_size; | ||
| 763 | int err; | ||
| 764 | |||
| 765 | err = mlx4_QUERY_FW(dev); | ||
| 766 | if (err) { | ||
| 767 | if (err == -EACCES) | ||
| 768 | mlx4_info(dev, "non-primary physical function, skipping.\n"); | ||
| 769 | else | ||
| 770 | mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); | ||
| 771 | return err; | ||
| 772 | } | ||
| 773 | |||
| 774 | err = mlx4_load_fw(dev); | ||
| 775 | if (err) { | ||
| 776 | mlx4_err(dev, "Failed to start FW, aborting.\n"); | ||
| 777 | return err; | ||
| 778 | } | ||
| 779 | |||
| 780 | mlx4_cfg.log_pg_sz_m = 1; | ||
| 781 | mlx4_cfg.log_pg_sz = 0; | ||
| 782 | err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); | ||
| 783 | if (err) | ||
| 784 | mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); | ||
| 785 | |||
| 786 | err = mlx4_dev_cap(dev, &dev_cap); | ||
| 787 | if (err) { | ||
| 788 | mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); | ||
| 789 | goto err_stop_fw; | ||
| 790 | } | ||
| 791 | |||
| 792 | profile = default_profile; | ||
| 793 | |||
| 794 | icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca); | ||
| 795 | if ((long long) icm_size < 0) { | ||
| 796 | err = icm_size; | ||
| 797 | goto err_stop_fw; | ||
| 798 | } | ||
| 799 | |||
| 800 | if (map_bf_area(dev)) | ||
| 801 | mlx4_dbg(dev, "Failed to map blue flame area\n"); | ||
| 802 | |||
| 803 | init_hca.log_uar_sz = ilog2(dev->caps.num_uars); | ||
| 804 | |||
| 805 | err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); | ||
| 806 | if (err) | ||
| 807 | goto err_stop_fw; | ||
| 808 | |||
| 809 | err = mlx4_INIT_HCA(dev, &init_hca); | ||
| 810 | if (err) { | ||
| 811 | mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); | ||
| 812 | goto err_free_icm; | ||
| 813 | } | ||
| 814 | |||
| 815 | err = mlx4_QUERY_ADAPTER(dev, &adapter); | ||
| 816 | if (err) { | ||
| 817 | mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n"); | ||
| 818 | goto err_close; | ||
| 819 | } | ||
| 820 | |||
| 821 | priv->eq_table.inta_pin = adapter.inta_pin; | ||
| 822 | memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); | ||
| 823 | |||
| 824 | return 0; | ||
| 825 | |||
| 826 | err_close: | ||
| 827 | mlx4_CLOSE_HCA(dev, 0); | ||
| 828 | |||
| 829 | err_free_icm: | ||
| 830 | mlx4_free_icms(dev); | ||
| 831 | |||
| 832 | err_stop_fw: | ||
| 833 | unmap_bf_area(dev); | ||
| 834 | mlx4_UNMAP_FA(dev); | ||
| 835 | mlx4_free_icm(dev, priv->fw.fw_icm, 0); | ||
| 836 | |||
| 837 | return err; | ||
| 838 | } | ||
| 839 | |||
| 840 | static int mlx4_init_counters_table(struct mlx4_dev *dev) | ||
| 841 | { | ||
| 842 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 843 | int nent; | ||
| 844 | |||
| 845 | if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) | ||
| 846 | return -ENOENT; | ||
| 847 | |||
| 848 | nent = dev->caps.max_counters; | ||
| 849 | return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0); | ||
| 850 | } | ||
| 851 | |||
| 852 | static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) | ||
| 853 | { | ||
| 854 | mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); | ||
| 855 | } | ||
| 856 | |||
| 857 | int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) | ||
| 858 | { | ||
| 859 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 860 | |||
| 861 | if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) | ||
| 862 | return -ENOENT; | ||
| 863 | |||
| 864 | *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); | ||
| 865 | if (*idx == -1) | ||
| 866 | return -ENOMEM; | ||
| 867 | |||
| 868 | return 0; | ||
| 869 | } | ||
| 870 | EXPORT_SYMBOL_GPL(mlx4_counter_alloc); | ||
| 871 | |||
| 872 | void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) | ||
| 873 | { | ||
| 874 | mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx); | ||
| 875 | return; | ||
| 876 | } | ||
| 877 | EXPORT_SYMBOL_GPL(mlx4_counter_free); | ||
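Caller-side sketch of the counter API (kernel context, hypothetical caller; -ENOENT means the HCA has no counters, and callers typically continue without one):

        u32 idx;
        int err = mlx4_counter_alloc(dev, &idx);

        if (!err) {
                /* ... program idx into a QP context ... */
                mlx4_counter_free(dev, idx);
        } else if (err != -ENOENT) {
                return err;     /* real failure: counter space exhausted */
        }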
| 878 | |||
| 879 | static int mlx4_setup_hca(struct mlx4_dev *dev) | ||
| 880 | { | ||
| 881 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 882 | int err; | ||
| 883 | int port; | ||
| 884 | __be32 ib_port_default_caps; | ||
| 885 | |||
| 886 | err = mlx4_init_uar_table(dev); | ||
| 887 | if (err) { | ||
| 888 | mlx4_err(dev, "Failed to initialize " | ||
| 889 | "user access region table, aborting.\n"); | ||
| 890 | return err; | ||
| 891 | } | ||
| 892 | |||
| 893 | err = mlx4_uar_alloc(dev, &priv->driver_uar); | ||
| 894 | if (err) { | ||
| 895 | mlx4_err(dev, "Failed to allocate driver access region, " | ||
| 896 | "aborting.\n"); | ||
| 897 | goto err_uar_table_free; | ||
| 898 | } | ||
| 899 | |||
| 900 | priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); | ||
| 901 | if (!priv->kar) { | ||
| 902 | mlx4_err(dev, "Couldn't map kernel access region, " | ||
| 903 | "aborting.\n"); | ||
| 904 | err = -ENOMEM; | ||
| 905 | goto err_uar_free; | ||
| 906 | } | ||
| 907 | |||
| 908 | err = mlx4_init_pd_table(dev); | ||
| 909 | if (err) { | ||
| 910 | mlx4_err(dev, "Failed to initialize " | ||
| 911 | "protection domain table, aborting.\n"); | ||
| 912 | goto err_kar_unmap; | ||
| 913 | } | ||
| 914 | |||
| 915 | err = mlx4_init_mr_table(dev); | ||
| 916 | if (err) { | ||
| 917 | mlx4_err(dev, "Failed to initialize " | ||
| 918 | "memory region table, aborting.\n"); | ||
| 919 | goto err_pd_table_free; | ||
| 920 | } | ||
| 921 | |||
| 922 | err = mlx4_init_eq_table(dev); | ||
| 923 | if (err) { | ||
| 924 | mlx4_err(dev, "Failed to initialize " | ||
| 925 | "event queue table, aborting.\n"); | ||
| 926 | goto err_mr_table_free; | ||
| 927 | } | ||
| 928 | |||
| 929 | err = mlx4_cmd_use_events(dev); | ||
| 930 | if (err) { | ||
| 931 | mlx4_err(dev, "Failed to switch to event-driven " | ||
| 932 | "firmware commands, aborting.\n"); | ||
| 933 | goto err_eq_table_free; | ||
| 934 | } | ||
| 935 | |||
| 936 | err = mlx4_NOP(dev); | ||
| 937 | if (err) { | ||
| 938 | if (dev->flags & MLX4_FLAG_MSI_X) { | ||
| 939 | mlx4_warn(dev, "NOP command failed to generate MSI-X " | ||
| 940 | "interrupt IRQ %d).\n", | ||
| 941 | priv->eq_table.eq[dev->caps.num_comp_vectors].irq); | ||
| 942 | mlx4_warn(dev, "Trying again without MSI-X.\n"); | ||
| 943 | } else { | ||
| 944 | mlx4_err(dev, "NOP command failed to generate interrupt " | ||
| 945 | "(IRQ %d), aborting.\n", | ||
| 946 | priv->eq_table.eq[dev->caps.num_comp_vectors].irq); | ||
| 947 | mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); | ||
| 948 | } | ||
| 949 | |||
| 950 | goto err_cmd_poll; | ||
| 951 | } | ||
| 952 | |||
| 953 | mlx4_dbg(dev, "NOP command IRQ test passed\n"); | ||
| 954 | |||
| 955 | err = mlx4_init_cq_table(dev); | ||
| 956 | if (err) { | ||
| 957 | mlx4_err(dev, "Failed to initialize " | ||
| 958 | "completion queue table, aborting.\n"); | ||
| 959 | goto err_cmd_poll; | ||
| 960 | } | ||
| 961 | |||
| 962 | err = mlx4_init_srq_table(dev); | ||
| 963 | if (err) { | ||
| 964 | mlx4_err(dev, "Failed to initialize " | ||
| 965 | "shared receive queue table, aborting.\n"); | ||
| 966 | goto err_cq_table_free; | ||
| 967 | } | ||
| 968 | |||
| 969 | err = mlx4_init_qp_table(dev); | ||
| 970 | if (err) { | ||
| 971 | mlx4_err(dev, "Failed to initialize " | ||
| 972 | "queue pair table, aborting.\n"); | ||
| 973 | goto err_srq_table_free; | ||
| 974 | } | ||
| 975 | |||
| 976 | err = mlx4_init_mcg_table(dev); | ||
| 977 | if (err) { | ||
| 978 | mlx4_err(dev, "Failed to initialize " | ||
| 979 | "multicast group table, aborting.\n"); | ||
| 980 | goto err_qp_table_free; | ||
| 981 | } | ||
| 982 | |||
| 983 | err = mlx4_init_counters_table(dev); | ||
| 984 | if (err && err != -ENOENT) { | ||
| 985 | mlx4_err(dev, "Failed to initialize counters table, aborting.\n"); | ||
| 986 | goto err_mcg_table_free; | ||
| 987 | } | ||
| 988 | |||
| 989 | for (port = 1; port <= dev->caps.num_ports; port++) { | ||
| 990 | enum mlx4_port_type port_type = 0; | ||
| 991 | mlx4_SENSE_PORT(dev, port, &port_type); | ||
| 992 | if (port_type) | ||
| 993 | dev->caps.port_type[port] = port_type; | ||
| 994 | ib_port_default_caps = 0; | ||
| 995 | err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps); | ||
| 996 | if (err) | ||
| 997 | mlx4_warn(dev, "failed to get port %d default " | ||
| 998 | "ib capabilities (%d). Continuing with " | ||
| 999 | "caps = 0\n", port, err); | ||
| 1000 | dev->caps.ib_port_def_cap[port] = ib_port_default_caps; | ||
| 1001 | err = mlx4_SET_PORT(dev, port); | ||
| 1002 | if (err) { | ||
| 1003 | mlx4_err(dev, "Failed to set port %d, aborting\n", | ||
| 1004 | port); | ||
| 1005 | goto err_counters_table_free; | ||
| 1006 | } | ||
| 1007 | } | ||
| 1008 | mlx4_set_port_mask(dev); | ||
| 1009 | |||
| 1010 | return 0; | ||
| 1011 | |||
| 1012 | err_counters_table_free: | ||
| 1013 | mlx4_cleanup_counters_table(dev); | ||
| 1014 | |||
| 1015 | err_mcg_table_free: | ||
| 1016 | mlx4_cleanup_mcg_table(dev); | ||
| 1017 | |||
| 1018 | err_qp_table_free: | ||
| 1019 | mlx4_cleanup_qp_table(dev); | ||
| 1020 | |||
| 1021 | err_srq_table_free: | ||
| 1022 | mlx4_cleanup_srq_table(dev); | ||
| 1023 | |||
| 1024 | err_cq_table_free: | ||
| 1025 | mlx4_cleanup_cq_table(dev); | ||
| 1026 | |||
| 1027 | err_cmd_poll: | ||
| 1028 | mlx4_cmd_use_polling(dev); | ||
| 1029 | |||
| 1030 | err_eq_table_free: | ||
| 1031 | mlx4_cleanup_eq_table(dev); | ||
| 1032 | |||
| 1033 | err_mr_table_free: | ||
| 1034 | mlx4_cleanup_mr_table(dev); | ||
| 1035 | |||
| 1036 | err_pd_table_free: | ||
| 1037 | mlx4_cleanup_pd_table(dev); | ||
| 1038 | |||
| 1039 | err_kar_unmap: | ||
| 1040 | iounmap(priv->kar); | ||
| 1041 | |||
| 1042 | err_uar_free: | ||
| 1043 | mlx4_uar_free(dev, &priv->driver_uar); | ||
| 1044 | |||
| 1045 | err_uar_table_free: | ||
| 1046 | mlx4_cleanup_uar_table(dev); | ||
| 1047 | return err; | ||
| 1048 | } | ||
| 1049 | |||
| 1050 | static void mlx4_enable_msi_x(struct mlx4_dev *dev) | ||
| 1051 | { | ||
| 1052 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 1053 | struct msix_entry *entries; | ||
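| | /* Ask for up to num_online_cpus() + 1 completion vectors per port | ||
| | * (bounded by MAX_MSIX_P_PORT), plus MSIX_LEGACY_SZ extra vectors, | ||
| | * all capped at MAX_MSIX. */ | ||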
| 1054 | int nreq = min_t(int, dev->caps.num_ports * | ||
| 1055 | min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT) | ||
| 1056 | + MSIX_LEGACY_SZ, MAX_MSIX); | ||
| 1057 | int err; | ||
| 1058 | int i; | ||
| 1059 | |||
| 1060 | if (msi_x) { | ||
| 1061 | nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, | ||
| 1062 | nreq); | ||
| 1063 | entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); | ||
| 1064 | if (!entries) | ||
| 1065 | goto no_msi; | ||
| 1066 | |||
| 1067 | for (i = 0; i < nreq; ++i) | ||
| 1068 | entries[i].entry = i; | ||
| 1069 | |||
| 1070 | retry: | ||
| 1071 | err = pci_enable_msix(dev->pdev, entries, nreq); | ||
| 1072 | if (err) { | ||
| 1073 | /* Try again if at least 2 vectors are available */ | ||
| 1074 | if (err > 1) { | ||
| 1075 | mlx4_info(dev, "Requested %d vectors, " | ||
| 1076 | "but only %d MSI-X vectors available, " | ||
| 1077 | "trying again\n", nreq, err); | ||
| 1078 | nreq = err; | ||
| 1079 | goto retry; | ||
| 1080 | } | ||
| 1081 | kfree(entries); | ||
| 1082 | goto no_msi; | ||
| 1083 | } | ||
| 1084 | |||
| 1085 | if (nreq < | ||
| 1086 | MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) { | ||
| 1087 | /* Working in legacy mode, all EQs shared */ | ||
| 1088 | dev->caps.comp_pool = 0; | ||
| 1089 | dev->caps.num_comp_vectors = nreq - 1; | ||
| 1090 | } else { | ||
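| | /* Enough vectors granted: keep MSIX_LEGACY_SZ vectors for the | ||
| | * legacy EQs and expose the remainder as a pool of dedicated | ||
| | * completion vectors. */ | ||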
| 1091 | dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ; | ||
| 1092 | dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1; | ||
| 1093 | } | ||
| 1094 | for (i = 0; i < nreq; ++i) | ||
| 1095 | priv->eq_table.eq[i].irq = entries[i].vector; | ||
| 1096 | |||
| 1097 | dev->flags |= MLX4_FLAG_MSI_X; | ||
| 1098 | |||
| 1099 | kfree(entries); | ||
| 1100 | return; | ||
| 1101 | } | ||
| 1102 | |||
| 1103 | no_msi: | ||
| 1104 | dev->caps.num_comp_vectors = 1; | ||
| 1105 | dev->caps.comp_pool = 0; | ||
| 1106 | |||
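| | /* Without MSI-X, the completion EQ and the async EQ both use the | ||
| | * device's INTx interrupt line. */ | ||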
| 1107 | for (i = 0; i < 2; ++i) | ||
| 1108 | priv->eq_table.eq[i].irq = dev->pdev->irq; | ||
| 1109 | } | ||
| 1110 | |||
| 1111 | static int mlx4_init_port_info(struct mlx4_dev *dev, int port) | ||
| 1112 | { | ||
| 1113 | struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; | ||
| 1114 | int err = 0; | ||
| 1115 | |||
| 1116 | info->dev = dev; | ||
| 1117 | info->port = port; | ||
| 1118 | mlx4_init_mac_table(dev, &info->mac_table); | ||
| 1119 | mlx4_init_vlan_table(dev, &info->vlan_table); | ||
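| | /* Each port gets a contiguous block of 1 << log_num_mac QPs out | ||
| | * of the QP range reserved for Ethernet MAC steering. */ | ||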
| 1120 | info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] + | ||
| 1121 | (port - 1) * (1 << log_num_mac); | ||
| 1122 | |||
| 1123 | sprintf(info->dev_name, "mlx4_port%d", port); | ||
| 1124 | info->port_attr.attr.name = info->dev_name; | ||
| 1125 | info->port_attr.attr.mode = S_IRUGO | S_IWUSR; | ||
| 1126 | info->port_attr.show = show_port_type; | ||
| 1127 | info->port_attr.store = set_port_type; | ||
| 1128 | sysfs_attr_init(&info->port_attr.attr); | ||
| 1129 | |||
| 1130 | err = device_create_file(&dev->pdev->dev, &info->port_attr); | ||
| 1131 | if (err) { | ||
| 1132 | mlx4_err(dev, "Failed to create file for port %d\n", port); | ||
| 1133 | info->port = -1; | ||
| 1134 | } | ||
| 1135 | |||
| 1136 | return err; | ||
| 1137 | } | ||
| 1138 | |||
| 1139 | static void mlx4_cleanup_port_info(struct mlx4_port_info *info) | ||
| 1140 | { | ||
| 1141 | if (info->port < 0) | ||
| 1142 | return; | ||
| 1143 | |||
| 1144 | device_remove_file(&info->dev->pdev->dev, &info->port_attr); | ||
| 1145 | } | ||
| 1146 | |||
| 1147 | static int mlx4_init_steering(struct mlx4_dev *dev) | ||
| 1148 | { | ||
| 1149 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 1150 | int num_entries = dev->caps.num_ports; | ||
| 1151 | int i, j; | ||
| 1152 | |||
| 1153 | priv->steer = kcalloc(num_entries, sizeof(struct mlx4_steer), GFP_KERNEL); | ||
| 1154 | if (!priv->steer) | ||
| 1155 | return -ENOMEM; | ||
| 1156 | |||
| 1157 | for (i = 0; i < num_entries; i++) { | ||
| 1158 | for (j = 0; j < MLX4_NUM_STEERS; j++) { | ||
| 1159 | INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); | ||
| 1160 | INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); | ||
| 1161 | } | ||
| 1162 | INIT_LIST_HEAD(&priv->steer[i].high_prios); | ||
| 1163 | } | ||
| 1164 | return 0; | ||
| 1165 | } | ||
| 1166 | |||
| 1167 | static void mlx4_clear_steering(struct mlx4_dev *dev) | ||
| 1168 | { | ||
| 1169 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 1170 | struct mlx4_steer_index *entry, *tmp_entry; | ||
| 1171 | struct mlx4_promisc_qp *pqp, *tmp_pqp; | ||
| 1172 | int num_entries = dev->caps.num_ports; | ||
| 1173 | int i, j; | ||
| 1174 | |||
| 1175 | for (i = 0; i < num_entries; i++) { | ||
| 1176 | for (j = 0; j < MLX4_NUM_STEERS; j++) { | ||
| 1177 | list_for_each_entry_safe(pqp, tmp_pqp, | ||
| 1178 | &priv->steer[i].promisc_qps[j], | ||
| 1179 | list) { | ||
| 1180 | list_del(&pqp->list); | ||
| 1181 | kfree(pqp); | ||
| 1182 | } | ||
| 1183 | list_for_each_entry_safe(entry, tmp_entry, | ||
| 1184 | &priv->steer[i].steer_entries[j], | ||
| 1185 | list) { | ||
| 1186 | list_del(&entry->list); | ||
| 1187 | list_for_each_entry_safe(pqp, tmp_pqp, | ||
| 1188 | &entry->duplicates, | ||
| 1189 | list) { | ||
| 1190 | list_del(&pqp->list); | ||
| 1191 | kfree(pqp); | ||
| 1192 | } | ||
| 1193 | kfree(entry); | ||
| 1194 | } | ||
| 1195 | } | ||
| 1196 | } | ||
| 1197 | kfree(priv->steer); | ||
| 1198 | } | ||
| 1199 | |||
| 1200 | static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | ||
| 1201 | { | ||
| 1202 | struct mlx4_priv *priv; | ||
| 1203 | struct mlx4_dev *dev; | ||
| 1204 | int err; | ||
| 1205 | int port; | ||
| 1206 | |||
| 1207 | pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); | ||
| 1208 | |||
| 1209 | err = pci_enable_device(pdev); | ||
| 1210 | if (err) { | ||
| 1211 | dev_err(&pdev->dev, "Cannot enable PCI device, " | ||
| 1212 | "aborting.\n"); | ||
| 1213 | return err; | ||
| 1214 | } | ||
| 1215 | |||
| 1216 | /* | ||
| 1217 | * Check for BARs. We expect BAR 0 (the DCS) to be 1MB and BAR 2 (UARs) to be present. | ||
| 1218 | */ | ||
| 1219 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || | ||
| 1220 | pci_resource_len(pdev, 0) != 1 << 20) { | ||
| 1221 | dev_err(&pdev->dev, "Missing DCS, aborting.\n"); | ||
| 1222 | err = -ENODEV; | ||
| 1223 | goto err_disable_pdev; | ||
| 1224 | } | ||
| 1225 | if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { | ||
| 1226 | dev_err(&pdev->dev, "Missing UAR, aborting.\n"); | ||
| 1227 | err = -ENODEV; | ||
| 1228 | goto err_disable_pdev; | ||
| 1229 | } | ||
| 1230 | |||
| 1231 | err = pci_request_regions(pdev, DRV_NAME); | ||
| 1232 | if (err) { | ||
| 1233 | dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); | ||
| 1234 | goto err_disable_pdev; | ||
| 1235 | } | ||
| 1236 | |||
| 1237 | pci_set_master(pdev); | ||
| 1238 | |||
| 1239 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
| 1240 | if (err) { | ||
| 1241 | dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); | ||
| 1242 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
| 1243 | if (err) { | ||
| 1244 | dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); | ||
| 1245 | goto err_release_regions; | ||
| 1246 | } | ||
| 1247 | } | ||
| 1248 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
| 1249 | if (err) { | ||
| 1250 | dev_warn(&pdev->dev, "Warning: couldn't set 64-bit " | ||
| 1251 | "consistent PCI DMA mask.\n"); | ||
| 1252 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
| 1253 | if (err) { | ||
| 1254 | dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " | ||
| 1255 | "aborting.\n"); | ||
| 1256 | goto err_release_regions; | ||
| 1257 | } | ||
| 1258 | } | ||
| 1259 | |||
| 1260 | /* Allow large DMA segments, up to the firmware limit of 1 GB */ | ||
| 1261 | dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); | ||
| 1262 | |||
| 1263 | priv = kzalloc(sizeof *priv, GFP_KERNEL); | ||
| 1264 | if (!priv) { | ||
| 1265 | dev_err(&pdev->dev, "Device struct alloc failed, " | ||
| 1266 | "aborting.\n"); | ||
| 1267 | err = -ENOMEM; | ||
| 1268 | goto err_release_regions; | ||
| 1269 | } | ||
| 1270 | |||
| 1271 | dev = &priv->dev; | ||
| 1272 | dev->pdev = pdev; | ||
| 1273 | INIT_LIST_HEAD(&priv->ctx_list); | ||
| 1274 | spin_lock_init(&priv->ctx_lock); | ||
| 1275 | |||
| 1276 | mutex_init(&priv->port_mutex); | ||
| 1277 | |||
| 1278 | INIT_LIST_HEAD(&priv->pgdir_list); | ||
| 1279 | mutex_init(&priv->pgdir_mutex); | ||
| 1280 | |||
| 1281 | INIT_LIST_HEAD(&priv->bf_list); | ||
| 1282 | mutex_init(&priv->bf_mutex); | ||
| 1283 | |||
| 1284 | dev->rev_id = pdev->revision; | ||
| 1285 | |||
| 1286 | /* | ||
| 1287 | * Now reset the HCA before we touch the PCI capabilities or | ||
| 1288 | * attempt a firmware command, since a boot ROM may have left | ||
| 1289 | * the HCA in an undefined state. | ||
| 1290 | */ | ||
| 1291 | err = mlx4_reset(dev); | ||
| 1292 | if (err) { | ||
| 1293 | mlx4_err(dev, "Failed to reset HCA, aborting.\n"); | ||
| 1294 | goto err_free_dev; | ||
| 1295 | } | ||
| 1296 | |||
| 1297 | if (mlx4_cmd_init(dev)) { | ||
| 1298 | mlx4_err(dev, "Failed to init command interface, aborting.\n"); | ||
| 1299 | goto err_free_dev; | ||
| 1300 | } | ||
| 1301 | |||
| 1302 | err = mlx4_init_hca(dev); | ||
| 1303 | if (err) | ||
| 1304 | goto err_cmd; | ||
| 1305 | |||
| 1306 | err = mlx4_alloc_eq_table(dev); | ||
| 1307 | if (err) | ||
| 1308 | goto err_close; | ||
| 1309 | |||
| 1310 | priv->msix_ctl.pool_bm = 0; | ||
| 1311 | spin_lock_init(&priv->msix_ctl.pool_lock); | ||
| 1312 | |||
| 1313 | mlx4_enable_msi_x(dev); | ||
| 1314 | |||
| 1315 | err = mlx4_init_steering(dev); | ||
| 1316 | if (err) | ||
| 1317 | goto err_free_eq; | ||
| 1318 | |||
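| | /* mlx4_setup_hca() is expected to fail with -EBUSY if the NOP | ||
| | * interrupt test times out; in that case retry once with MSI-X | ||
| | * disabled, falling back to INTx. */ | ||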
| 1319 | err = mlx4_setup_hca(dev); | ||
| 1320 | if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) { | ||
| 1321 | dev->flags &= ~MLX4_FLAG_MSI_X; | ||
| 1322 | pci_disable_msix(pdev); | ||
| 1323 | err = mlx4_setup_hca(dev); | ||
| 1324 | } | ||
| 1325 | |||
| 1326 | if (err) | ||
| 1327 | goto err_steer; | ||
| 1328 | |||
| 1329 | for (port = 1; port <= dev->caps.num_ports; port++) { | ||
| 1330 | err = mlx4_init_port_info(dev, port); | ||
| 1331 | if (err) | ||
| 1332 | goto err_port; | ||
| 1333 | } | ||
| 1334 | |||
| 1335 | err = mlx4_register_device(dev); | ||
| 1336 | if (err) | ||
| 1337 | goto err_port; | ||
| 1338 | |||
| 1339 | mlx4_sense_init(dev); | ||
| 1340 | mlx4_start_sense(dev); | ||
| 1341 | |||
| 1342 | pci_set_drvdata(pdev, dev); | ||
| 1343 | |||
| 1344 | return 0; | ||
| 1345 | |||
| 1346 | err_port: | ||
| 1347 | for (--port; port >= 1; --port) | ||
| 1348 | mlx4_cleanup_port_info(&priv->port[port]); | ||
| 1349 | |||
| 1350 | mlx4_cleanup_counters_table(dev); | ||
| 1351 | mlx4_cleanup_mcg_table(dev); | ||
| 1352 | mlx4_cleanup_qp_table(dev); | ||
| 1353 | mlx4_cleanup_srq_table(dev); | ||
| 1354 | mlx4_cleanup_cq_table(dev); | ||
| 1355 | mlx4_cmd_use_polling(dev); | ||
| 1356 | mlx4_cleanup_eq_table(dev); | ||
| 1357 | mlx4_cleanup_mr_table(dev); | ||
| 1358 | mlx4_cleanup_pd_table(dev); | ||
| 1359 | mlx4_cleanup_uar_table(dev); | ||
| 1360 | |||
| 1361 | err_steer: | ||
| 1362 | mlx4_clear_steering(dev); | ||
| 1363 | |||
| 1364 | err_free_eq: | ||
| 1365 | mlx4_free_eq_table(dev); | ||
| 1366 | |||
| 1367 | err_close: | ||
| 1368 | if (dev->flags & MLX4_FLAG_MSI_X) | ||
| 1369 | pci_disable_msix(pdev); | ||
| 1370 | |||
| 1371 | mlx4_close_hca(dev); | ||
| 1372 | |||
| 1373 | err_cmd: | ||
| 1374 | mlx4_cmd_cleanup(dev); | ||
| 1375 | |||
| 1376 | err_free_dev: | ||
| 1377 | kfree(priv); | ||
| 1378 | |||
| 1379 | err_release_regions: | ||
| 1380 | pci_release_regions(pdev); | ||
| 1381 | |||
| 1382 | err_disable_pdev: | ||
| 1383 | pci_disable_device(pdev); | ||
| 1384 | pci_set_drvdata(pdev, NULL); | ||
| 1385 | return err; | ||
| 1386 | } | ||
| 1387 | |||
| 1388 | static int __devinit mlx4_init_one(struct pci_dev *pdev, | ||
| 1389 | const struct pci_device_id *id) | ||
| 1390 | { | ||
| 1391 | printk_once(KERN_INFO "%s", mlx4_version); | ||
| 1392 | |||
| 1393 | return __mlx4_init_one(pdev, id); | ||
| 1394 | } | ||
| 1395 | |||
| 1396 | static void mlx4_remove_one(struct pci_dev *pdev) | ||
| 1397 | { | ||
| 1398 | struct mlx4_dev *dev = pci_get_drvdata(pdev); | ||
| 1399 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 1400 | int p; | ||
| 1401 | |||
| 1402 | if (dev) { | ||
| 1403 | mlx4_stop_sense(dev); | ||
| 1404 | mlx4_unregister_device(dev); | ||
| 1405 | |||
| 1406 | for (p = 1; p <= dev->caps.num_ports; p++) { | ||
| 1407 | mlx4_cleanup_port_info(&priv->port[p]); | ||
| 1408 | mlx4_CLOSE_PORT(dev, p); | ||
| 1409 | } | ||
| 1410 | |||
| 1411 | mlx4_cleanup_counters_table(dev); | ||
| 1412 | mlx4_cleanup_mcg_table(dev); | ||
| 1413 | mlx4_cleanup_qp_table(dev); | ||
| 1414 | mlx4_cleanup_srq_table(dev); | ||
| 1415 | mlx4_cleanup_cq_table(dev); | ||
| 1416 | mlx4_cmd_use_polling(dev); | ||
| 1417 | mlx4_cleanup_eq_table(dev); | ||
| 1418 | mlx4_cleanup_mr_table(dev); | ||
| 1419 | mlx4_cleanup_pd_table(dev); | ||
| 1420 | |||
| 1421 | iounmap(priv->kar); | ||
| 1422 | mlx4_uar_free(dev, &priv->driver_uar); | ||
| 1423 | mlx4_cleanup_uar_table(dev); | ||
| 1424 | mlx4_clear_steering(dev); | ||
| 1425 | mlx4_free_eq_table(dev); | ||
| 1426 | mlx4_close_hca(dev); | ||
| 1427 | mlx4_cmd_cleanup(dev); | ||
| 1428 | |||
| 1429 | if (dev->flags & MLX4_FLAG_MSI_X) | ||
| 1430 | pci_disable_msix(pdev); | ||
| 1431 | |||
| 1432 | kfree(priv); | ||
| 1433 | pci_release_regions(pdev); | ||
| 1434 | pci_disable_device(pdev); | ||
| 1435 | pci_set_drvdata(pdev, NULL); | ||
| 1436 | } | ||
| 1437 | } | ||
| 1438 | |||
| 1439 | int mlx4_restart_one(struct pci_dev *pdev) | ||
| 1440 | { | ||
| 1441 | mlx4_remove_one(pdev); | ||
| 1442 | return __mlx4_init_one(pdev, NULL); | ||
| 1443 | } | ||
| 1444 | |||
| 1445 | static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = { | ||
| 1446 | { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */ | ||
| 1447 | { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */ | ||
| 1448 | { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */ | ||
| 1449 | { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */ | ||
| 1450 | { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */ | ||
| 1451 | { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */ | ||
| 1452 | { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ | ||
| 1453 | { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */ | ||
| 1454 | { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ | ||
| 1455 | { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ | ||
| 1456 | { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ | ||
| 1457 | { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */ | ||
| 1458 | { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */ | ||
| 1459 | { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */ | ||
| 1460 | { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */ | ||
| 1461 | { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */ | ||
| 1462 | { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */ | ||
| 1463 | { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */ | ||
| 1464 | { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */ | ||
| 1465 | { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */ | ||
| 1466 | { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */ | ||
| 1467 | { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */ | ||
| 1468 | { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */ | ||
| 1469 | { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */ | ||
| 1470 | { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */ | ||
| 1471 | { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */ | ||
| 1472 | { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */ | ||
| 1473 | { 0, } | ||
| 1474 | }; | ||
| 1475 | |||
| 1476 | MODULE_DEVICE_TABLE(pci, mlx4_pci_table); | ||
| 1477 | |||
| 1478 | static struct pci_driver mlx4_driver = { | ||
| 1479 | .name = DRV_NAME, | ||
| 1480 | .id_table = mlx4_pci_table, | ||
| 1481 | .probe = mlx4_init_one, | ||
| 1482 | .remove = __devexit_p(mlx4_remove_one) | ||
| 1483 | }; | ||
| 1484 | |||
| 1485 | static int __init mlx4_verify_params(void) | ||
| 1486 | { | ||
| 1487 | if ((log_num_mac < 0) || (log_num_mac > 7)) { | ||
| 1488 | pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac); | ||
| 1489 | return -1; | ||
| 1490 | } | ||
| 1491 | |||
| 1492 | if ((log_num_vlan < 0) || (log_num_vlan > 7)) { | ||
| 1493 | pr_warning("mlx4_core: bad num_vlan: %d\n", log_num_vlan); | ||
| 1494 | return -1; | ||
| 1495 | } | ||
| 1496 | |||
| 1497 | if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { | ||
| 1498 | pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); | ||
| 1499 | return -1; | ||
| 1500 | } | ||
| 1501 | |||
| 1502 | return 0; | ||
| 1503 | } | ||
| 1504 | |||
| 1505 | static int __init mlx4_init(void) | ||
| 1506 | { | ||
| 1507 | int ret; | ||
| 1508 | |||
| 1509 | if (mlx4_verify_params()) | ||
| 1510 | return -EINVAL; | ||
| 1511 | |||
| 1512 | mlx4_catas_init(); | ||
| 1513 | |||
| 1514 | mlx4_wq = create_singlethread_workqueue("mlx4"); | ||
| 1515 | if (!mlx4_wq) | ||
| 1516 | return -ENOMEM; | ||
| 1517 | |||
| 1518 | ret = pci_register_driver(&mlx4_driver); | ||
| 1519 | return ret < 0 ? ret : 0; | ||
| 1520 | } | ||
| 1521 | |||
| 1522 | static void __exit mlx4_cleanup(void) | ||
| 1523 | { | ||
| 1524 | pci_unregister_driver(&mlx4_driver); | ||
| 1525 | destroy_workqueue(mlx4_wq); | ||
| 1526 | } | ||
| 1527 | |||
| 1528 | module_init(mlx4_init); | ||
| 1529 | module_exit(mlx4_cleanup); | ||
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c new file mode 100644 index 00000000000..cd1784593a3 --- /dev/null +++ b/drivers/net/mlx4/mcg.c | |||
| @@ -0,0 +1,928 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | ||
| 3 | * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. | ||
| 4 | * | ||
| 5 | * This software is available to you under a choice of one of two | ||
| 6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 7 | * General Public License (GPL) Version 2, available from the file | ||
| 8 | * COPYING in the main directory of this source tree, or the | ||
| 9 | * OpenIB.org BSD license below: | ||
| 10 | * | ||
| 11 | * Redistribution and use in source and binary forms, with or | ||
| 12 | * without modification, are permitted provided that the following | ||
| 13 | * conditions are met: | ||
| 14 | * | ||
| 15 | * - Redistributions of source code must retain the above | ||
| 16 | * copyright notice, this list of conditions and the following | ||
| 17 | * disclaimer. | ||
| 18 | * | ||
| 19 | * - Redistributions in binary form must reproduce the above | ||
| 20 | * copyright notice, this list of conditions and the following | ||
| 21 | * disclaimer in the documentation and/or other materials | ||
| 22 | * provided with the distribution. | ||
| 23 | * | ||
| 24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 31 | * SOFTWARE. | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <linux/string.h> | ||
| 35 | #include <linux/etherdevice.h> | ||
| 36 | |||
| 37 | #include <linux/mlx4/cmd.h> | ||
| 38 | |||
| 39 | #include "mlx4.h" | ||
| 40 | |||
| 41 | #define MGM_QPN_MASK 0x00FFFFFF | ||
| 42 | #define MGM_BLCK_LB_BIT 30 | ||
| 43 | |||
| 44 | static const u8 zero_gid[16]; /* automatically initialized to 0 */ | ||
| 45 | |||
| 46 | static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, | ||
| 47 | struct mlx4_cmd_mailbox *mailbox) | ||
| 48 | { | ||
| 49 | return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, | ||
| 50 | MLX4_CMD_TIME_CLASS_A); | ||
| 51 | } | ||
| 52 | |||
| 53 | static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index, | ||
| 54 | struct mlx4_cmd_mailbox *mailbox) | ||
| 55 | { | ||
| 56 | return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, | ||
| 57 | MLX4_CMD_TIME_CLASS_A); | ||
| 58 | } | ||
| 59 | |||
| 60 | static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer, | ||
| 61 | struct mlx4_cmd_mailbox *mailbox) | ||
| 62 | { | ||
| 63 | u32 in_mod; | ||
| 64 | |||
| 65 | in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1; | ||
| 66 | return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1, | ||
| 67 | MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A); | ||
| 68 | } | ||
| 69 | |||
| 70 | static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, | ||
| 71 | u16 *hash, u8 op_mod) | ||
| 72 | { | ||
| 73 | u64 imm; | ||
| 74 | int err; | ||
| 75 | |||
| 76 | err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod, | ||
| 77 | MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A); | ||
| 78 | |||
| 79 | if (!err) | ||
| 80 | *hash = imm; | ||
| 81 | |||
| 82 | return err; | ||
| 83 | } | ||
| 84 | |||
| 85 | static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num, | ||
| 86 | enum mlx4_steer_type steer, | ||
| 87 | u32 qpn) | ||
| 88 | { | ||
| 89 | struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
| 90 | struct mlx4_promisc_qp *pqp; | ||
| 91 | |||
| 92 | list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { | ||
| 93 | if (pqp->qpn == qpn) | ||
| 94 | return pqp; | ||
| 95 | } | ||
| 96 | /* not found */ | ||
| 97 | return NULL; | ||
| 98 | } | ||
| 99 | |||
| 100 | /* | ||
| 101 | * Add new entry to steering data structure. | ||
| 102 | * All promisc QPs should be added as well | ||
| 103 | */ | ||
| 104 | static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, | ||
| 105 | enum mlx4_steer_type steer, | ||
| 106 | unsigned int index, u32 qpn) | ||
| 107 | { | ||
| 108 | struct mlx4_steer *s_steer; | ||
| 109 | struct mlx4_cmd_mailbox *mailbox; | ||
| 110 | struct mlx4_mgm *mgm; | ||
| 111 | u32 members_count; | ||
| 112 | struct mlx4_steer_index *new_entry; | ||
| 113 | struct mlx4_promisc_qp *pqp; | ||
| 114 | struct mlx4_promisc_qp *dqp = NULL; | ||
| 115 | u32 prot; | ||
| 116 | int err; | ||
| 117 | u8 pf_num; | ||
| 118 | |||
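| | /* Steering state is per physical function: on a single-port HCA | ||
| | * the array is indexed by vep number alone, otherwise by the | ||
| | * (vep, port) pair. */ | ||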
| 119 | pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); | ||
| 120 | s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
| 121 | new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); | ||
| 122 | if (!new_entry) | ||
| 123 | return -ENOMEM; | ||
| 124 | |||
| 125 | INIT_LIST_HEAD(&new_entry->duplicates); | ||
| 126 | new_entry->index = index; | ||
| 127 | list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]); | ||
| 128 | |||
| 129 | /* If the given qpn is also a promisc qp, | ||
| 130 | * it should be inserted into the duplicates list | ||
| 131 | */ | ||
| 132 | pqp = get_promisc_qp(dev, pf_num, steer, qpn); | ||
| 133 | if (pqp) { | ||
| 134 | dqp = kmalloc(sizeof *dqp, GFP_KERNEL); | ||
| 135 | if (!dqp) { | ||
| 136 | err = -ENOMEM; | ||
| 137 | goto out_alloc; | ||
| 138 | } | ||
| 139 | dqp->qpn = qpn; | ||
| 140 | list_add_tail(&dqp->list, &new_entry->duplicates); | ||
| 141 | } | ||
| 142 | |||
| 143 | /* if no promisc qps for this vep, we are done */ | ||
| 144 | if (list_empty(&s_steer->promisc_qps[steer])) | ||
| 145 | return 0; | ||
| 146 | |||
| 147 | /* now need to add all the promisc qps to the new | ||
| 148 | * steering entry, as they should also receive the packets | ||
| 149 | * destined for this address */ | ||
| 150 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 151 | if (IS_ERR(mailbox)) { | ||
| 152 | err = -ENOMEM; | ||
| 153 | goto out_alloc; | ||
| 154 | } | ||
| 155 | mgm = mailbox->buf; | ||
| 156 | |||
| 157 | err = mlx4_READ_ENTRY(dev, index, mailbox); | ||
| 158 | if (err) | ||
| 159 | goto out_mailbox; | ||
| 160 | |||
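| | /* members_count packs the protocol in bits 31:30 and the member | ||
| | * count in the low 24 bits. */ | ||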
| 161 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | ||
| 162 | prot = be32_to_cpu(mgm->members_count) >> 30; | ||
| 163 | list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { | ||
| 164 | /* don't add already existing qpn */ | ||
| 165 | if (pqp->qpn == qpn) | ||
| 166 | continue; | ||
| 167 | if (members_count == MLX4_QP_PER_MGM) { | ||
| 168 | /* out of space */ | ||
| 169 | err = -ENOMEM; | ||
| 170 | goto out_mailbox; | ||
| 171 | } | ||
| 172 | |||
| 173 | /* add the qpn */ | ||
| 174 | mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); | ||
| 175 | } | ||
| 176 | /* update the qp count and write the entry back with all the promisc qps */ | ||
| 177 | mgm->members_count = cpu_to_be32(members_count | (prot << 30)); | ||
| 178 | err = mlx4_WRITE_ENTRY(dev, index, mailbox); | ||
| 179 | |||
| 180 | out_mailbox: | ||
| 181 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 182 | if (!err) | ||
| 183 | return 0; | ||
| 184 | out_alloc: | ||
| 185 | if (dqp) { | ||
| 186 | list_del(&dqp->list); | ||
| 187 | kfree(dqp); | ||
| 188 | } | ||
| 189 | list_del(&new_entry->list); | ||
| 190 | kfree(new_entry); | ||
| 191 | return err; | ||
| 192 | } | ||
| 193 | |||
| 194 | /* update the data structures with existing steering entry */ | ||
| 195 | static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, | ||
| 196 | enum mlx4_steer_type steer, | ||
| 197 | unsigned int index, u32 qpn) | ||
| 198 | { | ||
| 199 | struct mlx4_steer *s_steer; | ||
| 200 | struct mlx4_steer_index *tmp_entry, *entry = NULL; | ||
| 201 | struct mlx4_promisc_qp *pqp; | ||
| 202 | struct mlx4_promisc_qp *dqp; | ||
| 203 | u8 pf_num; | ||
| 204 | |||
| 205 | pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); | ||
| 206 | s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
| 207 | |||
| 208 | pqp = get_promisc_qp(dev, pf_num, steer, qpn); | ||
| 209 | if (!pqp) | ||
| 210 | return 0; /* nothing to do */ | ||
| 211 | |||
| 212 | list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { | ||
| 213 | if (tmp_entry->index == index) { | ||
| 214 | entry = tmp_entry; | ||
| 215 | break; | ||
| 216 | } | ||
| 217 | } | ||
| 218 | if (unlikely(!entry)) { | ||
| 219 | mlx4_warn(dev, "Steering entry at index %x is not registered\n", index); | ||
| 220 | return -EINVAL; | ||
| 221 | } | ||
| 222 | |||
| 223 | /* the given qpn is listed as a promisc qpn; | ||
| 224 | * add it as a duplicate to this entry | ||
| 225 | * for future reference */ | ||
| 226 | list_for_each_entry(dqp, &entry->duplicates, list) { | ||
| 227 | if (qpn == dqp->qpn) | ||
| 228 | return 0; /* qp is already duplicated */ | ||
| 229 | } | ||
| 230 | |||
| 231 | /* add the qp as a duplicate on this index */ | ||
| 232 | dqp = kmalloc(sizeof *dqp, GFP_KERNEL); | ||
| 233 | if (!dqp) | ||
| 234 | return -ENOMEM; | ||
| 235 | dqp->qpn = qpn; | ||
| 236 | list_add_tail(&dqp->list, &entry->duplicates); | ||
| 237 | |||
| 238 | return 0; | ||
| 239 | } | ||
| 240 | |||
| 241 | /* Check whether a qpn is a duplicate on a steering entry; | ||
| 242 | * if so, it should not be removed from the mgm */ | ||
| 243 | static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, | ||
| 244 | enum mlx4_steer_type steer, | ||
| 245 | unsigned int index, u32 qpn) | ||
| 246 | { | ||
| 247 | struct mlx4_steer *s_steer; | ||
| 248 | struct mlx4_steer_index *tmp_entry, *entry = NULL; | ||
| 249 | struct mlx4_promisc_qp *dqp, *tmp_dqp; | ||
| 250 | u8 pf_num; | ||
| 251 | |||
| 252 | pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); | ||
| 253 | s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
| 254 | |||
| 255 | /* if qp is not promisc, it cannot be duplicated */ | ||
| 256 | if (!get_promisc_qp(dev, pf_num, steer, qpn)) | ||
| 257 | return false; | ||
| 258 | |||
| 259 | /* The qp is a promisc qp, so it is a duplicate on this index. | ||
| 260 | * Find the index entry and remove the duplicate */ | ||
| 261 | list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { | ||
| 262 | if (tmp_entry->index == index) { | ||
| 263 | entry = tmp_entry; | ||
| 264 | break; | ||
| 265 | } | ||
| 266 | } | ||
| 267 | if (unlikely(!entry)) { | ||
| 268 | mlx4_warn(dev, "Steering entry for index %x is not registered\n", index); | ||
| 269 | return false; | ||
| 270 | } | ||
| 271 | list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) { | ||
| 272 | if (dqp->qpn == qpn) { | ||
| 273 | list_del(&dqp->list); | ||
| 274 | kfree(dqp); | ||
| 275 | } | ||
| 276 | } | ||
| 277 | return true; | ||
| 278 | } | ||
| 279 | |||
| 280 | /* If a steering entry contains only promisc QPs, it can be removed. */ | ||
| 281 | static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, | ||
| 282 | enum mlx4_steer_type steer, | ||
| 283 | unsigned int index, u32 tqpn) | ||
| 284 | { | ||
| 285 | struct mlx4_steer *s_steer; | ||
| 286 | struct mlx4_cmd_mailbox *mailbox; | ||
| 287 | struct mlx4_mgm *mgm; | ||
| 288 | struct mlx4_steer_index *entry = NULL, *tmp_entry; | ||
| 289 | u32 qpn; | ||
| 290 | u32 members_count; | ||
| 291 | bool ret = false; | ||
| 292 | int i; | ||
| 293 | u8 pf_num; | ||
| 294 | |||
| 295 | pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); | ||
| 296 | s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
| 297 | |||
| 298 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 299 | if (IS_ERR(mailbox)) | ||
| 300 | return false; | ||
| 301 | mgm = mailbox->buf; | ||
| 302 | |||
| 303 | if (mlx4_READ_ENTRY(dev, index, mailbox)) | ||
| 304 | goto out; | ||
| 305 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | ||
| 306 | for (i = 0; i < members_count; i++) { | ||
| 307 | qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; | ||
| 308 | if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) { | ||
| 309 | /* the qp is not promisc, the entry can't be removed */ | ||
| 310 | goto out; | ||
| 311 | } | ||
| 312 | } | ||
| 313 | /* All the qps currently registered for this entry are promiscuous; | ||
| 314 | * check for duplicates before removing it */ | ||
| 315 | ret = true; | ||
| 316 | list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) { | ||
| 317 | if (entry->index == index) { | ||
| 318 | if (list_empty(&entry->duplicates)) { | ||
| 319 | list_del(&entry->list); | ||
| 320 | kfree(entry); | ||
| 321 | } else { | ||
| 322 | /* This entry contains duplicates so it shouldn't be removed */ | ||
| 323 | ret = false; | ||
| 324 | goto out; | ||
| 325 | } | ||
| 326 | } | ||
| 327 | } | ||
| 328 | |||
| 329 | out: | ||
| 330 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 331 | return ret; | ||
| 332 | } | ||
| 333 | |||
| 334 | static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port, | ||
| 335 | enum mlx4_steer_type steer, u32 qpn) | ||
| 336 | { | ||
| 337 | struct mlx4_steer *s_steer; | ||
| 338 | struct mlx4_cmd_mailbox *mailbox; | ||
| 339 | struct mlx4_mgm *mgm; | ||
| 340 | struct mlx4_steer_index *entry; | ||
| 341 | struct mlx4_promisc_qp *pqp; | ||
| 342 | struct mlx4_promisc_qp *dqp; | ||
| 343 | u32 members_count; | ||
| 344 | u32 prot; | ||
| 345 | int i; | ||
| 346 | bool found; | ||
| 347 | int last_index; | ||
| 348 | int err; | ||
| 349 | u8 pf_num; | ||
| 350 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 351 | pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); | ||
| 352 | s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
| 353 | |||
| 354 | mutex_lock(&priv->mcg_table.mutex); | ||
| 355 | |||
| 356 | if (get_promisc_qp(dev, pf_num, steer, qpn)) { | ||
| 357 | err = 0; /* Nothing to do, already exists */ | ||
| 358 | goto out_mutex; | ||
| 359 | } | ||
| 360 | |||
| 361 | pqp = kmalloc(sizeof *pqp, GFP_KERNEL); | ||
| 362 | if (!pqp) { | ||
| 363 | err = -ENOMEM; | ||
| 364 | goto out_mutex; | ||
| 365 | } | ||
| 366 | pqp->qpn = qpn; | ||
| 367 | |||
| 368 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 369 | if (IS_ERR(mailbox)) { | ||
| 370 | err = -ENOMEM; | ||
| 371 | goto out_alloc; | ||
| 372 | } | ||
| 373 | mgm = mailbox->buf; | ||
| 374 | |||
| 375 | /* the promisc qp needs to be added to each of the steering | ||
| 376 | * entries; if it is already listed in one, it is added as a | ||
| 377 | * duplicate for that entry instead */ | ||
| 378 | list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { | ||
| 379 | err = mlx4_READ_ENTRY(dev, entry->index, mailbox); | ||
| 380 | if (err) | ||
| 381 | goto out_mailbox; | ||
| 382 | |||
| 383 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | ||
| 384 | prot = be32_to_cpu(mgm->members_count) >> 30; | ||
| 385 | found = false; | ||
| 386 | for (i = 0; i < members_count; i++) { | ||
| 387 | if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { | ||
| 388 | /* Entry already exists, add to duplicates */ | ||
| 389 | dqp = kmalloc(sizeof *dqp, GFP_KERNEL); | ||
| 390 | if (!dqp) { | ||
| | err = -ENOMEM; | ||
| 391 | goto out_mailbox; | ||
| | } | ||
| 392 | dqp->qpn = qpn; | ||
| 393 | list_add_tail(&dqp->list, &entry->duplicates); | ||
| 394 | found = true; | ||
| 395 | } | ||
| 396 | } | ||
| 397 | if (!found) { | ||
| 398 | /* Need to add the qpn to mgm */ | ||
| 399 | if (members_count == MLX4_QP_PER_MGM) { | ||
| 400 | /* entry is full */ | ||
| 401 | err = -ENOMEM; | ||
| 402 | goto out_mailbox; | ||
| 403 | } | ||
| 404 | mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK); | ||
| 405 | mgm->members_count = cpu_to_be32(members_count | (prot << 30)); | ||
| 406 | err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); | ||
| 407 | if (err) | ||
| 408 | goto out_mailbox; | ||
| 409 | } | ||
| 410 | last_index = entry->index; | ||
| 411 | } | ||
| 412 | |||
| 413 | /* add the new qpn to list of promisc qps */ | ||
| 414 | list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); | ||
| 415 | /* now need to add all the promisc qps to default entry */ | ||
| 416 | memset(mgm, 0, sizeof *mgm); | ||
| 417 | members_count = 0; | ||
| 418 | list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) | ||
| 419 | mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); | ||
| 420 | mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); | ||
| 421 | |||
| 422 | err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox); | ||
| 423 | if (err) | ||
| 424 | goto out_list; | ||
| 425 | |||
| 426 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 427 | mutex_unlock(&priv->mcg_table.mutex); | ||
| 428 | return 0; | ||
| 429 | |||
| 430 | out_list: | ||
| 431 | list_del(&pqp->list); | ||
| 432 | out_mailbox: | ||
| 433 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 434 | out_alloc: | ||
| 435 | kfree(pqp); | ||
| 436 | out_mutex: | ||
| 437 | mutex_unlock(&priv->mcg_table.mutex); | ||
| 438 | return err; | ||
| 439 | } | ||
| 440 | |||
| 441 | static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port, | ||
| 442 | enum mlx4_steer_type steer, u32 qpn) | ||
| 443 | { | ||
| 444 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 445 | struct mlx4_steer *s_steer; | ||
| 446 | struct mlx4_cmd_mailbox *mailbox; | ||
| 447 | struct mlx4_mgm *mgm; | ||
| 448 | struct mlx4_steer_index *entry; | ||
| 449 | struct mlx4_promisc_qp *pqp; | ||
| 450 | struct mlx4_promisc_qp *dqp; | ||
| 451 | u32 members_count; | ||
| 452 | bool found; | ||
| 453 | bool back_to_list = false; | ||
| 454 | int loc, i; | ||
| 455 | int err; | ||
| 456 | u8 pf_num; | ||
| 457 | |||
| 458 | pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); | ||
| 459 | s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
| 460 | mutex_lock(&priv->mcg_table.mutex); | ||
| 461 | |||
| 462 | pqp = get_promisc_qp(dev, pf_num, steer, qpn); | ||
| 463 | if (unlikely(!pqp)) { | ||
| 464 | mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn); | ||
| 465 | /* nothing to do */ | ||
| 466 | err = 0; | ||
| 467 | goto out_mutex; | ||
| 468 | } | ||
| 469 | |||
| 470 | /* remove from the list of promisc qps */ | ||
| 471 | list_del(&pqp->list); | ||
| 472 | |||
| 473 | /* set the default entry not to include the removed one */ | ||
| 474 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 475 | if (IS_ERR(mailbox)) { | ||
| 476 | err = -ENOMEM; | ||
| 477 | back_to_list = true; | ||
| 478 | goto out_list; | ||
| 479 | } | ||
| 480 | mgm = mailbox->buf; | ||
| 481 | members_count = 0; | ||
| 482 | list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) | ||
| 483 | mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); | ||
| 484 | mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); | ||
| 485 | |||
| 486 | err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox); | ||
| 487 | if (err) | ||
| 488 | goto out_mailbox; | ||
| 489 | |||
| 490 | /* remove the qp from all the steering entries */ | ||
| 491 | list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { | ||
| 492 | found = false; | ||
| 493 | list_for_each_entry(dqp, &entry->duplicates, list) { | ||
| 494 | if (dqp->qpn == qpn) { | ||
| 495 | found = true; | ||
| 496 | break; | ||
| 497 | } | ||
| 498 | } | ||
| 499 | if (found) { | ||
| 500 | /* a duplicate, no need to change the mgm, | ||
| 501 | * only update the duplicates list */ | ||
| 502 | list_del(&dqp->list); | ||
| 503 | kfree(dqp); | ||
| 504 | } else { | ||
| 505 | err = mlx4_READ_ENTRY(dev, entry->index, mailbox); | ||
| 506 | if (err) | ||
| 507 | goto out_mailbox; | ||
| 508 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | ||
| 509 | for (loc = -1, i = 0; i < members_count; ++i) | ||
| 510 | if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) | ||
| 511 | loc = i; | ||
| 512 | |||
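| | /* Shrink the member list by moving the last QP into the slot | ||
| | * being vacated. */ | ||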
| 513 | mgm->members_count = cpu_to_be32(--members_count | | ||
| 514 | (MLX4_PROT_ETH << 30)); | ||
| 515 | mgm->qp[loc] = mgm->qp[i - 1]; | ||
| 516 | mgm->qp[i - 1] = 0; | ||
| 517 | |||
| 518 | err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); | ||
| 519 | if (err) | ||
| 520 | goto out_mailbox; | ||
| 521 | } | ||
| 522 | |||
| 523 | } | ||
| 524 | |||
| 525 | out_mailbox: | ||
| 526 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 527 | out_list: | ||
| 528 | if (back_to_list) | ||
| 529 | list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); | ||
| 530 | else | ||
| 531 | kfree(pqp); | ||
| 532 | out_mutex: | ||
| 533 | mutex_unlock(&priv->mcg_table.mutex); | ||
| 534 | return err; | ||
| 535 | } | ||
| 536 | |||
| 537 | /* | ||
| 538 | * Caller must hold the MCG table mutex. gid and mgm parameters must | ||
| 539 | * be properly aligned for command interface. | ||
| 540 | * | ||
| 541 | * Returns 0 unless a firmware command error occurs. | ||
| 542 | * | ||
| 543 | * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 | ||
| 544 | * and *mgm holds MGM entry. | ||
| 545 | * | ||
| 546 | * If GID is found in AMGM, *index = index in AMGM, *prev = index of | ||
| 547 | * previous entry in hash chain and *mgm holds AMGM entry. | ||
| 548 | * | ||
| 549 | * If no AMGM exists for given gid, *index = -1, *prev = index of last | ||
| 550 | * entry in hash chain and *mgm holds end of hash chain. | ||
| 551 | */ | ||
| 552 | static int find_entry(struct mlx4_dev *dev, u8 port, | ||
| 553 | u8 *gid, enum mlx4_protocol prot, | ||
| 554 | enum mlx4_steer_type steer, | ||
| 555 | struct mlx4_cmd_mailbox *mgm_mailbox, | ||
| 556 | u16 *hash, int *prev, int *index) | ||
| 557 | { | ||
| 558 | struct mlx4_cmd_mailbox *mailbox; | ||
| 559 | struct mlx4_mgm *mgm = mgm_mailbox->buf; | ||
| 560 | u8 *mgid; | ||
| 561 | int err; | ||
| 562 | u8 op_mod = (prot == MLX4_PROT_ETH) ? | ||
| 563 | !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0; | ||
| 564 | |||
| 565 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 566 | if (IS_ERR(mailbox)) | ||
| 567 | return -ENOMEM; | ||
| 568 | mgid = mailbox->buf; | ||
| 569 | |||
| 570 | memcpy(mgid, gid, 16); | ||
| 571 | |||
| 572 | err = mlx4_GID_HASH(dev, mailbox, hash, op_mod); | ||
| 573 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 574 | if (err) | ||
| 575 | return err; | ||
| 576 | |||
| 577 | if (0) | ||
| 578 | mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash); | ||
| 579 | |||
| 580 | *index = *hash; | ||
| 581 | *prev = -1; | ||
| 582 | |||
| 583 | do { | ||
| 584 | err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox); | ||
| 585 | if (err) | ||
| 586 | return err; | ||
| 587 | |||
| 588 | if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { | ||
| 589 | if (*index != *hash) { | ||
| 590 | mlx4_err(dev, "Found zero MGID in AMGM.\n"); | ||
| 591 | err = -EINVAL; | ||
| 592 | } | ||
| 593 | return err; | ||
| 594 | } | ||
| 595 | |||
| 596 | if (!memcmp(mgm->gid, gid, 16) && | ||
| 597 | be32_to_cpu(mgm->members_count) >> 30 == prot) | ||
| 598 | return err; | ||
| 599 | |||
| 600 | *prev = *index; | ||
| 601 | *index = be32_to_cpu(mgm->next_gid_index) >> 6; | ||
| 602 | } while (*index); | ||
| 603 | |||
| 604 | *index = -1; | ||
| 605 | return err; | ||
| 606 | } | ||
| 607 | |||
| 608 | int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | ||
| 609 | int block_mcast_loopback, enum mlx4_protocol prot, | ||
| 610 | enum mlx4_steer_type steer) | ||
| 611 | { | ||
| 612 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 613 | struct mlx4_cmd_mailbox *mailbox; | ||
| 614 | struct mlx4_mgm *mgm; | ||
| 615 | u32 members_count; | ||
| 616 | u16 hash; | ||
| 617 | int index, prev; | ||
| 618 | int link = 0; | ||
| 619 | int i; | ||
| 620 | int err; | ||
| 621 | u8 port = gid[5]; | ||
| 622 | u8 new_entry = 0; | ||
| 623 | |||
| 624 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 625 | if (IS_ERR(mailbox)) | ||
| 626 | return PTR_ERR(mailbox); | ||
| 627 | mgm = mailbox->buf; | ||
| 628 | |||
| 629 | mutex_lock(&priv->mcg_table.mutex); | ||
| 630 | err = find_entry(dev, port, gid, prot, steer, | ||
| 631 | mailbox, &hash, &prev, &index); | ||
| 632 | if (err) | ||
| 633 | goto out; | ||
| 634 | |||
| 635 | if (index != -1) { | ||
| 636 | if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { | ||
| 637 | new_entry = 1; | ||
| 638 | memcpy(mgm->gid, gid, 16); | ||
| 639 | } | ||
| 640 | } else { | ||
| 641 | link = 1; | ||
| 642 | |||
| 643 | index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap); | ||
| 644 | if (index == -1) { | ||
| 645 | mlx4_err(dev, "No AMGM entries left\n"); | ||
| 646 | err = -ENOMEM; | ||
| 647 | goto out; | ||
| 648 | } | ||
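| | /* AMGM entries live right after the num_mgms MGM hash-table | ||
| | * entries, so offset the allocated bitmap index accordingly. */ | ||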
| 649 | index += dev->caps.num_mgms; | ||
| 650 | |||
| 651 | memset(mgm, 0, sizeof *mgm); | ||
| 652 | memcpy(mgm->gid, gid, 16); | ||
| 653 | } | ||
| 654 | |||
| 655 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | ||
| 656 | if (members_count == MLX4_QP_PER_MGM) { | ||
| 657 | mlx4_err(dev, "MGM at index %x is full.\n", index); | ||
| 658 | err = -ENOMEM; | ||
| 659 | goto out; | ||
| 660 | } | ||
| 661 | |||
| 662 | for (i = 0; i < members_count; ++i) | ||
| 663 | if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { | ||
| 664 | mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn); | ||
| 665 | err = 0; | ||
| 666 | goto out; | ||
| 667 | } | ||
| 668 | |||
| 669 | if (block_mcast_loopback) | ||
| 670 | mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) | | ||
| 671 | (1U << MGM_BLCK_LB_BIT)); | ||
| 672 | else | ||
| 673 | mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); | ||
| 674 | |||
| 675 | mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30); | ||
| 676 | |||
| 677 | err = mlx4_WRITE_ENTRY(dev, index, mailbox); | ||
| 678 | if (err) | ||
| 679 | goto out; | ||
| 680 | |||
| 681 | if (!link) | ||
| 682 | goto out; | ||
| 683 | |||
| 684 | err = mlx4_READ_ENTRY(dev, prev, mailbox); | ||
| 685 | if (err) | ||
| 686 | goto out; | ||
| 687 | |||
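| | /* next_gid_index stores the AMGM index in bits 31:6. */ | ||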
| 688 | mgm->next_gid_index = cpu_to_be32(index << 6); | ||
| 689 | |||
| 690 | err = mlx4_WRITE_ENTRY(dev, prev, mailbox); | ||
| 691 | if (err) | ||
| 692 | goto out; | ||
| 693 | |||
| 694 | out: | ||
| 695 | if (prot == MLX4_PROT_ETH) { | ||
| 696 | /* manage the steering entry for promisc mode */ | ||
| 697 | if (new_entry) | ||
| 698 | new_steering_entry(dev, 0, port, steer, index, qp->qpn); | ||
| 699 | else | ||
| 700 | existing_steering_entry(dev, 0, port, steer, | ||
| 701 | index, qp->qpn); | ||
| 702 | } | ||
| 703 | if (err && link && index != -1) { | ||
| 704 | if (index < dev->caps.num_mgms) | ||
| 705 | mlx4_warn(dev, "Got AMGM index %d < %d", | ||
| 706 | index, dev->caps.num_mgms); | ||
| 707 | else | ||
| 708 | mlx4_bitmap_free(&priv->mcg_table.bitmap, | ||
| 709 | index - dev->caps.num_mgms); | ||
| 710 | } | ||
| 711 | mutex_unlock(&priv->mcg_table.mutex); | ||
| 712 | |||
| 713 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 714 | return err; | ||
| 715 | } | ||
| 716 | |||
| 717 | int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | ||
| 718 | enum mlx4_protocol prot, enum mlx4_steer_type steer) | ||
| 719 | { | ||
| 720 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 721 | struct mlx4_cmd_mailbox *mailbox; | ||
| 722 | struct mlx4_mgm *mgm; | ||
| 723 | u32 members_count; | ||
| 724 | u16 hash; | ||
| 725 | int prev, index; | ||
| 726 | int i, loc; | ||
| 727 | int err; | ||
| 728 | u8 port = gid[5]; | ||
| 729 | bool removed_entry = false; | ||
| 730 | |||
| 731 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 732 | if (IS_ERR(mailbox)) | ||
| 733 | return PTR_ERR(mailbox); | ||
| 734 | mgm = mailbox->buf; | ||
| 735 | |||
| 736 | mutex_lock(&priv->mcg_table.mutex); | ||
| 737 | |||
| 738 | err = find_entry(dev, port, gid, prot, steer, | ||
| 739 | mailbox, &hash, &prev, &index); | ||
| 740 | if (err) | ||
| 741 | goto out; | ||
| 742 | |||
| 743 | if (index == -1) { | ||
| 744 | mlx4_err(dev, "MGID %pI6 not found\n", gid); | ||
| 745 | err = -EINVAL; | ||
| 746 | goto out; | ||
| 747 | } | ||
| 748 | |||
| 749 | /* if this qp is also a promisc qp, it shouldn't be removed */ | ||
| 750 | if (prot == MLX4_PROT_ETH && | ||
| 751 | check_duplicate_entry(dev, 0, port, steer, index, qp->qpn)) | ||
| 752 | goto out; | ||
| 753 | |||
| 754 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | ||
| 755 | for (loc = -1, i = 0; i < members_count; ++i) | ||
| 756 | if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) | ||
| 757 | loc = i; | ||
| 758 | |||
| 759 | if (loc == -1) { | ||
| 760 | mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn); | ||
| 761 | err = -EINVAL; | ||
| 762 | goto out; | ||
| 763 | } | ||
| 764 | |||
| 765 | |||
| 766 | mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30); | ||
| 767 | mgm->qp[loc] = mgm->qp[i - 1]; | ||
| 768 | mgm->qp[i - 1] = 0; | ||
| 769 | |||
| 770 | if (prot == MLX4_PROT_ETH) | ||
| 771 | removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn); | ||
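| | /* i holds the pre-removal member count; if other members remain, | ||
| | * or the steering entry must be kept, just write back the | ||
| | * shrunken entry. */ | ||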
| 772 | if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) { | ||
| 773 | err = mlx4_WRITE_ENTRY(dev, index, mailbox); | ||
| 774 | goto out; | ||
| 775 | } | ||
| 776 | |||
| 777 | /* We are going to delete the entry, members count should be 0 */ | ||
| 778 | mgm->members_count = cpu_to_be32((u32) prot << 30); | ||
| 779 | |||
| 780 | if (prev == -1) { | ||
| 781 | /* Remove entry from MGM */ | ||
| 782 | int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; | ||
| 783 | if (amgm_index) { | ||
| 784 | err = mlx4_READ_ENTRY(dev, amgm_index, mailbox); | ||
| 785 | if (err) | ||
| 786 | goto out; | ||
| 787 | } else | ||
| 788 | memset(mgm->gid, 0, 16); | ||
| 789 | |||
| 790 | err = mlx4_WRITE_ENTRY(dev, index, mailbox); | ||
| 791 | if (err) | ||
| 792 | goto out; | ||
| 793 | |||
| 794 | if (amgm_index) { | ||
| 795 | if (amgm_index < dev->caps.num_mgms) | ||
| 796 | mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d", | ||
| 797 | index, amgm_index, dev->caps.num_mgms); | ||
| 798 | else | ||
| 799 | mlx4_bitmap_free(&priv->mcg_table.bitmap, | ||
| 800 | amgm_index - dev->caps.num_mgms); | ||
| 801 | } | ||
| 802 | } else { | ||
| 803 | /* Remove entry from AMGM */ | ||
| 804 | int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; | ||
| 805 | err = mlx4_READ_ENTRY(dev, prev, mailbox); | ||
| 806 | if (err) | ||
| 807 | goto out; | ||
| 808 | |||
| 809 | mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); | ||
| 810 | |||
| 811 | err = mlx4_WRITE_ENTRY(dev, prev, mailbox); | ||
| 812 | if (err) | ||
| 813 | goto out; | ||
| 814 | |||
| 815 | if (index < dev->caps.num_mgms) | ||
| 816 | mlx4_warn(dev, "entry %d had next AMGM index %d < %d", | ||
| 817 | prev, index, dev->caps.num_mgms); | ||
| 818 | else | ||
| 819 | mlx4_bitmap_free(&priv->mcg_table.bitmap, | ||
| 820 | index - dev->caps.num_mgms); | ||
| 821 | } | ||
| 822 | |||
| 823 | out: | ||
| 824 | mutex_unlock(&priv->mcg_table.mutex); | ||
| 825 | |||
| 826 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 827 | return err; | ||
| 828 | } | ||
| 829 | |||
| 830 | |||
| 831 | int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | ||
| 832 | int block_mcast_loopback, enum mlx4_protocol prot) | ||
| 833 | { | ||
| 834 | enum mlx4_steer_type steer; | ||
| 835 | |||
| 836 | steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER; | ||
| 837 | |||
| 838 | if (prot == MLX4_PROT_ETH && | ||
| 839 | !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) | ||
| 840 | return 0; | ||
| 841 | |||
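| | /* For Ethernet, fold the steering type into byte 7 of the GID so | ||
| | * unicast and multicast registrations map to distinct MGM entries. */ | ||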
| 842 | if (prot == MLX4_PROT_ETH) | ||
| 843 | gid[7] |= (steer << 1); | ||
| 844 | |||
| 845 | return mlx4_qp_attach_common(dev, qp, gid, | ||
| 846 | block_mcast_loopback, prot, | ||
| 847 | steer); | ||
| 848 | } | ||
| 849 | EXPORT_SYMBOL_GPL(mlx4_multicast_attach); | ||
| 850 | |||
| 851 | int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | ||
| 852 | enum mlx4_protocol prot) | ||
| 853 | { | ||
| 854 | enum mlx4_steer_type steer; | ||
| 855 | |||
| 856 | steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER; | ||
| 857 | |||
| 858 | if (prot == MLX4_PROT_ETH && | ||
| 859 | !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) | ||
| 860 | return 0; | ||
| 861 | |||
| 862 | if (prot == MLX4_PROT_ETH) { | ||
| 863 | gid[7] |= (steer << 1); | ||
| 864 | } | ||
| 865 | |||
| 866 | return mlx4_qp_detach_common(dev, qp, gid, prot, steer); | ||
| 867 | } | ||
| 868 | EXPORT_SYMBOL_GPL(mlx4_multicast_detach); | ||
| 869 | |||
| 870 | |||
| 871 | int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) | ||
| 872 | { | ||
| 873 | if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) | ||
| 874 | return 0; | ||
| 875 | |||
| 876 | |||
| 877 | return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn); | ||
| 878 | } | ||
| 879 | EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add); | ||
| 880 | |||
| 881 | int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) | ||
| 882 | { | ||
| 883 | if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) | ||
| 884 | return 0; | ||
| 885 | |||
| 886 | |||
| 887 | return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn); | ||
| 888 | } | ||
| 889 | EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove); | ||
| 890 | |||
| 891 | int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) | ||
| 892 | { | ||
| 893 | if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) | ||
| 894 | return 0; | ||
| 895 | |||
| 896 | |||
| 897 | return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn); | ||
| 898 | } | ||
| 899 | EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add); | ||
| 900 | |||
| 901 | int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) | ||
| 902 | { | ||
| 903 | if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) | ||
| 904 | return 0; | ||
| 905 | |||
| 906 | return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn); | ||
| 907 | } | ||
| 908 | EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove); | ||
| 909 | |||
| 910 | int mlx4_init_mcg_table(struct mlx4_dev *dev) | ||
| 911 | { | ||
| 912 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 913 | int err; | ||
| 914 | |||
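| | /* The bitmap only manages the AMGM (hash overflow) entries; the | ||
| | * first num_mgms hash-table entries are addressed directly. */ | ||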
| 915 | err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms, | ||
| 916 | dev->caps.num_amgms - 1, 0, 0); | ||
| 917 | if (err) | ||
| 918 | return err; | ||
| 919 | |||
| 920 | mutex_init(&priv->mcg_table.mutex); | ||
| 921 | |||
| 922 | return 0; | ||
| 923 | } | ||
| 924 | |||
| 925 | void mlx4_cleanup_mcg_table(struct mlx4_dev *dev) | ||
| 926 | { | ||
| 927 | mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap); | ||
| 928 | } | ||
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h new file mode 100644 index 00000000000..a2fcd8402d3 --- /dev/null +++ b/drivers/net/mlx4/mlx4.h | |||
| @@ -0,0 +1,459 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. | ||
| 3 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. | ||
| 4 | * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. | ||
| 5 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. | ||
| 6 | * Copyright (c) 2004 Voltaire, Inc. All rights reserved. | ||
| 7 | * | ||
| 8 | * This software is available to you under a choice of one of two | ||
| 9 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 10 | * General Public License (GPL) Version 2, available from the file | ||
| 11 | * COPYING in the main directory of this source tree, or the | ||
| 12 | * OpenIB.org BSD license below: | ||
| 13 | * | ||
| 14 | * Redistribution and use in source and binary forms, with or | ||
| 15 | * without modification, are permitted provided that the following | ||
| 16 | * conditions are met: | ||
| 17 | * | ||
| 18 | * - Redistributions of source code must retain the above | ||
| 19 | * copyright notice, this list of conditions and the following | ||
| 20 | * disclaimer. | ||
| 21 | * | ||
| 22 | * - Redistributions in binary form must reproduce the above | ||
| 23 | * copyright notice, this list of conditions and the following | ||
| 24 | * disclaimer in the documentation and/or other materials | ||
| 25 | * provided with the distribution. | ||
| 26 | * | ||
| 27 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 28 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 29 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 30 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 31 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 32 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 33 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 34 | * SOFTWARE. | ||
| 35 | */ | ||
| 36 | |||
| 37 | #ifndef MLX4_H | ||
| 38 | #define MLX4_H | ||
| 39 | |||
| 40 | #include <linux/mutex.h> | ||
| 41 | #include <linux/radix-tree.h> | ||
| 42 | #include <linux/timer.h> | ||
| 43 | #include <linux/semaphore.h> | ||
| 44 | #include <linux/workqueue.h> | ||
| 45 | |||
| 46 | #include <linux/mlx4/device.h> | ||
| 47 | #include <linux/mlx4/driver.h> | ||
| 48 | #include <linux/mlx4/doorbell.h> | ||
| 49 | |||
| 50 | #define DRV_NAME "mlx4_core" | ||
| 51 | #define DRV_VERSION "1.0" | ||
| 52 | #define DRV_RELDATE "July 14, 2011" | ||
| 53 | |||
| 54 | enum { | ||
| 55 | MLX4_HCR_BASE = 0x80680, | ||
| 56 | MLX4_HCR_SIZE = 0x0001c, | ||
| 57 | MLX4_CLR_INT_SIZE = 0x00008 | ||
| 58 | }; | ||
| 59 | |||
| 60 | enum { | ||
| 61 | MLX4_MGM_ENTRY_SIZE = 0x100, | ||
| 62 | MLX4_QP_PER_MGM = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2), | ||
| 63 | MLX4_MTT_ENTRY_PER_SEG = 8 | ||
| 64 | }; | ||
| 65 | |||
| 66 | enum { | ||
| 67 | MLX4_NUM_PDS = 1 << 15 | ||
| 68 | }; | ||
| 69 | |||
| 70 | enum { | ||
| 71 | MLX4_CMPT_TYPE_QP = 0, | ||
| 72 | MLX4_CMPT_TYPE_SRQ = 1, | ||
| 73 | MLX4_CMPT_TYPE_CQ = 2, | ||
| 74 | MLX4_CMPT_TYPE_EQ = 3, | ||
| 75 | MLX4_CMPT_NUM_TYPE | ||
| 76 | }; | ||
| 77 | |||
| 78 | enum { | ||
| 79 | MLX4_CMPT_SHIFT = 24, | ||
| 80 | MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT | ||
| 81 | }; | ||
| 82 | |||
| 83 | #ifdef CONFIG_MLX4_DEBUG | ||
| 84 | extern int mlx4_debug_level; | ||
| 85 | #else /* CONFIG_MLX4_DEBUG */ | ||
| 86 | #define mlx4_debug_level (0) | ||
| 87 | #endif /* CONFIG_MLX4_DEBUG */ | ||
| 88 | |||
| 89 | #define mlx4_dbg(mdev, format, arg...) \ | ||
| 90 | do { \ | ||
| 91 | if (mlx4_debug_level) \ | ||
| 92 | dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \ | ||
| 93 | } while (0) | ||
| 94 | |||
| 95 | #define mlx4_err(mdev, format, arg...) \ | ||
| 96 | dev_err(&mdev->pdev->dev, format, ##arg) | ||
| 97 | #define mlx4_info(mdev, format, arg...) \ | ||
| 98 | dev_info(&mdev->pdev->dev, format, ##arg) | ||
| 99 | #define mlx4_warn(mdev, format, arg...) \ | ||
| 100 | dev_warn(&mdev->pdev->dev, format, ##arg) | ||
| 101 | |||
| 102 | struct mlx4_bitmap { | ||
| 103 | u32 last; | ||
| 104 | u32 top; | ||
| 105 | u32 max; | ||
| 106 | u32 reserved_top; | ||
| 107 | u32 mask; | ||
| 108 | u32 avail; | ||
| 109 | spinlock_t lock; | ||
| 110 | unsigned long *table; | ||
| 111 | }; | ||
| 112 | |||
| 113 | struct mlx4_buddy { | ||
| 114 | unsigned long **bits; | ||
| 115 | unsigned int *num_free; | ||
| 116 | int max_order; | ||
| 117 | spinlock_t lock; | ||
| 118 | }; | ||
| 119 | |||
| 120 | struct mlx4_icm; | ||
| 121 | |||
| 122 | struct mlx4_icm_table { | ||
| 123 | u64 virt; | ||
| 124 | int num_icm; | ||
| 125 | int num_obj; | ||
| 126 | int obj_size; | ||
| 127 | int lowmem; | ||
| 128 | int coherent; | ||
| 129 | struct mutex mutex; | ||
| 130 | struct mlx4_icm **icm; | ||
| 131 | }; | ||
| 132 | |||
| 133 | struct mlx4_eq { | ||
| 134 | struct mlx4_dev *dev; | ||
| 135 | void __iomem *doorbell; | ||
| 136 | int eqn; | ||
| 137 | u32 cons_index; | ||
| 138 | u16 irq; | ||
| 139 | u16 have_irq; | ||
| 140 | int nent; | ||
| 141 | struct mlx4_buf_list *page_list; | ||
| 142 | struct mlx4_mtt mtt; | ||
| 143 | }; | ||
| 144 | |||
| 145 | struct mlx4_profile { | ||
| 146 | int num_qp; | ||
| 147 | int rdmarc_per_qp; | ||
| 148 | int num_srq; | ||
| 149 | int num_cq; | ||
| 150 | int num_mcg; | ||
| 151 | int num_mpt; | ||
| 152 | int num_mtt; | ||
| 153 | }; | ||
| 154 | |||
| 155 | struct mlx4_fw { | ||
| 156 | u64 clr_int_base; | ||
| 157 | u64 catas_offset; | ||
| 158 | struct mlx4_icm *fw_icm; | ||
| 159 | struct mlx4_icm *aux_icm; | ||
| 160 | u32 catas_size; | ||
| 161 | u16 fw_pages; | ||
| 162 | u8 clr_int_bar; | ||
| 163 | u8 catas_bar; | ||
| 164 | }; | ||
| 165 | |||
| 166 | #define MGM_QPN_MASK 0x00FFFFFF | ||
| 167 | #define MGM_BLCK_LB_BIT 30 | ||
| 168 | |||
| 169 | struct mlx4_promisc_qp { | ||
| 170 | struct list_head list; | ||
| 171 | u32 qpn; | ||
| 172 | }; | ||
| 173 | |||
| 174 | struct mlx4_steer_index { | ||
| 175 | struct list_head list; | ||
| 176 | unsigned int index; | ||
| 177 | struct list_head duplicates; | ||
| 178 | }; | ||
| 179 | |||
| 180 | struct mlx4_mgm { | ||
| 181 | __be32 next_gid_index; | ||
| 182 | __be32 members_count; | ||
| 183 | u32 reserved[2]; | ||
| 184 | u8 gid[16]; | ||
| 185 | __be32 qp[MLX4_QP_PER_MGM]; | ||
| 186 | }; | ||
| 187 | struct mlx4_cmd { | ||
| 188 | struct pci_pool *pool; | ||
| 189 | void __iomem *hcr; | ||
| 190 | struct mutex hcr_mutex; | ||
| 191 | struct semaphore poll_sem; | ||
| 192 | struct semaphore event_sem; | ||
| 193 | int max_cmds; | ||
| 194 | spinlock_t context_lock; | ||
| 195 | int free_head; | ||
| 196 | struct mlx4_cmd_context *context; | ||
| 197 | u16 token_mask; | ||
| 198 | u8 use_events; | ||
| 199 | u8 toggle; | ||
| 200 | }; | ||
| 201 | |||
| 202 | struct mlx4_uar_table { | ||
| 203 | struct mlx4_bitmap bitmap; | ||
| 204 | }; | ||
| 205 | |||
| 206 | struct mlx4_mr_table { | ||
| 207 | struct mlx4_bitmap mpt_bitmap; | ||
| 208 | struct mlx4_buddy mtt_buddy; | ||
| 209 | u64 mtt_base; | ||
| 210 | u64 mpt_base; | ||
| 211 | struct mlx4_icm_table mtt_table; | ||
| 212 | struct mlx4_icm_table dmpt_table; | ||
| 213 | }; | ||
| 214 | |||
| 215 | struct mlx4_cq_table { | ||
| 216 | struct mlx4_bitmap bitmap; | ||
| 217 | spinlock_t lock; | ||
| 218 | struct radix_tree_root tree; | ||
| 219 | struct mlx4_icm_table table; | ||
| 220 | struct mlx4_icm_table cmpt_table; | ||
| 221 | }; | ||
| 222 | |||
| 223 | struct mlx4_eq_table { | ||
| 224 | struct mlx4_bitmap bitmap; | ||
| 225 | char *irq_names; | ||
| 226 | void __iomem *clr_int; | ||
| 227 | void __iomem **uar_map; | ||
| 228 | u32 clr_mask; | ||
| 229 | struct mlx4_eq *eq; | ||
| 230 | struct mlx4_icm_table table; | ||
| 231 | struct mlx4_icm_table cmpt_table; | ||
| 232 | int have_irq; | ||
| 233 | u8 inta_pin; | ||
| 234 | }; | ||
| 235 | |||
| 236 | struct mlx4_srq_table { | ||
| 237 | struct mlx4_bitmap bitmap; | ||
| 238 | spinlock_t lock; | ||
| 239 | struct radix_tree_root tree; | ||
| 240 | struct mlx4_icm_table table; | ||
| 241 | struct mlx4_icm_table cmpt_table; | ||
| 242 | }; | ||
| 243 | |||
| 244 | struct mlx4_qp_table { | ||
| 245 | struct mlx4_bitmap bitmap; | ||
| 246 | u32 rdmarc_base; | ||
| 247 | int rdmarc_shift; | ||
| 248 | spinlock_t lock; | ||
| 249 | struct mlx4_icm_table qp_table; | ||
| 250 | struct mlx4_icm_table auxc_table; | ||
| 251 | struct mlx4_icm_table altc_table; | ||
| 252 | struct mlx4_icm_table rdmarc_table; | ||
| 253 | struct mlx4_icm_table cmpt_table; | ||
| 254 | }; | ||
| 255 | |||
| 256 | struct mlx4_mcg_table { | ||
| 257 | struct mutex mutex; | ||
| 258 | struct mlx4_bitmap bitmap; | ||
| 259 | struct mlx4_icm_table table; | ||
| 260 | }; | ||
| 261 | |||
| 262 | struct mlx4_catas_err { | ||
| 263 | u32 __iomem *map; | ||
| 264 | struct timer_list timer; | ||
| 265 | struct list_head list; | ||
| 266 | }; | ||
| 267 | |||
| 268 | #define MLX4_MAX_MAC_NUM 128 | ||
| 269 | #define MLX4_MAC_TABLE_SIZE (MLX4_MAX_MAC_NUM << 3) | ||
| 270 | |||
| 271 | struct mlx4_mac_table { | ||
| 272 | __be64 entries[MLX4_MAX_MAC_NUM]; | ||
| 273 | int refs[MLX4_MAX_MAC_NUM]; | ||
| 274 | struct mutex mutex; | ||
| 275 | int total; | ||
| 276 | int max; | ||
| 277 | }; | ||
| 278 | |||
| 279 | #define MLX4_MAX_VLAN_NUM 128 | ||
| 280 | #define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2) | ||
| 281 | |||
| 282 | struct mlx4_vlan_table { | ||
| 283 | __be32 entries[MLX4_MAX_VLAN_NUM]; | ||
| 284 | int refs[MLX4_MAX_VLAN_NUM]; | ||
| 285 | struct mutex mutex; | ||
| 286 | int total; | ||
| 287 | int max; | ||
| 288 | }; | ||
| 289 | |||
| 290 | struct mlx4_mac_entry { | ||
| 291 | u64 mac; | ||
| 292 | }; | ||
| 293 | |||
| 294 | struct mlx4_port_info { | ||
| 295 | struct mlx4_dev *dev; | ||
| 296 | int port; | ||
| 297 | char dev_name[16]; | ||
| 298 | struct device_attribute port_attr; | ||
| 299 | enum mlx4_port_type tmp_type; | ||
| 300 | struct mlx4_mac_table mac_table; | ||
| 301 | struct radix_tree_root mac_tree; | ||
| 302 | struct mlx4_vlan_table vlan_table; | ||
| 303 | int base_qpn; | ||
| 304 | }; | ||
| 305 | |||
| 306 | struct mlx4_sense { | ||
| 307 | struct mlx4_dev *dev; | ||
| 308 | u8 do_sense_port[MLX4_MAX_PORTS + 1]; | ||
| 309 | u8 sense_allowed[MLX4_MAX_PORTS + 1]; | ||
| 310 | struct delayed_work sense_poll; | ||
| 311 | }; | ||
| 312 | |||
| 313 | struct mlx4_msix_ctl { | ||
| 314 | u64 pool_bm; | ||
| 315 | spinlock_t pool_lock; | ||
| 316 | }; | ||
| 317 | |||
| 318 | struct mlx4_steer { | ||
| 319 | struct list_head promisc_qps[MLX4_NUM_STEERS]; | ||
| 320 | struct list_head steer_entries[MLX4_NUM_STEERS]; | ||
| 321 | struct list_head high_prios; | ||
| 322 | }; | ||
| 323 | |||
| 324 | struct mlx4_priv { | ||
| 325 | struct mlx4_dev dev; | ||
| 326 | |||
| 327 | struct list_head dev_list; | ||
| 328 | struct list_head ctx_list; | ||
| 329 | spinlock_t ctx_lock; | ||
| 330 | |||
| 331 | struct list_head pgdir_list; | ||
| 332 | struct mutex pgdir_mutex; | ||
| 333 | |||
| 334 | struct mlx4_fw fw; | ||
| 335 | struct mlx4_cmd cmd; | ||
| 336 | |||
| 337 | struct mlx4_bitmap pd_bitmap; | ||
| 338 | struct mlx4_uar_table uar_table; | ||
| 339 | struct mlx4_mr_table mr_table; | ||
| 340 | struct mlx4_cq_table cq_table; | ||
| 341 | struct mlx4_eq_table eq_table; | ||
| 342 | struct mlx4_srq_table srq_table; | ||
| 343 | struct mlx4_qp_table qp_table; | ||
| 344 | struct mlx4_mcg_table mcg_table; | ||
| 345 | struct mlx4_bitmap counters_bitmap; | ||
| 346 | |||
| 347 | struct mlx4_catas_err catas_err; | ||
| 348 | |||
| 349 | void __iomem *clr_base; | ||
| 350 | |||
| 351 | struct mlx4_uar driver_uar; | ||
| 352 | void __iomem *kar; | ||
| 353 | struct mlx4_port_info port[MLX4_MAX_PORTS + 1]; | ||
| 354 | struct mlx4_sense sense; | ||
| 355 | struct mutex port_mutex; | ||
| 356 | struct mlx4_msix_ctl msix_ctl; | ||
| 357 | struct mlx4_steer *steer; | ||
| 358 | struct list_head bf_list; | ||
| 359 | struct mutex bf_mutex; | ||
| 360 | struct io_mapping *bf_mapping; | ||
| 361 | }; | ||
| 362 | |||
| 363 | static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) | ||
| 364 | { | ||
| 365 | return container_of(dev, struct mlx4_priv, dev); | ||
| 366 | } | ||
| 367 | |||
| 368 | #define MLX4_SENSE_RANGE (HZ * 3) | ||
| 369 | |||
| 370 | extern struct workqueue_struct *mlx4_wq; | ||
| 371 | |||
| 372 | u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap); | ||
| 373 | void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); | ||
| 374 | u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align); | ||
| 375 | void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt); | ||
| 376 | u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap); | ||
| 377 | int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, | ||
| 378 | u32 reserved_bot, u32 reserved_top); | ||
| 379 | void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); | ||
| 380 | |||
| 381 | int mlx4_reset(struct mlx4_dev *dev); | ||
| 382 | |||
| 383 | int mlx4_alloc_eq_table(struct mlx4_dev *dev); | ||
| 384 | void mlx4_free_eq_table(struct mlx4_dev *dev); | ||
| 385 | |||
| 386 | int mlx4_init_pd_table(struct mlx4_dev *dev); | ||
| 387 | int mlx4_init_uar_table(struct mlx4_dev *dev); | ||
| 388 | int mlx4_init_mr_table(struct mlx4_dev *dev); | ||
| 389 | int mlx4_init_eq_table(struct mlx4_dev *dev); | ||
| 390 | int mlx4_init_cq_table(struct mlx4_dev *dev); | ||
| 391 | int mlx4_init_qp_table(struct mlx4_dev *dev); | ||
| 392 | int mlx4_init_srq_table(struct mlx4_dev *dev); | ||
| 393 | int mlx4_init_mcg_table(struct mlx4_dev *dev); | ||
| 394 | |||
| 395 | void mlx4_cleanup_pd_table(struct mlx4_dev *dev); | ||
| 396 | void mlx4_cleanup_uar_table(struct mlx4_dev *dev); | ||
| 397 | void mlx4_cleanup_mr_table(struct mlx4_dev *dev); | ||
| 398 | void mlx4_cleanup_eq_table(struct mlx4_dev *dev); | ||
| 399 | void mlx4_cleanup_cq_table(struct mlx4_dev *dev); | ||
| 400 | void mlx4_cleanup_qp_table(struct mlx4_dev *dev); | ||
| 401 | void mlx4_cleanup_srq_table(struct mlx4_dev *dev); | ||
| 402 | void mlx4_cleanup_mcg_table(struct mlx4_dev *dev); | ||
| 403 | |||
| 404 | void mlx4_start_catas_poll(struct mlx4_dev *dev); | ||
| 405 | void mlx4_stop_catas_poll(struct mlx4_dev *dev); | ||
| 406 | void mlx4_catas_init(void); | ||
| 407 | int mlx4_restart_one(struct pci_dev *pdev); | ||
| 408 | int mlx4_register_device(struct mlx4_dev *dev); | ||
| 409 | void mlx4_unregister_device(struct mlx4_dev *dev); | ||
| 410 | void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port); | ||
| 411 | |||
| 412 | struct mlx4_dev_cap; | ||
| 413 | struct mlx4_init_hca_param; | ||
| 414 | |||
| 415 | u64 mlx4_make_profile(struct mlx4_dev *dev, | ||
| 416 | struct mlx4_profile *request, | ||
| 417 | struct mlx4_dev_cap *dev_cap, | ||
| 418 | struct mlx4_init_hca_param *init_hca); | ||
| 419 | |||
| 420 | int mlx4_cmd_init(struct mlx4_dev *dev); | ||
| 421 | void mlx4_cmd_cleanup(struct mlx4_dev *dev); | ||
| 422 | void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param); | ||
| 423 | int mlx4_cmd_use_events(struct mlx4_dev *dev); | ||
| 424 | void mlx4_cmd_use_polling(struct mlx4_dev *dev); | ||
| 425 | |||
| 426 | void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn); | ||
| 427 | void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type); | ||
| 428 | |||
| 429 | void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type); | ||
| 430 | |||
| 431 | void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); | ||
| 432 | |||
| 433 | void mlx4_handle_catas_err(struct mlx4_dev *dev); | ||
| 434 | |||
| 435 | int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, | ||
| 436 | enum mlx4_port_type *type); | ||
| 437 | void mlx4_do_sense_ports(struct mlx4_dev *dev, | ||
| 438 | enum mlx4_port_type *stype, | ||
| 439 | enum mlx4_port_type *defaults); | ||
| 440 | void mlx4_start_sense(struct mlx4_dev *dev); | ||
| 441 | void mlx4_stop_sense(struct mlx4_dev *dev); | ||
| 442 | void mlx4_sense_init(struct mlx4_dev *dev); | ||
| 443 | int mlx4_check_port_params(struct mlx4_dev *dev, | ||
| 444 | enum mlx4_port_type *port_type); | ||
| 445 | int mlx4_change_port_types(struct mlx4_dev *dev, | ||
| 446 | enum mlx4_port_type *port_types); | ||
| 447 | |||
| 448 | void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); | ||
| 449 | void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); | ||
| 450 | |||
| 451 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); | ||
| 452 | int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); | ||
| 453 | |||
| 454 | int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | ||
| 455 | enum mlx4_protocol prot, enum mlx4_steer_type steer); | ||
| 456 | int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | ||
| 457 | int block_mcast_loopback, enum mlx4_protocol prot, | ||
| 458 | enum mlx4_steer_type steer); | ||
| 459 | #endif /* MLX4_H */ | ||
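
The MLX4_QP_PER_MGM constant above is pure layout arithmetic: an MGM entry is 0x100 = 256 bytes, the struct mlx4_mgm header (next_gid_index, members_count, two reserved words, and the 16-byte GID) occupies the first two 16-byte units, and the remaining 224 bytes hold 56 big-endian QP numbers. A standalone re-derivation, not driver code:

#include <assert.h>

enum { ENTRY_SIZE = 0x100 };				/* MLX4_MGM_ENTRY_SIZE */
enum { QP_PER_MGM = 4 * (ENTRY_SIZE / 16 - 2) };	/* 4 * (16 - 2) = 56 */

int main(void)
{
	/* 4 + 4 + 8 + 16 = 32-byte header, then 56 four-byte QPNs
	 * exactly fill the 256-byte entry */
	assert(4 + 4 + 8 + 16 + QP_PER_MGM * 4 == ENTRY_SIZE);
	return 0;
}
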
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h new file mode 100644 index 00000000000..ed84811766e --- /dev/null +++ b/drivers/net/mlx4/mlx4_en.h | |||
| @@ -0,0 +1,607 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
| 3 | * | ||
| 4 | * This software is available to you under a choice of one of two | ||
| 5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 6 | * General Public License (GPL) Version 2, available from the file | ||
| 7 | * COPYING in the main directory of this source tree, or the | ||
| 8 | * OpenIB.org BSD license below: | ||
| 9 | * | ||
| 10 | * Redistribution and use in source and binary forms, with or | ||
| 11 | * without modification, are permitted provided that the following | ||
| 12 | * conditions are met: | ||
| 13 | * | ||
| 14 | * - Redistributions of source code must retain the above | ||
| 15 | * copyright notice, this list of conditions and the following | ||
| 16 | * disclaimer. | ||
| 17 | * | ||
| 18 | * - Redistributions in binary form must reproduce the above | ||
| 19 | * copyright notice, this list of conditions and the following | ||
| 20 | * disclaimer in the documentation and/or other materials | ||
| 21 | * provided with the distribution. | ||
| 22 | * | ||
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 30 | * SOFTWARE. | ||
| 31 | * | ||
| 32 | */ | ||
| 33 | |||
| 34 | #ifndef _MLX4_EN_H_ | ||
| 35 | #define _MLX4_EN_H_ | ||
| 36 | |||
| 37 | #include <linux/bitops.h> | ||
| 38 | #include <linux/compiler.h> | ||
| 39 | #include <linux/list.h> | ||
| 40 | #include <linux/mutex.h> | ||
| 41 | #include <linux/netdevice.h> | ||
| 42 | #include <linux/if_vlan.h> | ||
| 43 | |||
| 44 | #include <linux/mlx4/device.h> | ||
| 45 | #include <linux/mlx4/qp.h> | ||
| 46 | #include <linux/mlx4/cq.h> | ||
| 47 | #include <linux/mlx4/srq.h> | ||
| 48 | #include <linux/mlx4/doorbell.h> | ||
| 49 | #include <linux/mlx4/cmd.h> | ||
| 50 | |||
| 51 | #include "en_port.h" | ||
| 52 | |||
| 53 | #define DRV_NAME "mlx4_en" | ||
| 54 | #define DRV_VERSION "1.5.4.1" | ||
| 55 | #define DRV_RELDATE "March 2011" | ||
| 56 | |||
| 57 | #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) | ||
| 58 | |||
| 59 | /* | ||
| 60 | * Device constants | ||
| 61 | */ | ||
| 62 | |||
| 63 | |||
| 64 | #define MLX4_EN_PAGE_SHIFT 12 | ||
| 65 | #define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) | ||
| 66 | #define MAX_RX_RINGS 16 | ||
| 67 | #define MIN_RX_RINGS 4 | ||
| 68 | #define TXBB_SIZE 64 | ||
| 69 | #define HEADROOM (2048 / TXBB_SIZE + 1) | ||
| 70 | #define STAMP_STRIDE 64 | ||
| 71 | #define STAMP_DWORDS (STAMP_STRIDE / 4) | ||
| 72 | #define STAMP_SHIFT 31 | ||
| 73 | #define STAMP_VAL 0x7fffffff | ||
| 74 | #define STATS_DELAY (HZ / 4) | ||
| 75 | |||
| 76 | /* Typical TSO descriptor with 16 gather entries is 352 bytes... */ | ||
| 77 | #define MAX_DESC_SIZE 512 | ||
| 78 | #define MAX_DESC_TXBBS (MAX_DESC_SIZE / TXBB_SIZE) | ||
| 79 | |||
| 80 | /* | ||
| 81 | * OS related constants and tunables | ||
| 82 | */ | ||
| 83 | |||
| 84 | #define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ) | ||
| 85 | |||
| 86 | #define MLX4_EN_ALLOC_ORDER 2 | ||
| 87 | #define MLX4_EN_ALLOC_SIZE (PAGE_SIZE << MLX4_EN_ALLOC_ORDER) | ||
| 88 | |||
| 89 | #define MLX4_EN_MAX_LRO_DESCRIPTORS 32 | ||
| 90 | |||
| 91 | /* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU | ||
| 92 | * and 4K allocations) */ | ||
| 93 | enum { | ||
| 94 | FRAG_SZ0 = 512 - NET_IP_ALIGN, | ||
| 95 | FRAG_SZ1 = 1024, | ||
| 96 | FRAG_SZ2 = 4096, | ||
| 97 | FRAG_SZ3 = MLX4_EN_ALLOC_SIZE | ||
| 98 | }; | ||
| 99 | #define MLX4_EN_MAX_RX_FRAGS 4 | ||
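
A back-of-envelope check of the comment above, assuming NET_IP_ALIGN == 2 and 4K pages (so MLX4_EN_ALLOC_SIZE = 16384): the first three fragments supply 510 + 1024 + 4096 = 5630 bytes, and the remaining 3970 bytes of a 9600-byte MTU land in the fourth. The driver's real per-MTU split is computed by mlx4_en_calc_rx_buf(), declared later in this header; this is just an illustrative trace:

#include <stdio.h>

int main(void)
{
	int frag[4] = { 512 - 2, 1024, 4096, 4096 << 2 /* MLX4_EN_ALLOC_SIZE */ };
	int mtu = 9600, left = mtu;

	for (int i = 0; i < 4 && left > 0; i++) {
		int used = left < frag[i] ? left : frag[i];
		printf("frag %d: %d bytes\n", i, used);
		left -= used;
	}
	return 0;	/* 510 + 1024 + 4096 + 3970 = 9600 */
}
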
| 100 | |||
| 101 | /* Maximum ring sizes */ | ||
| 102 | #define MLX4_EN_MAX_TX_SIZE 8192 | ||
| 103 | #define MLX4_EN_MAX_RX_SIZE 8192 | ||
| 104 | |||
| 105 | /* Minimum ring size for our page-allocation scheme to work */ | ||
| 106 | #define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES) | ||
| 107 | #define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE) | ||
| 108 | |||
| 109 | #define MLX4_EN_SMALL_PKT_SIZE 64 | ||
| 110 | #define MLX4_EN_NUM_TX_RINGS 8 | ||
| 111 | #define MLX4_EN_NUM_PPP_RINGS 8 | ||
| 112 | #define MAX_TX_RINGS (MLX4_EN_NUM_TX_RINGS + MLX4_EN_NUM_PPP_RINGS) | ||
| 113 | #define MLX4_EN_DEF_TX_RING_SIZE 512 | ||
| 114 | #define MLX4_EN_DEF_RX_RING_SIZE 1024 | ||
| 115 | |||
| 116 | /* Target number of packets to coalesce with interrupt moderation */ | ||
| 117 | #define MLX4_EN_RX_COAL_TARGET 44 | ||
| 118 | #define MLX4_EN_RX_COAL_TIME 0x10 | ||
| 119 | |||
| 120 | #define MLX4_EN_TX_COAL_PKTS 5 | ||
| 121 | #define MLX4_EN_TX_COAL_TIME 0x80 | ||
| 122 | |||
| 123 | #define MLX4_EN_RX_RATE_LOW 400000 | ||
| 124 | #define MLX4_EN_RX_COAL_TIME_LOW 0 | ||
| 125 | #define MLX4_EN_RX_RATE_HIGH 450000 | ||
| 126 | #define MLX4_EN_RX_COAL_TIME_HIGH 128 | ||
| 127 | #define MLX4_EN_RX_SIZE_THRESH 1024 | ||
| 128 | #define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH) | ||
| 129 | #define MLX4_EN_SAMPLE_INTERVAL 0 | ||
| 130 | #define MLX4_EN_AVG_PKT_SMALL 256 | ||
| 131 | |||
| 132 | #define MLX4_EN_AUTO_CONF 0xffff | ||
| 133 | |||
| 134 | #define MLX4_EN_DEF_RX_PAUSE 1 | ||
| 135 | #define MLX4_EN_DEF_TX_PAUSE 1 | ||
| 136 | |||
| 137 | /* Interval between successive polls in the Tx routine when polling is used | ||
| 138 | instead of interrupts (in per-core Tx rings) - should be a power of 2 */ | ||
| 139 | #define MLX4_EN_TX_POLL_MODER 16 | ||
| 140 | #define MLX4_EN_TX_POLL_TIMEOUT (HZ / 4) | ||
| 141 | |||
| 142 | #define ETH_LLC_SNAP_SIZE 8 | ||
| 143 | |||
| 144 | #define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN) | ||
| 145 | #define HEADER_COPY_SIZE (128 - NET_IP_ALIGN) | ||
| 146 | #define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN) | ||
| 147 | |||
| 148 | #define MLX4_EN_MIN_MTU 46 | ||
| 149 | #define ETH_BCAST 0xffffffffffffULL | ||
| 150 | |||
| 151 | #define MLX4_EN_LOOPBACK_RETRIES 5 | ||
| 152 | #define MLX4_EN_LOOPBACK_TIMEOUT 100 | ||
| 153 | |||
| 154 | #ifdef MLX4_EN_PERF_STAT | ||
| 155 | /* Number of samples to 'average' */ | ||
| 156 | #define AVG_SIZE 128 | ||
| 157 | #define AVG_FACTOR 1024 | ||
| 158 | #define NUM_PERF_STATS NUM_PERF_COUNTERS | ||
| 159 | |||
| 160 | #define INC_PERF_COUNTER(cnt) (++(cnt)) | ||
| 161 | #define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add)) | ||
| 162 | #define AVG_PERF_COUNTER(cnt, sample) \ | ||
| 163 | ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE) | ||
| 164 | #define GET_PERF_COUNTER(cnt) (cnt) | ||
| 165 | #define GET_AVG_PERF_COUNTER(cnt) ((cnt) / AVG_FACTOR) | ||
| 166 | |||
| 167 | #else | ||
| 168 | |||
| 169 | #define NUM_PERF_STATS 0 | ||
| 170 | #define INC_PERF_COUNTER(cnt) do {} while (0) | ||
| 171 | #define ADD_PERF_COUNTER(cnt, add) do {} while (0) | ||
| 172 | #define AVG_PERF_COUNTER(cnt, sample) do {} while (0) | ||
| 173 | #define GET_PERF_COUNTER(cnt) (0) | ||
| 174 | #define GET_AVG_PERF_COUNTER(cnt) (0) | ||
| 175 | #endif /* MLX4_EN_PERF_STAT */ | ||
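
AVG_PERF_COUNTER above keeps a fixed-point exponential moving average: the stored counter is the running average scaled by AVG_FACTOR so the 1/AVG_SIZE smoothing step survives integer division, and GET_AVG_PERF_COUNTER scales it back out. A standalone numeric demo of the same update rule:

#include <stdio.h>

#define AVG_SIZE 128
#define AVG_FACTOR 1024

int main(void)
{
	unsigned long cnt = 0;

	for (int i = 0; i < 1000; i++) {
		unsigned long sample = 300;	/* e.g. a packet size */
		cnt = (cnt * (AVG_SIZE - 1) + sample * AVG_FACTOR) / AVG_SIZE;
	}
	/* converges toward 300 * AVG_FACTOR; read back as in
	 * GET_AVG_PERF_COUNTER */
	printf("avg ~ %lu\n", cnt / AVG_FACTOR);	/* ~300 */
	return 0;
}
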
| 176 | |||
| 177 | /* | ||
| 178 | * Configurables | ||
| 179 | */ | ||
| 180 | |||
| 181 | enum cq_type { | ||
| 182 | RX = 0, | ||
| 183 | TX = 1, | ||
| 184 | }; | ||
| 185 | |||
| 186 | |||
| 187 | /* | ||
| 188 | * Useful macros | ||
| 189 | */ | ||
| 190 | #define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x)) | ||
| 191 | #define XNOR(x, y) (!(x) == !(y)) | ||
| 192 | #define ILLEGAL_MAC(addr) ((addr) == 0xffffffffffffULL || (addr) == 0x0) | ||
| 193 | |||
| 194 | |||
| 195 | struct mlx4_en_tx_info { | ||
| 196 | struct sk_buff *skb; | ||
| 197 | u32 nr_txbb; | ||
| 198 | u8 linear; | ||
| 199 | u8 data_offset; | ||
| 200 | u8 inl; | ||
| 201 | }; | ||
| 202 | |||
| 203 | |||
| 204 | #define MLX4_EN_BIT_DESC_OWN 0x80000000 | ||
| 205 | #define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg) | ||
| 206 | #define MLX4_EN_MEMTYPE_PAD 0x100 | ||
| 207 | #define DS_SIZE sizeof(struct mlx4_wqe_data_seg) | ||
| 208 | |||
| 209 | |||
| 210 | struct mlx4_en_tx_desc { | ||
| 211 | struct mlx4_wqe_ctrl_seg ctrl; | ||
| 212 | union { | ||
| 213 | struct mlx4_wqe_data_seg data; /* at least one data segment */ | ||
| 214 | struct mlx4_wqe_lso_seg lso; | ||
| 215 | struct mlx4_wqe_inline_seg inl; | ||
| 216 | }; | ||
| 217 | }; | ||
| 218 | |||
| 219 | #define MLX4_EN_USE_SRQ 0x01000000 | ||
| 220 | |||
| 221 | #define MLX4_EN_CX3_LOW_ID 0x1000 | ||
| 222 | #define MLX4_EN_CX3_HIGH_ID 0x1005 | ||
| 223 | |||
| 224 | struct mlx4_en_rx_alloc { | ||
| 225 | struct page *page; | ||
| 226 | u16 offset; | ||
| 227 | }; | ||
| 228 | |||
| 229 | struct mlx4_en_tx_ring { | ||
| 230 | struct mlx4_hwq_resources wqres; | ||
| 231 | u32 size; /* number of TXBBs */ | ||
| 232 | u32 size_mask; | ||
| 233 | u16 stride; | ||
| 234 | u16 cqn; /* index of port CQ associated with this ring */ | ||
| 235 | u32 prod; | ||
| 236 | u32 cons; | ||
| 237 | u32 buf_size; | ||
| 238 | u32 doorbell_qpn; | ||
| 239 | void *buf; | ||
| 240 | u16 poll_cnt; | ||
| 241 | int blocked; | ||
| 242 | struct mlx4_en_tx_info *tx_info; | ||
| 243 | u8 *bounce_buf; | ||
| 244 | u32 last_nr_txbb; | ||
| 245 | struct mlx4_qp qp; | ||
| 246 | struct mlx4_qp_context context; | ||
| 247 | int qpn; | ||
| 248 | enum mlx4_qp_state qp_state; | ||
| 249 | struct mlx4_srq dummy; | ||
| 250 | unsigned long bytes; | ||
| 251 | unsigned long packets; | ||
| 252 | spinlock_t comp_lock; | ||
| 253 | struct mlx4_bf bf; | ||
| 254 | bool bf_enabled; | ||
| 255 | }; | ||
| 256 | |||
| 257 | struct mlx4_en_rx_desc { | ||
| 258 | /* actual number of entries depends on rx ring stride */ | ||
| 259 | struct mlx4_wqe_data_seg data[0]; | ||
| 260 | }; | ||
| 261 | |||
| 262 | struct mlx4_en_rx_ring { | ||
| 263 | struct mlx4_hwq_resources wqres; | ||
| 264 | struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS]; | ||
| 265 | u32 size; /* number of Rx descs */ | ||
| 266 | u32 actual_size; | ||
| 267 | u32 size_mask; | ||
| 268 | u16 stride; | ||
| 269 | u16 log_stride; | ||
| 270 | u16 cqn; /* index of port CQ associated with this ring */ | ||
| 271 | u32 prod; | ||
| 272 | u32 cons; | ||
| 273 | u32 buf_size; | ||
| 274 | void *buf; | ||
| 275 | void *rx_info; | ||
| 276 | unsigned long bytes; | ||
| 277 | unsigned long packets; | ||
| 278 | }; | ||
| 279 | |||
| 280 | |||
| 281 | static inline int mlx4_en_can_lro(__be16 status) | ||
| 282 | { | ||
| 283 | return (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | | ||
| 284 | MLX4_CQE_STATUS_IPV4F | | ||
| 285 | MLX4_CQE_STATUS_IPV6 | | ||
| 286 | MLX4_CQE_STATUS_IPV4OPT | | ||
| 287 | MLX4_CQE_STATUS_TCP | | ||
| 288 | MLX4_CQE_STATUS_UDP | | ||
| 289 | MLX4_CQE_STATUS_IPOK)) == | ||
| 290 | cpu_to_be16(MLX4_CQE_STATUS_IPV4 | | ||
| 291 | MLX4_CQE_STATUS_IPOK | | ||
| 292 | MLX4_CQE_STATUS_TCP); | ||
| 293 | } | ||
| 294 | |||
| 295 | struct mlx4_en_cq { | ||
| 296 | struct mlx4_cq mcq; | ||
| 297 | struct mlx4_hwq_resources wqres; | ||
| 298 | int ring; | ||
| 299 | spinlock_t lock; | ||
| 300 | struct net_device *dev; | ||
| 301 | struct napi_struct napi; | ||
| 302 | /* Per-core Tx cq processing support */ | ||
| 303 | struct timer_list timer; | ||
| 304 | int size; | ||
| 305 | int buf_size; | ||
| 306 | unsigned vector; | ||
| 307 | enum cq_type is_tx; | ||
| 308 | u16 moder_time; | ||
| 309 | u16 moder_cnt; | ||
| 310 | struct mlx4_cqe *buf; | ||
| 311 | #define MLX4_EN_OPCODE_ERROR 0x1e | ||
| 312 | }; | ||
| 313 | |||
| 314 | struct mlx4_en_port_profile { | ||
| 315 | u32 flags; | ||
| 316 | u32 tx_ring_num; | ||
| 317 | u32 rx_ring_num; | ||
| 318 | u32 tx_ring_size; | ||
| 319 | u32 rx_ring_size; | ||
| 320 | u8 rx_pause; | ||
| 321 | u8 rx_ppp; | ||
| 322 | u8 tx_pause; | ||
| 323 | u8 tx_ppp; | ||
| 324 | }; | ||
| 325 | |||
| 326 | struct mlx4_en_profile { | ||
| 327 | int rss_xor; | ||
| 328 | int tcp_rss; | ||
| 329 | int udp_rss; | ||
| 330 | u8 rss_mask; | ||
| 331 | u32 active_ports; | ||
| 332 | u32 small_pkt_int; | ||
| 333 | u8 no_reset; | ||
| 334 | struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1]; | ||
| 335 | }; | ||
| 336 | |||
| 337 | struct mlx4_en_dev { | ||
| 338 | struct mlx4_dev *dev; | ||
| 339 | struct pci_dev *pdev; | ||
| 340 | struct mutex state_lock; | ||
| 341 | struct net_device *pndev[MLX4_MAX_PORTS + 1]; | ||
| 342 | u32 port_cnt; | ||
| 343 | bool device_up; | ||
| 344 | struct mlx4_en_profile profile; | ||
| 345 | u32 LSO_support; | ||
| 346 | struct workqueue_struct *workqueue; | ||
| 347 | struct device *dma_device; | ||
| 348 | void __iomem *uar_map; | ||
| 349 | struct mlx4_uar priv_uar; | ||
| 350 | struct mlx4_mr mr; | ||
| 351 | u32 priv_pdn; | ||
| 352 | spinlock_t uar_lock; | ||
| 353 | u8 mac_removed[MLX4_MAX_PORTS + 1]; | ||
| 354 | }; | ||
| 355 | |||
| 356 | |||
| 357 | struct mlx4_en_rss_map { | ||
| 358 | int base_qpn; | ||
| 359 | struct mlx4_qp qps[MAX_RX_RINGS]; | ||
| 360 | enum mlx4_qp_state state[MAX_RX_RINGS]; | ||
| 361 | struct mlx4_qp indir_qp; | ||
| 362 | enum mlx4_qp_state indir_state; | ||
| 363 | }; | ||
| 364 | |||
| 365 | struct mlx4_en_rss_context { | ||
| 366 | __be32 base_qpn; | ||
| 367 | __be32 default_qpn; | ||
| 368 | u16 reserved; | ||
| 369 | u8 hash_fn; | ||
| 370 | u8 flags; | ||
| 371 | __be32 rss_key[10]; | ||
| 372 | __be32 base_qpn_udp; | ||
| 373 | }; | ||
| 374 | |||
| 375 | struct mlx4_en_port_state { | ||
| 376 | int link_state; | ||
| 377 | int link_speed; | ||
| 378 | int transciver; | ||
| 379 | }; | ||
| 380 | |||
| 381 | struct mlx4_en_pkt_stats { | ||
| 382 | unsigned long broadcast; | ||
| 383 | unsigned long rx_prio[8]; | ||
| 384 | unsigned long tx_prio[8]; | ||
| 385 | #define NUM_PKT_STATS 17 | ||
| 386 | }; | ||
| 387 | |||
| 388 | struct mlx4_en_port_stats { | ||
| 389 | unsigned long tso_packets; | ||
| 390 | unsigned long queue_stopped; | ||
| 391 | unsigned long wake_queue; | ||
| 392 | unsigned long tx_timeout; | ||
| 393 | unsigned long rx_alloc_failed; | ||
| 394 | unsigned long rx_chksum_good; | ||
| 395 | unsigned long rx_chksum_none; | ||
| 396 | unsigned long tx_chksum_offload; | ||
| 397 | #define NUM_PORT_STATS 8 | ||
| 398 | }; | ||
| 399 | |||
| 400 | struct mlx4_en_perf_stats { | ||
| 401 | u32 tx_poll; | ||
| 402 | u64 tx_pktsz_avg; | ||
| 403 | u32 inflight_avg; | ||
| 404 | u16 tx_coal_avg; | ||
| 405 | u16 rx_coal_avg; | ||
| 406 | u32 napi_quota; | ||
| 407 | #define NUM_PERF_COUNTERS 6 | ||
| 408 | }; | ||
| 409 | |||
| 410 | struct mlx4_en_frag_info { | ||
| 411 | u16 frag_size; | ||
| 412 | u16 frag_prefix_size; | ||
| 413 | u16 frag_stride; | ||
| 414 | u16 frag_align; | ||
| 415 | u16 last_offset; | ||
| 416 | |||
| 417 | }; | ||
| 418 | |||
| 419 | struct mlx4_en_priv { | ||
| 420 | struct mlx4_en_dev *mdev; | ||
| 421 | struct mlx4_en_port_profile *prof; | ||
| 422 | struct net_device *dev; | ||
| 423 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; | ||
| 424 | struct net_device_stats stats; | ||
| 425 | struct net_device_stats ret_stats; | ||
| 426 | struct mlx4_en_port_state port_state; | ||
| 427 | spinlock_t stats_lock; | ||
| 428 | |||
| 429 | unsigned long last_moder_packets; | ||
| 430 | unsigned long last_moder_tx_packets; | ||
| 431 | unsigned long last_moder_bytes; | ||
| 432 | unsigned long last_moder_jiffies; | ||
| 433 | int last_moder_time; | ||
| 434 | u16 rx_usecs; | ||
| 435 | u16 rx_frames; | ||
| 436 | u16 tx_usecs; | ||
| 437 | u16 tx_frames; | ||
| 438 | u32 pkt_rate_low; | ||
| 439 | u16 rx_usecs_low; | ||
| 440 | u32 pkt_rate_high; | ||
| 441 | u16 rx_usecs_high; | ||
| 442 | u16 sample_interval; | ||
| 443 | u16 adaptive_rx_coal; | ||
| 444 | u32 msg_enable; | ||
| 445 | u32 loopback_ok; | ||
| 446 | u32 validate_loopback; | ||
| 447 | |||
| 448 | struct mlx4_hwq_resources res; | ||
| 449 | int link_state; | ||
| 450 | int last_link_state; | ||
| 451 | bool port_up; | ||
| 452 | int port; | ||
| 453 | int registered; | ||
| 454 | int allocated; | ||
| 455 | int stride; | ||
| 456 | u64 mac; | ||
| 457 | int mac_index; | ||
| 458 | unsigned max_mtu; | ||
| 459 | int base_qpn; | ||
| 460 | |||
| 461 | struct mlx4_en_rss_map rss_map; | ||
| 462 | u32 flags; | ||
| 463 | #define MLX4_EN_FLAG_PROMISC 0x1 | ||
| 464 | #define MLX4_EN_FLAG_MC_PROMISC 0x2 | ||
| 465 | u32 tx_ring_num; | ||
| 466 | u32 rx_ring_num; | ||
| 467 | u32 rx_skb_size; | ||
| 468 | struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS]; | ||
| 469 | u16 num_frags; | ||
| 470 | u16 log_rx_info; | ||
| 471 | |||
| 472 | struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS]; | ||
| 473 | int tx_vector; | ||
| 474 | struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; | ||
| 475 | struct mlx4_en_cq tx_cq[MAX_TX_RINGS]; | ||
| 476 | struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; | ||
| 477 | struct work_struct mcast_task; | ||
| 478 | struct work_struct mac_task; | ||
| 479 | struct work_struct watchdog_task; | ||
| 480 | struct work_struct linkstate_task; | ||
| 481 | struct delayed_work stats_task; | ||
| 482 | struct mlx4_en_perf_stats pstats; | ||
| 483 | struct mlx4_en_pkt_stats pkstats; | ||
| 484 | struct mlx4_en_port_stats port_stats; | ||
| 485 | char *mc_addrs; | ||
| 486 | int mc_addrs_cnt; | ||
| 487 | struct mlx4_en_stat_out_mbox hw_stats; | ||
| 488 | int vids[128]; | ||
| 489 | bool wol; | ||
| 490 | }; | ||
| 491 | |||
| 492 | enum mlx4_en_wol { | ||
| 493 | MLX4_EN_WOL_MAGIC = (1ULL << 61), | ||
| 494 | MLX4_EN_WOL_ENABLED = (1ULL << 62), | ||
| 495 | MLX4_EN_WOL_DO_MODIFY = (1ULL << 63), | ||
| 496 | }; | ||
| 497 | |||
| 498 | |||
| 499 | void mlx4_en_destroy_netdev(struct net_device *dev); | ||
| 500 | int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | ||
| 501 | struct mlx4_en_port_profile *prof); | ||
| 502 | |||
| 503 | int mlx4_en_start_port(struct net_device *dev); | ||
| 504 | void mlx4_en_stop_port(struct net_device *dev); | ||
| 505 | |||
| 506 | void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors); | ||
| 507 | int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); | ||
| 508 | |||
| 509 | int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, | ||
| 510 | int entries, int ring, enum cq_type mode); | ||
| 511 | void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, | ||
| 512 | bool reserve_vectors); | ||
| 513 | int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | ||
| 514 | void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | ||
| 515 | int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | ||
| 516 | int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | ||
| 517 | |||
| 518 | void mlx4_en_poll_tx_cq(unsigned long data); | ||
| 519 | void mlx4_en_tx_irq(struct mlx4_cq *mcq); | ||
| 520 | u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); | ||
| 521 | netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); | ||
| 522 | |||
| 523 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, | ||
| 524 | int qpn, u32 size, u16 stride); | ||
| 525 | void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); | ||
| 526 | int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, | ||
| 527 | struct mlx4_en_tx_ring *ring, | ||
| 528 | int cq); | ||
| 529 | void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, | ||
| 530 | struct mlx4_en_tx_ring *ring); | ||
| 531 | |||
| 532 | int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, | ||
| 533 | struct mlx4_en_rx_ring *ring, | ||
| 534 | u32 size, u16 stride); | ||
| 535 | void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, | ||
| 536 | struct mlx4_en_rx_ring *ring); | ||
| 537 | int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv); | ||
| 538 | void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, | ||
| 539 | struct mlx4_en_rx_ring *ring); | ||
| 540 | int mlx4_en_process_rx_cq(struct net_device *dev, | ||
| 541 | struct mlx4_en_cq *cq, | ||
| 542 | int budget); | ||
| 543 | int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget); | ||
| 544 | void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, | ||
| 545 | int is_tx, int rss, int qpn, int cqn, | ||
| 546 | struct mlx4_qp_context *context); | ||
| 547 | void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event); | ||
| 548 | int mlx4_en_map_buffer(struct mlx4_buf *buf); | ||
| 549 | void mlx4_en_unmap_buffer(struct mlx4_buf *buf); | ||
| 550 | |||
| 551 | void mlx4_en_calc_rx_buf(struct net_device *dev); | ||
| 552 | int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv); | ||
| 553 | void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv); | ||
| 554 | int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring); | ||
| 555 | void mlx4_en_rx_irq(struct mlx4_cq *mcq); | ||
| 556 | |||
| 557 | int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); | ||
| 558 | int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv); | ||
| 559 | int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, | ||
| 560 | u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx); | ||
| 561 | int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, | ||
| 562 | u8 promisc); | ||
| 563 | |||
| 564 | int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset); | ||
| 565 | int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port); | ||
| 566 | |||
| 567 | #define MLX4_EN_NUM_SELF_TEST 5 | ||
| 568 | void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf); | ||
| 569 | u64 mlx4_en_mac_to_u64(u8 *addr); | ||
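
mlx4_en_mac_to_u64() is only declared here; its definition lives in another file of this commit. A sketch consistent with how ETH_BCAST above encodes a broadcast address, assuming big-endian byte packing into the low 48 bits:

#include <stdint.h>

#define ETH_ALEN 6

/* Sketch: fold a 6-byte MAC into a u64, most significant byte first,
 * so ff:ff:ff:ff:ff:ff becomes 0xffffffffffffULL (ETH_BCAST). */
static uint64_t sketch_mac_to_u64(const uint8_t *addr)
{
	uint64_t mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}
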
| 570 | |||
| 571 | /* | ||
| 572 | * Globals | ||
| 573 | */ | ||
| 574 | extern const struct ethtool_ops mlx4_en_ethtool_ops; | ||
| 575 | |||
| 576 | |||
| 577 | |||
| 578 | /* | ||
| 579 | * printk / logging functions | ||
| 580 | */ | ||
| 581 | |||
| 582 | int en_print(const char *level, const struct mlx4_en_priv *priv, | ||
| 583 | const char *format, ...) __attribute__ ((format (printf, 3, 4))); | ||
| 584 | |||
| 585 | #define en_dbg(mlevel, priv, format, arg...) \ | ||
| 586 | do { \ | ||
| 587 | if (NETIF_MSG_##mlevel & priv->msg_enable) \ | ||
| 588 | en_print(KERN_DEBUG, priv, format, ##arg); \ | ||
| 589 | } while (0) | ||
| 590 | #define en_warn(priv, format, arg...) \ | ||
| 591 | en_print(KERN_WARNING, priv, format, ##arg) | ||
| 592 | #define en_err(priv, format, arg...) \ | ||
| 593 | en_print(KERN_ERR, priv, format, ##arg) | ||
| 594 | #define en_info(priv, format, arg...) \ | ||
| 595 | en_print(KERN_INFO, priv, format, ## arg) | ||
| 596 | |||
| 597 | #define mlx4_err(mdev, format, arg...) \ | ||
| 598 | pr_err("%s %s: " format, DRV_NAME, \ | ||
| 599 | dev_name(&mdev->pdev->dev), ##arg) | ||
| 600 | #define mlx4_info(mdev, format, arg...) \ | ||
| 601 | pr_info("%s %s: " format, DRV_NAME, \ | ||
| 602 | dev_name(&mdev->pdev->dev), ##arg) | ||
| 603 | #define mlx4_warn(mdev, format, arg...) \ | ||
| 604 | pr_warning("%s %s: " format, DRV_NAME, \ | ||
| 605 | dev_name(&mdev->pdev->dev), ##arg) | ||
| 606 | |||
| 607 | #endif | ||
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c new file mode 100644 index 00000000000..9c188bdd7f4 --- /dev/null +++ b/drivers/net/mlx4/mr.c | |||
| @@ -0,0 +1,667 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. | ||
| 3 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. | ||
| 4 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | ||
| 5 | * | ||
| 6 | * This software is available to you under a choice of one of two | ||
| 7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 8 | * General Public License (GPL) Version 2, available from the file | ||
| 9 | * COPYING in the main directory of this source tree, or the | ||
| 10 | * OpenIB.org BSD license below: | ||
| 11 | * | ||
| 12 | * Redistribution and use in source and binary forms, with or | ||
| 13 | * without modification, are permitted provided that the following | ||
| 14 | * conditions are met: | ||
| 15 | * | ||
| 16 | * - Redistributions of source code must retain the above | ||
| 17 | * copyright notice, this list of conditions and the following | ||
| 18 | * disclaimer. | ||
| 19 | * | ||
| 20 | * - Redistributions in binary form must reproduce the above | ||
| 21 | * copyright notice, this list of conditions and the following | ||
| 22 | * disclaimer in the documentation and/or other materials | ||
| 23 | * provided with the distribution. | ||
| 24 | * | ||
| 25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 32 | * SOFTWARE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #include <linux/errno.h> | ||
| 36 | #include <linux/slab.h> | ||
| 37 | |||
| 38 | #include <linux/mlx4/cmd.h> | ||
| 39 | |||
| 40 | #include "mlx4.h" | ||
| 41 | #include "icm.h" | ||
| 42 | |||
| 43 | /* | ||
| 44 | * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. | ||
| 45 | */ | ||
| 46 | struct mlx4_mpt_entry { | ||
| 47 | __be32 flags; | ||
| 48 | __be32 qpn; | ||
| 49 | __be32 key; | ||
| 50 | __be32 pd_flags; | ||
| 51 | __be64 start; | ||
| 52 | __be64 length; | ||
| 53 | __be32 lkey; | ||
| 54 | __be32 win_cnt; | ||
| 55 | u8 reserved1[3]; | ||
| 56 | u8 mtt_rep; | ||
| 57 | __be64 mtt_seg; | ||
| 58 | __be32 mtt_sz; | ||
| 59 | __be32 entity_size; | ||
| 60 | __be32 first_byte_offset; | ||
| 61 | } __packed; | ||
| 62 | |||
| 63 | #define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28) | ||
| 64 | #define MLX4_MPT_FLAG_FREE (0x3UL << 28) | ||
| 65 | #define MLX4_MPT_FLAG_MIO (1 << 17) | ||
| 66 | #define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15) | ||
| 67 | #define MLX4_MPT_FLAG_PHYSICAL (1 << 9) | ||
| 68 | #define MLX4_MPT_FLAG_REGION (1 << 8) | ||
| 69 | |||
| 70 | #define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27) | ||
| 71 | #define MLX4_MPT_PD_FLAG_RAE (1 << 28) | ||
| 72 | #define MLX4_MPT_PD_FLAG_EN_INV (3 << 24) | ||
| 73 | |||
| 74 | #define MLX4_MPT_STATUS_SW 0xF0 | ||
| 75 | #define MLX4_MPT_STATUS_HW 0x00 | ||
| 76 | |||
| 77 | static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order) | ||
| 78 | { | ||
| 79 | int o; | ||
| 80 | int m; | ||
| 81 | u32 seg; | ||
| 82 | |||
| 83 | spin_lock(&buddy->lock); | ||
| 84 | |||
| 85 | for (o = order; o <= buddy->max_order; ++o) | ||
| 86 | if (buddy->num_free[o]) { | ||
| 87 | m = 1 << (buddy->max_order - o); | ||
| 88 | seg = find_first_bit(buddy->bits[o], m); | ||
| 89 | if (seg < m) | ||
| 90 | goto found; | ||
| 91 | } | ||
| 92 | |||
| 93 | spin_unlock(&buddy->lock); | ||
| 94 | return -1; | ||
| 95 | |||
| 96 | found: | ||
| 97 | clear_bit(seg, buddy->bits[o]); | ||
| 98 | --buddy->num_free[o]; | ||
| 99 | |||
| 100 | while (o > order) { | ||
| 101 | --o; | ||
| 102 | seg <<= 1; | ||
| 103 | set_bit(seg ^ 1, buddy->bits[o]); | ||
| 104 | ++buddy->num_free[o]; | ||
| 105 | } | ||
| 106 | |||
| 107 | spin_unlock(&buddy->lock); | ||
| 108 | |||
| 109 | seg <<= order; | ||
| 110 | |||
| 111 | return seg; | ||
| 112 | } | ||
| 113 | |||
| 114 | static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order) | ||
| 115 | { | ||
| 116 | seg >>= order; | ||
| 117 | |||
| 118 | spin_lock(&buddy->lock); | ||
| 119 | |||
| 120 | while (test_bit(seg ^ 1, buddy->bits[order])) { | ||
| 121 | clear_bit(seg ^ 1, buddy->bits[order]); | ||
| 122 | --buddy->num_free[order]; | ||
| 123 | seg >>= 1; | ||
| 124 | ++order; | ||
| 125 | } | ||
| 126 | |||
| 127 | set_bit(seg, buddy->bits[order]); | ||
| 128 | ++buddy->num_free[order]; | ||
| 129 | |||
| 130 | spin_unlock(&buddy->lock); | ||
| 131 | } | ||
| 132 | |||
| 133 | static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) | ||
| 134 | { | ||
| 135 | int i, s; | ||
| 136 | |||
| 137 | buddy->max_order = max_order; | ||
| 138 | spin_lock_init(&buddy->lock); | ||
| 139 | |||
| 140 | buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), | ||
| 141 | GFP_KERNEL); | ||
| 142 | buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int), | ||
| 143 | GFP_KERNEL); | ||
| 144 | if (!buddy->bits || !buddy->num_free) | ||
| 145 | goto err_out; | ||
| 146 | |||
| 147 | for (i = 0; i <= buddy->max_order; ++i) { | ||
| 148 | s = BITS_TO_LONGS(1 << (buddy->max_order - i)); | ||
| 149 | buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL); | ||
| 150 | if (!buddy->bits[i]) | ||
| 151 | goto err_out_free; | ||
| 152 | bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i)); | ||
| 153 | } | ||
| 154 | |||
| 155 | set_bit(0, buddy->bits[buddy->max_order]); | ||
| 156 | buddy->num_free[buddy->max_order] = 1; | ||
| 157 | |||
| 158 | return 0; | ||
| 159 | |||
| 160 | err_out_free: | ||
| 161 | for (i = 0; i <= buddy->max_order; ++i) | ||
| 162 | kfree(buddy->bits[i]); | ||
| 163 | |||
| 164 | err_out: | ||
| 165 | kfree(buddy->bits); | ||
| 166 | kfree(buddy->num_free); | ||
| 167 | |||
| 168 | return -ENOMEM; | ||
| 169 | } | ||
| 170 | |||
| 171 | static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy) | ||
| 172 | { | ||
| 173 | int i; | ||
| 174 | |||
| 175 | for (i = 0; i <= buddy->max_order; ++i) | ||
| 176 | kfree(buddy->bits[i]); | ||
| 177 | |||
| 178 | kfree(buddy->bits); | ||
| 179 | kfree(buddy->num_free); | ||
| 180 | } | ||
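
The merge loop in mlx4_buddy_free() above relies on a buddy-system invariant: once a segment number is shifted down to a given order, a block's buddy is its index with the lowest bit flipped (seg ^ 1), and a merged pair becomes block seg >> 1 one order up. A standalone illustration of that index arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int seg = 24;	/* as returned by an order-3 allocation */
	int order = 3;

	unsigned int idx = seg >> order;	/* block index at this order: 3 */
	printf("block %u at order %d pairs with buddy %u\n", idx, order, idx ^ 1);
	/* if the buddy is also free, both coalesce one order up */
	printf("a merged pair becomes block %u at order %d\n", idx >> 1, order + 1);
	return 0;
}
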
| 181 | |||
| 182 | static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) | ||
| 183 | { | ||
| 184 | struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; | ||
| 185 | u32 seg; | ||
| 186 | |||
| 187 | seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order); | ||
| 188 | if (seg == -1) | ||
| 189 | return -1; | ||
| 190 | |||
| 191 | if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg, | ||
| 192 | seg + (1 << order) - 1)) { | ||
| 193 | mlx4_buddy_free(&mr_table->mtt_buddy, seg, order); | ||
| 194 | return -1; | ||
| 195 | } | ||
| 196 | |||
| 197 | return seg; | ||
| 198 | } | ||
| 199 | |||
| 200 | int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, | ||
| 201 | struct mlx4_mtt *mtt) | ||
| 202 | { | ||
| 203 | int i; | ||
| 204 | |||
| 205 | if (!npages) { | ||
| 206 | mtt->order = -1; | ||
| 207 | mtt->page_shift = MLX4_ICM_PAGE_SHIFT; | ||
| 208 | return 0; | ||
| 209 | } else | ||
| 210 | mtt->page_shift = page_shift; | ||
| 211 | |||
| 212 | for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1) | ||
| 213 | ++mtt->order; | ||
| 214 | |||
| 215 | mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order); | ||
| 216 | if (mtt->first_seg == -1) | ||
| 217 | return -ENOMEM; | ||
| 218 | |||
| 219 | return 0; | ||
| 220 | } | ||
| 221 | EXPORT_SYMBOL_GPL(mlx4_mtt_init); | ||
| 222 | |||
| 223 | void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) | ||
| 224 | { | ||
| 225 | struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; | ||
| 226 | |||
| 227 | if (mtt->order < 0) | ||
| 228 | return; | ||
| 229 | |||
| 230 | mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order); | ||
| 231 | mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg, | ||
| 232 | mtt->first_seg + (1 << mtt->order) - 1); | ||
| 233 | } | ||
| 234 | EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup); | ||
| 235 | |||
| 236 | u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt) | ||
| 237 | { | ||
| 238 | return (u64) mtt->first_seg * dev->caps.mtt_entry_sz; | ||
| 239 | } | ||
| 240 | EXPORT_SYMBOL_GPL(mlx4_mtt_addr); | ||
| 241 | |||
| 242 | static u32 hw_index_to_key(u32 ind) | ||
| 243 | { | ||
| 244 | return (ind >> 24) | (ind << 8); | ||
| 245 | } | ||
| 246 | |||
| 247 | static u32 key_to_hw_index(u32 key) | ||
| 248 | { | ||
| 249 | return (key << 24) | (key >> 8); | ||
| 250 | } | ||
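
hw_index_to_key() and key_to_hw_index() above are mutually inverse 8-bit rotations of a 32-bit value, letting the driver convert between the MPT table index used by hardware and the lkey/rkey handed to consumers without storing both forms. A standalone round-trip check:

#include <assert.h>
#include <stdint.h>

/* same rotations as the driver helpers above */
static uint32_t hw_index_to_key(uint32_t ind) { return (ind >> 24) | (ind << 8); }
static uint32_t key_to_hw_index(uint32_t key) { return (key << 24) | (key >> 8); }

int main(void)
{
	uint32_t ind = 0x00123456;

	/* rotate-left-8 followed by rotate-right-8 is the identity */
	assert(key_to_hw_index(hw_index_to_key(ind)) == ind);
	return 0;
}
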
| 251 | |||
| 252 | static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, | ||
| 253 | int mpt_index) | ||
| 254 | { | ||
| 255 | return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT, | ||
| 256 | MLX4_CMD_TIME_CLASS_B); | ||
| 257 | } | ||
| 258 | |||
| 259 | static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, | ||
| 260 | int mpt_index) | ||
| 261 | { | ||
| 262 | return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index, | ||
| 263 | !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B); | ||
| 264 | } | ||
| 265 | |||
| 266 | int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, | ||
| 267 | int npages, int page_shift, struct mlx4_mr *mr) | ||
| 268 | { | ||
| 269 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 270 | u32 index; | ||
| 271 | int err; | ||
| 272 | |||
| 273 | index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap); | ||
| 274 | if (index == -1) | ||
| 275 | return -ENOMEM; | ||
| 276 | |||
| 277 | mr->iova = iova; | ||
| 278 | mr->size = size; | ||
| 279 | mr->pd = pd; | ||
| 280 | mr->access = access; | ||
| 281 | mr->enabled = 0; | ||
| 282 | mr->key = hw_index_to_key(index); | ||
| 283 | |||
| 284 | err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); | ||
| 285 | if (err) | ||
| 286 | mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index); | ||
| 287 | |||
| 288 | return err; | ||
| 289 | } | ||
| 290 | EXPORT_SYMBOL_GPL(mlx4_mr_alloc); | ||
| 291 | |||
| 292 | void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr) | ||
| 293 | { | ||
| 294 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 295 | int err; | ||
| 296 | |||
| 297 | if (mr->enabled) { | ||
| 298 | err = mlx4_HW2SW_MPT(dev, NULL, | ||
| 299 | key_to_hw_index(mr->key) & | ||
| 300 | (dev->caps.num_mpts - 1)); | ||
| 301 | if (err) | ||
| 302 | mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err); | ||
| 303 | } | ||
| 304 | |||
| 305 | mlx4_mtt_cleanup(dev, &mr->mtt); | ||
| 306 | mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key)); | ||
| 307 | } | ||
| 308 | EXPORT_SYMBOL_GPL(mlx4_mr_free); | ||
| 309 | |||
| 310 | int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) | ||
| 311 | { | ||
| 312 | struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; | ||
| 313 | struct mlx4_cmd_mailbox *mailbox; | ||
| 314 | struct mlx4_mpt_entry *mpt_entry; | ||
| 315 | int err; | ||
| 316 | |||
| 317 | err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key)); | ||
| 318 | if (err) | ||
| 319 | return err; | ||
| 320 | |||
| 321 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 322 | if (IS_ERR(mailbox)) { | ||
| 323 | err = PTR_ERR(mailbox); | ||
| 324 | goto err_table; | ||
| 325 | } | ||
| 326 | mpt_entry = mailbox->buf; | ||
| 327 | |||
| 328 | memset(mpt_entry, 0, sizeof *mpt_entry); | ||
| 329 | |||
| 330 | mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO | | ||
| 331 | MLX4_MPT_FLAG_REGION | | ||
| 332 | mr->access); | ||
| 333 | |||
| 334 | mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key)); | ||
| 335 | mpt_entry->pd_flags = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV); | ||
| 336 | mpt_entry->start = cpu_to_be64(mr->iova); | ||
| 337 | mpt_entry->length = cpu_to_be64(mr->size); | ||
| 338 | mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); | ||
| 339 | |||
| 340 | if (mr->mtt.order < 0) { | ||
| 341 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); | ||
| 342 | mpt_entry->mtt_seg = 0; | ||
| 343 | } else { | ||
| 344 | mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt)); | ||
| 345 | } | ||
| 346 | |||
| 347 | if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) { | ||
| 348 | /* fast register MR in free state */ | ||
| 349 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); | ||
| 350 | mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | | ||
| 351 | MLX4_MPT_PD_FLAG_RAE); | ||
| 352 | mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) * | ||
| 353 | dev->caps.mtts_per_seg); | ||
| 354 | } else { | ||
| 355 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); | ||
| 356 | } | ||
| 357 | |||
| 358 | err = mlx4_SW2HW_MPT(dev, mailbox, | ||
| 359 | key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1)); | ||
| 360 | if (err) { | ||
| 361 | mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err); | ||
| 362 | goto err_cmd; | ||
| 363 | } | ||
| 364 | |||
| 365 | mr->enabled = 1; | ||
| 366 | |||
| 367 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 368 | |||
| 369 | return 0; | ||
| 370 | |||
| 371 | err_cmd: | ||
| 372 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 373 | |||
| 374 | err_table: | ||
| 375 | mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key)); | ||
| 376 | return err; | ||
| 377 | } | ||
| 378 | EXPORT_SYMBOL_GPL(mlx4_mr_enable); | ||
| 379 | |||
| 380 | static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | ||
| 381 | int start_index, int npages, u64 *page_list) | ||
| 382 | { | ||
| 383 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 384 | __be64 *mtts; | ||
| 385 | dma_addr_t dma_handle; | ||
| 386 | int i; | ||
| 387 | int s = start_index * sizeof (u64); | ||
| 388 | |||
| 389 | /* All MTTs must fit in the same page */ | ||
| 390 | if (start_index / (PAGE_SIZE / sizeof (u64)) != | ||
| 391 | (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64))) | ||
| 392 | return -EINVAL; | ||
| 393 | |||
| 394 | if (start_index & (dev->caps.mtts_per_seg - 1)) | ||
| 395 | return -EINVAL; | ||
| 396 | |||
| 397 | mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg + | ||
| 398 | s / dev->caps.mtt_entry_sz, &dma_handle); | ||
| 399 | if (!mtts) | ||
| 400 | return -ENOMEM; | ||
| 401 | |||
| 402 | dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle, | ||
| 403 | npages * sizeof (u64), DMA_TO_DEVICE); | ||
| 404 | |||
| 405 | for (i = 0; i < npages; ++i) | ||
| 406 | mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); | ||
| 407 | |||
| 408 | dma_sync_single_for_device(&dev->pdev->dev, dma_handle, | ||
| 409 | npages * sizeof (u64), DMA_TO_DEVICE); | ||
| 410 | |||
| 411 | return 0; | ||
| 412 | } | ||
| 413 | |||
| 414 | int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | ||
| 415 | int start_index, int npages, u64 *page_list) | ||
| 416 | { | ||
| 417 | int chunk; | ||
| 418 | int err; | ||
| 419 | |||
| 420 | if (mtt->order < 0) | ||
| 421 | return -EINVAL; | ||
| 422 | |||
| 423 | while (npages > 0) { | ||
| 424 | chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages); | ||
| 425 | err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list); | ||
| 426 | if (err) | ||
| 427 | return err; | ||
| 428 | |||
| 429 | npages -= chunk; | ||
| 430 | start_index += chunk; | ||
| 431 | page_list += chunk; | ||
| 432 | } | ||
| 433 | |||
| 434 | return 0; | ||
| 435 | } | ||
| 436 | EXPORT_SYMBOL_GPL(mlx4_write_mtt); | ||
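
mlx4_write_mtt() above walks the page list in chunks sized so that each mlx4_write_mtt_chunk() call stays within a single page of MTT entries: with 8-byte entries, that is PAGE_SIZE / sizeof(u64) entries per chunk. A standalone trace of the split, assuming 4K pages and a page-aligned start_index:

#include <stdio.h>

int main(void)
{
	int page_size = 4096;
	int per_chunk = page_size / 8;	/* sizeof(u64) MTT entries per page: 512 */
	int npages = 1300, start = 0;

	while (npages > 0) {
		int chunk = npages < per_chunk ? npages : per_chunk;
		printf("write MTT entries [%d, %d)\n", start, start + chunk);
		npages -= chunk;
		start += chunk;
	}
	return 0;	/* chunks of 512, 512, 276 */
}
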
| 437 | |||
| 438 | int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | ||
| 439 | struct mlx4_buf *buf) | ||
| 440 | { | ||
| 441 | u64 *page_list; | ||
| 442 | int err; | ||
| 443 | int i; | ||
| 444 | |||
| 445 | page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL); | ||
| 446 | if (!page_list) | ||
| 447 | return -ENOMEM; | ||
| 448 | |||
| 449 | for (i = 0; i < buf->npages; ++i) | ||
| 450 | if (buf->nbufs == 1) | ||
| 451 | page_list[i] = buf->direct.map + (i << buf->page_shift); | ||
| 452 | else | ||
| 453 | page_list[i] = buf->page_list[i].map; | ||
| 454 | |||
| 455 | err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list); | ||
| 456 | |||
| 457 | kfree(page_list); | ||
| 458 | return err; | ||
| 459 | } | ||
| 460 | EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt); | ||
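The two exported helpers above reduce an arbitrary MTT update to page-sized chunk writes. As an illustrative sketch (not part of this patch; `ndma` and `dma_list` stand in for caller state), a consumer would normally pair them with mlx4_mtt_init()/mlx4_mtt_cleanup() from earlier in this file:

	/* Sketch: publish ndma DMA-mapped pages to the HCA through an MTT. */
	struct mlx4_mtt mtt;
	int err;

	err = mlx4_mtt_init(dev, ndma, PAGE_SHIFT, &mtt);
	if (err)
		return err;

	err = mlx4_write_mtt(dev, &mtt, 0, ndma, dma_list);
	if (err)
		mlx4_mtt_cleanup(dev, &mtt);
	return err;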
| 461 | |||
| 462 | int mlx4_init_mr_table(struct mlx4_dev *dev) | ||
| 463 | { | ||
| 464 | struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; | ||
| 465 | int err; | ||
| 466 | |||
| 467 | err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, | ||
| 468 | ~0, dev->caps.reserved_mrws, 0); | ||
| 469 | if (err) | ||
| 470 | return err; | ||
| 471 | |||
| 472 | err = mlx4_buddy_init(&mr_table->mtt_buddy, | ||
| 473 | ilog2(dev->caps.num_mtt_segs)); | ||
| 474 | if (err) | ||
| 475 | goto err_buddy; | ||
| 476 | |||
| 477 | if (dev->caps.reserved_mtts) { | ||
| 478 | if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) { | ||
| 479 | mlx4_warn(dev, "MTT table of order %d is too small.\n", | ||
| 480 | mr_table->mtt_buddy.max_order); | ||
| 481 | err = -ENOMEM; | ||
| 482 | goto err_reserve_mtts; | ||
| 483 | } | ||
| 484 | } | ||
| 485 | |||
| 486 | return 0; | ||
| 487 | |||
| 488 | err_reserve_mtts: | ||
| 489 | mlx4_buddy_cleanup(&mr_table->mtt_buddy); | ||
| 490 | |||
| 491 | err_buddy: | ||
| 492 | mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); | ||
| 493 | |||
| 494 | return err; | ||
| 495 | } | ||
| 496 | |||
| 497 | void mlx4_cleanup_mr_table(struct mlx4_dev *dev) | ||
| 498 | { | ||
| 499 | struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; | ||
| 500 | |||
| 501 | mlx4_buddy_cleanup(&mr_table->mtt_buddy); | ||
| 502 | mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); | ||
| 503 | } | ||
| 504 | |||
| 505 | static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list, | ||
| 506 | int npages, u64 iova) | ||
| 507 | { | ||
| 508 | int i, page_mask; | ||
| 509 | |||
| 510 | if (npages > fmr->max_pages) | ||
| 511 | return -EINVAL; | ||
| 512 | |||
| 513 | page_mask = (1 << fmr->page_shift) - 1; | ||
| 514 | |||
| 515 | 	/* We are getting page lists, so the iova must be page aligned. */ | ||
| 516 | if (iova & page_mask) | ||
| 517 | return -EINVAL; | ||
| 518 | |||
| 519 | /* Trust the user not to pass misaligned data in page_list */ | ||
| 520 | if (0) | ||
| 521 | for (i = 0; i < npages; ++i) { | ||
| 522 | if (page_list[i] & ~page_mask) | ||
| 523 | return -EINVAL; | ||
| 524 | } | ||
| 525 | |||
| 526 | if (fmr->maps >= fmr->max_maps) | ||
| 527 | return -EINVAL; | ||
| 528 | |||
| 529 | return 0; | ||
| 530 | } | ||
| 531 | |||
| 532 | int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, | ||
| 533 | int npages, u64 iova, u32 *lkey, u32 *rkey) | ||
| 534 | { | ||
| 535 | u32 key; | ||
| 536 | int i, err; | ||
| 537 | |||
| 538 | err = mlx4_check_fmr(fmr, page_list, npages, iova); | ||
| 539 | if (err) | ||
| 540 | return err; | ||
| 541 | |||
| 542 | ++fmr->maps; | ||
| 543 | |||
| 544 | key = key_to_hw_index(fmr->mr.key); | ||
| 545 | key += dev->caps.num_mpts; | ||
| 546 | *lkey = *rkey = fmr->mr.key = hw_index_to_key(key); | ||
| 547 | |||
| 548 | *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW; | ||
| 549 | |||
| 550 | /* Make sure MPT status is visible before writing MTT entries */ | ||
| 551 | wmb(); | ||
| 552 | |||
| 553 | dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle, | ||
| 554 | npages * sizeof(u64), DMA_TO_DEVICE); | ||
| 555 | |||
| 556 | for (i = 0; i < npages; ++i) | ||
| 557 | fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); | ||
| 558 | |||
| 559 | dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle, | ||
| 560 | npages * sizeof(u64), DMA_TO_DEVICE); | ||
| 561 | |||
| 562 | fmr->mpt->key = cpu_to_be32(key); | ||
| 563 | fmr->mpt->lkey = cpu_to_be32(key); | ||
| 564 | fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift)); | ||
| 565 | fmr->mpt->start = cpu_to_be64(iova); | ||
| 566 | |||
| 567 | 	/* Make sure MTT entries are visible before setting MPT status */ | ||
| 568 | wmb(); | ||
| 569 | |||
| 570 | *(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW; | ||
| 571 | |||
| 572 | /* Make sure MPT status is visible before consumer can use FMR */ | ||
| 573 | wmb(); | ||
| 574 | |||
| 575 | return 0; | ||
| 576 | } | ||
| 577 | EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr); | ||
| 578 | |||
| 579 | int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, | ||
| 580 | int max_maps, u8 page_shift, struct mlx4_fmr *fmr) | ||
| 581 | { | ||
| 582 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 583 | u64 mtt_seg; | ||
| 584 | int err = -ENOMEM; | ||
| 585 | |||
| 586 | if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32) | ||
| 587 | return -EINVAL; | ||
| 588 | |||
| 589 | /* All MTTs must fit in the same page */ | ||
| 590 | if (max_pages * sizeof *fmr->mtts > PAGE_SIZE) | ||
| 591 | return -EINVAL; | ||
| 592 | |||
| 593 | fmr->page_shift = page_shift; | ||
| 594 | fmr->max_pages = max_pages; | ||
| 595 | fmr->max_maps = max_maps; | ||
| 596 | fmr->maps = 0; | ||
| 597 | |||
| 598 | err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages, | ||
| 599 | page_shift, &fmr->mr); | ||
| 600 | if (err) | ||
| 601 | return err; | ||
| 602 | |||
| 603 | mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz; | ||
| 604 | |||
| 605 | fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, | ||
| 606 | fmr->mr.mtt.first_seg, | ||
| 607 | &fmr->dma_handle); | ||
| 608 | if (!fmr->mtts) { | ||
| 609 | err = -ENOMEM; | ||
| 610 | goto err_free; | ||
| 611 | } | ||
| 612 | |||
| 613 | return 0; | ||
| 614 | |||
| 615 | err_free: | ||
| 616 | mlx4_mr_free(dev, &fmr->mr); | ||
| 617 | return err; | ||
| 618 | } | ||
| 619 | EXPORT_SYMBOL_GPL(mlx4_fmr_alloc); | ||
| 620 | |||
| 621 | int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr) | ||
| 622 | { | ||
| 623 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 624 | int err; | ||
| 625 | |||
| 626 | err = mlx4_mr_enable(dev, &fmr->mr); | ||
| 627 | if (err) | ||
| 628 | return err; | ||
| 629 | |||
| 630 | fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table, | ||
| 631 | key_to_hw_index(fmr->mr.key), NULL); | ||
| 632 | if (!fmr->mpt) | ||
| 633 | return -ENOMEM; | ||
| 634 | |||
| 635 | return 0; | ||
| 636 | } | ||
| 637 | EXPORT_SYMBOL_GPL(mlx4_fmr_enable); | ||
| 638 | |||
| 639 | void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, | ||
| 640 | u32 *lkey, u32 *rkey) | ||
| 641 | { | ||
| 642 | if (!fmr->maps) | ||
| 643 | return; | ||
| 644 | |||
| 645 | fmr->maps = 0; | ||
| 646 | |||
| 647 | *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW; | ||
| 648 | } | ||
| 649 | EXPORT_SYMBOL_GPL(mlx4_fmr_unmap); | ||
| 650 | |||
| 651 | int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr) | ||
| 652 | { | ||
| 653 | if (fmr->maps) | ||
| 654 | return -EBUSY; | ||
| 655 | |||
| 656 | fmr->mr.enabled = 0; | ||
| 657 | mlx4_mr_free(dev, &fmr->mr); | ||
| 658 | |||
| 659 | return 0; | ||
| 660 | } | ||
| 661 | EXPORT_SYMBOL_GPL(mlx4_fmr_free); | ||
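Taken together, the FMR entry points above form an allocate/enable/map/unmap/free cycle. A minimal lifecycle sketch, assuming a valid protection domain `pdn`, a caller-built `page_list` of `npages` DMA addresses, and a page-aligned `iova` (illustrative only, not part of the patch):

	struct mlx4_fmr fmr;
	u32 lkey, rkey;
	int err;

	err = mlx4_fmr_alloc(dev, pdn, MLX4_PERM_LOCAL_READ | MLX4_PERM_LOCAL_WRITE,
			     npages, 4 /* max_maps */, PAGE_SHIFT, &fmr);
	if (err)
		return err;

	err = mlx4_fmr_enable(dev, &fmr);
	if (err)
		goto free;

	err = mlx4_map_phys_fmr(dev, &fmr, page_list, npages, iova, &lkey, &rkey);
	if (!err) {
		/* ... post work requests that use lkey/rkey ... */
		mlx4_fmr_unmap(dev, &fmr, &lkey, &rkey);
	}
free:
	mlx4_fmr_free(dev, &fmr);
	return err;

Note that mlx4_fmr_unmap() only flips the MPT back to software ownership; a consumer that batches unmaps would typically follow up with mlx4_SYNC_TPT() before trusting that stale translations are gone.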
| 662 | |||
| 663 | int mlx4_SYNC_TPT(struct mlx4_dev *dev) | ||
| 664 | { | ||
| 665 | return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000); | ||
| 666 | } | ||
| 667 | EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT); | ||
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c new file mode 100644 index 00000000000..1286b886dce --- /dev/null +++ b/drivers/net/mlx4/pd.c | |||
| @@ -0,0 +1,210 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | ||
| 3 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. | ||
| 4 | * | ||
| 5 | * This software is available to you under a choice of one of two | ||
| 6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 7 | * General Public License (GPL) Version 2, available from the file | ||
| 8 | * COPYING in the main directory of this source tree, or the | ||
| 9 | * OpenIB.org BSD license below: | ||
| 10 | * | ||
| 11 | * Redistribution and use in source and binary forms, with or | ||
| 12 | * without modification, are permitted provided that the following | ||
| 13 | * conditions are met: | ||
| 14 | * | ||
| 15 | * - Redistributions of source code must retain the above | ||
| 16 | * copyright notice, this list of conditions and the following | ||
| 17 | * disclaimer. | ||
| 18 | * | ||
| 19 | * - Redistributions in binary form must reproduce the above | ||
| 20 | * copyright notice, this list of conditions and the following | ||
| 21 | * disclaimer in the documentation and/or other materials | ||
| 22 | * provided with the distribution. | ||
| 23 | * | ||
| 24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 31 | * SOFTWARE. | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <linux/errno.h> | ||
| 35 | #include <linux/io-mapping.h> | ||
| 36 | |||
| 37 | #include <asm/page.h> | ||
| 38 | |||
| 39 | #include "mlx4.h" | ||
| 40 | #include "icm.h" | ||
| 41 | |||
| 42 | enum { | ||
| 43 | MLX4_NUM_RESERVED_UARS = 8 | ||
| 44 | }; | ||
| 45 | |||
| 46 | int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn) | ||
| 47 | { | ||
| 48 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 49 | |||
| 50 | *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap); | ||
| 51 | if (*pdn == -1) | ||
| 52 | return -ENOMEM; | ||
| 53 | |||
| 54 | return 0; | ||
| 55 | } | ||
| 56 | EXPORT_SYMBOL_GPL(mlx4_pd_alloc); | ||
| 57 | |||
| 58 | void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn) | ||
| 59 | { | ||
| 60 | mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn); | ||
| 61 | } | ||
| 62 | EXPORT_SYMBOL_GPL(mlx4_pd_free); | ||
| 63 | |||
| 64 | int mlx4_init_pd_table(struct mlx4_dev *dev) | ||
| 65 | { | ||
| 66 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 67 | |||
| 68 | return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds, | ||
| 69 | (1 << 24) - 1, dev->caps.reserved_pds, 0); | ||
| 70 | } | ||
| 71 | |||
| 72 | void mlx4_cleanup_pd_table(struct mlx4_dev *dev) | ||
| 73 | { | ||
| 74 | mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap); | ||
| 75 | } | ||
| 76 | |||
| 77 | |||
| 78 | int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar) | ||
| 79 | { | ||
| 80 | uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap); | ||
| 81 | if (uar->index == -1) | ||
| 82 | return -ENOMEM; | ||
| 83 | |||
| 84 | uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index; | ||
| 85 | uar->map = NULL; | ||
| 86 | |||
| 87 | return 0; | ||
| 88 | } | ||
| 89 | EXPORT_SYMBOL_GPL(mlx4_uar_alloc); | ||
| 90 | |||
| 91 | void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar) | ||
| 92 | { | ||
| 93 | mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index); | ||
| 94 | } | ||
| 95 | EXPORT_SYMBOL_GPL(mlx4_uar_free); | ||
| 96 | |||
| 97 | int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf) | ||
| 98 | { | ||
| 99 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 100 | struct mlx4_uar *uar; | ||
| 101 | int err = 0; | ||
| 102 | int idx; | ||
| 103 | |||
| 104 | if (!priv->bf_mapping) | ||
| 105 | return -ENOMEM; | ||
| 106 | |||
| 107 | mutex_lock(&priv->bf_mutex); | ||
| 108 | if (!list_empty(&priv->bf_list)) | ||
| 109 | uar = list_entry(priv->bf_list.next, struct mlx4_uar, bf_list); | ||
| 110 | else { | ||
| 111 | if (mlx4_bitmap_avail(&priv->uar_table.bitmap) < MLX4_NUM_RESERVED_UARS) { | ||
| 112 | err = -ENOMEM; | ||
| 113 | goto out; | ||
| 114 | } | ||
| 115 | uar = kmalloc(sizeof *uar, GFP_KERNEL); | ||
| 116 | if (!uar) { | ||
| 117 | err = -ENOMEM; | ||
| 118 | goto out; | ||
| 119 | } | ||
| 120 | err = mlx4_uar_alloc(dev, uar); | ||
| 121 | if (err) | ||
| 122 | goto free_kmalloc; | ||
| 123 | |||
| 124 | uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE); | ||
| 125 | if (!uar->map) { | ||
| 126 | err = -ENOMEM; | ||
| 127 | goto free_uar; | ||
| 128 | } | ||
| 129 | |||
| 130 | uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT); | ||
| 131 | if (!uar->bf_map) { | ||
| 132 | err = -ENOMEM; | ||
| 133 | 			goto unmap_uar; | ||
| 134 | } | ||
| 135 | uar->free_bf_bmap = 0; | ||
| 136 | list_add(&uar->bf_list, &priv->bf_list); | ||
| 137 | } | ||
| 138 | |||
| 139 | bf->uar = uar; | ||
| 140 | idx = ffz(uar->free_bf_bmap); | ||
| 141 | uar->free_bf_bmap |= 1 << idx; | ||
| 143 | bf->offset = 0; | ||
| 144 | bf->buf_size = dev->caps.bf_reg_size / 2; | ||
| 145 | bf->reg = uar->bf_map + idx * dev->caps.bf_reg_size; | ||
| 146 | if (uar->free_bf_bmap == (1 << dev->caps.bf_regs_per_page) - 1) | ||
| 147 | list_del_init(&uar->bf_list); | ||
| 148 | |||
| 149 | goto out; | ||
| 150 | |||
| 151 | unmap_uar: | ||
| 152 | bf->uar = NULL; | ||
| 153 | iounmap(uar->map); | ||
| 154 | |||
| 155 | free_uar: | ||
| 156 | mlx4_uar_free(dev, uar); | ||
| 157 | |||
| 158 | free_kmalloc: | ||
| 159 | kfree(uar); | ||
| 160 | |||
| 161 | out: | ||
| 162 | mutex_unlock(&priv->bf_mutex); | ||
| 163 | return err; | ||
| 164 | } | ||
| 165 | EXPORT_SYMBOL_GPL(mlx4_bf_alloc); | ||
| 166 | |||
| 167 | void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf) | ||
| 168 | { | ||
| 169 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 170 | int idx; | ||
| 171 | |||
| 172 | if (!bf->uar || !bf->uar->bf_map) | ||
| 173 | return; | ||
| 174 | |||
| 175 | mutex_lock(&priv->bf_mutex); | ||
| 176 | idx = (bf->reg - bf->uar->bf_map) / dev->caps.bf_reg_size; | ||
| 177 | bf->uar->free_bf_bmap &= ~(1 << idx); | ||
| 178 | if (!bf->uar->free_bf_bmap) { | ||
| 179 | if (!list_empty(&bf->uar->bf_list)) | ||
| 180 | list_del(&bf->uar->bf_list); | ||
| 181 | |||
| 182 | io_mapping_unmap(bf->uar->bf_map); | ||
| 183 | iounmap(bf->uar->map); | ||
| 184 | mlx4_uar_free(dev, bf->uar); | ||
| 185 | kfree(bf->uar); | ||
| 186 | } else if (list_empty(&bf->uar->bf_list)) | ||
| 187 | list_add(&bf->uar->bf_list, &priv->bf_list); | ||
| 188 | |||
| 189 | mutex_unlock(&priv->bf_mutex); | ||
| 190 | } | ||
| 191 | EXPORT_SYMBOL_GPL(mlx4_bf_free); | ||
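mlx4_bf_alloc() hands out one half of a write-combining blue-flame page (bf->buf_size bytes starting at bf->reg). Below is a sketch of the doorbell pattern a transmit path might use, assuming a descriptor `wqe` of `desc_size` bytes (a multiple of 64) already assembled in host memory; this is illustrative, not part of the patch:

	struct mlx4_bf bf;
	int err;

	err = mlx4_bf_alloc(dev, &bf);
	if (err)
		return err;	/* e.g. fall back to a plain UAR doorbell */

	/* The descriptor must be fully written before it is copied out. */
	wmb();
	__iowrite64_copy(bf.reg + bf.offset, wqe, desc_size / 8);
	wmb();
	/* Ping-pong between the two halves of the blue-flame buffer. */
	bf.offset ^= bf.buf_size;

	mlx4_bf_free(dev, &bf);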
| 192 | |||
| 193 | int mlx4_init_uar_table(struct mlx4_dev *dev) | ||
| 194 | { | ||
| 195 | if (dev->caps.num_uars <= 128) { | ||
| 196 | mlx4_err(dev, "Only %d UAR pages (need more than 128)\n", | ||
| 197 | dev->caps.num_uars); | ||
| 198 | mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n"); | ||
| 199 | return -ENODEV; | ||
| 200 | } | ||
| 201 | |||
| 202 | return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap, | ||
| 203 | dev->caps.num_uars, dev->caps.num_uars - 1, | ||
| 204 | max(128, dev->caps.reserved_uars), 0); | ||
| 205 | } | ||
| 206 | |||
| 207 | void mlx4_cleanup_uar_table(struct mlx4_dev *dev) | ||
| 208 | { | ||
| 209 | mlx4_bitmap_cleanup(&mlx4_priv(dev)->uar_table.bitmap); | ||
| 210 | } | ||
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c new file mode 100644 index 00000000000..609e0ec14ce --- /dev/null +++ b/drivers/net/mlx4/port.c | |||
| @@ -0,0 +1,487 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
| 3 | * | ||
| 4 | * This software is available to you under a choice of one of two | ||
| 5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 6 | * General Public License (GPL) Version 2, available from the file | ||
| 7 | * COPYING in the main directory of this source tree, or the | ||
| 8 | * OpenIB.org BSD license below: | ||
| 9 | * | ||
| 10 | * Redistribution and use in source and binary forms, with or | ||
| 11 | * without modification, are permitted provided that the following | ||
| 12 | * conditions are met: | ||
| 13 | * | ||
| 14 | * - Redistributions of source code must retain the above | ||
| 15 | * copyright notice, this list of conditions and the following | ||
| 16 | * disclaimer. | ||
| 17 | * | ||
| 18 | * - Redistributions in binary form must reproduce the above | ||
| 19 | * copyright notice, this list of conditions and the following | ||
| 20 | * disclaimer in the documentation and/or other materials | ||
| 21 | * provided with the distribution. | ||
| 22 | * | ||
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 30 | * SOFTWARE. | ||
| 31 | */ | ||
| 32 | |||
| 33 | #include <linux/errno.h> | ||
| 34 | #include <linux/if_ether.h> | ||
| 35 | |||
| 36 | #include <linux/mlx4/cmd.h> | ||
| 37 | |||
| 38 | #include "mlx4.h" | ||
| 39 | |||
| 40 | #define MLX4_MAC_VALID (1ull << 63) | ||
| 41 | #define MLX4_MAC_MASK 0xffffffffffffULL | ||
| 42 | |||
| 43 | #define MLX4_VLAN_VALID (1u << 31) | ||
| 44 | #define MLX4_VLAN_MASK 0xfff | ||
| 45 | |||
| 46 | void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) | ||
| 47 | { | ||
| 48 | int i; | ||
| 49 | |||
| 50 | mutex_init(&table->mutex); | ||
| 51 | for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { | ||
| 52 | table->entries[i] = 0; | ||
| 53 | table->refs[i] = 0; | ||
| 54 | } | ||
| 55 | table->max = 1 << dev->caps.log_num_macs; | ||
| 56 | table->total = 0; | ||
| 57 | } | ||
| 58 | |||
| 59 | void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table) | ||
| 60 | { | ||
| 61 | int i; | ||
| 62 | |||
| 63 | mutex_init(&table->mutex); | ||
| 64 | for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) { | ||
| 65 | table->entries[i] = 0; | ||
| 66 | table->refs[i] = 0; | ||
| 67 | } | ||
| 68 | table->max = 1 << dev->caps.log_num_vlans; | ||
| 69 | table->total = 0; | ||
| 70 | } | ||
| 71 | |||
| 72 | static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, | ||
| 73 | __be64 *entries) | ||
| 74 | { | ||
| 75 | struct mlx4_cmd_mailbox *mailbox; | ||
| 76 | u32 in_mod; | ||
| 77 | int err; | ||
| 78 | |||
| 79 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 80 | if (IS_ERR(mailbox)) | ||
| 81 | return PTR_ERR(mailbox); | ||
| 82 | |||
| 83 | memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE); | ||
| 84 | |||
| 85 | in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port; | ||
| 86 | err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, | ||
| 87 | MLX4_CMD_TIME_CLASS_B); | ||
| 88 | |||
| 89 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 90 | return err; | ||
| 91 | } | ||
| 92 | |||
| 93 | static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, | ||
| 94 | u64 mac, int *qpn, u8 reserve) | ||
| 95 | { | ||
| 96 | struct mlx4_qp qp; | ||
| 97 | u8 gid[16] = {0}; | ||
| 98 | int err; | ||
| 99 | |||
| 100 | if (reserve) { | ||
| 101 | err = mlx4_qp_reserve_range(dev, 1, 1, qpn); | ||
| 102 | if (err) { | ||
| 103 | mlx4_err(dev, "Failed to reserve qp for mac registration\n"); | ||
| 104 | return err; | ||
| 105 | } | ||
| 106 | } | ||
| 107 | qp.qpn = *qpn; | ||
| 108 | |||
| 109 | mac &= 0xffffffffffffULL; | ||
| 110 | mac = cpu_to_be64(mac << 16); | ||
| 111 | memcpy(&gid[10], &mac, ETH_ALEN); | ||
| 112 | gid[5] = port; | ||
| 113 | gid[7] = MLX4_UC_STEER << 1; | ||
| 114 | |||
| 115 | err = mlx4_qp_attach_common(dev, &qp, gid, 0, | ||
| 116 | MLX4_PROT_ETH, MLX4_UC_STEER); | ||
| 117 | if (err && reserve) | ||
| 118 | mlx4_qp_release_range(dev, *qpn, 1); | ||
| 119 | |||
| 120 | return err; | ||
| 121 | } | ||
| 122 | |||
| 123 | static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port, | ||
| 124 | u64 mac, int qpn, u8 free) | ||
| 125 | { | ||
| 126 | struct mlx4_qp qp; | ||
| 127 | u8 gid[16] = {0}; | ||
| 128 | |||
| 129 | qp.qpn = qpn; | ||
| 130 | mac &= 0xffffffffffffULL; | ||
| 131 | mac = cpu_to_be64(mac << 16); | ||
| 132 | memcpy(&gid[10], &mac, ETH_ALEN); | ||
| 133 | gid[5] = port; | ||
| 134 | gid[7] = MLX4_UC_STEER << 1; | ||
| 135 | |||
| 136 | mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER); | ||
| 137 | if (free) | ||
| 138 | mlx4_qp_release_range(dev, qpn, 1); | ||
| 139 | } | ||
| 140 | |||
| 141 | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap) | ||
| 142 | { | ||
| 143 | struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; | ||
| 144 | struct mlx4_mac_table *table = &info->mac_table; | ||
| 145 | struct mlx4_mac_entry *entry; | ||
| 146 | int i, err = 0; | ||
| 147 | int free = -1; | ||
| 148 | |||
| 149 | if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { | ||
| 150 | err = mlx4_uc_steer_add(dev, port, mac, qpn, 1); | ||
| 151 | if (!err) { | ||
| 152 | entry = kmalloc(sizeof *entry, GFP_KERNEL); | ||
| 153 | if (!entry) { | ||
| 154 | mlx4_uc_steer_release(dev, port, mac, *qpn, 1); | ||
| 155 | return -ENOMEM; | ||
| 156 | } | ||
| 157 | entry->mac = mac; | ||
| 158 | err = radix_tree_insert(&info->mac_tree, *qpn, entry); | ||
| 159 | if (err) { | ||
| 160 | mlx4_uc_steer_release(dev, port, mac, *qpn, 1); | ||
| 161 | return err; | ||
| 162 | } | ||
| 163 | } else | ||
| 164 | return err; | ||
| 165 | } | ||
| 166 | mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); | ||
| 167 | mutex_lock(&table->mutex); | ||
| 168 | for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { | ||
| 169 | if (free < 0 && !table->refs[i]) { | ||
| 170 | free = i; | ||
| 171 | continue; | ||
| 172 | } | ||
| 173 | |||
| 174 | if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { | ||
| 175 | 			/* MAC already registered, increase reference count */ | ||
| 176 | ++table->refs[i]; | ||
| 177 | goto out; | ||
| 178 | } | ||
| 179 | } | ||
| 180 | |||
| 181 | if (free < 0) { | ||
| 182 | err = -ENOMEM; | ||
| 183 | goto out; | ||
| 184 | } | ||
| 185 | |||
| 186 | mlx4_dbg(dev, "Free MAC index is %d\n", free); | ||
| 187 | |||
| 188 | if (table->total == table->max) { | ||
| 189 | /* No free mac entries */ | ||
| 190 | err = -ENOSPC; | ||
| 191 | goto out; | ||
| 192 | } | ||
| 193 | |||
| 194 | /* Register new MAC */ | ||
| 195 | table->refs[free] = 1; | ||
| 196 | table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID); | ||
| 197 | |||
| 198 | err = mlx4_set_port_mac_table(dev, port, table->entries); | ||
| 199 | if (unlikely(err)) { | ||
| 200 | mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac); | ||
| 201 | table->refs[free] = 0; | ||
| 202 | table->entries[free] = 0; | ||
| 203 | goto out; | ||
| 204 | } | ||
| 205 | |||
| 206 | if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) | ||
| 207 | *qpn = info->base_qpn + free; | ||
| 208 | ++table->total; | ||
| 209 | out: | ||
| 210 | mutex_unlock(&table->mutex); | ||
| 211 | return err; | ||
| 212 | } | ||
| 213 | EXPORT_SYMBOL_GPL(mlx4_register_mac); | ||
| 214 | |||
| 215 | static int validate_index(struct mlx4_dev *dev, | ||
| 216 | struct mlx4_mac_table *table, int index) | ||
| 217 | { | ||
| 218 | int err = 0; | ||
| 219 | |||
| 220 | if (index < 0 || index >= table->max || !table->entries[index]) { | ||
| 221 | 		mlx4_warn(dev, "No valid MAC entry for the given index\n"); | ||
| 222 | err = -EINVAL; | ||
| 223 | } | ||
| 224 | return err; | ||
| 225 | } | ||
| 226 | |||
| 227 | static int find_index(struct mlx4_dev *dev, | ||
| 228 | struct mlx4_mac_table *table, u64 mac) | ||
| 229 | { | ||
| 230 | int i; | ||
| 231 | for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { | ||
| 232 | if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) | ||
| 233 | return i; | ||
| 234 | } | ||
| 235 | 	/* MAC not found */ | ||
| 236 | return -EINVAL; | ||
| 237 | } | ||
| 238 | |||
| 239 | void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn) | ||
| 240 | { | ||
| 241 | struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; | ||
| 242 | struct mlx4_mac_table *table = &info->mac_table; | ||
| 243 | int index = qpn - info->base_qpn; | ||
| 244 | struct mlx4_mac_entry *entry; | ||
| 245 | |||
| 246 | if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { | ||
| 247 | entry = radix_tree_lookup(&info->mac_tree, qpn); | ||
| 248 | if (entry) { | ||
| 249 | mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1); | ||
| 250 | radix_tree_delete(&info->mac_tree, qpn); | ||
| 251 | index = find_index(dev, table, entry->mac); | ||
| 252 | kfree(entry); | ||
| 253 | } | ||
| 254 | } | ||
| 255 | |||
| 256 | mutex_lock(&table->mutex); | ||
| 257 | |||
| 258 | if (validate_index(dev, table, index)) | ||
| 259 | goto out; | ||
| 260 | |||
| 261 | 	/* Drop one reference; clear the entry when none remain */ | ||
| 262 | if (!(--table->refs[index])) { | ||
| 263 | table->entries[index] = 0; | ||
| 264 | mlx4_set_port_mac_table(dev, port, table->entries); | ||
| 265 | --table->total; | ||
| 266 | } | ||
| 267 | out: | ||
| 268 | mutex_unlock(&table->mutex); | ||
| 269 | } | ||
| 270 | EXPORT_SYMBOL_GPL(mlx4_unregister_mac); | ||
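Registration and unregistration above are reference counted per table slot, so paired calls are required. A pairing sketch (hypothetical `mac` and `port`; not part of the patch):

	int qpn;
	int err;

	err = mlx4_register_mac(dev, port, mac, &qpn, 0);
	if (err)
		return err;

	/* ... receive traffic for this MAC on qpn ... */

	mlx4_unregister_mac(dev, port, qpn);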
| 271 | |||
| 272 | int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap) | ||
| 273 | { | ||
| 274 | struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; | ||
| 275 | struct mlx4_mac_table *table = &info->mac_table; | ||
| 276 | int index = qpn - info->base_qpn; | ||
| 277 | struct mlx4_mac_entry *entry; | ||
| 278 | int err; | ||
| 279 | |||
| 280 | if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { | ||
| 281 | entry = radix_tree_lookup(&info->mac_tree, qpn); | ||
| 282 | if (!entry) | ||
| 283 | return -EINVAL; | ||
| 284 | index = find_index(dev, table, entry->mac); | ||
| 285 | mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0); | ||
| 286 | entry->mac = new_mac; | ||
| 287 | err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0); | ||
| 288 | if (err || index < 0) | ||
| 289 | return err; | ||
| 290 | } | ||
| 291 | |||
| 292 | mutex_lock(&table->mutex); | ||
| 293 | |||
| 294 | err = validate_index(dev, table, index); | ||
| 295 | if (err) | ||
| 296 | goto out; | ||
| 297 | |||
| 298 | table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID); | ||
| 299 | |||
| 300 | err = mlx4_set_port_mac_table(dev, port, table->entries); | ||
| 301 | if (unlikely(err)) { | ||
| 302 | mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac); | ||
| 303 | table->entries[index] = 0; | ||
| 304 | } | ||
| 305 | out: | ||
| 306 | mutex_unlock(&table->mutex); | ||
| 307 | return err; | ||
| 308 | } | ||
| 309 | EXPORT_SYMBOL_GPL(mlx4_replace_mac); | ||
| 310 | static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, | ||
| 311 | __be32 *entries) | ||
| 312 | { | ||
| 313 | struct mlx4_cmd_mailbox *mailbox; | ||
| 314 | u32 in_mod; | ||
| 315 | int err; | ||
| 316 | |||
| 317 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 318 | if (IS_ERR(mailbox)) | ||
| 319 | return PTR_ERR(mailbox); | ||
| 320 | |||
| 321 | memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE); | ||
| 322 | in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port; | ||
| 323 | err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, | ||
| 324 | MLX4_CMD_TIME_CLASS_B); | ||
| 325 | |||
| 326 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 327 | |||
| 328 | return err; | ||
| 329 | } | ||
| 330 | |||
| 331 | int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx) | ||
| 332 | { | ||
| 333 | struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; | ||
| 334 | int i; | ||
| 335 | |||
| 336 | for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) { | ||
| 337 | if (table->refs[i] && | ||
| 338 | (vid == (MLX4_VLAN_MASK & | ||
| 339 | be32_to_cpu(table->entries[i])))) { | ||
| 340 | /* VLAN already registered, increase reference count */ | ||
| 341 | *idx = i; | ||
| 342 | return 0; | ||
| 343 | } | ||
| 344 | } | ||
| 345 | |||
| 346 | return -ENOENT; | ||
| 347 | } | ||
| 348 | EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan); | ||
| 349 | |||
| 350 | int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) | ||
| 351 | { | ||
| 352 | struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; | ||
| 353 | int i, err = 0; | ||
| 354 | int free = -1; | ||
| 355 | |||
| 356 | mutex_lock(&table->mutex); | ||
| 357 | for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) { | ||
| 358 | if (free < 0 && (table->refs[i] == 0)) { | ||
| 359 | free = i; | ||
| 360 | continue; | ||
| 361 | } | ||
| 362 | |||
| 363 | if (table->refs[i] && | ||
| 364 | (vlan == (MLX4_VLAN_MASK & | ||
| 365 | be32_to_cpu(table->entries[i])))) { | ||
| 366 | 			/* VLAN already registered, increase reference count */ | ||
| 367 | *index = i; | ||
| 368 | ++table->refs[i]; | ||
| 369 | goto out; | ||
| 370 | } | ||
| 371 | } | ||
| 372 | |||
| 373 | if (free < 0) { | ||
| 374 | err = -ENOMEM; | ||
| 375 | goto out; | ||
| 376 | } | ||
| 377 | |||
| 378 | if (table->total == table->max) { | ||
| 379 | /* No free vlan entries */ | ||
| 380 | err = -ENOSPC; | ||
| 381 | goto out; | ||
| 382 | } | ||
| 383 | |||
| 384 | 	/* Register new VLAN */ | ||
| 385 | table->refs[free] = 1; | ||
| 386 | table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID); | ||
| 387 | |||
| 388 | err = mlx4_set_port_vlan_table(dev, port, table->entries); | ||
| 389 | if (unlikely(err)) { | ||
| 390 | mlx4_warn(dev, "Failed adding vlan: %u\n", vlan); | ||
| 391 | table->refs[free] = 0; | ||
| 392 | table->entries[free] = 0; | ||
| 393 | goto out; | ||
| 394 | } | ||
| 395 | |||
| 396 | *index = free; | ||
| 397 | ++table->total; | ||
| 398 | out: | ||
| 399 | mutex_unlock(&table->mutex); | ||
| 400 | return err; | ||
| 401 | } | ||
| 402 | EXPORT_SYMBOL_GPL(mlx4_register_vlan); | ||
| 403 | |||
| 404 | void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) | ||
| 405 | { | ||
| 406 | struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; | ||
| 407 | |||
| 408 | if (index < MLX4_VLAN_REGULAR) { | ||
| 409 | mlx4_warn(dev, "Trying to free special vlan index %d\n", index); | ||
| 410 | return; | ||
| 411 | } | ||
| 412 | |||
| 413 | mutex_lock(&table->mutex); | ||
| 414 | if (!table->refs[index]) { | ||
| 415 | mlx4_warn(dev, "No vlan entry for index %d\n", index); | ||
| 416 | goto out; | ||
| 417 | } | ||
| 418 | if (--table->refs[index]) { | ||
| 419 | 		mlx4_dbg(dev, "Have more references for index %d, " | ||
| 420 | 			 "no need to modify vlan table\n", index); | ||
| 421 | goto out; | ||
| 422 | } | ||
| 423 | table->entries[index] = 0; | ||
| 424 | mlx4_set_port_vlan_table(dev, port, table->entries); | ||
| 425 | --table->total; | ||
| 426 | out: | ||
| 427 | mutex_unlock(&table->mutex); | ||
| 428 | } | ||
| 429 | EXPORT_SYMBOL_GPL(mlx4_unregister_vlan); | ||
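VLAN slots follow the same reference-counted pattern, with indices below MLX4_VLAN_REGULAR reserved for special entries. A minimal pairing sketch (hypothetical `vid`; not part of the patch):

	int idx;
	int err;

	err = mlx4_register_vlan(dev, port, vid, &idx);
	if (err)
		return err;

	/* ... program idx into the QP context VLAN index field ... */

	mlx4_unregister_vlan(dev, port, idx);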
| 430 | |||
| 431 | int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps) | ||
| 432 | { | ||
| 433 | struct mlx4_cmd_mailbox *inmailbox, *outmailbox; | ||
| 434 | u8 *inbuf, *outbuf; | ||
| 435 | int err; | ||
| 436 | |||
| 437 | inmailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 438 | if (IS_ERR(inmailbox)) | ||
| 439 | return PTR_ERR(inmailbox); | ||
| 440 | |||
| 441 | outmailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 442 | if (IS_ERR(outmailbox)) { | ||
| 443 | mlx4_free_cmd_mailbox(dev, inmailbox); | ||
| 444 | return PTR_ERR(outmailbox); | ||
| 445 | } | ||
| 446 | |||
| 447 | inbuf = inmailbox->buf; | ||
| 448 | outbuf = outmailbox->buf; | ||
| 449 | memset(inbuf, 0, 256); | ||
| 450 | memset(outbuf, 0, 256); | ||
| 451 | inbuf[0] = 1; | ||
| 452 | inbuf[1] = 1; | ||
| 453 | inbuf[2] = 1; | ||
| 454 | inbuf[3] = 1; | ||
| 455 | *(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015); | ||
| 456 | *(__be32 *) (&inbuf[20]) = cpu_to_be32(port); | ||
| 457 | |||
| 458 | err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3, | ||
| 459 | MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C); | ||
| 460 | if (!err) | ||
| 461 | *caps = *(__be32 *) (outbuf + 84); | ||
| 462 | mlx4_free_cmd_mailbox(dev, inmailbox); | ||
| 463 | mlx4_free_cmd_mailbox(dev, outmailbox); | ||
| 464 | return err; | ||
| 465 | } | ||
| 466 | |||
| 467 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) | ||
| 468 | { | ||
| 469 | struct mlx4_cmd_mailbox *mailbox; | ||
| 470 | int err; | ||
| 471 | |||
| 472 | if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) | ||
| 473 | return 0; | ||
| 474 | |||
| 475 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 476 | if (IS_ERR(mailbox)) | ||
| 477 | return PTR_ERR(mailbox); | ||
| 478 | |||
| 479 | memset(mailbox->buf, 0, 256); | ||
| 480 | |||
| 481 | ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; | ||
| 482 | err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, | ||
| 483 | MLX4_CMD_TIME_CLASS_B); | ||
| 484 | |||
| 485 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 486 | return err; | ||
| 487 | } | ||
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c new file mode 100644 index 00000000000..b967647d0c7 --- /dev/null +++ b/drivers/net/mlx4/profile.c | |||
| @@ -0,0 +1,238 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. | ||
| 3 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. | ||
| 4 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | ||
| 5 | * | ||
| 6 | * This software is available to you under a choice of one of two | ||
| 7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 8 | * General Public License (GPL) Version 2, available from the file | ||
| 9 | * COPYING in the main directory of this source tree, or the | ||
| 10 | * OpenIB.org BSD license below: | ||
| 11 | * | ||
| 12 | * Redistribution and use in source and binary forms, with or | ||
| 13 | * without modification, are permitted provided that the following | ||
| 14 | * conditions are met: | ||
| 15 | * | ||
| 16 | * - Redistributions of source code must retain the above | ||
| 17 | * copyright notice, this list of conditions and the following | ||
| 18 | * disclaimer. | ||
| 19 | * | ||
| 20 | * - Redistributions in binary form must reproduce the above | ||
| 21 | * copyright notice, this list of conditions and the following | ||
| 22 | * disclaimer in the documentation and/or other materials | ||
| 23 | * provided with the distribution. | ||
| 24 | * | ||
| 25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 32 | * SOFTWARE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #include <linux/slab.h> | ||
| 36 | |||
| 37 | #include "mlx4.h" | ||
| 38 | #include "fw.h" | ||
| 39 | |||
| 40 | enum { | ||
| 41 | MLX4_RES_QP, | ||
| 42 | MLX4_RES_RDMARC, | ||
| 43 | MLX4_RES_ALTC, | ||
| 44 | MLX4_RES_AUXC, | ||
| 45 | MLX4_RES_SRQ, | ||
| 46 | MLX4_RES_CQ, | ||
| 47 | MLX4_RES_EQ, | ||
| 48 | MLX4_RES_DMPT, | ||
| 49 | MLX4_RES_CMPT, | ||
| 50 | MLX4_RES_MTT, | ||
| 51 | MLX4_RES_MCG, | ||
| 52 | MLX4_RES_NUM | ||
| 53 | }; | ||
| 54 | |||
| 55 | static const char *res_name[] = { | ||
| 56 | [MLX4_RES_QP] = "QP", | ||
| 57 | [MLX4_RES_RDMARC] = "RDMARC", | ||
| 58 | [MLX4_RES_ALTC] = "ALTC", | ||
| 59 | [MLX4_RES_AUXC] = "AUXC", | ||
| 60 | [MLX4_RES_SRQ] = "SRQ", | ||
| 61 | [MLX4_RES_CQ] = "CQ", | ||
| 62 | [MLX4_RES_EQ] = "EQ", | ||
| 63 | [MLX4_RES_DMPT] = "DMPT", | ||
| 64 | [MLX4_RES_CMPT] = "CMPT", | ||
| 65 | [MLX4_RES_MTT] = "MTT", | ||
| 66 | [MLX4_RES_MCG] = "MCG", | ||
| 67 | }; | ||
| 68 | |||
| 69 | u64 mlx4_make_profile(struct mlx4_dev *dev, | ||
| 70 | struct mlx4_profile *request, | ||
| 71 | struct mlx4_dev_cap *dev_cap, | ||
| 72 | struct mlx4_init_hca_param *init_hca) | ||
| 73 | { | ||
| 74 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 75 | struct mlx4_resource { | ||
| 76 | u64 size; | ||
| 77 | u64 start; | ||
| 78 | int type; | ||
| 79 | int num; | ||
| 80 | int log_num; | ||
| 81 | }; | ||
| 82 | |||
| 83 | u64 total_size = 0; | ||
| 84 | struct mlx4_resource *profile; | ||
| 85 | struct mlx4_resource tmp; | ||
| 86 | int i, j; | ||
| 87 | |||
| 88 | profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL); | ||
| 89 | if (!profile) | ||
| 90 | return -ENOMEM; | ||
| 91 | |||
| 92 | profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz; | ||
| 93 | profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz; | ||
| 94 | profile[MLX4_RES_ALTC].size = dev_cap->altc_entry_sz; | ||
| 95 | profile[MLX4_RES_AUXC].size = dev_cap->aux_entry_sz; | ||
| 96 | profile[MLX4_RES_SRQ].size = dev_cap->srq_entry_sz; | ||
| 97 | profile[MLX4_RES_CQ].size = dev_cap->cqc_entry_sz; | ||
| 98 | profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz; | ||
| 99 | profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz; | ||
| 100 | profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz; | ||
| 101 | profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; | ||
| 102 | profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE; | ||
| 103 | |||
| 104 | profile[MLX4_RES_QP].num = request->num_qp; | ||
| 105 | profile[MLX4_RES_RDMARC].num = request->num_qp * request->rdmarc_per_qp; | ||
| 106 | profile[MLX4_RES_ALTC].num = request->num_qp; | ||
| 107 | profile[MLX4_RES_AUXC].num = request->num_qp; | ||
| 108 | profile[MLX4_RES_SRQ].num = request->num_srq; | ||
| 109 | profile[MLX4_RES_CQ].num = request->num_cq; | ||
| 110 | profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); | ||
| 111 | profile[MLX4_RES_DMPT].num = request->num_mpt; | ||
| 112 | profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; | ||
| 113 | profile[MLX4_RES_MTT].num = request->num_mtt; | ||
| 114 | profile[MLX4_RES_MCG].num = request->num_mcg; | ||
| 115 | |||
| 116 | for (i = 0; i < MLX4_RES_NUM; ++i) { | ||
| 117 | profile[i].type = i; | ||
| 118 | profile[i].num = roundup_pow_of_two(profile[i].num); | ||
| 119 | profile[i].log_num = ilog2(profile[i].num); | ||
| 120 | profile[i].size *= profile[i].num; | ||
| 121 | profile[i].size = max(profile[i].size, (u64) PAGE_SIZE); | ||
| 122 | } | ||
| 123 | |||
| 124 | /* | ||
| 125 | * Sort the resources in decreasing order of size. Since they | ||
| 126 | * all have sizes that are powers of 2, we'll be able to keep | ||
| 127 | * resources aligned to their size and pack them without gaps | ||
| 128 | * using the sorted order. | ||
| 129 | */ | ||
| 130 | for (i = MLX4_RES_NUM; i > 0; --i) | ||
| 131 | for (j = 1; j < i; ++j) { | ||
| 132 | if (profile[j].size > profile[j - 1].size) { | ||
| 133 | tmp = profile[j]; | ||
| 134 | profile[j] = profile[j - 1]; | ||
| 135 | profile[j - 1] = tmp; | ||
| 136 | } | ||
| 137 | } | ||
| 138 | |||
| 139 | for (i = 0; i < MLX4_RES_NUM; ++i) { | ||
| 140 | if (profile[i].size) { | ||
| 141 | profile[i].start = total_size; | ||
| 142 | total_size += profile[i].size; | ||
| 143 | } | ||
| 144 | |||
| 145 | if (total_size > dev_cap->max_icm_sz) { | ||
| 146 | mlx4_err(dev, "Profile requires 0x%llx bytes; " | ||
| 147 | "won't fit in 0x%llx bytes of context memory.\n", | ||
| 148 | (unsigned long long) total_size, | ||
| 149 | (unsigned long long) dev_cap->max_icm_sz); | ||
| 150 | kfree(profile); | ||
| 151 | return -ENOMEM; | ||
| 152 | } | ||
| 153 | |||
| 154 | if (profile[i].size) | ||
| 155 | mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, " | ||
| 156 | "size 0x%10llx\n", | ||
| 157 | i, res_name[profile[i].type], profile[i].log_num, | ||
| 158 | (unsigned long long) profile[i].start, | ||
| 159 | (unsigned long long) profile[i].size); | ||
| 160 | } | ||
| 161 | |||
| 162 | mlx4_dbg(dev, "HCA context memory: reserving %d KB\n", | ||
| 163 | (int) (total_size >> 10)); | ||
| 164 | |||
| 165 | for (i = 0; i < MLX4_RES_NUM; ++i) { | ||
| 166 | switch (profile[i].type) { | ||
| 167 | case MLX4_RES_QP: | ||
| 168 | dev->caps.num_qps = profile[i].num; | ||
| 169 | init_hca->qpc_base = profile[i].start; | ||
| 170 | init_hca->log_num_qps = profile[i].log_num; | ||
| 171 | break; | ||
| 172 | case MLX4_RES_RDMARC: | ||
| 173 | for (priv->qp_table.rdmarc_shift = 0; | ||
| 174 | request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num; | ||
| 175 | ++priv->qp_table.rdmarc_shift) | ||
| 176 | ; /* nothing */ | ||
| 177 | dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift; | ||
| 178 | priv->qp_table.rdmarc_base = (u32) profile[i].start; | ||
| 179 | init_hca->rdmarc_base = profile[i].start; | ||
| 180 | init_hca->log_rd_per_qp = priv->qp_table.rdmarc_shift; | ||
| 181 | break; | ||
| 182 | case MLX4_RES_ALTC: | ||
| 183 | init_hca->altc_base = profile[i].start; | ||
| 184 | break; | ||
| 185 | case MLX4_RES_AUXC: | ||
| 186 | init_hca->auxc_base = profile[i].start; | ||
| 187 | break; | ||
| 188 | case MLX4_RES_SRQ: | ||
| 189 | dev->caps.num_srqs = profile[i].num; | ||
| 190 | init_hca->srqc_base = profile[i].start; | ||
| 191 | init_hca->log_num_srqs = profile[i].log_num; | ||
| 192 | break; | ||
| 193 | case MLX4_RES_CQ: | ||
| 194 | dev->caps.num_cqs = profile[i].num; | ||
| 195 | init_hca->cqc_base = profile[i].start; | ||
| 196 | init_hca->log_num_cqs = profile[i].log_num; | ||
| 197 | break; | ||
| 198 | case MLX4_RES_EQ: | ||
| 199 | dev->caps.num_eqs = profile[i].num; | ||
| 200 | init_hca->eqc_base = profile[i].start; | ||
| 201 | init_hca->log_num_eqs = profile[i].log_num; | ||
| 202 | break; | ||
| 203 | case MLX4_RES_DMPT: | ||
| 204 | dev->caps.num_mpts = profile[i].num; | ||
| 205 | priv->mr_table.mpt_base = profile[i].start; | ||
| 206 | init_hca->dmpt_base = profile[i].start; | ||
| 207 | init_hca->log_mpt_sz = profile[i].log_num; | ||
| 208 | break; | ||
| 209 | case MLX4_RES_CMPT: | ||
| 210 | init_hca->cmpt_base = profile[i].start; | ||
| 211 | break; | ||
| 212 | case MLX4_RES_MTT: | ||
| 213 | dev->caps.num_mtt_segs = profile[i].num; | ||
| 214 | priv->mr_table.mtt_base = profile[i].start; | ||
| 215 | init_hca->mtt_base = profile[i].start; | ||
| 216 | break; | ||
| 217 | case MLX4_RES_MCG: | ||
| 218 | dev->caps.num_mgms = profile[i].num >> 1; | ||
| 219 | dev->caps.num_amgms = profile[i].num >> 1; | ||
| 220 | init_hca->mc_base = profile[i].start; | ||
| 221 | init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE); | ||
| 222 | init_hca->log_mc_table_sz = profile[i].log_num; | ||
| 223 | init_hca->log_mc_hash_sz = profile[i].log_num - 1; | ||
| 224 | break; | ||
| 225 | default: | ||
| 226 | break; | ||
| 227 | } | ||
| 228 | } | ||
| 229 | |||
| 230 | /* | ||
| 231 | * PDs don't take any HCA memory, but we assign them as part | ||
| 232 | * of the HCA profile anyway. | ||
| 233 | */ | ||
| 234 | dev->caps.num_pds = MLX4_NUM_PDS; | ||
| 235 | |||
| 236 | kfree(profile); | ||
| 237 | return total_size; | ||
| 238 | } | ||
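The alignment argument in the sorting comment can be checked with a small worked example (not from the patch): after rounding, suppose four resources need 8 MB, 4 MB, 1 MB and 1 MB. Packing them in descending order places them at offsets 0x000000, 0x800000, 0xC00000 and 0xD00000; each start offset is a multiple of the entry's own power-of-two size, so the layout needs no padding, and the same invariant holds for any descending sequence of power-of-two sizes.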
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c new file mode 100644 index 00000000000..ec9350e5f21 --- /dev/null +++ b/drivers/net/mlx4/qp.c | |||
| @@ -0,0 +1,380 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. | ||
| 3 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. | ||
| 4 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. | ||
| 5 | * Copyright (c) 2004 Voltaire, Inc. All rights reserved. | ||
| 6 | * | ||
| 7 | * This software is available to you under a choice of one of two | ||
| 8 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 9 | * General Public License (GPL) Version 2, available from the file | ||
| 10 | * COPYING in the main directory of this source tree, or the | ||
| 11 | * OpenIB.org BSD license below: | ||
| 12 | * | ||
| 13 | * Redistribution and use in source and binary forms, with or | ||
| 14 | * without modification, are permitted provided that the following | ||
| 15 | * conditions are met: | ||
| 16 | * | ||
| 17 | * - Redistributions of source code must retain the above | ||
| 18 | * copyright notice, this list of conditions and the following | ||
| 19 | * disclaimer. | ||
| 20 | * | ||
| 21 | * - Redistributions in binary form must reproduce the above | ||
| 22 | * copyright notice, this list of conditions and the following | ||
| 23 | * disclaimer in the documentation and/or other materials | ||
| 24 | * provided with the distribution. | ||
| 25 | * | ||
| 26 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 27 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 28 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 29 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 30 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 31 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 32 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 33 | * SOFTWARE. | ||
| 34 | */ | ||
| 35 | |||
| 36 | #include <linux/gfp.h> | ||
| 37 | #include <linux/mlx4/cmd.h> | ||
| 38 | #include <linux/mlx4/qp.h> | ||
| 39 | |||
| 40 | #include "mlx4.h" | ||
| 41 | #include "icm.h" | ||
| 42 | |||
| 43 | void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) | ||
| 44 | { | ||
| 45 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; | ||
| 46 | struct mlx4_qp *qp; | ||
| 47 | |||
| 48 | spin_lock(&qp_table->lock); | ||
| 49 | |||
| 50 | qp = __mlx4_qp_lookup(dev, qpn); | ||
| 51 | if (qp) | ||
| 52 | atomic_inc(&qp->refcount); | ||
| 53 | |||
| 54 | spin_unlock(&qp_table->lock); | ||
| 55 | |||
| 56 | if (!qp) { | ||
| 57 | mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn); | ||
| 58 | return; | ||
| 59 | } | ||
| 60 | |||
| 61 | qp->event(qp, event_type); | ||
| 62 | |||
| 63 | if (atomic_dec_and_test(&qp->refcount)) | ||
| 64 | complete(&qp->free); | ||
| 65 | } | ||
| 66 | |||
| 67 | int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | ||
| 68 | enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, | ||
| 69 | struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, | ||
| 70 | int sqd_event, struct mlx4_qp *qp) | ||
| 71 | { | ||
| 72 | static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = { | ||
| 73 | [MLX4_QP_STATE_RST] = { | ||
| 74 | [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, | ||
| 75 | [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, | ||
| 76 | [MLX4_QP_STATE_INIT] = MLX4_CMD_RST2INIT_QP, | ||
| 77 | }, | ||
| 78 | [MLX4_QP_STATE_INIT] = { | ||
| 79 | [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, | ||
| 80 | [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, | ||
| 81 | [MLX4_QP_STATE_INIT] = MLX4_CMD_INIT2INIT_QP, | ||
| 82 | [MLX4_QP_STATE_RTR] = MLX4_CMD_INIT2RTR_QP, | ||
| 83 | }, | ||
| 84 | [MLX4_QP_STATE_RTR] = { | ||
| 85 | [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, | ||
| 86 | [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, | ||
| 87 | [MLX4_QP_STATE_RTS] = MLX4_CMD_RTR2RTS_QP, | ||
| 88 | }, | ||
| 89 | [MLX4_QP_STATE_RTS] = { | ||
| 90 | [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, | ||
| 91 | [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, | ||
| 92 | [MLX4_QP_STATE_RTS] = MLX4_CMD_RTS2RTS_QP, | ||
| 93 | [MLX4_QP_STATE_SQD] = MLX4_CMD_RTS2SQD_QP, | ||
| 94 | }, | ||
| 95 | [MLX4_QP_STATE_SQD] = { | ||
| 96 | [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, | ||
| 97 | [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, | ||
| 98 | [MLX4_QP_STATE_RTS] = MLX4_CMD_SQD2RTS_QP, | ||
| 99 | [MLX4_QP_STATE_SQD] = MLX4_CMD_SQD2SQD_QP, | ||
| 100 | }, | ||
| 101 | [MLX4_QP_STATE_SQER] = { | ||
| 102 | [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, | ||
| 103 | [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, | ||
| 104 | [MLX4_QP_STATE_RTS] = MLX4_CMD_SQERR2RTS_QP, | ||
| 105 | }, | ||
| 106 | [MLX4_QP_STATE_ERR] = { | ||
| 107 | [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, | ||
| 108 | [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, | ||
| 109 | } | ||
| 110 | }; | ||
| 111 | |||
| 112 | struct mlx4_cmd_mailbox *mailbox; | ||
| 113 | int ret = 0; | ||
| 114 | |||
| 115 | if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE || | ||
| 116 | !op[cur_state][new_state]) | ||
| 117 | return -EINVAL; | ||
| 118 | |||
| 119 | if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) | ||
| 120 | return mlx4_cmd(dev, 0, qp->qpn, 2, | ||
| 121 | MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A); | ||
| 122 | |||
| 123 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 124 | if (IS_ERR(mailbox)) | ||
| 125 | return PTR_ERR(mailbox); | ||
| 126 | |||
| 127 | if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) { | ||
| 128 | u64 mtt_addr = mlx4_mtt_addr(dev, mtt); | ||
| 129 | context->mtt_base_addr_h = mtt_addr >> 32; | ||
| 130 | context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); | ||
| 131 | context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; | ||
| 132 | } | ||
| 133 | |||
| 134 | *(__be32 *) mailbox->buf = cpu_to_be32(optpar); | ||
| 135 | memcpy(mailbox->buf + 8, context, sizeof *context); | ||
| 136 | |||
| 137 | ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn = | ||
| 138 | cpu_to_be32(qp->qpn); | ||
| 139 | |||
| 140 | ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31), | ||
| 141 | new_state == MLX4_QP_STATE_RST ? 2 : 0, | ||
| 142 | op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C); | ||
| 143 | |||
| 144 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 145 | return ret; | ||
| 146 | } | ||
| 147 | EXPORT_SYMBOL_GPL(mlx4_qp_modify); | ||
| 148 | |||
| 149 | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) | ||
| 150 | { | ||
| 151 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 152 | struct mlx4_qp_table *qp_table = &priv->qp_table; | ||
| 153 | int qpn; | ||
| 154 | |||
| 155 | qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align); | ||
| 156 | if (qpn == -1) | ||
| 157 | return -ENOMEM; | ||
| 158 | |||
| 159 | *base = qpn; | ||
| 160 | return 0; | ||
| 161 | } | ||
| 162 | EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range); | ||
| 163 | |||
| 164 | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) | ||
| 165 | { | ||
| 166 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 167 | struct mlx4_qp_table *qp_table = &priv->qp_table; | ||
| 168 | if (base_qpn < dev->caps.sqp_start + 8) | ||
| 169 | return; | ||
| 170 | |||
| 171 | mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt); | ||
| 172 | } | ||
| 173 | EXPORT_SYMBOL_GPL(mlx4_qp_release_range); | ||
| 174 | |||
| 175 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp) | ||
| 176 | { | ||
| 177 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 178 | struct mlx4_qp_table *qp_table = &priv->qp_table; | ||
| 179 | int err; | ||
| 180 | |||
| 181 | if (!qpn) | ||
| 182 | return -EINVAL; | ||
| 183 | |||
| 184 | qp->qpn = qpn; | ||
| 185 | |||
| 186 | err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn); | ||
| 187 | if (err) | ||
| 188 | goto err_out; | ||
| 189 | |||
| 190 | err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn); | ||
| 191 | if (err) | ||
| 192 | goto err_put_qp; | ||
| 193 | |||
| 194 | err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn); | ||
| 195 | if (err) | ||
| 196 | goto err_put_auxc; | ||
| 197 | |||
| 198 | err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn); | ||
| 199 | if (err) | ||
| 200 | goto err_put_altc; | ||
| 201 | |||
| 202 | err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn); | ||
| 203 | if (err) | ||
| 204 | goto err_put_rdmarc; | ||
| 205 | |||
| 206 | spin_lock_irq(&qp_table->lock); | ||
| 207 | err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp); | ||
| 208 | spin_unlock_irq(&qp_table->lock); | ||
| 209 | if (err) | ||
| 210 | goto err_put_cmpt; | ||
| 211 | |||
| 212 | atomic_set(&qp->refcount, 1); | ||
| 213 | init_completion(&qp->free); | ||
| 214 | |||
| 215 | return 0; | ||
| 216 | |||
| 217 | err_put_cmpt: | ||
| 218 | mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn); | ||
| 219 | |||
| 220 | err_put_rdmarc: | ||
| 221 | mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn); | ||
| 222 | |||
| 223 | err_put_altc: | ||
| 224 | mlx4_table_put(dev, &qp_table->altc_table, qp->qpn); | ||
| 225 | |||
| 226 | err_put_auxc: | ||
| 227 | mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn); | ||
| 228 | |||
| 229 | err_put_qp: | ||
| 230 | mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); | ||
| 231 | |||
| 232 | err_out: | ||
| 233 | return err; | ||
| 234 | } | ||
| 235 | EXPORT_SYMBOL_GPL(mlx4_qp_alloc); | ||
| 236 | |||
| 237 | void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) | ||
| 238 | { | ||
| 239 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; | ||
| 240 | unsigned long flags; | ||
| 241 | |||
| 242 | spin_lock_irqsave(&qp_table->lock, flags); | ||
| 243 | radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1)); | ||
| 244 | spin_unlock_irqrestore(&qp_table->lock, flags); | ||
| 245 | } | ||
| 246 | EXPORT_SYMBOL_GPL(mlx4_qp_remove); | ||
| 247 | |||
| 248 | void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) | ||
| 249 | { | ||
| 250 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; | ||
| 251 | |||
| 252 | if (atomic_dec_and_test(&qp->refcount)) | ||
| 253 | complete(&qp->free); | ||
| 254 | wait_for_completion(&qp->free); | ||
| 255 | |||
| 256 | mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn); | ||
| 257 | mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn); | ||
| 258 | mlx4_table_put(dev, &qp_table->altc_table, qp->qpn); | ||
| 259 | mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn); | ||
| 260 | mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); | ||
| 261 | } | ||
| 262 | EXPORT_SYMBOL_GPL(mlx4_qp_free); | ||
| 263 | |||
| 264 | static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn) | ||
| 265 | { | ||
| 266 | return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP, | ||
| 267 | MLX4_CMD_TIME_CLASS_B); | ||
| 268 | } | ||
| 269 | |||
| 270 | int mlx4_init_qp_table(struct mlx4_dev *dev) | ||
| 271 | { | ||
| 272 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; | ||
| 273 | int err; | ||
| 274 | int reserved_from_top = 0; | ||
| 275 | |||
| 276 | spin_lock_init(&qp_table->lock); | ||
| 277 | INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC); | ||
| 278 | |||
| 279 | /* | ||
| 280 | * We reserve 2 extra QPs per port for the special QPs. The | ||
| 281 | * block of special QPs must be aligned to a multiple of 8, so | ||
| 282 | * round up. | ||
| 283 | */ | ||
| 284 | dev->caps.sqp_start = | ||
| 285 | ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8); | ||
| 286 | |||
| 287 | { | ||
| 288 | int sort[MLX4_NUM_QP_REGION]; | ||
| 289 | int i, j, tmp; | ||
| 290 | int last_base = dev->caps.num_qps; | ||
| 291 | |||
| 292 | for (i = 1; i < MLX4_NUM_QP_REGION; ++i) | ||
| 293 | sort[i] = i; | ||
| 294 | |||
| 295 | for (i = MLX4_NUM_QP_REGION; i > 0; --i) { | ||
| 296 | for (j = 2; j < i; ++j) { | ||
| 297 | if (dev->caps.reserved_qps_cnt[sort[j]] > | ||
| 298 | dev->caps.reserved_qps_cnt[sort[j - 1]]) { | ||
| 299 | tmp = sort[j]; | ||
| 300 | sort[j] = sort[j - 1]; | ||
| 301 | sort[j - 1] = tmp; | ||
| 302 | } | ||
| 303 | } | ||
| 304 | } | ||
| 305 | |||
| 306 | for (i = 1; i < MLX4_NUM_QP_REGION; ++i) { | ||
| 307 | last_base -= dev->caps.reserved_qps_cnt[sort[i]]; | ||
| 308 | dev->caps.reserved_qps_base[sort[i]] = last_base; | ||
| 309 | reserved_from_top += | ||
| 310 | dev->caps.reserved_qps_cnt[sort[i]]; | ||
| 311 | } | ||
| 312 | |||
| 313 | } | ||
| 314 | |||
| 315 | err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps, | ||
| 316 | (1 << 23) - 1, dev->caps.sqp_start + 8, | ||
| 317 | reserved_from_top); | ||
| 318 | if (err) | ||
| 319 | return err; | ||
| 320 | |||
| 321 | return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start); | ||
| 322 | } | ||
| 323 | |||
| 324 | void mlx4_cleanup_qp_table(struct mlx4_dev *dev) | ||
| 325 | { | ||
| 326 | mlx4_CONF_SPECIAL_QP(dev, 0); | ||
| 327 | mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap); | ||
| 328 | } | ||
| 329 | |||
| 330 | int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, | ||
| 331 | struct mlx4_qp_context *context) | ||
| 332 | { | ||
| 333 | struct mlx4_cmd_mailbox *mailbox; | ||
| 334 | int err; | ||
| 335 | |||
| 336 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 337 | if (IS_ERR(mailbox)) | ||
| 338 | return PTR_ERR(mailbox); | ||
| 339 | |||
| 340 | err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0, | ||
| 341 | MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A); | ||
| 342 | if (!err) | ||
| 343 | memcpy(context, mailbox->buf + 8, sizeof *context); | ||
| 344 | |||
| 345 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 346 | return err; | ||
| 347 | } | ||
| 348 | EXPORT_SYMBOL_GPL(mlx4_qp_query); | ||
| 349 | |||
| 350 | int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | ||
| 351 | struct mlx4_qp_context *context, | ||
| 352 | struct mlx4_qp *qp, enum mlx4_qp_state *qp_state) | ||
| 353 | { | ||
| 354 | int err; | ||
| 355 | int i; | ||
| 356 | enum mlx4_qp_state states[] = { | ||
| 357 | MLX4_QP_STATE_RST, | ||
| 358 | MLX4_QP_STATE_INIT, | ||
| 359 | MLX4_QP_STATE_RTR, | ||
| 360 | MLX4_QP_STATE_RTS | ||
| 361 | }; | ||
| 362 | |||
| 363 | for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { | ||
| 364 | context->flags &= cpu_to_be32(~(0xf << 28)); | ||
| 365 | context->flags |= cpu_to_be32(states[i + 1] << 28); | ||
| 366 | err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], | ||
| 367 | context, 0, 0, qp); | ||
| 368 | if (err) { | ||
| 369 | mlx4_err(dev, "Failed to bring QP to state: " | ||
| 370 | "%d with error: %d\n", | ||
| 371 | states[i + 1], err); | ||
| 372 | return err; | ||
| 373 | } | ||
| 374 | |||
| 375 | *qp_state = states[i + 1]; | ||
| 376 | } | ||
| 377 | |||
| 378 | return 0; | ||
| 379 | } | ||
| 380 | EXPORT_SYMBOL_GPL(mlx4_qp_to_ready); | ||
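
mlx4_qp_to_ready advances the QP one transition at a time along RST, INIT, RTR, RTS; before each step it rewrites bits 31:28 of the big-endian context->flags word to the target state and issues the corresponding modify command. The mask-and-merge on a big-endian field is worth seeing in isolation; here is a user-space rendering with htonl() standing in for cpu_to_be32() (the state encodings below are illustrative, not the kernel's enum mlx4_qp_state values):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

enum { ST_RST, ST_INIT, ST_RTR, ST_RTS };	/* illustrative encodings */

static uint32_t set_next_state(uint32_t flags_be, uint32_t state)
{
	flags_be &= htonl(~(0xfU << 28));	/* clear bits 31:28 */
	flags_be |= htonl(state << 28);		/* install the target state */
	return flags_be;
}

int main(void)
{
	uint32_t flags = 0;
	uint32_t states[] = { ST_RST, ST_INIT, ST_RTR, ST_RTS };

	for (unsigned i = 0; i + 1 < sizeof(states) / sizeof(states[0]); i++) {
		flags = set_next_state(flags, states[i + 1]);
		printf("-> state %u, flags 0x%08x\n",
		       (unsigned)states[i + 1], (unsigned)ntohl(flags));
	}
	return 0;
}
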
diff --git a/drivers/net/mlx4/reset.c b/drivers/net/mlx4/reset.c new file mode 100644 index 00000000000..11e7c1cb99b --- /dev/null +++ b/drivers/net/mlx4/reset.c | |||
| @@ -0,0 +1,185 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | ||
| 3 | * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. | ||
| 4 | * | ||
| 5 | * This software is available to you under a choice of one of two | ||
| 6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 7 | * General Public License (GPL) Version 2, available from the file | ||
| 8 | * COPYING in the main directory of this source tree, or the | ||
| 9 | * OpenIB.org BSD license below: | ||
| 10 | * | ||
| 11 | * Redistribution and use in source and binary forms, with or | ||
| 12 | * without modification, are permitted provided that the following | ||
| 13 | * conditions are met: | ||
| 14 | * | ||
| 15 | * - Redistributions of source code must retain the above | ||
| 16 | * copyright notice, this list of conditions and the following | ||
| 17 | * disclaimer. | ||
| 18 | * | ||
| 19 | * - Redistributions in binary form must reproduce the above | ||
| 20 | * copyright notice, this list of conditions and the following | ||
| 21 | * disclaimer in the documentation and/or other materials | ||
| 22 | * provided with the distribution. | ||
| 23 | * | ||
| 24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 31 | * SOFTWARE. | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <linux/errno.h> | ||
| 35 | #include <linux/pci.h> | ||
| 36 | #include <linux/delay.h> | ||
| 37 | #include <linux/slab.h> | ||
| 38 | #include <linux/jiffies.h> | ||
| 39 | |||
| 40 | #include "mlx4.h" | ||
| 41 | |||
| 42 | int mlx4_reset(struct mlx4_dev *dev) | ||
| 43 | { | ||
| 44 | void __iomem *reset; | ||
| 45 | u32 *hca_header = NULL; | ||
| 46 | int pcie_cap; | ||
| 47 | u16 devctl; | ||
| 48 | u16 linkctl; | ||
| 49 | u16 vendor; | ||
| 50 | unsigned long end; | ||
| 51 | u32 sem; | ||
| 52 | int i; | ||
| 53 | int err = 0; | ||
| 54 | |||
| 55 | #define MLX4_RESET_BASE 0xf0000 | ||
| 56 | #define MLX4_RESET_SIZE 0x400 | ||
| 57 | #define MLX4_SEM_OFFSET 0x3fc | ||
| 58 | #define MLX4_RESET_OFFSET 0x10 | ||
| 59 | #define MLX4_RESET_VALUE swab32(1) | ||
| 60 | |||
| 61 | #define MLX4_SEM_TIMEOUT_JIFFIES (10 * HZ) | ||
| 62 | #define MLX4_RESET_TIMEOUT_JIFFIES (2 * HZ) | ||
| 63 | |||
| 64 | /* | ||
| 65 | * Reset the chip. This is somewhat ugly because we have to | ||
| 66 | * save off the PCI header before reset and then restore it | ||
| 67 | * after the chip reboots. We skip config space offsets 22 | ||
| 68 | * and 23 since those have a special meaning. | ||
| 69 | */ | ||
| 70 | |||
| 71 | /* Do we need to save off the full 4K PCI Express header? */ | ||
| 72 | hca_header = kmalloc(256, GFP_KERNEL); | ||
| 73 | if (!hca_header) { | ||
| 74 | err = -ENOMEM; | ||
| 75 | mlx4_err(dev, "Couldn't allocate memory to save HCA " | ||
| 76 | "PCI header, aborting.\n"); | ||
| 77 | goto out; | ||
| 78 | } | ||
| 79 | |||
| 80 | pcie_cap = pci_pcie_cap(dev->pdev); | ||
| 81 | |||
| 82 | for (i = 0; i < 64; ++i) { | ||
| 83 | if (i == 22 || i == 23) | ||
| 84 | continue; | ||
| 85 | if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) { | ||
| 86 | err = -ENODEV; | ||
| 87 | mlx4_err(dev, "Couldn't save HCA " | ||
| 88 | "PCI header, aborting.\n"); | ||
| 89 | goto out; | ||
| 90 | } | ||
| 91 | } | ||
| 92 | |||
| 93 | reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE, | ||
| 94 | MLX4_RESET_SIZE); | ||
| 95 | if (!reset) { | ||
| 96 | err = -ENOMEM; | ||
| 97 | mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n"); | ||
| 98 | goto out; | ||
| 99 | } | ||
| 100 | |||
| 101 | /* grab HW semaphore to lock out flash updates */ | ||
| 102 | end = jiffies + MLX4_SEM_TIMEOUT_JIFFIES; | ||
| 103 | do { | ||
| 104 | sem = readl(reset + MLX4_SEM_OFFSET); | ||
| 105 | if (!sem) | ||
| 106 | break; | ||
| 107 | |||
| 108 | msleep(1); | ||
| 109 | } while (time_before(jiffies, end)); | ||
| 110 | |||
| 111 | if (sem) { | ||
| 112 | mlx4_err(dev, "Failed to obtain HW semaphore, aborting.\n"); | ||
| 113 | err = -EAGAIN; | ||
| 114 | iounmap(reset); | ||
| 115 | goto out; | ||
| 116 | } | ||
| 117 | |||
| 118 | /* actually hit reset */ | ||
| 119 | writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET); | ||
| 120 | iounmap(reset); | ||
| 121 | |||
| 122 | /* Docs say to wait one second before accessing device */ | ||
| 123 | msleep(1000); | ||
| 124 | |||
| 125 | end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES; | ||
| 126 | do { | ||
| 127 | if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) && | ||
| 128 | vendor != 0xffff) | ||
| 129 | break; | ||
| 130 | |||
| 131 | msleep(1); | ||
| 132 | } while (time_before(jiffies, end)); | ||
| 133 | |||
| 134 | if (vendor == 0xffff) { | ||
| 135 | err = -ENODEV; | ||
| 136 | mlx4_err(dev, "PCI device did not come back after reset, " | ||
| 137 | "aborting.\n"); | ||
| 138 | goto out; | ||
| 139 | } | ||
| 140 | |||
| 141 | /* Now restore the PCI headers */ | ||
| 142 | if (pcie_cap) { | ||
| 143 | devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4]; | ||
| 144 | if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_DEVCTL, | ||
| 145 | devctl)) { | ||
| 146 | err = -ENODEV; | ||
| 147 | mlx4_err(dev, "Couldn't restore HCA PCI Express " | ||
| 148 | "Device Control register, aborting.\n"); | ||
| 149 | goto out; | ||
| 150 | } | ||
| 151 | linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4]; | ||
| 152 | if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_LNKCTL, | ||
| 153 | linkctl)) { | ||
| 154 | err = -ENODEV; | ||
| 155 | mlx4_err(dev, "Couldn't restore HCA PCI Express " | ||
| 156 | "Link control register, aborting.\n"); | ||
| 157 | goto out; | ||
| 158 | } | ||
| 159 | } | ||
| 160 | |||
| 161 | for (i = 0; i < 16; ++i) { | ||
| 162 | if (i * 4 == PCI_COMMAND) | ||
| 163 | continue; | ||
| 164 | |||
| 165 | if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) { | ||
| 166 | err = -ENODEV; | ||
| 167 | mlx4_err(dev, "Couldn't restore HCA reg %x, " | ||
| 168 | "aborting.\n", i); | ||
| 169 | goto out; | ||
| 170 | } | ||
| 171 | } | ||
| 172 | |||
| 173 | if (pci_write_config_dword(dev->pdev, PCI_COMMAND, | ||
| 174 | hca_header[PCI_COMMAND / 4])) { | ||
| 175 | err = -ENODEV; | ||
| 176 | mlx4_err(dev, "Couldn't restore HCA COMMAND, " | ||
| 177 | "aborting.\n"); | ||
| 178 | goto out; | ||
| 179 | } | ||
| 180 | |||
| 181 | out: | ||
| 182 | kfree(hca_header); | ||
| 183 | |||
| 184 | return err; | ||
| 185 | } | ||
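
Both waits in mlx4_reset share one shape: compute a jiffies deadline, poll with msleep(1), and test the condition once more after the loop, since the loop can exit on timeout immediately after a successful sample. A user-space rendering of that deadline-poll pattern against a monotonic clock (the probe callback is a stand-in for readl() or the config-space read; not a kernel interface):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool deadline_passed(const struct timespec *end)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return now.tv_sec > end->tv_sec ||
	       (now.tv_sec == end->tv_sec && now.tv_nsec >= end->tv_nsec);
}

/* poll cond() roughly every millisecond until it holds or the deadline hits */
static bool wait_ready(bool (*cond)(void), long timeout_ms)
{
	struct timespec end;

	clock_gettime(CLOCK_MONOTONIC, &end);
	end.tv_sec  += timeout_ms / 1000;
	end.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (end.tv_nsec >= 1000000000L) {
		end.tv_sec++;
		end.tv_nsec -= 1000000000L;
	}

	do {
		if (cond())
			return true;
		nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
	} while (!deadline_passed(&end));

	return cond();	/* final check, like the post-loop sem/vendor tests */
}

static bool never_ready(void)
{
	return false;
}

int main(void)
{
	printf("ready: %d\n", wait_ready(never_ready, 50));
	return 0;
}
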
diff --git a/drivers/net/mlx4/sense.c b/drivers/net/mlx4/sense.c new file mode 100644 index 00000000000..e2337a7411d --- /dev/null +++ b/drivers/net/mlx4/sense.c | |||
| @@ -0,0 +1,156 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
| 3 | * | ||
| 4 | * This software is available to you under a choice of one of two | ||
| 5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 6 | * General Public License (GPL) Version 2, available from the file | ||
| 7 | * COPYING in the main directory of this source tree, or the | ||
| 8 | * OpenIB.org BSD license below: | ||
| 9 | * | ||
| 10 | * Redistribution and use in source and binary forms, with or | ||
| 11 | * without modification, are permitted provided that the following | ||
| 12 | * conditions are met: | ||
| 13 | * | ||
| 14 | * - Redistributions of source code must retain the above | ||
| 15 | * copyright notice, this list of conditions and the following | ||
| 16 | * disclaimer. | ||
| 17 | * | ||
| 18 | * - Redistributions in binary form must reproduce the above | ||
| 19 | * copyright notice, this list of conditions and the following | ||
| 20 | * disclaimer in the documentation and/or other materials | ||
| 21 | * provided with the distribution. | ||
| 22 | * | ||
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 30 | * SOFTWARE. | ||
| 31 | * | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <linux/errno.h> | ||
| 35 | #include <linux/if_ether.h> | ||
| 36 | |||
| 37 | #include <linux/mlx4/cmd.h> | ||
| 38 | |||
| 39 | #include "mlx4.h" | ||
| 40 | |||
| 41 | int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, | ||
| 42 | enum mlx4_port_type *type) | ||
| 43 | { | ||
| 44 | u64 out_param; | ||
| 45 | int err = 0; | ||
| 46 | |||
| 47 | err = mlx4_cmd_imm(dev, 0, &out_param, port, 0, | ||
| 48 | MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B); | ||
| 49 | if (err) { | ||
| 50 | mlx4_err(dev, "Sense command failed for port: %d\n", port); | ||
| 51 | return err; | ||
| 52 | } | ||
| 53 | |||
| 54 | if (out_param > 2) { | ||
| 55 | mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param); | ||
| 56 | return -EINVAL; | ||
| 57 | } | ||
| 58 | |||
| 59 | *type = out_param; | ||
| 60 | return 0; | ||
| 61 | } | ||
| 62 | |||
| 63 | void mlx4_do_sense_ports(struct mlx4_dev *dev, | ||
| 64 | enum mlx4_port_type *stype, | ||
| 65 | enum mlx4_port_type *defaults) | ||
| 66 | { | ||
| 67 | struct mlx4_sense *sense = &mlx4_priv(dev)->sense; | ||
| 68 | int err; | ||
| 69 | int i; | ||
| 70 | |||
| 71 | for (i = 1; i <= dev->caps.num_ports; i++) { | ||
| 72 | stype[i - 1] = 0; | ||
| 73 | if (sense->do_sense_port[i] && sense->sense_allowed[i] && | ||
| 74 | dev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { | ||
| 75 | err = mlx4_SENSE_PORT(dev, i, &stype[i - 1]); | ||
| 76 | if (err) | ||
| 77 | stype[i - 1] = defaults[i - 1]; | ||
| 78 | } else | ||
| 79 | stype[i - 1] = defaults[i - 1]; | ||
| 80 | } | ||
| 81 | |||
| 82 | /* | ||
| 83 | * Adjust port configuration: | ||
| 84 | * If port 1 sensed nothing and port 2 is IB, set both as IB | ||
| 85 | * If port 2 sensed nothing and port 1 is Eth, set both as Eth | ||
| 86 | */ | ||
| 87 | if (stype[0] == MLX4_PORT_TYPE_ETH) { | ||
| 88 | for (i = 1; i < dev->caps.num_ports; i++) | ||
| 89 | stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH; | ||
| 90 | } | ||
| 91 | if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) { | ||
| 92 | for (i = 0; i < dev->caps.num_ports - 1; i++) | ||
| 93 | stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB; | ||
| 94 | } | ||
| 95 | |||
| 96 | /* | ||
| 97 | * If sensed nothing, remain in current configuration. | ||
| 98 | */ | ||
| 99 | for (i = 0; i < dev->caps.num_ports; i++) | ||
| 100 | stype[i] = stype[i] ? stype[i] : defaults[i]; | ||
| 101 | |||
| 102 | } | ||
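
The reconciliation above applies three rules in order: an Ethernet result on port 1 pulls any undetermined later ports to Ethernet, an IB result on the last port pulls any undetermined earlier ports to IB, and whatever is still undetermined falls back to the caller's defaults. A compact standalone version of just that reconciliation (the port-type values mirror the apparent mlx4 convention of 0 = none, 1 = IB, 2 = Eth, as suggested by the out_param check in mlx4_SENSE_PORT):

#include <stdio.h>

enum { PT_NONE = 0, PT_IB = 1, PT_ETH = 2 };

static void reconcile(int *t, const int *defaults, int n)
{
	int i;

	if (t[0] == PT_ETH)		/* port 1 is Eth: spread Eth forward */
		for (i = 1; i < n; i++)
			t[i] = t[i] ? t[i] : PT_ETH;

	if (t[n - 1] == PT_IB)		/* last port is IB: spread IB backward */
		for (i = 0; i < n - 1; i++)
			t[i] = t[i] ? t[i] : PT_IB;

	for (i = 0; i < n; i++)		/* otherwise keep the defaults */
		t[i] = t[i] ? t[i] : defaults[i];
}

int main(void)
{
	int t[2] = { PT_NONE, PT_IB };
	int d[2] = { PT_ETH, PT_ETH };

	reconcile(t, d, 2);
	printf("port1=%d port2=%d\n", t[0], t[1]);	/* both IB */
	return 0;
}
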
| 103 | |||
| 104 | static void mlx4_sense_port(struct work_struct *work) | ||
| 105 | { | ||
| 106 | struct delayed_work *delay = to_delayed_work(work); | ||
| 107 | struct mlx4_sense *sense = container_of(delay, struct mlx4_sense, | ||
| 108 | sense_poll); | ||
| 109 | struct mlx4_dev *dev = sense->dev; | ||
| 110 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 111 | enum mlx4_port_type stype[MLX4_MAX_PORTS]; | ||
| 112 | |||
| 113 | mutex_lock(&priv->port_mutex); | ||
| 114 | mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]); | ||
| 115 | |||
| 116 | if (mlx4_check_port_params(dev, stype)) | ||
| 117 | goto sense_again; | ||
| 118 | |||
| 119 | if (mlx4_change_port_types(dev, stype)) | ||
| 120 | mlx4_err(dev, "Failed to change port_types\n"); | ||
| 121 | |||
| 122 | sense_again: | ||
| 123 | mutex_unlock(&priv->port_mutex); | ||
| 124 | queue_delayed_work(mlx4_wq, &sense->sense_poll, | ||
| 125 | round_jiffies_relative(MLX4_SENSE_RANGE)); | ||
| 126 | } | ||
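
mlx4_sense_port re-queues itself at the end of every pass, so sensing repeats at MLX4_SENSE_RANGE intervals until mlx4_stop_sense() cancels the work; note that the sense_again label skips only the port-type change, not the re-queue, so a rejected configuration is simply retried later. A tiny pthread-based stand-in for a self-rearming periodic worker (the interval and stop flag are illustrative, not the workqueue API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool stop;

static void do_sense(void)
{
	puts("sense pass");
}

/* one thread stands in for the workqueue; each pass arms the next one */
static void *poller(void *arg)
{
	(void)arg;
	while (!atomic_load(&stop)) {
		do_sense();
		usleep(100 * 1000);	/* stands in for MLX4_SENSE_RANGE */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, poller, NULL);
	usleep(350 * 1000);
	atomic_store(&stop, true);	/* stands in for cancel_delayed_work_sync() */
	pthread_join(t, NULL);
	return 0;
}
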
| 127 | |||
| 128 | void mlx4_start_sense(struct mlx4_dev *dev) | ||
| 129 | { | ||
| 130 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 131 | struct mlx4_sense *sense = &priv->sense; | ||
| 132 | |||
| 133 | if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) | ||
| 134 | return; | ||
| 135 | |||
| 136 | queue_delayed_work(mlx4_wq, &sense->sense_poll, | ||
| 137 | round_jiffies_relative(MLX4_SENSE_RANGE)); | ||
| 138 | } | ||
| 139 | |||
| 140 | void mlx4_stop_sense(struct mlx4_dev *dev) | ||
| 141 | { | ||
| 142 | cancel_delayed_work_sync(&mlx4_priv(dev)->sense.sense_poll); | ||
| 143 | } | ||
| 144 | |||
| 145 | void mlx4_sense_init(struct mlx4_dev *dev) | ||
| 146 | { | ||
| 147 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 148 | struct mlx4_sense *sense = &priv->sense; | ||
| 149 | int port; | ||
| 150 | |||
| 151 | sense->dev = dev; | ||
| 152 | for (port = 1; port <= dev->caps.num_ports; port++) | ||
| 153 | sense->do_sense_port[port] = 1; | ||
| 154 | |||
| 155 | INIT_DELAYED_WORK_DEFERRABLE(&sense->sense_poll, mlx4_sense_port); | ||
| 156 | } | ||
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c new file mode 100644 index 00000000000..3b07b80a045 --- /dev/null +++ b/drivers/net/mlx4/srq.c | |||
| @@ -0,0 +1,257 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | ||
| 3 | * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. | ||
| 4 | * | ||
| 5 | * This software is available to you under a choice of one of two | ||
| 6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 7 | * General Public License (GPL) Version 2, available from the file | ||
| 8 | * COPYING in the main directory of this source tree, or the | ||
| 9 | * OpenIB.org BSD license below: | ||
| 10 | * | ||
| 11 | * Redistribution and use in source and binary forms, with or | ||
| 12 | * without modification, are permitted provided that the following | ||
| 13 | * conditions are met: | ||
| 14 | * | ||
| 15 | * - Redistributions of source code must retain the above | ||
| 16 | * copyright notice, this list of conditions and the following | ||
| 17 | * disclaimer. | ||
| 18 | * | ||
| 19 | * - Redistributions in binary form must reproduce the above | ||
| 20 | * copyright notice, this list of conditions and the following | ||
| 21 | * disclaimer in the documentation and/or other materials | ||
| 22 | * provided with the distribution. | ||
| 23 | * | ||
| 24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 31 | * SOFTWARE. | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <linux/mlx4/cmd.h> | ||
| 35 | #include <linux/gfp.h> | ||
| 36 | |||
| 37 | #include "mlx4.h" | ||
| 38 | #include "icm.h" | ||
| 39 | |||
| 40 | struct mlx4_srq_context { | ||
| 41 | __be32 state_logsize_srqn; | ||
| 42 | u8 logstride; | ||
| 43 | u8 reserved1[3]; | ||
| 44 | u8 pg_offset; | ||
| 45 | u8 reserved2[3]; | ||
| 46 | u32 reserved3; | ||
| 47 | u8 log_page_size; | ||
| 48 | u8 reserved4[2]; | ||
| 49 | u8 mtt_base_addr_h; | ||
| 50 | __be32 mtt_base_addr_l; | ||
| 51 | __be32 pd; | ||
| 52 | __be16 limit_watermark; | ||
| 53 | __be16 wqe_cnt; | ||
| 54 | u16 reserved5; | ||
| 55 | __be16 wqe_counter; | ||
| 56 | u32 reserved6; | ||
| 57 | __be64 db_rec_addr; | ||
| 58 | }; | ||
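
The context struct mirrors the device-visible layout byte for byte, so multi-byte fields are declared __be16/__be32/__be64 and must be filled through cpu_to_be*(); state_logsize_srqn, for instance, carries log2 of the queue size in the byte above a 24-bit SRQ number (see mlx4_srq_alloc below). A user-space illustration of that packing, with htonl() standing in for cpu_to_be32():

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* pack log2(size) into bits 31:24 above a 24-bit SRQ number */
static uint32_t pack_logsize_srqn(unsigned log_size, uint32_t srqn)
{
	return htonl((log_size << 24) | (srqn & 0xffffff));
}

int main(void)
{
	uint32_t w = pack_logsize_srqn(10, 0x000123);	/* 1024 entries */

	printf("wire word: 0x%08x (host 0x%08x)\n",
	       (unsigned)w, (unsigned)ntohl(w));
	return 0;
}
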
| 59 | |||
| 60 | void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type) | ||
| 61 | { | ||
| 62 | struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; | ||
| 63 | struct mlx4_srq *srq; | ||
| 64 | |||
| 65 | spin_lock(&srq_table->lock); | ||
| 66 | |||
| 67 | srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1)); | ||
| 68 | if (srq) | ||
| 69 | atomic_inc(&srq->refcount); | ||
| 70 | |||
| 71 | spin_unlock(&srq_table->lock); | ||
| 72 | |||
| 73 | if (!srq) { | ||
| 74 | mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn); | ||
| 75 | return; | ||
| 76 | } | ||
| 77 | |||
| 78 | srq->event(srq, event_type); | ||
| 79 | |||
| 80 | if (atomic_dec_and_test(&srq->refcount)) | ||
| 81 | complete(&srq->free); | ||
| 82 | } | ||
| 83 | |||
| 84 | static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, | ||
| 85 | int srq_num) | ||
| 86 | { | ||
| 87 | return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ, | ||
| 88 | MLX4_CMD_TIME_CLASS_A); | ||
| 89 | } | ||
| 90 | |||
| 91 | static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, | ||
| 92 | int srq_num) | ||
| 93 | { | ||
| 94 | return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num, | ||
| 95 | mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ, | ||
| 96 | MLX4_CMD_TIME_CLASS_A); | ||
| 97 | } | ||
| 98 | |||
| 99 | static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark) | ||
| 100 | { | ||
| 101 | return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ, | ||
| 102 | MLX4_CMD_TIME_CLASS_B); | ||
| 103 | } | ||
| 104 | |||
| 105 | static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, | ||
| 106 | int srq_num) | ||
| 107 | { | ||
| 108 | return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ, | ||
| 109 | MLX4_CMD_TIME_CLASS_A); | ||
| 110 | } | ||
| 111 | |||
| 112 | int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, | ||
| 113 | u64 db_rec, struct mlx4_srq *srq) | ||
| 114 | { | ||
| 115 | struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; | ||
| 116 | struct mlx4_cmd_mailbox *mailbox; | ||
| 117 | struct mlx4_srq_context *srq_context; | ||
| 118 | u64 mtt_addr; | ||
| 119 | int err; | ||
| 120 | |||
| 121 | srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap); | ||
| 122 | if (srq->srqn == -1) | ||
| 123 | return -ENOMEM; | ||
| 124 | |||
| 125 | err = mlx4_table_get(dev, &srq_table->table, srq->srqn); | ||
| 126 | if (err) | ||
| 127 | goto err_out; | ||
| 128 | |||
| 129 | err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn); | ||
| 130 | if (err) | ||
| 131 | goto err_put; | ||
| 132 | |||
| 133 | spin_lock_irq(&srq_table->lock); | ||
| 134 | err = radix_tree_insert(&srq_table->tree, srq->srqn, srq); | ||
| 135 | spin_unlock_irq(&srq_table->lock); | ||
| 136 | if (err) | ||
| 137 | goto err_cmpt_put; | ||
| 138 | |||
| 139 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 140 | if (IS_ERR(mailbox)) { | ||
| 141 | err = PTR_ERR(mailbox); | ||
| 142 | goto err_radix; | ||
| 143 | } | ||
| 144 | |||
| 145 | srq_context = mailbox->buf; | ||
| 146 | memset(srq_context, 0, sizeof *srq_context); | ||
| 147 | |||
| 148 | srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) | | ||
| 149 | srq->srqn); | ||
| 150 | srq_context->logstride = srq->wqe_shift - 4; | ||
| 151 | srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; | ||
| 152 | |||
| 153 | mtt_addr = mlx4_mtt_addr(dev, mtt); | ||
| 154 | srq_context->mtt_base_addr_h = mtt_addr >> 32; | ||
| 155 | srq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); | ||
| 156 | srq_context->pd = cpu_to_be32(pdn); | ||
| 157 | srq_context->db_rec_addr = cpu_to_be64(db_rec); | ||
| 158 | |||
| 159 | err = mlx4_SW2HW_SRQ(dev, mailbox, srq->srqn); | ||
| 160 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 161 | if (err) | ||
| 162 | goto err_radix; | ||
| 163 | |||
| 164 | atomic_set(&srq->refcount, 1); | ||
| 165 | init_completion(&srq->free); | ||
| 166 | |||
| 167 | return 0; | ||
| 168 | |||
| 169 | err_radix: | ||
| 170 | spin_lock_irq(&srq_table->lock); | ||
| 171 | radix_tree_delete(&srq_table->tree, srq->srqn); | ||
| 172 | spin_unlock_irq(&srq_table->lock); | ||
| 173 | |||
| 174 | err_cmpt_put: | ||
| 175 | mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn); | ||
| 176 | |||
| 177 | err_put: | ||
| 178 | mlx4_table_put(dev, &srq_table->table, srq->srqn); | ||
| 179 | |||
| 180 | err_out: | ||
| 181 | mlx4_bitmap_free(&srq_table->bitmap, srq->srqn); | ||
| 182 | |||
| 183 | return err; | ||
| 184 | } | ||
| 185 | EXPORT_SYMBOL_GPL(mlx4_srq_alloc); | ||
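
mlx4_srq_alloc is a textbook goto-unwind ladder: the bitmap slot, two ICM table references, the radix-tree entry and the SW2HW command are acquired in order, and each error label releases exactly what was acquired before it, in reverse. A generic sketch of the idiom with made-up resources (here the second acquisition is made to fail, so only the first is unwound):

#include <stdio.h>

static int get_a(void) { puts("get a"); return 0; }
static int get_b(void) { puts("get b"); return -1; }	/* simulated failure */
static int get_c(void) { puts("get c"); return 0; }
static void put_a(void) { puts("put a"); }
static void put_b(void) { puts("put b"); }

static int setup(void)
{
	int err;

	err = get_a();
	if (err)
		goto err_out;
	err = get_b();
	if (err)
		goto err_put_a;
	err = get_c();
	if (err)
		goto err_put_b;
	return 0;

err_put_b:			/* unwind in reverse acquisition order */
	put_b();
err_put_a:
	put_a();
err_out:
	return err;
}

int main(void)
{
	printf("setup: %d\n", setup());
	return 0;
}
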
| 186 | |||
| 187 | void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq) | ||
| 188 | { | ||
| 189 | struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; | ||
| 190 | int err; | ||
| 191 | |||
| 192 | err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn); | ||
| 193 | if (err) | ||
| 194 | mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn); | ||
| 195 | |||
| 196 | spin_lock_irq(&srq_table->lock); | ||
| 197 | radix_tree_delete(&srq_table->tree, srq->srqn); | ||
| 198 | spin_unlock_irq(&srq_table->lock); | ||
| 199 | |||
| 200 | if (atomic_dec_and_test(&srq->refcount)) | ||
| 201 | complete(&srq->free); | ||
| 202 | wait_for_completion(&srq->free); | ||
| 203 | |||
| 204 | mlx4_table_put(dev, &srq_table->table, srq->srqn); | ||
| 205 | mlx4_bitmap_free(&srq_table->bitmap, srq->srqn); | ||
| 206 | } | ||
| 207 | EXPORT_SYMBOL_GPL(mlx4_srq_free); | ||
| 208 | |||
| 209 | int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark) | ||
| 210 | { | ||
| 211 | return mlx4_ARM_SRQ(dev, srq->srqn, limit_watermark); | ||
| 212 | } | ||
| 213 | EXPORT_SYMBOL_GPL(mlx4_srq_arm); | ||
| 214 | |||
| 215 | int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark) | ||
| 216 | { | ||
| 217 | struct mlx4_cmd_mailbox *mailbox; | ||
| 218 | struct mlx4_srq_context *srq_context; | ||
| 219 | int err; | ||
| 220 | |||
| 221 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 222 | if (IS_ERR(mailbox)) | ||
| 223 | return PTR_ERR(mailbox); | ||
| 224 | |||
| 225 | srq_context = mailbox->buf; | ||
| 226 | |||
| 227 | err = mlx4_QUERY_SRQ(dev, mailbox, srq->srqn); | ||
| 228 | if (err) | ||
| 229 | goto err_out; | ||
| 230 | *limit_watermark = be16_to_cpu(srq_context->limit_watermark); | ||
| 231 | |||
| 232 | err_out: | ||
| 233 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 234 | return err; | ||
| 235 | } | ||
| 236 | EXPORT_SYMBOL_GPL(mlx4_srq_query); | ||
| 237 | |||
| 238 | int mlx4_init_srq_table(struct mlx4_dev *dev) | ||
| 239 | { | ||
| 240 | struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; | ||
| 241 | int err; | ||
| 242 | |||
| 243 | spin_lock_init(&srq_table->lock); | ||
| 244 | INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC); | ||
| 245 | |||
| 246 | err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs, | ||
| 247 | dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0); | ||
| 248 | if (err) | ||
| 249 | return err; | ||
| 250 | |||
| 251 | return 0; | ||
| 252 | } | ||
| 253 | |||
| 254 | void mlx4_cleanup_srq_table(struct mlx4_dev *dev) | ||
| 255 | { | ||
| 256 | mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap); | ||
| 257 | } | ||
