80 files changed, 10546 insertions(+), 447 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index fddf29c057a1..73a8b561414b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3629,7 +3629,7 @@ S: Maintained
 F:	drivers/net/ethernet/icplus/ipg.*

 IPATH DRIVER
-M:	Mike Marciniszyn <infinipath@qlogic.com>
+M:	Mike Marciniszyn <infinipath@intel.com>
 L:	linux-rdma@vger.kernel.org
 S:	Maintained
 F:	drivers/infiniband/hw/ipath/
@@ -5448,7 +5448,7 @@ L: rtc-linux@googlegroups.com
 S:	Maintained

 QIB DRIVER
-M:	Mike Marciniszyn <infinipath@qlogic.com>
+M:	Mike Marciniszyn <infinipath@intel.com>
 L:	linux-rdma@vger.kernel.org
 S:	Supported
 F:	drivers/infiniband/hw/qib/
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index eb0add311dc8..a0f29c1d03bc 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -51,6 +51,7 @@ source "drivers/infiniband/hw/cxgb3/Kconfig"
 source "drivers/infiniband/hw/cxgb4/Kconfig"
 source "drivers/infiniband/hw/mlx4/Kconfig"
 source "drivers/infiniband/hw/nes/Kconfig"
+source "drivers/infiniband/hw/ocrdma/Kconfig"

 source "drivers/infiniband/ulp/ipoib/Kconfig"

diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index a3b2d8eac86e..bf846a14b9d3 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
 obj-$(CONFIG_INFINIBAND_CXGB4) += hw/cxgb4/
 obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
 obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
+obj-$(CONFIG_INFINIBAND_OCRDMA) += hw/ocrdma/
 obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
 obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
 obj-$(CONFIG_INFINIBAND_SRPT) += ulp/srpt/
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 59fbd704a1ec..55d5642eb10a 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1219,13 +1219,13 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	}
 	if (!conn_id) {
 		ret = -ENOMEM;
-		goto out;
+		goto err1;
 	}

 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
 	ret = cma_acquire_dev(conn_id);
 	if (ret)
-		goto release_conn_id;
+		goto err2;

 	conn_id->cm_id.ib = cm_id;
 	cm_id->context = conn_id;
@@ -1237,31 +1237,33 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	 */
 	atomic_inc(&conn_id->refcount);
 	ret = conn_id->id.event_handler(&conn_id->id, &event);
-	if (!ret) {
-		/*
-		 * Acquire mutex to prevent user executing rdma_destroy_id()
-		 * while we're accessing the cm_id.
-		 */
-		mutex_lock(&lock);
-		if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
-			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
-		mutex_unlock(&lock);
-		mutex_unlock(&conn_id->handler_mutex);
-		cma_deref_id(conn_id);
-		goto out;
-	}
+	if (ret)
+		goto err3;
+
+	/*
+	 * Acquire mutex to prevent user executing rdma_destroy_id()
+	 * while we're accessing the cm_id.
+	 */
+	mutex_lock(&lock);
+	if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
+		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+	mutex_unlock(&lock);
+	mutex_unlock(&conn_id->handler_mutex);
+	mutex_unlock(&listen_id->handler_mutex);
 	cma_deref_id(conn_id);
+	return 0;

+err3:
+	cma_deref_id(conn_id);
 	/* Destroy the CM ID by returning a non-zero value. */
 	conn_id->cm_id.ib = NULL;
-
-release_conn_id:
+err2:
 	cma_exch(conn_id, RDMA_CM_DESTROYING);
 	mutex_unlock(&conn_id->handler_mutex);
-	rdma_destroy_id(&conn_id->id);
-
-out:
+err1:
 	mutex_unlock(&listen_id->handler_mutex);
+	if (conn_id)
+		rdma_destroy_id(&conn_id->id);
 	return ret;
 }
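The rework above converts cma_req_handler()'s exits to the kernel's numbered unwind-label idiom: each errN label undoes exactly the steps acquired before failure point N, and the labels fall through in reverse order of acquisition. A minimal stand-alone sketch of that idiom, with hypothetical resources a/b/c and plain malloc/free standing in for the cma.c state:

#include <stdlib.h>

static int do_setup(void)
{
	void *a, *b, *c;
	int ret = -1;

	a = malloc(16);			/* acquire step 1 */
	if (!a)
		goto err1;
	b = malloc(16);			/* acquire step 2 */
	if (!b)
		goto err2;
	c = malloc(16);			/* acquire step 3 */
	if (!c)
		goto err3;

	free(c);			/* success path */
	free(b);
	free(a);
	return 0;

err3:	/* step 3 failed: undo step 2 ... */
	free(b);
err2:	/* ... then undo step 1 */
	free(a);
err1:
	return ret;
}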
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 71f0c0f7df94..a84112322071 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -269,7 +269,7 @@ void ib_umem_release(struct ib_umem *umem)
 	} else
 		down_write(&mm->mmap_sem);

-	current->mm->locked_vm -= diff;
+	current->mm->pinned_vm -= diff;
 	up_write(&mm->mmap_sem);
 	mmput(mm);
 	kfree(umem);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 4d27e4c3fe34..f9d0d7c413a2 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -41,13 +41,18 @@

 #include "uverbs.h"

-static struct lock_class_key pd_lock_key;
-static struct lock_class_key mr_lock_key;
-static struct lock_class_key cq_lock_key;
-static struct lock_class_key qp_lock_key;
-static struct lock_class_key ah_lock_key;
-static struct lock_class_key srq_lock_key;
-static struct lock_class_key xrcd_lock_key;
+struct uverbs_lock_class {
+	struct lock_class_key	key;
+	char			name[16];
+};
+
+static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
+static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
+static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
+static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
+static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
+static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
+static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };

 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
 	do {								\
@@ -83,13 +88,13 @@ static struct lock_class_key xrcd_lock_key;
  */

 static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
-		      struct ib_ucontext *context, struct lock_class_key *key)
+		      struct ib_ucontext *context, struct uverbs_lock_class *c)
 {
 	uobj->user_handle = user_handle;
 	uobj->context     = context;
 	kref_init(&uobj->ref);
 	init_rwsem(&uobj->mutex);
-	lockdep_set_class(&uobj->mutex, key);
+	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
 	uobj->live = 0;
 }

@@ -522,7 +527,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
 	if (!uobj)
 		return -ENOMEM;

-	init_uobj(uobj, 0, file->ucontext, &pd_lock_key);
+	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
 	down_write(&uobj->mutex);

 	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
@@ -750,7 +755,7 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
 		goto err_tree_mutex_unlock;
 	}

-	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_key);
+	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

 	down_write(&obj->uobject.mutex);

@@ -947,7 +952,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 	if (!uobj)
 		return -ENOMEM;

-	init_uobj(uobj, 0, file->ucontext, &mr_lock_key);
+	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
 	down_write(&uobj->mutex);

 	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
@@ -1115,7 +1120,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
 	if (!obj)
 		return -ENOMEM;

-	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key);
+	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
 	down_write(&obj->uobject.mutex);

 	if (cmd.comp_channel >= 0) {
@@ -1399,6 +1404,9 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;

+	if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
+		return -EPERM;
+
 	INIT_UDATA(&udata, buf + sizeof cmd,
 		   (unsigned long) cmd.response + sizeof resp,
 		   in_len - sizeof cmd, out_len - sizeof resp);
@@ -1407,7 +1415,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	if (!obj)
 		return -ENOMEM;

-	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
+	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
 	down_write(&obj->uevent.uobject.mutex);

 	if (cmd.qp_type == IB_QPT_XRC_TGT) {
@@ -1418,13 +1426,6 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 		}
 		device = xrcd->device;
 	} else {
-		pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
-		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
-		if (!pd || !scq) {
-			ret = -EINVAL;
-			goto err_put;
-		}
-
 		if (cmd.qp_type == IB_QPT_XRC_INI) {
 			cmd.max_recv_wr = cmd.max_recv_sge = 0;
 		} else {
@@ -1435,13 +1436,24 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 					goto err_put;
 				}
 			}
-			rcq = (cmd.recv_cq_handle == cmd.send_cq_handle) ?
-			       scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);
-			if (!rcq) {
-				ret = -EINVAL;
-				goto err_put;
+
+			if (cmd.recv_cq_handle != cmd.send_cq_handle) {
+				rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
+				if (!rcq) {
+					ret = -EINVAL;
+					goto err_put;
+				}
 			}
 		}
+
+		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
+		rcq = rcq ?: scq;
+		pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
+		if (!pd || !scq) {
+			ret = -EINVAL;
+			goto err_put;
+		}
+
 		device = pd->device;
 	}

@@ -1585,7 +1597,7 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
 	if (!obj)
 		return -ENOMEM;

-	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
+	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
 	down_write(&obj->uevent.uobject.mutex);

 	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
@@ -2272,7 +2284,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	if (!uobj)
 		return -ENOMEM;

-	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key);
+	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
 	down_write(&uobj->mutex);

 	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
@@ -2476,30 +2488,30 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
 	if (!obj)
 		return -ENOMEM;

-	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_key);
+	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
 	down_write(&obj->uevent.uobject.mutex);

-	pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
-	if (!pd) {
-		ret = -EINVAL;
-		goto err;
-	}
-
 	if (cmd->srq_type == IB_SRQT_XRC) {
-		attr.ext.xrc.cq  = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
-		if (!attr.ext.xrc.cq) {
-			ret = -EINVAL;
-			goto err_put_pd;
-		}
-
 		attr.ext.xrc.xrcd  = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
 		if (!attr.ext.xrc.xrcd) {
 			ret = -EINVAL;
-			goto err_put_cq;
+			goto err;
 		}

 		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
 		atomic_inc(&obj->uxrcd->refcnt);
+
+		attr.ext.xrc.cq  = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
+		if (!attr.ext.xrc.cq) {
+			ret = -EINVAL;
+			goto err_put_xrcd;
+		}
+	}
+
+	pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
+	if (!pd) {
+		ret = -EINVAL;
+		goto err_put_cq;
 	}

 	attr.event_handler  = ib_uverbs_srq_event_handler;
@@ -2576,17 +2588,17 @@ err_destroy:
 	ib_destroy_srq(srq);

 err_put:
-	if (cmd->srq_type == IB_SRQT_XRC) {
-		atomic_dec(&obj->uxrcd->refcnt);
-		put_uobj_read(xrcd_uobj);
-	}
+	put_pd_read(pd);

 err_put_cq:
 	if (cmd->srq_type == IB_SRQT_XRC)
 		put_cq_read(attr.ext.xrc.cq);

-err_put_pd:
-	put_pd_read(pd);
+err_put_xrcd:
+	if (cmd->srq_type == IB_SRQT_XRC) {
+		atomic_dec(&obj->uxrcd->refcnt);
+		put_uobj_read(xrcd_uobj);
+	}

 err:
 	put_uobj_write(&obj->uevent.uobject);
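Replacing the bare lock_class_key variables with a struct that pairs each key with a short name lets lockdep reports print "PD-uobj", "QP-uobj", etc. instead of an anonymous class. A sketch of the same pattern in isolation; the "foo" object is hypothetical, while init_rwsem() and lockdep_set_class_and_name() are the real kernel APIs used above:

#include <linux/rwsem.h>
#include <linux/lockdep.h>

struct named_lock_class {
	struct lock_class_key	key;
	char			name[16];
};

static struct named_lock_class foo_lock_class = { .name = "FOO-obj" };

struct foo {
	struct rw_semaphore mutex;
};

static void foo_init(struct foo *f)
{
	init_rwsem(&f->mutex);
	/* one lockdep class per object type, with a readable label */
	lockdep_set_class_and_name(&f->mutex, &foo_lock_class.key,
				   foo_lock_class.name);
}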
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 575b78045aaf..30f199e8579f 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -479,6 +479,7 @@ static const struct {
 		[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
 				IB_QP_PORT		|
 				IB_QP_QKEY),
+		[IB_QPT_RAW_PACKET] = IB_QP_PORT,
 		[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
 				IB_QP_PORT		|
 				IB_QP_ACCESS_FLAGS),
@@ -1183,23 +1184,33 @@ EXPORT_SYMBOL(ib_dealloc_fmr);

 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
 {
+	int ret;
+
 	if (!qp->device->attach_mcast)
 		return -ENOSYS;
 	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
 		return -EINVAL;

-	return qp->device->attach_mcast(qp, gid, lid);
+	ret = qp->device->attach_mcast(qp, gid, lid);
+	if (!ret)
+		atomic_inc(&qp->usecnt);
+	return ret;
 }
 EXPORT_SYMBOL(ib_attach_mcast);

 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
 {
+	int ret;
+
 	if (!qp->device->detach_mcast)
 		return -ENOSYS;
 	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
 		return -EINVAL;

-	return qp->device->detach_mcast(qp, gid, lid);
+	ret = qp->device->detach_mcast(qp, gid, lid);
+	if (!ret)
+		atomic_dec(&qp->usecnt);
+	return ret;
 }
 EXPORT_SYMBOL(ib_detach_mcast);
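ib_attach_mcast()/ib_detach_mcast() now keep qp->usecnt honest by touching the counter only when the driver callback succeeds, so a failed attach never leaves a stray reference behind. The same pattern in a stand-alone sketch, using user-space C11 atomics; struct obj and its callbacks are illustrative stand-ins, not the verbs API:

#include <stdatomic.h>

struct obj {
	atomic_int usecnt;		/* mirrors qp->usecnt */
	int (*attach)(struct obj *o);	/* mirrors device->attach_mcast */
	int (*detach)(struct obj *o);
};

static int obj_attach(struct obj *o)
{
	int ret = o->attach(o);

	if (!ret)			/* count only successful attaches */
		atomic_fetch_add(&o->usecnt, 1);
	return ret;
}

static int obj_detach(struct obj *o)
{
	int ret = o->detach(o);

	if (!ret)			/* drop only on successful detach */
		atomic_fetch_sub(&o->usecnt, 1);
	return ret;
}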
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
index 46b878ca2c3b..e11cf7299945 100644
--- a/drivers/infiniband/hw/cxgb4/Makefile
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -2,4 +2,4 @@ ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4

 obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o

-iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o
+iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 92b4c2b0308b..55ab284e22f2 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1362,7 +1362,10 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)

 	ep = lookup_tid(t, tid);
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	BUG_ON(!ep);
+	if (!ep) {
+		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
+		return 0;
+	}
 	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
 	case ABORTING:
@@ -1410,6 +1413,24 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 		return 0;
 	}

+	/*
+	 * Log interesting failures.
+	 */
+	switch (status) {
+	case CPL_ERR_CONN_RESET:
+	case CPL_ERR_CONN_TIMEDOUT:
+		break;
+	default:
+		printk(KERN_INFO MOD "Active open failure - "
+		       "atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
+		       atid, status, status2errno(status),
+		       &ep->com.local_addr.sin_addr.s_addr,
+		       ntohs(ep->com.local_addr.sin_port),
+		       &ep->com.remote_addr.sin_addr.s_addr,
+		       ntohs(ep->com.remote_addr.sin_port));
+		break;
+	}
+
 	connect_reply_upcall(ep, status2errno(status));
 	state_set(&ep->com, DEAD);

@@ -1593,7 +1614,7 @@ static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
 					n, n->dev, 0);
 		if (!ep->l2t)
 			goto out;
-		ep->mtu = dst_mtu(ep->dst);
+		ep->mtu = dst_mtu(dst);
 		ep->tx_chan = cxgb4_port_chan(n->dev);
 		ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
 		step = cdev->rdev.lldi.ntxq /
@@ -2656,6 +2677,12 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
 	unsigned int tid = GET_TID(req);

 	ep = lookup_tid(t, tid);
+	if (!ep) {
+		printk(KERN_WARNING MOD
+		       "Abort on non-existent endpoint, tid %d\n", tid);
+		kfree_skb(skb);
+		return 0;
+	}
 	if (is_neg_adv_abort(req->status)) {
 		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
 		     ep->hwtid);
@@ -2667,11 +2694,8 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)

 	/*
 	 * Wake up any threads in rdma_init() or rdma_fini().
-	 * However, this is not needed if com state is just
-	 * MPA_REQ_SENT
 	 */
-	if (ep->com.state != MPA_REQ_SENT)
-		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 	sched(dev, skb);
 	return 0;
 }
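Both abort handlers above replace BUG_ON(!ep) with a warn-and-drop path, so a CPL message that races with endpoint teardown gets logged and consumed instead of crashing the host. A compact sketch of that defensive shape; lookup() and process() are hypothetical stand-ins for lookup_tid() and the real handler:

#include <stdio.h>

struct endpoint { int state; };

/* hypothetical stand-ins for the driver's tid table and handler */
static struct endpoint *lookup(unsigned int tid) { (void)tid; return NULL; }
static int process(struct endpoint *ep) { (void)ep; return 0; }

static int handle_event(unsigned int tid)
{
	struct endpoint *ep = lookup(tid);

	if (!ep) {
		/* stale event raced with endpoint teardown: warn, drop it */
		fprintf(stderr, "event for non-existent endpoint, tid %u\n",
			tid);
		return 0;	/* consumed, not fatal */
	}
	return process(ep);
}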
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 6d0df6ec161b..cb4ecd783700 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/debugfs.h>
+#include <linux/vmalloc.h>

 #include <rdma/ib_verbs.h>

@@ -44,6 +45,12 @@ MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);

+struct uld_ctx {
+	struct list_head entry;
+	struct cxgb4_lld_info lldi;
+	struct c4iw_dev *dev;
+};
+
 static LIST_HEAD(uld_ctx_list);
 static DEFINE_MUTEX(dev_mutex);

@@ -115,7 +122,7 @@ static int qp_release(struct inode *inode, struct file *file)
 		printk(KERN_INFO "%s null qpd?\n", __func__);
 		return 0;
 	}
-	kfree(qpd->buf);
+	vfree(qpd->buf);
 	kfree(qpd);
 	return 0;
 }
@@ -139,7 +146,7 @@ static int qp_open(struct inode *inode, struct file *file)
 	spin_unlock_irq(&qpd->devp->lock);

 	qpd->bufsize = count * 128;
-	qpd->buf = kmalloc(qpd->bufsize, GFP_KERNEL);
+	qpd->buf = vmalloc(qpd->bufsize);
 	if (!qpd->buf) {
 		ret = -ENOMEM;
 		goto err1;
@@ -240,6 +247,81 @@ static const struct file_operations stag_debugfs_fops = {
 	.llseek  = default_llseek,
 };

+static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY"};
+
+static int stats_show(struct seq_file *seq, void *v)
+{
+	struct c4iw_dev *dev = seq->private;
+
+	seq_printf(seq, "  Object: %10s %10s %10s %10s\n", "Total", "Current",
+		   "Max", "Fail");
+	seq_printf(seq, "    PDID: %10llu %10llu %10llu %10llu\n",
+		   dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
+		   dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
+	seq_printf(seq, "     QID: %10llu %10llu %10llu %10llu\n",
+		   dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
+		   dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
+	seq_printf(seq, "  TPTMEM: %10llu %10llu %10llu %10llu\n",
+		   dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
+		   dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
+	seq_printf(seq, "  PBLMEM: %10llu %10llu %10llu %10llu\n",
+		   dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
+		   dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
+	seq_printf(seq, "  RQTMEM: %10llu %10llu %10llu %10llu\n",
+		   dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
+		   dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
+	seq_printf(seq, " OCQPMEM: %10llu %10llu %10llu %10llu\n",
+		   dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
+		   dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
+	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
+	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
+	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
+	seq_printf(seq, " DB State: %s Transitions %llu\n",
+		   db_state_str[dev->db_state],
+		   dev->rdev.stats.db_state_transitions);
+	return 0;
+}
+
+static int stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, stats_show, inode->i_private);
+}
+
+static ssize_t stats_clear(struct file *file, const char __user *buf,
+			   size_t count, loff_t *pos)
+{
+	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
+
+	mutex_lock(&dev->rdev.stats.lock);
+	dev->rdev.stats.pd.max = 0;
+	dev->rdev.stats.pd.fail = 0;
+	dev->rdev.stats.qid.max = 0;
+	dev->rdev.stats.qid.fail = 0;
+	dev->rdev.stats.stag.max = 0;
+	dev->rdev.stats.stag.fail = 0;
+	dev->rdev.stats.pbl.max = 0;
+	dev->rdev.stats.pbl.fail = 0;
+	dev->rdev.stats.rqt.max = 0;
+	dev->rdev.stats.rqt.fail = 0;
+	dev->rdev.stats.ocqp.max = 0;
+	dev->rdev.stats.ocqp.fail = 0;
+	dev->rdev.stats.db_full = 0;
+	dev->rdev.stats.db_empty = 0;
+	dev->rdev.stats.db_drop = 0;
+	dev->rdev.stats.db_state_transitions = 0;
+	mutex_unlock(&dev->rdev.stats.lock);
+	return count;
+}
+
+static const struct file_operations stats_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = stats_open,
+	.release = single_release,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.write   = stats_clear,
+};
+
 static int setup_debugfs(struct c4iw_dev *devp)
 {
 	struct dentry *de;
@@ -256,6 +338,12 @@ static int setup_debugfs(struct c4iw_dev *devp)
 				 (void *)devp, &stag_debugfs_fops);
 	if (de && de->d_inode)
 		de->d_inode->i_size = 4096;
+
+	de = debugfs_create_file("stats", S_IWUSR, devp->debugfs_root,
+				 (void *)devp, &stats_debugfs_fops);
+	if (de && de->d_inode)
+		de->d_inode->i_size = 4096;
+
 	return 0;
 }

@@ -269,9 +357,13 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
 	list_for_each_safe(pos, nxt, &uctx->qpids) {
 		entry = list_entry(pos, struct c4iw_qid_list, entry);
 		list_del_init(&entry->entry);
-		if (!(entry->qid & rdev->qpmask))
-			c4iw_put_resource(&rdev->resource.qid_fifo, entry->qid,
-					  &rdev->resource.qid_fifo_lock);
+		if (!(entry->qid & rdev->qpmask)) {
+			c4iw_put_resource(&rdev->resource.qid_table,
+					  entry->qid);
+			mutex_lock(&rdev->stats.lock);
+			rdev->stats.qid.cur -= rdev->qpmask + 1;
+			mutex_unlock(&rdev->stats.lock);
+		}
 		kfree(entry);
 	}

@@ -332,6 +424,13 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 		goto err1;
 	}

+	rdev->stats.pd.total = T4_MAX_NUM_PD;
+	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
+	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
+	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
+	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
+	rdev->stats.qid.total = rdev->lldi.vr->qp.size;
+
 	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
 	if (err) {
 		printk(KERN_ERR MOD "error %d initializing resources\n", err);
@@ -370,12 +469,6 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 	c4iw_destroy_resource(&rdev->resource);
 }

-struct uld_ctx {
-	struct list_head entry;
-	struct cxgb4_lld_info lldi;
-	struct c4iw_dev *dev;
-};
-
 static void c4iw_dealloc(struct uld_ctx *ctx)
 {
 	c4iw_rdev_close(&ctx->dev->rdev);
@@ -440,6 +533,8 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	idr_init(&devp->qpidr);
 	idr_init(&devp->mmidr);
 	spin_lock_init(&devp->lock);
+	mutex_init(&devp->rdev.stats.lock);
+	mutex_init(&devp->db_mutex);

 	if (c4iw_debugfs_root) {
 		devp->debugfs_root = debugfs_create_dir(
@@ -585,11 +680,234 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 	return 0;
 }

+static int disable_qp_db(int id, void *p, void *data)
+{
+	struct c4iw_qp *qp = p;
+
+	t4_disable_wq_db(&qp->wq);
+	return 0;
+}
+
+static void stop_queues(struct uld_ctx *ctx)
+{
+	spin_lock_irq(&ctx->dev->lock);
+	if (ctx->dev->db_state == NORMAL) {
+		ctx->dev->rdev.stats.db_state_transitions++;
+		ctx->dev->db_state = FLOW_CONTROL;
+		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
+	}
+	spin_unlock_irq(&ctx->dev->lock);
+}
+
+static int enable_qp_db(int id, void *p, void *data)
+{
+	struct c4iw_qp *qp = p;
+
+	t4_enable_wq_db(&qp->wq);
+	return 0;
+}
+
+static void resume_queues(struct uld_ctx *ctx)
+{
+	spin_lock_irq(&ctx->dev->lock);
+	if (ctx->dev->qpcnt <= db_fc_threshold &&
+	    ctx->dev->db_state == FLOW_CONTROL) {
+		ctx->dev->db_state = NORMAL;
+		ctx->dev->rdev.stats.db_state_transitions++;
+		idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
+	}
+	spin_unlock_irq(&ctx->dev->lock);
+}
+
+struct qp_list {
+	unsigned idx;
+	struct c4iw_qp **qps;
+};
+
+static int add_and_ref_qp(int id, void *p, void *data)
+{
+	struct qp_list *qp_listp = data;
+	struct c4iw_qp *qp = p;
+
+	c4iw_qp_add_ref(&qp->ibqp);
+	qp_listp->qps[qp_listp->idx++] = qp;
+	return 0;
+}
+
+static int count_qps(int id, void *p, void *data)
+{
+	unsigned *countp = data;
+	(*countp)++;
+	return 0;
+}
+
+static void deref_qps(struct qp_list qp_list)
+{
+	int idx;
+
+	for (idx = 0; idx < qp_list.idx; idx++)
+		c4iw_qp_rem_ref(&qp_list.qps[idx]->ibqp);
+}
+
+static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
+{
+	int idx;
+	int ret;
+
+	for (idx = 0; idx < qp_list->idx; idx++) {
+		struct c4iw_qp *qp = qp_list->qps[idx];
+
+		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
+					  qp->wq.sq.qid,
+					  t4_sq_host_wq_pidx(&qp->wq),
+					  t4_sq_wq_size(&qp->wq));
+		if (ret) {
+			printk(KERN_ERR MOD "%s: Fatal error - "
+			       "DB overflow recovery failed - "
+			       "error syncing SQ qid %u\n",
+			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
+			return;
+		}
+
+		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
+					  qp->wq.rq.qid,
+					  t4_rq_host_wq_pidx(&qp->wq),
+					  t4_rq_wq_size(&qp->wq));
+
+		if (ret) {
+			printk(KERN_ERR MOD "%s: Fatal error - "
+			       "DB overflow recovery failed - "
+			       "error syncing RQ qid %u\n",
+			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
+			return;
+		}
+
+		/* Wait for the dbfifo to drain */
+		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(usecs_to_jiffies(10));
+		}
+	}
+}
+
+static void recover_queues(struct uld_ctx *ctx)
+{
+	int count = 0;
+	struct qp_list qp_list;
+	int ret;
+
+	/* lock out kernel db ringers */
+	mutex_lock(&ctx->dev->db_mutex);
+
+	/* put all queues in to recovery mode */
+	spin_lock_irq(&ctx->dev->lock);
+	ctx->dev->db_state = RECOVERY;
+	ctx->dev->rdev.stats.db_state_transitions++;
+	idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
+	spin_unlock_irq(&ctx->dev->lock);
+
+	/* slow everybody down */
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(usecs_to_jiffies(1000));
+
+	/* Wait for the dbfifo to completely drain. */
+	while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(usecs_to_jiffies(10));
+	}
+
+	/* flush the SGE contexts */
+	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
+	if (ret) {
+		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
+		       pci_name(ctx->lldi.pdev));
+		goto out;
+	}
+
+	/* Count active queues so we can build a list of queues to recover */
+	spin_lock_irq(&ctx->dev->lock);
+	idr_for_each(&ctx->dev->qpidr, count_qps, &count);
+
+	qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
+	if (!qp_list.qps) {
+		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
+		       pci_name(ctx->lldi.pdev));
+		spin_unlock_irq(&ctx->dev->lock);
+		goto out;
+	}
+	qp_list.idx = 0;
+
+	/* add and ref each qp so it doesn't get freed */
+	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);
+
+	spin_unlock_irq(&ctx->dev->lock);
+
+	/* now traverse the list in a safe context to recover the db state*/
+	recover_lost_dbs(ctx, &qp_list);
+
+	/* we're almost done!  deref the qps and clean up */
+	deref_qps(qp_list);
+	kfree(qp_list.qps);
+
+	/* Wait for the dbfifo to completely drain again */
+	while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(usecs_to_jiffies(10));
+	}
+
+	/* resume the queues */
+	spin_lock_irq(&ctx->dev->lock);
+	if (ctx->dev->qpcnt > db_fc_threshold)
+		ctx->dev->db_state = FLOW_CONTROL;
+	else {
+		ctx->dev->db_state = NORMAL;
+		idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
+	}
+	ctx->dev->rdev.stats.db_state_transitions++;
+	spin_unlock_irq(&ctx->dev->lock);
+
+out:
+	/* start up kernel db ringers again */
+	mutex_unlock(&ctx->dev->db_mutex);
+}
+
+static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
+{
+	struct uld_ctx *ctx = handle;
+
+	switch (control) {
+	case CXGB4_CONTROL_DB_FULL:
+		stop_queues(ctx);
+		mutex_lock(&ctx->dev->rdev.stats.lock);
+		ctx->dev->rdev.stats.db_full++;
+		mutex_unlock(&ctx->dev->rdev.stats.lock);
+		break;
+	case CXGB4_CONTROL_DB_EMPTY:
+		resume_queues(ctx);
+		mutex_lock(&ctx->dev->rdev.stats.lock);
+		ctx->dev->rdev.stats.db_empty++;
+		mutex_unlock(&ctx->dev->rdev.stats.lock);
+		break;
+	case CXGB4_CONTROL_DB_DROP:
+		recover_queues(ctx);
+		mutex_lock(&ctx->dev->rdev.stats.lock);
+		ctx->dev->rdev.stats.db_drop++;
+		mutex_unlock(&ctx->dev->rdev.stats.lock);
+		break;
+	default:
+		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
+		       pci_name(ctx->lldi.pdev), control);
+		break;
+	}
+	return 0;
+}
+
 static struct cxgb4_uld_info c4iw_uld_info = {
 	.name = DRV_NAME,
 	.add = c4iw_uld_add,
 	.rx_handler = c4iw_uld_rx_handler,
 	.state_change = c4iw_uld_state_change,
+	.control = c4iw_uld_control,
 };

 static int __init c4iw_init_module(void)
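The new "stats" debugfs file combines two standard kernel idioms: single_open()/seq_read for the read side and a custom .write handler that clears the counters. A minimal, self-contained version of the same pattern; the "demo" names are illustrative, and the fops wiring mirrors stats_debugfs_fops above:

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static u64 demo_counter;

static int demo_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "counter: %llu\n", demo_counter);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, inode->i_private);
}

static ssize_t demo_clear(struct file *file, const char __user *buf,
			  size_t count, loff_t *pos)
{
	demo_counter = 0;	/* any write resets the counter */
	return count;
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
	.write   = demo_clear,
};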
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 397cb36cf103..cf2f6b47617a 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -84,7 +84,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 	struct c4iw_qp *qhp;
 	u32 cqid;

-	spin_lock(&dev->lock);
+	spin_lock_irq(&dev->lock);
 	qhp = get_qhp(dev, CQE_QPID(err_cqe));
 	if (!qhp) {
 		printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
@@ -93,7 +93,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
 		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
 		       CQE_WRID_LOW(err_cqe));
-		spin_unlock(&dev->lock);
+		spin_unlock_irq(&dev->lock);
 		goto out;
 	}

@@ -109,13 +109,13 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
 		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
 		       CQE_WRID_LOW(err_cqe));
-		spin_unlock(&dev->lock);
+		spin_unlock_irq(&dev->lock);
 		goto out;
 	}

 	c4iw_qp_add_ref(&qhp->ibqp);
 	atomic_inc(&chp->refcnt);
-	spin_unlock(&dev->lock);
+	spin_unlock_irq(&dev->lock);

 	/* Bad incoming write */
 	if (RQ_TYPE(err_cqe) &&
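The switch from spin_lock() to spin_lock_irq() here presumably keeps every acquisition of dev->lock irq-disabling, matching the new doorbell paths in device.c (stop_queues(), recover_queues()) that take the same lock with interrupts off. A one-function sketch of the invariant, using the kernel spinlock API with hypothetical data:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* stands in for dev->lock */
static int demo_state;			/* stands in for the qp/db state */

/*
 * Once any user of demo_lock relies on interrupts being disabled inside
 * the critical section, every acquisition should use the _irq form so
 * all holders see the same locking discipline.
 */
static void demo_update(int v)
{
	spin_lock_irq(&demo_lock);	/* not plain spin_lock() */
	demo_state = v;
	spin_unlock_irq(&demo_lock);
}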
diff --git a/drivers/infiniband/hw/cxgb4/id_table.c b/drivers/infiniband/hw/cxgb4/id_table.c
new file mode 100644
index 000000000000..f95e5df30db2
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/id_table.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2011 Chelsio Communications. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include "iw_cxgb4.h"
+
+#define RANDOM_SKIP 16
+
+/*
+ * Trivial bitmap-based allocator. If the random flag is set, the
+ * allocator is designed to:
+ * - pseudo-randomize the id returned such that it is not trivially predictable.
+ * - avoid reuse of recently used id (at the expense of predictability)
+ */
+u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
+{
+	unsigned long flags;
+	u32 obj;
+
+	spin_lock_irqsave(&alloc->lock, flags);
+
+	obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
+	if (obj >= alloc->max)
+		obj = find_first_zero_bit(alloc->table, alloc->max);
+
+	if (obj < alloc->max) {
+		if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
+			alloc->last += random32() % RANDOM_SKIP;
+		else
+			alloc->last = obj + 1;
+		if (alloc->last >= alloc->max)
+			alloc->last = 0;
+		set_bit(obj, alloc->table);
+		obj += alloc->start;
+	} else
+		obj = -1;
+
+	spin_unlock_irqrestore(&alloc->lock, flags);
+	return obj;
+}
+
+void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj)
+{
+	unsigned long flags;
+
+	obj -= alloc->start;
+	BUG_ON((int)obj < 0);
+
+	spin_lock_irqsave(&alloc->lock, flags);
+	clear_bit(obj, alloc->table);
+	spin_unlock_irqrestore(&alloc->lock, flags);
+}
+
+int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
+			u32 reserved, u32 flags)
+{
+	int i;
+
+	alloc->start = start;
+	alloc->flags = flags;
+	if (flags & C4IW_ID_TABLE_F_RANDOM)
+		alloc->last = random32() % RANDOM_SKIP;
+	else
+		alloc->last = 0;
+	alloc->max = num;
+	spin_lock_init(&alloc->lock);
+	alloc->table = kmalloc(BITS_TO_LONGS(num) * sizeof(long),
+			       GFP_KERNEL);
+	if (!alloc->table)
+		return -ENOMEM;
+
+	bitmap_zero(alloc->table, num);
+	if (!(alloc->flags & C4IW_ID_TABLE_F_EMPTY))
+		for (i = 0; i < reserved; ++i)
+			set_bit(i, alloc->table);
+
+	return 0;
+}
+
+void c4iw_id_table_free(struct c4iw_id_table *alloc)
+{
+	kfree(alloc->table);
+}
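A sketch of how the new allocator is driven, based only on the API above; the table geometry is made up for illustration. With C4IW_ID_TABLE_F_RANDOM set, ids come back in a hard-to-predict order, and unless C4IW_ID_TABLE_F_EMPTY is given, the first `reserved` ids are pre-marked as in use:

#include "iw_cxgb4.h"

static int demo_ids(void)
{
	struct c4iw_id_table tbl;
	u32 id;
	int ret;

	/* ids 100..163, first 4 reserved, randomized handout */
	ret = c4iw_id_table_alloc(&tbl, 100, 64, 4, C4IW_ID_TABLE_F_RANDOM);
	if (ret)
		return ret;

	id = c4iw_id_alloc(&tbl);	/* returns (u32)-1 when exhausted */
	if (id != (u32)-1)
		c4iw_id_free(&tbl, id);

	c4iw_id_table_free(&tbl);
	return 0;
}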
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 1357c5bf209b..9beb3a9f0336 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -45,7 +45,6 @@
 #include <linux/kref.h>
 #include <linux/timer.h>
 #include <linux/io.h>
-#include <linux/kfifo.h>

 #include <asm/byteorder.h>

@@ -79,13 +78,22 @@ static inline void *cplhdr(struct sk_buff *skb)
 	return skb->data;
 }

+#define C4IW_ID_TABLE_F_RANDOM 1       /* Pseudo-randomize the id's returned */
+#define C4IW_ID_TABLE_F_EMPTY  2       /* Table is initially empty */
+
+struct c4iw_id_table {
+	u32 flags;
+	u32 start;              /* logical minimal id */
+	u32 last;               /* hint for find */
+	u32 max;
+	spinlock_t lock;
+	unsigned long *table;
+};
+
 struct c4iw_resource {
-	struct kfifo tpt_fifo;
-	spinlock_t tpt_fifo_lock;
-	struct kfifo qid_fifo;
-	spinlock_t qid_fifo_lock;
-	struct kfifo pdid_fifo;
-	spinlock_t pdid_fifo_lock;
+	struct c4iw_id_table tpt_table;
+	struct c4iw_id_table qid_table;
+	struct c4iw_id_table pdid_table;
 };

 struct c4iw_qid_list {
@@ -103,6 +111,27 @@ enum c4iw_rdev_flags {
 	T4_FATAL_ERROR = (1<<0),
 };

+struct c4iw_stat {
+	u64 total;
+	u64 cur;
+	u64 max;
+	u64 fail;
+};
+
+struct c4iw_stats {
+	struct mutex lock;
+	struct c4iw_stat qid;
+	struct c4iw_stat pd;
+	struct c4iw_stat stag;
+	struct c4iw_stat pbl;
+	struct c4iw_stat rqt;
+	struct c4iw_stat ocqp;
+	u64 db_full;
+	u64 db_empty;
+	u64 db_drop;
+	u64 db_state_transitions;
+};
+
 struct c4iw_rdev {
 	struct c4iw_resource resource;
 	unsigned long qpshift;
@@ -117,6 +146,7 @@ struct c4iw_rdev {
 	struct cxgb4_lld_info lldi;
 	unsigned long oc_mw_pa;
 	void __iomem *oc_mw_kva;
+	struct c4iw_stats stats;
 };

 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -175,6 +205,12 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
 	return wr_waitp->ret;
 }

+enum db_state {
+	NORMAL = 0,
+	FLOW_CONTROL = 1,
+	RECOVERY = 2
+};
+
 struct c4iw_dev {
 	struct ib_device ibdev;
 	struct c4iw_rdev rdev;
@@ -183,7 +219,10 @@ struct c4iw_dev {
 	struct idr qpidr;
 	struct idr mmidr;
 	spinlock_t lock;
+	struct mutex db_mutex;
 	struct dentry *debugfs_root;
+	enum db_state db_state;
+	int qpcnt;
 };

 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -211,29 +250,57 @@ static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
 	return idr_find(&rhp->mmidr, mmid);
 }

-static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
-				void *handle, u32 id)
+static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
+				 void *handle, u32 id, int lock)
 {
 	int ret;
 	int newid;

 	do {
-		if (!idr_pre_get(idr, GFP_KERNEL))
+		if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
 			return -ENOMEM;
-		spin_lock_irq(&rhp->lock);
+		if (lock)
+			spin_lock_irq(&rhp->lock);
 		ret = idr_get_new_above(idr, handle, id, &newid);
-		BUG_ON(newid != id);
-		spin_unlock_irq(&rhp->lock);
+		BUG_ON(!ret && newid != id);
+		if (lock)
+			spin_unlock_irq(&rhp->lock);
 	} while (ret == -EAGAIN);

 	return ret;
 }

-static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
+static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
+				void *handle, u32 id)
+{
+	return _insert_handle(rhp, idr, handle, id, 1);
+}
+
+static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
+				       void *handle, u32 id)
+{
+	return _insert_handle(rhp, idr, handle, id, 0);
+}
+
+static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
+				  u32 id, int lock)
 {
-	spin_lock_irq(&rhp->lock);
+	if (lock)
+		spin_lock_irq(&rhp->lock);
 	idr_remove(idr, id);
-	spin_unlock_irq(&rhp->lock);
+	if (lock)
+		spin_unlock_irq(&rhp->lock);
+}
+
+static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
+{
+	_remove_handle(rhp, idr, id, 1);
+}
+
+static inline void remove_handle_nolock(struct c4iw_dev *rhp,
+					struct idr *idr, u32 id)
+{
+	_remove_handle(rhp, idr, id, 0);
 }

 struct c4iw_pd {
@@ -353,6 +420,8 @@ struct c4iw_qp_attributes {
 	struct c4iw_ep *llp_stream_handle;
 	u8 layer_etype;
 	u8 ecode;
+	u16 sq_db_inc;
+	u16 rq_db_inc;
 };

 struct c4iw_qp {
@@ -427,6 +496,8 @@ static inline void insert_mmap(struct c4iw_ucontext *ucontext,

 enum c4iw_qp_attr_mask {
 	C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
+	C4IW_QP_ATTR_SQ_DB = 1<<1,
+	C4IW_QP_ATTR_RQ_DB = 1<<2,
 	C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
 	C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
 	C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
@@ -480,6 +551,23 @@ static inline int c4iw_convert_state(enum ib_qp_state ib_state)
 	}
 }

+static inline int to_ib_qp_state(int c4iw_qp_state)
+{
+	switch (c4iw_qp_state) {
+	case C4IW_QP_STATE_IDLE:
+		return IB_QPS_INIT;
+	case C4IW_QP_STATE_RTS:
+		return IB_QPS_RTS;
+	case C4IW_QP_STATE_CLOSING:
+		return IB_QPS_SQD;
+	case C4IW_QP_STATE_TERMINATE:
+		return IB_QPS_SQE;
+	case C4IW_QP_STATE_ERROR:
+		return IB_QPS_ERR;
+	}
+	return IB_QPS_ERR;
+}
+
 static inline u32 c4iw_ib_to_tpt_access(int a)
 {
 	return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
@@ -693,14 +781,20 @@ static inline int compute_wscale(int win)
 	return wscale;
 }

+u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
+void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
+int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
+			u32 reserved, u32 flags);
+void c4iw_id_table_free(struct c4iw_id_table *alloc);
+
 typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);
| 697 | 791 | ||
| 698 | int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, | 792 | int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, |
| 699 | struct l2t_entry *l2t); | 793 | struct l2t_entry *l2t); |
| 700 | void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid, | 794 | void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid, |
| 701 | struct c4iw_dev_ucontext *uctx); | 795 | struct c4iw_dev_ucontext *uctx); |
| 702 | u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock); | 796 | u32 c4iw_get_resource(struct c4iw_id_table *id_table); |
| 703 | void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock); | 797 | void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry); |
| 704 | int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid); | 798 | int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid); |
| 705 | int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev); | 799 | int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev); |
| 706 | int c4iw_pblpool_create(struct c4iw_rdev *rdev); | 800 | int c4iw_pblpool_create(struct c4iw_rdev *rdev); |
| @@ -769,6 +863,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, | |||
| 769 | struct ib_udata *udata); | 863 | struct ib_udata *udata); |
| 770 | int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | 864 | int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
| 771 | int attr_mask, struct ib_udata *udata); | 865 | int attr_mask, struct ib_udata *udata); |
| 866 | int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | ||
| 867 | int attr_mask, struct ib_qp_init_attr *init_attr); | ||
| 772 | struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn); | 868 | struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn); |
| 773 | u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size); | 869 | u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size); |
| 774 | void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size); | 870 | void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size); |
| @@ -797,5 +893,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe); | |||
| 797 | extern struct cxgb4_client t4c_client; | 893 | extern struct cxgb4_client t4c_client; |
| 798 | extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS]; | 894 | extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS]; |
| 799 | extern int c4iw_max_read_depth; | 895 | extern int c4iw_max_read_depth; |
| 896 | extern int db_fc_threshold; | ||
| 897 | |||
| 800 | 898 | ||
| 801 | #endif | 899 | #endif |
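Reviewer note: the idr helpers above are refactored into a single _insert_handle()/_remove_handle() taking a lock flag, so callers that already hold rhp->lock (such as c4iw_create_qp(), which must bump qpcnt, possibly flip db_state, and insert the QP atomically) can use the _nolock variants. A minimal portable model of that flag-parameter pattern, with illustrative names not taken from the patch:

/* Portable sketch of the lock/nolock wrapper pattern: one _op(..., int lock)
 * helper plus thin locked and _nolock wrappers. */
#include <pthread.h>
#include <stdio.h>

#define TABLE_SIZE 16

struct table {
	pthread_mutex_t lock;
	void *slots[TABLE_SIZE];
};

static int _table_insert(struct table *t, unsigned id, void *val, int lock)
{
	int ret = -1;

	if (lock)
		pthread_mutex_lock(&t->lock);
	if (id < TABLE_SIZE && !t->slots[id]) {
		t->slots[id] = val;
		ret = 0;
	}
	if (lock)
		pthread_mutex_unlock(&t->lock);
	return ret;
}

static int table_insert(struct table *t, unsigned id, void *val)
{
	return _table_insert(t, id, val, 1);
}

/* Caller already holds t->lock, e.g. because it must update other state
 * (a counter, a mode flag) atomically with the insertion. */
static int table_insert_nolock(struct table *t, unsigned id, void *val)
{
	return _table_insert(t, id, val, 0);
}

int main(void)
{
	struct table t = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int v = 42;

	table_insert(&t, 3, &v);	/* standalone caller */

	pthread_mutex_lock(&t.lock);	/* compound update under one lock */
	table_insert_nolock(&t, 4, &v);
	pthread_mutex_unlock(&t.lock);

	printf("slot 3 %p slot 4 %p\n", t.slots[3], t.slots[4]);
	return 0;
}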
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 40c835309e49..57e07c61ace2 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c | |||
| @@ -131,10 +131,14 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, | |||
| 131 | stag_idx = (*stag) >> 8; | 131 | stag_idx = (*stag) >> 8; |
| 132 | 132 | ||
| 133 | if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) { | 133 | if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) { |
| 134 | stag_idx = c4iw_get_resource(&rdev->resource.tpt_fifo, | 134 | stag_idx = c4iw_get_resource(&rdev->resource.tpt_table); |
| 135 | &rdev->resource.tpt_fifo_lock); | ||
| 136 | if (!stag_idx) | 135 | if (!stag_idx) |
| 137 | return -ENOMEM; | 136 | return -ENOMEM; |
| 137 | mutex_lock(&rdev->stats.lock); | ||
| 138 | rdev->stats.stag.cur += 32; | ||
| 139 | if (rdev->stats.stag.cur > rdev->stats.stag.max) | ||
| 140 | rdev->stats.stag.max = rdev->stats.stag.cur; | ||
| 141 | mutex_unlock(&rdev->stats.lock); | ||
| 138 | *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff); | 142 | *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff); |
| 139 | } | 143 | } |
| 140 | PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n", | 144 | PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n", |
| @@ -165,9 +169,12 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, | |||
| 165 | (rdev->lldi.vr->stag.start >> 5), | 169 | (rdev->lldi.vr->stag.start >> 5), |
| 166 | sizeof(tpt), &tpt); | 170 | sizeof(tpt), &tpt); |
| 167 | 171 | ||
| 168 | if (reset_tpt_entry) | 172 | if (reset_tpt_entry) { |
| 169 | c4iw_put_resource(&rdev->resource.tpt_fifo, stag_idx, | 173 | c4iw_put_resource(&rdev->resource.tpt_table, stag_idx); |
| 170 | &rdev->resource.tpt_fifo_lock); | 174 | mutex_lock(&rdev->stats.lock); |
| 175 | rdev->stats.stag.cur -= 32; | ||
| 176 | mutex_unlock(&rdev->stats.lock); | ||
| 177 | } | ||
| 171 | return err; | 178 | return err; |
| 172 | } | 179 | } |
| 173 | 180 | ||
| @@ -686,8 +693,8 @@ int c4iw_dealloc_mw(struct ib_mw *mw) | |||
| 686 | mhp = to_c4iw_mw(mw); | 693 | mhp = to_c4iw_mw(mw); |
| 687 | rhp = mhp->rhp; | 694 | rhp = mhp->rhp; |
| 688 | mmid = (mw->rkey) >> 8; | 695 | mmid = (mw->rkey) >> 8; |
| 689 | deallocate_window(&rhp->rdev, mhp->attr.stag); | ||
| 690 | remove_handle(rhp, &rhp->mmidr, mmid); | 696 | remove_handle(rhp, &rhp->mmidr, mmid); |
| 697 | deallocate_window(&rhp->rdev, mhp->attr.stag); | ||
| 691 | kfree(mhp); | 698 | kfree(mhp); |
| 692 | PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp); | 699 | PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp); |
| 693 | return 0; | 700 | return 0; |
| @@ -789,12 +796,12 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr) | |||
| 789 | mhp = to_c4iw_mr(ib_mr); | 796 | mhp = to_c4iw_mr(ib_mr); |
| 790 | rhp = mhp->rhp; | 797 | rhp = mhp->rhp; |
| 791 | mmid = mhp->attr.stag >> 8; | 798 | mmid = mhp->attr.stag >> 8; |
| 799 | remove_handle(rhp, &rhp->mmidr, mmid); | ||
| 792 | dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, | 800 | dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, |
| 793 | mhp->attr.pbl_addr); | 801 | mhp->attr.pbl_addr); |
| 794 | if (mhp->attr.pbl_size) | 802 | if (mhp->attr.pbl_size) |
| 795 | c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, | 803 | c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, |
| 796 | mhp->attr.pbl_size << 3); | 804 | mhp->attr.pbl_size << 3); |
| 797 | remove_handle(rhp, &rhp->mmidr, mmid); | ||
| 798 | if (mhp->kva) | 805 | if (mhp->kva) |
| 799 | kfree((void *) (unsigned long) mhp->kva); | 806 | kfree((void *) (unsigned long) mhp->kva); |
| 800 | if (mhp->umem) | 807 | if (mhp->umem) |
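Reviewer note: besides reordering remove_handle() ahead of the hardware teardown (so a stale mmid can no longer resolve to a half-freed MR/MW), the mem.c hunks add the cur/max accounting that the rest of the series sprinkles through every allocator, apparently counting 32 bytes of TPT space per stag entry. A portable model of that high-water-mark pattern, with illustrative names:

#include <pthread.h>
#include <stdio.h>

struct resource_stat {
	pthread_mutex_t lock;
	unsigned long long cur, max, fail;
};

static void stat_add(struct resource_stat *s, unsigned long long n)
{
	pthread_mutex_lock(&s->lock);
	s->cur += n;
	if (s->cur > s->max)
		s->max = s->cur;	/* high-water mark, never decreases */
	pthread_mutex_unlock(&s->lock);
}

static void stat_sub(struct resource_stat *s, unsigned long long n)
{
	pthread_mutex_lock(&s->lock);
	s->cur -= n;
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct resource_stat stag = { .lock = PTHREAD_MUTEX_INITIALIZER };

	stat_add(&stag, 32);	/* e.g. one TPT entry worth of space */
	stat_add(&stag, 32);
	stat_sub(&stag, 32);
	printf("cur=%llu max=%llu\n", stag.cur, stag.max); /* cur=32 max=64 */
	return 0;
}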
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index be1c18f44400..e084fdc6da7f 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c | |||
| @@ -188,8 +188,10 @@ static int c4iw_deallocate_pd(struct ib_pd *pd) | |||
| 188 | php = to_c4iw_pd(pd); | 188 | php = to_c4iw_pd(pd); |
| 189 | rhp = php->rhp; | 189 | rhp = php->rhp; |
| 190 | PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid); | 190 | PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid); |
| 191 | c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, php->pdid, | 191 | c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid); |
| 192 | &rhp->rdev.resource.pdid_fifo_lock); | 192 | mutex_lock(&rhp->rdev.stats.lock); |
| 193 | rhp->rdev.stats.pd.cur--; | ||
| 194 | mutex_unlock(&rhp->rdev.stats.lock); | ||
| 193 | kfree(php); | 195 | kfree(php); |
| 194 | return 0; | 196 | return 0; |
| 195 | } | 197 | } |
| @@ -204,14 +206,12 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev, | |||
| 204 | 206 | ||
| 205 | PDBG("%s ibdev %p\n", __func__, ibdev); | 207 | PDBG("%s ibdev %p\n", __func__, ibdev); |
| 206 | rhp = (struct c4iw_dev *) ibdev; | 208 | rhp = (struct c4iw_dev *) ibdev; |
| 207 | pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_fifo, | 209 | pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table); |
| 208 | &rhp->rdev.resource.pdid_fifo_lock); | ||
| 209 | if (!pdid) | 210 | if (!pdid) |
| 210 | return ERR_PTR(-EINVAL); | 211 | return ERR_PTR(-EINVAL); |
| 211 | php = kzalloc(sizeof(*php), GFP_KERNEL); | 212 | php = kzalloc(sizeof(*php), GFP_KERNEL); |
| 212 | if (!php) { | 213 | if (!php) { |
| 213 | c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, pdid, | 214 | c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid); |
| 214 | &rhp->rdev.resource.pdid_fifo_lock); | ||
| 215 | return ERR_PTR(-ENOMEM); | 215 | return ERR_PTR(-ENOMEM); |
| 216 | } | 216 | } |
| 217 | php->pdid = pdid; | 217 | php->pdid = pdid; |
| @@ -222,6 +222,11 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev, | |||
| 222 | return ERR_PTR(-EFAULT); | 222 | return ERR_PTR(-EFAULT); |
| 223 | } | 223 | } |
| 224 | } | 224 | } |
| 225 | mutex_lock(&rhp->rdev.stats.lock); | ||
| 226 | rhp->rdev.stats.pd.cur++; | ||
| 227 | if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max) | ||
| 228 | rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur; | ||
| 229 | mutex_unlock(&rhp->rdev.stats.lock); | ||
| 225 | PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php); | 230 | PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php); |
| 226 | return &php->ibpd; | 231 | return &php->ibpd; |
| 227 | } | 232 | } |
| @@ -438,6 +443,7 @@ int c4iw_register_device(struct c4iw_dev *dev) | |||
| 438 | (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | | 443 | (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | |
| 439 | (1ull << IB_USER_VERBS_CMD_CREATE_QP) | | 444 | (1ull << IB_USER_VERBS_CMD_CREATE_QP) | |
| 440 | (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | | 445 | (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | |
| 446 | (1ull << IB_USER_VERBS_CMD_QUERY_QP) | | ||
| 441 | (1ull << IB_USER_VERBS_CMD_POLL_CQ) | | 447 | (1ull << IB_USER_VERBS_CMD_POLL_CQ) | |
| 442 | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | | 448 | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | |
| 443 | (1ull << IB_USER_VERBS_CMD_POST_SEND) | | 449 | (1ull << IB_USER_VERBS_CMD_POST_SEND) | |
| @@ -460,6 +466,7 @@ int c4iw_register_device(struct c4iw_dev *dev) | |||
| 460 | dev->ibdev.destroy_ah = c4iw_ah_destroy; | 466 | dev->ibdev.destroy_ah = c4iw_ah_destroy; |
| 461 | dev->ibdev.create_qp = c4iw_create_qp; | 467 | dev->ibdev.create_qp = c4iw_create_qp; |
| 462 | dev->ibdev.modify_qp = c4iw_ib_modify_qp; | 468 | dev->ibdev.modify_qp = c4iw_ib_modify_qp; |
| 469 | dev->ibdev.query_qp = c4iw_ib_query_qp; | ||
| 463 | dev->ibdev.destroy_qp = c4iw_destroy_qp; | 470 | dev->ibdev.destroy_qp = c4iw_destroy_qp; |
| 464 | dev->ibdev.create_cq = c4iw_create_cq; | 471 | dev->ibdev.create_cq = c4iw_create_cq; |
| 465 | dev->ibdev.destroy_cq = c4iw_destroy_cq; | 472 | dev->ibdev.destroy_cq = c4iw_destroy_cq; |
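Reviewer note: with IB_USER_VERBS_CMD_QUERY_QP advertised and ibdev.query_qp wired up here, verbs consumers can finally query a cxgb4 QP. Per the qp.c hunk further down, c4iw_ib_query_qp() zeroes both output structs and fills only the QP state, so callers should rely on nothing else. A hedged sketch of such a caller (error handling trimmed):

#include <stdio.h>
#include <infiniband/verbs.h>

int print_qp_state(struct ibv_qp *qp)
{
	struct ibv_qp_attr attr;
	struct ibv_qp_init_attr init_attr;
	int ret;

	/* only attr.qp_state is meaningful with this provider */
	ret = ibv_query_qp(qp, &attr, IBV_QP_STATE, &init_attr);
	if (ret)
		return ret;
	printf("qp %u state %d\n", qp->qp_num, attr.qp_state);
	return 0;
}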
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 5f940aeaab1e..45aedf1d9338 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
| @@ -34,10 +34,19 @@ | |||
| 34 | 34 | ||
| 35 | #include "iw_cxgb4.h" | 35 | #include "iw_cxgb4.h" |
| 36 | 36 | ||
| 37 | static int db_delay_usecs = 1; | ||
| 38 | module_param(db_delay_usecs, int, 0644); | ||
| 39 | MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain"); | ||
| 40 | |||
| 37 | static int ocqp_support = 1; | 41 | static int ocqp_support = 1; |
| 38 | module_param(ocqp_support, int, 0644); | 42 | module_param(ocqp_support, int, 0644); |
| 39 | MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)"); | 43 | MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)"); |
| 40 | 44 | ||
| 45 | int db_fc_threshold = 2000; | ||
| 46 | module_param(db_fc_threshold, int, 0644); | ||
| 47 | MODULE_PARM_DESC(db_fc_threshold, "QP count/threshold that triggers automatic " | ||
| 48 | "db flow control mode (default = 2000)"); | ||
| 49 | |||
| 41 | static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state) | 50 | static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state) |
| 42 | { | 51 | { |
| 43 | unsigned long flag; | 52 | unsigned long flag; |
| @@ -1128,6 +1137,35 @@ out: | |||
| 1128 | return ret; | 1137 | return ret; |
| 1129 | } | 1138 | } |
| 1130 | 1139 | ||
| 1140 | /* | ||
| 1141 | * Called by the library when the qp has user dbs disabled due to | ||
| 1142 | * a DB_FULL condition. This function will single-thread all user | ||
| 1143 | * DB rings to avoid overflowing the hw db-fifo. | ||
| 1144 | */ | ||
| 1145 | static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc) | ||
| 1146 | { | ||
| 1147 | int delay = db_delay_usecs; | ||
| 1148 | |||
| 1149 | mutex_lock(&qhp->rhp->db_mutex); | ||
| 1150 | do { | ||
| 1151 | |||
| 1152 | /* | ||
| 1153 | * The interrupt threshold is dbfifo_int_thresh << 6. So | ||
| 1154 | * make sure we don't cross that and generate an interrupt. | ||
| 1155 | */ | ||
| 1156 | if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) < | ||
| 1157 | (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) { | ||
| 1158 | writel(V_QID(qid) | V_PIDX(inc), qhp->wq.db); | ||
| 1159 | break; | ||
| 1160 | } | ||
| 1161 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
| 1162 | schedule_timeout(usecs_to_jiffies(delay)); | ||
| 1163 | delay = min(delay << 1, 2000); | ||
| 1164 | } while (1); | ||
| 1165 | mutex_unlock(&qhp->rhp->db_mutex); | ||
| 1166 | return 0; | ||
| 1167 | } | ||
| 1168 | |||
| 1131 | int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | 1169 | int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, |
| 1132 | enum c4iw_qp_attr_mask mask, | 1170 | enum c4iw_qp_attr_mask mask, |
| 1133 | struct c4iw_qp_attributes *attrs, | 1171 | struct c4iw_qp_attributes *attrs, |
| @@ -1176,6 +1214,15 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
| 1176 | qhp->attr = newattr; | 1214 | qhp->attr = newattr; |
| 1177 | } | 1215 | } |
| 1178 | 1216 | ||
| 1217 | if (mask & C4IW_QP_ATTR_SQ_DB) { | ||
| 1218 | ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc); | ||
| 1219 | goto out; | ||
| 1220 | } | ||
| 1221 | if (mask & C4IW_QP_ATTR_RQ_DB) { | ||
| 1222 | ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc); | ||
| 1223 | goto out; | ||
| 1224 | } | ||
| 1225 | |||
| 1179 | if (!(mask & C4IW_QP_ATTR_NEXT_STATE)) | 1226 | if (!(mask & C4IW_QP_ATTR_NEXT_STATE)) |
| 1180 | goto out; | 1227 | goto out; |
| 1181 | if (qhp->attr.state == attrs->next_state) | 1228 | if (qhp->attr.state == attrs->next_state) |
| @@ -1352,6 +1399,14 @@ out: | |||
| 1352 | return ret; | 1399 | return ret; |
| 1353 | } | 1400 | } |
| 1354 | 1401 | ||
| 1402 | static int enable_qp_db(int id, void *p, void *data) | ||
| 1403 | { | ||
| 1404 | struct c4iw_qp *qp = p; | ||
| 1405 | |||
| 1406 | t4_enable_wq_db(&qp->wq); | ||
| 1407 | return 0; | ||
| 1408 | } | ||
| 1409 | |||
| 1355 | int c4iw_destroy_qp(struct ib_qp *ib_qp) | 1410 | int c4iw_destroy_qp(struct ib_qp *ib_qp) |
| 1356 | { | 1411 | { |
| 1357 | struct c4iw_dev *rhp; | 1412 | struct c4iw_dev *rhp; |
| @@ -1369,7 +1424,16 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp) | |||
| 1369 | c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | 1424 | c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); |
| 1370 | wait_event(qhp->wait, !qhp->ep); | 1425 | wait_event(qhp->wait, !qhp->ep); |
| 1371 | 1426 | ||
| 1372 | remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); | 1427 | spin_lock_irq(&rhp->lock); |
| 1428 | remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid); | ||
| 1429 | rhp->qpcnt--; | ||
| 1430 | BUG_ON(rhp->qpcnt < 0); | ||
| 1431 | if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) { | ||
| 1432 | rhp->rdev.stats.db_state_transitions++; | ||
| 1433 | rhp->db_state = NORMAL; | ||
| 1434 | idr_for_each(&rhp->qpidr, enable_qp_db, NULL); | ||
| 1435 | } | ||
| 1436 | spin_unlock_irq(&rhp->lock); | ||
| 1373 | atomic_dec(&qhp->refcnt); | 1437 | atomic_dec(&qhp->refcnt); |
| 1374 | wait_event(qhp->wait, !atomic_read(&qhp->refcnt)); | 1438 | wait_event(qhp->wait, !atomic_read(&qhp->refcnt)); |
| 1375 | 1439 | ||
| @@ -1383,6 +1447,14 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp) | |||
| 1383 | return 0; | 1447 | return 0; |
| 1384 | } | 1448 | } |
| 1385 | 1449 | ||
| 1450 | static int disable_qp_db(int id, void *p, void *data) | ||
| 1451 | { | ||
| 1452 | struct c4iw_qp *qp = p; | ||
| 1453 | |||
| 1454 | t4_disable_wq_db(&qp->wq); | ||
| 1455 | return 0; | ||
| 1456 | } | ||
| 1457 | |||
| 1386 | struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, | 1458 | struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, |
| 1387 | struct ib_udata *udata) | 1459 | struct ib_udata *udata) |
| 1388 | { | 1460 | { |
| @@ -1469,7 +1541,16 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, | |||
| 1469 | init_waitqueue_head(&qhp->wait); | 1541 | init_waitqueue_head(&qhp->wait); |
| 1470 | atomic_set(&qhp->refcnt, 1); | 1542 | atomic_set(&qhp->refcnt, 1); |
| 1471 | 1543 | ||
| 1472 | ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); | 1544 | spin_lock_irq(&rhp->lock); |
| 1545 | if (rhp->db_state != NORMAL) | ||
| 1546 | t4_disable_wq_db(&qhp->wq); | ||
| 1547 | if (++rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) { | ||
| 1548 | rhp->rdev.stats.db_state_transitions++; | ||
| 1549 | rhp->db_state = FLOW_CONTROL; | ||
| 1550 | idr_for_each(&rhp->qpidr, disable_qp_db, NULL); | ||
| 1551 | } | ||
| 1552 | ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); | ||
| 1553 | spin_unlock_irq(&rhp->lock); | ||
| 1473 | if (ret) | 1554 | if (ret) |
| 1474 | goto err2; | 1555 | goto err2; |
| 1475 | 1556 | ||
| @@ -1613,6 +1694,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
| 1613 | C4IW_QP_ATTR_ENABLE_RDMA_WRITE | | 1694 | C4IW_QP_ATTR_ENABLE_RDMA_WRITE | |
| 1614 | C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0; | 1695 | C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0; |
| 1615 | 1696 | ||
| 1697 | /* | ||
| 1698 | * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for | ||
| 1699 | * ringing the queue db when we're in DB_FULL mode. | ||
| 1700 | */ | ||
| 1701 | attrs.sq_db_inc = attr->sq_psn; | ||
| 1702 | attrs.rq_db_inc = attr->rq_psn; | ||
| 1703 | mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0; | ||
| 1704 | mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0; | ||
| 1705 | |||
| 1616 | return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); | 1706 | return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); |
| 1617 | } | 1707 | } |
| 1618 | 1708 | ||
| @@ -1621,3 +1711,14 @@ struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn) | |||
| 1621 | PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn); | 1711 | PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn); |
| 1622 | return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn); | 1712 | return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn); |
| 1623 | } | 1713 | } |
| 1714 | |||
| 1715 | int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | ||
| 1716 | int attr_mask, struct ib_qp_init_attr *init_attr) | ||
| 1717 | { | ||
| 1718 | struct c4iw_qp *qhp = to_c4iw_qp(ibqp); | ||
| 1719 | |||
| 1720 | memset(attr, 0, sizeof *attr); | ||
| 1721 | memset(init_attr, 0, sizeof *init_attr); | ||
| 1722 | attr->qp_state = to_ib_qp_state(qhp->attr.state); | ||
| 1723 | return 0; | ||
| 1724 | } | ||
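Reviewer note: the qp.c hunks above overload IB_QP_SQ_PSN/IB_QP_RQ_PSN so that sq_psn/rq_psn carry the doorbell IDX_INC when user doorbells are disabled by a DB_FULL event; ring_kernel_db() then single-threads the replay behind db_mutex with exponential backoff (capped at 2000 us). A sketch of how the userspace library side might hand its pending count back to the kernel, under the assumption that it tracks outstanding PIDX increments per queue; this overloading is a private contract between this driver and its library, not a general uapi:

#include <string.h>
#include <infiniband/verbs.h>

static int ring_sq_db_via_kernel(struct ibv_qp *qp, unsigned pending_wrs)
{
	struct ibv_qp_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.sq_psn = pending_wrs;	/* reused as the doorbell IDX_INC */
	return ibv_modify_qp(qp, &attr, IBV_QP_SQ_PSN);
}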
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c index 407ff3924150..cdef4d7fb6d8 100644 --- a/drivers/infiniband/hw/cxgb4/resource.c +++ b/drivers/infiniband/hw/cxgb4/resource.c | |||
| @@ -30,96 +30,25 @@ | |||
| 30 | * SOFTWARE. | 30 | * SOFTWARE. |
| 31 | */ | 31 | */ |
| 32 | /* Crude resource management */ | 32 | /* Crude resource management */ |
| 33 | #include <linux/kernel.h> | ||
| 34 | #include <linux/random.h> | ||
| 35 | #include <linux/slab.h> | ||
| 36 | #include <linux/kfifo.h> | ||
| 37 | #include <linux/spinlock.h> | 33 | #include <linux/spinlock.h> |
| 38 | #include <linux/errno.h> | ||
| 39 | #include <linux/genalloc.h> | 34 | #include <linux/genalloc.h> |
| 40 | #include <linux/ratelimit.h> | 35 | #include <linux/ratelimit.h> |
| 41 | #include "iw_cxgb4.h" | 36 | #include "iw_cxgb4.h" |
| 42 | 37 | ||
| 43 | #define RANDOM_SIZE 16 | 38 | static int c4iw_init_qid_table(struct c4iw_rdev *rdev) |
| 44 | |||
| 45 | static int __c4iw_init_resource_fifo(struct kfifo *fifo, | ||
| 46 | spinlock_t *fifo_lock, | ||
| 47 | u32 nr, u32 skip_low, | ||
| 48 | u32 skip_high, | ||
| 49 | int random) | ||
| 50 | { | ||
| 51 | u32 i, j, entry = 0, idx; | ||
| 52 | u32 random_bytes; | ||
| 53 | u32 rarray[16]; | ||
| 54 | spin_lock_init(fifo_lock); | ||
| 55 | |||
| 56 | if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL)) | ||
| 57 | return -ENOMEM; | ||
| 58 | |||
| 59 | for (i = 0; i < skip_low + skip_high; i++) | ||
| 60 | kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32)); | ||
| 61 | if (random) { | ||
| 62 | j = 0; | ||
| 63 | random_bytes = random32(); | ||
| 64 | for (i = 0; i < RANDOM_SIZE; i++) | ||
| 65 | rarray[i] = i + skip_low; | ||
| 66 | for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) { | ||
| 67 | if (j >= RANDOM_SIZE) { | ||
| 68 | j = 0; | ||
| 69 | random_bytes = random32(); | ||
| 70 | } | ||
| 71 | idx = (random_bytes >> (j * 2)) & 0xF; | ||
| 72 | kfifo_in(fifo, | ||
| 73 | (unsigned char *) &rarray[idx], | ||
| 74 | sizeof(u32)); | ||
| 75 | rarray[idx] = i; | ||
| 76 | j++; | ||
| 77 | } | ||
| 78 | for (i = 0; i < RANDOM_SIZE; i++) | ||
| 79 | kfifo_in(fifo, | ||
| 80 | (unsigned char *) &rarray[i], | ||
| 81 | sizeof(u32)); | ||
| 82 | } else | ||
| 83 | for (i = skip_low; i < nr - skip_high; i++) | ||
| 84 | kfifo_in(fifo, (unsigned char *) &i, sizeof(u32)); | ||
| 85 | |||
| 86 | for (i = 0; i < skip_low + skip_high; i++) | ||
| 87 | if (kfifo_out_locked(fifo, (unsigned char *) &entry, | ||
| 88 | sizeof(u32), fifo_lock)) | ||
| 89 | break; | ||
| 90 | return 0; | ||
| 91 | } | ||
| 92 | |||
| 93 | static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock, | ||
| 94 | u32 nr, u32 skip_low, u32 skip_high) | ||
| 95 | { | ||
| 96 | return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low, | ||
| 97 | skip_high, 0); | ||
| 98 | } | ||
| 99 | |||
| 100 | static int c4iw_init_resource_fifo_random(struct kfifo *fifo, | ||
| 101 | spinlock_t *fifo_lock, | ||
| 102 | u32 nr, u32 skip_low, u32 skip_high) | ||
| 103 | { | ||
| 104 | return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low, | ||
| 105 | skip_high, 1); | ||
| 106 | } | ||
| 107 | |||
| 108 | static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev) | ||
| 109 | { | 39 | { |
| 110 | u32 i; | 40 | u32 i; |
| 111 | 41 | ||
| 112 | spin_lock_init(&rdev->resource.qid_fifo_lock); | 42 | if (c4iw_id_table_alloc(&rdev->resource.qid_table, |
| 113 | 43 | rdev->lldi.vr->qp.start, | |
| 114 | if (kfifo_alloc(&rdev->resource.qid_fifo, rdev->lldi.vr->qp.size * | 44 | rdev->lldi.vr->qp.size, |
| 115 | sizeof(u32), GFP_KERNEL)) | 45 | rdev->lldi.vr->qp.size, 0)) |
| 116 | return -ENOMEM; | 46 | return -ENOMEM; |
| 117 | 47 | ||
| 118 | for (i = rdev->lldi.vr->qp.start; | 48 | for (i = rdev->lldi.vr->qp.start; |
| 119 | i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++) | 49 | i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++) |
| 120 | if (!(i & rdev->qpmask)) | 50 | if (!(i & rdev->qpmask)) |
| 121 | kfifo_in(&rdev->resource.qid_fifo, | 51 | c4iw_id_free(&rdev->resource.qid_table, i); |
| 122 | (unsigned char *) &i, sizeof(u32)); | ||
| 123 | return 0; | 52 | return 0; |
| 124 | } | 53 | } |
| 125 | 54 | ||
| @@ -127,44 +56,42 @@ static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev) | |||
| 127 | int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid) | 56 | int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid) |
| 128 | { | 57 | { |
| 129 | int err = 0; | 58 | int err = 0; |
| 130 | err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo, | 59 | err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1, |
| 131 | &rdev->resource.tpt_fifo_lock, | 60 | C4IW_ID_TABLE_F_RANDOM); |
| 132 | nr_tpt, 1, 0); | ||
| 133 | if (err) | 61 | if (err) |
| 134 | goto tpt_err; | 62 | goto tpt_err; |
| 135 | err = c4iw_init_qid_fifo(rdev); | 63 | err = c4iw_init_qid_table(rdev); |
| 136 | if (err) | 64 | if (err) |
| 137 | goto qid_err; | 65 | goto qid_err; |
| 138 | err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo, | 66 | err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0, |
| 139 | &rdev->resource.pdid_fifo_lock, | 67 | nr_pdid, 1, 0); |
| 140 | nr_pdid, 1, 0); | ||
| 141 | if (err) | 68 | if (err) |
| 142 | goto pdid_err; | 69 | goto pdid_err; |
| 143 | return 0; | 70 | return 0; |
| 144 | pdid_err: | 71 | pdid_err: |
| 145 | kfifo_free(&rdev->resource.qid_fifo); | 72 | c4iw_id_table_free(&rdev->resource.qid_table); |
| 146 | qid_err: | 73 | qid_err: |
| 147 | kfifo_free(&rdev->resource.tpt_fifo); | 74 | c4iw_id_table_free(&rdev->resource.tpt_table); |
| 148 | tpt_err: | 75 | tpt_err: |
| 149 | return -ENOMEM; | 76 | return -ENOMEM; |
| 150 | } | 77 | } |
| 151 | 78 | ||
| 152 | /* | 79 | /* |
| 153 | * returns 0 if no resource available | 80 | * returns 0 if no resource available |
| 154 | */ | 81 | */ |
| 155 | u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock) | 82 | u32 c4iw_get_resource(struct c4iw_id_table *id_table) |
| 156 | { | 83 | { |
| 157 | u32 entry; | 84 | u32 entry; |
| 158 | if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock)) | 85 | entry = c4iw_id_alloc(id_table); |
| 159 | return entry; | 86 | if (entry == (u32)(-1)) |
| 160 | else | ||
| 161 | return 0; | 87 | return 0; |
| 88 | return entry; | ||
| 162 | } | 89 | } |
| 163 | 90 | ||
| 164 | void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock) | 91 | void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry) |
| 165 | { | 92 | { |
| 166 | PDBG("%s entry 0x%x\n", __func__, entry); | 93 | PDBG("%s entry 0x%x\n", __func__, entry); |
| 167 | kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock); | 94 | c4iw_id_free(id_table, entry); |
| 168 | } | 95 | } |
| 169 | 96 | ||
| 170 | u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) | 97 | u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) |
| @@ -181,10 +108,12 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) | |||
| 181 | qid = entry->qid; | 108 | qid = entry->qid; |
| 182 | kfree(entry); | 109 | kfree(entry); |
| 183 | } else { | 110 | } else { |
| 184 | qid = c4iw_get_resource(&rdev->resource.qid_fifo, | 111 | qid = c4iw_get_resource(&rdev->resource.qid_table); |
| 185 | &rdev->resource.qid_fifo_lock); | ||
| 186 | if (!qid) | 112 | if (!qid) |
| 187 | goto out; | 113 | goto out; |
| 114 | mutex_lock(&rdev->stats.lock); | ||
| 115 | rdev->stats.qid.cur += rdev->qpmask + 1; | ||
| 116 | mutex_unlock(&rdev->stats.lock); | ||
| 188 | for (i = qid+1; i & rdev->qpmask; i++) { | 117 | for (i = qid+1; i & rdev->qpmask; i++) { |
| 189 | entry = kmalloc(sizeof *entry, GFP_KERNEL); | 118 | entry = kmalloc(sizeof *entry, GFP_KERNEL); |
| 190 | if (!entry) | 119 | if (!entry) |
| @@ -213,6 +142,10 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) | |||
| 213 | out: | 142 | out: |
| 214 | mutex_unlock(&uctx->lock); | 143 | mutex_unlock(&uctx->lock); |
| 215 | PDBG("%s qid 0x%x\n", __func__, qid); | 144 | PDBG("%s qid 0x%x\n", __func__, qid); |
| 145 | mutex_lock(&rdev->stats.lock); | ||
| 146 | if (rdev->stats.qid.cur > rdev->stats.qid.max) | ||
| 147 | rdev->stats.qid.max = rdev->stats.qid.cur; | ||
| 148 | mutex_unlock(&rdev->stats.lock); | ||
| 216 | return qid; | 149 | return qid; |
| 217 | } | 150 | } |
| 218 | 151 | ||
| @@ -245,10 +178,12 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) | |||
| 245 | qid = entry->qid; | 178 | qid = entry->qid; |
| 246 | kfree(entry); | 179 | kfree(entry); |
| 247 | } else { | 180 | } else { |
| 248 | qid = c4iw_get_resource(&rdev->resource.qid_fifo, | 181 | qid = c4iw_get_resource(&rdev->resource.qid_table); |
| 249 | &rdev->resource.qid_fifo_lock); | ||
| 250 | if (!qid) | 182 | if (!qid) |
| 251 | goto out; | 183 | goto out; |
| 184 | mutex_lock(&rdev->stats.lock); | ||
| 185 | rdev->stats.qid.cur += rdev->qpmask + 1; | ||
| 186 | mutex_unlock(&rdev->stats.lock); | ||
| 252 | for (i = qid+1; i & rdev->qpmask; i++) { | 187 | for (i = qid+1; i & rdev->qpmask; i++) { |
| 253 | entry = kmalloc(sizeof *entry, GFP_KERNEL); | 188 | entry = kmalloc(sizeof *entry, GFP_KERNEL); |
| 254 | if (!entry) | 189 | if (!entry) |
| @@ -277,6 +212,10 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) | |||
| 277 | out: | 212 | out: |
| 278 | mutex_unlock(&uctx->lock); | 213 | mutex_unlock(&uctx->lock); |
| 279 | PDBG("%s qid 0x%x\n", __func__, qid); | 214 | PDBG("%s qid 0x%x\n", __func__, qid); |
| 215 | mutex_lock(&rdev->stats.lock); | ||
| 216 | if (rdev->stats.qid.cur > rdev->stats.qid.max) | ||
| 217 | rdev->stats.qid.max = rdev->stats.qid.cur; | ||
| 218 | mutex_unlock(&rdev->stats.lock); | ||
| 280 | return qid; | 219 | return qid; |
| 281 | } | 220 | } |
| 282 | 221 | ||
| @@ -297,9 +236,9 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid, | |||
| 297 | 236 | ||
| 298 | void c4iw_destroy_resource(struct c4iw_resource *rscp) | 237 | void c4iw_destroy_resource(struct c4iw_resource *rscp) |
| 299 | { | 238 | { |
| 300 | kfifo_free(&rscp->tpt_fifo); | 239 | c4iw_id_table_free(&rscp->tpt_table); |
| 301 | kfifo_free(&rscp->qid_fifo); | 240 | c4iw_id_table_free(&rscp->qid_table); |
| 302 | kfifo_free(&rscp->pdid_fifo); | 241 | c4iw_id_table_free(&rscp->pdid_table); |
| 303 | } | 242 | } |
| 304 | 243 | ||
| 305 | /* | 244 | /* |
| @@ -312,15 +251,23 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size) | |||
| 312 | { | 251 | { |
| 313 | unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size); | 252 | unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size); |
| 314 | PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size); | 253 | PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size); |
| 315 | if (!addr) | 254 | mutex_lock(&rdev->stats.lock); |
| 316 | printk_ratelimited(KERN_WARNING MOD "%s: Out of PBL memory\n", | 255 | if (addr) { |
| 317 | pci_name(rdev->lldi.pdev)); | 256 | rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT); |
| 257 | if (rdev->stats.pbl.cur > rdev->stats.pbl.max) | ||
| 258 | rdev->stats.pbl.max = rdev->stats.pbl.cur; | ||
| 259 | } else | ||
| 260 | rdev->stats.pbl.fail++; | ||
| 261 | mutex_unlock(&rdev->stats.lock); | ||
| 318 | return (u32)addr; | 262 | return (u32)addr; |
| 319 | } | 263 | } |
| 320 | 264 | ||
| 321 | void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size) | 265 | void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size) |
| 322 | { | 266 | { |
| 323 | PDBG("%s addr 0x%x size %d\n", __func__, addr, size); | 267 | PDBG("%s addr 0x%x size %d\n", __func__, addr, size); |
| 268 | mutex_lock(&rdev->stats.lock); | ||
| 269 | rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT); | ||
| 270 | mutex_unlock(&rdev->stats.lock); | ||
| 324 | gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size); | 271 | gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size); |
| 325 | } | 272 | } |
| 326 | 273 | ||
| @@ -377,12 +324,23 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size) | |||
| 377 | if (!addr) | 324 | if (!addr) |
| 378 | printk_ratelimited(KERN_WARNING MOD "%s: Out of RQT memory\n", | 325 | printk_ratelimited(KERN_WARNING MOD "%s: Out of RQT memory\n", |
| 379 | pci_name(rdev->lldi.pdev)); | 326 | pci_name(rdev->lldi.pdev)); |
| 327 | mutex_lock(&rdev->stats.lock); | ||
| 328 | if (addr) { | ||
| 329 | rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT); | ||
| 330 | if (rdev->stats.rqt.cur > rdev->stats.rqt.max) | ||
| 331 | rdev->stats.rqt.max = rdev->stats.rqt.cur; | ||
| 332 | } else | ||
| 333 | rdev->stats.rqt.fail++; | ||
| 334 | mutex_unlock(&rdev->stats.lock); | ||
| 380 | return (u32)addr; | 335 | return (u32)addr; |
| 381 | } | 336 | } |
| 382 | 337 | ||
| 383 | void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size) | 338 | void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size) |
| 384 | { | 339 | { |
| 385 | PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6); | 340 | PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6); |
| 341 | mutex_lock(&rdev->stats.lock); | ||
| 342 | rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT); | ||
| 343 | mutex_unlock(&rdev->stats.lock); | ||
| 386 | gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6); | 344 | gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6); |
| 387 | } | 345 | } |
| 388 | 346 | ||
| @@ -433,12 +391,22 @@ u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size) | |||
| 433 | { | 391 | { |
| 434 | unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size); | 392 | unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size); |
| 435 | PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size); | 393 | PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size); |
| 394 | if (addr) { | ||
| 395 | mutex_lock(&rdev->stats.lock); | ||
| 396 | rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT); | ||
| 397 | if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max) | ||
| 398 | rdev->stats.ocqp.max = rdev->stats.ocqp.cur; | ||
| 399 | mutex_unlock(&rdev->stats.lock); | ||
| 400 | } | ||
| 436 | return (u32)addr; | 401 | return (u32)addr; |
| 437 | } | 402 | } |
| 438 | 403 | ||
| 439 | void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size) | 404 | void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size) |
| 440 | { | 405 | { |
| 441 | PDBG("%s addr 0x%x size %d\n", __func__, addr, size); | 406 | PDBG("%s addr 0x%x size %d\n", __func__, addr, size); |
| 407 | mutex_lock(&rdev->stats.lock); | ||
| 408 | rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT); | ||
| 409 | mutex_unlock(&rdev->stats.lock); | ||
| 442 | gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size); | 410 | gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size); |
| 443 | } | 411 | } |
| 444 | 412 | ||
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index c0221eec8817..16f26ab29302 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h | |||
| @@ -62,6 +62,10 @@ struct t4_status_page { | |||
| 62 | __be16 pidx; | 62 | __be16 pidx; |
| 63 | u8 qp_err; /* flit 1 - sw owns */ | 63 | u8 qp_err; /* flit 1 - sw owns */ |
| 64 | u8 db_off; | 64 | u8 db_off; |
| 65 | u8 pad; | ||
| 66 | u16 host_wq_pidx; | ||
| 67 | u16 host_cidx; | ||
| 68 | u16 host_pidx; | ||
| 65 | }; | 69 | }; |
| 66 | 70 | ||
| 67 | #define T4_EQ_ENTRY_SIZE 64 | 71 | #define T4_EQ_ENTRY_SIZE 64 |
| @@ -375,6 +379,16 @@ static inline void t4_rq_consume(struct t4_wq *wq) | |||
| 375 | wq->rq.cidx = 0; | 379 | wq->rq.cidx = 0; |
| 376 | } | 380 | } |
| 377 | 381 | ||
| 382 | static inline u16 t4_rq_host_wq_pidx(struct t4_wq *wq) | ||
| 383 | { | ||
| 384 | return wq->rq.queue[wq->rq.size].status.host_wq_pidx; | ||
| 385 | } | ||
| 386 | |||
| 387 | static inline u16 t4_rq_wq_size(struct t4_wq *wq) | ||
| 388 | { | ||
| 389 | return wq->rq.size * T4_RQ_NUM_SLOTS; | ||
| 390 | } | ||
| 391 | |||
| 378 | static inline int t4_sq_onchip(struct t4_sq *sq) | 392 | static inline int t4_sq_onchip(struct t4_sq *sq) |
| 379 | { | 393 | { |
| 380 | return sq->flags & T4_SQ_ONCHIP; | 394 | return sq->flags & T4_SQ_ONCHIP; |
| @@ -412,6 +426,16 @@ static inline void t4_sq_consume(struct t4_wq *wq) | |||
| 412 | wq->sq.cidx = 0; | 426 | wq->sq.cidx = 0; |
| 413 | } | 427 | } |
| 414 | 428 | ||
| 429 | static inline u16 t4_sq_host_wq_pidx(struct t4_wq *wq) | ||
| 430 | { | ||
| 431 | return wq->sq.queue[wq->sq.size].status.host_wq_pidx; | ||
| 432 | } | ||
| 433 | |||
| 434 | static inline u16 t4_sq_wq_size(struct t4_wq *wq) | ||
| 435 | { | ||
| 436 | return wq->sq.size * T4_SQ_NUM_SLOTS; | ||
| 437 | } | ||
| 438 | |||
| 415 | static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc) | 439 | static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc) |
| 416 | { | 440 | { |
| 417 | wmb(); | 441 | wmb(); |
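Reviewer note: the status page gains host-owned copies of the producer/consumer indices, and t4_sq_host_wq_pidx()/t4_rq_host_wq_pidx() read them from the slot past the end of each queue. Presumably this is what lets a recovery path compute how many doorbell increments are outstanding after drops: the host's view of the producer index minus the one hardware last saw, modulo the queue size. A portable model of that arithmetic:

#include <stdio.h>

/* pending doorbell increment, modulo the work-queue size */
static unsigned short db_inc(unsigned short host_pidx,
			     unsigned short hw_pidx,
			     unsigned short wq_size)
{
	return (unsigned short)((host_pidx + wq_size - hw_pidx) % wq_size);
}

int main(void)
{
	/* host has posted up to slot 5, hardware last saw slot 60 on a
	 * 64-slot queue: 9 doorbell increments are outstanding */
	printf("%u\n", db_inc(5, 60, 64));
	return 0;
}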
diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h index e6669d54770e..32b754c35ab7 100644 --- a/drivers/infiniband/hw/cxgb4/user.h +++ b/drivers/infiniband/hw/cxgb4/user.h | |||
| @@ -32,7 +32,7 @@ | |||
| 32 | #ifndef __C4IW_USER_H__ | 32 | #ifndef __C4IW_USER_H__ |
| 33 | #define __C4IW_USER_H__ | 33 | #define __C4IW_USER_H__ |
| 34 | 34 | ||
| 35 | #define C4IW_UVERBS_ABI_VERSION 1 | 35 | #define C4IW_UVERBS_ABI_VERSION 2 |
| 36 | 36 | ||
| 37 | /* | 37 | /* |
| 38 | * Make sure that all structs defined in this file remain laid out so | 38 | * Make sure that all structs defined in this file remain laid out so |
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c index 1d7aea132a09..7cc305488a3d 100644 --- a/drivers/infiniband/hw/ipath/ipath_iba6110.c +++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c | |||
| @@ -596,8 +596,7 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg, | |||
| 596 | 596 | ||
| 597 | ipath_format_hwerrors(hwerrs, | 597 | ipath_format_hwerrors(hwerrs, |
| 598 | ipath_6110_hwerror_msgs, | 598 | ipath_6110_hwerror_msgs, |
| 599 | sizeof(ipath_6110_hwerror_msgs) / | 599 | ARRAY_SIZE(ipath_6110_hwerror_msgs), |
| 600 | sizeof(ipath_6110_hwerror_msgs[0]), | ||
| 601 | msg, msgl); | 600 | msg, msgl); |
| 602 | 601 | ||
| 603 | if (hwerrs & (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS)) | 602 | if (hwerrs & (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS)) |
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c index c0a03ac03ee7..26dfbc8ee0f1 100644 --- a/drivers/infiniband/hw/ipath/ipath_intr.c +++ b/drivers/infiniband/hw/ipath/ipath_intr.c | |||
| @@ -209,8 +209,7 @@ void ipath_format_hwerrors(u64 hwerrs, | |||
| 209 | { | 209 | { |
| 210 | int i; | 210 | int i; |
| 211 | const int glen = | 211 | const int glen = |
| 212 | sizeof(ipath_generic_hwerror_msgs) / | 212 | ARRAY_SIZE(ipath_generic_hwerror_msgs); |
| 213 | sizeof(ipath_generic_hwerror_msgs[0]); | ||
| 214 | 213 | ||
| 215 | for (i=0; i<glen; i++) { | 214 | for (i=0; i<glen; i++) { |
| 216 | if (hwerrs & ipath_generic_hwerror_msgs[i].mask) { | 215 | if (hwerrs & ipath_generic_hwerror_msgs[i].mask) { |
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 77c8cb4c5073..6d4ef71cbcdf 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c | |||
| @@ -50,7 +50,7 @@ static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type) | |||
| 50 | struct ib_cq *ibcq; | 50 | struct ib_cq *ibcq; |
| 51 | 51 | ||
| 52 | if (type != MLX4_EVENT_TYPE_CQ_ERROR) { | 52 | if (type != MLX4_EVENT_TYPE_CQ_ERROR) { |
| 53 | printk(KERN_WARNING "mlx4_ib: Unexpected event type %d " | 53 | pr_warn("Unexpected event type %d " |
| 54 | "on CQ %06x\n", type, cq->cqn); | 54 | "on CQ %06x\n", type, cq->cqn); |
| 55 | return; | 55 | return; |
| 56 | } | 56 | } |
| @@ -222,6 +222,9 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector | |||
| 222 | uar = &dev->priv_uar; | 222 | uar = &dev->priv_uar; |
| 223 | } | 223 | } |
| 224 | 224 | ||
| 225 | if (dev->eq_table) | ||
| 226 | vector = dev->eq_table[vector % ibdev->num_comp_vectors]; | ||
| 227 | |||
| 225 | err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, | 228 | err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, |
| 226 | cq->db.dma, &cq->mcq, vector, 0); | 229 | cq->db.dma, &cq->mcq, vector, 0); |
| 227 | if (err) | 230 | if (err) |
| @@ -463,7 +466,7 @@ static void dump_cqe(void *cqe) | |||
| 463 | { | 466 | { |
| 464 | __be32 *buf = cqe; | 467 | __be32 *buf = cqe; |
| 465 | 468 | ||
| 466 | printk(KERN_DEBUG "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n", | 469 | pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n", |
| 467 | be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]), | 470 | be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]), |
| 468 | be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]), | 471 | be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]), |
| 469 | be32_to_cpu(buf[6]), be32_to_cpu(buf[7])); | 472 | be32_to_cpu(buf[6]), be32_to_cpu(buf[7])); |
| @@ -473,7 +476,7 @@ static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe, | |||
| 473 | struct ib_wc *wc) | 476 | struct ib_wc *wc) |
| 474 | { | 477 | { |
| 475 | if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) { | 478 | if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) { |
| 476 | printk(KERN_DEBUG "local QP operation err " | 479 | pr_debug("local QP operation err " |
| 477 | "(QPN %06x, WQE index %x, vendor syndrome %02x, " | 480 | "(QPN %06x, WQE index %x, vendor syndrome %02x, " |
| 478 | "opcode = %02x)\n", | 481 | "opcode = %02x)\n", |
| 479 | be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index), | 482 | be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index), |
| @@ -576,7 +579,7 @@ repoll: | |||
| 576 | 579 | ||
| 577 | if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP && | 580 | if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP && |
| 578 | is_send)) { | 581 | is_send)) { |
| 579 | printk(KERN_WARNING "Completion for NOP opcode detected!\n"); | 582 | pr_warn("Completion for NOP opcode detected!\n"); |
| 580 | return -EINVAL; | 583 | return -EINVAL; |
| 581 | } | 584 | } |
| 582 | 585 | ||
| @@ -606,7 +609,7 @@ repoll: | |||
| 606 | mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev, | 609 | mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev, |
| 607 | be32_to_cpu(cqe->vlan_my_qpn)); | 610 | be32_to_cpu(cqe->vlan_my_qpn)); |
| 608 | if (unlikely(!mqp)) { | 611 | if (unlikely(!mqp)) { |
| 609 | printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n", | 612 | pr_warn("CQ %06x with entry for unknown QPN %06x\n", |
| 610 | cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK); | 613 | cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK); |
| 611 | return -EINVAL; | 614 | return -EINVAL; |
| 612 | } | 615 | } |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index b948b6dd5d55..ee1c577238f7 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
| @@ -789,7 +789,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | |||
| 789 | list_del(&ge->list); | 789 | list_del(&ge->list); |
| 790 | kfree(ge); | 790 | kfree(ge); |
| 791 | } else | 791 | } else |
| 792 | printk(KERN_WARNING "could not find mgid entry\n"); | 792 | pr_warn("could not find mgid entry\n"); |
| 793 | 793 | ||
| 794 | mutex_unlock(&mqp->mutex); | 794 | mutex_unlock(&mqp->mutex); |
| 795 | 795 | ||
| @@ -902,7 +902,7 @@ static void update_gids_task(struct work_struct *work) | |||
| 902 | 902 | ||
| 903 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 903 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
| 904 | if (IS_ERR(mailbox)) { | 904 | if (IS_ERR(mailbox)) { |
| 905 | printk(KERN_WARNING "update gid table failed %ld\n", PTR_ERR(mailbox)); | 905 | pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox)); |
| 906 | return; | 906 | return; |
| 907 | } | 907 | } |
| 908 | 908 | ||
| @@ -913,7 +913,7 @@ static void update_gids_task(struct work_struct *work) | |||
| 913 | 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, | 913 | 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, |
| 914 | MLX4_CMD_NATIVE); | 914 | MLX4_CMD_NATIVE); |
| 915 | if (err) | 915 | if (err) |
| 916 | printk(KERN_WARNING "set port command failed\n"); | 916 | pr_warn("set port command failed\n"); |
| 917 | else { | 917 | else { |
| 918 | memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids); | 918 | memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids); |
| 919 | event.device = &gw->dev->ib_dev; | 919 | event.device = &gw->dev->ib_dev; |
| @@ -1076,18 +1076,98 @@ static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event | |||
| 1076 | return NOTIFY_DONE; | 1076 | return NOTIFY_DONE; |
| 1077 | } | 1077 | } |
| 1078 | 1078 | ||
| 1079 | static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) | ||
| 1080 | { | ||
| 1081 | char name[32]; | ||
| 1082 | int eq_per_port = 0; | ||
| 1083 | int added_eqs = 0; | ||
| 1084 | int total_eqs = 0; | ||
| 1085 | int i, j, eq; | ||
| 1086 | |||
| 1087 | /* Init eq table */ | ||
| 1088 | ibdev->eq_table = NULL; | ||
| 1089 | ibdev->eq_added = 0; | ||
| 1090 | |||
| 1091 | /* Legacy mode? */ | ||
| 1092 | if (dev->caps.comp_pool == 0) | ||
| 1093 | return; | ||
| 1094 | |||
| 1095 | eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/ | ||
| 1096 | dev->caps.num_ports); | ||
| 1097 | |||
| 1098 | /* Count the EQs to be added (eq_per_port per IB port) */ | ||
| 1099 | added_eqs = 0; | ||
| 1100 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) | ||
| 1101 | added_eqs += eq_per_port; | ||
| 1102 | |||
| 1103 | total_eqs = dev->caps.num_comp_vectors + added_eqs; | ||
| 1104 | |||
| 1105 | ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL); | ||
| 1106 | if (!ibdev->eq_table) | ||
| 1107 | return; | ||
| 1108 | |||
| 1109 | ibdev->eq_added = added_eqs; | ||
| 1110 | |||
| 1111 | eq = 0; | ||
| 1112 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) { | ||
| 1113 | for (j = 0; j < eq_per_port; j++) { | ||
| 1114 | sprintf(name, "mlx4-ib-%d-%d@%s", | ||
| 1115 | i, j, dev->pdev->bus->name); | ||
| 1116 | /* Set IRQ for specific name (per ring) */ | ||
| 1117 | if (mlx4_assign_eq(dev, name, &ibdev->eq_table[eq])) { | ||
| 1118 | /* Use legacy (same as mlx4_en driver) */ | ||
| 1119 | pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq); | ||
| 1120 | ibdev->eq_table[eq] = | ||
| 1121 | (eq % dev->caps.num_comp_vectors); | ||
| 1122 | } | ||
| 1123 | eq++; | ||
| 1124 | } | ||
| 1125 | } | ||
| 1126 | |||
| 1127 | /* Fill the rest of the vector with legacy EQs */ | ||
| 1128 | for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++) | ||
| 1129 | ibdev->eq_table[eq++] = i; | ||
| 1130 | |||
| 1131 | /* Advertise the new number of EQs to clients */ | ||
| 1132 | ibdev->ib_dev.num_comp_vectors = total_eqs; | ||
| 1133 | } | ||
| 1134 | |||
| 1135 | static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) | ||
| 1136 | { | ||
| 1137 | int i; | ||
| 1138 | int total_eqs; | ||
| 1139 | |||
| 1140 | /* Reset the advertised EQ number */ | ||
| 1141 | ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; | ||
| 1142 | |||
| 1143 | /* Free only the added eqs */ | ||
| 1144 | for (i = 0; i < ibdev->eq_added; i++) { | ||
| 1145 | /* Don't free legacy eqs if used */ | ||
| 1146 | if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors) | ||
| 1147 | continue; | ||
| 1148 | mlx4_release_eq(dev, ibdev->eq_table[i]); | ||
| 1149 | } | ||
| 1150 | |||
| 1151 | total_eqs = dev->caps.num_comp_vectors + ibdev->eq_added; | ||
| 1152 | memset(ibdev->eq_table, 0, total_eqs * sizeof(int)); | ||
| 1153 | kfree(ibdev->eq_table); | ||
| 1154 | |||
| 1155 | ibdev->eq_table = NULL; | ||
| 1156 | ibdev->eq_added = 0; | ||
| 1157 | } | ||
| 1158 | |||
| 1079 | static void *mlx4_ib_add(struct mlx4_dev *dev) | 1159 | static void *mlx4_ib_add(struct mlx4_dev *dev) |
| 1080 | { | 1160 | { |
| 1081 | struct mlx4_ib_dev *ibdev; | 1161 | struct mlx4_ib_dev *ibdev; |
| 1082 | int num_ports = 0; | 1162 | int num_ports = 0; |
| 1083 | int i; | 1163 | int i, j; |
| 1084 | int err; | 1164 | int err; |
| 1085 | struct mlx4_ib_iboe *iboe; | 1165 | struct mlx4_ib_iboe *iboe; |
| 1086 | 1166 | ||
| 1087 | printk_once(KERN_INFO "%s", mlx4_ib_version); | 1167 | pr_info_once("%s", mlx4_ib_version); |
| 1088 | 1168 | ||
| 1089 | if (mlx4_is_mfunc(dev)) { | 1169 | if (mlx4_is_mfunc(dev)) { |
| 1090 | printk(KERN_WARNING "IB not yet supported in SRIOV\n"); | 1170 | pr_warn("IB not yet supported in SRIOV\n"); |
| 1091 | return NULL; | 1171 | return NULL; |
| 1092 | } | 1172 | } |
| 1093 | 1173 | ||
| @@ -1210,6 +1290,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
| 1210 | (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD); | 1290 | (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD); |
| 1211 | } | 1291 | } |
| 1212 | 1292 | ||
| 1293 | mlx4_ib_alloc_eqs(dev, ibdev); | ||
| 1294 | |||
| 1213 | spin_lock_init(&iboe->lock); | 1295 | spin_lock_init(&iboe->lock); |
| 1214 | 1296 | ||
| 1215 | if (init_node_data(ibdev)) | 1297 | if (init_node_data(ibdev)) |
| @@ -1241,9 +1323,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
| 1241 | goto err_reg; | 1323 | goto err_reg; |
| 1242 | } | 1324 | } |
| 1243 | 1325 | ||
| 1244 | for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) { | 1326 | for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) { |
| 1245 | if (device_create_file(&ibdev->ib_dev.dev, | 1327 | if (device_create_file(&ibdev->ib_dev.dev, |
| 1246 | mlx4_class_attributes[i])) | 1328 | mlx4_class_attributes[j])) |
| 1247 | goto err_notif; | 1329 | goto err_notif; |
| 1248 | } | 1330 | } |
| 1249 | 1331 | ||
| @@ -1253,7 +1335,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
| 1253 | 1335 | ||
| 1254 | err_notif: | 1336 | err_notif: |
| 1255 | if (unregister_netdevice_notifier(&ibdev->iboe.nb)) | 1337 | if (unregister_netdevice_notifier(&ibdev->iboe.nb)) |
| 1256 | printk(KERN_WARNING "failure unregistering notifier\n"); | 1338 | pr_warn("failure unregistering notifier\n"); |
| 1257 | flush_workqueue(wq); | 1339 | flush_workqueue(wq); |
| 1258 | 1340 | ||
| 1259 | err_reg: | 1341 | err_reg: |
| @@ -1288,7 +1370,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) | |||
| 1288 | ib_unregister_device(&ibdev->ib_dev); | 1370 | ib_unregister_device(&ibdev->ib_dev); |
| 1289 | if (ibdev->iboe.nb.notifier_call) { | 1371 | if (ibdev->iboe.nb.notifier_call) { |
| 1290 | if (unregister_netdevice_notifier(&ibdev->iboe.nb)) | 1372 | if (unregister_netdevice_notifier(&ibdev->iboe.nb)) |
| 1291 | printk(KERN_WARNING "failure unregistering notifier\n"); | 1373 | pr_warn("failure unregistering notifier\n"); |
| 1292 | ibdev->iboe.nb.notifier_call = NULL; | 1374 | ibdev->iboe.nb.notifier_call = NULL; |
| 1293 | } | 1375 | } |
| 1294 | iounmap(ibdev->uar_map); | 1376 | iounmap(ibdev->uar_map); |
| @@ -1298,6 +1380,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) | |||
| 1298 | mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB) | 1380 | mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB) |
| 1299 | mlx4_CLOSE_PORT(dev, p); | 1381 | mlx4_CLOSE_PORT(dev, p); |
| 1300 | 1382 | ||
| 1383 | mlx4_ib_free_eqs(dev, ibdev); | ||
| 1384 | |||
| 1301 | mlx4_uar_free(dev, &ibdev->priv_uar); | 1385 | mlx4_uar_free(dev, &ibdev->priv_uar); |
| 1302 | mlx4_pd_free(dev, ibdev->priv_pdn); | 1386 | mlx4_pd_free(dev, ibdev->priv_pdn); |
| 1303 | ib_dealloc_device(&ibdev->ib_dev); | 1387 | ib_dealloc_device(&ibdev->ib_dev); |
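Reviewer note: mlx4_ib_alloc_eqs() splits the extra completion-vector pool evenly across IB ports, rounded down to a power of two, and advertises the enlarged total via num_comp_vectors. A worked example of that split, with a portable stand-in for the kernel's rounddown_pow_of_two() macro:

#include <stdio.h>

static unsigned rounddown_pow_of_two(unsigned n)
{
	unsigned p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

int main(void)
{
	unsigned comp_pool = 12, num_ports = 2, num_comp_vectors = 4;
	unsigned eq_per_port = rounddown_pow_of_two(comp_pool / num_ports);
	unsigned added = eq_per_port * num_ports;

	printf("eq_per_port=%u added=%u total=%u\n",
	       eq_per_port, added, num_comp_vectors + added);	/* 4 8 12 */
	return 0;
}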
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index ed80345c99ae..e62297cc77cc 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
| @@ -202,6 +202,8 @@ struct mlx4_ib_dev { | |||
| 202 | bool ib_active; | 202 | bool ib_active; |
| 203 | struct mlx4_ib_iboe iboe; | 203 | struct mlx4_ib_iboe iboe; |
| 204 | int counters[MLX4_MAX_PORTS]; | 204 | int counters[MLX4_MAX_PORTS]; |
| 205 | int *eq_table; | ||
| 206 | int eq_added; | ||
| 205 | }; | 207 | }; |
| 206 | 208 | ||
| 207 | static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev) | 209 | static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev) |
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index dca55b19a6f1..bbaf6176f207 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c | |||
| @@ -338,7 +338,7 @@ int mlx4_ib_unmap_fmr(struct list_head *fmr_list) | |||
| 338 | 338 | ||
| 339 | err = mlx4_SYNC_TPT(mdev); | 339 | err = mlx4_SYNC_TPT(mdev); |
| 340 | if (err) | 340 | if (err) |
| 341 | printk(KERN_WARNING "mlx4_ib: SYNC_TPT error %d when " | 341 | pr_warn("SYNC_TPT error %d when " |
| 342 | "unmapping FMRs\n", err); | 342 | "unmapping FMRs\n", err); |
| 343 | 343 | ||
| 344 | return 0; | 344 | return 0; |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 3a7848966627..ceb33327091a 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
| @@ -84,6 +84,11 @@ enum { | |||
| 84 | MLX4_IB_CACHE_LINE_SIZE = 64, | 84 | MLX4_IB_CACHE_LINE_SIZE = 64, |
| 85 | }; | 85 | }; |
| 86 | 86 | ||
| 87 | enum { | ||
| 88 | MLX4_RAW_QP_MTU = 7, | ||
| 89 | MLX4_RAW_QP_MSGMAX = 31, | ||
| 90 | }; | ||
| 91 | |||
| 87 | static const __be32 mlx4_ib_opcode[] = { | 92 | static const __be32 mlx4_ib_opcode[] = { |
| 88 | [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), | 93 | [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), |
| 89 | [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO), | 94 | [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO), |
| @@ -256,7 +261,7 @@ static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) | |||
| 256 | event.event = IB_EVENT_QP_ACCESS_ERR; | 261 | event.event = IB_EVENT_QP_ACCESS_ERR; |
| 257 | break; | 262 | break; |
| 258 | default: | 263 | default: |
| 259 | printk(KERN_WARNING "mlx4_ib: Unexpected event type %d " | 264 | pr_warn("Unexpected event type %d " |
| 260 | "on QP %06x\n", type, qp->qpn); | 265 | "on QP %06x\n", type, qp->qpn); |
| 261 | return; | 266 | return; |
| 262 | } | 267 | } |
| @@ -573,7 +578,12 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
| 573 | if (sqpn) { | 578 | if (sqpn) { |
| 574 | qpn = sqpn; | 579 | qpn = sqpn; |
| 575 | } else { | 580 | } else { |
| 576 | err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn); | 581 | /* Raw packet QPNs must have their low 8 bits clear; if not, the WQE | ||
| 582 | * BlueFlame setup flow wrongly causes VLAN insertion. */ | ||
| 583 | if (init_attr->qp_type == IB_QPT_RAW_PACKET) | ||
| 584 | err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn); | ||
| 585 | else | ||
| 586 | err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn); | ||
| 577 | if (err) | 587 | if (err) |
| 578 | goto err_wrid; | 588 | goto err_wrid; |
| 579 | } | 589 | } |
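
Note on the hunk above: raw packet QPNs are reserved with an alignment argument of 1 << 8, so the low eight bits of the returned QPN are zero and the BlueFlame WQE setup cannot misread them as a VLAN indication. A minimal userspace sketch of the same alignment arithmetic, assuming mlx4_qp_reserve_range() rounds its base up to the requested power-of-two alignment (align_qpn() below is a hypothetical helper, not a driver function):

    #include <stdint.h>
    #include <stdio.h>

    /* align_qpn() shows only the arithmetic an aligned reservation must
     * implement: round the next free QPN up to a multiple of 'align'
     * (a power of two). */
    static uint32_t align_qpn(uint32_t next_free, uint32_t align)
    {
            return (next_free + align - 1) & ~(align - 1);
    }

    int main(void)
    {
            /* With align = 1 << 8 the low 8 bits of the QPN are zero. */
            printf("0x%x\n", (unsigned)align_qpn(0x1234, 1 << 8)); /* 0x1300 */
            return 0;
    }
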
| @@ -715,7 +725,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, | |||
| 715 | if (qp->state != IB_QPS_RESET) | 725 | if (qp->state != IB_QPS_RESET) |
| 716 | if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), | 726 | if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), |
| 717 | MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) | 727 | MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) |
| 718 | printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n", | 728 | pr_warn("modify QP %06x to RESET failed.\n", |
| 719 | qp->mqp.qpn); | 729 | qp->mqp.qpn); |
| 720 | 730 | ||
| 721 | get_cqs(qp, &send_cq, &recv_cq); | 731 | get_cqs(qp, &send_cq, &recv_cq); |
| @@ -791,6 +801,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | |||
| 791 | case IB_QPT_RC: | 801 | case IB_QPT_RC: |
| 792 | case IB_QPT_UC: | 802 | case IB_QPT_UC: |
| 793 | case IB_QPT_UD: | 803 | case IB_QPT_UD: |
| 804 | case IB_QPT_RAW_PACKET: | ||
| 794 | { | 805 | { |
| 795 | qp = kzalloc(sizeof *qp, GFP_KERNEL); | 806 | qp = kzalloc(sizeof *qp, GFP_KERNEL); |
| 796 | if (!qp) | 807 | if (!qp) |
| @@ -872,7 +883,8 @@ static int to_mlx4_st(enum ib_qp_type type) | |||
| 872 | case IB_QPT_XRC_INI: | 883 | case IB_QPT_XRC_INI: |
| 873 | case IB_QPT_XRC_TGT: return MLX4_QP_ST_XRC; | 884 | case IB_QPT_XRC_TGT: return MLX4_QP_ST_XRC; |
| 874 | case IB_QPT_SMI: | 885 | case IB_QPT_SMI: |
| 875 | case IB_QPT_GSI: return MLX4_QP_ST_MLX; | 886 | case IB_QPT_GSI: |
| 887 | case IB_QPT_RAW_PACKET: return MLX4_QP_ST_MLX; | ||
| 876 | default: return -1; | 888 | default: return -1; |
| 877 | } | 889 | } |
| 878 | } | 890 | } |
| @@ -946,7 +958,7 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah, | |||
| 946 | 958 | ||
| 947 | if (ah->ah_flags & IB_AH_GRH) { | 959 | if (ah->ah_flags & IB_AH_GRH) { |
| 948 | if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) { | 960 | if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) { |
| 949 | printk(KERN_ERR "sgid_index (%u) too large. max is %d\n", | 961 | pr_err("sgid_index (%u) too large. max is %d\n", |
| 950 | ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1); | 962 | ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1); |
| 951 | return -1; | 963 | return -1; |
| 952 | } | 964 | } |
| @@ -1042,6 +1054,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
| 1042 | 1054 | ||
| 1043 | if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) | 1055 | if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) |
| 1044 | context->mtu_msgmax = (IB_MTU_4096 << 5) | 11; | 1056 | context->mtu_msgmax = (IB_MTU_4096 << 5) | 11; |
| 1057 | else if (ibqp->qp_type == IB_QPT_RAW_PACKET) | ||
| 1058 | context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX; | ||
| 1045 | else if (ibqp->qp_type == IB_QPT_UD) { | 1059 | else if (ibqp->qp_type == IB_QPT_UD) { |
| 1046 | if (qp->flags & MLX4_IB_QP_LSO) | 1060 | if (qp->flags & MLX4_IB_QP_LSO) |
| 1047 | context->mtu_msgmax = (IB_MTU_4096 << 5) | | 1061 | context->mtu_msgmax = (IB_MTU_4096 << 5) | |
| @@ -1050,7 +1064,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
| 1050 | context->mtu_msgmax = (IB_MTU_4096 << 5) | 12; | 1064 | context->mtu_msgmax = (IB_MTU_4096 << 5) | 12; |
| 1051 | } else if (attr_mask & IB_QP_PATH_MTU) { | 1065 | } else if (attr_mask & IB_QP_PATH_MTU) { |
| 1052 | if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) { | 1066 | if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) { |
| 1053 | printk(KERN_ERR "path MTU (%u) is invalid\n", | 1067 | pr_err("path MTU (%u) is invalid\n", |
| 1054 | attr->path_mtu); | 1068 | attr->path_mtu); |
| 1055 | goto out; | 1069 | goto out; |
| 1056 | } | 1070 | } |
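
For reference, mtu_msgmax packs an IB MTU code into bits 7:5 and log2 of the maximum message size into bits 4:0, so a raw packet QP gets (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX = 0xff, the largest encodable values. A small compile-and-run sketch of the packing, using the standard IB enum value IB_MTU_4096 == 5:

    #include <stdio.h>

    int main(void)
    {
            /* IB_MTU_4096 == 5 in the IB enums; raw packet QPs use the
             * MLX4_RAW_QP_* maxima from the new enum above. */
            unsigned ud  = (5u << 5) | 12; /* UD QP: 4K MTU, 4KB msgmax */
            unsigned raw = (7u << 5) | 31; /* raw packet QP: max values */
            printf("ud=0x%02x raw=0x%02x\n", ud, raw); /* 0xac 0xff */
            return 0;
    }
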
| @@ -1200,7 +1214,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
| 1200 | if (cur_state == IB_QPS_INIT && | 1214 | if (cur_state == IB_QPS_INIT && |
| 1201 | new_state == IB_QPS_RTR && | 1215 | new_state == IB_QPS_RTR && |
| 1202 | (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI || | 1216 | (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI || |
| 1203 | ibqp->qp_type == IB_QPT_UD)) { | 1217 | ibqp->qp_type == IB_QPT_UD || |
| 1218 | ibqp->qp_type == IB_QPT_RAW_PACKET)) { | ||
| 1204 | context->pri_path.sched_queue = (qp->port - 1) << 6; | 1219 | context->pri_path.sched_queue = (qp->port - 1) << 6; |
| 1205 | if (is_qp0(dev, qp)) | 1220 | if (is_qp0(dev, qp)) |
| 1206 | context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE; | 1221 | context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE; |
| @@ -1266,7 +1281,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
| 1266 | if (is_qp0(dev, qp)) { | 1281 | if (is_qp0(dev, qp)) { |
| 1267 | if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR) | 1282 | if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR) |
| 1268 | if (mlx4_INIT_PORT(dev->dev, qp->port)) | 1283 | if (mlx4_INIT_PORT(dev->dev, qp->port)) |
| 1269 | printk(KERN_WARNING "INIT_PORT failed for port %d\n", | 1284 | pr_warn("INIT_PORT failed for port %d\n", |
| 1270 | qp->port); | 1285 | qp->port); |
| 1271 | 1286 | ||
| 1272 | if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR && | 1287 | if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR && |
| @@ -1319,6 +1334,11 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
| 1319 | goto out; | 1334 | goto out; |
| 1320 | } | 1335 | } |
| 1321 | 1336 | ||
| 1337 | if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) && | ||
| 1338 | (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) != | ||
| 1339 | IB_LINK_LAYER_ETHERNET)) | ||
| 1340 | goto out; | ||
| 1341 | |||
| 1322 | if (attr_mask & IB_QP_PKEY_INDEX) { | 1342 | if (attr_mask & IB_QP_PKEY_INDEX) { |
| 1323 | int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; | 1343 | int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; |
| 1324 | if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) | 1344 | if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) |
| @@ -1424,6 +1444,9 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, | |||
| 1424 | 1444 | ||
| 1425 | if (is_eth) { | 1445 | if (is_eth) { |
| 1426 | u8 *smac; | 1446 | u8 *smac; |
| 1447 | u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13; | ||
| 1448 | |||
| 1449 | mlx->sched_prio = cpu_to_be16(pcp); | ||
| 1427 | 1450 | ||
| 1428 | memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6); | 1451 | memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6); |
| 1429 | /* FIXME: cache smac value? */ | 1452 | /* FIXME: cache smac value? */ |
| @@ -1434,10 +1457,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, | |||
| 1434 | if (!is_vlan) { | 1457 | if (!is_vlan) { |
| 1435 | sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE); | 1458 | sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE); |
| 1436 | } else { | 1459 | } else { |
| 1437 | u16 pcp; | ||
| 1438 | |||
| 1439 | sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE); | 1460 | sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE); |
| 1440 | pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13; | ||
| 1441 | sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp); | 1461 | sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp); |
| 1442 | } | 1462 | } |
| 1443 | } else { | 1463 | } else { |
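
The moved PCP computation takes the top three bits of sl_tclass_flowlabel (the service level) and shifts them into bits 15:13 of the 802.1Q TCI, where the priority code point lives; hoisting it above the is_vlan test lets the new mlx->sched_prio assignment reuse it. A runnable sketch of the bit movement, with illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t sl_tclass_flowlabel = 0xA0000000; /* SL bits = 0b101 */
            uint16_t pcp = (uint16_t)((sl_tclass_flowlabel >> 29) << 13);
            uint16_t vlan_id = 100;

            /* PCP occupies bits 15:13 of the 802.1Q TCI. */
            printf("tci=0x%04x\n", (unsigned)(vlan_id | pcp)); /* 0xa064 */
            return 0;
    }
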
| @@ -1460,16 +1480,16 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, | |||
| 1460 | header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); | 1480 | header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); |
| 1461 | 1481 | ||
| 1462 | if (0) { | 1482 | if (0) { |
| 1463 | printk(KERN_ERR "built UD header of size %d:\n", header_size); | 1483 | pr_err("built UD header of size %d:\n", header_size); |
| 1464 | for (i = 0; i < header_size / 4; ++i) { | 1484 | for (i = 0; i < header_size / 4; ++i) { |
| 1465 | if (i % 8 == 0) | 1485 | if (i % 8 == 0) |
| 1466 | printk(" [%02x] ", i * 4); | 1486 | pr_err(" [%02x] ", i * 4); |
| 1467 | printk(" %08x", | 1487 | pr_cont(" %08x", |
| 1468 | be32_to_cpu(((__be32 *) sqp->header_buf)[i])); | 1488 | be32_to_cpu(((__be32 *) sqp->header_buf)[i])); |
| 1469 | if ((i + 1) % 8 == 0) | 1489 | if ((i + 1) % 8 == 0) |
| 1470 | printk("\n"); | 1490 | pr_cont("\n"); |
| 1471 | } | 1491 | } |
| 1472 | printk("\n"); | 1492 | pr_err("\n"); |
| 1473 | } | 1493 | } |
| 1474 | 1494 | ||
| 1475 | /* | 1495 | /* |
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index 39542f3703b8..60c5fb025fc7 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c | |||
| @@ -59,7 +59,7 @@ static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type) | |||
| 59 | event.event = IB_EVENT_SRQ_ERR; | 59 | event.event = IB_EVENT_SRQ_ERR; |
| 60 | break; | 60 | break; |
| 61 | default: | 61 | default: |
| 62 | printk(KERN_WARNING "mlx4_ib: Unexpected event type %d " | 62 | pr_warn("Unexpected event type %d " |
| 63 | "on SRQ %06x\n", type, srq->srqn); | 63 | "on SRQ %06x\n", type, srq->srqn); |
| 64 | return; | 64 | return; |
| 65 | } | 65 | } |
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 71edfbbcce1c..020e95c4c4b9 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
| @@ -2884,7 +2884,8 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
| 2884 | ibevent.device = nesqp->ibqp.device; | 2884 | ibevent.device = nesqp->ibqp.device; |
| 2885 | ibevent.event = nesqp->terminate_eventtype; | 2885 | ibevent.event = nesqp->terminate_eventtype; |
| 2886 | ibevent.element.qp = &nesqp->ibqp; | 2886 | ibevent.element.qp = &nesqp->ibqp; |
| 2887 | nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); | 2887 | if (nesqp->ibqp.event_handler) |
| 2888 | nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); | ||
| 2888 | } | 2889 | } |
| 2889 | } | 2890 | } |
| 2890 | 2891 | ||
| @@ -3320,6 +3321,10 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
| 3320 | 3321 | ||
| 3321 | nesqp->private_data_len = conn_param->private_data_len; | 3322 | nesqp->private_data_len = conn_param->private_data_len; |
| 3322 | nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord); | 3323 | nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord); |
| 3324 | /* space for rdma0 read msg */ | ||
| 3325 | if (conn_param->ord == 0) | ||
| 3326 | nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(1); | ||
| 3327 | |||
| 3323 | nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord); | 3328 | nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord); |
| 3324 | nes_debug(NES_DBG_CM, "mpa private data len =%u\n", | 3329 | nes_debug(NES_DBG_CM, "mpa private data len =%u\n", |
| 3325 | conn_param->private_data_len); | 3330 | conn_param->private_data_len); |
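
The nes change covers peers that request an ORD of zero: iWARP connection setup still issues one zero-length RDMA read (the "rdma0" message the comment refers to), so the QP context must advertise at least one outstanding read. A trivial sketch of the intended clamping, with fixup_ord() as a hypothetical stand-in for the ird_ord_sizes update:

    #include <stdint.h>
    #include <stdio.h>

    /* An ORD of zero is bumped to 1 so the initial "rdma0" read has a
     * slot; any nonzero request is passed through. */
    static uint32_t fixup_ord(uint32_t requested_ord)
    {
            return requested_ord ? requested_ord : 1;
    }

    int main(void)
    {
            printf("%u %u\n", (unsigned)fixup_ord(0),
                   (unsigned)fixup_ord(4)); /* prints: 1 4 */
            return 0;
    }
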
diff --git a/drivers/infiniband/hw/ocrdma/Kconfig b/drivers/infiniband/hw/ocrdma/Kconfig new file mode 100644 index 000000000000..b5b6056c8518 --- /dev/null +++ b/drivers/infiniband/hw/ocrdma/Kconfig | |||
| @@ -0,0 +1,8 @@ | |||
| 1 | config INFINIBAND_OCRDMA | ||
| 2 | tristate "Emulex One Connect HCA support" | ||
| 3 | depends on ETHERNET && NETDEVICES && PCI && (IPV6 || IPV6=n) | ||
| 4 | select NET_VENDOR_EMULEX | ||
| 5 | select BE2NET | ||
| 6 | ---help--- | ||
| 7 | This driver provides low-level InfiniBand over Ethernet | ||
| 8 | support for Emulex One Connect host channel adapters (HCAs). | ||
diff --git a/drivers/infiniband/hw/ocrdma/Makefile b/drivers/infiniband/hw/ocrdma/Makefile new file mode 100644 index 000000000000..06a5bed12e43 --- /dev/null +++ b/drivers/infiniband/hw/ocrdma/Makefile | |||
| @@ -0,0 +1,5 @@ | |||
| 1 | ccflags-y := -Idrivers/net/ethernet/emulex/benet | ||
| 2 | |||
| 3 | obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma.o | ||
| 4 | |||
| 5 | ocrdma-y := ocrdma_main.o ocrdma_verbs.o ocrdma_hw.o ocrdma_ah.o | ||
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h new file mode 100644 index 000000000000..85a69c958559 --- /dev/null +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h | |||
| @@ -0,0 +1,393 @@ | |||
| 1 | /******************************************************************* | ||
| 2 | * This file is part of the Emulex RoCE Device Driver for * | ||
| 3 | * RoCE (RDMA over Converged Ethernet) adapters. * | ||
| 4 | * Copyright (C) 2008-2012 Emulex. All rights reserved. * | ||
| 5 | * EMULEX and SLI are trademarks of Emulex. * | ||
| 6 | * www.emulex.com * | ||
| 7 | * * | ||
| 8 | * This program is free software; you can redistribute it and/or * | ||
| 9 | * modify it under the terms of version 2 of the GNU General * | ||
| 10 | * Public License as published by the Free Software Foundation. * | ||
| 11 | * This program is distributed in the hope that it will be useful. * | ||
| 12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | ||
| 13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | ||
| 15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | ||
| 16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | ||
| 17 | * more details, a copy of which can be found in the file COPYING * | ||
| 18 | * included with this package. * | ||
| 19 | * | ||
| 20 | * Contact Information: | ||
| 21 | * linux-drivers@emulex.com | ||
| 22 | * | ||
| 23 | * Emulex | ||
| 24 | * 3333 Susan Street | ||
| 25 | * Costa Mesa, CA 92626 | ||
| 26 | *******************************************************************/ | ||
| 27 | |||
| 28 | #ifndef __OCRDMA_H__ | ||
| 29 | #define __OCRDMA_H__ | ||
| 30 | |||
| 31 | #include <linux/mutex.h> | ||
| 32 | #include <linux/list.h> | ||
| 33 | #include <linux/spinlock.h> | ||
| 34 | #include <linux/pci.h> | ||
| 35 | |||
| 36 | #include <rdma/ib_verbs.h> | ||
| 37 | #include <rdma/ib_user_verbs.h> | ||
| 38 | |||
| 39 | #include <be_roce.h> | ||
| 40 | #include "ocrdma_sli.h" | ||
| 41 | |||
| 42 | #define OCRDMA_ROCE_DEV_VERSION "1.0.0" | ||
| 43 | #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" | ||
| 44 | |||
| 45 | #define ocrdma_err(format, arg...) printk(KERN_ERR format, ##arg) | ||
| 46 | |||
| 47 | #define OCRDMA_MAX_AH 512 | ||
| 48 | |||
| 49 | #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) | ||
| 50 | |||
| 51 | struct ocrdma_dev_attr { | ||
| 52 | u8 fw_ver[32]; | ||
| 53 | u32 vendor_id; | ||
| 54 | u32 device_id; | ||
| 55 | u16 max_pd; | ||
| 56 | u16 max_cq; | ||
| 57 | u16 max_cqe; | ||
| 58 | u16 max_qp; | ||
| 59 | u16 max_wqe; | ||
| 60 | u16 max_rqe; | ||
| 61 | u32 max_inline_data; | ||
| 62 | int max_send_sge; | ||
| 63 | int max_recv_sge; | ||
| 64 | int max_mr; | ||
| 65 | u64 max_mr_size; | ||
| 66 | u32 max_num_mr_pbl; | ||
| 67 | int max_fmr; | ||
| 68 | int max_map_per_fmr; | ||
| 69 | int max_pages_per_frmr; | ||
| 70 | u16 max_ord_per_qp; | ||
| 71 | u16 max_ird_per_qp; | ||
| 72 | |||
| 73 | int device_cap_flags; | ||
| 74 | u8 cq_overflow_detect; | ||
| 75 | u8 srq_supported; | ||
| 76 | |||
| 77 | u32 wqe_size; | ||
| 78 | u32 rqe_size; | ||
| 79 | u32 ird_page_size; | ||
| 80 | u8 local_ca_ack_delay; | ||
| 81 | u8 ird; | ||
| 82 | u8 num_ird_pages; | ||
| 83 | }; | ||
| 84 | |||
| 85 | struct ocrdma_pbl { | ||
| 86 | void *va; | ||
| 87 | dma_addr_t pa; | ||
| 88 | }; | ||
| 89 | |||
| 90 | struct ocrdma_queue_info { | ||
| 91 | void *va; | ||
| 92 | dma_addr_t dma; | ||
| 93 | u32 size; | ||
| 94 | u16 len; | ||
| 95 | u16 entry_size; /* Size of an element in the queue */ | ||
| 96 | u16 id; /* qid, where to ring the doorbell. */ | ||
| 97 | u16 head, tail; | ||
| 98 | bool created; | ||
| 99 | atomic_t used; /* Number of valid elements in the queue */ | ||
| 100 | }; | ||
| 101 | |||
| 102 | struct ocrdma_eq { | ||
| 103 | struct ocrdma_queue_info q; | ||
| 104 | u32 vector; | ||
| 105 | int cq_cnt; | ||
| 106 | struct ocrdma_dev *dev; | ||
| 107 | char irq_name[32]; | ||
| 108 | }; | ||
| 109 | |||
| 110 | struct ocrdma_mq { | ||
| 111 | struct ocrdma_queue_info sq; | ||
| 112 | struct ocrdma_queue_info cq; | ||
| 113 | bool rearm_cq; | ||
| 114 | }; | ||
| 115 | |||
| 116 | struct mqe_ctx { | ||
| 117 | struct mutex lock; /* for serializing mailbox commands on MQ */ | ||
| 118 | wait_queue_head_t cmd_wait; | ||
| 119 | u32 tag; | ||
| 120 | u16 cqe_status; | ||
| 121 | u16 ext_status; | ||
| 122 | bool cmd_done; | ||
| 123 | }; | ||
| 124 | |||
| 125 | struct ocrdma_dev { | ||
| 126 | struct ib_device ibdev; | ||
| 127 | struct ocrdma_dev_attr attr; | ||
| 128 | |||
| 129 | struct mutex dev_lock; /* provides synchronized access to device data */ | ||
| 130 | spinlock_t flush_q_lock ____cacheline_aligned; | ||
| 131 | |||
| 132 | struct ocrdma_cq **cq_tbl; | ||
| 133 | struct ocrdma_qp **qp_tbl; | ||
| 134 | |||
| 135 | struct ocrdma_eq meq; | ||
| 136 | struct ocrdma_eq *qp_eq_tbl; | ||
| 137 | int eq_cnt; | ||
| 138 | u16 base_eqid; | ||
| 139 | u16 max_eq; | ||
| 140 | |||
| 141 | union ib_gid *sgid_tbl; | ||
| 142 | /* provides synchronization to the sgid table for | ||
| 143 | * gid entry updates triggered by the notifier. | ||
| 144 | */ | ||
| 145 | spinlock_t sgid_lock; | ||
| 146 | |||
| 147 | int gsi_qp_created; | ||
| 148 | struct ocrdma_cq *gsi_sqcq; | ||
| 149 | struct ocrdma_cq *gsi_rqcq; | ||
| 150 | |||
| 151 | struct { | ||
| 152 | struct ocrdma_av *va; | ||
| 153 | dma_addr_t pa; | ||
| 154 | u32 size; | ||
| 155 | u32 num_ah; | ||
| 156 | /* provides synchronization for av | ||
| 157 | * entry allocations. | ||
| 158 | */ | ||
| 159 | spinlock_t lock; | ||
| 160 | u32 ahid; | ||
| 161 | struct ocrdma_pbl pbl; | ||
| 162 | } av_tbl; | ||
| 163 | |||
| 164 | void *mbx_cmd; | ||
| 165 | struct ocrdma_mq mq; | ||
| 166 | struct mqe_ctx mqe_ctx; | ||
| 167 | |||
| 168 | struct be_dev_info nic_info; | ||
| 169 | |||
| 170 | struct list_head entry; | ||
| 171 | struct rcu_head rcu; | ||
| 172 | int id; | ||
| 173 | }; | ||
| 174 | |||
| 175 | struct ocrdma_cq { | ||
| 176 | struct ib_cq ibcq; | ||
| 177 | struct ocrdma_dev *dev; | ||
| 178 | struct ocrdma_cqe *va; | ||
| 179 | u32 phase; | ||
| 180 | u32 getp; /* index of the next pending wr to | ||
| 181 | * return to the stack; wraps around | ||
| 182 | * at max_hw_cqe | ||
| 183 | */ | ||
| 184 | u32 max_hw_cqe; | ||
| 185 | bool phase_change; | ||
| 186 | bool armed, solicited; | ||
| 187 | bool arm_needed; | ||
| 188 | |||
| 189 | spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization | ||
| 190 | * to cq polling | ||
| 191 | */ | ||
| 192 | /* synchronizes the cq completion handler invoked from multiple contexts */ | ||
| 193 | spinlock_t comp_handler_lock ____cacheline_aligned; | ||
| 194 | u16 id; | ||
| 195 | u16 eqn; | ||
| 196 | |||
| 197 | struct ocrdma_ucontext *ucontext; | ||
| 198 | dma_addr_t pa; | ||
| 199 | u32 len; | ||
| 200 | atomic_t use_cnt; | ||
| 201 | |||
| 202 | /* heads of all qps' sq and rq lists whose cqes need to be flushed | ||
| 203 | * by software. | ||
| 204 | */ | ||
| 205 | struct list_head sq_head, rq_head; | ||
| 206 | }; | ||
| 207 | |||
| 208 | struct ocrdma_pd { | ||
| 209 | struct ib_pd ibpd; | ||
| 210 | struct ocrdma_dev *dev; | ||
| 211 | struct ocrdma_ucontext *uctx; | ||
| 212 | atomic_t use_cnt; | ||
| 213 | u32 id; | ||
| 214 | int num_dpp_qp; | ||
| 215 | u32 dpp_page; | ||
| 216 | bool dpp_enabled; | ||
| 217 | }; | ||
| 218 | |||
| 219 | struct ocrdma_ah { | ||
| 220 | struct ib_ah ibah; | ||
| 221 | struct ocrdma_dev *dev; | ||
| 222 | struct ocrdma_av *av; | ||
| 223 | u16 sgid_index; | ||
| 224 | u32 id; | ||
| 225 | }; | ||
| 226 | |||
| 227 | struct ocrdma_qp_hwq_info { | ||
| 228 | u8 *va; /* virtual address */ | ||
| 229 | u32 max_sges; | ||
| 230 | u32 head, tail; | ||
| 231 | u32 entry_size; | ||
| 232 | u32 max_cnt; | ||
| 233 | u32 max_wqe_idx; | ||
| 234 | u32 free_delta; | ||
| 235 | u16 dbid; /* qid, where to ring the doorbell. */ | ||
| 236 | u32 len; | ||
| 237 | dma_addr_t pa; | ||
| 238 | }; | ||
| 239 | |||
| 240 | struct ocrdma_srq { | ||
| 241 | struct ib_srq ibsrq; | ||
| 242 | struct ocrdma_dev *dev; | ||
| 243 | u8 __iomem *db; | ||
| 244 | /* provides synchronization for multiple contexts posting rqes */ | ||
| 245 | spinlock_t q_lock ____cacheline_aligned; | ||
| 246 | |||
| 247 | struct ocrdma_qp_hwq_info rq; | ||
| 248 | struct ocrdma_pd *pd; | ||
| 249 | atomic_t use_cnt; | ||
| 250 | u32 id; | ||
| 251 | u64 *rqe_wr_id_tbl; | ||
| 252 | u32 *idx_bit_fields; | ||
| 253 | u32 bit_fields_len; | ||
| 254 | }; | ||
| 255 | |||
| 256 | struct ocrdma_qp { | ||
| 257 | struct ib_qp ibqp; | ||
| 258 | struct ocrdma_dev *dev; | ||
| 259 | |||
| 260 | u8 __iomem *sq_db; | ||
| 261 | /* provides synchronization for multiple contexts posting wqes and rqes */ | ||
| 262 | spinlock_t q_lock ____cacheline_aligned; | ||
| 263 | struct ocrdma_qp_hwq_info sq; | ||
| 264 | struct { | ||
| 265 | uint64_t wrid; | ||
| 266 | uint16_t dpp_wqe_idx; | ||
| 267 | uint16_t dpp_wqe; | ||
| 268 | uint8_t signaled; | ||
| 269 | uint8_t rsvd[3]; | ||
| 270 | } *wqe_wr_id_tbl; | ||
| 271 | u32 max_inline_data; | ||
| 272 | struct ocrdma_cq *sq_cq; | ||
| 273 | /* list maintained per CQ to flush SQ errors */ | ||
| 274 | struct list_head sq_entry; | ||
| 275 | |||
| 276 | u8 __iomem *rq_db; | ||
| 277 | struct ocrdma_qp_hwq_info rq; | ||
| 278 | u64 *rqe_wr_id_tbl; | ||
| 279 | struct ocrdma_cq *rq_cq; | ||
| 280 | struct ocrdma_srq *srq; | ||
| 281 | /* list maintained per CQ to flush RQ errors */ | ||
| 282 | struct list_head rq_entry; | ||
| 283 | |||
| 284 | enum ocrdma_qp_state state; /* QP state */ | ||
| 285 | int cap_flags; | ||
| 286 | u32 max_ord, max_ird; | ||
| 287 | |||
| 288 | u32 id; | ||
| 289 | struct ocrdma_pd *pd; | ||
| 290 | |||
| 291 | enum ib_qp_type qp_type; | ||
| 292 | |||
| 293 | int sgid_idx; | ||
| 294 | u32 qkey; | ||
| 295 | bool dpp_enabled; | ||
| 296 | u8 *ird_q_va; | ||
| 297 | }; | ||
| 298 | |||
| 299 | #define OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp) \ | ||
| 300 | (((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) && \ | ||
| 301 | (qp->id < 64)) ? 24 : 16) | ||
| 302 | |||
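
OCRDMA_GET_NUM_POSTED_SHIFT_VAL selects where the "number of WQEs posted" field sits in a doorbell word: bit 24 for QP ids below 64 on the Gen2 family, bit 16 otherwise. A simplified stand-in for the macro, assuming only the ternary above:

    #include <stdio.h>

    /* The doorbell's "number posted" field is at bit 24 for low QP ids
     * on Gen2 hardware and at bit 16 everywhere else. */
    static int posted_shift(int is_gen2_family, unsigned qp_id)
    {
            return (is_gen2_family && qp_id < 64) ? 24 : 16;
    }

    int main(void)
    {
            unsigned db = 1u << posted_shift(1, 10); /* one WQE posted */
            printf("0x%08x\n", db);                  /* 0x01000000 */
            return 0;
    }
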
| 303 | struct ocrdma_hw_mr { | ||
| 304 | struct ocrdma_dev *dev; | ||
| 305 | u32 lkey; | ||
| 306 | u8 fr_mr; | ||
| 307 | u8 remote_atomic; | ||
| 308 | u8 remote_rd; | ||
| 309 | u8 remote_wr; | ||
| 310 | u8 local_rd; | ||
| 311 | u8 local_wr; | ||
| 312 | u8 mw_bind; | ||
| 313 | u8 rsvd; | ||
| 314 | u64 len; | ||
| 315 | struct ocrdma_pbl *pbl_table; | ||
| 316 | u32 num_pbls; | ||
| 317 | u32 num_pbes; | ||
| 318 | u32 pbl_size; | ||
| 319 | u32 pbe_size; | ||
| 320 | u64 fbo; | ||
| 321 | u64 va; | ||
| 322 | }; | ||
| 323 | |||
| 324 | struct ocrdma_mr { | ||
| 325 | struct ib_mr ibmr; | ||
| 326 | struct ib_umem *umem; | ||
| 327 | struct ocrdma_hw_mr hwmr; | ||
| 328 | struct ocrdma_pd *pd; | ||
| 329 | }; | ||
| 330 | |||
| 331 | struct ocrdma_ucontext { | ||
| 332 | struct ib_ucontext ibucontext; | ||
| 333 | struct ocrdma_dev *dev; | ||
| 334 | |||
| 335 | struct list_head mm_head; | ||
| 336 | struct mutex mm_list_lock; /* protects list entries of mm type */ | ||
| 337 | struct { | ||
| 338 | u32 *va; | ||
| 339 | dma_addr_t pa; | ||
| 340 | u32 len; | ||
| 341 | } ah_tbl; | ||
| 342 | }; | ||
| 343 | |||
| 344 | struct ocrdma_mm { | ||
| 345 | struct { | ||
| 346 | u64 phy_addr; | ||
| 347 | unsigned long len; | ||
| 348 | } key; | ||
| 349 | struct list_head entry; | ||
| 350 | }; | ||
| 351 | |||
| 352 | static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev) | ||
| 353 | { | ||
| 354 | return container_of(ibdev, struct ocrdma_dev, ibdev); | ||
| 355 | } | ||
| 356 | |||
| 357 | static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext | ||
| 358 | *ibucontext) | ||
| 359 | { | ||
| 360 | return container_of(ibucontext, struct ocrdma_ucontext, ibucontext); | ||
| 361 | } | ||
| 362 | |||
| 363 | static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd) | ||
| 364 | { | ||
| 365 | return container_of(ibpd, struct ocrdma_pd, ibpd); | ||
| 366 | } | ||
| 367 | |||
| 368 | static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq) | ||
| 369 | { | ||
| 370 | return container_of(ibcq, struct ocrdma_cq, ibcq); | ||
| 371 | } | ||
| 372 | |||
| 373 | static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp) | ||
| 374 | { | ||
| 375 | return container_of(ibqp, struct ocrdma_qp, ibqp); | ||
| 376 | } | ||
| 377 | |||
| 378 | static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr) | ||
| 379 | { | ||
| 380 | return container_of(ibmr, struct ocrdma_mr, ibmr); | ||
| 381 | } | ||
| 382 | |||
| 383 | static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah) | ||
| 384 | { | ||
| 385 | return container_of(ibah, struct ocrdma_ah, ibah); | ||
| 386 | } | ||
| 387 | |||
| 388 | static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq) | ||
| 389 | { | ||
| 390 | return container_of(ibsrq, struct ocrdma_srq, ibsrq); | ||
| 391 | } | ||
| 392 | |||
| 393 | #endif | ||
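
The get_ocrdma_*() helpers above all follow the kernel container_of() idiom: given a pointer to the embedded ib_* core object, subtract the member's offset to recover the enclosing driver structure. A self-contained userspace model, with the struct layouts reduced to the minimum needed to demonstrate the pointer arithmetic:

    #include <stddef.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's container_of(). */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ib_pd { int dummy; };
    struct ocrdma_pd_demo { int id; struct ib_pd ibpd; };

    static struct ocrdma_pd_demo *get_pd(struct ib_pd *ibpd)
    {
            return container_of(ibpd, struct ocrdma_pd_demo, ibpd);
    }

    int main(void)
    {
            struct ocrdma_pd_demo pd = { .id = 7 };
            printf("%d\n", get_pd(&pd.ibpd)->id); /* prints 7 */
            return 0;
    }
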
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h new file mode 100644 index 000000000000..a411a4e3193d --- /dev/null +++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h | |||
| @@ -0,0 +1,134 @@ | |||
| 1 | /******************************************************************* | ||
| 2 | * This file is part of the Emulex RoCE Device Driver for * | ||
| 3 | * RoCE (RDMA over Converged Ethernet) adapters. * | ||
| 4 | * Copyright (C) 2008-2012 Emulex. All rights reserved. * | ||
| 5 | * EMULEX and SLI are trademarks of Emulex. * | ||
| 6 | * www.emulex.com * | ||
| 7 | * * | ||
| 8 | * This program is free software; you can redistribute it and/or * | ||
| 9 | * modify it under the terms of version 2 of the GNU General * | ||
| 10 | * Public License as published by the Free Software Foundation. * | ||
| 11 | * This program is distributed in the hope that it will be useful. * | ||
| 12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | ||
| 13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | ||
| 15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | ||
| 16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | ||
| 17 | * more details, a copy of which can be found in the file COPYING * | ||
| 18 | * included with this package. * | ||
| 19 | * | ||
| 20 | * Contact Information: | ||
| 21 | * linux-drivers@emulex.com | ||
| 22 | * | ||
| 23 | * Emulex | ||
| 24 | * 3333 Susan Street | ||
| 25 | * Costa Mesa, CA 92626 | ||
| 26 | *******************************************************************/ | ||
| 27 | |||
| 28 | #ifndef __OCRDMA_ABI_H__ | ||
| 29 | #define __OCRDMA_ABI_H__ | ||
| 30 | |||
| 31 | struct ocrdma_alloc_ucontext_resp { | ||
| 32 | u32 dev_id; | ||
| 33 | u32 wqe_size; | ||
| 34 | u32 max_inline_data; | ||
| 35 | u32 dpp_wqe_size; | ||
| 36 | u64 ah_tbl_page; | ||
| 37 | u32 ah_tbl_len; | ||
| 38 | u32 rsvd; | ||
| 39 | u8 fw_ver[32]; | ||
| 40 | u32 rqe_size; | ||
| 41 | u64 rsvd1; | ||
| 42 | } __packed; | ||
| 43 | |||
| 44 | /* user/kernel communication data structures. */ | ||
| 45 | struct ocrdma_alloc_pd_ureq { | ||
| 46 | u64 rsvd1; | ||
| 47 | } __packed; | ||
| 48 | |||
| 49 | struct ocrdma_alloc_pd_uresp { | ||
| 50 | u32 id; | ||
| 51 | u32 dpp_enabled; | ||
| 52 | u32 dpp_page_addr_hi; | ||
| 53 | u32 dpp_page_addr_lo; | ||
| 54 | u64 rsvd1; | ||
| 55 | } __packed; | ||
| 56 | |||
| 57 | struct ocrdma_create_cq_ureq { | ||
| 58 | u32 dpp_cq; | ||
| 59 | u32 rsvd; | ||
| 60 | } __packed; | ||
| 61 | |||
| 62 | #define MAX_CQ_PAGES 8 | ||
| 63 | struct ocrdma_create_cq_uresp { | ||
| 64 | u32 cq_id; | ||
| 65 | u32 page_size; | ||
| 66 | u32 num_pages; | ||
| 67 | u32 max_hw_cqe; | ||
| 68 | u64 page_addr[MAX_CQ_PAGES]; | ||
| 69 | u64 db_page_addr; | ||
| 70 | u32 db_page_size; | ||
| 71 | u32 phase_change; | ||
| 72 | u64 rsvd1; | ||
| 73 | u64 rsvd2; | ||
| 74 | } __packed; | ||
| 75 | |||
| 76 | #define MAX_QP_PAGES 8 | ||
| 77 | #define MAX_UD_AV_PAGES 8 | ||
| 78 | |||
| 79 | struct ocrdma_create_qp_ureq { | ||
| 80 | u8 enable_dpp_cq; | ||
| 81 | u8 rsvd; | ||
| 82 | u16 dpp_cq_id; | ||
| 83 | u32 rsvd1; | ||
| 84 | }; | ||
| 85 | |||
| 86 | struct ocrdma_create_qp_uresp { | ||
| 87 | u16 qp_id; | ||
| 88 | u16 sq_dbid; | ||
| 89 | u16 rq_dbid; | ||
| 90 | u16 resv0; | ||
| 91 | u32 sq_page_size; | ||
| 92 | u32 rq_page_size; | ||
| 93 | u32 num_sq_pages; | ||
| 94 | u32 num_rq_pages; | ||
| 95 | u64 sq_page_addr[MAX_QP_PAGES]; | ||
| 96 | u64 rq_page_addr[MAX_QP_PAGES]; | ||
| 97 | u64 db_page_addr; | ||
| 98 | u32 db_page_size; | ||
| 99 | u32 dpp_credit; | ||
| 100 | u32 dpp_offset; | ||
| 101 | u32 rsvd1; | ||
| 102 | u32 num_wqe_allocated; | ||
| 103 | u32 num_rqe_allocated; | ||
| 104 | u32 free_wqe_delta; | ||
| 105 | u32 free_rqe_delta; | ||
| 106 | u32 db_sq_offset; | ||
| 107 | u32 db_rq_offset; | ||
| 108 | u32 db_shift; | ||
| 109 | u64 rsvd2; | ||
| 110 | u64 rsvd3; | ||
| 111 | } __packed; | ||
| 112 | |||
| 113 | struct ocrdma_create_srq_uresp { | ||
| 114 | u16 rq_dbid; | ||
| 115 | u16 resv0; | ||
| 116 | u32 resv1; | ||
| 117 | |||
| 118 | u32 rq_page_size; | ||
| 119 | u32 num_rq_pages; | ||
| 120 | |||
| 121 | u64 rq_page_addr[MAX_QP_PAGES]; | ||
| 122 | u64 db_page_addr; | ||
| 123 | |||
| 124 | u32 db_page_size; | ||
| 125 | u32 num_rqe_allocated; | ||
| 126 | u32 db_rq_offset; | ||
| 127 | u32 db_shift; | ||
| 128 | |||
| 129 | u32 free_rqe_delta; | ||
| 130 | u32 rsvd2; | ||
| 131 | u64 rsvd3; | ||
| 132 | } __packed; | ||
| 133 | |||
| 134 | #endif /* __OCRDMA_ABI_H__ */ | ||
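
These structures are copied byte-for-byte across the user/kernel boundary, so __packed pins the layout regardless of the compiler's natural padding. An illustrative comparison (not one of the real ocrdma ABI layouts), assuming a GCC/Clang toolchain for __attribute__((packed)):

    #include <stdint.h>
    #include <stdio.h>

    /* Without packing, the compiler may pad u32/u64 members differently
     * on each side of the user/kernel copy. */
    struct demo_padded { uint32_t a; uint64_t b; };
    struct demo_packed { uint32_t a; uint64_t b; } __attribute__((packed));

    int main(void)
    {
            /* Typically prints "16 12" on x86-64. */
            printf("%zu %zu\n", sizeof(struct demo_padded),
                   sizeof(struct demo_packed));
            return 0;
    }
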
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c new file mode 100644 index 000000000000..a877a8ed7907 --- /dev/null +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c | |||
| @@ -0,0 +1,172 @@ | |||
| 1 | /******************************************************************* | ||
| 2 | * This file is part of the Emulex RoCE Device Driver for * | ||
| 3 | * RoCE (RDMA over Converged Ethernet) adapters. * | ||
| 4 | * Copyright (C) 2008-2012 Emulex. All rights reserved. * | ||
| 5 | * EMULEX and SLI are trademarks of Emulex. * | ||
| 6 | * www.emulex.com * | ||
| 7 | * * | ||
| 8 | * This program is free software; you can redistribute it and/or * | ||
| 9 | * modify it under the terms of version 2 of the GNU General * | ||
| 10 | * Public License as published by the Free Software Foundation. * | ||
| 11 | * This program is distributed in the hope that it will be useful. * | ||
| 12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | ||
| 13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | ||
| 15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | ||
| 16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | ||
| 17 | * more details, a copy of which can be found in the file COPYING * | ||
| 18 | * included with this package. * | ||
| 19 | * | ||
| 20 | * Contact Information: | ||
| 21 | * linux-drivers@emulex.com | ||
| 22 | * | ||
| 23 | * Emulex | ||
| 24 | * 3333 Susan Street | ||
| 25 | * Costa Mesa, CA 92626 | ||
| 26 | *******************************************************************/ | ||
| 27 | |||
| 28 | #include <net/neighbour.h> | ||
| 29 | #include <net/netevent.h> | ||
| 30 | |||
| 31 | #include <rdma/ib_addr.h> | ||
| 32 | #include <rdma/ib_cache.h> | ||
| 33 | |||
| 34 | #include "ocrdma.h" | ||
| 35 | #include "ocrdma_verbs.h" | ||
| 36 | #include "ocrdma_ah.h" | ||
| 37 | #include "ocrdma_hw.h" | ||
| 38 | |||
| 39 | static inline int set_av_attr(struct ocrdma_ah *ah, | ||
| 40 | struct ib_ah_attr *attr, int pdid) | ||
| 41 | { | ||
| 42 | int status = 0; | ||
| 43 | u16 vlan_tag; bool vlan_enabled = false; | ||
| 44 | struct ocrdma_dev *dev = ah->dev; | ||
| 45 | struct ocrdma_eth_vlan eth; | ||
| 46 | struct ocrdma_grh grh; | ||
| 47 | int eth_sz; | ||
| 48 | |||
| 49 | memset(ð, 0, sizeof(eth)); | ||
| 50 | memset(&grh, 0, sizeof(grh)); | ||
| 51 | |||
| 52 | ah->sgid_index = attr->grh.sgid_index; | ||
| 53 | |||
| 54 | vlan_tag = rdma_get_vlan_id(&attr->grh.dgid); | ||
| 55 | if (vlan_tag && (vlan_tag < 0x1000)) { | ||
| 56 | eth.eth_type = cpu_to_be16(0x8100); | ||
| 57 | eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); | ||
| 58 | vlan_tag |= (attr->sl & 7) << 13; | ||
| 59 | eth.vlan_tag = cpu_to_be16(vlan_tag); | ||
| 60 | eth_sz = sizeof(struct ocrdma_eth_vlan); | ||
| 61 | vlan_enabled = true; | ||
| 62 | } else { | ||
| 63 | eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); | ||
| 64 | eth_sz = sizeof(struct ocrdma_eth_basic); | ||
| 65 | } | ||
| 66 | memcpy(ð.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN); | ||
| 67 | status = ocrdma_resolve_dgid(dev, &attr->grh.dgid, ð.dmac[0]); | ||
| 68 | if (status) | ||
| 69 | return status; | ||
| 70 | status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, | ||
| 71 | (union ib_gid *)&grh.sgid[0]); | ||
| 72 | if (status) | ||
| 73 | return status; | ||
| 74 | |||
| 75 | grh.tclass_flow = cpu_to_be32((6 << 28) | | ||
| 76 | (attr->grh.traffic_class << 24) | | ||
| 77 | attr->grh.flow_label); | ||
| 78 | /* 0x1b is next header value in GRH */ | ||
| 79 | grh.pdid_hoplimit = cpu_to_be32((pdid << 16) | | ||
| 80 | (0x1b << 8) | attr->grh.hop_limit); | ||
| 81 | |||
| 82 | memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw)); | ||
| 83 | memcpy(&ah->av->eth_hdr, ð, eth_sz); | ||
| 84 | memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh)); | ||
| 85 | if (vlan_enabled) | ||
| 86 | ah->av->valid |= OCRDMA_AV_VLAN_VALID; | ||
| 87 | return status; | ||
| 88 | } | ||
| 89 | |||
| 90 | struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) | ||
| 91 | { | ||
| 92 | u32 *ahid_addr; | ||
| 93 | int status; | ||
| 94 | struct ocrdma_ah *ah; | ||
| 95 | struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); | ||
| 96 | struct ocrdma_dev *dev = pd->dev; | ||
| 97 | |||
| 98 | if (!(attr->ah_flags & IB_AH_GRH)) | ||
| 99 | return ERR_PTR(-EINVAL); | ||
| 100 | |||
| 101 | ah = kzalloc(sizeof *ah, GFP_ATOMIC); | ||
| 102 | if (!ah) | ||
| 103 | return ERR_PTR(-ENOMEM); | ||
| 104 | ah->dev = pd->dev; | ||
| 105 | |||
| 106 | status = ocrdma_alloc_av(dev, ah); | ||
| 107 | if (status) | ||
| 108 | goto av_err; | ||
| 109 | status = set_av_attr(ah, attr, pd->id); | ||
| 110 | if (status) | ||
| 111 | goto av_conf_err; | ||
| 112 | |||
| 113 | /* if pd is for the user process, pass the ah_id to user space */ | ||
| 114 | if ((pd->uctx) && (pd->uctx->ah_tbl.va)) { | ||
| 115 | ahid_addr = pd->uctx->ah_tbl.va + attr->dlid; | ||
| 116 | *ahid_addr = ah->id; | ||
| 117 | } | ||
| 118 | return &ah->ibah; | ||
| 119 | |||
| 120 | av_conf_err: | ||
| 121 | ocrdma_free_av(dev, ah); | ||
| 122 | av_err: | ||
| 123 | kfree(ah); | ||
| 124 | return ERR_PTR(status); | ||
| 125 | } | ||
| 126 | |||
| 127 | int ocrdma_destroy_ah(struct ib_ah *ibah) | ||
| 128 | { | ||
| 129 | struct ocrdma_ah *ah = get_ocrdma_ah(ibah); | ||
| 130 | ocrdma_free_av(ah->dev, ah); | ||
| 131 | kfree(ah); | ||
| 132 | return 0; | ||
| 133 | } | ||
| 134 | |||
| 135 | int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr) | ||
| 136 | { | ||
| 137 | struct ocrdma_ah *ah = get_ocrdma_ah(ibah); | ||
| 138 | struct ocrdma_av *av = ah->av; | ||
| 139 | struct ocrdma_grh *grh; | ||
| 140 | attr->ah_flags |= IB_AH_GRH; | ||
| 141 | if (ah->av->valid & OCRDMA_AV_VLAN_VALID) { | ||
| 142 | grh = (struct ocrdma_grh *)((u8 *)ah->av + | ||
| 143 | sizeof(struct ocrdma_eth_vlan)); | ||
| 144 | attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13; | ||
| 145 | } else { | ||
| 146 | grh = (struct ocrdma_grh *)((u8 *)ah->av + | ||
| 147 | sizeof(struct ocrdma_eth_basic)); | ||
| 148 | attr->sl = 0; | ||
| 149 | } | ||
| 150 | memcpy(&attr->grh.dgid.raw[0], &grh->dgid[0], sizeof(grh->dgid)); | ||
| 151 | attr->grh.sgid_index = ah->sgid_index; | ||
| 152 | attr->grh.hop_limit = be32_to_cpu(grh->pdid_hoplimit) & 0xff; | ||
| 153 | attr->grh.traffic_class = be32_to_cpu(grh->tclass_flow) >> 24; | ||
| 154 | attr->grh.flow_label = be32_to_cpu(grh->tclass_flow) & 0x000fffff; | ||
| 155 | return 0; | ||
| 156 | } | ||
| 157 | |||
| 158 | int ocrdma_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr) | ||
| 159 | { | ||
| 160 | /* modify_ah is unsupported */ | ||
| 161 | return -ENOSYS; | ||
| 162 | } | ||
| 163 | |||
| 164 | int ocrdma_process_mad(struct ib_device *ibdev, | ||
| 165 | int process_mad_flags, | ||
| 166 | u8 port_num, | ||
| 167 | struct ib_wc *in_wc, | ||
| 168 | struct ib_grh *in_grh, | ||
| 169 | struct ib_mad *in_mad, struct ib_mad *out_mad) | ||
| 170 | { | ||
| 171 | return IB_MAD_RESULT_SUCCESS; | ||
| 172 | } | ||
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h new file mode 100644 index 000000000000..8ac49e7f96d1 --- /dev/null +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h | |||
| @@ -0,0 +1,42 @@ | |||
| 1 | /******************************************************************* | ||
| 2 | * This file is part of the Emulex RoCE Device Driver for * | ||
| 3 | * RoCE (RDMA over Converged Ethernet) adapters. * | ||
| 4 | * Copyright (C) 2008-2012 Emulex. All rights reserved. * | ||
| 5 | * EMULEX and SLI are trademarks of Emulex. * | ||
| 6 | * www.emulex.com * | ||
| 7 | * * | ||
| 8 | * This program is free software; you can redistribute it and/or * | ||
| 9 | * modify it under the terms of version 2 of the GNU General * | ||
| 10 | * Public License as published by the Free Software Foundation. * | ||
| 11 | * This program is distributed in the hope that it will be useful. * | ||
| 12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | ||
| 13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | ||
| 15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | ||
| 16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | ||
| 17 | * more details, a copy of which can be found in the file COPYING * | ||
| 18 | * included with this package. * | ||
| 19 | * | ||
| 20 | * Contact Information: | ||
| 21 | * linux-drivers@emulex.com | ||
| 22 | * | ||
| 23 | * Emulex | ||
| 24 | * 3333 Susan Street | ||
| 25 | * Costa Mesa, CA 92626 | ||
| 26 | *******************************************************************/ | ||
| 27 | |||
| 28 | #ifndef __OCRDMA_AH_H__ | ||
| 29 | #define __OCRDMA_AH_H__ | ||
| 30 | |||
| 31 | struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *); | ||
| 32 | int ocrdma_destroy_ah(struct ib_ah *); | ||
| 33 | int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *); | ||
| 34 | int ocrdma_modify_ah(struct ib_ah *, struct ib_ah_attr *); | ||
| 35 | |||
| 36 | int ocrdma_process_mad(struct ib_device *, | ||
| 37 | int process_mad_flags, | ||
| 38 | u8 port_num, | ||
| 39 | struct ib_wc *in_wc, | ||
| 40 | struct ib_grh *in_grh, | ||
| 41 | struct ib_mad *in_mad, struct ib_mad *out_mad); | ||
| 42 | #endif /* __OCRDMA_AH_H__ */ | ||
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c new file mode 100644 index 000000000000..9b204b1ba336 --- /dev/null +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c | |||
| @@ -0,0 +1,2640 @@ | |||
| 1 | /******************************************************************* | ||
| 2 | * This file is part of the Emulex RoCE Device Driver for * | ||
| 3 | * RoCE (RDMA over Converged Ethernet) CNA Adapters. * | ||
| 4 | * Copyright (C) 2008-2012 Emulex. All rights reserved. * | ||
| 5 | * EMULEX and SLI are trademarks of Emulex. * | ||
| 6 | * www.emulex.com * | ||
| 7 | * * | ||
| 8 | * This program is free software; you can redistribute it and/or * | ||
| 9 | * modify it under the terms of version 2 of the GNU General * | ||
| 10 | * Public License as published by the Free Software Foundation. * | ||
| 11 | * This program is distributed in the hope that it will be useful. * | ||
| 12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | ||
| 13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | ||
| 15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | ||
| 16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | ||
| 17 | * more details, a copy of which can be found in the file COPYING * | ||
| 18 | * included with this package. * | ||
| 19 | * | ||
| 20 | * Contact Information: | ||
| 21 | * linux-drivers@emulex.com | ||
| 22 | * | ||
| 23 | * Emulex | ||
| 24 | * 3333 Susan Street | ||
| 25 | * Costa Mesa, CA 92626 | ||
| 26 | *******************************************************************/ | ||
| 27 | |||
| 28 | #include <linux/sched.h> | ||
| 29 | #include <linux/interrupt.h> | ||
| 30 | #include <linux/log2.h> | ||
| 31 | #include <linux/dma-mapping.h> | ||
| 32 | |||
| 33 | #include <rdma/ib_verbs.h> | ||
| 34 | #include <rdma/ib_user_verbs.h> | ||
| 35 | #include <rdma/ib_addr.h> | ||
| 36 | |||
| 37 | #include "ocrdma.h" | ||
| 38 | #include "ocrdma_hw.h" | ||
| 39 | #include "ocrdma_verbs.h" | ||
| 40 | #include "ocrdma_ah.h" | ||
| 41 | |||
| 42 | enum mbx_status { | ||
| 43 | OCRDMA_MBX_STATUS_FAILED = 1, | ||
| 44 | OCRDMA_MBX_STATUS_ILLEGAL_FIELD = 3, | ||
| 45 | OCRDMA_MBX_STATUS_OOR = 100, | ||
| 46 | OCRDMA_MBX_STATUS_INVALID_PD = 101, | ||
| 47 | OCRDMA_MBX_STATUS_PD_INUSE = 102, | ||
| 48 | OCRDMA_MBX_STATUS_INVALID_CQ = 103, | ||
| 49 | OCRDMA_MBX_STATUS_INVALID_QP = 104, | ||
| 50 | OCRDMA_MBX_STATUS_INVALID_LKEY = 105, | ||
| 51 | OCRDMA_MBX_STATUS_ORD_EXCEEDS = 106, | ||
| 52 | OCRDMA_MBX_STATUS_IRD_EXCEEDS = 107, | ||
| 53 | OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS = 108, | ||
| 54 | OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS = 109, | ||
| 55 | OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS = 110, | ||
| 56 | OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS = 111, | ||
| 57 | OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS = 112, | ||
| 58 | OCRDMA_MBX_STATUS_INVALID_STATE_CHANGE = 113, | ||
| 59 | OCRDMA_MBX_STATUS_MW_BOUND = 114, | ||
| 60 | OCRDMA_MBX_STATUS_INVALID_VA = 115, | ||
| 61 | OCRDMA_MBX_STATUS_INVALID_LENGTH = 116, | ||
| 62 | OCRDMA_MBX_STATUS_INVALID_FBO = 117, | ||
| 63 | OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS = 118, | ||
| 64 | OCRDMA_MBX_STATUS_INVALID_PBE_SIZE = 119, | ||
| 65 | OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY = 120, | ||
| 66 | OCRDMA_MBX_STATUS_INVALID_PBL_SHIFT = 121, | ||
| 67 | OCRDMA_MBX_STATUS_INVALID_SRQ_ID = 129, | ||
| 68 | OCRDMA_MBX_STATUS_SRQ_ERROR = 133, | ||
| 69 | OCRDMA_MBX_STATUS_RQE_EXCEEDS = 134, | ||
| 70 | OCRDMA_MBX_STATUS_MTU_EXCEEDS = 135, | ||
| 71 | OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS = 136, | ||
| 72 | OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS = 137, | ||
| 73 | OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS = 138, | ||
| 74 | OCRDMA_MBX_STATUS_QP_BOUND = 130, | ||
| 75 | OCRDMA_MBX_STATUS_INVALID_CHANGE = 139, | ||
| 76 | OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP = 140, | ||
| 77 | OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER = 141, | ||
| 78 | OCRDMA_MBX_STATUS_MW_STILL_BOUND = 142, | ||
| 79 | OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID = 143, | ||
| 80 | OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS = 144 | ||
| 81 | }; | ||
| 82 | |||
| 83 | enum additional_status { | ||
| 84 | OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES = 22 | ||
| 85 | }; | ||
| 86 | |||
| 87 | enum cqe_status { | ||
| 88 | OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES = 1, | ||
| 89 | OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER = 2, | ||
| 90 | OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES = 3, | ||
| 91 | OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING = 4, | ||
| 92 | OCRDMA_MBX_CQE_STATUS_DMA_FAILED = 5 | ||
| 93 | }; | ||
| 94 | |||
| 95 | static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq) | ||
| 96 | { | ||
| 97 | return (u8 *)eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe)); | ||
| 98 | } | ||
| 99 | |||
| 100 | static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq) | ||
| 101 | { | ||
| 102 | eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1); | ||
| 103 | } | ||
| 104 | |||
| 105 | static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev) | ||
| 106 | { | ||
| 107 | struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *) | ||
| 108 | ((u8 *) dev->mq.cq.va + | ||
| 109 | (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe))); | ||
| 110 | |||
| 111 | if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK)) | ||
| 112 | return NULL; | ||
| 113 | return cqe; | ||
| 114 | } | ||
| 115 | |||
| 116 | static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev) | ||
| 117 | { | ||
| 118 | dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1); | ||
| 119 | } | ||
| 120 | |||
| 121 | static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev) | ||
| 122 | { | ||
| 123 | return (struct ocrdma_mqe *)((u8 *) dev->mq.sq.va + | ||
| 124 | (dev->mq.sq.head * | ||
| 125 | sizeof(struct ocrdma_mqe))); | ||
| 126 | } | ||
| 127 | |||
| 128 | static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev) | ||
| 129 | { | ||
| 130 | dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1); | ||
| 131 | atomic_inc(&dev->mq.sq.used); | ||
| 132 | } | ||
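
All of the queue index helpers above advance with "(i + 1) & (len - 1)", the standard power-of-two ring trick; it wraps correctly only when the queue length is a power of two, which OCRDMA_EQ_LEN, OCRDMA_MQ_LEN and OCRDMA_MQ_CQ_LEN are assumed to be. A runnable sketch of the wraparound:

    #include <stdio.h>

    #define DEMO_Q_LEN 8 /* stand-in for OCRDMA_MQ_LEN; power of two */

    int main(void)
    {
            unsigned head = 6;
            int i;

            for (i = 0; i < 4; i++) {
                    printf("%u ", head);
                    head = (head + 1) & (DEMO_Q_LEN - 1); /* wraps 7 -> 0 */
            }
            printf("\n"); /* prints: 6 7 0 1 */
            return 0;
    }
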
| 133 | |||
| 134 | static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev) | ||
| 135 | { | ||
| 136 | return (void *)((u8 *) dev->mq.sq.va + | ||
| 137 | (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe))); | ||
| 138 | } | ||
| 139 | |||
| 140 | enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps) | ||
| 141 | { | ||
| 142 | switch (qps) { | ||
| 143 | case OCRDMA_QPS_RST: | ||
| 144 | return IB_QPS_RESET; | ||
| 145 | case OCRDMA_QPS_INIT: | ||
| 146 | return IB_QPS_INIT; | ||
| 147 | case OCRDMA_QPS_RTR: | ||
| 148 | return IB_QPS_RTR; | ||
| 149 | case OCRDMA_QPS_RTS: | ||
| 150 | return IB_QPS_RTS; | ||
| 151 | case OCRDMA_QPS_SQD: | ||
| 152 | case OCRDMA_QPS_SQ_DRAINING: | ||
| 153 | return IB_QPS_SQD; | ||
| 154 | case OCRDMA_QPS_SQE: | ||
| 155 | return IB_QPS_SQE; | ||
| 156 | case OCRDMA_QPS_ERR: | ||
| 157 | return IB_QPS_ERR; | ||
| 158 | } | ||
| 159 | return IB_QPS_ERR; | ||
| 160 | } | ||
| 161 | |||
| 162 | static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps) | ||
| 163 | { | ||
| 164 | switch (qps) { | ||
| 165 | case IB_QPS_RESET: | ||
| 166 | return OCRDMA_QPS_RST; | ||
| 167 | case IB_QPS_INIT: | ||
| 168 | return OCRDMA_QPS_INIT; | ||
| 169 | case IB_QPS_RTR: | ||
| 170 | return OCRDMA_QPS_RTR; | ||
| 171 | case IB_QPS_RTS: | ||
| 172 | return OCRDMA_QPS_RTS; | ||
| 173 | case IB_QPS_SQD: | ||
| 174 | return OCRDMA_QPS_SQD; | ||
| 175 | case IB_QPS_SQE: | ||
| 176 | return OCRDMA_QPS_SQE; | ||
| 177 | case IB_QPS_ERR: | ||
| 178 | return OCRDMA_QPS_ERR; | ||
| 179 | } | ||
| 180 | return OCRDMA_QPS_ERR; | ||
| 181 | } | ||
| 182 | |||
| 183 | static int ocrdma_get_mbx_errno(u32 status) | ||
| 184 | { | ||
| 185 | int err_num = -EFAULT; | ||
| 186 | u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >> | ||
| 187 | OCRDMA_MBX_RSP_STATUS_SHIFT; | ||
| 188 | u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >> | ||
| 189 | OCRDMA_MBX_RSP_ASTATUS_SHIFT; | ||
| 190 | |||
| 191 | switch (mbox_status) { | ||
| 192 | case OCRDMA_MBX_STATUS_OOR: | ||
| 193 | case OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS: | ||
| 194 | err_num = -EAGAIN; | ||
| 195 | break; | ||
| 196 | |||
| 197 | case OCRDMA_MBX_STATUS_INVALID_PD: | ||
| 198 | case OCRDMA_MBX_STATUS_INVALID_CQ: | ||
| 199 | case OCRDMA_MBX_STATUS_INVALID_SRQ_ID: | ||
| 200 | case OCRDMA_MBX_STATUS_INVALID_QP: | ||
| 201 | case OCRDMA_MBX_STATUS_INVALID_CHANGE: | ||
| 202 | case OCRDMA_MBX_STATUS_MTU_EXCEEDS: | ||
| 203 | case OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER: | ||
| 204 | case OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID: | ||
| 205 | case OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS: | ||
| 206 | case OCRDMA_MBX_STATUS_ILLEGAL_FIELD: | ||
| 207 | case OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY: | ||
| 208 | case OCRDMA_MBX_STATUS_INVALID_LKEY: | ||
| 209 | case OCRDMA_MBX_STATUS_INVALID_VA: | ||
| 210 | case OCRDMA_MBX_STATUS_INVALID_LENGTH: | ||
| 211 | case OCRDMA_MBX_STATUS_INVALID_FBO: | ||
| 212 | case OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS: | ||
| 213 | case OCRDMA_MBX_STATUS_INVALID_PBE_SIZE: | ||
| 214 | case OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP: | ||
| 215 | case OCRDMA_MBX_STATUS_SRQ_ERROR: | ||
| 216 | case OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS: | ||
| 217 | err_num = -EINVAL; | ||
| 218 | break; | ||
| 219 | |||
| 220 | case OCRDMA_MBX_STATUS_PD_INUSE: | ||
| 221 | case OCRDMA_MBX_STATUS_QP_BOUND: | ||
| 222 | case OCRDMA_MBX_STATUS_MW_STILL_BOUND: | ||
| 223 | case OCRDMA_MBX_STATUS_MW_BOUND: | ||
| 224 | err_num = -EBUSY; | ||
| 225 | break; | ||
| 226 | |||
| 227 | case OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS: | ||
| 228 | case OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS: | ||
| 229 | case OCRDMA_MBX_STATUS_RQE_EXCEEDS: | ||
| 230 | case OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS: | ||
| 231 | case OCRDMA_MBX_STATUS_ORD_EXCEEDS: | ||
| 232 | case OCRDMA_MBX_STATUS_IRD_EXCEEDS: | ||
| 233 | case OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS: | ||
| 234 | case OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS: | ||
| 235 | case OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS: | ||
| 236 | err_num = -ENOBUFS; | ||
| 237 | break; | ||
| 238 | |||
| 239 | case OCRDMA_MBX_STATUS_FAILED: | ||
| 240 | if (add_status == | ||
| 241 | OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES) | ||
| 242 | err_num = -EAGAIN; | ||
| 243 | /* keep the mapped errno; don't fall through to default */ | ||
| 244 | break; | ||
| 245 | default: | ||
| 246 | err_num = -EFAULT; | ||
| 247 | } | ||
| 248 | return err_num; | ||
| 249 | } | ||
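
Collapsing firmware mailbox status codes into standard errnos lets every caller of the mailbox path simply propagate the int upward. A reduced demonstration of the same mapping shape; the status numbers are copied from the enum above, everything else is illustrative:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Cut down to two cases for illustration. */
    static int demo_mbx_errno(unsigned status)
    {
            switch (status) {
            case 100: /* OCRDMA_MBX_STATUS_OOR */
                    return -EAGAIN;
            case 101: /* OCRDMA_MBX_STATUS_INVALID_PD */
                    return -EINVAL;
            default:
                    return -EFAULT;
            }
    }

    int main(void)
    {
            /* e.g. "Resource temporarily unavailable" */
            printf("%s\n", strerror(-demo_mbx_errno(100)));
            return 0;
    }
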
| 250 | |||
| 251 | static int ocrdma_get_mbx_cqe_errno(u16 cqe_status) | ||
| 252 | { | ||
| 253 | int err_num = -EINVAL; | ||
| 254 | |||
| 255 | switch (cqe_status) { | ||
| 256 | case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES: | ||
| 257 | err_num = -EPERM; | ||
| 258 | break; | ||
| 259 | case OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER: | ||
| 260 | err_num = -EINVAL; | ||
| 261 | break; | ||
| 262 | case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES: | ||
| 263 | case OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING: | ||
| 264 | err_num = -EAGAIN; | ||
| 265 | break; | ||
| 266 | case OCRDMA_MBX_CQE_STATUS_DMA_FAILED: | ||
| 267 | err_num = -EIO; | ||
| 268 | break; | ||
| 269 | } | ||
| 270 | return err_num; | ||
| 271 | } | ||
| 272 | |||
| 273 | void ocrdma_ring_cq_db(struct ocrdma_dev *dev, u16 cq_id, bool armed, | ||
| 274 | bool solicited, u16 cqe_popped) | ||
| 275 | { | ||
| 276 | u32 val = cq_id & OCRDMA_DB_CQ_RING_ID_MASK; | ||
| 277 | |||
| 278 | val |= ((cq_id & OCRDMA_DB_CQ_RING_ID_EXT_MASK) << | ||
| 279 | OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT); | ||
| 280 | |||
| 281 | if (armed) | ||
| 282 | val |= (1 << OCRDMA_DB_CQ_REARM_SHIFT); | ||
| 283 | if (solicited) | ||
| 284 | val |= (1 << OCRDMA_DB_CQ_SOLICIT_SHIFT); | ||
| 285 | val |= (cqe_popped << OCRDMA_DB_CQ_NUM_POPPED_SHIFT); | ||
| 286 | iowrite32(val, dev->nic_info.db + OCRDMA_DB_CQ_OFFSET); | ||
| 287 | } | ||
| 288 | |||
| 289 | static void ocrdma_ring_mq_db(struct ocrdma_dev *dev) | ||
| 290 | { | ||
| 291 | u32 val = 0; | ||
| 292 | |||
| 293 | val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK; | ||
| 294 | val |= 1 << OCRDMA_MQ_NUM_MQE_SHIFT; | ||
| 295 | iowrite32(val, dev->nic_info.db + OCRDMA_DB_MQ_OFFSET); | ||
| 296 | } | ||
| 297 | |||
| 298 | static void ocrdma_ring_eq_db(struct ocrdma_dev *dev, u16 eq_id, | ||
| 299 | bool arm, bool clear_int, u16 num_eqe) | ||
| 300 | { | ||
| 301 | u32 val = 0; | ||
| 302 | |||
| 303 | val |= eq_id & OCRDMA_EQ_ID_MASK; | ||
| 304 | val |= ((eq_id & OCRDMA_EQ_ID_EXT_MASK) << OCRDMA_EQ_ID_EXT_MASK_SHIFT); | ||
| 305 | if (arm) | ||
| 306 | val |= (1 << OCRDMA_REARM_SHIFT); | ||
| 307 | if (clear_int) | ||
| 308 | val |= (1 << OCRDMA_EQ_CLR_SHIFT); | ||
| 309 | val |= (1 << OCRDMA_EQ_TYPE_SHIFT); | ||
| 310 | val |= (num_eqe << OCRDMA_NUM_EQE_SHIFT); | ||
| 311 | iowrite32(val, dev->nic_info.db + OCRDMA_DB_EQ_OFFSET); | ||
| 312 | } | ||
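
ocrdma_ring_cq_db(), ocrdma_ring_mq_db() and ocrdma_ring_eq_db() all build a single 32-bit doorbell word from a queue id, one-bit flags, and a popped-entry count, then issue one iowrite32(). A sketch of that composition pattern; the shift values here are illustrative, not the real OCRDMA_* constants:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t make_db(uint16_t eq_id, int arm, int clear_int,
                            uint16_t num_eqe)
    {
            uint32_t val = eq_id & 0x1ff;   /* queue id field    */

            if (arm)
                    val |= 1u << 29;        /* rearm the EQ      */
            if (clear_int)
                    val |= 1u << 9;         /* ack the interrupt */
            val |= (uint32_t)num_eqe << 16; /* entries consumed  */
            return val;
    }

    int main(void)
    {
            printf("0x%08x\n", (unsigned)make_db(3, 1, 1, 2)); /* 0x20020203 */
            return 0;
    }
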
| 313 | |||
| 314 | static void ocrdma_init_mch(struct ocrdma_mbx_hdr *cmd_hdr, | ||
| 315 | u8 opcode, u8 subsys, u32 cmd_len) | ||
| 316 | { | ||
| 317 | cmd_hdr->subsys_op = (opcode | (subsys << OCRDMA_MCH_SUBSYS_SHIFT)); | ||
| 318 | cmd_hdr->timeout = 20; /* seconds */ | ||
| 319 | cmd_hdr->cmd_len = cmd_len - sizeof(struct ocrdma_mbx_hdr); | ||
| 320 | } | ||
| 321 | |||
| 322 | static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len) | ||
| 323 | { | ||
| 324 | struct ocrdma_mqe *mqe; | ||
| 325 | |||
| 326 | mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL); | ||
| 327 | if (!mqe) | ||
| 328 | return NULL; | ||
| 329 | mqe->hdr.spcl_sge_cnt_emb |= | ||
| 330 | (OCRDMA_MQE_EMBEDDED << OCRDMA_MQE_HDR_EMB_SHIFT) & | ||
| 331 | OCRDMA_MQE_HDR_EMB_MASK; | ||
| 332 | mqe->hdr.pyld_len = cmd_len - sizeof(struct ocrdma_mqe_hdr); | ||
| 333 | |||
| 334 | ocrdma_init_mch(&mqe->u.emb_req.mch, opcode, OCRDMA_SUBSYS_ROCE, | ||
| 335 | mqe->hdr.pyld_len); | ||
| 336 | return mqe; | ||
| 337 | } | ||
| 338 | |||
| 339 | static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q) | ||
| 340 | { | ||
| 341 | dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma); | ||
| 342 | } | ||
| 343 | |||
| 344 | static int ocrdma_alloc_q(struct ocrdma_dev *dev, | ||
| 345 | struct ocrdma_queue_info *q, u16 len, u16 entry_size) | ||
| 346 | { | ||
| 347 | memset(q, 0, sizeof(*q)); | ||
| 348 | q->len = len; | ||
| 349 | q->entry_size = entry_size; | ||
| 350 | q->size = len * entry_size; | ||
| 351 | q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, | ||
| 352 | &q->dma, GFP_KERNEL); | ||
| 353 | if (!q->va) | ||
| 354 | return -ENOMEM; | ||
| 355 | memset(q->va, 0, q->size); | ||
| 356 | return 0; | ||
| 357 | } | ||
| 358 | |||
| 359 | static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt, | ||
| 360 | dma_addr_t host_pa, int hw_page_size) | ||
| 361 | { | ||
| 362 | int i; | ||
| 363 | |||
| 364 | for (i = 0; i < cnt; i++) { | ||
| 365 | q_pa[i].lo = (u32) (host_pa & 0xffffffff); | ||
| 366 | q_pa[i].hi = (u32) upper_32_bits(host_pa); | ||
| 367 | host_pa += hw_page_size; | ||
| 368 | } | ||
| 369 | } | ||
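
ocrdma_build_q_pages() describes a queue to the firmware as an array of {lo, hi} 32-bit halves of each page's DMA address, stepping by the hardware page size. The kernel's upper_32_bits() is open-coded below so the sketch compiles standalone:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t host_pa = 0x123456789000ULL; /* example DMA address */
            int hw_page_size = 4096, i;

            for (i = 0; i < 2; i++) {
                    uint32_t lo = (uint32_t)(host_pa & 0xffffffff);
                    uint32_t hi = (uint32_t)(host_pa >> 32); /* upper_32_bits() */
                    printf("page %d: hi=0x%x lo=0x%x\n", i, hi, lo);
                    host_pa += hw_page_size;
            }
            return 0;
    }
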
| 370 | |||
| 371 | static void ocrdma_assign_eq_vect_gen2(struct ocrdma_dev *dev, | ||
| 372 | struct ocrdma_eq *eq) | ||
| 373 | { | ||
| 374 | /* assign vector and update vector id for next EQ */ | ||
| 375 | eq->vector = dev->nic_info.msix.start_vector; | ||
| 376 | dev->nic_info.msix.start_vector += 1; | ||
| 377 | } | ||
| 378 | |||
| 379 | static void ocrdma_free_eq_vect_gen2(struct ocrdma_dev *dev) | ||
| 380 | { | ||
| 381 | /* this assumes that EQs are freed in exactly the reverse | ||
| 382 | * order of their allocation. | ||
| 383 | */ | ||
| 384 | dev->nic_info.msix.start_vector -= 1; | ||
| 385 | } | ||
| 386 | |||
| 387 | static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q, | ||
| 388 | int queue_type) | ||
| 389 | { | ||
| 390 | u8 opcode = 0; | ||
| 391 | int status; | ||
| 392 | struct ocrdma_delete_q_req *cmd = dev->mbx_cmd; | ||
| 393 | |||
| 394 | switch (queue_type) { | ||
| 395 | case QTYPE_MCCQ: | ||
| 396 | opcode = OCRDMA_CMD_DELETE_MQ; | ||
| 397 | break; | ||
| 398 | case QTYPE_CQ: | ||
| 399 | opcode = OCRDMA_CMD_DELETE_CQ; | ||
| 400 | break; | ||
| 401 | case QTYPE_EQ: | ||
| 402 | opcode = OCRDMA_CMD_DELETE_EQ; | ||
| 403 | break; | ||
| 404 | default: | ||
| 405 | BUG(); | ||
| 406 | } | ||
| 407 | memset(cmd, 0, sizeof(*cmd)); | ||
| 408 | ocrdma_init_mch(&cmd->req, opcode, OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); | ||
| 409 | cmd->id = q->id; | ||
| 410 | |||
| 411 | status = be_roce_mcc_cmd(dev->nic_info.netdev, | ||
| 412 | cmd, sizeof(*cmd), NULL, NULL); | ||
| 413 | if (!status) | ||
| 414 | q->created = false; | ||
| 415 | return status; | ||
| 416 | } | ||
| 417 | |||
| 418 | static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq) | ||
| 419 | { | ||
| 420 | int status; | ||
| 421 | struct ocrdma_create_eq_req *cmd = dev->mbx_cmd; | ||
| 422 | struct ocrdma_create_eq_rsp *rsp = dev->mbx_cmd; | ||
| 423 | |||
| 424 | memset(cmd, 0, sizeof(*cmd)); | ||
| 425 | ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON, | ||
| 426 | sizeof(*cmd)); | ||
| 427 | if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) | ||
| 428 | cmd->req.rsvd_version = 0; | ||
| 429 | else | ||
| 430 | cmd->req.rsvd_version = 2; | ||
| 431 | |||
| 432 | cmd->num_pages = 4; | ||
| 433 | cmd->valid = OCRDMA_CREATE_EQ_VALID; | ||
| 434 | cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT; | ||
| 435 | |||
| 436 | ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma, | ||
| 437 | PAGE_SIZE_4K); | ||
| 438 | status = be_roce_mcc_cmd(dev->nic_info.netdev, cmd, sizeof(*cmd), NULL, | ||
| 439 | NULL); | ||
| 440 | if (!status) { | ||
| 441 | eq->q.id = rsp->vector_eqid & 0xffff; | ||
| 442 | if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) | ||
| 443 | ocrdma_assign_eq_vect_gen2(dev, eq); | ||
| 444 | else { | ||
| 445 | eq->vector = (rsp->vector_eqid >> 16) & 0xffff; | ||
| 446 | dev->nic_info.msix.start_vector += 1; | ||
| 447 | } | ||
| 448 | eq->q.created = true; | ||
| 449 | } | ||
| 450 | return status; | ||
| 451 | } | ||
| 452 | |||
| 453 | static int ocrdma_create_eq(struct ocrdma_dev *dev, | ||
| 454 | struct ocrdma_eq *eq, u16 q_len) | ||
| 455 | { | ||
| 456 | int status; | ||
| 457 | |||
| 458 | status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN, | ||
| 459 | sizeof(struct ocrdma_eqe)); | ||
| 460 | if (status) | ||
| 461 | return status; | ||
| 462 | |||
| 463 | status = ocrdma_mbx_create_eq(dev, eq); | ||
| 464 | if (status) | ||
| 465 | goto mbx_err; | ||
| 466 | eq->dev = dev; | ||
| 467 | ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0); | ||
| 468 | |||
| 469 | return 0; | ||
| 470 | mbx_err: | ||
| 471 | ocrdma_free_q(dev, &eq->q); | ||
| 472 | return status; | ||
| 473 | } | ||
| 474 | |||
| 475 | static int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq) | ||
| 476 | { | ||
| 477 | int irq; | ||
| 478 | |||
| 479 | if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) | ||
| 480 | irq = dev->nic_info.pdev->irq; | ||
| 481 | else | ||
| 482 | irq = dev->nic_info.msix.vector_list[eq->vector]; | ||
| 483 | return irq; | ||
| 484 | } | ||
| 485 | |||
| 486 | static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq) | ||
| 487 | { | ||
| 488 | if (eq->q.created) { | ||
| 489 | ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ); | ||
| 490 | if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) | ||
| 491 | ocrdma_free_eq_vect_gen2(dev); | ||
| 492 | ocrdma_free_q(dev, &eq->q); | ||
| 493 | } | ||
| 494 | } | ||
| 495 | |||
| 496 | static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq) | ||
| 497 | { | ||
| 498 | int irq; | ||
| 499 | |||
| 500 | /* disarm the EQ so that no interrupts are generated | ||
| 501 | * while it is being freed and the EQ delete is in progress. | ||
| 502 | */ | ||
| 503 | ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0); | ||
| 504 | |||
| 505 | irq = ocrdma_get_irq(dev, eq); | ||
| 506 | free_irq(irq, eq); | ||
| 507 | _ocrdma_destroy_eq(dev, eq); | ||
| 508 | } | ||
| 509 | |||
| 510 | static void ocrdma_destroy_qp_eqs(struct ocrdma_dev *dev) | ||
| 511 | { | ||
| 512 | int i; | ||
| 513 | |||
| 514 | /* deallocate the data path eqs */ | ||
| 515 | for (i = 0; i < dev->eq_cnt; i++) | ||
| 516 | ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]); | ||
| 517 | } | ||
| 518 | |||
| 519 | static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev, | ||
| 520 | struct ocrdma_queue_info *cq, | ||
| 521 | struct ocrdma_queue_info *eq) | ||
| 522 | { | ||
| 523 | struct ocrdma_create_cq_cmd *cmd = dev->mbx_cmd; | ||
| 524 | struct ocrdma_create_cq_cmd_rsp *rsp = dev->mbx_cmd; | ||
| 525 | int status; | ||
| 526 | |||
| 527 | memset(cmd, 0, sizeof(*cmd)); | ||
| 528 | ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ, | ||
| 529 | OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); | ||
| 530 | |||
| 531 | cmd->pgsz_pgcnt = PAGES_4K_SPANNED(cq->va, cq->size); | ||
| 532 | cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS; | ||
| 533 | cmd->eqn = (eq->id << OCRDMA_CREATE_CQ_EQID_SHIFT); | ||
| 534 | |||
| 535 | ocrdma_build_q_pages(&cmd->pa[0], cmd->pgsz_pgcnt, | ||
| 536 | cq->dma, PAGE_SIZE_4K); | ||
| 537 | status = be_roce_mcc_cmd(dev->nic_info.netdev, | ||
| 538 | cmd, sizeof(*cmd), NULL, NULL); | ||
| 539 | if (!status) { | ||
| 540 | cq->id = (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK); | ||
| 541 | cq->created = true; | ||
| 542 | } | ||
| 543 | return status; | ||
| 544 | } | ||
| 545 | |||
| 546 | static u32 ocrdma_encoded_q_len(int q_len) | ||
| 547 | { | ||
| 548 | u32 len_encoded = fls(q_len); /* log2(len) + 1 */ | ||
| 549 | |||
| 550 | if (len_encoded == 16) | ||
| 551 | len_encoded = 0; | ||
| 552 | return len_encoded; | ||
| 553 | } | ||
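Since fls() returns the 1-based index of the most significant set bit, the encoding above is log2(q_len) + 1 for the power-of-two lengths the callers pass, with an encoding of 16 wrapping to 0. A few illustrative values (the exact field width is defined in the SLI header, so treat the wrap note as an assumption):

    /* Illustrative mapping for power-of-two q_len:
     *   q_len = 16    -> fls = 5  -> encoded 5
     *   q_len = 128   -> fls = 8  -> encoded 8
     *   q_len = 32768 -> fls = 16 -> encoded 0 (wraps in the ring-size field)
     */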
| 554 | |||
| 555 | static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev, | ||
| 556 | struct ocrdma_queue_info *mq, | ||
| 557 | struct ocrdma_queue_info *cq) | ||
| 558 | { | ||
| 559 | int num_pages, status; | ||
| 560 | struct ocrdma_create_mq_req *cmd = dev->mbx_cmd; | ||
| 561 | struct ocrdma_create_mq_rsp *rsp = dev->mbx_cmd; | ||
| 562 | struct ocrdma_pa *pa; | ||
| 563 | |||
| 564 | memset(cmd, 0, sizeof(*cmd)); | ||
| 565 | num_pages = PAGES_4K_SPANNED(mq->va, mq->size); | ||
| 566 | |||
| 567 | if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | ||
| 568 | ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ, | ||
| 569 | OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); | ||
| 570 | cmd->v0.pages = num_pages; | ||
| 571 | cmd->v0.async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID; | ||
| 572 | cmd->v0.async_cqid_valid |= (cq->id << 1); | ||
| 573 | cmd->v0.cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) << | ||
| 574 | OCRDMA_CREATE_MQ_RING_SIZE_SHIFT); | ||
| 575 | cmd->v0.cqid_ringsize |= | ||
| 576 | (cq->id << OCRDMA_CREATE_MQ_V0_CQ_ID_SHIFT); | ||
| 577 | cmd->v0.valid = OCRDMA_CREATE_MQ_VALID; | ||
| 578 | pa = &cmd->v0.pa[0]; | ||
| 579 | } else { | ||
| 580 | ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT, | ||
| 581 | OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); | ||
| 582 | cmd->req.rsvd_version = 1; | ||
| 583 | cmd->v1.cqid_pages = num_pages; | ||
| 584 | cmd->v1.cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT); | ||
| 585 | cmd->v1.async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID; | ||
| 586 | cmd->v1.async_event_bitmap = Bit(20); | ||
| 587 | cmd->v1.async_cqid_ringsize = cq->id; | ||
| 588 | cmd->v1.async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) << | ||
| 589 | OCRDMA_CREATE_MQ_RING_SIZE_SHIFT); | ||
| 590 | cmd->v1.valid = OCRDMA_CREATE_MQ_VALID; | ||
| 591 | pa = &cmd->v1.pa[0]; | ||
| 592 | } | ||
| 593 | ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K); | ||
| 594 | status = be_roce_mcc_cmd(dev->nic_info.netdev, | ||
| 595 | cmd, sizeof(*cmd), NULL, NULL); | ||
| 596 | if (!status) { | ||
| 597 | mq->id = rsp->id; | ||
| 598 | mq->created = true; | ||
| 599 | } | ||
| 600 | return status; | ||
| 601 | } | ||
| 602 | |||
| 603 | static int ocrdma_create_mq(struct ocrdma_dev *dev) | ||
| 604 | { | ||
| 605 | int status; | ||
| 606 | |||
| 607 | /* Alloc completion queue for Mailbox queue */ | ||
| 608 | status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN, | ||
| 609 | sizeof(struct ocrdma_mcqe)); | ||
| 610 | if (status) | ||
| 611 | goto alloc_err; | ||
| 612 | |||
| 613 | status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->meq.q); | ||
| 614 | if (status) | ||
| 615 | goto mbx_cq_free; | ||
| 616 | |||
| 617 | memset(&dev->mqe_ctx, 0, sizeof(dev->mqe_ctx)); | ||
| 618 | init_waitqueue_head(&dev->mqe_ctx.cmd_wait); | ||
| 619 | mutex_init(&dev->mqe_ctx.lock); | ||
| 620 | |||
| 621 | /* Alloc Mailbox queue */ | ||
| 622 | status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN, | ||
| 623 | sizeof(struct ocrdma_mqe)); | ||
| 624 | if (status) | ||
| 625 | goto mbx_cq_destroy; | ||
| 626 | status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq); | ||
| 627 | if (status) | ||
| 628 | goto mbx_q_free; | ||
| 629 | ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0); | ||
| 630 | return 0; | ||
| 631 | |||
| 632 | mbx_q_free: | ||
| 633 | ocrdma_free_q(dev, &dev->mq.sq); | ||
| 634 | mbx_cq_destroy: | ||
| 635 | ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ); | ||
| 636 | mbx_cq_free: | ||
| 637 | ocrdma_free_q(dev, &dev->mq.cq); | ||
| 638 | alloc_err: | ||
| 639 | return status; | ||
| 640 | } | ||
| 641 | |||
| 642 | static void ocrdma_destroy_mq(struct ocrdma_dev *dev) | ||
| 643 | { | ||
| 644 | struct ocrdma_queue_info *mbxq, *cq; | ||
| 645 | |||
| 646 | /* mqe_ctx lock synchronizes with any other pending cmds. */ | ||
| 647 | mutex_lock(&dev->mqe_ctx.lock); | ||
| 648 | mbxq = &dev->mq.sq; | ||
| 649 | if (mbxq->created) { | ||
| 650 | ocrdma_mbx_delete_q(dev, mbxq, QTYPE_MCCQ); | ||
| 651 | ocrdma_free_q(dev, mbxq); | ||
| 652 | } | ||
| 653 | mutex_unlock(&dev->mqe_ctx.lock); | ||
| 654 | |||
| 655 | cq = &dev->mq.cq; | ||
| 656 | if (cq->created) { | ||
| 657 | ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ); | ||
| 658 | ocrdma_free_q(dev, cq); | ||
| 659 | } | ||
| 660 | } | ||
| 661 | |||
| 662 | static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev, | ||
| 663 | struct ocrdma_qp *qp) | ||
| 664 | { | ||
| 665 | enum ib_qp_state new_ib_qps = IB_QPS_ERR; | ||
| 666 | enum ib_qp_state old_ib_qps; | ||
| 667 | |||
| 668 | BUG_ON(qp == NULL); | ||
| 670 | ocrdma_qp_state_machine(qp, new_ib_qps, &old_ib_qps); | ||
| 671 | } | ||
| 672 | |||
| 673 | static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev, | ||
| 674 | struct ocrdma_ae_mcqe *cqe) | ||
| 675 | { | ||
| 676 | struct ocrdma_qp *qp = NULL; | ||
| 677 | struct ocrdma_cq *cq = NULL; | ||
| 678 | struct ib_event ib_evt; | ||
| 679 | int cq_event = 0; | ||
| 680 | int qp_event = 1; | ||
| 681 | int srq_event = 0; | ||
| 682 | int dev_event = 0; | ||
| 683 | int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >> | ||
| 684 | OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT; | ||
| 685 | |||
| 686 | if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID) | ||
| 687 | qp = dev->qp_tbl[cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK]; | ||
| 688 | if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID) | ||
| 689 | cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK]; | ||
| 690 | |||
| 691 | ib_evt.device = &dev->ibdev; | ||
| 692 | |||
| 693 | switch (type) { | ||
| 694 | case OCRDMA_CQ_ERROR: | ||
| 695 | ib_evt.element.cq = &cq->ibcq; | ||
| 696 | ib_evt.event = IB_EVENT_CQ_ERR; | ||
| 697 | cq_event = 1; | ||
| 698 | qp_event = 0; | ||
| 699 | break; | ||
| 700 | case OCRDMA_CQ_OVERRUN_ERROR: | ||
| 701 | ib_evt.element.cq = &cq->ibcq; | ||
| 702 | ib_evt.event = IB_EVENT_CQ_ERR; | ||
| 703 | break; | ||
| 704 | case OCRDMA_CQ_QPCAT_ERROR: | ||
| 705 | ib_evt.element.qp = &qp->ibqp; | ||
| 706 | ib_evt.event = IB_EVENT_QP_FATAL; | ||
| 707 | ocrdma_process_qpcat_error(dev, qp); | ||
| 708 | break; | ||
| 709 | case OCRDMA_QP_ACCESS_ERROR: | ||
| 710 | ib_evt.element.qp = &qp->ibqp; | ||
| 711 | ib_evt.event = IB_EVENT_QP_ACCESS_ERR; | ||
| 712 | break; | ||
| 713 | case OCRDMA_QP_COMM_EST_EVENT: | ||
| 714 | ib_evt.element.qp = &qp->ibqp; | ||
| 715 | ib_evt.event = IB_EVENT_COMM_EST; | ||
| 716 | break; | ||
| 717 | case OCRDMA_SQ_DRAINED_EVENT: | ||
| 718 | ib_evt.element.qp = &qp->ibqp; | ||
| 719 | ib_evt.event = IB_EVENT_SQ_DRAINED; | ||
| 720 | break; | ||
| 721 | case OCRDMA_DEVICE_FATAL_EVENT: | ||
| 722 | ib_evt.element.port_num = 1; | ||
| 723 | ib_evt.event = IB_EVENT_DEVICE_FATAL; | ||
| 724 | qp_event = 0; | ||
| 725 | dev_event = 1; | ||
| 726 | break; | ||
| 727 | case OCRDMA_SRQCAT_ERROR: | ||
| 728 | ib_evt.element.srq = &qp->srq->ibsrq; | ||
| 729 | ib_evt.event = IB_EVENT_SRQ_ERR; | ||
| 730 | srq_event = 1; | ||
| 731 | qp_event = 0; | ||
| 732 | break; | ||
| 733 | case OCRDMA_SRQ_LIMIT_EVENT: | ||
| 734 | ib_evt.element.srq = &qp->srq->ibsrq; | ||
| 735 | ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED; | ||
| 736 | srq_event = 1; | ||
| 737 | qp_event = 0; | ||
| 738 | break; | ||
| 739 | case OCRDMA_QP_LAST_WQE_EVENT: | ||
| 740 | ib_evt.element.qp = &qp->ibqp; | ||
| 741 | ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED; | ||
| 742 | break; | ||
| 743 | default: | ||
| 744 | cq_event = 0; | ||
| 745 | qp_event = 0; | ||
| 746 | srq_event = 0; | ||
| 747 | dev_event = 0; | ||
| 748 | ocrdma_err("%s() unknown type=0x%x\n", __func__, type); | ||
| 749 | break; | ||
| 750 | } | ||
| 751 | |||
| 752 | if (qp_event) { | ||
| 753 | if (qp->ibqp.event_handler) | ||
| 754 | qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context); | ||
| 755 | } else if (cq_event) { | ||
| 756 | if (cq->ibcq.event_handler) | ||
| 757 | cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context); | ||
| 758 | } else if (srq_event) { | ||
| 759 | if (qp->srq->ibsrq.event_handler) | ||
| 760 | qp->srq->ibsrq.event_handler(&ib_evt, | ||
| 761 | qp->srq->ibsrq.srq_context); | ||
| 763 | } else if (dev_event) | ||
| 764 | ib_dispatch_event(&ib_evt); | ||
| 765 | |||
| 766 | } | ||
| 767 | |||
| 768 | static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe) | ||
| 769 | { | ||
| 770 | /* async CQE processing */ | ||
| 771 | struct ocrdma_ae_mcqe *cqe = ae_cqe; | ||
| 772 | u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >> | ||
| 773 | OCRDMA_AE_MCQE_EVENT_CODE_SHIFT; | ||
| 774 | |||
| 775 | if (evt_code == OCRDMA_ASYNC_EVE_CODE) | ||
| 776 | ocrdma_dispatch_ibevent(dev, cqe); | ||
| 777 | else | ||
| 778 | ocrdma_err("%s(%d) invalid evt code=0x%x\n", | ||
| 779 | __func__, dev->id, evt_code); | ||
| 780 | } | ||
| 781 | |||
| 782 | static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe) | ||
| 783 | { | ||
| 784 | if (dev->mqe_ctx.tag == cqe->tag_lo && !dev->mqe_ctx.cmd_done) { | ||
| 785 | dev->mqe_ctx.cqe_status = (cqe->status & | ||
| 786 | OCRDMA_MCQE_STATUS_MASK) >> OCRDMA_MCQE_STATUS_SHIFT; | ||
| 787 | dev->mqe_ctx.ext_status = | ||
| 788 | (cqe->status & OCRDMA_MCQE_ESTATUS_MASK) | ||
| 789 | >> OCRDMA_MCQE_ESTATUS_SHIFT; | ||
| 790 | dev->mqe_ctx.cmd_done = true; | ||
| 791 | wake_up(&dev->mqe_ctx.cmd_wait); | ||
| 792 | } else | ||
| 793 | ocrdma_err("%s() cqe for invalid tag 0x%x, expected 0x%x\n", | ||
| 794 | __func__, cqe->tag_lo, dev->mqe_ctx.tag); | ||
| 795 | } | ||
| 796 | |||
| 797 | static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id) | ||
| 798 | { | ||
| 799 | u16 cqe_popped = 0; | ||
| 800 | struct ocrdma_mcqe *cqe; | ||
| 801 | |||
| 802 | while (1) { | ||
| 803 | cqe = ocrdma_get_mcqe(dev); | ||
| 804 | if (cqe == NULL) | ||
| 805 | break; | ||
| 806 | ocrdma_le32_to_cpu(cqe, sizeof(*cqe)); | ||
| 807 | cqe_popped += 1; | ||
| 808 | if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_AE_MASK) | ||
| 809 | ocrdma_process_acqe(dev, cqe); | ||
| 810 | else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK) | ||
| 811 | ocrdma_process_mcqe(dev, cqe); | ||
| 812 | else | ||
| 813 | ocrdma_err("%s() cqe->compl is not set.\n", __func__); | ||
| 814 | memset(cqe, 0, sizeof(struct ocrdma_mcqe)); | ||
| 815 | ocrdma_mcq_inc_tail(dev); | ||
| 816 | } | ||
| 817 | ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped); | ||
| 818 | return 0; | ||
| 819 | } | ||
| 820 | |||
| 821 | static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev, | ||
| 822 | struct ocrdma_cq *cq) | ||
| 823 | { | ||
| 824 | unsigned long flags; | ||
| 825 | struct ocrdma_qp *qp; | ||
| 826 | bool buddy_cq_found = false; | ||
| 827 | /* Go through the list of QPs in the error state that are using this CQ | ||
| 828 | * and invoke their callback handlers to trigger CQE processing for | ||
| 829 | * error/flushed CQEs. It is rare to find more than a few entries in | ||
| 830 | * this list, as most consumers stop after getting an error CQE. | ||
| 831 | * The list is traversed only until a matching buddy cq is found for a QP. | ||
| 832 | */ | ||
| 833 | spin_lock_irqsave(&dev->flush_q_lock, flags); | ||
| 834 | list_for_each_entry(qp, &cq->sq_head, sq_entry) { | ||
| 835 | if (qp->srq) | ||
| 836 | continue; | ||
| 837 | /* if the wq and rq share the same cq, then the comp_handler | ||
| 838 | * has already been invoked. | ||
| 839 | */ | ||
| 840 | if (qp->sq_cq == qp->rq_cq) | ||
| 841 | continue; | ||
| 842 | /* if the completion came on the sq, the rq's cq is the buddy cq; | ||
| 843 | * if it came on the rq, the sq's cq is the buddy cq. | ||
| 844 | */ | ||
| 845 | if (qp->sq_cq == cq) | ||
| 846 | cq = qp->rq_cq; | ||
| 847 | else | ||
| 848 | cq = qp->sq_cq; | ||
| 849 | buddy_cq_found = true; | ||
| 850 | break; | ||
| 851 | } | ||
| 852 | spin_unlock_irqrestore(&dev->flush_q_lock, flags); | ||
| 853 | if (!buddy_cq_found) | ||
| 854 | return; | ||
| 855 | if (cq->ibcq.comp_handler) { | ||
| 856 | spin_lock_irqsave(&cq->comp_handler_lock, flags); | ||
| 857 | (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); | ||
| 858 | spin_unlock_irqrestore(&cq->comp_handler_lock, flags); | ||
| 859 | } | ||
| 860 | } | ||
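Concretely, the buddy relationship only exists when a QP was created with two distinct CQs, and the selection in the loop above reduces to picking the QP's other CQ. A minimal sketch of that choice (hypothetical helper, not part of the driver):

    /* Hypothetical illustration of the buddy-CQ selection performed above. */
    static struct ocrdma_cq *ocrdma_buddy_cq(struct ocrdma_qp *qp,
                                             struct ocrdma_cq *cq)
    {
            return (qp->sq_cq == cq) ? qp->rq_cq : qp->sq_cq;
    }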
| 861 | |||
| 862 | static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx) | ||
| 863 | { | ||
| 864 | unsigned long flags; | ||
| 865 | struct ocrdma_cq *cq; | ||
| 866 | |||
| 867 | BUG_ON(cq_idx >= OCRDMA_MAX_CQ); | ||
| 869 | |||
| 870 | cq = dev->cq_tbl[cq_idx]; | ||
| 871 | if (cq == NULL) { | ||
| 872 | ocrdma_err("%s(%d) invalid id=0x%x\n", __func__, dev->id, cq_idx); | ||
| 873 | return; | ||
| 874 | } | ||
| 875 | spin_lock_irqsave(&cq->cq_lock, flags); | ||
| 876 | cq->armed = false; | ||
| 877 | cq->solicited = false; | ||
| 878 | spin_unlock_irqrestore(&cq->cq_lock, flags); | ||
| 879 | |||
| 880 | ocrdma_ring_cq_db(dev, cq->id, false, false, 0); | ||
| 881 | |||
| 882 | if (cq->ibcq.comp_handler) { | ||
| 883 | spin_lock_irqsave(&cq->comp_handler_lock, flags); | ||
| 884 | (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); | ||
| 885 | spin_unlock_irqrestore(&cq->comp_handler_lock, flags); | ||
| 886 | } | ||
| 887 | ocrdma_qp_buddy_cq_handler(dev, cq); | ||
| 888 | } | ||
| 889 | |||
| 890 | static void ocrdma_cq_handler(struct ocrdma_dev *dev, u16 cq_id) | ||
| 891 | { | ||
| 892 | /* process the MQ-CQE. */ | ||
| 893 | if (cq_id == dev->mq.cq.id) | ||
| 894 | ocrdma_mq_cq_handler(dev, cq_id); | ||
| 895 | else | ||
| 896 | ocrdma_qp_cq_handler(dev, cq_id); | ||
| 897 | } | ||
| 898 | |||
| 899 | static irqreturn_t ocrdma_irq_handler(int irq, void *handle) | ||
| 900 | { | ||
| 901 | struct ocrdma_eq *eq = handle; | ||
| 902 | struct ocrdma_dev *dev = eq->dev; | ||
| 903 | struct ocrdma_eqe eqe; | ||
| 904 | struct ocrdma_eqe *ptr; | ||
| 905 | u16 eqe_popped = 0; | ||
| 906 | u16 cq_id; | ||
| 907 | while (1) { | ||
| 908 | ptr = ocrdma_get_eqe(eq); | ||
| 909 | eqe = *ptr; | ||
| 910 | ocrdma_le32_to_cpu(&eqe, sizeof(eqe)); | ||
| 911 | if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0) | ||
| 912 | break; | ||
| 913 | eqe_popped += 1; | ||
| 914 | ptr->id_valid = 0; | ||
| 915 | /* check whether it is a CQE or not. */ | ||
| 916 | if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) { | ||
| 917 | cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT; | ||
| 918 | ocrdma_cq_handler(dev, cq_id); | ||
| 919 | } | ||
| 920 | ocrdma_eq_inc_tail(eq); | ||
| 921 | } | ||
| 922 | ocrdma_ring_eq_db(dev, eq->q.id, true, true, eqe_popped); | ||
| 923 | /* Ring the EQ doorbell with num_popped set to 0 to re-enable interrupts. */ | ||
| 924 | if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) | ||
| 925 | ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0); | ||
| 926 | return IRQ_HANDLED; | ||
| 927 | } | ||
| 928 | |||
| 929 | static void ocrdma_post_mqe(struct ocrdma_dev *dev, struct ocrdma_mqe *cmd) | ||
| 930 | { | ||
| 931 | struct ocrdma_mqe *mqe; | ||
| 932 | |||
| 933 | dev->mqe_ctx.tag = dev->mq.sq.head; | ||
| 934 | dev->mqe_ctx.cmd_done = false; | ||
| 935 | mqe = ocrdma_get_mqe(dev); | ||
| 936 | cmd->hdr.tag_lo = dev->mq.sq.head; | ||
| 937 | ocrdma_copy_cpu_to_le32(mqe, cmd, sizeof(*mqe)); | ||
| 938 | /* make sure descriptor is written before ringing doorbell */ | ||
| 939 | wmb(); | ||
| 940 | ocrdma_mq_inc_head(dev); | ||
| 941 | ocrdma_ring_mq_db(dev); | ||
| 942 | } | ||
| 943 | |||
| 944 | static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev) | ||
| 945 | { | ||
| 946 | long status; | ||
| 947 | /* 30 sec timeout */ | ||
| 948 | status = wait_event_timeout(dev->mqe_ctx.cmd_wait, | ||
| 949 | dev->mqe_ctx.cmd_done, | ||
| 950 | msecs_to_jiffies(30000)); | ||
| 951 | if (status) | ||
| 952 | return 0; | ||
| 953 | else | ||
| 954 | return -ETIMEDOUT; | ||
| 955 | } | ||
| 956 | |||
| 957 | /* issue a mailbox command on the MQ */ | ||
| 958 | static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe) | ||
| 959 | { | ||
| 960 | int status = 0; | ||
| 961 | u16 cqe_status, ext_status; | ||
| 962 | struct ocrdma_mqe *rsp; | ||
| 963 | |||
| 964 | mutex_lock(&dev->mqe_ctx.lock); | ||
| 965 | ocrdma_post_mqe(dev, mqe); | ||
| 966 | status = ocrdma_wait_mqe_cmpl(dev); | ||
| 967 | if (status) | ||
| 968 | goto mbx_err; | ||
| 969 | cqe_status = dev->mqe_ctx.cqe_status; | ||
| 970 | ext_status = dev->mqe_ctx.ext_status; | ||
| 971 | rsp = ocrdma_get_mqe_rsp(dev); | ||
| 972 | ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe))); | ||
| 973 | if (cqe_status || ext_status) { | ||
| 974 | ocrdma_err | ||
| 975 | ("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n", | ||
| 976 | __func__, | ||
| 977 | (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >> | ||
| 978 | OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status); | ||
| 979 | status = ocrdma_get_mbx_cqe_errno(cqe_status); | ||
| 980 | goto mbx_err; | ||
| 981 | } | ||
| 982 | if (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK) | ||
| 983 | status = ocrdma_get_mbx_errno(mqe->u.rsp.status); | ||
| 984 | mbx_err: | ||
| 985 | mutex_unlock(&dev->mqe_ctx.lock); | ||
| 986 | return status; | ||
| 987 | } | ||
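Every mailbox verb below reuses this machinery in the same shape; a condensed sketch of the calling pattern (error handling elided; the command type shown is just one of the real opcodes used later in this file):

    struct ocrdma_mqe *cmd;
    struct ocrdma_mbx_query_config *rsp;
    int status;

    cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd));
    /* ... fill any command-specific fields here ... */
    status = ocrdma_mbx_cmd(dev, cmd);  /* posts the MQE, sleeps for its MCQE */
    /* on success, the response overlays the command buffer */
    rsp = (struct ocrdma_mbx_query_config *)cmd;
    kfree(cmd);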
| 988 | |||
| 989 | static void ocrdma_get_attr(struct ocrdma_dev *dev, | ||
| 990 | struct ocrdma_dev_attr *attr, | ||
| 991 | struct ocrdma_mbx_query_config *rsp) | ||
| 992 | { | ||
| 993 | int max_q_mem; | ||
| 994 | |||
| 995 | attr->max_pd = | ||
| 996 | (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >> | ||
| 997 | OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT; | ||
| 998 | attr->max_qp = | ||
| 999 | (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >> | ||
| 1000 | OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT; | ||
| 1001 | attr->max_send_sge = ((rsp->max_write_send_sge & | ||
| 1002 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> | ||
| 1003 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT); | ||
| 1004 | attr->max_recv_sge = (rsp->max_write_send_sge & | ||
| 1005 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> | ||
| 1006 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT; | ||
| 1007 | attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp & | ||
| 1008 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >> | ||
| 1009 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT; | ||
| 1010 | attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp & | ||
| 1011 | OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >> | ||
| 1012 | OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT; | ||
| 1013 | attr->cq_overflow_detect = (rsp->qp_srq_cq_ird_ord & | ||
| 1014 | OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK) >> | ||
| 1015 | OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT; | ||
| 1016 | attr->srq_supported = (rsp->qp_srq_cq_ird_ord & | ||
| 1017 | OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK) >> | ||
| 1018 | OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT; | ||
| 1019 | attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay & | ||
| 1020 | OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >> | ||
| 1021 | OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT; | ||
| 1022 | attr->max_mr = rsp->max_mr; | ||
| 1023 | attr->max_mr_size = ~0ull; | ||
| 1024 | attr->max_fmr = 0; | ||
| 1025 | attr->max_pages_per_frmr = rsp->max_pages_per_frmr; | ||
| 1026 | attr->max_num_mr_pbl = rsp->max_num_mr_pbl; | ||
| 1027 | attr->max_cqe = rsp->max_cq_cqes_per_cq & | ||
| 1028 | OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK; | ||
| 1029 | attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs & | ||
| 1030 | OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >> | ||
| 1031 | OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) * | ||
| 1032 | OCRDMA_WQE_STRIDE; | ||
| 1033 | attr->rqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs & | ||
| 1034 | OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK) >> | ||
| 1035 | OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET) * | ||
| 1036 | OCRDMA_WQE_STRIDE; | ||
| 1037 | attr->max_inline_data = | ||
| 1038 | attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) + | ||
| 1039 | sizeof(struct ocrdma_sge)); | ||
| 1040 | max_q_mem = OCRDMA_Q_PAGE_BASE_SIZE << (OCRDMA_MAX_Q_PAGE_SIZE_CNT - 1); | ||
| 1041 | /* the hw can queue one entry less than the configured size, | ||
| 1042 | * so publish one less to the stack. | ||
| 1043 | */ | ||
| 1044 | if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | ||
| 1045 | dev->attr.max_wqe = max_q_mem / dev->attr.wqe_size; | ||
| 1046 | attr->ird = 1; | ||
| 1047 | attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE; | ||
| 1048 | attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES; | ||
| 1049 | } else | ||
| 1050 | dev->attr.max_wqe = (max_q_mem / dev->attr.wqe_size) - 1; | ||
| 1051 | dev->attr.max_rqe = (max_q_mem / dev->attr.rqe_size) - 1; | ||
| 1052 | } | ||
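As a worked example of the publish-one-less rule (with illustrative constants; the real values live in ocrdma_sli.h): if OCRDMA_Q_PAGE_BASE_SIZE were 4096 and OCRDMA_MAX_Q_PAGE_SIZE_CNT were 5, then max_q_mem = 4096 << 4 = 65536 bytes; a 64-byte WQE stride would yield 1024 slots, published to the stack as max_wqe = 1023 on the non-GEN2 path.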
| 1053 | |||
| 1054 | static int ocrdma_check_fw_config(struct ocrdma_dev *dev, | ||
| 1055 | struct ocrdma_fw_conf_rsp *conf) | ||
| 1056 | { | ||
| 1057 | u32 fn_mode; | ||
| 1058 | |||
| 1059 | fn_mode = conf->fn_mode & OCRDMA_FN_MODE_RDMA; | ||
| 1060 | if (fn_mode != OCRDMA_FN_MODE_RDMA) | ||
| 1061 | return -EINVAL; | ||
| 1062 | dev->base_eqid = conf->base_eqid; | ||
| 1063 | dev->max_eq = conf->max_eq; | ||
| 1064 | dev->attr.max_cq = OCRDMA_MAX_CQ - 1; | ||
| 1065 | return 0; | ||
| 1066 | } | ||
| 1067 | |||
| 1068 | /* can be issued only at init time. */ | ||
| 1069 | static int ocrdma_mbx_query_fw_ver(struct ocrdma_dev *dev) | ||
| 1070 | { | ||
| 1071 | int status = -ENOMEM; | ||
| 1072 | struct ocrdma_mqe *cmd; | ||
| 1073 | struct ocrdma_fw_ver_rsp *rsp; | ||
| 1074 | |||
| 1075 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_VER, sizeof(*cmd)); | ||
| 1076 | if (!cmd) | ||
| 1077 | return -ENOMEM; | ||
| 1078 | ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0], | ||
| 1079 | OCRDMA_CMD_GET_FW_VER, | ||
| 1080 | OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); | ||
| 1081 | |||
| 1082 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 1083 | if (status) | ||
| 1084 | goto mbx_err; | ||
| 1085 | rsp = (struct ocrdma_fw_ver_rsp *)cmd; | ||
| 1086 | memset(&dev->attr.fw_ver[0], 0, sizeof(dev->attr.fw_ver)); | ||
| 1087 | memcpy(&dev->attr.fw_ver[0], &rsp->running_ver[0], | ||
| 1088 | sizeof(rsp->running_ver)); | ||
| 1089 | ocrdma_le32_to_cpu(dev->attr.fw_ver, sizeof(rsp->running_ver)); | ||
| 1090 | mbx_err: | ||
| 1091 | kfree(cmd); | ||
| 1092 | return status; | ||
| 1093 | } | ||
| 1094 | |||
| 1095 | /* can be issued only at init time. */ | ||
| 1096 | static int ocrdma_mbx_query_fw_config(struct ocrdma_dev *dev) | ||
| 1097 | { | ||
| 1098 | int status = -ENOMEM; | ||
| 1099 | struct ocrdma_mqe *cmd; | ||
| 1100 | struct ocrdma_fw_conf_rsp *rsp; | ||
| 1101 | |||
| 1102 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_CONFIG, sizeof(*cmd)); | ||
| 1103 | if (!cmd) | ||
| 1104 | return -ENOMEM; | ||
| 1105 | ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0], | ||
| 1106 | OCRDMA_CMD_GET_FW_CONFIG, | ||
| 1107 | OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); | ||
| 1108 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 1109 | if (status) | ||
| 1110 | goto mbx_err; | ||
| 1111 | rsp = (struct ocrdma_fw_conf_rsp *)cmd; | ||
| 1112 | status = ocrdma_check_fw_config(dev, rsp); | ||
| 1113 | mbx_err: | ||
| 1114 | kfree(cmd); | ||
| 1115 | return status; | ||
| 1116 | } | ||
| 1117 | |||
| 1118 | static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev) | ||
| 1119 | { | ||
| 1120 | int status = -ENOMEM; | ||
| 1121 | struct ocrdma_mbx_query_config *rsp; | ||
| 1122 | struct ocrdma_mqe *cmd; | ||
| 1123 | |||
| 1124 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd)); | ||
| 1125 | if (!cmd) | ||
| 1126 | return status; | ||
| 1127 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 1128 | if (status) | ||
| 1129 | goto mbx_err; | ||
| 1130 | rsp = (struct ocrdma_mbx_query_config *)cmd; | ||
| 1131 | ocrdma_get_attr(dev, &dev->attr, rsp); | ||
| 1132 | mbx_err: | ||
| 1133 | kfree(cmd); | ||
| 1134 | return status; | ||
| 1135 | } | ||
| 1136 | |||
| 1137 | int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd) | ||
| 1138 | { | ||
| 1139 | int status = -ENOMEM; | ||
| 1140 | struct ocrdma_alloc_pd *cmd; | ||
| 1141 | struct ocrdma_alloc_pd_rsp *rsp; | ||
| 1142 | |||
| 1143 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD, sizeof(*cmd)); | ||
| 1144 | if (!cmd) | ||
| 1145 | return status; | ||
| 1146 | if (pd->dpp_enabled) | ||
| 1147 | cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP; | ||
| 1148 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 1149 | if (status) | ||
| 1150 | goto mbx_err; | ||
| 1151 | rsp = (struct ocrdma_alloc_pd_rsp *)cmd; | ||
| 1152 | pd->id = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_PDID_MASK; | ||
| 1153 | if (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) { | ||
| 1154 | pd->dpp_enabled = true; | ||
| 1155 | pd->dpp_page = rsp->dpp_page_pdid >> | ||
| 1156 | OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT; | ||
| 1157 | } else { | ||
| 1158 | pd->dpp_enabled = false; | ||
| 1159 | pd->num_dpp_qp = 0; | ||
| 1160 | } | ||
| 1161 | mbx_err: | ||
| 1162 | kfree(cmd); | ||
| 1163 | return status; | ||
| 1164 | } | ||
| 1165 | |||
| 1166 | int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd) | ||
| 1167 | { | ||
| 1168 | int status = -ENOMEM; | ||
| 1169 | struct ocrdma_dealloc_pd *cmd; | ||
| 1170 | |||
| 1171 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD, sizeof(*cmd)); | ||
| 1172 | if (!cmd) | ||
| 1173 | return status; | ||
| 1174 | cmd->id = pd->id; | ||
| 1175 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 1176 | kfree(cmd); | ||
| 1177 | return status; | ||
| 1178 | } | ||
| 1179 | |||
| 1180 | static int ocrdma_build_q_conf(u32 *num_entries, int entry_size, | ||
| 1181 | int *num_pages, int *page_size) | ||
| 1182 | { | ||
| 1183 | int i; | ||
| 1184 | int mem_size; | ||
| 1185 | |||
| 1186 | *num_entries = roundup_pow_of_two(*num_entries); | ||
| 1187 | mem_size = *num_entries * entry_size; | ||
| 1188 | /* find the lowest possible multiplier */ | ||
| 1189 | for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) { | ||
| 1190 | if (mem_size <= (OCRDMA_Q_PAGE_BASE_SIZE << i)) | ||
| 1191 | break; | ||
| 1192 | } | ||
| 1193 | if (i >= OCRDMA_MAX_Q_PAGE_SIZE_CNT) | ||
| 1194 | return -EINVAL; | ||
| 1195 | mem_size = roundup(mem_size, | ||
| 1196 | ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES)); | ||
| 1197 | *num_pages = | ||
| 1198 | mem_size / ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES); | ||
| 1199 | *page_size = ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES); | ||
| 1200 | *num_entries = mem_size / entry_size; | ||
| 1201 | return 0; | ||
| 1202 | } | ||
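A worked pass through this sizing logic, again with illustrative constants (OCRDMA_Q_PAGE_BASE_SIZE = 4096, OCRDMA_MAX_Q_PAGES = 8): a request for 1000 entries of 16 bytes rounds up to 1024 entries = 16384 bytes; the loop stops at i = 2 because 16384 <= (4096 << 2); the rounding unit is (4096 << 2) / 8 = 2048 bytes; the result is num_pages = 8, page_size = 2048, and num_entries = 1024.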
| 1203 | |||
| 1204 | static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev) | ||
| 1205 | { | ||
| 1206 | int i; | ||
| 1207 | int status = -ENOMEM; | ||
| 1208 | int max_ah; | ||
| 1209 | struct ocrdma_create_ah_tbl *cmd; | ||
| 1210 | struct ocrdma_create_ah_tbl_rsp *rsp; | ||
| 1211 | struct pci_dev *pdev = dev->nic_info.pdev; | ||
| 1212 | dma_addr_t pa; | ||
| 1213 | struct ocrdma_pbe *pbes; | ||
| 1214 | |||
| 1215 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_AH_TBL, sizeof(*cmd)); | ||
| 1216 | if (!cmd) | ||
| 1217 | return status; | ||
| 1218 | |||
| 1219 | max_ah = OCRDMA_MAX_AH; | ||
| 1220 | dev->av_tbl.size = sizeof(struct ocrdma_av) * max_ah; | ||
| 1221 | |||
| 1222 | /* number of PBEs in PBL */ | ||
| 1223 | cmd->ah_conf = (OCRDMA_AH_TBL_PAGES << | ||
| 1224 | OCRDMA_CREATE_AH_NUM_PAGES_SHIFT) & | ||
| 1225 | OCRDMA_CREATE_AH_NUM_PAGES_MASK; | ||
| 1226 | |||
| 1227 | /* page size */ | ||
| 1228 | for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) { | ||
| 1229 | if (PAGE_SIZE == (OCRDMA_MIN_Q_PAGE_SIZE << i)) | ||
| 1230 | break; | ||
| 1231 | } | ||
| 1232 | cmd->ah_conf |= (i << OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT) & | ||
| 1233 | OCRDMA_CREATE_AH_PAGE_SIZE_MASK; | ||
| 1234 | |||
| 1235 | /* ah_entry size */ | ||
| 1236 | cmd->ah_conf |= (sizeof(struct ocrdma_av) << | ||
| 1237 | OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT) & | ||
| 1238 | OCRDMA_CREATE_AH_ENTRY_SIZE_MASK; | ||
| 1239 | |||
| 1240 | dev->av_tbl.pbl.va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, | ||
| 1241 | &dev->av_tbl.pbl.pa, | ||
| 1242 | GFP_KERNEL); | ||
| 1243 | if (dev->av_tbl.pbl.va == NULL) | ||
| 1244 | goto mem_err; | ||
| 1245 | |||
| 1246 | dev->av_tbl.va = dma_alloc_coherent(&pdev->dev, dev->av_tbl.size, | ||
| 1247 | &pa, GFP_KERNEL); | ||
| 1248 | if (dev->av_tbl.va == NULL) | ||
| 1249 | goto mem_err_ah; | ||
| 1250 | dev->av_tbl.pa = pa; | ||
| 1251 | dev->av_tbl.num_ah = max_ah; | ||
| 1252 | memset(dev->av_tbl.va, 0, dev->av_tbl.size); | ||
| 1253 | |||
| 1254 | pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va; | ||
| 1255 | for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) { | ||
| 1256 | pbes[i].pa_lo = (u32) (pa & 0xffffffff); | ||
| 1257 | pbes[i].pa_hi = (u32) upper_32_bits(pa); | ||
| 1258 | pa += PAGE_SIZE; | ||
| 1259 | } | ||
| 1260 | cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF); | ||
| 1261 | cmd->tbl_addr[0].hi = (u32)upper_32_bits(dev->av_tbl.pbl.pa); | ||
| 1262 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 1263 | if (status) | ||
| 1264 | goto mbx_err; | ||
| 1265 | rsp = (struct ocrdma_create_ah_tbl_rsp *)cmd; | ||
| 1266 | dev->av_tbl.ahid = rsp->ahid & 0xFFFF; | ||
| 1267 | kfree(cmd); | ||
| 1268 | return 0; | ||
| 1269 | |||
| 1270 | mbx_err: | ||
| 1271 | dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va, | ||
| 1272 | dev->av_tbl.pa); | ||
| 1273 | dev->av_tbl.va = NULL; | ||
| 1274 | mem_err_ah: | ||
| 1275 | dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va, | ||
| 1276 | dev->av_tbl.pbl.pa); | ||
| 1277 | dev->av_tbl.pbl.va = NULL; | ||
| 1278 | dev->av_tbl.size = 0; | ||
| 1279 | mem_err: | ||
| 1280 | kfree(cmd); | ||
| 1281 | return status; | ||
| 1282 | } | ||
| 1283 | |||
| 1284 | static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev) | ||
| 1285 | { | ||
| 1286 | struct ocrdma_delete_ah_tbl *cmd; | ||
| 1287 | struct pci_dev *pdev = dev->nic_info.pdev; | ||
| 1288 | |||
| 1289 | if (dev->av_tbl.va == NULL) | ||
| 1290 | return; | ||
| 1291 | |||
| 1292 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_AH_TBL, sizeof(*cmd)); | ||
| 1293 | if (!cmd) | ||
| 1294 | return; | ||
| 1295 | cmd->ahid = dev->av_tbl.ahid; | ||
| 1296 | |||
| 1297 | ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 1298 | dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va, | ||
| 1299 | dev->av_tbl.pa); | ||
| 1300 | dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va, | ||
| 1301 | dev->av_tbl.pbl.pa); | ||
| 1302 | kfree(cmd); | ||
| 1303 | } | ||
| 1304 | |||
| 1305 | /* Multiple CQs share an EQ. This routine returns the least used | ||
| 1306 | * EQ to associate with a CQ. This distributes the interrupt | ||
| 1307 | * processing and CPU load across the associated EQs, vectors and CPUs. | ||
| 1308 | */ | ||
| 1309 | static u16 ocrdma_bind_eq(struct ocrdma_dev *dev) | ||
| 1310 | { | ||
| 1311 | int i, selected_eq = 0, cq_cnt = 0; | ||
| 1312 | u16 eq_id; | ||
| 1313 | |||
| 1314 | mutex_lock(&dev->dev_lock); | ||
| 1315 | cq_cnt = dev->qp_eq_tbl[0].cq_cnt; | ||
| 1316 | eq_id = dev->qp_eq_tbl[0].q.id; | ||
| 1317 | /* find the EQ which has the least number of | ||
| 1318 | * CQs associated with it. | ||
| 1319 | */ | ||
| 1320 | for (i = 0; i < dev->eq_cnt; i++) { | ||
| 1321 | if (dev->qp_eq_tbl[i].cq_cnt < cq_cnt) { | ||
| 1322 | cq_cnt = dev->qp_eq_tbl[i].cq_cnt; | ||
| 1323 | eq_id = dev->qp_eq_tbl[i].q.id; | ||
| 1324 | selected_eq = i; | ||
| 1325 | } | ||
| 1326 | } | ||
| 1327 | dev->qp_eq_tbl[selected_eq].cq_cnt += 1; | ||
| 1328 | mutex_unlock(&dev->dev_lock); | ||
| 1329 | return eq_id; | ||
| 1330 | } | ||
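For example, if three EQs currently serve {4, 2, 2} CQs, the scan above picks index 1 (the first minimum) and bumps its count to 3, so successive CQ creations spread across the EQs, and therefore across their MSI-X vectors and CPUs.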
| 1331 | |||
| 1332 | static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id) | ||
| 1333 | { | ||
| 1334 | int i; | ||
| 1335 | |||
| 1336 | mutex_lock(&dev->dev_lock); | ||
| 1337 | for (i = 0; i < dev->eq_cnt; i++) { | ||
| 1338 | if (dev->qp_eq_tbl[i].q.id != eq_id) | ||
| 1339 | continue; | ||
| 1340 | dev->qp_eq_tbl[i].cq_cnt -= 1; | ||
| 1341 | break; | ||
| 1342 | } | ||
| 1343 | mutex_unlock(&dev->dev_lock); | ||
| 1344 | } | ||
| 1345 | |||
| 1346 | int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq, | ||
| 1347 | int entries, int dpp_cq) | ||
| 1348 | { | ||
| 1349 | int status = -ENOMEM, max_hw_cqe; | ||
| 1350 | struct pci_dev *pdev = dev->nic_info.pdev; | ||
| 1351 | struct ocrdma_create_cq *cmd; | ||
| 1352 | struct ocrdma_create_cq_rsp *rsp; | ||
| 1353 | u32 hw_pages, cqe_size, page_size, cqe_count; | ||
| 1354 | |||
| 1355 | if (dpp_cq) | ||
| 1356 | return -EINVAL; | ||
| 1357 | if (entries > dev->attr.max_cqe) { | ||
| 1358 | ocrdma_err("%s(%d) max_cqe=0x%x, requested_cqe=0x%x\n", | ||
| 1359 | __func__, dev->id, dev->attr.max_cqe, entries); | ||
| 1360 | return -EINVAL; | ||
| 1361 | } | ||
| 1362 | if (dpp_cq && (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY)) | ||
| 1363 | return -EINVAL; | ||
| 1364 | |||
| 1365 | if (dpp_cq) { | ||
| 1366 | cq->max_hw_cqe = 1; | ||
| 1367 | max_hw_cqe = 1; | ||
| 1368 | cqe_size = OCRDMA_DPP_CQE_SIZE; | ||
| 1369 | hw_pages = 1; | ||
| 1370 | } else { | ||
| 1371 | cq->max_hw_cqe = dev->attr.max_cqe; | ||
| 1372 | max_hw_cqe = dev->attr.max_cqe; | ||
| 1373 | cqe_size = sizeof(struct ocrdma_cqe); | ||
| 1374 | hw_pages = OCRDMA_CREATE_CQ_MAX_PAGES; | ||
| 1375 | } | ||
| 1376 | |||
| 1377 | cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE); | ||
| 1378 | |||
| 1379 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_CQ, sizeof(*cmd)); | ||
| 1380 | if (!cmd) | ||
| 1381 | return -ENOMEM; | ||
| 1382 | ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ, | ||
| 1383 | OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); | ||
| 1384 | cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL); | ||
| 1385 | if (!cq->va) { | ||
| 1386 | status = -ENOMEM; | ||
| 1387 | goto mem_err; | ||
| 1388 | } | ||
| 1389 | memset(cq->va, 0, cq->len); | ||
| 1390 | page_size = cq->len / hw_pages; | ||
| 1391 | cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) << | ||
| 1392 | OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT; | ||
| 1393 | cmd->cmd.pgsz_pgcnt |= hw_pages; | ||
| 1394 | cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS; | ||
| 1395 | |||
| 1396 | if (dev->eq_cnt <= 0) | ||
| 1397 | goto eq_err; | ||
| 1398 | cq->eqn = ocrdma_bind_eq(dev); | ||
| 1399 | cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER2; | ||
| 1400 | cqe_count = cq->len / cqe_size; | ||
| 1401 | if (cqe_count > 1024) { | ||
| 1402 | /* Set cnt to 3 to indicate more than 1024 cq entries */ | ||
| 1403 | cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT); | ||
| 1404 | } else { | ||
| 1405 | u8 count = 0; | ||
| 1406 | switch (cqe_count) { | ||
| 1407 | case 256: | ||
| 1408 | count = 0; | ||
| 1409 | break; | ||
| 1410 | case 512: | ||
| 1411 | count = 1; | ||
| 1412 | break; | ||
| 1413 | case 1024: | ||
| 1414 | count = 2; | ||
| 1415 | break; | ||
| 1416 | default: | ||
| 1417 | goto mbx_err; | ||
| 1418 | } | ||
| 1419 | cmd->cmd.ev_cnt_flags |= (count << OCRDMA_CREATE_CQ_CNT_SHIFT); | ||
| 1420 | } | ||
| 1421 | /* the eq is shared among all the consumer cqs. */ | ||
| 1422 | cmd->cmd.eqn = cq->eqn; | ||
| 1423 | if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | ||
| 1424 | if (dpp_cq) | ||
| 1425 | cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP << | ||
| 1426 | OCRDMA_CREATE_CQ_TYPE_SHIFT; | ||
| 1427 | cq->phase_change = false; | ||
| 1428 | cmd->cmd.cqe_count = (cq->len / cqe_size); | ||
| 1429 | } else { | ||
| 1430 | cmd->cmd.cqe_count = (cq->len / cqe_size) - 1; | ||
| 1431 | cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID; | ||
| 1432 | cq->phase_change = true; | ||
| 1433 | } | ||
| 1434 | |||
| 1435 | ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size); | ||
| 1436 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 1437 | if (status) | ||
| 1438 | goto mbx_err; | ||
| 1439 | |||
| 1440 | rsp = (struct ocrdma_create_cq_rsp *)cmd; | ||
| 1441 | cq->id = (u16) (rsp->rsp.cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK); | ||
| 1442 | kfree(cmd); | ||
| 1443 | return 0; | ||
| 1444 | mbx_err: | ||
| 1445 | ocrdma_unbind_eq(dev, cq->eqn); | ||
| 1446 | eq_err: | ||
| 1447 | dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa); | ||
| 1448 | mem_err: | ||
| 1449 | kfree(cmd); | ||
| 1450 | return status; | ||
| 1451 | } | ||
| 1452 | |||
| 1453 | int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq) | ||
| 1454 | { | ||
| 1455 | int status = -ENOMEM; | ||
| 1456 | struct ocrdma_destroy_cq *cmd; | ||
| 1457 | |||
| 1458 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd)); | ||
| 1459 | if (!cmd) | ||
| 1460 | return status; | ||
| 1461 | ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ, | ||
| 1462 | OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); | ||
| 1463 | |||
| 1464 | cmd->bypass_flush_qid |= | ||
| 1465 | (cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) & | ||
| 1466 | OCRDMA_DESTROY_CQ_QID_MASK; | ||
| 1467 | |||
| 1468 | ocrdma_unbind_eq(dev, cq->eqn); | ||
| 1469 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 1470 | if (status) | ||
| 1471 | goto mbx_err; | ||
| 1472 | dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa); | ||
| 1473 | mbx_err: | ||
| 1474 | kfree(cmd); | ||
| 1475 | return status; | ||
| 1476 | } | ||
| 1477 | |||
| 1478 | int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr, | ||
| 1479 | u32 pdid, int addr_check) | ||
| 1480 | { | ||
| 1481 | int status = -ENOMEM; | ||
| 1482 | struct ocrdma_alloc_lkey *cmd; | ||
| 1483 | struct ocrdma_alloc_lkey_rsp *rsp; | ||
| 1484 | |||
| 1485 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_LKEY, sizeof(*cmd)); | ||
| 1486 | if (!cmd) | ||
| 1487 | return status; | ||
| 1488 | cmd->pdid = pdid; | ||
| 1489 | cmd->pbl_sz_flags |= addr_check; | ||
| 1490 | cmd->pbl_sz_flags |= (hwmr->fr_mr << OCRDMA_ALLOC_LKEY_FMR_SHIFT); | ||
| 1491 | cmd->pbl_sz_flags |= | ||
| 1492 | (hwmr->remote_wr << OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT); | ||
| 1493 | cmd->pbl_sz_flags |= | ||
| 1494 | (hwmr->remote_rd << OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT); | ||
| 1495 | cmd->pbl_sz_flags |= | ||
| 1496 | (hwmr->local_wr << OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT); | ||
| 1497 | cmd->pbl_sz_flags |= | ||
| 1498 | (hwmr->remote_atomic << OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT); | ||
| 1499 | cmd->pbl_sz_flags |= | ||
| 1500 | (hwmr->num_pbls << OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT); | ||
| 1501 | |||
| 1502 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 1503 | if (status) | ||
| 1504 | goto mbx_err; | ||
| 1505 | rsp = (struct ocrdma_alloc_lkey_rsp *)cmd; | ||
| 1506 | hwmr->lkey = rsp->lrkey; | ||
| 1507 | mbx_err: | ||
| 1508 | kfree(cmd); | ||
| 1509 | return status; | ||
| 1510 | } | ||
| 1511 | |||
| 1512 | int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey) | ||
| 1513 | { | ||
| 1514 | int status = -ENOMEM; | ||
| 1515 | struct ocrdma_dealloc_lkey *cmd; | ||
| 1516 | |||
| 1517 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd)); | ||
| 1518 | if (!cmd) | ||
| 1519 | return -ENOMEM; | ||
| 1520 | cmd->lkey = lkey; | ||
| 1521 | cmd->rsvd_frmr = fr_mr ? 1 : 0; | ||
| 1522 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 1526 | kfree(cmd); | ||
| 1527 | return status; | ||
| 1528 | } | ||
| 1529 | |||
| 1530 | static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr, | ||
| 1531 | u32 pdid, u32 pbl_cnt, u32 pbe_size, u32 last) | ||
| 1532 | { | ||
| 1533 | int status = -ENOMEM; | ||
| 1534 | int i; | ||
| 1535 | struct ocrdma_reg_nsmr *cmd; | ||
| 1536 | struct ocrdma_reg_nsmr_rsp *rsp; | ||
| 1537 | |||
| 1538 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd)); | ||
| 1539 | if (!cmd) | ||
| 1540 | return -ENOMEM; | ||
| 1541 | cmd->num_pbl_pdid = | ||
| 1542 | pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT); | ||
| 1543 | |||
| 1544 | cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr << | ||
| 1545 | OCRDMA_REG_NSMR_REMOTE_WR_SHIFT); | ||
| 1546 | cmd->flags_hpage_pbe_sz |= (hwmr->remote_rd << | ||
| 1547 | OCRDMA_REG_NSMR_REMOTE_RD_SHIFT); | ||
| 1548 | cmd->flags_hpage_pbe_sz |= (hwmr->local_wr << | ||
| 1549 | OCRDMA_REG_NSMR_LOCAL_WR_SHIFT); | ||
| 1550 | cmd->flags_hpage_pbe_sz |= (hwmr->remote_atomic << | ||
| 1551 | OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT); | ||
| 1552 | cmd->flags_hpage_pbe_sz |= (hwmr->mw_bind << | ||
| 1553 | OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT); | ||
| 1554 | cmd->flags_hpage_pbe_sz |= (last << OCRDMA_REG_NSMR_LAST_SHIFT); | ||
| 1555 | |||
| 1556 | cmd->flags_hpage_pbe_sz |= (hwmr->pbe_size / OCRDMA_MIN_HPAGE_SIZE); | ||
| 1557 | cmd->flags_hpage_pbe_sz |= (hwmr->pbl_size / OCRDMA_MIN_HPAGE_SIZE) << | ||
| 1558 | OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT; | ||
| 1559 | cmd->totlen_low = hwmr->len; | ||
| 1560 | cmd->totlen_high = upper_32_bits(hwmr->len); | ||
| 1561 | cmd->fbo_low = (u32) (hwmr->fbo & 0xffffffff); | ||
| 1562 | cmd->fbo_high = (u32) upper_32_bits(hwmr->fbo); | ||
| 1563 | cmd->va_loaddr = (u32) hwmr->va; | ||
| 1564 | cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va); | ||
| 1565 | |||
| 1566 | for (i = 0; i < pbl_cnt; i++) { | ||
| 1567 | cmd->pbl[i].lo = (u32) (hwmr->pbl_table[i].pa & 0xffffffff); | ||
| 1568 | cmd->pbl[i].hi = upper_32_bits(hwmr->pbl_table[i].pa); | ||
| 1569 | } | ||
| 1570 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 1571 | if (status) | ||
| 1572 | goto mbx_err; | ||
| 1573 | rsp = (struct ocrdma_reg_nsmr_rsp *)cmd; | ||
| 1574 | hwmr->lkey = rsp->lrkey; | ||
| 1575 | mbx_err: | ||
| 1576 | kfree(cmd); | ||
| 1577 | return status; | ||
| 1578 | } | ||
| 1579 | |||
| 1580 | static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev, | ||
| 1581 | struct ocrdma_hw_mr *hwmr, u32 pbl_cnt, | ||
| 1582 | u32 pbl_offset, u32 last) | ||
| 1583 | { | ||
| 1584 | int status = -ENOMEM; | ||
| 1585 | int i; | ||
| 1586 | struct ocrdma_reg_nsmr_cont *cmd; | ||
| 1587 | |||
| 1588 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR_CONT, sizeof(*cmd)); | ||
| 1589 | if (!cmd) | ||
| 1590 | return -ENOMEM; | ||
| 1591 | cmd->lrkey = hwmr->lkey; | ||
| 1592 | cmd->num_pbl_offset = (pbl_cnt << OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT) | | ||
| 1593 | (pbl_offset & OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK); | ||
| 1594 | cmd->last = last << OCRDMA_REG_NSMR_CONT_LAST_SHIFT; | ||
| 1595 | |||
| 1596 | for (i = 0; i < pbl_cnt; i++) { | ||
| 1597 | cmd->pbl[i].lo = | ||
| 1598 | (u32) (hwmr->pbl_table[i + pbl_offset].pa & 0xffffffff); | ||
| 1599 | cmd->pbl[i].hi = | ||
| 1600 | upper_32_bits(hwmr->pbl_table[i + pbl_offset].pa); | ||
| 1601 | } | ||
| 1602 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 1606 | kfree(cmd); | ||
| 1607 | return status; | ||
| 1608 | } | ||
| 1609 | |||
| 1610 | int ocrdma_reg_mr(struct ocrdma_dev *dev, | ||
| 1611 | struct ocrdma_hw_mr *hwmr, u32 pdid, int acc) | ||
| 1612 | { | ||
| 1613 | int status; | ||
| 1614 | u32 last = 0; | ||
| 1615 | u32 cur_pbl_cnt, pbl_offset; | ||
| 1616 | u32 pending_pbl_cnt = hwmr->num_pbls; | ||
| 1617 | |||
| 1618 | pbl_offset = 0; | ||
| 1619 | cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL); | ||
| 1620 | if (cur_pbl_cnt == pending_pbl_cnt) | ||
| 1621 | last = 1; | ||
| 1622 | |||
| 1623 | status = ocrdma_mbx_reg_mr(dev, hwmr, pdid, | ||
| 1624 | cur_pbl_cnt, hwmr->pbe_size, last); | ||
| 1625 | if (status) { | ||
| 1626 | ocrdma_err("%s() status=%d\n", __func__, status); | ||
| 1627 | return status; | ||
| 1628 | } | ||
| 1629 | /* if there are no more pbls to register, then exit. */ | ||
| 1630 | if (last) | ||
| 1631 | return 0; | ||
| 1632 | |||
| 1633 | while (!last) { | ||
| 1634 | pbl_offset += cur_pbl_cnt; | ||
| 1635 | pending_pbl_cnt -= cur_pbl_cnt; | ||
| 1636 | cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL); | ||
| 1637 | /* if we reach the end of the pbls, we need to set the last | ||
| 1638 | * bit, indicating no more pbls to register for this memory key. | ||
| 1639 | */ | ||
| 1640 | if (cur_pbl_cnt == pending_pbl_cnt) | ||
| 1641 | last = 1; | ||
| 1642 | |||
| 1643 | status = ocrdma_mbx_reg_mr_cont(dev, hwmr, cur_pbl_cnt, | ||
| 1644 | pbl_offset, last); | ||
| 1645 | if (status) | ||
| 1646 | break; | ||
| 1647 | } | ||
| 1648 | if (status) | ||
| 1649 | ocrdma_err("%s() err. status=%d\n", __func__, status); | ||
| 1650 | |||
| 1651 | return status; | ||
| 1652 | } | ||
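To illustrate the chunking (assuming, purely for illustration, MAX_OCRDMA_NSMR_PBL = 32): an MR with num_pbls = 70 is registered as one REGISTER_NSMR command carrying 32 PBL addresses (last = 0), followed by two REGISTER_NSMR_CONT commands carrying 32 addresses at offset 32 (last = 0) and 6 addresses at offset 64 (last = 1).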
| 1653 | |||
| 1654 | bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp) | ||
| 1655 | { | ||
| 1656 | struct ocrdma_qp *tmp; | ||
| 1657 | bool found = false; | ||
| 1658 | list_for_each_entry(tmp, &cq->sq_head, sq_entry) { | ||
| 1659 | if (qp == tmp) { | ||
| 1660 | found = true; | ||
| 1661 | break; | ||
| 1662 | } | ||
| 1663 | } | ||
| 1664 | return found; | ||
| 1665 | } | ||
| 1666 | |||
| 1667 | bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp) | ||
| 1668 | { | ||
| 1669 | struct ocrdma_qp *tmp; | ||
| 1670 | bool found = false; | ||
| 1671 | list_for_each_entry(tmp, &cq->rq_head, rq_entry) { | ||
| 1672 | if (qp == tmp) { | ||
| 1673 | found = true; | ||
| 1674 | break; | ||
| 1675 | } | ||
| 1676 | } | ||
| 1677 | return found; | ||
| 1678 | } | ||
| 1679 | |||
| 1680 | void ocrdma_flush_qp(struct ocrdma_qp *qp) | ||
| 1681 | { | ||
| 1682 | bool found; | ||
| 1683 | unsigned long flags; | ||
| 1684 | |||
| 1685 | spin_lock_irqsave(&qp->dev->flush_q_lock, flags); | ||
| 1686 | found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp); | ||
| 1687 | if (!found) | ||
| 1688 | list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head); | ||
| 1689 | if (!qp->srq) { | ||
| 1690 | found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp); | ||
| 1691 | if (!found) | ||
| 1692 | list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head); | ||
| 1693 | } | ||
| 1694 | spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags); | ||
| 1695 | } | ||
| 1696 | |||
| 1697 | int ocrdma_qp_state_machine(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state, | ||
| 1698 | enum ib_qp_state *old_ib_state) | ||
| 1699 | { | ||
| 1700 | unsigned long flags; | ||
| 1701 | int status = 0; | ||
| 1702 | enum ocrdma_qp_state new_state; | ||
| 1703 | new_state = get_ocrdma_qp_state(new_ib_state); | ||
| 1704 | |||
| 1705 | /* sync with wqe and rqe posting */ | ||
| 1706 | spin_lock_irqsave(&qp->q_lock, flags); | ||
| 1707 | |||
| 1708 | if (old_ib_state) | ||
| 1709 | *old_ib_state = get_ibqp_state(qp->state); | ||
| 1710 | if (new_state == qp->state) { | ||
| 1711 | spin_unlock_irqrestore(&qp->q_lock, flags); | ||
| 1712 | return 1; | ||
| 1713 | } | ||
| 1714 | |||
| 1715 | switch (qp->state) { | ||
| 1716 | case OCRDMA_QPS_RST: | ||
| 1717 | switch (new_state) { | ||
| 1718 | case OCRDMA_QPS_RST: | ||
| 1719 | case OCRDMA_QPS_INIT: | ||
| 1720 | break; | ||
| 1721 | default: | ||
| 1722 | status = -EINVAL; | ||
| 1723 | break; | ||
| 1724 | } | ||
| 1725 | break; | ||
| 1726 | case OCRDMA_QPS_INIT: | ||
| 1727 | /* qps: INIT->XXX */ | ||
| 1728 | switch (new_state) { | ||
| 1729 | case OCRDMA_QPS_INIT: | ||
| 1730 | case OCRDMA_QPS_RTR: | ||
| 1731 | break; | ||
| 1732 | case OCRDMA_QPS_ERR: | ||
| 1733 | ocrdma_flush_qp(qp); | ||
| 1734 | break; | ||
| 1735 | default: | ||
| 1736 | status = -EINVAL; | ||
| 1737 | break; | ||
| 1738 | } | ||
| 1739 | break; | ||
| 1740 | case OCRDMA_QPS_RTR: | ||
| 1741 | /* qps: RTR->XXX */ | ||
| 1742 | switch (new_state) { | ||
| 1743 | case OCRDMA_QPS_RTS: | ||
| 1744 | break; | ||
| 1745 | case OCRDMA_QPS_ERR: | ||
| 1746 | ocrdma_flush_qp(qp); | ||
| 1747 | break; | ||
| 1748 | default: | ||
| 1749 | status = -EINVAL; | ||
| 1750 | break; | ||
| 1751 | } | ||
| 1752 | break; | ||
| 1753 | case OCRDMA_QPS_RTS: | ||
| 1754 | /* qps: RTS->XXX */ | ||
| 1755 | switch (new_state) { | ||
| 1756 | case OCRDMA_QPS_SQD: | ||
| 1757 | case OCRDMA_QPS_SQE: | ||
| 1758 | break; | ||
| 1759 | case OCRDMA_QPS_ERR: | ||
| 1760 | ocrdma_flush_qp(qp); | ||
| 1761 | break; | ||
| 1762 | default: | ||
| 1763 | status = -EINVAL; | ||
| 1764 | break; | ||
| 1765 | } | ||
| 1766 | break; | ||
| 1767 | case OCRDMA_QPS_SQD: | ||
| 1768 | /* qps: SQD->XXX */ | ||
| 1769 | switch (new_state) { | ||
| 1770 | case OCRDMA_QPS_RTS: | ||
| 1771 | case OCRDMA_QPS_SQE: | ||
| 1772 | case OCRDMA_QPS_ERR: | ||
| 1773 | break; | ||
| 1774 | default: | ||
| 1775 | status = -EINVAL; | ||
| 1776 | break; | ||
| 1777 | } | ||
| 1778 | break; | ||
| 1779 | case OCRDMA_QPS_SQE: | ||
| 1780 | switch (new_state) { | ||
| 1781 | case OCRDMA_QPS_RTS: | ||
| 1782 | case OCRDMA_QPS_ERR: | ||
| 1783 | break; | ||
| 1784 | default: | ||
| 1785 | status = -EINVAL; | ||
| 1786 | break; | ||
| 1787 | } | ||
| 1788 | break; | ||
| 1789 | case OCRDMA_QPS_ERR: | ||
| 1790 | /* qps: ERR->XXX */ | ||
| 1791 | switch (new_state) { | ||
| 1792 | case OCRDMA_QPS_RST: | ||
| 1793 | break; | ||
| 1794 | default: | ||
| 1795 | status = -EINVAL; | ||
| 1796 | break; | ||
| 1797 | } | ||
| 1798 | break; | ||
| 1799 | default: | ||
| 1800 | status = -EINVAL; | ||
| 1801 | break; | ||
| 1802 | } | ||
| 1803 | if (!status) | ||
| 1804 | qp->state = new_state; | ||
| 1805 | |||
| 1806 | spin_unlock_irqrestore(&qp->q_lock, flags); | ||
| 1807 | return status; | ||
| 1808 | } | ||
| 1809 | |||
| 1810 | static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp) | ||
| 1811 | { | ||
| 1812 | u32 flags = 0; | ||
| 1813 | if (qp->cap_flags & OCRDMA_QP_INB_RD) | ||
| 1814 | flags |= OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK; | ||
| 1815 | if (qp->cap_flags & OCRDMA_QP_INB_WR) | ||
| 1816 | flags |= OCRDMA_CREATE_QP_REQ_INB_WREN_MASK; | ||
| 1817 | if (qp->cap_flags & OCRDMA_QP_MW_BIND) | ||
| 1818 | flags |= OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK; | ||
| 1819 | if (qp->cap_flags & OCRDMA_QP_LKEY0) | ||
| 1820 | flags |= OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK; | ||
| 1821 | if (qp->cap_flags & OCRDMA_QP_FAST_REG) | ||
| 1822 | flags |= OCRDMA_CREATE_QP_REQ_FMR_EN_MASK; | ||
| 1823 | return flags; | ||
| 1824 | } | ||
| 1825 | |||
| 1826 | static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd, | ||
| 1827 | struct ib_qp_init_attr *attrs, | ||
| 1828 | struct ocrdma_qp *qp) | ||
| 1829 | { | ||
| 1830 | int status; | ||
| 1831 | u32 len, hw_pages, hw_page_size; | ||
| 1832 | dma_addr_t pa; | ||
| 1833 | struct ocrdma_dev *dev = qp->dev; | ||
| 1834 | struct pci_dev *pdev = dev->nic_info.pdev; | ||
| 1835 | u32 max_wqe_allocated; | ||
| 1836 | u32 max_sges = attrs->cap.max_send_sge; | ||
| 1837 | |||
| 1838 | max_wqe_allocated = attrs->cap.max_send_wr; | ||
| 1839 | /* need to allocate one extra wqe for the GEN1 family */ | ||
| 1840 | if (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY) | ||
| 1841 | max_wqe_allocated += 1; | ||
| 1842 | |||
| 1843 | status = ocrdma_build_q_conf(&max_wqe_allocated, | ||
| 1844 | dev->attr.wqe_size, &hw_pages, &hw_page_size); | ||
| 1845 | if (status) { | ||
| 1846 | ocrdma_err("%s() req. max_send_wr=0x%x\n", __func__, | ||
| 1847 | max_wqe_allocated); | ||
| 1848 | return -EINVAL; | ||
| 1849 | } | ||
| 1850 | qp->sq.max_cnt = max_wqe_allocated; | ||
| 1851 | len = (hw_pages * hw_page_size); | ||
| 1852 | |||
| 1853 | qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); | ||
| 1854 | if (!qp->sq.va) | ||
| 1855 | return -ENOMEM; | ||
| 1856 | memset(qp->sq.va, 0, len); | ||
| 1857 | qp->sq.len = len; | ||
| 1858 | qp->sq.pa = pa; | ||
| 1859 | qp->sq.entry_size = dev->attr.wqe_size; | ||
| 1860 | ocrdma_build_q_pages(&cmd->wq_addr[0], hw_pages, pa, hw_page_size); | ||
| 1861 | |||
| 1862 | cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) | ||
| 1863 | << OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT); | ||
| 1864 | cmd->num_wq_rq_pages |= (hw_pages << | ||
| 1865 | OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT) & | ||
| 1866 | OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK; | ||
| 1867 | cmd->max_sge_send_write |= (max_sges << | ||
| 1868 | OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT) & | ||
| 1869 | OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK; | ||
| 1870 | cmd->max_sge_send_write |= (max_sges << | ||
| 1871 | OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT) & | ||
| 1872 | OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK; | ||
| 1873 | cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) << | ||
| 1874 | OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT) & | ||
| 1875 | OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK; | ||
| 1876 | cmd->wqe_rqe_size |= (dev->attr.wqe_size << | ||
| 1877 | OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT) & | ||
| 1878 | OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK; | ||
| 1879 | return 0; | ||
| 1880 | } | ||
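
Every field in these mailbox commands is packed with the same `(value << SHIFT) & MASK` idiom that the driver open-codes. A small illustration of the pattern (helper name hypothetical):

#include <stdint.h>

/* Hypothetical helper showing the shift-and-mask encoding used for
 * cmd->num_wq_rq_pages, cmd->max_wqe_rqe, cmd->wqe_rqe_size, etc. */
static inline uint32_t pack_field(uint32_t val, unsigned int shift,
				  uint32_t mask)
{
	return (val << shift) & mask;
}

/* e.g., instead of open-coding:
 *   cmd->wqe_rqe_size |= pack_field(dev->attr.wqe_size,
 *                                   OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT,
 *                                   OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK);
 */
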
| 1881 | |||
| 1882 | static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd, | ||
| 1883 | struct ib_qp_init_attr *attrs, | ||
| 1884 | struct ocrdma_qp *qp) | ||
| 1885 | { | ||
| 1886 | int status; | ||
| 1887 | u32 len, hw_pages, hw_page_size; | ||
| 1888 | dma_addr_t pa = 0; | ||
| 1889 | struct ocrdma_dev *dev = qp->dev; | ||
| 1890 | struct pci_dev *pdev = dev->nic_info.pdev; | ||
| 1891 | u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1; | ||
| 1892 | |||
| 1893 | status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size, | ||
| 1894 | &hw_pages, &hw_page_size); | ||
| 1895 | if (status) { | ||
| 1896 | ocrdma_err("%s() req. max_recv_wr=0x%x\n", __func__, | ||
| 1897 | attrs->cap.max_recv_wr + 1); | ||
| 1898 | return status; | ||
| 1899 | } | ||
| 1900 | qp->rq.max_cnt = max_rqe_allocated; | ||
| 1901 | len = (hw_pages * hw_page_size); | ||
| 1902 | |||
| 1903 | qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); | ||
| 1904 | if (!qp->rq.va) | ||
| 1905 | return -ENOMEM; | ||
| 1906 | memset(qp->rq.va, 0, len); | ||
| 1907 | qp->rq.pa = pa; | ||
| 1908 | qp->rq.len = len; | ||
| 1909 | qp->rq.entry_size = dev->attr.rqe_size; | ||
| 1910 | |||
| 1911 | ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size); | ||
| 1912 | cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) << | ||
| 1913 | OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT); | ||
| 1914 | cmd->num_wq_rq_pages |= | ||
| 1915 | (hw_pages << OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT) & | ||
| 1916 | OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK; | ||
| 1917 | cmd->max_sge_recv_flags |= (attrs->cap.max_recv_sge << | ||
| 1918 | OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT) & | ||
| 1919 | OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK; | ||
| 1920 | cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) << | ||
| 1921 | OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT) & | ||
| 1922 | OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK; | ||
| 1923 | cmd->wqe_rqe_size |= (dev->attr.rqe_size << | ||
| 1924 | OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT) & | ||
| 1925 | OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK; | ||
| 1926 | return 0; | ||
| 1927 | } | ||
| 1928 | |||
| 1929 | static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd, | ||
| 1930 | struct ocrdma_pd *pd, | ||
| 1931 | struct ocrdma_qp *qp, | ||
| 1932 | u8 enable_dpp_cq, u16 dpp_cq_id) | ||
| 1933 | { | ||
| 1934 | pd->num_dpp_qp--; | ||
| 1935 | qp->dpp_enabled = true; | ||
| 1936 | cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK; | ||
| 1937 | if (!enable_dpp_cq) | ||
| 1938 | return; | ||
| 1940 | cmd->dpp_credits_cqid = dpp_cq_id; | ||
| 1941 | cmd->dpp_credits_cqid |= OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT << | ||
| 1942 | OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT; | ||
| 1943 | } | ||
| 1944 | |||
| 1945 | static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd, | ||
| 1946 | struct ocrdma_qp *qp) | ||
| 1947 | { | ||
| 1948 | struct ocrdma_dev *dev = qp->dev; | ||
| 1949 | struct pci_dev *pdev = dev->nic_info.pdev; | ||
| 1950 | dma_addr_t pa = 0; | ||
| 1951 | int ird_page_size = dev->attr.ird_page_size; | ||
| 1952 | int ird_q_len = dev->attr.num_ird_pages * ird_page_size; | ||
| 1953 | |||
| 1954 | if (dev->attr.ird == 0) | ||
| 1955 | return 0; | ||
| 1956 | |||
| 1957 | qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, | ||
| 1958 | &pa, GFP_KERNEL); | ||
| 1959 | if (!qp->ird_q_va) | ||
| 1960 | return -ENOMEM; | ||
| 1961 | memset(qp->ird_q_va, 0, ird_q_len); | ||
| 1962 | ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages, | ||
| 1963 | pa, ird_page_size); | ||
| 1964 | return 0; | ||
| 1965 | } | ||
| 1966 | |||
| 1967 | static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp, | ||
| 1968 | struct ocrdma_qp *qp, | ||
| 1969 | struct ib_qp_init_attr *attrs, | ||
| 1970 | u16 *dpp_offset, u16 *dpp_credit_lmt) | ||
| 1971 | { | ||
| 1972 | u32 max_wqe_allocated, max_rqe_allocated; | ||
| 1973 | qp->id = rsp->qp_id & OCRDMA_CREATE_QP_RSP_QP_ID_MASK; | ||
| 1974 | qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK; | ||
| 1975 | qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT; | ||
| 1976 | qp->max_ird = rsp->max_ord_ird & OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK; | ||
| 1977 | qp->max_ord = (rsp->max_ord_ird >> OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT); | ||
| 1978 | qp->dpp_enabled = false; | ||
| 1979 | if (rsp->dpp_response & OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK) { | ||
| 1980 | qp->dpp_enabled = true; | ||
| 1981 | *dpp_credit_lmt = (rsp->dpp_response & | ||
| 1982 | OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK) >> | ||
| 1983 | OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT; | ||
| 1984 | *dpp_offset = (rsp->dpp_response & | ||
| 1985 | OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK) >> | ||
| 1986 | OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT; | ||
| 1987 | } | ||
| 1988 | max_wqe_allocated = | ||
| 1989 | rsp->max_wqe_rqe >> OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT; | ||
| 1990 | max_wqe_allocated = 1 << max_wqe_allocated; | ||
| 1991 | max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe); | ||
| 1992 | |||
| 1993 | if (qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | ||
| 1994 | qp->sq.free_delta = 0; | ||
| 1995 | qp->rq.free_delta = 1; | ||
| 1996 | } else | ||
| 1997 | qp->sq.free_delta = 1; | ||
| 1998 | |||
| 1999 | qp->sq.max_cnt = max_wqe_allocated; | ||
| 2000 | qp->sq.max_wqe_idx = max_wqe_allocated - 1; | ||
| 2001 | |||
| 2002 | if (!attrs->srq) { | ||
| 2003 | qp->rq.max_cnt = max_rqe_allocated; | ||
| 2004 | qp->rq.max_wqe_idx = max_rqe_allocated - 1; | ||
| 2005 | qp->rq.free_delta = 1; | ||
| 2006 | } | ||
| 2007 | } | ||
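
A worked example of the decode above: the firmware reports ring sizes as log2 values, so if `rsp->max_wqe_rqe >> OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT` is 10, the hardware allocated 1 << 10 = 1024 send WQEs and `qp->sq.max_wqe_idx` becomes 1023.
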
| 2008 | |||
| 2009 | int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs, | ||
| 2010 | u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset, | ||
| 2011 | u16 *dpp_credit_lmt) | ||
| 2012 | { | ||
| 2013 | int status = -ENOMEM; | ||
| 2014 | u32 flags = 0; | ||
| 2015 | struct ocrdma_dev *dev = qp->dev; | ||
| 2016 | struct ocrdma_pd *pd = qp->pd; | ||
| 2017 | struct pci_dev *pdev = dev->nic_info.pdev; | ||
| 2018 | struct ocrdma_cq *cq; | ||
| 2019 | struct ocrdma_create_qp_req *cmd; | ||
| 2020 | struct ocrdma_create_qp_rsp *rsp; | ||
| 2021 | int qptype; | ||
| 2022 | |||
| 2023 | switch (attrs->qp_type) { | ||
| 2024 | case IB_QPT_GSI: | ||
| 2025 | qptype = OCRDMA_QPT_GSI; | ||
| 2026 | break; | ||
| 2027 | case IB_QPT_RC: | ||
| 2028 | qptype = OCRDMA_QPT_RC; | ||
| 2029 | break; | ||
| 2030 | case IB_QPT_UD: | ||
| 2031 | qptype = OCRDMA_QPT_UD; | ||
| 2032 | break; | ||
| 2033 | default: | ||
| 2034 | return -EINVAL; | ||
| 2035 | } | ||
| 2036 | |||
| 2037 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd)); | ||
| 2038 | if (!cmd) | ||
| 2039 | return status; | ||
| 2040 | cmd->type_pgsz_pdn |= (qptype << OCRDMA_CREATE_QP_REQ_QPT_SHIFT) & | ||
| 2041 | OCRDMA_CREATE_QP_REQ_QPT_MASK; | ||
| 2042 | status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp); | ||
| 2043 | if (status) | ||
| 2044 | goto sq_err; | ||
| 2045 | |||
| 2046 | if (attrs->srq) { | ||
| 2047 | struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq); | ||
| 2048 | cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK; | ||
| 2049 | cmd->rq_addr[0].lo = srq->id; | ||
| 2050 | qp->srq = srq; | ||
| 2051 | } else { | ||
| 2052 | status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp); | ||
| 2053 | if (status) | ||
| 2054 | goto rq_err; | ||
| 2055 | } | ||
| 2056 | |||
| 2057 | status = ocrdma_set_create_qp_ird_cmd(cmd, qp); | ||
| 2058 | if (status) | ||
| 2059 | goto mbx_err; | ||
| 2060 | |||
| 2061 | cmd->type_pgsz_pdn |= (pd->id << OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT) & | ||
| 2062 | OCRDMA_CREATE_QP_REQ_PD_ID_MASK; | ||
| 2063 | |||
| 2064 | flags = ocrdma_set_create_qp_mbx_access_flags(qp); | ||
| 2065 | |||
| 2066 | cmd->max_sge_recv_flags |= flags; | ||
| 2067 | cmd->max_ord_ird |= (dev->attr.max_ord_per_qp << | ||
| 2068 | OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT) & | ||
| 2069 | OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK; | ||
| 2070 | cmd->max_ord_ird |= (dev->attr.max_ird_per_qp << | ||
| 2071 | OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT) & | ||
| 2072 | OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK; | ||
| 2073 | cq = get_ocrdma_cq(attrs->send_cq); | ||
| 2074 | cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT) & | ||
| 2075 | OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK; | ||
| 2076 | qp->sq_cq = cq; | ||
| 2077 | cq = get_ocrdma_cq(attrs->recv_cq); | ||
| 2078 | cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT) & | ||
| 2079 | OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK; | ||
| 2080 | qp->rq_cq = cq; | ||
| 2081 | |||
| 2082 | if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp && | ||
| 2083 | (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) | ||
| 2084 | ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq, | ||
| 2085 | dpp_cq_id); | ||
| 2086 | |||
| 2087 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 2088 | if (status) | ||
| 2089 | goto mbx_err; | ||
| 2090 | rsp = (struct ocrdma_create_qp_rsp *)cmd; | ||
| 2091 | ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt); | ||
| 2092 | qp->state = OCRDMA_QPS_RST; | ||
| 2093 | kfree(cmd); | ||
| 2094 | return 0; | ||
| 2095 | mbx_err: | ||
| 2096 | if (qp->rq.va) | ||
| 2097 | dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa); | ||
| 2098 | rq_err: | ||
| 2099 | ocrdma_err("%s(%d) rq_err\n", __func__, dev->id); | ||
| 2100 | dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa); | ||
| 2101 | sq_err: | ||
| 2102 | ocrdma_err("%s(%d) sq_err\n", __func__, dev->id); | ||
| 2103 | kfree(cmd); | ||
| 2104 | return status; | ||
| 2105 | } | ||
| 2106 | |||
| 2107 | int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp, | ||
| 2108 | struct ocrdma_qp_params *param) | ||
| 2109 | { | ||
| 2110 | int status = -ENOMEM; | ||
| 2111 | struct ocrdma_query_qp *cmd; | ||
| 2112 | struct ocrdma_query_qp_rsp *rsp; | ||
| 2113 | |||
| 2114 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*cmd)); | ||
| 2115 | if (!cmd) | ||
| 2116 | return status; | ||
| 2117 | cmd->qp_id = qp->id; | ||
| 2118 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 2119 | if (status) | ||
| 2120 | goto mbx_err; | ||
| 2121 | rsp = (struct ocrdma_query_qp_rsp *)cmd; | ||
| 2122 | memcpy(param, &rsp->params, sizeof(struct ocrdma_qp_params)); | ||
| 2123 | mbx_err: | ||
| 2124 | kfree(cmd); | ||
| 2125 | return status; | ||
| 2126 | } | ||
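
ocrdma_mbx_query_qp() shows the pattern every mailbox verb in this file follows: the command buffer doubles as the response, so the same allocation is cast to the rsp type once ocrdma_mbx_cmd() returns. A self-contained user-space sketch of that request/response overlay (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the embedded-MQE request/response pair. */
struct fake_cmd { unsigned int opcode; unsigned int qp_id; };
struct fake_rsp { unsigned int opcode; unsigned int qp_state; };

static int fake_mbx_cmd(void *buf)	/* plays the role of ocrdma_mbx_cmd() */
{
	((struct fake_rsp *)buf)->qp_state = 3; /* pretend firmware wrote it */
	return 0;
}

int main(void)
{
	struct fake_cmd *cmd = calloc(1, sizeof(*cmd));

	if (!cmd)
		return 1;
	cmd->qp_id = 7;
	if (!fake_mbx_cmd(cmd)) {
		/* the response overlays the request buffer */
		struct fake_rsp *rsp = (struct fake_rsp *)cmd;
		printf("qp_state=%u\n", rsp->qp_state);
	}
	free(cmd);
	return 0;
}
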
| 2127 | |||
| 2128 | int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid, | ||
| 2129 | u8 *mac_addr) | ||
| 2130 | { | ||
| 2131 | struct in6_addr in6; | ||
| 2132 | |||
| 2133 | memcpy(&in6, dgid, sizeof in6); | ||
| 2134 | if (rdma_is_multicast_addr(&in6)) | ||
| 2135 | rdma_get_mcast_mac(&in6, mac_addr); | ||
| 2136 | else if (rdma_link_local_addr(&in6)) | ||
| 2137 | rdma_get_ll_mac(&in6, mac_addr); | ||
| 2138 | else { | ||
| 2139 | ocrdma_err("%s() fail to resolve mac_addr.\n", __func__); | ||
| 2140 | return -EINVAL; | ||
| 2141 | } | ||
| 2142 | return 0; | ||
| 2143 | } | ||
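
ocrdma_resolve_dgid() can recover the MAC because a link-local RoCE GID embeds the station address in modified EUI-64 form. A hypothetical user-space sketch of the inverse mapping for the non-VLAN case (what rdma_get_ll_mac() effectively does):

#include <stdint.h>

/* Hypothetical sketch: undo the MAC -> link-local GID mapping
 * (fe80::/64 prefix, ff:fe inserted in the middle, universal/local
 * bit flipped in the first byte). */
static void gid_to_mac(const uint8_t gid[16], uint8_t mac[6])
{
	mac[0] = gid[8] ^ 2;	/* flip the U/L bit back */
	mac[1] = gid[9];
	mac[2] = gid[10];
	mac[3] = gid[13];
	mac[4] = gid[14];
	mac[5] = gid[15];
}
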
| 2144 | |||
| 2145 | static void ocrdma_set_av_params(struct ocrdma_qp *qp, | ||
| 2146 | struct ocrdma_modify_qp *cmd, | ||
| 2147 | struct ib_qp_attr *attrs) | ||
| 2148 | { | ||
| 2149 | struct ib_ah_attr *ah_attr = &attrs->ah_attr; | ||
| 2150 | union ib_gid sgid; | ||
| 2151 | u32 vlan_id; | ||
| 2152 | u8 mac_addr[6]; | ||
| 2153 | if ((ah_attr->ah_flags & IB_AH_GRH) == 0) | ||
| 2154 | return; | ||
| 2155 | cmd->params.tclass_sq_psn |= | ||
| 2156 | (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT); | ||
| 2157 | cmd->params.rnt_rc_sl_fl |= | ||
| 2158 | (ah_attr->grh.flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK); | ||
| 2159 | cmd->params.hop_lmt_rq_psn |= | ||
| 2160 | (ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT); | ||
| 2161 | cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID; | ||
| 2162 | memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0], | ||
| 2163 | sizeof(cmd->params.dgid)); | ||
| 2164 | ocrdma_query_gid(&qp->dev->ibdev, 1, | ||
| 2165 | ah_attr->grh.sgid_index, &sgid); | ||
| 2166 | qp->sgid_idx = ah_attr->grh.sgid_index; | ||
| 2167 | memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid)); | ||
| 2168 | ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]); | ||
| 2169 | cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) | | ||
| 2170 | (mac_addr[2] << 16) | (mac_addr[3] << 24); | ||
| 2171 | /* convert them to LE format. */ | ||
| 2172 | ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid)); | ||
| 2173 | ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid)); | ||
| 2174 | cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8); | ||
| 2175 | vlan_id = rdma_get_vlan_id(&sgid); | ||
| 2176 | if (vlan_id && (vlan_id < 0x1000)) { | ||
| 2177 | cmd->params.vlan_dmac_b4_to_b5 |= | ||
| 2178 | vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT; | ||
| 2179 | cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID; | ||
| 2180 | } | ||
| 2181 | } | ||
| 2182 | |||
| 2183 | static int ocrdma_set_qp_params(struct ocrdma_qp *qp, | ||
| 2184 | struct ocrdma_modify_qp *cmd, | ||
| 2185 | struct ib_qp_attr *attrs, int attr_mask, | ||
| 2186 | enum ib_qp_state old_qps) | ||
| 2187 | { | ||
| 2188 | int status = 0; | ||
| 2189 | struct net_device *netdev = qp->dev->nic_info.netdev; | ||
| 2190 | int eth_mtu = iboe_get_mtu(netdev->mtu); | ||
| 2191 | |||
| 2192 | if (attr_mask & IB_QP_PKEY_INDEX) { | ||
| 2193 | cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index & | ||
| 2194 | OCRDMA_QP_PARAMS_PKEY_INDEX_MASK); | ||
| 2195 | cmd->flags |= OCRDMA_QP_PARA_PKEY_VALID; | ||
| 2196 | } | ||
| 2197 | if (attr_mask & IB_QP_QKEY) { | ||
| 2198 | qp->qkey = attrs->qkey; | ||
| 2199 | cmd->params.qkey = attrs->qkey; | ||
| 2200 | cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID; | ||
| 2201 | } | ||
| 2202 | if (attr_mask & IB_QP_AV) | ||
| 2203 | ocrdma_set_av_params(qp, cmd, attrs); | ||
| 2204 | else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) { | ||
| 2205 | /* set the default mac address for UD, GSI QPs */ | ||
| 2206 | cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] | | ||
| 2207 | (qp->dev->nic_info.mac_addr[1] << 8) | | ||
| 2208 | (qp->dev->nic_info.mac_addr[2] << 16) | | ||
| 2209 | (qp->dev->nic_info.mac_addr[3] << 24); | ||
| 2210 | cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] | | ||
| 2211 | (qp->dev->nic_info.mac_addr[5] << 8); | ||
| 2212 | } | ||
| 2213 | if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) && | ||
| 2214 | attrs->en_sqd_async_notify) { | ||
| 2215 | cmd->params.max_sge_recv_flags |= | ||
| 2216 | OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC; | ||
| 2217 | cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID; | ||
| 2218 | } | ||
| 2219 | if (attr_mask & IB_QP_DEST_QPN) { | ||
| 2220 | cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->dest_qp_num & | ||
| 2221 | OCRDMA_QP_PARAMS_DEST_QPN_MASK); | ||
| 2222 | cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID; | ||
| 2223 | } | ||
| 2224 | if (attr_mask & IB_QP_PATH_MTU) { | ||
| 2225 | if (ib_mtu_enum_to_int(eth_mtu) < | ||
| 2226 | ib_mtu_enum_to_int(attrs->path_mtu)) { | ||
| 2227 | status = -EINVAL; | ||
| 2228 | goto pmtu_err; | ||
| 2229 | } | ||
| 2230 | cmd->params.path_mtu_pkey_indx |= | ||
| 2231 | (ib_mtu_enum_to_int(attrs->path_mtu) << | ||
| 2232 | OCRDMA_QP_PARAMS_PATH_MTU_SHIFT) & | ||
| 2233 | OCRDMA_QP_PARAMS_PATH_MTU_MASK; | ||
| 2234 | cmd->flags |= OCRDMA_QP_PARA_PMTU_VALID; | ||
| 2235 | } | ||
| 2236 | if (attr_mask & IB_QP_TIMEOUT) { | ||
| 2237 | cmd->params.ack_to_rnr_rtc_dest_qpn |= attrs->timeout << | ||
| 2238 | OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT; | ||
| 2239 | cmd->flags |= OCRDMA_QP_PARA_ACK_TO_VALID; | ||
| 2240 | } | ||
| 2241 | if (attr_mask & IB_QP_RETRY_CNT) { | ||
| 2242 | cmd->params.rnt_rc_sl_fl |= (attrs->retry_cnt << | ||
| 2243 | OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT) & | ||
| 2244 | OCRDMA_QP_PARAMS_RETRY_CNT_MASK; | ||
| 2245 | cmd->flags |= OCRDMA_QP_PARA_RETRY_CNT_VALID; | ||
| 2246 | } | ||
| 2247 | if (attr_mask & IB_QP_MIN_RNR_TIMER) { | ||
| 2248 | cmd->params.rnt_rc_sl_fl |= (attrs->min_rnr_timer << | ||
| 2249 | OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT) & | ||
| 2250 | OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK; | ||
| 2251 | cmd->flags |= OCRDMA_QP_PARA_RNT_VALID; | ||
| 2252 | } | ||
| 2253 | if (attr_mask & IB_QP_RNR_RETRY) { | ||
| 2254 | cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->rnr_retry << | ||
| 2255 | OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT) | ||
| 2256 | & OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK; | ||
| 2257 | cmd->flags |= OCRDMA_QP_PARA_RRC_VALID; | ||
| 2258 | } | ||
| 2259 | if (attr_mask & IB_QP_SQ_PSN) { | ||
| 2260 | cmd->params.tclass_sq_psn |= (attrs->sq_psn & 0x00ffffff); | ||
| 2261 | cmd->flags |= OCRDMA_QP_PARA_SQPSN_VALID; | ||
| 2262 | } | ||
| 2263 | if (attr_mask & IB_QP_RQ_PSN) { | ||
| 2264 | cmd->params.hop_lmt_rq_psn |= (attrs->rq_psn & 0x00ffffff); | ||
| 2265 | cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID; | ||
| 2266 | } | ||
| 2267 | if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { | ||
| 2268 | if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) { | ||
| 2269 | status = -EINVAL; | ||
| 2270 | goto pmtu_err; | ||
| 2271 | } | ||
| 2272 | qp->max_ord = attrs->max_rd_atomic; | ||
| 2273 | cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID; | ||
| 2274 | } | ||
| 2275 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { | ||
| 2276 | if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) { | ||
| 2277 | status = -EINVAL; | ||
| 2278 | goto pmtu_err; | ||
| 2279 | } | ||
| 2280 | qp->max_ird = attrs->max_dest_rd_atomic; | ||
| 2281 | cmd->flags |= OCRDMA_QP_PARA_MAX_IRD_VALID; | ||
| 2282 | } | ||
| 2283 | cmd->params.max_ord_ird = (qp->max_ord << | ||
| 2284 | OCRDMA_QP_PARAMS_MAX_ORD_SHIFT) | | ||
| 2285 | (qp->max_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK); | ||
| 2286 | pmtu_err: | ||
| 2287 | return status; | ||
| 2288 | } | ||
| 2289 | |||
| 2290 | int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp, | ||
| 2291 | struct ib_qp_attr *attrs, int attr_mask, | ||
| 2292 | enum ib_qp_state old_qps) | ||
| 2293 | { | ||
| 2294 | int status = -ENOMEM; | ||
| 2295 | struct ocrdma_modify_qp *cmd; | ||
| 2296 | |||
| 2297 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_QP, sizeof(*cmd)); | ||
| 2298 | if (!cmd) | ||
| 2299 | return status; | ||
| 2300 | |||
| 2301 | cmd->params.id = qp->id; | ||
| 2302 | cmd->flags = 0; | ||
| 2303 | if (attr_mask & IB_QP_STATE) { | ||
| 2304 | cmd->params.max_sge_recv_flags |= | ||
| 2305 | (get_ocrdma_qp_state(attrs->qp_state) << | ||
| 2306 | OCRDMA_QP_PARAMS_STATE_SHIFT) & | ||
| 2307 | OCRDMA_QP_PARAMS_STATE_MASK; | ||
| 2308 | cmd->flags |= OCRDMA_QP_PARA_QPS_VALID; | ||
| 2309 | } else | ||
| 2310 | cmd->params.max_sge_recv_flags |= | ||
| 2311 | (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) & | ||
| 2312 | OCRDMA_QP_PARAMS_STATE_MASK; | ||
| 2313 | status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps); | ||
| 2314 | if (status) | ||
| 2315 | goto mbx_err; | ||
| 2316 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 2317 | if (status) | ||
| 2318 | goto mbx_err; | ||
| 2319 | |||
| 2320 | mbx_err: | ||
| 2321 | kfree(cmd); | ||
| 2322 | return status; | ||
| 2323 | } | ||
| 2324 | |||
| 2325 | int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp) | ||
| 2326 | { | ||
| 2327 | int status = -ENOMEM; | ||
| 2328 | struct ocrdma_destroy_qp *cmd; | ||
| 2329 | struct pci_dev *pdev = dev->nic_info.pdev; | ||
| 2330 | |||
| 2331 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_QP, sizeof(*cmd)); | ||
| 2332 | if (!cmd) | ||
| 2333 | return status; | ||
| 2334 | cmd->qp_id = qp->id; | ||
| 2335 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 2336 | if (status) | ||
| 2337 | goto mbx_err; | ||
| 2338 | |||
| 2339 | mbx_err: | ||
| 2340 | kfree(cmd); | ||
| 2341 | if (qp->sq.va) | ||
| 2342 | dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa); | ||
| 2343 | if (!qp->srq && qp->rq.va) | ||
| 2344 | dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa); | ||
| 2345 | if (qp->dpp_enabled) | ||
| 2346 | qp->pd->num_dpp_qp++; | ||
| 2347 | return status; | ||
| 2348 | } | ||
| 2349 | |||
| 2350 | int ocrdma_mbx_create_srq(struct ocrdma_srq *srq, | ||
| 2351 | struct ib_srq_init_attr *srq_attr, | ||
| 2352 | struct ocrdma_pd *pd) | ||
| 2353 | { | ||
| 2354 | int status = -ENOMEM; | ||
| 2355 | int hw_pages, hw_page_size; | ||
| 2356 | int len; | ||
| 2357 | struct ocrdma_create_srq_rsp *rsp; | ||
| 2358 | struct ocrdma_create_srq *cmd; | ||
| 2359 | dma_addr_t pa; | ||
| 2360 | struct ocrdma_dev *dev = srq->dev; | ||
| 2361 | struct pci_dev *pdev = dev->nic_info.pdev; | ||
| 2362 | u32 max_rqe_allocated; | ||
| 2363 | |||
| 2364 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd)); | ||
| 2365 | if (!cmd) | ||
| 2366 | return status; | ||
| 2367 | |||
| 2368 | cmd->pgsz_pdid = pd->id & OCRDMA_CREATE_SRQ_PD_ID_MASK; | ||
| 2369 | max_rqe_allocated = srq_attr->attr.max_wr + 1; | ||
| 2370 | status = ocrdma_build_q_conf(&max_rqe_allocated, | ||
| 2371 | dev->attr.rqe_size, | ||
| 2372 | &hw_pages, &hw_page_size); | ||
| 2373 | if (status) { | ||
| 2374 | ocrdma_err("%s() req. max_wr=0x%x\n", __func__, | ||
| 2375 | srq_attr->attr.max_wr); | ||
| 2376 | status = -EINVAL; | ||
| 2377 | goto ret; | ||
| 2378 | } | ||
| 2379 | len = hw_pages * hw_page_size; | ||
| 2380 | srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); | ||
| 2381 | if (!srq->rq.va) { | ||
| 2382 | status = -ENOMEM; | ||
| 2383 | goto ret; | ||
| 2384 | } | ||
| 2385 | ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size); | ||
| 2386 | |||
| 2387 | srq->rq.entry_size = dev->attr.rqe_size; | ||
| 2388 | srq->rq.pa = pa; | ||
| 2389 | srq->rq.len = len; | ||
| 2390 | srq->rq.max_cnt = max_rqe_allocated; | ||
| 2391 | |||
| 2392 | cmd->max_sge_rqe = ilog2(max_rqe_allocated); | ||
| 2393 | cmd->max_sge_rqe |= srq_attr->attr.max_sge << | ||
| 2394 | OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT; | ||
| 2395 | |||
| 2396 | cmd->pgsz_pdid |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) | ||
| 2397 | << OCRDMA_CREATE_SRQ_PG_SZ_SHIFT); | ||
| 2398 | cmd->pages_rqe_sz |= (dev->attr.rqe_size | ||
| 2399 | << OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT) | ||
| 2400 | & OCRDMA_CREATE_SRQ_RQE_SIZE_MASK; | ||
| 2401 | cmd->pages_rqe_sz |= hw_pages << OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT; | ||
| 2402 | |||
| 2403 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 2404 | if (status) | ||
| 2405 | goto mbx_err; | ||
| 2406 | rsp = (struct ocrdma_create_srq_rsp *)cmd; | ||
| 2407 | srq->id = rsp->id; | ||
| 2408 | srq->rq.dbid = rsp->id; | ||
| 2409 | max_rqe_allocated = ((rsp->max_sge_rqe_allocated & | ||
| 2410 | OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK) >> | ||
| 2411 | OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT); | ||
| 2412 | max_rqe_allocated = (1 << max_rqe_allocated); | ||
| 2413 | srq->rq.max_cnt = max_rqe_allocated; | ||
| 2414 | srq->rq.max_wqe_idx = max_rqe_allocated - 1; | ||
| 2415 | srq->rq.max_sges = (rsp->max_sge_rqe_allocated & | ||
| 2416 | OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK) >> | ||
| 2417 | OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT; | ||
| 2418 | goto ret; | ||
| 2419 | mbx_err: | ||
| 2420 | dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa); | ||
| 2421 | ret: | ||
| 2422 | kfree(cmd); | ||
| 2423 | return status; | ||
| 2424 | } | ||
| 2425 | |||
| 2426 | int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr) | ||
| 2427 | { | ||
| 2428 | int status = -ENOMEM; | ||
| 2429 | struct ocrdma_modify_srq *cmd; | ||
| 2430 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_SRQ, sizeof(*cmd)); | ||
| 2431 | if (!cmd) | ||
| 2432 | return status; | ||
| 2433 | cmd->id = srq->id; | ||
| 2434 | cmd->limit_max_rqe |= srq_attr->srq_limit << | ||
| 2435 | OCRDMA_MODIFY_SRQ_LIMIT_SHIFT; | ||
| 2436 | status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd); | ||
| 2437 | kfree(cmd); | ||
| 2438 | return status; | ||
| 2439 | } | ||
| 2440 | |||
| 2441 | int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr) | ||
| 2442 | { | ||
| 2443 | int status = -ENOMEM; | ||
| 2444 | struct ocrdma_query_srq *cmd; | ||
| 2445 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_SRQ, sizeof(*cmd)); | ||
| 2446 | if (!cmd) | ||
| 2447 | return status; | ||
| 2448 | cmd->id = srq->rq.dbid; | ||
| 2449 | status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd); | ||
| 2450 | if (status == 0) { | ||
| 2451 | struct ocrdma_query_srq_rsp *rsp = | ||
| 2452 | (struct ocrdma_query_srq_rsp *)cmd; | ||
| 2453 | srq_attr->max_sge = | ||
| 2454 | rsp->srq_lmt_max_sge & | ||
| 2455 | OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK; | ||
| 2456 | srq_attr->max_wr = | ||
| 2457 | rsp->max_rqe_pdid >> OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT; | ||
| 2458 | srq_attr->srq_limit = rsp->srq_lmt_max_sge >> | ||
| 2459 | OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT; | ||
| 2460 | } | ||
| 2461 | kfree(cmd); | ||
| 2462 | return status; | ||
| 2463 | } | ||
| 2464 | |||
| 2465 | int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq) | ||
| 2466 | { | ||
| 2467 | int status = -ENOMEM; | ||
| 2468 | struct ocrdma_destroy_srq *cmd; | ||
| 2469 | struct pci_dev *pdev = dev->nic_info.pdev; | ||
| 2470 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd)); | ||
| 2471 | if (!cmd) | ||
| 2472 | return status; | ||
| 2473 | cmd->id = srq->id; | ||
| 2474 | status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); | ||
| 2475 | if (srq->rq.va) | ||
| 2476 | dma_free_coherent(&pdev->dev, srq->rq.len, | ||
| 2477 | srq->rq.va, srq->rq.pa); | ||
| 2478 | kfree(cmd); | ||
| 2479 | return status; | ||
| 2480 | } | ||
| 2481 | |||
| 2482 | int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah) | ||
| 2483 | { | ||
| 2484 | int i; | ||
| 2485 | int status = -EINVAL; | ||
| 2486 | struct ocrdma_av *av; | ||
| 2487 | unsigned long flags; | ||
| 2488 | |||
| 2489 | av = dev->av_tbl.va; | ||
| 2490 | spin_lock_irqsave(&dev->av_tbl.lock, flags); | ||
| 2491 | for (i = 0; i < dev->av_tbl.num_ah; i++) { | ||
| 2492 | if (av->valid == 0) { | ||
| 2493 | av->valid = OCRDMA_AV_VALID; | ||
| 2494 | ah->av = av; | ||
| 2495 | ah->id = i; | ||
| 2496 | status = 0; | ||
| 2497 | break; | ||
| 2498 | } | ||
| 2499 | av++; | ||
| 2500 | } | ||
| 2501 | if (i == dev->av_tbl.num_ah) | ||
| 2502 | status = -EAGAIN; | ||
| 2503 | spin_unlock_irqrestore(&dev->av_tbl.lock, flags); | ||
| 2504 | return status; | ||
| 2505 | } | ||
| 2506 | |||
| 2507 | int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah) | ||
| 2508 | { | ||
| 2509 | unsigned long flags; | ||
| 2510 | spin_lock_irqsave(&dev->av_tbl.lock, flags); | ||
| 2511 | ah->av->valid = 0; | ||
| 2512 | spin_unlock_irqrestore(&dev->av_tbl.lock, flags); | ||
| 2513 | return 0; | ||
| 2514 | } | ||
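
ocrdma_alloc_av() scans the AV table linearly under the spinlock, which is fine for a small table; the same allocator could also be kept as a bitmap. A hypothetical user-space sketch (locking omitted, 64 slots assumed):

#include <errno.h>
#include <stdint.h>

#define NUM_AH 64		/* illustrative table size */

static uint64_t av_bitmap;	/* bit set => AV slot in use */

static int av_alloc(void)	/* returns a slot index, like ah->id */
{
	for (int i = 0; i < NUM_AH; i++) {
		if (!(av_bitmap & (1ULL << i))) {
			av_bitmap |= 1ULL << i;
			return i;
		}
	}
	return -EAGAIN;
}

static void av_free(int id)
{
	av_bitmap &= ~(1ULL << id);
}
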
| 2515 | |||
| 2516 | static int ocrdma_create_mq_eq(struct ocrdma_dev *dev) | ||
| 2517 | { | ||
| 2518 | int status; | ||
| 2519 | int irq; | ||
| 2520 | unsigned long flags = 0; | ||
| 2521 | int num_eq = 0; | ||
| 2522 | |||
| 2523 | if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) | ||
| 2524 | flags = IRQF_SHARED; | ||
| 2525 | else { | ||
| 2526 | num_eq = dev->nic_info.msix.num_vectors - | ||
| 2527 | dev->nic_info.msix.start_vector; | ||
| 2528 | /* a minimum of two vectors/EQs is required for RDMA to work: | ||
| 2529 | * one for the control path and one for the data path. | ||
| 2530 | */ | ||
| 2531 | if (num_eq < 2) | ||
| 2532 | return -EBUSY; | ||
| 2533 | } | ||
| 2534 | |||
| 2535 | status = ocrdma_create_eq(dev, &dev->meq, OCRDMA_EQ_LEN); | ||
| 2536 | if (status) | ||
| 2537 | return status; | ||
| 2538 | sprintf(dev->meq.irq_name, "ocrdma_mq%d", dev->id); | ||
| 2539 | irq = ocrdma_get_irq(dev, &dev->meq); | ||
| 2540 | status = request_irq(irq, ocrdma_irq_handler, flags, dev->meq.irq_name, | ||
| 2541 | &dev->meq); | ||
| 2542 | if (status) | ||
| 2543 | _ocrdma_destroy_eq(dev, &dev->meq); | ||
| 2544 | return status; | ||
| 2545 | } | ||
| 2546 | |||
| 2547 | static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev) | ||
| 2548 | { | ||
| 2549 | int num_eq, i, status = 0; | ||
| 2550 | int irq; | ||
| 2551 | unsigned long flags = 0; | ||
| 2552 | |||
| 2553 | num_eq = dev->nic_info.msix.num_vectors - | ||
| 2554 | dev->nic_info.msix.start_vector; | ||
| 2555 | if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) { | ||
| 2556 | num_eq = 1; | ||
| 2557 | flags = IRQF_SHARED; | ||
| 2558 | } else | ||
| 2559 | num_eq = min_t(u32, num_eq, num_online_cpus()); | ||
| 2560 | dev->qp_eq_tbl = kcalloc(num_eq, sizeof(struct ocrdma_eq), GFP_KERNEL); | ||
| 2561 | if (!dev->qp_eq_tbl) | ||
| 2562 | return -ENOMEM; | ||
| 2563 | |||
| 2564 | for (i = 0; i < num_eq; i++) { | ||
| 2565 | status = ocrdma_create_eq(dev, &dev->qp_eq_tbl[i], | ||
| 2566 | OCRDMA_EQ_LEN); | ||
| 2567 | if (status) { | ||
| 2568 | status = -EINVAL; | ||
| 2569 | break; | ||
| 2570 | } | ||
| 2571 | sprintf(dev->qp_eq_tbl[i].irq_name, "ocrdma_qp%d-%d", | ||
| 2572 | dev->id, i); | ||
| 2573 | irq = ocrdma_get_irq(dev, &dev->qp_eq_tbl[i]); | ||
| 2574 | status = request_irq(irq, ocrdma_irq_handler, flags, | ||
| 2575 | dev->qp_eq_tbl[i].irq_name, | ||
| 2576 | &dev->qp_eq_tbl[i]); | ||
| 2577 | if (status) { | ||
| 2578 | _ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]); | ||
| 2579 | status = -EINVAL; | ||
| 2580 | break; | ||
| 2581 | } | ||
| 2582 | dev->eq_cnt += 1; | ||
| 2583 | } | ||
| 2584 | /* one EQ is sufficient for the data path to work */ | ||
| 2585 | if (dev->eq_cnt >= 1) | ||
| 2586 | return 0; | ||
| 2587 | if (status) | ||
| 2588 | ocrdma_destroy_qp_eqs(dev); | ||
| 2589 | return status; | ||
| 2590 | } | ||
| 2591 | |||
| 2592 | int ocrdma_init_hw(struct ocrdma_dev *dev) | ||
| 2593 | { | ||
| 2594 | int status; | ||
| 2595 | /* set up control path eq */ | ||
| 2596 | status = ocrdma_create_mq_eq(dev); | ||
| 2597 | if (status) | ||
| 2598 | return status; | ||
| 2599 | /* set up data path eq */ | ||
| 2600 | status = ocrdma_create_qp_eqs(dev); | ||
| 2601 | if (status) | ||
| 2602 | goto qpeq_err; | ||
| 2603 | status = ocrdma_create_mq(dev); | ||
| 2604 | if (status) | ||
| 2605 | goto mq_err; | ||
| 2606 | status = ocrdma_mbx_query_fw_config(dev); | ||
| 2607 | if (status) | ||
| 2608 | goto conf_err; | ||
| 2609 | status = ocrdma_mbx_query_dev(dev); | ||
| 2610 | if (status) | ||
| 2611 | goto conf_err; | ||
| 2612 | status = ocrdma_mbx_query_fw_ver(dev); | ||
| 2613 | if (status) | ||
| 2614 | goto conf_err; | ||
| 2615 | status = ocrdma_mbx_create_ah_tbl(dev); | ||
| 2616 | if (status) | ||
| 2617 | goto conf_err; | ||
| 2618 | return 0; | ||
| 2619 | |||
| 2620 | conf_err: | ||
| 2621 | ocrdma_destroy_mq(dev); | ||
| 2622 | mq_err: | ||
| 2623 | ocrdma_destroy_qp_eqs(dev); | ||
| 2624 | qpeq_err: | ||
| 2625 | ocrdma_destroy_eq(dev, &dev->meq); | ||
| 2626 | ocrdma_err("%s() status=%d\n", __func__, status); | ||
| 2627 | return status; | ||
| 2628 | } | ||
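
ocrdma_init_hw() uses the kernel's goto-unwind idiom: each error label tears down exactly what was set up before the failing step, in reverse order of acquisition. A minimal self-contained sketch of the idiom (names hypothetical):

#include <stdio.h>

/* Hypothetical two-step version of the init/unwind ladder above. */
static int setup_eq(void)    { return 0; }
static int setup_mq(void)    { return -1; } /* pretend this step fails */
static void destroy_eq(void) { puts("destroy_eq"); }

static int init_hw(void)
{
	int status;

	status = setup_eq();
	if (status)
		return status;
	status = setup_mq();
	if (status)
		goto eq_err;
	return 0;

eq_err:
	destroy_eq();	/* undo setup_eq() only */
	return status;
}

int main(void)
{
	printf("init_hw: %d\n", init_hw());
	return 0;
}
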
| 2629 | |||
| 2630 | void ocrdma_cleanup_hw(struct ocrdma_dev *dev) | ||
| 2631 | { | ||
| 2632 | ocrdma_mbx_delete_ah_tbl(dev); | ||
| 2633 | |||
| 2634 | /* cleanup the data path eqs */ | ||
| 2635 | ocrdma_destroy_qp_eqs(dev); | ||
| 2636 | |||
| 2637 | /* cleanup the control path */ | ||
| 2638 | ocrdma_destroy_mq(dev); | ||
| 2639 | ocrdma_destroy_eq(dev, &dev->meq); | ||
| 2640 | } | ||
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h new file mode 100644 index 000000000000..be5db77404db --- /dev/null +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h | |||
| @@ -0,0 +1,132 @@ | |||
| 1 | /******************************************************************* | ||
| 2 | * This file is part of the Emulex RoCE Device Driver for * | ||
| 3 | * RoCE (RDMA over Converged Ethernet) CNA Adapters. * | ||
| 4 | * Copyright (C) 2008-2012 Emulex. All rights reserved. * | ||
| 5 | * EMULEX and SLI are trademarks of Emulex. * | ||
| 6 | * www.emulex.com * | ||
| 7 | * * | ||
| 8 | * This program is free software; you can redistribute it and/or * | ||
| 9 | * modify it under the terms of version 2 of the GNU General * | ||
| 10 | * Public License as published by the Free Software Foundation. * | ||
| 11 | * This program is distributed in the hope that it will be useful. * | ||
| 12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | ||
| 13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | ||
| 15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | ||
| 16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | ||
| 17 | * more details, a copy of which can be found in the file COPYING * | ||
| 18 | * included with this package. * | ||
| 19 | * | ||
| 20 | * Contact Information: | ||
| 21 | * linux-drivers@emulex.com | ||
| 22 | * | ||
| 23 | * Emulex | ||
| 24 | * 3333 Susan Street | ||
| 25 | * Costa Mesa, CA 92626 | ||
| 26 | *******************************************************************/ | ||
| 27 | |||
| 28 | #ifndef __OCRDMA_HW_H__ | ||
| 29 | #define __OCRDMA_HW_H__ | ||
| 30 | |||
| 31 | #include "ocrdma_sli.h" | ||
| 32 | |||
| 33 | static inline void ocrdma_cpu_to_le32(void *dst, u32 len) | ||
| 34 | { | ||
| 35 | #ifdef __BIG_ENDIAN | ||
| 36 | int i = 0; | ||
| 37 | u32 *src_ptr = dst; | ||
| 38 | u32 *dst_ptr = dst; | ||
| 39 | for (; i < (len / 4); i++) | ||
| 40 | *(dst_ptr + i) = cpu_to_le32p(src_ptr + i); | ||
| 41 | #endif | ||
| 42 | } | ||
| 43 | |||
| 44 | static inline void ocrdma_le32_to_cpu(void *dst, u32 len) | ||
| 45 | { | ||
| 46 | #ifdef __BIG_ENDIAN | ||
| 47 | int i = 0; | ||
| 48 | u32 *src_ptr = dst; | ||
| 49 | u32 *dst_ptr = dst; | ||
| 50 | for (; i < (len / sizeof(u32)); i++) | ||
| 51 | *(dst_ptr + i) = le32_to_cpu(*(src_ptr + i)); | ||
| 52 | #endif | ||
| 53 | } | ||
| 54 | |||
| 55 | static inline void ocrdma_copy_cpu_to_le32(void *dst, void *src, u32 len) | ||
| 56 | { | ||
| 57 | #ifdef __BIG_ENDIAN | ||
| 58 | int i = 0; | ||
| 59 | u32 *src_ptr = src; | ||
| 60 | u32 *dst_ptr = dst; | ||
| 61 | for (; i < (len / sizeof(u32)); i++) | ||
| 62 | *(dst_ptr + i) = cpu_to_le32p(src_ptr + i); | ||
| 63 | #else | ||
| 64 | memcpy(dst, src, len); | ||
| 65 | #endif | ||
| 66 | } | ||
| 67 | |||
| 68 | static inline void ocrdma_copy_le32_to_cpu(void *dst, void *src, u32 len) | ||
| 69 | { | ||
| 70 | #ifdef __BIG_ENDIAN | ||
| 71 | int i = 0; | ||
| 72 | u32 *src_ptr = src; | ||
| 73 | u32 *dst_ptr = dst; | ||
| 74 | for (; i < len / sizeof(u32); i++) | ||
| 75 | *(dst_ptr + i) = le32_to_cpu(*(src_ptr + i)); | ||
| 76 | #else | ||
| 77 | memcpy(dst, src, len); | ||
| 78 | #endif | ||
| 79 | } | ||
| 80 | |||
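
The four helpers above are no-ops (or plain memcpy) on little-endian hosts and byte-swap every 32-bit word in place on big-endian ones. A self-contained user-space sketch of the conversion they perform (hypothetical names; the kernel versions use cpu_to_le32p()/le32_to_cpu()):

#include <stdint.h>
#include <stdio.h>

static uint32_t swap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00) |
	       ((v << 8) & 0x00ff0000) | (v << 24);
}

/* the in-place, word-by-word conversion ocrdma_cpu_to_le32() would
 * do on a big-endian host */
static void buf_swap32(void *buf, uint32_t len)
{
	uint32_t *p = buf;

	for (uint32_t i = 0; i < len / sizeof(uint32_t); i++)
		p[i] = swap32(p[i]);
}

int main(void)
{
	uint32_t q[2] = { 0x11223344, 0xa0b0c0d0 };

	buf_swap32(q, sizeof(q));
	printf("%08x %08x\n", q[0], q[1]); /* prints 44332211 d0c0b0a0 */
	return 0;
}
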
| 81 | int ocrdma_init_hw(struct ocrdma_dev *); | ||
| 82 | void ocrdma_cleanup_hw(struct ocrdma_dev *); | ||
| 83 | |||
| 84 | enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps); | ||
| 85 | void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed, | ||
| 86 | bool solicited, u16 cqe_popped); | ||
| 87 | |||
| 88 | /* verbs specific mailbox commands */ | ||
| 89 | int ocrdma_query_config(struct ocrdma_dev *, | ||
| 90 | struct ocrdma_mbx_query_config *config); | ||
| 91 | int ocrdma_resolve_dgid(struct ocrdma_dev *, union ib_gid *dgid, u8 *mac_addr); | ||
| 92 | |||
| 93 | int ocrdma_mbx_alloc_pd(struct ocrdma_dev *, struct ocrdma_pd *); | ||
| 94 | int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *, struct ocrdma_pd *); | ||
| 95 | |||
| 96 | int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr, | ||
| 97 | u32 pd_id, int addr_check); | ||
| 98 | int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *, int fmr, u32 lkey); | ||
| 99 | |||
| 100 | int ocrdma_reg_mr(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr, | ||
| 101 | u32 pd_id, int acc); | ||
| 102 | int ocrdma_mbx_create_cq(struct ocrdma_dev *, struct ocrdma_cq *, | ||
| 103 | int entries, int dpp_cq); | ||
| 104 | int ocrdma_mbx_destroy_cq(struct ocrdma_dev *, struct ocrdma_cq *); | ||
| 105 | |||
| 106 | int ocrdma_mbx_create_qp(struct ocrdma_qp *, struct ib_qp_init_attr *attrs, | ||
| 107 | u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset, | ||
| 108 | u16 *dpp_credit_lmt); | ||
| 109 | int ocrdma_mbx_modify_qp(struct ocrdma_dev *, struct ocrdma_qp *, | ||
| 110 | struct ib_qp_attr *attrs, int attr_mask, | ||
| 111 | enum ib_qp_state old_qps); | ||
| 112 | int ocrdma_mbx_query_qp(struct ocrdma_dev *, struct ocrdma_qp *, | ||
| 113 | struct ocrdma_qp_params *param); | ||
| 114 | int ocrdma_mbx_destroy_qp(struct ocrdma_dev *, struct ocrdma_qp *); | ||
| 115 | |||
| 116 | int ocrdma_mbx_create_srq(struct ocrdma_srq *, | ||
| 117 | struct ib_srq_init_attr *, | ||
| 118 | struct ocrdma_pd *); | ||
| 119 | int ocrdma_mbx_modify_srq(struct ocrdma_srq *, struct ib_srq_attr *); | ||
| 120 | int ocrdma_mbx_query_srq(struct ocrdma_srq *, struct ib_srq_attr *); | ||
| 121 | int ocrdma_mbx_destroy_srq(struct ocrdma_dev *, struct ocrdma_srq *); | ||
| 122 | |||
| 123 | int ocrdma_alloc_av(struct ocrdma_dev *, struct ocrdma_ah *); | ||
| 124 | int ocrdma_free_av(struct ocrdma_dev *, struct ocrdma_ah *); | ||
| 125 | |||
| 126 | int ocrdma_qp_state_machine(struct ocrdma_qp *, enum ib_qp_state new_state, | ||
| 127 | enum ib_qp_state *old_ib_state); | ||
| 128 | bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *); | ||
| 129 | bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *); | ||
| 130 | void ocrdma_flush_qp(struct ocrdma_qp *); | ||
| 131 | |||
| 132 | #endif /* __OCRDMA_HW_H__ */ | ||
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c new file mode 100644 index 000000000000..a20d16eaae71 --- /dev/null +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c | |||
| @@ -0,0 +1,577 @@ | |||
| 1 | /******************************************************************* | ||
| 2 | * This file is part of the Emulex RoCE Device Driver for * | ||
| 3 | * RoCE (RDMA over Converged Ethernet) adapters. * | ||
| 4 | * Copyright (C) 2008-2012 Emulex. All rights reserved. * | ||
| 5 | * EMULEX and SLI are trademarks of Emulex. * | ||
| 6 | * www.emulex.com * | ||
| 7 | * * | ||
| 8 | * This program is free software; you can redistribute it and/or * | ||
| 9 | * modify it under the terms of version 2 of the GNU General * | ||
| 10 | * Public License as published by the Free Software Foundation. * | ||
| 11 | * This program is distributed in the hope that it will be useful. * | ||
| 12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | ||
| 13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | ||
| 15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | ||
| 16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | ||
| 17 | * more details, a copy of which can be found in the file COPYING * | ||
| 18 | * included with this package. * | ||
| 19 | * | ||
| 20 | * Contact Information: | ||
| 21 | * linux-drivers@emulex.com | ||
| 22 | * | ||
| 23 | * Emulex | ||
| 24 | * 3333 Susan Street | ||
| 25 | * Costa Mesa, CA 92626 | ||
| 26 | *******************************************************************/ | ||
| 27 | |||
| 28 | #include <linux/module.h> | ||
| 29 | #include <linux/version.h> | ||
| 30 | #include <linux/idr.h> | ||
| 31 | #include <rdma/ib_verbs.h> | ||
| 32 | #include <rdma/ib_user_verbs.h> | ||
| 33 | #include <rdma/ib_addr.h> | ||
| 34 | |||
| 35 | #include <linux/netdevice.h> | ||
| 36 | #include <net/addrconf.h> | ||
| 37 | |||
| 38 | #include "ocrdma.h" | ||
| 39 | #include "ocrdma_verbs.h" | ||
| 40 | #include "ocrdma_ah.h" | ||
| 41 | #include "be_roce.h" | ||
| 42 | #include "ocrdma_hw.h" | ||
| 43 | |||
| 44 | MODULE_VERSION(OCRDMA_ROCE_DEV_VERSION); | ||
| 45 | MODULE_DESCRIPTION("Emulex RoCE HCA Driver"); | ||
| 46 | MODULE_AUTHOR("Emulex Corporation"); | ||
| 47 | MODULE_LICENSE("GPL"); | ||
| 48 | |||
| 49 | static LIST_HEAD(ocrdma_dev_list); | ||
| 50 | static DEFINE_SPINLOCK(ocrdma_devlist_lock); | ||
| 51 | static DEFINE_IDR(ocrdma_dev_id); | ||
| 52 | |||
| 53 | static union ib_gid ocrdma_zero_sgid; | ||
| 54 | |||
| 55 | static int ocrdma_get_instance(void) | ||
| 56 | { | ||
| 57 | int instance = 0; | ||
| 58 | |||
| 59 | /* Assign an unused number */ | ||
| 60 | if (!idr_pre_get(&ocrdma_dev_id, GFP_KERNEL)) | ||
| 61 | return -1; | ||
| 62 | if (idr_get_new(&ocrdma_dev_id, NULL, &instance)) | ||
| 63 | return -1; | ||
| 64 | return instance; | ||
| 65 | } | ||
| 66 | |||
| 67 | void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid) | ||
| 68 | { | ||
| 69 | u8 mac_addr[6]; | ||
| 70 | |||
| 71 | memcpy(&mac_addr[0], &dev->nic_info.mac_addr[0], ETH_ALEN); | ||
| 72 | guid[0] = mac_addr[0] ^ 2; | ||
| 73 | guid[1] = mac_addr[1]; | ||
| 74 | guid[2] = mac_addr[2]; | ||
| 75 | guid[3] = 0xff; | ||
| 76 | guid[4] = 0xfe; | ||
| 77 | guid[5] = mac_addr[3]; | ||
| 78 | guid[6] = mac_addr[4]; | ||
| 79 | guid[7] = mac_addr[5]; | ||
| 80 | } | ||
| 81 | |||
| 82 | static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr, | ||
| 83 | bool is_vlan, u16 vlan_id) | ||
| 84 | { | ||
| 85 | sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); | ||
| 86 | sgid->raw[8] = mac_addr[0] ^ 2; | ||
| 87 | sgid->raw[9] = mac_addr[1]; | ||
| 88 | sgid->raw[10] = mac_addr[2]; | ||
| 89 | if (is_vlan) { | ||
| 90 | sgid->raw[11] = vlan_id >> 8; | ||
| 91 | sgid->raw[12] = vlan_id & 0xff; | ||
| 92 | } else { | ||
| 93 | sgid->raw[11] = 0xff; | ||
| 94 | sgid->raw[12] = 0xfe; | ||
| 95 | } | ||
| 96 | sgid->raw[13] = mac_addr[3]; | ||
| 97 | sgid->raw[14] = mac_addr[4]; | ||
| 98 | sgid->raw[15] = mac_addr[5]; | ||
| 99 | } | ||
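
Worked example (non-VLAN case): MAC 00:11:22:33:44:55 yields the SGID fe80::0211:22ff:fe33:4455 — the universal/local bit of the first byte is flipped (00 becomes 02) and ff:fe is inserted between the two halves of the MAC, per the modified EUI-64 convention.
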
| 100 | |||
| 101 | static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr, | ||
| 102 | bool is_vlan, u16 vlan_id) | ||
| 103 | { | ||
| 104 | int i; | ||
| 105 | bool found = false; | ||
| 106 | union ib_gid new_sgid; | ||
| 107 | int free_idx = OCRDMA_MAX_SGID; | ||
| 108 | unsigned long flags; | ||
| 109 | |||
| 110 | memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid)); | ||
| 111 | |||
| 112 | ocrdma_build_sgid_mac(&new_sgid, mac_addr, is_vlan, vlan_id); | ||
| 113 | |||
| 114 | spin_lock_irqsave(&dev->sgid_lock, flags); | ||
| 115 | for (i = 0; i < OCRDMA_MAX_SGID; i++) { | ||
| 116 | if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid, | ||
| 117 | sizeof(union ib_gid))) { | ||
| 118 | /* found a free entry */ | ||
| 119 | free_idx = i; | ||
| 120 | found = true; | ||
| 121 | break; | ||
| 124 | } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid, | ||
| 125 | sizeof(union ib_gid))) { | ||
| 126 | /* entry already present, no addition is required. */ | ||
| 127 | spin_unlock_irqrestore(&dev->sgid_lock, flags); | ||
| 128 | return; | ||
| 129 | } | ||
| 130 | } | ||
| 131 | /* if the entry doesn't exist and the table has space, add it */ | ||
| 132 | if (found) | ||
| 133 | memcpy(&dev->sgid_tbl[free_idx], &new_sgid, | ||
| 134 | sizeof(union ib_gid)); | ||
| 135 | spin_unlock_irqrestore(&dev->sgid_lock, flags); | ||
| 136 | } | ||
| 137 | |||
| 138 | static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr, | ||
| 139 | bool is_vlan, u16 vlan_id) | ||
| 140 | { | ||
| 141 | bool found = false; | ||
| 142 | int i; | ||
| 143 | union ib_gid sgid; | ||
| 144 | unsigned long flags; | ||
| 145 | |||
| 146 | ocrdma_build_sgid_mac(&sgid, mac_addr, is_vlan, vlan_id); | ||
| 147 | |||
| 148 | spin_lock_irqsave(&dev->sgid_lock, flags); | ||
| 149 | /* first is default sgid, which cannot be deleted. */ | ||
| 150 | for (i = 1; i < OCRDMA_MAX_SGID; i++) { | ||
| 151 | if (!memcmp(&dev->sgid_tbl[i], &sgid, sizeof(union ib_gid))) { | ||
| 152 | /* found matching entry */ | ||
| 153 | memset(&dev->sgid_tbl[i], 0, sizeof(union ib_gid)); | ||
| 154 | found = true; | ||
| 155 | break; | ||
| 156 | } | ||
| 157 | } | ||
| 158 | spin_unlock_irqrestore(&dev->sgid_lock, flags); | ||
| 159 | return found; | ||
| 160 | } | ||
| 161 | |||
| 162 | static void ocrdma_add_default_sgid(struct ocrdma_dev *dev) | ||
| 163 | { | ||
| 164 | /* GID Index 0 - Invariant manufacturer-assigned EUI-64 */ | ||
| 165 | union ib_gid *sgid = &dev->sgid_tbl[0]; | ||
| 166 | |||
| 167 | sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); | ||
| 168 | ocrdma_get_guid(dev, &sgid->raw[8]); | ||
| 169 | } | ||
| 170 | |||
| 171 | static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev) | ||
| 172 | { | ||
| 173 | struct net_device *netdev, *tmp; | ||
| 174 | u16 vlan_id; | ||
| 175 | bool is_vlan; | ||
| 176 | |||
| 177 | netdev = dev->nic_info.netdev; | ||
| 178 | |||
| 179 | ocrdma_add_default_sgid(dev); | ||
| 180 | |||
| 181 | rcu_read_lock(); | ||
| 182 | for_each_netdev_rcu(&init_net, tmp) { | ||
| 183 | if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) { | ||
| 184 | if (!netif_running(tmp) || !netif_oper_up(tmp)) | ||
| 185 | continue; | ||
| 186 | if (netdev != tmp) { | ||
| 187 | vlan_id = vlan_dev_vlan_id(tmp); | ||
| 188 | is_vlan = true; | ||
| 189 | } else { | ||
| 190 | is_vlan = false; | ||
| 191 | vlan_id = 0; | ||
| 192 | tmp = netdev; | ||
| 193 | } | ||
| 194 | ocrdma_add_sgid(dev, tmp->dev_addr, is_vlan, vlan_id); | ||
| 195 | } | ||
| 196 | } | ||
| 197 | rcu_read_unlock(); | ||
| 198 | return 0; | ||
| 199 | } | ||
| 200 | |||
| 201 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
| 202 | |||
| 203 | static int ocrdma_inet6addr_event(struct notifier_block *notifier, | ||
| 204 | unsigned long event, void *ptr) | ||
| 205 | { | ||
| 206 | struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; | ||
| 207 | struct net_device *event_netdev = ifa->idev->dev; | ||
| 208 | struct net_device *netdev = NULL; | ||
| 209 | struct ib_event gid_event; | ||
| 210 | struct ocrdma_dev *dev; | ||
| 211 | bool found = false; | ||
| 212 | bool is_vlan = false; | ||
| 213 | u16 vid = 0; | ||
| 214 | |||
| 215 | netdev = vlan_dev_real_dev(event_netdev); | ||
| 216 | if (netdev != event_netdev) { | ||
| 217 | is_vlan = true; | ||
| 218 | vid = vlan_dev_vlan_id(event_netdev); | ||
| 219 | } | ||
| 220 | rcu_read_lock(); | ||
| 221 | list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) { | ||
| 222 | if (dev->nic_info.netdev == netdev) { | ||
| 223 | found = true; | ||
| 224 | break; | ||
| 225 | } | ||
| 226 | } | ||
| 227 | rcu_read_unlock(); | ||
| 228 | |||
| 229 | if (!found) | ||
| 230 | return NOTIFY_DONE; | ||
| 231 | if (!rdma_link_local_addr((struct in6_addr *)&ifa->addr)) | ||
| 232 | return NOTIFY_DONE; | ||
| 233 | |||
| 234 | mutex_lock(&dev->dev_lock); | ||
| 235 | switch (event) { | ||
| 236 | case NETDEV_UP: | ||
| 237 | ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid); | ||
| 238 | break; | ||
| 239 | case NETDEV_DOWN: | ||
| 240 | found = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid); | ||
| 241 | if (found) { | ||
| 242 | /* found the matching entry, notify | ||
| 243 | * the consumers about it | ||
| 244 | */ | ||
| 245 | gid_event.device = &dev->ibdev; | ||
| 246 | gid_event.element.port_num = 1; | ||
| 247 | gid_event.event = IB_EVENT_GID_CHANGE; | ||
| 248 | ib_dispatch_event(&gid_event); | ||
| 249 | } | ||
| 250 | break; | ||
| 251 | default: | ||
| 252 | break; | ||
| 253 | } | ||
| 254 | mutex_unlock(&dev->dev_lock); | ||
| 255 | return NOTIFY_OK; | ||
| 256 | } | ||
| 257 | |||
| 258 | static struct notifier_block ocrdma_inet6addr_notifier = { | ||
| 259 | .notifier_call = ocrdma_inet6addr_event | ||
| 260 | }; | ||
| 261 | |||
| 262 | #endif /* IPV6 */ | ||
| 263 | |||
| 264 | static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device, | ||
| 265 | u8 port_num) | ||
| 266 | { | ||
| 267 | return IB_LINK_LAYER_ETHERNET; | ||
| 268 | } | ||
| 269 | |||
| 270 | static int ocrdma_register_device(struct ocrdma_dev *dev) | ||
| 271 | { | ||
| 272 | strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX); | ||
| 273 | ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid); | ||
| 274 | memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC, | ||
| 275 | sizeof(OCRDMA_NODE_DESC)); | ||
| 276 | dev->ibdev.owner = THIS_MODULE; | ||
| 277 | dev->ibdev.uverbs_cmd_mask = | ||
| 278 | OCRDMA_UVERBS(GET_CONTEXT) | | ||
| 279 | OCRDMA_UVERBS(QUERY_DEVICE) | | ||
| 280 | OCRDMA_UVERBS(QUERY_PORT) | | ||
| 281 | OCRDMA_UVERBS(ALLOC_PD) | | ||
| 282 | OCRDMA_UVERBS(DEALLOC_PD) | | ||
| 283 | OCRDMA_UVERBS(REG_MR) | | ||
| 284 | OCRDMA_UVERBS(DEREG_MR) | | ||
| 285 | OCRDMA_UVERBS(CREATE_COMP_CHANNEL) | | ||
| 286 | OCRDMA_UVERBS(CREATE_CQ) | | ||
| 287 | OCRDMA_UVERBS(RESIZE_CQ) | | ||
| 288 | OCRDMA_UVERBS(DESTROY_CQ) | | ||
| 289 | OCRDMA_UVERBS(REQ_NOTIFY_CQ) | | ||
| 290 | OCRDMA_UVERBS(CREATE_QP) | | ||
| 291 | OCRDMA_UVERBS(MODIFY_QP) | | ||
| 292 | OCRDMA_UVERBS(QUERY_QP) | | ||
| 293 | OCRDMA_UVERBS(DESTROY_QP) | | ||
| 294 | OCRDMA_UVERBS(POLL_CQ) | | ||
| 295 | OCRDMA_UVERBS(POST_SEND) | | ||
| 296 | OCRDMA_UVERBS(POST_RECV); | ||
| 297 | |||
| 298 | dev->ibdev.uverbs_cmd_mask |= | ||
| 299 | OCRDMA_UVERBS(CREATE_AH) | | ||
| 300 | OCRDMA_UVERBS(MODIFY_AH) | | ||
| 301 | OCRDMA_UVERBS(QUERY_AH) | | ||
| 302 | OCRDMA_UVERBS(DESTROY_AH); | ||
| 303 | |||
| 304 | dev->ibdev.node_type = RDMA_NODE_IB_CA; | ||
| 305 | dev->ibdev.phys_port_cnt = 1; | ||
| 306 | dev->ibdev.num_comp_vectors = 1; | ||
| 307 | |||
| 308 | /* mandatory verbs. */ | ||
| 309 | dev->ibdev.query_device = ocrdma_query_device; | ||
| 310 | dev->ibdev.query_port = ocrdma_query_port; | ||
| 311 | dev->ibdev.modify_port = ocrdma_modify_port; | ||
| 312 | dev->ibdev.query_gid = ocrdma_query_gid; | ||
| 313 | dev->ibdev.get_link_layer = ocrdma_link_layer; | ||
| 314 | dev->ibdev.alloc_pd = ocrdma_alloc_pd; | ||
| 315 | dev->ibdev.dealloc_pd = ocrdma_dealloc_pd; | ||
| 316 | |||
| 317 | dev->ibdev.create_cq = ocrdma_create_cq; | ||
| 318 | dev->ibdev.destroy_cq = ocrdma_destroy_cq; | ||
| 319 | dev->ibdev.resize_cq = ocrdma_resize_cq; | ||
| 320 | |||
| 321 | dev->ibdev.create_qp = ocrdma_create_qp; | ||
| 322 | dev->ibdev.modify_qp = ocrdma_modify_qp; | ||
| 323 | dev->ibdev.query_qp = ocrdma_query_qp; | ||
| 324 | dev->ibdev.destroy_qp = ocrdma_destroy_qp; | ||
| 325 | |||
| 326 | dev->ibdev.query_pkey = ocrdma_query_pkey; | ||
| 327 | dev->ibdev.create_ah = ocrdma_create_ah; | ||
| 328 | dev->ibdev.destroy_ah = ocrdma_destroy_ah; | ||
| 329 | dev->ibdev.query_ah = ocrdma_query_ah; | ||
| 330 | dev->ibdev.modify_ah = ocrdma_modify_ah; | ||
| 331 | |||
| 332 | dev->ibdev.poll_cq = ocrdma_poll_cq; | ||
| 333 | dev->ibdev.post_send = ocrdma_post_send; | ||
| 334 | dev->ibdev.post_recv = ocrdma_post_recv; | ||
| 335 | dev->ibdev.req_notify_cq = ocrdma_arm_cq; | ||
| 336 | |||
| 337 | dev->ibdev.get_dma_mr = ocrdma_get_dma_mr; | ||
| 338 | dev->ibdev.dereg_mr = ocrdma_dereg_mr; | ||
| 339 | dev->ibdev.reg_user_mr = ocrdma_reg_user_mr; | ||
| 340 | |||
| 341 | /* mandatory to support user-space verbs consumers. */ | ||
| 342 | dev->ibdev.alloc_ucontext = ocrdma_alloc_ucontext; | ||
| 343 | dev->ibdev.dealloc_ucontext = ocrdma_dealloc_ucontext; | ||
| 344 | dev->ibdev.mmap = ocrdma_mmap; | ||
| 345 | dev->ibdev.dma_device = &dev->nic_info.pdev->dev; | ||
| 346 | |||
| 347 | dev->ibdev.process_mad = ocrdma_process_mad; | ||
| 348 | |||
| 349 | if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | ||
| 350 | dev->ibdev.uverbs_cmd_mask |= | ||
| 351 | OCRDMA_UVERBS(CREATE_SRQ) | | ||
| 352 | OCRDMA_UVERBS(MODIFY_SRQ) | | ||
| 353 | OCRDMA_UVERBS(QUERY_SRQ) | | ||
| 354 | OCRDMA_UVERBS(DESTROY_SRQ) | | ||
| 355 | OCRDMA_UVERBS(POST_SRQ_RECV); | ||
| 356 | |||
| 357 | dev->ibdev.create_srq = ocrdma_create_srq; | ||
| 358 | dev->ibdev.modify_srq = ocrdma_modify_srq; | ||
| 359 | dev->ibdev.query_srq = ocrdma_query_srq; | ||
| 360 | dev->ibdev.destroy_srq = ocrdma_destroy_srq; | ||
| 361 | dev->ibdev.post_srq_recv = ocrdma_post_srq_recv; | ||
| 362 | } | ||
| 363 | return ib_register_device(&dev->ibdev, NULL); | ||
| 364 | } | ||
| 365 | |||
| 366 | static int ocrdma_alloc_resources(struct ocrdma_dev *dev) | ||
| 367 | { | ||
| 368 | mutex_init(&dev->dev_lock); | ||
| 369 | dev->sgid_tbl = kzalloc(sizeof(union ib_gid) * | ||
| 370 | OCRDMA_MAX_SGID, GFP_KERNEL); | ||
| 371 | if (!dev->sgid_tbl) | ||
| 372 | goto alloc_err; | ||
| 373 | spin_lock_init(&dev->sgid_lock); | ||
| 374 | |||
| 375 | dev->cq_tbl = kzalloc(sizeof(struct ocrdma_cq *) * | ||
| 376 | OCRDMA_MAX_CQ, GFP_KERNEL); | ||
| 377 | if (!dev->cq_tbl) | ||
| 378 | goto alloc_err; | ||
| 379 | |||
| 380 | if (dev->attr.max_qp) { | ||
| 381 | dev->qp_tbl = kzalloc(sizeof(struct ocrdma_qp *) * | ||
| 382 | OCRDMA_MAX_QP, GFP_KERNEL); | ||
| 383 | if (!dev->qp_tbl) | ||
| 384 | goto alloc_err; | ||
| 385 | } | ||
| 386 | spin_lock_init(&dev->av_tbl.lock); | ||
| 387 | spin_lock_init(&dev->flush_q_lock); | ||
| 388 | return 0; | ||
| 389 | alloc_err: | ||
| 390 | ocrdma_err("%s(%d) error.\n", __func__, dev->id); | ||
| 391 | return -ENOMEM; | ||
| 392 | } | ||
| 393 | |||
| 394 | static void ocrdma_free_resources(struct ocrdma_dev *dev) | ||
| 395 | { | ||
| 396 | kfree(dev->qp_tbl); | ||
| 397 | kfree(dev->cq_tbl); | ||
| 398 | kfree(dev->sgid_tbl); | ||
| 399 | } | ||
| 400 | |||
| 401 | static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) | ||
| 402 | { | ||
| 403 | int status = 0; | ||
| 404 | struct ocrdma_dev *dev; | ||
| 405 | |||
| 406 | dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev)); | ||
| 407 | if (!dev) { | ||
| 408 | ocrdma_err("Unable to allocate ib device\n"); | ||
| 409 | return NULL; | ||
| 410 | } | ||
| 411 | dev->mbx_cmd = kzalloc(sizeof(struct ocrdma_mqe_emb_cmd), GFP_KERNEL); | ||
| 412 | if (!dev->mbx_cmd) | ||
| 413 | goto idr_err; | ||
| 414 | |||
| 415 | memcpy(&dev->nic_info, dev_info, sizeof(*dev_info)); | ||
| 416 | dev->id = ocrdma_get_instance(); | ||
| 417 | if (dev->id < 0) | ||
| 418 | goto idr_err; | ||
| 419 | |||
| 420 | status = ocrdma_init_hw(dev); | ||
| 421 | if (status) | ||
| 422 | goto init_err; | ||
| 423 | |||
| 424 | status = ocrdma_alloc_resources(dev); | ||
| 425 | if (status) | ||
| 426 | goto alloc_err; | ||
| 427 | |||
| 428 | status = ocrdma_build_sgid_tbl(dev); | ||
| 429 | if (status) | ||
| 430 | goto alloc_err; | ||
| 431 | |||
| 432 | status = ocrdma_register_device(dev); | ||
| 433 | if (status) | ||
| 434 | goto alloc_err; | ||
| 435 | |||
| 436 | spin_lock(&ocrdma_devlist_lock); | ||
| 437 | list_add_tail_rcu(&dev->entry, &ocrdma_dev_list); | ||
| 438 | spin_unlock(&ocrdma_devlist_lock); | ||
| 439 | return dev; | ||
| 440 | |||
| 441 | alloc_err: | ||
| 442 | ocrdma_free_resources(dev); | ||
| 443 | ocrdma_cleanup_hw(dev); | ||
| 444 | init_err: | ||
| 445 | idr_remove(&ocrdma_dev_id, dev->id); | ||
| 446 | idr_err: | ||
| 447 | kfree(dev->mbx_cmd); | ||
| 448 | ib_dealloc_device(&dev->ibdev); | ||
| 449 | ocrdma_err("%s() leaving. ret=%d\n", __func__, status); | ||
| 450 | return NULL; | ||
| 451 | } | ||
| 452 | |||
| 453 | static void ocrdma_remove_free(struct rcu_head *rcu) | ||
| 454 | { | ||
| 455 | struct ocrdma_dev *dev = container_of(rcu, struct ocrdma_dev, rcu); | ||
| 456 | |||
| 457 | ocrdma_free_resources(dev); | ||
| 458 | ocrdma_cleanup_hw(dev); | ||
| 459 | |||
| 460 | idr_remove(&ocrdma_dev_id, dev->id); | ||
| 461 | kfree(dev->mbx_cmd); | ||
| 462 | ib_dealloc_device(&dev->ibdev); | ||
| 463 | } | ||
| 464 | |||
| 465 | static void ocrdma_remove(struct ocrdma_dev *dev) | ||
| 466 | { | ||
| 467 | /* unregister from the IB stack first, to stop all active traffic | ||
| 468 | * from the registered clients. | ||
| 469 | */ | ||
| 470 | ib_unregister_device(&dev->ibdev); | ||
| 471 | |||
| 472 | spin_lock(&ocrdma_devlist_lock); | ||
| 473 | list_del_rcu(&dev->entry); | ||
| 474 | spin_unlock(&ocrdma_devlist_lock); | ||
| 475 | call_rcu(&dev->rcu, ocrdma_remove_free); | ||
| 476 | } | ||
| 477 | |||
| 478 | static int ocrdma_open(struct ocrdma_dev *dev) | ||
| 479 | { | ||
| 480 | struct ib_event port_event; | ||
| 481 | |||
| 482 | port_event.event = IB_EVENT_PORT_ACTIVE; | ||
| 483 | port_event.element.port_num = 1; | ||
| 484 | port_event.device = &dev->ibdev; | ||
| 485 | ib_dispatch_event(&port_event); | ||
| 486 | return 0; | ||
| 487 | } | ||
| 488 | |||
| 489 | static int ocrdma_close(struct ocrdma_dev *dev) | ||
| 490 | { | ||
| 491 | int i; | ||
| 492 | struct ocrdma_qp *qp, **cur_qp; | ||
| 493 | struct ib_event err_event; | ||
| 494 | struct ib_qp_attr attrs; | ||
| 495 | int attr_mask = IB_QP_STATE; | ||
| 496 | |||
| 497 | attrs.qp_state = IB_QPS_ERR; | ||
| 498 | mutex_lock(&dev->dev_lock); | ||
| 499 | if (dev->qp_tbl) { | ||
| 500 | cur_qp = dev->qp_tbl; | ||
| 501 | for (i = 0; i < OCRDMA_MAX_QP; i++) { | ||
| 502 | qp = cur_qp[i]; | ||
| 503 | if (qp) { | ||
| 504 | /* change the QP state to ERROR */ | ||
| 505 | _ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask); | ||
| 506 | |||
| 507 | err_event.event = IB_EVENT_QP_FATAL; | ||
| 508 | err_event.element.qp = &qp->ibqp; | ||
| 509 | err_event.device = &dev->ibdev; | ||
| 510 | ib_dispatch_event(&err_event); | ||
| 511 | } | ||
| 512 | } | ||
| 513 | } | ||
| 514 | mutex_unlock(&dev->dev_lock); | ||
| 515 | |||
| 516 | err_event.event = IB_EVENT_PORT_ERR; | ||
| 517 | err_event.element.port_num = 1; | ||
| 518 | err_event.device = &dev->ibdev; | ||
| 519 | ib_dispatch_event(&err_event); | ||
| 520 | return 0; | ||
| 521 | } | ||
| 522 | |||
| 523 | /* event handling via the NIC driver ensures that all NIC-specific | ||
| 524 | * initialization is done before the RoCE driver notifies the | ||
| 525 | * stack of the event. | ||
| 526 | */ | ||
| 527 | static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event) | ||
| 528 | { | ||
| 529 | switch (event) { | ||
| 530 | case BE_DEV_UP: | ||
| 531 | ocrdma_open(dev); | ||
| 532 | break; | ||
| 533 | case BE_DEV_DOWN: | ||
| 534 | ocrdma_close(dev); | ||
| 535 | break; | ||
| 536 | } | ||
| 537 | } | ||
| 538 | |||
| 539 | static struct ocrdma_driver ocrdma_drv = { | ||
| 540 | .name = "ocrdma_driver", | ||
| 541 | .add = ocrdma_add, | ||
| 542 | .remove = ocrdma_remove, | ||
| 543 | .state_change_handler = ocrdma_event_handler, | ||
| 544 | }; | ||
| 545 | |||
| 546 | static void ocrdma_unregister_inet6addr_notifier(void) | ||
| 547 | { | ||
| 548 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
| 549 | unregister_inet6addr_notifier(&ocrdma_inet6addr_notifier); | ||
| 550 | #endif | ||
| 551 | } | ||
| 552 | |||
| 553 | static int __init ocrdma_init_module(void) | ||
| 554 | { | ||
| 555 | int status; | ||
| 556 | |||
| 557 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
| 558 | status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier); | ||
| 559 | if (status) | ||
| 560 | return status; | ||
| 561 | #endif | ||
| 562 | |||
| 563 | status = be_roce_register_driver(&ocrdma_drv); | ||
| 564 | if (status) | ||
| 565 | ocrdma_unregister_inet6addr_notifier(); | ||
| 566 | |||
| 567 | return status; | ||
| 568 | } | ||
| 569 | |||
| 570 | static void __exit ocrdma_exit_module(void) | ||
| 571 | { | ||
| 572 | be_roce_unregister_driver(&ocrdma_drv); | ||
| 573 | ocrdma_unregister_inet6addr_notifier(); | ||
| 574 | } | ||
| 575 | |||
| 576 | module_init(ocrdma_init_module); | ||
| 577 | module_exit(ocrdma_exit_module); | ||
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h new file mode 100644 index 000000000000..7fd80cc0f037 --- /dev/null +++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h | |||
| @@ -0,0 +1,1672 @@ | |||
| 1 | /******************************************************************* | ||
| 2 | * This file is part of the Emulex RoCE Device Driver for * | ||
| 3 | * RoCE (RDMA over Converged Ethernet) adapters. * | ||
| 4 | * Copyright (C) 2008-2012 Emulex. All rights reserved. * | ||
| 5 | * EMULEX and SLI are trademarks of Emulex. * | ||
| 6 | * www.emulex.com * | ||
| 7 | * * | ||
| 8 | * This program is free software; you can redistribute it and/or * | ||
| 9 | * modify it under the terms of version 2 of the GNU General * | ||
| 10 | * Public License as published by the Free Software Foundation. * | ||
| 11 | * This program is distributed in the hope that it will be useful. * | ||
| 12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | ||
| 13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | ||
| 15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | ||
| 16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | ||
| 17 | * more details, a copy of which can be found in the file COPYING * | ||
| 18 | * included with this package. * | ||
| 19 | * | ||
| 20 | * Contact Information: | ||
| 21 | * linux-drivers@emulex.com | ||
| 22 | * | ||
| 23 | * Emulex | ||
| 24 | * 3333 Susan Street | ||
| 25 | * Costa Mesa, CA 92626 | ||
| 26 | *******************************************************************/ | ||
| 27 | |||
| 28 | #ifndef __OCRDMA_SLI_H__ | ||
| 29 | #define __OCRDMA_SLI_H__ | ||
| 30 | |||
| 31 | #define Bit(_b) (1 << (_b)) | ||
| 32 | |||
| 33 | #define OCRDMA_GEN1_FAMILY 0xB | ||
| 34 | #define OCRDMA_GEN2_FAMILY 0x2 | ||
| 35 | |||
| 36 | #define OCRDMA_SUBSYS_ROCE 10 | ||
| 37 | enum { | ||
| 38 | OCRDMA_CMD_QUERY_CONFIG = 1, | ||
| 39 | OCRDMA_CMD_ALLOC_PD, | ||
| 40 | OCRDMA_CMD_DEALLOC_PD, | ||
| 41 | |||
| 42 | OCRDMA_CMD_CREATE_AH_TBL, | ||
| 43 | OCRDMA_CMD_DELETE_AH_TBL, | ||
| 44 | |||
| 45 | OCRDMA_CMD_CREATE_QP, | ||
| 46 | OCRDMA_CMD_QUERY_QP, | ||
| 47 | OCRDMA_CMD_MODIFY_QP, | ||
| 48 | OCRDMA_CMD_DELETE_QP, | ||
| 49 | |||
| 50 | OCRDMA_CMD_RSVD1, | ||
| 51 | OCRDMA_CMD_ALLOC_LKEY, | ||
| 52 | OCRDMA_CMD_DEALLOC_LKEY, | ||
| 53 | OCRDMA_CMD_REGISTER_NSMR, | ||
| 54 | OCRDMA_CMD_REREGISTER_NSMR, | ||
| 55 | OCRDMA_CMD_REGISTER_NSMR_CONT, | ||
| 56 | OCRDMA_CMD_QUERY_NSMR, | ||
| 57 | OCRDMA_CMD_ALLOC_MW, | ||
| 58 | OCRDMA_CMD_QUERY_MW, | ||
| 59 | |||
| 60 | OCRDMA_CMD_CREATE_SRQ, | ||
| 61 | OCRDMA_CMD_QUERY_SRQ, | ||
| 62 | OCRDMA_CMD_MODIFY_SRQ, | ||
| 63 | OCRDMA_CMD_DELETE_SRQ, | ||
| 64 | |||
| 65 | OCRDMA_CMD_ATTACH_MCAST, | ||
| 66 | OCRDMA_CMD_DETACH_MCAST, | ||
| 67 | |||
| 68 | OCRDMA_CMD_MAX | ||
| 69 | }; | ||
| 70 | |||
| 71 | #define OCRDMA_SUBSYS_COMMON 1 | ||
| 72 | enum { | ||
| 73 | OCRDMA_CMD_CREATE_CQ = 12, | ||
| 74 | OCRDMA_CMD_CREATE_EQ = 13, | ||
| 75 | OCRDMA_CMD_CREATE_MQ = 21, | ||
| 76 | OCRDMA_CMD_GET_FW_VER = 35, | ||
| 77 | OCRDMA_CMD_DELETE_MQ = 53, | ||
| 78 | OCRDMA_CMD_DELETE_CQ = 54, | ||
| 79 | OCRDMA_CMD_DELETE_EQ = 55, | ||
| 80 | OCRDMA_CMD_GET_FW_CONFIG = 58, | ||
| 81 | OCRDMA_CMD_CREATE_MQ_EXT = 90 | ||
| 82 | }; | ||
| 83 | |||
| 84 | enum { | ||
| 85 | QTYPE_EQ = 1, | ||
| 86 | QTYPE_CQ = 2, | ||
| 87 | QTYPE_MCCQ = 3 | ||
| 88 | }; | ||
| 89 | |||
| 90 | #define OCRDMA_MAX_SGID (8) | ||
| 91 | |||
| 92 | #define OCRDMA_MAX_QP 2048 | ||
| 93 | #define OCRDMA_MAX_CQ 2048 | ||
| 94 | |||
| 95 | enum { | ||
| 96 | OCRDMA_DB_RQ_OFFSET = 0xE0, | ||
| 97 | OCRDMA_DB_GEN2_RQ1_OFFSET = 0x100, | ||
| 98 | OCRDMA_DB_GEN2_RQ2_OFFSET = 0xC0, | ||
| 99 | OCRDMA_DB_SQ_OFFSET = 0x60, | ||
| 100 | OCRDMA_DB_GEN2_SQ_OFFSET = 0x1C0, | ||
| 101 | OCRDMA_DB_SRQ_OFFSET = OCRDMA_DB_RQ_OFFSET, | ||
| 102 | OCRDMA_DB_GEN2_SRQ_OFFSET = OCRDMA_DB_GEN2_RQ1_OFFSET, | ||
| 103 | OCRDMA_DB_CQ_OFFSET = 0x120, | ||
| 104 | OCRDMA_DB_EQ_OFFSET = OCRDMA_DB_CQ_OFFSET, | ||
| 105 | OCRDMA_DB_MQ_OFFSET = 0x140 | ||
| 106 | }; | ||
| 107 | |||
| 108 | #define OCRDMA_DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */ | ||
| 109 | #define OCRDMA_DB_CQ_RING_ID_EXT_MASK 0x0C00 /* qid bits 10-11 */ | ||
| 110 | /* qid bits 10-11 are placed at doorbell bits 11-12 */ | ||
| 111 | #define OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT 0x1 | ||
| 112 | #define OCRDMA_DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ | ||
| 113 | /* Rearm bit */ | ||
| 114 | #define OCRDMA_DB_CQ_REARM_SHIFT (29) /* bit 29 */ | ||
| 115 | /* solicited bit */ | ||
| 116 | #define OCRDMA_DB_CQ_SOLICIT_SHIFT (31) /* bit 31 */ | ||
| 117 | |||
| 118 | #define OCRDMA_EQ_ID_MASK 0x1FF /* bits 0 - 8 */ | ||
| 119 | #define OCRDMA_EQ_ID_EXT_MASK 0x3e00 /* bits 9-13 */ | ||
| 120 | #define OCRDMA_EQ_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 at 11-15 */ | ||
| 121 | |||
| 122 | /* Clear the interrupt for this eq */ | ||
| 123 | #define OCRDMA_EQ_CLR_SHIFT (9) /* bit 9 */ | ||
| 124 | /* Must be 1 */ | ||
| 125 | #define OCRDMA_EQ_TYPE_SHIFT (10) /* bit 10 */ | ||
| 126 | /* Number of event entries processed */ | ||
| 127 | #define OCRDMA_NUM_EQE_SHIFT (16) /* bits 16 - 28 */ | ||
| 128 | /* Rearm bit */ | ||
| 129 | #define OCRDMA_REARM_SHIFT (29) /* bit 29 */ | ||
| 130 | |||
| 131 | #define OCRDMA_MQ_ID_MASK 0x7FF /* bits 0 - 10 */ | ||
| 132 | /* Number of entries posted */ | ||
| 133 | #define OCRDMA_MQ_NUM_MQE_SHIFT (16) /* bits 16 - 29 */ | ||
| 134 | |||
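
The doorbell encodings above pack a queue id, a consumed-entry count, and the arm/solicit flags into a single 32-bit MMIO write. A minimal sketch of composing a CQ doorbell word from these masks (the helper name is illustrative, not taken from the patch; the patch's own doorbell code lives in its verbs/hw files):

    /* sketch -- compose a CQ doorbell: low qid bits, the two extension
     * bits shifted into place, the number of CQEs consumed, and the
     * rearm request. */
    static inline u32 sketch_cq_db(u16 cq_id, u16 num_popped, bool arm)
    {
    	u32 val = cq_id & OCRDMA_DB_CQ_RING_ID_MASK;

    	val |= (cq_id & OCRDMA_DB_CQ_RING_ID_EXT_MASK) <<
    		OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT;
    	val |= num_popped << OCRDMA_DB_CQ_NUM_POPPED_SHIFT;
    	if (arm)
    		val |= 1 << OCRDMA_DB_CQ_REARM_SHIFT;
    	return val;
    }
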
| 135 | #define OCRDMA_MIN_HPAGE_SIZE (4096) | ||
| 136 | |||
| 137 | #define OCRDMA_MIN_Q_PAGE_SIZE (4096) | ||
| 138 | #define OCRDMA_MAX_Q_PAGES (8) | ||
| 139 | |||
| 140 | /* | ||
| 141 | # 0: 4K Bytes | ||
| 142 | # 1: 8K Bytes | ||
| 143 | # 2: 16K Bytes | ||
| 144 | # 3: 32K Bytes | ||
| 145 | # 4: 64K Bytes | ||
| 146 | */ | ||
| 147 | #define OCRDMA_MAX_Q_PAGE_SIZE_CNT (5) | ||
| 148 | #define OCRDMA_Q_PAGE_BASE_SIZE (OCRDMA_MIN_Q_PAGE_SIZE * OCRDMA_MAX_Q_PAGES) | ||
| 149 | |||
| 150 | #define MAX_OCRDMA_QP_PAGES (8) | ||
| 151 | #define OCRDMA_MAX_WQE_MEM_SIZE (MAX_OCRDMA_QP_PAGES * OCRDMA_MIN_Q_PAGE_SIZE) | ||
| 152 | |||
| 153 | #define OCRDMA_CREATE_CQ_MAX_PAGES (4) | ||
| 154 | #define OCRDMA_DPP_CQE_SIZE (4) | ||
| 155 | |||
| 156 | #define OCRDMA_GEN2_MAX_CQE 1024 | ||
| 157 | #define OCRDMA_GEN2_CQ_PAGE_SIZE 4096 | ||
| 158 | #define OCRDMA_GEN2_WQE_SIZE 256 | ||
| 159 | #define OCRDMA_MAX_CQE 4095 | ||
| 160 | #define OCRDMA_CQ_PAGE_SIZE 16384 | ||
| 161 | #define OCRDMA_WQE_SIZE 128 | ||
| 162 | #define OCRDMA_WQE_STRIDE 8 | ||
| 163 | #define OCRDMA_WQE_ALIGN_BYTES 16 | ||
| 164 | |||
| 165 | #define MAX_OCRDMA_SRQ_PAGES MAX_OCRDMA_QP_PAGES | ||
| 166 | |||
| 167 | enum { | ||
| 168 | OCRDMA_MCH_OPCODE_SHIFT = 0, | ||
| 169 | OCRDMA_MCH_OPCODE_MASK = 0xFF, | ||
| 170 | OCRDMA_MCH_SUBSYS_SHIFT = 8, | ||
| 171 | OCRDMA_MCH_SUBSYS_MASK = 0xFF00 | ||
| 172 | }; | ||
| 173 | |||
| 174 | /* mailbox cmd header */ | ||
| 175 | struct ocrdma_mbx_hdr { | ||
| 176 | u32 subsys_op; | ||
| 177 | u32 timeout; /* in seconds */ | ||
| 178 | u32 cmd_len; | ||
| 179 | u32 rsvd_version; | ||
| 180 | } __packed; | ||
| 181 | |||
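
Every mailbox command starts with this header, with the opcode and subsystem sharing the first word. A sketch of filling it in, assuming the usual mask-and-shift pattern above (hypothetical helper; the patch initializes these headers in its hw file):

    static inline void sketch_init_mch(struct ocrdma_mbx_hdr *mch,
    				   u8 opcode, u8 subsys, u32 cmd_len)
    {
    	/* opcode in bits 0-7, subsystem in bits 8-15 */
    	mch->subsys_op = (opcode << OCRDMA_MCH_OPCODE_SHIFT) |
    			 (subsys << OCRDMA_MCH_SUBSYS_SHIFT);
    	mch->cmd_len = cmd_len;
    }
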
| 182 | enum { | ||
| 183 | OCRDMA_MBX_RSP_OPCODE_SHIFT = 0, | ||
| 184 | OCRDMA_MBX_RSP_OPCODE_MASK = 0xFF, | ||
| 185 | OCRDMA_MBX_RSP_SUBSYS_SHIFT = 8, | ||
| 186 | OCRDMA_MBX_RSP_SUBSYS_MASK = 0xFF << OCRDMA_MBX_RSP_SUBSYS_SHIFT, | ||
| 187 | |||
| 188 | OCRDMA_MBX_RSP_STATUS_SHIFT = 0, | ||
| 189 | OCRDMA_MBX_RSP_STATUS_MASK = 0xFF, | ||
| 190 | OCRDMA_MBX_RSP_ASTATUS_SHIFT = 8, | ||
| 191 | OCRDMA_MBX_RSP_ASTATUS_MASK = 0xFF << OCRDMA_MBX_RSP_ASTATUS_SHIFT | ||
| 192 | }; | ||
| 193 | |||
| 194 | /* mailbox cmd response */ | ||
| 195 | struct ocrdma_mbx_rsp { | ||
| 196 | u32 subsys_op; | ||
| 197 | u32 status; | ||
| 198 | u32 rsp_len; | ||
| 199 | u32 add_rsp_len; | ||
| 200 | } __packed; | ||
| 201 | |||
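
The completion status and the additional (extended) status share the second word of the response. A sketch of pulling them out with the masks above (helper names hypothetical):

    static inline u8 sketch_mbx_status(struct ocrdma_mbx_rsp *rsp)
    {
    	return (rsp->status & OCRDMA_MBX_RSP_STATUS_MASK) >>
    		OCRDMA_MBX_RSP_STATUS_SHIFT;
    }

    static inline u8 sketch_mbx_add_status(struct ocrdma_mbx_rsp *rsp)
    {
    	return (rsp->status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
    		OCRDMA_MBX_RSP_ASTATUS_SHIFT;
    }
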
| 202 | enum { | ||
| 203 | OCRDMA_MQE_EMBEDDED = 1, | ||
| 204 | OCRDMA_MQE_NONEMBEDDED = 0 | ||
| 205 | }; | ||
| 206 | |||
| 207 | struct ocrdma_mqe_sge { | ||
| 208 | u32 pa_lo; | ||
| 209 | u32 pa_hi; | ||
| 210 | u32 len; | ||
| 211 | } __packed; | ||
| 212 | |||
| 213 | enum { | ||
| 214 | OCRDMA_MQE_HDR_EMB_SHIFT = 0, | ||
| 215 | OCRDMA_MQE_HDR_EMB_MASK = Bit(0), | ||
| 216 | OCRDMA_MQE_HDR_SGE_CNT_SHIFT = 3, | ||
| 217 | OCRDMA_MQE_HDR_SGE_CNT_MASK = 0x1F << OCRDMA_MQE_HDR_SGE_CNT_SHIFT, | ||
| 218 | OCRDMA_MQE_HDR_SPECIAL_SHIFT = 24, | ||
| 219 | OCRDMA_MQE_HDR_SPECIAL_MASK = 0xFF << OCRDMA_MQE_HDR_SPECIAL_SHIFT | ||
| 220 | }; | ||
| 221 | |||
| 222 | struct ocrdma_mqe_hdr { | ||
| 223 | u32 spcl_sge_cnt_emb; | ||
| 224 | u32 pyld_len; | ||
| 225 | u32 tag_lo; | ||
| 226 | u32 tag_hi; | ||
| 227 | u32 rsvd3; | ||
| 228 | } __packed; | ||
| 229 | |||
| 230 | struct ocrdma_mqe_emb_cmd { | ||
| 231 | struct ocrdma_mbx_hdr mch; | ||
| 232 | u8 pyld[220]; | ||
| 233 | } __packed; | ||
| 234 | |||
| 235 | struct ocrdma_mqe { | ||
| 236 | struct ocrdma_mqe_hdr hdr; | ||
| 237 | union { | ||
| 238 | struct ocrdma_mqe_emb_cmd emb_req; | ||
| 239 | struct { | ||
| 240 | struct ocrdma_mqe_sge sge[19]; | ||
| 241 | } nonemb_req; | ||
| 242 | u8 cmd[236]; | ||
| 243 | struct ocrdma_mbx_rsp rsp; | ||
| 244 | } u; | ||
| 245 | } __packed; | ||
| 246 | |||
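
An MQE either embeds its command payload (up to 220 bytes after the mailbox header) or carries up to 19 SGEs pointing at an external buffer; bit 0 of the first header word selects which. A sketch of preparing an embedded MQE (hypothetical helper, not the patch's own routine):

    static void sketch_init_emb_mqe(struct ocrdma_mqe *mqe, u32 pyld_len)
    {
    	memset(mqe, 0, sizeof(*mqe));
    	/* mark the payload as embedded and record its length */
    	mqe->hdr.spcl_sge_cnt_emb =
    		(OCRDMA_MQE_EMBEDDED << OCRDMA_MQE_HDR_EMB_SHIFT) &
    		OCRDMA_MQE_HDR_EMB_MASK;
    	mqe->hdr.pyld_len = pyld_len;
    }
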
| 247 | #define OCRDMA_EQ_LEN 4096 | ||
| 248 | #define OCRDMA_MQ_CQ_LEN 256 | ||
| 249 | #define OCRDMA_MQ_LEN 128 | ||
| 250 | |||
| 251 | #define PAGE_SHIFT_4K 12 | ||
| 252 | #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) | ||
| 253 | |||
| 254 | /* Returns number of pages spanned by the data starting at the given addr */ | ||
| 255 | #define PAGES_4K_SPANNED(_address, size) \ | ||
| 256 | ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \ | ||
| 257 | (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K)) | ||
| 258 | |||
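
For example, a buffer that starts 0x100 bytes into a 4K page and runs for 8192 bytes touches three pages:

    u32 n = PAGES_4K_SPANNED((void *)0x1100, 8192); /* n == 3 */
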
| 259 | struct ocrdma_delete_q_req { | ||
| 260 | struct ocrdma_mbx_hdr req; | ||
| 261 | u32 id; | ||
| 262 | } __packed; | ||
| 263 | |||
| 264 | struct ocrdma_pa { | ||
| 265 | u32 lo; | ||
| 266 | u32 hi; | ||
| 267 | } __packed; | ||
| 268 | |||
| 269 | #define MAX_OCRDMA_EQ_PAGES (8) | ||
| 270 | struct ocrdma_create_eq_req { | ||
| 271 | struct ocrdma_mbx_hdr req; | ||
| 272 | u32 num_pages; | ||
| 273 | u32 valid; | ||
| 274 | u32 cnt; | ||
| 275 | u32 delay; | ||
| 276 | u32 rsvd; | ||
| 277 | struct ocrdma_pa pa[MAX_OCRDMA_EQ_PAGES]; | ||
| 278 | } __packed; | ||
| 279 | |||
| 280 | enum { | ||
| 281 | OCRDMA_CREATE_EQ_VALID = Bit(29), | ||
| 282 | OCRDMA_CREATE_EQ_CNT_SHIFT = 26, | ||
| 283 | OCRDMA_CREATE_CQ_DELAY_SHIFT = 13, | ||
| 284 | }; | ||
| 285 | |||
| 286 | struct ocrdma_create_eq_rsp { | ||
| 287 | struct ocrdma_mbx_rsp rsp; | ||
| 288 | u32 vector_eqid; | ||
| 289 | }; | ||
| 290 | |||
| 291 | #define OCRDMA_EQ_MINOR_OTHER (0x1) | ||
| 292 | |||
| 293 | enum { | ||
| 294 | OCRDMA_MCQE_STATUS_SHIFT = 0, | ||
| 295 | OCRDMA_MCQE_STATUS_MASK = 0xFFFF, | ||
| 296 | OCRDMA_MCQE_ESTATUS_SHIFT = 16, | ||
| 297 | OCRDMA_MCQE_ESTATUS_MASK = 0xFFFF << OCRDMA_MCQE_ESTATUS_SHIFT, | ||
| 298 | OCRDMA_MCQE_CONS_SHIFT = 27, | ||
| 299 | OCRDMA_MCQE_CONS_MASK = Bit(27), | ||
| 300 | OCRDMA_MCQE_CMPL_SHIFT = 28, | ||
| 301 | OCRDMA_MCQE_CMPL_MASK = Bit(28), | ||
| 302 | OCRDMA_MCQE_AE_SHIFT = 30, | ||
| 303 | OCRDMA_MCQE_AE_MASK = Bit(30), | ||
| 304 | OCRDMA_MCQE_VALID_SHIFT = 31, | ||
| 305 | OCRDMA_MCQE_VALID_MASK = Bit(31) | ||
| 306 | }; | ||
| 307 | |||
| 308 | struct ocrdma_mcqe { | ||
| 309 | u32 status; | ||
| 310 | u32 tag_lo; | ||
| 311 | u32 tag_hi; | ||
| 312 | u32 valid_ae_cmpl_cons; | ||
| 313 | } __packed; | ||
| 314 | |||
| 315 | enum { | ||
| 316 | OCRDMA_AE_MCQE_QPVALID = Bit(31), | ||
| 317 | OCRDMA_AE_MCQE_QPID_MASK = 0xFFFF, | ||
| 318 | |||
| 319 | OCRDMA_AE_MCQE_CQVALID = Bit(31), | ||
| 320 | OCRDMA_AE_MCQE_CQID_MASK = 0xFFFF, | ||
| 321 | OCRDMA_AE_MCQE_VALID = Bit(31), | ||
| 322 | OCRDMA_AE_MCQE_AE = Bit(30), | ||
| 323 | OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT = 16, | ||
| 324 | OCRDMA_AE_MCQE_EVENT_TYPE_MASK = | ||
| 325 | 0xFF << OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT, | ||
| 326 | OCRDMA_AE_MCQE_EVENT_CODE_SHIFT = 8, | ||
| 327 | OCRDMA_AE_MCQE_EVENT_CODE_MASK = | ||
| 328 | 0xFF << OCRDMA_AE_MCQE_EVENT_CODE_SHIFT | ||
| 329 | }; | ||
| 330 | struct ocrdma_ae_mcqe { | ||
| 331 | u32 qpvalid_qpid; | ||
| 332 | u32 cqvalid_cqid; | ||
| 333 | u32 evt_tag; | ||
| 334 | u32 valid_ae_event; | ||
| 335 | } __packed; | ||
| 336 | |||
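
When the AE bit is set in a management CQE, the entry is reinterpreted as this layout, and the valid bits say whether the QP-id and/or CQ-id fields apply. A sketch of extracting the event type with the masks above (hypothetical helper):

    static inline u8 sketch_ae_event_type(struct ocrdma_ae_mcqe *cqe)
    {
    	return (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
    		OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;
    }
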
| 337 | enum { | ||
| 338 | OCRDMA_AE_MPA_MCQE_REQ_ID_SHIFT = 16, | ||
| 339 | OCRDMA_AE_MPA_MCQE_REQ_ID_MASK = 0xFFFF << | ||
| 340 | OCRDMA_AE_MPA_MCQE_REQ_ID_SHIFT, | ||
| 341 | |||
| 342 | OCRDMA_AE_MPA_MCQE_EVENT_CODE_SHIFT = 8, | ||
| 343 | OCRDMA_AE_MPA_MCQE_EVENT_CODE_MASK = 0xFF << | ||
| 344 | OCRDMA_AE_MPA_MCQE_EVENT_CODE_SHIFT, | ||
| 345 | OCRDMA_AE_MPA_MCQE_EVENT_TYPE_SHIFT = 16, | ||
| 346 | OCRDMA_AE_MPA_MCQE_EVENT_TYPE_MASK = 0xFF << | ||
| 347 | OCRDMA_AE_MPA_MCQE_EVENT_TYPE_SHIFT, | ||
| 348 | OCRDMA_AE_MPA_MCQE_EVENT_AE_SHIFT = 30, | ||
| 349 | OCRDMA_AE_MPA_MCQE_EVENT_AE_MASK = Bit(30), | ||
| 350 | OCRDMA_AE_MPA_MCQE_EVENT_VALID_SHIFT = 31, | ||
| 351 | OCRDMA_AE_MPA_MCQE_EVENT_VALID_MASK = Bit(31) | ||
| 352 | }; | ||
| 353 | |||
| 354 | struct ocrdma_ae_mpa_mcqe { | ||
| 355 | u32 req_id; | ||
| 356 | u32 w1; | ||
| 357 | u32 w2; | ||
| 358 | u32 valid_ae_event; | ||
| 359 | } __packed; | ||
| 360 | |||
| 361 | enum { | ||
| 362 | OCRDMA_AE_QP_MCQE_NEW_QP_STATE_SHIFT = 0, | ||
| 363 | OCRDMA_AE_QP_MCQE_NEW_QP_STATE_MASK = 0xFFFF, | ||
| 364 | OCRDMA_AE_QP_MCQE_QP_ID_SHIFT = 16, | ||
| 365 | OCRDMA_AE_QP_MCQE_QP_ID_MASK = 0xFFFF << | ||
| 366 | OCRDMA_AE_QP_MCQE_QP_ID_SHIFT, | ||
| 367 | |||
| 368 | OCRDMA_AE_QP_MCQE_EVENT_CODE_SHIFT = 8, | ||
| 369 | OCRDMA_AE_QP_MCQE_EVENT_CODE_MASK = 0xFF << | ||
| 370 | OCRDMA_AE_QP_MCQE_EVENT_CODE_SHIFT, | ||
| 371 | OCRDMA_AE_QP_MCQE_EVENT_TYPE_SHIFT = 16, | ||
| 372 | OCRDMA_AE_QP_MCQE_EVENT_TYPE_MASK = 0xFF << | ||
| 373 | OCRDMA_AE_QP_MCQE_EVENT_TYPE_SHIFT, | ||
| 374 | OCRDMA_AE_QP_MCQE_EVENT_AE_SHIFT = 30, | ||
| 375 | OCRDMA_AE_QP_MCQE_EVENT_AE_MASK = Bit(30), | ||
| 376 | OCRDMA_AE_QP_MCQE_EVENT_VALID_SHIFT = 31, | ||
| 377 | OCRDMA_AE_QP_MCQE_EVENT_VALID_MASK = Bit(31) | ||
| 378 | }; | ||
| 379 | |||
| 380 | struct ocrdma_ae_qp_mcqe { | ||
| 381 | u32 qp_id_state; | ||
| 382 | u32 w1; | ||
| 383 | u32 w2; | ||
| 384 | u32 valid_ae_event; | ||
| 385 | } __packed; | ||
| 386 | |||
| 387 | #define OCRDMA_ASYNC_EVE_CODE 0x14 | ||
| 388 | |||
| 389 | enum OCRDMA_ASYNC_EVENT_TYPE { | ||
| 390 | OCRDMA_CQ_ERROR = 0x00, | ||
| 391 | OCRDMA_CQ_OVERRUN_ERROR = 0x01, | ||
| 392 | OCRDMA_CQ_QPCAT_ERROR = 0x02, | ||
| 393 | OCRDMA_QP_ACCESS_ERROR = 0x03, | ||
| 394 | OCRDMA_QP_COMM_EST_EVENT = 0x04, | ||
| 395 | OCRDMA_SQ_DRAINED_EVENT = 0x05, | ||
| 396 | OCRDMA_DEVICE_FATAL_EVENT = 0x08, | ||
| 397 | OCRDMA_SRQCAT_ERROR = 0x0E, | ||
| 398 | OCRDMA_SRQ_LIMIT_EVENT = 0x0F, | ||
| 399 | OCRDMA_QP_LAST_WQE_EVENT = 0x10 | ||
| 400 | }; | ||
| 401 | |||
| 402 | /* mailbox command request and responses */ | ||
| 403 | enum { | ||
| 404 | OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT = 2, | ||
| 405 | OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK = Bit(2), | ||
| 406 | OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT = 3, | ||
| 407 | OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK = Bit(3), | ||
| 408 | OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT = 8, | ||
| 409 | OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK = 0xFFFFFF << | ||
| 410 | OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT, | ||
| 411 | |||
| 412 | OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT = 16, | ||
| 413 | OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK = 0xFFFF << | ||
| 414 | OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT, | ||
| 415 | OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT = 8, | ||
| 416 | OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK = 0xFF << | ||
| 417 | OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT, | ||
| 418 | |||
| 419 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0, | ||
| 420 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF, | ||
| 421 | |||
| 422 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0, | ||
| 423 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF, | ||
| 424 | OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT = 16, | ||
| 425 | OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK = 0xFFFF << | ||
| 426 | OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT, | ||
| 427 | |||
| 428 | OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET = 24, | ||
| 429 | OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK = 0xFF << | ||
| 430 | OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET, | ||
| 431 | OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET = 16, | ||
| 432 | OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK = 0xFF << | ||
| 433 | OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET, | ||
| 434 | OCRDMA_MBX_QUERY_CFG_MAX_DPP_CQES_OFFSET = 0, | ||
| 435 | OCRDMA_MBX_QUERY_CFG_MAX_DPP_CQES_MASK = 0xFFFF << | ||
| 436 | OCRDMA_MBX_QUERY_CFG_MAX_DPP_CQES_OFFSET, | ||
| 437 | |||
| 438 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET = 16, | ||
| 439 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK = 0xFFFF << | ||
| 440 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET, | ||
| 441 | OCRDMA_MBX_QUERY_CFG_MAX_RPIR_QPS_OFFSET = 0, | ||
| 442 | OCRDMA_MBX_QUERY_CFG_MAX_RPIR_QPS_MASK = 0xFFFF << | ||
| 443 | OCRDMA_MBX_QUERY_CFG_MAX_RPIR_QPS_OFFSET, | ||
| 444 | |||
| 445 | OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET = 16, | ||
| 446 | OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK = 0xFFFF << | ||
| 447 | OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET, | ||
| 448 | OCRDMA_MBX_QUERY_CFG_MAX_DPP_CREDITS_OFFSET = 0, | ||
| 449 | OCRDMA_MBX_QUERY_CFG_MAX_DPP_CREDITS_MASK = 0xFFFF << | ||
| 450 | OCRDMA_MBX_QUERY_CFG_MAX_DPP_CREDITS_OFFSET, | ||
| 451 | |||
| 452 | OCRDMA_MBX_QUERY_CFG_MAX_DPP_QPS_OFFSET = 0, | ||
| 453 | OCRDMA_MBX_QUERY_CFG_MAX_DPP_QPS_MASK = 0xFFFF << | ||
| 454 | OCRDMA_MBX_QUERY_CFG_MAX_DPP_QPS_OFFSET, | ||
| 455 | |||
| 456 | OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET = 16, | ||
| 457 | OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_MASK = 0xFFFF << | ||
| 458 | OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET, | ||
| 459 | OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET = 0, | ||
| 460 | OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK = 0xFFFF << | ||
| 461 | OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET, | ||
| 462 | |||
| 463 | OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET = 16, | ||
| 464 | OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK = 0xFFFF << | ||
| 465 | OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET, | ||
| 466 | OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_OFFSET = 0, | ||
| 467 | OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK = 0xFFFF << | ||
| 468 | OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_OFFSET, | ||
| 469 | |||
| 470 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_RQE_OFFSET = 16, | ||
| 471 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_RQE_MASK = 0xFFFF << | ||
| 472 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_RQE_OFFSET, | ||
| 473 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET = 0, | ||
| 474 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK = 0xFFFF << | ||
| 475 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET, | ||
| 476 | }; | ||
| 477 | |||
| 478 | struct ocrdma_mbx_query_config { | ||
| 479 | struct ocrdma_mqe_hdr hdr; | ||
| 480 | struct ocrdma_mbx_rsp rsp; | ||
| 481 | u32 qp_srq_cq_ird_ord; | ||
| 482 | u32 max_pd_ca_ack_delay; | ||
| 483 | u32 max_write_send_sge; | ||
| 484 | u32 max_ird_ord_per_qp; | ||
| 485 | u32 max_shared_ird_ord; | ||
| 486 | u32 max_mr; | ||
| 487 | u64 max_mr_size; | ||
| 488 | u32 max_num_mr_pbl; | ||
| 489 | u32 max_mw; | ||
| 490 | u32 max_fmr; | ||
| 491 | u32 max_pages_per_frmr; | ||
| 492 | u32 max_mcast_group; | ||
| 493 | u32 max_mcast_qp_attach; | ||
| 494 | u32 max_total_mcast_qp_attach; | ||
| 495 | u32 wqe_rqe_stride_max_dpp_cqs; | ||
| 496 | u32 max_srq_rpir_qps; | ||
| 497 | u32 max_dpp_pds_credits; | ||
| 498 | u32 max_dpp_credits_pds_per_pd; | ||
| 499 | u32 max_wqes_rqes_per_q; | ||
| 500 | u32 max_cq_cqes_per_cq; | ||
| 501 | u32 max_srq_rqe_sge; | ||
| 502 | } __packed; | ||
| 503 | |||
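
Most QUERY_CONFIG limits are packed two per word; for instance the 24-bit max-QP count sits above the SRQ/overflow flags in qp_srq_cq_ird_ord. A sketch of unpacking it (hypothetical helper; the patch performs the equivalent when filling its device attributes):

    static inline u32 sketch_max_qp(struct ocrdma_mbx_query_config *rsp)
    {
    	return (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
    		OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
    }
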
| 504 | struct ocrdma_fw_ver_rsp { | ||
| 505 | struct ocrdma_mqe_hdr hdr; | ||
| 506 | struct ocrdma_mbx_rsp rsp; | ||
| 507 | |||
| 508 | u8 running_ver[32]; | ||
| 509 | } __packed; | ||
| 510 | |||
| 511 | struct ocrdma_fw_conf_rsp { | ||
| 512 | struct ocrdma_mqe_hdr hdr; | ||
| 513 | struct ocrdma_mbx_rsp rsp; | ||
| 514 | |||
| 515 | u32 config_num; | ||
| 516 | u32 asic_revision; | ||
| 517 | u32 phy_port; | ||
| 518 | u32 fn_mode; | ||
| 519 | struct { | ||
| 520 | u32 mode; | ||
| 521 | u32 nic_wqid_base; | ||
| 522 | u32 nic_wq_tot; | ||
| 523 | u32 prot_wqid_base; | ||
| 524 | u32 prot_wq_tot; | ||
| 525 | u32 prot_rqid_base; | ||
| 526 | u32 prot_rqid_tot; | ||
| 527 | u32 rsvd[6]; | ||
| 528 | } ulp[2]; | ||
| 529 | u32 fn_capabilities; | ||
| 530 | u32 rsvd1; | ||
| 531 | u32 rsvd2; | ||
| 532 | u32 base_eqid; | ||
| 533 | u32 max_eq; | ||
| 534 | |||
| 535 | } __packed; | ||
| 536 | |||
| 537 | enum { | ||
| 538 | OCRDMA_FN_MODE_RDMA = 0x4 | ||
| 539 | }; | ||
| 540 | |||
| 541 | enum { | ||
| 542 | OCRDMA_CREATE_CQ_VER2 = 2, | ||
| 543 | |||
| 544 | OCRDMA_CREATE_CQ_PAGE_CNT_MASK = 0xFFFF, | ||
| 545 | OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT = 16, | ||
| 546 | OCRDMA_CREATE_CQ_PAGE_SIZE_MASK = 0xFF, | ||
| 547 | |||
| 548 | OCRDMA_CREATE_CQ_COALESCWM_SHIFT = 12, | ||
| 549 | OCRDMA_CREATE_CQ_COALESCWM_MASK = Bit(13) | Bit(12), | ||
| 550 | OCRDMA_CREATE_CQ_FLAGS_NODELAY = Bit(14), | ||
| 551 | OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID = Bit(15), | ||
| 552 | |||
| 553 | OCRDMA_CREATE_CQ_EQ_ID_MASK = 0xFFFF, | ||
| 554 | OCRDMA_CREATE_CQ_CQE_COUNT_MASK = 0xFFFF | ||
| 555 | }; | ||
| 556 | |||
| 557 | enum { | ||
| 558 | OCRDMA_CREATE_CQ_VER0 = 0, | ||
| 559 | OCRDMA_CREATE_CQ_DPP = 1, | ||
| 560 | OCRDMA_CREATE_CQ_TYPE_SHIFT = 24, | ||
| 561 | OCRDMA_CREATE_CQ_EQID_SHIFT = 22, | ||
| 562 | |||
| 563 | OCRDMA_CREATE_CQ_CNT_SHIFT = 27, | ||
| 564 | OCRDMA_CREATE_CQ_FLAGS_VALID = Bit(29), | ||
| 565 | OCRDMA_CREATE_CQ_FLAGS_EVENTABLE = Bit(31), | ||
| 566 | OCRDMA_CREATE_CQ_DEF_FLAGS = OCRDMA_CREATE_CQ_FLAGS_VALID | | ||
| 567 | OCRDMA_CREATE_CQ_FLAGS_EVENTABLE | | ||
| 568 | OCRDMA_CREATE_CQ_FLAGS_NODELAY | ||
| 569 | }; | ||
| 570 | |||
| 571 | struct ocrdma_create_cq_cmd { | ||
| 572 | struct ocrdma_mbx_hdr req; | ||
| 573 | u32 pgsz_pgcnt; | ||
| 574 | u32 ev_cnt_flags; | ||
| 575 | u32 eqn; | ||
| 576 | u32 cqe_count; | ||
| 577 | u32 rsvd6; | ||
| 578 | struct ocrdma_pa pa[OCRDMA_CREATE_CQ_MAX_PAGES]; | ||
| 579 | }; | ||
| 580 | |||
| 581 | struct ocrdma_create_cq { | ||
| 582 | struct ocrdma_mqe_hdr hdr; | ||
| 583 | struct ocrdma_create_cq_cmd cmd; | ||
| 584 | } __packed; | ||
| 585 | |||
| 586 | enum { | ||
| 587 | OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK = 0xFFFF | ||
| 588 | }; | ||
| 589 | |||
| 590 | struct ocrdma_create_cq_cmd_rsp { | ||
| 591 | struct ocrdma_mbx_rsp rsp; | ||
| 592 | u32 cq_id; | ||
| 593 | } __packed; | ||
| 594 | |||
| 595 | struct ocrdma_create_cq_rsp { | ||
| 596 | struct ocrdma_mqe_hdr hdr; | ||
| 597 | struct ocrdma_create_cq_cmd_rsp rsp; | ||
| 598 | } __packed; | ||
| 599 | |||
| 600 | enum { | ||
| 601 | OCRDMA_CREATE_MQ_V0_CQ_ID_SHIFT = 22, | ||
| 602 | OCRDMA_CREATE_MQ_CQ_ID_SHIFT = 16, | ||
| 603 | OCRDMA_CREATE_MQ_RING_SIZE_SHIFT = 16, | ||
| 604 | OCRDMA_CREATE_MQ_VALID = Bit(31), | ||
| 605 | OCRDMA_CREATE_MQ_ASYNC_CQ_VALID = Bit(0) | ||
| 606 | }; | ||
| 607 | |||
| 608 | struct ocrdma_create_mq_v0 { | ||
| 609 | u32 pages; | ||
| 610 | u32 cqid_ringsize; | ||
| 611 | u32 valid; | ||
| 612 | u32 async_cqid_valid; | ||
| 613 | u32 rsvd; | ||
| 614 | struct ocrdma_pa pa[8]; | ||
| 615 | } __packed; | ||
| 616 | |||
| 617 | struct ocrdma_create_mq_v1 { | ||
| 618 | u32 cqid_pages; | ||
| 619 | u32 async_event_bitmap; | ||
| 620 | u32 async_cqid_ringsize; | ||
| 621 | u32 valid; | ||
| 622 | u32 async_cqid_valid; | ||
| 623 | u32 rsvd; | ||
| 624 | struct ocrdma_pa pa[8]; | ||
| 625 | } __packed; | ||
| 626 | |||
| 627 | struct ocrdma_create_mq_req { | ||
| 628 | struct ocrdma_mbx_hdr req; | ||
| 629 | union { | ||
| 630 | struct ocrdma_create_mq_v0 v0; | ||
| 631 | struct ocrdma_create_mq_v1 v1; | ||
| 632 | }; | ||
| 633 | } __packed; | ||
| 634 | |||
| 635 | struct ocrdma_create_mq_rsp { | ||
| 636 | struct ocrdma_mbx_rsp rsp; | ||
| 637 | u32 id; | ||
| 638 | } __packed; | ||
| 639 | |||
| 640 | enum { | ||
| 641 | OCRDMA_DESTROY_CQ_QID_SHIFT = 0, | ||
| 642 | OCRDMA_DESTROY_CQ_QID_MASK = 0xFFFF, | ||
| 643 | OCRDMA_DESTROY_CQ_QID_BYPASS_FLUSH_SHIFT = 16, | ||
| 644 | OCRDMA_DESTROY_CQ_QID_BYPASS_FLUSH_MASK = 0xFFFF << | ||
| 645 | OCRDMA_DESTROY_CQ_QID_BYPASS_FLUSH_SHIFT | ||
| 646 | }; | ||
| 647 | |||
| 648 | struct ocrdma_destroy_cq { | ||
| 649 | struct ocrdma_mqe_hdr hdr; | ||
| 650 | struct ocrdma_mbx_hdr req; | ||
| 651 | |||
| 652 | u32 bypass_flush_qid; | ||
| 653 | } __packed; | ||
| 654 | |||
| 655 | struct ocrdma_destroy_cq_rsp { | ||
| 656 | struct ocrdma_mqe_hdr hdr; | ||
| 657 | struct ocrdma_mbx_rsp rsp; | ||
| 658 | } __packed; | ||
| 659 | |||
| 660 | enum { | ||
| 661 | OCRDMA_QPT_GSI = 1, | ||
| 662 | OCRDMA_QPT_RC = 2, | ||
| 663 | OCRDMA_QPT_UD = 4, | ||
| 664 | }; | ||
| 665 | |||
| 666 | enum { | ||
| 667 | OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT = 0, | ||
| 668 | OCRDMA_CREATE_QP_REQ_PD_ID_MASK = 0xFFFF, | ||
| 669 | OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT = 16, | ||
| 670 | OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT = 19, | ||
| 671 | OCRDMA_CREATE_QP_REQ_QPT_SHIFT = 29, | ||
| 672 | OCRDMA_CREATE_QP_REQ_QPT_MASK = Bit(31) | Bit(30) | Bit(29), | ||
| 673 | |||
| 674 | OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT = 0, | ||
| 675 | OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK = 0xFFFF, | ||
| 676 | OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT = 16, | ||
| 677 | OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK = 0xFFFF << | ||
| 678 | OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT, | ||
| 679 | |||
| 680 | OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT = 0, | ||
| 681 | OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK = 0xFFFF, | ||
| 682 | OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT = 16, | ||
| 683 | OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK = 0xFFFF << | ||
| 684 | OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT, | ||
| 685 | |||
| 686 | OCRDMA_CREATE_QP_REQ_FMR_EN_SHIFT = 0, | ||
| 687 | OCRDMA_CREATE_QP_REQ_FMR_EN_MASK = Bit(0), | ||
| 688 | OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_SHIFT = 1, | ||
| 689 | OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK = Bit(1), | ||
| 690 | OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_SHIFT = 2, | ||
| 691 | OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK = Bit(2), | ||
| 692 | OCRDMA_CREATE_QP_REQ_INB_WREN_SHIFT = 3, | ||
| 693 | OCRDMA_CREATE_QP_REQ_INB_WREN_MASK = Bit(3), | ||
| 694 | OCRDMA_CREATE_QP_REQ_INB_RDEN_SHIFT = 4, | ||
| 695 | OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK = Bit(4), | ||
| 696 | OCRDMA_CREATE_QP_REQ_USE_SRQ_SHIFT = 5, | ||
| 697 | OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK = Bit(5), | ||
| 698 | OCRDMA_CREATE_QP_REQ_ENABLE_RPIR_SHIFT = 6, | ||
| 699 | OCRDMA_CREATE_QP_REQ_ENABLE_RPIR_MASK = Bit(6), | ||
| 700 | OCRDMA_CREATE_QP_REQ_ENABLE_DPP_SHIFT = 7, | ||
| 701 | OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK = Bit(7), | ||
| 702 | OCRDMA_CREATE_QP_REQ_ENABLE_DPP_CQ_SHIFT = 8, | ||
| 703 | OCRDMA_CREATE_QP_REQ_ENABLE_DPP_CQ_MASK = Bit(8), | ||
| 704 | OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT = 16, | ||
| 705 | OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK = 0xFFFF << | ||
| 706 | OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT, | ||
| 707 | |||
| 708 | OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT = 0, | ||
| 709 | OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK = 0xFFFF, | ||
| 710 | OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT = 16, | ||
| 711 | OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK = 0xFFFF << | ||
| 712 | OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT, | ||
| 713 | |||
| 714 | OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT = 0, | ||
| 715 | OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK = 0xFFFF, | ||
| 716 | OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT = 16, | ||
| 717 | OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK = 0xFFFF << | ||
| 718 | OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT, | ||
| 719 | |||
| 720 | OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT = 0, | ||
| 721 | OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK = 0xFFFF, | ||
| 722 | OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT = 16, | ||
| 723 | OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK = 0xFFFF << | ||
| 724 | OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT, | ||
| 725 | |||
| 726 | OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT = 0, | ||
| 727 | OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK = 0xFFFF, | ||
| 728 | OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT = 16, | ||
| 729 | OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK = 0xFFFF << | ||
| 730 | OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT, | ||
| 731 | |||
| 732 | OCRDMA_CREATE_QP_REQ_DPP_CQPID_SHIFT = 0, | ||
| 733 | OCRDMA_CREATE_QP_REQ_DPP_CQPID_MASK = 0xFFFF, | ||
| 734 | OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT = 16, | ||
| 735 | OCRDMA_CREATE_QP_REQ_DPP_CREDIT_MASK = 0xFFFF << | ||
| 736 | OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT | ||
| 737 | }; | ||
| 738 | |||
| 739 | enum { | ||
| 740 | OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT = 16, | ||
| 741 | OCRDMA_CREATE_QP_RSP_DPP_PAGE_SHIFT = 1 | ||
| 742 | }; | ||
| 743 | |||
| 744 | #define MAX_OCRDMA_IRD_PAGES 4 | ||
| 745 | |||
| 746 | enum ocrdma_qp_flags { | ||
| 747 | OCRDMA_QP_MW_BIND = 1, | ||
| 748 | OCRDMA_QP_LKEY0 = (1 << 1), | ||
| 749 | OCRDMA_QP_FAST_REG = (1 << 2), | ||
| 750 | OCRDMA_QP_INB_RD = (1 << 6), | ||
| 751 | OCRDMA_QP_INB_WR = (1 << 7), | ||
| 752 | }; | ||
| 753 | |||
| 754 | enum ocrdma_qp_state { | ||
| 755 | OCRDMA_QPS_RST = 0, | ||
| 756 | OCRDMA_QPS_INIT = 1, | ||
| 757 | OCRDMA_QPS_RTR = 2, | ||
| 758 | OCRDMA_QPS_RTS = 3, | ||
| 759 | OCRDMA_QPS_SQE = 4, | ||
| 760 | OCRDMA_QPS_SQ_DRAINING = 5, | ||
| 761 | OCRDMA_QPS_ERR = 6, | ||
| 762 | OCRDMA_QPS_SQD = 7 | ||
| 763 | }; | ||
| 764 | |||
| 765 | struct ocrdma_create_qp_req { | ||
| 766 | struct ocrdma_mqe_hdr hdr; | ||
| 767 | struct ocrdma_mbx_hdr req; | ||
| 768 | |||
| 769 | u32 type_pgsz_pdn; | ||
| 770 | u32 max_wqe_rqe; | ||
| 771 | u32 max_sge_send_write; | ||
| 772 | u32 max_sge_recv_flags; | ||
| 773 | u32 max_ord_ird; | ||
| 774 | u32 num_wq_rq_pages; | ||
| 775 | u32 wqe_rqe_size; | ||
| 776 | u32 wq_rq_cqid; | ||
| 777 | struct ocrdma_pa wq_addr[MAX_OCRDMA_QP_PAGES]; | ||
| 778 | struct ocrdma_pa rq_addr[MAX_OCRDMA_QP_PAGES]; | ||
| 779 | u32 dpp_credits_cqid; | ||
| 780 | u32 rpir_lkey; | ||
| 781 | struct ocrdma_pa ird_addr[MAX_OCRDMA_IRD_PAGES]; | ||
| 782 | } __packed; | ||
| 783 | |||
| 784 | enum { | ||
| 785 | OCRDMA_CREATE_QP_RSP_QP_ID_SHIFT = 0, | ||
| 786 | OCRDMA_CREATE_QP_RSP_QP_ID_MASK = 0xFFFF, | ||
| 787 | |||
| 788 | OCRDMA_CREATE_QP_RSP_MAX_RQE_SHIFT = 0, | ||
| 789 | OCRDMA_CREATE_QP_RSP_MAX_RQE_MASK = 0xFFFF, | ||
| 790 | OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT = 16, | ||
| 791 | OCRDMA_CREATE_QP_RSP_MAX_WQE_MASK = 0xFFFF << | ||
| 792 | OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT, | ||
| 793 | |||
| 794 | OCRDMA_CREATE_QP_RSP_MAX_SGE_WRITE_SHIFT = 0, | ||
| 795 | OCRDMA_CREATE_QP_RSP_MAX_SGE_WRITE_MASK = 0xFFFF, | ||
| 796 | OCRDMA_CREATE_QP_RSP_MAX_SGE_SEND_SHIFT = 16, | ||
| 797 | OCRDMA_CREATE_QP_RSP_MAX_SGE_SEND_MASK = 0xFFFF << | ||
| 798 | OCRDMA_CREATE_QP_RSP_MAX_SGE_SEND_SHIFT, | ||
| 799 | |||
| 800 | OCRDMA_CREATE_QP_RSP_MAX_SGE_RECV_SHIFT = 16, | ||
| 801 | OCRDMA_CREATE_QP_RSP_MAX_SGE_RECV_MASK = 0xFFFF << | ||
| 802 | OCRDMA_CREATE_QP_RSP_MAX_SGE_RECV_SHIFT, | ||
| 803 | |||
| 804 | OCRDMA_CREATE_QP_RSP_MAX_IRD_SHIFT = 0, | ||
| 805 | OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK = 0xFFFF, | ||
| 806 | OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT = 16, | ||
| 807 | OCRDMA_CREATE_QP_RSP_MAX_ORD_MASK = 0xFFFF << | ||
| 808 | OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT, | ||
| 809 | |||
| 810 | OCRDMA_CREATE_QP_RSP_RQ_ID_SHIFT = 0, | ||
| 811 | OCRDMA_CREATE_QP_RSP_RQ_ID_MASK = 0xFFFF, | ||
| 812 | OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT = 16, | ||
| 813 | OCRDMA_CREATE_QP_RSP_SQ_ID_MASK = 0xFFFF << | ||
| 814 | OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT, | ||
| 815 | |||
| 816 | OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK = Bit(0), | ||
| 817 | OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT = 1, | ||
| 818 | OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK = 0x7FFF << | ||
| 819 | OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT, | ||
| 820 | OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT = 16, | ||
| 821 | OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK = 0xFFFF << | ||
| 822 | OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT, | ||
| 823 | }; | ||
| 824 | |||
| 825 | struct ocrdma_create_qp_rsp { | ||
| 826 | struct ocrdma_mqe_hdr hdr; | ||
| 827 | struct ocrdma_mbx_rsp rsp; | ||
| 828 | |||
| 829 | u32 qp_id; | ||
| 830 | u32 max_wqe_rqe; | ||
| 831 | u32 max_sge_send_write; | ||
| 832 | u32 max_sge_recv; | ||
| 833 | u32 max_ord_ird; | ||
| 834 | u32 sq_rq_id; | ||
| 835 | u32 dpp_response; | ||
| 836 | } __packed; | ||
| 837 | |||
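
If direct packet push (DPP) was granted, bit 0 of dpp_response is set and the same word carries the doorbell page offset and the credit count. A sketch of decoding it (hypothetical helper, not the patch's own code):

    static bool sketch_qp_dpp_enabled(struct ocrdma_create_qp_rsp *rsp,
    				  u16 *pg_offset)
    {
    	if (!(rsp->dpp_response & OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK))
    		return false;
    	*pg_offset = (rsp->dpp_response &
    		      OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK) >>
    		     OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT;
    	return true;
    }
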
| 838 | struct ocrdma_destroy_qp { | ||
| 839 | struct ocrdma_mqe_hdr hdr; | ||
| 840 | struct ocrdma_mbx_hdr req; | ||
| 841 | u32 qp_id; | ||
| 842 | } __packed; | ||
| 843 | |||
| 844 | struct ocrdma_destroy_qp_rsp { | ||
| 845 | struct ocrdma_mqe_hdr hdr; | ||
| 846 | struct ocrdma_mbx_rsp rsp; | ||
| 847 | } __packed; | ||
| 848 | |||
| 849 | enum { | ||
| 850 | OCRDMA_MODIFY_QP_ID_SHIFT = 0, | ||
| 851 | OCRDMA_MODIFY_QP_ID_MASK = 0xFFFF, | ||
| 852 | |||
| 853 | OCRDMA_QP_PARA_QPS_VALID = Bit(0), | ||
| 854 | OCRDMA_QP_PARA_SQD_ASYNC_VALID = Bit(1), | ||
| 855 | OCRDMA_QP_PARA_PKEY_VALID = Bit(2), | ||
| 856 | OCRDMA_QP_PARA_QKEY_VALID = Bit(3), | ||
| 857 | OCRDMA_QP_PARA_PMTU_VALID = Bit(4), | ||
| 858 | OCRDMA_QP_PARA_ACK_TO_VALID = Bit(5), | ||
| 859 | OCRDMA_QP_PARA_RETRY_CNT_VALID = Bit(6), | ||
| 860 | OCRDMA_QP_PARA_RRC_VALID = Bit(7), | ||
| 861 | OCRDMA_QP_PARA_RQPSN_VALID = Bit(8), | ||
| 862 | OCRDMA_QP_PARA_MAX_IRD_VALID = Bit(9), | ||
| 863 | OCRDMA_QP_PARA_MAX_ORD_VALID = Bit(10), | ||
| 864 | OCRDMA_QP_PARA_RNT_VALID = Bit(11), | ||
| 865 | OCRDMA_QP_PARA_SQPSN_VALID = Bit(12), | ||
| 866 | OCRDMA_QP_PARA_DST_QPN_VALID = Bit(13), | ||
| 867 | OCRDMA_QP_PARA_MAX_WQE_VALID = Bit(14), | ||
| 868 | OCRDMA_QP_PARA_MAX_RQE_VALID = Bit(15), | ||
| 869 | OCRDMA_QP_PARA_SGE_SEND_VALID = Bit(16), | ||
| 870 | OCRDMA_QP_PARA_SGE_RECV_VALID = Bit(17), | ||
| 871 | OCRDMA_QP_PARA_SGE_WR_VALID = Bit(18), | ||
| 872 | OCRDMA_QP_PARA_INB_RDEN_VALID = Bit(19), | ||
| 873 | OCRDMA_QP_PARA_INB_WREN_VALID = Bit(20), | ||
| 874 | OCRDMA_QP_PARA_FLOW_LBL_VALID = Bit(21), | ||
| 875 | OCRDMA_QP_PARA_BIND_EN_VALID = Bit(22), | ||
| 876 | OCRDMA_QP_PARA_ZLKEY_EN_VALID = Bit(23), | ||
| 877 | OCRDMA_QP_PARA_FMR_EN_VALID = Bit(24), | ||
| 878 | OCRDMA_QP_PARA_INBAT_EN_VALID = Bit(25), | ||
| 879 | OCRDMA_QP_PARA_VLAN_EN_VALID = Bit(26), | ||
| 880 | |||
| 881 | OCRDMA_MODIFY_QP_FLAGS_RD = Bit(0), | ||
| 882 | OCRDMA_MODIFY_QP_FLAGS_WR = Bit(1), | ||
| 883 | OCRDMA_MODIFY_QP_FLAGS_SEND = Bit(2), | ||
| 884 | OCRDMA_MODIFY_QP_FLAGS_ATOMIC = Bit(3) | ||
| 885 | }; | ||
| 886 | |||
| 887 | enum { | ||
| 888 | OCRDMA_QP_PARAMS_SRQ_ID_SHIFT = 0, | ||
| 889 | OCRDMA_QP_PARAMS_SRQ_ID_MASK = 0xFFFF, | ||
| 890 | |||
| 891 | OCRDMA_QP_PARAMS_MAX_RQE_SHIFT = 0, | ||
| 892 | OCRDMA_QP_PARAMS_MAX_RQE_MASK = 0xFFFF, | ||
| 893 | OCRDMA_QP_PARAMS_MAX_WQE_SHIFT = 16, | ||
| 894 | OCRDMA_QP_PARAMS_MAX_WQE_MASK = 0xFFFF << | ||
| 895 | OCRDMA_QP_PARAMS_MAX_WQE_SHIFT, | ||
| 896 | |||
| 897 | OCRDMA_QP_PARAMS_MAX_SGE_WRITE_SHIFT = 0, | ||
| 898 | OCRDMA_QP_PARAMS_MAX_SGE_WRITE_MASK = 0xFFFF, | ||
| 899 | OCRDMA_QP_PARAMS_MAX_SGE_SEND_SHIFT = 16, | ||
| 900 | OCRDMA_QP_PARAMS_MAX_SGE_SEND_MASK = 0xFFFF << | ||
| 901 | OCRDMA_QP_PARAMS_MAX_SGE_SEND_SHIFT, | ||
| 902 | |||
| 903 | OCRDMA_QP_PARAMS_FLAGS_FMR_EN = Bit(0), | ||
| 904 | OCRDMA_QP_PARAMS_FLAGS_LKEY_0_EN = Bit(1), | ||
| 905 | OCRDMA_QP_PARAMS_FLAGS_BIND_MW_EN = Bit(2), | ||
| 906 | OCRDMA_QP_PARAMS_FLAGS_INBWR_EN = Bit(3), | ||
| 907 | OCRDMA_QP_PARAMS_FLAGS_INBRD_EN = Bit(4), | ||
| 908 | OCRDMA_QP_PARAMS_STATE_SHIFT = 5, | ||
| 909 | OCRDMA_QP_PARAMS_STATE_MASK = Bit(5) | Bit(6) | Bit(7), | ||
| 910 | OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC = Bit(8), | ||
| 911 | OCRDMA_QP_PARAMS_FLAGS_INB_ATEN = Bit(9), | ||
| 912 | OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT = 16, | ||
| 913 | OCRDMA_QP_PARAMS_MAX_SGE_RECV_MASK = 0xFFFF << | ||
| 914 | OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT, | ||
| 915 | |||
| 916 | OCRDMA_QP_PARAMS_MAX_IRD_SHIFT = 0, | ||
| 917 | OCRDMA_QP_PARAMS_MAX_IRD_MASK = 0xFFFF, | ||
| 918 | OCRDMA_QP_PARAMS_MAX_ORD_SHIFT = 16, | ||
| 919 | OCRDMA_QP_PARAMS_MAX_ORD_MASK = 0xFFFF << | ||
| 920 | OCRDMA_QP_PARAMS_MAX_ORD_SHIFT, | ||
| 921 | |||
| 922 | OCRDMA_QP_PARAMS_RQ_CQID_SHIFT = 0, | ||
| 923 | OCRDMA_QP_PARAMS_RQ_CQID_MASK = 0xFFFF, | ||
| 924 | OCRDMA_QP_PARAMS_WQ_CQID_SHIFT = 16, | ||
| 925 | OCRDMA_QP_PARAMS_WQ_CQID_MASK = 0xFFFF << | ||
| 926 | OCRDMA_QP_PARAMS_WQ_CQID_SHIFT, | ||
| 927 | |||
| 928 | OCRDMA_QP_PARAMS_RQ_PSN_SHIFT = 0, | ||
| 929 | OCRDMA_QP_PARAMS_RQ_PSN_MASK = 0xFFFFFF, | ||
| 930 | OCRDMA_QP_PARAMS_HOP_LMT_SHIFT = 24, | ||
| 931 | OCRDMA_QP_PARAMS_HOP_LMT_MASK = 0xFF << | ||
| 932 | OCRDMA_QP_PARAMS_HOP_LMT_SHIFT, | ||
| 933 | |||
| 934 | OCRDMA_QP_PARAMS_SQ_PSN_SHIFT = 0, | ||
| 935 | OCRDMA_QP_PARAMS_SQ_PSN_MASK = 0xFFFFFF, | ||
| 936 | OCRDMA_QP_PARAMS_TCLASS_SHIFT = 24, | ||
| 937 | OCRDMA_QP_PARAMS_TCLASS_MASK = 0xFF << | ||
| 938 | OCRDMA_QP_PARAMS_TCLASS_SHIFT, | ||
| 939 | |||
| 940 | OCRDMA_QP_PARAMS_DEST_QPN_SHIFT = 0, | ||
| 941 | OCRDMA_QP_PARAMS_DEST_QPN_MASK = 0xFFFFFF, | ||
| 942 | OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT = 24, | ||
| 943 | OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK = 0x7 << | ||
| 944 | OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT, | ||
| 945 | OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT = 27, | ||
| 946 | OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK = 0x1F << | ||
| 947 | OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT, | ||
| 948 | |||
| 949 | OCRDMA_QP_PARAMS_PKEY_IDNEX_SHIFT = 0, | ||
| 950 | OCRDMA_QP_PARAMS_PKEY_INDEX_MASK = 0xFFFF, | ||
| 951 | OCRDMA_QP_PARAMS_PATH_MTU_SHIFT = 18, | ||
| 952 | OCRDMA_QP_PARAMS_PATH_MTU_MASK = 0x3FFF << | ||
| 953 | OCRDMA_QP_PARAMS_PATH_MTU_SHIFT, | ||
| 954 | |||
| 955 | OCRDMA_QP_PARAMS_FLOW_LABEL_SHIFT = 0, | ||
| 956 | OCRDMA_QP_PARAMS_FLOW_LABEL_MASK = 0xFFFFF, | ||
| 957 | OCRDMA_QP_PARAMS_SL_SHIFT = 20, | ||
| 958 | OCRDMA_QP_PARAMS_SL_MASK = 0xF << | ||
| 959 | OCRDMA_QP_PARAMS_SL_SHIFT, | ||
| 960 | OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT = 24, | ||
| 961 | OCRDMA_QP_PARAMS_RETRY_CNT_MASK = 0x7 << | ||
| 962 | OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT, | ||
| 963 | OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT = 27, | ||
| 964 | OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK = 0x1F << | ||
| 965 | OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT, | ||
| 966 | |||
| 967 | OCRDMA_QP_PARAMS_DMAC_B4_TO_B5_SHIFT = 0, | ||
| 968 | OCRDMA_QP_PARAMS_DMAC_B4_TO_B5_MASK = 0xFFFF, | ||
| 969 | OCRDMA_QP_PARAMS_VLAN_SHIFT = 16, | ||
| 970 | OCRDMA_QP_PARAMS_VLAN_MASK = 0xFFFF << | ||
| 971 | OCRDMA_QP_PARAMS_VLAN_SHIFT | ||
| 972 | }; | ||
| 973 | |||
| 974 | struct ocrdma_qp_params { | ||
| 975 | u32 id; | ||
| 976 | u32 max_wqe_rqe; | ||
| 977 | u32 max_sge_send_write; | ||
| 978 | u32 max_sge_recv_flags; | ||
| 979 | u32 max_ord_ird; | ||
| 980 | u32 wq_rq_cqid; | ||
| 981 | u32 hop_lmt_rq_psn; | ||
| 982 | u32 tclass_sq_psn; | ||
| 983 | u32 ack_to_rnr_rtc_dest_qpn; | ||
| 984 | u32 path_mtu_pkey_indx; | ||
| 985 | u32 rnt_rc_sl_fl; | ||
| 986 | u8 sgid[16]; | ||
| 987 | u8 dgid[16]; | ||
| 988 | u32 dmac_b0_to_b3; | ||
| 989 | u32 vlan_dmac_b4_to_b5; | ||
| 990 | u32 qkey; | ||
| 991 | } __packed; | ||
| 992 | |||
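
The path_mtu_pkey_indx field illustrates the packing used throughout ocrdma_qp_params: the P_Key index occupies the low 16 bits and the device's MTU encoding sits at bit 18. A sketch, assuming mtu_enc is already in the device's own encoding (hypothetical helper):

    static inline u32 sketch_path_mtu_pkey(u16 pkey_index, u32 mtu_enc)
    {
    	return (pkey_index & OCRDMA_QP_PARAMS_PKEY_INDEX_MASK) |
    	       ((mtu_enc << OCRDMA_QP_PARAMS_PATH_MTU_SHIFT) &
    		OCRDMA_QP_PARAMS_PATH_MTU_MASK);
    }
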
| 993 | |||
| 994 | struct ocrdma_modify_qp { | ||
| 995 | struct ocrdma_mqe_hdr hdr; | ||
| 996 | struct ocrdma_mbx_hdr req; | ||
| 997 | |||
| 998 | struct ocrdma_qp_params params; | ||
| 999 | u32 flags; | ||
| 1000 | u32 rdma_flags; | ||
| 1001 | u32 num_outstanding_atomic_rd; | ||
| 1002 | } __packed; | ||
| 1003 | |||
| 1004 | enum { | ||
| 1005 | OCRDMA_MODIFY_QP_RSP_MAX_RQE_SHIFT = 0, | ||
| 1006 | OCRDMA_MODIFY_QP_RSP_MAX_RQE_MASK = 0xFFFF, | ||
| 1007 | OCRDMA_MODIFY_QP_RSP_MAX_WQE_SHIFT = 16, | ||
| 1008 | OCRDMA_MODIFY_QP_RSP_MAX_WQE_MASK = 0xFFFF << | ||
| 1009 | OCRDMA_MODIFY_QP_RSP_MAX_WQE_SHIFT, | ||
| 1010 | |||
| 1011 | OCRDMA_MODIFY_QP_RSP_MAX_IRD_SHIFT = 0, | ||
| 1012 | OCRDMA_MODIFY_QP_RSP_MAX_IRD_MASK = 0xFFFF, | ||
| 1013 | OCRDMA_MODIFY_QP_RSP_MAX_ORD_SHIFT = 16, | ||
| 1014 | OCRDMA_MODIFY_QP_RSP_MAX_ORD_MASK = 0xFFFF << | ||
| 1015 | OCRDMA_MODIFY_QP_RSP_MAX_ORD_SHIFT | ||
| 1016 | }; | ||
| 1017 | struct ocrdma_modify_qp_rsp { | ||
| 1018 | struct ocrdma_mqe_hdr hdr; | ||
| 1019 | struct ocrdma_mbx_rsp rsp; | ||
| 1020 | |||
| 1021 | u32 max_wqe_rqe; | ||
| 1022 | u32 max_ord_ird; | ||
| 1023 | } __packed; | ||
| 1024 | |||
| 1025 | struct ocrdma_query_qp { | ||
| 1026 | struct ocrdma_mqe_hdr hdr; | ||
| 1027 | struct ocrdma_mbx_hdr req; | ||
| 1028 | |||
| 1029 | #define OCRDMA_QUERY_UP_QP_ID_SHIFT 0 | ||
| 1030 | #define OCRDMA_QUERY_UP_QP_ID_MASK 0xFFFFFF | ||
| 1031 | u32 qp_id; | ||
| 1032 | } __packed; | ||
| 1033 | |||
| 1034 | struct ocrdma_query_qp_rsp { | ||
| 1035 | struct ocrdma_mqe_hdr hdr; | ||
| 1036 | struct ocrdma_mbx_rsp rsp; | ||
| 1037 | struct ocrdma_qp_params params; | ||
| 1038 | } __packed; | ||
| 1039 | |||
| 1040 | enum { | ||
| 1041 | OCRDMA_CREATE_SRQ_PD_ID_SHIFT = 0, | ||
| 1042 | OCRDMA_CREATE_SRQ_PD_ID_MASK = 0xFFFF, | ||
| 1043 | OCRDMA_CREATE_SRQ_PG_SZ_SHIFT = 16, | ||
| 1044 | OCRDMA_CREATE_SRQ_PG_SZ_MASK = 0x3 << | ||
| 1045 | OCRDMA_CREATE_SRQ_PG_SZ_SHIFT, | ||
| 1046 | |||
| 1047 | OCRDMA_CREATE_SRQ_MAX_RQE_SHIFT = 0, | ||
| 1048 | OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT = 16, | ||
| 1049 | OCRDMA_CREATE_SRQ_MAX_SGE_RECV_MASK = 0xFFFF << | ||
| 1050 | OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT, | ||
| 1051 | |||
| 1052 | OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT = 0, | ||
| 1053 | OCRDMA_CREATE_SRQ_RQE_SIZE_MASK = 0xFFFF, | ||
| 1054 | OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT = 16, | ||
| 1055 | OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_MASK = 0xFFFF << | ||
| 1056 | OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT | ||
| 1057 | }; | ||
| 1058 | |||
| 1059 | struct ocrdma_create_srq { | ||
| 1060 | struct ocrdma_mqe_hdr hdr; | ||
| 1061 | struct ocrdma_mbx_hdr req; | ||
| 1062 | |||
| 1063 | u32 pgsz_pdid; | ||
| 1064 | u32 max_sge_rqe; | ||
| 1065 | u32 pages_rqe_sz; | ||
| 1066 | struct ocrdma_pa rq_addr[MAX_OCRDMA_SRQ_PAGES]; | ||
| 1067 | } __packed; | ||
| 1068 | |||
| 1069 | enum { | ||
| 1070 | OCRDMA_CREATE_SRQ_RSP_SRQ_ID_SHIFT = 0, | ||
| 1071 | OCRDMA_CREATE_SRQ_RSP_SRQ_ID_MASK = 0xFFFFFF, | ||
| 1072 | |||
| 1073 | OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT = 0, | ||
| 1074 | OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK = 0xFFFF, | ||
| 1075 | OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT = 16, | ||
| 1076 | OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK = 0xFFFF << | ||
| 1077 | OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT | ||
| 1078 | }; | ||
| 1079 | |||
| 1080 | struct ocrdma_create_srq_rsp { | ||
| 1081 | struct ocrdma_mqe_hdr hdr; | ||
| 1082 | struct ocrdma_mbx_rsp rsp; | ||
| 1083 | |||
| 1084 | u32 id; | ||
| 1085 | u32 max_sge_rqe_allocated; | ||
| 1086 | } __packed; | ||
| 1087 | |||
| 1088 | enum { | ||
| 1089 | OCRDMA_MODIFY_SRQ_ID_SHIFT = 0, | ||
| 1090 | OCRDMA_MODIFY_SRQ_ID_MASK = 0xFFFFFF, | ||
| 1091 | |||
| 1092 | OCRDMA_MODIFY_SRQ_MAX_RQE_SHIFT = 0, | ||
| 1093 | OCRDMA_MODIFY_SRQ_MAX_RQE_MASK = 0xFFFF, | ||
| 1094 | OCRDMA_MODIFY_SRQ_LIMIT_SHIFT = 16, | ||
| 1095 | OCRDMA_MODIFY_SRQ_LIMIT_MASK = 0xFFFF << | ||
| 1096 | OCRDMA_MODIFY_SRQ_LIMIT_SHIFT | ||
| 1097 | }; | ||
| 1098 | |||
| 1099 | struct ocrdma_modify_srq { | ||
| 1100 | struct ocrdma_mqe_hdr hdr; | ||
| 1101 | struct ocrdma_mbx_rsp rep; | ||
| 1102 | |||
| 1103 | u32 id; | ||
| 1104 | u32 limit_max_rqe; | ||
| 1105 | } __packed; | ||
| 1106 | |||
| 1107 | enum { | ||
| 1108 | OCRDMA_QUERY_SRQ_ID_SHIFT = 0, | ||
| 1109 | OCRDMA_QUERY_SRQ_ID_MASK = 0xFFFFFF | ||
| 1110 | }; | ||
| 1111 | |||
| 1112 | struct ocrdma_query_srq { | ||
| 1113 | struct ocrdma_mqe_hdr hdr; | ||
| 1114 | struct ocrdma_mbx_rsp req; | ||
| 1115 | |||
| 1116 | u32 id; | ||
| 1117 | } __packed; | ||
| 1118 | |||
| 1119 | enum { | ||
| 1120 | OCRDMA_QUERY_SRQ_RSP_PD_ID_SHIFT = 0, | ||
| 1121 | OCRDMA_QUERY_SRQ_RSP_PD_ID_MASK = 0xFFFF, | ||
| 1122 | OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT = 16, | ||
| 1123 | OCRDMA_QUERY_SRQ_RSP_MAX_RQE_MASK = 0xFFFF << | ||
| 1124 | OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT, | ||
| 1125 | |||
| 1126 | OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_SHIFT = 0, | ||
| 1127 | OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK = 0xFFFF, | ||
| 1128 | OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT = 16, | ||
| 1129 | OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_MASK = 0xFFFF << | ||
| 1130 | OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT | ||
| 1131 | }; | ||
| 1132 | |||
| 1133 | struct ocrdma_query_srq_rsp { | ||
| 1134 | struct ocrdma_mqe_hdr hdr; | ||
| 1135 | struct ocrdma_mbx_rsp req; | ||
| 1136 | |||
| 1137 | u32 max_rqe_pdid; | ||
| 1138 | u32 srq_lmt_max_sge; | ||
| 1139 | } __packed; | ||
| 1140 | |||
| 1141 | enum { | ||
| 1142 | OCRDMA_DESTROY_SRQ_ID_SHIFT = 0, | ||
| 1143 | OCRDMA_DESTROY_SRQ_ID_MASK = 0xFFFFFF | ||
| 1144 | }; | ||
| 1145 | |||
| 1146 | struct ocrdma_destroy_srq { | ||
| 1147 | struct ocrdma_mqe_hdr hdr; | ||
| 1148 | struct ocrdma_mbx_rsp req; | ||
| 1149 | |||
| 1150 | u32 id; | ||
| 1151 | } __packed; | ||
| 1152 | |||
| 1153 | enum { | ||
| 1154 | OCRDMA_ALLOC_PD_ENABLE_DPP = Bit(16), | ||
| 1155 | OCRDMA_PD_MAX_DPP_ENABLED_QP = 8, | ||
| 1156 | OCRDMA_DPP_PAGE_SIZE = 4096 | ||
| 1157 | }; | ||
| 1158 | |||
| 1159 | struct ocrdma_alloc_pd { | ||
| 1160 | struct ocrdma_mqe_hdr hdr; | ||
| 1161 | struct ocrdma_mbx_hdr req; | ||
| 1162 | u32 enable_dpp_rsvd; | ||
| 1163 | } __packed; | ||
| 1164 | |||
| 1165 | enum { | ||
| 1166 | OCRDMA_ALLOC_PD_RSP_DPP = Bit(16), | ||
| 1167 | OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT = 20, | ||
| 1168 | OCRDMA_ALLOC_PD_RSP_PDID_MASK = 0xFFFF, | ||
| 1169 | }; | ||
| 1170 | |||
| 1171 | struct ocrdma_alloc_pd_rsp { | ||
| 1172 | struct ocrdma_mqe_hdr hdr; | ||
| 1173 | struct ocrdma_mbx_rsp rsp; | ||
| 1174 | u32 dpp_page_pdid; | ||
| 1175 | } __packed; | ||
| 1176 | |||
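
The PD id, the DPP-enabled flag, and the DPP doorbell page number are all folded into dpp_page_pdid. A sketch of decoding the response (hypothetical helper; the patch does the equivalent in its PD allocation path):

    static void sketch_decode_pd_rsp(struct ocrdma_alloc_pd_rsp *rsp,
    				 u16 *pd_id, bool *dpp, u32 *dpp_page)
    {
    	*pd_id = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_PDID_MASK;
    	*dpp = !!(rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP);
    	*dpp_page = rsp->dpp_page_pdid >> OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
    }
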
| 1177 | struct ocrdma_dealloc_pd { | ||
| 1178 | struct ocrdma_mqe_hdr hdr; | ||
| 1179 | struct ocrdma_mbx_hdr req; | ||
| 1180 | u32 id; | ||
| 1181 | } __packed; | ||
| 1182 | |||
| 1183 | struct ocrdma_dealloc_pd_rsp { | ||
| 1184 | struct ocrdma_mqe_hdr hdr; | ||
| 1185 | struct ocrdma_mbx_rsp rsp; | ||
| 1186 | } __packed; | ||
| 1187 | |||
| 1188 | enum { | ||
| 1189 | OCRDMA_ADDR_CHECK_ENABLE = 1, | ||
| 1190 | OCRDMA_ADDR_CHECK_DISABLE = 0 | ||
| 1191 | }; | ||
| 1192 | |||
| 1193 | enum { | ||
| 1194 | OCRDMA_ALLOC_LKEY_PD_ID_SHIFT = 0, | ||
| 1195 | OCRDMA_ALLOC_LKEY_PD_ID_MASK = 0xFFFF, | ||
| 1196 | |||
| 1197 | OCRDMA_ALLOC_LKEY_ADDR_CHECK_SHIFT = 0, | ||
| 1198 | OCRDMA_ALLOC_LKEY_ADDR_CHECK_MASK = Bit(0), | ||
| 1199 | OCRDMA_ALLOC_LKEY_FMR_SHIFT = 1, | ||
| 1200 | OCRDMA_ALLOC_LKEY_FMR_MASK = Bit(1), | ||
| 1201 | OCRDMA_ALLOC_LKEY_REMOTE_INV_SHIFT = 2, | ||
| 1202 | OCRDMA_ALLOC_LKEY_REMOTE_INV_MASK = Bit(2), | ||
| 1203 | OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT = 3, | ||
| 1204 | OCRDMA_ALLOC_LKEY_REMOTE_WR_MASK = Bit(3), | ||
| 1205 | OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT = 4, | ||
| 1206 | OCRDMA_ALLOC_LKEY_REMOTE_RD_MASK = Bit(4), | ||
| 1207 | OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT = 5, | ||
| 1208 | OCRDMA_ALLOC_LKEY_LOCAL_WR_MASK = Bit(5), | ||
| 1209 | OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_MASK = Bit(6), | ||
| 1210 | OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT = 6, | ||
| 1211 | OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT = 16, | ||
| 1212 | OCRDMA_ALLOC_LKEY_PBL_SIZE_MASK = 0xFFFF << | ||
| 1213 | OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT | ||
| 1214 | }; | ||
| 1215 | |||
| 1216 | struct ocrdma_alloc_lkey { | ||
| 1217 | struct ocrdma_mqe_hdr hdr; | ||
| 1218 | struct ocrdma_mbx_hdr req; | ||
| 1219 | |||
| 1220 | u32 pdid; | ||
| 1221 | u32 pbl_sz_flags; | ||
| 1222 | } __packed; | ||
| 1223 | |||
| 1224 | struct ocrdma_alloc_lkey_rsp { | ||
| 1225 | struct ocrdma_mqe_hdr hdr; | ||
| 1226 | struct ocrdma_mbx_rsp rsp; | ||
| 1227 | |||
| 1228 | u32 lrkey; | ||
| 1229 | u32 num_pbl_rsvd; | ||
| 1230 | } __packed; | ||
| 1231 | |||
| 1232 | struct ocrdma_dealloc_lkey { | ||
| 1233 | struct ocrdma_mqe_hdr hdr; | ||
| 1234 | struct ocrdma_mbx_hdr req; | ||
| 1235 | |||
| 1236 | u32 lkey; | ||
| 1237 | u32 rsvd_frmr; | ||
| 1238 | } __packed; | ||
| 1239 | |||
| 1240 | struct ocrdma_dealloc_lkey_rsp { | ||
| 1241 | struct ocrdma_mqe_hdr hdr; | ||
| 1242 | struct ocrdma_mbx_rsp rsp; | ||
| 1243 | } __packed; | ||
| 1244 | |||
| 1245 | #define MAX_OCRDMA_NSMR_PBL (u32)22 | ||
| 1246 | #define MAX_OCRDMA_PBL_SIZE 65536 | ||
| 1247 | #define MAX_OCRDMA_PBL_PER_LKEY 32767 | ||
| 1248 | |||
| 1249 | enum { | ||
| 1250 | OCRDMA_REG_NSMR_LRKEY_INDEX_SHIFT = 0, | ||
| 1251 | OCRDMA_REG_NSMR_LRKEY_INDEX_MASK = 0xFFFFFF, | ||
| 1252 | OCRDMA_REG_NSMR_LRKEY_SHIFT = 24, | ||
| 1253 | OCRDMA_REG_NSMR_LRKEY_MASK = 0xFF << | ||
| 1254 | OCRDMA_REG_NSMR_LRKEY_SHIFT, | ||
| 1255 | |||
| 1256 | OCRDMA_REG_NSMR_PD_ID_SHIFT = 0, | ||
| 1257 | OCRDMA_REG_NSMR_PD_ID_MASK = 0xFFFF, | ||
| 1258 | OCRDMA_REG_NSMR_NUM_PBL_SHIFT = 16, | ||
| 1259 | OCRDMA_REG_NSMR_NUM_PBL_MASK = 0xFFFF << | ||
| 1260 | OCRDMA_REG_NSMR_NUM_PBL_SHIFT, | ||
| 1261 | |||
| 1262 | OCRDMA_REG_NSMR_PBE_SIZE_SHIFT = 0, | ||
| 1263 | OCRDMA_REG_NSMR_PBE_SIZE_MASK = 0xFFFF, | ||
| 1264 | OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT = 16, | ||
| 1265 | OCRDMA_REG_NSMR_HPAGE_SIZE_MASK = 0xFF << | ||
| 1266 | OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT, | ||
| 1267 | OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT = 24, | ||
| 1268 | OCRDMA_REG_NSMR_BIND_MEMWIN_MASK = Bit(24), | ||
| 1269 | OCRDMA_REG_NSMR_ZB_SHIFT = 25, | ||
| 1270 | OCRDMA_REG_NSMR_ZB_SHIFT_MASK = Bit(25), | ||
| 1271 | OCRDMA_REG_NSMR_REMOTE_INV_SHIFT = 26, | ||
| 1272 | OCRDMA_REG_NSMR_REMOTE_INV_MASK = Bit(26), | ||
| 1273 | OCRDMA_REG_NSMR_REMOTE_WR_SHIFT = 27, | ||
| 1274 | OCRDMA_REG_NSMR_REMOTE_WR_MASK = Bit(27), | ||
| 1275 | OCRDMA_REG_NSMR_REMOTE_RD_SHIFT = 28, | ||
| 1276 | OCRDMA_REG_NSMR_REMOTE_RD_MASK = Bit(28), | ||
| 1277 | OCRDMA_REG_NSMR_LOCAL_WR_SHIFT = 29, | ||
| 1278 | OCRDMA_REG_NSMR_LOCAL_WR_MASK = Bit(29), | ||
| 1279 | OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT = 30, | ||
| 1280 | OCRDMA_REG_NSMR_REMOTE_ATOMIC_MASK = Bit(30), | ||
| 1281 | OCRDMA_REG_NSMR_LAST_SHIFT = 31, | ||
| 1282 | OCRDMA_REG_NSMR_LAST_MASK = Bit(31) | ||
| 1283 | }; | ||
| 1284 | |||
| 1285 | struct ocrdma_reg_nsmr { | ||
| 1286 | struct ocrdma_mqe_hdr hdr; | ||
| 1287 | struct ocrdma_mbx_hdr cmd; | ||
| 1288 | |||
| 1289 | u32 lrkey_key_index; | ||
| 1290 | u32 num_pbl_pdid; | ||
| 1291 | u32 flags_hpage_pbe_sz; | ||
| 1292 | u32 totlen_low; | ||
| 1293 | u32 totlen_high; | ||
| 1294 | u32 fbo_low; | ||
| 1295 | u32 fbo_high; | ||
| 1296 | u32 va_loaddr; | ||
| 1297 | u32 va_hiaddr; | ||
| 1298 | struct ocrdma_pa pbl[MAX_OCRDMA_NSMR_PBL]; | ||
| 1299 | } __packed; | ||
| 1300 | |||
| 1301 | enum { | ||
| 1302 | OCRDMA_REG_NSMR_CONT_PBL_SHIFT = 0, | ||
| 1303 | OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK = 0xFFFF, | ||
| 1304 | OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT = 16, | ||
| 1305 | OCRDMA_REG_NSMR_CONT_NUM_PBL_MASK = 0xFFFF << | ||
| 1306 | OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT, | ||
| 1307 | |||
| 1308 | OCRDMA_REG_NSMR_CONT_LAST_SHIFT = 31, | ||
| 1309 | OCRDMA_REG_NSMR_CONT_LAST_MASK = Bit(31) | ||
| 1310 | }; | ||
| 1311 | |||
| 1312 | struct ocrdma_reg_nsmr_cont { | ||
| 1313 | struct ocrdma_mqe_hdr hdr; | ||
| 1314 | struct ocrdma_mbx_hdr cmd; | ||
| 1315 | |||
| 1316 | u32 lrkey; | ||
| 1317 | u32 num_pbl_offset; | ||
| 1318 | u32 last; | ||
| 1319 | |||
| 1320 | struct ocrdma_pa pbl[MAX_OCRDMA_NSMR_PBL]; | ||
| 1321 | } __packed; | ||
| 1322 | |||
| 1323 | struct ocrdma_pbe { | ||
| 1324 | u32 pa_hi; | ||
| 1325 | u32 pa_lo; | ||
| 1326 | } __packed; | ||
| 1327 | |||
| 1328 | enum { | ||
| 1329 | OCRDMA_REG_NSMR_RSP_NUM_PBL_SHIFT = 16, | ||
| 1330 | OCRDMA_REG_NSMR_RSP_NUM_PBL_MASK = 0xFFFF0000 | ||
| 1331 | }; | ||
| 1332 | struct ocrdma_reg_nsmr_rsp { | ||
| 1333 | struct ocrdma_mqe_hdr hdr; | ||
| 1334 | struct ocrdma_mbx_rsp rsp; | ||
| 1335 | |||
| 1336 | u32 lrkey; | ||
| 1337 | u32 num_pbl; | ||
| 1338 | } __packed; | ||
| 1339 | |||
| 1340 | enum { | ||
| 1341 | OCRDMA_REG_NSMR_CONT_RSP_LRKEY_INDEX_SHIFT = 0, | ||
| 1342 | OCRDMA_REG_NSMR_CONT_RSP_LRKEY_INDEX_MASK = 0xFFFFFF, | ||
| 1343 | OCRDMA_REG_NSMR_CONT_RSP_LRKEY_SHIFT = 24, | ||
| 1344 | OCRDMA_REG_NSMR_CONT_RSP_LRKEY_MASK = 0xFF << | ||
| 1345 | OCRDMA_REG_NSMR_CONT_RSP_LRKEY_SHIFT, | ||
| 1346 | |||
| 1347 | OCRDMA_REG_NSMR_CONT_RSP_NUM_PBL_SHIFT = 16, | ||
| 1348 | OCRDMA_REG_NSMR_CONT_RSP_NUM_PBL_MASK = 0xFFFF << | ||
| 1349 | OCRDMA_REG_NSMR_CONT_RSP_NUM_PBL_SHIFT | ||
| 1350 | }; | ||
| 1351 | |||
| 1352 | struct ocrdma_reg_nsmr_cont_rsp { | ||
| 1353 | struct ocrdma_mqe_hdr hdr; | ||
| 1354 | struct ocrdma_mbx_rsp rsp; | ||
| 1355 | |||
| 1356 | u32 lrkey_key_index; | ||
| 1357 | u32 num_pbl; | ||
| 1358 | } __packed; | ||
| 1359 | |||
| 1360 | enum { | ||
| 1361 | OCRDMA_ALLOC_MW_PD_ID_SHIFT = 0, | ||
| 1362 | OCRDMA_ALLOC_MW_PD_ID_MASK = 0xFFFF | ||
| 1363 | }; | ||
| 1364 | |||
| 1365 | struct ocrdma_alloc_mw { | ||
| 1366 | struct ocrdma_mqe_hdr hdr; | ||
| 1367 | struct ocrdma_mbx_hdr req; | ||
| 1368 | |||
| 1369 | u32 pdid; | ||
| 1370 | } __packed; | ||
| 1371 | |||
| 1372 | enum { | ||
| 1373 | OCRDMA_ALLOC_MW_RSP_LRKEY_INDEX_SHIFT = 0, | ||
| 1374 | OCRDMA_ALLOC_MW_RSP_LRKEY_INDEX_MASK = 0xFFFFFF | ||
| 1375 | }; | ||
| 1376 | |||
| 1377 | struct ocrdma_alloc_mw_rsp { | ||
| 1378 | struct ocrdma_mqe_hdr hdr; | ||
| 1379 | struct ocrdma_mbx_rsp rsp; | ||
| 1380 | |||
| 1381 | u32 lrkey_index; | ||
| 1382 | } __packed; | ||
| 1383 | |||
| 1384 | struct ocrdma_attach_mcast { | ||
| 1385 | struct ocrdma_mqe_hdr hdr; | ||
| 1386 | struct ocrdma_mbx_hdr req; | ||
| 1387 | u32 qp_id; | ||
| 1388 | u8 mgid[16]; | ||
| 1389 | u32 mac_b0_to_b3; | ||
| 1390 | u32 vlan_mac_b4_to_b5; | ||
| 1391 | } __packed; | ||
| 1392 | |||
| 1393 | struct ocrdma_attach_mcast_rsp { | ||
| 1394 | struct ocrdma_mqe_hdr hdr; | ||
| 1395 | struct ocrdma_mbx_rsp rsp; | ||
| 1396 | } __packed; | ||
| 1397 | |||
| 1398 | struct ocrdma_detach_mcast { | ||
| 1399 | struct ocrdma_mqe_hdr hdr; | ||
| 1400 | struct ocrdma_mbx_hdr req; | ||
| 1401 | u32 qp_id; | ||
| 1402 | u8 mgid[16]; | ||
| 1403 | u32 mac_b0_to_b3; | ||
| 1404 | u32 vlan_mac_b4_to_b5; | ||
| 1405 | } __packed; | ||
| 1406 | |||
| 1407 | struct ocrdma_detach_mcast_rsp { | ||
| 1408 | struct ocrdma_mqe_hdr hdr; | ||
| 1409 | struct ocrdma_mbx_rsp rsp; | ||
| 1410 | } __packed; | ||
| 1411 | |||
| 1412 | enum { | ||
| 1413 | OCRDMA_CREATE_AH_NUM_PAGES_SHIFT = 19, | ||
| 1414 | OCRDMA_CREATE_AH_NUM_PAGES_MASK = 0xF << | ||
| 1415 | OCRDMA_CREATE_AH_NUM_PAGES_SHIFT, | ||
| 1416 | |||
| 1417 | OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT = 16, | ||
| 1418 | OCRDMA_CREATE_AH_PAGE_SIZE_MASK = 0x7 << | ||
| 1419 | OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT, | ||
| 1420 | |||
| 1421 | OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT = 23, | ||
| 1422 | OCRDMA_CREATE_AH_ENTRY_SIZE_MASK = 0x1FF << | ||
| 1423 | OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT, | ||
| 1424 | }; | ||
| 1425 | |||
| 1426 | #define OCRDMA_AH_TBL_PAGES 8 | ||
| 1427 | |||
| 1428 | struct ocrdma_create_ah_tbl { | ||
| 1429 | struct ocrdma_mqe_hdr hdr; | ||
| 1430 | struct ocrdma_mbx_hdr req; | ||
| 1431 | |||
| 1432 | u32 ah_conf; | ||
| 1433 | struct ocrdma_pa tbl_addr[OCRDMA_AH_TBL_PAGES]; | ||
| 1434 | } __packed; | ||
| 1435 | |||
| 1436 | struct ocrdma_create_ah_tbl_rsp { | ||
| 1437 | struct ocrdma_mqe_hdr hdr; | ||
| 1438 | struct ocrdma_mbx_rsp rsp; | ||
| 1439 | u32 ahid; | ||
| 1440 | } __packed; | ||
| 1441 | |||
| 1442 | struct ocrdma_delete_ah_tbl { | ||
| 1443 | struct ocrdma_mqe_hdr hdr; | ||
| 1444 | struct ocrdma_mbx_hdr req; | ||
| 1445 | u32 ahid; | ||
| 1446 | } __packed; | ||
| 1447 | |||
| 1448 | struct ocrdma_delete_ah_tbl_rsp { | ||
| 1449 | struct ocrdma_mqe_hdr hdr; | ||
| 1450 | struct ocrdma_mbx_rsp rsp; | ||
| 1451 | } __packed; | ||
| 1452 | |||
| 1453 | enum { | ||
| 1454 | OCRDMA_EQE_VALID_SHIFT = 0, | ||
| 1455 | OCRDMA_EQE_VALID_MASK = Bit(0), | ||
| 1456 | OCRDMA_EQE_FOR_CQE_MASK = 0xFFFE, | ||
| 1457 | OCRDMA_EQE_RESOURCE_ID_SHIFT = 16, | ||
| 1458 | OCRDMA_EQE_RESOURCE_ID_MASK = 0xFFFF << | ||
| 1459 | OCRDMA_EQE_RESOURCE_ID_SHIFT, | ||
| 1460 | }; | ||
| 1461 | |||
| 1462 | struct ocrdma_eqe { | ||
| 1463 | u32 id_valid; | ||
| 1464 | } __packed; | ||
| 1465 | |||
| 1466 | enum OCRDMA_CQE_STATUS { | ||
| 1467 | OCRDMA_CQE_SUCCESS = 0, | ||
| 1468 | OCRDMA_CQE_LOC_LEN_ERR, | ||
| 1469 | OCRDMA_CQE_LOC_QP_OP_ERR, | ||
| 1470 | OCRDMA_CQE_LOC_EEC_OP_ERR, | ||
| 1471 | OCRDMA_CQE_LOC_PROT_ERR, | ||
| 1472 | OCRDMA_CQE_WR_FLUSH_ERR, | ||
| 1473 | OCRDMA_CQE_MW_BIND_ERR, | ||
| 1474 | OCRDMA_CQE_BAD_RESP_ERR, | ||
| 1475 | OCRDMA_CQE_LOC_ACCESS_ERR, | ||
| 1476 | OCRDMA_CQE_REM_INV_REQ_ERR, | ||
| 1477 | OCRDMA_CQE_REM_ACCESS_ERR, | ||
| 1478 | OCRDMA_CQE_REM_OP_ERR, | ||
| 1479 | OCRDMA_CQE_RETRY_EXC_ERR, | ||
| 1480 | OCRDMA_CQE_RNR_RETRY_EXC_ERR, | ||
| 1481 | OCRDMA_CQE_LOC_RDD_VIOL_ERR, | ||
| 1482 | OCRDMA_CQE_REM_INV_RD_REQ_ERR, | ||
| 1483 | OCRDMA_CQE_REM_ABORT_ERR, | ||
| 1484 | OCRDMA_CQE_INV_EECN_ERR, | ||
| 1485 | OCRDMA_CQE_INV_EEC_STATE_ERR, | ||
| 1486 | OCRDMA_CQE_FATAL_ERR, | ||
| 1487 | OCRDMA_CQE_RESP_TIMEOUT_ERR, | ||
| 1488 | OCRDMA_CQE_GENERAL_ERR | ||
| 1489 | }; | ||
| 1490 | |||
| 1491 | enum { | ||
| 1492 | /* w0 */ | ||
| 1493 | OCRDMA_CQE_WQEIDX_SHIFT = 0, | ||
| 1494 | OCRDMA_CQE_WQEIDX_MASK = 0xFFFF, | ||
| 1495 | |||
| 1496 | /* w1 */ | ||
| 1497 | OCRDMA_CQE_UD_XFER_LEN_SHIFT = 16, | ||
| 1498 | OCRDMA_CQE_PKEY_SHIFT = 0, | ||
| 1499 | OCRDMA_CQE_PKEY_MASK = 0xFFFF, | ||
| 1500 | |||
| 1501 | /* w2 */ | ||
| 1502 | OCRDMA_CQE_QPN_SHIFT = 0, | ||
| 1503 | OCRDMA_CQE_QPN_MASK = 0x0000FFFF, | ||
| 1504 | |||
| 1505 | OCRDMA_CQE_BUFTAG_SHIFT = 16, | ||
| 1506 | OCRDMA_CQE_BUFTAG_MASK = 0xFFFF << OCRDMA_CQE_BUFTAG_SHIFT, | ||
| 1507 | |||
| 1508 | /* w3 */ | ||
| 1509 | OCRDMA_CQE_UD_STATUS_SHIFT = 24, | ||
| 1510 | OCRDMA_CQE_UD_STATUS_MASK = 0x7 << OCRDMA_CQE_UD_STATUS_SHIFT, | ||
| 1511 | OCRDMA_CQE_STATUS_SHIFT = 16, | ||
| 1512 | OCRDMA_CQE_STATUS_MASK = 0xFF << OCRDMA_CQE_STATUS_SHIFT, | ||
| 1513 | OCRDMA_CQE_VALID = Bit(31), | ||
| 1514 | OCRDMA_CQE_INVALIDATE = Bit(30), | ||
| 1515 | OCRDMA_CQE_QTYPE = Bit(29), | ||
| 1516 | OCRDMA_CQE_IMM = Bit(28), | ||
| 1517 | OCRDMA_CQE_WRITE_IMM = Bit(27), | ||
| 1518 | OCRDMA_CQE_QTYPE_SQ = 0, | ||
| 1519 | OCRDMA_CQE_QTYPE_RQ = 1, | ||
| 1520 | OCRDMA_CQE_SRCQP_MASK = 0xFFFFFF | ||
| 1521 | }; | ||
| 1522 | |||
| 1523 | struct ocrdma_cqe { | ||
| 1524 | union { | ||
| 1525 | /* w0 to w2 */ | ||
| 1526 | struct { | ||
| 1527 | u32 wqeidx; | ||
| 1528 | u32 bytes_xfered; | ||
| 1529 | u32 qpn; | ||
| 1530 | } wq; | ||
| 1531 | struct { | ||
| 1532 | u32 lkey_immdt; | ||
| 1533 | u32 rxlen; | ||
| 1534 | u32 buftag_qpn; | ||
| 1535 | } rq; | ||
| 1536 | struct { | ||
| 1537 | u32 lkey_immdt; | ||
| 1538 | u32 rxlen_pkey; | ||
| 1539 | u32 buftag_qpn; | ||
| 1540 | } ud; | ||
| 1541 | struct { | ||
| 1542 | u32 word_0; | ||
| 1543 | u32 word_1; | ||
| 1544 | u32 qpn; | ||
| 1545 | } cmn; | ||
| 1546 | }; | ||
| 1547 | u32 flags_status_srcqpn; /* w3 */ | ||
| 1548 | } __packed; | ||
| 1549 | |||
| 1550 | #define is_cqe_valid(cq, cqe) \ | ||
| 1551 | (((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID)\ | ||
| 1552 | == cq->phase) ? 1 : 0) | ||
| 1553 | #define is_cqe_for_sq(cqe) \ | ||
| 1554 | ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_QTYPE) ? 0 : 1) | ||
| 1555 | #define is_cqe_for_rq(cqe) \ | ||
| 1556 | ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_QTYPE) ? 1 : 0) | ||
| 1557 | #define is_cqe_invalidated(cqe) \ | ||
| 1558 | ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_INVALIDATE) ? \ | ||
| 1559 | 1 : 0) | ||
| 1560 | #define is_cqe_imm(cqe) \ | ||
| 1561 | ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_IMM) ? 1 : 0) | ||
| 1562 | #define is_cqe_wr_imm(cqe) \ | ||
| 1563 | ((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_WRITE_IMM) ? 1 : 0) | ||
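The VALID/phase comparison in is_cqe_valid() is what lets the consumer poll the CQ ring without a shared producer index: hardware writes the VALID bit with the current phase, and the consumer flips its expected phase each time it wraps, so leftover entries from the previous lap test as invalid. A self-contained, host-endian sketch of that scheme (the real macros go through le32_to_cpu(); demo_cq and demo_cqe are stand-ins, not driver types):

    #include <stdint.h>
    #include <stdbool.h>

    #define OCRDMA_CQE_VALID (1u << 31)
    #define OCRDMA_CQE_QTYPE (1u << 29)

    struct demo_cqe { uint32_t w0, w1, w2, flags_status_srcqpn; };

    struct demo_cq {
            struct demo_cqe *ring;
            unsigned int size;      /* number of CQEs in the ring */
            unsigned int getp;      /* consumer index */
            uint32_t phase;         /* expected value of the VALID bit */
    };

    /* Returns true if one CQE was consumed. Mirrors is_cqe_valid(): an
     * entry is new only when its VALID bit equals the CQ's current
     * phase, which flips on every wrap. */
    static bool demo_poll_one(struct demo_cq *cq, bool *for_sq)
    {
            struct demo_cqe *cqe = &cq->ring[cq->getp];

            if ((cqe->flags_status_srcqpn & OCRDMA_CQE_VALID) != cq->phase)
                    return false;

            /* QTYPE clear means an SQ completion, as in is_cqe_for_sq() */
            *for_sq = !(cqe->flags_status_srcqpn & OCRDMA_CQE_QTYPE);

            if (++cq->getp == cq->size) {
                    cq->getp = 0;
                    cq->phase ^= OCRDMA_CQE_VALID;
            }
            return true;
    }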
| 1564 | |||
| 1565 | struct ocrdma_sge { | ||
| 1566 | u32 addr_hi; | ||
| 1567 | u32 addr_lo; | ||
| 1568 | u32 lrkey; | ||
| 1569 | u32 len; | ||
| 1570 | } __packed; | ||
| 1571 | |||
| 1572 | enum { | ||
| 1573 | OCRDMA_FLAG_SIG = 0x1, | ||
| 1574 | OCRDMA_FLAG_INV = 0x2, | ||
| 1575 | OCRDMA_FLAG_FENCE_L = 0x4, | ||
| 1576 | OCRDMA_FLAG_FENCE_R = 0x8, | ||
| 1577 | OCRDMA_FLAG_SOLICIT = 0x10, | ||
| 1578 | OCRDMA_FLAG_IMM = 0x20, | ||
| 1579 | |||
| 1580 | /* Stag flags */ | ||
| 1581 | OCRDMA_LKEY_FLAG_LOCAL_WR = 0x1, | ||
| 1582 | OCRDMA_LKEY_FLAG_REMOTE_RD = 0x2, | ||
| 1583 | OCRDMA_LKEY_FLAG_REMOTE_WR = 0x4, | ||
| 1584 | OCRDMA_LKEY_FLAG_VATO = 0x8, | ||
| 1585 | }; | ||
| 1586 | |||
| 1587 | enum OCRDMA_WQE_OPCODE { | ||
| 1588 | OCRDMA_WRITE = 0x06, | ||
| 1589 | OCRDMA_READ = 0x0C, | ||
| 1590 | OCRDMA_RESV0 = 0x02, | ||
| 1591 | OCRDMA_SEND = 0x00, | ||
| 1592 | OCRDMA_CMP_SWP = 0x14, | ||
| 1593 | OCRDMA_BIND_MW = 0x10, | ||
| 1594 | OCRDMA_RESV1 = 0x0A, | ||
| 1595 | OCRDMA_LKEY_INV = 0x15, | ||
| 1596 | OCRDMA_FETCH_ADD = 0x13, | ||
| 1597 | OCRDMA_POST_RQ = 0x12 | ||
| 1598 | }; | ||
| 1599 | |||
| 1600 | enum { | ||
| 1601 | OCRDMA_TYPE_INLINE = 0x0, | ||
| 1602 | OCRDMA_TYPE_LKEY = 0x1, | ||
| 1603 | }; | ||
| 1604 | |||
| 1605 | enum { | ||
| 1606 | OCRDMA_WQE_OPCODE_SHIFT = 0, | ||
| 1607 | OCRDMA_WQE_OPCODE_MASK = 0x0000001F, | ||
| 1608 | OCRDMA_WQE_FLAGS_SHIFT = 5, | ||
| 1609 | OCRDMA_WQE_TYPE_SHIFT = 16, | ||
| 1610 | OCRDMA_WQE_TYPE_MASK = 0x00030000, | ||
| 1611 | OCRDMA_WQE_SIZE_SHIFT = 18, | ||
| 1612 | OCRDMA_WQE_SIZE_MASK = 0xFF, | ||
| 1613 | OCRDMA_WQE_NXT_WQE_SIZE_SHIFT = 25, | ||
| 1614 | |||
| 1615 | OCRDMA_WQE_LKEY_FLAGS_SHIFT = 0, | ||
| 1616 | OCRDMA_WQE_LKEY_FLAGS_MASK = 0xF | ||
| 1617 | }; | ||
| 1618 | |||
| 1619 | /* header WQE for all the SQ and RQ operations */ | ||
| 1620 | struct ocrdma_hdr_wqe { | ||
| 1621 | u32 cw; | ||
| 1622 | union { | ||
| 1623 | u32 rsvd_tag; | ||
| 1624 | u32 rsvd_lkey_flags; | ||
| 1625 | }; | ||
| 1626 | union { | ||
| 1627 | u32 immdt; | ||
| 1628 | u32 lkey; | ||
| 1629 | }; | ||
| 1630 | u32 total_len; | ||
| 1631 | } __packed; | ||
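A short sketch of how the OCRDMA_WQE_* shifts above compose the cw control word of this header (constants copied from the enum; treating size as an encoded WQE length is an assumption, not stated in this file):

    #include <stdint.h>

    /* copied from the OCRDMA_WQE_* enum above */
    #define WQE_OPCODE_SHIFT  0
    #define WQE_FLAGS_SHIFT   5
    #define WQE_TYPE_SHIFT    16
    #define WQE_SIZE_SHIFT    18

    static uint32_t demo_build_cw(uint32_t opcode, uint32_t flags,
                                  uint32_t type, uint32_t size)
    {
            /* opcode in bits 0..4, per-WQE flags from bit 5, descriptor
             * type in bits 16..17, encoded WQE size from bit 18 */
            return (opcode << WQE_OPCODE_SHIFT) |
                   (flags << WQE_FLAGS_SHIFT) |
                   (type << WQE_TYPE_SHIFT) |
                   (size << WQE_SIZE_SHIFT);
    }

    /* e.g. a signaled inline SEND would be demo_build_cw(OCRDMA_SEND,
     * OCRDMA_FLAG_SIG, OCRDMA_TYPE_INLINE, size) using the enum values
     * defined earlier in this header. */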
| 1632 | |||
| 1633 | struct ocrdma_ewqe_ud_hdr { | ||
| 1634 | u32 rsvd_dest_qpn; | ||
| 1635 | u32 qkey; | ||
| 1636 | u32 rsvd_ahid; | ||
| 1637 | u32 rsvd; | ||
| 1638 | } __packed; | ||
| 1639 | |||
| 1640 | struct ocrdma_eth_basic { | ||
| 1641 | u8 dmac[6]; | ||
| 1642 | u8 smac[6]; | ||
| 1643 | __be16 eth_type; | ||
| 1644 | } __packed; | ||
| 1645 | |||
| 1646 | struct ocrdma_eth_vlan { | ||
| 1647 | u8 dmac[6]; | ||
| 1648 | u8 smac[6]; | ||
| 1649 | __be16 eth_type; | ||
| 1650 | __be16 vlan_tag; | ||
| 1651 | #define OCRDMA_ROCE_ETH_TYPE 0x8915 | ||
| 1652 | __be16 roce_eth_type; | ||
| 1653 | } __packed; | ||
| 1654 | |||
| 1655 | struct ocrdma_grh { | ||
| 1656 | __be32 tclass_flow; | ||
| 1657 | __be32 pdid_hoplimit; | ||
| 1658 | u8 sgid[16]; | ||
| 1659 | u8 dgid[16]; | ||
| 1660 | u16 rsvd; | ||
| 1661 | } __packed; | ||
| 1662 | |||
| 1663 | #define OCRDMA_AV_VALID Bit(0) | ||
| 1664 | #define OCRDMA_AV_VLAN_VALID Bit(1) | ||
| 1665 | |||
| 1666 | struct ocrdma_av { | ||
| 1667 | struct ocrdma_eth_vlan eth_hdr; | ||
| 1668 | struct ocrdma_grh grh; | ||
| 1669 | u32 valid; | ||
| 1670 | } __packed; | ||
| 1671 | |||
| 1672 | #endif /* __OCRDMA_SLI_H__ */ | ||
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c new file mode 100644 index 000000000000..e9f74d1b48f6 --- /dev/null +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
| @@ -0,0 +1,2537 @@ | |||
| 1 | /******************************************************************* | ||
| 2 | * This file is part of the Emulex RoCE Device Driver for * | ||
| 3 | * RoCE (RDMA over Converged Ethernet) adapters. * | ||
| 4 | * Copyright (C) 2008-2012 Emulex. All rights reserved. * | ||
| 5 | * EMULEX and SLI are trademarks of Emulex. * | ||
| 6 | * www.emulex.com * | ||
| 7 | * * | ||
| 8 | * This program is free software; you can redistribute it and/or * | ||
| 9 | * modify it under the terms of version 2 of the GNU General * | ||
| 10 | * Public License as published by the Free Software Foundation. * | ||
| 11 | * This program is distributed in the hope that it will be useful. * | ||
| 12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | ||
| 13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | ||
| 15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | ||
| 16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | ||
| 17 | * more details, a copy of which can be found in the file COPYING * | ||
| 18 | * included with this package. * | ||
| 19 | * | ||
| 20 | * Contact Information: | ||
| 21 | * linux-drivers@emulex.com | ||
| 22 | * | ||
| 23 | * Emulex | ||
| 24 | * 3333 Susan Street | ||
| 25 | * Costa Mesa, CA 92626 | ||
| 26 | *******************************************************************/ | ||
| 27 | |||
| 28 | #include <linux/dma-mapping.h> | ||
| 29 | #include <rdma/ib_verbs.h> | ||
| 30 | #include <rdma/ib_user_verbs.h> | ||
| 31 | #include <rdma/iw_cm.h> | ||
| 32 | #include <rdma/ib_umem.h> | ||
| 33 | #include <rdma/ib_addr.h> | ||
| 34 | |||
| 35 | #include "ocrdma.h" | ||
| 36 | #include "ocrdma_hw.h" | ||
| 37 | #include "ocrdma_verbs.h" | ||
| 38 | #include "ocrdma_abi.h" | ||
| 39 | |||
| 40 | int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) | ||
| 41 | { | ||
| 42 | if (index > 1) | ||
| 43 | return -EINVAL; | ||
| 44 | |||
| 45 | *pkey = 0xffff; | ||
| 46 | return 0; | ||
| 47 | } | ||
| 48 | |||
| 49 | int ocrdma_query_gid(struct ib_device *ibdev, u8 port, | ||
| 50 | int index, union ib_gid *sgid) | ||
| 51 | { | ||
| 52 | struct ocrdma_dev *dev; | ||
| 53 | |||
| 54 | dev = get_ocrdma_dev(ibdev); | ||
| 55 | memset(sgid, 0, sizeof(*sgid)); | ||
| 56 | if (index > OCRDMA_MAX_SGID) | ||
| 57 | return -EINVAL; | ||
| 58 | |||
| 59 | memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); | ||
| 60 | |||
| 61 | return 0; | ||
| 62 | } | ||
| 63 | |||
| 64 | int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr) | ||
| 65 | { | ||
| 66 | struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); | ||
| 67 | |||
| 68 | memset(attr, 0, sizeof *attr); | ||
| 69 | memcpy(&attr->fw_ver, &dev->attr.fw_ver[0], | ||
| 70 | min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver))); | ||
| 71 | ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid); | ||
| 72 | attr->max_mr_size = ~0ull; | ||
| 73 | attr->page_size_cap = 0xffff000; | ||
| 74 | attr->vendor_id = dev->nic_info.pdev->vendor; | ||
| 75 | attr->vendor_part_id = dev->nic_info.pdev->device; | ||
| 76 | attr->hw_ver = 0; | ||
| 77 | attr->max_qp = dev->attr.max_qp; | ||
| 78 | attr->max_ah = dev->attr.max_qp; | ||
| 79 | attr->max_qp_wr = dev->attr.max_wqe; | ||
| 80 | |||
| 81 | attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD | | ||
| 82 | IB_DEVICE_RC_RNR_NAK_GEN | | ||
| 83 | IB_DEVICE_SHUTDOWN_PORT | | ||
| 84 | IB_DEVICE_SYS_IMAGE_GUID | | ||
| 85 | IB_DEVICE_LOCAL_DMA_LKEY; | ||
| 86 | attr->max_sge = dev->attr.max_send_sge; | ||
| 87 | attr->max_sge_rd = dev->attr.max_send_sge; | ||
| 88 | attr->max_cq = dev->attr.max_cq; | ||
| 89 | attr->max_cqe = dev->attr.max_cqe; | ||
| 90 | attr->max_mr = dev->attr.max_mr; | ||
| 91 | attr->max_mw = 0; | ||
| 92 | attr->max_pd = dev->attr.max_pd; | ||
| 93 | attr->atomic_cap = 0; | ||
| 94 | attr->max_fmr = 0; | ||
| 95 | attr->max_map_per_fmr = 0; | ||
| 96 | attr->max_qp_rd_atom = | ||
| 97 | min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp); | ||
| 98 | attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp; | ||
| 99 | attr->max_srq = (dev->attr.max_qp - 1); | ||
| 100 | attr->max_srq_sge = attr->max_sge; | ||
| 101 | attr->max_srq_wr = dev->attr.max_rqe; | ||
| 102 | attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay; | ||
| 103 | attr->max_fast_reg_page_list_len = 0; | ||
| 104 | attr->max_pkeys = 1; | ||
| 105 | return 0; | ||
| 106 | } | ||
| 107 | |||
| 108 | int ocrdma_query_port(struct ib_device *ibdev, | ||
| 109 | u8 port, struct ib_port_attr *props) | ||
| 110 | { | ||
| 111 | enum ib_port_state port_state; | ||
| 112 | struct ocrdma_dev *dev; | ||
| 113 | struct net_device *netdev; | ||
| 114 | |||
| 115 | dev = get_ocrdma_dev(ibdev); | ||
| 116 | if (port > 1) { | ||
| 117 | ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__, | ||
| 118 | dev->id, port); | ||
| 119 | return -EINVAL; | ||
| 120 | } | ||
| 121 | netdev = dev->nic_info.netdev; | ||
| 122 | if (netif_running(netdev) && netif_oper_up(netdev)) { | ||
| 123 | port_state = IB_PORT_ACTIVE; | ||
| 124 | props->phys_state = 5; | ||
| 125 | } else { | ||
| 126 | port_state = IB_PORT_DOWN; | ||
| 127 | props->phys_state = 3; | ||
| 128 | } | ||
| 129 | props->max_mtu = IB_MTU_4096; | ||
| 130 | props->active_mtu = iboe_get_mtu(netdev->mtu); | ||
| 131 | props->lid = 0; | ||
| 132 | props->lmc = 0; | ||
| 133 | props->sm_lid = 0; | ||
| 134 | props->sm_sl = 0; | ||
| 135 | props->state = port_state; | ||
| 136 | props->port_cap_flags = | ||
| 137 | IB_PORT_CM_SUP | | ||
| 138 | IB_PORT_REINIT_SUP | | ||
| 139 | IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP; | ||
| 140 | props->gid_tbl_len = OCRDMA_MAX_SGID; | ||
| 141 | props->pkey_tbl_len = 1; | ||
| 142 | props->bad_pkey_cntr = 0; | ||
| 143 | props->qkey_viol_cntr = 0; | ||
| 144 | props->active_width = IB_WIDTH_1X; | ||
| 145 | props->active_speed = 4; | ||
| 146 | props->max_msg_sz = 0x80000000; | ||
| 147 | props->max_vl_num = 4; | ||
| 148 | return 0; | ||
| 149 | } | ||
| 150 | |||
| 151 | int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask, | ||
| 152 | struct ib_port_modify *props) | ||
| 153 | { | ||
| 154 | struct ocrdma_dev *dev; | ||
| 155 | |||
| 156 | dev = get_ocrdma_dev(ibdev); | ||
| 157 | if (port > 1) { | ||
| 158 | ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__, | ||
| 159 | dev->id, port); | ||
| 160 | return -EINVAL; | ||
| 161 | } | ||
| 162 | return 0; | ||
| 163 | } | ||
| 164 | |||
| 165 | static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, | ||
| 166 | unsigned long len) | ||
| 167 | { | ||
| 168 | struct ocrdma_mm *mm; | ||
| 169 | |||
| 170 | mm = kzalloc(sizeof(*mm), GFP_KERNEL); | ||
| 171 | if (mm == NULL) | ||
| 172 | return -ENOMEM; | ||
| 173 | mm->key.phy_addr = phy_addr; | ||
| 174 | mm->key.len = len; | ||
| 175 | INIT_LIST_HEAD(&mm->entry); | ||
| 176 | |||
| 177 | mutex_lock(&uctx->mm_list_lock); | ||
| 178 | list_add_tail(&mm->entry, &uctx->mm_head); | ||
| 179 | mutex_unlock(&uctx->mm_list_lock); | ||
| 180 | return 0; | ||
| 181 | } | ||
| 182 | |||
| 183 | static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, | ||
| 184 | unsigned long len) | ||
| 185 | { | ||
| 186 | struct ocrdma_mm *mm, *tmp; | ||
| 187 | |||
| 188 | mutex_lock(&uctx->mm_list_lock); | ||
| 189 | list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) { | ||
| 190 | if (len != mm->key.len || phy_addr != mm->key.phy_addr) | ||
| 191 | continue; | ||
| 192 | |||
| 193 | list_del(&mm->entry); | ||
| 194 | kfree(mm); | ||
| 195 | break; | ||
| 196 | } | ||
| 197 | mutex_unlock(&uctx->mm_list_lock); | ||
| 198 | } | ||
| 199 | |||
| 200 | static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, | ||
| 201 | unsigned long len) | ||
| 202 | { | ||
| 203 | bool found = false; | ||
| 204 | struct ocrdma_mm *mm; | ||
| 205 | |||
| 206 | mutex_lock(&uctx->mm_list_lock); | ||
| 207 | list_for_each_entry(mm, &uctx->mm_head, entry) { | ||
| 208 | if (len != mm->key.len || phy_addr != mm->key.phy_addr) | ||
| 209 | continue; | ||
| 210 | |||
| 211 | found = true; | ||
| 212 | break; | ||
| 213 | } | ||
| 214 | mutex_unlock(&uctx->mm_list_lock); | ||
| 215 | return found; | ||
| 216 | } | ||
| 217 | |||
| 218 | struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev, | ||
| 219 | struct ib_udata *udata) | ||
| 220 | { | ||
| 221 | int status; | ||
| 222 | struct ocrdma_ucontext *ctx; | ||
| 223 | struct ocrdma_alloc_ucontext_resp resp; | ||
| 224 | struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); | ||
| 225 | struct pci_dev *pdev = dev->nic_info.pdev; | ||
| 226 | u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE); | ||
| 227 | |||
| 228 | if (!udata) | ||
| 229 | return ERR_PTR(-EFAULT); | ||
| 230 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); | ||
| 231 | if (!ctx) | ||
| 232 | return ERR_PTR(-ENOMEM); | ||
| 233 | ctx->dev = dev; | ||
| 234 | INIT_LIST_HEAD(&ctx->mm_head); | ||
| 235 | mutex_init(&ctx->mm_list_lock); | ||
| 236 | |||
| 237 | ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len, | ||
| 238 | &ctx->ah_tbl.pa, GFP_KERNEL); | ||
| 239 | if (!ctx->ah_tbl.va) { | ||
| 240 | kfree(ctx); | ||
| 241 | return ERR_PTR(-ENOMEM); | ||
| 242 | } | ||
| 243 | memset(ctx->ah_tbl.va, 0, map_len); | ||
| 244 | ctx->ah_tbl.len = map_len; | ||
| 245 | |||
| 246 | resp.ah_tbl_len = ctx->ah_tbl.len; | ||
| 247 | resp.ah_tbl_page = ctx->ah_tbl.pa; | ||
| 248 | |||
| 249 | status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len); | ||
| 250 | if (status) | ||
| 251 | goto map_err; | ||
| 252 | resp.dev_id = dev->id; | ||
| 253 | resp.max_inline_data = dev->attr.max_inline_data; | ||
| 254 | resp.wqe_size = dev->attr.wqe_size; | ||
| 255 | resp.rqe_size = dev->attr.rqe_size; | ||
| 256 | resp.dpp_wqe_size = dev->attr.wqe_size; | ||
| 257 | resp.rsvd = 0; | ||
| 258 | |||
| 259 | memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver)); | ||
| 260 | status = ib_copy_to_udata(udata, &resp, sizeof(resp)); | ||
| 261 | if (status) | ||
| 262 | goto cpy_err; | ||
| 263 | return &ctx->ibucontext; | ||
| 264 | |||
| 265 | cpy_err: | ||
| 266 | ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len); | ||
| 267 | map_err: | ||
| 268 | dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va, | ||
| 269 | ctx->ah_tbl.pa); | ||
| 270 | kfree(ctx); | ||
| 271 | return ERR_PTR(status); | ||
| 272 | } | ||
| 273 | |||
| 274 | int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx) | ||
| 275 | { | ||
| 276 | struct ocrdma_mm *mm, *tmp; | ||
| 277 | struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx); | ||
| 278 | struct pci_dev *pdev = uctx->dev->nic_info.pdev; | ||
| 279 | |||
| 280 | ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len); | ||
| 281 | dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va, | ||
| 282 | uctx->ah_tbl.pa); | ||
| 283 | |||
| 284 | list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) { | ||
| 285 | list_del(&mm->entry); | ||
| 286 | kfree(mm); | ||
| 287 | } | ||
| 288 | kfree(uctx); | ||
| 289 | return 0; | ||
| 290 | } | ||
| 291 | |||
| 292 | int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) | ||
| 293 | { | ||
| 294 | struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context); | ||
| 295 | struct ocrdma_dev *dev = ucontext->dev; | ||
| 296 | unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT; | ||
| 297 | u64 unmapped_db = (u64) dev->nic_info.unmapped_db; | ||
| 298 | unsigned long len = (vma->vm_end - vma->vm_start); | ||
| 299 | int status = 0; | ||
| 300 | bool found; | ||
| 301 | |||
| 302 | if (vma->vm_start & (PAGE_SIZE - 1)) | ||
| 303 | return -EINVAL; | ||
| 304 | found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len); | ||
| 305 | if (!found) | ||
| 306 | return -EINVAL; | ||
| 307 | |||
| 308 | if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db + | ||
| 309 | dev->nic_info.db_total_size)) && | ||
| 310 | (len <= dev->nic_info.db_page_size)) { | ||
| 311 | /* doorbell mapping */ | ||
| 312 | status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | ||
| 313 | len, vma->vm_page_prot); | ||
| 314 | } else if (dev->nic_info.dpp_unmapped_len && | ||
| 315 | (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) && | ||
| 316 | (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr + | ||
| 317 | dev->nic_info.dpp_unmapped_len)) && | ||
| 318 | (len <= dev->nic_info.dpp_unmapped_len)) { | ||
| 319 | /* dpp area mapping */ | ||
| 320 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | ||
| 321 | status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | ||
| 322 | len, vma->vm_page_prot); | ||
| 323 | } else { | ||
| 324 | /* queue memory mapping */ | ||
| 325 | status = remap_pfn_range(vma, vma->vm_start, | ||
| 326 | vma->vm_pgoff, len, vma->vm_page_prot); | ||
| 327 | } | ||
| 328 | return status; | ||
| 329 | } | ||
| 330 | |||
| 331 | static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd, | ||
| 332 | struct ib_ucontext *ib_ctx, | ||
| 333 | struct ib_udata *udata) | ||
| 334 | { | ||
| 335 | int status; | ||
| 336 | u64 db_page_addr; | ||
| 337 | u64 dpp_page_addr = 0; | ||
| 338 | u32 db_page_size; | ||
| 339 | struct ocrdma_alloc_pd_uresp rsp; | ||
| 340 | struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx); | ||
| 341 | |||
| 342 | rsp.id = pd->id; | ||
| 343 | rsp.dpp_enabled = pd->dpp_enabled; | ||
| 344 | db_page_addr = pd->dev->nic_info.unmapped_db + | ||
| 345 | (pd->id * pd->dev->nic_info.db_page_size); | ||
| 346 | db_page_size = pd->dev->nic_info.db_page_size; | ||
| 347 | |||
| 348 | status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size); | ||
| 349 | if (status) | ||
| 350 | return status; | ||
| 351 | |||
| 352 | if (pd->dpp_enabled) { | ||
| 353 | dpp_page_addr = pd->dev->nic_info.dpp_unmapped_addr + | ||
| 354 | (pd->id * OCRDMA_DPP_PAGE_SIZE); | ||
| 355 | status = ocrdma_add_mmap(uctx, dpp_page_addr, | ||
| 356 | OCRDMA_DPP_PAGE_SIZE); | ||
| 357 | if (status) | ||
| 358 | goto dpp_map_err; | ||
| 359 | rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr); | ||
| 360 | rsp.dpp_page_addr_lo = dpp_page_addr; | ||
| 361 | } | ||
| 362 | |||
| 363 | status = ib_copy_to_udata(udata, &rsp, sizeof(rsp)); | ||
| 364 | if (status) | ||
| 365 | goto ucopy_err; | ||
| 366 | |||
| 367 | pd->uctx = uctx; | ||
| 368 | return 0; | ||
| 369 | |||
| 370 | ucopy_err: | ||
| 371 | if (pd->dpp_enabled) | ||
| 372 | ocrdma_del_mmap(pd->uctx, dpp_page_addr, OCRDMA_DPP_PAGE_SIZE); | ||
| 373 | dpp_map_err: | ||
| 374 | ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size); | ||
| 375 | return status; | ||
| 376 | } | ||
| 377 | |||
| 378 | struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev, | ||
| 379 | struct ib_ucontext *context, | ||
| 380 | struct ib_udata *udata) | ||
| 381 | { | ||
| 382 | struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); | ||
| 383 | struct ocrdma_pd *pd; | ||
| 384 | int status; | ||
| 385 | |||
| 386 | pd = kzalloc(sizeof(*pd), GFP_KERNEL); | ||
| 387 | if (!pd) | ||
| 388 | return ERR_PTR(-ENOMEM); | ||
| 389 | pd->dev = dev; | ||
| 390 | if (udata && context) { | ||
| 391 | pd->dpp_enabled = (dev->nic_info.dev_family == | ||
| 392 | OCRDMA_GEN2_FAMILY); | ||
| 393 | pd->num_dpp_qp = | ||
| 394 | pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0; | ||
| 395 | } | ||
| 396 | status = ocrdma_mbx_alloc_pd(dev, pd); | ||
| 397 | if (status) { | ||
| 398 | kfree(pd); | ||
| 399 | return ERR_PTR(status); | ||
| 400 | } | ||
| 401 | atomic_set(&pd->use_cnt, 0); | ||
| 402 | |||
| 403 | if (udata && context) { | ||
| 404 | status = ocrdma_copy_pd_uresp(pd, context, udata); | ||
| 405 | if (status) | ||
| 406 | goto err; | ||
| 407 | } | ||
| 408 | return &pd->ibpd; | ||
| 409 | |||
| 410 | err: | ||
| 411 | ocrdma_dealloc_pd(&pd->ibpd); | ||
| 412 | return ERR_PTR(status); | ||
| 413 | } | ||
| 414 | |||
| 415 | int ocrdma_dealloc_pd(struct ib_pd *ibpd) | ||
| 416 | { | ||
| 417 | struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); | ||
| 418 | struct ocrdma_dev *dev = pd->dev; | ||
| 419 | int status; | ||
| 420 | u64 usr_db; | ||
| 421 | |||
| 422 | if (atomic_read(&pd->use_cnt)) { | ||
| 423 | ocrdma_err("%s(%d) pd=0x%x is in use.\n", | ||
| 424 | __func__, dev->id, pd->id); | ||
| 425 | status = -EFAULT; | ||
| 426 | goto dealloc_err; | ||
| 427 | } | ||
| 428 | status = ocrdma_mbx_dealloc_pd(dev, pd); | ||
| 429 | if (pd->uctx) { | ||
| 430 | u64 dpp_db = dev->nic_info.dpp_unmapped_addr + | ||
| 431 | (pd->id * OCRDMA_DPP_PAGE_SIZE); | ||
| 432 | if (pd->dpp_enabled) | ||
| 433 | ocrdma_del_mmap(pd->uctx, dpp_db, OCRDMA_DPP_PAGE_SIZE); | ||
| 434 | usr_db = dev->nic_info.unmapped_db + | ||
| 435 | (pd->id * dev->nic_info.db_page_size); | ||
| 436 | ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size); | ||
| 437 | } | ||
| 438 | kfree(pd); | ||
| 439 | dealloc_err: | ||
| 440 | return status; | ||
| 441 | } | ||
| 442 | |||
| 443 | static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd, | ||
| 444 | int acc, u32 num_pbls, | ||
| 445 | u32 addr_check) | ||
| 446 | { | ||
| 447 | int status; | ||
| 448 | struct ocrdma_mr *mr; | ||
| 449 | struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); | ||
| 450 | struct ocrdma_dev *dev = pd->dev; | ||
| 451 | |||
| 452 | if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) { | ||
| 453 | ocrdma_err("%s(%d) leaving err, invalid access rights\n", | ||
| 454 | __func__, dev->id); | ||
| 455 | return ERR_PTR(-EINVAL); | ||
| 456 | } | ||
| 457 | |||
| 458 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); | ||
| 459 | if (!mr) | ||
| 460 | return ERR_PTR(-ENOMEM); | ||
| 461 | mr->hwmr.dev = dev; | ||
| 462 | mr->hwmr.fr_mr = 0; | ||
| 463 | mr->hwmr.local_rd = 1; | ||
| 464 | mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; | ||
| 465 | mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; | ||
| 466 | mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; | ||
| 467 | mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0; | ||
| 468 | mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; | ||
| 469 | mr->hwmr.num_pbls = num_pbls; | ||
| 470 | |||
| 471 | status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pd->id, addr_check); | ||
| 472 | if (status) { | ||
| 473 | kfree(mr); | ||
| 474 | return ERR_PTR(-ENOMEM); | ||
| 475 | } | ||
| 476 | mr->pd = pd; | ||
| 477 | atomic_inc(&pd->use_cnt); | ||
| 478 | mr->ibmr.lkey = mr->hwmr.lkey; | ||
| 479 | if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) | ||
| 480 | mr->ibmr.rkey = mr->hwmr.lkey; | ||
| 481 | return mr; | ||
| 482 | } | ||
| 483 | |||
| 484 | struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc) | ||
| 485 | { | ||
| 486 | struct ocrdma_mr *mr; | ||
| 487 | |||
| 488 | mr = ocrdma_alloc_lkey(ibpd, acc, 0, OCRDMA_ADDR_CHECK_DISABLE); | ||
| 489 | if (IS_ERR(mr)) | ||
| 490 | return ERR_CAST(mr); | ||
| 491 | |||
| 492 | return &mr->ibmr; | ||
| 493 | } | ||
| 494 | |||
| 495 | static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev, | ||
| 496 | struct ocrdma_hw_mr *mr) | ||
| 497 | { | ||
| 498 | struct pci_dev *pdev = dev->nic_info.pdev; | ||
| 499 | int i = 0; | ||
| 500 | |||
| 501 | if (mr->pbl_table) { | ||
| 502 | for (i = 0; i < mr->num_pbls; i++) { | ||
| 503 | if (!mr->pbl_table[i].va) | ||
| 504 | continue; | ||
| 505 | dma_free_coherent(&pdev->dev, mr->pbl_size, | ||
| 506 | mr->pbl_table[i].va, | ||
| 507 | mr->pbl_table[i].pa); | ||
| 508 | } | ||
| 509 | kfree(mr->pbl_table); | ||
| 510 | mr->pbl_table = NULL; | ||
| 511 | } | ||
| 512 | } | ||
| 513 | |||
| 514 | static int ocrdma_get_pbl_info(struct ocrdma_mr *mr, u32 num_pbes) | ||
| 515 | { | ||
| 516 | u32 num_pbls = 0; | ||
| 517 | u32 idx = 0; | ||
| 518 | int status = 0; | ||
| 519 | u32 pbl_size; | ||
| 520 | |||
| 521 | do { | ||
| 522 | pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx); | ||
| 523 | if (pbl_size > MAX_OCRDMA_PBL_SIZE) { | ||
| 524 | status = -EFAULT; | ||
| 525 | break; | ||
| 526 | } | ||
| 527 | num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64))); | ||
| 528 | num_pbls = num_pbls / (pbl_size / sizeof(u64)); | ||
| 529 | idx++; | ||
| 530 | } while (num_pbls >= mr->hwmr.dev->attr.max_num_mr_pbl); | ||
| 531 | |||
| 532 | mr->hwmr.num_pbes = num_pbes; | ||
| 533 | mr->hwmr.num_pbls = num_pbls; | ||
| 534 | mr->hwmr.pbl_size = pbl_size; | ||
| 535 | return status; | ||
| 536 | } | ||
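The loop above searches for the smallest power-of-two PBL size such that the region's PBEs fit in fewer PBLs than the device allows. A standalone restatement with a worked example (the 4 KB and 64 KB bounds are assumptions standing in for OCRDMA_MIN_HPAGE_SIZE and MAX_OCRDMA_PBL_SIZE):

    #include <stdint.h>

    #define DEMO_MIN_PBL_SIZE  4096u   /* assumed OCRDMA_MIN_HPAGE_SIZE */
    #define DEMO_MAX_PBL_SIZE  65536u  /* assumed MAX_OCRDMA_PBL_SIZE */

    /* Pick the smallest PBL size whose PBL count drops below the device
     * limit; each PBL holds pbl_size / 8 64-bit PBEs. */
    static int demo_pbl_info(uint32_t num_pbes, uint32_t max_num_mr_pbl,
                             uint32_t *pbl_size, uint32_t *num_pbls)
    {
            uint32_t sz;

            for (sz = DEMO_MIN_PBL_SIZE; sz <= DEMO_MAX_PBL_SIZE; sz <<= 1) {
                    uint32_t per_pbl = sz / sizeof(uint64_t);
                    uint32_t n = (num_pbes + per_pbl - 1) / per_pbl;

                    if (n < max_num_mr_pbl) {
                            *pbl_size = sz;
                            *num_pbls = n;
                            return 0;
                    }
            }
            return -1;  /* region needs more PBLs than the device supports */
    }

    /* e.g. 10000 pages with a 512-PBL limit: a 4 KB PBL holds 512 PBEs,
     * and ceil(10000 / 512) = 20 PBLs < 512, so 4 KB wins immediately. */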
| 537 | |||
| 538 | static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr) | ||
| 539 | { | ||
| 540 | int status = 0; | ||
| 541 | int i; | ||
| 542 | u32 dma_len = mr->pbl_size; | ||
| 543 | struct pci_dev *pdev = dev->nic_info.pdev; | ||
| 544 | void *va; | ||
| 545 | dma_addr_t pa; | ||
| 546 | |||
| 547 | mr->pbl_table = kcalloc(mr->num_pbls, | ||
| 548 | sizeof(struct ocrdma_pbl), GFP_KERNEL); | ||
| 549 | |||
| 550 | if (!mr->pbl_table) | ||
| 551 | return -ENOMEM; | ||
| 552 | |||
| 553 | for (i = 0; i < mr->num_pbls; i++) { | ||
| 554 | va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL); | ||
| 555 | if (!va) { | ||
| 556 | ocrdma_free_mr_pbl_tbl(dev, mr); | ||
| 557 | status = -ENOMEM; | ||
| 558 | break; | ||
| 559 | } | ||
| 560 | memset(va, 0, dma_len); | ||
| 561 | mr->pbl_table[i].va = va; | ||
| 562 | mr->pbl_table[i].pa = pa; | ||
| 563 | } | ||
| 564 | return status; | ||
| 565 | } | ||
| 566 | |||
| 567 | static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr, | ||
| 568 | u32 num_pbes) | ||
| 569 | { | ||
| 570 | struct ocrdma_pbe *pbe; | ||
| 571 | struct ib_umem_chunk *chunk; | ||
| 572 | struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table; | ||
| 573 | struct ib_umem *umem = mr->umem; | ||
| 574 | int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0; | ||
| 575 | |||
| 576 | if (!mr->hwmr.num_pbes) | ||
| 577 | return; | ||
| 578 | |||
| 579 | pbe = (struct ocrdma_pbe *)pbl_tbl->va; | ||
| 580 | pbe_cnt = 0; | ||
| 581 | |||
| 582 | shift = ilog2(umem->page_size); | ||
| 583 | |||
| 584 | list_for_each_entry(chunk, &umem->chunk_list, list) { | ||
| 585 | /* get all the dma regions from the chunk. */ | ||
| 586 | for (i = 0; i < chunk->nmap; i++) { | ||
| 587 | pages = sg_dma_len(&chunk->page_list[i]) >> shift; | ||
| 588 | for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { | ||
| 589 | /* store the page address in pbe */ | ||
| 590 | pbe->pa_lo = | ||
| 591 | cpu_to_le32(sg_dma_address | ||
| 592 | (&chunk->page_list[i]) + | ||
| 593 | (umem->page_size * pg_cnt)); | ||
| 594 | pbe->pa_hi = | ||
| 595 | cpu_to_le32(upper_32_bits | ||
| 596 | ((sg_dma_address | ||
| 597 | (&chunk->page_list[i]) + | ||
| 598 | umem->page_size * pg_cnt))); | ||
| 599 | pbe_cnt += 1; | ||
| 600 | total_num_pbes += 1; | ||
| 601 | pbe++; | ||
| 602 | |||
| 603 | /* if done building pbes, issue the mbx cmd. */ | ||
| 604 | if (total_num_pbes == num_pbes) | ||
| 605 | return; | ||
| 606 | |||
| 607 | /* if the current pbl is full of pbes, | ||
| 608 | * move on to the next pbl. | ||
| 609 | */ | ||
| 610 | if (pbe_cnt == | ||
| 611 | (mr->hwmr.pbl_size / sizeof(u64))) { | ||
| 612 | pbl_tbl++; | ||
| 613 | pbe = (struct ocrdma_pbe *)pbl_tbl->va; | ||
| 614 | pbe_cnt = 0; | ||
| 615 | } | ||
| 616 | } | ||
| 617 | } | ||
| 618 | } | ||
| 619 | } | ||
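Each PBE simply splits a 64-bit DMA address across two little-endian 32-bit words. A host-order sketch of that split (the driver additionally wraps both halves in cpu_to_le32()):

    #include <stdint.h>

    struct demo_pbe {   /* mirrors struct ocrdma_pbe above */
            uint32_t pa_hi;
            uint32_t pa_lo;
    };

    static void demo_set_pbe(struct demo_pbe *pbe, uint64_t dma_addr)
    {
            pbe->pa_lo = (uint32_t)dma_addr;          /* low 32 bits */
            pbe->pa_hi = (uint32_t)(dma_addr >> 32);  /* upper_32_bits() */
    }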
| 620 | |||
| 621 | struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, | ||
| 622 | u64 usr_addr, int acc, struct ib_udata *udata) | ||
| 623 | { | ||
| 624 | int status = -ENOMEM; | ||
| 625 | struct ocrdma_dev *dev; | ||
| 626 | struct ocrdma_mr *mr; | ||
| 627 | struct ocrdma_pd *pd; | ||
| 628 | u32 num_pbes; | ||
| 629 | |||
| 630 | pd = get_ocrdma_pd(ibpd); | ||
| 631 | dev = pd->dev; | ||
| 632 | |||
| 633 | if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) | ||
| 634 | return ERR_PTR(-EINVAL); | ||
| 635 | |||
| 636 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); | ||
| 637 | if (!mr) | ||
| 638 | return ERR_PTR(status); | ||
| 639 | mr->hwmr.dev = dev; | ||
| 640 | mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0); | ||
| 641 | if (IS_ERR(mr->umem)) { | ||
| 642 | status = -EFAULT; | ||
| 643 | goto umem_err; | ||
| 644 | } | ||
| 645 | num_pbes = ib_umem_page_count(mr->umem); | ||
| 646 | status = ocrdma_get_pbl_info(mr, num_pbes); | ||
| 647 | if (status) | ||
| 648 | goto umem_err; | ||
| 649 | |||
| 650 | mr->hwmr.pbe_size = mr->umem->page_size; | ||
| 651 | mr->hwmr.fbo = mr->umem->offset; | ||
| 652 | mr->hwmr.va = usr_addr; | ||
| 653 | mr->hwmr.len = len; | ||
| 654 | mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0; | ||
| 655 | mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0; | ||
| 656 | mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0; | ||
| 657 | mr->hwmr.local_rd = 1; | ||
| 658 | mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; | ||
| 659 | status = ocrdma_build_pbl_tbl(dev, &mr->hwmr); | ||
| 660 | if (status) | ||
| 661 | goto umem_err; | ||
| 662 | build_user_pbes(dev, mr, num_pbes); | ||
| 663 | status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc); | ||
| 664 | if (status) | ||
| 665 | goto mbx_err; | ||
| 666 | mr->pd = pd; | ||
| 667 | atomic_inc(&pd->use_cnt); | ||
| 668 | mr->ibmr.lkey = mr->hwmr.lkey; | ||
| 669 | if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) | ||
| 670 | mr->ibmr.rkey = mr->hwmr.lkey; | ||
| 671 | |||
| 672 | return &mr->ibmr; | ||
| 673 | |||
| 674 | mbx_err: | ||
| 675 | ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); | ||
| 676 | umem_err: | ||
| 677 | kfree(mr); | ||
| 678 | return ERR_PTR(status); | ||
| 679 | } | ||
| 680 | |||
| 681 | int ocrdma_dereg_mr(struct ib_mr *ib_mr) | ||
| 682 | { | ||
| 683 | struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr); | ||
| 684 | struct ocrdma_dev *dev = mr->hwmr.dev; | ||
| 685 | int status; | ||
| 686 | |||
| 687 | status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey); | ||
| 688 | |||
| 689 | if (mr->hwmr.fr_mr == 0) | ||
| 690 | ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); | ||
| 691 | |||
| 692 | atomic_dec(&mr->pd->use_cnt); | ||
| 693 | /* it could be user registered memory. */ | ||
| 694 | if (mr->umem) | ||
| 695 | ib_umem_release(mr->umem); | ||
| 696 | kfree(mr); | ||
| 697 | return status; | ||
| 698 | } | ||
| 699 | |||
| 700 | static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata, | ||
| 701 | struct ib_ucontext *ib_ctx) | ||
| 702 | { | ||
| 703 | int status; | ||
| 704 | struct ocrdma_ucontext *uctx; | ||
| 705 | struct ocrdma_create_cq_uresp uresp; | ||
| 706 | |||
| 707 | uresp.cq_id = cq->id; | ||
| 708 | uresp.page_size = cq->len; | ||
| 709 | uresp.num_pages = 1; | ||
| 710 | uresp.max_hw_cqe = cq->max_hw_cqe; | ||
| 711 | uresp.page_addr[0] = cq->pa; | ||
| 712 | uresp.db_page_addr = cq->dev->nic_info.unmapped_db; | ||
| 713 | uresp.db_page_size = cq->dev->nic_info.db_page_size; | ||
| 714 | uresp.phase_change = cq->phase_change ? 1 : 0; | ||
| 715 | status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); | ||
| 716 | if (status) { | ||
| 717 | ocrdma_err("%s(%d) copy error cqid=0x%x.\n", | ||
| 718 | __func__, cq->dev->id, cq->id); | ||
| 719 | goto err; | ||
| 720 | } | ||
| 721 | uctx = get_ocrdma_ucontext(ib_ctx); | ||
| 722 | status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size); | ||
| 723 | if (status) | ||
| 724 | goto err; | ||
| 725 | status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size); | ||
| 726 | if (status) { | ||
| 727 | ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size); | ||
| 728 | goto err; | ||
| 729 | } | ||
| 730 | cq->ucontext = uctx; | ||
| 731 | err: | ||
| 732 | return status; | ||
| 733 | } | ||
| 734 | |||
| 735 | struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector, | ||
| 736 | struct ib_ucontext *ib_ctx, | ||
| 737 | struct ib_udata *udata) | ||
| 738 | { | ||
| 739 | struct ocrdma_cq *cq; | ||
| 740 | struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); | ||
| 741 | int status; | ||
| 742 | struct ocrdma_create_cq_ureq ureq; | ||
| 743 | |||
| 744 | if (udata) { | ||
| 745 | if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) | ||
| 746 | return ERR_PTR(-EFAULT); | ||
| 747 | } else | ||
| 748 | ureq.dpp_cq = 0; | ||
| 749 | cq = kzalloc(sizeof(*cq), GFP_KERNEL); | ||
| 750 | if (!cq) | ||
| 751 | return ERR_PTR(-ENOMEM); | ||
| 752 | |||
| 753 | spin_lock_init(&cq->cq_lock); | ||
| 754 | spin_lock_init(&cq->comp_handler_lock); | ||
| 755 | atomic_set(&cq->use_cnt, 0); | ||
| 756 | INIT_LIST_HEAD(&cq->sq_head); | ||
| 757 | INIT_LIST_HEAD(&cq->rq_head); | ||
| 758 | cq->dev = dev; | ||
| 759 | |||
| 760 | status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq); | ||
| 761 | if (status) { | ||
| 762 | kfree(cq); | ||
| 763 | return ERR_PTR(status); | ||
| 764 | } | ||
| 765 | if (ib_ctx) { | ||
| 766 | status = ocrdma_copy_cq_uresp(cq, udata, ib_ctx); | ||
| 767 | if (status) | ||
| 768 | goto ctx_err; | ||
| 769 | } | ||
| 770 | cq->phase = OCRDMA_CQE_VALID; | ||
| 771 | cq->arm_needed = true; | ||
| 772 | dev->cq_tbl[cq->id] = cq; | ||
| 773 | |||
| 774 | return &cq->ibcq; | ||
| 775 | |||
| 776 | ctx_err: | ||
| 777 | ocrdma_mbx_destroy_cq(dev, cq); | ||
| 778 | kfree(cq); | ||
| 779 | return ERR_PTR(status); | ||
| 780 | } | ||
| 781 | |||
| 782 | int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt, | ||
| 783 | struct ib_udata *udata) | ||
| 784 | { | ||
| 785 | int status = 0; | ||
| 786 | struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); | ||
| 787 | |||
| 788 | if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) { | ||
| 789 | status = -EINVAL; | ||
| 790 | return status; | ||
| 791 | } | ||
| 792 | ibcq->cqe = new_cnt; | ||
| 793 | return status; | ||
| 794 | } | ||
| 795 | |||
| 796 | int ocrdma_destroy_cq(struct ib_cq *ibcq) | ||
| 797 | { | ||
| 798 | int status; | ||
| 799 | struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); | ||
| 800 | struct ocrdma_dev *dev = cq->dev; | ||
| 801 | |||
| 802 | if (atomic_read(&cq->use_cnt)) | ||
| 803 | return -EINVAL; | ||
| 804 | |||
| 805 | status = ocrdma_mbx_destroy_cq(dev, cq); | ||
| 806 | |||
| 807 | if (cq->ucontext) { | ||
| 808 | ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, cq->len); | ||
| 809 | ocrdma_del_mmap(cq->ucontext, dev->nic_info.unmapped_db, | ||
| 810 | dev->nic_info.db_page_size); | ||
| 811 | } | ||
| 812 | dev->cq_tbl[cq->id] = NULL; | ||
| 813 | |||
| 814 | kfree(cq); | ||
| 815 | return status; | ||
| 816 | } | ||
| 817 | |||
| 818 | static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) | ||
| 819 | { | ||
| 820 | int status = -EINVAL; | ||
| 821 | |||
| 822 | if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) { | ||
| 823 | dev->qp_tbl[qp->id] = qp; | ||
| 824 | status = 0; | ||
| 825 | } | ||
| 826 | return status; | ||
| 827 | } | ||
| 828 | |||
| 829 | static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) | ||
| 830 | { | ||
| 831 | dev->qp_tbl[qp->id] = NULL; | ||
| 832 | } | ||
| 833 | |||
| 834 | static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev, | ||
| 835 | struct ib_qp_init_attr *attrs) | ||
| 836 | { | ||
| 837 | if (attrs->qp_type != IB_QPT_GSI && | ||
| 838 | attrs->qp_type != IB_QPT_RC && | ||
| 839 | attrs->qp_type != IB_QPT_UD) { | ||
| 840 | ocrdma_err("%s(%d) unsupported qp type=0x%x requested\n", | ||
| 841 | __func__, dev->id, attrs->qp_type); | ||
| 842 | return -EINVAL; | ||
| 843 | } | ||
| 844 | if (attrs->cap.max_send_wr > dev->attr.max_wqe) { | ||
| 845 | ocrdma_err("%s(%d) unsupported send_wr=0x%x requested\n", | ||
| 846 | __func__, dev->id, attrs->cap.max_send_wr); | ||
| 847 | ocrdma_err("%s(%d) supported send_wr=0x%x\n", | ||
| 848 | __func__, dev->id, dev->attr.max_wqe); | ||
| 849 | return -EINVAL; | ||
| 850 | } | ||
| 851 | if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) { | ||
| 852 | ocrdma_err("%s(%d) unsupported recv_wr=0x%x requested\n", | ||
| 853 | __func__, dev->id, attrs->cap.max_recv_wr); | ||
| 854 | ocrdma_err("%s(%d) supported recv_wr=0x%x\n", | ||
| 855 | __func__, dev->id, dev->attr.max_rqe); | ||
| 856 | return -EINVAL; | ||
| 857 | } | ||
| 858 | if (attrs->cap.max_inline_data > dev->attr.max_inline_data) { | ||
| 859 | ocrdma_err("%s(%d) unsupported inline data size=0x%x" | ||
| 860 | " requested\n", __func__, dev->id, | ||
| 861 | attrs->cap.max_inline_data); | ||
| 862 | ocrdma_err("%s(%d) supported inline data size=0x%x\n", | ||
| 863 | __func__, dev->id, dev->attr.max_inline_data); | ||
| 864 | return -EINVAL; | ||
| 865 | } | ||
| 866 | if (attrs->cap.max_send_sge > dev->attr.max_send_sge) { | ||
| 867 | ocrdma_err("%s(%d) unsupported send_sge=0x%x requested\n", | ||
| 868 | __func__, dev->id, attrs->cap.max_send_sge); | ||
| 869 | ocrdma_err("%s(%d) supported send_sge=0x%x\n", | ||
| 870 | __func__, dev->id, dev->attr.max_send_sge); | ||
| 871 | return -EINVAL; | ||
| 872 | } | ||
| 873 | if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) { | ||
| 874 | ocrdma_err("%s(%d) unsupported recv_sge=0x%x requested\n", | ||
| 875 | __func__, dev->id, attrs->cap.max_recv_sge); | ||
| 876 | ocrdma_err("%s(%d) supported recv_sge=0x%x\n", | ||
| 877 | __func__, dev->id, dev->attr.max_recv_sge); | ||
| 878 | return -EINVAL; | ||
| 879 | } | ||
| 880 | /* unprivileged user space cannot create special QP */ | ||
| 881 | if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) { | ||
| 882 | ocrdma_err | ||
| 883 | ("%s(%d) Userspace can't create special QPs of type=0x%x\n", | ||
| 884 | __func__, dev->id, attrs->qp_type); | ||
| 885 | return -EINVAL; | ||
| 886 | } | ||
| 887 | /* allow creating only one QP of the GSI type */ | ||
| 888 | if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) { | ||
| 889 | ocrdma_err("%s(%d) GSI special QPs already created.\n", | ||
| 890 | __func__, dev->id); | ||
| 891 | return -EINVAL; | ||
| 892 | } | ||
| 893 | /* verify consumer QPs are not trying to use GSI QP's CQ */ | ||
| 894 | if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) { | ||
| 895 | if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) || | ||
| 896 | (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) { | ||
| 897 | ocrdma_err("%s(%d) Consumer QP cannot use GSI CQs.\n", | ||
| 898 | __func__, dev->id); | ||
| 899 | return -EINVAL; | ||
| 900 | } | ||
| 901 | } | ||
| 902 | return 0; | ||
| 903 | } | ||
| 904 | |||
| 905 | static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, | ||
| 906 | struct ib_udata *udata, int dpp_offset, | ||
| 907 | int dpp_credit_lmt, int srq) | ||
| 908 | { | ||
| 909 | int status = 0; | ||
| 910 | u64 usr_db; | ||
| 911 | struct ocrdma_create_qp_uresp uresp; | ||
| 912 | struct ocrdma_dev *dev = qp->dev; | ||
| 913 | struct ocrdma_pd *pd = qp->pd; | ||
| 914 | |||
| 915 | memset(&uresp, 0, sizeof(uresp)); | ||
| 916 | usr_db = dev->nic_info.unmapped_db + | ||
| 917 | (pd->id * dev->nic_info.db_page_size); | ||
| 918 | uresp.qp_id = qp->id; | ||
| 919 | uresp.sq_dbid = qp->sq.dbid; | ||
| 920 | uresp.num_sq_pages = 1; | ||
| 921 | uresp.sq_page_size = qp->sq.len; | ||
| 922 | uresp.sq_page_addr[0] = qp->sq.pa; | ||
| 923 | uresp.num_wqe_allocated = qp->sq.max_cnt; | ||
| 924 | if (!srq) { | ||
| 925 | uresp.rq_dbid = qp->rq.dbid; | ||
| 926 | uresp.num_rq_pages = 1; | ||
| 927 | uresp.rq_page_size = qp->rq.len; | ||
| 928 | uresp.rq_page_addr[0] = qp->rq.pa; | ||
| 929 | uresp.num_rqe_allocated = qp->rq.max_cnt; | ||
| 930 | } | ||
| 931 | uresp.db_page_addr = usr_db; | ||
| 932 | uresp.db_page_size = dev->nic_info.db_page_size; | ||
| 933 | if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | ||
| 934 | uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET; | ||
| 935 | uresp.db_rq_offset = ((qp->id & 0xFFFF) < 128) ? | ||
| 936 | OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET; | ||
| 937 | uresp.db_shift = (qp->id < 128) ? 24 : 16; | ||
| 938 | } else { | ||
| 939 | uresp.db_sq_offset = OCRDMA_DB_SQ_OFFSET; | ||
| 940 | uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET; | ||
| 941 | uresp.db_shift = 16; | ||
| 942 | } | ||
| 943 | uresp.free_wqe_delta = qp->sq.free_delta; | ||
| 944 | uresp.free_rqe_delta = qp->rq.free_delta; | ||
| 945 | |||
| 946 | if (qp->dpp_enabled) { | ||
| 947 | uresp.dpp_credit = dpp_credit_lmt; | ||
| 948 | uresp.dpp_offset = dpp_offset; | ||
| 949 | } | ||
| 950 | status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); | ||
| 951 | if (status) { | ||
| 952 | ocrdma_err("%s(%d) user copy error.\n", __func__, dev->id); | ||
| 953 | goto err; | ||
| 954 | } | ||
| 955 | status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0], | ||
| 956 | uresp.sq_page_size); | ||
| 957 | if (status) | ||
| 958 | goto err; | ||
| 959 | |||
| 960 | if (!srq) { | ||
| 961 | status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0], | ||
| 962 | uresp.rq_page_size); | ||
| 963 | if (status) | ||
| 964 | goto rq_map_err; | ||
| 965 | } | ||
| 966 | return status; | ||
| 967 | rq_map_err: | ||
| 968 | ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size); | ||
| 969 | err: | ||
| 970 | return status; | ||
| 971 | } | ||
| 972 | |||
| 973 | static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp, | ||
| 974 | struct ocrdma_pd *pd) | ||
| 975 | { | ||
| 976 | if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | ||
| 977 | qp->sq_db = dev->nic_info.db + | ||
| 978 | (pd->id * dev->nic_info.db_page_size) + | ||
| 979 | OCRDMA_DB_GEN2_SQ_OFFSET; | ||
| 980 | qp->rq_db = dev->nic_info.db + | ||
| 981 | (pd->id * dev->nic_info.db_page_size) + | ||
| 982 | ((qp->id < 128) ? | ||
| 983 | OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET); | ||
| 984 | } else { | ||
| 985 | qp->sq_db = dev->nic_info.db + | ||
| 986 | (pd->id * dev->nic_info.db_page_size) + | ||
| 987 | OCRDMA_DB_SQ_OFFSET; | ||
| 988 | qp->rq_db = dev->nic_info.db + | ||
| 989 | (pd->id * dev->nic_info.db_page_size) + | ||
| 990 | OCRDMA_DB_RQ_OFFSET; | ||
| 991 | } | ||
| 992 | } | ||
| 993 | |||
| 994 | static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp) | ||
| 995 | { | ||
| 996 | qp->wqe_wr_id_tbl = | ||
| 997 | kcalloc(qp->sq.max_cnt, sizeof(*(qp->wqe_wr_id_tbl)), | ||
| 998 | GFP_KERNEL); | ||
| 999 | if (qp->wqe_wr_id_tbl == NULL) | ||
| 1000 | return -ENOMEM; | ||
| 1001 | qp->rqe_wr_id_tbl = | ||
| 1002 | kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL); | ||
| 1003 | if (qp->rqe_wr_id_tbl == NULL) | ||
| 1004 | return -ENOMEM; | ||
| 1005 | |||
| 1006 | return 0; | ||
| 1007 | } | ||
| 1008 | |||
| 1009 | static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp, | ||
| 1010 | struct ocrdma_pd *pd, | ||
| 1011 | struct ib_qp_init_attr *attrs) | ||
| 1012 | { | ||
| 1013 | qp->pd = pd; | ||
| 1014 | spin_lock_init(&qp->q_lock); | ||
| 1015 | INIT_LIST_HEAD(&qp->sq_entry); | ||
| 1016 | INIT_LIST_HEAD(&qp->rq_entry); | ||
| 1017 | |||
| 1018 | qp->qp_type = attrs->qp_type; | ||
| 1019 | qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR; | ||
| 1020 | qp->max_inline_data = attrs->cap.max_inline_data; | ||
| 1021 | qp->sq.max_sges = attrs->cap.max_send_sge; | ||
| 1022 | qp->rq.max_sges = attrs->cap.max_recv_sge; | ||
| 1023 | qp->state = OCRDMA_QPS_RST; | ||
| 1024 | } | ||
| 1025 | |||
| 1026 | static void ocrdma_set_qp_use_cnt(struct ocrdma_qp *qp, struct ocrdma_pd *pd) | ||
| 1027 | { | ||
| 1028 | atomic_inc(&pd->use_cnt); | ||
| 1029 | atomic_inc(&qp->sq_cq->use_cnt); | ||
| 1030 | atomic_inc(&qp->rq_cq->use_cnt); | ||
| 1031 | if (qp->srq) | ||
| 1032 | atomic_inc(&qp->srq->use_cnt); | ||
| 1033 | qp->ibqp.qp_num = qp->id; | ||
| 1034 | } | ||
| 1035 | |||
| 1036 | static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev, | ||
| 1037 | struct ib_qp_init_attr *attrs) | ||
| 1038 | { | ||
| 1039 | if (attrs->qp_type == IB_QPT_GSI) { | ||
| 1040 | dev->gsi_qp_created = 1; | ||
| 1041 | dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq); | ||
| 1042 | dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq); | ||
| 1043 | } | ||
| 1044 | } | ||
| 1045 | |||
| 1046 | struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd, | ||
| 1047 | struct ib_qp_init_attr *attrs, | ||
| 1048 | struct ib_udata *udata) | ||
| 1049 | { | ||
| 1050 | int status; | ||
| 1051 | struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); | ||
| 1052 | struct ocrdma_qp *qp; | ||
| 1053 | struct ocrdma_dev *dev = pd->dev; | ||
| 1054 | struct ocrdma_create_qp_ureq ureq; | ||
| 1055 | u16 dpp_credit_lmt, dpp_offset; | ||
| 1056 | |||
| 1057 | status = ocrdma_check_qp_params(ibpd, dev, attrs); | ||
| 1058 | if (status) | ||
| 1059 | goto gen_err; | ||
| 1060 | |||
| 1061 | memset(&ureq, 0, sizeof(ureq)); | ||
| 1062 | if (udata) { | ||
| 1063 | if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) | ||
| 1064 | return ERR_PTR(-EFAULT); | ||
| 1065 | } | ||
| 1066 | qp = kzalloc(sizeof(*qp), GFP_KERNEL); | ||
| 1067 | if (!qp) { | ||
| 1068 | status = -ENOMEM; | ||
| 1069 | goto gen_err; | ||
| 1070 | } | ||
| 1071 | qp->dev = dev; | ||
| 1072 | ocrdma_set_qp_init_params(qp, pd, attrs); | ||
| 1073 | |||
| 1074 | mutex_lock(&dev->dev_lock); | ||
| 1075 | status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq, | ||
| 1076 | ureq.dpp_cq_id, | ||
| 1077 | &dpp_offset, &dpp_credit_lmt); | ||
| 1078 | if (status) | ||
| 1079 | goto mbx_err; | ||
| 1080 | |||
| 1081 | /* user-space QPs' wr_id tables are managed in the library */ | ||
| 1082 | if (udata == NULL) { | ||
| 1083 | qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 | | ||
| 1084 | OCRDMA_QP_FAST_REG); | ||
| 1085 | status = ocrdma_alloc_wr_id_tbl(qp); | ||
| 1086 | if (status) | ||
| 1087 | goto map_err; | ||
| 1088 | } | ||
| 1089 | |||
| 1090 | status = ocrdma_add_qpn_map(dev, qp); | ||
| 1091 | if (status) | ||
| 1092 | goto map_err; | ||
| 1093 | ocrdma_set_qp_db(dev, qp, pd); | ||
| 1094 | if (udata) { | ||
| 1095 | status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset, | ||
| 1096 | dpp_credit_lmt, | ||
| 1097 | (attrs->srq != NULL)); | ||
| 1098 | if (status) | ||
| 1099 | goto cpy_err; | ||
| 1100 | } | ||
| 1101 | ocrdma_store_gsi_qp_cq(dev, attrs); | ||
| 1102 | ocrdma_set_qp_use_cnt(qp, pd); | ||
| 1103 | mutex_unlock(&dev->dev_lock); | ||
| 1104 | return &qp->ibqp; | ||
| 1105 | |||
| 1106 | cpy_err: | ||
| 1107 | ocrdma_del_qpn_map(dev, qp); | ||
| 1108 | map_err: | ||
| 1109 | ocrdma_mbx_destroy_qp(dev, qp); | ||
| 1110 | mbx_err: | ||
| 1111 | mutex_unlock(&dev->dev_lock); | ||
| 1112 | kfree(qp->wqe_wr_id_tbl); | ||
| 1113 | kfree(qp->rqe_wr_id_tbl); | ||
| 1114 | kfree(qp); | ||
| 1115 | ocrdma_err("%s(%d) error=%d\n", __func__, dev->id, status); | ||
| 1116 | gen_err: | ||
| 1117 | return ERR_PTR(status); | ||
| 1118 | } | ||
| 1119 | |||
| 1120 | int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | ||
| 1121 | int attr_mask) | ||
| 1122 | { | ||
| 1123 | int status = 0; | ||
| 1124 | struct ocrdma_qp *qp; | ||
| 1125 | struct ocrdma_dev *dev; | ||
| 1126 | enum ib_qp_state old_qps; | ||
| 1127 | |||
| 1128 | qp = get_ocrdma_qp(ibqp); | ||
| 1129 | dev = qp->dev; | ||
| 1130 | if (attr_mask & IB_QP_STATE) | ||
| 1131 | status = ocrdma_qp_state_machine(qp, attr->qp_state, &old_qps); | ||
| 1132 | /* if the new and previous states are the same, the hw doesn't | ||
| 1133 | * need to know about it. | ||
| 1134 | */ | ||
| 1135 | if (status < 0) | ||
| 1136 | return status; | ||
| 1137 | status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps); | ||
| 1138 | return status; | ||
| 1139 | } | ||
| 1140 | |||
| 1141 | int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | ||
| 1142 | int attr_mask, struct ib_udata *udata) | ||
| 1143 | { | ||
| 1144 | unsigned long flags; | ||
| 1145 | int status = -EINVAL; | ||
| 1146 | struct ocrdma_qp *qp; | ||
| 1147 | struct ocrdma_dev *dev; | ||
| 1148 | enum ib_qp_state old_qps, new_qps; | ||
| 1149 | |||
| 1150 | qp = get_ocrdma_qp(ibqp); | ||
| 1151 | dev = qp->dev; | ||
| 1152 | |||
| 1153 | /* synchronize with multiple contexts trying to change or retrieve qps */ | ||
| 1154 | mutex_lock(&dev->dev_lock); | ||
| 1155 | /* synchronize with wqe, rqe posting and cqe processing contexts */ | ||
| 1156 | spin_lock_irqsave(&qp->q_lock, flags); | ||
| 1157 | old_qps = get_ibqp_state(qp->state); | ||
| 1158 | if (attr_mask & IB_QP_STATE) | ||
| 1159 | new_qps = attr->qp_state; | ||
| 1160 | else | ||
| 1161 | new_qps = old_qps; | ||
| 1162 | spin_unlock_irqrestore(&qp->q_lock, flags); | ||
| 1163 | |||
| 1164 | if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) { | ||
| 1165 | ocrdma_err("%s(%d) invalid attribute mask=0x%x specified for " | ||
| 1166 | "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n", | ||
| 1167 | __func__, dev->id, attr_mask, qp->id, ibqp->qp_type, | ||
| 1168 | old_qps, new_qps); | ||
| 1169 | goto param_err; | ||
| 1170 | } | ||
| 1171 | |||
| 1172 | status = _ocrdma_modify_qp(ibqp, attr, attr_mask); | ||
| 1173 | if (status > 0) | ||
| 1174 | status = 0; | ||
| 1175 | param_err: | ||
| 1176 | mutex_unlock(&dev->dev_lock); | ||
| 1177 | return status; | ||
| 1178 | } | ||
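For reference, this entry point is reached through the core ib_modify_qp() verb. A minimal consumer-side sketch of the first transition an RC QP makes after creation (standard verbs usage, not code from this driver; ocrdma_modify_qp() above validates the mask via ib_modify_qp_is_ok()):

    #include <rdma/ib_verbs.h>

    /* Move a freshly created RC QP from RESET to INIT. */
    static int demo_qp_to_init(struct ib_qp *qp)
    {
            struct ib_qp_attr attr = {
                    .qp_state        = IB_QPS_INIT,
                    .pkey_index      = 0,
                    .port_num        = 1,
                    .qp_access_flags = IB_ACCESS_LOCAL_WRITE |
                                       IB_ACCESS_REMOTE_WRITE,
            };

            return ib_modify_qp(qp, &attr,
                                IB_QP_STATE | IB_QP_PKEY_INDEX |
                                IB_QP_PORT | IB_QP_ACCESS_FLAGS);
    }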
| 1179 | |||
| 1180 | static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu) | ||
| 1181 | { | ||
| 1182 | switch (mtu) { | ||
| 1183 | case 256: | ||
| 1184 | return IB_MTU_256; | ||
| 1185 | case 512: | ||
| 1186 | return IB_MTU_512; | ||
| 1187 | case 1024: | ||
| 1188 | return IB_MTU_1024; | ||
| 1189 | case 2048: | ||
| 1190 | return IB_MTU_2048; | ||
| 1191 | case 4096: | ||
| 1192 | return IB_MTU_4096; | ||
| 1193 | default: | ||
| 1194 | return IB_MTU_1024; | ||
| 1195 | } | ||
| 1196 | } | ||
| 1197 | |||
| 1198 | static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags) | ||
| 1199 | { | ||
| 1200 | int ib_qp_acc_flags = 0; | ||
| 1201 | |||
| 1202 | if (qp_cap_flags & OCRDMA_QP_INB_WR) | ||
| 1203 | ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE; | ||
| 1204 | if (qp_cap_flags & OCRDMA_QP_INB_RD) | ||
| 1205 | ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE; | ||
| 1206 | return ib_qp_acc_flags; | ||
| 1207 | } | ||
| 1208 | |||
| 1209 | int ocrdma_query_qp(struct ib_qp *ibqp, | ||
| 1210 | struct ib_qp_attr *qp_attr, | ||
| 1211 | int attr_mask, struct ib_qp_init_attr *qp_init_attr) | ||
| 1212 | { | ||
| 1213 | int status; | ||
| 1214 | u32 qp_state; | ||
| 1215 | struct ocrdma_qp_params params; | ||
| 1216 | struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); | ||
| 1217 | struct ocrdma_dev *dev = qp->dev; | ||
| 1218 | |||
| 1219 | memset(¶ms, 0, sizeof(params)); | ||
| 1220 | mutex_lock(&dev->dev_lock); | ||
| 1221 | status = ocrdma_mbx_query_qp(dev, qp, ¶ms); | ||
| 1222 | mutex_unlock(&dev->dev_lock); | ||
| 1223 | if (status) | ||
| 1224 | goto mbx_err; | ||
| 1225 | qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT); | ||
| 1226 | qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT); | ||
| 1227 | qp_attr->path_mtu = | ||
| 1228 | ocrdma_mtu_int_to_enum((params.path_mtu_pkey_indx & | ||
| 1229 | OCRDMA_QP_PARAMS_PATH_MTU_MASK) >> | ||
| 1230 | OCRDMA_QP_PARAMS_PATH_MTU_SHIFT); | ||
| 1231 | qp_attr->path_mig_state = IB_MIG_MIGRATED; | ||
| 1232 | qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK; | ||
| 1233 | qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK; | ||
| 1234 | qp_attr->dest_qp_num = | ||
| 1235 | params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK; | ||
| 1236 | |||
| 1237 | qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags); | ||
| 1238 | qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1; | ||
| 1239 | qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1; | ||
| 1240 | qp_attr->cap.max_send_sge = qp->sq.max_sges; | ||
| 1241 | qp_attr->cap.max_recv_sge = qp->rq.max_sges; | ||
| 1242 | qp_attr->cap.max_inline_data = dev->attr.max_inline_data; | ||
| 1243 | qp_init_attr->cap = qp_attr->cap; | ||
| 1244 | memcpy(&qp_attr->ah_attr.grh.dgid, ¶ms.dgid[0], | ||
| 1245 | sizeof(params.dgid)); | ||
| 1246 | qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl & | ||
| 1247 | OCRDMA_QP_PARAMS_FLOW_LABEL_MASK; | ||
| 1248 | qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx; | ||
| 1249 | qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn & | ||
| 1250 | OCRDMA_QP_PARAMS_HOP_LMT_MASK) >> | ||
| 1251 | OCRDMA_QP_PARAMS_HOP_LMT_SHIFT; | ||
| 1252 | qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn & | ||
| 1253 | OCRDMA_QP_PARAMS_TCLASS_MASK) >> | ||
| 1254 | OCRDMA_QP_PARAMS_TCLASS_SHIFT; | ||
| 1255 | |||
| 1256 | qp_attr->ah_attr.ah_flags = IB_AH_GRH; | ||
| 1257 | qp_attr->ah_attr.port_num = 1; | ||
| 1258 | qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl & | ||
| 1259 | OCRDMA_QP_PARAMS_SL_MASK) >> | ||
| 1260 | OCRDMA_QP_PARAMS_SL_SHIFT; | ||
| 1261 | qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn & | ||
| 1262 | OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >> | ||
| 1263 | OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT; | ||
| 1264 | qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn & | ||
| 1265 | OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >> | ||
| 1266 | OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT; | ||
| 1267 | qp_attr->retry_cnt = | ||
| 1268 | (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >> | ||
| 1269 | OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT; | ||
| 1270 | qp_attr->min_rnr_timer = 0; | ||
| 1271 | qp_attr->pkey_index = 0; | ||
| 1272 | qp_attr->port_num = 1; | ||
| 1273 | qp_attr->ah_attr.src_path_bits = 0; | ||
| 1274 | qp_attr->ah_attr.static_rate = 0; | ||
| 1275 | qp_attr->alt_pkey_index = 0; | ||
| 1276 | qp_attr->alt_port_num = 0; | ||
| 1277 | qp_attr->alt_timeout = 0; | ||
| 1278 | memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr)); | ||
| 1279 | qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >> | ||
| 1280 | OCRDMA_QP_PARAMS_STATE_SHIFT; | ||
| 1281 | qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0; | ||
| 1282 | qp_attr->max_dest_rd_atomic = | ||
| 1283 | params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT; | ||
| 1284 | qp_attr->max_rd_atomic = | ||
| 1285 | params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK; | ||
| 1286 | qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags & | ||
| 1287 | OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0; | ||
| 1288 | mbx_err: | ||
| 1289 | return status; | ||
| 1290 | } | ||
| 1291 | |||
| 1292 | static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx) | ||
| 1293 | { | ||
| 1294 | int i = idx / 32; | ||
| 1295 | unsigned int mask = (1 << (idx % 32)); | ||
| 1296 | |||
| 1297 | if (srq->idx_bit_fields[i] & mask) | ||
| 1298 | srq->idx_bit_fields[i] &= ~mask; | ||
| 1299 | else | ||
| 1300 | srq->idx_bit_fields[i] |= mask; | ||
| 1301 | } | ||
| 1302 | |||
| 1303 | static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q) | ||
| 1304 | { | ||
| 1305 | int free_cnt; | ||
| 1306 | if (q->head >= q->tail) | ||
| 1307 | free_cnt = (q->max_cnt - q->head) + q->tail; | ||
| 1308 | else | ||
| 1309 | free_cnt = q->tail - q->head; | ||
| 1310 | if (q->free_delta) | ||
| 1311 | free_cnt -= q->free_delta; | ||
| 1312 | return free_cnt; | ||
| 1313 | } | ||
| 1314 | |||
| 1315 | static int is_hw_sq_empty(struct ocrdma_qp *qp) | ||
| 1316 | { | ||
| 1317 | return (qp->sq.tail == qp->sq.head && | ||
| 1318 | ocrdma_hwq_free_cnt(&qp->sq)) ? 1 : 0; | ||
| 1319 | } | ||
| 1320 | |||
| 1321 | static int is_hw_rq_empty(struct ocrdma_qp *qp) | ||
| 1322 | { | ||
| 1323 | return (qp->rq.tail == qp->rq.head) ? 1 : 0; | ||
| 1324 | } | ||
| 1325 | |||
| 1326 | static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q) | ||
| 1327 | { | ||
| 1328 | return q->va + (q->head * q->entry_size); | ||
| 1329 | } | ||
| 1330 | |||
| 1331 | static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q, | ||
| 1332 | u32 idx) | ||
| 1333 | { | ||
| 1334 | return q->va + (idx * q->entry_size); | ||
| 1335 | } | ||
| 1336 | |||
| 1337 | static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q) | ||
| 1338 | { | ||
| 1339 | q->head = (q->head + 1) & q->max_wqe_idx; | ||
| 1340 | } | ||
| 1341 | |||
| 1342 | static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q) | ||
| 1343 | { | ||
| 1344 | q->tail = (q->tail + 1) & q->max_wqe_idx; | ||
| 1345 | } | ||
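The four hwq helpers above implement a classic producer/consumer ring: head and tail advance with a power-of-two mask (max_wqe_idx is assumed here to be max_cnt - 1), and the free count subtracts free_delta, the entries the adapter keeps for itself. A minimal user-space model of that accounting, for illustration only:

#include <stdio.h>

struct hwq_model {
	unsigned head;       /* next slot software writes */
	unsigned tail;       /* next slot hardware consumes */
	unsigned max_cnt;    /* assumed to be a power of two */
	unsigned free_delta; /* entries reserved for the adapter */
};

static unsigned hwq_model_free_cnt(const struct hwq_model *q)
{
	unsigned free_cnt;

	if (q->head >= q->tail)
		free_cnt = (q->max_cnt - q->head) + q->tail;
	else
		free_cnt = q->tail - q->head;
	return free_cnt - q->free_delta;
}

int main(void)
{
	struct hwq_model q = { .head = 0, .tail = 0,
			       .max_cnt = 8, .free_delta = 1 };

	/* post three WQEs: head advances with a power-of-two mask */
	for (int i = 0; i < 3; i++)
		q.head = (q.head + 1) & (q.max_cnt - 1);
	/* hardware completes one: tail advances the same way */
	q.tail = (q.tail + 1) & (q.max_cnt - 1);

	printf("free entries: %u\n", hwq_model_free_cnt(&q)); /* 5 */
	return 0;
}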
| 1346 | |||
| 1347 | /* discard the cqes for a given QP */ | ||
| 1348 | static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq) | ||
| 1349 | { | ||
| 1350 | unsigned long cq_flags; | ||
| 1351 | unsigned long flags; | ||
| 1352 | int discard_cnt = 0; | ||
| 1353 | u32 cur_getp, stop_getp; | ||
| 1354 | struct ocrdma_cqe *cqe; | ||
| 1355 | u32 qpn = 0; | ||
| 1356 | |||
| 1357 | spin_lock_irqsave(&cq->cq_lock, cq_flags); | ||
| 1358 | |||
| 1359 | /* traverse the CQEs in the hw CQ, | ||
| 1360 | * find the matching CQEs for the given qp, and | ||
| 1361 | * mark each match discarded by clearing its qpn. | ||
| 1362 | * the doorbell is rung in poll_cq() since | ||
| 1363 | * we don't complete cqes out of order. | ||
| 1364 | */ | ||
| 1365 | |||
| 1366 | cur_getp = cq->getp; | ||
| 1367 | /* find up to where we reap the cq. */ | ||
| 1368 | stop_getp = cur_getp; | ||
| 1369 | do { | ||
| 1370 | if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp))) | ||
| 1371 | break; | ||
| 1372 | |||
| 1373 | cqe = cq->va + cur_getp; | ||
| 1374 | /* exit when (a) we are done reaping the whole hw cq, or | ||
| 1375 | * (b) the qp's sq and rq become empty (checked at the top | ||
| 1376 | * of the loop). | ||
| 1377 | */ | ||
| 1378 | qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK; | ||
| 1379 | /* skip previously discarded cqes (qpn cleared) and | ||
| 1380 | * cqes belonging to a different qp. */ | ||
| 1381 | if (qpn == 0 || qpn != qp->id) | ||
| 1382 | goto skip_cqe; | ||
| 1383 | |||
| 1384 | /* mark cqe discarded so that it is not picked up later | ||
| 1385 | * in the poll_cq(). | ||
| 1386 | */ | ||
| 1387 | discard_cnt += 1; | ||
| 1388 | cqe->cmn.qpn = 0; | ||
| 1389 | if (is_cqe_for_sq(cqe)) | ||
| 1390 | ocrdma_hwq_inc_tail(&qp->sq); | ||
| 1391 | else { | ||
| 1392 | if (qp->srq) { | ||
| 1393 | spin_lock_irqsave(&qp->srq->q_lock, flags); | ||
| 1394 | ocrdma_hwq_inc_tail(&qp->srq->rq); | ||
| 1395 | ocrdma_srq_toggle_bit(qp->srq, cur_getp); | ||
| 1396 | spin_unlock_irqrestore(&qp->srq->q_lock, flags); | ||
| 1397 | |||
| 1398 | } else | ||
| 1399 | ocrdma_hwq_inc_tail(&qp->rq); | ||
| 1400 | } | ||
| 1401 | skip_cqe: | ||
| 1402 | cur_getp = (cur_getp + 1) % cq->max_hw_cqe; | ||
| 1403 | } while (cur_getp != stop_getp); | ||
| 1404 | spin_unlock_irqrestore(&cq->cq_lock, cq_flags); | ||
| 1405 | } | ||
| 1406 | |||
| 1407 | static void ocrdma_del_flush_qp(struct ocrdma_qp *qp) | ||
| 1408 | { | ||
| 1409 | int found = false; | ||
| 1410 | unsigned long flags; | ||
| 1411 | struct ocrdma_dev *dev = qp->dev; | ||
| 1412 | /* sync with any active CQ poll */ | ||
| 1413 | |||
| 1414 | spin_lock_irqsave(&dev->flush_q_lock, flags); | ||
| 1415 | found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp); | ||
| 1416 | if (found) | ||
| 1417 | list_del(&qp->sq_entry); | ||
| 1418 | if (!qp->srq) { | ||
| 1419 | found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp); | ||
| 1420 | if (found) | ||
| 1421 | list_del(&qp->rq_entry); | ||
| 1422 | } | ||
| 1423 | spin_unlock_irqrestore(&dev->flush_q_lock, flags); | ||
| 1424 | } | ||
| 1425 | |||
| 1426 | int ocrdma_destroy_qp(struct ib_qp *ibqp) | ||
| 1427 | { | ||
| 1428 | int status; | ||
| 1429 | struct ocrdma_pd *pd; | ||
| 1430 | struct ocrdma_qp *qp; | ||
| 1431 | struct ocrdma_dev *dev; | ||
| 1432 | struct ib_qp_attr attrs; | ||
| 1433 | int attr_mask = IB_QP_STATE; | ||
| 1434 | unsigned long flags; | ||
| 1435 | |||
| 1436 | qp = get_ocrdma_qp(ibqp); | ||
| 1437 | dev = qp->dev; | ||
| 1438 | |||
| 1439 | attrs.qp_state = IB_QPS_ERR; | ||
| 1440 | pd = qp->pd; | ||
| 1441 | |||
| 1442 | /* change the QP state to ERROR */ | ||
| 1443 | _ocrdma_modify_qp(ibqp, &attrs, attr_mask); | ||
| 1444 | |||
| 1445 | /* ensure that CQEs for a newly created QP (whose id may be | ||
| 1446 | * the same as that of a QP just destroyed) don't get | ||
| 1447 | * discarded until the old QP's CQEs are discarded. | ||
| 1448 | */ | ||
| 1449 | mutex_lock(&dev->dev_lock); | ||
| 1450 | status = ocrdma_mbx_destroy_qp(dev, qp); | ||
| 1451 | |||
| 1452 | /* | ||
| 1453 | * acquire CQ lock while destroy is in progress, in order to | ||
| 1454 | * protect against processing in-flight CQEs for this QP. | ||
| 1455 | */ | ||
| 1456 | spin_lock_irqsave(&qp->sq_cq->cq_lock, flags); | ||
| 1457 | if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) | ||
| 1458 | spin_lock(&qp->rq_cq->cq_lock); | ||
| 1459 | |||
| 1460 | ocrdma_del_qpn_map(dev, qp); | ||
| 1461 | |||
| 1462 | if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) | ||
| 1463 | spin_unlock(&qp->rq_cq->cq_lock); | ||
| 1464 | spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags); | ||
| 1465 | |||
| 1466 | if (!pd->uctx) { | ||
| 1467 | ocrdma_discard_cqes(qp, qp->sq_cq); | ||
| 1468 | ocrdma_discard_cqes(qp, qp->rq_cq); | ||
| 1469 | } | ||
| 1470 | mutex_unlock(&dev->dev_lock); | ||
| 1471 | |||
| 1472 | if (pd->uctx) { | ||
| 1473 | ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, qp->sq.len); | ||
| 1474 | if (!qp->srq) | ||
| 1475 | ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa, qp->rq.len); | ||
| 1476 | } | ||
| 1477 | |||
| 1478 | ocrdma_del_flush_qp(qp); | ||
| 1479 | |||
| 1480 | atomic_dec(&qp->pd->use_cnt); | ||
| 1481 | atomic_dec(&qp->sq_cq->use_cnt); | ||
| 1482 | atomic_dec(&qp->rq_cq->use_cnt); | ||
| 1483 | if (qp->srq) | ||
| 1484 | atomic_dec(&qp->srq->use_cnt); | ||
| 1485 | kfree(qp->wqe_wr_id_tbl); | ||
| 1486 | kfree(qp->rqe_wr_id_tbl); | ||
| 1487 | kfree(qp); | ||
| 1488 | return status; | ||
| 1489 | } | ||
| 1490 | |||
| 1491 | static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata) | ||
| 1492 | { | ||
| 1493 | int status; | ||
| 1494 | struct ocrdma_create_srq_uresp uresp; | ||
| 1495 | |||
| 1496 | uresp.rq_dbid = srq->rq.dbid; | ||
| 1497 | uresp.num_rq_pages = 1; | ||
| 1498 | uresp.rq_page_addr[0] = srq->rq.pa; | ||
| 1499 | uresp.rq_page_size = srq->rq.len; | ||
| 1500 | uresp.db_page_addr = srq->dev->nic_info.unmapped_db + | ||
| 1501 | (srq->pd->id * srq->dev->nic_info.db_page_size); | ||
| 1502 | uresp.db_page_size = srq->dev->nic_info.db_page_size; | ||
| 1503 | uresp.num_rqe_allocated = srq->rq.max_cnt; | ||
| 1504 | uresp.free_rqe_delta = 1; | ||
| 1505 | if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | ||
| 1506 | uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET; | ||
| 1507 | uresp.db_shift = 24; | ||
| 1508 | } else { | ||
| 1509 | uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET; | ||
| 1510 | uresp.db_shift = 16; | ||
| 1511 | } | ||
| 1512 | |||
| 1513 | status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); | ||
| 1514 | if (status) | ||
| 1515 | return status; | ||
| 1516 | status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0], | ||
| 1517 | uresp.rq_page_size); | ||
| 1518 | if (status) | ||
| 1519 | return status; | ||
| 1520 | return status; | ||
| 1521 | } | ||
| 1522 | |||
| 1523 | struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd, | ||
| 1524 | struct ib_srq_init_attr *init_attr, | ||
| 1525 | struct ib_udata *udata) | ||
| 1526 | { | ||
| 1527 | int status = -ENOMEM; | ||
| 1528 | struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); | ||
| 1529 | struct ocrdma_dev *dev = pd->dev; | ||
| 1530 | struct ocrdma_srq *srq; | ||
| 1531 | |||
| 1532 | if (init_attr->attr.max_sge > dev->attr.max_recv_sge) | ||
| 1533 | return ERR_PTR(-EINVAL); | ||
| 1534 | if (init_attr->attr.max_wr > dev->attr.max_rqe) | ||
| 1535 | return ERR_PTR(-EINVAL); | ||
| 1536 | |||
| 1537 | srq = kzalloc(sizeof(*srq), GFP_KERNEL); | ||
| 1538 | if (!srq) | ||
| 1539 | return ERR_PTR(status); | ||
| 1540 | |||
| 1541 | spin_lock_init(&srq->q_lock); | ||
| 1542 | srq->dev = dev; | ||
| 1543 | srq->pd = pd; | ||
| 1544 | srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size); | ||
| 1545 | status = ocrdma_mbx_create_srq(srq, init_attr, pd); | ||
| 1546 | if (status) | ||
| 1547 | goto err; | ||
| 1548 | |||
| 1549 | if (udata == NULL) { | ||
| 1550 | srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt, | ||
| 1551 | GFP_KERNEL); | ||
| 1552 | if (srq->rqe_wr_id_tbl == NULL) | ||
| 1553 | goto arm_err; | ||
| 1554 | |||
| 1555 | srq->bit_fields_len = (srq->rq.max_cnt / 32) + | ||
| 1556 | (srq->rq.max_cnt % 32 ? 1 : 0); | ||
| 1557 | srq->idx_bit_fields = | ||
| 1558 | kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL); | ||
| 1559 | if (srq->idx_bit_fields == NULL) | ||
| 1560 | goto arm_err; | ||
| 1561 | memset(srq->idx_bit_fields, 0xff, | ||
| 1562 | srq->bit_fields_len * sizeof(u32)); | ||
| 1563 | } | ||
| 1564 | |||
| 1565 | if (init_attr->attr.srq_limit) { | ||
| 1566 | status = ocrdma_mbx_modify_srq(srq, &init_attr->attr); | ||
| 1567 | if (status) | ||
| 1568 | goto arm_err; | ||
| 1569 | } | ||
| 1570 | |||
| 1571 | atomic_set(&srq->use_cnt, 0); | ||
| 1572 | if (udata) { | ||
| 1573 | status = ocrdma_copy_srq_uresp(srq, udata); | ||
| 1574 | if (status) | ||
| 1575 | goto arm_err; | ||
| 1576 | } | ||
| 1577 | |||
| 1578 | atomic_inc(&pd->use_cnt); | ||
| 1579 | return &srq->ibsrq; | ||
| 1580 | |||
| 1581 | arm_err: | ||
| 1582 | ocrdma_mbx_destroy_srq(dev, srq); | ||
| 1583 | err: | ||
| 1584 | kfree(srq->rqe_wr_id_tbl); | ||
| 1585 | kfree(srq->idx_bit_fields); | ||
| 1586 | kfree(srq); | ||
| 1587 | return ERR_PTR(status); | ||
| 1588 | } | ||
| 1589 | |||
| 1590 | int ocrdma_modify_srq(struct ib_srq *ibsrq, | ||
| 1591 | struct ib_srq_attr *srq_attr, | ||
| 1592 | enum ib_srq_attr_mask srq_attr_mask, | ||
| 1593 | struct ib_udata *udata) | ||
| 1594 | { | ||
| 1595 | int status = 0; | ||
| 1596 | struct ocrdma_srq *srq; | ||
| 1597 | |||
| 1598 | srq = get_ocrdma_srq(ibsrq); | ||
| 1599 | if (srq_attr_mask & IB_SRQ_MAX_WR) | ||
| 1600 | status = -EINVAL; | ||
| 1601 | else | ||
| 1602 | status = ocrdma_mbx_modify_srq(srq, srq_attr); | ||
| 1603 | return status; | ||
| 1604 | } | ||
| 1605 | |||
| 1606 | int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) | ||
| 1607 | { | ||
| 1608 | int status; | ||
| 1609 | struct ocrdma_srq *srq; | ||
| 1610 | |||
| 1611 | srq = get_ocrdma_srq(ibsrq); | ||
| 1612 | status = ocrdma_mbx_query_srq(srq, srq_attr); | ||
| 1613 | return status; | ||
| 1614 | } | ||
| 1615 | |||
| 1616 | int ocrdma_destroy_srq(struct ib_srq *ibsrq) | ||
| 1617 | { | ||
| 1618 | int status; | ||
| 1619 | struct ocrdma_srq *srq; | ||
| 1620 | struct ocrdma_dev *dev; | ||
| 1621 | |||
| 1622 | srq = get_ocrdma_srq(ibsrq); | ||
| 1623 | dev = srq->dev; | ||
| 1624 | if (atomic_read(&srq->use_cnt)) { | ||
| 1625 | ocrdma_err("%s(%d) err, srq=0x%x in use\n", | ||
| 1626 | __func__, dev->id, srq->id); | ||
| 1627 | return -EAGAIN; | ||
| 1628 | } | ||
| 1629 | |||
| 1630 | status = ocrdma_mbx_destroy_srq(dev, srq); | ||
| 1631 | |||
| 1632 | if (srq->pd->uctx) | ||
| 1633 | ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, srq->rq.len); | ||
| 1634 | |||
| 1635 | atomic_dec(&srq->pd->use_cnt); | ||
| 1636 | kfree(srq->idx_bit_fields); | ||
| 1637 | kfree(srq->rqe_wr_id_tbl); | ||
| 1638 | kfree(srq); | ||
| 1639 | return status; | ||
| 1640 | } | ||
| 1641 | |||
| 1642 | /* unprivileged verbs and their support functions. */ | ||
| 1643 | static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp, | ||
| 1644 | struct ocrdma_hdr_wqe *hdr, | ||
| 1645 | struct ib_send_wr *wr) | ||
| 1646 | { | ||
| 1647 | struct ocrdma_ewqe_ud_hdr *ud_hdr = | ||
| 1648 | (struct ocrdma_ewqe_ud_hdr *)(hdr + 1); | ||
| 1649 | struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah); | ||
| 1650 | |||
| 1651 | ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn; | ||
| 1652 | if (qp->qp_type == IB_QPT_GSI) | ||
| 1653 | ud_hdr->qkey = qp->qkey; | ||
| 1654 | else | ||
| 1655 | ud_hdr->qkey = wr->wr.ud.remote_qkey; | ||
| 1656 | ud_hdr->rsvd_ahid = ah->id; | ||
| 1657 | } | ||
| 1658 | |||
| 1659 | static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr, | ||
| 1660 | struct ocrdma_sge *sge, int num_sge, | ||
| 1661 | struct ib_sge *sg_list) | ||
| 1662 | { | ||
| 1663 | int i; | ||
| 1664 | |||
| 1665 | for (i = 0; i < num_sge; i++) { | ||
| 1666 | sge[i].lrkey = sg_list[i].lkey; | ||
| 1667 | sge[i].addr_lo = sg_list[i].addr; | ||
| 1668 | sge[i].addr_hi = upper_32_bits(sg_list[i].addr); | ||
| 1669 | sge[i].len = sg_list[i].length; | ||
| 1670 | hdr->total_len += sg_list[i].length; | ||
| 1671 | } | ||
| 1672 | if (num_sge == 0) | ||
| 1673 | memset(sge, 0, sizeof(*sge)); | ||
| 1674 | } | ||
| 1675 | |||
| 1676 | static int ocrdma_build_inline_sges(struct ocrdma_qp *qp, | ||
| 1677 | struct ocrdma_hdr_wqe *hdr, | ||
| 1678 | struct ocrdma_sge *sge, | ||
| 1679 | struct ib_send_wr *wr, u32 wqe_size) | ||
| 1680 | { | ||
| 1681 | if (wr->send_flags & IB_SEND_INLINE) { | ||
| 1682 | if (wr->sg_list[0].length > qp->max_inline_data) { | ||
| 1683 | ocrdma_err("%s() supported_len=0x%x," | ||
| 1684 | " unspported len req=0x%x\n", __func__, | ||
| 1685 | qp->max_inline_data, wr->sg_list[0].length); | ||
| 1686 | return -EINVAL; | ||
| 1687 | } | ||
| 1688 | memcpy(sge, | ||
| 1689 | (void *)(unsigned long)wr->sg_list[0].addr, | ||
| 1690 | wr->sg_list[0].length); | ||
| 1691 | hdr->total_len = wr->sg_list[0].length; | ||
| 1692 | wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES); | ||
| 1693 | hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT); | ||
| 1694 | } else { | ||
| 1695 | ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list); | ||
| 1696 | if (wr->num_sge) | ||
| 1697 | wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge)); | ||
| 1698 | else | ||
| 1699 | wqe_size += sizeof(struct ocrdma_sge); | ||
| 1700 | hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT); | ||
| 1701 | } | ||
| 1702 | hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT); | ||
| 1703 | return 0; | ||
| 1704 | } | ||
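ocrdma_build_inline_sges() finishes by packing the WQE size, expressed in OCRDMA_WQE_STRIDE units, into the control word alongside the SGE type, while the callers add the opcode and flag bits. A sketch of that bit-packing; the shift values and mask below are made-up placeholders, not the real OCRDMA_WQE_* constants:

#include <stdint.h>
#include <stdio.h>

enum { OPCODE_SHIFT = 0, FLAGS_SHIFT = 5, TYPE_SHIFT = 16, SIZE_SHIFT = 18 };
enum { WQE_STRIDE = 8 }; /* assumed stride, bytes per size unit */

static uint32_t build_cw(uint32_t opcode, uint32_t flags,
			 uint32_t type, uint32_t wqe_size_bytes)
{
	return (opcode << OPCODE_SHIFT) |
	       (flags << FLAGS_SHIFT) |
	       (type << TYPE_SHIFT) |
	       ((wqe_size_bytes / WQE_STRIDE) << SIZE_SHIFT);
}

int main(void)
{
	/* e.g. a 48-byte send WQE: the size field carries 48/8 = 6 strides */
	uint32_t cw = build_cw(/*opcode*/1, /*signaled*/1, /*lkey type*/0, 48);

	printf("cw=0x%08x size-in-strides=%u\n",
	       cw, (cw >> SIZE_SHIFT) & 0x3f);
	return 0;
}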
| 1705 | |||
| 1706 | static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, | ||
| 1707 | struct ib_send_wr *wr) | ||
| 1708 | { | ||
| 1709 | int status; | ||
| 1710 | struct ocrdma_sge *sge; | ||
| 1711 | u32 wqe_size = sizeof(*hdr); | ||
| 1712 | |||
| 1713 | if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { | ||
| 1714 | ocrdma_build_ud_hdr(qp, hdr, wr); | ||
| 1715 | sge = (struct ocrdma_sge *)(hdr + 2); | ||
| 1716 | wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr); | ||
| 1717 | } else | ||
| 1718 | sge = (struct ocrdma_sge *)(hdr + 1); | ||
| 1719 | |||
| 1720 | status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); | ||
| 1721 | return status; | ||
| 1722 | } | ||
| 1723 | |||
| 1724 | static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, | ||
| 1725 | struct ib_send_wr *wr) | ||
| 1726 | { | ||
| 1727 | int status; | ||
| 1728 | struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1); | ||
| 1729 | struct ocrdma_sge *sge = ext_rw + 1; | ||
| 1730 | u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw); | ||
| 1731 | |||
| 1732 | status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); | ||
| 1733 | if (status) | ||
| 1734 | return status; | ||
| 1735 | ext_rw->addr_lo = wr->wr.rdma.remote_addr; | ||
| 1736 | ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr); | ||
| 1737 | ext_rw->lrkey = wr->wr.rdma.rkey; | ||
| 1738 | ext_rw->len = hdr->total_len; | ||
| 1739 | return 0; | ||
| 1740 | } | ||
| 1741 | |||
| 1742 | static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, | ||
| 1743 | struct ib_send_wr *wr) | ||
| 1744 | { | ||
| 1745 | struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1); | ||
| 1746 | struct ocrdma_sge *sge = ext_rw + 1; | ||
| 1747 | u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) + | ||
| 1748 | sizeof(struct ocrdma_hdr_wqe); | ||
| 1749 | |||
| 1750 | ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list); | ||
| 1751 | hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT); | ||
| 1752 | hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT); | ||
| 1753 | hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT); | ||
| 1754 | |||
| 1755 | ext_rw->addr_lo = wr->wr.rdma.remote_addr; | ||
| 1756 | ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr); | ||
| 1757 | ext_rw->lrkey = wr->wr.rdma.rkey; | ||
| 1758 | ext_rw->len = hdr->total_len; | ||
| 1759 | } | ||
| 1760 | |||
| 1761 | static void ocrdma_ring_sq_db(struct ocrdma_qp *qp) | ||
| 1762 | { | ||
| 1763 | u32 val = qp->sq.dbid | (1 << 16); | ||
| 1764 | |||
| 1765 | iowrite32(val, qp->sq_db); | ||
| 1766 | } | ||
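The doorbell value written above packs the SQ's dbid in the low half and the count of newly posted WQEs (always 1 per loop pass in ocrdma_post_send() below) above bit 16. This layout is inferred from the code, not from a hardware manual; a small model:

#include <stdint.h>
#include <stdio.h>

static uint32_t sq_doorbell(uint16_t dbid, uint16_t num_posted)
{
	return (uint32_t)dbid | ((uint32_t)num_posted << 16);
}

int main(void)
{
	printf("db=0x%08x\n", sq_doorbell(0x0042, 1)); /* 0x00010042 */
	return 0;
}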
| 1767 | |||
| 1768 | int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | ||
| 1769 | struct ib_send_wr **bad_wr) | ||
| 1770 | { | ||
| 1771 | int status = 0; | ||
| 1772 | struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); | ||
| 1773 | struct ocrdma_hdr_wqe *hdr; | ||
| 1774 | unsigned long flags; | ||
| 1775 | |||
| 1776 | spin_lock_irqsave(&qp->q_lock, flags); | ||
| 1777 | if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) { | ||
| 1778 | spin_unlock_irqrestore(&qp->q_lock, flags); | ||
| | *bad_wr = wr; | ||
| 1779 | return -EINVAL; | ||
| 1780 | } | ||
| 1781 | |||
| 1782 | while (wr) { | ||
| 1783 | if (ocrdma_hwq_free_cnt(&qp->sq) == 0 || | ||
| 1784 | wr->num_sge > qp->sq.max_sges) { | ||
| 1785 | status = -ENOMEM; | ||
| 1786 | break; | ||
| 1787 | } | ||
| 1788 | hdr = ocrdma_hwq_head(&qp->sq); | ||
| 1789 | hdr->cw = 0; | ||
| 1790 | if (wr->send_flags & IB_SEND_SIGNALED) | ||
| 1791 | hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT); | ||
| 1792 | if (wr->send_flags & IB_SEND_FENCE) | ||
| 1793 | hdr->cw |= | ||
| 1794 | (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT); | ||
| 1795 | if (wr->send_flags & IB_SEND_SOLICITED) | ||
| 1796 | hdr->cw |= | ||
| 1797 | (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT); | ||
| 1798 | hdr->total_len = 0; | ||
| 1799 | switch (wr->opcode) { | ||
| 1800 | case IB_WR_SEND_WITH_IMM: | ||
| 1801 | hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT); | ||
| 1802 | hdr->immdt = ntohl(wr->ex.imm_data); /* fall through */ | ||
| 1803 | case IB_WR_SEND: | ||
| 1804 | hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT); | ||
| 1805 | status = ocrdma_build_send(qp, hdr, wr); | ||
| 1806 | break; | ||
| 1807 | case IB_WR_SEND_WITH_INV: | ||
| 1808 | hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT); | ||
| 1809 | hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT); | ||
| 1810 | hdr->lkey = wr->ex.invalidate_rkey; | ||
| 1811 | status = ocrdma_build_send(qp, hdr, wr); | ||
| 1812 | break; | ||
| 1813 | case IB_WR_RDMA_WRITE_WITH_IMM: | ||
| 1814 | hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT); | ||
| 1815 | hdr->immdt = ntohl(wr->ex.imm_data); /* fall through */ | ||
| 1816 | case IB_WR_RDMA_WRITE: | ||
| 1817 | hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT); | ||
| 1818 | status = ocrdma_build_write(qp, hdr, wr); | ||
| 1819 | break; | ||
| 1820 | case IB_WR_RDMA_READ_WITH_INV: | ||
| 1821 | hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT); /* fall through */ | ||
| 1822 | case IB_WR_RDMA_READ: | ||
| 1823 | ocrdma_build_read(qp, hdr, wr); | ||
| 1824 | break; | ||
| 1825 | case IB_WR_LOCAL_INV: | ||
| 1826 | hdr->cw |= | ||
| 1827 | (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT); | ||
| 1828 | hdr->cw |= (sizeof(struct ocrdma_hdr_wqe) / | ||
| 1829 | OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT; | ||
| 1830 | hdr->lkey = wr->ex.invalidate_rkey; | ||
| 1831 | break; | ||
| 1832 | default: | ||
| 1833 | status = -EINVAL; | ||
| 1834 | break; | ||
| 1835 | } | ||
| 1836 | if (status) { | ||
| 1837 | *bad_wr = wr; | ||
| 1838 | break; | ||
| 1839 | } | ||
| 1840 | if (wr->send_flags & IB_SEND_SIGNALED) | ||
| 1841 | qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1; | ||
| 1842 | else | ||
| 1843 | qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0; | ||
| 1844 | qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id; | ||
| 1845 | ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) & | ||
| 1846 | OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE); | ||
| 1847 | /* make sure wqe is written before adapter can access it */ | ||
| 1848 | wmb(); | ||
| 1849 | /* inform hw to start processing it */ | ||
| 1850 | ocrdma_ring_sq_db(qp); | ||
| 1851 | |||
| 1852 | /* update pointer, counter for next wr */ | ||
| 1853 | ocrdma_hwq_inc_head(&qp->sq); | ||
| 1854 | wr = wr->next; | ||
| 1855 | } | ||
| 1856 | spin_unlock_irqrestore(&qp->q_lock, flags); | ||
| 1857 | return status; | ||
| 1858 | } | ||
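For context, this is roughly how a kernel consumer would exercise the post path above: build an ib_send_wr around a registered buffer and call ib_post_send(), which dispatches to ocrdma_post_send() for this device. A hedged sketch only; qp, buf_dma and lkey are assumed to come from earlier QP and MR setup:

#include <rdma/ib_verbs.h>

static int example_post_signaled_send(struct ib_qp *qp,
				      u64 buf_dma, u32 lkey, u32 len)
{
	struct ib_sge sge = {
		.addr   = buf_dma,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_send_wr wr = {
		.wr_id      = 0xcafe,	/* returned later in wc.wr_id */
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}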
| 1859 | |||
| 1860 | static void ocrdma_ring_rq_db(struct ocrdma_qp *qp) | ||
| 1861 | { | ||
| 1862 | u32 val = qp->rq.dbid | (1 << OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp)); | ||
| 1863 | |||
| 1864 | iowrite32(val, qp->rq_db); | ||
| 1865 | } | ||
| 1866 | |||
| 1867 | static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr, | ||
| 1868 | u16 tag) | ||
| 1869 | { | ||
| 1870 | u32 wqe_size = 0; | ||
| 1871 | struct ocrdma_sge *sge; | ||
| 1872 | if (wr->num_sge) | ||
| 1873 | wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe); | ||
| 1874 | else | ||
| 1875 | wqe_size = sizeof(*sge) + sizeof(*rqe); | ||
| 1876 | |||
| 1877 | rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) << | ||
| 1878 | OCRDMA_WQE_SIZE_SHIFT); | ||
| 1879 | rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT); | ||
| 1880 | rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT); | ||
| 1881 | rqe->total_len = 0; | ||
| 1882 | rqe->rsvd_tag = tag; | ||
| 1883 | sge = (struct ocrdma_sge *)(rqe + 1); | ||
| 1884 | ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list); | ||
| 1885 | ocrdma_cpu_to_le32(rqe, wqe_size); | ||
| 1886 | } | ||
| 1887 | |||
| 1888 | int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, | ||
| 1889 | struct ib_recv_wr **bad_wr) | ||
| 1890 | { | ||
| 1891 | int status = 0; | ||
| 1892 | unsigned long flags; | ||
| 1893 | struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); | ||
| 1894 | struct ocrdma_hdr_wqe *rqe; | ||
| 1895 | |||
| 1896 | spin_lock_irqsave(&qp->q_lock, flags); | ||
| 1897 | if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) { | ||
| 1898 | spin_unlock_irqrestore(&qp->q_lock, flags); | ||
| 1899 | *bad_wr = wr; | ||
| 1900 | return -EINVAL; | ||
| 1901 | } | ||
| 1902 | while (wr) { | ||
| 1903 | if (ocrdma_hwq_free_cnt(&qp->rq) == 0 || | ||
| 1904 | wr->num_sge > qp->rq.max_sges) { | ||
| 1905 | *bad_wr = wr; | ||
| 1906 | status = -ENOMEM; | ||
| 1907 | break; | ||
| 1908 | } | ||
| 1909 | rqe = ocrdma_hwq_head(&qp->rq); | ||
| 1910 | ocrdma_build_rqe(rqe, wr, 0); | ||
| 1911 | |||
| 1912 | qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id; | ||
| 1913 | /* make sure rqe is written before adapter can access it */ | ||
| 1914 | wmb(); | ||
| 1915 | |||
| 1916 | /* inform hw to start processing it */ | ||
| 1917 | ocrdma_ring_rq_db(qp); | ||
| 1918 | |||
| 1919 | /* update pointer, counter for next wr */ | ||
| 1920 | ocrdma_hwq_inc_head(&qp->rq); | ||
| 1921 | wr = wr->next; | ||
| 1922 | } | ||
| 1923 | spin_unlock_irqrestore(&qp->q_lock, flags); | ||
| 1924 | return status; | ||
| 1925 | } | ||
| 1926 | |||
| 1927 | /* cqes for an srq's rqes can potentially arrive out of order. | ||
| 1928 | * the index gives the entry in the shadow table where the | ||
| 1929 | * wr_id is stored. the tag/index is returned in the cqe to | ||
| 1930 | * reference back to a given rqe. | ||
| 1931 | */ | ||
| 1932 | static int ocrdma_srq_get_idx(struct ocrdma_srq *srq) | ||
| 1933 | { | ||
| 1934 | int row = 0; | ||
| 1935 | int indx = 0; | ||
| 1936 | |||
| 1937 | for (row = 0; row < srq->bit_fields_len; row++) { | ||
| 1938 | if (srq->idx_bit_fields[row]) { | ||
| 1939 | indx = ffs(srq->idx_bit_fields[row]); | ||
| 1940 | indx = (row * 32) + (indx - 1); | ||
| 1941 | if (indx >= srq->rq.max_cnt) | ||
| 1942 | BUG(); | ||
| 1943 | ocrdma_srq_toggle_bit(srq, indx); | ||
| 1944 | break; | ||
| 1945 | } | ||
| 1946 | } | ||
| 1947 | |||
| 1948 | if (row == srq->bit_fields_len) | ||
| 1949 | BUG(); | ||
| 1950 | return indx; | ||
| 1951 | } | ||
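The allocator above treats a set bit as a free shadow-table slot: ffs() locates the first free index, ocrdma_srq_toggle_bit() marks it busy, and the completion path toggles it free again. A standalone model of the same scheme (XOR is used for the toggle, which is equivalent to the if/else above):

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define SLOTS 64
static unsigned int bits[SLOTS / 32] = { 0xffffffff, 0xffffffff };

static void toggle(int idx)
{
	bits[idx / 32] ^= 1u << (idx % 32);
}

static int get_idx(void)
{
	for (int row = 0; row < SLOTS / 32; row++) {
		if (bits[row]) {
			int idx = row * 32 + ffs(bits[row]) - 1;
			toggle(idx);	/* mark in use */
			return idx;
		}
	}
	return -1;		/* table exhausted */
}

int main(void)
{
	int a = get_idx(), b = get_idx();

	printf("tags %d %d\n", a, b);	/* 0 1 */
	toggle(a);			/* completion frees tag 0 */
	printf("next %d\n", get_idx());	/* 0 again */
	return 0;
}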
| 1952 | |||
| 1953 | static void ocrdma_ring_srq_db(struct ocrdma_srq *srq) | ||
| 1954 | { | ||
| 1955 | u32 val = srq->rq.dbid | (1 << 16); | ||
| 1956 | |||
| 1957 | iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET); | ||
| 1958 | } | ||
| 1959 | |||
| 1960 | int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | ||
| 1961 | struct ib_recv_wr **bad_wr) | ||
| 1962 | { | ||
| 1963 | int status = 0; | ||
| 1964 | unsigned long flags; | ||
| 1965 | struct ocrdma_srq *srq; | ||
| 1966 | struct ocrdma_hdr_wqe *rqe; | ||
| 1967 | u16 tag; | ||
| 1968 | |||
| 1969 | srq = get_ocrdma_srq(ibsrq); | ||
| 1970 | |||
| 1971 | spin_lock_irqsave(&srq->q_lock, flags); | ||
| 1972 | while (wr) { | ||
| 1973 | if (ocrdma_hwq_free_cnt(&srq->rq) == 0 || | ||
| 1974 | wr->num_sge > srq->rq.max_sges) { | ||
| 1975 | status = -ENOMEM; | ||
| 1976 | *bad_wr = wr; | ||
| 1977 | break; | ||
| 1978 | } | ||
| 1979 | tag = ocrdma_srq_get_idx(srq); | ||
| 1980 | rqe = ocrdma_hwq_head(&srq->rq); | ||
| 1981 | ocrdma_build_rqe(rqe, wr, tag); | ||
| 1982 | |||
| 1983 | srq->rqe_wr_id_tbl[tag] = wr->wr_id; | ||
| 1984 | /* make sure rqe is written before adapter can perform DMA */ | ||
| 1985 | wmb(); | ||
| 1986 | /* inform hw to start processing it */ | ||
| 1987 | ocrdma_ring_srq_db(srq); | ||
| 1988 | /* update pointer, counter for next wr */ | ||
| 1989 | ocrdma_hwq_inc_head(&srq->rq); | ||
| 1990 | wr = wr->next; | ||
| 1991 | } | ||
| 1992 | spin_unlock_irqrestore(&srq->q_lock, flags); | ||
| 1993 | return status; | ||
| 1994 | } | ||
| 1995 | |||
| 1996 | static enum ib_wc_status ocrdma_to_ibwc_err(u16 status) | ||
| 1997 | { | ||
| 1998 | enum ib_wc_status ibwc_status = IB_WC_GENERAL_ERR; | ||
| 1999 | |||
| 2000 | switch (status) { | ||
| 2001 | case OCRDMA_CQE_GENERAL_ERR: | ||
| 2002 | ibwc_status = IB_WC_GENERAL_ERR; | ||
| 2003 | break; | ||
| 2004 | case OCRDMA_CQE_LOC_LEN_ERR: | ||
| 2005 | ibwc_status = IB_WC_LOC_LEN_ERR; | ||
| 2006 | break; | ||
| 2007 | case OCRDMA_CQE_LOC_QP_OP_ERR: | ||
| 2008 | ibwc_status = IB_WC_LOC_QP_OP_ERR; | ||
| 2009 | break; | ||
| 2010 | case OCRDMA_CQE_LOC_EEC_OP_ERR: | ||
| 2011 | ibwc_status = IB_WC_LOC_EEC_OP_ERR; | ||
| 2012 | break; | ||
| 2013 | case OCRDMA_CQE_LOC_PROT_ERR: | ||
| 2014 | ibwc_status = IB_WC_LOC_PROT_ERR; | ||
| 2015 | break; | ||
| 2016 | case OCRDMA_CQE_WR_FLUSH_ERR: | ||
| 2017 | ibwc_status = IB_WC_WR_FLUSH_ERR; | ||
| 2018 | break; | ||
| 2019 | case OCRDMA_CQE_MW_BIND_ERR: | ||
| 2020 | ibwc_status = IB_WC_MW_BIND_ERR; | ||
| 2021 | break; | ||
| 2022 | case OCRDMA_CQE_BAD_RESP_ERR: | ||
| 2023 | ibwc_status = IB_WC_BAD_RESP_ERR; | ||
| 2024 | break; | ||
| 2025 | case OCRDMA_CQE_LOC_ACCESS_ERR: | ||
| 2026 | ibwc_status = IB_WC_LOC_ACCESS_ERR; | ||
| 2027 | break; | ||
| 2028 | case OCRDMA_CQE_REM_INV_REQ_ERR: | ||
| 2029 | ibwc_status = IB_WC_REM_INV_REQ_ERR; | ||
| 2030 | break; | ||
| 2031 | case OCRDMA_CQE_REM_ACCESS_ERR: | ||
| 2032 | ibwc_status = IB_WC_REM_ACCESS_ERR; | ||
| 2033 | break; | ||
| 2034 | case OCRDMA_CQE_REM_OP_ERR: | ||
| 2035 | ibwc_status = IB_WC_REM_OP_ERR; | ||
| 2036 | break; | ||
| 2037 | case OCRDMA_CQE_RETRY_EXC_ERR: | ||
| 2038 | ibwc_status = IB_WC_RETRY_EXC_ERR; | ||
| 2039 | break; | ||
| 2040 | case OCRDMA_CQE_RNR_RETRY_EXC_ERR: | ||
| 2041 | ibwc_status = IB_WC_RNR_RETRY_EXC_ERR; | ||
| 2042 | break; | ||
| 2043 | case OCRDMA_CQE_LOC_RDD_VIOL_ERR: | ||
| 2044 | ibwc_status = IB_WC_LOC_RDD_VIOL_ERR; | ||
| 2045 | break; | ||
| 2046 | case OCRDMA_CQE_REM_INV_RD_REQ_ERR: | ||
| 2047 | ibwc_status = IB_WC_REM_INV_RD_REQ_ERR; | ||
| 2048 | break; | ||
| 2049 | case OCRDMA_CQE_REM_ABORT_ERR: | ||
| 2050 | ibwc_status = IB_WC_REM_ABORT_ERR; | ||
| 2051 | break; | ||
| 2052 | case OCRDMA_CQE_INV_EECN_ERR: | ||
| 2053 | ibwc_status = IB_WC_INV_EECN_ERR; | ||
| 2054 | break; | ||
| 2055 | case OCRDMA_CQE_INV_EEC_STATE_ERR: | ||
| 2056 | ibwc_status = IB_WC_INV_EEC_STATE_ERR; | ||
| 2057 | break; | ||
| 2058 | case OCRDMA_CQE_FATAL_ERR: | ||
| 2059 | ibwc_status = IB_WC_FATAL_ERR; | ||
| 2060 | break; | ||
| 2061 | case OCRDMA_CQE_RESP_TIMEOUT_ERR: | ||
| 2062 | ibwc_status = IB_WC_RESP_TIMEOUT_ERR; | ||
| 2063 | break; | ||
| 2064 | default: | ||
| 2065 | ibwc_status = IB_WC_GENERAL_ERR; | ||
| 2066 | break; | ||
| 2067 | } | ||
| 2068 | return ibwc_status; | ||
| 2069 | } | ||
| 2070 | |||
| 2071 | static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc, | ||
| 2072 | u32 wqe_idx) | ||
| 2073 | { | ||
| 2074 | struct ocrdma_hdr_wqe *hdr; | ||
| 2075 | struct ocrdma_sge *rw; | ||
| 2076 | int opcode; | ||
| 2077 | |||
| 2078 | hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx); | ||
| 2079 | |||
| 2080 | ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid; | ||
| 2081 | /* Undo the hdr->cw swap */ | ||
| 2082 | opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK; | ||
| 2083 | switch (opcode) { | ||
| 2084 | case OCRDMA_WRITE: | ||
| 2085 | ibwc->opcode = IB_WC_RDMA_WRITE; | ||
| 2086 | break; | ||
| 2087 | case OCRDMA_READ: | ||
| 2088 | rw = (struct ocrdma_sge *)(hdr + 1); | ||
| 2089 | ibwc->opcode = IB_WC_RDMA_READ; | ||
| 2090 | ibwc->byte_len = rw->len; | ||
| 2091 | break; | ||
| 2092 | case OCRDMA_SEND: | ||
| 2093 | ibwc->opcode = IB_WC_SEND; | ||
| 2094 | break; | ||
| 2095 | case OCRDMA_LKEY_INV: | ||
| 2096 | ibwc->opcode = IB_WC_LOCAL_INV; | ||
| 2097 | break; | ||
| 2098 | default: | ||
| 2099 | ibwc->status = IB_WC_GENERAL_ERR; | ||
| 2100 | ocrdma_err("%s() invalid opcode received = 0x%x\n", | ||
| 2101 | __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK); | ||
| 2102 | break; | ||
| 2103 | } | ||
| 2104 | } | ||
| 2105 | |||
| 2106 | static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp, | ||
| 2107 | struct ocrdma_cqe *cqe) | ||
| 2108 | { | ||
| 2109 | if (is_cqe_for_sq(cqe)) { | ||
| 2110 | cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( | ||
| 2111 | cqe->flags_status_srcqpn) & | ||
| 2112 | ~OCRDMA_CQE_STATUS_MASK); | ||
| 2113 | cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( | ||
| 2114 | cqe->flags_status_srcqpn) | | ||
| 2115 | (OCRDMA_CQE_WR_FLUSH_ERR << | ||
| 2116 | OCRDMA_CQE_STATUS_SHIFT)); | ||
| 2117 | } else { | ||
| 2118 | if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { | ||
| 2119 | cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( | ||
| 2120 | cqe->flags_status_srcqpn) & | ||
| 2121 | ~OCRDMA_CQE_UD_STATUS_MASK); | ||
| 2122 | cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( | ||
| 2123 | cqe->flags_status_srcqpn) | | ||
| 2124 | (OCRDMA_CQE_WR_FLUSH_ERR << | ||
| 2125 | OCRDMA_CQE_UD_STATUS_SHIFT)); | ||
| 2126 | } else { | ||
| 2127 | cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( | ||
| 2128 | cqe->flags_status_srcqpn) & | ||
| 2129 | ~OCRDMA_CQE_STATUS_MASK); | ||
| 2130 | cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu( | ||
| 2131 | cqe->flags_status_srcqpn) | | ||
| 2132 | (OCRDMA_CQE_WR_FLUSH_ERR << | ||
| 2133 | OCRDMA_CQE_STATUS_SHIFT)); | ||
| 2134 | } | ||
| 2135 | } | ||
| 2136 | } | ||
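Each branch above performs the same little-endian read-modify-write: bring the word to CPU order, clear the status field, OR in OCRDMA_CQE_WR_FLUSH_ERR, and convert back. A generic sketch of that pattern; the mask and shift values are illustrative, and the endian macros are stubbed out for a little-endian host:

#include <stdint.h>
#include <stdio.h>

#define le32_to_cpu(x) (x)	/* identity on a little-endian host */
#define cpu_to_le32(x) (x)

static uint32_t le32_set_field(uint32_t le_word, uint32_t mask,
			       unsigned shift, uint32_t val)
{
	uint32_t v = le32_to_cpu(le_word);

	v = (v & ~mask) | (val << shift);
	return cpu_to_le32(v);
}

int main(void)
{
	enum { STATUS_MASK = 0xffff0000, STATUS_SHIFT = 16, FLUSH_ERR = 5 };
	uint32_t w = le32_set_field(0x1234abcd, STATUS_MASK,
				    STATUS_SHIFT, FLUSH_ERR);

	printf("0x%08x\n", w);	/* 0x0005abcd */
	return 0;
}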
| 2137 | |||
| 2138 | static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, | ||
| 2139 | struct ocrdma_qp *qp, int status) | ||
| 2140 | { | ||
| 2141 | bool expand = false; | ||
| 2142 | |||
| 2143 | ibwc->byte_len = 0; | ||
| 2144 | ibwc->qp = &qp->ibqp; | ||
| 2145 | ibwc->status = ocrdma_to_ibwc_err(status); | ||
| 2146 | |||
| 2147 | ocrdma_flush_qp(qp); | ||
| 2148 | ocrdma_qp_state_machine(qp, IB_QPS_ERR, NULL); | ||
| 2149 | |||
| 2150 | /* if a wqe/rqe is pending for which a cqe needs to be | ||
| 2151 | * returned, trigger expanding this cqe. | ||
| 2152 | */ | ||
| 2153 | if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) { | ||
| 2154 | expand = true; | ||
| 2155 | ocrdma_set_cqe_status_flushed(qp, cqe); | ||
| 2156 | } | ||
| 2157 | return expand; | ||
| 2158 | } | ||
| 2159 | |||
| 2160 | static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, | ||
| 2161 | struct ocrdma_qp *qp, int status) | ||
| 2162 | { | ||
| 2163 | ibwc->opcode = IB_WC_RECV; | ||
| 2164 | ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; | ||
| 2165 | ocrdma_hwq_inc_tail(&qp->rq); | ||
| 2166 | |||
| 2167 | return ocrdma_update_err_cqe(ibwc, cqe, qp, status); | ||
| 2168 | } | ||
| 2169 | |||
| 2170 | static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, | ||
| 2171 | struct ocrdma_qp *qp, int status) | ||
| 2172 | { | ||
| 2173 | ocrdma_update_wc(qp, ibwc, qp->sq.tail); | ||
| 2174 | ocrdma_hwq_inc_tail(&qp->sq); | ||
| 2175 | |||
| 2176 | return ocrdma_update_err_cqe(ibwc, cqe, qp, status); | ||
| 2177 | } | ||
| 2178 | |||
| 2179 | |||
| 2180 | static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp, | ||
| 2181 | struct ocrdma_cqe *cqe, struct ib_wc *ibwc, | ||
| 2182 | bool *polled, bool *stop) | ||
| 2183 | { | ||
| 2184 | bool expand; | ||
| 2185 | int status = (le32_to_cpu(cqe->flags_status_srcqpn) & | ||
| 2186 | OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; | ||
| 2187 | |||
| 2188 | /* when the hw sq is empty but the rq is not, keep the cqe | ||
| 2189 | * in order to get the cq event again. | ||
| 2190 | */ | ||
| 2191 | if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) { | ||
| 2192 | /* when cq for rq and sq is same, it is safe to return | ||
| 2193 | * flush cqe for RQEs. | ||
| 2194 | */ | ||
| 2195 | if (!qp->srq && (qp->sq_cq == qp->rq_cq)) { | ||
| 2196 | *polled = true; | ||
| 2197 | status = OCRDMA_CQE_WR_FLUSH_ERR; | ||
| 2198 | expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); | ||
| 2199 | } else { | ||
| 2200 | /* stop processing further cqe as this cqe is used for | ||
| 2201 | * triggering cq event on buddy cq of RQ. | ||
| 2202 | * When QP is destroyed, this cqe will be removed | ||
| 2203 | * from the cq's hardware q. | ||
| 2204 | */ | ||
| 2205 | *polled = false; | ||
| 2206 | *stop = true; | ||
| 2207 | expand = false; | ||
| 2208 | } | ||
| 2209 | } else { | ||
| 2210 | *polled = true; | ||
| 2211 | expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status); | ||
| 2212 | } | ||
| 2213 | return expand; | ||
| 2214 | } | ||
| 2215 | |||
| 2216 | static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp, | ||
| 2217 | struct ocrdma_cqe *cqe, | ||
| 2218 | struct ib_wc *ibwc, bool *polled) | ||
| 2219 | { | ||
| 2220 | bool expand = false; | ||
| 2221 | int tail = qp->sq.tail; | ||
| 2222 | u32 wqe_idx; | ||
| 2223 | |||
| 2224 | if (!qp->wqe_wr_id_tbl[tail].signaled) { | ||
| 2225 | expand = true; /* CQE cannot be consumed yet */ | ||
| 2226 | *polled = false; /* WC cannot be consumed yet */ | ||
| 2227 | } else { | ||
| 2228 | ibwc->status = IB_WC_SUCCESS; | ||
| 2229 | ibwc->wc_flags = 0; | ||
| 2230 | ibwc->qp = &qp->ibqp; | ||
| 2231 | ocrdma_update_wc(qp, ibwc, tail); | ||
| 2232 | *polled = true; | ||
| 2233 | wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK; | ||
| 2234 | if (tail != wqe_idx) | ||
| 2235 | expand = true; /* Coalesced CQE can't be consumed yet */ | ||
| 2236 | } | ||
| 2237 | ocrdma_hwq_inc_tail(&qp->sq); | ||
| 2238 | return expand; | ||
| 2239 | } | ||
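The tail walk above handles two cases with one mechanism: unsignaled WQEs consume no work completion, and a coalesced CQE may report completion for several WQEs at once, so the same CQE is "expanded" (re-polled) until sq.tail catches up with the reported wqe_idx. A toy model of that loop:

#include <stdbool.h>
#include <stdio.h>

#define QSZ 8
static bool signaled[QSZ] = { true, false, false, true };
static unsigned tail;

/* returns true if the CQE must be kept (expanded) for another pass */
static bool consume_scqe(unsigned wqe_idx, bool *emitted_wc)
{
	bool expand;

	if (!signaled[tail]) {
		expand = true;		/* no WC for an unsignaled WQE */
		*emitted_wc = false;
	} else {
		*emitted_wc = true;
		expand = (tail != wqe_idx); /* coalesced: more to drain */
	}
	tail = (tail + 1) % QSZ;
	return expand;
}

int main(void)
{
	bool wc;

	/* one CQE covering WQEs 0..3 */
	while (consume_scqe(3, &wc))
		printf("pass: tail=%u wc=%d\n", tail, wc);
	printf("done: tail=%u wc=%d\n", tail, wc);
	return 0;
}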
| 2240 | |||
| 2241 | static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, | ||
| 2242 | struct ib_wc *ibwc, bool *polled, bool *stop) | ||
| 2243 | { | ||
| 2244 | int status; | ||
| 2245 | bool expand; | ||
| 2246 | |||
| 2247 | status = (le32_to_cpu(cqe->flags_status_srcqpn) & | ||
| 2248 | OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; | ||
| 2249 | |||
| 2250 | if (status == OCRDMA_CQE_SUCCESS) | ||
| 2251 | expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled); | ||
| 2252 | else | ||
| 2253 | expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop); | ||
| 2254 | return expand; | ||
| 2255 | } | ||
| 2256 | |||
| 2257 | static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe) | ||
| 2258 | { | ||
| 2259 | int status; | ||
| 2260 | |||
| 2261 | status = (le32_to_cpu(cqe->flags_status_srcqpn) & | ||
| 2262 | OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT; | ||
| 2263 | ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) & | ||
| 2264 | OCRDMA_CQE_SRCQP_MASK; | ||
| 2265 | ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) & | ||
| 2266 | OCRDMA_CQE_PKEY_MASK; | ||
| 2267 | ibwc->wc_flags = IB_WC_GRH; | ||
| 2268 | ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >> | ||
| 2269 | OCRDMA_CQE_UD_XFER_LEN_SHIFT); | ||
| 2270 | return status; | ||
| 2271 | } | ||
| 2272 | |||
| 2273 | static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc, | ||
| 2274 | struct ocrdma_cqe *cqe, | ||
| 2275 | struct ocrdma_qp *qp) | ||
| 2276 | { | ||
| 2277 | unsigned long flags; | ||
| 2278 | struct ocrdma_srq *srq; | ||
| 2279 | u32 wqe_idx; | ||
| 2280 | |||
| 2281 | srq = get_ocrdma_srq(qp->ibqp.srq); | ||
| 2282 | wqe_idx = le32_to_cpu(cqe->rq.buftag_qpn) >> OCRDMA_CQE_BUFTAG_SHIFT; | ||
| 2283 | ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx]; | ||
| 2284 | spin_lock_irqsave(&srq->q_lock, flags); | ||
| 2285 | ocrdma_srq_toggle_bit(srq, wqe_idx); | ||
| 2286 | spin_unlock_irqrestore(&srq->q_lock, flags); | ||
| 2287 | ocrdma_hwq_inc_tail(&srq->rq); | ||
| 2288 | } | ||
| 2289 | |||
| 2290 | static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, | ||
| 2291 | struct ib_wc *ibwc, bool *polled, bool *stop, | ||
| 2292 | int status) | ||
| 2293 | { | ||
| 2294 | bool expand; | ||
| 2295 | |||
| 2296 | /* when the hw rq is empty but the sq is not, keep the cqe | ||
| 2297 | * to get the cq event again. | ||
| 2298 | */ | ||
| 2299 | if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) { | ||
| 2300 | if (!qp->srq && (qp->sq_cq == qp->rq_cq)) { | ||
| 2301 | *polled = true; | ||
| 2302 | status = OCRDMA_CQE_WR_FLUSH_ERR; | ||
| 2303 | expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status); | ||
| 2304 | } else { | ||
| 2305 | *polled = false; | ||
| 2306 | *stop = true; | ||
| 2307 | expand = false; | ||
| 2308 | } | ||
| 2309 | } else | ||
| 2310 | expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); | ||
| 2311 | return expand; | ||
| 2312 | } | ||
| 2313 | |||
| 2314 | static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp, | ||
| 2315 | struct ocrdma_cqe *cqe, struct ib_wc *ibwc) | ||
| 2316 | { | ||
| 2317 | ibwc->opcode = IB_WC_RECV; | ||
| 2318 | ibwc->qp = &qp->ibqp; | ||
| 2319 | ibwc->status = IB_WC_SUCCESS; | ||
| 2320 | |||
| 2321 | if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) | ||
| 2322 | ocrdma_update_ud_rcqe(ibwc, cqe); | ||
| 2323 | else | ||
| 2324 | ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen); | ||
| 2325 | |||
| 2326 | if (is_cqe_imm(cqe)) { | ||
| 2327 | ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt)); | ||
| 2328 | ibwc->wc_flags |= IB_WC_WITH_IMM; | ||
| 2329 | } else if (is_cqe_wr_imm(cqe)) { | ||
| 2330 | ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM; | ||
| 2331 | ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt)); | ||
| 2332 | ibwc->wc_flags |= IB_WC_WITH_IMM; | ||
| 2333 | } else if (is_cqe_invalidated(cqe)) { | ||
| 2334 | ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt); | ||
| 2335 | ibwc->wc_flags |= IB_WC_WITH_INVALIDATE; | ||
| 2336 | } | ||
| 2337 | if (qp->ibqp.srq) | ||
| 2338 | ocrdma_update_free_srq_cqe(ibwc, cqe, qp); | ||
| 2339 | else { | ||
| 2340 | ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; | ||
| 2341 | ocrdma_hwq_inc_tail(&qp->rq); | ||
| 2342 | } | ||
| 2343 | } | ||
| 2344 | |||
| 2345 | static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, | ||
| 2346 | struct ib_wc *ibwc, bool *polled, bool *stop) | ||
| 2347 | { | ||
| 2348 | int status; | ||
| 2349 | bool expand = false; | ||
| 2350 | |||
| 2351 | ibwc->wc_flags = 0; | ||
| 2352 | if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) | ||
| 2353 | status = (le32_to_cpu(cqe->flags_status_srcqpn) & | ||
| 2354 | OCRDMA_CQE_UD_STATUS_MASK) >> | ||
| 2355 | OCRDMA_CQE_UD_STATUS_SHIFT; | ||
| 2356 | else | ||
| 2357 | status = (le32_to_cpu(cqe->flags_status_srcqpn) & | ||
| 2358 | OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; | ||
| 2359 | |||
| 2360 | if (status == OCRDMA_CQE_SUCCESS) { | ||
| 2361 | *polled = true; | ||
| 2362 | ocrdma_poll_success_rcqe(qp, cqe, ibwc); | ||
| 2363 | } else { | ||
| 2364 | expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop, | ||
| 2365 | status); | ||
| 2366 | } | ||
| 2367 | return expand; | ||
| 2368 | } | ||
| 2369 | |||
| 2370 | static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe, | ||
| 2371 | u16 cur_getp) | ||
| 2372 | { | ||
| 2373 | if (cq->phase_change) { | ||
| 2374 | if (cur_getp == 0) | ||
| 2375 | cq->phase = (~cq->phase & OCRDMA_CQE_VALID); | ||
| 2376 | } else | ||
| 2377 | /* clear valid bit */ | ||
| 2378 | cqe->flags_status_srcqpn = 0; | ||
| 2379 | } | ||
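ocrdma_change_cq_phase() implements the usual valid-bit/phase protocol: when the CQ supports phase change, the consumer flips its expected polarity each time it wraps past index 0, so the previous lap's entries read as invalid without ever being zeroed. A small model of the idea:

#include <stdio.h>

#define CQ_SZ 4
static unsigned char valid_bit[CQ_SZ];
static unsigned getp, phase = 1;

static int cqe_is_valid(unsigned idx) { return valid_bit[idx] == phase; }

static void consumer_advance(void)
{
	getp = (getp + 1) % CQ_SZ;
	if (getp == 0)
		phase ^= 1;	/* wrapped: expect opposite polarity */
}

int main(void)
{
	/* producer fills one full lap with phase 1 */
	for (int i = 0; i < CQ_SZ; i++)
		valid_bit[i] = 1;

	for (int i = 0; i < CQ_SZ; i++) {
		printf("idx %u valid=%d\n", getp, cqe_is_valid(getp));
		consumer_advance();
	}
	/* second lap: old entries now test invalid until rewritten */
	printf("idx %u valid=%d\n", getp, cqe_is_valid(getp));
	return 0;
}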
| 2380 | |||
| 2381 | static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries, | ||
| 2382 | struct ib_wc *ibwc) | ||
| 2383 | { | ||
| 2384 | u16 qpn = 0; | ||
| 2385 | int i = 0; | ||
| 2386 | bool expand = false; | ||
| 2387 | int polled_hw_cqes = 0; | ||
| 2388 | struct ocrdma_qp *qp = NULL; | ||
| 2389 | struct ocrdma_dev *dev = cq->dev; | ||
| 2390 | struct ocrdma_cqe *cqe; | ||
| 2391 | u16 cur_getp; | ||
| | bool polled = false, stop = false; | ||
| 2392 | |||
| 2393 | cur_getp = cq->getp; | ||
| 2394 | while (num_entries) { | ||
| 2395 | cqe = cq->va + cur_getp; | ||
| 2396 | /* check whether valid cqe or not */ | ||
| 2397 | if (!is_cqe_valid(cq, cqe)) | ||
| 2398 | break; | ||
| 2399 | qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK); | ||
| 2400 | /* ignore discarded cqe */ | ||
| 2401 | if (qpn == 0) | ||
| 2402 | goto skip_cqe; | ||
| 2403 | qp = dev->qp_tbl[qpn]; | ||
| 2404 | BUG_ON(qp == NULL); | ||
| 2405 | |||
| 2406 | if (is_cqe_for_sq(cqe)) { | ||
| 2407 | expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled, | ||
| 2408 | &stop); | ||
| 2409 | } else { | ||
| 2410 | expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled, | ||
| 2411 | &stop); | ||
| 2412 | } | ||
| 2413 | if (expand) | ||
| 2414 | goto expand_cqe; | ||
| 2415 | if (stop) | ||
| 2416 | goto stop_cqe; | ||
| 2417 | /* clear qpn to avoid duplicate processing by discard_cqe() */ | ||
| 2418 | cqe->cmn.qpn = 0; | ||
| 2419 | skip_cqe: | ||
| 2420 | polled_hw_cqes += 1; | ||
| 2421 | cur_getp = (cur_getp + 1) % cq->max_hw_cqe; | ||
| 2422 | ocrdma_change_cq_phase(cq, cqe, cur_getp); | ||
| 2423 | expand_cqe: | ||
| 2424 | if (polled) { | ||
| 2425 | num_entries -= 1; | ||
| 2426 | i += 1; | ||
| 2427 | ibwc = ibwc + 1; | ||
| 2428 | polled = false; | ||
| 2429 | } | ||
| 2430 | } | ||
| 2431 | stop_cqe: | ||
| 2432 | cq->getp = cur_getp; | ||
| 2433 | if (polled_hw_cqes || expand || stop) { | ||
| 2434 | ocrdma_ring_cq_db(dev, cq->id, cq->armed, cq->solicited, | ||
| 2435 | polled_hw_cqes); | ||
| 2436 | } | ||
| 2437 | return i; | ||
| 2438 | } | ||
| 2439 | |||
| 2440 | /* insert error cqes if the QP's SQ CQ or RQ CQ matches the CQ under poll. */ | ||
| 2441 | static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries, | ||
| 2442 | struct ocrdma_qp *qp, struct ib_wc *ibwc) | ||
| 2443 | { | ||
| 2444 | int err_cqes = 0; | ||
| 2445 | |||
| 2446 | while (num_entries) { | ||
| 2447 | if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp)) | ||
| 2448 | break; | ||
| 2449 | if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) { | ||
| 2450 | ocrdma_update_wc(qp, ibwc, qp->sq.tail); | ||
| 2451 | ocrdma_hwq_inc_tail(&qp->sq); | ||
| 2452 | } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) { | ||
| 2453 | ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; | ||
| 2454 | ocrdma_hwq_inc_tail(&qp->rq); | ||
| 2455 | } else | ||
| 2456 | return err_cqes; | ||
| 2457 | ibwc->byte_len = 0; | ||
| 2458 | ibwc->status = IB_WC_WR_FLUSH_ERR; | ||
| 2459 | ibwc = ibwc + 1; | ||
| 2460 | err_cqes += 1; | ||
| 2461 | num_entries -= 1; | ||
| 2462 | } | ||
| 2463 | return err_cqes; | ||
| 2464 | } | ||
| 2465 | |||
| 2466 | int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) | ||
| 2467 | { | ||
| 2468 | int cqes_to_poll = num_entries; | ||
| 2469 | struct ocrdma_cq *cq = NULL; | ||
| 2470 | unsigned long flags; | ||
| 2471 | struct ocrdma_dev *dev; | ||
| 2472 | int num_os_cqe = 0, err_cqes = 0; | ||
| 2473 | struct ocrdma_qp *qp; | ||
| 2474 | |||
| 2475 | cq = get_ocrdma_cq(ibcq); | ||
| 2476 | dev = cq->dev; | ||
| 2477 | |||
| 2478 | /* poll cqes from adapter CQ */ | ||
| 2479 | spin_lock_irqsave(&cq->cq_lock, flags); | ||
| 2480 | num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc); | ||
| 2481 | spin_unlock_irqrestore(&cq->cq_lock, flags); | ||
| 2482 | cqes_to_poll -= num_os_cqe; | ||
| 2483 | |||
| 2484 | if (cqes_to_poll) { | ||
| 2485 | wc = wc + num_os_cqe; | ||
| 2486 | /* the adapter returns a single error cqe when the qp moves | ||
| 2487 | * to the error state. so insert error cqes with wc_status | ||
| 2488 | * set to FLUSHED for the pending WQEs and RQEs of each QP | ||
| 2489 | * whose SQ or RQ uses this CQ. | ||
| 2490 | */ | ||
| 2491 | spin_lock_irqsave(&dev->flush_q_lock, flags); | ||
| 2492 | list_for_each_entry(qp, &cq->sq_head, sq_entry) { | ||
| 2493 | if (cqes_to_poll == 0) | ||
| 2494 | break; | ||
| 2495 | err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc); | ||
| 2496 | cqes_to_poll -= err_cqes; | ||
| 2497 | num_os_cqe += err_cqes; | ||
| 2498 | wc = wc + err_cqes; | ||
| 2499 | } | ||
| 2500 | spin_unlock_irqrestore(&dev->flush_q_lock, flags); | ||
| 2501 | } | ||
| 2502 | return num_os_cqe; | ||
| 2503 | } | ||
| 2504 | |||
| 2505 | int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags) | ||
| 2506 | { | ||
| 2507 | struct ocrdma_cq *cq; | ||
| 2508 | unsigned long flags; | ||
| 2509 | struct ocrdma_dev *dev; | ||
| 2510 | u16 cq_id; | ||
| 2511 | u16 cur_getp; | ||
| 2512 | struct ocrdma_cqe *cqe; | ||
| 2513 | |||
| 2514 | cq = get_ocrdma_cq(ibcq); | ||
| 2515 | cq_id = cq->id; | ||
| 2516 | dev = cq->dev; | ||
| 2517 | |||
| 2518 | spin_lock_irqsave(&cq->cq_lock, flags); | ||
| 2519 | if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED) | ||
| 2520 | cq->armed = true; | ||
| 2521 | if (cq_flags & IB_CQ_SOLICITED) | ||
| 2522 | cq->solicited = true; | ||
| 2523 | |||
| 2524 | cur_getp = cq->getp; | ||
| 2525 | cqe = cq->va + cur_getp; | ||
| 2526 | |||
| 2527 | /* check whether any valid cqe exists; if not, it is safe to | ||
| 2528 | * arm. if a cqe is not yet consumed, let it be consumed first | ||
| 2529 | * and arm afterwards to avoid false interrupts. | ||
| 2530 | */ | ||
| 2531 | if (!is_cqe_valid(cq, cqe) || cq->arm_needed) { | ||
| 2532 | cq->arm_needed = false; | ||
| 2533 | ocrdma_ring_cq_db(dev, cq_id, cq->armed, cq->solicited, 0); | ||
| 2534 | } | ||
| 2535 | spin_unlock_irqrestore(&cq->cq_lock, flags); | ||
| 2536 | return 0; | ||
| 2537 | } | ||
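A typical consumer drives the two entry points above in an arm/poll loop: drain completions with ib_poll_cq(), re-arm via ib_req_notify_cq() (which lands in ocrdma_arm_cq()), then poll once more to close the race between the last poll and the arm. A hedged kernel-style sketch, with error handling trimmed:

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

static void example_cq_event(struct ib_cq *cq, void *ctx)
{
	struct ib_wc wc;

	/* drain everything currently in the CQ */
	while (ib_poll_cq(cq, 1, &wc) > 0)
		pr_info("wr_id=%llu status=%d\n", wc.wr_id, wc.status);

	/* re-arm; ocrdma_arm_cq() rings the doorbell only when safe */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	/* poll again for completions that raced with the re-arm */
	while (ib_poll_cq(cq, 1, &wc) > 0)
		pr_info("late wr_id=%llu status=%d\n", wc.wr_id, wc.status);
}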
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h new file mode 100644 index 000000000000..e6483439f25f --- /dev/null +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h | |||
| @@ -0,0 +1,94 @@ | |||
| 1 | /******************************************************************* | ||
| 2 | * This file is part of the Emulex RoCE Device Driver for * | ||
| 3 | * RoCE (RDMA over Converged Ethernet) adapters. * | ||
| 4 | * Copyright (C) 2008-2012 Emulex. All rights reserved. * | ||
| 5 | * EMULEX and SLI are trademarks of Emulex. * | ||
| 6 | * www.emulex.com * | ||
| 7 | * * | ||
| 8 | * This program is free software; you can redistribute it and/or * | ||
| 9 | * modify it under the terms of version 2 of the GNU General * | ||
| 10 | * Public License as published by the Free Software Foundation. * | ||
| 11 | * This program is distributed in the hope that it will be useful. * | ||
| 12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | ||
| 13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | ||
| 15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | ||
| 16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | ||
| 17 | * more details, a copy of which can be found in the file COPYING * | ||
| 18 | * included with this package. * | ||
| 19 | * | ||
| 20 | * Contact Information: | ||
| 21 | * linux-drivers@emulex.com | ||
| 22 | * | ||
| 23 | * Emulex | ||
| 24 | * 3333 Susan Street | ||
| 25 | * Costa Mesa, CA 92626 | ||
| 26 | *******************************************************************/ | ||
| 27 | |||
| 28 | #ifndef __OCRDMA_VERBS_H__ | ||
| 29 | #define __OCRDMA_VERBS_H__ | ||
| 30 | |||
| 31 | #include <linux/version.h> | ||
| 32 | int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *, | ||
| 33 | struct ib_send_wr **bad_wr); | ||
| 34 | int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *, | ||
| 35 | struct ib_recv_wr **bad_wr); | ||
| 36 | |||
| 37 | int ocrdma_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc); | ||
| 38 | int ocrdma_arm_cq(struct ib_cq *, enum ib_cq_notify_flags flags); | ||
| 39 | |||
| 40 | int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props); | ||
| 41 | int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props); | ||
| 42 | int ocrdma_modify_port(struct ib_device *, u8 port, int mask, | ||
| 43 | struct ib_port_modify *props); | ||
| 44 | |||
| 45 | void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid); | ||
| 46 | int ocrdma_query_gid(struct ib_device *, u8 port, | ||
| 47 | int index, union ib_gid *gid); | ||
| 48 | int ocrdma_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey); | ||
| 49 | |||
| 50 | struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *, | ||
| 51 | struct ib_udata *); | ||
| 52 | int ocrdma_dealloc_ucontext(struct ib_ucontext *); | ||
| 53 | |||
| 54 | int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma); | ||
| 55 | |||
| 56 | struct ib_pd *ocrdma_alloc_pd(struct ib_device *, | ||
| 57 | struct ib_ucontext *, struct ib_udata *); | ||
| 58 | int ocrdma_dealloc_pd(struct ib_pd *pd); | ||
| 59 | |||
| 60 | struct ib_cq *ocrdma_create_cq(struct ib_device *, int entries, int vector, | ||
| 61 | struct ib_ucontext *, struct ib_udata *); | ||
| 62 | int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); | ||
| 63 | int ocrdma_destroy_cq(struct ib_cq *); | ||
| 64 | |||
| 65 | struct ib_qp *ocrdma_create_qp(struct ib_pd *, | ||
| 66 | struct ib_qp_init_attr *attrs, | ||
| 67 | struct ib_udata *); | ||
| 68 | int _ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr, | ||
| 69 | int attr_mask); | ||
| 70 | int ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr, | ||
| 71 | int attr_mask, struct ib_udata *udata); | ||
| 72 | int ocrdma_query_qp(struct ib_qp *, | ||
| 73 | struct ib_qp_attr *qp_attr, | ||
| 74 | int qp_attr_mask, struct ib_qp_init_attr *); | ||
| 75 | int ocrdma_destroy_qp(struct ib_qp *); | ||
| 76 | |||
| 77 | struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *, | ||
| 78 | struct ib_udata *); | ||
| 79 | int ocrdma_modify_srq(struct ib_srq *, struct ib_srq_attr *, | ||
| 80 | enum ib_srq_attr_mask, struct ib_udata *); | ||
| 81 | int ocrdma_query_srq(struct ib_srq *, struct ib_srq_attr *); | ||
| 82 | int ocrdma_destroy_srq(struct ib_srq *); | ||
| 83 | int ocrdma_post_srq_recv(struct ib_srq *, struct ib_recv_wr *, | ||
| 84 | struct ib_recv_wr **bad_recv_wr); | ||
| 85 | |||
| 86 | int ocrdma_dereg_mr(struct ib_mr *); | ||
| 87 | struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *, int acc); | ||
| 88 | struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *, | ||
| 89 | struct ib_phys_buf *buffer_list, | ||
| 90 | int num_phys_buf, int acc, u64 *iova_start); | ||
| 91 | struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length, | ||
| 92 | u64 virt, int acc, struct ib_udata *); | ||
| 93 | |||
| 94 | #endif /* __OCRDMA_VERBS_H__ */ | ||
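
The header above is only the prototype list for ocrdma's verbs entry points; the post_send/post_recv pairs follow the usual ib_verbs error-reporting convention, where a failed post hands the first rejected work request back through *bad_wr so the caller knows how much of the chain was accepted. A minimal userspace sketch of that convention, with mock types standing in for the real ones from <rdma/ib_verbs.h>:

#include <stdio.h>

/* Mock of the bad_wr convention used by ocrdma_post_send() and
 * ocrdma_post_recv(); types and the -22 (-EINVAL) value are
 * illustrative stand-ins, not the driver's real code. */
struct send_wr {
	int wr_id;
	struct send_wr *next;
};

static int mock_post_send(struct send_wr *wr, struct send_wr **bad_wr)
{
	for (; wr; wr = wr->next) {
		if (wr->wr_id < 0) {	/* pretend this entry is invalid */
			*bad_wr = wr;	/* report the first rejected WR */
			return -22;	/* -EINVAL */
		}
	}
	return 0;
}

int main(void)
{
	struct send_wr b = { -1, NULL }, a = { 1, &b };
	struct send_wr *bad = NULL;

	if (mock_post_send(&a, &bad))
		printf("failed at wr_id %d\n", bad->wr_id);
	return 0;
}
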
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index 6b811e3e8bd1..7e62f4137148 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h | |||
| @@ -530,8 +530,6 @@ struct qib_pportdata { | |||
| 530 | /* qib_lflags driver is waiting for */ | 530 | /* qib_lflags driver is waiting for */ |
| 531 | u32 state_wanted; | 531 | u32 state_wanted; |
| 532 | spinlock_t lflags_lock; | 532 | spinlock_t lflags_lock; |
| 533 | /* number of (port-specific) interrupts for this port -- saturates... */ | ||
| 534 | u32 int_counter; | ||
| 535 | 533 | ||
| 536 | /* ref count for each pkey */ | 534 | /* ref count for each pkey */ |
| 537 | atomic_t pkeyrefs[4]; | 535 | atomic_t pkeyrefs[4]; |
| @@ -543,24 +541,26 @@ struct qib_pportdata { | |||
| 543 | u64 *statusp; | 541 | u64 *statusp; |
| 544 | 542 | ||
| 545 | /* SendDMA related entries */ | 543 | /* SendDMA related entries */ |
| 546 | spinlock_t sdma_lock; | 544 | |
| 547 | struct qib_sdma_state sdma_state; | 545 | /* read mostly */ |
| 548 | unsigned long sdma_buf_jiffies; | ||
| 549 | struct qib_sdma_desc *sdma_descq; | 546 | struct qib_sdma_desc *sdma_descq; |
| 547 | struct qib_sdma_state sdma_state; | ||
| 548 | dma_addr_t sdma_descq_phys; | ||
| 549 | volatile __le64 *sdma_head_dma; /* DMA'ed by chip */ | ||
| 550 | dma_addr_t sdma_head_phys; | ||
| 551 | u16 sdma_descq_cnt; | ||
| 552 | |||
| 553 | /* read/write using lock */ | ||
| 554 | spinlock_t sdma_lock ____cacheline_aligned_in_smp; | ||
| 555 | struct list_head sdma_activelist; | ||
| 550 | u64 sdma_descq_added; | 556 | u64 sdma_descq_added; |
| 551 | u64 sdma_descq_removed; | 557 | u64 sdma_descq_removed; |
| 552 | u16 sdma_descq_cnt; | ||
| 553 | u16 sdma_descq_tail; | 558 | u16 sdma_descq_tail; |
| 554 | u16 sdma_descq_head; | 559 | u16 sdma_descq_head; |
| 555 | u16 sdma_next_intr; | ||
| 556 | u16 sdma_reset_wait; | ||
| 557 | u8 sdma_generation; | 560 | u8 sdma_generation; |
| 558 | struct tasklet_struct sdma_sw_clean_up_task; | ||
| 559 | struct list_head sdma_activelist; | ||
| 560 | 561 | ||
| 561 | dma_addr_t sdma_descq_phys; | 562 | struct tasklet_struct sdma_sw_clean_up_task |
| 562 | volatile __le64 *sdma_head_dma; /* DMA'ed by chip */ | 563 | ____cacheline_aligned_in_smp; |
| 563 | dma_addr_t sdma_head_phys; | ||
| 564 | 564 | ||
| 565 | wait_queue_head_t state_wait; /* for state_wanted */ | 565 | wait_queue_head_t state_wait; /* for state_wanted */ |
| 566 | 566 | ||
| @@ -873,7 +873,14 @@ struct qib_devdata { | |||
| 873 | * pio_writing. | 873 | * pio_writing. |
| 874 | */ | 874 | */ |
| 875 | spinlock_t pioavail_lock; | 875 | spinlock_t pioavail_lock; |
| 876 | 876 | /* | |
| 877 | * index of last buffer to optimize search for next | ||
| 878 | */ | ||
| 879 | u32 last_pio; | ||
| 880 | /* | ||
| 881 | * min kernel pio buffer to optimize search | ||
| 882 | */ | ||
| 883 | u32 min_kernel_pio; | ||
| 877 | /* | 884 | /* |
| 878 | * Shadow copies of registers; size indicates read access size. | 885 | * Shadow copies of registers; size indicates read access size. |
| 879 | * Most of them are readonly, but some are write-only registers, | 886 | * Most of them are readonly, but some are write-only registers, |
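
The qib.h reshuffle above is a false-sharing fix: the SDMA fields are regrouped into a read-mostly block and a lock-protected read/write block, with the lock and the cleanup tasklet pushed onto their own cache lines, and last_pio/min_kernel_pio are added for the PIO search optimization that qib_tx.c uses later in this series. A small userspace sketch of the alignment idea, assuming a 64-byte line size (the kernel derives the real value per architecture):

#include <stdio.h>
#include <stddef.h>

/* Userspace stand-in for the kernel's ____cacheline_aligned_in_smp;
 * the 64-byte line size is an assumption. */
#define CACHELINE_ALIGNED __attribute__((aligned(64)))

struct demo_port {
	/* read mostly: written at init, read in the hot path */
	void *sdma_descq;
	unsigned short sdma_descq_cnt;

	/* read/write under lock: starts on its own cache line, so
	 * writers here do not bounce the read-mostly line above */
	int sdma_lock CACHELINE_ALIGNED;
	unsigned long long sdma_descq_added;
};

int main(void)
{
	printf("sdma_descq at %zu, sdma_lock at %zu\n",
	       offsetof(struct demo_port, sdma_descq),
	       offsetof(struct demo_port, sdma_lock));
	return 0;
}
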
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c index 6fc9365ba8a6..8895cfec5019 100644 --- a/drivers/infiniband/hw/qib/qib_driver.c +++ b/drivers/infiniband/hw/qib/qib_driver.c | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | #include <linux/netdevice.h> | 38 | #include <linux/netdevice.h> |
| 39 | #include <linux/vmalloc.h> | 39 | #include <linux/vmalloc.h> |
| 40 | #include <linux/module.h> | 40 | #include <linux/module.h> |
| 41 | #include <linux/prefetch.h> | ||
| 41 | 42 | ||
| 42 | #include "qib.h" | 43 | #include "qib.h" |
| 43 | 44 | ||
| @@ -481,8 +482,10 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts) | |||
| 481 | etail = qib_hdrget_index(rhf_addr); | 482 | etail = qib_hdrget_index(rhf_addr); |
| 482 | updegr = 1; | 483 | updegr = 1; |
| 483 | if (tlen > sizeof(*hdr) || | 484 | if (tlen > sizeof(*hdr) || |
| 484 | etype >= RCVHQ_RCV_TYPE_NON_KD) | 485 | etype >= RCVHQ_RCV_TYPE_NON_KD) { |
| 485 | ebuf = qib_get_egrbuf(rcd, etail); | 486 | ebuf = qib_get_egrbuf(rcd, etail); |
| 487 | prefetch_range(ebuf, tlen - sizeof(*hdr)); | ||
| 488 | } | ||
| 486 | } | 489 | } |
| 487 | if (!eflags) { | 490 | if (!eflags) { |
| 488 | u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2; | 491 | u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2; |
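
The qib_kreceive() change starts prefetching the eager buffer as soon as its address is known, overlapping the memory latency with the remaining header processing. A userspace approximation of prefetch_range(), assuming a 64-byte cache-line stride (the kernel steps by the architecture's line size):

#include <stddef.h>

/* Issue a read prefetch for each cache line in [addr, addr + len);
 * a sketch of what the kernel's prefetch_range() does. */
static void prefetch_range(const void *addr, size_t len)
{
	const char *p = addr, *end = p + len;

	for (; p < end; p += 64)
		__builtin_prefetch(p, 0 /* read */, 3 /* keep cached */);
}

int main(void)
{
	static char payload[4096];

	prefetch_range(payload, sizeof(payload)); /* overlaps with parsing */
	return payload[0];
}
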
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index d0c64d514813..4d352b90750a 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c | |||
| @@ -3132,6 +3132,7 @@ static void get_6120_chip_params(struct qib_devdata *dd) | |||
| 3132 | val = qib_read_kreg64(dd, kr_sendpiobufcnt); | 3132 | val = qib_read_kreg64(dd, kr_sendpiobufcnt); |
| 3133 | dd->piobcnt2k = val & ~0U; | 3133 | dd->piobcnt2k = val & ~0U; |
| 3134 | dd->piobcnt4k = val >> 32; | 3134 | dd->piobcnt4k = val >> 32; |
| 3135 | dd->last_pio = dd->piobcnt4k + dd->piobcnt2k - 1; | ||
| 3135 | /* these may be adjusted in init_chip_wc_pat() */ | 3136 | /* these may be adjusted in init_chip_wc_pat() */ |
| 3136 | dd->pio2kbase = (u32 __iomem *) | 3137 | dd->pio2kbase = (u32 __iomem *) |
| 3137 | (((char __iomem *)dd->kregbase) + dd->pio2k_bufbase); | 3138 | (((char __iomem *)dd->kregbase) + dd->pio2k_bufbase); |
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index 3c722f79d6f6..86a0ba7ca0c2 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c | |||
| @@ -4157,6 +4157,7 @@ static int qib_init_7220_variables(struct qib_devdata *dd) | |||
| 4157 | dd->cspec->sdmabufcnt; | 4157 | dd->cspec->sdmabufcnt; |
| 4158 | dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs; | 4158 | dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs; |
| 4159 | dd->cspec->lastbuf_for_pio--; /* range is <= , not < */ | 4159 | dd->cspec->lastbuf_for_pio--; /* range is <= , not < */ |
| 4160 | dd->last_pio = dd->cspec->lastbuf_for_pio; | ||
| 4160 | dd->pbufsctxt = dd->lastctxt_piobuf / | 4161 | dd->pbufsctxt = dd->lastctxt_piobuf / |
| 4161 | (dd->cfgctxts - dd->first_user_ctxt); | 4162 | (dd->cfgctxts - dd->first_user_ctxt); |
| 4162 | 4163 | ||
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 060b96064469..c881e744c091 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c | |||
| @@ -6379,6 +6379,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd) | |||
| 6379 | dd->cspec->sdmabufcnt; | 6379 | dd->cspec->sdmabufcnt; |
| 6380 | dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs; | 6380 | dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs; |
| 6381 | dd->cspec->lastbuf_for_pio--; /* range is <= , not < */ | 6381 | dd->cspec->lastbuf_for_pio--; /* range is <= , not < */ |
| 6382 | dd->last_pio = dd->cspec->lastbuf_for_pio; | ||
| 6382 | dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ? | 6383 | dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ? |
| 6383 | dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0; | 6384 | dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0; |
| 6384 | 6385 | ||
| @@ -7708,7 +7709,7 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd) | |||
| 7708 | ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0)); | 7709 | ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0)); |
| 7709 | msleep(20); | 7710 | msleep(20); |
| 7710 | /* Set Frequency Loop Bandwidth */ | 7711 | /* Set Frequency Loop Bandwidth */ |
| 7711 | ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5)); | 7712 | ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5)); |
| 7712 | /* Enable Frequency Loop */ | 7713 | /* Enable Frequency Loop */ |
| 7713 | ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4)); | 7714 | ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4)); |
| 7714 | /* Set Timing Loop Bandwidth */ | 7715 | /* Set Timing Loop Bandwidth */ |
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index cf0cd30adc8d..dc14e100a7f1 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c | |||
| @@ -102,6 +102,8 @@ void qib_set_ctxtcnt(struct qib_devdata *dd) | |||
| 102 | dd->cfgctxts = qib_cfgctxts; | 102 | dd->cfgctxts = qib_cfgctxts; |
| 103 | else | 103 | else |
| 104 | dd->cfgctxts = dd->ctxtcnt; | 104 | dd->cfgctxts = dd->ctxtcnt; |
| 105 | dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 : | ||
| 106 | dd->cfgctxts - dd->first_user_ctxt; | ||
| 105 | } | 107 | } |
| 106 | 108 | ||
| 107 | /* | 109 | /* |
| @@ -402,7 +404,6 @@ static void enable_chip(struct qib_devdata *dd) | |||
| 402 | if (rcd) | 404 | if (rcd) |
| 403 | dd->f_rcvctrl(rcd->ppd, rcvmask, i); | 405 | dd->f_rcvctrl(rcd->ppd, rcvmask, i); |
| 404 | } | 406 | } |
| 405 | dd->freectxts = dd->cfgctxts - dd->first_user_ctxt; | ||
| 406 | } | 407 | } |
| 407 | 408 | ||
| 408 | static void verify_interrupt(unsigned long opaque) | 409 | static void verify_interrupt(unsigned long opaque) |
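
Moving the freectxts computation into qib_set_ctxtcnt() also adds a clamp: first_user_ctxt can exceed cfgctxts (for instance with cfgctxts forced to 1 on a single-port board, the case the qib_sysfs.c hunk below calls out), and an unchecked u32 subtraction would wrap to a huge context count. A two-line demonstration of the wraparound being guarded against, with made-up values:

#include <stdio.h>

/* With unsigned arithmetic, 1 - 2 is not -1 but 0xffffffff;
 * the ternary clamp avoids advertising ~4 billion free contexts. */
int main(void)
{
	unsigned int cfgctxts = 1, first_user_ctxt = 2;
	unsigned int unguarded = cfgctxts - first_user_ctxt;
	unsigned int guarded = (first_user_ctxt > cfgctxts) ?
				0 : cfgctxts - first_user_ctxt;

	printf("unguarded: %u\nguarded:   %u\n", unguarded, guarded);
	return 0;
}
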
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index c4ff788823b5..43390217a026 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c | |||
| @@ -396,6 +396,7 @@ static int get_linkdowndefaultstate(struct qib_pportdata *ppd) | |||
| 396 | 396 | ||
| 397 | static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags) | 397 | static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags) |
| 398 | { | 398 | { |
| 399 | int valid_mkey = 0; | ||
| 399 | int ret = 0; | 400 | int ret = 0; |
| 400 | 401 | ||
| 401 | /* Is the mkey in the process of expiring? */ | 402 | /* Is the mkey in the process of expiring? */ |
| @@ -406,23 +407,36 @@ static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags) | |||
| 406 | ibp->mkeyprot = 0; | 407 | ibp->mkeyprot = 0; |
| 407 | } | 408 | } |
| 408 | 409 | ||
| 409 | /* M_Key checking depends on Portinfo:M_Key_protect_bits */ | 410 | if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->mkey == 0 || |
| 410 | if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 && | 411 | ibp->mkey == smp->mkey) |
| 411 | ibp->mkey != smp->mkey && | 412 | valid_mkey = 1; |
| 412 | (smp->method == IB_MGMT_METHOD_SET || | 413 | |
| 413 | smp->method == IB_MGMT_METHOD_TRAP_REPRESS || | 414 | /* Unset lease timeout on any valid Get/Set/TrapRepress */ |
| 414 | (smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) { | 415 | if (valid_mkey && ibp->mkey_lease_timeout && |
| 415 | if (ibp->mkey_violations != 0xFFFF) | 416 | (smp->method == IB_MGMT_METHOD_GET || |
| 416 | ++ibp->mkey_violations; | 417 | smp->method == IB_MGMT_METHOD_SET || |
| 417 | if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period) | 418 | smp->method == IB_MGMT_METHOD_TRAP_REPRESS)) |
| 418 | ibp->mkey_lease_timeout = jiffies + | ||
| 419 | ibp->mkey_lease_period * HZ; | ||
| 420 | /* Generate a trap notice. */ | ||
| 421 | qib_bad_mkey(ibp, smp); | ||
| 422 | ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; | ||
| 423 | } else if (ibp->mkey_lease_timeout) | ||
| 424 | ibp->mkey_lease_timeout = 0; | 419 | ibp->mkey_lease_timeout = 0; |
| 425 | 420 | ||
| 421 | if (!valid_mkey) { | ||
| 422 | switch (smp->method) { | ||
| 423 | case IB_MGMT_METHOD_GET: | ||
| 424 | /* Bad mkey not a violation below level 2 */ | ||
| 425 | if (ibp->mkeyprot < 2) | ||
| 426 | break; | ||
| 427 | case IB_MGMT_METHOD_SET: | ||
| 428 | case IB_MGMT_METHOD_TRAP_REPRESS: | ||
| 429 | if (ibp->mkey_violations != 0xFFFF) | ||
| 430 | ++ibp->mkey_violations; | ||
| 431 | if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period) | ||
| 432 | ibp->mkey_lease_timeout = jiffies + | ||
| 433 | ibp->mkey_lease_period * HZ; | ||
| 434 | /* Generate a trap notice. */ | ||
| 435 | qib_bad_mkey(ibp, smp); | ||
| 436 | ret = 1; | ||
| 437 | } | ||
| 438 | } | ||
| 439 | |||
| 426 | return ret; | 440 | return ret; |
| 427 | } | 441 | } |
| 428 | 442 | ||
| @@ -450,6 +464,7 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
| 450 | ibp = to_iport(ibdev, port_num); | 464 | ibp = to_iport(ibdev, port_num); |
| 451 | ret = check_mkey(ibp, smp, 0); | 465 | ret = check_mkey(ibp, smp, 0); |
| 452 | if (ret) | 466 | if (ret) |
| 467 | ret = IB_MAD_RESULT_FAILURE; | ||
| 453 | goto bail; | 468 | goto bail; |
| 454 | } | 469 | } |
| 455 | } | 470 | } |
| @@ -631,7 +646,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
| 631 | struct qib_devdata *dd; | 646 | struct qib_devdata *dd; |
| 632 | struct qib_pportdata *ppd; | 647 | struct qib_pportdata *ppd; |
| 633 | struct qib_ibport *ibp; | 648 | struct qib_ibport *ibp; |
| 634 | char clientrereg = 0; | 649 | u8 clientrereg = (pip->clientrereg_resv_subnetto & 0x80); |
| 635 | unsigned long flags; | 650 | unsigned long flags; |
| 636 | u16 lid, smlid; | 651 | u16 lid, smlid; |
| 637 | u8 lwe; | 652 | u8 lwe; |
| @@ -781,12 +796,6 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
| 781 | 796 | ||
| 782 | ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; | 797 | ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; |
| 783 | 798 | ||
| 784 | if (pip->clientrereg_resv_subnetto & 0x80) { | ||
| 785 | clientrereg = 1; | ||
| 786 | event.event = IB_EVENT_CLIENT_REREGISTER; | ||
| 787 | ib_dispatch_event(&event); | ||
| 788 | } | ||
| 789 | |||
| 790 | /* | 799 | /* |
| 791 | * Do the port state change now that the other link parameters | 800 | * Do the port state change now that the other link parameters |
| 792 | * have been set. | 801 | * have been set. |
| @@ -844,10 +853,15 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
| 844 | smp->status |= IB_SMP_INVALID_FIELD; | 853 | smp->status |= IB_SMP_INVALID_FIELD; |
| 845 | } | 854 | } |
| 846 | 855 | ||
| 856 | if (clientrereg) { | ||
| 857 | event.event = IB_EVENT_CLIENT_REREGISTER; | ||
| 858 | ib_dispatch_event(&event); | ||
| 859 | } | ||
| 860 | |||
| 847 | ret = subn_get_portinfo(smp, ibdev, port); | 861 | ret = subn_get_portinfo(smp, ibdev, port); |
| 848 | 862 | ||
| 849 | if (clientrereg) | 863 | /* restore re-reg bit per o14-12.2.1 */ |
| 850 | pip->clientrereg_resv_subnetto |= 0x80; | 864 | pip->clientrereg_resv_subnetto |= clientrereg; |
| 851 | 865 | ||
| 852 | goto get_only; | 866 | goto get_only; |
| 853 | 867 | ||
| @@ -1835,6 +1849,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, | |||
| 1835 | port_num && port_num <= ibdev->phys_port_cnt && | 1849 | port_num && port_num <= ibdev->phys_port_cnt && |
| 1836 | port != port_num) | 1850 | port != port_num) |
| 1837 | (void) check_mkey(to_iport(ibdev, port_num), smp, 0); | 1851 | (void) check_mkey(to_iport(ibdev, port_num), smp, 0); |
| 1852 | ret = IB_MAD_RESULT_FAILURE; | ||
| 1838 | goto bail; | 1853 | goto bail; |
| 1839 | } | 1854 | } |
| 1840 | 1855 | ||
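
The check_mkey() rewrite replaces one compound condition with a two-step shape: derive a single valid_mkey verdict, then dispatch on the MAD method, where a Get only counts as a violation at protect level 2 or higher (hence the deliberate switch fallthrough into the Set/TrapRepress cases). A condensed model of the resulting decision table, with illustrative constants standing in for the <rdma/ib_mad.h> values:

#include <stdio.h>

enum { METHOD_GET, METHOD_SET, METHOD_TRAP_REPRESS };

/* Sketch of the reworked policy, not the driver's exact code. */
static int mkey_is_violation(int valid_mkey, int method, int mkeyprot)
{
	if (valid_mkey)
		return 0;	/* matching, unset, or ignored mkey */
	if (method == METHOD_GET && mkeyprot < 2)
		return 0;	/* Gets tolerated below protect level 2 */
	return 1;		/* Set/TrapRepress, or a protected Get */
}

int main(void)
{
	printf("%d %d\n",
	       mkey_is_violation(0, METHOD_GET, 1),	/* 0: allowed */
	       mkey_is_violation(0, METHOD_SET, 0));	/* 1: violation */
	return 0;
}
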
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 7e7e16fbee99..1ce56b51ab1a 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c | |||
| @@ -1038,6 +1038,11 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd, | |||
| 1038 | goto bail_swq; | 1038 | goto bail_swq; |
| 1039 | } | 1039 | } |
| 1040 | RCU_INIT_POINTER(qp->next, NULL); | 1040 | RCU_INIT_POINTER(qp->next, NULL); |
| 1041 | qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL); | ||
| 1042 | if (!qp->s_hdr) { | ||
| 1043 | ret = ERR_PTR(-ENOMEM); | ||
| 1044 | goto bail_qp; | ||
| 1045 | } | ||
| 1041 | qp->timeout_jiffies = | 1046 | qp->timeout_jiffies = |
| 1042 | usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / | 1047 | usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / |
| 1043 | 1000UL); | 1048 | 1000UL); |
| @@ -1159,6 +1164,7 @@ bail_ip: | |||
| 1159 | vfree(qp->r_rq.wq); | 1164 | vfree(qp->r_rq.wq); |
| 1160 | free_qpn(&dev->qpn_table, qp->ibqp.qp_num); | 1165 | free_qpn(&dev->qpn_table, qp->ibqp.qp_num); |
| 1161 | bail_qp: | 1166 | bail_qp: |
| 1167 | kfree(qp->s_hdr); | ||
| 1162 | kfree(qp); | 1168 | kfree(qp); |
| 1163 | bail_swq: | 1169 | bail_swq: |
| 1164 | vfree(swq); | 1170 | vfree(swq); |
| @@ -1214,6 +1220,7 @@ int qib_destroy_qp(struct ib_qp *ibqp) | |||
| 1214 | else | 1220 | else |
| 1215 | vfree(qp->r_rq.wq); | 1221 | vfree(qp->r_rq.wq); |
| 1216 | vfree(qp->s_wq); | 1222 | vfree(qp->s_wq); |
| 1223 | kfree(qp->s_hdr); | ||
| 1217 | kfree(qp); | 1224 | kfree(qp); |
| 1218 | return 0; | 1225 | return 0; |
| 1219 | } | 1226 | } |
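
Here s_hdr becomes a separate allocation instead of an embedded member (shrinking the hot part of struct qib_qp, matching the qib_verbs.h change later in this series), so qib_create_qp() gains a kzalloc plus a matching kfree on both the unwind path and qib_destroy_qp(). A minimal sketch of the goto-unwind idiom, with calloc/free standing in for kzalloc/kfree:

#include <stdlib.h>

struct hdr { char buf[128]; };
struct qp { struct hdr *s_hdr; };

/* Each successful allocation adds one unwind label that frees it,
 * so every failure path releases exactly what was taken so far. */
static struct qp *create_qp(void)
{
	struct qp *qp = calloc(1, sizeof(*qp));

	if (!qp)
		goto bail;
	qp->s_hdr = calloc(1, sizeof(*qp->s_hdr));
	if (!qp->s_hdr)
		goto bail_qp;
	return qp;

bail_qp:
	free(qp);
bail:
	return NULL;
}

int main(void)
{
	struct qp *qp = create_qp();

	if (qp) {		/* destroy path frees in reverse order */
		free(qp->s_hdr);
		free(qp);
	}
	return 0;
}
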
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index 765b4cbaa020..b641416148eb 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c | |||
| @@ -244,9 +244,9 @@ int qib_make_rc_req(struct qib_qp *qp) | |||
| 244 | int ret = 0; | 244 | int ret = 0; |
| 245 | int delta; | 245 | int delta; |
| 246 | 246 | ||
| 247 | ohdr = &qp->s_hdr.u.oth; | 247 | ohdr = &qp->s_hdr->u.oth; |
| 248 | if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) | 248 | if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) |
| 249 | ohdr = &qp->s_hdr.u.l.oth; | 249 | ohdr = &qp->s_hdr->u.l.oth; |
| 250 | 250 | ||
| 251 | /* | 251 | /* |
| 252 | * The lock is needed to synchronize between the sending tasklet, | 252 | * The lock is needed to synchronize between the sending tasklet, |
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c index b4b37e47321a..c0ee7e095d81 100644 --- a/drivers/infiniband/hw/qib/qib_ruc.c +++ b/drivers/infiniband/hw/qib/qib_ruc.c | |||
| @@ -688,17 +688,17 @@ void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr, | |||
| 688 | nwords = (qp->s_cur_size + extra_bytes) >> 2; | 688 | nwords = (qp->s_cur_size + extra_bytes) >> 2; |
| 689 | lrh0 = QIB_LRH_BTH; | 689 | lrh0 = QIB_LRH_BTH; |
| 690 | if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) { | 690 | if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) { |
| 691 | qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh, | 691 | qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh, |
| 692 | &qp->remote_ah_attr.grh, | 692 | &qp->remote_ah_attr.grh, |
| 693 | qp->s_hdrwords, nwords); | 693 | qp->s_hdrwords, nwords); |
| 694 | lrh0 = QIB_LRH_GRH; | 694 | lrh0 = QIB_LRH_GRH; |
| 695 | } | 695 | } |
| 696 | lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 | | 696 | lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 | |
| 697 | qp->remote_ah_attr.sl << 4; | 697 | qp->remote_ah_attr.sl << 4; |
| 698 | qp->s_hdr.lrh[0] = cpu_to_be16(lrh0); | 698 | qp->s_hdr->lrh[0] = cpu_to_be16(lrh0); |
| 699 | qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); | 699 | qp->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); |
| 700 | qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); | 700 | qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); |
| 701 | qp->s_hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid | | 701 | qp->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid | |
| 702 | qp->remote_ah_attr.src_path_bits); | 702 | qp->remote_ah_attr.src_path_bits); |
| 703 | bth0 |= qib_get_pkey(ibp, qp->s_pkey_index); | 703 | bth0 |= qib_get_pkey(ibp, qp->s_pkey_index); |
| 704 | bth0 |= extra_bytes << 20; | 704 | bth0 |= extra_bytes << 20; |
| @@ -758,7 +758,7 @@ void qib_do_send(struct work_struct *work) | |||
| 758 | * If the packet cannot be sent now, return and | 758 | * If the packet cannot be sent now, return and |
| 759 | * the send tasklet will be woken up later. | 759 | * the send tasklet will be woken up later. |
| 760 | */ | 760 | */ |
| 761 | if (qib_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords, | 761 | if (qib_verbs_send(qp, qp->s_hdr, qp->s_hdrwords, |
| 762 | qp->s_cur_sge, qp->s_cur_size)) | 762 | qp->s_cur_sge, qp->s_cur_size)) |
| 763 | break; | 763 | break; |
| 764 | /* Record that s_hdr is empty. */ | 764 | /* Record that s_hdr is empty. */ |
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index dae51604cfcd..dd9cd49d0979 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c | |||
| @@ -503,8 +503,11 @@ static ssize_t show_nctxts(struct device *device, | |||
| 503 | struct qib_devdata *dd = dd_from_dev(dev); | 503 | struct qib_devdata *dd = dd_from_dev(dev); |
| 504 | 504 | ||
| 505 | /* Return the number of user ports (contexts) available. */ | 505 | /* Return the number of user ports (contexts) available. */ |
| 506 | return scnprintf(buf, PAGE_SIZE, "%u\n", dd->cfgctxts - | 506 | /* The calculation below deals with a special case where |
| 507 | dd->first_user_ctxt); | 507 | * cfgctxts is set to 1 on a single-port board. */ |
| 508 | return scnprintf(buf, PAGE_SIZE, "%u\n", | ||
| 509 | (dd->first_user_ctxt > dd->cfgctxts) ? 0 : | ||
| 510 | (dd->cfgctxts - dd->first_user_ctxt)); | ||
| 508 | } | 511 | } |
| 509 | 512 | ||
| 510 | static ssize_t show_nfreectxts(struct device *device, | 513 | static ssize_t show_nfreectxts(struct device *device, |
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c index 1bf626c40172..31d3561400a4 100644 --- a/drivers/infiniband/hw/qib/qib_tx.c +++ b/drivers/infiniband/hw/qib/qib_tx.c | |||
| @@ -295,6 +295,7 @@ u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum, | |||
| 295 | 295 | ||
| 296 | nbufs = last - first + 1; /* number in range to check */ | 296 | nbufs = last - first + 1; /* number in range to check */ |
| 297 | if (dd->upd_pio_shadow) { | 297 | if (dd->upd_pio_shadow) { |
| 298 | update_shadow: | ||
| 298 | /* | 299 | /* |
| 299 | * Minor optimization. If we had no buffers on last call, | 300 | * Minor optimization. If we had no buffers on last call, |
| 300 | * start out by doing the update; continue and do scan even | 301 | * start out by doing the update; continue and do scan even |
| @@ -304,37 +305,39 @@ u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum, | |||
| 304 | updated++; | 305 | updated++; |
| 305 | } | 306 | } |
| 306 | i = first; | 307 | i = first; |
| 307 | rescan: | ||
| 308 | /* | 308 | /* |
| 309 | * While test_and_set_bit() is atomic, we do that and then the | 309 | * While test_and_set_bit() is atomic, we do that and then the |
| 310 | * change_bit(), and the pair is not. See if this is the cause | 310 | * change_bit(), and the pair is not. See if this is the cause |
| 311 | * of the remaining armlaunch errors. | 311 | * of the remaining armlaunch errors. |
| 312 | */ | 312 | */ |
| 313 | spin_lock_irqsave(&dd->pioavail_lock, flags); | 313 | spin_lock_irqsave(&dd->pioavail_lock, flags); |
| 314 | if (dd->last_pio >= first && dd->last_pio <= last) | ||
| 315 | i = dd->last_pio + 1; | ||
| 316 | if (!first) | ||
| 317 | /* adjust to min possible */ | ||
| 318 | nbufs = last - dd->min_kernel_pio + 1; | ||
| 314 | for (j = 0; j < nbufs; j++, i++) { | 319 | for (j = 0; j < nbufs; j++, i++) { |
| 315 | if (i > last) | 320 | if (i > last) |
| 316 | i = first; | 321 | i = !first ? dd->min_kernel_pio : first; |
| 317 | if (__test_and_set_bit((2 * i) + 1, shadow)) | 322 | if (__test_and_set_bit((2 * i) + 1, shadow)) |
| 318 | continue; | 323 | continue; |
| 319 | /* flip generation bit */ | 324 | /* flip generation bit */ |
| 320 | __change_bit(2 * i, shadow); | 325 | __change_bit(2 * i, shadow); |
| 321 | /* remember that the buffer can be written to now */ | 326 | /* remember that the buffer can be written to now */ |
| 322 | __set_bit(i, dd->pio_writing); | 327 | __set_bit(i, dd->pio_writing); |
| 328 | if (!first && first != last) /* first == last on VL15, avoid */ | ||
| 329 | dd->last_pio = i; | ||
| 323 | break; | 330 | break; |
| 324 | } | 331 | } |
| 325 | spin_unlock_irqrestore(&dd->pioavail_lock, flags); | 332 | spin_unlock_irqrestore(&dd->pioavail_lock, flags); |
| 326 | 333 | ||
| 327 | if (j == nbufs) { | 334 | if (j == nbufs) { |
| 328 | if (!updated) { | 335 | if (!updated) |
| 329 | /* | 336 | /* |
| 330 | * First time through; shadow exhausted, but may be | 337 | * First time through; shadow exhausted, but may be |
| 331 | * buffers available, try an update and then rescan. | 338 | * buffers available, try an update and then rescan. |
| 332 | */ | 339 | */ |
| 333 | update_send_bufs(dd); | 340 | goto update_shadow; |
| 334 | updated++; | ||
| 335 | i = first; | ||
| 336 | goto rescan; | ||
| 337 | } | ||
| 338 | no_send_bufs(dd); | 341 | no_send_bufs(dd); |
| 339 | buf = NULL; | 342 | buf = NULL; |
| 340 | } else { | 343 | } else { |
| @@ -422,14 +425,20 @@ void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start, | |||
| 422 | __clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT | 425 | __clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT |
| 423 | + start, dd->pioavailshadow); | 426 | + start, dd->pioavailshadow); |
| 424 | __set_bit(start, dd->pioavailkernel); | 427 | __set_bit(start, dd->pioavailkernel); |
| 428 | if ((start >> 1) < dd->min_kernel_pio) | ||
| 429 | dd->min_kernel_pio = start >> 1; | ||
| 425 | } else { | 430 | } else { |
| 426 | __set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT, | 431 | __set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT, |
| 427 | dd->pioavailshadow); | 432 | dd->pioavailshadow); |
| 428 | __clear_bit(start, dd->pioavailkernel); | 433 | __clear_bit(start, dd->pioavailkernel); |
| 434 | if ((start >> 1) > dd->min_kernel_pio) | ||
| 435 | dd->min_kernel_pio = start >> 1; | ||
| 429 | } | 436 | } |
| 430 | start += 2; | 437 | start += 2; |
| 431 | } | 438 | } |
| 432 | 439 | ||
| 440 | if (dd->min_kernel_pio > 0 && dd->last_pio < dd->min_kernel_pio - 1) | ||
| 441 | dd->last_pio = dd->min_kernel_pio - 1; | ||
| 433 | spin_unlock_irqrestore(&dd->pioavail_lock, flags); | 442 | spin_unlock_irqrestore(&dd->pioavail_lock, flags); |
| 434 | 443 | ||
| 435 | dd->f_txchk_change(dd, ostart, len, avail, rcd); | 444 | dd->f_txchk_change(dd, ostart, len, avail, rcd); |
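
The qib_tx.c change makes the PIO buffer search stateful: instead of always rescanning from first, it resumes one past dd->last_pio (wrapping back to min_kernel_pio for kernel ranges), and the rescan-after-update path collapses into a goto update_shadow. The core idea is a circular first-fit scan; a standalone sketch, with the lock and the shadow-update step omitted:

#include <stdio.h>

#define NBUFS 16
static unsigned char busy[NBUFS];
static unsigned int last_alloc;	/* analogous to dd->last_pio */

/* Circular first-fit starting just past the previous hit, so
 * repeated allocations do not rescan busy low-numbered buffers. */
static int alloc_buf(void)
{
	unsigned int i = (last_alloc + 1) % NBUFS;
	unsigned int n;

	for (n = 0; n < NBUFS; n++, i = (i + 1) % NBUFS) {
		if (!busy[i]) {
			busy[i] = 1;
			last_alloc = i;
			return (int)i;
		}
	}
	return -1;	/* exhausted; the driver would refresh its shadow */
}

int main(void)
{
	printf("%d %d %d\n", alloc_buf(), alloc_buf(), alloc_buf());
	return 0;
}
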
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index 7ce2ac2ed219..ce7387ff5d91 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c | |||
| @@ -72,9 +72,9 @@ int qib_make_uc_req(struct qib_qp *qp) | |||
| 72 | goto done; | 72 | goto done; |
| 73 | } | 73 | } |
| 74 | 74 | ||
| 75 | ohdr = &qp->s_hdr.u.oth; | 75 | ohdr = &qp->s_hdr->u.oth; |
| 76 | if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) | 76 | if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) |
| 77 | ohdr = &qp->s_hdr.u.l.oth; | 77 | ohdr = &qp->s_hdr->u.l.oth; |
| 78 | 78 | ||
| 79 | /* header size in 32-bit words LRH+BTH = (8+12)/4. */ | 79 | /* header size in 32-bit words LRH+BTH = (8+12)/4. */ |
| 80 | hwords = 5; | 80 | hwords = 5; |
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index 828609fa4d28..a468bf2d4465 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c | |||
| @@ -321,11 +321,11 @@ int qib_make_ud_req(struct qib_qp *qp) | |||
| 321 | 321 | ||
| 322 | if (ah_attr->ah_flags & IB_AH_GRH) { | 322 | if (ah_attr->ah_flags & IB_AH_GRH) { |
| 323 | /* Header size in 32-bit words. */ | 323 | /* Header size in 32-bit words. */ |
| 324 | qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh, | 324 | qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh, |
| 325 | &ah_attr->grh, | 325 | &ah_attr->grh, |
| 326 | qp->s_hdrwords, nwords); | 326 | qp->s_hdrwords, nwords); |
| 327 | lrh0 = QIB_LRH_GRH; | 327 | lrh0 = QIB_LRH_GRH; |
| 328 | ohdr = &qp->s_hdr.u.l.oth; | 328 | ohdr = &qp->s_hdr->u.l.oth; |
| 329 | /* | 329 | /* |
| 330 | * Don't worry about sending to locally attached multicast | 330 | * Don't worry about sending to locally attached multicast |
| 331 | * QPs. It is unspecified by the spec. what happens. | 331 | * QPs. It is unspecified by the spec. what happens. |
| @@ -333,7 +333,7 @@ int qib_make_ud_req(struct qib_qp *qp) | |||
| 333 | } else { | 333 | } else { |
| 334 | /* Header size in 32-bit words. */ | 334 | /* Header size in 32-bit words. */ |
| 335 | lrh0 = QIB_LRH_BTH; | 335 | lrh0 = QIB_LRH_BTH; |
| 336 | ohdr = &qp->s_hdr.u.oth; | 336 | ohdr = &qp->s_hdr->u.oth; |
| 337 | } | 337 | } |
| 338 | if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { | 338 | if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { |
| 339 | qp->s_hdrwords++; | 339 | qp->s_hdrwords++; |
| @@ -346,15 +346,15 @@ int qib_make_ud_req(struct qib_qp *qp) | |||
| 346 | lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */ | 346 | lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */ |
| 347 | else | 347 | else |
| 348 | lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12; | 348 | lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12; |
| 349 | qp->s_hdr.lrh[0] = cpu_to_be16(lrh0); | 349 | qp->s_hdr->lrh[0] = cpu_to_be16(lrh0); |
| 350 | qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ | 350 | qp->s_hdr->lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ |
| 351 | qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); | 351 | qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); |
| 352 | lid = ppd->lid; | 352 | lid = ppd->lid; |
| 353 | if (lid) { | 353 | if (lid) { |
| 354 | lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1); | 354 | lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1); |
| 355 | qp->s_hdr.lrh[3] = cpu_to_be16(lid); | 355 | qp->s_hdr->lrh[3] = cpu_to_be16(lid); |
| 356 | } else | 356 | } else |
| 357 | qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE; | 357 | qp->s_hdr->lrh[3] = IB_LID_PERMISSIVE; |
| 358 | if (wqe->wr.send_flags & IB_SEND_SOLICITED) | 358 | if (wqe->wr.send_flags & IB_SEND_SOLICITED) |
| 359 | bth0 |= IB_BTH_SOLICITED; | 359 | bth0 |= IB_BTH_SOLICITED; |
| 360 | bth0 |= extra_bytes << 20; | 360 | bth0 |= extra_bytes << 20; |
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index 0c19ef0c4123..487606024659 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h | |||
| @@ -367,9 +367,10 @@ struct qib_rwq { | |||
| 367 | 367 | ||
| 368 | struct qib_rq { | 368 | struct qib_rq { |
| 369 | struct qib_rwq *wq; | 369 | struct qib_rwq *wq; |
| 370 | spinlock_t lock; /* protect changes in this struct */ | ||
| 371 | u32 size; /* size of RWQE array */ | 370 | u32 size; /* size of RWQE array */ |
| 372 | u8 max_sge; | 371 | u8 max_sge; |
| 372 | spinlock_t lock /* protect changes in this struct */ | ||
| 373 | ____cacheline_aligned_in_smp; | ||
| 373 | }; | 374 | }; |
| 374 | 375 | ||
| 375 | struct qib_srq { | 376 | struct qib_srq { |
| @@ -412,31 +413,75 @@ struct qib_ack_entry { | |||
| 412 | */ | 413 | */ |
| 413 | struct qib_qp { | 414 | struct qib_qp { |
| 414 | struct ib_qp ibqp; | 415 | struct ib_qp ibqp; |
| 415 | struct qib_qp *next; /* link list for QPN hash table */ | 416 | /* read mostly fields above and below */ |
| 416 | struct qib_qp *timer_next; /* link list for qib_ib_timer() */ | ||
| 417 | struct list_head iowait; /* link for waiting for PIO buf */ | ||
| 418 | struct list_head rspwait; /* link for waiting to respond */ | ||
| 419 | struct ib_ah_attr remote_ah_attr; | 417 | struct ib_ah_attr remote_ah_attr; |
| 420 | struct ib_ah_attr alt_ah_attr; | 418 | struct ib_ah_attr alt_ah_attr; |
| 421 | struct qib_ib_header s_hdr; /* next packet header to send */ | 419 | struct qib_qp *next; /* link list for QPN hash table */ |
| 422 | atomic_t refcount; | 420 | struct qib_swqe *s_wq; /* send work queue */ |
| 423 | wait_queue_head_t wait; | ||
| 424 | wait_queue_head_t wait_dma; | ||
| 425 | struct timer_list s_timer; | ||
| 426 | struct work_struct s_work; | ||
| 427 | struct qib_mmap_info *ip; | 421 | struct qib_mmap_info *ip; |
| 422 | struct qib_ib_header *s_hdr; /* next packet header to send */ | ||
| 423 | unsigned long timeout_jiffies; /* computed from timeout */ | ||
| 424 | |||
| 425 | enum ib_mtu path_mtu; | ||
| 426 | u32 remote_qpn; | ||
| 427 | u32 pmtu; /* decoded from path_mtu */ | ||
| 428 | u32 qkey; /* QKEY for this QP (for UD or RD) */ | ||
| 429 | u32 s_size; /* send work queue size */ | ||
| 430 | u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */ | ||
| 431 | |||
| 432 | u8 state; /* QP state */ | ||
| 433 | u8 qp_access_flags; | ||
| 434 | u8 alt_timeout; /* Alternate path timeout for this QP */ | ||
| 435 | u8 timeout; /* Timeout for this QP */ | ||
| 436 | u8 s_srate; | ||
| 437 | u8 s_mig_state; | ||
| 438 | u8 port_num; | ||
| 439 | u8 s_pkey_index; /* PKEY index to use */ | ||
| 440 | u8 s_alt_pkey_index; /* Alternate path PKEY index to use */ | ||
| 441 | u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */ | ||
| 442 | u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */ | ||
| 443 | u8 s_retry_cnt; /* number of times to retry */ | ||
| 444 | u8 s_rnr_retry_cnt; | ||
| 445 | u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */ | ||
| 446 | u8 s_max_sge; /* size of s_wq->sg_list */ | ||
| 447 | u8 s_draining; | ||
| 448 | |||
| 449 | /* start of read/write fields */ | ||
| 450 | |||
| 451 | atomic_t refcount ____cacheline_aligned_in_smp; | ||
| 452 | wait_queue_head_t wait; | ||
| 453 | |||
| 454 | |||
| 455 | struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1] | ||
| 456 | ____cacheline_aligned_in_smp; | ||
| 457 | struct qib_sge_state s_rdma_read_sge; | ||
| 458 | |||
| 459 | spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */ | ||
| 460 | unsigned long r_aflags; | ||
| 461 | u64 r_wr_id; /* ID for current receive WQE */ | ||
| 462 | u32 r_ack_psn; /* PSN for next ACK or atomic ACK */ | ||
| 463 | u32 r_len; /* total length of r_sge */ | ||
| 464 | u32 r_rcv_len; /* receive data len processed */ | ||
| 465 | u32 r_psn; /* expected rcv packet sequence number */ | ||
| 466 | u32 r_msn; /* message sequence number */ | ||
| 467 | |||
| 468 | u8 r_state; /* opcode of last packet received */ | ||
| 469 | u8 r_flags; | ||
| 470 | u8 r_head_ack_queue; /* index into s_ack_queue[] */ | ||
| 471 | |||
| 472 | struct list_head rspwait; /* link for waiting to respond */ | ||
| 473 | |||
| 474 | struct qib_sge_state r_sge; /* current receive data */ | ||
| 475 | struct qib_rq r_rq; /* receive work queue */ | ||
| 476 | |||
| 477 | spinlock_t s_lock ____cacheline_aligned_in_smp; | ||
| 428 | struct qib_sge_state *s_cur_sge; | 478 | struct qib_sge_state *s_cur_sge; |
| 479 | u32 s_flags; | ||
| 429 | struct qib_verbs_txreq *s_tx; | 480 | struct qib_verbs_txreq *s_tx; |
| 430 | struct qib_mregion *s_rdma_mr; | 481 | struct qib_swqe *s_wqe; |
| 431 | struct qib_sge_state s_sge; /* current send request data */ | 482 | struct qib_sge_state s_sge; /* current send request data */ |
| 432 | struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]; | 483 | struct qib_mregion *s_rdma_mr; |
| 433 | struct qib_sge_state s_ack_rdma_sge; | ||
| 434 | struct qib_sge_state s_rdma_read_sge; | ||
| 435 | struct qib_sge_state r_sge; /* current receive data */ | ||
| 436 | spinlock_t r_lock; /* used for APM */ | ||
| 437 | spinlock_t s_lock; | ||
| 438 | atomic_t s_dma_busy; | 484 | atomic_t s_dma_busy; |
| 439 | u32 s_flags; | ||
| 440 | u32 s_cur_size; /* size of send packet in bytes */ | 485 | u32 s_cur_size; /* size of send packet in bytes */ |
| 441 | u32 s_len; /* total length of s_sge */ | 486 | u32 s_len; /* total length of s_sge */ |
| 442 | u32 s_rdma_read_len; /* total length of s_rdma_read_sge */ | 487 | u32 s_rdma_read_len; /* total length of s_rdma_read_sge */ |
| @@ -447,60 +492,34 @@ struct qib_qp { | |||
| 447 | u32 s_psn; /* current packet sequence number */ | 492 | u32 s_psn; /* current packet sequence number */ |
| 448 | u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */ | 493 | u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */ |
| 449 | u32 s_ack_psn; /* PSN for acking sends and RDMA writes */ | 494 | u32 s_ack_psn; /* PSN for acking sends and RDMA writes */ |
| 450 | u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */ | 495 | u32 s_head; /* new entries added here */ |
| 451 | u32 r_ack_psn; /* PSN for next ACK or atomic ACK */ | 496 | u32 s_tail; /* next entry to process */ |
| 452 | u64 r_wr_id; /* ID for current receive WQE */ | 497 | u32 s_cur; /* current work queue entry */ |
| 453 | unsigned long r_aflags; | 498 | u32 s_acked; /* last un-ACK'ed entry */ |
| 454 | u32 r_len; /* total length of r_sge */ | 499 | u32 s_last; /* last completed entry */ |
| 455 | u32 r_rcv_len; /* receive data len processed */ | 500 | u32 s_ssn; /* SSN of tail entry */ |
| 456 | u32 r_psn; /* expected rcv packet sequence number */ | 501 | u32 s_lsn; /* limit sequence number (credit) */ |
| 457 | u32 r_msn; /* message sequence number */ | ||
| 458 | u16 s_hdrwords; /* size of s_hdr in 32 bit words */ | 502 | u16 s_hdrwords; /* size of s_hdr in 32 bit words */ |
| 459 | u16 s_rdma_ack_cnt; | 503 | u16 s_rdma_ack_cnt; |
| 460 | u8 state; /* QP state */ | ||
| 461 | u8 s_state; /* opcode of last packet sent */ | 504 | u8 s_state; /* opcode of last packet sent */ |
| 462 | u8 s_ack_state; /* opcode of packet to ACK */ | 505 | u8 s_ack_state; /* opcode of packet to ACK */ |
| 463 | u8 s_nak_state; /* non-zero if NAK is pending */ | 506 | u8 s_nak_state; /* non-zero if NAK is pending */ |
| 464 | u8 r_state; /* opcode of last packet received */ | ||
| 465 | u8 r_nak_state; /* non-zero if NAK is pending */ | 507 | u8 r_nak_state; /* non-zero if NAK is pending */ |
| 466 | u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */ | ||
| 467 | u8 r_flags; | ||
| 468 | u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */ | ||
| 469 | u8 r_head_ack_queue; /* index into s_ack_queue[] */ | ||
| 470 | u8 qp_access_flags; | ||
| 471 | u8 s_max_sge; /* size of s_wq->sg_list */ | ||
| 472 | u8 s_retry_cnt; /* number of times to retry */ | ||
| 473 | u8 s_rnr_retry_cnt; | ||
| 474 | u8 s_retry; /* requester retry counter */ | 508 | u8 s_retry; /* requester retry counter */ |
| 475 | u8 s_rnr_retry; /* requester RNR retry counter */ | 509 | u8 s_rnr_retry; /* requester RNR retry counter */ |
| 476 | u8 s_pkey_index; /* PKEY index to use */ | ||
| 477 | u8 s_alt_pkey_index; /* Alternate path PKEY index to use */ | ||
| 478 | u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */ | ||
| 479 | u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */ | 510 | u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */ |
| 480 | u8 s_tail_ack_queue; /* index into s_ack_queue[] */ | 511 | u8 s_tail_ack_queue; /* index into s_ack_queue[] */ |
| 481 | u8 s_srate; | 512 | |
| 482 | u8 s_draining; | 513 | struct qib_sge_state s_ack_rdma_sge; |
| 483 | u8 s_mig_state; | 514 | struct timer_list s_timer; |
| 484 | u8 timeout; /* Timeout for this QP */ | 515 | struct list_head iowait; /* link for waiting for PIO buf */ |
| 485 | u8 alt_timeout; /* Alternate path timeout for this QP */ | 516 | |
| 486 | u8 port_num; | 517 | struct work_struct s_work; |
| 487 | enum ib_mtu path_mtu; | 518 | |
| 488 | u32 pmtu; /* decoded from path_mtu */ | 519 | wait_queue_head_t wait_dma; |
| 489 | u32 remote_qpn; | 520 | |
| 490 | u32 qkey; /* QKEY for this QP (for UD or RD) */ | 521 | struct qib_sge r_sg_list[0] /* verified SGEs */ |
| 491 | u32 s_size; /* send work queue size */ | 522 | ____cacheline_aligned_in_smp; |
| 492 | u32 s_head; /* new entries added here */ | ||
| 493 | u32 s_tail; /* next entry to process */ | ||
| 494 | u32 s_cur; /* current work queue entry */ | ||
| 495 | u32 s_acked; /* last un-ACK'ed entry */ | ||
| 496 | u32 s_last; /* last completed entry */ | ||
| 497 | u32 s_ssn; /* SSN of tail entry */ | ||
| 498 | u32 s_lsn; /* limit sequence number (credit) */ | ||
| 499 | unsigned long timeout_jiffies; /* computed from timeout */ | ||
| 500 | struct qib_swqe *s_wq; /* send work queue */ | ||
| 501 | struct qib_swqe *s_wqe; | ||
| 502 | struct qib_rq r_rq; /* receive work queue */ | ||
| 503 | struct qib_sge r_sg_list[0]; /* verified SGEs */ | ||
| 504 | }; | 523 | }; |
| 505 | 524 | ||
| 506 | /* | 525 | /* |
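
struct qib_qp still ends with r_sg_list[0], a zero-length trailing array (now pushed onto its own cache line): the QP and its verified SGEs come from a single allocation sized at create time. A standalone illustration of that trailing-array allocation pattern, using the C99 [] spelling of the same idiom:

#include <stdio.h>
#include <stdlib.h>

struct sge { unsigned long addr, len; };

/* Same trailing-array trick as r_sg_list[0]: one allocation covers
 * the struct plus 'n' array entries that follow it in memory. */
struct qp {
	int state;
	struct sge r_sg_list[];	/* written as [0] in older kernel style */
};

int main(void)
{
	unsigned int n = 4;
	struct qp *qp = malloc(sizeof(*qp) + n * sizeof(struct sge));

	if (!qp)
		return 1;
	qp->r_sg_list[n - 1].len = 0;	/* last entry is addressable */
	printf("alloc size: %zu\n", sizeof(*qp) + n * sizeof(struct sge));
	free(qp);
	return 0;
}
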
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index db43b3117168..0ab8c9cc3a78 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c | |||
| @@ -573,10 +573,9 @@ iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, | |||
| 573 | 573 | ||
| 574 | err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, | 574 | err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, |
| 575 | non_blocking); | 575 | non_blocking); |
| 576 | if (err) { | 576 | if (err) |
| 577 | iscsi_destroy_endpoint(ep); | ||
| 578 | return ERR_PTR(err); | 577 | return ERR_PTR(err); |
| 579 | } | 578 | |
| 580 | return ep; | 579 | return ep; |
| 581 | } | 580 | } |
| 582 | 581 | ||
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 14224ba44fd8..2dddabd8fcf9 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
| @@ -613,8 +613,9 @@ id_failure: | |||
| 613 | ib_conn->cma_id = NULL; | 613 | ib_conn->cma_id = NULL; |
| 614 | addr_failure: | 614 | addr_failure: |
| 615 | ib_conn->state = ISER_CONN_DOWN; | 615 | ib_conn->state = ISER_CONN_DOWN; |
| 616 | iser_conn_put(ib_conn, 1); /* deref ib conn's cma id */ | ||
| 616 | connect_failure: | 617 | connect_failure: |
| 617 | iser_conn_release(ib_conn, 1); | 618 | iser_conn_put(ib_conn, 1); /* deref ib conn; may deallocate */ |
| 618 | return err; | 619 | return err; |
| 619 | } | 620 | } |
| 620 | 621 | ||
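
The iser error path now tears down by dropping references rather than calling release directly: addr_failure drops the cma-id reference, and the final put at connect_failure deallocates the connection once the count reaches zero. A sketch of that put-based teardown, modeling only the counting (the real iser_conn_put() also takes a destroy flag):

#include <stdio.h>
#include <stdlib.h>

struct conn {
	int refcount;
};

/* Every holder drops one reference; whoever drops the last one
 * runs the release work, so it happens exactly once. */
static void conn_put(struct conn *c)
{
	if (--c->refcount == 0) {
		printf("releasing\n");
		free(c);
	}
}

int main(void)
{
	struct conn *c = malloc(sizeof(*c));

	if (!c)
		return 1;
	c->refcount = 2;	/* e.g. caller + cma id */
	conn_put(c);		/* addr_failure: drop the cma-id ref */
	conn_put(c);		/* connect_failure: final put frees */
	return 0;
}
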
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 0fe18850c838..ec2dafe8ae5b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | |||
| @@ -51,6 +51,8 @@ | |||
| 51 | #define FW_VERSION_MINOR 1 | 51 | #define FW_VERSION_MINOR 1 |
| 52 | #define FW_VERSION_MICRO 0 | 52 | #define FW_VERSION_MICRO 0 |
| 53 | 53 | ||
| 54 | #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) | ||
| 55 | |||
| 54 | enum { | 56 | enum { |
| 55 | MAX_NPORTS = 4, /* max # of ports */ | 57 | MAX_NPORTS = 4, /* max # of ports */ |
| 56 | SERNUM_LEN = 24, /* Serial # length */ | 58 | SERNUM_LEN = 24, /* Serial # length */ |
| @@ -64,6 +66,15 @@ enum { | |||
| 64 | MEM_MC | 66 | MEM_MC |
| 65 | }; | 67 | }; |
| 66 | 68 | ||
| 69 | enum { | ||
| 70 | MEMWIN0_APERTURE = 65536, | ||
| 71 | MEMWIN0_BASE = 0x30000, | ||
| 72 | MEMWIN1_APERTURE = 32768, | ||
| 73 | MEMWIN1_BASE = 0x28000, | ||
| 74 | MEMWIN2_APERTURE = 2048, | ||
| 75 | MEMWIN2_BASE = 0x1b800, | ||
| 76 | }; | ||
| 77 | |||
| 67 | enum dev_master { | 78 | enum dev_master { |
| 68 | MASTER_CANT, | 79 | MASTER_CANT, |
| 69 | MASTER_MAY, | 80 | MASTER_MAY, |
| @@ -403,6 +414,9 @@ struct sge_txq { | |||
| 403 | struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */ | 414 | struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */ |
| 404 | struct sge_qstat *stat; /* queue status entry */ | 415 | struct sge_qstat *stat; /* queue status entry */ |
| 405 | dma_addr_t phys_addr; /* physical address of the ring */ | 416 | dma_addr_t phys_addr; /* physical address of the ring */ |
| 417 | spinlock_t db_lock; | ||
| 418 | int db_disabled; | ||
| 419 | unsigned short db_pidx; | ||
| 406 | }; | 420 | }; |
| 407 | 421 | ||
| 408 | struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ | 422 | struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ |
| @@ -475,6 +489,7 @@ struct adapter { | |||
| 475 | void __iomem *regs; | 489 | void __iomem *regs; |
| 476 | struct pci_dev *pdev; | 490 | struct pci_dev *pdev; |
| 477 | struct device *pdev_dev; | 491 | struct device *pdev_dev; |
| 492 | unsigned int mbox; | ||
| 478 | unsigned int fn; | 493 | unsigned int fn; |
| 479 | unsigned int flags; | 494 | unsigned int flags; |
| 480 | 495 | ||
| @@ -504,6 +519,8 @@ struct adapter { | |||
| 504 | void **tid_release_head; | 519 | void **tid_release_head; |
| 505 | spinlock_t tid_release_lock; | 520 | spinlock_t tid_release_lock; |
| 506 | struct work_struct tid_release_task; | 521 | struct work_struct tid_release_task; |
| 522 | struct work_struct db_full_task; | ||
| 523 | struct work_struct db_drop_task; | ||
| 507 | bool tid_release_task_busy; | 524 | bool tid_release_task_busy; |
| 508 | 525 | ||
| 509 | struct dentry *debugfs_root; | 526 | struct dentry *debugfs_root; |
| @@ -605,6 +622,7 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie); | |||
| 605 | void t4_sge_init(struct adapter *adap); | 622 | void t4_sge_init(struct adapter *adap); |
| 606 | void t4_sge_start(struct adapter *adap); | 623 | void t4_sge_start(struct adapter *adap); |
| 607 | void t4_sge_stop(struct adapter *adap); | 624 | void t4_sge_stop(struct adapter *adap); |
| 625 | extern int dbfifo_int_thresh; | ||
| 608 | 626 | ||
| 609 | #define for_each_port(adapter, iter) \ | 627 | #define for_each_port(adapter, iter) \ |
| 610 | for (iter = 0; iter < (adapter)->params.nports; ++iter) | 628 | for (iter = 0; iter < (adapter)->params.nports; ++iter) |
| @@ -719,4 +737,9 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, | |||
| 719 | int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, | 737 | int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, |
| 720 | unsigned int vf, unsigned int eqid); | 738 | unsigned int vf, unsigned int eqid); |
| 721 | int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); | 739 | int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); |
| 740 | void t4_db_full(struct adapter *adapter); | ||
| 741 | void t4_db_dropped(struct adapter *adapter); | ||
| 742 | int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len); | ||
| 743 | int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, | ||
| 744 | u32 addr, u32 val); | ||
| 722 | #endif /* __CXGB4_H__ */ | 745 | #endif /* __CXGB4_H__ */ |
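
The new db_lock/db_disabled/db_pidx fields in struct sge_txq support doorbell-drop recovery: when the FIFO overflows, queues are frozen and each one's software producer index is replayed to hardware as a wraparound-aware delta (see read_eq_indices() and cxgb4_sync_txq_pidx() in cxgb4_main.c below). A standalone sketch of that delta computation, with made-up ring values:

#include <stdio.h>

/* How far the software producer index ran ahead of the hardware's,
 * modulo the ring size; this is the PIDX increment written back to
 * the kernel doorbell register during recovery. */
static unsigned short db_delta(unsigned short sw_pidx,
			       unsigned short hw_pidx,
			       unsigned short ring_size)
{
	return (sw_pidx >= hw_pidx) ? sw_pidx - hw_pidx
				    : ring_size - hw_pidx + sw_pidx;
}

int main(void)
{
	/* software index wrapped past the end of a 1024-entry ring */
	printf("%u\n", (unsigned)db_delta(3, 1020, 1024));	/* -> 7 */
	return 0;
}
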
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index b126b98065a9..e1f96fbb48c1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
| @@ -149,15 +149,6 @@ static unsigned int pfvfres_pmask(struct adapter *adapter, | |||
| 149 | #endif | 149 | #endif |
| 150 | 150 | ||
| 151 | enum { | 151 | enum { |
| 152 | MEMWIN0_APERTURE = 65536, | ||
| 153 | MEMWIN0_BASE = 0x30000, | ||
| 154 | MEMWIN1_APERTURE = 32768, | ||
| 155 | MEMWIN1_BASE = 0x28000, | ||
| 156 | MEMWIN2_APERTURE = 2048, | ||
| 157 | MEMWIN2_BASE = 0x1b800, | ||
| 158 | }; | ||
| 159 | |||
| 160 | enum { | ||
| 161 | MAX_TXQ_ENTRIES = 16384, | 152 | MAX_TXQ_ENTRIES = 16384, |
| 162 | MAX_CTRL_TXQ_ENTRIES = 1024, | 153 | MAX_CTRL_TXQ_ENTRIES = 1024, |
| 163 | MAX_RSPQ_ENTRIES = 16384, | 154 | MAX_RSPQ_ENTRIES = 16384, |
| @@ -371,6 +362,15 @@ static int set_addr_filters(const struct net_device *dev, bool sleep) | |||
| 371 | uhash | mhash, sleep); | 362 | uhash | mhash, sleep); |
| 372 | } | 363 | } |
| 373 | 364 | ||
| 365 | int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */ | ||
| 366 | module_param(dbfifo_int_thresh, int, 0644); | ||
| 367 | MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold"); | ||
| 368 | |||
| 369 | int dbfifo_drain_delay = 1000; /* usecs to sleep while draining the dbfifo */ | ||
| 370 | module_param(dbfifo_drain_delay, int, 0644); | ||
| 371 | MODULE_PARM_DESC(dbfifo_drain_delay, | ||
| 372 | "usecs to sleep while draining the dbfifo"); | ||
| 373 | |||
| 374 | /* | 374 | /* |
| 375 | * Set Rx properties of a port, such as promiscuity, address filters, and MTU. | 375 | * Set Rx properties of a port, such as promiscuity, address filters, and MTU. |
| 376 | * If @mtu is -1 it is left unchanged. | 376 | * If @mtu is -1 it is left unchanged. |
| @@ -389,6 +389,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) | |||
| 389 | return ret; | 389 | return ret; |
| 390 | } | 390 | } |
| 391 | 391 | ||
| 392 | static struct workqueue_struct *workq; | ||
| 393 | |||
| 392 | /** | 394 | /** |
| 393 | * link_start - enable a port | 395 | * link_start - enable a port |
| 394 | * @dev: the port to enable | 396 | * @dev: the port to enable |
| @@ -2196,7 +2198,7 @@ static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, | |||
| 2196 | adap->tid_release_head = (void **)((uintptr_t)p | chan); | 2198 | adap->tid_release_head = (void **)((uintptr_t)p | chan); |
| 2197 | if (!adap->tid_release_task_busy) { | 2199 | if (!adap->tid_release_task_busy) { |
| 2198 | adap->tid_release_task_busy = true; | 2200 | adap->tid_release_task_busy = true; |
| 2199 | schedule_work(&adap->tid_release_task); | 2201 | queue_work(workq, &adap->tid_release_task); |
| 2200 | } | 2202 | } |
| 2201 | spin_unlock_bh(&adap->tid_release_lock); | 2203 | spin_unlock_bh(&adap->tid_release_lock); |
| 2202 | } | 2204 | } |
| @@ -2366,6 +2368,16 @@ unsigned int cxgb4_port_chan(const struct net_device *dev) | |||
| 2366 | } | 2368 | } |
| 2367 | EXPORT_SYMBOL(cxgb4_port_chan); | 2369 | EXPORT_SYMBOL(cxgb4_port_chan); |
| 2368 | 2370 | ||
| 2371 | unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo) | ||
| 2372 | { | ||
| 2373 | struct adapter *adap = netdev2adap(dev); | ||
| 2374 | u32 v; | ||
| 2375 | |||
| 2376 | v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); | ||
| 2377 | return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v); | ||
| 2378 | } | ||
| 2379 | EXPORT_SYMBOL(cxgb4_dbfifo_count); | ||
| 2380 | |||
| 2369 | /** | 2381 | /** |
| 2370 | * cxgb4_port_viid - get the VI id of a port | 2382 | * cxgb4_port_viid - get the VI id of a port |
| 2371 | * @dev: the net device for the port | 2383 | * @dev: the net device for the port |
| @@ -2413,6 +2425,59 @@ void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, | |||
| 2413 | } | 2425 | } |
| 2414 | EXPORT_SYMBOL(cxgb4_iscsi_init); | 2426 | EXPORT_SYMBOL(cxgb4_iscsi_init); |
| 2415 | 2427 | ||
| 2428 | int cxgb4_flush_eq_cache(struct net_device *dev) | ||
| 2429 | { | ||
| 2430 | struct adapter *adap = netdev2adap(dev); | ||
| 2431 | int ret; | ||
| 2432 | |||
| 2433 | ret = t4_fwaddrspace_write(adap, adap->mbox, | ||
| 2434 | 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000); | ||
| 2435 | return ret; | ||
| 2436 | } | ||
| 2437 | EXPORT_SYMBOL(cxgb4_flush_eq_cache); | ||
| 2438 | |||
| 2439 | static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx) | ||
| 2440 | { | ||
| 2441 | u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8; | ||
| 2442 | __be64 indices; | ||
| 2443 | int ret; | ||
| 2444 | |||
| 2445 | ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8); | ||
| 2446 | if (!ret) { | ||
| 2447 | indices = be64_to_cpu(indices); | ||
| 2448 | *cidx = (indices >> 25) & 0xffff; | ||
| 2449 | *pidx = (indices >> 9) & 0xffff; | ||
| 2450 | } | ||
| 2451 | return ret; | ||
| 2452 | } | ||
| 2453 | |||
| 2454 | int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, | ||
| 2455 | u16 size) | ||
| 2456 | { | ||
| 2457 | struct adapter *adap = netdev2adap(dev); | ||
| 2458 | u16 hw_pidx, hw_cidx; | ||
| 2459 | int ret; | ||
| 2460 | |||
| 2461 | ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx); | ||
| 2462 | if (ret) | ||
| 2463 | goto out; | ||
| 2464 | |||
| 2465 | if (pidx != hw_pidx) { | ||
| 2466 | u16 delta; | ||
| 2467 | |||
| 2468 | if (pidx >= hw_pidx) | ||
| 2469 | delta = pidx - hw_pidx; | ||
| 2470 | else | ||
| 2471 | delta = size - hw_pidx + pidx; | ||
| 2472 | wmb(); | ||
| 2473 | t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), | ||
| 2474 | V_QID(qid) | V_PIDX(delta)); | ||
| 2475 | } | ||
| 2476 | out: | ||
| 2477 | return ret; | ||
| 2478 | } | ||
| 2479 | EXPORT_SYMBOL(cxgb4_sync_txq_pidx); | ||
| 2480 | |||
| 2416 | static struct pci_driver cxgb4_driver; | 2481 | static struct pci_driver cxgb4_driver; |
| 2417 | 2482 | ||
| 2418 | static void check_neigh_update(struct neighbour *neigh) | 2483 | static void check_neigh_update(struct neighbour *neigh) |
| @@ -2446,6 +2511,144 @@ static struct notifier_block cxgb4_netevent_nb = { | |||
| 2446 | .notifier_call = netevent_cb | 2511 | .notifier_call = netevent_cb |
| 2447 | }; | 2512 | }; |
| 2448 | 2513 | ||
| 2514 | static void drain_db_fifo(struct adapter *adap, int usecs) | ||
| 2515 | { | ||
| 2516 | u32 v; | ||
| 2517 | |||
| 2518 | do { | ||
| 2519 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
| 2520 | schedule_timeout(usecs_to_jiffies(usecs)); | ||
| 2521 | v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); | ||
| 2522 | if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0) | ||
| 2523 | break; | ||
| 2524 | } while (1); | ||
| 2525 | } | ||
| 2526 | |||
| 2527 | static void disable_txq_db(struct sge_txq *q) | ||
| 2528 | { | ||
| 2529 | spin_lock_irq(&q->db_lock); | ||
| 2530 | q->db_disabled = 1; | ||
| 2531 | spin_unlock_irq(&q->db_lock); | ||
| 2532 | } | ||
| 2533 | |||
| 2534 | static void enable_txq_db(struct sge_txq *q) | ||
| 2535 | { | ||
| 2536 | spin_lock_irq(&q->db_lock); | ||
| 2537 | q->db_disabled = 0; | ||
| 2538 | spin_unlock_irq(&q->db_lock); | ||
| 2539 | } | ||
| 2540 | |||
| 2541 | static void disable_dbs(struct adapter *adap) | ||
| 2542 | { | ||
| 2543 | int i; | ||
| 2544 | |||
| 2545 | for_each_ethrxq(&adap->sge, i) | ||
| 2546 | disable_txq_db(&adap->sge.ethtxq[i].q); | ||
| 2547 | for_each_ofldrxq(&adap->sge, i) | ||
| 2548 | disable_txq_db(&adap->sge.ofldtxq[i].q); | ||
| 2549 | for_each_port(adap, i) | ||
| 2550 | disable_txq_db(&adap->sge.ctrlq[i].q); | ||
| 2551 | } | ||
| 2552 | |||
| 2553 | static void enable_dbs(struct adapter *adap) | ||
| 2554 | { | ||
| 2555 | int i; | ||
| 2556 | |||
| 2557 | for_each_ethrxq(&adap->sge, i) | ||
| 2558 | enable_txq_db(&adap->sge.ethtxq[i].q); | ||
| 2559 | for_each_ofldrxq(&adap->sge, i) | ||
| 2560 | enable_txq_db(&adap->sge.ofldtxq[i].q); | ||
| 2561 | for_each_port(adap, i) | ||
| 2562 | enable_txq_db(&adap->sge.ctrlq[i].q); | ||
| 2563 | } | ||
| 2564 | |||
| 2565 | static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) | ||
| 2566 | { | ||
| 2567 | u16 hw_pidx, hw_cidx; | ||
| 2568 | int ret; | ||
| 2569 | |||
| 2570 | spin_lock_bh(&q->db_lock); | ||
| 2571 | ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); | ||
| 2572 | if (ret) | ||
| 2573 | goto out; | ||
| 2574 | if (q->db_pidx != hw_pidx) { | ||
| 2575 | u16 delta; | ||
| 2576 | |||
| 2577 | if (q->db_pidx >= hw_pidx) | ||
| 2578 | delta = q->db_pidx - hw_pidx; | ||
| 2579 | else | ||
| 2580 | delta = q->size - hw_pidx + q->db_pidx; | ||
| 2581 | wmb(); | ||
| 2582 | t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), | ||
| 2583 | V_QID(q->cntxt_id) | V_PIDX(delta)); | ||
| 2584 | } | ||
| 2585 | out: | ||
| 2586 | q->db_disabled = 0; | ||
| 2587 | spin_unlock_bh(&q->db_lock); | ||
| 2588 | if (ret) | ||
| 2589 | CH_WARN(adap, "DB drop recovery failed.\n"); | ||
| 2590 | } | ||
| 2591 | static void recover_all_queues(struct adapter *adap) | ||
| 2592 | { | ||
| 2593 | int i; | ||
| 2594 | |||
| 2595 | for_each_ethrxq(&adap->sge, i) | ||
| 2596 | sync_txq_pidx(adap, &adap->sge.ethtxq[i].q); | ||
| 2597 | for_each_ofldrxq(&adap->sge, i) | ||
| 2598 | sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q); | ||
| 2599 | for_each_port(adap, i) | ||
| 2600 | sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); | ||
| 2601 | } | ||
| 2602 | |||
| 2603 | static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd) | ||
| 2604 | { | ||
| 2605 | mutex_lock(&uld_mutex); | ||
| 2606 | if (adap->uld_handle[CXGB4_ULD_RDMA]) | ||
| 2607 | ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA], | ||
| 2608 | cmd); | ||
| 2609 | mutex_unlock(&uld_mutex); | ||
| 2610 | } | ||
| 2611 | |||
| 2612 | static void process_db_full(struct work_struct *work) | ||
| 2613 | { | ||
| 2614 | struct adapter *adap; | ||
| 2615 | |||
| 2616 | adap = container_of(work, struct adapter, db_full_task); | ||
| 2617 | |||
| 2618 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); | ||
| 2619 | drain_db_fifo(adap, dbfifo_drain_delay); | ||
| 2620 | t4_set_reg_field(adap, A_SGE_INT_ENABLE3, | ||
| 2621 | F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, | ||
| 2622 | F_DBFIFO_HP_INT | F_DBFIFO_LP_INT); | ||
| 2623 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); | ||
| 2624 | } | ||
| 2625 | |||
| 2626 | static void process_db_drop(struct work_struct *work) | ||
| 2627 | { | ||
| 2628 | struct adapter *adap; | ||
| 2629 | |||
| 2630 | adap = container_of(work, struct adapter, db_drop_task); | ||
| 2631 | |||
| 2632 | t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0); | ||
| 2633 | disable_dbs(adap); | ||
| 2634 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); | ||
| 2635 | drain_db_fifo(adap, 1); | ||
| 2636 | recover_all_queues(adap); | ||
| 2637 | enable_dbs(adap); | ||
| 2638 | } | ||
| 2639 | |||
| 2640 | void t4_db_full(struct adapter *adap) | ||
| 2641 | { | ||
| 2642 | t4_set_reg_field(adap, A_SGE_INT_ENABLE3, | ||
| 2643 | F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, 0); | ||
| 2644 | queue_work(workq, &adap->db_full_task); | ||
| 2645 | } | ||
| 2646 | |||
| 2647 | void t4_db_dropped(struct adapter *adap) | ||
| 2648 | { | ||
| 2649 | queue_work(workq, &adap->db_drop_task); | ||
| 2650 | } | ||
| 2651 | |||
| 2449 | static void uld_attach(struct adapter *adap, unsigned int uld) | 2652 | static void uld_attach(struct adapter *adap, unsigned int uld) |
| 2450 | { | 2653 | { |
| 2451 | void *handle; | 2654 | void *handle; |
| @@ -2479,6 +2682,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) | |||
| 2479 | lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); | 2682 | lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); |
| 2480 | lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); | 2683 | lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); |
| 2481 | lli.fw_vers = adap->params.fw_vers; | 2684 | lli.fw_vers = adap->params.fw_vers; |
| 2685 | lli.dbfifo_int_thresh = dbfifo_int_thresh; | ||
| 2482 | 2686 | ||
| 2483 | handle = ulds[uld].add(&lli); | 2687 | handle = ulds[uld].add(&lli); |
| 2484 | if (IS_ERR(handle)) { | 2688 | if (IS_ERR(handle)) { |
| @@ -2649,6 +2853,8 @@ static void cxgb_down(struct adapter *adapter) | |||
| 2649 | { | 2853 | { |
| 2650 | t4_intr_disable(adapter); | 2854 | t4_intr_disable(adapter); |
| 2651 | cancel_work_sync(&adapter->tid_release_task); | 2855 | cancel_work_sync(&adapter->tid_release_task); |
| 2856 | cancel_work_sync(&adapter->db_full_task); | ||
| 2857 | cancel_work_sync(&adapter->db_drop_task); | ||
| 2652 | adapter->tid_release_task_busy = false; | 2858 | adapter->tid_release_task_busy = false; |
| 2653 | adapter->tid_release_head = NULL; | 2859 | adapter->tid_release_head = NULL; |
| 2654 | 2860 | ||
| @@ -3593,6 +3799,7 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
| 3593 | 3799 | ||
| 3594 | adapter->pdev = pdev; | 3800 | adapter->pdev = pdev; |
| 3595 | adapter->pdev_dev = &pdev->dev; | 3801 | adapter->pdev_dev = &pdev->dev; |
| 3802 | adapter->mbox = func; | ||
| 3596 | adapter->fn = func; | 3803 | adapter->fn = func; |
| 3597 | adapter->msg_enable = dflt_msg_enable; | 3804 | adapter->msg_enable = dflt_msg_enable; |
| 3598 | memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); | 3805 | memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); |
| @@ -3601,6 +3808,8 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
| 3601 | spin_lock_init(&adapter->tid_release_lock); | 3808 | spin_lock_init(&adapter->tid_release_lock); |
| 3602 | 3809 | ||
| 3603 | INIT_WORK(&adapter->tid_release_task, process_tid_release_list); | 3810 | INIT_WORK(&adapter->tid_release_task, process_tid_release_list); |
| 3811 | INIT_WORK(&adapter->db_full_task, process_db_full); | ||
| 3812 | INIT_WORK(&adapter->db_drop_task, process_db_drop); | ||
| 3604 | 3813 | ||
| 3605 | err = t4_prep_adapter(adapter); | 3814 | err = t4_prep_adapter(adapter); |
| 3606 | if (err) | 3815 | if (err) |
| @@ -3788,6 +3997,10 @@ static int __init cxgb4_init_module(void) | |||
| 3788 | { | 3997 | { |
| 3789 | int ret; | 3998 | int ret; |
| 3790 | 3999 | ||
| 4000 | workq = create_singlethread_workqueue("cxgb4"); | ||
| 4001 | if (!workq) | ||
| 4002 | return -ENOMEM; | ||
| 4003 | |||
| 3791 | /* Debugfs support is optional, just warn if this fails */ | 4004 | /* Debugfs support is optional, just warn if this fails */ |
| 3792 | cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); | 4005 | cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); |
| 3793 | if (!cxgb4_debugfs_root) | 4006 | if (!cxgb4_debugfs_root) |
| @@ -3803,6 +4016,8 @@ static void __exit cxgb4_cleanup_module(void) | |||
| 3803 | { | 4016 | { |
| 3804 | pci_unregister_driver(&cxgb4_driver); | 4017 | pci_unregister_driver(&cxgb4_driver); |
| 3805 | debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ | 4018 | debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ |
| 4019 | flush_workqueue(workq); | ||
| 4020 | destroy_workqueue(workq); | ||
| 3806 | } | 4021 | } |
| 3807 | 4022 | ||
| 3808 | module_init(cxgb4_init_module); | 4023 | module_init(cxgb4_init_module); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index b1d39b8d141a..d79980c5fc63 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | |||
| @@ -163,6 +163,12 @@ enum cxgb4_state { | |||
| 163 | CXGB4_STATE_DETACH | 163 | CXGB4_STATE_DETACH |
| 164 | }; | 164 | }; |
| 165 | 165 | ||
| 166 | enum cxgb4_control { | ||
| 167 | CXGB4_CONTROL_DB_FULL, | ||
| 168 | CXGB4_CONTROL_DB_EMPTY, | ||
| 169 | CXGB4_CONTROL_DB_DROP, | ||
| 170 | }; | ||
| 171 | |||
| 166 | struct pci_dev; | 172 | struct pci_dev; |
| 167 | struct l2t_data; | 173 | struct l2t_data; |
| 168 | struct net_device; | 174 | struct net_device; |
| @@ -212,6 +218,7 @@ struct cxgb4_lld_info { | |||
| 212 | unsigned short ucq_density; /* # of user CQs/page */ | 218 | unsigned short ucq_density; /* # of user CQs/page */ |
| 213 | void __iomem *gts_reg; /* address of GTS register */ | 219 | void __iomem *gts_reg; /* address of GTS register */ |
| 214 | void __iomem *db_reg; /* address of kernel doorbell */ | 220 | void __iomem *db_reg; /* address of kernel doorbell */ |
| 221 | int dbfifo_int_thresh; /* doorbell fifo int threshold */ | ||
| 215 | }; | 222 | }; |
| 216 | 223 | ||
| 217 | struct cxgb4_uld_info { | 224 | struct cxgb4_uld_info { |
| @@ -220,11 +227,13 @@ struct cxgb4_uld_info { | |||
| 220 | int (*rx_handler)(void *handle, const __be64 *rsp, | 227 | int (*rx_handler)(void *handle, const __be64 *rsp, |
| 221 | const struct pkt_gl *gl); | 228 | const struct pkt_gl *gl); |
| 222 | int (*state_change)(void *handle, enum cxgb4_state new_state); | 229 | int (*state_change)(void *handle, enum cxgb4_state new_state); |
| 230 | int (*control)(void *handle, enum cxgb4_control control, ...); | ||
| 223 | }; | 231 | }; |
| 224 | 232 | ||
| 225 | int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); | 233 | int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); |
| 226 | int cxgb4_unregister_uld(enum cxgb4_uld type); | 234 | int cxgb4_unregister_uld(enum cxgb4_uld type); |
| 227 | int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb); | 235 | int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb); |
| 236 | unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo); | ||
| 228 | unsigned int cxgb4_port_chan(const struct net_device *dev); | 237 | unsigned int cxgb4_port_chan(const struct net_device *dev); |
| 229 | unsigned int cxgb4_port_viid(const struct net_device *dev); | 238 | unsigned int cxgb4_port_viid(const struct net_device *dev); |
| 230 | unsigned int cxgb4_port_idx(const struct net_device *dev); | 239 | unsigned int cxgb4_port_idx(const struct net_device *dev); |
| @@ -236,4 +245,6 @@ void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, | |||
| 236 | const unsigned int *pgsz_order); | 245 | const unsigned int *pgsz_order); |
| 237 | struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, | 246 | struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, |
| 238 | unsigned int skb_len, unsigned int pull_len); | 247 | unsigned int skb_len, unsigned int pull_len); |
| 248 | int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size); | ||
| 249 | int cxgb4_flush_eq_cache(struct net_device *dev); | ||
| 239 | #endif /* !__CXGB4_OFLD_H */ | 250 | #endif /* !__CXGB4_OFLD_H */ |
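Editor's note: a ULD that takes part in doorbell recovery supplies the new control() callback and, on CXGB4_CONTROL_DB_DROP, resyncs its own queues with the helpers exported above. A hedged sketch — struct my_dev, my_stop_db()/my_resume_db() and the queue fields are illustrative, not part of cxgb4:

	static int my_uld_control(void *handle, enum cxgb4_control control, ...)
	{
		struct my_dev *dev = handle;

		switch (control) {
		case CXGB4_CONTROL_DB_FULL:
			my_stop_db(dev);	/* stop ringing doorbells */
			break;
		case CXGB4_CONTROL_DB_EMPTY:
			my_resume_db(dev);	/* FIFO drained, resume */
			break;
		case CXGB4_CONTROL_DB_DROP:
			/* a doorbell was lost: resync this egress queue's PIDX */
			cxgb4_sync_txq_pidx(dev->netdev, dev->txq_id,
					    dev->txq_pidx, dev->txq_size);
			break;
		}
		return 0;
	}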
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 2dae7959f000..e111d974afd8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
| @@ -767,8 +767,13 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, | |||
| 767 | static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) | 767 | static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) |
| 768 | { | 768 | { |
| 769 | wmb(); /* write descriptors before telling HW */ | 769 | wmb(); /* write descriptors before telling HW */ |
| 770 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), | 770 | spin_lock(&q->db_lock); |
| 771 | QID(q->cntxt_id) | PIDX(n)); | 771 | if (!q->db_disabled) { |
| 772 | t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), | ||
| 773 | V_QID(q->cntxt_id) | V_PIDX(n)); | ||
| 774 | } | ||
| 775 | q->db_pidx = q->pidx; | ||
| 776 | spin_unlock(&q->db_lock); | ||
| 772 | } | 777 | } |
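Editor's note: ring_tx_db() now updates q->db_pidx under q->db_lock even while doorbells are disabled; that saved software producer index is exactly what sync_txq_pidx() in cxgb4_main.c later compares against the hardware PIDX. The invariant, restated with annotations:

	/* Sketch of the invariant kept by ring_tx_db() above. */
	spin_lock(&q->db_lock);
	if (!q->db_disabled)			/* recovery in progress? skip MMIO */
		t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
			     V_QID(q->cntxt_id) | V_PIDX(n));
	q->db_pidx = q->pidx;			/* always remember where we are, so
						 * recovery can replay the delta later */
	spin_unlock(&q->db_lock);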
| 773 | 778 | ||
| 774 | /** | 779 | /** |
| @@ -2081,6 +2086,7 @@ static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) | |||
| 2081 | q->stops = q->restarts = 0; | 2086 | q->stops = q->restarts = 0; |
| 2082 | q->stat = (void *)&q->desc[q->size]; | 2087 | q->stat = (void *)&q->desc[q->size]; |
| 2083 | q->cntxt_id = id; | 2088 | q->cntxt_id = id; |
| 2089 | spin_lock_init(&q->db_lock); | ||
| 2084 | adap->sge.egr_map[id - adap->sge.egr_start] = q; | 2090 | adap->sge.egr_map[id - adap->sge.egr_start] = q; |
| 2085 | } | 2091 | } |
| 2086 | 2092 | ||
| @@ -2415,6 +2421,18 @@ void t4_sge_init(struct adapter *adap) | |||
| 2415 | RXPKTCPLMODE | | 2421 | RXPKTCPLMODE | |
| 2416 | (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0)); | 2422 | (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0)); |
| 2417 | 2423 | ||
| 2424 | /* | ||
| 2425 | * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows | ||
| 2426 | * and generate an interrupt when this occurs so we can recover. | ||
| 2427 | */ | ||
| 2428 | t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS, | ||
| 2429 | V_HP_INT_THRESH(M_HP_INT_THRESH) | | ||
| 2430 | V_LP_INT_THRESH(M_LP_INT_THRESH), | ||
| 2431 | V_HP_INT_THRESH(dbfifo_int_thresh) | | ||
| 2432 | V_LP_INT_THRESH(dbfifo_int_thresh)); | ||
| 2433 | t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP, | ||
| 2434 | F_ENABLE_DROP); | ||
| 2435 | |||
| 2418 | for (i = v = 0; i < 32; i += 4) | 2436 | for (i = v = 0; i < 32; i += 4) |
| 2419 | v |= (PAGE_SHIFT - 10) << i; | 2437 | v |= (PAGE_SHIFT - 10) << i; |
| 2420 | t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v); | 2438 | t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v); |
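Editor's note: the two t4_set_reg_field() calls above program 4-bit interrupt thresholds into A_SGE_DBFIFO_STATUS using the shift/mask macros this patch adds to t4_regs.h. A worked sketch of the field packing, using 10 as an illustrative threshold value:

	/* Sketch: pack a 4-bit threshold into HP (bits 31:28) and LP (bits 15:12). */
	u32 mask = V_HP_INT_THRESH(M_HP_INT_THRESH) |	/* 0xf << 28 */
		   V_LP_INT_THRESH(M_LP_INT_THRESH);	/* 0xf << 12 */
	u32 val  = V_HP_INT_THRESH(10) | V_LP_INT_THRESH(10);
	/* t4_set_reg_field() then performs reg = (reg & ~mask) | val. */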
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index d1ec111aebd8..32e1dd566a14 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
| @@ -868,11 +868,14 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) | |||
| 868 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | 868 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); |
| 869 | } | 869 | } |
| 870 | 870 | ||
| 871 | typedef void (*int_handler_t)(struct adapter *adap); | ||
| 872 | |||
| 871 | struct intr_info { | 873 | struct intr_info { |
| 872 | unsigned int mask; /* bits to check in interrupt status */ | 874 | unsigned int mask; /* bits to check in interrupt status */ |
| 873 | const char *msg; /* message to print or NULL */ | 875 | const char *msg; /* message to print or NULL */ |
| 874 | short stat_idx; /* stat counter to increment or -1 */ | 876 | short stat_idx; /* stat counter to increment or -1 */ |
| 875 | unsigned short fatal; /* whether the condition reported is fatal */ | 877 | unsigned short fatal; /* whether the condition reported is fatal */ |
| 878 | int_handler_t int_handler; /* platform-specific int handler */ | ||
| 876 | }; | 879 | }; |
| 877 | 880 | ||
| 878 | /** | 881 | /** |
| @@ -905,6 +908,8 @@ static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg, | |||
| 905 | } else if (acts->msg && printk_ratelimit()) | 908 | } else if (acts->msg && printk_ratelimit()) |
| 906 | dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, | 909 | dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, |
| 907 | status & acts->mask); | 910 | status & acts->mask); |
| 911 | if (acts->int_handler) | ||
| 912 | acts->int_handler(adapter); | ||
| 908 | mask |= acts->mask; | 913 | mask |= acts->mask; |
| 909 | } | 914 | } |
| 910 | status &= mask; | 915 | status &= mask; |
| @@ -1013,7 +1018,9 @@ static void sge_intr_handler(struct adapter *adapter) | |||
| 1013 | { ERR_INVALID_CIDX_INC, | 1018 | { ERR_INVALID_CIDX_INC, |
| 1014 | "SGE GTS CIDX increment too large", -1, 0 }, | 1019 | "SGE GTS CIDX increment too large", -1, 0 }, |
| 1015 | { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, | 1020 | { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, |
| 1016 | { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, | 1021 | { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full }, |
| 1022 | { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full }, | ||
| 1023 | { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped }, | ||
| 1017 | { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, | 1024 | { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, |
| 1018 | "SGE IQID > 1023 received CPL for FL", -1, 0 }, | 1025 | "SGE IQID > 1023 received CPL for FL", -1, 0 }, |
| 1019 | { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, | 1026 | { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, |
| @@ -1034,10 +1041,10 @@ static void sge_intr_handler(struct adapter *adapter) | |||
| 1034 | }; | 1041 | }; |
| 1035 | 1042 | ||
| 1036 | v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) | | 1043 | v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) | |
| 1037 | ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32); | 1044 | ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32); |
| 1038 | if (v) { | 1045 | if (v) { |
| 1039 | dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n", | 1046 | dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n", |
| 1040 | (unsigned long long)v); | 1047 | (unsigned long long)v); |
| 1041 | t4_write_reg(adapter, SGE_INT_CAUSE1, v); | 1048 | t4_write_reg(adapter, SGE_INT_CAUSE1, v); |
| 1042 | t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32); | 1049 | t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32); |
| 1043 | } | 1050 | } |
| @@ -1513,6 +1520,7 @@ void t4_intr_enable(struct adapter *adapter) | |||
| 1513 | ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | | 1520 | ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | |
| 1514 | ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | | 1521 | ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | |
| 1515 | ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | | 1522 | ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | |
| 1523 | F_DBFIFO_HP_INT | F_DBFIFO_LP_INT | | ||
| 1516 | EGRESS_SIZE_ERR); | 1524 | EGRESS_SIZE_ERR); |
| 1517 | t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); | 1525 | t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); |
| 1518 | t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); | 1526 | t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); |
| @@ -1986,6 +1994,54 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, | |||
| 1986 | (var).retval_len16 = htonl(FW_LEN16(var)); \ | 1994 | (var).retval_len16 = htonl(FW_LEN16(var)); \ |
| 1987 | } while (0) | 1995 | } while (0) |
| 1988 | 1996 | ||
| 1997 | int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, | ||
| 1998 | u32 addr, u32 val) | ||
| 1999 | { | ||
| 2000 | struct fw_ldst_cmd c; | ||
| 2001 | |||
| 2002 | memset(&c, 0, sizeof(c)); | ||
| 2003 | c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | | ||
| 2004 | F_FW_CMD_WRITE | | ||
| 2005 | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE)); | ||
| 2006 | c.cycles_to_len16 = htonl(FW_LEN16(c)); | ||
| 2007 | c.u.addrval.addr = htonl(addr); | ||
| 2008 | c.u.addrval.val = htonl(val); | ||
| 2009 | |||
| 2010 | return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); | ||
| 2011 | } | ||
| 2012 | |||
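Editor's note: t4_fwaddrspace_write() wraps a register write in a FW_LDST mailbox command so the write is performed by firmware rather than directly through the PCIe register window. A hedged usage sketch — the address and value are placeholders, not a write the driver actually issues:

	/* Sketch: have firmware write a register on the driver's behalf. */
	int ret = t4_fwaddrspace_write(adap, adap->mbox, 0x10a4, 0x0);
	if (ret)
		dev_warn(adap->pdev_dev, "fwaddrspace write failed: %d\n", ret);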
| 2013 | /* | ||
| 2014 | * t4_mem_win_read_len - read memory through PCIE memory window | ||
| 2015 | * @adap: the adapter | ||
| 2016 | * @addr: address of the first byte requested, aligned on 32b. | ||
| 2017 | * @data: buffer of len bytes to hold the data read | ||
| 2018 | * @len: amount of data to read from the window. Must be <= | ||
| 2019 | * MEMWIN0_APERTURE after adjusting for the 16B alignment | ||
| 2020 | * requirements of the memory window. | ||
| 2021 | * | ||
| 2022 | * Read len bytes of data from MC starting at @addr. | ||
| 2023 | */ | ||
| 2024 | int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len) | ||
| 2025 | { | ||
| 2026 | int i; | ||
| 2027 | int off; | ||
| 2028 | |||
| 2029 | /* | ||
| 2030 | * Align on a 16B boundary. | ||
| 2031 | */ | ||
| 2032 | off = addr & 15; | ||
| 2033 | if ((addr & 3) || (len + off) > MEMWIN0_APERTURE) | ||
| 2034 | return -EINVAL; | ||
| 2035 | |||
| 2036 | t4_write_reg(adap, A_PCIE_MEM_ACCESS_OFFSET, addr & ~15); | ||
| 2037 | t4_read_reg(adap, A_PCIE_MEM_ACCESS_OFFSET); | ||
| 2038 | |||
| 2039 | for (i = 0; i < len; i += 4) | ||
| 2040 | *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i)); | ||
| 2041 | |||
| 2042 | return 0; | ||
| 2043 | } | ||
| 2044 | |||
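Editor's note: the alignment logic above splits @addr into a 16-byte-aligned window base and a byte offset within the window. A worked sketch with a made-up address:

	/* Sketch: addr = 0x12344 (4-byte aligned, so the -EINVAL check passes). */
	u32 addr = 0x12344;
	int off  = addr & 15;	/* 0x4: offset inside the 16B-aligned window */
	u32 base = addr & ~15;	/* 0x12340: written to A_PCIE_MEM_ACCESS_OFFSET */
	/* data is then read from MEMWIN0_BASE + off + i, 4 bytes at a time. */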
| 1989 | /** | 2045 | /** |
| 1990 | * t4_mdio_rd - read a PHY register through MDIO | 2046 | * t4_mdio_rd - read a PHY register through MDIO |
| 1991 | * @adap: the adapter | 2047 | * @adap: the adapter |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 0adc5bcec7c4..111fc323f155 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | |||
| @@ -190,6 +190,59 @@ | |||
| 190 | #define SGE_DEBUG_DATA_LOW 0x10d4 | 190 | #define SGE_DEBUG_DATA_LOW 0x10d4 |
| 191 | #define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 | 191 | #define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 |
| 192 | 192 | ||
| 193 | #define S_LP_INT_THRESH 12 | ||
| 194 | #define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH) | ||
| 195 | #define S_HP_INT_THRESH 28 | ||
| 196 | #define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH) | ||
| 197 | #define A_SGE_DBFIFO_STATUS 0x10a4 | ||
| 198 | |||
| 199 | #define S_ENABLE_DROP 13 | ||
| 200 | #define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP) | ||
| 201 | #define F_ENABLE_DROP V_ENABLE_DROP(1U) | ||
| 202 | #define A_SGE_DOORBELL_CONTROL 0x10a8 | ||
| 203 | |||
| 204 | #define A_SGE_CTXT_CMD 0x11fc | ||
| 205 | #define A_SGE_DBQ_CTXT_BADDR 0x1084 | ||
| 206 | |||
| 207 | #define A_SGE_PF_KDOORBELL 0x0 | ||
| 208 | |||
| 209 | #define S_QID 15 | ||
| 210 | #define V_QID(x) ((x) << S_QID) | ||
| 211 | |||
| 212 | #define S_PIDX 0 | ||
| 213 | #define V_PIDX(x) ((x) << S_PIDX) | ||
| 214 | |||
| 215 | #define M_LP_COUNT 0x7ffU | ||
| 216 | #define S_LP_COUNT 0 | ||
| 217 | #define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT) | ||
| 218 | |||
| 219 | #define M_HP_COUNT 0x7ffU | ||
| 220 | #define S_HP_COUNT 16 | ||
| 221 | #define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT) | ||
| 222 | |||
| 223 | #define A_SGE_INT_ENABLE3 0x1040 | ||
| 224 | |||
| 225 | #define S_DBFIFO_HP_INT 8 | ||
| 226 | #define V_DBFIFO_HP_INT(x) ((x) << S_DBFIFO_HP_INT) | ||
| 227 | #define F_DBFIFO_HP_INT V_DBFIFO_HP_INT(1U) | ||
| 228 | |||
| 229 | #define S_DBFIFO_LP_INT 7 | ||
| 230 | #define V_DBFIFO_LP_INT(x) ((x) << S_DBFIFO_LP_INT) | ||
| 231 | #define F_DBFIFO_LP_INT V_DBFIFO_LP_INT(1U) | ||
| 232 | |||
| 233 | #define S_DROPPED_DB 0 | ||
| 234 | #define V_DROPPED_DB(x) ((x) << S_DROPPED_DB) | ||
| 235 | #define F_DROPPED_DB V_DROPPED_DB(1U) | ||
| 236 | |||
| 237 | #define S_ERR_DROPPED_DB 18 | ||
| 238 | #define V_ERR_DROPPED_DB(x) ((x) << S_ERR_DROPPED_DB) | ||
| 239 | #define F_ERR_DROPPED_DB V_ERR_DROPPED_DB(1U) | ||
| 240 | |||
| 241 | #define A_PCIE_MEM_ACCESS_OFFSET 0x306c | ||
| 242 | |||
| 243 | #define M_HP_INT_THRESH 0xfU | ||
| 244 | #define M_LP_INT_THRESH 0xfU | ||
| 245 | |||
| 193 | #define PCIE_PF_CLI 0x44 | 246 | #define PCIE_PF_CLI 0x44 |
| 194 | #define PCIE_INT_CAUSE 0x3004 | 247 | #define PCIE_INT_CAUSE 0x3004 |
| 195 | #define UNXSPLCPLERR 0x20000000U | 248 | #define UNXSPLCPLERR 0x20000000U |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index edcfd7ec7802..ad53f796b574 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | |||
| @@ -1620,4 +1620,19 @@ struct fw_hdr { | |||
| 1620 | #define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) | 1620 | #define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) |
| 1621 | #define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) | 1621 | #define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) |
| 1622 | #define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff) | 1622 | #define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff) |
| 1623 | |||
| 1624 | #define S_FW_CMD_OP 24 | ||
| 1625 | #define V_FW_CMD_OP(x) ((x) << S_FW_CMD_OP) | ||
| 1626 | |||
| 1627 | #define S_FW_CMD_REQUEST 23 | ||
| 1628 | #define V_FW_CMD_REQUEST(x) ((x) << S_FW_CMD_REQUEST) | ||
| 1629 | #define F_FW_CMD_REQUEST V_FW_CMD_REQUEST(1U) | ||
| 1630 | |||
| 1631 | #define S_FW_CMD_WRITE 21 | ||
| 1632 | #define V_FW_CMD_WRITE(x) ((x) << S_FW_CMD_WRITE) | ||
| 1633 | #define F_FW_CMD_WRITE V_FW_CMD_WRITE(1U) | ||
| 1634 | |||
| 1635 | #define S_FW_LDST_CMD_ADDRSPACE 0 | ||
| 1636 | #define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE) | ||
| 1637 | |||
| 1623 | #endif /* _T4FW_INTERFACE_H_ */ | 1638 | #endif /* _T4FW_INTERFACE_H_ */ |
diff --git a/drivers/net/ethernet/emulex/benet/Makefile b/drivers/net/ethernet/emulex/benet/Makefile index a60cd8051135..1a91b276940d 100644 --- a/drivers/net/ethernet/emulex/benet/Makefile +++ b/drivers/net/ethernet/emulex/benet/Makefile | |||
| @@ -4,4 +4,4 @@ | |||
| 4 | 4 | ||
| 5 | obj-$(CONFIG_BE2NET) += be2net.o | 5 | obj-$(CONFIG_BE2NET) += be2net.o |
| 6 | 6 | ||
| 7 | be2net-y := be_main.o be_cmds.o be_ethtool.o | 7 | be2net-y := be_main.o be_cmds.o be_ethtool.o be_roce.o |
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index ff4eb8fe25d5..c5c4c0e83bd1 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <linux/u64_stats_sync.h> | 32 | #include <linux/u64_stats_sync.h> |
| 33 | 33 | ||
| 34 | #include "be_hw.h" | 34 | #include "be_hw.h" |
| 35 | #include "be_roce.h" | ||
| 35 | 36 | ||
| 36 | #define DRV_VER "4.2.220u" | 37 | #define DRV_VER "4.2.220u" |
| 37 | #define DRV_NAME "be2net" | 38 | #define DRV_NAME "be2net" |
| @@ -102,7 +103,8 @@ static inline char *nic_name(struct pci_dev *pdev) | |||
| 102 | #define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */ | 103 | #define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */ |
| 103 | 104 | ||
| 104 | #define MAX_TX_QS 8 | 105 | #define MAX_TX_QS 8 |
| 105 | #define MAX_MSIX_VECTORS MAX_RSS_QS | 106 | #define MAX_ROCE_EQS 5 |
| 107 | #define MAX_MSIX_VECTORS (MAX_RSS_QS + MAX_ROCE_EQS) /* RSS qs + RoCE */ | ||
| 106 | #define BE_TX_BUDGET 256 | 108 | #define BE_TX_BUDGET 256 |
| 107 | #define BE_NAPI_WEIGHT 64 | 109 | #define BE_NAPI_WEIGHT 64 |
| 108 | #define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ | 110 | #define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ |
| @@ -405,6 +407,17 @@ struct be_adapter { | |||
| 405 | u32 tx_fc; /* Tx flow control */ | 407 | u32 tx_fc; /* Tx flow control */ |
| 406 | bool stats_cmd_sent; | 408 | bool stats_cmd_sent; |
| 407 | u8 generation; /* BladeEngine ASIC generation */ | 409 | u8 generation; /* BladeEngine ASIC generation */ |
| 410 | u32 if_type; | ||
| 411 | struct { | ||
| 412 | u8 __iomem *base; /* Doorbell */ | ||
| 413 | u32 size; | ||
| 414 | u32 total_size; | ||
| 415 | u64 io_addr; | ||
| 416 | } roce_db; | ||
| 417 | u32 num_msix_roce_vec; | ||
| 418 | struct ocrdma_dev *ocrdma_dev; | ||
| 419 | struct list_head entry; | ||
| 420 | |||
| 408 | u32 flash_status; | 421 | u32 flash_status; |
| 409 | struct completion flash_compl; | 422 | struct completion flash_compl; |
| 410 | 423 | ||
| @@ -441,6 +454,10 @@ struct be_adapter { | |||
| 441 | #define lancer_chip(adapter) ((adapter->pdev->device == OC_DEVICE_ID3) || \ | 454 | #define lancer_chip(adapter) ((adapter->pdev->device == OC_DEVICE_ID3) || \ |
| 442 | (adapter->pdev->device == OC_DEVICE_ID4)) | 455 | (adapter->pdev->device == OC_DEVICE_ID4)) |
| 443 | 456 | ||
| 457 | #define be_roce_supported(adapter) ((adapter->if_type == SLI_INTF_TYPE_3 || \ | ||
| 458 | adapter->sli_family == SKYHAWK_SLI_FAMILY) && \ | ||
| 459 | (adapter->function_mode & RDMA_ENABLED)) | ||
| 460 | |||
| 444 | extern const struct ethtool_ops be_ethtool_ops; | 461 | extern const struct ethtool_ops be_ethtool_ops; |
| 445 | 462 | ||
| 446 | #define msix_enabled(adapter) (adapter->num_msix_vec > 0) | 463 | #define msix_enabled(adapter) (adapter->num_msix_vec > 0) |
| @@ -597,6 +614,12 @@ static inline bool be_is_wol_excluded(struct be_adapter *adapter) | |||
| 597 | } | 614 | } |
| 598 | } | 615 | } |
| 599 | 616 | ||
| 617 | static inline bool be_type_2_3(struct be_adapter *adapter) | ||
| 618 | { | ||
| 619 | return (adapter->if_type == SLI_INTF_TYPE_2 || | ||
| 620 | adapter->if_type == SLI_INTF_TYPE_3) ? true : false; | ||
| 621 | } | ||
| 622 | |||
| 600 | extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, | 623 | extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, |
| 601 | u16 num_popped); | 624 | u16 num_popped); |
| 602 | extern void be_link_status_update(struct be_adapter *adapter, u8 link_status); | 625 | extern void be_link_status_update(struct be_adapter *adapter, u8 link_status); |
| @@ -606,4 +629,16 @@ extern bool be_is_wol_supported(struct be_adapter *adapter); | |||
| 606 | extern bool be_pause_supported(struct be_adapter *adapter); | 629 | extern bool be_pause_supported(struct be_adapter *adapter); |
| 607 | extern u32 be_get_fw_log_level(struct be_adapter *adapter); | 630 | extern u32 be_get_fw_log_level(struct be_adapter *adapter); |
| 608 | 631 | ||
| 632 | /* | ||
| 633 | * internal functions to initialize and clean up the roce device. | ||
| 634 | */ | ||
| 635 | extern void be_roce_dev_add(struct be_adapter *); | ||
| 636 | extern void be_roce_dev_remove(struct be_adapter *); | ||
| 637 | |||
| 638 | /* | ||
| 639 | * internal functions to open and close the roce device during ifup/ifdown. | ||
| 640 | */ | ||
| 641 | extern void be_roce_dev_open(struct be_adapter *); | ||
| 642 | extern void be_roce_dev_close(struct be_adapter *); | ||
| 643 | |||
| 609 | #endif /* BE_H */ | 644 | #endif /* BE_H */ |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index b24623cce07b..8d06ea381741 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | * Costa Mesa, CA 92626 | 15 | * Costa Mesa, CA 92626 |
| 16 | */ | 16 | */ |
| 17 | 17 | ||
| 18 | #include <linux/module.h> | ||
| 18 | #include "be.h" | 19 | #include "be.h" |
| 19 | #include "be_cmds.h" | 20 | #include "be_cmds.h" |
| 20 | 21 | ||
| @@ -2646,3 +2647,41 @@ err: | |||
| 2646 | spin_unlock_bh(&adapter->mcc_lock); | 2647 | spin_unlock_bh(&adapter->mcc_lock); |
| 2647 | return status; | 2648 | return status; |
| 2648 | } | 2649 | } |
| 2650 | |||
| 2651 | int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, | ||
| 2652 | int wrb_payload_size, u16 *cmd_status, u16 *ext_status) | ||
| 2653 | { | ||
| 2654 | struct be_adapter *adapter = netdev_priv(netdev_handle); | ||
| 2655 | struct be_mcc_wrb *wrb; | ||
| 2656 | struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload; | ||
| 2657 | struct be_cmd_req_hdr *req; | ||
| 2658 | struct be_cmd_resp_hdr *resp; | ||
| 2659 | int status; | ||
| 2660 | |||
| 2661 | spin_lock_bh(&adapter->mcc_lock); | ||
| 2662 | |||
| 2663 | wrb = wrb_from_mccq(adapter); | ||
| 2664 | if (!wrb) { | ||
| 2665 | status = -EBUSY; | ||
| 2666 | goto err; | ||
| 2667 | } | ||
| 2668 | req = embedded_payload(wrb); | ||
| 2669 | resp = embedded_payload(wrb); | ||
| 2670 | |||
| 2671 | be_wrb_cmd_hdr_prepare(req, hdr->subsystem, | ||
| 2672 | hdr->opcode, wrb_payload_size, wrb, NULL); | ||
| 2673 | memcpy(req, wrb_payload, wrb_payload_size); | ||
| 2674 | be_dws_cpu_to_le(req, wrb_payload_size); | ||
| 2675 | |||
| 2676 | status = be_mcc_notify_wait(adapter); | ||
| 2677 | if (cmd_status) | ||
| 2678 | *cmd_status = (status & 0xffff); | ||
| 2679 | if (ext_status) | ||
| 2680 | *ext_status = 0; | ||
| 2681 | memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length); | ||
| 2682 | be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length); | ||
| 2683 | err: | ||
| 2684 | spin_unlock_bh(&adapter->mcc_lock); | ||
| 2685 | return status; | ||
| 2686 | } | ||
| 2687 | EXPORT_SYMBOL(be_roce_mcc_cmd); | ||
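Editor's note: be_roce_mcc_cmd() lets the RoCE driver tunnel its own mailbox commands through be2net's MCC queue; the netdev handle is the one handed over in be_dev_info at add() time. A hedged caller sketch — struct my_roce_cmd and its subsystem/opcode values are illustrative, only the embedded be_cmd_req_hdr comes from be_cmds.h:

	struct my_roce_cmd {
		struct be_cmd_req_hdr hdr;	/* subsystem/opcode consumed above */
		u32 payload[4];
	} cmd;
	u16 cmd_status, ext_status;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.hdr.subsystem = 10;			/* hypothetical subsystem id */
	cmd.hdr.opcode = 1;			/* hypothetical opcode */
	ret = be_roce_mcc_cmd(netdev, &cmd, sizeof(cmd),
			      &cmd_status, &ext_status);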
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 0b1029b60f69..9625bf420c16 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h | |||
| @@ -1062,6 +1062,7 @@ struct be_cmd_resp_modify_eq_delay { | |||
| 1062 | /* The HW can come up in either of the following multi-channel modes | 1062 | /* The HW can come up in either of the following multi-channel modes |
| 1063 | * based on the skew/IPL. | 1063 | * based on the skew/IPL. |
| 1064 | */ | 1064 | */ |
| 1065 | #define RDMA_ENABLED 0x4 | ||
| 1065 | #define FLEX10_MODE 0x400 | 1066 | #define FLEX10_MODE 0x400 |
| 1066 | #define VNIC_MODE 0x20000 | 1067 | #define VNIC_MODE 0x20000 |
| 1067 | #define UMC_ENABLED 0x1000000 | 1068 | #define UMC_ENABLED 0x1000000 |
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index f38b58c8dbba..d9fb0c501fa1 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h | |||
| @@ -100,11 +100,13 @@ | |||
| 100 | #define SLI_INTF_REV_SHIFT 4 | 100 | #define SLI_INTF_REV_SHIFT 4 |
| 101 | #define SLI_INTF_FT_MASK 0x00000001 | 101 | #define SLI_INTF_FT_MASK 0x00000001 |
| 102 | 102 | ||
| 103 | #define SLI_INTF_TYPE_2 2 | ||
| 104 | #define SLI_INTF_TYPE_3 3 | ||
| 103 | 105 | ||
| 104 | /* SLI family */ | 106 | /* SLI family */ |
| 105 | #define BE_SLI_FAMILY 0x0 | 107 | #define BE_SLI_FAMILY 0x0 |
| 106 | #define LANCER_A0_SLI_FAMILY 0xA | 108 | #define LANCER_A0_SLI_FAMILY 0xA |
| 107 | 109 | #define SKYHAWK_SLI_FAMILY 0x2 | |
| 108 | 110 | ||
| 109 | /********* ISR0 Register offset **********/ | 111 | /********* ISR0 Register offset **********/ |
| 110 | #define CEV_ISR0_OFFSET 0xC18 | 112 | #define CEV_ISR0_OFFSET 0xC18 |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 081c77701168..08efd308d78a 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
| @@ -2151,10 +2151,17 @@ static uint be_num_rss_want(struct be_adapter *adapter) | |||
| 2151 | static void be_msix_enable(struct be_adapter *adapter) | 2151 | static void be_msix_enable(struct be_adapter *adapter) |
| 2152 | { | 2152 | { |
| 2153 | #define BE_MIN_MSIX_VECTORS 1 | 2153 | #define BE_MIN_MSIX_VECTORS 1 |
| 2154 | int i, status, num_vec; | 2154 | int i, status, num_vec, num_roce_vec = 0; |
| 2155 | 2155 | ||
| 2156 | /* If RSS queues are not used, need a vec for default RX Q */ | 2156 | /* If RSS queues are not used, need a vec for default RX Q */ |
| 2157 | num_vec = min(be_num_rss_want(adapter), num_online_cpus()); | 2157 | num_vec = min(be_num_rss_want(adapter), num_online_cpus()); |
| 2158 | if (be_roce_supported(adapter)) { | ||
| 2159 | num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS, | ||
| 2160 | (num_online_cpus() + 1)); | ||
| 2161 | num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS); | ||
| 2162 | num_vec += num_roce_vec; | ||
| 2163 | num_vec = min(num_vec, MAX_MSIX_VECTORS); | ||
| 2164 | } | ||
| 2158 | num_vec = max(num_vec, BE_MIN_MSIX_VECTORS); | 2165 | num_vec = max(num_vec, BE_MIN_MSIX_VECTORS); |
| 2159 | 2166 | ||
| 2160 | for (i = 0; i < num_vec; i++) | 2167 | for (i = 0; i < num_vec; i++) |
| @@ -2171,7 +2178,17 @@ static void be_msix_enable(struct be_adapter *adapter) | |||
| 2171 | } | 2178 | } |
| 2172 | return; | 2179 | return; |
| 2173 | done: | 2180 | done: |
| 2174 | adapter->num_msix_vec = num_vec; | 2181 | if (be_roce_supported(adapter)) { |
| 2182 | if (num_vec > num_roce_vec) { | ||
| 2183 | adapter->num_msix_vec = num_vec - num_roce_vec; | ||
| 2184 | adapter->num_msix_roce_vec = | ||
| 2185 | num_vec - adapter->num_msix_vec; | ||
| 2186 | } else { | ||
| 2187 | adapter->num_msix_vec = num_vec; | ||
| 2188 | adapter->num_msix_roce_vec = 0; | ||
| 2189 | } | ||
| 2190 | } else | ||
| 2191 | adapter->num_msix_vec = num_vec; | ||
| 2175 | return; | 2192 | return; |
| 2176 | } | 2193 | } |
| 2177 | 2194 | ||
| @@ -2283,6 +2300,8 @@ static int be_close(struct net_device *netdev) | |||
| 2283 | struct be_eq_obj *eqo; | 2300 | struct be_eq_obj *eqo; |
| 2284 | int i; | 2301 | int i; |
| 2285 | 2302 | ||
| 2303 | be_roce_dev_close(adapter); | ||
| 2304 | |||
| 2286 | be_async_mcc_disable(adapter); | 2305 | be_async_mcc_disable(adapter); |
| 2287 | 2306 | ||
| 2288 | if (!lancer_chip(adapter)) | 2307 | if (!lancer_chip(adapter)) |
| @@ -2391,6 +2410,7 @@ static int be_open(struct net_device *netdev) | |||
| 2391 | if (!status) | 2410 | if (!status) |
| 2392 | be_link_status_update(adapter, link_status); | 2411 | be_link_status_update(adapter, link_status); |
| 2393 | 2412 | ||
| 2413 | be_roce_dev_open(adapter); | ||
| 2394 | return 0; | 2414 | return 0; |
| 2395 | err: | 2415 | err: |
| 2396 | be_close(adapter->netdev); | 2416 | be_close(adapter->netdev); |
| @@ -3232,6 +3252,24 @@ static void be_unmap_pci_bars(struct be_adapter *adapter) | |||
| 3232 | iounmap(adapter->csr); | 3252 | iounmap(adapter->csr); |
| 3233 | if (adapter->db) | 3253 | if (adapter->db) |
| 3234 | iounmap(adapter->db); | 3254 | iounmap(adapter->db); |
| 3255 | if (adapter->roce_db.base) | ||
| 3256 | pci_iounmap(adapter->pdev, adapter->roce_db.base); | ||
| 3257 | } | ||
| 3258 | |||
| 3259 | static int lancer_roce_map_pci_bars(struct be_adapter *adapter) | ||
| 3260 | { | ||
| 3261 | struct pci_dev *pdev = adapter->pdev; | ||
| 3262 | u8 __iomem *addr; | ||
| 3263 | |||
| 3264 | addr = pci_iomap(pdev, 2, 0); | ||
| 3265 | if (addr == NULL) | ||
| 3266 | return -ENOMEM; | ||
| 3267 | |||
| 3268 | adapter->roce_db.base = addr; | ||
| 3269 | adapter->roce_db.io_addr = pci_resource_start(pdev, 2); | ||
| 3270 | adapter->roce_db.size = 8192; | ||
| 3271 | adapter->roce_db.total_size = pci_resource_len(pdev, 2); | ||
| 3272 | return 0; | ||
| 3235 | } | 3273 | } |
| 3236 | 3274 | ||
| 3237 | static int be_map_pci_bars(struct be_adapter *adapter) | 3275 | static int be_map_pci_bars(struct be_adapter *adapter) |
| @@ -3240,11 +3278,18 @@ static int be_map_pci_bars(struct be_adapter *adapter) | |||
| 3240 | int db_reg; | 3278 | int db_reg; |
| 3241 | 3279 | ||
| 3242 | if (lancer_chip(adapter)) { | 3280 | if (lancer_chip(adapter)) { |
| 3243 | addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0), | 3281 | if (be_type_2_3(adapter)) { |
| 3244 | pci_resource_len(adapter->pdev, 0)); | 3282 | addr = ioremap_nocache( |
| 3245 | if (addr == NULL) | 3283 | pci_resource_start(adapter->pdev, 0), |
| 3246 | return -ENOMEM; | 3284 | pci_resource_len(adapter->pdev, 0)); |
| 3247 | adapter->db = addr; | 3285 | if (addr == NULL) |
| 3286 | return -ENOMEM; | ||
| 3287 | adapter->db = addr; | ||
| 3288 | } | ||
| 3289 | if (adapter->if_type == SLI_INTF_TYPE_3) { | ||
| 3290 | if (lancer_roce_map_pci_bars(adapter)) | ||
| 3291 | goto pci_map_err; | ||
| 3292 | } | ||
| 3248 | return 0; | 3293 | return 0; |
| 3249 | } | 3294 | } |
| 3250 | 3295 | ||
| @@ -3269,14 +3314,19 @@ static int be_map_pci_bars(struct be_adapter *adapter) | |||
| 3269 | if (addr == NULL) | 3314 | if (addr == NULL) |
| 3270 | goto pci_map_err; | 3315 | goto pci_map_err; |
| 3271 | adapter->db = addr; | 3316 | adapter->db = addr; |
| 3272 | 3317 | if (adapter->sli_family == SKYHAWK_SLI_FAMILY) { | |
| 3318 | adapter->roce_db.size = 4096; | ||
| 3319 | adapter->roce_db.io_addr = | ||
| 3320 | pci_resource_start(adapter->pdev, db_reg); | ||
| 3321 | adapter->roce_db.total_size = | ||
| 3322 | pci_resource_len(adapter->pdev, db_reg); | ||
| 3323 | } | ||
| 3273 | return 0; | 3324 | return 0; |
| 3274 | pci_map_err: | 3325 | pci_map_err: |
| 3275 | be_unmap_pci_bars(adapter); | 3326 | be_unmap_pci_bars(adapter); |
| 3276 | return -ENOMEM; | 3327 | return -ENOMEM; |
| 3277 | } | 3328 | } |
| 3278 | 3329 | ||
| 3279 | |||
| 3280 | static void be_ctrl_cleanup(struct be_adapter *adapter) | 3330 | static void be_ctrl_cleanup(struct be_adapter *adapter) |
| 3281 | { | 3331 | { |
| 3282 | struct be_dma_mem *mem = &adapter->mbox_mem_alloced; | 3332 | struct be_dma_mem *mem = &adapter->mbox_mem_alloced; |
| @@ -3382,6 +3432,8 @@ static void __devexit be_remove(struct pci_dev *pdev) | |||
| 3382 | if (!adapter) | 3432 | if (!adapter) |
| 3383 | return; | 3433 | return; |
| 3384 | 3434 | ||
| 3435 | be_roce_dev_remove(adapter); | ||
| 3436 | |||
| 3385 | unregister_netdev(adapter->netdev); | 3437 | unregister_netdev(adapter->netdev); |
| 3386 | 3438 | ||
| 3387 | be_clear(adapter); | 3439 | be_clear(adapter); |
| @@ -3495,17 +3547,27 @@ static int be_dev_type_check(struct be_adapter *adapter) | |||
| 3495 | break; | 3547 | break; |
| 3496 | case BE_DEVICE_ID2: | 3548 | case BE_DEVICE_ID2: |
| 3497 | case OC_DEVICE_ID2: | 3549 | case OC_DEVICE_ID2: |
| 3498 | case OC_DEVICE_ID5: | ||
| 3499 | adapter->generation = BE_GEN3; | 3550 | adapter->generation = BE_GEN3; |
| 3500 | break; | 3551 | break; |
| 3501 | case OC_DEVICE_ID3: | 3552 | case OC_DEVICE_ID3: |
| 3502 | case OC_DEVICE_ID4: | 3553 | case OC_DEVICE_ID4: |
| 3503 | pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf); | 3554 | pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf); |
| 3555 | adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >> | ||
| 3556 | SLI_INTF_IF_TYPE_SHIFT; | ||
| 3504 | if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >> | 3557 | if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >> |
| 3505 | SLI_INTF_IF_TYPE_SHIFT; | 3558 | SLI_INTF_IF_TYPE_SHIFT; |
| 3506 | |||
| 3507 | if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) || | 3559 | if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) || |
| 3508 | if_type != 0x02) { | 3560 | !be_type_2_3(adapter)) { |
| 3561 | dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n"); | ||
| 3562 | return -EINVAL; | ||
| 3563 | } | ||
| 3564 | adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >> | ||
| 3565 | SLI_INTF_FAMILY_SHIFT); | ||
| 3566 | adapter->generation = BE_GEN3; | ||
| 3567 | break; | ||
| 3568 | case OC_DEVICE_ID5: | ||
| 3569 | pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf); | ||
| 3570 | if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) { | ||
| 3509 | dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n"); | 3571 | dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n"); |
| 3510 | return -EINVAL; | 3572 | return -EINVAL; |
| 3511 | } | 3573 | } |
| @@ -3774,6 +3836,8 @@ static int __devinit be_probe(struct pci_dev *pdev, | |||
| 3774 | if (status != 0) | 3836 | if (status != 0) |
| 3775 | goto unsetup; | 3837 | goto unsetup; |
| 3776 | 3838 | ||
| 3839 | be_roce_dev_add(adapter); | ||
| 3840 | |||
| 3777 | dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev), | 3841 | dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev), |
| 3778 | adapter->port_num); | 3842 | adapter->port_num); |
| 3779 | 3843 | ||
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c new file mode 100644 index 000000000000..deecc44b3617 --- /dev/null +++ b/drivers/net/ethernet/emulex/benet/be_roce.c | |||
| @@ -0,0 +1,182 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2005 - 2011 Emulex | ||
| 3 | * All rights reserved. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or | ||
| 6 | * modify it under the terms of the GNU General Public License version 2 | ||
| 7 | * as published by the Free Software Foundation. The full GNU General | ||
| 8 | * Public License is included in this distribution in the file called COPYING. | ||
| 9 | * | ||
| 10 | * Contact Information: | ||
| 11 | * linux-drivers@emulex.com | ||
| 12 | * | ||
| 13 | * Emulex | ||
| 14 | * 3333 Susan Street | ||
| 15 | * Costa Mesa, CA 92626 | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include <linux/mutex.h> | ||
| 19 | #include <linux/list.h> | ||
| 20 | #include <linux/netdevice.h> | ||
| 21 | #include <linux/module.h> | ||
| 22 | |||
| 23 | #include "be.h" | ||
| 24 | #include "be_cmds.h" | ||
| 25 | |||
| 26 | static struct ocrdma_driver *ocrdma_drv; | ||
| 27 | static LIST_HEAD(be_adapter_list); | ||
| 28 | static DEFINE_MUTEX(be_adapter_list_lock); | ||
| 29 | |||
| 30 | static void _be_roce_dev_add(struct be_adapter *adapter) | ||
| 31 | { | ||
| 32 | struct be_dev_info dev_info; | ||
| 33 | int i, num_vec; | ||
| 34 | struct pci_dev *pdev = adapter->pdev; | ||
| 35 | |||
| 36 | if (!ocrdma_drv) | ||
| 37 | return; | ||
| 38 | if (pdev->device == OC_DEVICE_ID5) { | ||
| 39 | /* only msix is supported on these devices */ | ||
| 40 | if (!msix_enabled(adapter)) | ||
| 41 | return; | ||
| 42 | /* DPP region address and length */ | ||
| 43 | dev_info.dpp_unmapped_addr = pci_resource_start(pdev, 2); | ||
| 44 | dev_info.dpp_unmapped_len = pci_resource_len(pdev, 2); | ||
| 45 | } else { | ||
| 46 | dev_info.dpp_unmapped_addr = 0; | ||
| 47 | dev_info.dpp_unmapped_len = 0; | ||
| 48 | } | ||
| 49 | dev_info.pdev = adapter->pdev; | ||
| 50 | if (adapter->sli_family == SKYHAWK_SLI_FAMILY) | ||
| 51 | dev_info.db = adapter->db; | ||
| 52 | else | ||
| 53 | dev_info.db = adapter->roce_db.base; | ||
| 54 | dev_info.unmapped_db = adapter->roce_db.io_addr; | ||
| 55 | dev_info.db_page_size = adapter->roce_db.size; | ||
| 56 | dev_info.db_total_size = adapter->roce_db.total_size; | ||
| 57 | dev_info.netdev = adapter->netdev; | ||
| 58 | memcpy(dev_info.mac_addr, adapter->netdev->dev_addr, ETH_ALEN); | ||
| 59 | dev_info.dev_family = adapter->sli_family; | ||
| 60 | if (msix_enabled(adapter)) { | ||
| 61 | /* provide all the vectors, so that the EQ creation response | ||
| 62 | * can decide which one to use. | ||
| 63 | */ | ||
| 64 | num_vec = adapter->num_msix_vec + adapter->num_msix_roce_vec; | ||
| 65 | dev_info.intr_mode = BE_INTERRUPT_MODE_MSIX; | ||
| 66 | dev_info.msix.num_vectors = min(num_vec, MAX_ROCE_MSIX_VECTORS); | ||
| 67 | /* provide the start index of the vectors, so that | ||
| 68 | * in the case of linear usage the RoCE driver | ||
| 69 | * can use the base as its starting point. | ||
| 70 | */ | ||
| 71 | dev_info.msix.start_vector = adapter->num_evt_qs; | ||
| 72 | for (i = 0; i < dev_info.msix.num_vectors; i++) { | ||
| 73 | dev_info.msix.vector_list[i] = | ||
| 74 | adapter->msix_entries[i].vector; | ||
| 75 | } | ||
| 76 | } else { | ||
| 77 | dev_info.msix.num_vectors = 0; | ||
| 78 | dev_info.intr_mode = BE_INTERRUPT_MODE_INTX; | ||
| 79 | } | ||
| 80 | adapter->ocrdma_dev = ocrdma_drv->add(&dev_info); | ||
| 81 | } | ||
| 82 | |||
| 83 | void be_roce_dev_add(struct be_adapter *adapter) | ||
| 84 | { | ||
| 85 | if (be_roce_supported(adapter)) { | ||
| 86 | INIT_LIST_HEAD(&adapter->entry); | ||
| 87 | mutex_lock(&be_adapter_list_lock); | ||
| 88 | list_add_tail(&adapter->entry, &be_adapter_list); | ||
| 89 | |||
| 90 | /* invoke the roce driver's add() routine only if a | ||
| 91 | * valid driver has registered an add method and add() has | ||
| 92 | * not yet been invoked on the given adapter. | ||
| 93 | */ | ||
| 94 | _be_roce_dev_add(adapter); | ||
| 95 | mutex_unlock(&be_adapter_list_lock); | ||
| 96 | } | ||
| 97 | } | ||
| 98 | |||
| 99 | void _be_roce_dev_remove(struct be_adapter *adapter) | ||
| 100 | { | ||
| 101 | if (ocrdma_drv && ocrdma_drv->remove && adapter->ocrdma_dev) | ||
| 102 | ocrdma_drv->remove(adapter->ocrdma_dev); | ||
| 103 | adapter->ocrdma_dev = NULL; | ||
| 104 | } | ||
| 105 | |||
| 106 | void be_roce_dev_remove(struct be_adapter *adapter) | ||
| 107 | { | ||
| 108 | if (be_roce_supported(adapter)) { | ||
| 109 | mutex_lock(&be_adapter_list_lock); | ||
| 110 | _be_roce_dev_remove(adapter); | ||
| 111 | list_del(&adapter->entry); | ||
| 112 | mutex_unlock(&be_adapter_list_lock); | ||
| 113 | } | ||
| 114 | } | ||
| 115 | |||
| 116 | void _be_roce_dev_open(struct be_adapter *adapter) | ||
| 117 | { | ||
| 118 | if (ocrdma_drv && adapter->ocrdma_dev && | ||
| 119 | ocrdma_drv->state_change_handler) | ||
| 120 | ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 0); | ||
| 121 | } | ||
| 122 | |||
| 123 | void be_roce_dev_open(struct be_adapter *adapter) | ||
| 124 | { | ||
| 125 | if (be_roce_supported(adapter)) { | ||
| 126 | mutex_lock(&be_adapter_list_lock); | ||
| 127 | _be_roce_dev_open(adapter); | ||
| 128 | mutex_unlock(&be_adapter_list_lock); | ||
| 129 | } | ||
| 130 | } | ||
| 131 | |||
| 132 | void _be_roce_dev_close(struct be_adapter *adapter) | ||
| 133 | { | ||
| 134 | if (ocrdma_drv && adapter->ocrdma_dev && | ||
| 135 | ocrdma_drv->state_change_handler) | ||
| 136 | ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 1); | ||
| 137 | } | ||
| 138 | |||
| 139 | void be_roce_dev_close(struct be_adapter *adapter) | ||
| 140 | { | ||
| 141 | if (be_roce_supported(adapter)) { | ||
| 142 | mutex_lock(&be_adapter_list_lock); | ||
| 143 | _be_roce_dev_close(adapter); | ||
| 144 | mutex_unlock(&be_adapter_list_lock); | ||
| 145 | } | ||
| 146 | } | ||
| 147 | |||
| 148 | int be_roce_register_driver(struct ocrdma_driver *drv) | ||
| 149 | { | ||
| 150 | struct be_adapter *dev; | ||
| 151 | |||
| 152 | mutex_lock(&be_adapter_list_lock); | ||
| 153 | if (ocrdma_drv) { | ||
| 154 | mutex_unlock(&be_adapter_list_lock); | ||
| 155 | return -EINVAL; | ||
| 156 | } | ||
| 157 | ocrdma_drv = drv; | ||
| 158 | list_for_each_entry(dev, &be_adapter_list, entry) { | ||
| 159 | struct net_device *netdev; | ||
| 160 | _be_roce_dev_add(dev); | ||
| 161 | netdev = dev->netdev; | ||
| 162 | if (netif_running(netdev) && netif_oper_up(netdev)) | ||
| 163 | _be_roce_dev_open(dev); | ||
| 164 | } | ||
| 165 | mutex_unlock(&be_adapter_list_lock); | ||
| 166 | return 0; | ||
| 167 | } | ||
| 168 | EXPORT_SYMBOL(be_roce_register_driver); | ||
| 169 | |||
| 170 | void be_roce_unregister_driver(struct ocrdma_driver *drv) | ||
| 171 | { | ||
| 172 | struct be_adapter *dev; | ||
| 173 | |||
| 174 | mutex_lock(&be_adapter_list_lock); | ||
| 175 | list_for_each_entry(dev, &be_adapter_list, entry) { | ||
| 176 | if (dev->ocrdma_dev) | ||
| 177 | _be_roce_dev_remove(dev); | ||
| 178 | } | ||
| 179 | ocrdma_drv = NULL; | ||
| 180 | mutex_unlock(&be_adapter_list_lock); | ||
| 181 | } | ||
| 182 | EXPORT_SYMBOL(be_roce_unregister_driver); | ||
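Editor's note: from the RoCE side, hooking in is a matter of filling struct ocrdma_driver (declared in be_roce.h below) and calling be_roce_register_driver(); be2net then replays add() — and open, for interfaces already up — for every adapter on its list. A hedged sketch; the three callbacks are illustrative stubs, not the real ocrdma entry points:

	static struct ocrdma_dev *my_add(struct be_dev_info *dev_info);
	static void my_remove(struct ocrdma_dev *dev);
	static void my_state_change(struct ocrdma_dev *dev, u32 new_state);

	static struct ocrdma_driver my_roce_drv = {
		.name			= "my_ocrdma",
		.add			= my_add,
		.remove			= my_remove,
		.state_change_handler	= my_state_change,
	};

	static int __init my_roce_init(void)
	{
		return be_roce_register_driver(&my_roce_drv);
	}

	static void __exit my_roce_exit(void)
	{
		be_roce_unregister_driver(&my_roce_drv);
	}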
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h new file mode 100644 index 000000000000..db4ea8081c07 --- /dev/null +++ b/drivers/net/ethernet/emulex/benet/be_roce.h | |||
| @@ -0,0 +1,75 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2005 - 2011 Emulex | ||
| 3 | * All rights reserved. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or | ||
| 6 | * modify it under the terms of the GNU General Public License version 2 | ||
| 7 | * as published by the Free Software Foundation. The full GNU General | ||
| 8 | * Public License is included in this distribution in the file called COPYING. | ||
| 9 | * | ||
| 10 | * Contact Information: | ||
| 11 | * linux-drivers@emulex.com | ||
| 12 | * | ||
| 13 | * Emulex | ||
| 14 | * 3333 Susan Street | ||
| 15 | * Costa Mesa, CA 92626 | ||
| 16 | */ | ||
| 17 | |||
| 18 | #ifndef BE_ROCE_H | ||
| 19 | #define BE_ROCE_H | ||
| 20 | |||
| 21 | #include <linux/pci.h> | ||
| 22 | #include <linux/netdevice.h> | ||
| 23 | |||
| 24 | struct ocrdma_dev; | ||
| 25 | |||
| 26 | enum be_interrupt_mode { | ||
| 27 | BE_INTERRUPT_MODE_MSIX = 0, | ||
| 28 | BE_INTERRUPT_MODE_INTX = 1, | ||
| 29 | BE_INTERRUPT_MODE_MSI = 2, | ||
| 30 | }; | ||
| 31 | |||
| 32 | #define MAX_ROCE_MSIX_VECTORS 16 | ||
| 33 | struct be_dev_info { | ||
| 34 | u8 __iomem *db; | ||
| 35 | u64 unmapped_db; | ||
| 36 | u32 db_page_size; | ||
| 37 | u32 db_total_size; | ||
| 38 | u64 dpp_unmapped_addr; | ||
| 39 | u32 dpp_unmapped_len; | ||
| 40 | struct pci_dev *pdev; | ||
| 41 | struct net_device *netdev; | ||
| 42 | u8 mac_addr[ETH_ALEN]; | ||
| 43 | u32 dev_family; | ||
| 44 | enum be_interrupt_mode intr_mode; | ||
| 45 | struct { | ||
| 46 | int num_vectors; | ||
| 47 | int start_vector; | ||
| 48 | u32 vector_list[MAX_ROCE_MSIX_VECTORS]; | ||
| 49 | } msix; | ||
| 50 | }; | ||
| 51 | |||
| 52 | /* The ocrdma driver registers its callback functions with the nic driver. */ | ||
| 53 | struct ocrdma_driver { | ||
| 54 | unsigned char name[32]; | ||
| 55 | struct ocrdma_dev *(*add) (struct be_dev_info *dev_info); | ||
| 56 | void (*remove) (struct ocrdma_dev *); | ||
| 57 | void (*state_change_handler) (struct ocrdma_dev *, u32 new_state); | ||
| 58 | }; | ||
| 59 | |||
| 60 | enum { | ||
| 61 | BE_DEV_UP = 0, | ||
| 62 | BE_DEV_DOWN = 1 | ||
| 63 | }; | ||
| 64 | |||
| 65 | /* APIs for the RoCE driver to register callback handlers, | ||
| 66 | * which are invoked on device add, remove, ifup, and ifdown | ||
| 67 | */ | ||
| 68 | int be_roce_register_driver(struct ocrdma_driver *drv); | ||
| 69 | void be_roce_unregister_driver(struct ocrdma_driver *drv); | ||
| 70 | |||
| 71 | /* API for RoCE driver to issue mailbox commands */ | ||
| 72 | int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, | ||
| 73 | int wrb_payload_size, u16 *cmd_status, u16 *ext_status); | ||
| 74 | |||
| 75 | #endif /* BE_ROCE_H */ | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index 8be20e7ea3d1..06fef5b44f77 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c | |||
| @@ -124,9 +124,6 @@ void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt) | |||
| 124 | 124 | ||
| 125 | spin_lock(&bitmap->lock); | 125 | spin_lock(&bitmap->lock); |
| 126 | bitmap_clear(bitmap->table, obj, cnt); | 126 | bitmap_clear(bitmap->table, obj, cnt); |
| 127 | bitmap->last = min(bitmap->last, obj); | ||
| 128 | bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) | ||
| 129 | & bitmap->mask; | ||
| 130 | bitmap->avail += cnt; | 127 | bitmap->avail += cnt; |
| 131 | spin_unlock(&bitmap->lock); | 128 | spin_unlock(&bitmap->lock); |
| 132 | } | 129 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 24429a99190d..68f5cd6cb3c7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c | |||
| @@ -118,6 +118,20 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags) | |||
| 118 | mlx4_dbg(dev, " %s\n", fname[i]); | 118 | mlx4_dbg(dev, " %s\n", fname[i]); |
| 119 | } | 119 | } |
| 120 | 120 | ||
| 121 | static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) | ||
| 122 | { | ||
| 123 | static const char * const fname[] = { | ||
| 124 | [0] = "RSS support", | ||
| 125 | [1] = "RSS Toeplitz Hash Function support", | ||
| 126 | [2] = "RSS XOR Hash Function support" | ||
| 127 | }; | ||
| 128 | int i; | ||
| 129 | |||
| 130 | for (i = 0; i < ARRAY_SIZE(fname); ++i) | ||
| 131 | if (fname[i] && (flags & (1LL << i))) | ||
| 132 | mlx4_dbg(dev, " %s\n", fname[i]); | ||
| 133 | } | ||
| 134 | |||
| 121 | int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg) | 135 | int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg) |
| 122 | { | 136 | { |
| 123 | struct mlx4_cmd_mailbox *mailbox; | 137 | struct mlx4_cmd_mailbox *mailbox; |
| @@ -346,6 +360,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
| 346 | #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29 | 360 | #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29 |
| 347 | #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b | 361 | #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b |
| 348 | #define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d | 362 | #define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d |
| 363 | #define QUERY_DEV_CAP_RSS_OFFSET 0x2e | ||
| 349 | #define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f | 364 | #define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f |
| 350 | #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33 | 365 | #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33 |
| 351 | #define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35 | 366 | #define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35 |
| @@ -390,6 +405,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
| 390 | #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 | 405 | #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 |
| 391 | #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 | 406 | #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 |
| 392 | 407 | ||
| 408 | dev_cap->flags2 = 0; | ||
| 393 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 409 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
| 394 | if (IS_ERR(mailbox)) | 410 | if (IS_ERR(mailbox)) |
| 395 | return PTR_ERR(mailbox); | 411 | return PTR_ERR(mailbox); |
| @@ -439,6 +455,17 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
| 439 | else | 455 | else |
| 440 | dev_cap->max_gso_sz = 1 << field; | 456 | dev_cap->max_gso_sz = 1 << field; |
| 441 | 457 | ||
| 458 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET); | ||
| 459 | if (field & 0x20) | ||
| 460 | dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR; | ||
| 461 | if (field & 0x10) | ||
| 462 | dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP; | ||
| 463 | field &= 0xf; | ||
| 464 | if (field) { | ||
| 465 | dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS; | ||
| 466 | dev_cap->max_rss_tbl_sz = 1 << field; | ||
| 467 | } else | ||
| 468 | dev_cap->max_rss_tbl_sz = 0; | ||
| 442 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET); | 469 | MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET); |
| 443 | dev_cap->max_rdma_global = 1 << (field & 0x3f); | 470 | dev_cap->max_rdma_global = 1 << (field & 0x3f); |
| 444 | MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET); | 471 | MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET); |
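Editor's note: the byte fetched from QUERY_DEV_CAP_RSS_OFFSET packs three facts: bit 5 flags XOR hashing, bit 4 flags Toeplitz hashing, and the low nibble is log2 of the maximum RSS table size. A worked decode with a made-up firmware byte:

	/* Sketch: field = 0x3a (illustrative) */
	u8 field = 0x3a;
	/* 0x3a & 0x20 -> set MLX4_DEV_CAP_FLAG2_RSS_XOR        */
	/* 0x3a & 0x10 -> set MLX4_DEV_CAP_FLAG2_RSS_TOP        */
	/* 0x3a & 0x0f = 0xa -> max_rss_tbl_sz = 1 << 10 = 1024 */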
| @@ -632,8 +659,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
| 632 | dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg); | 659 | dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg); |
| 633 | mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz); | 660 | mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz); |
| 634 | mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters); | 661 | mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters); |
| 662 | mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz); | ||
| 635 | 663 | ||
| 636 | dump_dev_cap_flags(dev, dev_cap->flags); | 664 | dump_dev_cap_flags(dev, dev_cap->flags); |
| 665 | dump_dev_cap_flags2(dev, dev_cap->flags2); | ||
| 637 | 666 | ||
| 638 | out: | 667 | out: |
| 639 | mlx4_free_cmd_mailbox(dev, mailbox); | 668 | mlx4_free_cmd_mailbox(dev, mailbox); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index e1a5fa56bcbc..64c0399e4b78 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h | |||
| @@ -79,6 +79,7 @@ struct mlx4_dev_cap { | |||
| 79 | u64 trans_code[MLX4_MAX_PORTS + 1]; | 79 | u64 trans_code[MLX4_MAX_PORTS + 1]; |
| 80 | u16 stat_rate_support; | 80 | u16 stat_rate_support; |
| 81 | u64 flags; | 81 | u64 flags; |
| 82 | u64 flags2; | ||
| 82 | int reserved_uars; | 83 | int reserved_uars; |
| 83 | int uar_size; | 84 | int uar_size; |
| 84 | int min_page_sz; | 85 | int min_page_sz; |
| @@ -110,6 +111,7 @@ struct mlx4_dev_cap { | |||
| 110 | u32 reserved_lkey; | 111 | u32 reserved_lkey; |
| 111 | u64 max_icm_sz; | 112 | u64 max_icm_sz; |
| 112 | int max_gso_sz; | 113 | int max_gso_sz; |
| 114 | int max_rss_tbl_sz; | ||
| 113 | u8 supported_port_types[MLX4_MAX_PORTS + 1]; | 115 | u8 supported_port_types[MLX4_MAX_PORTS + 1]; |
| 114 | u8 suggested_type[MLX4_MAX_PORTS + 1]; | 116 | u8 suggested_type[MLX4_MAX_PORTS + 1]; |
| 115 | u8 default_sense[MLX4_MAX_PORTS + 1]; | 117 | u8 default_sense[MLX4_MAX_PORTS + 1]; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 984ace44104f..2e024a68fa81 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
| @@ -272,10 +272,12 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
| 272 | dev->caps.max_msg_sz = dev_cap->max_msg_sz; | 272 | dev->caps.max_msg_sz = dev_cap->max_msg_sz; |
| 273 | dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); | 273 | dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); |
| 274 | dev->caps.flags = dev_cap->flags; | 274 | dev->caps.flags = dev_cap->flags; |
| 275 | dev->caps.flags2 = dev_cap->flags2; | ||
| 275 | dev->caps.bmme_flags = dev_cap->bmme_flags; | 276 | dev->caps.bmme_flags = dev_cap->bmme_flags; |
| 276 | dev->caps.reserved_lkey = dev_cap->reserved_lkey; | 277 | dev->caps.reserved_lkey = dev_cap->reserved_lkey; |
| 277 | dev->caps.stat_rate_support = dev_cap->stat_rate_support; | 278 | dev->caps.stat_rate_support = dev_cap->stat_rate_support; |
| 278 | dev->caps.max_gso_sz = dev_cap->max_gso_sz; | 279 | dev->caps.max_gso_sz = dev_cap->max_gso_sz; |
| 280 | dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; | ||
| 279 | 281 | ||
| 280 | /* Sense port always allowed on supported devices for ConnectX1 and 2 */ | 282 | /* Sense port always allowed on supported devices for ConnectX1 and 2 */ |
| 281 | if (dev->pdev->device != 0x1003) | 283 | if (dev->pdev->device != 0x1003) |
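[Editorial note] With flags2 and max_rss_tbl_sz copied from the transient mlx4_dev_cap into the long-lived mlx4_caps, the rest of the driver can test RSS support without re-issuing QUERY_DEV_CAP. A hedged sketch of a consumer (the helper itself is hypothetical; the flag tests use the enum added in device.h below):

    /* Sketch: prefer Toeplitz when available, fall back to XOR. */
    static int pick_rss_hash(struct mlx4_dev *dev)
    {
            if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS))
                    return -EOPNOTSUPP;     /* device has no RSS at all */
            if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)
                    return 1;               /* Toeplitz */
            if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)
                    return 0;               /* XOR */
            return -EOPNOTSUPP;             /* RSS advertised, no usable hash */
    }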
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 6d028247f79d..6e27fa99e8b9 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
| @@ -98,6 +98,12 @@ enum { | |||
| 98 | MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55 | 98 | MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55 |
| 99 | }; | 99 | }; |
| 100 | 100 | ||
| 101 | enum { | ||
| 102 | MLX4_DEV_CAP_FLAG2_RSS = 1LL << 0, | ||
| 103 | MLX4_DEV_CAP_FLAG2_RSS_TOP = 1LL << 1, | ||
| 104 | MLX4_DEV_CAP_FLAG2_RSS_XOR = 1LL << 2 | ||
| 105 | }; | ||
| 106 | |||
| 101 | #define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) | 107 | #define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) |
| 102 | 108 | ||
| 103 | enum { | 109 | enum { |
| @@ -292,11 +298,13 @@ struct mlx4_caps { | |||
| 292 | u32 max_msg_sz; | 298 | u32 max_msg_sz; |
| 293 | u32 page_size_cap; | 299 | u32 page_size_cap; |
| 294 | u64 flags; | 300 | u64 flags; |
| 301 | u64 flags2; | ||
| 295 | u32 bmme_flags; | 302 | u32 bmme_flags; |
| 296 | u32 reserved_lkey; | 303 | u32 reserved_lkey; |
| 297 | u16 stat_rate_support; | 304 | u16 stat_rate_support; |
| 298 | u8 port_width_cap[MLX4_MAX_PORTS + 1]; | 305 | u8 port_width_cap[MLX4_MAX_PORTS + 1]; |
| 299 | int max_gso_sz; | 306 | int max_gso_sz; |
| 307 | int max_rss_tbl_sz; | ||
| 300 | int reserved_qps_cnt[MLX4_NUM_QP_REGION]; | 308 | int reserved_qps_cnt[MLX4_NUM_QP_REGION]; |
| 301 | int reserved_qps; | 309 | int reserved_qps; |
| 302 | int reserved_qps_base[MLX4_NUM_QP_REGION]; | 310 | int reserved_qps_base[MLX4_NUM_QP_REGION]; |
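[Editorial note] The new flags2 enum deliberately mirrors the existing flags word, so the dump_dev_cap_flags2() call added in fw.c above can follow the same table-driven pattern as dump_dev_cap_flags(). That helper's body is not part of this excerpt; a plausible sketch, assuming the usual name-per-bit layout:

    /* Sketch only -- the real dump_dev_cap_flags2() is not shown here. */
    static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags2)
    {
            static const char * const names[] = {
                    [0] = "RSS support",            /* MLX4_DEV_CAP_FLAG2_RSS */
                    [1] = "RSS Toeplitz hash",      /* MLX4_DEV_CAP_FLAG2_RSS_TOP */
                    [2] = "RSS XOR hash",           /* MLX4_DEV_CAP_FLAG2_RSS_XOR */
            };
            int i;

            mlx4_dbg(dev, "DEV_CAP flags2:\n");
            for (i = 0; i < ARRAY_SIZE(names); ++i)
                    if (flags2 & (1LL << i))
                            mlx4_dbg(dev, "    %s\n", names[i]);
    }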
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 96005d75893c..338388ba260a 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h | |||
| @@ -234,7 +234,8 @@ struct mlx4_wqe_mlx_seg { | |||
| 234 | u8 owner; | 234 | u8 owner; |
| 235 | u8 reserved1[2]; | 235 | u8 reserved1[2]; |
| 236 | u8 opcode; | 236 | u8 opcode; |
| 237 | u8 reserved2[3]; | 237 | __be16 sched_prio; |
| 238 | u8 reserved2; | ||
| 238 | u8 size; | 239 | u8 size; |
| 239 | /* | 240 | /* |
| 240 | * [17] VL15 | 241 | * [17] VL15 |
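[Editorial note] The qp.h change carves a __be16 sched_prio field out of the former reserved2 bytes of the MLX segment, leaving the surrounding layout (opcode before, size after) and the overall WQE size unchanged. How senders populate it is not shown in this excerpt; a loudly hypothetical fragment:

    /* Hypothetical: stamp a scheduling priority while building an MLX
     * send WQE; the exact bit layout of sched_prio is an assumption. */
    struct mlx4_wqe_mlx_seg *mlx = wqe;
    mlx->sched_prio = cpu_to_be16(prio);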
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index b513f57e1725..3d81b90cc315 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h | |||
| @@ -160,7 +160,7 @@ struct ib_rmpp_hdr { | |||
| 160 | 160 | ||
| 161 | typedef u64 __bitwise ib_sa_comp_mask; | 161 | typedef u64 __bitwise ib_sa_comp_mask; |
| 162 | 162 | ||
| 163 | #define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << n)) | 163 | #define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << (n))) |
| 164 | 164 | ||
| 165 | /* | 165 | /* |
| 166 | * ib_sa_hdr and ib_sa_mad structures must be packed because they have | 166 | * ib_sa_hdr and ib_sa_mad structures must be packed because they have |
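[Editorial note] The IB_SA_COMP_MASK change is a textbook macro-hygiene fix: without parentheses around n, any argument built from an operator that binds looser than << shifts the wrong value. A concrete demonstration of the trap the parentheses close:

    unsigned int i = 3;
    unsigned long long bad  = 1ull << i & 0x7;      /* parses as (1ull << i) & 0x7 == 0 */
    unsigned long long good = 1ull << (i & 0x7);    /* == 8, as intended */

So IB_SA_COMP_MASK(i & 0x7) formerly expanded to cpu_to_be64(1ull << i & 0x7) and produced a zero mask; with the added parentheses it expands to the intended cpu_to_be64(1ull << (i & 0x7)).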
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index c3cca5a4dacd..07996af8265a 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
| @@ -605,7 +605,7 @@ enum ib_qp_type { | |||
| 605 | IB_QPT_UD, | 605 | IB_QPT_UD, |
| 606 | IB_QPT_RAW_IPV6, | 606 | IB_QPT_RAW_IPV6, |
| 607 | IB_QPT_RAW_ETHERTYPE, | 607 | IB_QPT_RAW_ETHERTYPE, |
| 608 | /* Save 8 for RAW_PACKET */ | 608 | IB_QPT_RAW_PACKET = 8, |
| 609 | IB_QPT_XRC_INI = 9, | 609 | IB_QPT_XRC_INI = 9, |
| 610 | IB_QPT_XRC_TGT, | 610 | IB_QPT_XRC_TGT, |
| 611 | IB_QPT_MAX | 611 | IB_QPT_MAX |
| @@ -964,7 +964,7 @@ struct ib_qp { | |||
| 964 | struct ib_srq *srq; | 964 | struct ib_srq *srq; |
| 965 | struct ib_xrcd *xrcd; /* XRC TGT QPs only */ | 965 | struct ib_xrcd *xrcd; /* XRC TGT QPs only */ |
| 966 | struct list_head xrcd_list; | 966 | struct list_head xrcd_list; |
| 967 | atomic_t usecnt; /* count times opened */ | 967 | atomic_t usecnt; /* count times opened, mcast attaches */ |
| 968 | struct list_head open_list; | 968 | struct list_head open_list; |
| 969 | struct ib_qp *real_qp; | 969 | struct ib_qp *real_qp; |
| 970 | struct ib_uobject *uobject; | 970 | struct ib_uobject *uobject; |
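[Editorial note] The first ib_verbs.h hunk promotes the value that was merely reserved into a real IB_QPT_RAW_PACKET QP type, keeping 8 so the existing numbering is undisturbed. A hedged sketch of requesting one through the ordinary verbs entry point (pd, send_cq and recv_cq are assumed to already exist; the capacities are illustrative):

    struct ib_qp_init_attr attr = {
            .qp_type     = IB_QPT_RAW_PACKET,       /* newly named by this patch */
            .send_cq     = send_cq,
            .recv_cq     = recv_cq,
            .sq_sig_type = IB_SIGNAL_ALL_WR,
            .cap = {
                    .max_send_wr  = 64,
                    .max_recv_wr  = 64,
                    .max_send_sge = 1,
                    .max_recv_sge = 1,
            },
    };
    struct ib_qp *qp = ib_create_qp(pd, &attr);

    if (IS_ERR(qp))
            return PTR_ERR(qp);

The second hunk is documentation only: the usecnt comment now records that the counter also tracks multicast attaches, not just opens.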
